repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxPutIfAbsentDuringJoinStressTest.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.Test;
/**
* Tests data loss during state transfer when the originator of a put operation becomes the primary owner of the
* modified key. See https://issues.jboss.org/browse/ISPN-3357
*
* @author Dan Berindei
*/
//unstable. test fails with DIST_SYNC and SCATTERED_SYNC (ISPN-3918)
@Test(groups = {"functional", "unstable"}, testName = "distribution.rehash.NonTxPutIfAbsentDuringJoinStressTest")
@CleanupAfterMethod
public class NonTxPutIfAbsentDuringJoinStressTest extends MultipleCacheManagersTest {

   private static final int NUM_WRITERS = 4;
   private static final int NUM_ORIGINATORS = 2;
   private static final int NUM_KEYS = 100;

   // Records, for each key, the value whose putIfAbsent won. Used after the writers
   // stop to verify exactly one writer succeeded per key and all caches agree.
   private final ConcurrentMap<String, String> insertedValues = new ConcurrentHashMap<>();
   // Signals the writer threads to finish their current pass and exit.
   private volatile boolean stop = false;

   @Override
   public Object[] factory() {
      return new Object[] {
            new NonTxPutIfAbsentDuringJoinStressTest().cacheMode(CacheMode.DIST_SYNC),
      };
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      // Start with a 2-node cluster; two more nodes join during the test.
      ConfigurationBuilder c = getConfigurationBuilder();
      addClusterEnabledCacheManager(c);
      addClusterEnabledCacheManager(c);
      waitForClusterToForm();
   }

   private ConfigurationBuilder getConfigurationBuilder() {
      return getDefaultClusteredCacheConfig(cacheMode, false);
   }

   /**
    * Runs {@link #NUM_WRITERS} concurrent writers doing putIfAbsent on a shared key
    * set while two nodes join the cluster, then verifies that no putIfAbsent
    * succeeded twice for the same key and that every cache holds the winning value.
    */
   public void testNodeJoiningDuringPutIfAbsent() throws Exception {
      // Future<?>[] instead of the raw Future[] type
      Future<?>[] futures = new Future<?>[NUM_WRITERS];
      for (int i = 0; i < NUM_WRITERS; i++) {
         final int writerIndex = i;
         futures[i] = fork(() -> {
            while (!stop) {
               for (int j = 0; j < NUM_KEYS; j++) {
                  // Writers are spread over the first NUM_ORIGINATORS nodes
                  Cache<Object, Object> cache = cache(writerIndex % NUM_ORIGINATORS);
                  String key = "key_" + j;
                  String value = "value_" + j + "_" + writerIndex;
                  Object oldValue = cache.putIfAbsent(key, value);
                  Object newValue = cache.get(key);
                  if (oldValue == null) {
                     // succeeded
                     log.tracef("Successfully inserted value %s for key %s", value, key);
                     assertEquals(value, newValue);
                     boolean isFirst = insertedValues.putIfAbsent(key, value) == null;
                     assertTrue("A second putIfAbsent succeeded for " + key, isFirst);
                  } else {
                     // failed
                     assertEquals(oldValue, newValue);
                  }
               }
            }
         });
      }

      addClusterEnabledCacheManager(getConfigurationBuilder());
      waitForClusterToForm();
      addClusterEnabledCacheManager(getConfigurationBuilder());
      waitForClusterToForm();

      stop = true;
      // Join ALL the writers before verifying: checking the caches while other
      // writers are still running races with a writer that has already done
      // cache.putIfAbsent but not yet recorded the value in insertedValues,
      // which would make the cross-check below fail spuriously.
      for (int i = 0; i < NUM_WRITERS; i++) {
         futures[i].get(10, TimeUnit.SECONDS);
      }
      // Every cache must hold exactly the value whose putIfAbsent won (or null
      // for keys nobody managed to insert).
      for (int j = 0; j < NUM_KEYS; j++) {
         String key = "key_" + j;
         for (int k = 0; k < caches().size(); k++) {
            assertEquals(insertedValues.get(key), cache(k).get(key));
         }
      }
      // No locks may remain held on any node after the writers are done.
      for (int i = 0; i < caches().size(); i++) {
         LockManager lockManager = advancedCache(i).getLockManager();
         assertEquals(0, lockManager.getNumberOfLocksHeld());
         for (int j = 0; j < NUM_KEYS; j++) {
            String key = "key_" + j;
            assertFalse(lockManager.isLocked(key));
         }
      }
   }
}
| 4,133
| 35.910714
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxOriginatorBecomingPrimaryOwnerTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.test.TestingUtil.extractCacheTopology;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.distribution.TriangleDistributionInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
/**
* Tests data loss during state transfer when the originator of a put operation becomes the primary owner of the
* modified key. See https://issues.jboss.org/browse/ISPN-3366
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxOriginatorBecomingPrimaryOwnerTest")
@CleanupAfterMethod
public class NonTxOriginatorBecomingPrimaryOwnerTest extends MultipleCacheManagersTest {

   // Number of iterations; each iteration kills and restarts the primary owner.
   private static final int NUM_KEYS = 10;

   @Override
   protected void createCacheManagers() throws Throwable {
      // Non-transactional DIST_SYNC cluster of 3 nodes.
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.clustering().cacheMode(CacheMode.DIST_SYNC);
      c.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
      createCluster(TestDataSCI.INSTANCE, c, 3);
      waitForClusterToForm();
   }

   public void testPrimaryOwnerLeavingDuringPut() throws Exception {
      doTest(false);
   }

   public void testPrimaryOwnerLeavingDuringPutIfAbsent() throws Exception {
      doTest(true);
   }

   /**
    * Starts a put (or putIfAbsent) from cache0 for a key whose primary owner is
    * cache1, blocks the command before the distribution interceptor, stops cache1
    * so the topology changes mid-operation, then unblocks the command and verifies
    * the retried operation still succeeds and the value is readable.
    *
    * @param conditional {@code true} to use putIfAbsent, {@code false} for a plain put
    */
   private void doTest(final boolean conditional) throws Exception {
      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      AdvancedCache<Object, Object> cache1 = advancedCache(1);
      AdvancedCache<Object, Object> cache2 = advancedCache(2);

      // Every PutKeyValueCommand will be blocked before reaching the distribution interceptor
      CyclicBarrier distInterceptorBarrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor = new BlockingInterceptor<>(distInterceptorBarrier, PutKeyValueCommand.class, false, false);
      cache0.getAsyncInterceptorChain().addInterceptorBefore(blockingInterceptor, TriangleDistributionInterceptor.class);

      for (int i = 0; i < NUM_KEYS; i++) {
         // Try to put a key/value from cache0 with cache1 the primary owner
         final MagicKey key = new MagicKey("key" + i, cache1);
         Future<Object> future = fork(() -> conditional ? cache0.putIfAbsent(key, "v") : cache0.put(key, "v"));

         // Wait for the put command to pass through EntryWrappingInterceptor
         distInterceptorBarrier.await(10, TimeUnit.SECONDS);

         // Stop blocking new commands, to allow state transfer to finish
         blockingInterceptor.suspend(true);

         // Kill cache1
         cache1.stop();

         // Wait for the new topology to be installed
         TestingUtil.waitForNoRebalance(cache0, cache2);

         // Resume blocking new commands
         blockingInterceptor.suspend(false);

         // Unblock the command
         distInterceptorBarrier.await(10, TimeUnit.SECONDS);

         // StateTransferInterceptor retries the command, and it should block again in BlockingInterceptor.
         // NOTE(review): the barrier appears to trip twice per blocked command
         // (before and after the invocation) — confirm against BlockingInterceptor.
         distInterceptorBarrier.await(10, TimeUnit.SECONDS);
         distInterceptorBarrier.await(10, TimeUnit.SECONDS);

         if (extractCacheTopology(cache2).getDistribution(key).isPrimary()) {
            // cache2 forwards the command back to cache0, blocking again
            distInterceptorBarrier.await(10, TimeUnit.SECONDS);
            distInterceptorBarrier.await(10, TimeUnit.SECONDS);
         }

         // Check that the put command didn't fail
         Object result = future.get(10, TimeUnit.SECONDS);
         assertNull(result);
         log.tracef("Put operation is done");

         // Check the value on the remaining node
         assertEquals("v", cache0.get(key));

         // Prepare for the next iteration...
         cache1.start();
         TestingUtil.waitForNoRebalance(cache0, cache1, cache2);
      }
   }
}
| 4,549
| 40.363636
| 138
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/WorkDuringJoinTest.java
|
package org.infinispan.distribution.rehash;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.distribution.MagicKey;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* Tests performing some work on the joiner during a JOIN
*
* @author Manik Surtani
* @since 4.0
*/
// TODO This test makes no sense anymore, now that a joiner blocks until the join completes, before returning from cache.start(). This test needs to be re-thought and redesigned to test the eventual consistency (UnsureResponse) of a remote GET properly.
@Test(groups = "unstable", testName = "distribution.rehash.WorkDuringJoinTest", description = "original group: functional")
public class WorkDuringJoinTest extends BaseDistFunctionalTest<Object, String> {
EmbeddedCacheManager joinerManager;
Cache<Object, String> joiner;
public WorkDuringJoinTest() {
INIT_CLUSTER_SIZE = 2;
performRehashing = true;
}
private List<MagicKey> init() {
List<MagicKey> keys = new ArrayList<>(Arrays.asList(
new MagicKey("k1", c1), new MagicKey("k2", c2),
new MagicKey("k3", c1), new MagicKey("k4", c2)
));
int i = 0;
for (Cache<Object, String> c : caches) c.put(keys.get(i++), "v" + i);
log.infof("Initialized with keys %s", keys);
return keys;
}
Address startNewMember() {
joinerManager = addClusterEnabledCacheManager();
joinerManager.defineConfiguration(cacheName, configuration.build());
joiner = joinerManager.getCache(cacheName);
return manager(joiner).getAddress();
}
public void testJoinAndGet() {
List<MagicKey> keys = init();
KeyPartitioner keyPartitioner = TestingUtil.extractComponent(c1, KeyPartitioner.class);
ConsistentHash chOld = getCacheTopology(c1).getWriteConsistentHash();
Address joinerAddress = startNewMember();
List<Address> newMembers = new ArrayList<>(chOld.getMembers());
newMembers.add(joinerAddress);
DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
ConsistentHash chNew = chf.rebalance(chf.updateMembers((DefaultConsistentHash) chOld, newMembers, null));
// which key should me mapped to the joiner?
MagicKey keyToTest = null;
for (MagicKey k: keys) {
int segment = keyPartitioner.getSegment(k);
if (chNew.isSegmentLocalToNode(joinerAddress, segment)) {
keyToTest = k;
break;
}
}
if (keyToTest == null) throw new NullPointerException("Couldn't find a key mapped to J!");
assert joiner.get(keyToTest) != null;
}
}
| 3,108
| 37.8625
| 254
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/L1StateTransferRemovesValueTest.java
|
package org.infinispan.distribution.rehash;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.distribution.DistributionTestHelper;
import org.infinispan.distribution.L1Manager;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.statetransfer.StateConsumer;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Test that ensure when L1 cache is enabled that if writes occurs during a state transfer and vice versa that the
* proper data is available.
*
* @author William Burns
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.rehash.L1StateTransferRemovesValueTest")
public class L1StateTransferRemovesValueTest extends BaseDistFunctionalTest<String, String> {

   public L1StateTransferRemovesValueTest() {
      INIT_CLUSTER_SIZE = 3;
      numOwners = 2;
      performRehashing = true;
      l1CacheEnabled = true;
      cleanup = CleanupPhase.AFTER_METHOD;
   }

   // Shared fixture: one key and its value before/after the concurrent write.
   private final String key = this.getClass() + "-key";
   private final String startValue = "starting-value";
   private final String newValue = "new-value";

   // Single-segment consistent hash whose owner indexes the tests switch at will;
   // caches 0 and 1 are the initial owners.
   protected final ControlledConsistentHashFactory factory = new ControlledConsistentHashFactory.Default(0, 1);

   @AfterMethod
   public void resetFactory() {
      // Restore the default ownership so the next test starts from a known layout.
      factory.setOwnerIndexes(0, 1);
   }

   @Override
   protected ConfigurationBuilder buildConfiguration() {
      ConfigurationBuilder builder = super.buildConfiguration();
      builder.clustering().hash().
            consistentHashFactory(factory).
            numSegments(1);
      return builder;
   }

   /**
    * Verifies that when a node (c3) is registered as an L1 requestor for a key it
    * never actually cached, a write performed while the rebalance topologies are
    * frozen still leaves every node with the correct container/L1 state.
    */
   @Test
   public void testStateTransferWithRequestorsForNonExistentL1Value() throws Exception {
      // First 2 caches are primary and backup respectively at the beginning
      L1Manager l1Manager = TestingUtil.extractComponent(c1, L1Manager.class);
      l1Manager.addRequestor(key, c3.getCacheManager().getAddress());
      assertNull(c3.get(key));
      // Block the rebalance confirmation on nonOwnerCache
      CheckPoint checkPoint = new CheckPoint();
      // We have to wait until non owner has the new topology installed before transferring state
      waitUntilToplogyInstalled(c3, checkPoint);
      // Now make sure the owners doesn't have the new topology installed
      waitUntilBeforeTopologyInstalled(c1, checkPoint);
      waitUntilBeforeTopologyInstalled(c2, checkPoint);
      // Now force 1 and 3 to be owners so then 3 will get invalidation and state transfer
      factory.setOwnerIndexes(0, 2);
      EmbeddedCacheManager cm = addClusterEnabledCacheManager();
      cm.defineConfiguration(cacheName, configuration.build());
      Future<Void> join = fork(() -> {
         waitForClusterToForm(cacheName);
         log.debug("4th has joined");
         return null;
      });
      // Wait until each node has reached its instrumented topology hook
      checkPoint.awaitStrict("post_topology_installed_invoked_" + c3, 10, TimeUnit.SECONDS);
      checkPoint.awaitStrict("pre_topology_installed_invoked_" + c1, 10, TimeUnit.SECONDS);
      checkPoint.awaitStrict("pre_topology_installed_invoked_" + c2, 10, TimeUnit.SECONDS);
      // Perform the write while the topology installation is frozen
      assertNull(c1.put(key, newValue));
      // Release the hooks and let the join complete
      checkPoint.triggerForever("post_topology_installed_released_" + c3);
      checkPoint.triggerForever("pre_topology_installed_released_" + c1);
      checkPoint.triggerForever("pre_topology_installed_released_" + c2);
      join.get(10, TimeUnit.SECONDS);
      // New owners (c1, c3) hold the entry in their containers; non-owners have no stale L1
      assertIsInContainerImmortal(c1, key);
      assertIsNotInL1(c2, key);
      assertIsInContainerImmortal(c3, key);
      assertIsNotInL1(cm.getCache(cacheName), key);
      // Make sure the ownership is all good still
      assertTrue(DistributionTestHelper.isOwner(c1, key));
      assertFalse(DistributionTestHelper.isOwner(c2, key));
      assertTrue(DistributionTestHelper.isOwner(c3, key));
      assertFalse(DistributionTestHelper.isOwner(cm.getCache(cacheName), key));
   }

   /**
    * Same scenario but c3 already holds the value in L1 and an L1 invalidation is
    * blocked just before commit while the rebalance is in flight; afterwards no
    * node may retain a stale L1 entry.
    */
   @Test(groups = "unstable")
   public void testStateTransferWithL1InvalidationAboutToBeCommitted() throws Exception {
      // First 2 caches are primary and backup respectively at the beginning
      c1.put(key, startValue);
      assertEquals(startValue, c3.get(key));
      assertIsInL1(c3, key);
      // Block the L1 invalidation on c3 after EntryWrappingInterceptor
      CyclicBarrier barrier = new CyclicBarrier(2);
      TestingUtil.extractInterceptorChain(c3)
            .addInterceptorAfter(new BlockingInterceptor<>(barrier, InvalidateL1Command.class, true, false),
                                 EntryWrappingInterceptor.class);
      Future<String> future = c1.putAsync(key, newValue);
      barrier.await(10, TimeUnit.SECONDS);
      // Block the rebalance confirmation on nonOwnerCache
      CheckPoint checkPoint = new CheckPoint();
      // We have to wait until non owner has the new topology installed before transferring state
      waitUntilToplogyInstalled(c3, checkPoint);
      // Now make sure the owners doesn't have the new topology installed
      waitUntilBeforeTopologyInstalled(c1, checkPoint);
      waitUntilBeforeTopologyInstalled(c2, checkPoint);
      // Now force 1 and 3 to be owners so then 3 will get invalidation and state transfer
      factory.setOwnerIndexes(0, 2);
      EmbeddedCacheManager cm = addClusterEnabledCacheManager();
      cm.defineConfiguration(cacheName, configuration.build());
      Future<Void> join = fork(() -> {
         waitForClusterToForm(cacheName);
         log.debug("4th has joined");
         return null;
      });
      // Wait until each node has reached its instrumented topology hook
      checkPoint.awaitStrict("post_topology_installed_invoked_" + c3, 10, TimeUnit.SECONDS);
      checkPoint.awaitStrict("pre_topology_installed_invoked_" + c1, 10, TimeUnit.SECONDS);
      checkPoint.awaitStrict("pre_topology_installed_invoked_" + c2, 10, TimeUnit.SECONDS);
      // Let the blocked L1 invalidation proceed; the put returns the previous value
      barrier.await(10, TimeUnit.SECONDS);
      assertEquals(startValue, future.get(10, TimeUnit.SECONDS));
      // Release the hooks and let the join complete
      checkPoint.triggerForever("post_topology_installed_released_" + c3);
      checkPoint.triggerForever("pre_topology_installed_released_" + c1);
      checkPoint.triggerForever("pre_topology_installed_released_" + c2);
      join.get(10, TimeUnit.SECONDS);
      assertIsInContainerImmortal(c1, key);
      assertIsNotInL1(c2, key);
      assertIsInContainerImmortal(c3, key);
      assertIsNotInL1(cm.getCache(cacheName), key);
      // Make sure the ownership is all good still
      assertTrue(DistributionTestHelper.isOwner(c1, key));
      assertFalse(DistributionTestHelper.isOwner(c2, key));
      assertTrue(DistributionTestHelper.isOwner(c3, key));
      assertFalse(DistributionTestHelper.isOwner(cm.getCache(cacheName), key));
   }

   /**
    * Replaces the cache's StateConsumer with a delegating mock that signals
    * "pre_topology_installed_invoked_&lt;cache&gt;" BEFORE a topology update is applied
    * and waits for "pre_topology_installed_released_&lt;cache&gt;" before proceeding.
    */
   protected void waitUntilBeforeTopologyInstalled(final Cache<?, ?> cache, final CheckPoint checkPoint) {
      StateConsumer sc = TestingUtil.extractComponent(cache, StateConsumer.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(sc);
      StateConsumer mockConsumer = mock(StateConsumer.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         // Wait for main thread to sync up
         checkPoint.trigger("pre_topology_installed_invoked_" + cache);
         // Now wait until main thread lets us through
         checkPoint.awaitStrict("pre_topology_installed_released_" + cache, 10, TimeUnit.SECONDS);
         return forwardedAnswer.answer(invocation);
      }).when(mockConsumer).onTopologyUpdate(any(CacheTopology.class), anyBoolean());
      TestingUtil.replaceComponent(cache, StateConsumer.class, mockConsumer, true);
   }

   /**
    * Replaces the cache's StateTransferLock with a delegating mock that signals
    * "post_topology_installed_invoked_&lt;cache&gt;" AFTER a topology is installed and
    * waits for "post_topology_installed_released_&lt;cache&gt;" before returning.
    * NOTE(review): method name has a typo ("Toplogy"); kept as-is because renaming
    * a protected method could break subclasses — fix in a dedicated change.
    */
   protected void waitUntilToplogyInstalled(final Cache<?, ?> cache, final CheckPoint checkPoint) {
      StateTransferLock sc = TestingUtil.extractComponent(cache, StateTransferLock.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(sc);
      StateTransferLock mockConsumer = mock(StateTransferLock.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         Object answer = forwardedAnswer.answer(invocation);
         // Wait for main thread to sync up
         checkPoint.trigger("post_topology_installed_invoked_" + cache);
         // Now wait until main thread lets us through
         checkPoint.awaitStrict("post_topology_installed_released_" + cache, 10, TimeUnit.SECONDS);
         return answer;
      }).when(mockConsumer).notifyTopologyInstalled(anyInt());
      TestingUtil.replaceComponent(cache, StateTransferLock.class, mockConsumer, true);
   }
}
| 9,570
| 42.504545
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashWithL1Test.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.marshall.Externalizer;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* Tests rehashing with distributed caches with L1 enabled.
*
* @author Galder Zamarreño
* @since 5.2
*/
@Test(groups = {"functional", "unstable"}, testName = "distribution.rehash.RehashWithL1Test", description = "See ISPN-7801")
public class RehashWithL1Test extends MultipleCacheManagersTest {

   ConfigurationBuilder builder;

   @Override
   protected void createCacheManagers() throws Throwable {
      // One segment, one owner: the custom CH factory below always assigns the
      // segment to the LAST member, so killing that member drops every owned copy.
      MyBaseControlledConsistentHashFactory chf = new MyBaseControlledConsistentHashFactory();
      builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
      builder.clustering().hash().numSegments(1).numOwners(1).consistentHashFactory(chf);
      builder.clustering().l1().enable().lifespan(10, TimeUnit.MINUTES);
      createClusteredCaches(3, builder);
   }

   /**
    * Populates L1 on every node, kills the sole owner, verifies the entries survive
    * via L1, lets a joiner take ownership (which discards the L1 copies), and
    * finally checks that removals leave every cache empty.
    */
   public void testPutWithRehashAndCacheClear() throws Exception {
      int opCount = 10;
      for (int i = 0; i < opCount; i++) {
         cache(1).put("k" + i, "some data");
      }

      // Read each key on every node so the non-owners pull the value into L1
      for (int j = 0; j < caches().size(); j++) {
         log.debugf("Populating L1 on %s", address(j));
         for (int i = 0; i < opCount; i++) {
            assertEquals("Wrong value for k" + i, "some data", cache(j).get("k" + i));
         }
      }

      int killIndex = caches().size() - 1;
      log.debugf("Killing node %s", address(killIndex));
      killMember(killIndex);

      // All entries were owned by the killed node, but they survive in the L1 of cache(1)
      for (int j = 0; j < caches().size(); j++) {
         log.debugf("Checking values on %s", address(j));
         for (int i = 0; i < opCount; i++) {
            String key = "k" + i;
            assertEquals("Wrong value for key " + key, "some data", cache(j).get(key));
         }
      }

      log.debugf("Starting a new joiner");
      EmbeddedCacheManager cm = addClusterEnabledCacheManager(builder);
      cm.getCache();

      // State transfer won't copy L1 entries to cache(2), and they're deleted on cache(1) afterwards
      // Note: we would need eventually() if we checked the data container directly
      for (int j = 0; j < caches().size() - 1; j++) {
         log.debugf("Checking values on %s", address(j));
         for (int i = 0; i < opCount; i++) {
            assertNull("wrong value for k" + i, cache(j).get("k" + i));
         }
      }

      for (int i = 0; i < opCount; i++) {
         cache(0).remove("k" + i);
      }

      // One messaged assertion per cache (the original had an extra message-less
      // duplicate of the cache(0) check, which added no coverage)
      for (int i = 0; i < opCount; i++) {
         String key = "k" + i;
         assertFalse("Key: " + key + " is present in cache at " + cache(0),
               cache(0).containsKey(key));
         assertFalse("Key: " + key + " is present in cache at " + cache(1),
               cache(1).containsKey(key));
         assertFalse("Key: " + key + " is present in cache at " + cache(2),
               cache(2).containsKey(key));
      }

      assertEquals(0, cache(0).size());
      assertEquals(0, cache(1).size());
      assertEquals(0, cache(2).size());
   }

   /** Consistent hash factory that maps the single segment to the last member. */
   @SerializeWith(MyBaseControlledConsistentHashFactory.Ext.class)
   private static class MyBaseControlledConsistentHashFactory extends BaseControlledConsistentHashFactory<DefaultConsistentHash> {
      public MyBaseControlledConsistentHashFactory() {
         super(new DefaultTrait(), 1);
      }

      @Override
      protected int[][] assignOwners(int numSegments, List<Address> members) {
         // Sole owner of the sole segment is always the newest (last) member
         return new int[][]{{members.size() - 1}};
      }

      /** Stateless externalizer: the factory has no fields, so reading creates a fresh instance. */
      public static final class Ext implements Externalizer<MyBaseControlledConsistentHashFactory> {
         @Override
         public void writeObject(ObjectOutput output, MyBaseControlledConsistentHashFactory object) {
            // No-op
         }

         @Override
         public MyBaseControlledConsistentHashFactory readObject(ObjectInput input) {
            return new MyBaseControlledConsistentHashFactory();
         }
      }
   }
}
| 4,799
| 37.095238
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/PessimisticStateTransferLocksTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnComponentMethod;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnGlobalComponentMethod;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchMethodCall;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collections;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.statetransfer.StateConsumer;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.InvocationMatcher;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Tests that state transfer properly replicates locks in a pessimistic cache, when the
* originator of the transaction is/was the primary owner.
*
* See ISPN-4091, ISPN-4108
*
* @author Dan Berindei
* @since 7.0
*/
@Test(groups = "functional", testName = "distribution.rehash.PessimisticStateTransferLocksTest")
public class PessimisticStateTransferLocksTest extends MultipleCacheManagersTest {
private static final String KEY = "key";
private static final String VALUE = "value";
{
cleanup = CleanupPhase.AFTER_METHOD;
}
private StateSequencer sequencer;
private ControlledConsistentHashFactory consistentHashFactory;
@AfterMethod(alwaysRun = true)
public void printSequencerState() {
log.debugf("Sequencer state: %s", sequencer);
if (sequencer != null) {
sequencer.stop();
sequencer = null;
}
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder c = getConfigurationBuilder();
addClusterEnabledCacheManager(c);
addClusterEnabledCacheManager(c);
addClusterEnabledCacheManager(c);
waitForClusterToForm();
}
protected ConfigurationBuilder getConfigurationBuilder() {
consistentHashFactory = new ControlledConsistentHashFactory.Default(0, 1);
ConfigurationBuilder c = new ConfigurationBuilder();
c.clustering().cacheMode(CacheMode.DIST_SYNC);
c.clustering().hash().consistentHashFactory(consistentHashFactory).numSegments(1);
c.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
c.transaction().lockingMode(LockingMode.PESSIMISTIC);
return c;
}
public void testPutStartedBeforeRebalance() throws Exception {
sequencer = new StateSequencer();
sequencer.logicalThread("tx", "tx:perform_op", "tx:check_locks", "tx:before_commit", "tx:after_commit");
sequencer.logicalThread("rebalance", "rebalance:before_get_tx", "rebalance:after_get_tx",
"rebalance:before_confirm", "rebalance:end");
sequencer.order("tx:perform_op", "rebalance:before_get_tx", "rebalance:after_get_tx", "tx:check_locks",
"rebalance:before_confirm", "rebalance:end", "tx:before_commit");
startTxWithPut();
startRebalance();
checkLocksBeforeCommit(false);
waitRebalanceEnd();
endTx();
checkLocksAfterCommit();
}
public void testLockStartedBeforeRebalance() throws Exception {
sequencer = new StateSequencer();
sequencer.logicalThread("tx", "tx:perform_op", "tx:check_locks", "tx:before_commit", "tx:after_commit");
sequencer.logicalThread("rebalance", "rebalance:before_get_tx", "rebalance:after_get_tx",
"rebalance:before_confirm", "rebalance:end");
sequencer.order("tx:perform_op", "rebalance:before_get_tx", "rebalance:after_get_tx", "tx:check_locks",
"rebalance:before_confirm", "rebalance:end", "tx:before_commit");
startTxWithLock();
startRebalance();
checkLocksBeforeCommit(false);
waitRebalanceEnd();
endTx();
checkLocksAfterCommit();
}
public void testPutStartedDuringRebalance() throws Exception {
sequencer = new StateSequencer();
sequencer.logicalThread("tx", "tx:perform_op", "tx:check_locks", "tx:before_commit",
"tx:after_commit");
sequencer.logicalThread("rebalance", "rebalance:before_get_tx", "rebalance:after_get_tx",
"rebalance:before_confirm", "rebalance:end");
sequencer.order("rebalance:after_get_tx", "tx:perform_op", "tx:check_locks",
"rebalance:before_confirm", "rebalance:end", "tx:before_commit");
startRebalance();
startTxWithPut();
checkLocksBeforeCommit(true);
waitRebalanceEnd();
endTx();
checkLocksAfterCommit();
}
public void testLockStartedDuringRebalance() throws Exception {
sequencer = new StateSequencer();
sequencer.logicalThread("tx", "tx:perform_op", "tx:check_locks", "tx:before_commit", "tx:after_commit");
sequencer.logicalThread("rebalance", "rebalance:before_get_tx", "rebalance:after_get_tx",
"rebalance:before_confirm", "rebalance:end");
sequencer.order("rebalance:after_get_tx", "tx:perform_op", "tx:check_locks",
"rebalance:before_confirm", "rebalance:end", "tx:before_commit");
startRebalance();
startTxWithLock();
checkLocksBeforeCommit(true);
waitRebalanceEnd();
endTx();
checkLocksAfterCommit();
}
private void startTxWithPut() throws Exception {
sequencer.enter("tx:perform_op");
tm(0).begin();
cache(0).put(KEY, VALUE);
sequencer.exit("tx:perform_op");
}
private void startTxWithLock() throws Exception {
sequencer.enter("tx:perform_op");
tm(0).begin();
advancedCache(0).lock(KEY);
sequencer.exit("tx:perform_op");
}
private void startRebalance() throws Exception {
InvocationMatcher rebalanceCompletedMatcher = matchMethodCall("handleRebalancePhaseConfirm")
.withParam(1, address(2)).matchCount(0).build();
advanceOnGlobalComponentMethod(sequencer, manager(0), ClusterTopologyManager.class, rebalanceCompletedMatcher)
.before("rebalance:before_confirm");
InvocationMatcher localRebalanceMatcher = matchMethodCall("onTopologyUpdate")
.withParam(1, true).matchCount(0).build();
advanceOnComponentMethod(sequencer, cache(2), StateConsumer.class, localRebalanceMatcher)
.before("rebalance:before_get_tx").afterAsync("rebalance:after_get_tx");
consistentHashFactory.setOwnerIndexes(2, 1);
consistentHashFactory.triggerRebalance(cache(0));
}
private void waitRebalanceEnd() throws Exception {
sequencer.advance("rebalance:end");
TestingUtil.waitForNoRebalance(caches());
}
private void endTx() throws Exception {
sequencer.advance("tx:before_commit");
tm(0).commit();
}
private void checkLocksBeforeCommit(boolean backupLockOnCache1) throws Exception {
sequencer.enter("tx:check_locks");
assertFalse(getTransactionTable(cache(0)).getLocalTransactions().isEmpty());
assertTrue(getTransactionTable(cache(0)).getRemoteTransactions().isEmpty());
LocalTransaction localTx = getTransactionTable(cache(0)).getLocalTransactions().iterator().next();
assertEquals(Collections.singleton(KEY), localTx.getLockedKeys());
assertEquals(Collections.emptySet(), localTx.getBackupLockedKeys());
assertTrue(getTransactionTable(cache(1)).getLocalTransactions().isEmpty());
assertEquals(backupLockOnCache1, !getTransactionTable(cache(1)).getRemoteTransactions().isEmpty());
assertTrue(getTransactionTable(cache(2)).getLocalTransactions().isEmpty());
assertFalse(getTransactionTable(cache(2)).getRemoteTransactions().isEmpty());
RemoteTransaction remoteTx = getTransactionTable(cache(2)).getRemoteTransactions().iterator().next();
assertEquals(Collections.emptySet(), remoteTx.getLockedKeys());
assertEquals(Collections.singleton(KEY), remoteTx.getBackupLockedKeys());
sequencer.exit("tx:check_locks");
}
private void checkLocksAfterCommit() {
for (Cache<Object, Object> c : caches()) {
final TransactionTable txTable = getTransactionTable(c);
assertTrue(txTable.getLocalTransactions().isEmpty());
eventuallyEquals(0, () -> txTable.getRemoteTransactions().size());
}
}
   // Convenience accessor for the cache's internal TransactionTable component.
   private TransactionTable getTransactionTable(Cache<Object, Object> c) {
      return TestingUtil.extractComponent(c, TransactionTable.class);
   }
}
| 9,009
| 40.906977
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxPrimaryOwnerLeavingTest.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.TopologyChanged;
import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.ControlledRpcManager;
import org.infinispan.util.concurrent.ReclosableLatch;
import org.infinispan.util.concurrent.TimeoutException;
import org.testng.annotations.Test;
/**
* Tests data loss during state transfer when the primary owner of a key leaves during a put operation.
* See https://issues.jboss.org/browse/ISPN-3366
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxPrimaryOwnerLeavingTest")
@CleanupAfterMethod
public class NonTxPrimaryOwnerLeavingTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      // Three non-transactional DIST_SYNC nodes; node 1 is later stopped while
      // a write initiated on node 0 is in flight.
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.clustering().cacheMode(CacheMode.DIST_SYNC);
      c.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
      createCluster(TestDataSCI.INSTANCE, c, 3);
      waitForClusterToForm();
   }

   @Test(groups = "unstable")
   public void testPrimaryOwnerLeavingDuringPut() throws Exception {
      doTest(TestWriteOperation.PUT_CREATE, false);
   }

   public void testPrimaryOwnerLeavingDuringPutIfAbsent() throws Exception {
      doTest(TestWriteOperation.PUT_IF_ABSENT, false);
   }

   public void testPrimaryOwnerLeaveDuringPutAll() throws Exception {
      doTest(TestWriteOperation.PUT_MAP_CREATE, false);
   }

   public void testPrimaryOwnerLeaveDuringPutAll2() throws Exception {
      doTest(TestWriteOperation.PUT_MAP_CREATE, true);
   }

   /**
    * Performs {@code operation} from node 0 on a key whose primary owner is
    * node 1, stops node 1 while the remote write command is blocked, and then
    * verifies that the retried operation succeeds and the value is readable on
    * the surviving nodes.
    *
    * @param operation                 the write operation under test
    * @param blockTopologyOnOriginator when {@code true}, the post-leave
    *                                  topology update is held back on the
    *                                  originator until after the failed
    *                                  response has been received
    */
   private void doTest(TestWriteOperation operation, boolean blockTopologyOnOriginator) throws Exception {
      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      AdvancedCache<Object, Object> cache1 = advancedCache(1);
      AdvancedCache<Object, Object> cache2 = advancedCache(2);
      TopologyUpdateListener listener0 = new TopologyUpdateListener();
      cache0.addListener(listener0);
      TopologyUpdateListener listener2 = new TopologyUpdateListener();
      cache2.addListener(listener2);

      // Block remote put commands invoked from cache0
      ControlledRpcManager crm = ControlledRpcManager.replaceRpcManager(cache0);
      crm.excludeCommands(StateTransferStartCommand.class, StateResponseCommand.class);

      // Try to put a key/value from cache0 with cache1 the primary owner
      final MagicKey key = new MagicKey(cache1);
      Future<Object> future = fork(() -> operation.perform(cache0, key));

      // After the write command was sent, kill cache1
      ControlledRpcManager.BlockedRequest blockedWrite = crm.expectCommand(operation.getCommandClass());
      cache1.stop();

      if (!blockTopologyOnOriginator) {
         listener0.unblockOnce();
         listener0.waitForTopologyToFinish();
      }

      // Now that cache1 is stopped, unblock the write command and wait for the responses
      blockedWrite.send().expectResponse(address(1), CacheNotFoundResponse.INSTANCE).receive();

      if (blockTopologyOnOriginator) {
         // The retry should be blocked on the originator until we unblock the topology update
         crm.expectNoCommand(100, TimeUnit.MILLISECONDS);
         listener0.unblockOnce();
         listener0.waitForTopologyToFinish();
      }

      // Install the new topology without cache1 on cache2 as well
      listener2.unblockOnce();
      listener2.waitForTopologyToFinish();

      // Retry the write command with a single owner (rebalance topology is blocked).
      if (!cache0.getDistributionManager().getCacheTopology().getDistribution(key).isPrimary()) {
         crm.expectCommand(operation.getCommandClass()).send().receiveAll();
      }

      // Check that the put command didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertNull(result);
      log.tracef("Write operation is done");

      cache0.removeListener(listener0);
      cache2.removeListener(listener2);
      // Unblock any remaining topology updates so the test can shut down cleanly.
      listener0.unblockOnce();
      listener0.unblockOnce();
      crm.stopBlocking();

      // Check the value on the remaining node
      assertEquals(operation.getValue(), cache0.get(key));
      assertEquals(operation.getValue(), cache2.get(key));
   }

   /**
    * Cache listener that blocks each topology update (pre-notification) until
    * {@link #unblockOnce()} is called, and signals completion of an installed
    * topology through {@link #waitForTopologyToFinish()}.
    */
   @Listener
   public class TopologyUpdateListener {
      // Opened by unblockOnce() to let exactly one pre-topology event through,
      // then re-closed by the listener itself.
      private final ReclosableLatch preLatch = new ReclosableLatch();
      // Opened when a topology has finished installing (post-notification).
      private final ReclosableLatch postLatch = new ReclosableLatch();
      // Records a pre-notification timeout; asserted on the next unblockOnce().
      private volatile boolean broken = false;

      @TopologyChanged
      public void onTopologyChange(TopologyChangedEvent e) throws InterruptedException {
         if (e.isPre()) {
            log.tracef("Blocking topology %d", e.getNewTopologyId());
            broken = !preLatch.await(10, TimeUnit.SECONDS);
            preLatch.close();
         } else {
            log.tracef("Signalling topology %d finished installing", e.getNewTopologyId());
            postLatch.open();
         }
      }

      void unblockOnce() {
         preLatch.open();
         assertFalse(broken);
      }

      private void waitForTopologyToFinish() throws InterruptedException {
         if (!postLatch.await(10, TimeUnit.SECONDS)) {
            throw new TimeoutException();
         }
         postLatch.close();
      }
   }
}
| 6,269
| 38.1875
| 106
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/ConcurrentJoinTest.java
|
package org.infinispan.distribution.rehash;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.CacheContainer;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.ConcurrentJoinTest", description = "See ISPN-1123")
public class ConcurrentJoinTest extends RehashTestBase {

   static final int NUM_JOINERS = 4;

   List<EmbeddedCacheManager> joinerManagers;
   List<Cache<Object, String>> joiners;

   /**
    * Starts {@link #NUM_JOINERS} new cache managers and makes them all join
    * the cluster concurrently (starting the cache triggers the join).
    */
   void performRehashEvent(boolean offline) throws Exception {
      joinerManagers = new CopyOnWriteArrayList<>();
      // Pre-fill with nulls so each forked task can set() its own slot without
      // racing on the list size. (Replaces the previous raw Cache[] array plus
      // a redundant set(i, null) loop.)
      joiners = new CopyOnWriteArrayList<>(Collections.nCopies(NUM_JOINERS, null));
      for (int i = 0; i < NUM_JOINERS; i++) {
         EmbeddedCacheManager joinerManager = addClusterEnabledCacheManager(TestDataSCI.INSTANCE, new ConfigurationBuilder());
         joinerManager.defineConfiguration(cacheName, configuration.build());
         joinerManagers.add(joinerManager);
      }
      List<Future<?>> forks = new ArrayList<>(NUM_JOINERS);
      for (int i = 0; i < NUM_JOINERS; i++) {
         final int ii = i;
         forks.add(fork(() -> {
            EmbeddedCacheManager joinerManager = joinerManagers.get(ii);
            joiners.set(ii, joinerManager.getCache(cacheName));
         }));
      }
      for (Future<?> fork : forks) {
         fork.get(30, TimeUnit.SECONDS);
      }
   }

   /**
    * Waits until all members (old nodes and joiners) agree on the view and the
    * rebalance has finished, then registers the joiners' caches with the test.
    */
   void waitForRehashCompletion() {
      // cacheManagers already contains the joiners' managers, no need to add more
      List<CacheContainer> allCacheManagers = new ArrayList<>(cacheManagers);
      TestingUtil.blockUntilViewsReceived(60000, false, allCacheManagers);
      waitForClusterToForm(cacheName);
      for (int i = 0; i < NUM_JOINERS; i++) {
         caches.add(joiners.get(i));
      }
   }
}
| 2,324
| 36.5
| 126
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashWithSharedStoreTest.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Arrays;
import org.infinispan.Cache;
import org.infinispan.distribution.BaseDistStoreTest;
import org.infinispan.distribution.MagicKey;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
* Should ensure that persistent state is not rehashed if the cache store is shared. See ISPN-861
*/
@Test (testName = "distribution.rehash.RehashWithSharedStoreTest", groups = "functional")
public class RehashWithSharedStoreTest extends BaseDistStoreTest<Object, String, RehashWithSharedStoreTest> {

   private static final Log log = LogFactory.getLog(RehashWithSharedStoreTest.class);

   @Override
   public Object[] factory() {
      // Run with both a non-segmented and a segmented shared store.
      return new Object[] {
            new RehashWithSharedStoreTest().segmented(false),
            new RehashWithSharedStoreTest().segmented(true),
      };
   }

   public RehashWithSharedStoreTest() {
      INIT_CLUSTER_SIZE = 3;
      testRetVals = true;
      performRehashing = true;
      shared = true;
   }

   // NOTE(review): despite its name, this runs *before* each test method
   // (@BeforeMethod) — consider renaming to e.g. resetStats.
   @BeforeMethod
   public void afterMethod() {
      clearStats(c1);
   }

   /**
    * Verifies that with a shared store a rehash does not re-persist entries:
    * after the stopped owner leaves, each remaining owner must still show
    * exactly one recorded store write for the key, while the value stays
    * readable.
    */
   public void testRehashes() throws PersistenceException {
      MagicKey k = new MagicKey("k", c1);
      c1.put(k, "v");
      Cache<Object, String>[] owners = getOwners(k);
      log.infof("Initial owners list for key %s: %s", k, Arrays.asList(owners));

      // Ensure the loader is shared!
      for (Cache<Object, String> c: Arrays.asList(c1, c2, c3)) {
         DummyInMemoryStore dims = TestingUtil.getFirstStore(c);
         assertTrue("CacheStore on " + c + " should contain key " + k, dims.contains(k));
      }

      // Pick an owner that actually performed a store write; it is stopped
      // below to force a rehash.
      Cache<Object, String> primaryOwner = owners[0];
      if (getCacheStoreStats(primaryOwner, "write") == 0) primaryOwner = owners[1];
      for (Cache<Object, String> c: owners) {
         int numWrites = getCacheStoreStats(c, "write");
         assertEquals(1, numWrites);
      }

      log.infof("Stopping node %s", primaryOwner);
      caches.remove(primaryOwner);
      primaryOwner.stop();
      primaryOwner.getCacheManager().stop();
      TestingUtil.blockUntilViewsReceived(60000, false, caches);
      TestingUtil.waitForNoRebalance(caches);

      owners = getOwners(k);
      log.infof("After shutting one node down, owners list for key %s: %s", k, Arrays.asList(owners));
      assertEquals(numOwners, owners.length);

      // The new owners serve the value without any additional store writes.
      for (Cache<Object, String> o : owners) {
         int numWrites = getCacheStoreStats(o, "write");
         assertEquals(1, numWrites);
         assertEquals("v", o.get(k));
      }
   }
}
| 2,921
| 30.419355
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxBackupOwnerBecomingPrimaryOwnerTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.infinispan.test.TestingUtil.replaceComponent;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.distribution.TriangleDistributionInterceptor;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.protostream.annotations.ProtoName;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateTransferInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.test.op.TestOperation;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* Tests data loss during state transfer a backup owner of a key becomes the primary owner
* modified key while a write operation is in progress.
* See https://issues.jboss.org/browse/ISPN-3357
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxBackupOwnerBecomingPrimaryOwnerTest")
@CleanupAfterMethod
public class NonTxBackupOwnerBecomingPrimaryOwnerTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      // Start with two nodes; a third joins during each test to trigger the
      // ownership change.
      ConfigurationBuilder c = getConfigurationBuilder();
      createCluster(DistributionRehashSCI.INSTANCE, c, 2);
      waitForClusterToForm();
   }

   private ConfigurationBuilder getConfigurationBuilder() {
      // A single segment with a controlled CH factory keeps the key's owners
      // deterministic (see CustomConsistentHashFactory below).
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.clustering().cacheMode(CacheMode.DIST_SYNC);
      c.clustering().hash().numSegments(1).consistentHashFactory(new CustomConsistentHashFactory());
      c.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
      return c;
   }

   public void testPrimaryOwnerChangingDuringPut() throws Exception {
      doTest(TestWriteOperation.PUT_CREATE);
   }

   public void testPrimaryOwnerChangingDuringPutOverwrite() throws Exception {
      doTest(TestWriteOperation.PUT_OVERWRITE);
   }

   public void testPrimaryOwnerChangingDuringPutIfAbsent() throws Exception {
      doTest(TestWriteOperation.PUT_IF_ABSENT);
   }

   public void testPrimaryOwnerChangingDuringReplace() throws Exception {
      doTest(TestWriteOperation.REPLACE);
   }

   public void testPrimaryOwnerChangingDuringReplaceExact() throws Exception {
      doTest(TestWriteOperation.REPLACE_EXACT);
   }

   public void testPrimaryOwnerChangingDuringRemove() throws Exception {
      doTest(TestWriteOperation.REMOVE);
   }

   public void testPrimaryOwnerChangingDuringRemoveExact() throws Exception {
      doTest(TestWriteOperation.REMOVE_EXACT);
   }

   /**
    * Executes {@code op} from node 0 while node 2 joins. Topology updates are
    * paused via spied LocalTopologyManagers and released through checkpoint
    * events; blocking interceptors on nodes 1 and 2 control exactly when the
    * write command and its retry run relative to the topology changes. The
    * statement order below is essential to the choreography.
    */
   protected void doTest(final TestOperation op) throws Exception {
      final String key = "testkey";
      final String cacheName = getDefaultCacheName();
      op.insertPreviousValue(advancedCache(0, cacheName), key);

      CheckPoint checkPoint = new CheckPoint();
      LocalTopologyManager ltm0 = extractGlobalComponent(manager(0), LocalTopologyManager.class);
      // Topology ids the test expects: join rebalance, then state received.
      int preJoinTopologyId = ltm0.getCacheTopology(cacheName).getTopologyId();
      int joinTopologyId = preJoinTopologyId + 1;
      int stateReceivedTopologyId = joinTopologyId + 1;

      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      addBlockingLocalTopologyManager(manager(0), checkPoint, joinTopologyId, stateReceivedTopologyId);

      final AdvancedCache<Object, Object> cache1 = advancedCache(1);
      addBlockingLocalTopologyManager(manager(1), checkPoint, joinTopologyId, stateReceivedTopologyId);

      // Add a new member and block the rebalance before the final topology is installed
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      CountDownLatch stateTransferLatch = new CountDownLatch(1);
      if (op.getPreviousValue() != null) {
         c.customInterceptors().addInterceptor()
          .before(EntryWrappingInterceptor.class)
          .interceptor(new StateTransferLatchInterceptor(stateTransferLatch));
      } else {
         // No previous value means no state-transfer put to wait for.
         stateTransferLatch.countDown();
      }
      // Add a new cache manager, but don't start it yet
      GlobalConfigurationBuilder globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      globalBuilder.serialization().addContextInitializer(DistributionRehashSCI.INSTANCE);
      EmbeddedCacheManager cm = createClusteredCacheManager(false, globalBuilder, c, new TransportFlags());
      registerCacheManager(cm);
      addBlockingLocalTopologyManager(manager(2), checkPoint, joinTopologyId, stateReceivedTopologyId);

      log.tracef("Starting the cache on the joiner");
      final AdvancedCache<Object,Object> cache2 = advancedCache(2);
      checkPoint.trigger("allow_topology_" + joinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + joinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + joinTopologyId + "_on_" + address(2));

      // Wait for the write CH to contain the joiner everywhere
      eventually(() -> cache0.getRpcManager().getMembers().size() == 3 &&
            cache1.getRpcManager().getMembers().size() == 3 &&
            cache2.getRpcManager().getMembers().size() == 3);

      CacheTopology duringJoinTopology = ltm0.getCacheTopology(cacheName);
      assertEquals(joinTopologyId, duringJoinTopology.getTopologyId());
      assertNotNull(duringJoinTopology.getPendingCH());
      int keySegment = TestingUtil.getSegmentForKey(key, cache0);
      log.tracef("Rebalance started. Found key %s with current owners %s and pending owners %s", key,
            duringJoinTopology.getCurrentCH().locateOwnersForSegment(keySegment), duringJoinTopology.getPendingCH().locateOwnersForSegment(keySegment));

      // We need to wait for the state transfer to insert the entry before inserting the blocking interceptor;
      // otherwise we could block the PUT_FOR_STATE_TRANSFER instead
      stateTransferLatch.await(10, TimeUnit.SECONDS);

      // Every operation command will be blocked before reaching the distribution interceptor on cache1
      CyclicBarrier beforeCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor<?> blockingInterceptor1 = new BlockingInterceptor<>(beforeCache1Barrier,
            op.getCommandClass(), false, false);
      extractInterceptorChain(cache1).addInterceptorBefore(blockingInterceptor1, TriangleDistributionInterceptor.class);

      // Every operation command will be blocked after returning to the distribution interceptor on cache2
      CyclicBarrier afterCache2Barrier = new CyclicBarrier(2);
      BlockingInterceptor<?> blockingInterceptor2 = new BlockingInterceptor<>(afterCache2Barrier,
            op.getCommandClass(), true, false,
            cmd -> !(cmd instanceof FlagAffectedCommand) || !((FlagAffectedCommand) cmd).hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER));
      extractInterceptorChain(cache2).addInterceptorBefore(blockingInterceptor2, StateTransferInterceptor.class);

      // Put from cache0 with cache0 as primary owner, cache2 will become the primary owner for the retry
      Future<Object> future = fork(() -> op.perform(cache0, key));

      // Wait for the command to be executed on cache2 and unblock it
      // (each barrier trips twice: before and after the blocked point).
      afterCache2Barrier.await(10, TimeUnit.SECONDS);
      afterCache2Barrier.await(10, TimeUnit.SECONDS);

      // Allow the READ_ALL_WRITE_ALL PHASE topology update to proceed on all the caches
      checkPoint.trigger("allow_topology_" + stateReceivedTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + stateReceivedTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + stateReceivedTopologyId + "_on_" + address(2));

      // Wait for the topology to change everywhere
      waitForNoRebalance(cache0, cache1, cache2);

      // Allow the put command to throw an OutdatedTopologyException on cache1
      log.tracef("Unblocking the put command on node " + address(1));
      beforeCache1Barrier.await(10, TimeUnit.SECONDS);
      beforeCache1Barrier.await(10, TimeUnit.SECONDS);

      // Allow the retry to proceed on cache1
      CacheTopology postReceiveStateTopology = ltm0.getCacheTopology(cacheName);
      if (postReceiveStateTopology.getCurrentCH().locateOwnersForSegment(keySegment).contains(address(1))) {
         beforeCache1Barrier.await(10, TimeUnit.SECONDS);
         beforeCache1Barrier.await(10, TimeUnit.SECONDS);
      }
      // And allow the retry to finish successfully on cache2
      afterCache2Barrier.await(10, TimeUnit.SECONDS);
      afterCache2Barrier.await(10, TimeUnit.SECONDS);

      // Check that the write command didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValueWithRetry(), result);
      log.tracef("Write operation is done");

      // Check the value on all the nodes
      assertEquals(op.getValue(), cache0.get(key));
      assertEquals(op.getValue(), cache1.get(key));
      assertEquals(op.getValue(), cache2.get(key));

      // Check that there are no leaked locks
      assertFalse(cache0.getAdvancedCache().getLockManager().isLocked(key));
      assertFalse(cache1.getAdvancedCache().getLockManager().isLocked(key));
      assertFalse(cache2.getAdvancedCache().getLockManager().isLocked(key));
   }

   /**
    * CH factory for the single segment: with more than two members the newest
    * member becomes the primary owner and node 0 the backup.
    */
   @ProtoName("BackupOwnerCustomConsistentHashFactory")
   public static class CustomConsistentHashFactory extends BaseControlledConsistentHashFactory.Default {
      CustomConsistentHashFactory() {
         super(1);
      }

      @Override
      protected int[][] assignOwners(int numSegments, List<Address> members) {
         switch (members.size()) {
            case 1:
               return new int[][]{{0}};
            case 2:
               return new int[][]{{0, 1}};
            default:
               return new int[][]{{members.size() - 1, 0}};
         }
      }
   }

   /**
    * Replaces the manager's LocalTopologyManager with a Mockito spy that
    * pauses handleTopologyUpdate for the given topology ids until the matching
    * "allow_topology_&lt;id&gt;_on_&lt;address&gt;" checkpoint event fires.
    */
   private void addBlockingLocalTopologyManager(final EmbeddedCacheManager manager, final CheckPoint checkPoint,
                                                final Integer... blockedTopologyIds) {
      LocalTopologyManager component = extractGlobalComponent(manager, LocalTopologyManager.class);
      LocalTopologyManager spyLtm = spy(component);
      doAnswer(invocation -> {
         CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
         // Ignore the first topology update on the joiner, which is with the topology before the join
         if (Arrays.asList(blockedTopologyIds).contains(topology.getTopologyId())) {
            checkPoint.trigger("pre_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress());
            checkPoint.awaitStrict("allow_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress(),
                  10, TimeUnit.SECONDS);
         }
         return invocation.callRealMethod();
      }).when(spyLtm).handleTopologyUpdate(eq(TestingUtil.getDefaultCacheName(manager)), any(CacheTopology.class),
            any(AvailabilityMode.class), anyInt(), any(Address.class));
      replaceComponent(manager, LocalTopologyManager.class, spyLtm, true);
   }

   /**
    * Counts down the latch after a PUT_FOR_STATE_TRANSFER command completes,
    * signalling that state transfer has inserted the entry on the joiner.
    */
   static class StateTransferLatchInterceptor extends DDAsyncInterceptor {
      private final CountDownLatch latch;

      private StateTransferLatchInterceptor(CountDownLatch latch) {
         this.latch = latch;
      }

      @Override
      public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
         return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, throwable) -> {
            if (rCommand.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
               latch.countDown();
            }
         });
      }
   }
}
| 13,634
| 47.523132
| 152
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxPrimaryOwnerBecomingNonOwnerTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.protostream.annotations.ProtoName;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* Tests that a conditional write is retried properly if the write is unsuccessful on the primary owner
* because it became a non-owner and doesn't have the entry any more.
*
* See https://issues.jboss.org/browse/ISPN-3830
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxPrimaryOwnerBecomingNonOwnerTest")
@CleanupAfterMethod
public class NonTxPrimaryOwnerBecomingNonOwnerTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder c = getConfigurationBuilder();
createCluster(DistributionRehashSCI.INSTANCE, c, 2);
waitForClusterToForm();
}
private ConfigurationBuilder getConfigurationBuilder() {
ConfigurationBuilder c = new ConfigurationBuilder();
c.clustering().cacheMode(CacheMode.DIST_SYNC);
c.clustering().hash().numSegments(1).consistentHashFactory(new CustomConsistentHashFactory());
c.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
return c;
}
public void testPrimaryOwnerChangingDuringPut() throws Exception {
doTest(TestWriteOperation.PUT_CREATE);
}
public void testPrimaryOwnerChangingDuringPutIfAbsent() throws Exception {
doTest(TestWriteOperation.PUT_IF_ABSENT);
}
public void testPrimaryOwnerChangingDuringReplace() throws Exception {
doTest(TestWriteOperation.REPLACE);
}
public void testPrimaryOwnerChangingDuringReplaceExact() throws Exception {
doTest(TestWriteOperation.REPLACE_EXACT);
}
public void testPrimaryOwnerChangingDuringRemove() throws Exception {
doTest(TestWriteOperation.REMOVE);
}
public void testPrimaryOwnerChangingDuringRemoveExact() throws Exception {
doTest(TestWriteOperation.REMOVE_EXACT);
}
private void doTest(final TestWriteOperation op) throws Exception {
final String key = "testkey";
final String cacheName = manager(0).getCacheManagerConfiguration().defaultCacheName().get();
if (op.getPreviousValue() != null) {
cache(0, cacheName).put(key, op.getPreviousValue());
}
CheckPoint checkPoint = new CheckPoint();
LocalTopologyManager ltm0 = TestingUtil.extractGlobalComponent(manager(0), LocalTopologyManager.class);
int preJoinTopologyId = ltm0.getCacheTopology(cacheName).getTopologyId();
int joinTopologyId = preJoinTopologyId + 1;
int stateReceivedTopologyId = joinTopologyId + 1;
final AdvancedCache<Object, Object> cache0 = advancedCache(0);
addBlockingLocalTopologyManager(manager(0), checkPoint, joinTopologyId, stateReceivedTopologyId);
final AdvancedCache<Object, Object> cache1 = advancedCache(1);
addBlockingLocalTopologyManager(manager(1), checkPoint, joinTopologyId, stateReceivedTopologyId);
// Add a new member and block the rebalance before the final topology is installed
ConfigurationBuilder c = getConfigurationBuilder();
c.clustering().stateTransfer().awaitInitialTransfer(false);
addClusterEnabledCacheManager(DistributionRehashSCI.INSTANCE, c);
addBlockingLocalTopologyManager(manager(2), checkPoint, joinTopologyId, stateReceivedTopologyId);
log.tracef("Starting the cache on the joiner");
final AdvancedCache<Object,Object> cache2 = advancedCache(2);
checkPoint.trigger("allow_topology_" + joinTopologyId + "_on_" + address(0));
checkPoint.trigger("allow_topology_" + joinTopologyId + "_on_" + address(1));
checkPoint.trigger("allow_topology_" + joinTopologyId + "_on_" + address(2));
// Wait for the write CH to contain the joiner everywhere
Stream.of(cache0, cache1, cache2).forEach(cache ->
eventuallyEquals(3, () -> cache.getRpcManager().getMembers().size()));
CacheTopology duringJoinTopology = ltm0.getCacheTopology(cacheName);
assertEquals(CacheTopology.Phase.READ_OLD_WRITE_ALL, duringJoinTopology.getPhase());
assertEquals(joinTopologyId, duringJoinTopology.getTopologyId());
assertNotNull(duringJoinTopology.getPendingCH());
int keySegment = TestingUtil.getSegmentForKey(key, cache0);
log.tracef("Rebalance started. Found key %s with current owners %s and pending owners %s", key,
duringJoinTopology.getCurrentCH().locateOwnersForSegment(keySegment), duringJoinTopology.getPendingCH().locateOwnersForSegment(keySegment));
// Every operation command will be blocked before reaching the distribution interceptor on cache0 (the originator)
CyclicBarrier beforeCache0Barrier = new CyclicBarrier(2);
BlockingInterceptor<?> blockingInterceptor0 = new BlockingInterceptor<>(beforeCache0Barrier,
op.getCommandClass(), false, true);
extractInterceptorChain(cache0).addInterceptorBefore(blockingInterceptor0, EntryWrappingInterceptor.class);
// Write from cache0 with cache0 as primary owner, cache2 will become the primary owner for the retry
Future<Object> future = fork(() -> op.perform(cache0, key));
// Block the write command on cache0
beforeCache0Barrier.await(10, TimeUnit.SECONDS);
// Allow the topology update to proceed on cache0
checkPoint.trigger("allow_topology_" + stateReceivedTopologyId + "_on_" + address(0));
eventuallyEquals(stateReceivedTopologyId,
() -> cache0.getDistributionManager().getCacheTopology().getTopologyId());
assertEquals(CacheTopology.Phase.READ_ALL_WRITE_ALL, cache0.getDistributionManager().getCacheTopology().getPhase());
// Allow the command to proceed
log.tracef("Unblocking the write command on node " + address(1));
beforeCache0Barrier.await(10, TimeUnit.SECONDS);
// Wait for the retry after the OutdatedTopologyException
beforeCache0Barrier.await(10, TimeUnit.SECONDS);
// Do not block during (possible) further retries, and allow it to proceed
blockingInterceptor0.suspend(true);
beforeCache0Barrier.await(10, TimeUnit.SECONDS);
// Allow the topology update to proceed on the other caches
checkPoint.trigger("allow_topology_" + stateReceivedTopologyId + "_on_" + address(1));
checkPoint.trigger("allow_topology_" + stateReceivedTopologyId + "_on_" + address(2));
// Wait for the topology to change everywhere
TestingUtil.waitForNoRebalance(cache0, cache1, cache2);
// Check that the put command didn't fail
Object result = future.get(10, TimeUnit.SECONDS);
// TODO ISPN-7590: Return values are not reliable, if the command is retried after being applied to both backup
// owners the retry will provide incorrect return value
// assertEquals(op.getReturnValue(), result);
log.tracef("Write operation is done");
// Check the value on all the nodes
assertEquals(op.getValue(), cache0.get(key));
assertEquals(op.getValue(), cache1.get(key));
assertEquals(op.getValue(), cache2.get(key));
// Check that there are no leaked locks
assertFalse(cache0.getAdvancedCache().getLockManager().isLocked(key));
assertFalse(cache1.getAdvancedCache().getLockManager().isLocked(key));
assertFalse(cache2.getAdvancedCache().getLockManager().isLocked(key));
}
@ProtoName("PrimaryOwnerCustomConsistentHashFactory")
public static class CustomConsistentHashFactory extends BaseControlledConsistentHashFactory.Default {
   CustomConsistentHashFactory() {
      // A single segment keeps the ownership layout trivial to reason about in the test.
      super(1);
   }

   /**
    * Assigns owners for the single segment based on cluster size: with one or two
    * members ownership stays with the oldest member(s); with three or more members the
    * newest member (highest index) becomes the primary owner and the first member the
    * backup.
    */
   @Override
   protected int[][] assignOwners(int numSegments, List<Address> members) {
      int clusterSize = members.size();
      if (clusterSize == 1) {
         return new int[][]{{0}};
      }
      if (clusterSize == 2) {
         return new int[][]{{0, 1}};
      }
      return new int[][]{{clusterSize - 1, 0}};
   }
}
/**
 * Installs a Mockito spy around the {@link LocalTopologyManager} of {@code manager} so
 * the test can control exactly when each node observes selected topology updates.
 * <p>
 * For every topology id in {@code blockedTopologyIds}, the spy triggers
 * {@code pre_topology_<id>_on_<address>} on the check point and then waits strictly (up
 * to 10 seconds) for {@code allow_topology_<id>_on_<address>} before delegating to the
 * real {@code handleTopologyUpdate}. Updates with other topology ids pass through
 * unchanged.
 *
 * @param manager            the cache manager whose topology manager is replaced
 * @param checkPoint         coordination point shared with the test method
 * @param blockedTopologyIds topology ids whose updates must be explicitly allowed
 */
private void addBlockingLocalTopologyManager(final EmbeddedCacheManager manager, final CheckPoint checkPoint,
                                             final Integer... blockedTopologyIds) {
   LocalTopologyManager component = TestingUtil.extractGlobalComponent(manager, LocalTopologyManager.class);
   LocalTopologyManager spyLtm = spy(component);
   doAnswer(invocation -> {
      // Argument 1 of handleTopologyUpdate is the new CacheTopology.
      CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
      // Ignore the first topology update on the joiner, which is with the topology before the join
      if (Arrays.asList(blockedTopologyIds).contains(topology.getTopologyId())) {
         checkPoint.trigger("pre_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress());
         checkPoint.awaitStrict("allow_topology_" + topology.getTopologyId() + "_on_" + manager.getAddress(),
                                10, TimeUnit.SECONDS);
      }
      return invocation.callRealMethod();
   }).when(spyLtm).handleTopologyUpdate(eq(TestingUtil.getDefaultCacheName(manager)), any(CacheTopology.class),
         any(AvailabilityMode.class), anyInt(), any(Address.class));
   // Replace the component so the spy intercepts all subsequent topology updates.
   TestingUtil.replaceComponent(manager, LocalTopologyManager.class, spyLtm, true);
}
}
| 10,771
| 46.875556
| 152
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxPutIfAbsentDuringLeaveStressTest.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.annotations.Test;
/**
* Tests data loss during state transfer when the originator of a put operation becomes the primary owner of the
* modified key. See https://issues.jboss.org/browse/ISPN-3357
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxPutIfAbsentDuringLeaveStressTest")
@CleanupAfterMethod
public class NonTxPutIfAbsentDuringLeaveStressTest extends MultipleCacheManagersTest {

   private static final int NUM_WRITERS = 4;
   private static final int NUM_ORIGINATORS = 2;
   private static final int NUM_KEYS = 100;

   @Override
   public Object[] factory() {
      return new Object[] {
            new NonTxPutIfAbsentDuringLeaveStressTest().cacheMode(CacheMode.DIST_SYNC),
      };
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      // Start a 5-node cluster; the test later kills two members to force rebalances.
      ConfigurationBuilder c = getDefaultClusteredCacheConfig(cacheMode, false);
      addClusterEnabledCacheManager(c);
      addClusterEnabledCacheManager(c);
      addClusterEnabledCacheManager(c);
      addClusterEnabledCacheManager(c);
      addClusterEnabledCacheManager(c);
      waitForClusterToForm();
   }

   /**
    * Runs {@value #NUM_WRITERS} concurrent writers performing putIfAbsent over a shared
    * key space while two members leave the cluster, then verifies that every surviving
    * cache agrees with the recorded "winning" value of each key.
    *
    * @throws Exception if a writer fails an assertion or does not finish within the timeout
    */
   @Test(groups = "unstable", description = "ISPN-7682")
   public void testNodeLeavingDuringPutIfAbsent() throws Exception {
      // Records the value that won each key's putIfAbsent race.
      ConcurrentMap<String, String> insertedValues = new ConcurrentHashMap<>();
      AtomicBoolean stop = new AtomicBoolean(false);

      // Typed futures/callable instead of the previous raw Future[] / raw Callable.
      Future<?>[] futures = new Future<?>[NUM_WRITERS];
      for (int i = 0; i < NUM_WRITERS; i++) {
         final int writerIndex = i;
         futures[i] = fork(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
               while (!stop.get()) {
                  for (int j = 0; j < NUM_KEYS; j++) {
                     // Writers only originate from the first NUM_ORIGINATORS nodes,
                     // which are never killed during the test.
                     Cache<Object, Object> cache = cache(writerIndex % NUM_ORIGINATORS);
                     doPut(cache, "key_" + j, "value_" + j + "_" + writerIndex);
                  }
               }
               return null;
            }

            private void doPut(Cache<Object, Object> cache, String key, String value) {
               Object oldValue = cache.putIfAbsent(key, value);
               Object newValue = cache.get(key);
               if (oldValue == null) {
                  // succeeded
                  log.tracef("Successfully inserted value %s for key %s", value, key);
                  assertEquals(value, newValue);
                  String duplicateInsertedValue = insertedValues.putIfAbsent(key, value);
                  if (duplicateInsertedValue != null) {
                     // ISPN-4286: two concurrent putIfAbsent operations can both return null
                     assertEquals(value, duplicateInsertedValue);
                  }
               } else {
                  // failed
                  if (newValue == null) {
                     // ISPN-3918: cache.get(key) == null if another command succeeded but didn't finish
                     eventuallyEquals(oldValue, () -> cache.get(key));
                  } else {
                     assertEquals(oldValue, newValue);
                  }
               }
            }
         });
      }

      killMember(4);
      TestingUtil.waitForNoRebalance(caches());
      killMember(3);
      TestingUtil.waitForNoRebalance(caches());

      stop.set(true);
      // Join ALL writers before verifying. The previous version verified the whole key
      // space inside this loop, after each individual join, which both repeated the
      // verification NUM_WRITERS times and raced against writers that were still running.
      for (int i = 0; i < NUM_WRITERS; i++) {
         futures[i].get(10, TimeUnit.SECONDS);
      }
      for (int j = 0; j < NUM_KEYS; j++) {
         String key = "key_" + j;
         for (int k = 0; k < caches().size(); k++) {
            assertEquals(insertedValues.get(key), cache(k).get(key));
         }
      }
   }
}
| 4,323
| 35.957265
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxStateTransferOverwritingValue2Test.java
|
package org.infinispan.distribution.rehash;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.triangle.BackupWriteCommand;
import org.infinispan.commands.write.BackupAckCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ClearCacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.Mocks;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CacheEntryDelegator;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.ClusteringDependentLogicDelegator;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.ControlledRpcManager;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
/**
* Tests that state transfer can't overwrite a value written by a command during state transfer.
* See https://issues.jboss.org/browse/ISPN-3443
*
* @author Dan Berindei
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxStateTransferOverwritingValue2Test")
public class NonTxStateTransferOverwritingValue2Test extends MultipleCacheManagersTest {

   {
      // Each test method restarts the cluster, so state from one test never leaks into the next.
      cleanup = CleanupPhase.AFTER_METHOD;
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      // Start with a single member; the joiner is added inside doTest().
      ConfigurationBuilder c = getConfigurationBuilder();
      addClusterEnabledCacheManager(c);
      waitForClusterToForm();
   }

   @Override
   protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
      // Disable the global configuration manager so no extra internal caches interfere.
      NoOpGlobalConfigurationManager.amendCacheManager(cm);
   }

   /** Builds the cache configuration: non-transactional DIST_SYNC. */
   protected ConfigurationBuilder getConfigurationBuilder() {
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.clustering().cacheMode(CacheMode.DIST_SYNC);
      c.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
      return c;
   }

   public void testBackupOwnerJoiningDuringPutOverwrite() throws Exception {
      // Need a previous value for this test, so we can't test PUT_CREATE
      doTest(TestWriteOperation.PUT_OVERWRITE);
   }

   public void testBackupOwnerJoiningDuringReplace() throws Exception {
      doTest(TestWriteOperation.REPLACE);
   }

   public void testBackupOwnerJoiningDuringReplaceWithPreviousValue() throws Exception {
      doTest(TestWriteOperation.REPLACE_EXACT);
   }

   public void testBackupOwnerJoiningDuringRemove() throws Exception {
      doTest(TestWriteOperation.REMOVE);
   }

   public void testBackupOwnerJoiningDuringRemoveWithPreviousValue() throws Exception {
      doTest(TestWriteOperation.REMOVE_EXACT);
   }

   /**
    * Core scenario: a state-transfer commit and a user write for the same key are forced
    * to race on the joining backup owner, and the user write must win. The ordering is
    * controlled entirely through the shared {@link CheckPoint}; the checkpoint name
    * suffix is the commit's origin address, {@code null} for state-transfer commits.
    *
    * @param op the write operation under test (put/replace/remove variants)
    */
   private void doTest(final TestWriteOperation op) throws Exception {
      // Test scenario:
      // cache0 is the only member in the cluster, cache1 joins
      // Key k is in the cache, and is transferred to cache1
      // A user operation/tx also modifies key k
      // Even if both state transfer and the user tx try to commit the entry for k concurrently,
      // the value of k at the end should be the one set by the user tx.
      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      final String key = "key";

      // Prepare for replace/remove: put a previous value in cache0
      final Object previousValue = op.getPreviousValue();
      if (previousValue != null) {
         cache0.put(key, previousValue);
         assertEquals(previousValue, cache0.get(key));
         log.tracef("Previous value inserted: %s = %s", key, previousValue);
      }

      int preJoinTopologyId = cache0.getDistributionManager().getCacheTopology().getTopologyId();

      // Block any state response commands on cache0
      // So that we can install the spy ClusteringDependentLogic on cache1 before state transfer is applied
      final CheckPoint checkPoint = new CheckPoint();
      ControlledRpcManager blockingRpcManager0 = ControlledRpcManager.replaceRpcManager(cache0);
      blockingRpcManager0.excludeCommands(BackupWriteCommand.class, BackupAckCommand.class);

      // Block the rebalance confirmation on coordinator (to avoid the retrying of commands)
      blockRebalanceConfirmation(manager(0), checkPoint, preJoinTopologyId + 1);

      // Start the joiner
      log.tracef("Starting the cache on the joiner");
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);
      final AdvancedCache<Object,Object> cache1 = advancedCache(1);

      // Wait for the write CH to contain the joiner everywhere
      eventually(() -> cache0.getRpcManager().getMembers().size() == 2 &&
            cache1.getRpcManager().getMembers().size() == 2);

      // Every PutKeyValueCommand will be blocked before committing the entry on cache1
      blockEntryCommit(checkPoint, cache1);

      // Wait for cache0 to collect the state to send to cache1 (including our previous value).
      ControlledRpcManager.BlockedRequest blockedStateResponse =
            blockingRpcManager0.expectCommand(StateResponseCommand.class);
      // Allow the state to be applied on cache1 (writing the old value for our entry)
      ControlledRpcManager.SentRequest sentStateResponse = blockedStateResponse.send();

      // Wait for state transfer tx/operation to call commitEntry on cache1 and block
      checkPoint.awaitStrict("pre_commit_entry_" + key + "_from_" + null, 5, SECONDS);

      // Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 as backup owner
      // The put command will be blocked on cache1 just before committing the entry.
      Future<Object> future = fork(() -> op.perform(cache0, key));

      // Check that the user write is blocked by the state transfer write
      boolean blocked = checkPoint.peek(1, SECONDS, "pre_commit_entry_" + key + "_from_" + address(0)) == null;
      assertTrue(blocked);

      // Allow state transfer to commit
      checkPoint.trigger("resume_commit_entry_" + key + "_from_" + null);

      // Check that the user operation can now commit the entry
      checkPoint.awaitStrict("pre_commit_entry_" + key + "_from_" + address(0), 5, SECONDS);

      // Allow the user put to commit
      checkPoint.trigger("resume_commit_entry_" + key + "_from_" + address(0));

      // Wait for both state transfer and the command to commit
      checkPoint.awaitStrict("post_commit_entry_" + key + "_from_" + null, 10, SECONDS);
      checkPoint.awaitStrict("post_commit_entry_" + key + "_from_" + address(0), 10, SECONDS);

      // Wait for the command to finish and check that it didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);

      // Receive the response for the state response command (only after all commits have finished)
      sentStateResponse.receiveAll();

      // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
      int rebalanceTopologyId = preJoinTopologyId + 1;
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(0));
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(1));
      TestingUtil.waitForNoRebalance(cache0, cache1);

      // Check the value on all the nodes
      assertEquals(op.getValue(), cache0.get(key));
      assertEquals(op.getValue(), cache1.get(key));

      blockingRpcManager0.stopBlocking();
   }

   /**
    * Wraps the cache's {@link ClusteringDependentLogic} so that every entry commit
    * (except clears) triggers {@code pre_commit_entry_<key>_from_<origin>}, waits for the
    * matching {@code resume_commit_entry_...} trigger before writing to the data
    * container, and finally triggers {@code post_commit_entry_...}. The origin is
    * {@code null} for state-transfer commits and the originator's address for user writes.
    */
   private void blockEntryCommit(final CheckPoint checkPoint, AdvancedCache<Object, Object> cache) {
      ClusteringDependentLogic cdl1 = TestingUtil.extractComponent(cache, ClusteringDependentLogic.class);
      ClusteringDependentLogic replaceCdl = new ClusteringDependentLogicDelegator(cdl1) {
         @Override
         public CompletionStage<Void> commitEntry(CacheEntry entry, FlagAffectedCommand command,
                                                  InvocationContext ctx, Flag trackFlag, boolean l1Invalidation) {
            //skip for clear command!
            if (entry instanceof ClearCacheEntry) {
               return super.commitEntry(entry, command, ctx, trackFlag, l1Invalidation);
            }
            final Address source = ctx.getOrigin();
            CacheEntry newEntry = new CacheEntryDelegator(entry) {
               @Override
               public void commit(DataContainer container) {
                  checkPoint.trigger("pre_commit_entry_" + getKey() + "_from_" + source);
                  try {
                     checkPoint.awaitStrict("resume_commit_entry_" + getKey() + "_from_" + source, 10,
                                            SECONDS);
                  } catch (InterruptedException | TimeoutException e) {
                     throw new RuntimeException(e);
                  }
                  super.commit(container);
                  checkPoint.trigger("post_commit_entry_" + getKey() + "_from_" + source);
               }
            };
            return super.commitEntry(newEntry, command, ctx, trackFlag, l1Invalidation);
         }
      };
      TestingUtil.replaceComponent(cache, ClusteringDependentLogic.class, replaceCdl, true);
   }

   /**
    * Mocks the {@link ClusterTopologyManager} on the coordinator so that rebalance phase
    * confirmations for {@code rebalanceTopologyId} are held until the test triggers
    * {@code resume_rebalance_confirmation_<topologyId>_from_<source>}. This prevents the
    * topology from advancing (which would retry the in-flight command) until the test is
    * ready.
    */
   private void blockRebalanceConfirmation(final EmbeddedCacheManager manager, final CheckPoint checkPoint, int rebalanceTopologyId)
         throws Exception {
      ClusterTopologyManager ctm = TestingUtil.extractGlobalComponent(manager, ClusterTopologyManager.class);
      Answer<?> forwardedAnswer = AdditionalAnswers.delegatesTo(ctm);
      ClusterTopologyManager mock = mock(ClusterTopologyManager.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         Object[] arguments = invocation.getArguments();
         Address source = (Address) arguments[1];
         int topologyId = (Integer) arguments[2];
         if (rebalanceTopologyId == topologyId) {
            checkPoint.trigger("pre_rebalance_confirmation_" + topologyId + "_from_" + source);
            // Complete asynchronously on the test executor so the caller thread is not blocked.
            return checkPoint.future("resume_rebalance_confirmation_" + topologyId + "_from_" + source, 10, SECONDS, testExecutor())
                  .thenCompose(__ -> Mocks.callAnotherAnswer(forwardedAnswer, invocation));
         }
         return forwardedAnswer.answer(invocation);
      }).when(mock).handleRebalancePhaseConfirm(anyString(), any(Address.class), anyInt(), isNull(), anyInt());
      TestingUtil.replaceComponent(manager, ClusterTopologyManager.class, mock, true);
   }
}
| 11,815
| 46.453815
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/StateTransferOverwriteTest.java
|
package org.infinispan.distribution.rehash;
import java.util.concurrent.Callable;
import jakarta.transaction.TransactionManager;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.test.op.TestWriteOperation;
import org.testng.annotations.Test;
/**
* Test that ensures that state transfer values aren't overridden with a non tx without L1 enabled.
*
* @author William Burns
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.rehash.StateTransferOverwriteTest")
public class StateTransferOverwriteTest extends BaseTxStateTransferOverwriteTest {

   public StateTransferOverwriteTest() {
      // This variant always runs without transactions.
      transactional = false;
   }

   @Override
   public Object[] factory() {
      // Exercise both the L1-disabled and L1-enabled configurations.
      Object withoutL1 = new StateTransferOverwriteTest().l1(false);
      Object withL1 = new StateTransferOverwriteTest().l1(true);
      return new Object[]{withoutL1, withL1};
   }

   /** Non-transactional caches execute the write command class of the operation directly. */
   @Override
   protected Class<? extends VisitableCommand> getVisitableCommand(TestWriteOperation op) {
      return op.getCommandClass();
   }

   /** No transaction wrapping: the callable runs as-is. */
   @Override
   protected Callable<?> runWithTx(TransactionManager tm, Callable<?> callable) {
      return callable;
   }
}
| 1,142
| 26.878049
| 99
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/StateTransferOverwritingValueTest.java
|
package org.infinispan.distribution.rehash;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.util.ControlledRpcManager.replaceRpcManager;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.triangle.BackupWriteCommand;
import org.infinispan.commands.tx.AbstractTransactionBoundaryCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.ControlledRpcManager;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.IsolationLevel;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
/**
* Tests that state transfer can't overwrite a value written by a command during state transfer.
* See https://issues.jboss.org/browse/ISPN-3443
*
* @author Dan Berindei
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.rehash.StateTransferOverwritingValueTest")
public class StateTransferOverwritingValueTest extends MultipleCacheManagersTest {

   @Override
   public Object[] factory() {
      // Run the scenario non-transactionally and with both transactional locking modes.
      return new Object[] {
            new StateTransferOverwritingValueTest().cacheMode(CacheMode.DIST_SYNC).transactional(false),
            new StateTransferOverwritingValueTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
            new StateTransferOverwritingValueTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
      };
   }

   {
      // Each test method restarts the cluster, so state never leaks between tests.
      cleanup = CleanupPhase.AFTER_METHOD;
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      // Start with a single member; the joiner is added inside doTest().
      ConfigurationBuilder c = getConfigurationBuilder();
      addClusterEnabledCacheManager(c);
      waitForClusterToForm();
   }

   @Override
   protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
      // Disable the global configuration manager so no extra internal caches interfere.
      NoOpGlobalConfigurationManager.amendCacheManager(cm);
   }

   /** Builds the cache configuration for the current factory variant (cache mode, tx mode, locking). */
   protected ConfigurationBuilder getConfigurationBuilder() {
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.clustering().cacheMode(cacheMode);
      c.transaction().transactionMode(transactionMode());
      if (lockingMode != null) {
         c.transaction().lockingMode(lockingMode);
      }
      c.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      return c;
   }

   public void testBackupOwnerJoiningDuringPut() throws Exception {
      doTest(TestWriteOperation.PUT_CREATE);
   }

   public void testBackupOwnerJoiningDuringPutOverwrite() throws Exception {
      doTest(TestWriteOperation.PUT_OVERWRITE);
   }

   public void testBackupOwnerJoiningDuringPutIfAbsent() throws Exception {
      doTest(TestWriteOperation.PUT_IF_ABSENT);
   }

   public void testBackupOwnerJoiningDuringReplace() throws Exception {
      doTest(TestWriteOperation.REPLACE);
   }

   public void testBackupOwnerJoiningDuringReplaceWithPreviousValue() throws Exception {
      doTest(TestWriteOperation.REPLACE_EXACT);
   }

   public void testBackupOwnerJoiningDuringRemove() throws Exception {
      doTest(TestWriteOperation.REMOVE);
   }

   public void testBackupOwnerJoiningDuringRemoveWithPreviousValue() throws Exception {
      doTest(TestWriteOperation.REMOVE_EXACT);
   }

   /**
    * Core scenario: a user write races with the state transfer to a joining backup owner,
    * and the user's value must survive even though the entry may not yet be in the
    * joiner's data container. Ordering is controlled with a {@link CyclicBarrier} inside
    * a {@link BlockingInterceptor} on the joiner and a {@link CheckPoint} guarding the
    * rebalance confirmation.
    *
    * @param op the write operation under test (put/replace/remove variants)
    */
   private void doTest(final TestWriteOperation op) throws Exception {
      // Test scenario:
      // cache0 is the only member in the cluster, cache1 joins
      // Key k is in the cache, and is transferred to cache1
      // A user operation/tx also modifies key k
      // The user tx must update k even if it doesn't find key k in the data container.
      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      final String key = "key";

      // Prepare for replace/remove: put a previous value in cache0
      final Object previousValue = op.getPreviousValue();
      if (previousValue != null) {
         cache0.put(key, previousValue);
         assertEquals(previousValue, cache0.get(key));
         log.tracef("Previous value inserted: %s = %s", key, previousValue);
      }

      int preJoinTopologyId = cache0.getDistributionManager().getCacheTopology().getTopologyId();

      // Block any state response commands on cache0
      CheckPoint checkPoint = new CheckPoint();
      ControlledRpcManager blockingRpcManager0 = replaceRpcManager(cache0);
      blockingRpcManager0.excludeCommands(WriteCommand.class, BackupWriteCommand.class,
                                          AbstractTransactionBoundaryCommand.class,
                                          TxCompletionNotificationCommand.class);

      int rebalanceTopologyId = preJoinTopologyId + 1;
      // Block the rebalance confirmation on cache0
      blockRebalanceConfirmation(manager(0), checkPoint, rebalanceTopologyId);

      // Start the joiner
      log.tracef("Starting the cache on the joiner");
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false).timeout(30, SECONDS);
      addClusterEnabledCacheManager(c);
      final AdvancedCache<Object, Object> cache1 = advancedCache(1);

      // Wait for joiner to finish requesting segments, so that write commands are not blocked
      StateTransferLock stateTransferLock1 = TestingUtil.extractComponent(cache1, StateTransferLock.class);
      stateTransferLock1.transactionDataFuture(rebalanceTopologyId).toCompletableFuture().get(10, SECONDS);
      assertEquals(2, cache1.getRpcManager().getMembers().size());

      // Every PutKeyValueCommand will be blocked before committing the entry on cache1
      CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
      // Scattered cache mode uses only PKVC or RemoveCommands for backup
      BlockingInterceptor<?> blockingInterceptor1 =
            new BlockingInterceptor<>(beforeCommitCache1Barrier, true, false,
                                      t -> t.getClass().equals(op.getCommandClass()));
      AsyncInterceptorChain interceptorChain1 = TestingUtil.extractInterceptorChain(cache1);
      Class<? extends EntryWrappingInterceptor> ewi =
            interceptorChain1.findInterceptorExtending(EntryWrappingInterceptor.class).getClass();
      assertTrue(interceptorChain1.addInterceptorAfter(blockingInterceptor1, ewi));

      // Wait for cache0 to collect the state to send to cache1 (including our previous value).
      ControlledRpcManager.BlockedRequest<?> blockedStateResponse =
            blockingRpcManager0.expectCommand(StateResponseCommand.class);

      // Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 will become a backup owner for the retry
      // The put command will be blocked on cache1 just before committing the entry.
      Future<Object> future = fork(() -> op.perform(cache0, key));

      // Wait for the entry to be wrapped on cache1
      beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

      // Stop blocking, otherwise we'll block the state transfer put commands as well
      blockingInterceptor1.suspend(true);

      // Allow the state to be applied on cache1 (writing the old value for our entry)
      blockedStateResponse.send().receiveAll();

      // Wait for cache1 to finish applying the state, but don't allow the rebalance confirmation to be processed.
      // (It would change the topology and it would trigger a retry for the command.)
      checkPoint.awaitStrict("pre_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(1), 10, SECONDS);

      // Now allow the command to commit on cache1
      beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

      // Wait for the command to finish and check that it didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);

      // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(0));
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(1));
      TestingUtil.waitForNoRebalance(cache0, cache1);

      // Check the value on all the nodes
      assertEquals(op.getValue(), cache0.get(key));
      assertEquals(op.getValue(), cache1.get(key));

      blockingRpcManager0.stopBlocking();
   }

   /**
    * Mocks the {@link ClusterTopologyManager} on the given manager so that rebalance
    * phase confirmations for {@code rebalanceTopologyId} are held until the test triggers
    * {@code resume_rebalance_confirmation_<topologyId>_from_<source>}. Other topology ids
    * are delegated to the real component unchanged.
    */
   private void blockRebalanceConfirmation(final EmbeddedCacheManager manager, final CheckPoint checkPoint,
                                           int rebalanceTopologyId)
         throws Exception {
      ClusterTopologyManager ctm = TestingUtil.extractGlobalComponent(manager, ClusterTopologyManager.class);
      Answer<?> forwardedAnswer = AdditionalAnswers.delegatesTo(ctm);
      ClusterTopologyManager mock = mock(ClusterTopologyManager.class, withSettings().defaultAnswer(forwardedAnswer));
      BlockingManager blockingManager = manager.getGlobalComponentRegistry().getComponent(BlockingManager.class);
      doAnswer(invocation -> {
         Object[] arguments = invocation.getArguments();
         Address source = (Address) arguments[1];
         int topologyId = (Integer) arguments[2];
         if (topologyId == rebalanceTopologyId) {
            checkPoint.trigger("pre_rebalance_confirmation_" + topologyId + "_from_" + source);
            // Wait on a blocking executor so the invoking (non-blocking) thread is not stalled.
            return checkPoint.awaitStrictAsync("resume_rebalance_confirmation_" + topologyId + "_from_" + source, 10, SECONDS, blockingManager.asExecutor("checkpoint"))
                  .thenCompose(unused -> {
                     try {
                        //noinspection unchecked
                        return (CompletionStage<Void>) forwardedAnswer.answer(invocation);
                     } catch (Throwable e) {
                        throw CompletableFutures.asCompletionException(e);
                     }
                  });
         }
         return forwardedAnswer.answer(invocation);
      }).when(mock).handleRebalancePhaseConfirm(anyString(), any(Address.class), anyInt(), isNull(), anyInt());
      TestingUtil.replaceComponent(manager, ClusterTopologyManager.class, mock, true);
   }
}
| 11,725
| 46.666667
| 168
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/StateResponseOrderingTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInboundRpc;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnOutboundRpc;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.MagicKey;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateChunk;
import org.infinispan.statetransfer.StateTransferManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.CommandMatcher;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.util.ByteString;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* Start two rebalance operations by stopping two members of a cluster in sequence.
* Test that a delayed StateResponseCommand doesn't break state transfer.
* See https://issues.jboss.org/browse/ISPN-3120
*
* @author Dan Berindei
*/
@CleanupAfterMethod
@Test(groups = "functional", testName = "distribution.rehash.StateResponseOrderingTest")
public class StateResponseOrderingTest extends MultipleCacheManagersTest {
private ControlledConsistentHashFactory consistentHashFactory;
@Override
protected void createCacheManagers() throws Throwable {
   // Two segments, both initially owned by nodes 1, 2 and 3 (numOwners = 3).
   // The controlled factory lets the test re-assign owners and trigger rebalances manually.
   consistentHashFactory = new ControlledConsistentHashFactory.Default(new int[][]{{1, 2, 3}, {1, 2, 3}});
   ConfigurationBuilder builder = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
   builder.clustering().cacheMode(CacheMode.DIST_SYNC).hash().numOwners(3);
   builder.clustering().hash().numSegments(2).consistentHashFactory(consistentHashFactory);
   createCluster(TestDataSCI.INSTANCE, builder, 4);
   waitForClusterToForm();
}
public void testSimulatedOldStateResponse() throws Throwable {
// Initial owners for both segments are cache 1, 2, and 3
// Start a rebalance, with cache 0 becoming an owner of both CH segments
// Block the first StateTransferStartCommand on cache 0
// While state transfer is blocked, simulate an old state response command on cache 0
// Check that the old command is ignored and state transfer completes successfully
StateSequencer sequencer = new StateSequencer();
sequencer.logicalThread("st", "st:block_state_request", "st:simulate_old_response", "st:resume_state_request");
cache(1).put("k1", "v1");
cache(2).put("k2", "v2");
cache(3).put("k3", "v3");
DistributionManager dm0 = advancedCache(0).getDistributionManager();
final int initialTopologyId = dm0.getCacheTopology().getTopologyId();
assertEquals(Arrays.asList(address(1), address(2), address(3)),
dm0.getCacheTopology().getDistribution("k1").readOwners());
assertNull(dm0.getCacheTopology().getPendingCH());
// Block when cache 0 sends the first state request to cache 1
CommandMatcher segmentRequestMatcher = command -> command instanceof StateTransferStartCommand &&
((StateTransferStartCommand) command).getTopologyId() == initialTopologyId + 1;
advanceOnOutboundRpc(sequencer, cache(0), segmentRequestMatcher)
.before("st:block_state_request", "st:resume_state_request");
// Cache 0 will become an owner and will request state from cache 1
consistentHashFactory.setOwnerIndexes(new int[][]{{0, 1, 2}, {0, 1, 2}});
consistentHashFactory.triggerRebalance(cache(0));
sequencer.enter("st:simulate_old_response");
assertNotNull(dm0.getCacheTopology().getPendingCH());
assertEquals(Arrays.asList(address(0), address(1), address(2)),
dm0.getCacheTopology().getPendingCH().locateOwnersForSegment(0));
assertEquals(Arrays.asList(address(1), address(2), address(3), address(0)),
dm0.getCacheTopology().getDistribution("k1").writeOwners());
// Cache 0 didn't manage to request any segments yet, but it has registered all the inbound transfer tasks.
// We'll pretend it got a StateResponseCommand with an older topology id.
PerCacheInboundInvocationHandler handler = TestingUtil.extractComponent(cache(0), PerCacheInboundInvocationHandler.class);
StateChunk stateChunk0 = new StateChunk(0, Arrays.asList(new ImmortalCacheEntry("k0", "v0")), true);
StateChunk stateChunk1 = new StateChunk(1, Arrays.asList(new ImmortalCacheEntry("k0", "v0")), true);
String cacheName = manager(0).getCacheManagerConfiguration().defaultCacheName().get();
StateResponseCommand stateResponseCommand = new StateResponseCommand(ByteString.fromString(cacheName),
initialTopologyId, Arrays.asList(stateChunk0, stateChunk1), false);
// Call with preserveOrder = true to force the execution in the same thread
stateResponseCommand.setOrigin(address(3));
handler.handle(stateResponseCommand, Reply.NO_OP, DeliverOrder.PER_SENDER);
sequencer.exit("st:simulate_old_response");
waitForNoRebalance(cache(0), cache(1), cache(2), cache(3));
// Check that state wasn't lost
assertTrue(dm0.getCacheTopology().isReadOwner("k1"));
assertTrue(dm0.getCacheTopology().isReadOwner("k2"));
assertTrue(dm0.getCacheTopology().isReadOwner("k3"));
assertEquals("v1", cache(0).get("k1"));
assertEquals("v2", cache(0).get("k2"));
assertEquals("v3", cache(0).get("k3"));
// Check that the old state response was ignored
assertNull(cache(0).get("k0"));
}
public void testStateResponseWhileRestartingBrokenTransfers() throws Throwable {
// The initial topology is different from the other method's
consistentHashFactory.setOwnerIndexes(new int[][]{{1, 2, 3}, {2, 1, 3}});
consistentHashFactory.triggerRebalance(cache(0));
// waitForStableTopology doesn't work here, since the cache looks already "balanced"
// So we wait for the primary owner of segment 1 to change
eventuallyEquals(address(2), () -> advancedCache(0).getDistributionManager().getReadConsistentHash().locatePrimaryOwnerForSegment(1));
// See https://issues.jboss.org/browse/ISPN-3120?focusedCommentId=12777231
// Start with segment 0 owned by [cache1, cache2, cache3], and segment 1 owned by [cache2, cache1, cache3]
// Trigger a rebalance with cache0 becoming an owner for both segments
// Wait for either cache1 or cache2 to send a StateResponseCommand
// Block the state response on cache0
// Kill the node that didn't receive the request
// Block new state requests from cache0 so that the killed node's segment doesn't have a transfer task
// Unblock the first state response
// Check that the StateResponseCommand hasn't marked state transfer as completed
// Unblock the new state request
// Wait for the state transfer to end and check that state hasn't been lost
StateSequencer sequencer = new StateSequencer();
sequencer.logicalThread("st", "st:block_first_state_response", "st:kill_node", "st:block_second_state_request",
"st:resume_first_state_response", "st:after_first_state_response", "st:check_incomplete",
"st:resume_second_state_request");
final AtomicReference<Address> firstResponseSender = new AtomicReference<>();
CommandMatcher firstStateResponseMatcher = new CommandMatcher() {
CommandMatcher realMatcher = matchCommand(StateResponseCommand.class).matchCount(0).build();
public boolean accept(ReplicableCommand command) {
if (!realMatcher.accept(command))
return false;
firstResponseSender.set(((StateResponseCommand) command).getOrigin());
return true;
}
};
advanceOnInboundRpc(sequencer, cache(0), firstStateResponseMatcher)
.before("st:block_first_state_response", "st:resume_first_state_response")
.after("st:after_first_state_response");
CommandMatcher secondStateRequestMatcher = new CommandMatcher() {
private final AtomicInteger counter = new AtomicInteger();
@Override
public boolean accept(ReplicableCommand command) {
if (command instanceof StateTransferGetTransactionsCommand) {
// Commands 0 and 1 are sent during the first rebalance
// Command 2 is the first sent after the node is killed
if (counter.getAndIncrement() == 2)
return true;
log.debugf("Not blocking command %s", command);
}
return false;
}
};
advanceOnOutboundRpc(sequencer, cache(0), secondStateRequestMatcher)
.before("st:block_second_state_request", "st:resume_second_state_request");
DistributionManager dm0 = advancedCache(0).getDistributionManager();
StateTransferManager stm0 = TestingUtil.extractComponentRegistry(cache(0)).getStateTransferManager();
MagicKey k1 = new MagicKey("k1", cache(1));
assertEquals(Arrays.asList(address(1), address(2), address(3)),
dm0.getCacheTopology().getDistribution(k1).readOwners());
cache(0).put(k1, "v1");
MagicKey k2 = new MagicKey("k2", cache(2));
assertEquals(Arrays.asList(address(2), address(1), address(3)),
dm0.getCacheTopology().getDistribution(k2).readOwners());
cache(0).put(k2, "v2");
// Start the rebalance
consistentHashFactory.setOwnerIndexes(new int[][]{{0, 1, 2}, {0, 2, 1}});
consistentHashFactory.triggerRebalance(cache(0));
// Wait for cache0 to receive the state response
sequencer.enter("st:kill_node");
assertNotNull(dm0.getCacheTopology().getPendingCH());
// No need to update the owner indexes, the CH factory only knows about the cache members
int nodeToKeep = managerIndex(firstResponseSender.get());
int nodeToKill = nodeToKeep == 1 ? 2 : 1;
log.debugf("Blocked state response from %s, killing %s", firstResponseSender.get(), manager(nodeToKill));
cache(nodeToKill).stop();
eventuallyEquals(3, () -> dm0.getCacheTopology().getMembers().size());
sequencer.exit("st:kill_node");
sequencer.enter("st:check_incomplete");
assertTrue(stm0.isStateTransferInProgress());
sequencer.exit("st:check_incomplete");
// Only the 3 live caches are in the collection, wait for the rehash to end
waitForNoRebalance(cache(0), cache(nodeToKeep), cache(3));
assertTrue(dm0.getCacheTopology().isReadOwner(k1));
assertTrue(dm0.getCacheTopology().isReadOwner(k2));
assertEquals("v1", cache(0).get(k1));
assertEquals("v2", cache(0).get(k2));
}
}
| 12,028
| 51.073593
| 140
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/SingleLeaveTest.java
|
package org.infinispan.distribution.rehash;
import org.infinispan.manager.CacheContainer;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.SingleLeaveTest")
public class SingleLeaveTest extends RehashLeaveTestBase {
   Address leaverAddress;
   /**
    * Triggers the rehash by making a single node (c4) leave the cluster.
    */
   void performRehashEvent(boolean offline) {
      leaverAddress = addressOf(c4);
      CacheContainer leavingManager = c4.getCacheManager();
      // Forget the leaver before shutting it down so the base class only
      // validates the remaining members.
      caches.remove(c4);
      cacheManagers.remove(leavingManager);
      TestingUtil.killCacheManagers(leavingManager);
   }
}
| 677
| 31.285714
| 78
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/ConcurrentNonOverlappingLeaveTest.java
|
package org.infinispan.distribution.rehash;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.manager.CacheContainer;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.ConcurrentNonOverlappingLeaveTest")
public class ConcurrentNonOverlappingLeaveTest extends RehashLeaveTestBase {
   Address l1, l2;
   // Two nodes leave at once, so entries in segments owned by *both* leavers may be
   // lost; those segments are excluded from the ownership/value assertions below.
   private Set<Integer> lostSegments = new HashSet<>();

   @Override
   protected void assertOwnershipAndNonOwnership(Object key, boolean allowL1) {
      if (!isLost(key)) {
         super.assertOwnershipAndNonOwnership(key, allowL1);
      }
   }

   @Override
   protected void assertOnAllCaches(Object key, String value) {
      if (!isLost(key)) {
         super.assertOnAllCaches(key, value);
      }
   }

   /** Returns true when the key maps to a segment whose surviving owner set was wiped out. */
   private boolean isLost(Object key) {
      return lostSegments.contains(getCacheTopology(c1).getSegment(key));
   }

   void performRehashEvent(boolean offline) {
      l1 = addressOf(c2);
      l2 = addressOf(c4);
      List<Address> killedNodes = Arrays.asList(l1, l2);
      CacheContainer cm2 = c2.getCacheManager();
      CacheContainer cm4 = c4.getCacheManager();
      // Record every segment about to lose all of its owners at once.
      ConsistentHash writeCH = getCacheTopology(c1).getWriteConsistentHash();
      Set<Integer> doomed = new HashSet<>();
      for (int segment = 0; segment < writeCH.getNumSegments(); segment++) {
         if (writeCH.locateOwnersForSegment(segment).containsAll(killedNodes)) {
            doomed.add(segment);
         }
      }
      lostSegments = doomed;
      log.tracef("These segments will be lost after killing nodes %s: %s", killedNodes, lostSegments);
      cacheManagers.removeAll(Arrays.asList(cm2, cm4));
      caches.removeAll(Arrays.asList(c2, c4));
      TestingUtil.killCacheManagers(cm2, cm4);
   }
}
| 2,088
| 33.245902
| 102
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashTestBase.java
|
package org.infinispan.distribution.rehash;
import static org.testng.Assert.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.infinispan.Cache;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.TestingUtil;
import org.infinispan.commons.test.TestResourceTracker;
import org.testng.annotations.Test;
/**
 * A base test for all rehashing tests. Subclasses provide the actual topology change
 * (join/leave) via {@link #performRehashEvent(boolean)} and the completion condition
 * via {@link #waitForRehashCompletion()}.
 */
@Test(groups = "functional")
public abstract class RehashTestBase extends BaseDistFunctionalTest<Object, String> {
   protected RehashTestBase() {
      cleanup = CleanupPhase.AFTER_METHOD;
      transactional = true;
      performRehashing = true;
   }
   // this setup has 4 running caches: {c1, c2, c3, c4}
   /**
    * This is overridden by subclasses.  Could typically be a JOIN or LEAVE event.
    * @param offline true when a transaction is in-flight while the rehash happens;
    *                presumably informational only — current subclasses ignore it (TODO confirm)
    */
   abstract void performRehashEvent(boolean offline) throws Throwable;
   /**
    * Blocks until a rehash completes.
    */
   abstract void waitForRehashCompletion();
   // Seeds one key per cache (each key is primary-owned by a different node) and
   // verifies initial ownership; returns the keys for the test to mutate/assert on.
   protected List<MagicKey> init() {
      List<MagicKey> keys = new ArrayList<>(Arrays.asList(
            new MagicKey("k1", c1), new MagicKey("k2", c2),
            new MagicKey("k3", c3), new MagicKey("k4", c4)
      ));
      assertEquals(caches.size(), keys.size(), "Received caches" + caches);
      int i = 0;
      for (Cache<Object, String> c : caches) c.put(keys.get(i++), "v0");
      for (MagicKey key : keys) assertOwnershipAndNonOwnership(key, false);
      log.infof("Initialized with keys %s", keys);
      return keys;
   }
   /**
    * Simple test.  Put some state, trigger event, test results
    */
   @Test
   public void testNonTransactional() throws Throwable {
      List<MagicKey> keys = init();
      log.info("Invoking rehash event");
      performRehashEvent(false);
      waitForRehashCompletion();
      log.info("Rehash complete");
      for (MagicKey key : keys) assertOnAllCachesAndOwnership(key, "v0");
   }
   /**
    * More complex - init some state.  Start a new transaction, and midway trigger a rehash.  Then complete transaction
    * and test results.
    */
   @Test
   public void testTransactional() throws Throwable {
      final List<MagicKey> keys = init();
      final CountDownLatch l = new CountDownLatch(1);
      final AtomicBoolean rollback = new AtomicBoolean(false);
      Future<Void> future = fork(() -> {
         try {
            // start a transaction on c1.
            TransactionManager t1 = TestingUtil.getTransactionManager(c1);
            t1.begin();
            c1.put(keys.get(0), "transactionally_replaced");
            Transaction tx = t1.getTransaction();
            // Extra XA resource whose prepare() blocks on the latch, holding the
            // transaction open (cache already prepared) until the rehash has run.
            tx.enlistResource(new XAResourceAdapter() {
               public int prepare(Xid id) {
                  // this would be called *after* the cache prepares.
                  try {
                     log.debug("Unblocking commit");
                     l.await();
                  } catch (InterruptedException e) {
                     Thread.currentThread().interrupt();
                  }
                  return XAResource.XA_OK;
               }
            });
            t1.commit();
         } catch (Exception e) {
            log.error("Error committing transaction", e);
            rollback.set(true);
            throw new RuntimeException(e);
         }
      });
      log.info("Invoking rehash event");
      performRehashEvent(true);
      // Release the blocked prepare() so the in-flight transaction can commit.
      l.countDown();
      future.get(30, TimeUnit.SECONDS);
      //ownership can only be verified after the rehashing has completed
      waitForRehashCompletion();
      log.info("Rehash complete");
      //only check for these values if tx was not rolled back
      if (!rollback.get()) {
         // the ownership of k1 might change during the tx and a cache might end up with it in L1
         assertOwnershipAndNonOwnership(keys.get(0), true);
         assertOwnershipAndNonOwnership(keys.get(1), false);
         assertOwnershipAndNonOwnership(keys.get(2), false);
         assertOwnershipAndNonOwnership(keys.get(3), false);
         // checking the values will bring the keys to L1, so we want to do it after checking ownership
         assertOnAllCaches(keys.get(0), "transactionally_replaced");
         assertOnAllCaches(keys.get(1), "v0");
         assertOnAllCaches(keys.get(2), "v0");
         assertOnAllCaches(keys.get(3), "v0");
      }
   }
   /**
    * A stress test.  One node is constantly modified while a rehash occurs.
    */
   @Test(groups = "stress", timeOut = 15*60*1000)
   public void testNonTransactionalStress() throws Throwable {
      TestResourceTracker.testThreadStarted(this.getTestName());
      stressTest(false);
   }
   /**
    * A stress test.  One node is constantly modified using transactions while a rehash occurs.
    */
   @Test(groups = "stress", timeOut = 15*60*1000)
   public void testTransactionalStress() throws Throwable {
      TestResourceTracker.testThreadStarted(this.getTestName());
      stressTest(true);
   }
   // Spawns one Updater per key, rehashes while they write, then verifies each key
   // holds the last value its updater reports having successfully written.
   private void stressTest(boolean tx) throws Throwable {
      final List<MagicKey> keys = init();
      final CountDownLatch latch = new CountDownLatch(1);
      List<Updater> updaters = new ArrayList<>(keys.size());
      for (MagicKey k : keys) {
         Updater u = new Updater(c1, k, latch, tx);
         u.start();
         updaters.add(u);
      }
      latch.countDown();
      log.info("Invoking rehash event");
      performRehashEvent(false);
      for (Updater u : updaters) u.complete();
      for (Updater u : updaters) u.join();
      waitForRehashCompletion();
      log.info("Rehash complete");
      int i = 0;
      // Relies on updaters' currentValue matching their last successful write.
      for (MagicKey key : keys) assertOnAllCachesAndOwnership(key, "v" + updaters.get(i++).currentValue);
   }
}
/**
 * Continuously updates a single key from a background thread while a rehash is in
 * progress. The latch acts as a start gate so all updaters begin together;
 * {@link #complete()} stops the loop, and {@link #currentValue} holds the counter of
 * the last write that succeeded.
 */
class Updater extends Thread {
   static final Random r = new Random();
   // Counter of the last successfully written value ("v" + currentValue);
   // read by the test thread after join().
   volatile int currentValue = 0;
   MagicKey key;
   Cache cache;
   CountDownLatch latch;
   volatile boolean running = true;
   TransactionManager tm;
   Updater(Cache cache, MagicKey key, CountDownLatch latch, boolean tx) {
      super("Updater-" + key);
      this.key = key;
      this.cache = cache;
      this.latch = latch;
      if (tx) tm = TestingUtil.getTransactionManager(cache);
   }
   public void complete() {
      running = false;
   }
   @Override
   public void run() {
      try {
         // Wait for the start gate. Previously the latch was stored but never awaited,
         // so updaters started writing before the test released them.
         latch.await();
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         return;
      }
      while (running) {
         try {
            int next = currentValue + 1;
            if (tm != null) tm.begin();
            cache.put(key, "v" + next);
            if (tm != null) tm.commit();
            // Publish the counter only after a successful write, so the final
            // assertion in stressTest() checks a value that actually made it in.
            currentValue = next;
            TestingUtil.sleepThread(r.nextInt(10) * 10);
         } catch (Exception e) {
            // Best-effort: failures are expected mid-rehash; retry with the same value.
         }
      }
   }
}
| 7,086
| 30.083333
| 119
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashStressTest.java
|
package org.infinispan.distribution.rehash;
import java.io.IOException;
import java.io.Serializable;
import java.util.LinkedList;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import jakarta.transaction.Status;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.distribution.group.Group;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.jboss.logging.Logger;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
 * Stress test: many executor tasks contend for locks on a small key range while nodes
 * are concurrently added to and removed from the cluster (rehash in progress).
 *
 * @author esalter
 */
@Test(groups = "stress", testName = "distribution.rehash.RehashStressTest", timeOut = 15*60*1000)
public class RehashStressTest extends AbstractInfinispanTest {
   @AfterMethod
   public void stopAllCacheManageres() {
      // Stop every manager created during the test, including those added mid-run.
      while (!cacheManagers.isEmpty()) {
         cacheManagers.poll().stop();
      }
   }
   private static Logger log = Logger.getLogger(RehashStressTest.class.getName());
   /*
    * This test simulates concurrent threads submitting executor
    * tasks to ISPN, at the same time a rehash occurs.. You should see that
    * during high contention for the same lock, on occasion, a rehash will
    * result in stale locks.
    */
   private static final int KEY_RANGE = 10;
   private static final int TEST_THREADS = 40;
   private static final int TEST_LOOPS = 30000;
   public static final int MAX_INTERVAL_BETWEEN_TASK = 1000;
   // NOTE(review): this list is read by executor threads (SimulateTask) while the main
   // thread adds/removes managers — not thread-safe; tolerated for a stress test.
   LinkedList<EmbeddedCacheManager> cacheManagers = new LinkedList<EmbeddedCacheManager>();
   Random random = new Random();
   public void testRehash() throws IOException, InterruptedException {
      // Bootstrap a 4-node cluster before the stress phase starts.
      EmbeddedCacheManager cacheManager = buildCacheManager();
      cacheManagers.addLast(cacheManager);
      cacheManager.getCache("serviceGroup");
      new AddNodeTask().run();
      new AddNodeTask().run();
      new AddNodeTask().run();
      Thread.sleep(3000);
      log.info("Start testing");
      ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(TEST_THREADS);
      executor.prestartAllCoreThreads();
      for (int i = 0; i < TEST_LOOPS; i++) {
         executor.submit(new SimulateTask());
      }
      // While tasks run, churn the topology: add a node every 3s, remove one on iteration 1.
      for (int i = 0; i < 10; i++) {
         try {
            Thread.sleep(3000);
            if (i != 1) {
               new AddNodeTask().run(); //2
            } else {
               new RemoveNodeTask().run();
            }
         } catch (RuntimeException e) {
            log.warn("Error during add/remove node", e);
         }
      }
      log.info("Rehash phase is completed...");
      executor.shutdown();
      executor.awaitTermination(1, TimeUnit.DAYS);
   }
   // Serializable key whose @Group pins all operations for the same int to one owner.
   static class TestKey implements Serializable {
      int key;
      @Group
      public String getGroup() {
         return String.valueOf(key);
      }
      public int getKey() {
         return key;
      }
      public void setKey(int key) {
         this.key = key;
      }
      public TestKey(int key) {
         this.key = key;
      }
      @Override
      public boolean equals(Object obj) {
         if (obj == null) {
            return false;
         }
         if (getClass() != obj.getClass()) {
            return false;
         }
         final TestKey other = (TestKey) obj;
         if (this.key != other.key) {
            return false;
         }
         return true;
      }
      @Override
      public int hashCode() {
         int hash = 5;
         hash = 29 * hash + this.key;
         return hash;
      }
      @Override
      public String toString() {
         return "TestKey{" + "key=" + key + '}';
      }
   }
   private EmbeddedCacheManager buildCacheManager() throws IOException {
      EmbeddedCacheManager cacheManager = TestCacheManagerFactory.fromXml("erm-cluster.xml");
      return cacheManager;
   }
   /**
    * Simulates a client task. There is a random delay before submitting it to
    * prevent flooding of the executor service.
    */
   class SimulateTask implements Runnable {
      @Override
      public void run() {
         try {
            Thread.sleep(random.nextInt(MAX_INTERVAL_BETWEEN_TASK)); //random sleep a while before really submit the task to ISPN
         } catch (InterruptedException ex) {
            // interruption during the pre-submit delay is harmless; proceed anyway
         }
         TestKey key = new TestKey(random.nextInt(KEY_RANGE));
         try {
            log.info("Submitting a task " + key);
            // Pick a random (possibly concurrently stopping) manager to submit from.
            EmbeddedCacheManager cacheManager = cacheManagers.get(random.nextInt(cacheManagers.size()));
            ClusterExecutor executor = cacheManager.executor();
            AtomicReference<String> value = new AtomicReference<>();
            CompletableFuture<Void> future = executor.submitConsumer(new TransactionTask("serviceGroup", key), (a, v, t) -> {
               if (t != null) {
                  throw new CacheException(t);
               }
               value.set(v);
            });
            log.info("Task result=" + future.thenApply(ignore -> value.get()).get());
         } catch (Exception ex) {
            // Failures are expected under topology churn; log and move on.
            log.warn("error during executing task " + key, ex);
         }
      }
   }
   // Runs on a cluster member: locks the key inside a transaction and reports the outcome.
   static class TransactionTask
         implements Function<EmbeddedCacheManager, String>, Serializable {
      private final String cacheName;
      private final TestKey key;
      TransactionTask(String cacheName, TestKey key) {
         this.cacheName = cacheName;
         this.key = key;
      }
      @Override
      public String apply(EmbeddedCacheManager embeddedCacheManager) {
         try {
            Cache<TestKey, ?> cache = embeddedCacheManager.getCache(cacheName);
            TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
            try {
               tm.begin();
               return performWork(cache);
            } catch (Exception e) {
               log.warn("error during perform work " + key, e);
               tm.setRollbackOnly();
               throw e;
            } finally {
               // Commit only if the tx is still cleanly active; otherwise roll back.
               int status = -1;
               try {
                  status = tm.getStatus();
               } catch (Exception e) {
                  // status query failed; fall through and roll back
               }
               if (status == Status.STATUS_ACTIVE) {
                  tm.commit();
               } else {
                  tm.rollback();
               }
            }
         } catch (Exception e) {
            throw new CacheException(e);
         }
      }
      private String performWork(Cache<TestKey, ?> cache) {
         log.info( "Locking " + key);
         cache.getAdvancedCache().lock(key);
         return "locked " + key;
      }
   }
   class RemoveNodeTask implements Runnable {
      @Override
      public void run() {
         try {
            int size = cacheManagers.size();
            int index = random.nextInt(size);
            EmbeddedCacheManager cacheManager = cacheManagers.remove(index); //This is not thread safe, but should be ok for this test since the main thread is the only writrer to this list.
            log.info("Shutting down " + cacheManager.getAddress());
            cacheManager.stop();
            log.info("Shut down " + cacheManager.getAddress() + " complete");
         } catch (Exception e) {
            log.warn("Error during node removal", e);
         }
      }
   }
   class AddNodeTask implements Runnable {
      @Override
      public void run() {
         try {
            log.info("Starting a new cache manager");
            EmbeddedCacheManager cacheManager = buildCacheManager();
            // Touch the cache so the new node actually joins before being listed.
            cacheManager.getCache("serviceGroup");
            cacheManagers.addLast(cacheManager);
         } catch (Exception e) {
            log.warn("Error during node addition", e);
         }
      }
   }
}
| 8,566
| 30.72963
| 194
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyAwareDistSyncUnsafeFuncTest.java
|
package org.infinispan.distribution.topologyaware;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.DistSyncUnsafeFuncTest;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.Test;
/**
 * @author Mircea.Markus@jboss.com
 * @since 4.2
 */
@Test(testName="distribution.topologyaware.TopologyAwareDistSyncUnsafeFuncTest", groups = "functional")
public class TopologyAwareDistSyncUnsafeFuncTest extends DistSyncUnsafeFuncTest {
   @Override
   protected EmbeddedCacheManager addClusterEnabledCacheManager(TransportFlags flags) {
      int index = cacheManagers.size();
      // Nodes 0-1 share rack r0 and nodes 2-3 share rack r1; all run on machine m0.
      final String[] racks = {"r0", "r0", "r1", "r1"};
      if (index >= racks.length) {
         // More than 4 managers is a test-setup error, same as the original switch default.
         throw new RuntimeException("Bad!");
      }
      String rack = racks[index];
      String machine = "m0";
      GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gcb.transport().rackId(rack).machineId(machine);
      EmbeddedCacheManager cm = TestCacheManagerFactory.createClusteredCacheManager(gcb, getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC), flags);
      cacheManagers.add(cm);
      return cm;
   }
}
| 1,728
| 29.875
| 149
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyInfoBroadcastTest.java
|
package org.infinispan.distribution.topologyaware;
import static org.testng.Assert.assertEquals;
import java.util.Arrays;
import java.util.List;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.impl.DistributionManagerImpl;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.TopologyAwareAddress;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Verifies that every cluster member learns the site/rack/machine info of all the
 * other members, both initially and after a node leaves.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.2
 */
@Test(groups = "functional", testName = "distribution.topologyaware.TopologyInfoBroadcastTest")
public class TopologyInfoBroadcastTest extends MultipleCacheManagersTest {
   @Override
   protected void createCacheManagers() throws Throwable {
      // Three managers, each on its own site/rack/machine: s0/r0/m0, s1/r1/m1, s2/r2/m2.
      for (int i = 0; i < 3; i++) {
         GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
         updatedSiteInfo(gcb, "s" + i, "r" + i, "m" + i);
         cacheManagers.add(TestCacheManagerFactory.createClusteredCacheManager(gcb, getClusterConfig()));
      }
      log.info("Here it starts");
      waitForClusterToForm();
      log.info("Here it ends");
   }
   protected ConfigurationBuilder getClusterConfig() {
      return getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
   }
   private void updatedSiteInfo(GlobalConfigurationBuilder gcb, String s, String r, String m) {
      gcb.transport().siteId(s).rackId(r).machineId(m);
   }
   public void testIsReplicated() {
      DistributionManagerImpl dm0 = (DistributionManagerImpl) advancedCache(0).getDistributionManager();
      log.trace("distributionManager.ConsistentHash() = " + dm0.getWriteConsistentHash());
      // Every node must report the same topology-aware member info in its write CH.
      assertTopologyInfo3Nodes(dm0.getWriteConsistentHash().getMembers());
      DistributionManagerImpl dm1 = (DistributionManagerImpl) advancedCache(1).getDistributionManager();
      assertTopologyInfo3Nodes(dm1.getWriteConsistentHash().getMembers());
      DistributionManagerImpl dm2 = (DistributionManagerImpl) advancedCache(2).getDistributionManager();
      assertTopologyInfo3Nodes(dm2.getWriteConsistentHash().getMembers());
      ConsistentHash ch0 = advancedCache(0).getDistributionManager().getWriteConsistentHash();
      ConsistentHash ch1 = advancedCache(1).getDistributionManager().getWriteConsistentHash();
      ConsistentHash ch2 = advancedCache(2).getDistributionManager().getWriteConsistentHash();
      assertEquals(ch0.getMembers(), ch1.getMembers());
      assertEquals(ch0.getMembers(), ch2.getMembers());
   }
   @Test(dependsOnMethods = "testIsReplicated")
   public void testNodeLeaves() {
      TestingUtil.killCacheManagers(manager(1));
      TestingUtil.blockUntilViewsReceived(60000, false, cache(0), cache(2));
      TestingUtil.waitForNoRebalance(cache(0), cache(2));
      // The two survivors must agree on the remaining members and their topology info.
      DistributionManagerImpl dm0 = (DistributionManagerImpl) advancedCache(0).getDistributionManager();
      assertTopologyInfo2Nodes(dm0.getWriteConsistentHash().getMembers());
      DistributionManagerImpl dm2 = (DistributionManagerImpl) advancedCache(2).getDistributionManager();
      assertTopologyInfo2Nodes(dm2.getWriteConsistentHash().getMembers());
      ConsistentHash ch0 = advancedCache(0).getDistributionManager().getWriteConsistentHash();
      ConsistentHash ch2 = advancedCache(2).getDistributionManager().getWriteConsistentHash();
      assertEquals(ch0.getMembers(), ch2.getMembers());
   }
   private void assertTopologyInfo3Nodes(List<Address> caches) {
      assertTopologyInfo2Nodes(Arrays.asList(caches.get(0), caches.get(2)));
      assertAddressInfo(caches.get(1), 1);
   }
   private void assertTopologyInfo2Nodes(List<Address> caches) {
      assertAddressInfo(caches.get(0), 0);
      assertAddressInfo(caches.get(1), 2);
   }
   /** Checks that the address carries site/rack/machine ids with the given numeric suffix. */
   private void assertAddressInfo(Address address, int suffix) {
      TopologyAwareAddress taa = (TopologyAwareAddress) address;
      assertEquals(taa.getSiteId(), "s" + suffix);
      assertEquals(taa.getRackId(), "r" + suffix);
      assertEquals(taa.getMachineId(), "m" + suffix);
   }
}
| 5,480
| 47.504425
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyInfoBroadcastNoRehashTest.java
|
package org.infinispan.distribution.topologyaware;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
 * Same checks as {@link TopologyInfoBroadcastTest}, but with in-memory state
 * transfer disabled so topology info must still be broadcast without a rehash.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.2
 */
@Test(groups = "functional", testName = "distribution.topologyaware.TopologyInfoBroadcastNoRehashTest")
public class TopologyInfoBroadcastNoRehashTest extends TopologyInfoBroadcastTest {
   @Override
   protected ConfigurationBuilder getClusterConfig() {
      ConfigurationBuilder builder = super.getClusterConfig();
      // Disable state transfer of in-memory entries; only topology info travels.
      builder.clustering().stateTransfer().fetchInMemoryState(false);
      return builder;
   }
}
| 646
| 31.35
| 103
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyAwareConsistentHashFactoryTest.java
|
package org.infinispan.distribution.topologyaware;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;
import org.infinispan.distribution.TestTopologyAwareAddress;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.HashFunctionPartitioner;
import org.infinispan.distribution.ch.impl.OwnershipStatistics;
import org.infinispan.distribution.ch.impl.TopologyAwareConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.TopologyAwareAddress;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
* @author Mircea.Markus@jboss.com
* @author Dan Berindei
* @since 4.2
*/
@Test(groups = "unit", testName = "distribution.topologyaware.TopologyAwareConsistentHashFactoryTest")
public class TopologyAwareConsistentHashFactoryTest extends AbstractInfinispanTest {
private static final Log log = LogFactory.getLog(TopologyAwareConsistentHashFactoryTest.class);
private static final int ADDRESS_COUNT = 25;
public int numSegments = 100;
private TestTopologyAwareAddress[] testAddresses;
private List<Address> chMembers;
private Map<Address, Float> capacityFactors;
private ConsistentHashFactory<DefaultConsistentHash> chf;
private KeyPartitioner keyPartitioner;
protected DefaultConsistentHash ch;
   @BeforeMethod()
   public void setUp() {
      // Fresh factory, member list and addresses for every test method.
      chf = createConsistentHashFactory();
      chMembers = new ArrayList<>(ADDRESS_COUNT);
      capacityFactors = null;
      testAddresses = new TestTopologyAwareAddress[ADDRESS_COUNT];
      for (int i = 0; i < ADDRESS_COUNT; i++) {
         testAddresses[i] = new TestTopologyAwareAddress(i * 100);
         // Name addresses 'A', 'B', 'C', ... for readable assertion failures.
         testAddresses[i].setName(Character.toString((char) ('A' + i)));
      }
      keyPartitioner = new HashFunctionPartitioner(numSegments);
   }
protected ConsistentHashFactory<DefaultConsistentHash> createConsistentHashFactory() {
return new TopologyAwareConsistentHashFactory();
}
public void testNumberOfOwners() {
addNode(testAddresses[0], "m0", null, null);
updateConsistentHash(1);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 1));
updateConsistentHash(2);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 1));
addNode(testAddresses[1], "m1", null, null);
updateConsistentHash(1);
int numSegments = ch.getNumSegments();
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 1));
updateConsistentHash(2);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 2));
updateConsistentHash(3);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 2));
addNode(testAddresses[2], "m0", null, null);
updateConsistentHash(1);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 1));
updateConsistentHash(2);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 2));
updateConsistentHash(3);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 3));
updateConsistentHash(4);
IntStream.range(0, numSegments).forEach(i -> assertOwners(i, 3));
}
private void assertOwners(int segment, int expected) {
assertEquals(ch.locateOwnersForSegment(segment).size(), expected);
}
public void testDifferentMachines() {
addNode(testAddresses[0], "m0", null, null);
addNode(testAddresses[1], "m1", null, null);
addNode(testAddresses[2], "m0", null, null);
addNode(testAddresses[3], "m1", null, null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
}
public void testNumOwnerBiggerThanAvailableNodes() {
// test first with one node
addNode(testAddresses[0], "m0", null, null);
addNode(testAddresses[1], "m0", null, null);
addNode(testAddresses[2], "m0", null, null);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
assertAllLocationsWithRebalance(99);
}
public void testDifferentMachines2() {
addNode(testAddresses[0], "m0", null, null);
addNode(testAddresses[1], "m0", null, null);
addNode(testAddresses[2], "m1", null, null);
addNode(testAddresses[3], "m1", null, null);
addNode(testAddresses[4], "m2", null, null);
addNode(testAddresses[5], "m2", null, null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testDifferentMachines3() {
addNode(testAddresses[0], "m0", "r1", "s1");
addNode(testAddresses[1], "m1", "r1", "s1");
addNode(testAddresses[2], "m2", "r1", "s1");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testDifferentRacksAndMachines() {
addNode(testAddresses[0], "m0", "r0", null);
addNode(testAddresses[1], "m1", "r0", null);
addNode(testAddresses[2], "m2", "r1", null);
addNode(testAddresses[3], "m3", "r2", null);
addNode(testAddresses[4], "m2", "r1", null);
addNode(testAddresses[5], "m2", "r2", null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testAllSameMachine() {
addNode(testAddresses[0], "m0", null, null);
addNode(testAddresses[1], "m0", null, null);
addNode(testAddresses[2], "m0", null, null);
addNode(testAddresses[3], "m0", null, null);
addNode(testAddresses[4], "m0", null, null);
addNode(testAddresses[5], "m0", null, null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testDifferentSites() {
addNode(testAddresses[0], "m0", null, "s0");
addNode(testAddresses[1], "m1", null, "s0");
addNode(testAddresses[2], "m2", null, "s1");
addNode(testAddresses[3], "m3", null, "s1");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testSitesMachines2() {
addNode(testAddresses[0], "m0", null, "s0");
addNode(testAddresses[1], "m1", null, "s1");
addNode(testAddresses[2], "m2", null, "s0");
addNode(testAddresses[3], "m3", null, "s2");
addNode(testAddresses[4], "m4", null, "s1");
addNode(testAddresses[5], "m5", null, "s1");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testSitesMachinesSameMachineName() {
addNode(testAddresses[0], "m0", null, "r0");
addNode(testAddresses[1], "m0", null, "r1");
addNode(testAddresses[2], "m0", null, "r0");
addNode(testAddresses[3], "m0", null, "r2");
addNode(testAddresses[4], "m0", null, "r1");
addNode(testAddresses[5], "m0", null, "r1");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testDifferentRacks() {
addNode(testAddresses[0], "m0", "r0", null);
addNode(testAddresses[1], "m1", "r0", null);
addNode(testAddresses[2], "m2", "r1", null);
addNode(testAddresses[3], "m3", "r1", null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testRacksMachines2() {
addNode(testAddresses[0], "m0", "r0", null);
addNode(testAddresses[1], "m1", "r1", null);
addNode(testAddresses[2], "m2", "r0", null);
addNode(testAddresses[3], "m3", "r2", null);
addNode(testAddresses[4], "m4", "r1", null);
addNode(testAddresses[5], "m5", "r1", null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testRacksMachinesSameMachineName() {
addNode(testAddresses[0], "m0", "r0", null);
addNode(testAddresses[1], "m0", "r1", null);
addNode(testAddresses[2], "m0", "r0", null);
addNode(testAddresses[3], "m0", "r2", null);
addNode(testAddresses[4], "m0", "r1", null);
addNode(testAddresses[5], "m0", "r1", null);
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testComplexScenario() {
// {s0: {r0: {m0, m1}}, s1: {r0: {m0, m1, m2}, r1: {m0}}}
addNode(testAddresses[0], "m2", "r0", "s1");
addNode(testAddresses[1], "m1", "r0", "s0");
addNode(testAddresses[2], "m1", "r0", "s1");
addNode(testAddresses[3], "m1", "r1", "s0");
addNode(testAddresses[4], "m0", "r0", "s1");
addNode(testAddresses[5], "m0", "r1", "s1");
addNode(testAddresses[6], "m0", "r1", "s0");
addNode(testAddresses[7], "m0", "r0", "s1");
addNode(testAddresses[8], "m0", "r0", "s0");
addNode(testAddresses[9], "m0", "r0", "s0");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
assertAllLocationsWithRebalance(4);
}
public void testComplexScenario2() {
// {s0: {r0: {m0, m1, m2}, r1: {m3, m4, m5}, r1: {m6, m7, m8}}}
addNode(testAddresses[0], "m0", "r0", "s0");
addNode(testAddresses[1], "m1", "r0", "s0");
addNode(testAddresses[2], "m2", "r0", "s0");
addNode(testAddresses[3], "m3", "r1", "s0");
addNode(testAddresses[4], "m4", "r1", "s0");
addNode(testAddresses[5], "m5", "r1", "s0");
addNode(testAddresses[6], "m6", "r2", "s0");
addNode(testAddresses[7], "m7", "r2", "s0");
addNode(testAddresses[8], "m8", "r2", "s0");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
}
public void testLoadFactors() {
try {
capacityFactors = new HashMap<>();
capacityFactors.put(testAddresses[0], 2.0f);
capacityFactors.put(testAddresses[1], 0.0f);
capacityFactors.put(testAddresses[2], 1.0f);
capacityFactors.put(testAddresses[3], 2.0f);
capacityFactors.put(testAddresses[4], 0.0f);
capacityFactors.put(testAddresses[5], 1.0f);
capacityFactors.put(testAddresses[6], 2.0f);
capacityFactors.put(testAddresses[7], 0.0f);
capacityFactors.put(testAddresses[8], 1.0f);
// {s0: {r0: {m0, m1, m2}, r1: {m3, m4, m5}, r1: {m6, m7, m8}}}
addNode(testAddresses[0], "m0", "r0", "s0");
addNode(testAddresses[1], "m1", "r0", "s0");
addNode(testAddresses[2], "m2", "r0", "s0");
addNode(testAddresses[3], "m3", "r1", "s0");
addNode(testAddresses[4], "m4", "r1", "s0");
addNode(testAddresses[5], "m5", "r1", "s0");
addNode(testAddresses[6], "m6", "r2", "s0");
addNode(testAddresses[7], "m7", "r2", "s0");
addNode(testAddresses[8], "m8", "r2", "s0");
assertAllLocationsWithRebalance(1);
assertAllLocationsWithRebalance(2);
assertAllLocationsWithRebalance(3);
} finally {
capacityFactors = null;
}
}
private void assertAllLocationsWithRebalance(int numOwners) {
ch = chf.create(numOwners, numSegments, chMembers, capacityFactors);
List<Address> membersWithLoad = computeNodesWithLoad(chMembers);
assertAllLocations(membersWithLoad, numOwners);
assertDistribution(membersWithLoad, numOwners);
ch = chf.create(numOwners, numSegments, chMembers.subList(0, 1), capacityFactors);
assertAllLocations(chMembers.subList(0, 1), numOwners);
for (int i = 2; i <= chMembers.size(); i++) {
List<Address> currentMembers = chMembers.subList(0, i);
log.debugf("Created CH with numOwners %d, members %s", numOwners, currentMembers);
ch = chf.updateMembers(ch, currentMembers, capacityFactors);
ch = chf.rebalance(ch);
membersWithLoad = computeNodesWithLoad(currentMembers);
assertAllLocations(membersWithLoad, numOwners);
}
}
private List<Address> computeNodesWithLoad(List<Address> nodes) {
List<Address> membersWithLoad = new ArrayList<>(nodes.size());
for (Address a : nodes) {
if (capacityFactors == null || capacityFactors.get(a) > 0.0) {
membersWithLoad.add(a);
}
}
return membersWithLoad;
}
protected void assertDistribution(List<Address> currentMembers, int numOwners) {
assertDistribution(currentMembers, numOwners, numSegments);
}
protected void assertDistribution(List<Address> currentMembers, int numOwners, int numSegments) {
TopologyAwareOwnershipStatistics stats = new TopologyAwareOwnershipStatistics(ch);
log.tracef("Ownership stats: %s", stats);
for (Address node : currentMembers) {
float expectedPrimarySegments = stats.computeExpectedPrimarySegments(node);
float expectedOwnedSegments = stats.computeExpectedOwnedSegments(node);
int owned = stats.getOwned(node);
int primaryOwned = stats.getPrimaryOwned(node);
assertTrue(expectedPrimarySegments - 1 <= primaryOwned,
"Too few primary segments for node " + node);
assertTrue(primaryOwned <= expectedPrimarySegments + 1,
"Too many primary segments for node " + node);
assertTrue(Math.floor(expectedOwnedSegments * 0.7) <= owned,
"Too few segments for node " + node);
assertTrue(owned <= Math.ceil(expectedOwnedSegments * 1.25),
"Too many segments for node " + node);
}
}
private int countMachines(List<Address> addresses) {
Set<String> machines = new HashSet<>(addresses.size());
for (Address a : addresses) {
TopologyAwareAddress taa = (TopologyAwareAddress) a;
machines.add(taa.getMachineId() + taa.getRackId() + taa.getSiteId());
}
return machines.size();
}
private int countRacks(List<Address> addresses) {
Set<String> racks = new HashSet<>(addresses.size());
for (Address a : addresses) {
TopologyAwareAddress taa = (TopologyAwareAddress) a;
racks.add(taa.getRackId() + taa.getSiteId());
}
return racks.size();
}
private int countSites(List<Address> addresses) {
Set<String> sites = new HashSet<>(addresses.size());
for (Address a : addresses) {
TopologyAwareAddress taa = (TopologyAwareAddress) a;
sites.add(taa.getSiteId());
}
return sites.size();
}
private void assertAllLocations(List<Address> currentMembers, int numOwners) {
assertAllLocations(currentMembers, numOwners, numSegments);
}
private void assertAllLocations(List<Address> currentMembers, int numOwners, int numSegments) {
int expectedOwners = Math.min(numOwners, currentMembers.size());
int expectedMachines = Math.min(expectedOwners, countMachines(currentMembers));
int expectedRacks = Math.min(expectedOwners, countRacks(currentMembers));
int expectedSites = Math.min(expectedOwners, countSites(currentMembers));
for (int segment = 0; segment < numSegments; segment++) {
assertSegmentLocation(segment, expectedOwners, expectedMachines, expectedRacks, expectedSites);
}
}
public void testConsistencyWhenNodeLeaves() {
addNode(testAddresses[0], "m2", "r0", "s1");
addNode(testAddresses[1], "m1", "r0", "s0");
addNode(testAddresses[2], "m1", "r0", "s1");
addNode(testAddresses[3], "m1", "r1", "s0");
addNode(testAddresses[4], "m0", "r0", "s1");
addNode(testAddresses[5], "m0", "r1", "s1");
addNode(testAddresses[6], "m0", "r1", "s0");
addNode(testAddresses[7], "m0", "r0", "s3");
addNode(testAddresses[8], "m0", "r0", "s2");
addNode(testAddresses[9], "m0", "r0", "s0");
int numOwners = 3;
updateConsistentHash(numOwners);
assertAllLocations(chMembers, numOwners);
assertDistribution(chMembers, numOwners);
for (Address addr : chMembers) {
log.debugf("Removing node %s", addr);
List<Address> addressCopy = new ArrayList<>(chMembers);
addressCopy.remove(addr);
DefaultConsistentHash newCH = chf.updateMembers(ch, addressCopy, null);
newCH = chf.rebalance(newCH);
// Allow a small number of segment moves, even though this is a leave, because the CH factory
// generates extra moves trying to balance the CH.
AtomicInteger movedSegmentsCount = new AtomicInteger(0);
for (int segment = 0; segment < numSegments; segment++) {
checkConsistency(segment, numOwners, addr, newCH, movedSegmentsCount);
}
assert movedSegmentsCount.get() <= numSegments * numOwners * 0.1 :
String.format("Too many moved segments after leave: %d. CH after leave is: %s\nPrevious: %s",
movedSegmentsCount.get(), newCH, ch);
}
}
private void checkConsistency(int segment, int replCount, Address removedAddress,
DefaultConsistentHash newCH, AtomicInteger movedSegmentsCount) {
List<Address> removedOwners = new ArrayList<>(ch.locateOwnersForSegment(segment));
List<Address> currentOwners = newCH.locateOwnersForSegment(segment);
removedOwners.remove(removedAddress);
removedOwners.removeAll(currentOwners);
assertEquals(replCount, currentOwners.size(), currentOwners.toString());
if (!currentOwners.containsAll(removedOwners))
movedSegmentsCount.addAndGet(removedOwners.size());
}
private void assertSegmentLocation(int segment, int expectedOwners, int expectedMachines, int expectedRacks,
int expectedSites) {
List<Address> received = ch.locateOwnersForSegment(segment);
// Check the number of addresses and uniqueness
assertEquals(received.size(), expectedOwners);
Set<Address> receivedUnique = new HashSet<>(received);
assertEquals(receivedUnique.size(), expectedOwners);
// Check the number of machines
Set<String> receivedMachines = new HashSet<>();
for (Address a : received) {
TopologyAwareAddress taa = (TopologyAwareAddress) a;
receivedMachines.add(taa.getMachineId() + "|" + taa.getRackId() + "|" + taa.getSiteId());
}
assertEquals(receivedMachines.size(), expectedMachines);
// Check the number of racks
Set<String> receivedRacks = new HashSet<>();
for (Address a : received) {
TopologyAwareAddress taa = (TopologyAwareAddress) a;
receivedRacks.add(taa.getRackId() + "|" + taa.getSiteId());
}
assertEquals(receivedRacks.size(), expectedRacks);
// Check the number of sites
Set<String> receivedSites = new HashSet<>();
for (Address a : received) {
receivedSites.add(((TopologyAwareAddress) a).getSiteId());
}
assertEquals(receivedSites.size(), expectedSites);
}
private void addNode(TestTopologyAwareAddress address,
String machineId, String rackId, String siteId) {
address.setSiteId(siteId);
address.setRackId(rackId);
address.setMachineId(machineId);
chMembers.add(address);
}
protected void updateConsistentHash(int numOwners) {
updateConsistentHash(numOwners, numSegments);
}
private void updateConsistentHash(int numOwners, int numSegments) {
ch = chf.create(numOwners, numSegments, chMembers, capacityFactors);
log.debugf("Created CH with numOwners %d, members %s", numOwners, chMembers);
}
@Test(timeOut = 10000)
public void testSmallNumberOfSegments() {
for (int i = 0; i < 3; i++) {
addNode(testAddresses[i], "m0", "r0", "s0");
}
updateConsistentHash(2, 1);
assertAllLocations(chMembers, 2, 1);
assertDistribution(chMembers, 2, 1);
for (int i = 3; i < ADDRESS_COUNT; i++) {
addNode(testAddresses[i], "m0", "r0", "s0");
}
updateConsistentHash(2, 256);
assertAllLocations(chMembers, 2, 1);
assertDistribution(chMembers, 2, 1);
}
}
/**
 * Aggregates the per-node counters of an {@link OwnershipStatistics} along the
 * site/rack/machine hierarchy described by a {@link TopologyInfo}, and renders
 * them as an indented tree via {@link #toString()}.
 */
class TopologyAwareOwnershipStatistics {
   TopologyInfo topologyInfo;
   OwnershipStatistics stats;
   private final int numSegments;
   private final int numOwners;

   /** Derives both the topology model and the ownership counters from an existing CH. */
   public TopologyAwareOwnershipStatistics(DefaultConsistentHash ch) {
      numSegments = ch.getNumSegments();
      numOwners = ch.getNumOwners();
      topologyInfo = new TopologyInfo(numSegments, numOwners, ch.getMembers(), ch.getCapacityFactors());
      stats = new OwnershipStatistics(ch, ch.getMembers());
   }

   /** Wraps pre-computed topology and ownership data. */
   public TopologyAwareOwnershipStatistics(TopologyInfo topologyInfo, OwnershipStatistics stats, int numSegments, int numOwners) {
      this.topologyInfo = topologyInfo;
      this.stats = stats;
      this.numSegments = numSegments;
      this.numOwners = numOwners;
   }

   /** Total segment copies owned by all nodes of {@code site}. */
   public int getSiteOwned(String site) {
      int total = 0;
      for (Address member : topologyInfo.getSiteNodes(site)) {
         total += stats.getOwned(member);
      }
      return total;
   }

   /** Primary-owned segments summed over all nodes of {@code site}. */
   public int getSitePrimaryOwned(String site) {
      int total = 0;
      for (Address member : topologyInfo.getSiteNodes(site)) {
         total += stats.getPrimaryOwned(member);
      }
      return total;
   }

   /** Total segment copies owned by all nodes of one rack. */
   public int getRackOwned(String site, String rack) {
      int total = 0;
      for (Address member : topologyInfo.getRackNodes(site, rack)) {
         total += stats.getOwned(member);
      }
      return total;
   }

   /** Primary-owned segments summed over all nodes of one rack. */
   public int getRackPrimaryOwned(String site, String rack) {
      int total = 0;
      for (Address member : topologyInfo.getRackNodes(site, rack)) {
         total += stats.getPrimaryOwned(member);
      }
      return total;
   }

   /** Total segment copies owned by all nodes of one machine. */
   public int getMachineOwned(String site, String rack, String machine) {
      int total = 0;
      for (Address member : topologyInfo.getMachineNodes(site, rack, machine)) {
         total += stats.getOwned(member);
      }
      return total;
   }

   /** Primary-owned segments summed over all nodes of one machine. */
   public int getMachinePrimaryOwned(String site, String rack, String machine) {
      int total = 0;
      for (Address member : topologyInfo.getMachineNodes(site, rack, machine)) {
         total += stats.getPrimaryOwned(member);
      }
      return total;
   }

   /** Segment copies owned by a single node. */
   public int getOwned(Address node) {
      return stats.getOwned(node);
   }

   /** Segments for which a single node is the primary owner. */
   public int getPrimaryOwned(Address node) {
      return stats.getPrimaryOwned(node);
   }

   /** Expected (fractional) number of primary-owned segments for {@code node}. */
   public float computeExpectedPrimarySegments(Address node) {
      return topologyInfo.getExpectedPrimarySegments(node);
   }

   /** Expected (fractional) number of owned segments for {@code node}. */
   public float computeExpectedOwnedSegments(Address node) {
      return topologyInfo.getExpectedOwnedSegments(node);
   }

   @Override
   public String toString() {
      // Indented tree: cluster totals, then per-site, per-rack, per-machine, per-node.
      StringBuilder output = new StringBuilder("TopologyAwareOwnershipStatistics{\n");
      output.append(String.format("cluster: %d(%dp)\n", stats.sumOwned(), stats.sumPrimaryOwned()));
      for (String siteId : topologyInfo.getAllSites()) {
         output.append(String.format("  %s: %d(%dp)\n", siteId, getSiteOwned(siteId), getSitePrimaryOwned(siteId)));
         for (String rackId : topologyInfo.getSiteRacks(siteId)) {
            output.append(String.format("    %s: %d(%dp)\n", rackId, getRackOwned(siteId, rackId),
                  getRackPrimaryOwned(siteId, rackId)));
            for (String machineId : topologyInfo.getRackMachines(siteId, rackId)) {
               output.append(String.format("      %s: %d(%dp)\n", machineId, getMachineOwned(siteId, rackId, machineId),
                     getMachinePrimaryOwned(siteId, rackId, machineId)));
               for (String nodeLine : machineNodeLines(siteId, rackId, machineId)) {
                  output.append(nodeLine);
               }
            }
         }
      }
      output.append('}');
      return output.toString();
   }

   // Formats the per-node lines for one machine of the toString() tree.
   private List<String> machineNodeLines(String siteId, String rackId, String machineId) {
      List<String> lines = new ArrayList<>();
      for (Address member : topologyInfo.getMachineNodes(siteId, rackId, machineId)) {
         lines.add(String.format("        %s: %d(%dp) %.1f(%.1fp)\n", member, getOwned(member),
               stats.getPrimaryOwned(member), topologyInfo.getExpectedOwnedSegments(member),
               topologyInfo.getExpectedPrimarySegments(member)));
      }
      return lines;
   }
}
| 25,303
| 37.989214
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyAwareSyncConsistentHashFactoryTest.java
|
package org.infinispan.distribution.topologyaware;
import static org.testng.Assert.assertTrue;
import java.util.List;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.TopologyAwareSyncConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
 * Runs the {@link TopologyAwareConsistentHashFactoryTest} scenarios against
 * {@link TopologyAwareSyncConsistentHashFactory}, with looser distribution bounds
 * than the base class.
 *
 * @author Mircea.Markus@jboss.com
 * @author Dan Berindei
 * @since 5.2
 */
@Test(groups = "unit", testName = "distribution.topologyaware.TopologyAwareSyncConsistentHashFactoryTest")
public class TopologyAwareSyncConsistentHashFactoryTest extends TopologyAwareConsistentHashFactoryTest {
   // static, matching the base class's logger declaration
   private static final Log log = LogFactory.getLog(TopologyAwareSyncConsistentHashFactoryTest.class);

   @Override
   protected ConsistentHashFactory<DefaultConsistentHash> createConsistentHashFactory() {
      return new TopologyAwareSyncConsistentHashFactory();
   }

   /**
    * Same balance check as the base class, but both primary-owned and owned segment counts
    * may deviate between 0.7x and 1.2x of the expected value.
    */
   @Override
   protected void assertDistribution(List<Address> currentMembers, int numOwners, int numSegments) {
      TopologyAwareOwnershipStatistics stats = new TopologyAwareOwnershipStatistics(ch);
      // Fix: pass the statistics as a format argument instead of concatenating into the
      // format string. tracef("..." + stats) rendered stats.toString() eagerly even with
      // TRACE disabled, and would misbehave if toString() ever contained a '%' conversion.
      log.tracef("Ownership stats: %s", stats);
      for (Address node : currentMembers) {
         float expectedPrimarySegments = stats.computeExpectedPrimarySegments(node);
         float expectedOwnedSegments = stats.computeExpectedOwnedSegments(node);
         int primaryOwned = stats.getPrimaryOwned(node);
         int owned = stats.getOwned(node);
         // Failure messages added for parity with the base class's assertions.
         assertTrue(Math.floor(0.7 * expectedPrimarySegments) <= primaryOwned,
               "Too few primary segments for node " + node);
         assertTrue(primaryOwned <= Math.ceil(1.2 * expectedPrimarySegments),
               "Too many primary segments for node " + node);
         assertTrue(Math.floor(0.7 * expectedOwnedSegments) <= owned,
               "Too few segments for node " + node);
         assertTrue(owned <= Math.ceil(1.2 * expectedOwnedSegments),
               "Too many segments for node " + node);
      }
   }
}
| 1,939
| 41.173913
| 106
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyAwareDistAsyncFuncTest.java
|
package org.infinispan.distribution.topologyaware;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.DistAsyncFuncTest;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.Test;
/**
 * Runs {@link DistAsyncFuncTest} on a cluster whose members carry rack/machine
 * topology information.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.2
 */
@Test (groups = "functional", testName = "distribution.topologyaware.TopologyAwareDistAsyncFuncTest")
public class TopologyAwareDistAsyncFuncTest extends DistAsyncFuncTest {

   // Topology assignment by creation order: node 0 -> r0/m0, 1 -> r1/m0, 2 -> r1/m0, 3 -> r1/m1.
   private static final String[] RACKS    = { "r0", "r1", "r1", "r1" };
   private static final String[] MACHINES = { "m0", "m0", "m0", "m1" };

   @Override
   protected EmbeddedCacheManager addClusterEnabledCacheManager(TransportFlags flags) {
      int index = cacheManagers.size();
      if (index >= RACKS.length) {
         // Only four members are supported, same as the original switch's default branch.
         throw new RuntimeException("Bad!");
      }
      GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gcb.transport().rackId(RACKS[index]).machineId(MACHINES[index]);
      EmbeddedCacheManager cm = TestCacheManagerFactory.createClusteredCacheManager(gcb, getDefaultClusteredCacheConfig(CacheMode.DIST_ASYNC), flags);
      cacheManagers.add(cm);
      return cm;
   }
}
| 1,710
| 30.685185
| 150
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyAwareStateTransferTest.java
|
package org.infinispan.distribution.topologyaware;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterTest;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
 * Verifies that all entries keep the expected number of copies, on the correct owners,
 * while nodes carrying rack/machine topology information are killed one by one.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.2
 */
@Test(groups = "functional", testName = "distribution.topologyaware.TopologyAwareStateTransferTest")
@CleanupAfterTest
@InCacheMode({CacheMode.DIST_SYNC})
public class TopologyAwareStateTransferTest extends MultipleCacheManagersTest {
   // Cluster members in consistent-hash order; these Address objects also serve as the
   // data keys inserted by test().
   private Address[] addresses;

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder defaultConfig = getDefaultClusteredCacheConfig(cacheMode);
      log.debug("defaultConfig = " + defaultConfig.build().clustering().hash().numOwners());
      defaultConfig.clustering().l1().disable().stateTransfer().fetchInMemoryState(true);
      createClusteredCaches(5, defaultConfig);

      ConsistentHash hash = cache(0).getAdvancedCache().getDistributionManager().getWriteConsistentHash();
      List<Address> members = hash.getMembers();
      addresses = members.toArray(new Address[members.size()]);
   }

   // Intentionally a no-op: with @CleanupAfterTest the data written by test() must
   // survive between method invocations.
   @AfterMethod
   @Override
   protected void clearContent() throws Throwable {
   }

   /** Returns the cache whose local node has the given address; fails if none matches. */
   Cache<?, ?> cache(Address addr) {
      for (Cache<?, ?> c : caches()) {
         if (c.getAdvancedCache().getRpcManager().getAddress().equals(addr)) return c;
      }
      throw new RuntimeException("Address: " + addr);
   }

   /**
    * dependsOnMethods does not work well with multiple instances of test.
    * See http://stackoverflow.com/questions/38345330 for details.
    */
   public void test() {
      // One entry per member, keyed by the member's own address.
      cache(0).put(addresses[0],"v0");
      cache(0).put(addresses[1],"v1");
      cache(0).put(addresses[2],"v2");
      cache(0).put(addresses[3],"v3");
      cache(0).put(addresses[4],"v4");

      log.debugf("Cache on node %s: %s", addresses[0], TestingUtil.printCache(cache(addresses[0])));
      log.debugf("Cache on node %s: %s", addresses[1], TestingUtil.printCache(cache(addresses[1])));
      log.debugf("Cache on node %s: %s", addresses[2], TestingUtil.printCache(cache(addresses[2])));
      log.debugf("Cache on node %s: %s", addresses[3], TestingUtil.printCache(cache(addresses[3])));

      assertExistence(addresses[0]);
      assertExistence(addresses[1]);
      assertExistence(addresses[2]);
      assertExistence(addresses[3]);
      assertExistence(addresses[4]);

      // Kill node 4 and wait for the cluster to stabilize. Note that addresses[4] is
      // still used below only as a data key; its entry must survive the owner's death.
      EmbeddedCacheManager cm4 = cache(addresses[4]).getCacheManager();
      log.info("Here is where ST starts");
      TestingUtil.killCacheManagers(cm4);
      cacheManagers.remove(cm4);
      TestingUtil.blockUntilViewsReceived(60000, false, caches());
      TestingUtil.waitForNoRebalance(caches());
      log.info("Here is where ST ends");
      List<Address> addressList = cache(addresses[0]).getAdvancedCache().getDistributionManager()
            .getWriteConsistentHash().getMembers();
      log.debug("After shutting down " + addresses[4] + " caches are " + addressList);

      log.debugf("Cache on node %s: %s", addresses[0], TestingUtil.printCache(cache(addresses[0])));
      log.debugf("Cache on node %s: %s", addresses[1], TestingUtil.printCache(cache(addresses[1])));
      log.debugf("Cache on node %s: %s", addresses[2], TestingUtil.printCache(cache(addresses[2])));
      log.debugf("Cache on node %s: %s", addresses[3], TestingUtil.printCache(cache(addresses[3])));

      assertExistence(addresses[0]);
      assertExistence(addresses[1]);
      assertExistence(addresses[2]);
      assertExistence(addresses[3]);
      assertExistence(addresses[4]);

      // Kill node 2 and re-verify every entry.
      EmbeddedCacheManager cm2 = cache(addresses[2]).getCacheManager();
      TestingUtil.killCacheManagers(cm2);
      cacheManagers.remove(cm2);
      TestingUtil.blockUntilViewsReceived(60000, false, caches());
      TestingUtil.waitForNoRebalance(caches());
      addressList = cache(addresses[0]).getAdvancedCache().getDistributionManager()
            .getWriteConsistentHash().getMembers();
      log.debug("After shutting down " + addresses[2] + " caches are " + addressList);

      log.debugf("Cache on node %s: %s", addresses[0], TestingUtil.printCache(cache(addresses[0])));
      log.debugf("Cache on node %s: %s", addresses[1], TestingUtil.printCache(cache(addresses[1])));
      log.debugf("Cache on node %s: %s", addresses[3], TestingUtil.printCache(cache(addresses[3])));

      assertExistence(addresses[0]);
      assertExistence(addresses[1]);
      assertExistence(addresses[2]);
      assertExistence(addresses[3]);
      assertExistence(addresses[4]);

      // Kill node 1, leaving two members, and re-verify every entry.
      EmbeddedCacheManager cm1 = cache(addresses[1]).getCacheManager();
      TestingUtil.killCacheManagers(cm1);
      cacheManagers.remove(cm1);
      TestingUtil.blockUntilViewsReceived(60000, false, caches());
      TestingUtil.waitForNoRebalance(caches());
      addressList = cache(addresses[0]).getAdvancedCache().getDistributionManager()
            .getWriteConsistentHash().getMembers();
      log.debug("After shutting down " + addresses[1] + " caches are " + addressList);

      log.debugf("Cache on node %s: %s", addresses[0], TestingUtil.printCache(cache(addresses[0])));
      log.debugf("Cache on node %s: %s", addresses[3], TestingUtil.printCache(cache(addresses[3])));

      assertExistence(addresses[0]);
      assertExistence(addresses[1]);
      assertExistence(addresses[2]);
      assertExistence(addresses[3]);
      assertExistence(addresses[4]);
   }

   /**
    * Asserts that exactly two copies of {@code key} exist cluster-wide and that they are
    * located precisely on the key's current write owners.
    */
   private <K> void assertExistence(final K key) {
      LocalizedCacheTopology cacheTopology =
            cache(addresses[0]).getAdvancedCache().getDistributionManager().getCacheTopology();
      final List<Address> addresses = cacheTopology.getDistribution(key).writeOwners();
      log.debug(key + " should be present on = " + addresses);

      // 2 presumably equals numOwners of the default clustered config (the value is
      // logged in createCacheManagers) — TODO confirm.
      eventuallyEquals(2, () -> caches().stream().mapToInt(c -> c.getAdvancedCache().getDataContainer().containsKey(key) ? 1 : 0).sum());
      for (Cache<? super K, ?> c : caches()) {
         // Each cache holds the key if and only if it is one of the write owners.
         eventuallyEquals("Failure for key " + key + " on cache " + address(c), addresses.contains(address(c)),
               () -> c.getAdvancedCache().getDataContainer().containsKey(key));
      }
   }

   /**
    * Creates each member with topology ids based on creation order:
    * node 0 -> r0/m0, 1 -> r0/m1, 2 -> r1/m0, 3 -> r2/m0, 4 -> r2/m0.
    */
   @Override
   protected EmbeddedCacheManager addClusterEnabledCacheManager(ConfigurationBuilder deConfiguration) {
      int index = cacheManagers.size();
      String rack;
      String machine;
      switch (index) {
         case 0 : {
            rack = "r0";
            machine = "m0";
            break;
         }
         case 1 : {
            rack = "r0";
            machine = "m1";
            break;
         }
         case 2 : {
            rack = "r1";
            machine = "m0";
            break;
         }
         case 3 : {
            rack = "r2";
            machine = "m0";
            break;
         }
         case 4 : {
            rack = "r2";
            machine = "m0";
            break;
         }
         default : {
            throw new RuntimeException("Bad!");
         }
      }
      GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gcb.transport().rackId(rack).machineId(machine);
      EmbeddedCacheManager cm = TestCacheManagerFactory.createClusteredCacheManager(gcb, deConfiguration);
      cacheManagers.add(cm);
      return cm;
   }
}
| 8,082
| 40.664948
| 137
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/topologyaware/TopologyAwareChFunctionalTest.java
|
package org.infinispan.distribution.topologyaware;
import static org.testng.Assert.assertTrue;

import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.DistSyncFuncTest;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.Test;
/**
 * Functional distribution test for a cluster whose members carry rack/machine topology
 * information; also checks that every node's write consistent hash sees all members.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.2
 */
@Test (groups = "functional", testName = "distribution.topologyaware.TopologyAwareChFunctionalTest")
public class TopologyAwareChFunctionalTest extends DistSyncFuncTest {
   /**
    * Creates each member with topology ids based on creation order:
    * node 0 -> r0/m0, 1 -> r0/m1, 2 -> r1/m0, 3 -> r2/m0.
    */
   @Override
   protected EmbeddedCacheManager addClusterEnabledCacheManager(TransportFlags flags) {
      int index = cacheManagers.size();
      String rack;
      String machine;
      switch (index) {
         case 0 : {
            rack = "r0";
            machine = "m0";
            break;
         }
         case 1 : {
            rack = "r0";
            machine = "m1";
            break;
         }
         case 2 : {
            rack = "r1";
            machine = "m0";
            break;
         }
         case 3 : {
            rack = "r2";
            machine = "m0";
            break;
         }
         default : {
            throw new RuntimeException("Bad!");
         }
      }
      GlobalConfigurationBuilder gc = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gc.transport().rackId(rack).machineId(machine);
      // NOTE(review): 'flags' is not forwarded to the cache manager factory here — confirm
      // this is intentional.
      EmbeddedCacheManager cm = TestCacheManagerFactory.createClusteredCacheManager(gc, new ConfigurationBuilder());
      cacheManagers.add(cm);
      return cm;
   }

   /** Checks that the write CH of every node contains all four members. */
   public void testHashesInitiated() {
      ConsistentHash hash = advancedCache(0, cacheName).getDistributionManager().getWriteConsistentHash();
      containsAllHashes(hash);
      containsAllHashes(advancedCache(1, cacheName).getDistributionManager().getWriteConsistentHash());
      containsAllHashes(advancedCache(2, cacheName).getDistributionManager().getWriteConsistentHash());
      containsAllHashes(advancedCache(3, cacheName).getDistributionManager().getWriteConsistentHash());
   }

   private void containsAllHashes(ConsistentHash ch) {
      // Fix: use TestNG assertions instead of the Java 'assert' keyword, which is
      // silently skipped unless the JVM runs with -ea.
      assertTrue(ch.getMembers().contains(address(0)));
      assertTrue(ch.getMembers().contains(address(1)));
      assertTrue(ch.getMembers().contains(address(2)));
      assertTrue(ch.getMembers().contains(address(3)));
   }
}
| 2,496
| 34.169014
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/TopologyAwareSyncConsistentHashFactoryKeyDistributionTest.java
|
package org.infinispan.distribution.ch;
import java.util.List;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.TopologyAwareSyncConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.JGroupsTopologyAwareAddress;
import org.jgroups.util.ExtendedUUID;
import org.testng.annotations.Test;
/**
* Tests the uniformity of the SyncConsistentHashFactory algorithm, which is very similar to the 5.1
* default consistent hash algorithm virtual nodes.
*
* <p>This test assumes that key hashes are random and follow a uniform distribution so a key has the same chance
* to land on each one of the 2^31 positions on the hash wheel.
*
* <p>The output should stay pretty much the same between runs, so I added and example output here: vnodes_key_dist.txt.
*
* <p>Notes about the test output:
* <ul>
* <li>{@code P(p)} is the probability of proposition {@code p} being true
* <li>In the "Primary" rows {@code mean == total_keys / num_nodes} (each key has only one primary owner),
* but in the "Any owner" rows {@code mean == total_keys / num_nodes * num_owners} (each key is stored on
* {@code num_owner} nodes).
* </ul>
* @author Dan Berindei
* @since 5.2
*/
@Test(testName = "distribution.ch.TopologyAwareSyncConsistentHashFactoryKeyDistributionTest", groups = "profiling")
public class TopologyAwareSyncConsistentHashFactoryKeyDistributionTest extends SyncConsistentHashFactoryKeyDistributionTest {

   // Builds a CH straight from the topology-aware factory; capacity factors are null (all members equal).
   protected DefaultConsistentHash createConsistentHash(int numSegments, int numOwners, List<Address> members) {
      ConsistentHashFactory<DefaultConsistentHash> chf = createFactory();
      return chf.create(numOwners, numSegments, members, null);
   }

   @Override
   protected ConsistentHashFactory<DefaultConsistentHash> createFactory() {
      return new TopologyAwareSyncConsistentHashFactory();
   }

   // Creates a topology-aware test address: one string arg alternates between "s0"/"s1"
   // (nodes split across two groups) and the other is a per-node "m<index>".
   // NOTE(review): assumed to be site and machine respectively — confirm against the
   // JGroupsTopologyAwareAddress.randomUUID parameter order.
   @Override
   protected Address createSingleAddress(int nodeIndex) {
      ExtendedUUID uuid = JGroupsTopologyAwareAddress.randomUUID(null, "s" + (nodeIndex % 2), null, "m" + nodeIndex);
      return new IndexedTopologyAwareJGroupsAddress(uuid, nodeIndex);
   }
}
/**
 * We extend JGroupsAddress to make mapping an address to a node easier.
 */
class IndexedTopologyAwareJGroupsAddress extends JGroupsTopologyAwareAddress {

   // Join-order index of the node this address represents; lets the test map an
   // address back to the node that created it.
   final int nodeIndex;

   public IndexedTopologyAwareJGroupsAddress(ExtendedUUID address, int nodeIndex) {
      super(address);
      this.nodeIndex = nodeIndex;
   }
}
| 2,547
| 40.096774
| 125
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/DefaultConsistentHashPersistenceTest.java
|
package org.infinispan.distribution.ch;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.profiling.testinternals.Generator;
import org.infinispan.remoting.transport.Address;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.ch.DefaultConsistentHashPersistenceTest")
public class DefaultConsistentHashPersistenceTest extends BaseCHPersistenceTest {

   @Override
   protected ConsistentHashFactory<?> createConsistentHashFactory() {
      return new DefaultConsistentHashFactory();
   }

   /**
    * Builds a 2-owner, 100-segment default consistent hash over three randomly
    * generated addresses, all with equal capacity (1.0).
    */
   @Override
   public ConsistentHash createConsistentHash() {
      List<Address> nodes = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
         nodes.add(Generator.generateAddress());
      }
      Map<Address, Float> capacities = new HashMap<>();
      nodes.forEach(node -> capacities.put(node, 1.0f));
      return new DefaultConsistentHashFactory().create(2, 100, nodes, capacities);
   }
}
| 1,243
| 33.555556
| 95
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/ReplicatedConsistentHashPersistenceTest.java
|
package org.infinispan.distribution.ch;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHashFactory;
import org.infinispan.profiling.testinternals.Generator;
import org.infinispan.remoting.transport.Address;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.ch.ReplicatedConsistentHashPersistenceTest")
public class ReplicatedConsistentHashPersistenceTest extends BaseCHPersistenceTest {

   @Override
   protected ConsistentHashFactory<?> createConsistentHashFactory() {
      return new ReplicatedConsistentHashFactory();
   }

   /**
    * Builds a 2-owner, 100-segment replicated consistent hash over three randomly
    * generated addresses, all with equal capacity (1.0).
    */
   @Override
   public ConsistentHash createConsistentHash() {
      List<Address> nodes = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
         nodes.add(Generator.generateAddress());
      }
      Map<Address, Float> capacities = new HashMap<>();
      nodes.forEach(node -> capacities.put(node, 1.0f));
      return new ReplicatedConsistentHashFactory().create(2, 100, nodes, capacities);
   }
}
| 1,261
| 34.055556
| 98
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/AffinityPartitionerTest.java
|
package org.infinispan.distribution.ch;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.testng.AssertJUnit.assertEquals;
import java.util.stream.IntStream;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.impl.AffinityPartitioner;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
/**
* @author gustavonalle
* @since 8.2
*/
@Test(groups = "functional", testName = "distribution.ch.AffinityPartitionerTest")
public class AffinityPartitionerTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      // Start with two nodes; the test itself adds a third one mid-run.
      addNodes(2);
   }

   // Adds {@code count} nodes configured with the AffinityPartitioner and waits for the cluster to form.
   private void addNodes(int count) {
      final ConfigurationBuilder conf = getConfigurationBuilder();
      createCluster(new DistributionSerializationContextImpl(), conf, count);
      waitForClusterToForm();
   }

   /**
    * Keys tagged with an explicit segment id must remain mapped to exactly that
    * segment even after a topology change (a new node joining).
    */
   @Test
   public void testAffinityPartitioner() throws Exception {
      Cache<AffinityKey, String> cache = cacheManagers.get(0).getCache();
      IntStream.range(0, 10).boxed().forEach(num -> cache.put(new AffinityKey(num), "value"));
      addNodes(1);
      cacheManagers.stream().map(cm -> cm.getCache().getAdvancedCache()).forEach(advancedCache -> {
         LocalizedCacheTopology cacheTopology = advancedCache.getDistributionManager().getCacheTopology();
         advancedCache.getDataContainer().forEach(ice -> {
            Object key = ice.getKey();
            int keySegmentId = ((AffinityKey) key).segmentId;
            // The topology must place the key in the segment it was tagged with.
            assertEquals(cacheTopology.getSegment(key), keySegmentId);
         });
      });
   }

   // 10 segments with a single owner, so each key lives on exactly one node.
   private ConfigurationBuilder getConfigurationBuilder() {
      final ConfigurationBuilder conf = getDefaultClusteredCacheConfig(DIST_SYNC, false);
      conf.clustering().hash().keyPartitioner(new AffinityPartitioner()).numSegments(10).numOwners(1);
      return conf;
   }

   // Key that chooses its own segment via AffinityTaggedKey instead of being hashed.
   public static class AffinityKey implements AffinityTaggedKey {

      @ProtoField(number = 1, defaultValue = "0")
      final int segmentId;

      @ProtoFactory
      AffinityKey(int segmentId) {
         this.segmentId = segmentId;
      }

      @Override
      public int getAffinitySegmentId() {
         return segmentId;
      }
   }

   @AutoProtoSchemaBuilder(
         includeClasses = AffinityKey.class,
         schemaFileName = "core.distribution.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.distribution",
         service = false
   )
   interface DistributionSerializationContext extends SerializationContextInitializer {
   }
}
| 2,994
| 34.235294
| 106
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/SyncConsistentHashFactoryKeyDistributionTest.java
|
package org.infinispan.distribution.ch;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.OwnershipStatistics;
import org.infinispan.distribution.ch.impl.SyncConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.test.AbstractInfinispanTest;
import org.jgroups.util.UUID;
import org.testng.annotations.Test;
/**
* Tests the uniformity of the SyncConsistentHashFactory algorithm, which is very similar to the 5.1
* default consistent hash algorithm virtual nodes.
*
* <p>This test assumes that key hashes are random and follow a uniform distribution so a key has the same chance
* to land on each one of the 2^31 positions on the hash wheel.
*
* <p>The output should stay pretty much the same between runs, so I added and example output here: vnodes_key_dist.txt.
*
* <p>Notes about the test output:
* <ul>
* <li>{@code P(p)} is the probability of proposition {@code p} being true
* <li>In the "Primary" rows {@code mean == total_keys / num_nodes} (each key has only one primary owner),
* but in the "Any owner" rows {@code mean == total_keys / num_nodes * num_owners} (each key is stored on
* {@code num_owner} nodes).
* </ul>
* @author Dan Berindei
* @since 5.2
*/
@Test(testName = "distribution.ch.SyncConsistentHashFactoryKeyDistributionTest", groups = "profiling")
public class SyncConsistentHashFactoryKeyDistributionTest extends AbstractInfinispanTest {
   // numbers of nodes to test
   public static final int[] NUM_NODES = {11, 22};
   // numbers of segments to test
   public static final int[] NUM_SEGMENTS = {200, 1000};
   // number of key owners
   public static final int NUM_OWNERS = 2;
   // controls precision + duration of test
   public static final int LOOPS = 1000;
   // confidence intervals to print for any owner
   public static final double[] INTERVALS = { 0.8, 0.9, 1.10, 1.20 };
   // confidence intervals to print for primary owner
   public static final double[] INTERVALS_PRIMARY = { 0.8, 0.9, 1.10, 1.20 };
   // percentiles to print
   public static final double[] PERCENTILES = { .999 };

   // Factory under test; subclasses override to test other CH factory implementations.
   protected ConsistentHashFactory<DefaultConsistentHash> createFactory() {
      return new SyncConsistentHashFactory();
   }

   // Creates numNodes distinct test addresses indexed 0..numNodes-1.
   protected List<Address> createAddresses(int numNodes) {
      ArrayList<Address> addresses = new ArrayList<>(numNodes);
      for (int i = 0; i < numNodes; i++) {
         addresses.add(createSingleAddress(i));
      }
      return addresses;
   }

   // For each node count, gathers distribution metrics over each segment count and prints one table.
   public void testDistribution() {
      for (int nn : NUM_NODES) {
         Map<String, Map<Integer, String>> metrics = new TreeMap<>();
         for (int ns : NUM_SEGMENTS) {
            Map<String, String> iterationMetrics = computeMetrics(ns, NUM_OWNERS, nn);
            iterationMetrics.forEach((metricName, metricValue) -> {
               Map<Integer, String> metric = metrics.computeIfAbsent(metricName, k -> new HashMap<>());
               metric.put(ns, metricValue);
            });
         }
         printMetrics(nn, metrics);
      }
   }

   // Same as testDistribution, but measures the distribution after repeated leave/join rebalances.
   public void testRebalanceDistribution() {
      for (int nn : NUM_NODES) {
         Map<String, Map<Integer, String>> metrics = new TreeMap<>();
         for (int ns : NUM_SEGMENTS) {
            Map<String, String> iterationMetrics = computeMetricsAfterRebalance(ns, NUM_OWNERS, nn);
            iterationMetrics.forEach((metricName, metricValue) -> {
               Map<Integer, String> metric = metrics.computeIfAbsent(metricName, k -> new HashMap<>());
               metric.put(ns, metricValue);
            });
         }
         printMetrics(nn, metrics);
      }
   }

   // Prints one table: a header row with the segment counts, then one row per metric name.
   protected void printMetrics(int nn, Map<String, Map<Integer, String>> metrics) {
      // print the header
      System.out.printf("Distribution for %3d nodes (relative to the average)\n===\n", nn);
      System.out.printf("%35s = ", "Segments");
      for (int numSegment : NUM_SEGMENTS) {
         System.out.printf("%7d", numSegment);
      }
      System.out.println();
      // print each metric for each vnodes setting
      for (Map.Entry<String, Map<Integer, String>> entry : metrics.entrySet()) {
         String metricName = entry.getKey();
         Map<Integer, String> metricValues = entry.getValue();
         System.out.printf("%35s = ", metricName);
         for (int numSegment : NUM_SEGMENTS) {
            System.out.print(metricValues.get(numSegment));
         }
         System.out.println();
      }
      System.out.println();
   }

   // Creates LOOPS fresh consistent hashes and collects, per node, the number of owned
   // and primary-owned segments, plus the max/min segments-per-node ratio of each CH.
   protected Map<String, String> computeMetrics(int numSegments, int numOwners, int numNodes) {
      List<Address> members = createAddresses(numNodes);
      Map<String, String> metrics = new HashMap<>();
      long[] distribution = new long[LOOPS * numNodes];
      long[] distributionPrimary = new long[LOOPS * numNodes];
      double[] largestRatio = new double[LOOPS];
      int distIndex = 0;
      ConsistentHashFactory<DefaultConsistentHash> chf = createFactory();
      for (int i = 0; i < LOOPS; i++) {
         DefaultConsistentHash ch = chf.create(numOwners, numSegments, members, null);
         OwnershipStatistics stats = new OwnershipStatistics(ch, ch.getMembers());
         // sanity check: every segment has exactly numOwners owners
         assertEquals(numSegments * numOwners, stats.sumOwned());
         for (Address node : ch.getMembers()) {
            distribution[distIndex] = stats.getOwned(node);
            distributionPrimary[distIndex] = stats.getPrimaryOwned(node);
            distIndex++;
         }
         largestRatio[i] = getSegmentsPerNodesMinMaxRatio(ch);
      }
      // sorted samples are required by addMetrics (min/max/percentiles assume order)
      Arrays.sort(distribution);
      Arrays.sort(distributionPrimary);
      Arrays.sort(largestRatio);
      addMetrics(metrics, "Any owner:", numSegments, numOwners, numNodes, distribution, INTERVALS);
      addMetrics(metrics, "Primary:", numSegments, 1, numNodes, distributionPrimary, INTERVALS_PRIMARY);
      addDoubleMetric(metrics, "Segments per node - max/min ratio", largestRatio[largestRatio.length -1]);
      return metrics;
   }

   // Like computeMetrics, but starts from one CH and, each loop, removes the oldest
   // member and adds a brand-new one, rebalancing after each change; stats are taken
   // from the CH as it evolves, not from freshly created ones.
   protected Map<String, String> computeMetricsAfterRebalance(int numSegments, int numOwners, int numNodes) {
      List<Address> members = createAddresses(numNodes);
      Map<String, String> metrics = new HashMap<>();
      long[] distribution = new long[LOOPS * numNodes];
      long[] distributionPrimary = new long[LOOPS * numNodes];
      double[] largestRatio = new double[LOOPS];
      int distIndex = 0;
      ConsistentHashFactory<DefaultConsistentHash> chf = createFactory();
      DefaultConsistentHash ch = chf.create(numOwners, numSegments, members, null);
      // loop leave/join and rebalance
      for (int i = 0; i < LOOPS; i++) {
         // leave
         members.remove(0);
         DefaultConsistentHash rebalancedCH = chf.updateMembers(ch, members, null);
         ch = chf.rebalance(rebalancedCH);
         // join
         Address joiner = createSingleAddress(numNodes + i);
         members.add(joiner);
         rebalancedCH = chf.updateMembers(ch, members, null);
         ch = chf.rebalance(rebalancedCH);
         // stats after rebalance
         OwnershipStatistics stats = new OwnershipStatistics(ch, ch.getMembers());
         assertEquals(numSegments * numOwners, stats.sumOwned());
         for (Address node : ch.getMembers()) {
            distribution[distIndex] = stats.getOwned(node);
            distributionPrimary[distIndex] = stats.getPrimaryOwned(node);
            distIndex++;
         }
         largestRatio[i] = getSegmentsPerNodesMinMaxRatio(ch);
      }
      Arrays.sort(distribution);
      Arrays.sort(distributionPrimary);
      Arrays.sort(largestRatio);
      addMetrics(metrics, "Any owner:", numSegments, numOwners, numNodes, distribution, INTERVALS);
      addMetrics(metrics, "Primary:", numSegments, 1, numNodes, distributionPrimary, INTERVALS_PRIMARY);
      addDoubleMetric(metrics, "Segments per node - max/min ratio", largestRatio[largestRatio.length -1]);
      return metrics;
   }

   // Derives min/max, interval probabilities and percentiles from a SORTED sample array
   // and records them as formatted strings under names prefixed by {@code prefix}.
   // All values are reported relative to the mean (mean == total / nodes / LOOPS).
   protected void addMetrics(Map<String, String> metrics, String prefix, int numSegments, int numOwners,
                             int numNodes, long[] distribution, double[] intervals) {
      long sum = 0;
      for (long x : distribution) sum += x;
      assertEquals(sum, LOOPS * numOwners * numSegments);
      double mean = (double) sum / numNodes / LOOPS;
      long min = distribution[0];
      long max = distribution[distribution.length - 1];
      addDoubleMetric(metrics, prefix + " min", (double) min / mean);
      addDoubleMetric(metrics, prefix + " max", (double) max / mean);
      // walk the sorted samples once, recording for each interval bound the fraction of
      // samples below bound * mean
      double[] intervalProbability = new double[intervals.length];
      int intervalIndex = 0;
      for (int i = 0; i < distribution.length; i++) {
         long x = distribution[i];
         while (x > intervals[intervalIndex] * mean) {
            intervalProbability[intervalIndex] = (double) i / distribution.length;
            intervalIndex++;
            if (intervalIndex >= intervals.length)
               break;
         }
      }
      // bounds never exceeded by any sample get probability 1
      for (int i = intervalIndex; i < intervals.length; i++) {
         intervalProbability[i] = 1.;
      }
      for (int i = 0; i < intervals.length; i++) {
         if (intervals[i] < 1) {
            addPercentageMetric(metrics, String.format("%s %% < %3.2f", prefix, intervals[i]), intervalProbability[i]);
         } else {
            addPercentageMetric(metrics, String.format("%s %% > %3.2f", prefix, intervals[i]), 1 - intervalProbability[i]);
         }
      }
      double[] percentiles = new double[PERCENTILES.length];
      for (int i = 0; i < PERCENTILES.length; i++) {
         percentiles[i] = (double)distribution[(int) Math.ceil(PERCENTILES[i] * (LOOPS * numNodes + 1))] / mean;
      }
      for (int i = 0; i < PERCENTILES.length; i++) {
         addDoubleMetric(metrics, String.format("%s %5.2f%% percentile", prefix, PERCENTILES[i] * 100), percentiles[i]);
      }
   }

   // Formatting helper: stores a double with 3 decimals.
   protected void addDoubleMetric(Map<String, String> metrics, String name, double value) {
      metrics.put(name, String.format("%7.3f", value));
   }

   // Formatting helper: stores a fraction as a percentage with 2 decimals.
   protected void addPercentageMetric(Map<String, String> metrics, String name, double value) {
      metrics.put(name, String.format("%6.2f%%", value * 100));
   }

   // Subclasses override to create topology-aware addresses.
   protected Address createSingleAddress(int nodeIndex) {
      return new IndexedJGroupsAddress(UUID.randomUUID(), nodeIndex);
   }

   // Ratio between the largest and smallest per-node segment counts of a single CH;
   // 1.0 means perfectly even.
   protected double getSegmentsPerNodesMinMaxRatio(DefaultConsistentHash ch) {
      int max = 0;
      int min = Integer.MAX_VALUE;
      for (Address addr : ch.getMembers()) {
         int num = ch.getSegmentsForOwner(addr).size();
         max = Math.max(max, num);
         min = Math.min(min, num);
      }
      double d = ((double) max) / min;
      // String result = String.format("min=%d, max=%d, ch=%s, d=%f", min, max, ch, d);
      // System.out.println("segment result = " + result);
      return d;
   }
}
/**
 * We extend JGroupsAddress to make mapping an address to a node easier.
 */
class IndexedJGroupsAddress extends JGroupsAddress {

   // Index of the node this address was created for.
   final int nodeIndex;

   IndexedJGroupsAddress(org.jgroups.Address address, int nodeIndex) {
      super(address);
      this.nodeIndex = nodeIndex;
   }

   public int getNodeIndex() {
      return nodeIndex;
   }

   /** Prints just the node index, keeping test output compact. */
   @Override
   public String toString() {
      return String.valueOf(nodeIndex);
   }
}
| 11,603
| 39.152249
| 123
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/ReplicatedConsistentHashFactoryTest.java
|
package org.infinispan.distribution.ch;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.Arrays;
import java.util.List;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.impl.OwnershipStatistics;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHash;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
import org.testng.annotations.Test;
/**
* Test even distribution after membership change
*
* @author Radim Vansa <rvansa@redhat.com>
*/
@Test(groups = "unit", testName = "distribution.ch.ReplicatedConsistentHashFactoryTest")
public class ReplicatedConsistentHashFactoryTest {

   /**
    * Runs a fixed membership sequence (A -> AB -> ABC -> ABCD -> BCD -> C) for
    * several segment counts, rebalancing after each change, and checks the
    * primary-owner distribution stays even at every step.
    */
   public void test1() {
      int[] testSegments = { 1, 2, 4, 8, 16, 31, 32, 33, 67, 128};
      ReplicatedConsistentHashFactory factory = new ReplicatedConsistentHashFactory();
      Address A = new TestAddress(0, "A");
      Address B = new TestAddress(1, "B");
      Address C = new TestAddress(2, "C");
      Address D = new TestAddress(3, "D");
      List<Address> a = Arrays.asList(A);
      List<Address> ab = Arrays.asList(A, B);
      List<Address> abc = Arrays.asList(A, B, C);
      List<Address> abcd = Arrays.asList(A, B, C, D);
      List<Address> bcd = Arrays.asList(B, C, D);
      List<Address> c = Arrays.asList(C);
      for (int segments : testSegments) {
         ReplicatedConsistentHash ch = factory.create(0, segments, a, null);
         checkDistribution(ch);
         ch = factory.updateMembers(ch, ab, null);
         ch = factory.rebalance(ch);
         checkDistribution(ch);
         ch = factory.updateMembers(ch, abc, null);
         ch = factory.rebalance(ch);
         checkDistribution(ch);
         ch = factory.updateMembers(ch, abcd, null);
         ch = factory.rebalance(ch);
         checkDistribution(ch);
         ch = factory.updateMembers(ch, bcd, null);
         ch = factory.rebalance(ch);
         checkDistribution(ch);
         ch = factory.updateMembers(ch, c, null);
         ch = factory.rebalance(ch);
         checkDistribution(ch);
      }
   }

   // In a replicated CH every member owns all segments; primary ownership must be
   // spread so the per-member primary counts differ by at most one.
   private void checkDistribution(ReplicatedConsistentHash ch) {
      int minSegments = Integer.MAX_VALUE, maxSegments = Integer.MIN_VALUE;
      OwnershipStatistics stats = new OwnershipStatistics(ch, ch.getMembers());
      for (Address member : ch.getMembers()) {
         int primary = stats.getPrimaryOwned(member);
         minSegments = Math.min(minSegments, primary);
         maxSegments = Math.max(maxSegments, primary);
         assertEquals(stats.getOwned(member), ch.getNumSegments());
      }
      assertTrue(maxSegments - minSegments <= 1);
   }
}
| 2,751
| 34.74026
| 88
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/BaseCHPersistenceTest.java
|
package org.infinispan.distribution.ch;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.globalstate.impl.ScopedPersistentStateImpl;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.PersistentUUIDManager;
import org.infinispan.topology.PersistentUUIDManagerImpl;
import org.testng.annotations.Test;
@Test(groups = "unit")
public abstract class BaseCHPersistenceTest {

   // Factory used to restore a CH from persistent state; must match the type
   // produced by createConsistentHash().
   protected abstract ConsistentHashFactory<?> createConsistentHashFactory();

   // Builds the consistent hash instance whose persistence round-trip is verified.
   protected abstract ConsistentHash createConsistentHash();

   /**
    * Writing a CH to scoped persistent state and reading it back (with all
    * address/UUID mappings available) must yield an equal CH.
    */
   public void testCHPersistence() {
      PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
      ConsistentHash ch = createConsistentHash();
      generateRandomPersistentUUIDs(ch.getMembers(), persistentUUIDManager);
      ScopedPersistentState state = new ScopedPersistentStateImpl("scope");
      ch.remapAddresses(persistentUUIDManager.addressToPersistentUUID()).toScopedState(state);
      ConsistentHashFactory<?> hashFactory = createConsistentHashFactory();
      ConsistentHash restoredCH = hashFactory.fromPersistentState(state).remapAddresses(persistentUUIDManager.persistentUUIDToAddress());
      assertEquals(ch, restoredCH);
   }

   /**
    * If one member's UUID-to-address mapping is missing during restore, the
    * remapped CH must be null rather than a partial CH.
    */
   public void testCHPersistenceMissingMembers() {
      PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
      ConsistentHash ch = createConsistentHash();
      Map<Address, PersistentUUID> addressMap = generateRandomPersistentUUIDs(ch.getMembers(), persistentUUIDManager);
      ScopedPersistentState state = new ScopedPersistentStateImpl("scope");
      ch.remapAddresses(persistentUUIDManager.addressToPersistentUUID()).toScopedState(state);
      // drop one mapping to simulate a member whose persistent UUID is unknown
      persistentUUIDManager.removePersistentAddressMapping(addressMap.keySet().iterator().next());
      ConsistentHashFactory<?> hashFactory = createConsistentHashFactory();
      ConsistentHash restoredCH = hashFactory.fromPersistentState(state).remapAddresses(persistentUUIDManager.persistentUUIDToAddress());
      assertNull(restoredCH);
   }

   // Registers a random persistent UUID for every member and returns the mapping.
   private Map<Address, PersistentUUID> generateRandomPersistentUUIDs(List<Address> members, PersistentUUIDManager persistentUUIDManager) {
      Map<Address, PersistentUUID> addressMap = new HashMap<>();
      for (Address member : members) {
         PersistentUUID uuid = PersistentUUID.randomUUID();
         persistentUUIDManager.addPersistentAddressMapping(member, uuid);
         addressMap.put(member, uuid);
      }
      return addressMap;
   }
}
| 2,758
| 42.109375
| 139
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/SyncConsistentHashPersistenceTest.java
|
package org.infinispan.distribution.ch;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.ch.impl.SyncConsistentHashFactory;
import org.infinispan.profiling.testinternals.Generator;
import org.infinispan.remoting.transport.Address;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.ch.SyncConsistentHashPersistenceTest")
public class SyncConsistentHashPersistenceTest extends BaseCHPersistenceTest {

   @Override
   protected ConsistentHashFactory<?> createConsistentHashFactory() {
      return new SyncConsistentHashFactory();
   }

   /**
    * Builds a 2-owner, 100-segment sync consistent hash over three randomly
    * generated addresses, all with equal capacity (1.0).
    */
   @Override
   public ConsistentHash createConsistentHash() {
      List<Address> nodes = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
         nodes.add(Generator.generateAddress());
      }
      Map<Address, Float> capacities = new HashMap<>();
      nodes.forEach(node -> capacities.put(node, 1.0f));
      return new SyncConsistentHashFactory().create(2, 100, nodes, capacities);
   }
}
| 1,225
| 33.055556
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/CapacityFactorsFunctionalTest.java
|
package org.infinispan.distribution.ch;
import static org.infinispan.test.TestingUtil.assertBetween;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Map;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.ch.impl.OwnershipStatistics;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
/**
* Test the capacity factors with the full stack.
*
* @author Dan Berindei
* @since 6.0
*/
@Test(groups = "unstable", testName = "distribution.ch.CapacityFactorsFunctionalTest", description = "to be fixed by ISPN-6470")
public class CapacityFactorsFunctionalTest extends MultipleCacheManagersTest {
   public static final int NUM_SEGMENTS = 60;

   @Override
   protected void createCacheManagers() throws Throwable {
      // Do nothing here, create the cache managers in the test
   }

   /**
    * Adds nodes one at a time with capacity factors 0.5, 1.5, 0.0 and 1.0, and after
    * each join checks that primary/backup segment ownership is (approximately)
    * proportional to the capacity factors. A 0.0 capacity node must own nothing.
    */
   public void testCapacityFactors() {
      ConfigurationBuilder cb = new ConfigurationBuilder();
      cb.clustering().cacheMode(CacheMode.DIST_SYNC);
      cb.clustering().hash().numSegments(NUM_SEGMENTS);
      cb.clustering().hash().capacityFactor(0.5f);
      addClusterEnabledCacheManager(cb);
      waitForClusterToForm();
      // single node: owns everything regardless of its factor
      assertCapacityFactors(0.5f);
      assertPrimaryOwned(NUM_SEGMENTS);
      assertOwned(NUM_SEGMENTS);
      cb.clustering().hash().capacityFactor(1.5f);
      addClusterEnabledCacheManager(cb);
      waitForClusterToForm();
      // primary split 0.5 : 1.5 == 1/4 : 3/4; with numOwners=2 both nodes back up everything
      assertCapacityFactors(0.5f, 1.5f);
      assertPrimaryOwned(NUM_SEGMENTS / 4, NUM_SEGMENTS * 3 / 4);
      assertOwned(NUM_SEGMENTS, NUM_SEGMENTS);
      cb.clustering().hash().capacityFactor(0.0f);
      addClusterEnabledCacheManager(cb);
      waitForClusterToForm();
      // a zero-capacity node must own no segments at all
      assertCapacityFactors(0.5f, 1.5f, 0.0f);
      assertPrimaryOwned(NUM_SEGMENTS / 4, NUM_SEGMENTS * 3 / 4, 0);
      assertOwned(NUM_SEGMENTS, NUM_SEGMENTS, 0);
      cb.clustering().hash().capacityFactor(1.0f);
      addClusterEnabledCacheManager(cb);
      waitForClusterToForm();
      // primary split 0.5 : 1.5 : 0 : 1 over a total weight of 3
      assertCapacityFactors(0.5f, 1.5f, 0.0f, 1.0f);
      assertPrimaryOwned(NUM_SEGMENTS / 6, NUM_SEGMENTS * 3 / 6, 0, NUM_SEGMENTS * 2 / 6);
      assertOwned(NUM_SEGMENTS / 3, NUM_SEGMENTS, 0, NUM_SEGMENTS * 2 / 3);
   }

   // The read CH must report exactly the configured capacity factor for each node (by join order).
   private void assertCapacityFactors(float... expectedCapacityFactors) {
      ConsistentHash ch = cache(0).getAdvancedCache().getDistributionManager().getReadConsistentHash();
      int numNodes = expectedCapacityFactors.length;
      Map<Address,Float> capacityFactors = ch.getCapacityFactors();
      for (int i = 0; i < numNodes; i++) {
         assertEquals(expectedCapacityFactors[i], capacityFactors.get(address(i)), 0.0);
      }
   }

   // Primary-owned counts may deviate from the expectation: up to 30% below or 15% above.
   private void assertPrimaryOwned(int... expectedPrimaryOwned) {
      ConsistentHash ch = cache(0).getAdvancedCache().getDistributionManager().getReadConsistentHash();
      OwnershipStatistics stats = new OwnershipStatistics(ch, ch.getMembers());
      int numNodes = expectedPrimaryOwned.length;
      for (int i = 0; i < numNodes; i++) {
         double delta = expectedPrimaryOwned[i] * 0.15;
         assertBetween(expectedPrimaryOwned[i] - 2 * delta, expectedPrimaryOwned[i] + delta,
               stats.getPrimaryOwned(address(i)));
      }
   }

   // Total owned counts get a wider tolerance: up to 50% below or 25% above.
   private void assertOwned(int... expectedOwned) {
      ConsistentHash ch = cache(0).getAdvancedCache().getDistributionManager().getReadConsistentHash();
      OwnershipStatistics stats = new OwnershipStatistics(ch, ch.getMembers());
      int numNodes = expectedOwned.length;
      for (int i = 0; i < numNodes; i++) {
         double delta = expectedOwned[i] * 0.25;
         assertBetween(expectedOwned[i] - 2 * delta, expectedOwned[i] + delta, stats.getOwned(address(i)));
      }
   }
}
| 3,847
| 39.505263
| 128
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/impl/DefaultConsistentHashFactoryTest.java
|
package org.infinispan.distribution.ch.impl;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertSame;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.infinispan.commons.hash.MurmurHash3;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.Test;
/**
 * Test the even distribution and number of moved segments after rebalance for {@link DefaultConsistentHashFactory}
 *
 * @author Dan Berindei
 * @since 5.2
 */
@Test(groups = "unit", testName = "distribution.ch.DefaultConsistentHashFactoryTest")
public class DefaultConsistentHashFactoryTest extends AbstractInfinispanTest {
   public static final int[] NUM_SEGMENTS = new int[]{1, 2, 4, 8, 16, 60, 256, 512};
   public static final int[] NUM_NODES = new int[]{1, 2, 3, 4, 7, 10, 100};
   public static final int[] NUM_OWNERS = new int[]{1, 2, 3, 5};
   // Since the number of nodes changes, the capacity factors are repeated
   public static final float[][] CAPACITY_FACTORS = new float[][]{{1}, {2}, {1, 100}, {2, 0, 1}};
   // each element in the array is a pair of numbers: the first is the number of nodes to add
   // the second is the number of nodes to remove (the index of the removed nodes are pseudo-random)
   public static final int[][] NODE_CHANGES =
         {{1, 0}, {2, 0}, {0, 1}, {0, 2}, {2, 1}, {1, 2}, {10, 0}, {0, 10}};
   private int iterationCount = 0;

   /**
    * Creates the factory under test. Subclasses override this to run the same
    * distribution/rebalance checks against other {@link ConsistentHashFactory} implementations.
    */
   protected ConsistentHashFactory<DefaultConsistentHash> createConsistentHashFactory() {
      return new DefaultConsistentHashFactory();
   }

   /**
    * Exercises every combination of node count, segment count, owner count and capacity
    * factors, and for each one verifies the distribution of the initial CH and of each CH
    * produced by the {@link #NODE_CHANGES} membership changes.
    */
   public void testConsistentHashDistribution() {
      ConsistentHashFactory<DefaultConsistentHash> chf = createConsistentHashFactory();
      for (int nn : NUM_NODES) {
         List<Address> nodes = new ArrayList<>(nn);
         for (int j = 0; j < nn; j++) {
            nodes.add(new TestAddress(j, "TA"));
         }
         for (int ns : NUM_SEGMENTS) {
            // only test configurations with more segments than nodes
            if (nn < ns) {
               for (int no : NUM_OWNERS) {
                  for (float[] lf : CAPACITY_FACTORS) {
                     Map<Address, Float> lfMap = null;
                     if (lf != null) {
                        lfMap = new HashMap<>();
                        for (int i = 0; i < nn; i++) {
                           // the capacity factor arrays are shorter than the member list, so repeat them
                           lfMap.put(nodes.get(i), lf[i % lf.length]);
                        }
                     }
                     testConsistentHashModifications(chf, nodes, ns, no, lfMap);
                  }
               }
            }
         }
      }
   }

   /**
    * Creates a base CH, checks that it is balanced, then applies each entry of
    * {@link #NODE_CHANGES} in turn (add/remove members pseudo-randomly) and verifies the
    * rebalanced CH after every change.
    */
   private void testConsistentHashModifications(ConsistentHashFactory<DefaultConsistentHash> chf,
                                                List<Address> nodes, int ns, int no, Map<Address, Float> capacityFactors) {
      log.tracef("Creating consistent hash with ns=%d, no=%d, members=(%d)%s",
            ns, no, nodes.size(), membersString(nodes, capacityFactors));
      DefaultConsistentHash baseCH = chf.create(no, ns, nodes, capacityFactors);
      assertEquals(baseCH.getCapacityFactors(), capacityFactors);
      checkDistribution(baseCH, capacityFactors);
      // check that the base CH is already balanced
      List<Address> baseMembers = baseCH.getMembers();
      assertSame(baseCH, chf.updateMembers(baseCH, baseMembers, capacityFactors));
      assertSame(baseCH, chf.rebalance(baseCH));
      // starting point, so that we don't confuse nodes
      int nodeIndex = baseMembers.size();
      for (int[] nodeChange : NODE_CHANGES) {
         int nodesToAdd = nodeChange[0];
         int nodesToRemove = nodeChange[1];
         if (nodesToRemove > baseMembers.size())
            break;
         if (nodesToRemove == baseMembers.size() && nodesToAdd == 0)
            break;
         List<Address> newMembers = new ArrayList<>(baseMembers);
         HashMap<Address, Float> newCapacityFactors = capacityFactors != null ? new HashMap<>(capacityFactors) : null;
         for (int k = 0; k < nodesToRemove; k++) {
            // pick the node to remove pseudo-randomly but deterministically
            int indexToRemove = Math.abs(MurmurHash3.getInstance().hash(k) % newMembers.size());
            if (newCapacityFactors != null) {
               newCapacityFactors.remove(newMembers.get(indexToRemove));
            }
            newMembers.remove(indexToRemove);
         }
         for (int k = 0; k < nodesToAdd; k++) {
            TestAddress address = new TestAddress(nodeIndex++, "TA");
            newMembers.add(address);
            if (newCapacityFactors != null) {
               // reuse the capacity factor of an existing member for each joiner
               newCapacityFactors.put(address, capacityFactors.get(baseMembers.get(k % baseMembers.size())));
            }
         }
         log.tracef("Rebalance iteration %d, members=(%d)%s",
               iterationCount, newMembers.size(), membersString(newMembers, newCapacityFactors));
         baseCH = rebalanceIteration(chf, baseCH, nodesToAdd, nodesToRemove, newMembers, newCapacityFactors);
         baseMembers = baseCH.getMembers();
         capacityFactors = newCapacityFactors;
         iterationCount++;
      }
   }

   /** Formats the member list together with each member's capacity factor, for trace logging. */
   private String membersString(List<Address> newMembers, Map<Address, Float> newCapacityFactors) {
      return newMembers.stream()
            .map(a -> String.format("%s * %.1f", a, getCapacityFactor(newCapacityFactors, a)))
            .collect(Collectors.joining(", ", "[", "]"));
   }

   /** Returns the node's capacity factor, defaulting to 1 when no capacity factors are configured. */
   private float getCapacityFactor(Map<Address, Float> capacityFactors, Address a) {
      return capacityFactors != null ? capacityFactors.get(a) : 1f;
   }

   /**
    * Applies one membership change in two phases (updateMembers, then rebalance) and checks
    * owner counts, distribution, the number of moved segments, and the union CH.
    *
    * @return the rebalanced CH, to be used as the base for the next iteration
    */
   private DefaultConsistentHash rebalanceIteration(ConsistentHashFactory<DefaultConsistentHash> chf,
                                                    DefaultConsistentHash baseCH, int nodesToAdd,
                                                    int nodesToRemove, List<Address> newMembers,
                                                    Map<Address, Float> lfMap) {
      int actualNumOwners = computeActualNumOwners(baseCH.getNumOwners(), newMembers, lfMap);
      // first phase: just update the members list, removing the leavers
      // and adding new owners, but not necessarily assigning segments to them
      DefaultConsistentHash updatedMembersCH = chf.updateMembers(baseCH, newMembers, lfMap);
      assertEquals(lfMap, updatedMembersCH.getCapacityFactors());
      if (nodesToRemove > 0) {
         for (int l = 0; l < updatedMembersCH.getNumSegments(); l++) {
            assertTrue(updatedMembersCH.locateOwnersForSegment(l).size() > 0);
            assertTrue(updatedMembersCH.locateOwnersForSegment(l).size() <= actualNumOwners);
         }
      }
      // second phase: rebalance with the new members list
      long startNanos = System.nanoTime();
      DefaultConsistentHash rebalancedCH = chf.rebalance(updatedMembersCH);
      long durationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
      if (durationMillis >= 5) {
         log.tracef("Rebalance took %dms", durationMillis);
      }
      checkDistribution(rebalancedCH, lfMap);
      for (int l = 0; l < rebalancedCH.getNumSegments(); l++) {
         assertTrue(rebalancedCH.locateOwnersForSegment(l).size() >= actualNumOwners);
      }
      checkMovedSegments(baseCH, rebalancedCH, nodesToAdd, nodesToRemove);
      // union doesn't have to keep the CH balanced, but it does have to include owners from both CHs
      DefaultConsistentHash unionCH = chf.union(updatedMembersCH, rebalancedCH);
      for (int l = 0; l < updatedMembersCH.getNumSegments(); l++) {
         assertTrue(unionCH.locateOwnersForSegment(l).containsAll(updatedMembersCH.locateOwnersForSegment(l)));
         assertTrue(unionCH.locateOwnersForSegment(l).containsAll(rebalancedCH.locateOwnersForSegment(l)));
      }
      // switch to the new CH in the next iteration
      assertEquals(baseCH.getNumSegments(), rebalancedCH.getNumSegments());
      assertEquals(baseCH.getNumOwners(), rebalancedCH.getNumOwners());
      assertEquals(newMembers, rebalancedCH.getMembers());
      baseCH = rebalancedCH;
      return baseCH;
   }

   /**
    * Verifies that every segment has exactly {@code actualNumOwners} distinct owners and that
    * each node's primary-owned and owned counts fall within the tolerance computed by
    * {@link #minOwned}/{@link #maxOwned} around the expected value for its capacity factor.
    */
   protected void checkDistribution(DefaultConsistentHash ch, Map<Address, Float> lfMap) {
      int numSegments = ch.getNumSegments();
      List<Address> nodes = ch.getMembers();
      int numNodesWithLoad = nodesWithLoad(nodes, lfMap);
      int actualNumOwners = computeActualNumOwners(ch.getNumOwners(), nodes, lfMap);
      OwnershipStatistics stats = new OwnershipStatistics(ch, nodes);
      for (int s = 0; s < numSegments; s++) {
         List<Address> owners = ch.locateOwnersForSegment(s);
         assertEquals(actualNumOwners, owners.size());
         for (int i = 1; i < owners.size(); i++) {
            Address owner = owners.get(i);
            assertEquals("Found the same owner twice in the owners list", i, owners.indexOf(owner));
         }
      }
      float totalCapacity = computeTotalCapacity(nodes, lfMap);
      Map<Address, Float> expectedOwnedMap =
            computeExpectedOwned(numSegments, numNodesWithLoad, actualNumOwners, nodes, lfMap);
      for (Address node : nodes) {
         float capacityFactor = getCapacityFactor(lfMap, node);
         float expectedPrimaryOwned = expectedPrimaryOwned(numSegments, numNodesWithLoad, totalCapacity, capacityFactor);
         int minPrimaryOwned = (int) Math.floor(minOwned(numSegments, 1, numNodesWithLoad, expectedPrimaryOwned));
         int maxPrimaryOwned = (int) Math.ceil(maxOwned(numSegments, 1, numNodesWithLoad, expectedPrimaryOwned));
         int primaryOwned = stats.getPrimaryOwned(node);
         if (primaryOwned < minPrimaryOwned || maxPrimaryOwned < primaryOwned) {
            fail(String.format("Primary owned (%d) should have been between %d and %d", primaryOwned, minPrimaryOwned, maxPrimaryOwned));
         }
         float expectedOwned = expectedOwnedMap.get(node);
         int minOwned = (int) Math.floor(minOwned(numSegments, actualNumOwners, numNodesWithLoad, expectedOwned));
         int maxOwned = (int) Math.ceil(maxOwned(numSegments, actualNumOwners, numNodesWithLoad, expectedOwned));
         int owned = stats.getOwned(node);
         if (owned < minOwned || maxOwned < owned) {
            fail(String.format("Owned (%d) should have been between %d and %d", owned, minOwned, maxOwned));
         }
      }
   }

   /** The effective number of owners cannot exceed the number of members with a non-zero capacity factor. */
   public int computeActualNumOwners(int numOwners, List<Address> members, Map<Address, Float> capacityFactors) {
      int nodesWithLoad = nodesWithLoad(members, capacityFactors);
      return Math.min(numOwners, nodesWithLoad);
   }

   /** Counts the members with a non-zero capacity factor (all of them when no factors are configured). */
   int nodesWithLoad(List<Address> members, Map<Address, Float> capacityFactors) {
      if (capacityFactors == null)
         return members.size();
      int nodesWithLoad = 0;
      for (Address node : members) {
         if (capacityFactors.get(node) != 0) {
            nodesWithLoad++;
         }
      }
      return nodesWithLoad;
   }

   /** Expected number of primary-owned segments, proportional to the node's share of the total capacity. */
   protected float expectedPrimaryOwned(int numSegments, int numNodes, float totalCapacity, float nodeLoad) {
      return numSegments * nodeLoad / totalCapacity;
   }

   /**
    * Computes the expected number of owned segments per node. With capacity factors, nodes are
    * processed in decreasing capacity order and each node is capped at {@code numSegments};
    * the remaining copies are redistributed among the lower-capacity nodes.
    */
   protected Map<Address, Float> computeExpectedOwned(int numSegments, int numNodes, int actualNumOwners,
                                                      Collection<Address> nodes, Map<Address, Float> capacityFactors) {
      // Insert all nodes in the initial order, even if we're going to replace the values later
      Map<Address, Float> expectedOwned = new LinkedHashMap<>(numNodes * 2);
      float expected = Math.min(numSegments, (float) numSegments * actualNumOwners / numNodes);
      for (Address node : nodes) {
         expectedOwned.put(node, expected);
      }
      if (capacityFactors == null)
         return expectedOwned;
      List<Address> sortedNodes = new ArrayList<>(nodes);
      sortedNodes.sort((o1, o2) -> {
         // Reverse order
         return Float.compare(capacityFactors.get(o2), capacityFactors.get(o1));
      });
      float totalCapacity = computeTotalCapacity(nodes, capacityFactors);
      int remainingCopies = actualNumOwners * numSegments;
      for (Address node : sortedNodes) {
         float nodeLoad = capacityFactors.get(node);
         float nodeSegments;
         if (remainingCopies * nodeLoad / totalCapacity > numSegments) {
            // this node would get more than all segments; cap it and redistribute the rest
            nodeSegments = numSegments;
            totalCapacity -= nodeLoad;
            remainingCopies -= nodeSegments;
         } else {
            nodeSegments = nodeLoad != 0 ? remainingCopies * nodeLoad / totalCapacity : 0;
         }
         expectedOwned.put(node, nodeSegments);
      }
      return expectedOwned;
   }

   /** Upper tolerance bound for owned segments; subclasses override with implementation-specific bounds. */
   protected float maxOwned(int numSegments, int actualNumOwners, int numNodes, float expectedOwned) {
      return expectedOwned + (numNodes - 1) + .01f * expectedOwned;
   }

   /** Lower tolerance bound for owned segments; subclasses override with implementation-specific bounds. */
   protected float minOwned(int numSegments, int actualNumOwners, int numNodes, float expectedOwned) {
      return expectedOwned - Math.max(1, (numSegments * actualNumOwners) / expectedOwned * numNodes);
   }

   private float computeTotalCapacity(Collection<Address> nodes, Map<Address, Float> capacityFactors) {
      if (capacityFactors == null)
         return nodes.size();
      float totalCapacity = 0;
      for (Address node : nodes) {
         totalCapacity += capacityFactors.get(node);
      }
      return totalCapacity;
   }

   /** Maximum number of segment moves allowed beyond those forced by joiners/leavers. */
   protected float allowedExtraMoves(DefaultConsistentHash oldCH, DefaultConsistentHash newCH,
                                     int joinerSegments, int leaverSegments) {
      return Math.max(1, 0.1f * oldCH.getNumOwners() * oldCH.getNumSegments());
   }

   /**
    * Checks that the rebalance did not move more segments between surviving members than
    * {@link #allowedExtraMoves} permits, and that primary ownership did not needlessly swap
    * with a backup for too many segments.
    */
   private void checkMovedSegments(DefaultConsistentHash oldCH, DefaultConsistentHash newCH,
                                   int nodesAdded, int nodesRemoved) {
      int numSegments = oldCH.getNumSegments();
      int numOwners = oldCH.getNumOwners();
      List<Address> oldMembers = oldCH.getMembers();
      List<Address> newMembers = newCH.getMembers();
      Set<Address> commonMembers = new HashSet<>(oldMembers);
      commonMembers.retainAll(newMembers);
      // Compute the number of segments owned by members that left or joined
      int leaverSegments = 0;
      for (Address node : oldMembers) {
         if (!commonMembers.contains(node)) {
            leaverSegments += oldCH.getSegmentsForOwner(node).size();
         }
      }
      int joinerSegments = 0;
      for (Address node : newMembers) {
         if (!commonMembers.contains(node)) {
            joinerSegments += newCH.getSegmentsForOwner(node).size();
         }
      }
      // Compute the number of segments where a common member added/removed segments
      int commonMembersAddedSegments = 0;
      int commonMembersRemovedSegments = 0;
      int primarySwitchedWithBackup = 0;
      for (int segment = 0; segment < numSegments; segment++) {
         List<Address> oldOwners = oldCH.locateOwnersForSegment(segment);
         List<Address> newOwners = newCH.locateOwnersForSegment(segment);
         for (Address newOwner : newOwners) {
            if (commonMembers.contains(newOwner) && !oldOwners.contains(newOwner)) {
               commonMembersAddedSegments++;
            }
         }
         for (Address oldOwner : oldOwners) {
            if (commonMembers.contains(oldOwner) && !newOwners.contains(oldOwner)) {
               commonMembersRemovedSegments++;
            }
         }
         Address oldPrimary = oldOwners.get(0);
         Address newPrimary = newOwners.get(0);
         if (!newPrimary.equals(oldPrimary) && newOwners.contains(oldPrimary) && oldOwners.contains(newPrimary)) {
            primarySwitchedWithBackup++;
         }
      }
      // When we have both joiners and leavers, leaverSegments may be > commonMembersAddedSegments
      int movedSegments = Math.max(0, commonMembersAddedSegments - leaverSegments);
      // When nodes with load < numOwners, commonMembersRemovedSegments is 0 but joinerSegments > 0
      int movedSegments2 = Math.max(0, commonMembersRemovedSegments - joinerSegments);
      assertEquals(movedSegments, movedSegments2);
      int expectedExtraMoves = (int) Math.ceil(allowedExtraMoves(oldCH, newCH, joinerSegments, leaverSegments));
      if (movedSegments > expectedExtraMoves / 2) {
         log.tracef("%d of %d*%d extra segments moved, %fx of allowed (%d), %d leavers had %d, %d joiners have %d",
               movedSegments, numOwners, numSegments, (float) movedSegments / expectedExtraMoves,
               expectedExtraMoves, nodesRemoved, leaverSegments, nodesAdded, joinerSegments);
      }
      if (movedSegments > expectedExtraMoves) {
         fail(String.format("Too many moved segments between %s and %s: expected %d, got %d",
               oldCH, newCH, expectedExtraMoves, movedSegments));
      }
      if (primarySwitchedWithBackup > Math.ceil(0.05 * numSegments)) {
         log.tracef("Primary owner switched with backup for %d segments of %d", primarySwitchedWithBackup, numSegments);
      }
      double acceptablePrimarySwitchedWithBackup = Math.ceil(0.5 * (joinerSegments + leaverSegments) / numOwners * numSegments);
      if (primarySwitchedWithBackup > acceptablePrimarySwitchedWithBackup) {
         fail(String.format("Primary owner switched with backup owner for too many segments: %d of %d", primarySwitchedWithBackup, numSegments));
      }
   }

   /**
    * Verifies that a null capacity-factor map produces exactly the same consistent hashes as a
    * map where every member has capacity factor 1.
    */
   public void testNullCapacityFactors() {
      ConsistentHashFactory<DefaultConsistentHash> chf = createConsistentHashFactory();
      TestAddress A = new TestAddress(0, "A");
      TestAddress B = new TestAddress(1, "B");
      TestAddress C = new TestAddress(2, "C");
      TestAddress D = new TestAddress(3, "D");
      Map<Address, Float> cf = new HashMap<>();
      cf.put(A, 1f);
      cf.put(B, 1f);
      cf.put(C, 1f);
      cf.put(D, 1f);
      DefaultConsistentHash ch1 = chf.create(2, 60, Arrays.asList(A), cf);
      DefaultConsistentHash ch1NoCF = chf.create(2, 60, Arrays.asList(A), null);
      assertEquals(ch1, ch1NoCF);
      DefaultConsistentHash ch2 = chf.updateMembers(ch1, Arrays.asList(A, B), cf);
      ch2 = chf.rebalance(ch2);
      DefaultConsistentHash ch2NoCF = chf.updateMembers(ch1, Arrays.asList(A, B), null);
      ch2NoCF = chf.rebalance(ch2NoCF);
      assertEquals(ch2, ch2NoCF);
      DefaultConsistentHash ch3 = chf.updateMembers(ch2, Arrays.asList(A, B, C), cf);
      ch3 = chf.rebalance(ch3);
      DefaultConsistentHash ch3NoCF = chf.updateMembers(ch2, Arrays.asList(A, B, C), null);
      ch3NoCF = chf.rebalance(ch3NoCF);
      assertEquals(ch3, ch3NoCF);
      DefaultConsistentHash ch4 = chf.updateMembers(ch3, Arrays.asList(A, B, C, D), cf);
      ch4 = chf.rebalance(ch4);
      DefaultConsistentHash ch4NoCF = chf.updateMembers(ch3, Arrays.asList(A, B, C, D), null);
      ch4NoCF = chf.rebalance(ch4NoCF);
      assertEquals(ch4, ch4NoCF);
   }

   /**
    * Verifies the distribution stays within tolerance when one member (D) has a much larger
    * capacity factor than the others.
    */
   public void testDifferentCapacityFactors() {
      ConsistentHashFactory<DefaultConsistentHash> chf = createConsistentHashFactory();
      TestAddress A = new TestAddress(0, "A");
      TestAddress B = new TestAddress(1, "B");
      TestAddress C = new TestAddress(2, "C");
      TestAddress D = new TestAddress(3, "D");
      Map<Address, Float> cf = new HashMap<>();
      cf.put(A, 1f);
      cf.put(B, 1f);
      cf.put(C, 1f);
      cf.put(D, 100f);
      DefaultConsistentHash ch1 = chf.create(2, 60, Arrays.asList(A), cf);
      checkDistribution(ch1, cf);
      DefaultConsistentHash ch2 = chf.updateMembers(ch1, Arrays.asList(A, B), cf);
      ch2 = chf.rebalance(ch2);
      checkDistribution(ch2, cf);
      DefaultConsistentHash ch3 = chf.updateMembers(ch2, Arrays.asList(A, B, C), cf);
      ch3 = chf.rebalance(ch3);
      checkDistribution(ch3, cf);
      DefaultConsistentHash ch4 = chf.updateMembers(ch3, Arrays.asList(A, B, C, D), cf);
      ch4 = chf.rebalance(ch4);
      checkDistribution(ch4, cf);
   }
}
| 20,294
| 44.709459
| 144
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ch/impl/SyncConsistentHashFactoryTest.java
|
package org.infinispan.distribution.ch.impl;
import static org.infinispan.distribution.ch.impl.SyncConsistentHashFactory.Builder.fudgeExpectedSegments;
import static org.testng.AssertJUnit.assertEquals;
import org.testng.annotations.Test;
/**
 * Test the even distribution and number of moved segments after rebalance for {@link SyncConsistentHashFactory}
 *
 * @author Dan Berindei
 * @since 5.2
 */
@Test(groups = "unit", testName = "distribution.ch.SyncConsistentHashFactoryTest")
public class SyncConsistentHashFactoryTest extends DefaultConsistentHashFactoryTest {
   @Override
   protected SyncConsistentHashFactory createConsistentHashFactory() {
      return new SyncConsistentHashFactory();
   }

   // Disclaimer: These numbers just happen to work with our test addresses, they are by no means guaranteed
   // by the SyncConsistentHashFactory algorithm. In theory it could trade stability of segments on join/leave
   // in order to guarantee a better distribution, but I haven't done anything in that area yet.
   /** Upper tolerance bound tuned for SyncConsistentHashFactory's looser distribution guarantees. */
   @Override
   protected float maxOwned(int numSegments, int actualNumOwners, int numNodes, float expectedOwned) {
      if (expectedOwned == 0)
         return 0;
      float averageOwned = 1f * numSegments * actualNumOwners / numNodes;
      float maxDiff;
      if (expectedOwned >= averageOwned) {
         maxDiff = .10f * expectedOwned;
      } else {
         maxDiff = .10f * (expectedOwned + averageOwned);
      }
      return expectedOwned + Math.max(maxDiff, 1);
   }

   /** Lower tolerance bound tuned for SyncConsistentHashFactory's looser distribution guarantees. */
   @Override
   protected float minOwned(int numSegments, int actualNumOwners, int numNodes, float expectedOwned) {
      if (expectedOwned == 0)
         return 0;
      float averageOwned = 1f * numSegments * actualNumOwners / numNodes;
      float maxDiff;
      if (expectedOwned >= averageOwned) {
         maxDiff = .15f * expectedOwned;
      } else {
         maxDiff = .05f * (expectedOwned + averageOwned);
      }
      return expectedOwned - Math.max(maxDiff, 1);
   }

   @Override
   protected float allowedExtraMoves(DefaultConsistentHash oldCH, DefaultConsistentHash newCH,
                                     int joinerSegments, int leaverSegments) {
      int oldSize = nodesWithLoad(oldCH.getMembers(), oldCH.getCapacityFactors());
      int newSize = nodesWithLoad(newCH.getMembers(), newCH.getCapacityFactors());
      int maxSize = Math.max(oldSize, newSize);
      return Math.max(maxSize, 0.15f * newCH.getNumOwners() * newCH.getNumSegments());
   }

   /**
    * Pins the exact rounding behaviour of {@link SyncConsistentHashFactory.Builder#fudgeExpectedSegments}
    * across small, medium and large expected-segment values.
    */
   public void testFudgeExpectedSegments() {
      float averageSegments = 10;
      assertEquals(0, fudgeExpectedSegments(0.1f, averageSegments, 0));
      assertEquals(0, fudgeExpectedSegments(0.1f, averageSegments, 1));
      assertEquals(0, fudgeExpectedSegments(0.1f, averageSegments, 2));
      assertEquals(0, fudgeExpectedSegments(0.1f, averageSegments, 3));
      assertEquals(1, fudgeExpectedSegments(0.1f, averageSegments, 4));
      assertEquals(2, fudgeExpectedSegments(0.1f, averageSegments, 5));
      assertEquals(0, fudgeExpectedSegments(0.9f, averageSegments, 0));
      assertEquals(0, fudgeExpectedSegments(0.9f, averageSegments, 1));
      assertEquals(0, fudgeExpectedSegments(0.9f, averageSegments, 2));
      assertEquals(1, fudgeExpectedSegments(0.9f, averageSegments, 3));
      assertEquals(2, fudgeExpectedSegments(0.9f, averageSegments, 4));
      assertEquals(0, fudgeExpectedSegments(1.4f, averageSegments, 0));
      assertEquals(0, fudgeExpectedSegments(1.4f, averageSegments, 1));
      assertEquals(0, fudgeExpectedSegments(1.4f, averageSegments, 2));
      assertEquals(1, fudgeExpectedSegments(1.4f, averageSegments, 3));
      assertEquals(2, fudgeExpectedSegments(1.4f, averageSegments, 4));
      assertEquals(0, fudgeExpectedSegments(1.6f, averageSegments, 0));
      assertEquals(0, fudgeExpectedSegments(1.6f, averageSegments, 1));
      assertEquals(1, fudgeExpectedSegments(1.6f, averageSegments, 2));
      assertEquals(2, fudgeExpectedSegments(1.6f, averageSegments, 3));
      assertEquals(3, fudgeExpectedSegments(1.6f, averageSegments, 4));
      assertEquals(1, fudgeExpectedSegments(4.4f, averageSegments, 0));
      assertEquals(2, fudgeExpectedSegments(4.4f, averageSegments, 1));
      assertEquals(3, fudgeExpectedSegments(4.4f, averageSegments, 2));
      assertEquals(4, fudgeExpectedSegments(4.4f, averageSegments, 3));
      assertEquals(5, fudgeExpectedSegments(4.4f, averageSegments, 4));
      assertEquals(2, fudgeExpectedSegments(4.6f, averageSegments, 0));
      assertEquals(3, fudgeExpectedSegments(4.6f, averageSegments, 1));
      assertEquals(4, fudgeExpectedSegments(4.6f, averageSegments, 2));
      assertEquals(5, fudgeExpectedSegments(4.6f, averageSegments, 3));
      assertEquals(6, fudgeExpectedSegments(4.6f, averageSegments, 4));
      assertEquals(7, fudgeExpectedSegments(10f, averageSegments, 0));
      assertEquals(8, fudgeExpectedSegments(10f, averageSegments, 1));
      assertEquals(9, fudgeExpectedSegments(10f, averageSegments, 2));
      assertEquals(10, fudgeExpectedSegments(10f, averageSegments, 3));
      assertEquals(11, fudgeExpectedSegments(10f, averageSegments, 4));
      assertEquals(97, fudgeExpectedSegments(100f, averageSegments, 0));
      assertEquals(98, fudgeExpectedSegments(100f, averageSegments, 1));
      assertEquals(99, fudgeExpectedSegments(100f, averageSegments, 2));
      assertEquals(100, fudgeExpectedSegments(100f, averageSegments, 3));
      assertEquals(101, fudgeExpectedSegments(100f, averageSegments, 4));
      assertEquals(997, fudgeExpectedSegments(1000f, averageSegments, 0));
      assertEquals(998, fudgeExpectedSegments(1000f, averageSegments, 1));
      assertEquals(999, fudgeExpectedSegments(1000f, averageSegments, 2));
      assertEquals(1000, fudgeExpectedSegments(1000f, averageSegments, 3));
      assertEquals(1001, fudgeExpectedSegments(1000f, averageSegments, 4));
   }
}
| 5,887
| 48.478992
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/GetGroupKeysTest.java
|
package org.infinispan.distribution.groups;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
import io.reactivex.rxjava3.core.Flowable;
/**
 * It tests the grouping advanced interface.
 *
 * @author Pedro Ruivo
 * @since 7.0
 */
@Test(groups = "functional", testName = "distribution.groups.GetGroupKeysTest")
public class GetGroupKeysTest extends BaseUtilGroupTest {
   protected static final String PERSISTENCE_CACHE = "persistence-cache";
   protected static final String PERSISTENCE_PASSIVATION_CACHE = "persistence-passivation-cache";
   protected final boolean transactional;

   @Override
   public Object[] factory() {
      return new Object[]{
            new GetGroupKeysTest(false, TestCacheFactory.PRIMARY_OWNER),
            new GetGroupKeysTest(false, TestCacheFactory.BACKUP_OWNER),
            new GetGroupKeysTest(false, TestCacheFactory.NON_OWNER),
      };
   }

   public GetGroupKeysTest() {
      this(false, null);
   }

   protected GetGroupKeysTest(boolean transactional, TestCacheFactory factory) {
      super(factory);
      this.transactional = transactional;
   }

   public void testGetKeysInGroup() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
   }

   public void testGetKeysInGroupWithPersistence() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches(PERSISTENCE_CACHE));
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
   }

   public void testGetKeysInGroupWithPersistenceAndPassivation() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches(PERSISTENCE_PASSIVATION_CACHE));
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
   }

   public void testGetKeysInGroupWithPersistenceAndSkipCacheLoader() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches(PERSISTENCE_CACHE));
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> groupKeySet = testCache.testCache.withFlags(Flag.SKIP_CACHE_LOAD).getGroup(GROUP);
      // with SKIP_CACHE_LOAD only the in-memory entries should be returned,
      // so build the expected map from the data container instead of createMap()
      Map<GroupKey, String> expectedGroupSet = new HashMap<>();
      //noinspection unchecked
      for (InternalCacheEntry<GroupKey, String> entry :
            (DataContainer<GroupKey, String>) TestingUtil.extractComponent(extractTargetCache(testCache), InternalDataContainer.class)) {
         if (entry.getKey().getGroup().equals(GROUP)) {
            expectedGroupSet.put(entry.getKey(), entry.getValue());
         }
      }
      assertEquals(expectedGroupSet, groupKeySet);
   }

   public void testRemoveGroupKeys() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
      assertRemoveGroup(testCache);
   }

   public void testRemoveGroupKeysWithPersistence() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches(PERSISTENCE_CACHE));
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
      assertRemoveGroup(testCache);
   }

   public void testRemoveGroupKeysWithPersistenceAndPassivation() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches(PERSISTENCE_PASSIVATION_CACHE));
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
      assertRemoveGroup(testCache);
   }

   public void testRemoveGroupKeysWithPersistenceAndSkipCacheWriter() {
      TestCache testCache = createTestCacheAndReset(GROUP, caches(PERSISTENCE_CACHE));
      initCache(testCache.primaryOwner);
      assertGroupContents(testCache);
      testCache.testCache.withFlags(Flag.SKIP_CACHE_STORE).removeGroup(GROUP);
      // the store was skipped on removal, so getGroup() should still see whatever the store kept
      Map<GroupKey, String> expectedGroupSet2 = new ConcurrentHashMap<>();
      Flowable<MarshallableEntry<GroupKey, String>> flowable = Flowable.fromPublisher(
            TestingUtil.extractComponent(extractTargetCache(testCache), PersistenceManager.class)
                       .publishEntries(true, true));
      flowable.filter(me -> GROUP.equals(me.getKey().getGroup()))
              .blockingForEach(me -> expectedGroupSet2.put(me.getKey(), me.getValue()));
      assertEquals(new HashMap<>(expectedGroupSet2), testCache.testCache.getGroup(GROUP));
   }

   /**
    * Asserts that {@code getGroup(GROUP)} returns exactly the entries written by
    * {@code initCache()} (keys 0..9).
    */
   private void assertGroupContents(TestCache testCache) {
      Map<GroupKey, String> groupKeySet = testCache.testCache.getGroup(GROUP);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 10);
      assertEquals(expectedGroupSet, groupKeySet);
   }

   /**
    * Removes the whole group and asserts that {@code getGroup(GROUP)} is empty afterwards.
    */
   private void assertRemoveGroup(TestCache testCache) {
      testCache.testCache.removeGroup(GROUP);
      assertEquals(Collections.emptyMap(), testCache.testCache.getGroup(GROUP));
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      createClusteredCaches(3, GroupTestsSCI.INSTANCE, amendConfiguration(createConfigurationBuilder(transactional)));
      defineConfigurationOnAllManagers(PERSISTENCE_CACHE,
            amendConfiguration(createConfigurationBuilderWithPersistence(transactional, false)));
      waitForClusterToForm(PERSISTENCE_CACHE);
      defineConfigurationOnAllManagers(PERSISTENCE_PASSIVATION_CACHE,
            amendConfiguration(createConfigurationBuilderWithPersistence(transactional, true)));
      waitForClusterToForm(PERSISTENCE_PASSIVATION_CACHE);
   }

   /** Hook for subclasses to tweak the configuration (e.g. isolation level). */
   protected ConfigurationBuilder amendConfiguration(ConfigurationBuilder builder) {
      return builder;
   }

   @Override
   protected final void resetCaches(List<Cache<BaseUtilGroupTest.GroupKey, String>> cacheList) {
      // no-op: each test creates its own state via createTestCacheAndReset()
   }

   private ConfigurationBuilder createConfigurationBuilder(boolean transactional) {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(cacheMode, transactional);
      builder.clustering().stateTransfer().fetchInMemoryState(false);
      builder.clustering().hash().groups().enabled(true);
      return builder;
   }

   private ConfigurationBuilder createConfigurationBuilderWithPersistence(boolean transactional, boolean passivation) {
      ConfigurationBuilder builder = createConfigurationBuilder(transactional);
      if (passivation) {
         builder.memory().maxCount(2);
      }
      builder.persistence().passivation(passivation)
             .addStore(DummyInMemoryStoreConfigurationBuilder.class);
      return builder;
   }
}
| 7,936
| 42.371585
| 137
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/TransactionalGetGroupKeysTest.java
|
package org.infinispan.distribution.groups;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Map;
import jakarta.transaction.HeuristicMixedException;
import jakarta.transaction.HeuristicRollbackException;
import jakarta.transaction.NotSupportedException;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Tests the grouping advanced interface for transactional caches, in particular the
 * visibility of uncommitted and concurrently committed changes in {@code getGroup()}.
 *
 * @author Pedro Ruivo
 * @since 7.0
 */
@Test(groups = "functional", testName = "distribution.groups.TransactionalGetGroupKeysTest")
public class TransactionalGetGroupKeysTest extends GetGroupKeysTest {
   @Override
   public Object[] factory() {
      // same owner/backup/non-owner variants as the parent, but transactional with READ_COMMITTED
      return new Object[]{
            new TransactionalGetGroupKeysTest(TestCacheFactory.PRIMARY_OWNER).isolationLevel(IsolationLevel.READ_COMMITTED),
            new TransactionalGetGroupKeysTest(TestCacheFactory.BACKUP_OWNER).isolationLevel(IsolationLevel.READ_COMMITTED),
            new TransactionalGetGroupKeysTest(TestCacheFactory.NON_OWNER).isolationLevel(IsolationLevel.READ_COMMITTED),
      };
   }
   public TransactionalGetGroupKeysTest() {
      this(null);
   }
   protected TransactionalGetGroupKeysTest(TestCacheFactory factory) {
      // transactional = true
      super(true, factory);
   }
   // getGroup() inside a transaction must see the transaction's own uncommitted writes
   public void testGetGroupsInTransaction() throws SystemException, NotSupportedException, HeuristicRollbackException, HeuristicMixedException, RollbackException {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 12);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      testCache.testCache.put(key(10), value(10));
      testCache.testCache.put(key(11), value(11));
      // make sure that uncommitted values are shown in the transaction
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      tm.commit();
      assertEquals(createMap(0, 12), testCache.testCache.getGroup(GROUP));
   }
   // a put committed outside the transaction (while it is suspended) becomes visible inside it
   public void testGetGroupsWithConcurrentPut() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 12);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      testCache.testCache.put(key(10), value(10));
      testCache.testCache.put(key(11), value(11));
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      // concurrent writer commits k12 while the transaction is suspended
      testCache.primaryOwner.put(key(12), value(12));
      expectedGroupSet.put(key(12), value(12));
      tm.resume(tx);
      // k12 is committed, should be visible now
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      tm.commit();
      // after commit, everything is visible
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
   }
   // a remove committed concurrently stays invisible, because the transaction already read the key
   public void testGetGroupsWithConcurrentRemove() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 12);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      testCache.testCache.put(key(10), value(10));
      testCache.testCache.put(key(11), value(11));
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      // concurrent writer removes k1 while the transaction is suspended
      testCache.primaryOwner.remove(key(1));
      tm.resume(tx);
      // previous getGroup() read k1, so the remove is not visible
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      tm.commit();
      // after commit, everything is visible
      expectedGroupSet.remove(key(1));
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
   }
   // visibility of a concurrent replace depends on the isolation level and on whether this node owns the key
   public void testGetGroupsWithConcurrentReplace() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 12);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      testCache.testCache.put(key(10), value(10));
      testCache.testCache.put(key(11), value(11));
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      // concurrent writer replaces k1's value while the transaction is suspended
      testCache.primaryOwner.put(key(1), value(-1));
      if (isolationLevel == IsolationLevel.READ_COMMITTED && factory != TestCacheFactory.NON_OWNER) {
         // in ReadCommitted the entries are not wrapped (for read). So the changes are made immediately visible in write owners
         // non owners, will use the entry in the context
         expectedGroupSet.put(key(1), value(-1));
      }
      tm.resume(tx);
      // cacheStream wraps entries; even with read-committed, we are unable to see entry k1=v-1
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      tm.commit();
      // after commit, everything is visible
      expectedGroupSet.put(key(1), value(-1));
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
   }
   @Override
   protected ConfigurationBuilder amendConfiguration(ConfigurationBuilder builder) {
      super.amendConfiguration(builder);
      // apply the isolation level chosen in factory() and disable tx recovery
      builder.locking().isolationLevel(isolationLevel);
      builder.transaction().recovery().disable();
      return builder;
   }
}
| 5,740
| 37.019868
| 163
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/GroupsChFunctionalTest.java
|
package org.infinispan.distribution.groups;
import org.infinispan.Cache;
import org.infinispan.distribution.DistSyncFuncTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* @author Pete Muir
* @since 5.0
*/
@Test(groups = "functional", testName = "distribution.groups.GroupsChFunctionalTest")
@CleanupAfterMethod
public class GroupsChFunctionalTest extends DistSyncFuncTest {
   public GroupsChFunctionalTest() {
      // enable the grouper-based consistent hashing configured by the parent test
      groupers = true;
   }
   /**
    * Keys of the form "kX" are grouped by X modulo 2, so odd keys (k1, k3) and even keys
    * (k2, k4) must each hash to one common owner list.
    */
   public void testGrouper() throws Throwable {
      for (Cache<Object, String> c : caches) assert c.isEmpty();
      // Based on the grouping fn which computes a group by taking the digit from kX
      // and doing a modulo 2 on it we can verify the owners of keys
      // NOTE(review): assertNotSame is a reference-identity check; if getOwners() builds a
      // fresh array per call these "not same" assertions can never fail — confirm intent.
      Assert.assertNotSame(getOwners("k1"), getOwners("k2"));
      Assert.assertNotSame(getOwners("k1"), getOwners("k4"));
      Assert.assertNotSame(getOwners("k3"), getOwners("k2"));
      Assert.assertNotSame(getOwners("k3"), getOwners("k4"));
      Assert.assertEquals(getOwners("k1"), getOwners("k3"));
      Assert.assertEquals(getOwners("k2"), getOwners("k4"));
   }
   /**
    * Same ownership expectations as {@link #testGrouper()}, but the group comes from the
    * {@code @Group}-annotated accessor on {@link GroupedKey} rather than a Grouper.
    */
   public void testIntrinsicGrouping() throws Throwable {
      for (Cache<Object, String> c : caches) assert c.isEmpty();
      GroupedKey k1 = new GroupedKey("groupA", "k1");
      GroupedKey k2 = new GroupedKey("groupB", "k2");
      GroupedKey k3 = new GroupedKey("groupA", "k3");
      GroupedKey k4 = new GroupedKey("groupB", "k4");
      Assert.assertNotSame(getOwners(k1), getOwners(k2));
      Assert.assertNotSame(getOwners(k1), getOwners(k4));
      Assert.assertNotSame(getOwners(k3), getOwners(k2));
      Assert.assertNotSame(getOwners(k3), getOwners(k4));
      Assert.assertEquals(getOwners(k1), getOwners(k3));
      Assert.assertEquals(getOwners(k2), getOwners(k4));
      GroupedKey k1A = new GroupedKey("groupA", "k1");
      GroupedKey k1B = new GroupedKey("groupB", "k1");
      // Check that the same key in different groups is mapped to different nodes (nb this is not something you want to really do!)
      Assert.assertNotSame(getOwners(k1A), getOwners(k1B));
   }
   /**
    * Kills the primary owner of "groupA" and verifies that, after rebalance, all keys of the
    * group still share a single (new) owner list.
    */
   public void testRehash() throws Throwable {
      for (Cache<Object, String> c : caches) assert c.isEmpty();
      GroupedKey k1 = new GroupedKey("groupA", "k1");
      GroupedKey k2 = new GroupedKey("groupA", "k2");
      GroupedKey k3 = new GroupedKey("groupA", "k3");
      GroupedKey k4 = new GroupedKey("groupA", "k4");
      Assert.assertEquals(getOwners(k1), getOwners(k2));
      Assert.assertEquals(getOwners(k1), getOwners(k3));
      Assert.assertEquals(getOwners(k1), getOwners(k4));
      // snapshot the pre-kill owners so we can assert they changed after rebalance
      Cache<Object, String>[] owners1 = getOwners(k1);
      Cache<Object, String>[] owners2 = getOwners(k2);
      Cache<Object, String>[] owners3 = getOwners(k3);
      Cache<Object, String>[] owners4 = getOwners(k4);
      final Cache owner = getOwners("groupA")[0];
      // locate the index of the primary owner's manager so the right node is killed
      int ownerIndex = -1;
      for (int i = 0; i < caches.size(); i++) {
         if (owner == caches.get(i)) {
            ownerIndex = i;
            break;
         }
      }
      assert ownerIndex != -1;
      TestingUtil.killCacheManagers(manager(ownerIndex));
      // keep the test bookkeeping lists consistent with the killed node
      caches.remove(ownerIndex);
      cacheManagers.remove(ownerIndex);
      TestingUtil.waitForNoRebalance(caches);
      Assert.assertNotSame(getOwners(k1), owners1);
      Assert.assertNotSame(getOwners(k2), owners2);
      Assert.assertNotSame(getOwners(k3), owners3);
      Assert.assertNotSame(getOwners(k4), owners4);
      // the whole group must still be co-located on the new topology
      Assert.assertEquals(getOwners(k1), getOwners(k2));
      Assert.assertEquals(getOwners(k1), getOwners(k3));
      Assert.assertEquals(getOwners(k1), getOwners(k4));
   }
}
| 3,761
| 35.173077
| 131
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/WriteSkewGetGroupKeysTest.java
|
package org.infinispan.distribution.groups;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import java.util.Map;
import jakarta.transaction.HeuristicMixedException;
import jakarta.transaction.HeuristicRollbackException;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.transaction.WriteSkewException;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* It tests the grouping advanced interface for transactional caches with write-skew check enabled.
*
* @author Pedro Ruivo
* @since 7.0
*/
@Test(groups = "functional", testName = "distribution.groups.WriteSkewGetGroupKeysTest")
public class WriteSkewGetGroupKeysTest extends TransactionalGetGroupKeysTest {
   @Override
   public Object[] factory() {
      // run the suite once per perspective: primary owner, backup owner, and non-owner
      return new Object[]{
            new WriteSkewGetGroupKeysTest(TestCacheFactory.PRIMARY_OWNER),
            new WriteSkewGetGroupKeysTest(TestCacheFactory.BACKUP_OWNER),
            new WriteSkewGetGroupKeysTest(TestCacheFactory.NON_OWNER),
      };
   }
   public WriteSkewGetGroupKeysTest() {
      super(null);
   }
   public WriteSkewGetGroupKeysTest(TestCacheFactory factory) {
      super(factory);
      // write-skew checking requires REPEATABLE_READ
      isolationLevel = IsolationLevel.REPEATABLE_READ;
   }
   /**
    * removeGroup() after a concurrent conflicting put(k1) must fail the transaction with a
    * write-skew, either at commit time or (on a non-owner) when the group is re-read.
    */
   public void testRemoveGroupWithConcurrentConflictingUpdate() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 10);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      // all keys (and versions) in group stay in context
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      // concurrent update outside the transaction creates the conflict on k1
      testCache.primaryOwner.put(key(1), value(-1));
      tm.resume(tx);
      try {
         testCache.testCache.removeGroup(GROUP);
         expectedGroupSet.clear();
         // all keys in group are removed. It is visible inside the transaction
         assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
         // removeGroup() conflicts with put(k1, v-1) and a WriteSkewException is expected!
         assertCommitFail(tm);
      } catch (WriteSkewException e) {
         // On non-owner, the second retrieval of keys within the group will find out that one of the entries
         // has different value and will throw WSE
         tm.rollback();
      }
      // transaction rolled back, we should see all keys in group again.
      //noinspection ReuseOfLocalVariable
      expectedGroupSet = createMap(0, 10);
      expectedGroupSet.put(key(1), value(-1));
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
   }
   /**
    * A concurrent, non-conflicting add (new key k11) must not trigger a write-skew;
    * removeGroup() simply removes the new key as well.
    */
   public void testRemoveGroupWithConcurrentAdd() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 10);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      // all keys (and versions) in group stay in context
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      testCache.primaryOwner.put(key(11), value(11));
      tm.resume(tx);
      // removeGroup sees k11 and it will be removed
      testCache.testCache.removeGroup(GROUP);
      expectedGroupSet.clear();
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      assertCommitOk(tm); //no write skew expected!
      // no keys in group
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
   }
   /**
    * Two transactions removing the same key (k9) concurrently: the in-flight transaction's
    * removeGroup() must abort with a write-skew at commit, leaving keys [0,8] intact.
    */
   public void testRemoveGroupWithConcurrentConflictingRemove() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 10);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      // all keys (and versions) in group stay in context
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      testCache.primaryOwner.remove(key(9));
      tm.resume(tx);
      testCache.testCache.removeGroup(GROUP);
      expectedGroupSet.clear();
      // inside the transaction, no keys should be visible
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      assertCommitFail(tm); // write skew expected! 2 transactions removed k9 concurrently
      // keys [0,8] not removed
      assertEquals(createMap(0, 9), testCache.testCache.getGroup(GROUP));
   }
   /**
    * Concurrent add-then-remove of keys the transaction never read (k11, k12) is not a
    * conflict: removeGroup() commits and clears the whole group.
    */
   public void testRemoveGroupWithConcurrentRemove() throws Exception {
      TestCache testCache = createTestCacheAndReset(GROUP, caches());
      initCache(testCache.primaryOwner);
      Map<GroupKey, String> expectedGroupSet = createMap(0, 10);
      TransactionManager tm = tm(testCache.testCache);
      tm.begin();
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      Transaction tx = tm.suspend();
      testCache.primaryOwner.put(key(11), value(11));
      testCache.primaryOwner.put(key(12), value(12));
      testCache.primaryOwner.remove(key(12));
      tm.resume(tx);
      testCache.testCache.removeGroup(GROUP);
      expectedGroupSet.clear();
      // everything is removed including the new keys
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
      assertCommitOk(tm); //write skew should *not* abort the transaction
      // everything is removed
      assertEquals(expectedGroupSet, testCache.testCache.getGroup(GROUP));
   }
   // Commits and expects the transaction to abort (write-skew rollback).
   private static void assertCommitFail(TransactionManager tm) throws SystemException {
      try {
         tm.commit();
         fail("Commit should fail!");
      } catch (RollbackException | HeuristicMixedException | HeuristicRollbackException e) {
         //ignored, it is expected
      }
   }
   // Commits and propagates any unexpected failure to the test.
   private static void assertCommitOk(TransactionManager tm) throws Exception {
      tm.commit();
   }
}
| 6,239
| 35.705882
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/KXGrouper.java
|
package org.infinispan.distribution.groups;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.infinispan.distribution.group.Grouper;
/**
 * A simple {@link Grouper} which groups String based keys matching the pattern {@code kX}
 * (where {@code X} is a single digit). The computed group is the digit modulo 2, i.e.
 * {@code "0"} or {@code "1"}; any other key is left ungrouped.
 *
 * @author Pete Muir
 */
public class KXGrouper implements Grouper<String> {

   // Compiled once and immutable; Pattern instances are thread-safe.
   private static final Pattern K_PATTERN = Pattern.compile("(^k)(\\d)$");

   /**
    * @param key   the cache key
    * @param group the group as currently computed (ignored here)
    * @return {@code "0"} or {@code "1"} for a matching {@code kX} key, {@code null} otherwise
    */
   @Override
   public Object computeGroup(String key, Object group) {
      Matcher matcher = K_PATTERN.matcher(key);
      if (matcher.matches()) {
         // group(2) is the single digit captured by the pattern
         return String.valueOf(Integer.parseInt(matcher.group(2)) % 2);
      }
      return null;
   }

   @Override
   public Class<String> getKeyType() {
      return String.class;
   }
}
| 788
| 21.542857
| 78
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/BaseUtilGroupTest.java
|
package org.infinispan.distribution.groups;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.group.Group;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
/**
* This class contains some utility methods to the grouping advanced interface tests.
*
* @author Pedro Ruivo
* @since 7.0
*/
public abstract class BaseUtilGroupTest extends MultipleCacheManagersTest {
   // group name shared by all keys created via key(int)
   protected static final String GROUP = "test-group";
   // decides from which node's perspective the test cache is created
   protected final TestCacheFactory factory;
   protected BaseUtilGroupTest(TestCacheFactory factory) {
      this.factory = factory;
      this.cacheMode = CacheMode.DIST_SYNC; // default for the transactional tests
   }
   @Override
   protected String parameters() {
      // prepend the factory to the parameter string reported by TestNG
      String parameters = super.parameters();
      if (parameters == null) return "[" + factory + "]";
      else return "[" + factory + ", " + parameters.substring(1);
   }
   // key index i within the shared test group
   protected static GroupKey key(int index) {
      return new GroupKey(GROUP, index);
   }
   // value "v<index>" paired with key(index)
   protected static String value(int index) {
      return "v" + index;
   }
   protected abstract void resetCaches(List<Cache<GroupKey, String>> cacheList);
   // true when the given cache is a write owner of the group's segment
   protected static boolean isGroupOwner(Cache<?, ?> cache, String groupName) {
      return TestingUtil.extractComponent(cache, DistributionManager.class).getCacheTopology().isWriteOwner(groupName);
   }
   // returns the cache that will actually execute group commands for GROUP
   protected static AdvancedCache<GroupKey, String> extractTargetCache(TestCache testCache) {
      if (isGroupOwner(testCache.testCache, GROUP)) {
         return testCache.testCache;
      } else {
         //the command will be forwarded to the primary owner.
         return testCache.primaryOwner.getAdvancedCache();
      }
   }
   // seeds ten entries in GROUP plus ten in an unrelated group
   protected static void initCache(Cache<GroupKey, String> cache) {
      for (int i = 0; i < 10; ++i) {
         cache.put(key(i), value(i));
         cache.put(new GroupKey("other-group", i), value(i));
      }
   }
   // expected group contents for indices [from, to)
   protected static Map<GroupKey, String> createMap(int from, int to) {
      Map<GroupKey, String> map = new HashMap<>();
      for (int i = from; i < to; ++i) {
         map.put(key(i), value(i));
      }
      return map;
   }
   // resets all caches, then builds the TestCache for the configured perspective
   protected final TestCache createTestCacheAndReset(String groupName, List<Cache<GroupKey, String>> cacheList) {
      resetCaches(cacheList);
      return factory.create(groupName, cacheList);
   }
   /**
    * Selects which node the test operates from: the group's primary owner, a backup
    * owner, or a node that owns nothing of the group.
    */
   public enum TestCacheFactory {
      PRIMARY_OWNER {
         @Override
         public TestCache create(String groupName, List<Cache<GroupKey, String>> cacheList) {
            for (Cache<GroupKey, String> cache : cacheList) {
               DistributionManager distributionManager = TestingUtil.extractComponent(cache, DistributionManager.class);
               DistributionInfo distributionInfo = distributionManager.getCacheTopology().getDistribution(groupName);
               if (distributionInfo.isPrimary()) {
                  return new TestCache(cache, cache.getAdvancedCache());
               }
            }
            throw new IllegalStateException("didn't find a cache... should never happen!");
         }
      },
      BACKUP_OWNER {
         @Override
         public TestCache create(String groupName, List<Cache<GroupKey, String>> cacheList) {
            Cache<GroupKey, String> primaryOwner = null;
            AdvancedCache<GroupKey, String> backupOwner = null;
            for (Cache<GroupKey, String> cache : cacheList) {
               DistributionManager distributionManager = TestingUtil.extractComponent(cache, DistributionManager.class);
               DistributionInfo distributionInfo = distributionManager.getCacheTopology().getDistribution(groupName);
               if (primaryOwner == null && distributionInfo.isPrimary()) {
                  primaryOwner = cache;
               } else if (backupOwner == null && distributionInfo.isWriteOwner()) {
                  // write owner that is not primary == backup
                  backupOwner = cache.getAdvancedCache();
               }
               if (primaryOwner != null && backupOwner != null) {
                  return new TestCache(primaryOwner, backupOwner);
               }
            }
            throw new IllegalStateException("didn't find a cache... should never happen!");
         }
      },
      NON_OWNER {
         @Override
         public TestCache create(String groupName, List<Cache<GroupKey, String>> cacheList) {
            Cache<GroupKey, String> primaryOwner = null;
            AdvancedCache<GroupKey, String> nonOwner = null;
            for (Cache<GroupKey, String> cache : cacheList) {
               DistributionManager distributionManager = TestingUtil.extractComponent(cache, DistributionManager.class);
               DistributionInfo distributionInfo = distributionManager.getCacheTopology().getDistribution(groupName);
               if (primaryOwner == null && distributionInfo.isPrimary()) {
                  primaryOwner = cache;
               } else if (nonOwner == null && !distributionInfo.isWriteOwner()) {
                  nonOwner = cache.getAdvancedCache();
               }
               if (primaryOwner != null && nonOwner != null) {
                  return new TestCache(primaryOwner, nonOwner);
               }
            }
            throw new IllegalStateException("didn't find a cache... should never happen!");
         }
      };
      public abstract TestCache create(String groupName, List<Cache<GroupKey, String>> cacheList);
   }
   /**
    * Protostream-marshallable key whose group is exposed via the {@code @Group}
    * annotated accessor. Equality uses both group and key.
    */
   public static class GroupKey {
      @ProtoField(1)
      final String group;
      @ProtoField(number = 2, defaultValue = "0")
      final int key;
      @ProtoFactory
      GroupKey(String group, int key) {
         this.group = group;
         this.key = key;
      }
      @Group
      public String getGroup() {
         return group;
      }
      public int getKey() {
         return key;
      }
      @Override
      public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         GroupKey groupKey = (GroupKey) o;
         return key == groupKey.key && group.equals(groupKey.group);
      }
      @Override
      public int hashCode() {
         int result = group.hashCode();
         result = 31 * result + key;
         return result;
      }
      @Override
      public String toString() {
         return "GroupKey{" +
               "group='" + group + '\'' +
               ", key=" + key +
               '}';
      }
   }
   /**
    * Pair of the group's primary owner cache and the cache under test (which may be the
    * primary itself, a backup, or a non-owner depending on {@link TestCacheFactory}).
    */
   public static class TestCache {
      public final Cache<GroupKey, String> primaryOwner;
      public final AdvancedCache<GroupKey, String> testCache;
      public TestCache(Cache<GroupKey, String> primaryOwner, AdvancedCache<GroupKey, String> testCache) {
         this.primaryOwner = primaryOwner;
         this.testCache = testCache;
      }
   }
   // serialization context so GroupKey (and CacheMode) can be marshalled in tests
   @AutoProtoSchemaBuilder(
         includeClasses = {
               GroupKey.class,
               CacheMode.class,
         },
         schemaFileName = "test.core.GroupTestsSCI.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.GroupTestsSCI",
         service = false
   )
   interface GroupTestsSCI extends SerializationContextInitializer {
      GroupTestsSCI INSTANCE = new GroupTestsSCIImpl();
   }
}
| 7,805
| 34.97235
| 120
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/groups/GroupedKey.java
|
package org.infinispan.distribution.groups;
import org.infinispan.distribution.group.Group;
/**
 * Test key whose placement group is provided via the {@code @Group} annotated accessor.
 * <p>
 * Equality and hashing deliberately consider only {@code key}, never {@code group}: the
 * same key name in two different groups compares equal while still being routed to
 * (potentially) different owners by the grouping logic.
 */
public class GroupedKey {

   private final String group;
   private final String key;

   public GroupedKey(String group, String key) {
      this.group = group;
      this.key = key;
   }

   @Group
   public String getGroup() {
      return group;
   }

   @Override
   public int hashCode() {
      // consistent with equals(): key-only
      return key.hashCode();
   }

   @Override
   public boolean equals(Object obj) {
      if (!(obj instanceof GroupedKey)) {
         return false;
      }
      return key.equals(((GroupedKey) obj).key);
   }
}
| 645
| 18
| 59
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/ProfileTest.java
|
package org.infinispan.profiling;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.infinispan.profiling.testinternals.Generator;
import org.infinispan.profiling.testinternals.TaskRunner;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* Test to use with a profiler to profile replication. To be used in conjunction with ProfileSlaveTest.
* <p/>
* Typical usage pattern:
* <p/>
* 1. Start a single test method in ProfileSlaveTest. This will block until you kill it. 2. Start the corresponding
* test in this class, with the same name, in a different JVM, and attached to a profiler. 3. Profile away!
* <p/>
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
*/
@Test(groups = "profiling", testName = "profiling.ProfileTest")
public class ProfileTest extends AbstractProfileTest {
   /*
      Test configuration flags
    */
   protected static long NUM_OPERATIONS = 1000000; // DURATION is replaced with a fixed number of operations instead.
   protected static final int NUM_THREADS = 25;
   protected static final int MAX_RANDOM_SLEEP_MILLIS = 1;
   protected static final int MAX_OVERALL_KEYS = 2000;
   protected static final int WARMUP_LOOPS = 20000;
   protected static final boolean USE_SLEEP = false; // throttle generation a bit
   protected static final boolean SKIP_WARMUP = true;
   // pool of unique keys generated in init(); shared by all worker runnables
   private List<Object> keys = new ArrayList<Object>(MAX_OVERALL_KEYS);
   protected static boolean USE_TRANSACTIONS = false;
   // Command-line entry point: args[0] = cache name/mode, args[1] (optional) doubles as
   // both the transactions flag and the cluster-name override (NOTE(review): the same
   // argument is parsed twice for two different purposes — confirm that is intended).
   public static void main(String[] args) throws Exception {
      ProfileTest pst = new ProfileTest();
      pst.startedInCmdLine = true;
      String mode = args[0];
      if (args.length > 1) USE_TRANSACTIONS = Boolean.parseBoolean(args[1]);
      try {
         if (args.length > 1) pst.clusterNameOverride = args[1];
         pst.testWith(mode);
      } finally {
         pst.destroyAfterMethod();
         pst.destroyAfterClass();
      }
   }
   // Runs the complete profiling cycle against the named cache.
   protected void testWith(String cacheName) throws Exception {
      log.warnf("Starting profile test, cache name = %s", cacheName);
      initTest();
      cache = cacheManager.getCache(cacheName);
      runCompleteTest(cacheName);
   }
   public void testLocalMode() throws Exception {
      runCompleteTest(LOCAL_CACHE_NAME);
   }
   public void testReplMode() throws Exception {
      runCompleteTest(REPL_SYNC_CACHE_NAME);
   }
   // init -> startup -> (wait for cluster unless local) -> warmup -> measured run
   private void runCompleteTest(String cacheName) throws Exception {
      cache = cacheManager.getCache(cacheName);
      init();
      startup();
      if (!cacheName.equals(LOCAL_CACHE_NAME)) {
         System.out.println("Waiting for members to join.");
         TestingUtil.blockUntilViewReceived(cache, 2, 120000, true);
         System.out.println("Cluster ready, cache mode is " + cache.getCacheConfiguration().clustering().cacheMode());
      }
      warmup();
      doTest();
   }
   /**
    * The following test phases can be profiled individually using triggers in JProfiler.
    */
   protected void init() {
      long startTime = System.currentTimeMillis();
      log.warn("Starting init() phase");
      keys.clear();
      for (int i = 0; i < MAX_OVERALL_KEYS; i++) {
         Object key;
         // regenerate until unique (linear contains() scan is acceptable at this size)
         do {
            key = Generator.createRandomKey();
         }
         while (keys.contains(key));
         if (i % 10 == 0) {
            log.trace("Generated " + i + " keys");
         }
         keys.add(key);
      }
      System.gc();
      long duration = System.currentTimeMillis() - startTime;
      log.warn("Finished init() phase. " + printDuration(duration));
   }
   protected void startup() {
      long startTime = System.currentTimeMillis();
      log.warn("Starting cache");
      cache.start();
      long duration = System.currentTimeMillis() - startTime;
      log.warn("Started cache. " + printDuration(duration));
   }
   // Optional JVM warmup: pre-create all entries, then churn get/put/remove cycles.
   // The cache is restarted afterwards so the measured run starts clean.
   private void warmup() throws InterruptedException {
      if (SKIP_WARMUP) {
         log.info("Skipping warmup; sleeping 3 secs");
         TestingUtil.sleepThread(3000);
         return;
      }
      long startTime = System.currentTimeMillis();
      TaskRunner exec = new TaskRunner(NUM_THREADS, true);
      log.warn("Starting warmup");
      for (final Object key : keys) {
         exec.execute(new Runnable() {
            @Override
            public void run() {
               // this will create the necessary entries.
               cache.put(key, Collections.emptyMap());
            }
         });
      }
      // loop through WARMUP_LOOPS gets and puts for JVM optimisation
      for (int i = 0; i < WARMUP_LOOPS; i++) {
         exec.execute(new Runnable() {
            @Override
            public void run() {
               Object key = Generator.getRandomElement(keys);
               cache.get(key);
               cache.put(key, "Value");
               cache.remove(key);
            }
         });
      }
      exec.stop();
      long duration = System.currentTimeMillis() - startTime;
      log.warn("Finished warmup. " + printDuration(duration));
      cache.stop();
      startup();
   }
   // Measured phase: submits NUM_OPERATIONS tasks (round-robin PUT/GET/REMOVE) to the
   // runner, then reports throughput and per-operation averages.
   private void doTest() throws Exception {
      TaskRunner exec = new TaskRunner(NUM_THREADS);
      log.warn("Starting test");
      int i;
      long print = NUM_OPERATIONS / 10;
      // per-operation-type cumulative latencies, in nanoseconds
      AtomicLong durationPuts = new AtomicLong();
      AtomicLong durationGets = new AtomicLong();
      AtomicLong durationRemoves = new AtomicLong();
      long stElapsed = System.nanoTime();
      for (i = 0; i < NUM_OPERATIONS; i++) {
         MyRunnable r = null;
         switch (i % 3) {
            case 0:
               r = new Putter(i, durationPuts);
               break;
            case 1:
               r = new Getter(i, durationGets);
               break;
            case 2:
               r = new Remover(i, durationRemoves);
               break;
         }
         if (i % print == 0)
            log.warn("processing iteration " + i);
         exec.execute(r);
//         if (USE_SLEEP) TestingUtil.sleepRandom(MAX_RANDOM_SLEEP_MILLIS);
         if (USE_SLEEP) TestingUtil.sleepThread(MAX_RANDOM_SLEEP_MILLIS);
      }
      log.warn("Finished generating runnables; awaiting executor completion");
      // wait for executors to complete!
      exec.stop();
      // wait up to 1 sec for each call?
      long elapsedTimeNanos = System.nanoTime() - stElapsed;
      log.warn("Finished test. " + printDuration((long) toMillis(elapsedTimeNanos)));
      log.warn("Throughput: " + ((double) NUM_OPERATIONS * 1000 / toMillis(elapsedTimeNanos)) + " operations per second (roughly equal numbers of PUT, GET and REMOVE)");
      log.warn("Average GET time: " + printAvg(durationGets.get()));
      log.warn("Average PUT time: " + printAvg(durationPuts.get()));
      log.warn("Average REMOVE time: " + printAvg(durationRemoves.get()));
   }
   // Average latency per operation, formatted in microseconds (one third of all
   // operations are of each type, hence NUM_OPERATIONS / 3).
   private String printAvg(long totalNanos) {
      double nOps = NUM_OPERATIONS / 3;
      double avg = (totalNanos) / nOps;
      double avgMicros = avg / 1000;
      return avgMicros + " µs";
   }
   private double toMillis(long nanos) {
      return ((double) nanos / (double) 1000000);
   }
   enum Mode {
      PUT, GET, REMOVE
   }
   // Base worker: performs one timed cache operation (optionally inside a transaction)
   // on a random key and accumulates the elapsed nanos into the shared counter.
   private abstract class MyRunnable implements Runnable {
      int id;
      Mode mode;
      AtomicLong duration;
      @Override
      public void run() {
         try {
            Object key = Generator.getRandomElement(keys);
            long d = 0, st = 0;
            switch (mode) {
               case PUT:
                  Object value = Generator.getRandomString();
                  st = System.nanoTime();
                  if (USE_TRANSACTIONS) TestingUtil.getTransactionManager(cache).begin();
                  cache.put(key, value);
                  if (USE_TRANSACTIONS) TestingUtil.getTransactionManager(cache).commit();
                  d = System.nanoTime() - st;
                  break;
               case GET:
                  st = System.nanoTime();
                  if (USE_TRANSACTIONS) TestingUtil.getTransactionManager(cache).begin();
                  cache.get(key);
                  if (USE_TRANSACTIONS) TestingUtil.getTransactionManager(cache).commit();
                  d = System.nanoTime() - st;
                  break;
               case REMOVE:
                  st = System.nanoTime();
                  if (USE_TRANSACTIONS) TestingUtil.getTransactionManager(cache).begin();
                  cache.remove(key);
                  if (USE_TRANSACTIONS) TestingUtil.getTransactionManager(cache).commit();
                  d = System.nanoTime() - st;
                  break;
            }
            duration.getAndAdd(d);
         } catch (Exception e) {
            // log and keep going: one failed op must not kill the profiling run
            log.error("Caught ", e);
         }
      }
   }
   private class Putter extends MyRunnable {
      private Putter(int id, AtomicLong duration) {
         this.id = id;
         this.duration = duration;
         mode = Mode.PUT;
      }
   }
   private class Getter extends MyRunnable {
      private Getter(int id, AtomicLong duration) {
         this.id = id;
         this.duration = duration;
         mode = Mode.GET;
      }
   }
   private class Remover extends MyRunnable {
      private Remover(int id, AtomicLong duration) {
         this.id = id;
         this.duration = duration;
         mode = Mode.REMOVE;
      }
   }
   // Human-readable duration: seconds above 2000 ms, millis otherwise.
   protected String printDuration(long duration) {
      if (duration > 2000) {
         double dSecs = ((double) duration / (double) 1000);
         return "Duration: " + dSecs + " seconds";
      } else {
         return "Duration: " + duration + " millis";
      }
   }
}
| 9,701
| 32.112628
| 169
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/TestProfileSlave.java
|
package org.infinispan.profiling;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "profiling", testName = "profiling.TestProfileSlave")
public class TestProfileSlave extends AbstractProfileTest {
   // Command-line entry point: args[0] selects the cache/mode to join with.
   public static void main(String[] args) throws Exception {
      TestProfileSlave pst = new TestProfileSlave();
      pst.startedInCmdLine = true;
      String mode = args[0];
      try {
         pst.testWith(mode);
      } finally {
         pst.destroyAfterMethod();
         pst.destroyAfterClass();
      }
   }
   public void testReplSync() throws Exception {
      testWith(REPL_SYNC_CACHE_NAME);
   }
   public void testReplAsync() throws Exception {
      testWith(REPL_ASYNC_CACHE_NAME);
   }
   public void testDistSync() throws Exception {
      testWith(DIST_SYNC_CACHE_NAME);
   }
   public void testDistAsync() throws Exception {
      testWith(DIST_ASYNC_CACHE_NAME);
   }
   public void testDistSyncL1() throws Exception {
      testWith(DIST_SYNC_L1_CACHE_NAME);
   }
   public void testDistAsyncL1() throws Exception {
      testWith(DIST_ASYNC_L1_CACHE_NAME);
   }
   // Blocks until the master leaves the cluster: a daemon thread sleeps forever and the
   // ShutdownHook listener interrupts it when this node is the only member left. The
   // swallowed exception below is the intended exit path for that thread.
   private void waitForTest() throws Exception {
      Thread t = new Thread("CompletionThread") {
         @Override
         public void run() {
            try {
               while (true) Thread.sleep(10000);
            } catch (Exception e) {
            }
         }
      };
      // attach a view change listener
      cacheManager.addListener(new ShutdownHook(t));
      t.setDaemon(true);
      t.start();
      try {
         t.join();
      } catch (InterruptedException ie) {
         // move on...
      }
   }
   private void doTest() {
      // trigger for JProfiler
   }
   // Joins the cluster for the named cache and waits until the master side finishes.
   protected void testWith(String cachename) throws Exception {
      log.warnf("Starting slave, cache name = %s", cachename);
      initTest();
      cache = cacheManager.getCache(cachename);
      System.out.println("Waiting for members to join.");
      TestingUtil.blockUntilViewReceived(cache, 2, 120000, true);
      System.out.println("Cluster ready, cache mode is " + cache.getCacheConfiguration().clustering().cacheMode());
      System.out.println("Waiting for test completion. Hit CTRL-C when done.")
      doTest();
      waitForTest();
   }
   /**
    * Cluster listener that interrupts the completion thread once this node is the sole
    * remaining member, letting {@code waitForTest()} return.
    */
   @Listener
   public static final class ShutdownHook {
      final Thread completionThread;
      public ShutdownHook(Thread completionThread) {
         this.completionThread = completionThread;
      }
      @ViewChanged
      public void viewChanged(ViewChangedEvent vce) {
         System.out.println("Saw view change event " + vce);
         // if the new view ONLY contains me, die!
         if (vce.getOldMembers().size() > vce.getNewMembers().size() && vce.getNewMembers().size() == 1 && vce.getNewMembers().contains(vce.getLocalAddress())) {
            completionThread.interrupt();
         }
      }
   }
}
| 3,119
| 28.714286
| 161
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/MemConsumptionTest.java
|
package org.infinispan.profiling;
import java.io.IOException;
import java.text.NumberFormat;
import java.util.Arrays;
import java.util.Random;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
@Test(groups = "profiling", testName = "profiling.MemConsumptionTest")
public class MemConsumptionTest extends AbstractInfinispanTest {
   // adjust the next 4 values to size the experiment
   int numEntries = 1000000;
   int payloadSize = 60; // bytes
   int keySize = 10; // bytes
   PayloadType payloadType = PayloadType.STRINGS;

   enum PayloadType {
      STRINGS, BYTE_ARRAYS
   }

   int bytesPerCharacter = 2;
   final Random r = new Random();

   /**
    * Populates a local cache with {@code numEntries} key/value pairs and then blocks on
    * {@code System.in} so heap usage can be inspected manually (jconsole, jmap, ...).
    *
    * @throws IOException if reading from stdin fails
    */
   public void testMemConsumption() throws IOException {
      int kBytesCached = (bytesPerCharacter * numEntries * (payloadSize + keySize)) / 1024;
      System.out.println("Bytes to be cached: " + NumberFormat.getIntegerInstance().format(kBytesCached) + " kb");

      // parameterized reference instead of the raw Cache type
      Cache<Object, Object> c = TestCacheManagerFactory.createCacheManager().getCache();
      for (int i = 0; i < numEntries; i++) {
         switch (payloadType) {
            case STRINGS:
               c.put(generateUniqueString(i, keySize), generateRandomString(payloadSize));
               break;
            case BYTE_ARRAYS:
               c.put(generateUniqueKey(i, keySize), generateBytePayload(payloadSize));
               break;
            default:
               throw new CacheException("Unknown payload type");
         }
         if (i % 1000 == 0) System.out.println("Added " + i + " entries");
      }
      System.out.println("Calling System.gc()");
      System.gc(); // clear any unnecessary objects
      TestingUtil.sleepThread(1000); // wait for gc

      // wait for manual test exit
      System.out.println("Cache populated; check mem usage using jconsole, etc.!");
      System.in.read();
   }

   // Unique key of exactly keySize chars: the run number, right-padded with '_'.
   private String generateUniqueString(int runNumber, int keySize) {
      StringBuilder sb = new StringBuilder(keySize);
      sb.append(runNumber);
      while (sb.length() < keySize) {
         sb.append('_');
      }
      return sb.toString();
   }

   // Unique byte[] key: runNumber encoded little-endian in the first 4 bytes,
   // remaining bytes left at their default of zero.
   private byte[] generateUniqueKey(int runNumber, int keySize) {
      byte[] b = new byte[keySize];
      b[0] = (byte) (runNumber);
      b[1] = (byte) (runNumber >>> 8);
      b[2] = (byte) (runNumber >>> 16);
      b[3] = (byte) (runNumber >>> 24);
      return b;
   }

   // All-zero payload; a freshly allocated array is already zero-initialized,
   // so no explicit fill is needed.
   private byte[] generateBytePayload(int payloadSize) {
      return new byte[payloadSize];
   }

   // Random payload of stringSize decimal characters (digits 0-8, per nextInt(9)).
   private String generateRandomString(int stringSize) {
      StringBuilder sb = new StringBuilder(stringSize);
      for (int i = 0; i < stringSize; i++) {
         sb.append(r.nextInt(9)); // single digit
      }
      assert sb.length() == stringSize;
      return sb.toString();
   }
}
| 3,177
| 31.428571
| 114
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/AbstractProfileTest.java
|
package org.infinispan.profiling;
import static org.infinispan.configuration.cache.CacheMode.DIST_ASYNC;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_ASYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_SYNC;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadFactory;
import org.infinispan.commons.executors.ThreadPoolExecutorFactory;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.lookup.JBossStandaloneJTAManagerLookup;
import org.infinispan.util.concurrent.WithinThreadExecutor;
import org.testng.annotations.Test;
@Test(groups = "profiling", testName = "profiling.AbstractProfileTest")
public abstract class AbstractProfileTest extends SingleCacheManagerTest {
protected static final String LOCAL_CACHE_NAME = "local";
protected static final String REPL_SYNC_CACHE_NAME = "repl_sync";
protected static final String REPL_ASYNC_CACHE_NAME = "repl_async";
protected static final String DIST_SYNC_L1_CACHE_NAME = "dist_sync_l1";
protected static final String DIST_ASYNC_L1_CACHE_NAME = "dist_async_l1";
protected static final String DIST_SYNC_CACHE_NAME = "dist_sync";
protected static final String DIST_ASYNC_CACHE_NAME = "dist_async";
boolean startedInCmdLine = false;
String clusterNameOverride = null;
protected void initTest() throws Exception {
System.out.println("Setting up test params!");
if (startedInCmdLine) cacheManager = createCacheManager();
}
private ConfigurationBuilder getBaseCfg() {
ConfigurationBuilder cfg = new ConfigurationBuilder();
cfg.locking().concurrencyLevel(5000).transaction().transactionManagerLookup(new JBossStandaloneJTAManagerLookup());
return cfg;
}
private ConfigurationBuilder getClusteredCfg(CacheMode mode, boolean l1) {
ConfigurationBuilder cfg = getBaseCfg();
cfg
.locking().lockAcquisitionTimeout(60000)
.clustering().cacheMode(mode).remoteTimeout(60000).stateTransfer().fetchInMemoryState(false);
if (mode.isDistributed()) {
cfg.clustering().l1().enabled(l1).lifespan(120000);
}
return cfg;
}
@Override
protected EmbeddedCacheManager createCacheManager() throws Exception {
GlobalConfigurationBuilder builder = new GlobalConfigurationBuilder();
builder.transport().transportThreadPool().threadPoolFactory(new WithinThreadExecutorFactory());
cacheManager = TestCacheManagerFactory.createClusteredCacheManager(builder, new ConfigurationBuilder());
cacheManager.defineConfiguration(LOCAL_CACHE_NAME, getBaseCfg().build());
cacheManager.defineConfiguration(REPL_SYNC_CACHE_NAME, getClusteredCfg(REPL_SYNC, false).build());
cacheManager.defineConfiguration(REPL_ASYNC_CACHE_NAME, getClusteredCfg(REPL_ASYNC, false).build());
cacheManager.defineConfiguration(DIST_SYNC_CACHE_NAME, getClusteredCfg(DIST_SYNC, false).build());
cacheManager.defineConfiguration(DIST_ASYNC_CACHE_NAME, getClusteredCfg(DIST_ASYNC, false).build());
cacheManager.defineConfiguration(DIST_SYNC_L1_CACHE_NAME, getClusteredCfg(DIST_SYNC, true).build());
cacheManager.defineConfiguration(DIST_ASYNC_L1_CACHE_NAME, getClusteredCfg(DIST_ASYNC, true).build());
return cacheManager;
}
public static class WithinThreadExecutorFactory implements ThreadPoolExecutorFactory {
@Override
public ExecutorService createExecutor(ThreadFactory factory) {
return new WithinThreadExecutor();
}
@Override
public void validate() {
// No-op
}
}
}
| 3,993
| 43.876404
| 121
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/CacheCreationStressTest.java
|
package org.infinispan.profiling;
import static org.infinispan.test.TestingUtil.withCacheManager;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.CacheManagerCallable;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* Test class that verifies how quickly Cache instances are created under different scenarios
*
* @author Galder Zamarreño
* @since 4.2
*/
@Test(groups = "profiling", testName = "profiling.CacheCreationStressTest")
public class CacheCreationStressTest extends AbstractInfinispanTest {
public void testCreateCachesFromSameContainer() {
final long start = System.currentTimeMillis();
withCacheManager(new CacheManagerCallable(TestCacheManagerFactory.createCacheManager()) {
@Override
public void call() {
for (int i = 0; i < 1000; i++) {
cm.getCache(generateRandomString(20));
}
System.out.println("Took: " + TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis() - start));
TestingUtil.sleepThread(2000);
}
});
}
public static String generateRandomString(int numberOfChars) {
Random r = new Random(System.currentTimeMillis());
StringBuilder sb = new StringBuilder();
for (int i = 0; i < numberOfChars; i++) sb.append((char) (64 + r.nextInt(26)));
return sb.toString();
}
}
| 1,528
| 32.977778
| 111
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/testinternals/TaskRunner.java
|
package org.infinispan.profiling.testinternals;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
* Essentially a delegate to an ExecutorService, but a special one that is only used by perf tests so it can be ignored
* when profiling.
*/
public class TaskRunner {
ExecutorService exec;
public TaskRunner(int numThreads) {
this(numThreads, false);
}
public TaskRunner(int numThreads, final boolean warmup) {
final AtomicInteger counter = new AtomicInteger(0);
final ThreadGroup tg = new ThreadGroup(Thread.currentThread().getThreadGroup(), warmup ? "WarmupLoadGenerators" : "LoadGenerators");
this.exec = Executors.newFixedThreadPool(numThreads, new ThreadFactory() {
public Thread newThread(Runnable r) {
return new Thread(tg, r, (warmup ? "WarmupLoadGenerator-" : "LoadGenerator-") + counter.incrementAndGet());
}
});
}
public void execute(Runnable r) {
exec.execute(r);
}
public void stop() throws InterruptedException {
exec.shutdown();
while (!exec.awaitTermination(30, TimeUnit.SECONDS)) Thread.sleep(30);
}
}
| 1,298
| 32.307692
| 138
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/profiling/testinternals/Generator.java
|
package org.infinispan.profiling.testinternals;
import java.util.List;
import java.util.Random;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.jgroups.util.UUID;
public class Generator {
private static final Random r = new Random();
public static String getRandomString() {
return getRandomString(10);
}
public static String getRandomString(int maxKeySize) {
StringBuilder sb = new StringBuilder();
int len = r.nextInt(maxKeySize) + 1;
for (int i = 0; i < len; i++) {
sb.append((char) ('A' + r.nextInt(26)));
}
return sb.toString();
}
public static <T> T getRandomElement(List<T> list) {
return list.get(r.nextInt(list.size()));
}
public static Object createRandomKey() {
return Integer.toHexString(r.nextInt(Integer.MAX_VALUE));
}
public static byte[] getRandomByteArray(int maxByteArraySize) {
int sz = r.nextInt(maxByteArraySize);
byte[] b = new byte[sz];
for (int i=0; i<sz; i++) b[i] = (byte) r.nextInt(Byte.MAX_VALUE);
return b;
}
public static Address generateAddress() {
return new JGroupsAddress(UUID.randomUUID());
}
}
| 1,244
| 26.065217
| 71
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/commons/marshall/PojoWithSerializeWith.java
|
package org.infinispan.commons.marshall;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
/**
* A test pojo that is marshalled using Infinispan's
* {@link org.infinispan.commons.marshall.Externalizer} which is annotated with
* {@link SerializeWith}
*
* @author Galder Zamarreño
* @since 5.0
*/
@SerializeWith(PojoWithSerializeWith.Externalizer.class)
public class PojoWithSerializeWith {
final PojoWithAttributes pojo;
public PojoWithSerializeWith(int age, String key) {
this.pojo = new PojoWithAttributes(age, key);
}
public PojoWithSerializeWith(PojoWithAttributes pojo) {
this.pojo = pojo;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PojoWithSerializeWith that = (PojoWithSerializeWith) o;
return !(pojo != null ? !pojo.equals(that.pojo) : that.pojo != null);
}
@Override
public int hashCode() {
return pojo != null ? pojo.hashCode() : 0;
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<PojoWithSerializeWith>, Serializable {
@Override
public void writeObject(ObjectOutput output, PojoWithSerializeWith object) throws IOException {
PojoWithAttributes.writeObject(output, object.pojo);
}
@Override
public PojoWithSerializeWith readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new PojoWithSerializeWith(PojoWithAttributes.readObject(input));
}
}
}
| 1,634
| 27.189655
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/commons/marshall/AdaptiveBufferSizePredictorTest.java
|
package org.infinispan.commons.marshall;
import org.testng.annotations.Test;
/**
* Tests that the adaptive buffer size predictor adjusts sizes
* in different circumstances.
*
* @author Galder Zamarreño
* @since 5.0
*/
@Test(groups = "functional", testName = "marshall.AdaptiveBufferSizePredictorTest")
public class AdaptiveBufferSizePredictorTest {
public void testAdaptivenesOfBufferSizeChanges() throws Exception {
AdaptiveBufferSizePredictor predictor = new AdaptiveBufferSizePredictor();
int size = 32;
int nextSize;
int prevNextSize = AdaptiveBufferSizePredictor.DEFAULT_INITIAL;
for (int i = 0; i < 100; i++) {
predictor.recordSize(size);
nextSize = predictor.nextSize(null);
if (i % 2 != 0) {
if ((nextSize * 0.88) < size)
break;
else {
assert nextSize < prevNextSize;
prevNextSize = nextSize;
}
}
}
size = 32768;
for (int i = 0; i < 100; i++) {
predictor.recordSize(size);
nextSize = predictor.nextSize(null);
if ((nextSize * 0.89) > size) {
break;
} else {
assert nextSize > prevNextSize;
prevNextSize = nextSize;
}
}
}
}
| 1,301
| 26.125
| 83
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/commons/marshall/PojoWithAttributes.java
|
package org.infinispan.commons.marshall;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.UUID;
import org.infinispan.commons.util.Util;
import org.infinispan.test.data.Key;
/**
* A test pojo with references to variables that are marshalled in different
* ways, including: primitives, objects that are marshalled with internal
* externalizers, objects that are {@link java.io.Externalizable} and objects
* that are {@link java.io.Serializable}
*
* @author Galder Zamarreño
* @since 5.0
*/
public class PojoWithAttributes {
final int age;
final Key key;
final UUID uuid;
public PojoWithAttributes(int age, String key) {
this.age = age;
this.key = new Key(key);
this.uuid = Util.threadLocalRandomUUID();
}
PojoWithAttributes(int age, Key key, UUID uuid) {
this.age = age;
this.key = key;
this.uuid = uuid;
}
public static void writeObject(ObjectOutput output, PojoWithAttributes pojo) throws IOException {
output.writeInt(pojo.age);
output.writeObject(pojo.key);
output.writeObject(pojo.uuid);
}
public static PojoWithAttributes readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int age = input.readInt();
Key key = (Key) input.readObject();
UUID uuid = (UUID) input.readObject();
return new PojoWithAttributes(age, key, uuid);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PojoWithAttributes that = (PojoWithAttributes) o;
if (age != that.age) return false;
if (key != null ? !key.equals(that.key) : that.key != null) return false;
if (uuid != null ? !uuid.equals(that.uuid) : that.uuid != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = age;
result = 31 * result + (key != null ? key.hashCode() : 0);
result = 31 * result + (uuid != null ? uuid.hashCode() : 0);
return result;
}
}
| 2,104
| 27.835616
| 110
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentStressTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* Verifies the atomic semantic of Infinispan's implementations of java.util.concurrent.ConcurrentMap'
* conditional operations.
*
* @author Sanne Grinovero <sanne@infinispan.org> (C) 2012 Red Hat Inc.
* @author William Burns
* @see java.util.concurrent.ConcurrentMap#replace(Object, Object, Object)
* @since 7.0
*/
@Test(groups = "stress", testName = "api.ConditionalOperationsConcurrentStressTest", timeOut = 15*60*1000, invocationCount = 1000)
public class ConditionalOperationsConcurrentStressTest extends ConditionalOperationsConcurrentTest {
@Override
public Object[] factory() {
return new Object[] {
new ConditionalOperationsConcurrentStressTest().cacheMode(CacheMode.DIST_SYNC),
};
}
public ConditionalOperationsConcurrentStressTest() {
super(3, 500, 4);
}
@Override
public void testReplace() throws Exception {
super.testReplace();
}
@Override
public void testConditionalRemove() throws Exception {
super.testConditionalRemove();
}
@Override
public void testPutIfAbsent() throws Exception {
super.testPutIfAbsent();
}
}
| 1,260
| 28.325581
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/CacheAPITest.java
|
package org.infinispan.api;
import static org.infinispan.test.TestingUtil.v;
import static org.infinispan.test.TestingUtil.withTx;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import jakarta.transaction.TransactionManager;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* Tests the {@link org.infinispan.Cache} public API at a high level
*
* @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
*/
@Test(groups = "functional")
public abstract class CacheAPITest extends APINonTxTest {
@Override
protected EmbeddedCacheManager createCacheManager() throws Exception {
// start a single cache instance
ConfigurationBuilder cb = getDefaultStandaloneCacheConfig(true);
cb.locking().isolationLevel(getIsolationLevel());
addEviction(cb);
amend(cb);
EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(false);
cm.defineConfiguration("test", cb.build());
cache = cm.getCache("test");
return cm;
}
protected void amend(ConfigurationBuilder cb) {
}
protected abstract IsolationLevel getIsolationLevel();
protected ConfigurationBuilder addEviction(ConfigurationBuilder cb) {
return cb;
}
/**
* Tests that the configuration contains the values expected, as well as immutability of certain elements
*/
public void testConfiguration() {
Configuration c = cache.getCacheConfiguration();
assertEquals(CacheMode.LOCAL, c.clustering().cacheMode());
assertNotNull(c.transaction().transactionManagerLookup());
}
public void testGetMembersInLocalMode() {
assertNull("Cache members should be null if running in LOCAL mode", manager(cache).getAddress());
}
public void testRollbackAfterOverwrite() throws Exception {
String key = "key", value = "value", value2 = "value2";
int size = 1;
cache.put(key, value);
assertEquals(value, cache.get(key));
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
TransactionManager tm = TestingUtil.getTransactionManager(cache);
TestingUtil.withTx(tm, () -> {
assertEquals(value, cache.put(key, value2));
assertEquals(value2, cache.get(key));
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value2));
assertFalse(cache.values().contains(value));
tm.setRollbackOnly();
return null;
});
assertEquals(value, cache.get(key));
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
}
public void testRollbackAfterRemove() throws Exception {
String key = "key", value = "value";
cache.put(key, value);
assertEquals(value, cache.get(key));
int size = 1;
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertEquals(value, cache.remove(key));
assertNull(value, cache.get(key));
int tmSize = 0;
assertEquals(tmSize, cache.size());
assertEquals(tmSize, cache.keySet().size());
assertEquals(tmSize, cache.values().size());
assertEquals(tmSize, cache.entrySet().size());
assertFalse(cache.keySet().contains(key));
assertFalse(cache.values().contains(value));
tm.setRollbackOnly();
return false;
});
assertEquals(value, cache.get(key));
size = 1;
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
}
public void testEntrySetEqualityInTx(Method m) throws Exception {
Map<Object, Object> dataIn = new HashMap<>();
dataIn.put(1, v(m, 1));
dataIn.put(2, v(m, 2));
cache.putAll(dataIn);
TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
withTx(tm, () -> {
Map<Integer, String> txDataIn = new HashMap<>();
txDataIn.put(3, v(m, 3));
// Add an entry within tx
cache.putAll(txDataIn);
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
dataIn.putAll(txDataIn);
assertEquals(dataIn.entrySet(), entries);
return null;
});
}
public void testEntrySetIterationBeforeInTx(Method m) throws Exception {
Map<Integer, String> dataIn = new HashMap<>();
dataIn.put(1, v(m, 1));
dataIn.put(2, v(m, 2));
cache.putAll(dataIn);
Map<Object, Object> foundValues = new HashMap<>();
TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
withTx(tm, () -> {
Set<Entry<Object, Object>> entries = cache.entrySet();
// Add an entry within tx
cache.put(3, v(m, 3));
cache.put(4, v(m, 4));
for (Entry<Object, Object> entry : entries) {
foundValues.put(entry.getKey(), entry.getValue());
}
tm.setRollbackOnly();
return null;
});
dataIn.put(3, v(m, 3));
dataIn.put(4, v(m, 4));
assertEquals(dataIn, foundValues);
}
public void testEntrySetIterationAfterInTx(Method m) throws Exception {
Map<Integer, String> dataIn = new HashMap<>();
dataIn.put(1, v(m, 1));
dataIn.put(2, v(m, 2));
cache.putAll(dataIn);
Map<Object, Object> foundValues = new HashMap<>();
TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
withTx(tm, () -> {
Set<Entry<Object, Object>> entries = cache.entrySet();
Iterator<Entry<Object, Object>> itr = entries.iterator();
// Add an entry within tx
cache.put(3, v(m, 3));
cache.put(4, v(m, 4));
while (itr.hasNext()) {
Entry<Object, Object> entry = itr.next();
foundValues.put(entry.getKey(), entry.getValue());
}
tm.setRollbackOnly();
return null;
});
assertEquals(dataIn, foundValues);
}
public void testEntrySetIterationInTx(Method m) throws Exception {
Map<Integer, String> dataIn = new HashMap<>();
dataIn.put(1, v(m, 1));
dataIn.put(2, v(m, 2));
Map<Object, Object> foundValues = new HashMap<>();
TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
withTx(tm, () -> {
// Add some entries before iteration start
cache.putAll(dataIn);
Set<Entry<Object, Object>> entries = cache.entrySet();
Iterator<Entry<Object, Object>> itr = entries.iterator();
// Add more entries during iteration
cache.put(3, v(m, 3));
cache.put(4, v(m, 4));
while (itr.hasNext()) {
Entry<Object, Object> entry = itr.next();
foundValues.put(entry.getKey(), entry.getValue());
}
tm.setRollbackOnly();
return null;
});
assertEquals(dataIn, foundValues);
}
public void testRollbackAfterPut() throws Exception {
String key = "key", value = "value", key2 = "keyTwo", value2 = "value2";
cache.put(key, value);
assertEquals(value, cache.get(key));
int size = 1;
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
cache.put(key2, value2);
assertEquals(value2, cache.get(key2));
assertTrue(cache.keySet().contains(key2));
int tmSize = 2;
assertEquals(tmSize, cache.size());
assertEquals(tmSize, cache.keySet().size());
assertEquals(tmSize, cache.values().size());
assertEquals(tmSize, cache.entrySet().size());
assertTrue(cache.values().contains(value2));
assertTrue(cache.values().contains(value));
tm.setRollbackOnly();
return null;
});
assertEquals(value, cache.get(key));
assertEquals(size, cache.size());
assertEquals(size, cache.keySet().size());
assertEquals(size, cache.values().size());
assertEquals(size, cache.entrySet().size());
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
}
public void testSizeAfterClear() {
for (int i = 0; i < 10; i++) {
cache.put(i, "value" + i);
}
cache.clear();
assertTrue(cache.isEmpty());
}
public void testPutIfAbsentAfterRemoveInTx() throws Exception {
String key = "key_1", old_value = "old_value";
cache.put(key, old_value);
assertEquals(old_value, cache.get(key));
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertEquals(old_value, cache.remove(key));
assertNull(cache.get(key));
assertEquals(cache.putIfAbsent(key, "new_value"), null);
tm.setRollbackOnly();
return null;
});
assertEquals(old_value, cache.get(key));
}
public void testSizeInExplicitTxWithNonExistent() throws Exception {
assertEquals(0, cache.size());
cache.put("k", "v");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertNull(cache.get("no-exist"));
assertEquals(1, cache.size());
assertNull(cache.put("no-exist", "value"));
assertEquals(2, cache.size());
tm.setRollbackOnly();
return null;
});
}
public void testSizeInExplicitTxWithRemoveNonExistent() throws Exception {
assertEquals(0, cache.size());
cache.put("k", "v");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertNull(cache.remove("no-exist"));
assertEquals(
1, cache.size());
assertNull(cache.put("no-exist", "value"));
assertEquals(2, cache.size());
tm.setRollbackOnly();
return null;
});
}
public void testSizeInExplicitTxWithRemoveExistent() throws Exception {
assertEquals(0, cache.size());
cache.put("k", "v");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertNull(cache.put("exist", "value"));
assertEquals(2, cache.size());
assertEquals("value", cache.remove("exist"));
assertEquals(1, cache.size());
tm.setRollbackOnly();
return null;
});
}
public void testSizeInExplicitTx() throws Exception {
assertEquals(0, cache.size());
cache.put("k", "v");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertEquals(1, cache.size());
tm.setRollbackOnly();
return null;
});
}
public void testSizeInExplicitTxWithModification() throws Exception {
assertEquals(0, cache.size());
cache.put("k1", "v1");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
assertNull(cache.put("k2", "v2"));
assertEquals(2, cache.size());
tm.setRollbackOnly();
return null;
});
}
public void testEntrySetIteratorRemoveInExplicitTx() throws Exception {
assertEquals(0, cache.size());
cache.put("k1", "v1");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
try (CloseableIterator<Entry<Object, Object>> entryIterator = cache.entrySet().iterator()) {
entryIterator.next();
entryIterator.remove();
assertEquals(0, cache.size());
}
tm.setRollbackOnly();
return null;
});
}
public void testKeySetIteratorRemoveInExplicitTx() throws Exception {
assertEquals(0, cache.size());
cache.put("k1", "v1");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
for (CloseableIterator<Object> entryIterator = cache.keySet().iterator(); entryIterator.hasNext(); ) {
entryIterator.next();
entryIterator.remove();
assertEquals(0, cache.size());
}
tm.setRollbackOnly();
return null;
});
}
public void testEntrySetIteratorRemoveContextEntryInExplicitTx() throws Exception {
assertEquals(0, cache.size());
cache.put("k1", "v1");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
// This should be removed by iterator as well as the k1 entry
cache.put("k2", "v2");
assertEquals(2, cache.size());
for (CloseableIterator<Entry<Object, Object>> entryIterator = cache.entrySet().iterator(); entryIterator.hasNext(); ) {
entryIterator.next();
entryIterator.remove();
}
assertEquals(0, cache.size());
tm.setRollbackOnly();
return null;
});
assertEquals(1, cache.size());
}
public void testKeySetIteratorRemoveContextEntryInExplicitTx() throws Exception {
assertEquals(0, cache.size());
cache.put("k1", "v1");
TransactionManager tm = TestingUtil.getTransactionManager(cache);
withTx(tm, () -> {
// This should be removed by iterator as well as the k1 entry
cache.put("k2", "v2");
assertEquals(2, cache.size());
for (CloseableIterator<Object> keyIterator = cache.keySet().iterator(); keyIterator.hasNext(); ) {
keyIterator.next();
keyIterator.remove();
}
assertEquals(0, cache.size());
tm.setRollbackOnly();
return null;
});
assertEquals(1, cache.size());
}
public void testEntrySetForEachNonSerializable() {
assertEquals(0, cache.size());
cache.put("k1", "v1");
List<Object> values = new ArrayList<>();
cache.entrySet().forEach(values::add);
assertEquals(1, values.size());
Map.Entry<Object, Object> entry = (Map.Entry<Object, Object>) values.iterator().next();
assertEquals("k1", entry.getKey());
assertEquals("v1", entry.getValue());
}
public void testKeySetForEachNonSerializable() {
assertEquals(0, cache.size());
cache.put("k1", "v1");
List<Object> values = new ArrayList<>();
cache.keySet().forEach(values::add);
assertEquals(1, values.size());
assertEquals("k1", values.iterator().next());
}
public void testValuesForEachNonSerializable() {
assertEquals(0, cache.size());
cache.put("k1", "v1");
List<Object> values = new ArrayList<>();
cache.values().forEach(values::add);
assertEquals(1, values.size());
assertEquals("v1", values.iterator().next());
}
public void testMultipleWritesSameKeyInTx() throws Exception {
TransactionManager tm = TestingUtil.getTransactionManager(cache);
Object key = "key";
TestingUtil.withTx(tm, () -> {
assertNull(cache.put(key, "value1"));
assertEquals("value1", cache.put(key, "value2"));
assertEquals("value2", cache.put(key, "value3"));
return null;
});
}
}
| 17,245
| 32.038314
| 128
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/GetOnRemovedKeyTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
@Test (groups = "functional", testName = "api.GetOnRemovedKeyTest")
@InCacheMode({ CacheMode.REPL_SYNC, CacheMode.DIST_SYNC })
public class GetOnRemovedKeyTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
createCluster(TestDataSCI.INSTANCE, getDefaultClusteredCacheConfig(cacheMode, true), 2);
waitForClusterToForm();
}
public void testRemoveSeenCorrectly1() throws Throwable {
Object k = getKey();
cache(0).put(k, "v");
tm(0).begin();
cache(0).remove(k);
assertNull(cache(0).get(k));
tm(0).commit();
assertNull(cache(0).get(k));
}
public void testRemoveSeenCorrectly2() throws Throwable {
Object k = getKey();
cache(0).put(k, "v");
tm(0).begin();
cache(0).remove(k);
assertNull(cache(0).get(k));
tm(0).rollback();
assertEquals("v", cache(0).get(k));
}
protected Object getKey() {
return cacheMode.isDistributed() ? getKeyForCache(0) : "k";
}
}
| 1,393
| 29.304348
| 94
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentPessimisticStressTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @author William Burns
* @since 7.0
*/
@Test (groups = "stress", testName = "api.ConditionalOperationsConcurrentPessimisticStressTest")
public class ConditionalOperationsConcurrentPessimisticStressTest extends ConditionalOperationsConcurrentStressTest {
public ConditionalOperationsConcurrentPessimisticStressTest() {
cacheMode = CacheMode.DIST_SYNC;
transactional = true;
lockingMode = LockingMode.PESSIMISTIC;
}
}
| 640
| 29.52381
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/SimpleConditionalOperationTest.java
|
package org.infinispan.api;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.SimpleConditionalOperationTest")
public class SimpleConditionalOperationTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      createCluster(TestDataSCI.INSTANCE, getConfig(), 2);
      waitForClusterToForm();
   }

   protected ConfigurationBuilder getConfig() {
      return getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
   }

   public void testReplaceFromMainOwner() throws Throwable {
      Object key = getKeyForCache(0);
      cache(0).put(key, "0");

      // A conditional replace with a non-matching expected value inside a
      // transaction must leave the entry untouched on both nodes.
      tm(0).begin();
      cache(0).put("kkk", "vvv");
      cache(0).replace(key, "v1", "v2");
      tm(0).commit();
      assertStoredOnBothNodes(key, "0");

      log.trace("here is the interesting replace.");
      // With a matching expected value the replace must be applied everywhere.
      cache(0).replace(key, "0", "1");
      assertStoredOnBothNodes(key, "1");
   }

   public void testRemoveFromMainOwner() {
      Object key = getKeyForCache(0);
      cache(0).put(key, "0");

      // Conditional remove with the wrong value is a no-op.
      cache(0).remove(key, "1");
      assertStoredOnBothNodes(key, "0");

      // Conditional remove with the right value deletes the entry on both nodes.
      cache(0).remove(key, "0");
      assertAbsentOnBothNodes(key);
   }

   public void testPutIfAbsentFromMainOwner() {
      Object key = getKeyForCache(0);
      cache(0).put(key, "0");

      // putIfAbsent on a present key keeps the existing value.
      cache(0).putIfAbsent(key, "1");
      assertStoredOnBothNodes(key, "0");

      // After removal, putIfAbsent installs the new value on both nodes.
      cache(0).remove(key);
      cache(0).putIfAbsent(key, "1");
      assertStoredOnBothNodes(key, "1");
   }

   /** Asserts both nodes' data containers hold {@code expected} under {@code key}. */
   private void assertStoredOnBothNodes(Object key, Object expected) {
      assertEquals(advancedCache(0).getDataContainer().get(key).getValue(), expected);
      assertEquals(advancedCache(1).getDataContainer().get(key).getValue(), expected);
   }

   /** Asserts neither node's data container holds an entry under {@code key}. */
   private void assertAbsentOnBothNodes(Object key) {
      assertNull(advancedCache(0).getDataContainer().get(key));
      assertNull(advancedCache(1).getDataContainer().get(key));
   }
}
| 2,480
| 34.442857
| 79
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/DistributedPessimisticRepeatableReadIsolationTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
/**
 * Verifies repeatable-read isolation semantics on a synchronous distributed
 * cache configured with pessimistic locking. All scenario logic lives in
 * {@link AbstractRepeatableReadIsolationTest}; this subclass only selects the
 * cache mode and locking mode under test.
 *
 * @author Pedro Ruivo
 * @since 6.0
 */
@Test(groups = "functional", testName = "api.DistributedPessimisticRepeatableReadIsolationTest")
public class DistributedPessimisticRepeatableReadIsolationTest extends AbstractRepeatableReadIsolationTest {
   public DistributedPessimisticRepeatableReadIsolationTest() {
      super(CacheMode.DIST_SYNC, LockingMode.PESSIMISTIC);
   }
}
| 548
| 29.5
| 108
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConcurrentOperationsStressTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
 * Stress flavour of {@code ConcurrentOperationsTest}, parameterized via the
 * super constructor with {@code (DIST_SYNC, 3, 4, 300)} — see the parent
 * class for the meaning of those arguments.
 *
 * @author Mircea Markus
 * @author William Burns
 * @since 7.0
 */
@Test(groups = "stress", testName = "api.ConcurrentOperationsStressTest", timeOut = 15*60*1000)
public class ConcurrentOperationsStressTest extends ConcurrentOperationsTest {
   public ConcurrentOperationsStressTest() {
      super(CacheMode.DIST_SYNC, 3, 4, 300);
   }
   // The overrides below delegate straight to the parent implementations;
   // presumably they exist so the methods are re-declared under this class's
   // TestNG annotation (stress group + timeout) — TODO confirm.
   @Override
   public void testNoTimeout() throws Throwable {
      super.testNoTimeout();
   }
   @Override
   public void testNoTimeoutAndCorrectness() throws Throwable {
      super.testNoTimeoutAndCorrectness();
   }
   @Override
   public void testReplace() {
      super.testReplace();
   }
}
| 764
| 22.90625
| 95
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/AsyncWithTxTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

import jakarta.transaction.TransactionManager;

import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
 * Checks that an async put issued inside a transaction completes and returns
 * the previous value.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.1
 */
@Test(groups = "functional", testName = "api.AsyncWithTxTest")
public class AsyncWithTxTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      // Two-node synchronous distributed, transactional cluster.
      ConfigurationBuilder defaultConfig = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      addClusterEnabledCacheManager(defaultConfig);
      addClusterEnabledCacheManager(defaultConfig);
   }

   public void testWithTx() throws Exception {
      TransactionManager transactionManager = TestingUtil.getTransactionManager(cache(0));
      cache(0).put("k", "v1");

      transactionManager.begin();
      CompletableFuture<Object> future = cache(0).putAsync("k", "v2");
      // BUG FIX: the original code evaluated "v1".equals(future.get(...)) and
      // discarded the boolean, so this check could never fail. Assert that the
      // async put returned the previous value.
      assertEquals("v1", future.get(2000, TimeUnit.MILLISECONDS));
      transactionManager.commit();
   }
}
| 1,255
| 33.888889
| 101
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/AbstractRepeatableReadIsolationTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import jakarta.transaction.RollbackException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Verifies REPEATABLE_READ transaction isolation for the basic cache write
 * operations (put/remove/replace and their conditional variants). Each
 * scenario starts a transaction, reads a key, lets a concurrent writer update
 * the same key outside the transaction, re-reads (the transaction must still
 * see its original snapshot), applies a write operation and commits, checking
 * whether the commit succeeds or rolls back depending on the configured
 * locking mode.
 *
 * @author Pedro Ruivo
 * @since 6.0
 */
@Test(groups = "functional")
public abstract class AbstractRepeatableReadIsolationTest extends MultipleCacheManagersTest {
   // Value present before the transaction under test begins (when initialized).
   private static final String INITIAL_VALUE = "init";
   // Value written by the transaction under test.
   private static final String FINAL_VALUE = "final";
   // Value written concurrently, outside the transaction under test.
   private static final String OTHER_VALUE = "other";
   private final CacheMode cacheMode;
   private final LockingMode lockingMode;
   protected AbstractRepeatableReadIsolationTest(CacheMode cacheMode, LockingMode lockingMode) {
      this.cacheMode = cacheMode;
      this.lockingMode = lockingMode;
   }
   // Every test below is a thin wrapper around doIsolationTest(...) or
   // doIsolationTestAfterRemove(...), varying: whether the transaction runs on
   // the key's owner, whether the key is initialized, and the operation used.
   public void testPutTxIsolationInOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(true, true, Operation.PUT);
   }
   public void testPutTxIsolationInOwnerWithKeyNoInitialized() throws Exception {
      doIsolationTest(true, false, Operation.PUT);
   }
   public void testPutTxIsolationInNonOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(false, true, Operation.PUT);
   }
   public void testPutTxIsolationInNonOwnerWithKeyNonInitialized() throws Exception {
      doIsolationTest(false, false, Operation.PUT);
   }
   public void testRemoveTxIsolationInOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(true, true, Operation.REMOVE);
   }
   public void testRemoveTxIsolationInOwnerWithKeyNoInitialized() throws Exception {
      doIsolationTest(true, false, Operation.REMOVE);
   }
   public void testRemoveTxIsolationInNonOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(false, true, Operation.REMOVE);
   }
   public void testRemoveTxIsolationInNonOwnerWithKeyNonInitialized() throws Exception {
      doIsolationTest(false, false, Operation.REMOVE);
   }
   public void testReplaceTxIsolationInOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(true, true, Operation.REPLACE);
   }
   public void testReplaceTxIsolationInOwnerWithKeyNoInitialized() throws Exception {
      doIsolationTest(true, false, Operation.REPLACE);
   }
   public void testReplaceTxIsolationInNonOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(false, true, Operation.REPLACE);
   }
   public void testReplaceTxIsolationInNonOwnerWithKeyNonInitialized() throws Exception {
      doIsolationTest(false, false, Operation.REPLACE);
   }
   public void testConditionalPutTxIsolationInOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(true, true, Operation.CONDITIONAL_PUT);
   }
   public void testConditionalPutTxIsolationInOwnerWithKeyNoInitialized() throws Exception {
      doIsolationTest(true, false, Operation.CONDITIONAL_PUT);
   }
   public void testConditionalPutTxIsolationInNonOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(false, true, Operation.CONDITIONAL_PUT);
   }
   public void testConditionalPutTxIsolationInNonOwnerWithKeyNonInitialized() throws Exception {
      doIsolationTest(false, false, Operation.CONDITIONAL_PUT);
   }
   public void testConditionalRemoveTxIsolationInOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(true, true, Operation.CONDITIONAL_REMOVE);
   }
   public void testConditionalRemoveTxIsolationInOwnerWithKeyNoInitialized() throws Exception {
      doIsolationTest(true, false, Operation.CONDITIONAL_REMOVE);
   }
   public void testConditionalRemoveTxIsolationInNonOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(false, true, Operation.CONDITIONAL_REMOVE);
   }
   public void testConditionalRemoveTxIsolationInNonOwnerWithKeyNonInitialized() throws Exception {
      doIsolationTest(false, false, Operation.CONDITIONAL_REMOVE);
   }
   public void testConditionalReplaceTxIsolationInOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(true, true, Operation.CONDITIONAL_REPLACE);
   }
   public void testConditionalReplaceTxIsolationInOwnerWithKeyNoInitialized() throws Exception {
      doIsolationTest(true, false, Operation.CONDITIONAL_REPLACE);
   }
   public void testConditionalReplaceTxIsolationInNonOwnerWithKeyInitialized() throws Exception {
      doIsolationTest(false, true, Operation.CONDITIONAL_REPLACE);
   }
   public void testConditionalReplaceTxIsolationInNonOwnerWithKeyNonInitialized() throws Exception {
      doIsolationTest(false, false, Operation.CONDITIONAL_REPLACE);
   }
   public void testPutTxIsolationAfterRemoveInOwner() throws Exception {
      doIsolationTestAfterRemove(true, Operation.PUT);
   }
   public void testPutTxIsolationAfterRemoveInNonOwner() throws Exception {
      doIsolationTestAfterRemove(false, Operation.PUT);
   }
   public void testRemoveTxIsolationAfterRemoveInOwner() throws Exception {
      doIsolationTestAfterRemove(true, Operation.REMOVE);
   }
   public void testRemoveTxIsolationAfterRemoveInNonOwner() throws Exception {
      doIsolationTestAfterRemove(false, Operation.REMOVE);
   }
   public void testReplaceTxIsolationAfterRemoveInOwner() throws Exception {
      doIsolationTestAfterRemove(true, Operation.REPLACE);
   }
   public void testReplaceTxIsolationAfterRemoveInNonOwner() throws Exception {
      doIsolationTestAfterRemove(false, Operation.REPLACE);
   }
   public void testConditionalPutTxIsolationAfterRemoveInOwner() throws Exception {
      doIsolationTestAfterRemove(true, Operation.CONDITIONAL_PUT);
   }
   public void testConditionalPutTxIsolationAfterRemoveInNonOwner() throws Exception {
      doIsolationTestAfterRemove(false, Operation.CONDITIONAL_PUT);
   }
   public void testConditionalRemoveTxIsolationAfterRemoveInOwner() throws Exception {
      doIsolationTestAfterRemove(true, Operation.CONDITIONAL_REMOVE);
   }
   public void testConditionalRemoveTxIsolationAfterRemoveInNonOwner() throws Exception {
      doIsolationTestAfterRemove(false, Operation.CONDITIONAL_REMOVE);
   }
   public void testConditionalReplaceTxIsolationAfterRemoveInOwner() throws Exception {
      doIsolationTestAfterRemove(true, Operation.CONDITIONAL_REPLACE);
   }
   public void testConditionalReplaceTxIsolationAfterRemoveInNonOwner() throws Exception {
      doIsolationTestAfterRemove(false, Operation.CONDITIONAL_REPLACE);
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      // Two nodes with a single owner per key, so the owner vs. non-owner
      // scenarios exercise genuinely different code paths.
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(cacheMode, true);
      builder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      builder.transaction().lockingMode(lockingMode);
      builder.clustering().hash().numOwners(1);
      createClusteredCaches(2, TestDataSCI.INSTANCE, builder);
   }
   /**
    * Runs the isolation scenario: read the key inside a transaction, let a
    * concurrent writer store {@code OTHER_VALUE} outside the transaction,
    * re-read (must still observe the first value), apply {@code operation}
    * and commit. The switch below also records, per operation, the expected
    * final value and whether an optimistic commit is expected to roll back.
    *
    * @param executeOnOwner whether the transaction runs on the key's owner node
    * @param initialized    whether the key holds {@code INITIAL_VALUE} beforehand
    * @param operation      the write operation exercised inside the transaction
    */
   private void doIsolationTest(boolean executeOnOwner, boolean initialized, Operation operation) throws Exception {
      final Cache<Object, Object> ownerCache = cache(0);
      final Object key = new MagicKey("shared", ownerCache);
      final Cache<Object, Object> cache = executeOnOwner ? cache(0) : cache(1);
      final TransactionManager tm = executeOnOwner ? tm(0) : tm(1);
      assertValueInAllCaches(key, null);
      final Object initValue = initialized ? INITIAL_VALUE : null;
      if (initialized) {
         ownerCache.put(key, initValue);
         assertValueInAllCaches(key, initValue);
      }
      tm.begin();
      assertEquals("Wrong first get.", initValue, cache.get(key));
      // Suspend the transaction and perform a conflicting write outside it.
      Transaction tx = tm.suspend();
      ownerCache.put(key, OTHER_VALUE);
      assertValueInAllCaches(key, OTHER_VALUE);
      Object finalValueExpected = null;
      boolean commitFails = false;
      tm.resume(tx);
      // Repeatable read: the transaction must still see its original snapshot.
      assertEquals("Wrong second get.", initValue, cache.get(key));
      switch (operation) {
         case PUT:
            finalValueExpected = FINAL_VALUE;
            commitFails = lockingMode == LockingMode.OPTIMISTIC;
            assertEquals("Wrong put return value.", initValue, cache.put(key, FINAL_VALUE));
            assertEquals("Wrong final get.", FINAL_VALUE, cache.get(key));
            break;
         case REMOVE:
            finalValueExpected = null;
            commitFails = lockingMode == LockingMode.OPTIMISTIC;
            assertEquals("Wrong remove return value.", initValue, cache.remove(key));
            assertEquals("Wrong final get.", null, cache.get(key));
            break;
         case REPLACE:
            finalValueExpected = initialized ? FINAL_VALUE : OTHER_VALUE;
            commitFails = lockingMode == LockingMode.OPTIMISTIC && initialized;
            assertEquals("Wrong replace return value.", initValue, cache.replace(key, FINAL_VALUE));
            assertEquals("Wrong final get.", initialized ? FINAL_VALUE : null, cache.get(key));
            break;
         case CONDITIONAL_PUT:
            finalValueExpected = initialized ? OTHER_VALUE : FINAL_VALUE;
            commitFails = lockingMode == LockingMode.OPTIMISTIC && !initialized;
            assertEquals("Wrong put return value.", initialized ? initValue : null, cache.putIfAbsent(key, FINAL_VALUE));
            assertEquals("Wrong final get.", initialized ? initValue : FINAL_VALUE, cache.get(key));
            break;
         case CONDITIONAL_REMOVE:
            finalValueExpected = initialized ? null : OTHER_VALUE;
            commitFails = lockingMode == LockingMode.OPTIMISTIC && initialized;
            assertEquals("Wrong remove return value.", initialized, cache.remove(key, INITIAL_VALUE));
            assertEquals("Wrong final get.", null, cache.get(key));
            break;
         case CONDITIONAL_REPLACE:
            finalValueExpected = initialized ? FINAL_VALUE : OTHER_VALUE;
            commitFails = lockingMode == LockingMode.OPTIMISTIC && initialized;
            assertEquals("Wrong replace return value.", initialized, cache.replace(key, INITIAL_VALUE, FINAL_VALUE));
            assertEquals("Wrong final get.", initialized ? FINAL_VALUE : null, cache.get(key));
            break;
         default:
            fail("Unknown operation " + operation);
            break;
      }
      // An optimistic transaction whose write conflicts with the concurrent
      // update is expected to roll back at commit time.
      if (commitFails) {
         Exceptions.expectException(RollbackException.class, tm::commit);
      } else {
         tm.commit();
      }
      // Pessimistic: the transaction's outcome wins; optimistic: the
      // concurrent writer's value survives.
      assertValueInAllCaches(key, lockingMode == LockingMode.PESSIMISTIC ? finalValueExpected : OTHER_VALUE);
      assertNoTransactions();
   }
   /**
    * Same scenario as {@link #doIsolationTest}, except the transaction removes
    * the key before applying {@code operation}, so the operation always
    * observes a missing entry. The key is always initialized here, and an
    * optimistic commit is always expected to roll back.
    *
    * @param executeOnOwner whether the transaction runs on the key's owner node
    * @param operation      the write operation applied after the in-tx remove
    */
   private void doIsolationTestAfterRemove(boolean executeOnOwner, Operation operation) throws Exception {
      final Cache<Object, Object> ownerCache = cache(0);
      final Object key = new MagicKey("shared", ownerCache);
      final Cache<Object, Object> cache = executeOnOwner ? cache(0) : cache(1);
      final TransactionManager tm = executeOnOwner ? tm(0) : tm(1);
      assertValueInAllCaches(key, null);
      final Object initValue = INITIAL_VALUE;
      ownerCache.put(key, initValue);
      assertValueInAllCaches(key, initValue);
      tm.begin();
      assertEquals("Wrong first get.", initValue, cache.get(key));
      // Suspend the transaction and perform a conflicting write outside it.
      Transaction tx = tm.suspend();
      ownerCache.put(key, OTHER_VALUE);
      assertValueInAllCaches(key, OTHER_VALUE);
      Object finalValueExpected = null;
      boolean commitFails = lockingMode == LockingMode.OPTIMISTIC;
      tm.resume(tx);
      assertEquals("Wrong second get.", initValue, cache.get(key));
      // Remove inside the transaction: every operation below sees no entry.
      assertEquals("Wrong value after remove.", initValue, cache.remove(key));
      switch (operation) {
         case PUT:
            finalValueExpected = FINAL_VALUE;
            assertEquals("Wrong put return value.", null, cache.put(key, FINAL_VALUE));
            assertEquals("Wrong final get.", FINAL_VALUE, cache.get(key));
            break;
         case REMOVE:
            finalValueExpected = null;
            assertEquals("Wrong remove return value.", null, cache.remove(key));
            assertEquals("Wrong final get.", null, cache.get(key));
            break;
         case REPLACE:
            finalValueExpected = null;
            assertEquals("Wrong replace return value.", null, cache.replace(key, FINAL_VALUE));
            assertEquals("Wrong final get.", null, cache.get(key));
            break;
         case CONDITIONAL_PUT:
            finalValueExpected = FINAL_VALUE;
            assertEquals("Wrong put return value.", null, cache.putIfAbsent(key, FINAL_VALUE));
            assertEquals("Wrong final get.", FINAL_VALUE, cache.get(key));
            break;
         case CONDITIONAL_REMOVE:
            finalValueExpected = null;
            assertEquals("Wrong remove return value.", false, cache.remove(key, INITIAL_VALUE));
            assertEquals("Wrong final get.", null, cache.get(key));
            break;
         case CONDITIONAL_REPLACE:
            finalValueExpected = null;
            assertEquals("Wrong replace return value.", false, cache.replace(key, INITIAL_VALUE, FINAL_VALUE));
            assertEquals("Wrong final get.", null, cache.get(key));
            break;
         default:
            fail("Unknown operation " + operation);
            break;
      }
      if (commitFails) {
         Exceptions.expectException(RollbackException.class, tm::commit);
      } else {
         tm.commit();
      }
      assertValueInAllCaches(key, lockingMode == LockingMode.PESSIMISTIC ? finalValueExpected : OTHER_VALUE);
      assertNoTransactions();
   }
   // Asserts the key maps to the given value on every node in the cluster.
   private void assertValueInAllCaches(final Object key, final Object value) {
      for (Cache<Object, Object> cache : caches()) {
         assertEquals("Wrong value.", value, cache.get(key));
      }
   }
   /** Write operations exercised by the isolation scenarios. */
   private enum Operation {
      PUT, REMOVE, REPLACE,
      CONDITIONAL_PUT, CONDITIONAL_REMOVE, CONDITIONAL_REPLACE
   }
}
| 14,227
| 39.535613
| 121
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ForceWriteLockTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertTrue;
import jakarta.transaction.TransactionManager;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.TransactionTable;
import org.testng.annotations.Test;
/**
 * Ensures that a read performed with {@code Flag.FORCE_WRITE_LOCK} inside a
 * pessimistic transaction acquires the write lock on the key, and that the
 * lock is released again after commit.
 *
 * @author Mircea.Markus@jboss.com
 */
@Test(groups = "functional", testName = "api.ForceWriteLockTest")
public class ForceWriteLockTest extends SingleCacheManagerTest {

   private TransactionManager tm;
   private AdvancedCache<String, String> advancedCache;

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      // Single-node transactional cache with pessimistic locking.
      ConfigurationBuilder builder = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      builder.transaction().lockingMode(LockingMode.PESSIMISTIC);
      EmbeddedCacheManager manager = TestCacheManagerFactory.createCacheManager(builder);
      advancedCache = manager.<String, String>getCache().getAdvancedCache();
      tm = TestingUtil.getTransactionManager(advancedCache);
      return manager;
   }

   public void testWriteLockIsAcquired() throws Exception {
      advancedCache.put("k", "v");
      assertEventuallyNotLocked(advancedCache, "k");

      tm.begin();
      // A plain get issued with FORCE_WRITE_LOCK must lock the key for this transaction.
      advancedCache.withFlags(Flag.FORCE_WRITE_LOCK).get("k");
      TransactionTable transactionTable = advancedCache.getComponentRegistry().getComponent(TransactionTable.class);
      LocalTransaction localTx = transactionTable.getLocalTransaction(tm.getTransaction());
      assertTrue(localTx.ownsLock("k"));
      assertLocked(advancedCache, "k");
      tm.commit();

      // After commit the lock must eventually be released.
      assertEventuallyNotLocked(advancedCache, "k");
   }
}
| 2,050
| 38.442308
| 107
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/TerminatedCacheTest.java
|
package org.infinispan.api;
import org.infinispan.Cache;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Verifies the behaviour of Cache and CacheContainer.getCache() after the
 * Cache instance has been stopped. This emulates redeployment scenarios in
 * which the CacheContainer is a shared resource: any operation on (or
 * re-acquisition of) a stopped cache must fail with
 * {@link IllegalLifecycleStateException}.
 *
 * @author Galder Zamarreño
 * @since 4.2
 */
@Test(groups = "functional", testName = "api.TerminatedCacheTest")
public class TerminatedCacheTest extends SingleCacheManagerTest {

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      return TestCacheManagerFactory.createCacheManager(false);
   }

   @Test(expectedExceptions = IllegalLifecycleStateException.class)
   public void testCacheStopFollowedByGetCache() {
      Cache<String, String> original = cacheManager.getCache();
      original.put("k", "v");
      original.stop();
      // Re-fetching the cache must not resurrect the stopped instance;
      // using it is expected to throw.
      Cache<String, String> refetched = cacheManager.getCache();
      refetched.put("k", "v2");
   }

   @Test(expectedExceptions = IllegalLifecycleStateException.class)
   public void testCacheStopFollowedByCacheOp() {
      cacheManager.defineConfiguration("big", cacheManager.getDefaultCacheConfiguration());
      Cache<String, String> cache = cacheManager.getCache("big");
      cache.put("k", "v");
      cache.stop();
      // Any further operation on the stopped cache is expected to throw.
      cache.put("k", "v2");
   }
}
| 1,592
| 35.204545
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/SizeOptimizationTests.java
|
package org.infinispan.api;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.Assert.assertEquals;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.AbstractDelegatingInternalDataContainer;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.reactive.publisher.impl.ClusterPublisherManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.reactivestreams.Publisher;
import org.testng.annotations.Test;
/**
 * Verifies that {@code Cache.size()} returns the correct value without
 * iterating the data container when the cache configuration allows an
 * optimized path (shared store, segmented private store, or no store at
 * all). Each {@link Optimization} constant configures one scenario and
 * checks it by swapping in a data container / publisher manager that fails
 * the test if the non-optimized path is used.
 */
@Test(groups = "functional", testName = "api.SizeOptimizationTests")
public class SizeOptimizationTests extends MultipleCacheManagersTest {
   private static final String CACHE_NAME = "SizeOptimizationsTest";
   // Number of entries inserted by testSizeReturnsCorrectly.
   private static final int ENTRIES_SIZE = 42;
   // The scenario under test; set by the factory via optimization(...).
   private Optimization optimization;
   // Fluent setter used by factory() to parameterize each instance.
   public SizeOptimizationTests optimization(Optimization optimization) {
      this.optimization = optimization;
      return this;
   }
   enum Optimization {
      /**
       * If the store is not asynchronous and is shared we can optimize to directly read the store size.
       * Using this optimization, the default implementation should never be used.
       */
      SHARED {
         @Override
         public ConfigurationBuilder configure(ConfigurationBuilder builder) {
            builder.persistence()
                  .passivation(false)
                  .addStore(DummyInMemoryStoreConfigurationBuilder.class)
                  .storeName(name())
                  .shared(true)
                  .async()
                  .disable();
            return builder;
         }
         @Override
         public void verify(Cache<Object, Object> cache, SizeOptimizationTests test) throws Exception {
            neverCallDefault(cache);
            replaceDataContainerNotIterable(cache);
            assertEquals(cache.size(), ENTRIES_SIZE);
            int halfEntries = ENTRIES_SIZE / 2;
            for (int i = 0; i < halfEntries; i++) {
               cache.remove("key-" + i);
            }
            assertEquals(cache.size(), halfEntries);
         }
      },
      /**
       * If the store is private, segmented, and we have the CACHE_MODE_LOCAL flag set, then we can optimize
       * to directly call the container size. Using this optimization the default implementation should never be used.
       */
      SEGMENTED {
         @Override
         public ConfigurationBuilder configure(ConfigurationBuilder builder) {
            builder.persistence()
                  .passivation(false)
                  .addStore(DummyInMemoryStoreConfigurationBuilder.class)
                  .storeName(name())
                  .shared(false)
                  .segmented(true)
                  .async().disable()
                  .clustering()
                  .hash().numSegments(3);
            return builder;
         }
         @Override
         public void verify(Cache<Object, Object> cache, SizeOptimizationTests test) {
            neverCallDefault(cache);
            final Cache<Object, Object> localCache = cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL);
            // We retrieve the entries using __only__ the local cache and use it to verify the size.
            // Do not use the constructor passing the original set because that will call the .size() method.
            Set<Object> beforeEntries = new HashSet<>(ENTRIES_SIZE);
            beforeEntries.addAll(localCache.entrySet());
            // Replace the container only when verifying the size.
            // We can not replace all because we retrieve the key set.
            brieflyReplaceDataContainerNotIterable(localCache, () -> {
               assertEquals(localCache.size(), beforeEntries.size());
               return null;
            });
            int halfEntries = ENTRIES_SIZE / 2;
            for (int i = 0; i < halfEntries; i++) {
               cache.remove("key-" + i);
            }
            Set<Object> afterEntries = new HashSet<>(halfEntries);
            afterEntries.addAll(localCache.entrySet());
            brieflyReplaceDataContainerNotIterable(localCache, () -> {
               assertEquals(localCache.size(), afterEntries.size());
               return null;
            });
         }
      },
      /**
       * If we are not using any store, do not have any entries that expire, and have the CACHE_MODE_LOCAL flag set,
       * we can directly call the container size method. Here the test is a little longer because we verify that the
       * optimization is called only when the entries with lifetime are removed.
       */
      NO_STORE {
         @Override
         public ConfigurationBuilder configure(ConfigurationBuilder builder) {
            builder.persistence()
                  .passivation(false)
                  .clearStores();
            builder.transaction()
                  .lockingMode(LockingMode.OPTIMISTIC)
                  .transactionManagerLookup(new EmbeddedTransactionManagerLookup());
            return builder;
         }
         @Override
         public void verify(Cache<Object, Object> cache, SizeOptimizationTests test) throws Exception {
            final CheckPoint checkPoint = new CheckPoint();
            final Cache<Object, Object> localCache = cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL);
            // Allow the default (cluster-wide) path to be invoked exactly once.
            waitDefaultCalledMaxTo(localCache, checkPoint, 1);
            // We do a cluster wide search here, since we have entries with expiration set, the optimization is
            // not triggered.
            Future<Void> verifySize = test.fork(() -> {
               // Execution in a transaction skip the distributed optimization.
               TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
               tm.begin();
               try {
                  cache.put("key-tx", "value-tx");
                  assertEquals(cache.size(), ENTRIES_SIZE + 1);
                  cache.remove("key-tx");
               } finally {
                  tm.rollback();
               }
            });
            checkPoint.awaitStrict("default_invoked_done_" + localCache, 10, TimeUnit.SECONDS);
            checkPoint.trigger("default_invoked_done_proceed_" + localCache, 1);
            verifySize.get(10, TimeUnit.SECONDS);
            // Remove the odd-indexed entries (the ones inserted with a lifespan).
            for (int i = 0; i < ENTRIES_SIZE; i++) {
               if ((i & 1) == 1) {
                  localCache.remove("key-" + i);
               }
            }
            // We retrieve the entries using __only__ the local cache and use it to verify the size.
            // Do not use the constructor passing the original set because that will call the .size() method.
            Set<Object> entries = new HashSet<>(ENTRIES_SIZE / 2);
            entries.addAll(localCache.entrySet());
            replaceDataContainerNotIterable(cache);
            assertEquals(localCache.size(), entries.size());
         }
      },
      ;
      // Applies the scenario's cache configuration.
      public abstract ConfigurationBuilder configure(ConfigurationBuilder builder);
      // Runs the scenario's size checks against the populated cache.
      public abstract void verify(Cache<Object, Object> cache, SizeOptimizationTests test) throws Exception;
   }
   @Override public Object[] factory() {
      return Arrays.stream(Optimization.values())
            .map(o -> new SizeOptimizationTests().optimization(o))
            .toArray();
   }
   @Override
   protected Object[] parameterValues() {
      return new Object[] { optimization };
   }
   @Override
   protected String[] parameterNames() {
      return new String[] { "optimization" };
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
      createClusteredCaches(3, CACHE_NAME, optimization.configure(builder));
   }
   public void testSizeReturnsCorrectly() throws Exception {
      final Cache<Object, Object> cache = cache(0, CACHE_NAME);
      // Odd-indexed keys get a lifespan; even-indexed keys are immortal.
      for (int i = 0; i < ENTRIES_SIZE; i++) {
         if ((i & 1) == 1) {
            cache.put("key-" + i, "v" + i, 30, TimeUnit.SECONDS);
         } else {
            cache.put("key-" + i, "v" + i);
         }
      }
      optimization.verify(cache, this);
   }
   // Mocks the cache's ClusterPublisherManager so the default (cluster-wide)
   // size path may run at most maxCalls times; each completed call signals the
   // checkpoint and parks until the test lets it proceed.
   private static void waitDefaultCalledMaxTo(final Cache<?, ?> cache, final CheckPoint checkPoint, int maxCalls) {
      final AtomicInteger executionTimes = new AtomicInteger(maxCalls);
      createMocking(cache, original -> invocation -> {
         if (executionTimes.getAndDecrement() == 0) {
            throw new TestException("Called more than " + maxCalls + " times to " + invocation.getMethod().getName());
         }
         CompletionStage<Object> result = ((CompletionStage<Object>) original.answer(invocation));
         result.thenRun(() -> {
            checkPoint.trigger("default_invoked_done_" + cache, 1);
            try {
               checkPoint.awaitStrict("default_invoked_done_proceed_" + cache, 10, TimeUnit.SECONDS);
            } catch (InterruptedException | TimeoutException e) {
               throw new TestException(e);
            }
         });
         return result;
      });
   }
   // Fails the test if the default cluster-wide size path is invoked at all.
   private static void neverCallDefault(final Cache<?, ?> cache) {
      waitDefaultCalledMaxTo(cache, null, 0);
   }
   // Replaces the cache's ClusterPublisherManager with a delegating mock whose
   // keyReduction(...) invocation is routed through the supplied forward function.
   private static void createMocking(final Cache<?, ?> cache, Function<Answer<Object>, Answer<?>> forward) {
      ClusterPublisherManager<?, ?> cpm = TestingUtil.extractComponent(cache, ClusterPublisherManager.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(cpm);
      ClusterPublisherManager<?, ?> mockCpm = mock(ClusterPublisherManager.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(forward.apply(forwardedAnswer))
            .when(mockCpm).keyReduction(anyBoolean(), any(), any(), any(), anyLong(), any(), any(), any());
      TestingUtil.replaceComponent(cache, ClusterPublisherManager.class, mockCpm, true);
   }
   // Installs a non-iterable data container only for the duration of callable,
   // restoring the original container afterwards.
   private static void brieflyReplaceDataContainerNotIterable(final Cache<?, ?> cache, Callable<Void> callable) {
      IDCNotIterable controlled = new IDCNotIterable(cache);
      TestingUtil.replaceComponent(cache, InternalDataContainer.class, controlled, true);
      try {
         callable.call();
      } catch (Exception e) {
         throw new TestException("Failed on callable", e);
      } finally {
         TestingUtil.replaceComponent(cache, InternalDataContainer.class, controlled.current, true);
      }
   }
   // Permanently installs a non-iterable data container on the cache.
   private static void replaceDataContainerNotIterable(final Cache<?, ?> cache) {
      InternalDataContainer<?, ?> controlled = new IDCNotIterable(cache);
      TestingUtil.replaceComponent(cache, InternalDataContainer.class, controlled, true);
   }
   /**
    * Data-container stand-in that fails the test if any iteration or publisher
    * method is invoked, delegating every other operation to the container it
    * replaced.
    */
   static class IDCNotIterable extends AbstractDelegatingInternalDataContainer {
      // The real container being wrapped; restored by brieflyReplaceDataContainerNotIterable.
      final InternalDataContainer<?, ?> current;
      IDCNotIterable(Cache<?, ?> cache) {
         this.current = TestingUtil.extractComponent(cache, InternalDataContainer.class);
      }
      @Override
      protected InternalDataContainer<?, ?> delegate() {
         return current;
      }
      @Override
      public Iterator<InternalCacheEntry<?, ?>> iterator() {
         throw new TestException("Should not call iterator");
      }
      @Override
      public Iterator<InternalCacheEntry<?, ?>> iterator(IntSet segments) {
         throw new TestException("Should not call iterator");
      }
      @Override
      public Iterator<InternalCacheEntry<?, ?>> iteratorIncludingExpired() {
         throw new TestException("Should not call iterator");
      }
      @Override
      public Iterator<InternalCacheEntry<?, ?>> iteratorIncludingExpired(IntSet segments) {
         throw new TestException("Should not call iterator");
      }
      @Override
      public Publisher<InternalCacheEntry> publisher(int segment) {
         throw new TestException("Should not call publisher");
      }
      @Override
      public Publisher<InternalCacheEntry> publisher(IntSet segments) {
         throw new TestException("Should not call publisher");
      }
   }
}
| 13,365
| 38.779762
| 129
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentWriteSkewStressTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
 * Stress flavour of {@code ConditionalOperationsConcurrentStressTest} run on
 * a transactional DIST_SYNC cache with write-skew checking enabled.
 *
 * @author Mircea Markus
 * @author William Burns
 * @since 7.0
 */
@Test(groups = "stress", testName = "api.ConditionalOperationsConcurrentWriteSkewStressTest", timeOut = 15*60*1000)
public class ConditionalOperationsConcurrentWriteSkewStressTest extends ConditionalOperationsConcurrentStressTest {
   public ConditionalOperationsConcurrentWriteSkewStressTest() {
      cacheMode = CacheMode.DIST_SYNC;
      transactional = true;
      writeSkewCheck = true;
   }
}
| 592
| 28.65
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/AsyncAPITest.java
|
package org.infinispan.api;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commons.time.TimeService;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.versioning.NumericVersion;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.util.ControlledTimeService;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.AsyncAPITest")
public class AsyncAPITest extends SingleCacheManagerTest {
private Cache<String, String> c;
private ControlledTimeService timeService = new ControlledTimeService();
private Long startTime;
@BeforeMethod
public void clearCache() {
   // Every test method starts from an empty cache.
   c.clear();
}
@Override
protected EmbeddedCacheManager createCacheManager() throws Exception {
   EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(false);
   // Swap in the controlled TimeService so expiration can be advanced deterministically.
   TestingUtil.replaceComponent(cm, TimeService.class, timeService, true);
   c = cm.getCache();
   return cm;
}
public void testGetAsyncWhenKeyIsNotPresent() throws Exception {
   // Asking for a missing key asynchronously completes with null.
   CompletableFuture<String> missing = c.getAsync("k");
   assertFutureResult(missing, null);
   // The synchronous view agrees: still no mapping.
   assertNull(c.get("k"));
}
public void testGetAsyncAfterPut() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.getAsync("k");
assertFutureResult(f, "v");
}
public void testGetAllAsync() throws Exception {
c.put("key-one-get", "one");
c.put("key-two-get", "two");
c.put("key-three-get", "three");
Set<String> keys = new HashSet<>();
keys.add("key-one-get");
keys.add("key-two-get");
keys.add("key-three-get");
CompletableFuture<Map<String, String>> getAllF = c.getAllAsync(keys);
assertNotNull(getAllF);
assertFalse(getAllF.isCancelled());
Map<String, String> resultAsMap = getAllF.get();
assertNotNull(resultAsMap);
assertEquals("one", resultAsMap.get("key-one-get"));
assertEquals("two", resultAsMap.get("key-two-get"));
assertEquals("three", resultAsMap.get("key-three-get"));
assertTrue(getAllF.isDone());
}
public void testPutAsync() throws Exception {
   // First put: there is no previous value, so the future completes with null.
   CompletableFuture<String> put = c.putAsync("k", "v1");
   assertFutureResult(put, null);
   assertEquals("v1", c.get("k"));
   // Second put: the future carries the value that was overwritten.
   put = c.putAsync("k", "v2");
   assertFutureResult(put, "v1");
   assertEquals("v2", c.get("k"));
}
public void testPutAsyncEntry() throws Exception {
Metadata metadata = new EmbeddedMetadata.Builder()
.version(new NumericVersion(1))
.lifespan(25_000)
.maxIdle(30_000)
.build();
CompletableFuture<CacheEntry<String, String>> f = c.getAdvancedCache().putAsyncEntry("k", "v1", metadata);
assertFutureResult(f, null);
assertEquals("v1", c.get("k"));
Metadata updatedMetadata = new EmbeddedMetadata.Builder()
.version(new NumericVersion(2))
.lifespan(35_000)
.maxIdle(42_000)
.build();
f = c.getAdvancedCache().putAsyncEntry("k", "v2", updatedMetadata);
assertFutureResultOn(f, previousEntry -> {
assertEquals("k", previousEntry.getKey());
assertEquals("v1", previousEntry.getValue());
assertNotNull(previousEntry.getMetadata());
assertMetadata(metadata, previousEntry.getMetadata());
});
assertFutureResultOn(c.getAdvancedCache().getCacheEntryAsync("k"), currentEntry -> {
assertEquals("k", currentEntry.getKey());
assertEquals("v2", currentEntry.getValue());
assertNotNull(currentEntry.getMetadata());
assertMetadata(updatedMetadata, currentEntry.getMetadata());
});
}
public void testPutAllAsyncSingleKeyValue() throws Exception {
CompletableFuture<Void> f = c.putAllAsync(Collections.singletonMap("k", "v"));
assertFutureResult(f, null);
assertEquals("v", c.get("k"));
}
public void testPutAllAsyncMultipleKeyValue() throws Exception {
Map<String, String> map = new HashMap<>();
map.put("one-key", "one");
map.put("two-key", "two");
CompletableFuture<Void> putAllF = c.putAllAsync(map);
assertFutureResult(putAllF, null);
assertEquals("one", c.get("one-key"));
assertEquals("two", c.get("two-key"));
}
public void testPutIfAbsentAsync() throws Exception {
CompletableFuture<String> f = c.putIfAbsentAsync("k", "v1");
assertFutureResult(f, null);
assertEquals("v1", c.get("k"));
f = c.putIfAbsentAsync("k", "v2");
assertFutureResult(f, "v1");
assertEquals("v1", c.get("k"));
}
public void testPutIfAbsentAsyncEntry() throws Exception {
Metadata metadata = new EmbeddedMetadata.Builder()
.version(new NumericVersion(1))
.lifespan(25_000)
.maxIdle(30_000)
.build();
CompletableFuture<CacheEntry<String, String>> f = c.getAdvancedCache().putIfAbsentAsyncEntry("k", "v1", metadata);
assertFutureResult(f, null);
assertEquals("v1", c.get("k"));
Metadata updatedMetadata = new EmbeddedMetadata.Builder()
.version(new NumericVersion(2))
.lifespan(35_000)
.maxIdle(42_000)
.build();
f = c.getAdvancedCache().putIfAbsentAsyncEntry("k", "v2", updatedMetadata);
assertFutureResultOn(f, previousEntry -> {
assertEquals("k", previousEntry.getKey());
assertEquals("v1", previousEntry.getValue());
assertMetadata(metadata, previousEntry.getMetadata());
});
assertFutureResultOn(c.getAdvancedCache().getCacheEntryAsync("k"), currentEntry -> {
assertEquals("k", currentEntry.getKey());
assertEquals("v1", currentEntry.getValue());
assertNotNull(currentEntry.getMetadata());
assertMetadata(metadata, currentEntry.getMetadata());
});
}
public void testRemoveAsync() throws Exception {
   c.put("k", "v");
   assertEquals("v", c.get("k"));
   // Removing an existing key completes with the removed value.
   CompletableFuture<String> removal = c.removeAsync("k");
   assertFutureResult(removal, "v");
   assertNull(c.get("k"));
   // Removing an absent key is a no-op that completes with null.
   assertFutureResult(c.removeAsync("k"), null);
}
public void testRemoveAsyncEntry() throws Exception {
Metadata metadata = new EmbeddedMetadata.Builder()
.version(new NumericVersion(1))
.lifespan(25_000)
.maxIdle(30_000)
.build();
assertFutureResult(c.getAdvancedCache().putAsync("k", "v", metadata), null);
assertFutureResultOn(c.getAdvancedCache().getCacheEntryAsync("k"), currentEntry -> {
assertEquals("k", currentEntry.getKey());
assertEquals("v", currentEntry.getValue());
assertNotNull(currentEntry.getMetadata());
assertMetadata(metadata, currentEntry.getMetadata());
});
CompletableFuture<CacheEntry<String, String>> f = c.getAdvancedCache().removeAsyncEntry("k");
assertFutureResultOn(f, previousEntry -> {
assertEquals("k", previousEntry.getKey());
assertEquals("v", previousEntry.getValue());
assertMetadata(metadata, previousEntry.getMetadata());
});
assertNull(c.get("k"));
f = c.getAdvancedCache().removeAsyncEntry("k");
assertFutureResult(f, null);
}
public void testRemoveConditionalAsync() throws Exception {
c.put("k", "v");
Future<Boolean> f = c.removeAsync("k", "v_nonexistent");
assertFutureResult(f, false);
assertEquals("v", c.get("k"));
f = c.removeAsync("k", "v");
assertFutureResult(f, true);
assertNull(c.get("k"));
}
public void testReplaceAsyncNonExistingKey() throws Exception {
CompletableFuture<String> f = c.replaceAsync("k", "v");
assertFutureResult(f, null);
assertNull(c.get("k"));
}
public void testReplaceAsyncExistingKey() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.replaceAsync("k", "v2");
assertFutureResult(f, "v");
assertEquals("v2", c.get("k"));
}
public void testReplaceAsyncEntryNonExistingKey() throws Exception {
Metadata metadata = new EmbeddedMetadata.Builder()
.version(new NumericVersion(1))
.lifespan(25_000)
.maxIdle(30_000)
.build();
CompletableFuture<CacheEntry<String, String>> f = c.getAdvancedCache().replaceAsyncEntry("k", "v", metadata);
assertFutureResult(f, null);
assertNull(c.get("k"));
}
public void testReplaceAsyncEntryExistingKey() throws Exception {
   // Seed the cache with an entry carrying explicit version/lifespan/maxIdle metadata.
   Metadata metadata = new EmbeddedMetadata.Builder()
         .version(new NumericVersion(1))
         .lifespan(25_000)
         .maxIdle(30_000)
         .build();
   assertFutureResult(c.getAdvancedCache().putAsync("k", "v", metadata), null);
   Metadata updatedMetadata = new EmbeddedMetadata.Builder()
         .version(new NumericVersion(2))
         .lifespan(35_000)
         .maxIdle(42_000)
         .build();
   // replaceAsyncEntry on an existing key completes with the previous entry (value + metadata).
   CompletableFuture<CacheEntry<String, String>> f = c.getAdvancedCache().replaceAsyncEntry("k", "v2", updatedMetadata);
   assertFutureResultOn(f, previousEntry -> {
      // Fixed argument order to (expected, actual) — AssertJUnit convention used by the rest of this class.
      assertEquals("k", previousEntry.getKey());
      assertEquals("v", previousEntry.getValue());
      assertMetadata(metadata, previousEntry.getMetadata());
   });
   // The stored entry must now reflect the new value and the updated metadata.
   assertFutureResultOn(c.getAdvancedCache().getCacheEntryAsync("k"), currentEntry -> {
      assertEquals("k", currentEntry.getKey());
      assertEquals("v2", currentEntry.getValue());
      assertNotNull(currentEntry.getMetadata());
      assertMetadata(updatedMetadata, currentEntry.getMetadata());
   });
}
public void testReplaceAsyncConditionalOnOldValueNonExisting() throws Exception {
c.put("k", "v");
CompletableFuture<Boolean> f = c.replaceAsync("k", "v_nonexistent", "v2");
assertFutureResult(f, false);
assertEquals("v", c.get("k"));
}
public void testReplaceAsyncConditionalOnOldValue() throws Exception {
c.put("k", "v");
CompletableFuture<Boolean> f = c.replaceAsync("k", "v", "v2");
assertFutureResult(f, true);
assertEquals("v2", c.get("k"));
}
public void testComputeIfAbsentAsync() throws Exception {
Function<Object, String> mappingFunction = k -> k + " world";
assertEquals("hello world", c.computeIfAbsentAsync("hello", mappingFunction).get());
assertEquals("hello world", c.get("hello"));
Function<Object, String> functionAfterPut = k -> k + " happy";
// hello already exists so nothing should happen
assertEquals("hello world", c.computeIfAbsentAsync("hello", functionAfterPut).get());
assertEquals("hello world", c.get("hello"));
int cacheSizeBeforeNullValueCompute = c.size();
Function<Object, String> functionMapsToNull = k -> null;
assertNull("with function mapping to null returns null", c.computeIfAbsentAsync("kaixo", functionMapsToNull).get());
assertNull("the key does not exist", c.get("kaixo"));
assertEquals(cacheSizeBeforeNullValueCompute, c.size());
RuntimeException computeRaisedException = new RuntimeException("hi there");
Function<Object, String> functionMapsToException = k -> {
throw computeRaisedException;
};
expectException(ExecutionException.class, RuntimeException.class, "hi there", () -> c.computeIfAbsentAsync("es", functionMapsToException).get());
}
public void testComputeIfPresentAsync() throws Exception {
BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
c.put("es", "hola");
assertEquals("hello_es:hola", c.computeIfPresentAsync("es", mappingFunction).get());
assertEquals("hello_es:hola", c.get("es"));
RuntimeException computeRaisedException = new RuntimeException("hi there");
BiFunction<Object, Object, String> mappingToException = (k, v) -> {
throw computeRaisedException;
};
expectException(ExecutionException.class, RuntimeException.class, "hi there", () -> c.computeIfPresentAsync("es", mappingToException).get());
BiFunction<Object, Object, String> mappingForNotPresentKey = (k, v) -> "absent_" + k + ":" + v;
assertNull("unexisting key should return null", c.computeIfPresentAsync("fr", mappingForNotPresentKey).get());
assertNull("unexisting key should return null", c.get("fr"));
BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
assertNull("mapping to null returns null", c.computeIfPresentAsync("es", mappingToNull).get());
assertNull("the key is removed", c.get("es"));
}
public void testComputeAsync() throws Exception {
BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
c.put("es", "hola");
assertEquals("hello_es:hola", c.computeAsync("es", mappingFunction).get());
assertEquals("hello_es:hola", c.get("es"));
BiFunction<Object, Object, String> mappingForNotPresentKey = (k, v) -> "absent_" + k + ":" + v;
assertEquals("absent_fr:null", c.computeAsync("fr", mappingForNotPresentKey).get());
assertEquals("absent_fr:null", c.get("fr"));
BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
assertNull("mapping to null returns null", c.computeAsync("es", mappingToNull).get());
assertNull("the key is removed", c.get("es"));
int cacheSizeBeforeNullValueCompute = c.size();
assertNull("mapping to null returns null", c.computeAsync("eus", mappingToNull).get());
assertNull("the key does not exist", c.get("eus"));
assertEquals(cacheSizeBeforeNullValueCompute, c.size());
RuntimeException computeRaisedException = new RuntimeException("hi there");
BiFunction<Object, Object, String> mappingToException = (k, v) -> {
throw computeRaisedException;
};
expectException(ExecutionException.class, RuntimeException.class, "hi there", () -> c.computeAsync("es", mappingToException).get());
}
public void testMergeAsync() throws Exception {
   c.put("k", "v");
   // Key present: the remapping function combines old and new values (replace).
   c.mergeAsync("k", "v", (oldValue, newValue) -> "" + oldValue + newValue).get();
   assertEquals("vv", c.get("k"));
   // A remapping function returning null removes the entry.
   c.mergeAsync("k", "v2", (oldValue, newValue) -> null).get();
   assertNull(c.get("k")); // was assertEquals(null, ...) — assertNull is the idiomatic form
   // Key absent: merge behaves like put-if-absent and stores the given value.
   c.mergeAsync("k2", "42", (oldValue, newValue) -> "" + oldValue + newValue).get();
   assertEquals("42", c.get("k2"));
   c.put("k", "v");
   // An exception thrown by the remapping function surfaces as the ExecutionException cause.
   RuntimeException mergeRaisedException = new RuntimeException("hi there");
   expectException(ExecutionException.class, RuntimeException.class, "hi there", () -> c.mergeAsync("k", "v1", (k, v) -> {
      throw mergeRaisedException;
   }).get()); // use field 'c' like the rest of the class (it is the same default cache as 'cache')
}
public void testPutAsyncWithLifespanAndMaxIdle() throws Exception {
   // lifespan only
   Future<String> f = c.putAsync("k", "v", 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(f, null);
   verifyEviction("k", "v", 1000, 500, true);
   // lifespan and max idle — max idle (1s) is shorter than lifespan (3s) and must win.
   // (Removed leftover debug statement: log.warn("STARTING FAILING ONE"))
   f = c.putAsync("k", "v", 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(f, null);
   verifyEviction("k", "v", 1000, 500, false);
   // lifespan and max idle — the entry is touched during the checks, so lifespan (3s) must win.
   f = c.putAsync("k", "v", 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(f, null);
   verifyEviction("k", "v", 3000, 500, true);
}
public void testPutAllAsyncWithLifespanAndMaxIdle() throws Exception {
// putAll lifespan only
Future<Void> f = c.putAllAsync(Collections.singletonMap("k", "v1"), 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
verifyEviction("k", "v1", 1000, 500, true);
// putAll lifespan and max idle (test max idle)
f = c.putAllAsync(Collections.singletonMap("k", "v2"), 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
verifyEviction("k", "v2", 1000, 500, false);
// putAll lifespan and max idle (test lifespan)
f = c.putAllAsync(Collections.singletonMap("k", "v3"), 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
verifyEviction("k", "v3", 3000, 500, true);
}
public void testPutIfAbsentAsyncWithLifespanAndMaxIdle() throws Exception {
// putIfAbsent lifespan only
c.put("k", "v1");
CompletableFuture<String> f = c.putIfAbsentAsync("k", "v2", 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "v1");
assertEquals("v1", c.get("k"));
Thread.sleep(300);
assertEquals("v1", c.get("k"));
assertEquals("v1", c.remove("k"));
assertNull(c.get("k"));
// now really put (k removed) lifespan only
f = c.putIfAbsentAsync("k", "v", 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
verifyEviction("k", "v", 1000, 500, true);
// putIfAbsent lifespan and max idle (test max idle)
f = c.putIfAbsentAsync("k", "v", 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
verifyEviction("k", "v", 1000, 500, false);
// putIfAbsent lifespan and max idle (test lifespan)
f = c.putIfAbsentAsync("k", "v", 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
verifyEviction("k", "v", 3000, 500, true);
}
public void testReplaceAsyncWithLifespan() throws Exception {
CompletableFuture<String> f = c.replaceAsync("k", "v", 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, null);
assertNull(c.get("k"));
c.put("k", "v");
f = c.replaceAsync("k", "v1", 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "v");
verifyEviction("k", "v1", 1000, 500, true);
//replace2
c.put("k", "v1");
Future<Boolean> f3 = c.replaceAsync("k", "v_nonexistent", "v2", 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f3, false);
Thread.sleep(300);
assertEquals("v1", c.get("k"));
f3 = c.replaceAsync("k", "v1", "v2", 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f3, true);
verifyEviction("k", "v2", 1000, 500, true);
}
public void testReplaceAsyncWithLifespanAndMaxIdle() throws Exception {
   // replace(k, newValue) with lifespan and max idle (max idle is shorter and must win)
   c.put("k", "v");
   // Typed future instead of the raw CompletableFuture the original reused for String and Boolean results.
   CompletableFuture<String> replaced = c.replaceAsync("k", "v1", 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(replaced, "v");
   verifyEviction("k", "v1", 1000, 500, false);
   // replace(k, newValue) with lifespan and max idle (entry kept touched, lifespan must win)
   c.put("k", "v");
   replaced = c.replaceAsync("k", "v1", 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(replaced, "v");
   verifyEviction("k", "v1", 3000, 500, true);
   // conditional replace(k, old, new) with lifespan and max idle (test max idle)
   c.put("k", "v1");
   CompletableFuture<Boolean> conditional = c.replaceAsync("k", "v1", "v2", 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(conditional, true);
   verifyEviction("k", "v2", 1000, 500, false);
   // conditional replace(k, old, new) with lifespan and max idle (test lifespan)
   c.put("k", "v1");
   conditional = c.replaceAsync("k", "v1", "v2", 3000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
   markStartTime();
   assertFutureResult(conditional, true);
   verifyEviction("k", "v2", 3000, 500, true);
}
public void testMergeAsyncWithLifespan() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.mergeAsync("k", "v1", (oldValue, newValue) -> "" + oldValue + newValue, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "vv1");
verifyEviction("k", "vv1", 1000, 500, true);
f = c.mergeAsync("k2", "42", (oldValue, newValue) -> "" + oldValue + newValue, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "42");
verifyEviction("k2", "42", 1000, 500, true);
}
public void testMergeAsyncWithLifespanAndMaxIdle() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.mergeAsync("k", "v1", (oldValue, newValue) -> "" + oldValue + newValue, 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "vv1");
verifyEviction("k", "vv1", 1000, 500, false);
c.put("k", "v");
f = c.mergeAsync("k", "v1", (oldValue, newValue) -> "" + oldValue + newValue, 500, TimeUnit.MILLISECONDS, 5000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "vv1");
verifyEviction("k", "vv1", 500, 500, false);
f = c.mergeAsync("k2", "v", (oldValue, newValue) -> "" + oldValue + newValue, 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "v");
verifyEviction("k2", "v", 1000, 500, false);
f = c.mergeAsync("k2", "v", (oldValue, newValue) -> "" + oldValue + newValue, 500, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "v");
verifyEviction("k2", "v", 500, 500, false);
}
public void testComputeAsyncWithLifespan() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.computeAsync("k", (key, value) -> "" + key + value, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "kv");
verifyEviction("k", "kv", 1000, 500, true);
f = c.computeAsync("k2", (key, value) -> "" + 42, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "42");
verifyEviction("k2", "42", 1000, 500, true);
}
public void testComputeAsyncWithLifespanAndMaxIdle() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.computeAsync("k", (key, value) -> "" + key + value, 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "kv");
verifyEviction("k", "kv", 1000, 500, false);
c.put("k", "v");
f = c.computeAsync("k", (key, value) -> "" + key + value, 500, TimeUnit.MILLISECONDS, 5000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "kv");
verifyEviction("k", "kv", 500, 500, false);
f = c.computeAsync("k2", (key, value) -> "" + 42, 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "42");
verifyEviction("k2", "42", 1000, 500, false);
f = c.computeAsync("k2", (key, value) -> "" + value + 42, 500, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "null42");
verifyEviction("k2", "null42", 500, 500, false);
}
public void testComputeIfPresentAsyncWithLifespan() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.computeIfPresentAsync("k", (key, value) -> "" + key + value, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "kv");
verifyEviction("k", "kv", 1000, 500, true);
}
public void testComputeIfPresentAsyncWithLifespanAndMaxIdle() throws Exception {
c.put("k", "v");
CompletableFuture<String> f = c.computeIfPresentAsync("k", (key, value) -> "" + key + value, 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "kv");
verifyEviction("k", "kv", 1000, 500, false);
c.put("k", "v");
f = c.computeIfPresentAsync("k", (key, value) -> "" + key + value, 500, TimeUnit.MILLISECONDS, 5000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "kv");
verifyEviction("k", "kv", 500, 500, false);
}
public void testComputeIfAbsentAsyncWithLifespan() throws Exception {
CompletableFuture<String> f = c.computeIfAbsentAsync("k2", key -> key + 42, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "k242");
verifyEviction("k2", "k242", 1000, 500, true);
}
public void testComputeIfAbsentAsyncWithLifespanAndMaxIdle() throws Exception {
CompletableFuture<String> f = c.computeIfAbsentAsync("k2", key -> key + 42, 5000, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "k242");
verifyEviction("k2", "k242", 1000, 500, false);
f = c.computeIfAbsentAsync("k2", key -> "" + key + 42, 500, TimeUnit.MILLISECONDS, 1000, TimeUnit.MILLISECONDS);
markStartTime();
assertFutureResult(f, "k242");
verifyEviction("k2", "k242", 500, 500, false);
}
/**
 * Verifies the common assertions for an obtained {@code Future}: it is non-null,
 * not cancelled, completes with the expected value within 10 seconds, and reports
 * itself done afterwards.
 *
 * @param f the future under test
 * @param expected the expected result of {@code f.get()} (may be {@code null})
 * @throws Exception if the future completes exceptionally or times out
 */
private void assertFutureResult(Future<?> f, Object expected) throws Exception {
   assertNotNull(f);
   assertFalse(f.isCancelled());
   assertEquals(expected, f.get(10, TimeUnit.SECONDS));
   assertTrue(f.isDone());
}
/**
 * Like {@code assertFutureResult}, but the completed value is validated by the
 * supplied consumer instead of a direct equality check — useful for CacheEntry results.
 */
private <T> void assertFutureResultOn(Future<T> f, Consumer<? super T> check) throws Exception {
   assertNotNull(f);
   assertFalse(f.isCancelled());
   check.accept(f.get(10, TimeUnit.SECONDS));
   assertTrue(f.isDone());
}
// Records the current (controlled) wall-clock time; verifyEviction() measures
// the expected expiration window relative to this mark and then resets it.
private void markStartTime() {
   startTime = timeService.wallClockTime();
}
/**
 * Verifies that a key is evicted once its expected lifetime (counted from the last
 * {@link #markStartTime()} call) has elapsed, advancing the controlled time service
 * in {@code checkPeriod} steps rather than sleeping.
 *
 * @param key the key to check
 * @param expectedValue expected key value at the beginning
 * @param expectedLifetime expected life of the key, in virtual milliseconds
 * @param checkPeriod period between executing checks. If the check modifies the idle time, this is important to
 *                    block idle expiration.
 * @param touchKey indicates if the poll for key existence should read the key and cause idle time to be
 *                 reset
 */
private void verifyEviction(final String key, final String expectedValue, final long expectedLifetime, long checkPeriod, final boolean touchKey) {
   if (startTime == null) {
      // Programming error in the test itself: the expiration window has no starting point.
      throw new IllegalStateException("markStartTime() must be called before verifyEviction(..)");
   }
   try {
      long expectedEndTime = startTime + expectedLifetime;
      // The "is it expired yet?" probe; the two branches differ in whether probing resets idle time.
      Condition condition = () -> {
         if (touchKey) {
            return !c.containsKey(key); //this check DOES read the key so it resets the idle time
         } else {
            //this check DOES NOT read the key so it does not interfere with idle time
            InternalCacheEntry entry = c.getAdvancedCache().getDataContainer().peek(key);
            return entry == null || entry.isExpired(timeService.wallClockTime());
         }
      };
      // Either the value is still readable, or the window already elapsed before we got here.
      assertTrue(expectedValue.equals(c.get(key)) || timeService.wallClockTime() > expectedEndTime);
      // we need to loop to keep touching the entry and protect against idle expiration
      while (timeService.wallClockTime() <= expectedEndTime) {
         assertFalse("Entry evicted too soon!", condition.isSatisfied());
         timeService.advance(checkPeriod);
      }
      // Past the expected end time the entry must be gone from both probes and a plain get().
      assertTrue(timeService.wallClockTime() > expectedEndTime);
      assertTrue(condition.isSatisfied());
      Object value = c.get(key);
      assertNull(value);
   } catch (RuntimeException e) {
      throw e;
   } catch (Exception e) {
      throw new RuntimeException(e);
   } finally {
      // Force the next verifyEviction() call to be preceded by its own markStartTime().
      startTime = null;
   }
}
// Compares the three metadata facets this test suite cares about.
private void assertMetadata(Metadata expected, Metadata actual) {
   assertEquals(expected.maxIdle(), actual.maxIdle());
   assertEquals(expected.lifespan(), actual.lifespan());
   assertEquals(expected.version(), actual.version());
}
}
| 29,404
| 39.897079
| 168
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/APINonTxTest.java
|
package org.infinispan.api;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.infinispan.test.TestingUtil.assertNoLocks;
import static org.infinispan.test.TestingUtil.createMapEntry;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.LockedStream;
import org.infinispan.commons.lambda.NamedLambdas;
import org.infinispan.commons.util.ObjectDuplicator;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.functional.FunctionalMap;
import org.infinispan.functional.impl.FunctionalMapImpl;
import org.infinispan.functional.impl.ReadWriteMapImpl;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.concurrent.locks.impl.InfinispanLock;
import org.infinispan.util.function.SerializableBiConsumer;
import org.infinispan.util.function.SerializableBiFunction;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.1
*/
@Test(groups = "functional", testName = "api.APINonTxTest")
public class APINonTxTest extends SingleCacheManagerTest {
// Hook for subclasses to customise the cache configuration before the cache is defined; no-op by default.
protected void configure(ConfigurationBuilder builder) {
}
@AfterMethod
public void checkForLeakedTransactions() {
   // Guard against tests leaving transactions open: the local transaction table must be empty.
   TransactionTable txTable = TestingUtil.getTransactionTable(cache);
   if (txTable != null) {
      assertEquals(0, txTable.getLocalTxCount());
      assertEquals(0, txTable.getLocalTransactions().size());
   }
}
@Override
protected EmbeddedCacheManager createCacheManager() throws Exception {
   // start a single cache instance
   ConfigurationBuilder c = getDefaultStandaloneCacheConfig(false);
   // Let subclasses tweak the configuration before the "test" cache is defined.
   configure(c);
   EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(false, TestDataSCI.INSTANCE);
   cm.defineConfiguration("test", c.build());
   cache = cm.getCache("test");
   return cm;
}
public void testConvenienceMethods() {
   final String key = "key";
   final String value = "value";
   // The cache starts empty: no mapping for the key, size 0.
   assertNull(cache.get(key));
   assertCacheIsEmpty();
   // put() stores the mapping and exposes it through keySet()/values().
   cache.put(key, value);
   assertEquals(value, cache.get(key));
   assertTrue(cache.keySet().contains(key));
   assertTrue(cache.values().contains(value));
   assertCacheSize(1);
   // remove() drops the mapping again.
   cache.remove(key);
   assertNull(cache.get(key));
   assertCacheIsEmpty();
   // putAll() with a single-entry map behaves exactly like put().
   cache.putAll(Collections.singletonMap(key, value));
   assertEquals(value, cache.get(key));
   assertTrue(cache.keySet().contains(key));
   assertTrue(cache.values().contains(value));
   assertCacheSize(1);
}
public void testStopClearsData() {
String key = "key", value = "value";
cache.put(key, value);
assertEquals(value, cache.get(key));
assertCacheSize(1);
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
cache.stop();
assertEquals(ComponentStatus.TERMINATED, cache.getStatus());
cache.start();
assertFalse(cache.containsKey(key));
assertFalse(cache.keySet().contains(key));
assertFalse(cache.values().contains(value));
assertCacheIsEmpty();
}
/**
* Tests basic eviction
*/
public void testEvict() {
String key1 = "keyOne", key2 = "keyTwo", value = "value";
cache.put(key1, value);
cache.put(key2, value);
assertTrue(cache.containsKey(key1));
assertTrue(cache.containsKey(key2));
assertCacheSize(2);
assertTrue(cache.keySet().contains(key1));
assertTrue(cache.keySet().contains(key2));
assertTrue(cache.values().contains(value));
// evict two
cache.evict(key2);
assertTrue(cache.containsKey(key1));
assertFalse(cache.containsKey(key2));
assertCacheSize(1);
assertTrue(cache.keySet().contains(key1));
assertFalse(cache.keySet().contains(key2));
assertTrue(cache.values().contains(value));
cache.evict(key1);
assertFalse(cache.containsKey(key1));
assertFalse(cache.containsKey(key2));
assertFalse(cache.keySet().contains(key1));
assertFalse(cache.keySet().contains(key2));
assertFalse(cache.values().contains(value));
assertCacheIsEmpty();
// We should be fine if we evict a non existent key
cache.evict(key1);
}
public void testUnsupportedKeyValueCollectionOperationsAddMethod() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Object> keys = cache.keySet();
Collection<Object> values = cache.values();
//noinspection unchecked
Collection<Object>[] collections = new Collection[]{keys, values};
String newObj = "4";
List<Object> newObjCol = new ArrayList<>();
newObjCol.add(newObj);
for (Collection<Object> col : collections) {
expectException(UnsupportedOperationException.class, () -> col.add(newObj));
expectException(UnsupportedOperationException.class, () -> col.addAll(newObjCol));
}
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testAddMethodsForEntryCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
entries.add(createMapEntry("4", "four"));
}
public void testRemoveMethodOfKeyValueEntryCollections() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Object> keys = cache.keySet();
keys.remove(key1);
assertCacheSize(2);
Collection<Object> values = cache.values();
values.remove(value2);
assertCacheSize(1);
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
entries.remove(TestingUtil.<Object, Object>createMapEntry(key3, value3));
assertCacheIsEmpty();
}
public void testClearMethodOfKeyCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Object> keys = cache.keySet();
keys.clear();
assertCacheIsEmpty();
}
public void testClearMethodOfValuesCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Collection<Object> values = cache.values();
values.clear();
assertCacheIsEmpty();
}
public void testClearMethodOfEntryCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
entries.clear();
assertCacheIsEmpty();
}
public void testRemoveAllMethodOfKeyCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
List<String> keyCollection = new ArrayList<>(2);
keyCollection.add(key2);
keyCollection.add(key3);
Collection<Object> keys = cache.keySet();
keys.removeAll(keyCollection);
assertCacheSize(1);
}
public void testRemoveAllMethodOfValuesCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
List<String> valueCollection = new ArrayList<>(2);
valueCollection.add(value1);
valueCollection.add(value2);
Collection<Object> values = cache.values();
values.removeAll(valueCollection);
assertCacheSize(1);
}
public void testRemoveAllMethodOfEntryCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
List<Map.Entry> entryCollection = new ArrayList<>(2);
entryCollection.add(createMapEntry(key1, value1));
entryCollection.add(createMapEntry(key3, value3));
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
entries.removeAll(entryCollection);
assertCacheSize(1);
}
public void testRetainAllMethodOfKeyCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
List<String> keyCollection = new ArrayList<>(2);
keyCollection.add(key2);
keyCollection.add(key3);
keyCollection.add("6");
Collection<Object> keys = cache.keySet();
keys.retainAll(keyCollection);
assertCacheSize(2);
}
public void testRetainAllMethodOfValuesCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
List<String> valueCollection = new ArrayList<>(2);
valueCollection.add(value1);
valueCollection.add(value2);
valueCollection.add("5");
Collection<Object> values = cache.values();
values.retainAll(valueCollection);
assertCacheSize(2);
}
public void testRetainAllMethodOfEntryCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
List<Map.Entry> entryCollection = new ArrayList<>(3);
entryCollection.add(createMapEntry(key1, value1));
entryCollection.add(createMapEntry(key3, value3));
entryCollection.add(createMapEntry("4", "5"));
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
entries.retainAll(entryCollection);
assertCacheSize(2);
}
public void testRemoveIfMethodOfKeyCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Collection<Object> keys = cache.keySet();
keys.removeIf(k -> k.equals("2"));
assertCacheSize(2);
}
public void testRemoveIfMethodOfValuesCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Collection<Object> values = cache.values();
values.removeIf(v -> ((String) v).startsWith("t"));
assertCacheSize(1);
}
public void testRemoveIfMethodOfEntryCollection() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
entries.removeIf(e -> ((String) e.getValue()).startsWith("t"));
assertCacheSize(1);
}
public void testEntrySetValueFromEntryCollections() {
final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
Map<String, String> m = new HashMap<>();
m.put(key1, value1);
m.put(key2, value2);
m.put(key3, value3);
cache.putAll(m);
Set<Map.Entry<Object, Object>> entries = cache.entrySet();
String newObj = "something-else";
for (Map.Entry<Object, Object> entry : entries) {
entry.setValue(newObj);
}
assertCacheSize(3);
assertEquals(newObj, cache.get(key1));
assertEquals(newObj, cache.get(key2));
assertEquals(newObj, cache.get(key3));
}
   public void testKeyValueEntryCollections() {
      // Iterating the keySet(), values() and entrySet() views must yield
      // exactly the stored entries, each exactly once.
      String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
      Map<String, String> m = new HashMap<>();
      m.put(key1, value1);
      m.put(key2, value2);
      m.put(key3, value3);
      cache.putAll(m);
      assertCacheSize(3);
      Set<Object> expKeys = new HashSet<>();
      expKeys.add(key1);
      expKeys.add(key2);
      expKeys.add(key3);
      Set<Object> expValues = new HashSet<>();
      expValues.add(value1);
      expValues.add(value2);
      expValues.add(value3);
      // Independent copies for the entrySet() pass below; the originals are
      // consumed while checking the key and value views.
      Set expKeyEntries = ObjectDuplicator.duplicateSet(expKeys);
      Set expValueEntries = ObjectDuplicator.duplicateSet(expValues);
      Set<Object> keys = cache.keySet();
      for (Object key : keys) {
         assertTrue(expKeys.remove(key));
      }
      assertTrue(expKeys.isEmpty(), "Did not see keys " + expKeys + " in iterator!");
      Collection<Object> values = cache.values();
      for (Object value : values) {
         assertTrue(expValues.remove(value));
      }
      assertTrue(expValues.isEmpty(), "Did not see keys " + expValues + " in iterator!");
      // Each entry's key and value must be seen exactly once as well.
      Set<Map.Entry<Object, Object>> entries = cache.entrySet();
      for (Map.Entry entry : entries) {
         assertTrue(expKeyEntries.remove(entry.getKey()));
         assertTrue(expValueEntries.remove(entry.getValue()));
      }
      assertTrue(expKeyEntries.isEmpty(), "Did not see keys " + expKeyEntries + " in iterator!");
      assertTrue(expValueEntries.isEmpty(), "Did not see keys " + expValueEntries + " in iterator!");
   }
public void testSizeAndContents() {
String key = "key", value = "value";
assertCacheIsEmpty();
assertFalse(cache.containsKey(key));
assertFalse(cache.keySet().contains(key));
assertFalse(cache.values().contains(value));
cache.put(key, value);
assertCacheSize(1);
assertTrue(cache.containsKey(key));
assertTrue(cache.containsKey(key));
assertTrue(cache.keySet().contains(key));
assertTrue(cache.values().contains(value));
assertEquals(value, cache.remove(key));
assertTrue(cache.isEmpty());
assertCacheIsEmpty();
assertFalse(cache.containsKey(key));
assertFalse(cache.keySet().contains(key));
assertFalse(cache.values().contains(value));
Map<String, String> m = new HashMap<>();
m.put("1", "one");
m.put("2", "two");
m.put("3", "three");
cache.putAll(m);
assertEquals("one", cache.get("1"));
assertEquals("two", cache.get("2"));
assertEquals("three", cache.get("3"));
assertCacheSize(3);
m = new HashMap<>();
m.put("1", "newvalue");
m.put("4", "four");
cache.putAll(m);
assertEquals("newvalue", cache.get("1"));
assertEquals("two", cache.get("2"));
assertEquals("three", cache.get("3"));
assertEquals("four", cache.get("4"));
assertCacheSize(4);
}
   public void testConcurrentMapMethods() {
      // putIfAbsent inserts only when the key is absent and otherwise returns
      // the existing value unchanged.
      assertNull(cache.putIfAbsent("A", "B"));
      assertEquals("B", cache.putIfAbsent("A", "C"));
      assertEquals("B", cache.get("A"));
      // Conditional remove fires only when the current value matches.
      assertFalse(cache.remove("A", "C"));
      assertTrue(cache.containsKey("A"));
      assertTrue(cache.remove("A", "B"));
      assertFalse(cache.containsKey("A"));
      cache.put("A", "B");
      // Conditional replace fires only when the expected value matches.
      assertFalse(cache.replace("A", "D", "C"));
      assertEquals("B", cache.get("A"));
      assertTrue(cache.replace("A", "B", "C"));
      assertEquals("C", cache.get("A"));
      // Unconditional replace returns the previous value, and is a no-op
      // returning null when the key is absent.
      assertEquals("C", cache.replace("A", "X"));
      assertNull(cache.replace("X", "A"));
      assertFalse(cache.containsKey("X"));
   }
   @SuppressWarnings("ConstantConditions")
   public void testPutNullParameters() {
      // put and putIfAbsent (which shares the same underlying command) must
      // reject null keys and null values with descriptive NPEs.
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.put(null, null));
      expectException(NullPointerException.class, "Null values are not supported!", () -> cache.put("k", null));
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.put(null, "v"));
      //put if absent since it shares the same command as put
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.putIfAbsent(null, null));
      expectException(NullPointerException.class, "Null values are not supported!", () -> cache.putIfAbsent("k", null));
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.putIfAbsent(null, "v"));
   }
   @SuppressWarnings("ConstantConditions")
   public void testReplaceNullParameters() {
      // Both replace overloads must reject null keys and null expected/new values.
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.replace(null, null));
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.replace(null, "X"));
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.replace(null, "X", "Y"));
      expectException(NullPointerException.class, "Null values are not supported!",
            () -> cache.replace("hello", null, "X"));
      expectException(NullPointerException.class, "Null values are not supported!",
            () -> cache.replace("hello", "X", null));
   }
   @SuppressWarnings("ConstantConditions")
   public void testRemoveNullParameters() {
      // Both remove overloads must reject null keys and null values.
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.remove(null));
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.remove(null, "X"));
      expectException(NullPointerException.class, "Null values are not supported!", () -> cache.remove("k", null));
      expectException(NullPointerException.class, "Null keys are not supported!", () -> cache.remove(null, null));
   }
   @SuppressWarnings("ConstantConditions")
   public void testComputeNullParameters() {
      // compute, computeIfAbsent and computeIfPresent must reject null keys
      // and null remapping functions.
      expectException(NullPointerException.class, "Null keys are not supported!",
            () -> cache.compute(null, (o, o2) -> "X"));
      expectException(NullPointerException.class, "Null functions are not supported!", () -> cache.compute("k", null));
      expectException(NullPointerException.class, "Null keys are not supported!",
            () -> cache.computeIfAbsent(null, o -> "X"));
      expectException(NullPointerException.class, "Null functions are not supported!",
            () -> cache.computeIfAbsent("k", null));
      expectException(NullPointerException.class, "Null keys are not supported!",
            () -> cache.computeIfPresent(null, (o, o2) -> "X"));
      expectException(NullPointerException.class, "Null functions are not supported!",
            () -> cache.computeIfPresent("k", null));
   }
   @SuppressWarnings("ConstantConditions")
   public void testMergeNullParameters() {
      // merge must reject null keys, null values and null remapping functions.
      expectException(NullPointerException.class, "Null keys are not supported!",
            () -> cache.merge(null, "X", (o, o2) -> "Y"));
      expectException(NullPointerException.class, "Null values are not supported!",
            () -> cache.merge("k", null, (o, o2) -> "Y"));
      expectException(NullPointerException.class, "Null functions are not supported!",
            () -> cache.merge("k", "X", null));
   }
   public void testPutIfAbsentLockCleanup() {
      // No lock may remain held after a plain put, nor after a
      // putForExternalRead that finds the key already present.
      assertNoLocks(cache);
      cache.put("key", "value");
      assertNoLocks(cache);
      // The key already exists, so this write must not take effect (the
      // original value is asserted below) and must leave no lock behind.
      cache.putForExternalRead("key", "value2");
      assertNoLocks(cache);
      assertEquals("value", cache.get("key"));
   }
   // Convenience wrapper: asserts size 0 across the cache and all of its views.
   private void assertCacheIsEmpty() {
      assertCacheSize(0);
   }
private void assertCacheSize(int expectedSize) {
assertEquals(expectedSize, cache.size());
assertEquals(expectedSize, cache.keySet().size());
assertEquals(expectedSize, cache.values().size());
assertEquals(expectedSize, cache.entrySet().size());
boolean isEmpty = expectedSize == 0;
assertEquals(isEmpty, cache.isEmpty());
assertEquals(isEmpty, cache.keySet().isEmpty());
assertEquals(isEmpty, cache.values().isEmpty());
assertEquals(isEmpty, cache.entrySet().isEmpty());
}
public void testGetOrDefault() {
cache.put("A", "B");
assertEquals("K", cache.getOrDefault("Not there", "K"));
}
public void testMerge() throws Exception {
cache.put("A", "B");
// replace
cache.merge("A", "C", (oldValue, newValue) -> "" + oldValue + newValue);
assertEquals("BC", cache.get("A"));
// remove if null value after remapping
cache.merge("A", "C", (oldValue, newValue) -> null);
assertEquals(null, cache.get("A"));
// put if absent
cache.merge("F", "42", (oldValue, newValue) -> "" + oldValue + newValue);
assertEquals("42", cache.get("F"));
cache.put("A", "B");
RuntimeException mergeRaisedException = new RuntimeException("hi there");
expectException(RuntimeException.class, "hi there", () -> cache.merge("A", "C", (k, v) -> {
throw mergeRaisedException;
}));
}
   public void testMergeWithExpirationParameters() {
      // The merge overloads taking lifespan (and maxIdle) must apply the
      // remapped value together with the requested expiration.
      BiFunction<Object, Object, String> mappingFunction = (v1, v2) -> v1 + " " + v2;
      cache.put("es", "hola");
      assertEquals("hola guy", cache.merge("es", "guy", mappingFunction, 1_000_000, TimeUnit.SECONDS));
      CacheEntry<Object, Object> entry = cache.getAdvancedCache().getCacheEntry("es");
      assertEquals("hola guy", entry.getValue());
      // getLifespan() reports milliseconds, hence the factor of 1000.
      assertEquals(1_000_000_000, entry.getLifespan());
      assertEquals("hola guy and good bye", cache.merge("es", "and good bye", mappingFunction, 1_100_000, TimeUnit.SECONDS, -1, TimeUnit.SECONDS));
      entry = cache.getAdvancedCache().getCacheEntry("es");
      assertEquals("hola guy and good bye", entry.getValue());
      assertEquals(1_100_000_000, entry.getLifespan());
   }
public void testForEach() {
cache.put("A", "B");
cache.put("C", "D");
Set<String> values = new HashSet<>();
BiConsumer<? super Object, ? super Object> collectKeyValues = (k, v) -> values.add("hello_" + k.toString() + v.toString());
cache.forEach(collectKeyValues);
assertEquals(TestingUtil.setOf("hello_AB", "hello_CD"), values);
}
   public void testComputeIfAbsent() {
      // computeIfAbsent: the mapping function runs only for absent keys, a
      // null result stores nothing, and exceptions propagate to the caller.
      Function<Object, String> mappingFunction = k -> k + " world";
      assertEquals("hello world", cache.computeIfAbsent("hello", mappingFunction));
      assertEquals("hello world", cache.get("hello"));
      Function<Object, String> functionAfterPut = k -> k + " happy";
      // hello already exists so nothing should happen
      assertEquals("hello world", cache.computeIfAbsent("hello", functionAfterPut));
      assertEquals("hello world", cache.get("hello"));
      // A null mapping result must not create an entry nor change the size.
      int cacheSizeBeforeNullValueCompute = cache.size();
      Function<Object, String> functionMapsToNull = k -> null;
      assertNull("with function mapping to null returns null", cache.computeIfAbsent("kaixo", functionMapsToNull));
      assertNull("the key does not exist", cache.get("kaixo"));
      assertEquals(cacheSizeBeforeNullValueCompute, cache.size());
      RuntimeException computeRaisedException = new RuntimeException("hi there");
      Function<Object, String> functionMapsToException = k -> {
         throw computeRaisedException;
      };
      expectException(RuntimeException.class, "hi there", () -> cache.computeIfAbsent("es", functionMapsToException));
   }
   public void testComputeIfAbsentWithExpirationParameters() {
      // The computeIfAbsent overloads taking lifespan/maxIdle apply expiration
      // only when a value is actually computed and stored.
      Function<Object, String> mappingFunction = k -> k + " world";
      assertEquals("hello world", cache.computeIfAbsent("hello", mappingFunction, 1_000_000, TimeUnit.SECONDS));
      CacheEntry<Object, Object> entry = cache.getAdvancedCache().getCacheEntry("hello");
      assertEquals("hello world", entry.getValue());
      // getLifespan() reports milliseconds, hence the factor of 1000.
      assertEquals(1_000_000_000, entry.getLifespan());
      assertEquals("hello world", cache.computeIfAbsent("hello", mappingFunction, 1_100_000, TimeUnit.SECONDS,
            -1, TimeUnit.SECONDS));
      entry = cache.getAdvancedCache().getCacheEntry("hello");
      assertEquals("hello world", entry.getValue());
      // The computeIfAbsent will fail, leaving the expiration the same
      assertEquals(1_000_000_000, entry.getLifespan());
   }
   public void testComputeIfPresent() {
      // computeIfPresent: remaps existing keys, propagates exceptions, ignores
      // absent keys, and removes the entry when the function returns null.
      BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
      cache.put("es", "hola");
      assertEquals("hello_es:hola", cache.computeIfPresent("es", mappingFunction));
      assertEquals("hello_es:hola", cache.get("es"));
      RuntimeException computeRaisedException = new RuntimeException("hi there");
      BiFunction<Object, Object, String> mappingToException = (k, v) -> {
         throw computeRaisedException;
      };
      expectException(RuntimeException.class, "hi there", () -> cache.computeIfPresent("es", mappingToException));
      BiFunction<Object, Object, String> mappingForNotPresentKey = (k, v) -> "absent_" + k + ":" + v;
      assertNull("unexisting key should return null", cache.computeIfPresent("fr", mappingForNotPresentKey));
      assertNull("unexisting key should return null", cache.get("fr"));
      BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
      assertNull("mapping to null returns null", cache.computeIfPresent("es", mappingToNull));
      assertNull("the key is removed", cache.get("es"));
   }
   public void testComputeIfPresentWithExpirationParameters() {
      // The computeIfPresent overloads taking lifespan/maxIdle must apply the
      // new expiration together with the remapped value.
      BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
      cache.put("es", "hola");
      assertEquals("hello_es:hola", cache.computeIfPresent("es", mappingFunction, 1_000_000, TimeUnit.SECONDS));
      CacheEntry<Object, Object> entry = cache.getAdvancedCache().getCacheEntry("es");
      assertEquals("hello_es:hola", entry.getValue());
      // getLifespan() reports milliseconds, hence the factor of 1000.
      assertEquals(1_000_000_000, entry.getLifespan());
      assertEquals("hello_es:hello_es:hola", cache.computeIfPresent("es", mappingFunction, 1_100_000, TimeUnit.SECONDS, -1, TimeUnit.SECONDS));
      entry = cache.getAdvancedCache().getCacheEntry("es");
      assertEquals("hello_es:hello_es:hola", entry.getValue());
      assertEquals(1_100_000_000, entry.getLifespan());
   }
   public void testCompute() {
      // compute: remaps existing keys, is invoked with a null old value for
      // absent keys, removes on a null result, and propagates exceptions.
      BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
      cache.put("es", "hola");
      assertEquals("hello_es:hola", cache.compute("es", mappingFunction));
      assertEquals("hello_es:hola", cache.get("es"));
      // Unlike computeIfPresent, an absent key is still remapped (v == null).
      BiFunction<Object, Object, String> mappingForNotPresentKey = (k, v) -> "absent_" + k + ":" + v;
      assertEquals("absent_fr:null", cache.compute("fr", mappingForNotPresentKey));
      assertEquals("absent_fr:null", cache.get("fr"));
      BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
      assertNull("mapping to null returns null", cache.compute("es", mappingToNull));
      assertNull("the key is removed", cache.get("es"));
      // A null result for an absent key must not create an entry.
      int cacheSizeBeforeNullValueCompute = cache.size();
      assertNull("mapping to null returns null", cache.compute("eus", mappingToNull));
      assertNull("the key does not exist", cache.get("eus"));
      assertEquals(cacheSizeBeforeNullValueCompute, cache.size());
      RuntimeException computeRaisedException = new RuntimeException("hi there");
      BiFunction<Object, Object, String> mappingToException = (k, v) -> {
         throw computeRaisedException;
      };
      expectException(RuntimeException.class, "hi there", () -> cache.compute("es", mappingToException));
   }
   public void testComputeWithExpirationParameters() {
      // The compute overloads taking lifespan/maxIdle must apply the new
      // expiration together with the remapped value.
      BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
      cache.put("es", "hola");
      assertEquals("hello_es:hola", cache.compute("es", mappingFunction, 1_000_000, TimeUnit.SECONDS));
      CacheEntry<Object, Object> entry = cache.getAdvancedCache().getCacheEntry("es");
      assertEquals("hello_es:hola", entry.getValue());
      // getLifespan() reports milliseconds, hence the factor of 1000.
      assertEquals(1_000_000_000, entry.getLifespan());
      assertEquals("hello_es:hello_es:hola", cache.compute("es", mappingFunction, 1_100_000, TimeUnit.SECONDS,
            -1, TimeUnit.SECONDS));
      entry = cache.getAdvancedCache().getCacheEntry("es");
      assertEquals("hello_es:hello_es:hola", entry.getValue());
      assertEquals(1_100_000_000, entry.getLifespan());
   }
public void testReplaceAll() {
BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
cache.put("es", "hola");
cache.put("cz", "ahoj");
cache.replaceAll(mappingFunction);
assertEquals("hello_es:hola", cache.get("es"));
assertEquals("hello_cz:ahoj", cache.get("cz"));
BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
expectException(NullPointerException.class, () -> cache.replaceAll(mappingToNull));
assertEquals("hello_es:hola", cache.get("es"));
assertEquals("hello_cz:ahoj", cache.get("cz"));
}
   /**
    * Provides (write-operation, forEachOrInvokeAll) pairs for
    * {@link #testLockedStreamActuallyLocks}: each named lambda performs a
    * different kind of write through the cache handed to the locked-stream
    * callback, and each is exercised via both forEach and invokeAll.
    */
   @DataProvider(name = "lockedStreamActuallyLocks")
   public Object[][] lockStreamActuallyLocks() {
      List<BiConsumer<Cache<Object, Object>, CacheEntry<Object, Object>>> biConsumers = Arrays.asList(
            // Put
            NamedLambdas.of("put", (c, e) -> assertEquals("value" + e.getKey(), c.put(e.getKey(), String.valueOf(e.getValue() + "-other")))),
            // Functional Command
            NamedLambdas.of("functional-command", (c, e) -> {
               FunctionalMap.ReadWriteMap<Object, Object> rwMap = ReadWriteMapImpl.create(FunctionalMapImpl.create(c.getAdvancedCache()));
               try {
                  assertEquals("value" + e.getKey(), rwMap.eval(e.getKey(), view -> {
                     Object prev = view.get();
                     view.set(prev + "-other");
                     return prev;
                  }).get());
               } catch (InterruptedException | ExecutionException e1) {
                  throw new AssertionError(e1);
               }
            }),
            // Put all
            NamedLambdas.of("put-all", (c, e) -> c.putAll(Collections.singletonMap(e.getKey(), e.getValue() + "-other"))),
            // Put Async
            NamedLambdas.of("put-async", (c, e) -> {
               try {
                  c.putAsync(e.getKey(), e.getValue() + "-other").get(10, TimeUnit.SECONDS);
               } catch (InterruptedException | ExecutionException | TimeoutException e1) {
                  throw new AssertionError(e1);
               }
            }),
            // Compute
            NamedLambdas.of("compute", (c, e) -> c.compute(e.getKey(), (k, v) -> v + "-other")),
            // Compute if present
            NamedLambdas.of("compute-if-present", (c, e) -> c.computeIfPresent(e.getKey(), (k, v) -> v + "-other")),
            // Merge
            NamedLambdas.of("merge", (c, e) -> c.merge(e.getKey(), "-other", (v1, v2) -> "" + v1 + v2))
      );
      // Cross-product each operation with {forEach, invokeAll}.
      return biConsumers.stream().flatMap(consumer ->
            Stream.of(Boolean.TRUE, Boolean.FALSE).map(bool -> new Object[] { consumer, bool })
      ).toArray(Object[][]::new);
   }
   /**
    * Verifies that a locked stream holds the key lock while the callback runs:
    * a concurrent put on the same key must block until the callback finishes,
    * then apply on top of the callback's write, and all locks must be released
    * afterwards. The barrier synchronises the forked stream with this thread.
    */
   @Test(dataProvider = "lockedStreamActuallyLocks")
   public void testLockedStreamActuallyLocks(BiConsumer<Cache<Object, Object>, CacheEntry<Object, Object>> consumer,
         boolean forEachOrInvokeAll) throws Throwable {
      for (int i = 0; i < 10; i++) {
         cache.put(i, "value" + i);
      }
      CyclicBarrier barrier = new CyclicBarrier(2);
      int key = 4;
      LockedStream<Object, Object> stream = cache.getAdvancedCache().lockedStream();
      SerializableBiConsumer<Cache<Object, Object>, CacheEntry<Object, Object>> serConsumer = (c, e) -> {
         Object innerKey = e.getKey();
         if (innerKey.equals(key)) {
            try {
               barrier.await(10, TimeUnit.SECONDS);
               consumer.accept(c, e);
               // While inside the callback, the stream must own the key lock.
               InfinispanLock lock = TestingUtil.extractComponent(c, LockManager.class).getLock(innerKey);
               assertNotNull(lock);
               assertEquals(innerKey, lock.getLockOwner());
               barrier.await(10, TimeUnit.SECONDS);
            } catch (InterruptedException | BrokenBarrierException | TimeoutException e1) {
               throw new RuntimeException(e1);
            }
         }
      };
      Future<?> forEachFuture = fork(() -> {
         if (forEachOrInvokeAll) {
            stream.forEach(serConsumer);
         } else {
            stream.invokeAll((c, e) -> {
               serConsumer.accept(c, e);
               return null;
            });
         }
      });
      barrier.await(10, TimeUnit.SECONDS);
      // This put targets the locked key and must block until the callback ends.
      Future<Object> putFuture = fork(() -> cache.put(key, "value" + key + "-new"));
      TestingUtil.assertNotDone(putFuture);
      // Let the forEach with lock complete
      barrier.await(10, TimeUnit.SECONDS);
      forEachFuture.get(10, TimeUnit.SECONDS);
      // The put should replace the value that forEach inserted
      assertEquals("value" + key + "-other", putFuture.get(10, TimeUnit.SECONDS));
      // The put should be last since it had to wait until lock was released on forEachWithLock
      assertEquals("value" + key + "-new", cache.get(key));
      // Make sure the locks were cleaned up properly
      LockManager lockManager = TestingUtil.extractComponent(cache, LockManager.class);
      assertEquals(0, lockManager.getNumberOfLocksHeld());
   }
public void testLockedStreamSetValue() {
for (int i = 0; i < 5; i++) {
cache.put(i, "value" + i);
}
cache.getAdvancedCache().lockedStream().forEach((c, e) -> e.setValue(e.getValue() + "-changed"));
for (int i = 0; i < 5; i++) {
assertEquals("value" + i + "-changed", cache.get(i));
}
}
   /**
    * Opening a nested locked stream from within a locked-stream callback must
    * be rejected with an IllegalArgumentException.
    */
   public void testLockedStreamWithinLockedStream() {
      cache.getAdvancedCache().lockedStream()
            .forEach((c, e) -> expectException(IllegalArgumentException.class,
                  () -> c.getAdvancedCache().lockedStream()));
   }
   /**
    * Runs {@code invokeAll} with the given function over the locked stream and
    * asserts that the per-key result map equals {@code expectedResults}.
    */
   private <R> void assertLockStreamInvokeAll(LockedStream<Object, Object> lockedStream,
         SerializableBiFunction<Cache<Object, Object>, CacheEntry<Object, Object>, R> biFunction,
         Map<Object, R> expectedResults) {
      Map<Object, R> results = lockedStream.invokeAll(biFunction);
      assertEquals(expectedResults, results);
   }
public void testLockedStreamInvokeAllPut() {
Map<Object, Object> original = new HashMap<>();
int insertedAmount = 5;
for (int i = 0; i < insertedAmount; i++) {
original.put("key-" + i, "value-" + i);
}
cache.putAll(original);
assertLockStreamInvokeAll(cache.getAdvancedCache().lockedStream(),
(c, e) -> c.put(e.getKey(), e.getValue() + "-updated"), original);
// Verify contents were updated
for(int i = 0; i < insertedAmount; i++) {
assertEquals("value-" + i + "-updated", cache.get("key-" + i));
}
}
   public void testLockedStreamInvokeAllFilteredSet() {
      // invokeAll on a filtered locked stream must only visit the entries that
      // match the filter, and the result map must contain only those keys.
      Map<Object, Object> original = new HashMap<>();
      int insertedAmount = 5;
      for (int i = 0; i < insertedAmount; i++) {
         original.put("key-" + i, "value-" + i);
      }
      cache.putAll(original);
      // We only update the key with numbers 3
      assertLockStreamInvokeAll(cache.getAdvancedCache().lockedStream().filter(e -> e.getKey().toString().contains("3")),
            (c, e) -> c.put(e.getKey(), e.getValue() + "-updated"), Collections.singletonMap("key-" + 3, "value-" + 3));
      // Verify contents were updated
      for(int i = 0; i < insertedAmount; i++) {
         assertEquals("value-" + i + (i == 3 ? "-updated" : ""), cache.get("key-" + i));
      }
   }
}
| 38,369
| 36.802956
| 147
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentOptimisticTest.java
|
package org.infinispan.api;
import java.util.List;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.2
*/
@Test (groups = "functional", testName = "api.ConditionalOperationsConcurrentOptimisticTest")
public class ConditionalOperationsConcurrentOptimisticTest extends ConditionalOperationsConcurrentTest {
public ConditionalOperationsConcurrentOptimisticTest() {
cacheMode = CacheMode.DIST_SYNC;
transactional = true;
}
@Override
public void testReplace() throws Exception {
List caches = caches(null);
testOnCaches(caches, new ReplaceOperation(false));
}
@Override
public void testConditionalRemove() throws Exception {
List caches = caches(null);
testOnCaches(caches, new ConditionalRemoveOperation(false));
}
public void testPutIfAbsent() throws Exception {
List caches = caches(null);
testOnCaches(caches, new PutIfAbsentOperation(false));
}
}
| 1,013
| 26.405405
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ClearTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.invoke.MethodHandles;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Tests if clear operation actually succeeds in removing all keys from all nodes of a distributed cluster.
* See https://issues.jboss.org/browse/ISPN-2530.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "api.ClearTest")
public class ClearTest extends MultipleCacheManagersTest {
   protected static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
   protected AdvancedCache<Integer, String> c0;
   protected AdvancedCache<Integer, String> c1;
   protected AdvancedCache<Integer, String> c2;
   @Override
   public Object[] factory() {
      // Run the test in non-transactional, optimistic and pessimistic variants.
      return new Object[] {
         new ClearTest().cacheMode(CacheMode.DIST_SYNC).transactional(false),
         new ClearTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
         new ClearTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
      };
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      // Three-node cluster with state transfer enabled and a short lock
      // timeout so failures surface quickly.
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(cacheMode, transactional, transactional);
      builder.clustering().hash().numSegments(3)
            .stateTransfer().fetchInMemoryState(true)
            .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
      if (transactional) {
         builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL)
               .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
               .lockingMode(lockingMode);
      }
      createCluster(builder, 3);
      waitForClusterToForm();
      c0 = advancedCache(0);
      c1 = advancedCache(1);
      c2 = advancedCache(2);
   }
   // clear() issued on one node must remove all entries from the data
   // containers of every node in the cluster (ISPN-2530).
   public void testClear() throws Exception {
      final int numKeys = 5;
      log.infof("Putting %d keys into cache ..", numKeys);
      for (int i = 0; i < numKeys; i++) {
         String value = "val_" + i;
         c0.put(i, value);
         // force all values into L1 of the other nodes
         assertEquals(value, c0.get(i));
         assertEquals(value, c1.get(i));
         assertEquals(value, c2.get(i));
      }
      log.info("Finished putting keys");
      DataContainer dc0 = c0.getDataContainer();
      DataContainer dc1 = c1.getDataContainer();
      DataContainer dc2 = c2.getDataContainer();
      assertTrue(dc0.size() > 0);
      assertTrue(dc1.size() > 0);
      assertTrue(dc2.size() > 0);
      log.info("Clearing cache ..");
      c0.clear();
      log.info("Finished clearing cache");
      // Every node's container must be empty afterwards, not just c0's.
      assertEquals(0, dc0.size());
      assertEquals(0, dc1.size());
      assertEquals(0, dc2.size());
   }
}
| 3,402
| 34.082474
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/IgnoreReturnValueForConditionalOperationsTest.java
|
package org.infinispan.api;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
/**
* https://issues.jboss.org/browse/ISPN-3141
*/
@Test(groups = "functional", testName = "api.IgnoreReturnValueForConditionalOperationsTest")
public class IgnoreReturnValueForConditionalOperationsTest extends MultipleCacheManagersTest {
   @Override
   public Object[] factory() {
      // Exercise both non-transactional and transactional DIST_SYNC variants.
      return new Object[] {
         new IgnoreReturnValueForConditionalOperationsTest().cacheMode(CacheMode.DIST_SYNC).transactional(false),
         new IgnoreReturnValueForConditionalOperationsTest().cacheMode(CacheMode.DIST_SYNC).transactional(true),
      };
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder dcc = getDefaultClusteredCacheConfig(cacheMode, transactional);
      createCluster(TestDataSCI.INSTANCE, dcc, 2);
      waitForClusterToForm();
   }
   /**
    * Conditional replace must still honour the expected-value check when the
    * IGNORE_RETURN_VALUES flag is set (ISPN-3141).
    */
   public void testConditionalReplace() {
      Object k = init();
      AdvancedCache<Object, Object> cache = advancedCache(0).withFlags(Flag.IGNORE_RETURN_VALUES);
      // Unrelated flagged writes before the conditional operation. The return
      // value was previously captured in an unused local; it is discarded now.
      cache.put("kx", "vx");
      cache.put("kx", "vx");
      assertTrue(advancedCache(0).withFlags(Flag.IGNORE_RETURN_VALUES).replace(k, "v0", "v1"));
      assertEquals(cache(0).get(k), "v1");
      assertEquals(cache(1).get(k), "v1");
   }
   /**
    * Conditional remove must still honour the expected-value check when the
    * IGNORE_RETURN_VALUES flag is set.
    */
   public void testConditionalRemove() {
      Object k = init();
      assertTrue(advancedCache(0).withFlags(Flag.IGNORE_RETURN_VALUES).remove(k, "v0"));
      assertNull(cache(0).get(k));
      assertNull(cache(1).get(k));
   }
   // Stores "v0" under a key owned by node 1, verifies visibility on both
   // nodes, and returns the key.
   private Object init() {
      Object k = getKeyForCache(1);
      cache(0).put(k, "v0");
      assertEquals(cache(0).get(k), "v0");
      assertEquals(cache(1).get(k), "v0");
      return k;
   }
}
| 2,155
| 33.774194
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/MetadataAPIDistTest.java
|
package org.infinispan.api;
import static org.infinispan.distribution.DistributionTestHelper.getFirstNonOwner;
import static org.infinispan.distribution.DistributionTestHelper.getFirstOwner;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.internal.junit.ArrayAsserts.assertArrayEquals;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.MetadataAPIDistTest")
public class MetadataAPIDistTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      // Two-node DIST_SYNC cluster with a single owner per key, so there is always
      // exactly one node that does NOT own a given key.
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
      builder.clustering().hash().numOwners(1);
      createCluster(builder, 2);
      waitForClusterToForm();
   }

   // A non-owner must be able to fetch a full CacheEntry for a key it does not own.
   public void testGetCacheEntryNonOwner() {
      byte[] key = {1, 2, 3};
      byte[] value = {4, 5, 6};
      Cache<byte[], byte[]> ownerCache = getFirstOwner(key, this.<byte[], byte[]>caches());
      Cache<byte[], byte[]> remoteCache = getFirstNonOwner(key, this.<byte[], byte[]>caches());
      ownerCache.put(key, value);
      assertArrayEquals(value, ownerCache.get(key));
      CacheEntry entry = remoteCache.getAdvancedCache().getCacheEntry(key);
      assertNotNull(entry);
      assertArrayEquals(value, (byte[]) entry.getValue());
   }
}
| 1,581
| 38.55
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ParallelCacheStartTest.java
|
package org.infinispan.api;
import java.util.List;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.ParallelCacheStartTest")
public class ParallelCacheStartTest extends MultipleCacheManagersTest {
private EmbeddedCacheManager cm1, cm2;
private ConfigurationBuilder cfg;
public ParallelCacheStartTest() {
cleanup = CleanupPhase.AFTER_METHOD;
}
protected void createCacheManagers() throws Throwable {
cm1 = addClusterEnabledCacheManager();
cfg = new ConfigurationBuilder();
cfg.clustering().cacheMode(CacheMode.REPL_SYNC)
.stateTransfer().fetchInMemoryState(false);
cm1.defineConfiguration("cache1", cfg.build());
cm1.defineConfiguration("cache2", cfg.build());
}
public void testParallelStartup() throws Exception {
// start both caches in parallel
cm1.startCaches("cache1", "cache2");
List memb1 = cm1.getMembers();
assert 1 == memb1.size() : "Expected 1 member; was " + memb1;
Object coord = memb1.get(0);
cm2 = addClusterEnabledCacheManager();
cm2.defineConfiguration("cache1", cfg.build());
cm2.defineConfiguration("cache2", cfg.build());
// again start both caches in parallel
cm2.startCaches("cache1", "cache2");
TestingUtil.blockUntilViewsReceived(50000, true, cm1, cm2);
memb1 = cm1.getMembers();
List memb2 = cm2.getMembers();
assert 2 == memb1.size();
assert memb1.equals(memb2);
cm1.stop();
TestingUtil.blockUntilViewsReceived(50000, false, cm2);
memb2 = cm2.getMembers();
assert 1 == memb2.size();
assert !coord.equals(memb2.get(0));
}
}
| 1,949
| 31.5
| 71
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/DistributedOptimisticRepeatableReadIsolationTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
/**
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "api.DistributedOptimisticRepeatableReadIsolationTest")
public class DistributedOptimisticRepeatableReadIsolationTest extends AbstractRepeatableReadIsolationTest {
   // Runs the shared repeatable-read isolation suite against a distributed
   // synchronous cache using optimistic transaction locking.
   public DistributedOptimisticRepeatableReadIsolationTest() {
      super(CacheMode.DIST_SYNC, LockingMode.OPTIMISTIC);
   }
}
| 544
| 29.277778
| 107
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/BaseCacheAPIOptimisticTest.java
|
package org.infinispan.api;
import java.util.function.BiConsumer;
import org.infinispan.Cache;
import org.infinispan.container.entries.CacheEntry;
import org.testng.annotations.Test;
/**
* @author wburns
* @since 9.1
*/
public abstract class BaseCacheAPIOptimisticTest extends CacheAPITest {
   // Each override re-runs the inherited locked-stream test but expects an
   // UnsupportedOperationException instead of success. NOTE(review): presumably
   // lockedStream() is unsupported for the cache configuration this base class
   // represents (optimistic transactions, per the class name) — confirm in CacheAPITest.
   @Test(dataProvider = "lockedStreamActuallyLocks", expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamActuallyLocks(BiConsumer<Cache<Object, Object>, CacheEntry<Object, Object>> consumer,
                                             boolean forEachOrInvokeAll) throws Throwable {
      super.testLockedStreamActuallyLocks(consumer, forEachOrInvokeAll);
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamSetValue() {
      super.testLockedStreamSetValue();
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamWithinLockedStream() {
      super.testLockedStreamWithinLockedStream();
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamInvokeAllFilteredSet() {
      super.testLockedStreamInvokeAllFilteredSet();
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamInvokeAllPut() {
      super.testLockedStreamInvokeAllPut();
   }
}
| 1,407
| 30.288889
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConcurrentOperationsTest.java
|
package org.infinispan.api;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.2
*/
@Test(groups = "functional", testName = "api.ConcurrentOperationsTest")
public class ConcurrentOperationsTest extends MultipleCacheManagersTest {
   protected final int threads;      // number of concurrent writer threads
   protected final int nodes;        // cluster size
   protected final int operations;   // iterations each thread performs
   protected final CacheMode cacheMode;

   protected ConcurrentOperationsTest(CacheMode cacheMode, int threads, int nodes, int operations) {
      this.cacheMode = cacheMode;
      this.threads = threads;
      this.nodes = nodes;
      this.operations = operations;
   }

   public ConcurrentOperationsTest() {
      this(CacheMode.DIST_SYNC, 2, 2, 4);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      // L1 is disabled so reads always reflect the owners' state directly.
      ConfigurationBuilder dcc = getDefaultClusteredCacheConfig(cacheMode, false);
      dcc.clustering().l1().disable();
      createClusteredCaches(nodes, dcc);
   }

   public void testNoTimeout() throws Throwable {
      runTest(false);
   }

   public void testNoTimeoutAndCorrectness() throws Throwable {
      runTest(true);
   }

   /**
    * Forks {@code threads} workers that hammer the single key "k" with random
    * operations in lockstep: all threads execute one operation between two barrier
    * crossings, then (optionally) verify that every node agrees on the value of "k".
    *
    * @param checkCorrectness when true, after each round compare the value seen by
    *                         every node against the value read from cache 0
    */
   private void runTest(final boolean checkCorrectness) throws Throwable {
      final CyclicBarrier barrier = new CyclicBarrier(threads);
      final Random rnd = new Random();
      final AtomicBoolean correctness = new AtomicBoolean(Boolean.TRUE);
      List<Future<Boolean>> result = new ArrayList<Future<Boolean>>();
      for (int t = 0; t < threads; t++) {
         final int part = t;
         Future<Boolean> f = fork(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
               try {
                  for (int i = 0; i < operations; i++) {
                     // First barrier: everyone mutates concurrently.
                     barrier();
                     executeOperation(i);
                     // Second barrier: mutations done, now verify quiescent state.
                     barrier();
                     checkCorrectness(i);
                     printProgress(i);
                     if (!correctness.get()) break;
                  }
               } catch (Throwable t) {
                  // Any failure (including barrier timeout) marks the run incorrect.
                  correctness.set(false);
                  throw new Exception(t);
               }
               return correctness.get();
            }
            private void printProgress(int i) {
               if (i % 100 == 0) print("Progressing = " + i);
            }
            // Performs one random write operation (put/remove/putIfAbsent/replace)
            // on the shared key from a randomly chosen node.
            private void executeOperation(int iteration) {
               // NOTE(review): nextInt(nodes - 1) can never select the last node
               // (with nodes == 2 it always picks node 0) — possibly intended
               // rnd.nextInt(nodes); confirm before changing.
               int node = rnd.nextInt(nodes - 1);
               switch (rnd.nextInt(4)) {
                  case 0: {
                     cache(node).put("k", "v_" + part + "_" + iteration);
                     break;
                  }
                  case 1: {
                     cache(node).remove("k");
                     break;
                  }
                  case 2: {
                     cache(node).putIfAbsent("k", "v" + part);
                     break;
                  }
                  case 3: {
                     cache(node).replace("k", "v" + part);
                     break;
                  }
                  default:
                     throw new IllegalStateException();
               }
            }
            // Verifies that the two write owners hold the same value and that every
            // node reads the same value as cache 0. Runs between barriers, so no
            // mutations are in flight.
            private void checkCorrectness(int i) {
               if (checkCorrectness) {
                  log.tracef("Checking correctness for iteration %s", i);
                  print("Checking correctness");
                  List<Address> owners = cacheTopology(0).getDistribution("k").writeOwners();
                  if (!checkOwners(owners)) {
                     correctness.set(false);
                  }
                  for (int q = 0; q < nodes; q++) {
                     print(q, cache(0).get("k"));
                  }
                  Object expectedValue = cache(0).get("k");
                  log.tracef("Original value read from cache 0 is %s", expectedValue);
                  for (int j = 0; j < nodes; j++) {
                     Object actualValue = cache(j).get("k");
                     boolean areEquals = expectedValue == null ? actualValue == null : expectedValue.equals(actualValue);
                     print("Are " + actualValue + " and " + expectedValue + " equals ? " + areEquals);
                     if (!areEquals) {
                        correctness.set(false);
                        print("Consistency error. On cache 0 we had " + expectedValue + " and on " + j + " we had " + actualValue);
                        log.trace("Consistency error. On cache 0 we had " + expectedValue + " and on " + j + " we had " + actualValue);
                     }
                  }
               }
            }
            // 10s timeout so a stuck thread fails the test instead of hanging it.
            private void barrier() throws BrokenBarrierException, java.util.concurrent.TimeoutException, InterruptedException {
               barrier.await(10000, TimeUnit.MILLISECONDS);
               log.tracef("Just passed barrier.");
            }
         });
         result.add(f);
      }
      // Propagate any worker failure; every worker must report a correct run.
      for (Future<Boolean> f: result) {
         assertTrue(f.get());
      }
   }

   // Compares the entry stored in the data containers of the two write owners.
   protected boolean checkOwners(List<Address> owners) {
      assert owners.size() == 2;
      InternalCacheEntry entry0 = advancedCache(owners.get(0)).getDataContainer().get("k");
      InternalCacheEntry entry1 = advancedCache(owners.get(1)).getDataContainer().get("k");
      return checkOwnerEntries(entry0, entry1, owners.get(0), owners.get(1));
   }

   // Returns true when primary and backup owner hold equal values (both-null counts
   // as equal); logs a consistency error otherwise.
   protected boolean checkOwnerEntries(InternalCacheEntry entry0, InternalCacheEntry entry1, Address mainOwner, Address backupOwner) {
      Object mainOwnerValue = entry0 == null ? null : entry0.getValue();
      Object otherOwnerValue = entry1 == null ? null : entry1.getValue();
      log.tracef("Main owner value is %s, other Owner Value is %s", mainOwnerValue, otherOwnerValue);
      boolean equals = mainOwnerValue == null? otherOwnerValue == null : mainOwnerValue.equals(otherOwnerValue);
      if (!equals) {
         print("Consistency error. On main owner(" + mainOwner + ") we had " +
               mainOwnerValue + " and on backup owner(" + backupOwner + ") we had " + otherOwnerValue);
         log.trace("Consistency error. On main owner(" + mainOwner + ") we had " +
               mainOwnerValue + " and on backup owner(" + backupOwner + ") we had " + otherOwnerValue);
         return false;
      }
      print("otherOwnerValue = " + otherOwnerValue);
      print("mainOwnerValue = " + mainOwnerValue);
      return true;
   }

   // Resolves a cluster address to the local AdvancedCache running on that node.
   private AdvancedCache advancedCache(Address address) {
      for (Cache c : caches()) {
         if (c.getAdvancedCache().getRpcManager().getAddress().equals(address))
            return c.getAdvancedCache();
      }
      throw new IllegalStateException("Couldn't find cache for address : " + address);
   }

   private void print(int index, Object value) {
      print("[" + Thread.currentThread().getName() + "] Cache " + index + " sees value " + value);
   }

   private void print(Object value) {
      log.debug(value);
   }

   // Sanity check for replace()/conditional replace() outside the concurrent harness.
   public void testReplace() {
      cache(0).put("k", "v1");
      for (int i = 0; i < nodes; i++) {
         assertEquals("v1", cache(i).get("k"));
      }
      assert cache(0).replace("k", "v2") != null;
      assert cache(0).replace("k", "v2", "v3");
      assertEquals(cache(0).get("k"), "v3");
   }
}
| 7,988
| 36.15814
| 135
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/APINonTxOffHeapTest.java
|
package org.infinispan.api;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.data.Key;
import org.testng.annotations.Factory;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.APINonTxOffHeapTest")
public class APINonTxOffHeapTest extends APINonTxTest {
   // Re-runs the non-transactional cache API suite with BINARY and OFF_HEAP storage.
   private StorageType storageType;

   // Fluent parameter setter used by the @Factory below.
   public APINonTxOffHeapTest storageType(StorageType storageType) {
      this.storageType = storageType;
      return this;
   }

   @Override
   protected String parameters() {
      return "[" + storageType + "]";
   }

   @Factory
   public Object[] factory() {
      return new Object[]{
            new APINonTxOffHeapTest().storageType(StorageType.BINARY),
            new APINonTxOffHeapTest().storageType(StorageType.OFF_HEAP)
      };
   }

   @Override
   protected void configure(ConfigurationBuilder builder) {
      builder.memory().storageType(storageType);
   }

   // Removing via keySet()/values()/entrySet() views must write through to the cache.
   @Test
   public void testRemoveMethodOfKeyValueEntryCollections() {
      final String key1 = "1", value1 = "one", key2 = "2", value2 = "two", key3 = "3", value3 = "three";
      Map<String, String> m = new HashMap<>();
      m.put(key1, value1);
      m.put(key2, value2);
      m.put(key3, value3);
      cache.putAll(m);
      Set<Object> keys = cache.keySet();
      keys.remove(key1);
      assertCacheSize(2);
      Collection<Object> values = cache.values();
      values.remove(value2);
      assertCacheSize(1);
      Set<Map.Entry<Object, Object>> entries = cache.entrySet();
      entries.remove(TestingUtil.<Object, Object>createMapEntry(key3, value3));
      assertCacheIsEmpty();
   }

   public void testGetOrDefault() {
      cache.put("A", "B");
      assertEquals("K", cache.getOrDefault("Not there", "K"));
   }

   // merge(): replace, remove-on-null, put-if-absent, and remap-function exception paths.
   public void testMerge() {
      cache.put("A", "B");
      // replace
      cache.merge("A", "C", (oldValue, newValue) -> "" + oldValue + newValue);
      assertEquals("BC", cache.get("A"));
      // remove if null value after remapping
      cache.merge("A", "C", (oldValue, newValue) -> null);
      assertEquals(null, cache.get("A"));
      // put if absent
      cache.merge("F", "42", (oldValue, newValue) -> "" + oldValue + newValue);
      assertEquals("42", cache.get("F"));
      cache.put("A", "B");
      RuntimeException mergeRaisedException = new RuntimeException("hi there");
      // An exception thrown by the remapping function must propagate to the caller.
      expectException(RuntimeException.class, "hi there", () -> cache.merge("A", "C", (k, v) -> {
         throw mergeRaisedException;
      }));
   }

   @Test
   public void testForEach() {
      cache.put("A", "B");
      cache.put("C", "D");
      List<String> values = new ArrayList<>();
      BiConsumer<? super Object, ? super Object> collectKeyValues = (k, v) -> values.add("hello_" + k.toString() + v.toString());
      cache.forEach(collectKeyValues);
      assertEquals(2, values.size());
      //iteration order is not guaranteed, checking just that value is present
      assertTrue(values.contains("hello_AB"));
      assertTrue(values.contains("hello_CD"));
   }

   // compute(): remap existing key, absent key, remove-on-null, and exception paths.
   public void testCompute() {
      BiFunction<Object, Object, String> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
      cache.put("es", "hola");
      assertEquals("hello_es:hola", cache.compute("es", mappingFunction));
      assertEquals("hello_es:hola", cache.get("es"));
      BiFunction<Object, Object, String> mappingForNotPresentKey = (k, v) -> "absent_" + k + ":" + v;
      assertEquals("absent_fr:null", cache.compute("fr", mappingForNotPresentKey));
      assertEquals("absent_fr:null", cache.get("fr"));
      BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
      assertNull(cache.compute("es", mappingToNull), "mapping to null returns null");
      assertNull(cache.get("es"), "the key is removed");
      int cacheSizeBeforeNullValueCompute = cache.size();
      assertNull(cache.compute("eus", mappingToNull), "mapping to null returns null");
      assertNull(cache.get("eus"), "the key does not exist");
      // computing null on an absent key must not change the cache size
      assertEquals(cacheSizeBeforeNullValueCompute, cache.size());
      RuntimeException computeRaisedException = new RuntimeException("hi there");
      BiFunction<Object, Object, String> mappingToException = (k, v) -> {
         throw computeRaisedException;
      };
      expectException(RuntimeException.class, "hi there", () -> cache.compute("es", mappingToException));
   }

   @Test
   public void testReplaceAll() {
      BiFunction<Object, Object, Object> mappingFunction = (k, v) -> "hello_" + k + ":" + v;
      cache.put("es", "hola");
      cache.put("cz", "ahoj");
      cache.replaceAll(mappingFunction);
      assertEquals("hello_es:hola", cache.get("es"));
      assertEquals("hello_cz:ahoj", cache.get("cz"));
      BiFunction<Object, Object, String> mappingToNull = (k, v) -> null;
      // replaceAll rejects null mappings and must leave existing values untouched
      expectException(NullPointerException.class, () -> cache.replaceAll(mappingToNull));
      assertEquals("hello_es:hola", cache.get("es"));
      assertEquals("hello_cz:ahoj", cache.get("cz"));
   }

   // Non-String keys must round-trip through the configured (off-heap/binary) storage.
   public void testCustomObjectKey() {
      Key ck = new Key("a");
      assertNull(cache.get(ck));
      cache.put(ck, "blah");
      assertEquals("blah", cache.get(ck));
   }

   // The locked-stream tests below are disabled for this storage mode (ISPN-8354).
   @Test(enabled = false) // ISPN-8354
   @Override
   public void testLockedStreamSetValue() {
      super.testLockedStreamSetValue();
   }

   @Test(enabled = false) // ISPN-8354
   @Override
   public void testLockedStreamWithinLockedStream() {
      super.testLockedStreamWithinLockedStream();
   }

   @Test(enabled = false) // ISPN-8354
   @Override
   public void testLockedStreamActuallyLocks(BiConsumer<Cache<Object, Object>, CacheEntry<Object, Object>> consumer, boolean forEachOrInvokeAll) throws Throwable {
      super.testLockedStreamActuallyLocks(consumer, forEachOrInvokeAll);
   }

   @Test(enabled = false) // ISPN-8354
   @Override
   public void testLockedStreamInvokeAllFilteredSet() {
      super.testLockedStreamInvokeAllFilteredSet();
   }

   @Test(enabled = false) // ISPN-8354
   @Override
   public void testLockedStreamInvokeAllPut() {
      super.testLockedStreamInvokeAllPut();
   }

   // Asserts the expected size through every view of the cache plus the isEmpty() variants.
   private void assertCacheSize(int expectedSize) {
      assertEquals(expectedSize, cache.size());
      assertEquals(expectedSize, cache.keySet().size());
      assertEquals(expectedSize, cache.values().size());
      assertEquals(expectedSize, cache.entrySet().size());
      boolean isEmpty = expectedSize == 0;
      assertEquals(isEmpty, cache.isEmpty());
      assertEquals(isEmpty, cache.keySet().isEmpty());
      assertEquals(isEmpty, cache.values().isEmpty());
      assertEquals(isEmpty, cache.entrySet().isEmpty());
   }

   private void assertCacheIsEmpty() {
      assertCacheSize(0);
   }
}
| 7,346
| 32.244344
| 163
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ReplicatedPessimisticRepeatableReadIsolationTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
/**
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "api.ReplicatedPessimisticRepeatableReadIsolationTest")
public class ReplicatedPessimisticRepeatableReadIsolationTest extends AbstractRepeatableReadIsolationTest {
   // Runs the shared repeatable-read isolation suite against a replicated
   // synchronous cache using pessimistic transaction locking.
   public ReplicatedPessimisticRepeatableReadIsolationTest() {
      super(CacheMode.REPL_SYNC, LockingMode.PESSIMISTIC);
   }
}
| 545
| 29.333333
| 107
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/MetadataAPITest.java
|
package org.infinispan.api;
import static org.infinispan.container.versioning.InequalVersionComparisonResult.EQUAL;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.functional.MetaParam;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.container.versioning.NumericVersion;
import org.infinispan.context.Flag;
import org.infinispan.functional.impl.FunctionalMapImpl;
import org.infinispan.functional.impl.WriteOnlyMapImpl;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* Tests cache API methods that take {@link Metadata} as parameter.
*
* @author Galder Zamarreño
* @since 5.3
*/
@Test(groups = "functional", testName = "api.MetadataAPITest")
public class MetadataAPITest extends SingleCacheManagerTest {
   AdvancedCache<Integer, String> advCache;

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(false);
      advCache = cm.<Integer, String>getCache().getAdvancedCache();
      return cm;
   }

   // put() must store the version supplied via Metadata.
   public void testPutWithVersion() {
      final Integer key = 1;
      NumericVersion version = new NumericVersion(1);
      advCache.put(key, "v1", withVersion(version));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(EQUAL, version.compareTo(cacheEntry.getMetadata().version()));
   }

   // Conditional replace must install the new metadata's version on success.
   public void testConditionalReplaceWithVersion() {
      final Integer key = 2;
      NumericVersion version = new NumericVersion(1);
      advCache.put(key, "v1", withVersion(version));
      NumericVersion newVersion = new NumericVersion(2);
      advCache.replace(key, "v1", "v2", withVersion(newVersion));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(EQUAL, newVersion.compareTo(cacheEntry.getMetadata().version()));
   }

   public void testPutIfAbsentWithVersion() {
      final Integer key = 3;
      NumericVersion version = new NumericVersion(1);
      // assertNull instead of assertEquals(null, ...) — clearer intent, same check.
      assertNull(advCache.putIfAbsent(key, "v1", withVersion(version)));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(EQUAL, version.compareTo(cacheEntry.getMetadata().version()));
   }

   public void testPutAsyncWithVersion() throws Exception {
      final Integer key = 4;
      NumericVersion version = new NumericVersion(1);
      Future<String> f = advCache.putAsync(key, "v1", withVersion(version));
      assertNotNull(f);
      assertFalse(f.isCancelled());
      assertNull(f.get());
      assertTrue(f.isDone());
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals("v1", entry.getValue());
      assertEquals(EQUAL, version.compareTo(entry.getMetadata().version()));
   }

   // put() must store the lifespan supplied via Metadata.
   public void testPutWithLifespan() {
      final Integer key = 1;
      // long for consistency with the other lifespan tests (Metadata.lifespan() is long).
      long lifespan = 1_000_000;
      advCache.put(key, "v1", withLifespan(lifespan));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(lifespan, cacheEntry.getMetadata().lifespan());
   }

   public void testConditionalReplaceWithLifespan() {
      final Integer key = 2;
      long lifespan = 1_000_000;
      advCache.put(key, "v1", withLifespan(lifespan));
      long newLifespan = 2_000_000;
      advCache.replace(key, "v1", "v2", withLifespan(newLifespan));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(newLifespan, cacheEntry.getMetadata().lifespan());
   }

   public void testPutIfAbsentWithLifespan() {
      final Integer key = 3;
      long lifespan = 1_000_000;
      assertNull(advCache.putIfAbsent(key, "v1", withLifespan(lifespan)));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(lifespan, cacheEntry.getMetadata().lifespan());
   }

   public void testPutAsyncWithLifespan() throws Exception {
      final Integer key = 4;
      long lifespan = 1_000_000;
      Future<String> f = advCache.putAsync(key, "v1", withLifespan(lifespan));
      assertNotNull(f);
      assertFalse(f.isCancelled());
      assertNull(f.get());
      assertTrue(f.isDone());
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals("v1", entry.getValue());
      assertEquals(lifespan, entry.getMetadata().lifespan());
   }

   // Functional (WriteOnlyMap) writes must honour MetaLifespan on insert.
   public void testPutFunctionalWithLifespan() throws Exception {
      final Integer key = 4;
      long lifespan = 1_000_000;
      CompletableFuture<Void> f = WriteOnlyMapImpl.create(FunctionalMapImpl.create(advCache))
            .eval(key, view -> view.set("v1", new MetaParam.MetaLifespan(lifespan)));
      assertNotNull(f);
      assertFalse(f.isCancelled());
      assertNull(f.get());
      assertTrue(f.isDone());
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals("v1", entry.getValue());
      assertEquals(lifespan, entry.getMetadata().lifespan());
   }

   // Functional writes must honour MetaLifespan when overwriting a functional entry.
   public void testReplaceFunctionalWithLifespan() throws Exception {
      final Integer key = 4;
      long lifespan = 1_000_000;
      CompletableFuture<Void> f = WriteOnlyMapImpl.create(FunctionalMapImpl.create(advCache))
            .eval(key, view -> view.set("v1", new MetaParam.MetaLifespan(lifespan)));
      assertNotNull(f);
      assertFalse(f.isCancelled());
      assertNull(f.get());
      assertTrue(f.isDone());
      long newLifespan = 2_000_000;
      f = WriteOnlyMapImpl.create(FunctionalMapImpl.create(advCache))
            .eval(key, view -> view.set("v2", new MetaParam.MetaLifespan(newLifespan)));
      assertNotNull(f);
      assertFalse(f.isCancelled());
      assertNull(f.get());
      assertTrue(f.isDone());
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals("v2", entry.getValue());
      assertEquals(newLifespan, entry.getMetadata().lifespan());
   }

   /**
    * Functional write over an entry created through the plain embedded API.
    * See ISPN-6773.
    */
   public void testReplaceEmbeddedFunctionalWithLifespan() throws Exception {
      final Integer key = 4;
      long lifespan = 1_000_000;
      advCache.put(key, "v1", withLifespan(lifespan));
      long newLifespan = 2_000_000;
      CompletableFuture<Void> f = WriteOnlyMapImpl.create(FunctionalMapImpl.create(advCache))
            .eval(key, view -> view.set("v2", new MetaParam.MetaLifespan(newLifespan)));
      assertNotNull(f);
      assertFalse(f.isCancelled());
      assertNull(f.get());
      assertTrue(f.isDone());
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals("v2", entry.getValue());
      assertEquals(newLifespan, entry.getMetadata().lifespan());
   }

   // A custom Metadata implementation must be returned as-is for mortal entries.
   public void testGetCustomMetadataForMortalEntries() throws Exception {
      final Integer key = 5;
      Metadata meta = new CustomMetadata(3000, -1);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
   }

   public void testGetCustomMetadataForTransientEntries() throws Exception {
      final Integer key = 6;
      Metadata meta = new CustomMetadata(-1, 3000);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
   }

   public void testGetCustomMetadataForTransientMortalEntries() throws Exception {
      final Integer key = 6;
      Metadata meta = new CustomMetadata(3000, 3000);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
   }

   public void testReplaceWithVersion() {
      final Integer key = 7;
      NumericVersion version = new NumericVersion(1);
      advCache.put(key, "v1", withVersion(version));
      NumericVersion newVersion = new NumericVersion(2);
      advCache.replace(key, "v2", withVersion(newVersion));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(EQUAL, newVersion.compareTo(cacheEntry.getMetadata().version()));
   }

   // Overwriting an entry must fully replace its custom metadata (all mortality combos).
   public void testOverrideImmortalCustomMetadata() {
      final Integer key = 8;
      Metadata meta = new CustomMetadata(-1, -1);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
      Metadata newMeta = new CustomMetadata(120000, 60000);
      advCache.put(key, "v2", newMeta);
      assertEquals(newMeta, advCache.getCacheEntry(key).getMetadata());
   }

   public void testOverrideMortalCustomMetadata() {
      final Integer key = 9;
      Metadata meta = new CustomMetadata(120000, -1);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
      Metadata newMeta = new CustomMetadata(240000, -1);
      advCache.put(key, "v2", newMeta);
      assertEquals(newMeta, advCache.getCacheEntry(key).getMetadata());
   }

   public void testOverrideTransientCustomMetadata() {
      final Integer key = 10;
      Metadata meta = new CustomMetadata(-1, 120000);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
      Metadata newMeta = new CustomMetadata(-1, 240000);
      advCache.put(key, "v2", newMeta);
      assertEquals(newMeta, advCache.getCacheEntry(key).getMetadata());
   }

   public void testOverrideTransientMortalCustomMetadata() {
      final Integer key = 10;
      Metadata meta = new CustomMetadata(60000, 120000);
      advCache.put(key, "v1", meta);
      CacheEntry entry = advCache.getCacheEntry(key);
      assertEquals(meta, entry.getMetadata());
      Metadata newMeta = new CustomMetadata(120000, 240000);
      advCache.put(key, "v2", newMeta);
      assertEquals(newMeta, advCache.getCacheEntry(key).getMetadata());
   }

   public void testPutForExternalReadWithVersion() {
      final Integer key = 11;
      NumericVersion version = new NumericVersion(1);
      advCache.putForExternalRead(key, "v1", withVersion(version));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(version, cacheEntry.getMetadata().version());
   }

   public void testPutForExternalReadInDecaratedCacheWithVersion() {
      final Integer key = 12;
      NumericVersion version = new NumericVersion(1);
      // Flag forces decorated cache, but doesn't affect processing
      AdvancedCache<Integer, String> decoratedCache = advCache.withFlags(Flag.SKIP_STATISTICS);
      decoratedCache.putForExternalRead(key, "v1", withVersion(version));
      CacheEntry cacheEntry = decoratedCache.getCacheEntry(key);
      assertEquals(version, cacheEntry.getMetadata().version());
   }

   public void testPutForExternalReadWithLifespan() {
      final Integer key = 11;
      long lifespan = 1_000_000;
      advCache.putForExternalRead(key, "v1", withLifespan(lifespan));
      CacheEntry cacheEntry = advCache.getCacheEntry(key);
      assertEquals(lifespan, cacheEntry.getMetadata().lifespan());
   }

   public void testPutForExternalReadInDecaratedCacheWithLifespan() {
      final Integer key = 12;
      long lifespan = 1_000_000;
      // Flag forces decorated cache, but doesn't affect processing
      AdvancedCache<Integer, String> decoratedCache = advCache.withFlags(Flag.SKIP_STATISTICS);
      decoratedCache.putForExternalRead(key, "v1", withLifespan(lifespan));
      CacheEntry cacheEntry = decoratedCache.getCacheEntry(key);
      assertEquals(lifespan, cacheEntry.getMetadata().lifespan());
   }

   // Builds Metadata carrying only a version.
   private Metadata withVersion(EntryVersion version) {
      return new EmbeddedMetadata.Builder().version(version).build();
   }

   // Builds Metadata carrying only a lifespan (in millis).
   private Metadata withLifespan(long lifespan) {
      return new EmbeddedMetadata.Builder().lifespan(lifespan).build();
   }

   /**
    * Minimal custom {@link Metadata} implementation that acts as its own
    * {@link Metadata.Builder}, used to verify that the cache preserves custom
    * metadata instances verbatim. Made {@code static} — it never touches the
    * enclosing test instance, so it should not carry a hidden outer reference.
    */
   private static class CustomMetadata implements Metadata, Metadata.Builder {
      private final long lifespan;
      private final long maxIdle;

      private CustomMetadata(long lifespan, long maxIdle) {
         this.lifespan = lifespan;
         this.maxIdle = maxIdle;
      }

      @Override
      public long lifespan() {
         return lifespan;
      }

      @Override
      public long maxIdle() {
         return maxIdle;
      }

      @Override
      public EntryVersion version() {
         return null; // ignore
      }

      @Override
      public Builder builder() {
         return this; // ignore
      }

      @Override
      public Builder lifespan(long time, TimeUnit unit) {
         return new CustomMetadata(unit.toMillis(time), maxIdle);
      }

      @Override
      public Builder lifespan(long time) {
         return lifespan(time, TimeUnit.MILLISECONDS);
      }

      @Override
      public Builder maxIdle(long time, TimeUnit unit) {
         return new CustomMetadata(lifespan, unit.toMillis(time));
      }

      @Override
      public Builder maxIdle(long time) {
         return maxIdle(time, TimeUnit.MILLISECONDS);
      }

      @Override
      public Builder version(EntryVersion version) {
         return this;
      }

      @Override
      public Metadata build() {
         return this;
      }

      @Override
      public Builder merge(Metadata metadata) {
         return this;
      }

      @Override
      public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         CustomMetadata that = (CustomMetadata) o;
         return lifespan == that.lifespan && maxIdle == that.maxIdle;
      }

      @Override
      public int hashCode() {
         int result = (int) (lifespan ^ (lifespan >>> 32));
         result = 31 * result + (int) (maxIdle ^ (maxIdle >>> 32));
         return result;
      }

      @Override
      public String toString() {
         return "CustomMetadata{" +
               "lifespan=" + lifespan +
               ", maxIdle=" + maxIdle +
               '}';
      }
   }
}
| 14,602
| 35.416459
| 95
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/MetadataAPIDefaultExpiryTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.util.ControlledTimeService;
import org.infinispan.commons.time.TimeService;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.MetadataAPIDefaultExpiryTest")
public class MetadataAPIDefaultExpiryTest extends SingleCacheManagerTest {

   public static final int EXPIRATION_TIMEOUT = 1000;

   // Replaces the cache's clock so expiration can be driven deterministically.
   private final ControlledTimeService controlledTimeService = new ControlledTimeService();

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      // Every entry expires after EXPIRATION_TIMEOUT ms unless overridden.
      ConfigurationBuilder cfg = new ConfigurationBuilder();
      cfg.expiration().lifespan(EXPIRATION_TIMEOUT);
      EmbeddedCacheManager cacheManager = TestCacheManagerFactory.createCacheManager(cfg);
      TestingUtil.replaceComponent(cacheManager, TimeService.class, controlledTimeService, true);
      return cacheManager;
   }

   // Each test writes once through the plain API and once through the advanced
   // API with empty metadata; both writes must pick up the default lifespan.

   public void testDefaultLifespanPut() {
      cache().put(1, "v1");
      expectCachedThenExpired(1, "v1");
      cache().getAdvancedCache().put(2, "v2", new EmbeddedMetadata.Builder().build());
      expectCachedThenExpired(2, "v2");
   }

   public void testDefaultLifespanReplace() {
      cache().put(1, "v1");
      cache().replace(1, "v11");
      expectCachedThenExpired(1, "v11");
      cache().getAdvancedCache().put(2, "v2", new EmbeddedMetadata.Builder().build());
      cache().getAdvancedCache().replace(2, "v22", new EmbeddedMetadata.Builder().build());
      expectCachedThenExpired(2, "v22");
   }

   public void testDefaultLifespanReplaceWithOldValue() {
      cache().put(1, "v1");
      cache().replace(1, "v1", "v11");
      expectCachedThenExpired(1, "v11");
      cache().getAdvancedCache().put(2, "v2", new EmbeddedMetadata.Builder().build());
      cache().getAdvancedCache().replace(2, "v2", "v22", new EmbeddedMetadata.Builder().build());
      expectCachedThenExpired(2, "v22");
   }

   public void testDefaultLifespanPutIfAbsent() {
      cache().putIfAbsent(1, "v1");
      expectCachedThenExpired(1, "v1");
      cache().getAdvancedCache().putIfAbsent(2, "v2", new EmbeddedMetadata.Builder().build());
      expectCachedThenExpired(2, "v2");
   }

   public void testDefaultLifespanPutForExternalRead() {
      cache().putForExternalRead(1, "v1");
      expectCachedThenExpired(1, "v1");
      cache().getAdvancedCache().putForExternalRead(2, "v2", new EmbeddedMetadata.Builder().build());
      expectCachedThenExpired(2, "v2");
   }

   public void testDefaultLifespanPutAsync() throws Exception {
      CompletableFuture<Object> future = cache().putAsync(1, "v1");
      future.get(10, TimeUnit.SECONDS);
      expectCachedThenExpired(1, "v1");
      future = cache().getAdvancedCache().putAsync(2, "v2", new EmbeddedMetadata.Builder().build());
      future.get(10, TimeUnit.SECONDS);
      expectCachedThenExpired(2, "v2");
   }

   private void expectCachedThenExpired(Integer key, String value) {
      // Just before the deadline the entry must still be readable...
      controlledTimeService.advance(EXPIRATION_TIMEOUT - 1);
      String cached = this.<Integer, String>cache().get(key);
      assertEquals(value, cached);
      // ...and just past it the entry must be gone.
      controlledTimeService.advance(2);
      assertNull(cache.get(key));
   }
}
| 3,712
| 38.5
| 101
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentOptimisticStressTest.java
|
package org.infinispan.api;
import java.util.List;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @author William Burns
* @since 7.0
*/
@Test(groups = "stress", testName = "api.ConditionalOperationsConcurrentOptimisticStressTest")
public class ConditionalOperationsConcurrentOptimisticStressTest extends ConditionalOperationsConcurrentStressTest {

   /** Runs the parent stress scenarios on transactional DIST_SYNC caches. */
   public ConditionalOperationsConcurrentOptimisticStressTest() {
      cacheMode = CacheMode.DIST_SYNC;
      transactional = true;
   }

   // The operations are constructed with cas == false because under optimistic
   // transactions more than one concurrent attempt may succeed.

   @Override
   public void testReplace() throws Exception {
      testOnCaches(caches(null), new ReplaceOperation(false));
   }

   @Override
   public void testConditionalRemove() throws Exception {
      testOnCaches(caches(null), new ConditionalRemoveOperation(false));
   }

   @Override
   public void testPutIfAbsent() throws Exception {
      testOnCaches(caches(null), new PutIfAbsentOperation(false));
   }
}
| 1,071
| 26.487179
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/RepeatableReadRemoteGetCountTest.java
|
package org.infinispan.api;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.distribution.MagicKey;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.CountingRpcManager;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
* @author Pedro Ruivo
* @since 6.0
*/
// Verifies how many remote gets a repeatable-read transaction performs on a
// non-owner node: the first read fetches the entry from the owner exactly once
// and every later operation in the same transaction reuses that cached copy.
@Test(groups = "functional", testName = "api.RepeatableReadRemoteGetCountTest")
public class RepeatableReadRemoteGetCountTest extends MultipleCacheManagersTest {
   @Override
   protected void createCacheManagers() throws Throwable {
      // Two transactional DIST_SYNC nodes, repeatable read, a single owner per
      // key — so cache(1) is never an owner of a key pinned to cache(0).
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      builder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      builder.clustering().hash().numOwners(1);
      createClusteredCaches(2, TestDataSCI.INSTANCE, builder);
   }
   public void testOnKeyInitialized() throws Exception {
      doTest(true);
   }
   public void testOnKeyNonInitialized() throws Exception {
      doTest(false);
   }
   public void testWithoutReading() throws Exception {
      // With IGNORE_RETURN_VALUES on the first write, no remote get should be
      // issued at any point in the transaction.
      final Object key = new MagicKey("key", cache(0));
      final Cache<Object, Object> cache = cache(1);
      final TransactionManager tm = tm(1);
      final CountingRpcManager rpcManager = replaceRpcManager(cache);
      cache(0).put(key, "v0");
      tm.begin();
      rpcManager.resetStats();
      cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES).put(key, "v1");
      AssertJUnit.assertEquals("Wrong number of gets after put.", 0, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong value read.", "v1", cache.get(key));
      AssertJUnit.assertEquals("Wrong number of gets after read.", 0, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong put return value.", "v1", cache.put(key, "v2"));
      AssertJUnit.assertEquals("Wrong number of gets after put.", 0, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong replace return value.", "v2", cache.replace(key, "v3"));
      AssertJUnit.assertEquals("Wrong number of gets after replace.", 0, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong conditional replace return value.", true, cache.replace(key, "v3", "v4"));
      AssertJUnit.assertEquals("Wrong number of gets after conditional replace.", 0, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong conditional remove return value.", true, cache.remove(key, "v4"));
      AssertJUnit.assertEquals("Wrong number of gets after conditional remove.", 0, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong conditional put return value.", null, cache.putIfAbsent(key, "v5"));
      AssertJUnit.assertEquals("Wrong number of gets after conditional put.", 0, rpcManager.clusterGet);
      tm.commit();
   }
   private void doTest(boolean initialized) throws Exception {
      // The first read triggers exactly one remote get; the count must then
      // stay at 1 for the rest of the transaction (repeatable read).
      final Object key = new MagicKey("key", cache(0));
      final Cache<Object, Object> cache = cache(1);
      final TransactionManager tm = tm(1);
      final CountingRpcManager rpcManager = replaceRpcManager(cache);
      if (initialized) {
         cache(0).put(key, "v1");
      }
      tm.begin();
      rpcManager.resetStats();
      AssertJUnit.assertEquals("Wrong value read.", initialized ? "v1" : null, cache.get(key));
      AssertJUnit.assertEquals("Wrong number of gets after read.", 1, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong put return value.", initialized ? "v1" : null, cache.put(key, "v2"));
      AssertJUnit.assertEquals("Wrong number of gets after put.", 1, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong replace return value.", "v2", cache.replace(key, "v3"));
      AssertJUnit.assertEquals("Wrong number of gets after replace.", 1, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong conditional replace return value.", true, cache.replace(key, "v3", "v4"));
      AssertJUnit.assertEquals("Wrong number of gets after conditional replace.", 1, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong conditional remove return value.", true, cache.remove(key, "v4"));
      AssertJUnit.assertEquals("Wrong number of gets after conditional remove.", 1, rpcManager.clusterGet);
      AssertJUnit.assertEquals("Wrong conditional put return value.", null, cache.putIfAbsent(key, "v5"));
      AssertJUnit.assertEquals("Wrong number of gets after conditional put.", 1, rpcManager.clusterGet);
      tm.commit();
   }
   private CountingRpcManager replaceRpcManager(Cache cache) {
      // Idempotent: reuses an already-installed CountingRpcManager, otherwise
      // wraps the current RpcManager so remote gets can be counted.
      RpcManager current = TestingUtil.extractComponent(cache, RpcManager.class);
      if (current instanceof CountingRpcManager) {
         return (CountingRpcManager) current;
      }
      CountingRpcManager countingRpcManager = new CountingRpcManager(current);
      TestingUtil.replaceComponent(cache, RpcManager.class, countingRpcManager, true);
      return countingRpcManager;
   }
}
| 5,260
| 44.353448
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.invoke.MethodHandles;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
* Verifies the atomic semantic of Infinispan's implementations of java.util.concurrent.ConcurrentMap'
* conditional operations.
*
* @author Sanne Grinovero <sanne@infinispan.org> (C) 2012 Red Hat Inc.
* @see java.util.concurrent.ConcurrentMap#replace(Object, Object, Object)
* @since 5.2
*/
@Test(groups = "functional", testName = "api.ConditionalOperationsConcurrentTest")
public class ConditionalOperationsConcurrentTest extends MultipleCacheManagersTest {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
   @Override
   public Object[] factory() {
      return new Object[] {
         new ConditionalOperationsConcurrentTest().cacheMode(CacheMode.DIST_SYNC),
      };
   }
   public ConditionalOperationsConcurrentTest() {
      this(2, 10, 2);
   }
   public ConditionalOperationsConcurrentTest(int nodes, int operations, int threads) {
      this.nodes = nodes;
      this.operations = operations;
      this.threads = threads;
      this.validMoves = generateValidMoves();
   }
   // Cluster size, number of distinct target values, and writer-thread count.
   protected final int nodes;
   protected final int operations;
   protected final int threads;
   // Every thread competes on this single key.
   private static final String SHARED_KEY = "thisIsTheKeyForConcurrentAccess";
   private final String[] validMoves;
   // Set once by the first failing thread; failureMessage records its reason.
   private final AtomicBoolean failed = new AtomicBoolean(false);
   // Cooperative shutdown flag so remaining workers stop waiting at the barrier.
   private final AtomicBoolean quit = new AtomicBoolean(false);
   private final AtomicInteger liveWorkers = new AtomicInteger();
   private volatile String failureMessage = "";
   // Subclasses tweak these before createCacheManagers() runs.
   protected boolean transactional = false;
   protected LockingMode lockingMode = LockingMode.OPTIMISTIC;
   protected boolean writeSkewCheck = false;
   @BeforeMethod
   public void init() {
      // Reset shared state so each test method starts from a clean slate.
      failed.set(false);
      quit.set(false);
      liveWorkers.set(0);
      failureMessage = "";
      assertEquals(operations, validMoves.length);
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder dcc = getDefaultClusteredCacheConfig(cacheMode, transactional);
      dcc.transaction().lockingMode(lockingMode);
      if (writeSkewCheck) {
         dcc.transaction().locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      }
      createCluster(TestDataSCI.INSTANCE, dcc, nodes);
      waitForClusterToForm();
   }
   public void testReplace() throws Exception {
      List caches = caches(null);
      testOnCaches(caches, new ReplaceOperation(true));
   }
   public void testConditionalRemove() throws Exception {
      List caches = caches(null);
      testOnCaches(caches, new ConditionalRemoveOperation(true));
   }
   public void testPutIfAbsent() throws Exception {
      List caches = caches(null);
      testOnCaches(caches, new PutIfAbsentOperation(true));
   }
   /**
    * Runs {@code threads} ValidMover workers against the given caches. All
    * workers rendezvous on a CyclicBarrier whose barrier action
    * (PostOperationStateCheck) validates cluster state between steps.
    */
   protected void testOnCaches(List<Cache> caches, CacheOperation operation) {
      failed.set(false);
      quit.set(false);
      caches.get(0).put(SHARED_KEY, "initialValue");
      final SharedState state = new SharedState(threads);
      final PostOperationStateCheck stateCheck = new PostOperationStateCheck(caches, state, operation);
      final CyclicBarrier barrier = new CyclicBarrier(threads, stateCheck);
      final String className = getClass().getSimpleName();//in order to be able filter this test's log file correctly
      ExecutorService exec = Executors.newFixedThreadPool(threads, getTestThreadFactory("Mover"));
      for (int threadIndex = 0; threadIndex < threads; threadIndex++) {
         Runnable validMover = new ValidMover(caches, barrier, threadIndex, state, operation);
         exec.execute(validMover);
      }
      exec.shutdown();
      try {
         boolean finished = exec.awaitTermination(5, TimeUnit.MINUTES);
         assertTrue("Test took too long", finished);
      } catch (InterruptedException e) {
         fail("Thread interrupted!");
      } finally {
         // Stop the worker threads so that they don't affect the following tests
         exec.shutdownNow();
      }
      assertFalse(failureMessage, failed.get());
   }
   // Pre-generates the distinct values ("v_0".."v_{operations-1}") the workers
   // will attempt to write.
   private String[] generateValidMoves() {
      String[] validMoves = new String[operations];
      for (int i = 0; i < operations; i++) {
         validMoves[i] = "v_" + i;
      }
      print("Valid moves ready");
      return validMoves;
   }
   // Only the first failure wins; later calls keep the original message.
   private void fail(final String message) {
      boolean firstFailure = failed.compareAndSet(false, true);
      if (firstFailure) {
         failureMessage = message;
      }
   }
   private void fail(final Exception e) {
      log.error("Failing because of exception", e);
      fail(e.toString());
   }
   /**
    * Worker: repeatedly reads the shared key, attempts the conditional
    * operation with a thread-specific target value, and synchronizes with the
    * other workers at the barrier before and after each attempt.
    */
   final class ValidMover implements Runnable {
      private final List<Cache> caches;
      private final int threadIndex;
      private final CyclicBarrier barrier;
      private final SharedState state;
      private final CacheOperation operation;
      public ValidMover(List<Cache> caches, CyclicBarrier barrier, int threadIndex, SharedState state, CacheOperation operation) {
         this.caches = caches;
         this.barrier = barrier;
         this.threadIndex = threadIndex;
         this.state = state;
         this.operation = operation;
      }
      @Override
      public void run() {
         int cachePickIndex = threadIndex;
         liveWorkers.incrementAndGet();
         try {
            // Threads stripe over validMoves: thread i takes indexes i, i+threads, ...
            for (int moveToIndex = threadIndex;
                 moveToIndex < validMoves.length && !barrier.isBroken() && !failed.get() && !quit.get();
                 moveToIndex += threads) {
               operation.beforeOperation(caches.get(0));
               // Rotate through the caches so operations hit different nodes.
               cachePickIndex = ++cachePickIndex % caches.size();
               Cache cache = caches.get(cachePickIndex);
               Object existing = cache.get(SHARED_KEY);
               String targetValue = validMoves[moveToIndex];
               state.beforeOperation(threadIndex, existing, targetValue);
               blockAtTheBarrier();
               boolean successful = operation.execute(cache, SHARED_KEY, existing, targetValue);
               state.afterOperation(threadIndex, existing, targetValue, successful);
               blockAtTheBarrier();
            }
            //not all threads might finish at the same block, so make sure none stays waiting for us when we exit
            quit.set(true);
            barrier.reset();
         } catch (InterruptedException | RuntimeException e) {
            log.error("Caught exception", e);
            fail(e);
         } catch (BrokenBarrierException e) {
            log.error("Caught exception", e);
            //just quit
            print("Broken barrier!");
         } finally {
            int andGet = liveWorkers.decrementAndGet();
            barrier.reset();
            print("Thread #" + threadIndex + " terminating. Still " + andGet + " threads alive");
         }
      }
      private void blockAtTheBarrier() throws InterruptedException, BrokenBarrierException {
         try {
            barrier.await(10000, TimeUnit.MILLISECONDS);
         } catch (TimeoutException e) {
            // A timeout is only an error if the run isn't already shutting down.
            if (!quit.get()) {
               throw new RuntimeException(e);
            }
         }
      }
   }
   /**
    * Snapshot of what each thread saw and attempted, published to the barrier
    * action. {@code after} flips between the before- and after-operation
    * checks performed at alternating barrier cycles.
    */
   static final class SharedState {
      private final SharedThreadState[] threadStates;
      private volatile boolean after = false;
      public SharedState(final int threads) {
         threadStates = new SharedThreadState[threads];
         for (int i = 0; i < threads; i++) {
            threadStates[i] = new SharedThreadState();
         }
      }
      synchronized void beforeOperation(int threadIndex, Object expected, String targetValue) {
         threadStates[threadIndex].beforeReplace(expected, targetValue);
         after = false;
      }
      synchronized void afterOperation(int threadIndex, Object expected, String targetValue, boolean successful) {
         threadStates[threadIndex].afterReplace(expected, targetValue, successful);
         after = true;
      }
      public boolean isAfter() {
         return after;
      }
   }
   // Per-thread record of the observed value, the attempted target value and
   // whether the conditional operation reported success.
   static final class SharedThreadState {
      Object beforeExpected;
      Object beforeTargetValue;
      Object afterExpected;
      Object afterTargetValue;
      boolean successfulOperation;
      public void beforeReplace(Object expected, Object targetValue) {
         this.beforeExpected = expected;
         this.beforeTargetValue = targetValue;
      }
      public void afterReplace(Object expected, Object targetValue, boolean replaced) {
         this.afterExpected = expected;
         this.afterTargetValue = targetValue;
         this.successfulOperation = replaced;
      }
      public boolean sameBeforeValue(Object currentStored) {
         return currentStored == null ? beforeExpected == null : currentStored.equals(beforeExpected);
      }
   }
   /**
    * Barrier action: runs while all worker threads are parked at the barrier,
    * so it can inspect cluster state without racing them. Alternates between
    * before-operation and after-operation consistency checks.
    */
   final class PostOperationStateCheck implements Runnable {
      private final List<Cache> caches;
      private final SharedState state;
      private final CacheOperation operation;
      private volatile int cycle = 0;
      public PostOperationStateCheck(final List<Cache> caches, final SharedState state, CacheOperation operation) {
         this.caches = caches;
         this.state = state;
         this.operation = operation;
      }
      @Override
      public void run() {
         if (state.isAfter()) {
            cycle++;
            log.tracef("Starting cycle %d", cycle);
            if (cycle % Math.max(operations / 100, 1) == 0) {
               print((cycle * 100 * threads / operations) + "%");
            }
            checkAfterState();
         } else {
            checkBeforeState();
         }
      }
      private void checkSameValueOnAllCaches() {
         final Object currentStored = caches.get(0).get(SHARED_KEY);
         log.tracef("Value seen by (first) cache %s is %s ", caches.get(0).getAdvancedCache().getRpcManager().getAddress(),
                    currentStored);
         for (Cache c : caches) {
            Object v = c.get(SHARED_KEY);
            Address currentCache = c.getAdvancedCache().getRpcManager().getAddress();
            log.tracef("Value seen by cache %s is %s", currentCache, v);
            boolean sameValue = v == null ? currentStored == null : v.equals(currentStored);
            if (!sameValue) {
               fail(Thread.currentThread().getName() + ": Not all the caches see the same value. first cache: " + currentStored + " cache " + currentCache +" saw " + v);
            }
         }
      }
      private void checkBeforeState() {
         // Before any thread runs its operation, every thread must have read
         // the value currently stored in the cluster.
         final Object currentStored = caches.get(0).get(SHARED_KEY);
         for (SharedThreadState threadState : state.threadStates) {
            if ( !threadState.sameBeforeValue(currentStored)) {
               fail("Some cache expected a different value than what is stored");
            }
         }
      }
      private void checkAfterState() {
         final Object currentStored = assertTestCorrectness();
         checkSameValueOnAllCaches();
         if (operation.isCas()) {
            checkSingleSuccessfulThread();
            checkSuccessfulOperation(currentStored);
         }
         checkNoLocks();
      }
      private Object assertTestCorrectness() {
         // Sanity check of the harness itself: target values must be unique
         // per thread, otherwise the CAS accounting below is meaningless.
         AdvancedCache someCache = caches.get(0).getAdvancedCache();
         final Object currentStored = someCache.get(SHARED_KEY);
         HashSet uniqueValueVerify = new HashSet();
         for (SharedThreadState threadState : state.threadStates) {
            uniqueValueVerify.add(threadState.afterTargetValue);
         }
         if (uniqueValueVerify.size() != threads) {
            fail("test bug");
         }
         return currentStored;
      }
      private void checkNoLocks() {
         for (Cache c : caches) {
            LockManager lockManager = c.getAdvancedCache().getComponentRegistry().getComponent(LockManager.class);
            //locks might be released async, so give it some time
            boolean isLocked = true;
            for (int i = 0; i < 30; i++) {
               if (!lockManager.isLocked(SHARED_KEY)) {
                  isLocked = false;
                  break;
               }
               try {
                  Thread.sleep(500);
               } catch (InterruptedException e) {
                  throw new RuntimeException(e);
               }
            }
            if (isLocked) {
               fail("lock on the entry wasn't cleaned up");
            }
         }
      }
      private void checkSuccessfulOperation(Object currentStored) {
         // The winner's write must match what's stored; losers' must not.
         for (SharedThreadState threadState : state.threadStates) {
            if (threadState.successfulOperation) {
               if (!operation.validateTargetValueForSuccess(threadState.afterTargetValue, currentStored)) {
                  fail("operation successful but the current stored value doesn't match the write operation of the successful thread");
               }
            } else {
               if (threadState.afterTargetValue.equals(currentStored)) {
                  fail("operation not successful (which is fine) but the current stored value matches the write attempt");
               }
            }
         }
      }
      private void checkSingleSuccessfulThread() {
         //for CAS operations there's only one successful thread
         int successfulThreads = 0;
         for (SharedThreadState threadState : state.threadStates) {
            if (threadState.successfulOperation) {
               successfulThreads++;
            }
         }
         if (successfulThreads != 1) {
            fail(successfulThreads + " threads assume a successful replacement! (CAS should succeed on a single thread only)");
         }
      }
   }
   /**
    * A conditional cache operation under test. {@code isCas} says whether the
    * operation has compare-and-swap semantics (exactly one concurrent winner).
    */
   public static abstract class CacheOperation {
      private final boolean isCas;
      protected CacheOperation(boolean cas) {
         isCas = cas;
      }
      public final boolean isCas() {
         return isCas;
      }
      abstract boolean execute(Cache cache, String sharedKey, Object existing, String targetValue);
      // Hook invoked before each attempt, e.g. to (re)seed the shared key.
      abstract void beforeOperation(Cache cache);
      boolean validateTargetValueForSuccess(Object afterTargetValue, Object currentStored) {
         return afterTargetValue.equals(currentStored);
      }
   }
   static class ReplaceOperation extends CacheOperation {
      ReplaceOperation(boolean cas) {
         super(cas);
      }
      @Override
      public boolean execute(Cache cache, String sharedKey, Object existing, String targetValue) {
         try {
            return cache.replace(SHARED_KEY, existing, targetValue);
         } catch (CacheException e) {
            // e.g. a write-skew rollback counts as an unsuccessful attempt
            return false;
         }
      }
      @Override
      public void beforeOperation(Cache cache) {
      }
   }
   class PutIfAbsentOperation extends CacheOperation {
      PutIfAbsentOperation(boolean cas) {
         super(cas);
      }
      @Override
      public boolean execute(Cache cache, String sharedKey, Object existing, String targetValue) {
         try {
            Object o = cache.putIfAbsent(SHARED_KEY, targetValue);
            // null return means the key was absent, i.e. this thread won
            return o == null;
         } catch (CacheException e) {
            return false;
         }
      }
      @Override
      public void beforeOperation(Cache cache) {
         // Remove the key so putIfAbsent has a chance to succeed each round.
         try {
            cache.remove(SHARED_KEY);
         } catch (CacheException e) {
            log.debug("Write skew check error while removing the key", e);
         }
      }
   }
   class ConditionalRemoveOperation extends CacheOperation {
      ConditionalRemoveOperation(boolean cas) {
         super(cas);
      }
      @Override
      public boolean execute(Cache cache, String sharedKey, Object existing, String targetValue) {
         try {
            return cache.remove(SHARED_KEY, existing);
         } catch (CacheException e) {
            return false;
         }
      }
      @Override
      public void beforeOperation(Cache cache) {
         // Re-seed the key so there is something to remove each round.
         try {
            cache.put(SHARED_KEY, "someValue");
         } catch (CacheException e) {
            log.warn("Write skew check error while inserting the key", e);
         }
      }
      @Override
      boolean validateTargetValueForSuccess(Object afterTargetValue, Object currentStored) {
         // A successful remove leaves no value behind.
         return currentStored == null;
      }
   }
   private void print(String s) {
      log.debug(s);
   }
}
| 17,591
| 34.043825
| 169
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentPessimisticTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.ConditionalOperationsConcurrentPessimisticTest")
public class ConditionalOperationsConcurrentPessimisticTest extends ConditionalOperationsConcurrentTest {

   /**
    * Runs the parent's concurrent conditional-operation checks against
    * transactional DIST_SYNC caches using pessimistic locking.
    */
   public ConditionalOperationsConcurrentPessimisticTest() {
      cacheMode = CacheMode.DIST_SYNC;
      transactional = true;
      lockingMode = LockingMode.PESSIMISTIC;
   }
}
| 548
| 33.3125
| 105
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ReplaceWithValueChangedTest.java
|
package org.infinispan.api;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import jakarta.transaction.Transaction;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* Test the condition described here: {@link org.infinispan.interceptors.distribution.TxDistributionInterceptor#ignorePreviousValueOnBackup}.
*
* @author Mircea Markus
* @since 5.2
*/
@Test (groups = "functional", testName = "api.ReplaceWithValueChangedTest")
public class ReplaceWithValueChangedTest extends MultipleCacheManagersTest {
   @Override
   protected void createCacheManagers() throws Throwable {
      // Two transactional DIST_SYNC nodes with READ_COMMITTED isolation (no
      // write-skew check), so a suspended tx can commit over external changes.
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      builder.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      createClusteredCaches(2, TestDataSCI.INSTANCE, builder);
   }
   // Each test: start a tx, perform a conditional write that succeeds against
   // the value seen in the tx, suspend the tx, change/remove the key outside
   // it, then commit — the suspended tx's write must still be applied on both
   // nodes (the previous value is not re-validated on commit).
   public void testReplace1() throws Throwable {
      Object k1 = getKeyForCache(0);
      cache(0).put(k1, "v1");
      tm(0).begin();
      assertEquals("v1", cache(0).replace(k1, "v2"));
      Transaction suspendedTx = tm(0).suspend();
      // Outside the tx: remove the key entirely.
      cache(0).remove(k1);
      assertNull(cache(0).get(k1));
      assertNull(cache(1).get(k1));
      log.trace("Here it begins");
      suspendedTx.commit();
      // The replace from the suspended tx wins on both nodes.
      assertEquals("v2", cache(0).get(k1));
      assertEquals("v2", cache(1).get(k1));
   }
   public void testReplace2() throws Throwable {
      Object k1 = getKeyForCache(0);
      cache(0).put(k1, "v1");
      tm(0).begin();
      assertEquals("v1", cache(0).replace(k1, "v2"));
      Transaction suspendedTx = tm(0).suspend();
      // Outside the tx: overwrite with a different value.
      cache(0).put(k1, "v3");
      assertEquals(cache(0).get(k1), "v3");
      assertEquals(cache(1).get(k1), "v3");
      suspendedTx.commit();
      assertEquals("v2", cache(0).get(k1));
      assertEquals("v2", cache(1).get(k1));
   }
   public void testPutIfAbsent() throws Throwable {
      Object k1 = getKeyForCache(0);
      tm(0).begin();
      assertNull(cache(0).putIfAbsent(k1, "v1"));
      Transaction suspendedTx = tm(0).suspend();
      // Outside the tx: create the key the suspended tx believed was absent.
      cache(0).put(k1, "v2");
      assertEquals(cache(0).get(k1), "v2");
      assertEquals(cache(1).get(k1), "v2");
      suspendedTx.commit();
      assertEquals("v1", cache(0).get(k1));
      assertEquals("v1", cache(1).get(k1));
   }
   public void testConditionalRemove() throws Throwable {
      Object k1 = getKeyForCache(0);
      cache(0).put(k1, "v1");
      tm(0).begin();
      assertTrue(cache(0).remove(k1, "v1"));
      Transaction suspendedTx = tm(0).suspend();
      // Outside the tx: replace the value the remove was conditioned on.
      cache(0).put(k1, "v2");
      assertEquals(cache(0).get(k1), "v2");
      assertEquals(cache(1).get(k1), "v2");
      log.trace("here it is");
      suspendedTx.commit();
      // The conditional remove from the suspended tx still takes effect.
      assertNull(cache(0).get(k1));
      assertNull(cache(1).get(k1));
   }
}
| 3,095
| 29.352941
| 141
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/MixedModeTest.java
|
package org.infinispan.api;
import static org.infinispan.context.Flag.CACHE_MODE_LOCAL;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
// Runs several cache modes (sync/async replication, sync/async invalidation,
// local) side by side in the same two cache managers and verifies that a
// write through each cache only has the visibility its mode promises.
@Test(groups = "functional", testName = "api.MixedModeTest")
public class MixedModeTest extends MultipleCacheManagersTest {
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder replSync = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
      ConfigurationBuilder replAsync = getDefaultClusteredCacheConfig(CacheMode.REPL_ASYNC, false);
      ConfigurationBuilder invalSync = getDefaultClusteredCacheConfig(CacheMode.INVALIDATION_SYNC, false);
      ConfigurationBuilder invalAsync = getDefaultClusteredCacheConfig(CacheMode.INVALIDATION_ASYNC, false);
      ConfigurationBuilder local = getDefaultClusteredCacheConfig(CacheMode.LOCAL, false);
      createClusteredCaches(2, "replSync", TestDataSCI.INSTANCE, replSync);
      defineConfigurationOnAllManagers("replAsync", replAsync);
      defineConfigurationOnAllManagers("invalSync", invalSync);
      defineConfigurationOnAllManagers("invalAsync", invalAsync);
      waitForClusterToForm("replAsync", "invalSync", "invalAsync");
      defineConfigurationOnAllManagers("local", local);
   }
   public void testMixedMode() {
      AdvancedCache replSyncCache1, replSyncCache2;
      AdvancedCache replAsyncCache1, replAsyncCache2;
      AdvancedCache invalAsyncCache1, invalAsyncCache2;
      AdvancedCache invalSyncCache1, invalSyncCache2;
      AdvancedCache localCache1, localCache2;
      replSyncCache1 = cache(0, "replSync").getAdvancedCache();
      replSyncCache2 = cache(1, "replSync").getAdvancedCache();
      replAsyncCache1 = cache(0, "replAsync").getAdvancedCache();
      replAsyncCache2 = cache(1, "replAsync").getAdvancedCache();
      invalSyncCache1 = cache(0, "invalSync").getAdvancedCache();
      invalSyncCache2 = cache(1, "invalSync").getAdvancedCache();
      invalAsyncCache1 = cache(0, "invalAsync").getAdvancedCache();
      invalAsyncCache2 = cache(1, "invalAsync").getAdvancedCache();
      localCache1 = cache(0, "local").getAdvancedCache();
      localCache2 = cache(1, "local").getAdvancedCache();
      // With the default SyncConsistentHashFactory, the same key will work for all caches
      MagicKey key = new MagicKey("k", replAsyncCache1);
      // Seed node 2's invalidation caches locally so the later writes on node 1
      // have something to invalidate.
      invalSyncCache2.withFlags(CACHE_MODE_LOCAL).put(key, "v");
      assertEquals("v", invalSyncCache2.get(key));
      assertNull(invalSyncCache1.get(key));
      invalAsyncCache2.withFlags(CACHE_MODE_LOCAL).put(key, "v");
      assertEquals("v", invalAsyncCache2.get(key));
      assertNull(invalAsyncCache1.get(key));
      // Register listeners so we can wait for the async RPCs to arrive below.
      replListener(replAsyncCache2).expectAny();
      replListener(invalAsyncCache2).expectAny();
      replSyncCache1.put(key, "replSync");
      replAsyncCache1.put(key, "replAsync");
      invalSyncCache1.put(key, "invalSync");
      invalAsyncCache1.put(key, "invalAsync");
      localCache1.put(key, "local");
      replListener(replAsyncCache2).waitForRpc();
      replListener(invalAsyncCache2).waitForRpc();
      // Replication: value visible on both nodes.
      assertEquals("replSync", replSyncCache1.get(key));
      assertEquals("replSync", replSyncCache2.get(key));
      assertEquals("replAsync", replAsyncCache1.get(key));
      assertEquals("replAsync", replAsyncCache2.get(key));
      // Invalidation: value only on the writer, the other node was invalidated.
      assertEquals("invalSync", invalSyncCache1.get(key));
      assertNull(invalSyncCache2.get(key));
      assertEquals("invalAsync", invalAsyncCache1.get(key));
      assertNull(invalAsyncCache2.get(key));
      // Local: no propagation at all.
      assertEquals("local", localCache1.get(key));
      assertNull(localCache2.get(key));
   }
}
| 3,982
| 46.416667
| 108
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ReplicatedOptimisticRepeatableReadIsolationTest.java
|
package org.infinispan.api;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
/**
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "api.ReplicatedOptimisticRepeatableReadIsolationTest")
public class ReplicatedOptimisticRepeatableReadIsolationTest extends AbstractRepeatableReadIsolationTest {
   public ReplicatedOptimisticRepeatableReadIsolationTest() {
      // Runs the inherited repeatable-read isolation test suite against a
      // synchronous replicated cache with optimistic locking.
      super(CacheMode.REPL_SYNC, LockingMode.OPTIMISTIC);
   }
}
| 541
| 29.111111
| 106
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ByteArrayCacheTest.java
|
package org.infinispan.api;
import static org.infinispan.test.TestingUtil.withCacheManager;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Arrays;
import java.util.Map;
import org.infinispan.Cache;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.CacheManagerCallable;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* Test that verifies that when custom, or JDK, objects that have undesirable
* equality checks, i.e. byte arrays, are stored in the cache, then the
* correct results are returned with different configurations (with or
* without key/value equivalence set up).
*
* @author Galder Zamarreño
* @since 5.3
*/
@Test(groups = "functional", testName = "api.ByteArrayCacheTest")
@CleanupAfterMethod
public class ByteArrayCacheTest extends SingleCacheManagerTest {

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      // A plain default configuration is sufficient for the shared cache.
      return TestCacheManagerFactory.createCacheManager(new ConfigurationBuilder());
   }

   /**
    * Replace must succeed when the expected-value argument is a different
    * byte[] instance whose contents equal the stored value.
    */
   public void testByteArrayValueOnlyReplace() {
      withCacheManager(new CacheManagerCallable(
            TestCacheManagerFactory.createCacheManager(new ConfigurationBuilder())) {
         @Override
         public void call() {
            // Mimics Memcached/REST endpoints where only value side is byte array
            Cache<Integer, byte[]> byteValueCache = cm.getCache();
            Integer storedKey = 2;
            byteValueCache.put(storedKey, new byte[]{1, 2, 3});
            // Deliberately different array instances with equal contents
            byte[] expectedOldValue = {1, 2, 3};
            byte[] replacementValue = {4, 5, 6};
            assertTrue(byteValueCache.replace(storedKey, expectedOldValue, replacementValue));
         }
      });
   }

   /**
    * Lookup must succeed with a key that is content-equal (but not
    * reference-equal) to the key used on put.
    */
   public void testByteArrayGet() {
      Map<byte[], byte[]> byteMap = cache();
      byte[] storedKey = {1, 2, 3};
      byte[] storedValue = {4, 5, 6};
      byteMap.put(storedKey, storedValue);
      byte[] lookupKey = {1, 2, 3}; // on purpose, different instance required
      byte[] retrieved = byteMap.get(lookupKey);
      assertTrue(String.format("Expected key=%s to return value=%s",
            Util.toStr(lookupKey), Util.toStr(storedValue)),
            Arrays.equals(storedValue, retrieved));
   }
}
| 2,517
| 35.492754
| 82
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/NonDuplicateModificationTest.java
|
package org.infinispan.api;
import java.util.concurrent.Future;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.util.ControlledRpcManager;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
* Data inconsistency can happen in non-transactional caches. the tests replicates this scenario: assuming N1 and N2 are
* owners of key K. N2 is the primary owner
* <p/>
* <ul>
* <li>N1 tries to update K. it forwards the command to N2.</li>
* <li>N2 acquires the lock, and forwards back to N1 (that applies the modification).</li>
* <li>N2 releases the lock and replies to N1.</li>
* <li>N1 applies again the modification without the lock.</li>
* </ul>
*
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "api.NonDuplicateModificationTest")
public class NonDuplicateModificationTest extends MultipleCacheManagersTest {
   @Override
   public Object[] factory() {
      // It is not (easily) possible to run this test with bias acquisition since the ControlledRpcManager
      // cannot handle RpcManager.sendTo + CommandAckCollector -style RPC
      return new Object[] {
            new NonDuplicateModificationTest().cacheMode(CacheMode.REPL_SYNC),
      };
   }

   /**
    * ISPN-3354
    */
   public void testPut() throws Exception {
      performTestOn(Operation.PUT);
   }

   /**
    * ISPN-3354
    */
   public void testReplace() throws Exception {
      performTestOn(Operation.REPLACE);
   }

   /**
    * ISPN-3354
    */
   public void testRemove() throws Exception {
      performTestOn(Operation.REMOVE);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(cacheMode, false);
      builder.clustering().hash().numSegments(60);
      createClusteredCaches(2, TestDataSCI.INSTANCE, builder);
   }

   // Reproduces the ISPN-3354 race (see class javadoc): cache(1)'s write is
   // forked and its responses held back; meanwhile cache(0) writes "v3".
   // After the blocked responses are released, "v3" must survive on all
   // nodes, i.e. cache(1) must not re-apply its own older modification.
   private void performTestOn(final Operation operation) throws Exception {
      final Object key = getKeyForCache(cache(0), cache(1));
      cache(0).put(key, "v1");
      assertKeyValue(key, "v1");

      final ControlledRpcManager controlledRpcManager = ControlledRpcManager.replaceRpcManager(cache(1));

      Future<Void> future = fork(() -> {
         operation.execute(cache(1), key, "v2");
         return null;
      });

      // Let the command reach the remote node but hold back its responses.
      ControlledRpcManager.BlockedResponseMap blockedResponses = operation.expectCommand(controlledRpcManager)
            .send().expectAllResponses();

      // Concurrent update that must win.
      cache(0).put(key, "v3");

      blockedResponses.receive();
      future.get();
      controlledRpcManager.revertRpcManager();

      assertKeyValue(key, "v3");
   }

   // Asserts that every node in the cluster sees the expected value.
   private void assertKeyValue(Object key, Object expected) {
      for (Cache cache : caches()) {
         AssertJUnit.assertEquals("Wrong value for key " + key + " on " + address(cache), expected, cache.get(key));
      }
   }

   // The write operations under test, each paired with the command class to
   // intercept in the RPC layer.
   private enum Operation {
      PUT(PutKeyValueCommand.class),
      REMOVE(RemoveCommand.class),
      REPLACE(ReplaceCommand.class);

      private final Class<? extends ReplicableCommand> classToBlock;

      Operation(Class<? extends ReplicableCommand> classToBlock) {
         this.classToBlock = classToBlock;
      }

      private ControlledRpcManager.BlockedRequest expectCommand(ControlledRpcManager rpcManager)
            throws InterruptedException {
         return rpcManager.expectCommand(classToBlock);
      }

      private void execute(Cache<Object, Object> cache, Object key, Object value) {
         switch (this) {
            case PUT:
               cache.put(key, value);
               break;
            case REMOVE:
               cache.remove(key);
               break;
            case REPLACE:
               cache.replace(key, value);
               break;
         }
      }
   }
}
| 4,268
| 30.858209
| 120
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/TxCacheAndAsyncOpsTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;

import java.util.Collections;
import java.util.concurrent.CompletableFuture;

import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.1
*/
@Test (groups = "functional", testName = "api.TxCacheAndAsyncOpsTest")
public class TxCacheAndAsyncOpsTest extends SingleCacheManagerTest {

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      // Transactional standalone cache: the point of the test is that the
      // async operations work on a transactional cache.
      final ConfigurationBuilder defaultStandaloneConfig = getDefaultStandaloneCacheConfig(true);
      return TestCacheManagerFactory.createCacheManager(defaultStandaloneConfig);
   }

   /**
    * Exercises putAsync/removeAsync/putAllAsync and verifies their results.
    */
   public void testAsyncOps() throws Exception {
      CompletableFuture<Object> result = cache.putAsync("k", "v");
      // Use TestNG assertions instead of the bare 'assert' keyword: 'assert'
      // is silently skipped when the JVM runs without -ea, masking failures.
      assertNull(result.get());
      result = cache.removeAsync("k");
      assertEquals("v", result.get());
      final CompletableFuture<Void> voidNotifyingFuture = cache.putAllAsync(Collections.singletonMap("k", "v"));
      voidNotifyingFuture.get();
      assertEquals("v", cache.get("k"));
   }
}
| 1,255
| 31.205128
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/SimpleCacheTest.java
|
package org.infinispan.api;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.cache.impl.SimpleCacheImpl;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.configuration.CustomInterceptorConfigTest;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.NumericVersion;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.BaseCustomAsyncInterceptor;
import org.infinispan.interceptors.impl.InvocationContextInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.stats.Stats;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
@Test(groups = "functional", testName = "api.SimpleCacheTest")
public class SimpleCacheTest extends APINonTxTest {
   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      ConfigurationBuilder cb = new ConfigurationBuilder();
      cb.simpleCache(true);
      EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(cb);
      // Unwrap the delegate so the instanceof check sees the real implementation.
      cache = AbstractDelegatingCache.unwrapCache(cm.getCache());
      assertTrue(cache instanceof SimpleCacheImpl);
      return cm;
   }

   // Simple caches have no interceptor chain support: adding must fail.
   @Test(expectedExceptions = UnsupportedOperationException.class)
   public void testAddInterceptor() {
      cache().getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptor(new CustomInterceptorConfigTest.DummyInterceptor(), 0);
   }

   // The chain exists but contains none of the regular interceptors.
   public void testFindInterceptor() {
      AsyncInterceptorChain interceptorChain = cache().getAdvancedCache().getAsyncInterceptorChain();
      assertNotNull(interceptorChain);
      assertNull(interceptorChain.findInterceptorExtending(InvocationContextInterceptor.class));
   }

   // The following configuration combinations are rejected for simple caches.
   @Test(expectedExceptions = CacheConfigurationException.class)
   public void testTransactions() {
      new ConfigurationBuilder().simpleCache(true)
            .transaction().transactionMode(TransactionMode.TRANSACTIONAL).build();
   }

   @Test(expectedExceptions = CacheConfigurationException.class)
   public void testCustomInterceptors() {
      new ConfigurationBuilder().simpleCache(true)
            .customInterceptors().addInterceptor().interceptor(new BaseCustomAsyncInterceptor())
            .build();
   }

   @Test(expectedExceptions = CacheConfigurationException.class)
   public void testBatching() {
      new ConfigurationBuilder().simpleCache(true).invocationBatching().enable(true).build();
   }

   @Test(expectedExceptions = CacheConfigurationException.class, expectedExceptionsMessageRegExp = "ISPN000381: This configuration is not supported for simple cache")
   public void testIndexing() {
      new ConfigurationBuilder().simpleCache(true).indexing().enable().build();
   }

   @Test(expectedExceptions = CacheConfigurationException.class)
   public void testStoreAsBinary() {
      new ConfigurationBuilder().simpleCache(true).memory().storageType(StorageType.BINARY).build();
   }

   // Locked streams are unsupported on simple caches, so the inherited tests
   // are expected to throw instead of their normal behavior.
   @Test(dataProvider = "lockedStreamActuallyLocks", expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamActuallyLocks(BiConsumer<Cache<Object, Object>, CacheEntry<Object, Object>> consumer,
         boolean forEachOrInvokeAll) throws Throwable {
      super.testLockedStreamActuallyLocks(consumer, forEachOrInvokeAll);
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamSetValue() {
      super.testLockedStreamSetValue();
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamWithinLockedStream() {
      super.testLockedStreamWithinLockedStream();
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamInvokeAllFilteredSet() {
      super.testLockedStreamInvokeAllFilteredSet();
   }

   @Test(expectedExceptions = UnsupportedOperationException.class)
   @Override
   public void testLockedStreamInvokeAllPut() {
      super.testLockedStreamInvokeAllPut();
   }

   // Statistics collection works on simple caches: a put increments stores.
   public void testStatistics() {
      Configuration cfg = new ConfigurationBuilder().simpleCache(true).jmxStatistics().enabled(true).build();
      String name = "statsCache";
      cacheManager.defineConfiguration(name, cfg);
      Cache<Object, Object> cache = cacheManager.getCache(name);
      assertEquals(0L, cache.getAdvancedCache().getStats().getStores());
      cache.put("key", "value");
      assertEquals(1L, cache.getAdvancedCache().getStats().getStores());
   }

   // With a max size of 1, inserting KEY_COUNT entries must evict all but one
   // and the stats must reflect both the stores and the evictions.
   public void testEvictionWithStatistics() {
      int KEY_COUNT = 5;
      Configuration cfg = new ConfigurationBuilder()
            .simpleCache(true)
            .memory().size(1)
            .jmxStatistics().enable()
            .build();
      String name = "evictionCache";
      cacheManager.defineConfiguration(name, cfg);
      Cache<Object, Object> cache = cacheManager.getCache(name);
      for (int i = 0; i < KEY_COUNT; i++) {
         cache.put("key" + i, "value");
      }
      Stats stats = cache.getAdvancedCache().getStats();
      assertEquals(1, stats.getCurrentNumberOfEntriesInMemory());
      assertEquals(KEY_COUNT, stats.getStores());
      assertEquals(KEY_COUNT - 1, stats.getEvictions());
   }

   // putAsyncEntry returns the previous entry (value + metadata) and the new
   // value/metadata become visible afterwards.
   public void testPutAsyncEntry() {
      AdvancedCache<Object, Object> c = cache.getAdvancedCache();
      Metadata metadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(1))
            .lifespan(25_000)
            .maxIdle(30_000)
            .build();
      assertNull(await(c.putAsync("k", "v1", metadata)));
      assertEquals("v1", cache.get("k"));

      Metadata updatedMetadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(2))
            .lifespan(35_000)
            .maxIdle(42_000)
            .build();
      CacheEntry<Object, Object> previousEntry = await(c.putAsyncEntry("k", "v2", updatedMetadata));
      assertEquals("k", previousEntry.getKey());
      assertEquals("v1", previousEntry.getValue());
      assertNotNull(previousEntry.getMetadata());
      assertMetadata(metadata, previousEntry.getMetadata());

      CacheEntry<Object, Object> currentEntry = c.getCacheEntry("k");
      assertEquals("k", currentEntry.getKey());
      assertEquals("v2", currentEntry.getValue());
      assertNotNull(currentEntry.getMetadata());
      assertMetadata(updatedMetadata, currentEntry.getMetadata());
   }

   // putIfAbsentAsyncEntry on an existing key returns the existing entry and
   // leaves both value and metadata untouched.
   public void testPutIfAbsentAsyncEntry() {
      AdvancedCache<Object, Object> c = cache.getAdvancedCache();
      Metadata metadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(1))
            .lifespan(25_000)
            .maxIdle(30_000)
            .build();
      assertNull(await(c.putAsync("k", "v1", metadata)));
      assertEquals("v1", c.get("k"));

      Metadata updatedMetadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(2))
            .lifespan(35_000)
            .maxIdle(42_000)
            .build();
      CacheEntry<Object, Object> previousEntry = await(c.putIfAbsentAsyncEntry("k", "v2", updatedMetadata));
      assertEquals("k", previousEntry.getKey());
      assertEquals("v1", previousEntry.getValue());
      assertMetadata(metadata, previousEntry.getMetadata());

      CacheEntry<Object, Object> currentEntry = await(c.getCacheEntryAsync("k"));
      assertEquals("k", currentEntry.getKey());
      assertEquals("v1", currentEntry.getValue());
      assertNotNull(currentEntry.getMetadata());
      assertMetadata(metadata, currentEntry.getMetadata());
   }

   // removeAsyncEntry returns the removed entry, and null when the key is gone.
   public void testRemoveAsyncEntry() {
      AdvancedCache<Object, Object> c = cache.getAdvancedCache();
      Metadata metadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(1))
            .lifespan(25_000)
            .maxIdle(30_000)
            .build();
      assertNull(await(c.putAsync("k", "v", metadata)));

      CacheEntry<Object, Object> currentEntry = await(c.getCacheEntryAsync("k"));
      assertEquals("k", currentEntry.getKey());
      assertEquals("v", currentEntry.getValue());
      assertNotNull(currentEntry.getMetadata());
      assertMetadata(metadata, currentEntry.getMetadata());

      CacheEntry<Object, Object> previousEntry = await(c.removeAsyncEntry("k"));
      assertEquals("k", previousEntry.getKey());
      assertEquals("v", previousEntry.getValue());
      assertMetadata(metadata, previousEntry.getMetadata());
      assertNull(c.get("k"));
      assertNull(await(c.removeAsyncEntry("k")));
   }

   // replaceAsyncEntry on a missing key is a no-op that returns null.
   public void testReplaceAsyncEntryNonExistingKey() {
      Metadata metadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(1))
            .lifespan(25_000)
            .maxIdle(30_000)
            .build();
      CompletableFuture<CacheEntry<Object, Object>> f = cache.getAdvancedCache().replaceAsyncEntry("k", "v", metadata);
      assertNull(await(f));
   }

   // replaceAsyncEntry on an existing key returns the previous entry and
   // installs the new value/metadata.
   public void testReplaceAsyncEntryExistingKey() {
      AdvancedCache<Object, Object> c = cache.getAdvancedCache();
      Metadata metadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(1))
            .lifespan(25_000)
            .maxIdle(30_000)
            .build();
      assertNull(await(c.putAsync("k", "v1", metadata)));

      Metadata updatedMetadata = new EmbeddedMetadata.Builder()
            .version(new NumericVersion(2))
            .lifespan(35_000)
            .maxIdle(42_000)
            .build();
      CacheEntry<Object, Object> previousEntry = await(c.replaceAsyncEntry("k", "v2", updatedMetadata));
      assertEquals(previousEntry.getKey(), "k");
      assertEquals(previousEntry.getValue(), "v1");
      assertMetadata(metadata, previousEntry.getMetadata());

      CacheEntry<Object, Object> currentEntry = await(c.getCacheEntryAsync("k"));
      assertEquals("k", currentEntry.getKey());
      assertEquals("v2", currentEntry.getValue());
      assertNotNull(currentEntry.getMetadata());
      assertMetadata(updatedMetadata, currentEntry.getMetadata());
   }

   // Compares only version/lifespan/maxIdle, the fields set by the builders above.
   private void assertMetadata(Metadata expected, Metadata actual) {
      assertEquals(expected.version(), actual.version());
      assertEquals(expected.lifespan(), actual.lifespan());
      assertEquals(expected.maxIdle(), actual.maxIdle());
   }
}
| 11,251
| 39.916364
| 166
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/StartCacheFromListenerTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.lifecycle.ModuleLifecycle;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.2
*/
@Test(groups = "functional", testName = "api.StartCacheFromListenerTest")
public class StartCacheFromListenerTest extends MultipleCacheManagersTest {
   @Override
   protected void createCacheManagers() throws Throwable {
      addClusterEnabledCacheManager();
      addClusterEnabledCacheManager();
      ConfigurationBuilder dcc = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      manager(0).defineConfiguration("some", dcc.build());
      manager(0).defineConfiguration("single", dcc.build());
      manager(0).defineConfiguration("cacheStarting", dcc.build());
      manager(0).defineConfiguration("cacheStarted", dcc.build());
   }

   // Guards against the injected lifecycle callback recursing when the
   // "cacheStarting" cache itself starts.
   final AtomicBoolean cacheStartingInvoked = new AtomicBoolean(false);

   // Starts another cache ("cacheStarting") from within a cacheStarting
   // lifecycle callback, on a forked thread, and verifies the write made
   // there is visible afterwards.
   public void testSingleInvocation() {
      final EmbeddedCacheManager cacheManager = manager(0);
      GlobalComponentRegistry registry = TestingUtil.extractGlobalComponentRegistry(cacheManager);
      List<ModuleLifecycle> lifecycles = new LinkedList<>();
      // Replace the registry's module lifecycles with our single test hook.
      TestingUtil.replaceField(lifecycles, "moduleLifecycles", registry, GlobalComponentRegistry.class);
      lifecycles.add(new ModuleLifecycle() {
         @Override
         public void cacheStarting(ComponentRegistry cr, Configuration configuration, String cacheName) {
            log.debug("StartCacheFromListenerTest.cacheStarting");
            if (!cacheStartingInvoked.get()) {
               cacheStartingInvoked.set(true);
               Future<Cache> fork = fork(() -> {
                  try {
                     return cacheManager.getCache("cacheStarting");
                  } catch (Exception e) {
                     log.error("Got", e);
                     throw e;
                  }
               });
               try {
                  log.debug("About to wait in get");
                  Cache cache = fork.get();
                  cache.put("k", "v");
                  log.debug("returned from get!");
               } catch (InterruptedException e) {
                  log.error("Interrupted while waiting for the cache to start");
               } catch (ExecutionException e) {
                  log.error("Failed to start cache", e);
               }
            }
         }
      });
      log.debug("StartCacheFromListenerTest.testSingleInvocation1");
      Cache<Object, Object> some = cacheManager.getCache("some");
      log.debug("StartCacheFromListenerTest.testSingleInvocation2");
      some.put("k", "v");
      assertEquals("v", cacheManager.getCache("cacheStarting").get("k"));
   }

   // Re-enters getCache("single") for the cache that is just finishing its
   // own start, from a cacheStarted callback; both writes must be visible.
   public void testStartSameCache() {
      final EmbeddedCacheManager cacheManager = manager(0);
      GlobalComponentRegistry registry = TestingUtil.extractGlobalComponentRegistry(cacheManager);
      List<ModuleLifecycle> lifecycles = new LinkedList<>();
      TestingUtil.replaceField(lifecycles, "moduleLifecycles", registry, GlobalComponentRegistry.class);
      lifecycles.add(new ModuleLifecycle() {
         @Override
         public void cacheStarted(ComponentRegistry cr, String cacheName) {
            Cache cache = cacheManager.getCache("single");
            cache.put("k1", "v1");
         }
      });
      Cache<Object, Object> some = cacheManager.getCache("single");
      some.put("k2", "v2");
      assertEquals("v1", cacheManager.getCache("single").get("k1"));
      assertEquals("v2", cacheManager.getCache("single").get("k2"));
   }
}
| 4,245
| 39.826923
| 105
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationsConcurrentWriteSkewTest.java
|
package org.infinispan.api;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.distribution.VersionedDistributionInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.ReclosableLatch;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.2
*/
@Test(groups = "functional", testName = "api.ConditionalOperationsConcurrentWriteSkewTest")
public class ConditionalOperationsConcurrentWriteSkewTest extends MultipleCacheManagersTest {

   private static final int NODES_NUM = 3;
   private final CacheMode mode = CacheMode.DIST_SYNC;
   protected LockingMode lockingMode = LockingMode.OPTIMISTIC;
   protected boolean writeSkewCheck;
   protected boolean transactional;

   public ConditionalOperationsConcurrentWriteSkewTest() {
      transactional = true;
      writeSkewCheck = true;
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder dcc = getDefaultClusteredCacheConfig(mode, true);
      dcc.transaction().lockingMode(lockingMode);
      if (writeSkewCheck) {
         // REPEATABLE_READ enables the write-skew check for optimistic txs.
         dcc.transaction().locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      }
      createCluster(TestDataSCI.INSTANCE, dcc, NODES_NUM);
      waitForClusterToForm();
   }

   public void testSimpleConcurrentReplace() throws Exception {
      doSimpleConcurrentTest(Operation.REPLACE);
   }

   public void testSimpleConcurrentPut() throws Exception {
      doSimpleConcurrentTest(Operation.PUT);
   }

   public void testSimpleConcurrentRemove() throws Exception {
      doSimpleConcurrentTest(Operation.REMOVE);
   }

   /**
    * Runs tx1 (an unconditional put of "tx1") and, while tx1's commit is
    * blocked on cache(1), runs tx2 (a conditional operation expecting the
    * old value "v1") from cache(2). The remote get on cache(1) is blocked
    * so tx2 reads the already-committed value from cache(0) and its
    * condition fails; the final value on every node must be "tx1".
    */
   private void doSimpleConcurrentTest(final Operation operation) throws Exception {
      //default owners are 2
      assertEquals("Wrong number of owner. Please change the configuration", 2,
            cache(0).getCacheConfiguration().clustering().hash().numOwners());
      final Object key = new MagicKey(cache(0), cache(1));
      try {
         CommandInterceptorController controller = injectController(cache(1));
         if (operation == Operation.REMOVE || operation == Operation.REPLACE) {
            // REMOVE/REPLACE need a pre-existing value for the condition.
            cache(0).put(key, "v1");
         }

         controller.awaitCommit.close();
         controller.blockCommit.close();

         final Future<Boolean> tx1 = fork(() -> {
            tm(1).begin();
            cache(1).put(key, "tx1");
            tm(1).commit();
            return Boolean.TRUE;
         });

         //await tx1 commit on cache1... the commit will be blocked!
         //tx1 has already committed in cache(0) but not in cache(1)
         //we block the remote get in order to force the tx2 to read the most recent value from cache(0)
         controller.awaitCommit.await(30, TimeUnit.SECONDS);
         controller.blockRemoteGet.close();

         final Future<Boolean> tx2 = fork(() -> {
            tm(2).begin();
            switch (operation) {
               case REMOVE:
                  cache(2).remove(key, "v1");
                  break;
               case REPLACE:
                  cache(2).replace(key, "v1", "tx2");
                  break;
               case PUT:
                  cache(2).putIfAbsent(key, "tx2");
                  break;
            }
            tm(2).commit();
            return Boolean.TRUE;
         });

         //tx2 will not prepare the transaction remotely since the operation should fail.
         assertTrue("Tx2 has not finished", tx2.get(20, TimeUnit.SECONDS));

         //let everything run normally
         controller.reset();

         assertTrue("Tx1 has not finished", tx1.get(20, TimeUnit.SECONDS));

         //check if no transactions are active
         assertNoTransactions();

         for (Cache cache : caches()) {
            assertEquals("Wrong value for cache " + address(cache), "tx1", cache.get(key));
         }
      } finally {
         removeController(cache(1));
      }
   }

   // Installs the controlling interceptor just before the distribution
   // interceptor on the given cache.
   private CommandInterceptorController injectController(Cache cache) {
      CommandInterceptorController commandInterceptorController = new CommandInterceptorController();
      extractInterceptorChain(cache).addInterceptorBefore(commandInterceptorController, VersionedDistributionInterceptor.class);
      return commandInterceptorController;
   }

   private void removeController(Cache cache) {
      extractInterceptorChain(cache).removeInterceptor(CommandInterceptorController.class);
   }

   private enum Operation {
      PUT, REPLACE, REMOVE
   }

   /**
    * Interceptor that lets the test observe and stall specific commands.
    * All latches are final and initialized open, so no null checks are
    * needed (the originals had always-true {@code != null} guards and an
    * empty {@code finally} block, both removed here).
    */
   class CommandInterceptorController extends DDAsyncInterceptor {

      private final ReclosableLatch blockRemoteGet = new ReclosableLatch(true);
      private final ReclosableLatch blockCommit = new ReclosableLatch(true);
      private final ReclosableLatch awaitPrepare = new ReclosableLatch(true);
      private final ReclosableLatch awaitCommit = new ReclosableLatch(true);

      @Override
      public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
         return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, throwable) -> {
            log.debug("visit GetKeyValueCommand");
            if (!ctx.isOriginLocal()) {
               log.debug("Remote Get Received... blocking...");
               blockRemoteGet.await(30, TimeUnit.SECONDS);
            }
         });
      }

      @Override
      public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
         return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, throwable) -> {
            log.debug("visit GetCacheEntryCommand");
            if (!ctx.isOriginLocal()) {
               log.debug("Remote Get Received... blocking...");
               blockRemoteGet.await(30, TimeUnit.SECONDS);
            }
         });
      }

      @Override
      public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
         return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, throwable) -> {
            log.debug("visit Prepare");
            log.debug("Prepare Received... unblocking");
            awaitPrepare.open();
         });
      }

      @Override
      public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
         return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, throwable) -> {
            if (ctx.isOriginLocal()) {
               log.debug("visit Commit");
               log.debug("Commit Received... unblocking...");
               awaitCommit.open();
               log.debug("Commit Received... blocking...");
               blockCommit.await(30, TimeUnit.SECONDS);
            }
         });
      }

      // Re-opens every latch so blocked commands proceed normally.
      public void reset() {
         blockCommit.open();
         blockRemoteGet.open();
         awaitPrepare.open();
         awaitCommit.open();
      }
   }
}
| 8,324
| 35.674009
| 128
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/ConditionalOperationPrimaryOwnerFailTest.java
|
package org.infinispan.api;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.testng.Assert.assertNull;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.impl.EntryFactory;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.MagicKey;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.concurrent.CompletionStages;
import org.mockito.Mockito;
import org.testng.annotations.Test;
/**
* Tests if the keys are not wrapped in the non-owner nodes
*
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "api.ConditionalOperationPrimaryOwnerFailTest")
public class ConditionalOperationPrimaryOwnerFailTest extends MultipleCacheManagersTest {

   private static final String INITIAL_VALUE = "initial";
   private static final String FINAL_VALUE = "final";

   // Blocks the StateResponseCommand on the future backup owner (cache(2)),
   // kills the primary owner (node 1) to trigger state transfer, then does
   // a put on cache(2). The spied EntryFactory checks that the key was NOT
   // wrapped into the invocation context on the non-owner.
   public void testEntryNotWrapped() throws Throwable {
      assertClusterSize("Wrong cluster size!", 3);
      final Object key = new MagicKey(cache(0), cache(1));
      final Cache<Object, Object> futureBackupOwnerCache = cache(2);

      cache(0).put(key, INITIAL_VALUE);

      final PerCacheInboundInvocationHandler spyHandler = spyInvocationHandler(futureBackupOwnerCache);
      final EntryFactory spyEntryFactory = spyEntryFactory(futureBackupOwnerCache);

      //it blocks the StateResponseCommand.class
      final CountDownLatch latch1 = new CountDownLatch(1);
      final CountDownLatch latch2 = new CountDownLatch(1);
      doAnswer(invocation -> {
         CacheRpcCommand command = (CacheRpcCommand) invocation.getArguments()[0];
         if (command instanceof StateResponseCommand) {
            log.debugf("Blocking command %s", command);
            // Signal the main thread, then wait until it releases us.
            latch2.countDown();
            latch1.await();
         }
         return invocation.callRealMethod();
      }).when(spyHandler).handle(any(CacheRpcCommand.class), any(Reply.class), any(DeliverOrder.class));

      doAnswer(invocation -> {
         InvocationContext context = (InvocationContext) invocation.getArguments()[0];
         log.debugf("wrapEntryForWriting invoked with %s", context);
         CompletionStage<Void> stage = (CompletionStage<Void>) invocation.callRealMethod();
         CompletionStages.join(stage);
         // The actual assertion of this test: the key must not be wrapped.
         assertNull(context.lookupEntry(key), "Entry should not be wrapped!");
         return stage;
      }).when(spyEntryFactory).wrapEntryForWriting(any(InvocationContext.class), any(), anyInt(),
                                                   anyBoolean(), anyBoolean(), any());

      Future<?> killMemberResult = fork(() -> killMember(1));

      //await until the key is received from state transfer (the command is blocked now...)
      latch2.await(30, TimeUnit.SECONDS);

      futureBackupOwnerCache.put(key, FINAL_VALUE);

      // Release the blocked state response and let the topology settle.
      latch1.countDown();

      killMemberResult.get(30, TimeUnit.SECONDS);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
      builder.clustering()
            .hash().numOwners(2)
            .stateTransfer().fetchInMemoryState(true);
      createClusteredCaches(3, TestDataSCI.INSTANCE, builder);
   }

   // Replaces the cache's EntryFactory component with a Mockito spy.
   private EntryFactory spyEntryFactory(Cache<Object, Object> cache) {
      EntryFactory spy = spy(extractComponent(cache, EntryFactory.class));
      TestingUtil.replaceComponent(cache, EntryFactory.class, spy, true);
      return spy;
   }

   // Wraps the cache's inbound invocation handler with a Mockito spy.
   private PerCacheInboundInvocationHandler spyInvocationHandler(Cache cache) {
      return wrapInboundInvocationHandler(cache, Mockito::spy);
   }
}
| 4,644
| 41.227273
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/SkipLockingTest.java
|
package org.infinispan.api;
import java.lang.reflect.Method;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Tests {@link Flag#SKIP_LOCKING} behavior for put operations, both with and
 * without a transaction manager configured.
 *
 * @author Galder Zamarreño
 * @since 4.1
 */
@Test(groups = "functional", testName = "api.SkipLockingTest")
public class SkipLockingTest extends SingleCacheManagerTest {

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      // Default manager: no transaction manager configured.
      return TestCacheManagerFactory.createCacheManager(false);
   }

   public void testSkipLockingAfterPutWithoutTm(Method m) {
      final String suffix = m.getName();
      final AdvancedCache ac = cacheManager.getCache().getAdvancedCache();
      ac.put("k-" + suffix, "v-" + suffix);
      // The second put on the same key must succeed with SKIP_LOCKING.
      ac.withFlags(Flag.SKIP_LOCKING).put("k-" + suffix, "v2-" + suffix);
   }

   public void testSkipLockingAfterPutWithTm(Method m) {
      // Use a dedicated manager with a transaction manager enabled.
      final EmbeddedCacheManager txManager = TestCacheManagerFactory.createCacheManager(true);
      try {
         final String suffix = m.getName();
         final AdvancedCache ac = txManager.getCache().getAdvancedCache();
         ac.put("k-" + suffix, "v-" + suffix);
         // The second put on the same key must succeed with SKIP_LOCKING.
         ac.withFlags(Flag.SKIP_LOCKING).put("k-" + suffix, "v2-" + suffix);
      } finally {
         txManager.stop();
      }
   }
}
| 1,502
| 31.673913
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/BaseCacheAPIPessimisticTest.java
|
package org.infinispan.api;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import jakarta.transaction.TransactionManager;
import org.infinispan.LockedStream;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Pessimistic-locking variant of {@link CacheAPITest}, adding tests for
 * {@link LockedStream} interaction with explicit transactions and held locks.
 *
 * @author wburns
 * @since 9.1
 */
public abstract class BaseCacheAPIPessimisticTest extends CacheAPITest {
   @Override
   protected void amend(ConfigurationBuilder cb) {
      // Switch the inherited configuration to pessimistic locking.
      cb.transaction().lockingMode(LockingMode.PESSIMISTIC);
   }

   /**
    * Tests to make sure that locked stream works properly when another user has the lock for a given key
    */
   public void testLockedStreamBlocked() throws InterruptedException, TimeoutException, BrokenBarrierException, ExecutionException {
      for (int i = 0; i < 10; i++) {
         cache.put(i, "value" + i);
      }
      // Two parties: the forked tx thread and the main test thread.
      CyclicBarrier barrier = new CyclicBarrier(2);
      int key = 4;
      // Forked tx: locks the key via put, then holds the tx open across two barrier waits.
      Future<Object> putFuture = fork(() -> TestingUtil.withTx(cache.getAdvancedCache().getTransactionManager(), () -> {
         Object prev = cache.put(key, "value" + key + "-new");
         // Wait for main thread to get to same point
         barrier.await(10, TimeUnit.SECONDS);
         // Main thread lets us complete
         barrier.await(10, TimeUnit.SECONDS);
         return prev;
      }));
      // Wait until fork thread has already locked the key
      barrier.await(10, TimeUnit.SECONDS);
      LockedStream<Object, Object> stream = cache.getAdvancedCache().lockedStream();
      // The forEach should block: the key is still locked by the open transaction.
      Future<?> forEachFuture = fork(() -> stream.filter(e -> e.getKey().equals(key)).forEach((c, e) ->
            assertEquals("value" + key + "-new", c.put(e.getKey(), String.valueOf(e.getValue() + "-other")))));
      TestingUtil.assertNotDone(forEachFuture);
      // Let the tx put complete
      barrier.await(10, TimeUnit.SECONDS);
      forEachFuture.get(10, TimeUnit.MINUTES);
      // The put should replace the value that forEach inserted
      assertEquals("value" + key, putFuture.get(10, TimeUnit.SECONDS));
      // The put should be last since it had to wait until lock was released on forEachWithLock
      assertEquals("value" + key + "-new-other", cache.get(key));
      // Make sure the locks were cleaned up properly
      LockManager lockManager = cache.getAdvancedCache().getComponentRegistry().getComponent(LockManager.class);
      assertEquals(0, lockManager.getNumberOfLocksHeld());
   }

   @DataProvider(name = "testLockedStreamInTx")
   public Object[][] testLockedStreamInTxProvider() {
      return new Object[][] { { Boolean.TRUE }, { Boolean.FALSE} };
   }

   /**
    * Verifies that a locked stream run inside a surrounding transaction applies
    * its writes whether the surrounding transaction commits or rolls back
    * (the stream's consumer runs in its own implicit transactions).
    */
   @Test(dataProvider = "testLockedStreamInTx")
   public void testLockedStreamInTxCommit(Boolean shouldCommit) throws Exception {
      for (int i = 0; i < 5; i++) {
         cache.put(i, "value" + i);
      }
      TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
      TestingUtil.withTx(tm, () -> {
         cache.getAdvancedCache().lockedStream().forEach((c, e) -> c.put(e.getKey(), e.getValue() + "-changed"));
         if (!shouldCommit) tm.setRollbackOnly();
         return null;
      });
      // Writes from the stream consumer are visible regardless of the outer tx outcome.
      for (int i = 0; i < 5; i++) {
         assertEquals("value" + i + "-changed", cache.get(i));
      }
   }

   /**
    * Verifies that the consumer of a locked stream may start and commit its own
    * explicit transaction for each entry.
    */
   public void testLockedStreamTxInsideConsumer() {
      for (int i = 0; i < 5; i++) {
         cache.put(i, "value" + i);
      }
      cache.getAdvancedCache().lockedStream().forEach((c, e) -> {
         try {
            TestingUtil.withTx(c.getAdvancedCache().getTransactionManager(), () -> c.put(e.getKey(), e.getValue() + "-changed"));
         } catch (Exception e1) {
            throw new RuntimeException(e1);
         }
      });
      for (int i = 0; i < 5; i++) {
         assertEquals("value" + i + "-changed", cache.get(i));
      }
   }

   @DataProvider(name = "testLockedStreamInTxAndConsumer")
   public Object[][] testLockedStreamInTxAndConsumerProvider() {
      return new Object[][] {
            { Boolean.TRUE, Boolean.TRUE },
            { Boolean.TRUE, Boolean.FALSE },
            { Boolean.FALSE, Boolean.TRUE},
            { Boolean.FALSE, Boolean.FALSE}
      };
   }

   /**
    * Combines an outer transaction around the locked stream with an inner
    * transaction inside the consumer; only the inner transaction's outcome
    * determines whether the writes are applied.
    */
   @Test(dataProvider = "testLockedStreamInTxAndConsumer")
   public void testLockedStreamInTxAndConsumer(Boolean outerCommit, Boolean innerCommit) throws Exception {
      for (int i = 0; i < 5; i++) {
         cache.put(i, "value" + i);
      }
      TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
      TestingUtil.withTx(tm, () -> {
         cache.getAdvancedCache().lockedStream().forEach((c, e) -> {
            try {
               TransactionManager innerTm = c.getAdvancedCache().getTransactionManager();
               TestingUtil.withTx(innerTm, () -> {
                  c.put(e.getKey(), e.getValue() + "-changed");
                  if (!innerCommit) innerTm.setRollbackOnly();
                  return null;
               });
            } catch (Exception e1) {
               throw new RuntimeException(e1);
            }
         });
         if (!outerCommit) tm.setRollbackOnly();
         return null;
      });
      for (int i = 0; i < 5; i++) {
         assertEquals("value" + i + (innerCommit ? "-changed" : ""), cache.get(i));
      }
   }
}
| 5,690
| 34.792453
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/CacheClusterJoinTest.java
|
package org.infinispan.api;

import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;

import java.util.List;

import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.CacheClusterJoinTest")
public class CacheClusterJoinTest extends MultipleCacheManagersTest {
private EmbeddedCacheManager cm1, cm2;
private ConfigurationBuilder cfg;
public CacheClusterJoinTest() {
cleanup = CleanupPhase.AFTER_METHOD;
}
protected void createCacheManagers() throws Throwable {
cm1 = addClusterEnabledCacheManager();
cfg = new ConfigurationBuilder();
cfg.clustering().cacheMode(CacheMode.REPL_SYNC)
.stateTransfer().fetchInMemoryState(false);
cm1.defineConfiguration("cache", cfg.build());
}
public void testGetMembers() throws Exception {
cm1.getCache("cache"); // this will make sure any lazy components are started.
List memb1 = cm1.getMembers();
assert 1 == memb1.size() : "Expected 1 member; was " + memb1;
Object coord = memb1.get(0);
cm2 = addClusterEnabledCacheManager();
cm2.defineConfiguration("cache", cfg.build());
cm2.getCache("cache"); // this will make sure any lazy components are started.
TestingUtil.blockUntilViewsReceived(50000, true, cm1, cm2);
memb1 = cm1.getMembers();
List memb2 = cm2.getMembers();
assert 2 == memb1.size();
assert memb1.equals(memb2);
TestingUtil.killCacheManagers(cm1);
TestingUtil.blockUntilViewsReceived(50000, false, cm2);
memb2 = cm2.getMembers();
assert 1 == memb2.size();
assert !coord.equals(memb2.get(0));
}
public void testIsCoordinator() throws Exception {
cm1.getCache("cache"); // this will make sure any lazy components are started.
assert cm1.isCoordinator() : "Should be coordinator!";
cm2 = addClusterEnabledCacheManager();
cm2.defineConfiguration("cache", cfg.build());
cm2.getCache("cache"); // this will make sure any lazy components are started.
assert cm1.isCoordinator();
assert !cm2.isCoordinator();
TestingUtil.killCacheManagers(cm1);
// wait till cache2 gets the view change notification
TestingUtil.blockUntilViewsReceived(50000, false, cm2);
assert cm2.isCoordinator();
}
}
| 2,506
| 35.867647
| 84
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.