repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
infinispan-main/core/src/test/java/org/infinispan/api/lazy/LazyCacheAPITest.java
|
package org.infinispan.api.lazy;
import java.lang.reflect.Method;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.data.Person;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* Cache API test with lazy deserialization turned on.
*
* @author Galder Zamarreño
* @since 4.1
*/
@Test(groups = "functional", testName = "api.lazy.LazyCacheAPITest")
public class LazyCacheAPITest extends SingleCacheManagerTest {

   /**
    * Starts a single standalone cache named "lazy-cache-test" with BINARY
    * storage, so keys/values are kept in serialized form and deserialized lazily.
    */
   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      ConfigurationBuilder builder = getDefaultStandaloneCacheConfig(true);
      builder.memory().storageType(StorageType.BINARY);
      EmbeddedCacheManager manager = TestCacheManagerFactory.createCacheManager(false, TestDataSCI.INSTANCE);
      manager.defineConfiguration("lazy-cache-test", builder.build());
      cache = manager.getCache("lazy-cache-test");
      return manager;
   }

   /**
    * replace(k, v) must return the previous value and install the new one,
    * even when each lookup uses a freshly constructed (equal) key.
    */
   public void testReplace(Method m) {
      String name = m.getName();
      cache.put(new Person(name), "1");
      assert "1".equals(cache.get(new Person(name)));
      Object previous = cache.replace(new Person(name), "2");
      assert "1".equals(previous);
      assert "2".equals(cache.get(new Person(name)));
   }

   /**
    * Conditional replace(k, old, new) must fail on a mismatched expected value
    * and succeed on a matching one, for both String and Person values.
    */
   public void testReplaceWithOld(Method m) {
      String name = m.getName();
      cache.put(new Person(name), "1");
      assert "1".equals(cache.get(new Person(name)));
      assert !cache.replace(new Person(name), "99", "2");
      assert cache.replace(new Person(name), "1", "2");
      // Repeat the same checks with a custom (serialized) value type.
      Person customKey = new Person(name + "-withCustomValue");
      Person originalValue = new Person("value1");
      cache.put(customKey, originalValue);
      assert originalValue.equals(cache.get(customKey));
      Person wrongValue = new Person("value99");
      Person replacement = new Person("value2");
      assert !cache.replace(customKey, wrongValue, replacement);
      assert cache.replace(customKey, originalValue, replacement);
   }
}
| 2,170
| 35.183333
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/PutForExternalReadLockCleanupTest.java
|
package org.infinispan.api.mvcc;
import static org.testng.AssertJUnit.assertEquals;
import java.util.function.Consumer;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.PutForExternalReadLockCleanupTest")
@CleanupAfterMethod
public class PutForExternalReadLockCleanupTest extends MultipleCacheManagersTest {

   private static final String VALUE = "v";
   private static final Consumer<ConfigurationBuilder> ENABLE_L1 = c -> c.clustering().l1().enable();

   private String name;
   private Consumer<ConfigurationBuilder> amendConfiguration;

   /**
    * One test instance per locking-mode / L1 combination.
    */
   @Override
   public Object[] factory() {
      return new Object[] {
            new PutForExternalReadLockCleanupTest("NonTx").transactional(false),
            new PutForExternalReadLockCleanupTest("Optimistic").transactional(true).lockingMode(LockingMode.OPTIMISTIC),
            new PutForExternalReadLockCleanupTest("Pessimistic").transactional(true).lockingMode(LockingMode.PESSIMISTIC),
            new PutForExternalReadLockCleanupTest("NonTxL1", ENABLE_L1).transactional(false),
            new PutForExternalReadLockCleanupTest("OptimisticL1", ENABLE_L1).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
            new PutForExternalReadLockCleanupTest("PessimisticL1", ENABLE_L1).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
      };
   }

   // No-arg constructor required by the TestNG factory machinery.
   public PutForExternalReadLockCleanupTest() {}

   private PutForExternalReadLockCleanupTest(String name) {
      this(name, c -> {});
   }

   private PutForExternalReadLockCleanupTest(String name, Consumer<ConfigurationBuilder> amendConfiguration) {
      this.name = name;
      this.amendConfiguration = amendConfiguration;
   }

   @Override
   protected String parameters() {
      return "[" + name + "]";
   }

   public void testLockCleanupOnBackup() {
      doTest(false);
   }

   // NOTE(review): "Cleanupon" typo is kept — renaming would change the reported test name.
   public void testLockCleanuponOwner() {
      doTest(true);
   }

   /**
    * Two-node DIST_SYNC cluster, one owner per key; L1 off unless the
    * per-instance amendment re-enables it.
    */
   @Override
   protected void createCacheManagers() {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, transactional);
      builder.clustering().hash().numSegments(10).numOwners(1);
      builder.clustering().l1().disable();
      amendConfiguration.accept(builder);
      createClusteredCaches(2, TestDataSCI.INSTANCE, builder);
   }

   /**
    * Issues a putForExternalRead from either the key's owner or the other node,
    * waits for the value to be visible everywhere, then verifies no lock is left behind.
    */
   private void doTest(boolean fromOwner) {
      Cache<MagicKey, String> ownerCache = cache(0);
      Cache<MagicKey, String> otherCache = cache(1);
      MagicKey key = new MagicKey(ownerCache);
      Cache<MagicKey, String> writer = fromOwner ? ownerCache : otherCache;
      writer.putForExternalRead(key, VALUE);
      eventually(() -> ownerCache.containsKey(key) && otherCache.containsKey(key));
      assertEquals(VALUE, ownerCache.get(key));
      assertEquals(VALUE, otherCache.get(key));
      assertNotLocked(key);
   }
}
| 3,210
| 35.078652
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/PutForExternalReadInvTest.java
|
package org.infinispan.api.mvcc;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* PutForExternalRead tests for invalidated caches.
*
* @author Galder Zamarreño
* @since 6.0
*/
@Test(groups="functional", testName = "api.mvcc.PutForExternalReadInvTest")
public class PutForExternalReadInvTest extends MultipleCacheManagersTest {

   /**
    * Two transactional caches running synchronous invalidation with
    * READ_COMMITTED isolation.
    */
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder cfg = new ConfigurationBuilder();
      cfg.clustering().cacheMode(CacheMode.INVALIDATION_SYNC);
      cfg.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
      cfg.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      createClusteredCaches(2, TestDataSCI.INSTANCE, cfg);
   }

   /**
    * A node that performs putForExternalRead must immediately read back its
    * own write, on either member of the cluster.
    */
   public void testReadOwnWrites() {
      Cache<Integer, String> first = cache(0);
      Cache<Integer, String> second = cache(1);
      first.putForExternalRead(1, "v1");
      assertEquals("v1", first.get(1));
      second.putForExternalRead(1, "v1");
      assertEquals("v1", second.get(1));
   }
}
| 1,437
| 32.44186
| 75
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/LockPerEntryTest.java
|
package org.infinispan.api.mvcc;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.concurrent.locks.impl.LockContainer;
import org.testng.annotations.Test;
@Test(groups = "functional", singleThreaded = true, testName = "api.mvcc.LockPerEntryTest")
public class LockPerEntryTest extends SingleCacheManagerTest {

   /**
    * Lock striping is disabled so the cache holds one lock per entry, which is
    * exactly what {@link #assertNoLocks()} counts.
    */
   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      ConfigurationBuilder cfg = new ConfigurationBuilder();
      cfg.locking().useLockStriping(false);
      return TestCacheManagerFactory.createCacheManager(cfg);
   }

   /**
    * Plain puts must leave no per-entry locks behind once they complete.
    */
   public void testLocksCleanedUp() {
      cache = cacheManager.getCache();
      cache.put("/a/b/c", "v");
      cache.put("/a/b/d", "v");
      assertNoLocks();
   }

   /**
    * Hammers the cache with concurrent put/remove from 10 threads released by a
    * single latch, then verifies no exception escaped and no lock is still held.
    */
   public void testLocksConcurrency() throws Exception {
      cache = cacheManager.getCache();
      final int NUM_THREADS = 10;
      final CountDownLatch l = new CountDownLatch(1);
      final int numLoops = 1000;
      // FIX: the original added to this LinkedList from multiple threads without
      // any synchronization (a data race that can corrupt the list or lose
      // failures). All access is now guarded by the list's own monitor.
      final List<Exception> exceptions = new LinkedList<>();
      Thread[] t = new Thread[NUM_THREADS];
      for (int i = 0; i < NUM_THREADS; i++) {
         t[i] = new Thread() {
            @Override
            public void run() {
               try {
                  l.await();
               } catch (InterruptedException e) {
                  // FIX: restore the interrupt flag instead of silently
                  // swallowing it; the latch wait remains best-effort.
                  Thread.currentThread().interrupt();
               }
               for (int j = 0; j < numLoops; j++) {
                  try {
                     // Even iterations write, odd iterations remove — both paths
                     // acquire and must release the per-entry lock.
                     if (j % 2 == 0) {
                        cache.put("Key" + j, "v");
                     } else {
                        cache.remove("Key" + j);
                     }
                  } catch (Exception e) {
                     synchronized (exceptions) {
                        exceptions.add(e);
                     }
                  }
               }
            }
         };
      }
      for (Thread th : t) th.start();
      l.countDown();
      for (Thread th : t) th.join();
      // All workers have been joined, but synchronize anyway for a safe
      // happens-before read of the collected failures.
      synchronized (exceptions) {
         if (!exceptions.isEmpty()) throw exceptions.get(0);
      }
      assertNoLocks();
   }

   /**
    * Asserts both via LockAssert and by reflecting into the lock container
    * that zero locks are currently held.
    */
   private void assertNoLocks() {
      LockManager lm = TestingUtil.extractLockManager(cache);
      LockAssert.assertNoLocks(lm);
      LockContainer lc = TestingUtil.extractField(lm, "lockContainer");
      assert lc.size() == 0;
   }
}
| 2,703
| 30.811765
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/LockTestBase.java
|
package org.infinispan.api.mvcc;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collections;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.CacheContainer;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransaction;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
*/
@Test(groups = "functional")
// Base class exercising per-key lock acquisition and release around
// transactional cache operations. Subclasses choose the isolation level by
// setting 'repeatableRead' in their constructor before setUp() runs.
public abstract class LockTestBase extends AbstractInfinispanTest {
private Log log = LogFactory.getLog(LockTestBase.class);
// REPEATABLE_READ when true, READ_COMMITTED when false; read once in setUp().
protected boolean repeatableRead = true;
// Cache manager created per test method; killed in tearDown().
private CacheContainer cm;
// Bundles the cache together with the TM and LockManager extracted from it,
// so each test can grab all three in one field read.
protected static final class LockTestData {
public Cache<String, String> cache;
public EmbeddedTransactionManager tm;
public LockManager lockManager;
}
protected LockTestData lockTestData;
// Builds a fresh transactional cache with a short lock-acquisition timeout
// (so contention scenarios fail fast) and captures its LockManager and TM.
@BeforeMethod
public void setUp() {
LockTestData ltd = new LockTestData();
ConfigurationBuilder defaultCfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
defaultCfg
.locking()
.isolationLevel(repeatableRead ? IsolationLevel.REPEATABLE_READ : IsolationLevel.READ_COMMITTED)
.lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
.transaction()
.transactionManagerLookup(new EmbeddedTransactionManagerLookup());
cm = TestCacheManagerFactory.createCacheManager(defaultCfg);
ltd.cache = cm.getCache();
ltd.lockManager = TestingUtil.extractComponentRegistry(ltd.cache).getComponent(LockManager.class);
ltd.tm = (EmbeddedTransactionManager) TestingUtil.extractComponentRegistry(ltd.cache).getComponent(TransactionManager.class);
lockTestData = ltd;
}
@AfterMethod
public void tearDown() {
log.debug("**** - STARTING TEARDOWN - ****");
TestingUtil.killCacheManagers(cm);
lockTestData = null;
}
// Convenience wrappers around LockAssert bound to this test's lock manager.
protected void assertLocked(Object key) {
LockAssert.assertLocked(key, lockTestData.lockManager);
}
protected void assertNotLocked(Object key) {
LockAssert.assertNotLocked(key, lockTestData.lockManager);
}
protected void assertNoLocks() {
LockAssert.assertNoLocks(lockTestData.lockManager);
}
// A put must hold the key's lock between prepare and commit and release it
// afterwards; a plain read must not lock. Remove behaves like put.
public void testLocksOnPutKeyVal() throws Exception {
Cache<String, String> cache = lockTestData.cache;
EmbeddedTransactionManager tm = lockTestData.tm;
tm.begin();
cache.put("k", "v");
assertTrue(tm.getTransaction().runPrepare());
assertLocked("k");
tm.getTransaction().runCommit(false);
assertNoLocks();
tm.begin();
assertEquals("v", cache.get("k"));
assertNotLocked("k");
tm.commit();
assertNoLocks();
tm.begin();
cache.remove("k");
assertTrue(tm.getTransaction().runPrepare());
assertLocked("k");
tm.getTransaction().runCommit(false);
assertNoLocks();
}
// Same lock lifecycle as above, but the write goes through putAll().
public void testLocksOnPutData() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
tm.begin();
cache.putAll(Collections.singletonMap("k", "v"));
assertEquals("v", cache.get("k"));
final EmbeddedTransaction tx = ((EmbeddedTransactionManager) tm).getTransaction();
assertTrue(tx.runPrepare());
assertLocked("k");
tx.runCommit(false);
assertNoLocks();
tm.begin();
assertEquals("v", cache.get("k"));
assertNoLocks();
tm.commit();
assertNoLocks();
}
// evict() is a local, non-transactional operation: it must not lock the key.
public void testLocksOnEvict() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
// init some data
cache.putAll(Collections.singletonMap("k", "v"));
assertEquals("v", cache.get("k"));
tm.begin();
cache.evict("k");
assertNotLocked("k");
tm.commit();
assertFalse(cache.containsKey("k"));
assertNoLocks();
}
// Removing a key that does not exist still locks it during prepare.
public void testLocksOnRemoveNonexistent() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
EmbeddedTransactionManager tm = tl.tm;
assert !cache.containsKey("k") : "Should not exist";
tm.begin();
cache.remove("k");
tm.getTransaction().runPrepare();
assertLocked("k");
tm.getTransaction().runCommit(false);
assert !cache.containsKey("k") : "Should not exist";
assertNoLocks();
}
// Evicting a nonexistent key must not lock it either.
public void testLocksOnEvictNonexistent() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
assert !cache.containsKey("k") : "Should not exist";
tm.begin();
cache.evict("k");
assertNotLocked("k");
tm.commit();
assert !cache.containsKey("k") : "Should not exist";
assertNoLocks();
}
// Removing multiple keys in one tx locks each of them during prepare.
public void testLocksOnRemoveData() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
EmbeddedTransactionManager tm = tl.tm;
// init some data
cache.put("k", "v");
cache.put("k2", "v2");
assertEquals("v", cache.get("k"));
assertEquals("v2", cache.get("k2"));
// remove
tm.begin();
cache.remove("k");
cache.remove("k2");
assertTrue(tm.getTransaction().runPrepare());
assertLocked("k");
assertLocked("k2");
tm.getTransaction().runCommit(false);
assert cache.isEmpty();
assertNoLocks();
}
// MVCC guarantee: an uncommitted write must not block a concurrent reader,
// and the reader's view after the write commits depends on the isolation level.
public void testWriteDoesntBlockRead() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
cache.put("k", "v");
// start a write.
tm.begin();
cache.put("k2", "v2");
Transaction write = tm.suspend();
// now start a read and confirm that the write doesn't block it.
tm.begin();
assertEquals("v", cache.get("k"));
assert null == cache.get("k2") : "Should not see uncommitted changes";
Transaction read = tm.suspend();
// commit the write
tm.resume(write);
tm.commit();
assertNoLocks();
tm.resume(read);
String value = cache.get("k2");
if (repeatableRead) {
assert null == value : "Should have repeatable read";
}
else
// no guarantees with read committed
assertTrue(null == value || "v2".equals(value));
tm.commit();
assertNoLocks();
}
// Like the previous test, but the concurrent write updates an existing key:
// repeatable read keeps the old value, read committed may see the new one.
public void testUpdateDoesntBlockRead() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
cache.put("k", "v");
// Change K
tm.begin();
cache.put("k", "v2");
Transaction write = tm.suspend();
// now start a read and confirm that the write doesn't block it.
tm.begin();
assertEquals("v", cache.get("k"));
Transaction read = tm.suspend();
// commit the write
tm.resume(write);
tm.commit();
assertNoLocks();
tm.resume(read);
if (repeatableRead)
assertEquals("Should have repeatable read", "v", cache.get("k"));
else
assertEquals("Read committed should see committed changes", "v2", cache.get("k"));
tm.commit();
assertNoLocks();
}
// Same non-blocking guarantee when the key did not exist when the read started.
public void testWriteDoesntBlockReadNonexistent() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
// start a write.
tm.begin();
cache.put("k", "v");
Transaction write = tm.suspend();
// now start a read and confirm that the write doesn't block it.
tm.begin();
assert null == cache.get("k") : "Should not see uncommitted changes";
Transaction read = tm.suspend();
// commit the write
tm.resume(write);
tm.commit();
assertNoLocks();
tm.resume(read);
String value = cache.get("k");
if (repeatableRead) {
assert null == value : "Should have repeatable read";
} else {
// no guarantees with read committed
assertTrue(null == value || "v".equals(value));
}
tm.commit();
assertNoLocks();
}
// Two txs writing the same key: the second prepare must fail (lock timeout)
// while the first holds the lock, and rollback must release everything.
public void testConcurrentWriters() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
EmbeddedTransactionManager tm = tl.tm;
tm.begin();
cache.put("k", "v");
final EmbeddedTransaction transaction = tm.getTransaction();
assertTrue(transaction.runPrepare());
tm.suspend();
tm.begin();
cache.put("k", "v");
assert !tm.getTransaction().runPrepare();
tm.rollback();
tm.resume(transaction);
transaction.runCommit(false);
assertNoLocks();
}
// A rolled-back write must leave the original value visible to a concurrent
// reader both before and after that reader commits.
public void testRollbacks() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
cache.put("k", "v");
tm.begin();
assertEquals("v", cache.get("k"));
Transaction reader = tm.suspend();
tm.begin();
cache.put("k", "v2");
tm.rollback();
tm.resume(reader);
Object value = cache.get("k");
assertEquals("v", value);
tm.commit();
// even after commit
assertEquals("v", cache.get("k"));
assertNoLocks();
}
// Rolling back the creation of a previously-absent key must leave the key
// absent for a concurrent reader that first saw it as null.
public void testRollbacksOnNullEntry() throws Exception {
LockTestData tl = lockTestData;
Cache<String, String> cache = tl.cache;
TransactionManager tm = tl.tm;
tm.begin();
assert null == cache.get("k");
Transaction reader = tm.suspend();
tm.begin();
cache.put("k", "v");
assertEquals("v", cache.get("k"));
tm.rollback();
tm.resume(reader);
assert null == cache.get("k") : "Expecting null but was " + cache.get("k");
tm.commit();
// even after commit
assert null == cache.get("k");
assertNoLocks();
}
}
| 10,890
| 28.514905
| 131
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/PutForExternalReadTest.java
|
package org.infinispan.api.mvcc;
import static org.infinispan.context.Flag.CACHE_MODE_LOCAL;
import static org.infinispan.test.TestingUtil.k;
import static org.infinispan.test.TestingUtil.v;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.lang.reflect.Method;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.interceptors.impl.CallInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.ReplListener;
import org.infinispan.test.TestBlocking;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.InTransactionMode;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.testng.annotations.Test;
import jakarta.transaction.Status;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
@Test(groups = "functional", testName = "api.mvcc.PutForExternalReadTest")
@CleanupAfterMethod
// Exercises putForExternalRead (PFER) semantics across DIST/REPL sync clusters,
// with and without transactions: fire-and-forget propagation, no-op when the
// key already exists, suppression of failures, and no transaction leakage.
public class PutForExternalReadTest extends MultipleCacheManagersTest {
protected static final String CACHE_NAME = "pferSync";
protected static final String key = "k", value = "v1", value2 = "v2";
// One instance per cache mode / transaction / locking-mode combination.
@Override
public Object[] factory() {
return new Object[] {
new PutForExternalReadTest().cacheMode(CacheMode.DIST_SYNC).transactional(false),
new PutForExternalReadTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
new PutForExternalReadTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
new PutForExternalReadTest().cacheMode(CacheMode.REPL_SYNC).transactional(false),
new PutForExternalReadTest().cacheMode(CacheMode.REPL_SYNC).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
new PutForExternalReadTest().cacheMode(CacheMode.REPL_SYNC).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
};
}
@Override
protected void createCacheManagers() {
ConfigurationBuilder c = createCacheConfigBuilder();
createClusteredCaches(2, CACHE_NAME, TestDataSCI.INSTANCE, c);
}
// numOwners(100) makes every node an owner even in DIST mode, so both caches
// always end up holding the key.
protected ConfigurationBuilder createCacheConfigBuilder() {
ConfigurationBuilder c = getDefaultClusteredCacheConfig(cacheMode, transactional);
c.clustering().hash().numOwners(100);
c.clustering().hash().numSegments(4);
if (lockingMode != null) {
c.transaction().lockingMode(lockingMode);
}
return c;
}
// This test executes PFER on cache1, and expects that it will be relayed to cache2 == primary
// and then sent to cache1 again for backup.
@InCacheMode({CacheMode.DIST_SYNC, CacheMode.REPL_SYNC})
public void testKeyOnlyWrittenOnceOnOriginator() throws Exception {
final Cache<MagicKey, String> cache1 = cache(0, CACHE_NAME);
final Cache<MagicKey, String> cache2 = cache(1, CACHE_NAME);
final CyclicBarrier barrier = new CyclicBarrier(2);
// Interceptor on cache1 that pauses the remote (backup) PutKeyValueCommand
// twice, letting the test assert state before and after the backup write.
cache1.getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new BaseAsyncInterceptor() {
@Override
public Object visitCommand(InvocationContext ctx, VisitableCommand command)
throws Throwable {
if (command instanceof PutKeyValueCommand) {
if (!ctx.isOriginLocal()) {
// wait first before the check
TestBlocking.await(barrier, 10, TimeUnit.SECONDS);
// and once more after the check
TestBlocking.await(barrier, 10, TimeUnit.SECONDS);
}
}
return invokeNext(ctx, command);
}
}, 0);
final MagicKey myKey = new MagicKey(cache2);
cache1.putForExternalRead(myKey, value);
// Verify that the key was not written on the origin by the time it was looped back
barrier.await(10, TimeUnit.SECONDS);
assertNull(cache1.get(myKey));
// Verify that the key is written on the origin afterwards
barrier.await(10, TimeUnit.SECONDS);
eventually(() -> value.equals(cache1.get(myKey)) && value.equals(cache2.get(myKey)));
}
// PFER on a key that already has a value must leave the existing value intact.
public void testNoOpWhenKeyPresent() {
final Cache<String, String> cache1 = cache(0, CACHE_NAME);
final Cache<String, String> cache2 = cache(1, CACHE_NAME);
cache1.putForExternalRead(key, value);
eventually(() -> value.equals(cache1.get(key)) && value.equals(cache2.get(key)));
// reset
cache1.remove(key);
eventually(() -> cache1.isEmpty() && cache2.isEmpty());
cache1.put(key, value);
eventually(() -> value.equals(cache1.get(key)) && value.equals(cache2.get(key)));
// now this pfer should be a no-op
cache1.putForExternalRead(key, value2);
assertEquals("PFER should have been a no-op", value, cache1.get(key));
assertEquals("PFER should have been a no-op", value, cache2.get(key));
}
// PFER issued inside an ongoing tx must commit independently of that tx.
@InTransactionMode(TransactionMode.TRANSACTIONAL)
public void testTxSuspension() throws Exception {
final Cache<String, String> cache1 = cache(0, CACHE_NAME);
final Cache<String, String> cache2 = cache(1, CACHE_NAME);
cache1.put(key + "0", value);
eventually(() -> value.equals(cache2.get(key+"0")));
// start a tx and do some stuff.
tm(0, CACHE_NAME).begin();
cache1.get(key + "0");
cache1.putForExternalRead(key, value); // should have happened in a separate tx and have committed already.
Transaction t = tm(0, CACHE_NAME).suspend();
eventually(() -> value.equals(cache1.get(key)) && value.equals(cache2.get(key)));
tm(0, CACHE_NAME).resume(t);
tm(0, CACHE_NAME).commit();
eventually(() -> value.equals(cache1.get(key + "0")) && value.equals(cache2.get(key + "0")));
}
// PFER must swallow exceptions that would make a regular put/remove fail.
public void testExceptionSuppression() throws Exception {
Cache<String, String> cache1 = cache(0, CACHE_NAME);
Cache<String, String> cache2 = cache(1, CACHE_NAME);
// Interceptor that makes every put/remove blow up before reaching the store.
assertTrue(cache1.getAdvancedCache().getAsyncInterceptorChain().addInterceptorBefore(new BaseAsyncInterceptor() {
@Override
public Object visitCommand(InvocationContext ctx, VisitableCommand command)
throws Throwable {
if (command instanceof PutKeyValueCommand || command instanceof RemoveCommand) {
throw new RuntimeException("Barf!");
}
return invokeNext(ctx, command);
}
}, CallInterceptor.class));
// if cache1 is not primary, the value gets committed on cache2
try {
cache1.put(key, value);
fail("Should have barfed");
} catch (RuntimeException re) {
}
// clean up any indeterminate state left over
try {
cache1.remove(key);
fail("Should have barfed");
} catch (RuntimeException re) {
}
assertNull("Should have cleaned up", cache1.get(key));
assertNull("Should have cleaned up", cache1.getAdvancedCache().getDataContainer().get(key));
assertNull("Should have cleaned up", cache2.get(key));
InternalCacheEntry<String, String> cache2Entry = cache2.getAdvancedCache().getDataContainer().get(key);
assertTrue("Should have cleaned up", cache2Entry == null);
// should not barf
cache1.putForExternalRead(key, value);
}
// PFER on one node propagates to the other; a second PFER on an existing key
// must not replicate back.
public void testBasicPropagation() throws Exception {
final Cache<String, String> cache1 = cache(0, CACHE_NAME);
final Cache<String, String> cache2 = cache(1, CACHE_NAME);
assertFalse(cache1.containsKey(key));
assertFalse(cache2.containsKey(key));
ReplListener replListener2 = replListener(cache2);
replListener2.expect(PutKeyValueCommand.class);
cache1.putForExternalRead(key, value);
replListener2.waitForRpc();
// wait for command the finish executing asynchronously
eventually(() -> cache1.containsKey(key) && cache2.containsKey(key));
assertEquals("PFER updated cache1", value, cache1.get(key));
assertEquals("PFER propagated to cache2 as expected", value, cache2.get(key));
// replication to cache 1 should NOT happen.
cache2.putForExternalRead(key, value + "0");
assertEquals("PFER updated cache2", value, cache2.get(key));
assertEquals("Cache1 should be unaffected", value, cache1.get(key));
}
/**
* Tests that setting a cacheModeLocal=true flag prevents propagation of the putForExternalRead().
*/
public void testSimpleCacheModeLocal(Method m) throws Exception {
cacheModeLocalTest(false, m);
}
/**
* Tests that setting a cacheModeLocal=true flag prevents propagation of the putForExternalRead() when the call
* occurs inside a transaction.
*/
@InTransactionMode(TransactionMode.TRANSACTIONAL)
public void testCacheModeLocalInTx(Method m) throws Exception {
cacheModeLocalTest(true, m);
}
/**
* Tests that suspended transactions do not leak. See JBCACHE-1246.
*/
@InTransactionMode(TransactionMode.TRANSACTIONAL)
public void testMemLeakOnSuspendedTransactions() throws Exception {
Cache<String, String> cache1 = cache(0, CACHE_NAME);
Cache<String, String> cache2 = cache(1, CACHE_NAME);
TransactionManager tm1 = TestingUtil.getTransactionManager(cache1);
ReplListener replListener2 = replListener(cache2);
replListener2.expect(PutKeyValueCommand.class);
tm1.begin();
cache1.putForExternalRead(key, value);
tm1.commit();
replListener2.waitForRpc();
final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
final TransactionTable tt2 = TestingUtil.extractComponent(cache2, TransactionTable.class);
// After commit, neither node may retain local or remote transactions.
eventually(() -> tt1.getRemoteTxCount() == 0 && tt1.getLocalTxCount() == 0 &&
tt2.getRemoteTxCount() == 0 && tt2.getLocalTxCount() == 0);
replListener2.expectWithTx(PutKeyValueCommand.class);
tm1.begin();
assertEquals(tm1.getTransaction().getStatus(), Status.STATUS_ACTIVE);
cache1.putForExternalRead(key, value);
// The surrounding tx must stay ACTIVE: PFER runs in its own implicit tx.
assertEquals(tm1.getTransaction().getStatus(), Status.STATUS_ACTIVE);
cache1.put(key, value);
assertEquals(tm1.getTransaction().getStatus(), Status.STATUS_ACTIVE);
log.info("Before commit!!");
tm1.commit();
eventually(() -> (tt1.getRemoteTxCount() == 0) && (tt1.getLocalTxCount() == 0) && (tt2.getRemoteTxCount() == 0)
&& (tt2.getLocalTxCount() == 0));
replListener2.expectWithTx(PutKeyValueCommand.class);
tm1.begin();
cache1.put(key, value);
cache1.putForExternalRead(key, value);
tm1.commit();
eventually(() -> (tt1.getRemoteTxCount() == 0) && (tt1.getLocalTxCount() == 0) && (tt2.getRemoteTxCount() == 0)
&& (tt2.getLocalTxCount() == 0));
replListener2.expectWithTx(PutKeyValueCommand.class, PutKeyValueCommand.class);
tm1.begin();
cache1.put(key, value);
cache1.putForExternalRead(key, value);
cache1.put(key, value);
tm1.commit();
eventually(() -> (tt1.getRemoteTxCount() == 0) && (tt1.getLocalTxCount() == 0) && (tt2.getRemoteTxCount() == 0)
&& (tt2.getLocalTxCount() == 0));
}
// A second PFER with a different value for the same key must be a no-op.
public void testMultipleIdenticalPutForExternalReadCalls() {
final Cache<String, String> cache1 = cache(0, CACHE_NAME);
final Cache<String, String> cache2 = cache(1, CACHE_NAME);
cache1.putForExternalRead(key, value);
// wait for command the finish executing asynchronously
eventually(() -> cache1.containsKey(key) && cache2.containsKey(key));
cache1.putForExternalRead(key, value2);
assertEquals(value, cache1.get(key));
}
/**
* Tests that setting a cacheModeLocal=true flag prevents propagation of the putForExternalRead().
*
* @throws Exception
*/
private void cacheModeLocalTest(boolean transactional, Method m) throws Exception {
Cache<String, String> cache1 = cache(0, CACHE_NAME);
Cache<String, String> cache2 = cache(1, CACHE_NAME);
TransactionManager tm1 = TestingUtil.getTransactionManager(cache1);
if (transactional)
tm1.begin();
String k = k(m);
// CACHE_MODE_LOCAL: the entry lands in cache1's data container only.
cache1.getAdvancedCache().withFlags(CACHE_MODE_LOCAL).putForExternalRead(k, v(m));
assertTrue(cache1.getAdvancedCache().getDataContainer().containsKey(k));
assertFalse(cache2.getAdvancedCache().withFlags(CACHE_MODE_LOCAL).containsKey(k));
assertFalse(cache2.getAdvancedCache().getDataContainer().containsKey(k));
if (transactional)
tm1.commit();
}
}
| 13,487
| 39.504505
| 126
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/LockAssert.java
|
package org.infinispan.api.mvcc;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import org.infinispan.Cache;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.concurrent.locks.impl.LockContainer;
/**
* Helper class to assert lock status in MVCC
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
*/
/**
 * Helper assertions for lock status in MVCC tests.
 */
public class LockAssert {

   /** Asserts that {@code key} is currently locked by the given lock manager. */
   public static void assertLocked(Object key, LockManager lockManager) {
      assertTrue("" + key + " not locked!", lockManager.isLocked(key));
   }

   /** Asserts that {@code key} is NOT currently locked by the given lock manager. */
   public static void assertNotLocked(Object key, LockManager lockManager) {
      // FIX: the failure message previously read "not locked!" — the opposite of
      // the failing condition (this assertion fails when the key IS locked).
      assertFalse("" + key + " is locked!", lockManager.isLocked(key));
   }

   /**
    * Asserts that the lock manager holds no locks at all, reporting the held
    * count and full lock info on failure.
    */
   public static void assertNoLocks(LockManager lockManager) {
      LockContainer lc = TestingUtil.extractField(lockManager, "lockContainer");
      assertEquals("Stale locks exist! NumLocksHeld is " + lc.getNumLocksHeld() + " and lock info is " + lockManager.printLockInfo(),
            0, lc.getNumLocksHeld());
   }

   /** Convenience overload that extracts the LockManager from the cache first. */
   public static void assertNoLocks(Cache cache) {
      LockManager lockManager = TestingUtil.extractComponentRegistry(cache).getComponent(LockManager.class);
      assertNoLocks(lockManager);
   }
}
| 1,370
| 36.054054
| 133
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/repeatable_read/RepeatableReadLockTest.java
|
package org.infinispan.api.mvcc.repeatable_read;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import jakarta.transaction.RollbackException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.api.mvcc.LockTestBase;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.repeatable_read.RepeatableReadLockTest")
public class RepeatableReadLockTest extends LockTestBase {

   public RepeatableReadLockTest() {
      // Every test in this class runs with repeatable-read semantics enabled.
      repeatableRead = true;
   }

   public void testRepeatableReadWithRemove() throws Exception {
      LockTestData data = lockTestData;
      Cache<String, String> c = data.cache;
      TransactionManager mgr = data.tm;

      c.put("k", "v");

      // Reader tx observes the entry and is suspended.
      mgr.begin();
      assertNotNull(c.get("k"));
      Transaction readerTx = mgr.suspend();

      // A concurrent tx removes the entry and commits.
      mgr.begin();
      assertNotNull(c.remove("k"));
      assertNull(c.get("k"));
      mgr.commit();
      assertNull(c.get("k"));

      // Repeatable read: the suspended reader still sees the value it read before.
      mgr.resume(readerTx);
      assertNotNull(c.get("k"));
      assertEquals("v", c.get("k"));
      mgr.commit();

      assertNull(c.get("k"));
      assertNoLocks();
   }

   public void testRepeatableReadWithEvict() throws Exception {
      LockTestData data = lockTestData;
      Cache<String, String> c = data.cache;
      TransactionManager mgr = data.tm;

      c.put("k", "v");

      // Reader tx observes the entry and is suspended.
      mgr.begin();
      assertNotNull(c.get("k"));
      Transaction readerTx = mgr.suspend();

      // A concurrent tx evicts the entry and commits.
      mgr.begin();
      c.evict("k");
      assertNull(c.get("k"));
      mgr.commit();
      assertNull(c.get("k"));

      // The suspended reader must still see its original read.
      mgr.resume(readerTx);
      assertNotNull(c.get("k"));
      assertEquals("v", c.get("k"));
      mgr.commit();

      assertNull(c.get("k"));
      assertNoLocks();
   }

   public void testRepeatableReadWithNull() throws Exception {
      LockTestData data = lockTestData;
      Cache<String, String> c = data.cache;
      TransactionManager mgr = data.tm;

      assertNull(c.get("k"));

      // Reader tx observes the *absence* of the entry and is suspended.
      mgr.begin();
      assertNull(c.get("k"));
      Transaction readerTx = mgr.suspend();

      // A concurrent tx creates the entry and commits.
      mgr.begin();
      c.put("k", "v");
      assertNotNull(c.get("k"));
      assertEquals("v", c.get("k"));
      mgr.commit();
      assertNotNull(c.get("k"));
      assertEquals("v", c.get("k"));

      // Repeatable read also applies to "entry missing" reads: the reader still sees null.
      mgr.resume(readerTx);
      assertEquals(null, c.get("k"));
      mgr.commit();

      assertNotNull(c.get("k"));
      assertEquals("v", c.get("k"));
      assertNoLocks();
   }

   public void testRepeatableReadWithNullRemoval() throws Exception {
      LockTestData data = lockTestData;
      Cache<String, String> c = data.cache;
      TransactionManager mgr = data.tm;

      // start with an empty cache
      mgr.begin();
      c.get("a");
      Transaction suspended = mgr.suspend();

      // The key is created outside the suspended tx.
      c.put("a", "v2");
      assertEquals(c.get("a"), "v2");

      // The tx that read null removes the key; its commit is expected to fail.
      mgr.resume(suspended);
      assertEquals(null, c.get("a"));
      c.remove("a");
      Exceptions.expectException(RollbackException.class, mgr::commit);

      assertEquals(c.get("a"), "v2");
   }

   @Override
   public void testLocksOnPutKeyVal() throws Exception {
      LockTestData data = lockTestData;
      Cache<String, String> c = data.cache;
      EmbeddedTransactionManager mgr = data.tm;

      // put() holds the key lock between prepare and commit.
      mgr.begin();
      c.put("k", "v");
      mgr.getTransaction().runPrepare();
      assertLocked("k");
      mgr.getTransaction().runCommit(false);
      mgr.suspend();
      assertNoLocks();

      // A plain read does not lock the key.
      mgr.begin();
      assertEquals(c.get("k"), "v");
      assertNotLocked("k");
      mgr.commit();
      assertNoLocks();

      // remove() also holds the key lock between prepare and commit.
      mgr.begin();
      c.remove("k");
      mgr.getTransaction().runPrepare();
      assertLocked("k");
      mgr.getTransaction().runCommit(false);
      assertNoLocks();
   }
}
| 4,083
| 24.685535
| 90
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/repeatable_read/WriteSkewWithPersistenceTest.java
|
package org.infinispan.api.mvcc.repeatable_read;
import java.util.stream.StreamSupport;
import jakarta.transaction.HeuristicMixedException;
import jakarta.transaction.HeuristicRollbackException;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.repeatable_read.WriteSkewWithPersistenceTest")
public class WriteSkewWithPersistenceTest extends WriteSkewTest {

   @Override
   protected ConfigurationBuilder createConfigurationBuilder() {
      // Same configuration as the parent test, plus a dummy in-memory store.
      ConfigurationBuilder builder = super.createConfigurationBuilder();
      builder.persistence().addStore(new DummyInMemoryStoreConfigurationBuilder(builder.persistence()));
      builder.clustering().hash().groups().enabled();
      return builder;
   }

   @Override
   protected void commit() throws RollbackException, HeuristicMixedException, HeuristicRollbackException, SystemException {
      // Make sure that all entries are evicted from the data container before committing.
      DataContainer<Object, Object> container = TestingUtil.extractComponent(cache, InternalDataContainer.class);
      // Snapshot the keys first so we do not evict while iterating the container.
      Object[] cachedKeys = StreamSupport.stream(container.spliterator(), false).map(InternalCacheEntry::getKey).toArray(Object[]::new);
      for (Object cachedKey : cachedKeys) {
         container.evict(cachedKey);
      }
      super.commit();
   }
}
| 1,792
| 44.974359
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/repeatable_read/WriteSkewTest.java
|
package org.infinispan.api.mvcc.repeatable_read;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import jakarta.transaction.HeuristicMixedException;
import jakarta.transaction.HeuristicRollbackException;
import jakarta.transaction.RollbackException;
import jakarta.transaction.Status;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.api.mvcc.LockAssert;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.repeatable_read.WriteSkewTest")
public class WriteSkewTest extends AbstractInfinispanTest {
   private static final Log log = LogFactory.getLog(WriteSkewTest.class);

   protected TransactionManager tm;
   protected LockManager lockManager;
   protected EmbeddedCacheManager cacheManager;
   // volatile: the forked test threads read this field while tests may reassign it
   protected volatile Cache<String, String> cache;

   @BeforeClass
   public void setUp() {
      ConfigurationBuilder configurationBuilder = createConfigurationBuilder();
      configurationBuilder.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      // The default cache is NOT write skew enabled.
      cacheManager = TestCacheManagerFactory.createCacheManager(configurationBuilder);
      // The "writeSkew" cache runs with REPEATABLE_READ, which enables the write-skew check.
      configurationBuilder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      configurationBuilder.clustering().hash().groups().enabled();
      cacheManager.defineConfiguration("writeSkew", configurationBuilder.build());
   }

   protected ConfigurationBuilder createConfigurationBuilder() {
      ConfigurationBuilder configurationBuilder = new ConfigurationBuilder();
      configurationBuilder
            .transaction()
            .transactionMode(TransactionMode.TRANSACTIONAL)
            .locking()
            .lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
            .isolationLevel(IsolationLevel.REPEATABLE_READ);
      return configurationBuilder;
   }

   @AfterClass
   public void tearDown() {
      TestingUtil.killCacheManagers(cacheManager);
      cacheManager = null;
      cache = null;
      lockManager = null;
      tm = null;
   }

   @BeforeMethod
   public void postStart() {
      // Each test starts against the write-skew-enabled cache; some tests switch to the default one.
      cache = cacheManager.getCache("writeSkew");
      lockManager = TestingUtil.extractComponentRegistry(cache).getComponent(LockManager.class);
      tm = TestingUtil.extractComponentRegistry(cache).getComponent(TransactionManager.class);
   }

   /** Commits the current transaction; overridable so subclasses can add extra steps. */
   protected void commit() throws RollbackException, HeuristicMixedException, HeuristicRollbackException, SystemException {
      tm.commit();
   }

   protected void assertNoLocks() {
      LockAssert.assertNoLocks(lockManager);
   }

   public void testDontCheckWriteSkew() throws Exception {
      // Use the default cache here.
      cache = cacheManager.getCache();
      lockManager = TestingUtil.extractComponentRegistry(cache).getComponent(LockManager.class);
      tm = TestingUtil.extractComponentRegistry(cache).getComponent(TransactionManager.class);
      doTest(true);
   }

   public void testCheckWriteSkew() throws Exception {
      doTest(false);
   }

   /**
    * Tests write skew with two concurrent transactions that each execute two put() operations. One put() is done on the
    * same key to create a write skew. The second put() is only needed to avoid optimizations done by
    * OptimisticLockingInterceptor for single modification transactions and force it to reach the code path that
    * triggers ISPN-2092.
    */
   public void testCheckWriteSkewWithMultipleModifications() throws Exception {
      final CountDownLatch latch1 = new CountDownLatch(1);
      final CountDownLatch latch2 = new CountDownLatch(1);
      final CountDownLatch latch3 = new CountDownLatch(1);
      Future<Void> t1 = fork(() -> {
         latch1.await();
         tm.begin();
         try {
            try {
               cache.get("k1");
               cache.put("k1", "v1");
               cache.put("k2", "thread 1");
            } finally {
               // Let transaction 2 proceed once our reads/writes are registered.
               latch2.countDown();
            }
            latch3.await();
            // Transaction 2 committed first, so this commit must fail the write-skew check.
            Exceptions.expectException(RollbackException.class, this::commit);
         } catch (Exception e) {
            log.error("Unexpected exception in transaction 1", e);
            tm.rollback();
         }
         return null;
      });
      Future<Void> t2 = fork(() -> {
         latch2.await();
         tm.begin();
         try {
            try {
               cache.get("k1");
               cache.put("k1", "v2");
               cache.put("k3", "thread 2");
               commit();
            } finally {
               latch3.countDown();
            }
         } catch (Exception e) {
            // the TX is most likely rolled back already, but we attempt a rollback just in case it isn't
            if (tm.getTransaction() != null) {
               try {
                  tm.rollback();
               } catch (SystemException e1) {
                  log.error("Failed to rollback", e1);
               }
            }
            // Pass the exception to the main thread
            throw e;
         }
         return null;
      });
      latch1.countDown();
      t1.get(10, SECONDS);
      t2.get(10, SECONDS);
      assertTrue("k1 is expected to be in cache.", cache.containsKey("k1"));
      assertEquals("Wrong value for key k1.", "v2", cache.get("k1"));
   }

   /** Checks that multiple modifications compare the initial value and the write skew does not fire */
   public void testNoWriteSkewWithMultipleModifications() throws Exception {
      cache.put("k1", "init");
      tm.begin();
      assertEquals("init", cache.get("k1"));
      cache.put("k1", "v2");
      cache.put("k2", "v3");
      commit();
   }

   /**
    * Verifies we can insert and then remove a value in the same transaction.
    * See also ISPN-2075.
    */
   public void testDontFailOnImmediateRemoval() throws Exception {
      final String key = "testDontOnImmediateRemoval-Key";
      tm.begin();
      cache.put(key, "testDontOnImmediateRemoval-Value");
      assertEquals("Wrong value for key " + key, "testDontOnImmediateRemoval-Value", cache.get(key));
      cache.put(key, "testDontOnImmediateRemoval-Value-Second");
      cache.remove(key);
      commit();
      assertFalse("Key " + key + " was not removed as expected.", cache.containsKey(key));
   }

   public void testNoWriteSkew() throws Exception {
      //simplified version of testWriteSkewWithOnlyPut
      final String key = "k";
      tm.begin();
      try {
         cache.put(key, "init");
      } catch (Exception e) {
         tm.setRollbackOnly();
         throw e;
      } finally {
         if (tm.getStatus() == Status.STATUS_ACTIVE) {
            commit();
         } else {
            tm.rollback();
         }
      }
      // IGNORE_RETURN_VALUES puts do not read the previous value, so no skew can be detected.
      Cache<String, String> putCache = cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES);
      tm.begin();
      putCache.put(key, "v1");
      final Transaction tx1 = tm.suspend();
      tm.begin();
      putCache.put(key, "v2");
      final Transaction tx2 = tm.suspend();
      tm.begin();
      putCache.put(key, "v3");
      final Transaction tx3 = tm.suspend();
      //the following commits should not fail the write skew check
      tm.resume(tx1);
      commit();
      tm.resume(tx2);
      commit();
      tm.resume(tx3);
      commit();
   }

   public void testWriteSkew() throws Exception {
      //simplified version of testWriteSkewWithOnlyPut
      final String key = "k";
      tm.begin();
      try {
         cache.put(key, "init");
      } catch (Exception e) {
         tm.setRollbackOnly();
         throw e;
      } finally {
         if (tm.getStatus() == Status.STATUS_ACTIVE) {
            commit();
         } else {
            tm.rollback();
         }
      }
      // Three transactions read (via put's return value) and write the same key concurrently.
      tm.begin();
      cache.put(key, "v1");
      final Transaction tx1 = tm.suspend();
      tm.begin();
      cache.put(key, "v2");
      final Transaction tx2 = tm.suspend();
      tm.begin();
      cache.put(key, "v3");
      final Transaction tx3 = tm.suspend();
      //the first commit should succeed
      tm.resume(tx1);
      commit();
      //the remaining commits should fail the write skew check and roll back
      tm.resume(tx2);
      Exceptions.expectException(RollbackException.class, this::commit);
      tm.resume(tx3);
      Exceptions.expectException(RollbackException.class, this::commit);
   }

   // Write skew should not fire when the read is based purely on previously written value
   // (the first put does not read the value)
   // This test actually tests only local write skew check
   public void testPreviousValueIgnored() throws Exception {
      cache.put("k", "init");
      tm.begin();
      cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES).put("k", "v1");
      assertEquals("v1", cache.put("k", "v2"));
      Transaction tx = tm.suspend();
      assertEquals("init", cache.put("k", "other"));
      tm.resume(tx);
      commit();
   }

   public void testWriteSkewWithOnlyPut() throws Exception {
      tm.begin();
      try {
         cache.put("k", "init");
      } catch (Exception e) {
         tm.setRollbackOnly();
         throw e;
      } finally {
         if (tm.getStatus() == Status.STATUS_ACTIVE) commit();
         else tm.rollback();
      }
      int nbWriters = 10;
      CyclicBarrier barrier = new CyclicBarrier(nbWriters + 1);
      List<Future<Void>> futures = new ArrayList<>(nbWriters);
      for (int i = 0; i < nbWriters; i++) {
         log.debug("Schedule execution");
         Future<Void> future = fork(new EntryWriter(barrier));
         futures.add(future);
      }
      barrier.await(); // wait for all threads to be ready
      barrier.await(); // wait for all threads to finish
      log.debug("All threads finished, let's shutdown the executor and check whether any exceptions were reported");
      for (Future<Void> future : futures) future.get();
   }

   /**
    * Runs two transactions that both read key "k" and then write it concurrently.
    *
    * @param disabledWriteSkewCheck {@code true} when the cache under test has no write-skew
    *                               check: the second writer is then expected to commit and
    *                               overwrite the first; otherwise its commit must roll back.
    */
   private void doTest(final boolean disabledWriteSkewCheck) throws Exception {
      final String key = "k";
      final CountDownLatch w1Signal = new CountDownLatch(1);
      final CountDownLatch w2Signal = new CountDownLatch(1);
      final CountDownLatch threadSignal = new CountDownLatch(2);
      cache.put(key, "v");
      Future<Void> w1 = fork(() -> {
         tm.begin();
         assertEquals("Wrong value in Writer-1 for key " + key + ".", "v", cache.get(key));
         threadSignal.countDown();
         w1Signal.await();
         cache.put(key, "v2");
         commit();
         return null;
      });
      Future<Void> w2 = fork(() -> {
         tm.begin();
         assertEquals("Wrong value in Writer-2 for key " + key + ".", "v", cache.get(key));
         threadSignal.countDown();
         w2Signal.await();
         cache.put(key, "v3");
         if (disabledWriteSkewCheck) {
            commit();
         } else {
            Exceptions.expectException(RollbackException.class, this::commit);
         }
         return null;
      });
      threadSignal.await(10, SECONDS);
      // now. both txs have read.
      // let tx1 start writing
      w1Signal.countDown();
      w1.get(10, SECONDS);
      w2Signal.countDown();
      w2.get(10, SECONDS);
      if (disabledWriteSkewCheck) {
         assertEquals("W2 should have overwritten W1's work!", "v3", cache.get(key));
         assertNoLocks();
      } else {
         assertEquals("W2 should *not* have overwritten W1's work!", "v2", cache.get(key));
         assertNoLocks();
      }
   }

   /** Writes a fixed key inside a transaction, synchronizing start/end with the test thread via a barrier. */
   protected class EntryWriter implements Callable<Void> {
      private final CyclicBarrier barrier;
      EntryWriter(CyclicBarrier barrier) {
         this.barrier = barrier;
      }
      @Override
      public Void call() throws Exception {
         try {
            log.debug("Wait for all executions paths to be ready to perform calls");
            barrier.await();
            tm.begin();
            try {
               cache.getAdvancedCache().withFlags(Flag.IGNORE_RETURN_VALUES).put("k", "_lockthisplease_");
            } catch (Exception e) {
               log.error("Unexpected", e);
               tm.setRollbackOnly();
               throw e;
            } finally {
               if (tm.getStatus() == Status.STATUS_ACTIVE) commit();
               else tm.rollback();
            }
            return null;
         } finally {
            log.debug("Wait for all execution paths to finish");
            barrier.await();
         }
      }
   }
}
| 13,748
| 31.274648
| 123
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/repeatable_read/CacheAPIOptimisticTest.java
|
package org.infinispan.api.mvcc.repeatable_read;
import org.infinispan.api.BaseCacheAPIOptimisticTest;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.repeatable_read.CacheAPIOptimisticTest")
public class CacheAPIOptimisticTest extends BaseCacheAPIOptimisticTest {

   /** Runs the optimistic cache API suite under REPEATABLE_READ isolation. */
   @Override
   protected IsolationLevel getIsolationLevel() {
      return IsolationLevel.REPEATABLE_READ;
   }

   @Override
   public void testRetainAllMethodOfEntryCollection() {
      // Intentionally a no-op: the write-skew version is not stored in ImmortalCacheEntry.
      // Re-implementing the test would require adding equals() to MetadataImmortalCacheEntry,
      // and exposing internal cache entries like that is not worthwhile.
   }
}
| 802
| 35.5
| 96
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/repeatable_read/CacheAPIPessimisticTest.java
|
package org.infinispan.api.mvcc.repeatable_read;
import org.infinispan.api.BaseCacheAPIPessimisticTest;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Pessimistic-locking cache API test running under REPEATABLE_READ isolation.
 *
 * @author Mircea Markus
 * @since 5.1
 */
@Test(groups = "functional", testName = "api.mvcc.repeatable_read.CacheAPIPessimisticTest")
public class CacheAPIPessimisticTest extends BaseCacheAPIPessimisticTest {

   @Override
   protected IsolationLevel getIsolationLevel() {
      return IsolationLevel.REPEATABLE_READ;
   }
}
| 526
| 28.277778
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/read_committed/CacheAPIOptimisticTest.java
|
package org.infinispan.api.mvcc.read_committed;
import org.infinispan.api.BaseCacheAPIOptimisticTest;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.read_committed.CacheAPIOptimisticTest")
public class CacheAPIOptimisticTest extends BaseCacheAPIOptimisticTest {

   /** Runs the optimistic cache API suite under READ_COMMITTED isolation. */
   @Override
   protected IsolationLevel getIsolationLevel() {
      return IsolationLevel.READ_COMMITTED;
   }
}
| 471
| 32.714286
| 89
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/read_committed/ReadCommittedLockTest.java
|
package org.infinispan.api.mvcc.read_committed;
import static org.testng.AssertJUnit.assertEquals;
import jakarta.transaction.Transaction;
import org.infinispan.Cache;
import org.infinispan.api.mvcc.LockTestBase;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.mvcc.read_committed.ReadCommittedLockTest")
public class ReadCommittedLockTest extends LockTestBase {
   public ReadCommittedLockTest() {
      // Every test in this class runs with READ_COMMITTED semantics.
      repeatableRead = false;
   }

   public void testVisibilityOfCommittedDataPut() throws Exception {
      assertCommittedWriteVisibility(false);
   }

   public void testVisibilityOfCommittedDataReplace() throws Exception {
      assertCommittedWriteVisibility(true);
   }

   /**
    * Asserts READ_COMMITTED visibility: a reader transaction must not see another
    * transaction's write while it is uncommitted, and must see it after it commits.
    * Shared by the put() and replace() variants, which differ only in the write call.
    *
    * @param useReplace {@code true} to perform the concurrent write with {@code replace()},
    *                   {@code false} to use {@code put()}
    */
   private void assertCommittedWriteVisibility(boolean useReplace) throws Exception {
      Cache<String, String> c = lockTestData.cache;
      c.put("k", "v");
      assertEquals("v", c.get("k"));
      // start a tx and read K
      lockTestData.tm.begin();
      assertEquals("v", c.get("k"));
      assertEquals("v", c.get("k"));
      Transaction reader = lockTestData.tm.suspend();
      // a second tx writes a new value but does not commit yet
      lockTestData.tm.begin();
      if (useReplace) {
         c.replace("k", "v2");
      } else {
         c.put("k", "v2");
      }
      Transaction writer = lockTestData.tm.suspend();
      // the reader must not observe the uncommitted write
      lockTestData.tm.resume(reader);
      assertEquals("Should not read uncommitted data", "v", c.get("k"));
      reader = lockTestData.tm.suspend();
      // commit the writer
      lockTestData.tm.resume(writer);
      lockTestData.tm.commit();
      // under READ_COMMITTED the reader now sees the committed value
      lockTestData.tm.resume(reader);
      assertEquals("Should read committed data", "v2", c.get("k"));
      lockTestData.tm.commit();
   }

   @Override
   public void testConcurrentWriters() throws Exception {
      // Only delegates to the base implementation; kept so the inherited test is
      // explicitly visible for this isolation level.
      super.testConcurrentWriters();
   }
}
| 2,302
| 28.151899
| 88
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/mvcc/read_committed/CacheAPIPessimisticTest.java
|
package org.infinispan.api.mvcc.read_committed;
import org.infinispan.api.BaseCacheAPIPessimisticTest;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Pessimistic-locking cache API test running under READ_COMMITTED isolation.
 *
 * @author Mircea Markus
 * @since 5.1
 */
@Test(groups = "functional", testName = "api.mvcc.read_committed.CacheAPIPessimisticTest")
public class CacheAPIPessimisticTest extends BaseCacheAPIPessimisticTest {

   @Override
   protected IsolationLevel getIsolationLevel() {
      return IsolationLevel.READ_COMMITTED;
   }
}
| 523
| 28.111111
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/flags/NonTxFlagsEnabledTest.java
|
package org.infinispan.api.flags;
import static org.infinispan.context.Flag.CACHE_MODE_LOCAL;
import java.lang.reflect.Method;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.UnnecessaryLoadingTest;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.annotations.Test;
/**
 * Runs the {@link FlagsEnabledTest} suite against non-transactional caches.
 *
 * @author Pedro Ruivo
 * @since 6.0
 */
@Test(groups = "functional", testName = "api.flags.NonTxFlagsEnabledTest")
@CleanupAfterMethod
public class NonTxFlagsEnabledTest extends FlagsEnabledTest {

   public NonTxFlagsEnabledTest() {
      super("non-tx-replication");
   }

   @Override
   @Test(enabled = false, description = "non transactional cache")
   public void testReplicateSkipCacheLoaderWithinTxInCoordinator(Method m) throws Exception {
      // Not applicable: this cache is non-transactional.
   }

   @Override
   @Test(enabled = false, description = "non transactional cache")
   public void testReplicateSkipCacheLoaderWithinTxInNonCoordinator(Method m) throws Exception {
      // Not applicable: this cache is non-transactional.
   }

   public void testCacheLocalInNonOwner() {
      // Bring up a third member that also runs the test cache.
      EmbeddedCacheManager extraManager = addClusterEnabledCacheManager(TestDataSCI.INSTANCE);
      extraManager.createCache(cacheName, getConfigurationBuilder().build());
      waitForClusterToForm(cacheName);
      final AdvancedCache<Object, String> c1 = advancedCache(0, cacheName);
      final AdvancedCache<Object, String> c2 = advancedCache(1, cacheName);
      final AdvancedCache<Object, String> c3 = advancedCache(2, cacheName);
      final Object key = new MagicKey("k-no", c1);
      // A CACHE_MODE_LOCAL write on the third node must stay on that node only.
      c3.withFlags(CACHE_MODE_LOCAL).put(key, "value");
      assertCacheValue(c3, key, "value");
      assertCacheValue(c1, key, null);
      assertCacheValue(c2, key, null);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder replBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
      replBuilder.persistence().addStore(DummyInMemoryStoreConfigurationBuilder.class);
      createClusteredCaches(2, cacheName, TestDataSCI.INSTANCE, replBuilder);
   }

   private ConfigurationBuilder getConfigurationBuilder() {
      ConfigurationBuilder replBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
      replBuilder
            .persistence().addStore(UnnecessaryLoadingTest.CountingStoreConfigurationBuilder.class)
            .persistence().addStore(DummyInMemoryStoreConfigurationBuilder.class);
      return replBuilder;
   }
}
| 2,790
| 37.232877
| 99
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/flags/DecoratedCacheTest.java
|
package org.infinispan.api.flags;
import java.util.EnumSet;
import org.infinispan.AdvancedCache;
import org.infinispan.cache.impl.CacheImpl;
import org.infinispan.cache.impl.DecoratedCache;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.context.Flag;
import org.testng.annotations.Test;
/**
 * Verifies that {@code DecoratedCache.withFlags(...)} accumulates flags across successive
 * calls and returns the same instance when no new flag is added.
 *
 * @author Sanne Grinovero <sanne@infinispan.org> (C) 2011 Red Hat Inc.
 */
@Test(groups = "functional", testName = "api.flags.DecoratedCacheTest")
public class DecoratedCacheTest {
   public void testDecoratedCacheFlagsSet() {
      // Use explicit type arguments instead of raw types.
      CacheImpl<Object, Object> impl = new CacheImpl<>("baseCache");
      DecoratedCache<Object, Object> decoratedCache = new DecoratedCache<>(impl, EnumUtil.EMPTY_BIT_SET);
      // Adding one flag yields a decorated cache carrying exactly that flag.
      DecoratedCache<Object, Object> nofailCache = (DecoratedCache<Object, Object>) decoratedCache.withFlags(Flag.FAIL_SILENTLY);
      EnumSet<Flag> nofailCacheFlags = EnumUtil.enumSetOf(nofailCache.getFlagsBitSet(), Flag.class);
      assert nofailCacheFlags.contains(Flag.FAIL_SILENTLY);
      assert nofailCacheFlags.size() == 1;
      // Adding a second flag keeps the first one as well.
      DecoratedCache<Object, Object> asyncNoFailCache = (DecoratedCache<Object, Object>) nofailCache.withFlags(Flag.FORCE_ASYNCHRONOUS);
      EnumSet<Flag> asyncNofailCacheFlags = EnumUtil.enumSetOf(asyncNoFailCache.getFlagsBitSet(), Flag.class);
      assert asyncNofailCacheFlags.size() == 2;
      assert asyncNofailCacheFlags.contains(Flag.FAIL_SILENTLY);
      assert asyncNofailCacheFlags.contains(Flag.FORCE_ASYNCHRONOUS);
      AdvancedCache<Object, Object> again = asyncNoFailCache.withFlags(Flag.FAIL_SILENTLY);
      assert again == asyncNoFailCache; // as FAIL_SILENTLY was already specified
   }
}
| 1,555
| 43.457143
| 110
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/flags/FlagsEnabledTest.java
|
package org.infinispan.api.flags;
import static org.infinispan.context.Flag.CACHE_MODE_LOCAL;
import static org.infinispan.context.Flag.SKIP_CACHE_LOAD;
import static org.infinispan.test.TestingUtil.k;
import static org.infinispan.test.TestingUtil.v;
import static org.infinispan.test.TestingUtil.withTx;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotSame;
import java.lang.reflect.Method;
import java.util.concurrent.Callable;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.cache.impl.AbstractDelegatingAdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* @author Sanne Grinovero <sanne@infinispan.org> (C) 2011 Red Hat Inc.
*/
@Test(groups = "functional", testName = "api.flags.FlagsEnabledTest")
@CleanupAfterMethod
public class FlagsEnabledTest extends MultipleCacheManagersTest {
protected final String cacheName;
public FlagsEnabledTest() {
this("tx-replication");
}
protected FlagsEnabledTest(String cacheName) {
this.cacheName = cacheName;
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
builder
.locking().isolationLevel(IsolationLevel.REPEATABLE_READ)
.persistence().addStore(DummyInMemoryStoreConfigurationBuilder.class);
createClusteredCaches(2, cacheName, TestDataSCI.INSTANCE, builder);
}
DummyInMemoryStore getCacheStore(Cache cache) {
return (DummyInMemoryStore) TestingUtil.getFirstStore(cache);
}
public void testWithFlagsSemantics() {
final AdvancedCache<MagicKey, String> cache1 = advancedCache(0, cacheName);
final AdvancedCache<MagicKey, String> cache2 = advancedCache(1, cacheName);
assertNotSame("CacheStores", getCacheStore(cache1), getCacheStore(cache2));
assertLoadsAndReset(cache1, 0, cache2, 0);
final AdvancedCache<MagicKey, String> cache1LocalOnly = cache1.withFlags(CACHE_MODE_LOCAL);
MagicKey localKey = new MagicKey("local", cache1);
cache1LocalOnly.put(localKey, "value1");
assertLoadsAndReset(cache1, 1, cache2, 0);
cache2.withFlags(CACHE_MODE_LOCAL).put(localKey, "value2");
assertLoadsAndReset(cache1, 0, cache2, 1);
assertCacheValue(cache1, localKey, "value1");
assertLoadsAndReset(cache1, 0, cache2, 0);
assertCacheValue(cache2, localKey, "value2");
assertLoadsAndReset(cache1, 0, cache2, 0);
MagicKey nonLocalKey = new MagicKey("nonLocal", cache2);
cache1.put(nonLocalKey, "value");
// Write skew check needs the previous version on the originator AND on the primary owner
int cache1Loads = isTxCache() ? 1 : 0;
assertLoadsAndReset(cache1, cache1Loads, cache2, 1);
assertCacheValue(cache2, nonLocalKey, "value");
assertLoadsAndReset(cache1, 0, cache2, 0);
final AdvancedCache<MagicKey, String> cache1SkipRemoteAndStores =
cache1LocalOnly.withFlags(SKIP_CACHE_LOAD);
MagicKey localKey2 = new MagicKey("local2", cache1);
cache1SkipRemoteAndStores.put(localKey2, "value");
// CACHE_MODE_LOCAL operation is not replicated with the PrepareCommand and WSC is not executed,
// but the entry is committed on the origin
assertLoadsAndReset(cache1, 0, cache2, 0);
assertCacheValue(cache1, localKey2, "value");
// localKey2 isn't in memory, looks into store
assertCacheValue(cache2, localKey2, null);
assertLoadsAndReset(cache1, 0, cache2, 1);
assertCacheValue(cache2, localKey2, null);
assertLoadsAndReset(cache1, 0, cache2, 1);
assertCacheValue(cache2.withFlags(SKIP_CACHE_LOAD), localKey2, null);
assertLoadsAndReset(cache1, 0, cache2, 0);
// Options on cache1SkipRemoteAndStores did NOT affect this cache
MagicKey localKey3 = new MagicKey("local3", cache1);
assertCacheValue(cache1LocalOnly, localKey3, null);
assertLoadsAndReset(cache1, 1, cache2, 0);
}
public void testWithFlagsAndDelegateCache() {
final AdvancedCache<Integer, String> c1 =
new CustomDelegateCache<>(this.advancedCache(0, cacheName));
final AdvancedCache<Integer, String> c2 = advancedCache(1, cacheName);
c1.withFlags(CACHE_MODE_LOCAL).put(1, "v1");
assertCacheValue(c2, 1, null);
}
public void testReplicateSkipCacheLoad(Method m) {
final AdvancedCache<Object, String> cache1 = advancedCache(0, cacheName);
final AdvancedCache<Object, String> cache2 = advancedCache(1, cacheName);
assertLoadsAndReset(cache1, 0, cache2, 0);
final String v = v(m, 1);
final Object k = getKeyForCache(0, cacheName);
cache1.withFlags(Flag.SKIP_CACHE_LOAD).put(k, v);
// The write-skew check tries to load it from persistence.
assertLoadsAndReset(cache1, isTxCache() ? 1 : 0, cache2, 0);
assertCacheValue(cache2, k, v);
assertLoadsAndReset(cache1, 0, cache2, 0);
}
public void testReplicateSkipCacheLoaderWithinTxInCoordinator(Method m) throws Exception {
final AdvancedCache<String, String> cache1 = advancedCache(0, cacheName);
final AdvancedCache<String, String> cache2 = advancedCache(1, cacheName);
doReplicateSkipCacheLoaderWithinTx(m, cache1, cache2);
}
public void testReplicateSkipCacheLoaderWithinTxInNonCoordinator(Method m) throws Exception {
final AdvancedCache<String, String> cache1 = advancedCache(0, cacheName);
final AdvancedCache<String, String> cache2 = advancedCache(1, cacheName);
doReplicateSkipCacheLoaderWithinTx(m, cache2, cache1);
}
public void testCacheLocalInPrimaryOwner() {
final AdvancedCache<Object, String> cache1 = advancedCache(0, cacheName);
final AdvancedCache<Object, String> cache2 = advancedCache(1, cacheName);
final Object key = new MagicKey("k-po", cache1);
cache1.withFlags(CACHE_MODE_LOCAL).put(key, "value");
assertCacheValue(cache1, key, "value");
assertCacheValue(cache2, key, null);
}
public void testCacheLocalInBackupOwner() {
final AdvancedCache<Object, String> cache1 = advancedCache(0, cacheName);
final AdvancedCache<Object, String> cache2 = advancedCache(1, cacheName);
final Object key = new MagicKey("k-bo", cache1);
cache2.withFlags(CACHE_MODE_LOCAL).put(key, "value");
assertCacheValue(cache2, key, "value");
assertCacheValue(cache1, key, null);
}
   /**
    * Shared body for the SKIP_CACHE_LOAD-within-transaction tests.
    *
    * @param m      test method, used to derive a unique key/value
    * @param cache1 node originating the transaction
    * @param cache2 the other node
    */
   private void doReplicateSkipCacheLoaderWithinTx(Method m,
                                                   final AdvancedCache<String, String> cache1,
                                                   AdvancedCache<String, String> cache2) throws Exception {
      // Sanity: no loads recorded before the test starts.
      assertLoadsAndReset(cache1, 0, cache2, 0);
      final String v = v(m, 1);
      final String k = k(m, 1);
      withTx(cache1.getTransactionManager(), (Callable<Void>) () -> {
         cache1.withFlags(Flag.SKIP_CACHE_LOAD).put(k, v);
         return null;
      });
      // The write-skew check tries to load it from persistence on the primary owner.
      assertLoadsAndReset(cache1, isPrimaryOwner(cache1, k) ? 1 : 0, cache2, isPrimaryOwner(cache2, k) ? 1 : 0);
      // Reading the committed value back triggers no further store loads.
      assertCacheValue(cache2, k, v);
      assertLoadsAndReset(cache1, 0, cache2, 0);
   }
public static class CustomDelegateCache<K, V>
extends AbstractDelegatingAdvancedCache<K, V> {
public CustomDelegateCache(AdvancedCache<K, V> cache) {
super(cache);
}
@Override
public AdvancedCache rewrap(AdvancedCache newDelegate) {
return new CustomDelegateCache(newDelegate);
}
}
   /**
    * Asserts how many times each cache's dummy store recorded a "load" since
    * the previous check, then clears both stores' statistics so each assertion
    * only observes loads triggered after the one before it.
    */
   private void assertLoadsAndReset(Cache<?, ?> cache1, int expected1, Cache<?, ?> cache2, int expected2) {
      DummyInMemoryStore store1 = getCacheStore(cache1);
      DummyInMemoryStore store2 = getCacheStore(cache2);
      assertEquals(expected1, (int) store1.stats().get("load"));
      assertEquals(expected2, (int) store2.stats().get("load"));
      store1.clearStats();
      store2.clearStats();
   }
   /** Asserts {@code cache.get(key)} returns {@code value}, with a descriptive failure message. */
   protected final void assertCacheValue(Cache<?, ?> cache, Object key, Object value) {
      assertEquals("Wrong value for key '" + key + "' in cache '" + cache + "'.", value, cache.get(key));
   }
   /** Returns true if {@code cache} is the primary owner of {@code key} in the current topology. */
   private boolean isPrimaryOwner(Cache<?, ?> cache, Object key) {
      return TestingUtil.extractComponent(cache, ClusteringDependentLogic.class).getCacheTopology().getDistribution(key).isPrimary();
   }
   /** Returns true if the cache under test is configured as transactional. */
   private boolean isTxCache() {
      return advancedCache(0, cacheName).getCacheConfiguration().transaction().transactionMode().isTransactional();
   }
}
| 9,240
| 40.254464
| 133
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/batch/BatchWithTMTest.java
|
package org.infinispan.api.batch;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.infinispan.test.TestingUtil.getTransactionManager;
import static org.infinispan.test.fwk.TestCacheManagerFactory.getDefaultCacheConfiguration;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.lang.reflect.Method;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
@Test(groups = {"functional", "transaction"}, testName = "api.batch.BatchWithTMTest")
public class BatchWithTMTest extends AbstractBatchTest {
public void testBatchWithOngoingTM(Method method) throws Exception {
Cache<String, String> cache = createCache(method.getName());
TransactionManager tm = getTransactionManager(cache);
assertNoTransaction(tm);
tm.begin();
cache.put("k", "v");
cache.startBatch();
cache.put("k2", "v2");
tm.commit();
assertEquals("v", cache.get("k"));
assertEquals("v2", cache.get("k2"));
cache.endBatch(false); // should be a no op
assertEquals("v", cache.get("k"));
assertEquals("v2", cache.get("k2"));
}
public void testBatchWithoutOngoingTMSuspension(Method method) throws Exception {
Cache<String, String> cache = createCache(method.getName());
TransactionManager tm = getTransactionManager(cache);
assertNoTransaction(tm);
cache.startBatch();
cache.put("k", "v");
assertNoTransaction(tm);
cache.put("k2", "v2");
assertNull(getOnDifferentThread(cache, "k"));
assertNull(getOnDifferentThread(cache, "k2"));
expectException(IllegalStateException.class, tm::commit);
assertNoTransaction(tm);
assertNull(getOnDifferentThread(cache, "k"));
assertNull(getOnDifferentThread(cache, "k2"));
cache.endBatch(true);
assertEquals("v", getOnDifferentThread(cache, "k"));
assertEquals("v2", getOnDifferentThread(cache, "k2"));
}
public void testBatchRollback(Method method) throws Exception {
Cache<String, String> cache = createCache(method.getName());
cache.startBatch();
cache.put("k", "v");
cache.put("k2", "v2");
assertNull(getOnDifferentThread(cache, "k"));
assertNull(getOnDifferentThread(cache, "k2"));
cache.endBatch(false);
assertNull(getOnDifferentThread(cache, "k"));
assertNull(getOnDifferentThread(cache, "k2"));
}
protected <K, V> Cache<K, V> createCache(String name) {
ConfigurationBuilder c = getDefaultCacheConfiguration(true);
c.invocationBatching().enable();
cacheManager.defineConfiguration(name, c.build());
return cacheManager.getCache(name);
}
}
| 2,856
| 31.83908
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/batch/LockInBatchTest.java
|
package org.infinispan.api.batch;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "api.batch.LockInBatchTest")
public class LockInBatchTest extends SingleCacheManagerTest {

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      // Pessimistic locking so lock() acquires eagerly; batching enabled so
      // the same scenarios can run through startBatch()/endBatch().
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.LOCAL, false);
      builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL).lockingMode(LockingMode.PESSIMISTIC);
      builder.invocationBatching().enable(true);
      return TestCacheManagerFactory.createCacheManager(builder);
   }

   /** A lock taken inside a batch is released when the batch is rolled back. */
   public void testLockWithBatchingRollback() {
      cache.startBatch();
      cache.getAdvancedCache().lock("k");
      assertTrue(lockManager().isLocked("k"));
      cache().endBatch(false);
      assertFalse(lockManager().isLocked("k"));
   }

   /** A lock taken inside a batch is released when the batch commits. */
   public void testLockWithBatchingCommit() {
      cache.startBatch();
      cache.getAdvancedCache().lock("k");
      assertTrue(lockManager().isLocked("k"));
      cache().endBatch(true);
      assertFalse(lockManager().isLocked("k"));
   }

   /** A lock taken inside an explicit transaction is released on rollback. */
   public void testLockWithTmRollback() throws Throwable {
      tm().begin();
      cache.getAdvancedCache().lock("k");
      assertTrue(lockManager().isLocked("k"));
      tm().rollback();
      assertFalse(lockManager().isLocked("k"));
   }

   /** A lock taken inside an explicit transaction is released on commit. */
   public void testLockWithTmCommit() throws Throwable {
      tm().begin();
      cache.getAdvancedCache().lock("k");
      assertTrue(lockManager().isLocked("k"));
      tm().commit();
      assertFalse(lockManager().isLocked("k"));
   }
}
| 2,050
| 34.362069
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/batch/AbstractBatchTest.java
|
package org.infinispan.api.batch;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.lang.reflect.Method;
import java.util.concurrent.Future;
import jakarta.transaction.SystemException;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
@Test(groups = {"functional", "transaction", "smoke"})
public abstract class AbstractBatchTest extends SingleCacheManagerTest {

   @Override
   public EmbeddedCacheManager createCacheManager() {
      return TestCacheManagerFactory.createCacheManager(false);
   }

   /**
    * clear() inside a batch must not leak the batch transaction; if it did,
    * the final reads would run against an already-committed transaction.
    */
   public void testClearInBatch(Method method) {
      Cache<String, String> batchCache = createCache(method.getName());
      batchCache.put("k2", "v2");

      batchCache.startBatch();
      batchCache.clear();
      batchCache.put("k1", "v1");
      batchCache.endBatch(true);

      assertNull(batchCache.get("k2"));
      assertEquals("v1", batchCache.get("k1"));
   }

   /**
    * putForExternalRead() inside a batch must not leak the batch transaction,
    * and it commits independently of the batch outcome.
    */
   public void testPutForExternalReadInBatch(Method method) {
      Cache<String, String> batchCache = createCache(method.getName());

      batchCache.startBatch();
      batchCache.putForExternalRead("k1", "v1");
      batchCache.put("k2", "v2");
      batchCache.endBatch(true);

      assertEquals("v1", batchCache.get("k1"));
      assertEquals("v2", batchCache.get("k2"));

      batchCache.startBatch();
      batchCache.putForExternalRead("k3", "v3");
      batchCache.put("k1", "v2");
      batchCache.endBatch(false);

      // The rolled-back put("k1", "v2") is discarded, but the
      // putForExternalRead("k3", "v3") survives the rollback.
      assertEquals("v1", batchCache.get("k1"));
      assertEquals("v2", batchCache.get("k2"));
      assertEquals("v3", batchCache.get("k3"));
   }

   /** Reads {@code key} from a different thread, inside that thread's own batch. */
   String getOnDifferentThread(final Cache<String, String> cache, final String key) throws Exception {
      Future<String> reader = fork(() -> {
         cache.startBatch();
         String value = cache.get(key);
         cache.endBatch(true);
         return value;
      });
      return reader.get();
   }

   /** Asserts no transaction is associated with the current thread. */
   void assertNoTransaction(TransactionManager transactionManager) throws SystemException {
      assertNull("Should have no ongoing txs", transactionManager.getTransaction());
   }

   /** Builds the per-test cache; subclasses decide the batching/TM configuration. */
   protected abstract <K, V> Cache<K, V> createCache(String name);
}
| 2,620
| 31.7625
| 102
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/batch/BatchWithCustomTMTest.java
|
package org.infinispan.api.batch;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.infinispan.test.TestingUtil.getTransactionManager;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.lang.reflect.Method;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedBaseTransactionManager;
import org.testng.annotations.Test;
@Test(groups = {"functional", "transaction"}, testName = "api.batch.BatchWithCustomTMTest")
public class BatchWithCustomTMTest extends AbstractBatchTest {
   /**
    * A batch started while a transaction from the custom TM is in progress
    * must join that transaction; {@code endBatch(false)} afterwards is a no-op.
    */
   public void testBatchWithOngoingTM(Method method) throws Exception {
      Cache<String, String> cache = createCache(method.getName());
      TransactionManager tm = getTransactionManager(cache);
      // The cache must be using the custom lookup's TM, not the default one.
      assertEquals(MyDummyTransactionManager.class, tm.getClass());
      tm.begin();
      cache.put("k", "v");
      cache.startBatch();
      cache.put("k2", "v2");
      tm.commit();
      // Both writes were committed by the enclosing transaction.
      assertEquals("v", cache.get("k"));
      assertEquals("v2", cache.get("k2"));
      cache.endBatch(false); // should be a no op
      assertEquals("v", cache.get("k"));
      assertEquals("v2", cache.get("k2"));
   }
   /**
    * With no ongoing transaction, the batch runs on its own transaction: its
    * writes stay invisible to other threads until {@code endBatch(true)} and
    * the custom TM has nothing bound to this thread to commit.
    */
   public void testBatchWithoutOngoingTMSuspension(Method method) throws Exception {
      Cache<String, String> cache = createCache(method.getName());
      TransactionManager tm = getTransactionManager(cache);
      assertEquals(MyDummyTransactionManager.class, tm.getClass());
      assertNoTransaction(tm);
      cache.startBatch();
      cache.put("k", "v");
      // The batch transaction is not associated with this thread's TM.
      assertNoTransaction(tm);
      cache.put("k2", "v2");
      // Uncommitted batch writes are invisible to other threads.
      assertNull(getOnDifferentThread(cache, "k"));
      assertNull(getOnDifferentThread(cache, "k2"));
      // No thread-bound transaction exists, so a direct TM commit must fail.
      expectException(IllegalStateException.class, tm::commit);
      assertNoTransaction(tm);
      assertNull(getOnDifferentThread(cache, "k"));
      assertNull(getOnDifferentThread(cache, "k2"));
      cache.endBatch(true); // should be a no op
      assertEquals("v", getOnDifferentThread(cache, "k"));
      assertEquals("v2", getOnDifferentThread(cache, "k2"));
   }
   /** Rolling a batch back ({@code endBatch(false)}) discards all of its writes. */
   public void testBatchRollback(Method method) throws Exception {
      Cache<String, String> cache = createCache(method.getName());
      cache.startBatch();
      cache.put("k", "v");
      cache.put("k2", "v2");
      assertNull(getOnDifferentThread(cache, "k"));
      assertNull(getOnDifferentThread(cache, "k2"));
      cache.endBatch(false);
      assertNull(getOnDifferentThread(cache, "k"));
      assertNull(getOnDifferentThread(cache, "k2"));
   }
   /** Creates a per-test transactional batching cache wired to the custom TM lookup. */
   protected <K, V> Cache<K, V> createCache(String name) {
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.transaction().transactionManagerLookup(new MyDummyTransactionManagerLookup());
      c.invocationBatching().enable();
      c.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
      cacheManager.defineConfiguration(name, c.build());
      return cacheManager.getCache(name);
   }
   /** Lookup that always returns the same {@link MyDummyTransactionManager} instance. */
   static class MyDummyTransactionManagerLookup extends EmbeddedTransactionManagerLookup {
      MyDummyTransactionManager tm = new MyDummyTransactionManager();
      @Override
      public TransactionManager getTransactionManager() {
         return tm;
      }
   }
   // Marker subclass: lets the tests verify the custom lookup is actually used.
   static class MyDummyTransactionManager extends EmbeddedBaseTransactionManager {
   }
}
| 3,606
| 33.352381
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/api/batch/BatchWithoutTMTest.java
|
package org.infinispan.api.batch;
import static org.infinispan.test.fwk.TestCacheManagerFactory.getDefaultCacheConfiguration;
import static org.testng.AssertJUnit.assertNull;
import java.lang.reflect.Method;
import org.infinispan.Cache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "api.batch.BatchWithoutTMTest")
public class BatchWithoutTMTest extends AbstractBatchTest {
   @Override
   public EmbeddedCacheManager createCacheManager() {
      final ConfigurationBuilder defaultConfiguration = getDefaultCacheConfiguration(true);
      // autoCommit(false) so batch semantics are exercised, not implicit txs.
      defaultConfiguration.invocationBatching().enable().transaction().autoCommit(false);
      return TestCacheManagerFactory.createCacheManager(defaultConfiguration);
   }
   /** Batch operations on a cache without batching enabled must fail fast. */
   public void testBatchWithoutCfg(Method method) {
      Cache<String, String> cache = createCache(false, method.getName());
      Exceptions.expectException(CacheConfigurationException.class, cache::startBatch);
      Exceptions.expectException(CacheConfigurationException.class, () -> cache.endBatch(true));
      Exceptions.expectException(CacheConfigurationException.class, () -> cache.endBatch(false));
   }
   /** endBatch() without a prior startBatch() is tolerated as a no-op. */
   public void testEndBatchWithoutStartBatch(Method method) {
      Cache<String, String> cache = createCache(method.getName());
      cache.endBatch(true);
      cache.endBatch(false);
      // should not fail.
   }
   /** Calling startBatch() twice joins the same batch rather than nesting. */
   public void testStartBatchIdempotency(Method method) {
      Cache<String, String> cache = createCache(method.getName());
      cache.startBatch();
      cache.put("k", "v");
      cache.startBatch(); // again
      cache.put("k2", "v2");
      cache.endBatch(true);
      // Both writes belong to the single batch and are committed together.
      AssertJUnit.assertEquals("v", cache.get("k"));
      AssertJUnit.assertEquals("v2", cache.get("k2"));
   }
   /** Batch writes are invisible to other threads until the batch completes. */
   public void testBatchVisibility(Method method) throws Exception {
      Cache<String, String> cache = createCache(method.getName());
      cache.startBatch();
      cache.put("k", "v");
      assertNull("Other thread should not see batch update till batch completes!", getOnDifferentThread(cache, "k"));
      cache.endBatch(true);
      AssertJUnit.assertEquals("v", getOnDifferentThread(cache, "k"));
   }
   /** Rolling a batch back ({@code endBatch(false)}) discards all of its writes. */
   public void testBatchRollback(Method method) throws Exception {
      Cache<String, String> cache = createCache(method.getName());
      cache.startBatch();
      cache.put("k", "v");
      cache.put("k2", "v2");
      assertNull(getOnDifferentThread(cache, "k"));
      assertNull(getOnDifferentThread(cache, "k2"));
      cache.endBatch(false);
      assertNull(getOnDifferentThread(cache, "k"));
      assertNull(getOnDifferentThread(cache, "k2"));
   }
   @Override
   protected <K, V> Cache<K, V> createCache(String name) {
      return createCache(true, name);
   }
   // Builds a per-test cache; enableBatch=false exercises the misconfiguration path.
   private <K, V> Cache<K, V> createCache(boolean enableBatch, String name) {
      ConfigurationBuilder c = new ConfigurationBuilder();
      c.invocationBatching().enable(enableBatch);
      cacheManager.defineConfiguration(name, c.build());
      return cacheManager.getCache(name);
   }
}
| 3,373
| 36.076923
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/TxReplayTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import java.util.concurrent.atomic.AtomicInteger;
import jakarta.transaction.Status;
import org.infinispan.Cache;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.impl.CallInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransaction;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.testng.annotations.Test;
/**
* Tests the prepare replay.
*
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "statetransfer.TxReplayTest")
public class TxReplayTest extends MultipleCacheManagersTest {
   // Value written by the replayed transaction; expected on every owner at the end.
   private static final String VALUE = "value";
   /**
    * Prepares a tx on the two original owners, kills the backup owner and
    * verifies the prepare is forwarded (replayed) to the new backup owner
    * exactly once before the commit completes.
    */
   public void testReplay() throws Exception {
      assertClusterSize("Wrong cluster size", 3);
      // Key owned by cache(0) (primary) and cache(1) (backup); cache(2) will
      // become the new backup owner once cache(1) is killed.
      final Object key = new MagicKey(cache(0), cache(1));
      final Cache<Object, Object> newBackupOwnerCache = cache(2);
      final TxCommandInterceptor interceptor = TxCommandInterceptor.inject(newBackupOwnerCache);
      EmbeddedTransactionManager transactionManager = (EmbeddedTransactionManager) tm(0);
      transactionManager.begin();
      cache(0).put(key, VALUE);
      final EmbeddedTransaction transaction = transactionManager.getTransaction();
      // Run only the prepare phase so the tx is left in-doubt on the owners.
      transaction.runPrepare();
      assertEquals("Wrong transaction status before killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());
      //now, we kill cache(1). the transaction is prepared in cache(1) and it should be forward to cache(2)
      killMember(1);
      checkIfTransactionExists(newBackupOwnerCache);
      assertEquals("Wrong transaction status after killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());
      transaction.runCommit(false);
      assertNoTransactions();
      // The forwarded prepare must be replayed exactly once, then committed once.
      assertEquals("Wrong number of prepares!", 1, interceptor.numberPrepares.get());
      assertEquals("Wrong number of commits!", 1, interceptor.numberCommits.get());
      assertEquals("Wrong number of rollbacks!", 0, interceptor.numberRollbacks.get());
      checkKeyInDataContainer(key);
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      // 2PC with the embedded TM and no recovery, so the prepare can be replayed.
      builder.transaction()
            .useSynchronization(false)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
            .recovery().disable();
      builder.clustering()
            .hash().numOwners(2)
            .stateTransfer().fetchInMemoryState(true);
      createClusteredCaches(3, TestDataSCI.INSTANCE, builder);
   }
   // Verifies the committed value is present in every node's data container.
   private void checkKeyInDataContainer(Object key) {
      for (Cache<Object, Object> cache : caches()) {
         DataContainer container = cache.getAdvancedCache().getDataContainer();
         InternalCacheEntry entry = container.get(key);
         assertNotNull("Cache '" + address(cache) + "' does not contain key!", entry);
         assertEquals("Cache '" + address(cache) + "' has wrong value!", VALUE, entry.getValue());
      }
   }
   // Asserts the forwarded (remote) transaction is registered on the given node.
   private void checkIfTransactionExists(Cache<Object, Object> cache) {
      TransactionTable table = TestingUtil.extractComponent(cache, TransactionTable.class);
      assertFalse("Expected a remote transaction.", table.getRemoteTransactions().isEmpty());
   }
   /** Counts remotely-originated prepare/commit/rollback commands on one node. */
   static class TxCommandInterceptor extends DDAsyncInterceptor {
      //counters
      private final AtomicInteger numberPrepares = new AtomicInteger(0);
      private final AtomicInteger numberCommits = new AtomicInteger(0);
      private final AtomicInteger numberRollbacks = new AtomicInteger(0);
      @Override
      public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
         if (!ctx.isOriginLocal()) {
            numberPrepares.incrementAndGet();
         }
         return invokeNext(ctx, command);
      }
      @Override
      public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
         if (!ctx.isOriginLocal()) {
            numberCommits.incrementAndGet();
         }
         return invokeNext(ctx, command);
      }
      @Override
      public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
         if (!ctx.isOriginLocal()) {
            numberRollbacks.incrementAndGet();
         }
         return invokeNext(ctx, command);
      }
      // Adds the interceptor to the cache's chain; idempotent if already present.
      public static TxCommandInterceptor inject(Cache cache) {
         AsyncInterceptorChain chain = cache.getAdvancedCache().getAsyncInterceptorChain();
         if (chain.containsInterceptorType(TxCommandInterceptor.class)) {
            return chain.findInterceptorWithClass(TxCommandInterceptor.class);
         }
         TxCommandInterceptor interceptor = new TxCommandInterceptor();
         chain.addInterceptorBefore(interceptor, CallInterceptor.class);
         return interceptor;
      }
   }
}
| 5,929
| 40.468531
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StaleLocksWithLockOnlyTxDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnComponentMethod;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnGlobalComponentMethod;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInboundRpc;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchMethodCall;
import static org.testng.AssertJUnit.assertEquals;
import jakarta.transaction.TransactionManager;
import org.hamcrest.BaseMatcher;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Description;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.testng.annotations.Test;
/**
 * Reproduces a stale-lock scenario: a lock-only pessimistic transaction commits
 * while a rebalance is in progress, and the test verifies no locks remain on
 * either node once the topology settles. Thread interleaving is driven by a
 * {@link StateSequencer}; the statement order below is the test itself.
 */
@Test(testName = "lock.StaleLocksWithLockOnlyTxDuringStateTransferTest", groups = "functional")
@CleanupAfterMethod
public class StaleLocksWithLockOnlyTxDuringStateTransferTest extends MultipleCacheManagersTest {
   public static final String CACHE_NAME = "testCache";
   @Override
   protected void createCacheManagers() throws Throwable {
      createCluster(TestDataSCI.INSTANCE, new ConfigurationBuilder(), 2);
      waitForClusterToForm();
   }
   public void testSync() throws Throwable {
      final StateSequencer sequencer = new StateSequencer();
      // "st" thread models state transfer, "tx" models the lock-only transaction.
      sequencer.logicalThread("st", "st:block_get_transactions", "st:resume_get_transactions",
            "st:block_ch_update_on_0", "st:block_ch_update_on_1", "st:resume_ch_update_on_0", "st:resume_ch_update_on_1");
      sequencer.logicalThread("tx", "tx:before_lock", "tx:block_remote_lock", "tx:resume_remote_lock", "tx:after_commit");
      // The lock will be acquired after rebalance has started, but before cache0 starts sending the transaction data to cache1
      sequencer.order("st:block_get_transactions", "tx:before_lock", "tx:block_remote_lock", "st:resume_get_transactions");
      // The tx will be committed (1PC) after cache1 has received all the state, but before the topology is updated
      sequencer.order("st:block_ch_update_on_1", "tx:resume_remote_lock", "tx:after_commit", "st:resume_ch_update_on_0");
      ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      cfg.clustering().cacheMode(CacheMode.DIST_SYNC)
            .stateTransfer().awaitInitialTransfer(false)
            .transaction().lockingMode(LockingMode.PESSIMISTIC);
      manager(0).defineConfiguration(CACHE_NAME, cfg.build());
      manager(1).defineConfiguration(CACHE_NAME, cfg.build());
      AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
      TransactionManager tm0 = cache0.getTransactionManager();
      DistributionManager dm0 = cache0.getDistributionManager();
      int initialTopologyId = dm0.getCacheTopology().getTopologyId();
      int rebalanceTopologyId = initialTopologyId + 1;
      final int finalTopologyId = rebalanceTopologyId + 3;
      // Block state request commands on cache0 until the lock command has been sent to cache1
      advanceOnComponentMethod(sequencer, cache0, StateProvider.class,
            matchMethodCall("getTransactionsForSegments").build())
            .before("st:block_get_transactions", "st:resume_get_transactions");
      // Block the final topology update until the tx has finished
      advanceOnGlobalComponentMethod(sequencer, manager(0), LocalTopologyManager.class,
            matchMethodCall("handleTopologyUpdate")
                  .withMatcher(0, CoreMatchers.equalTo(CACHE_NAME))
                  .withMatcher(1, new CacheTopologyMatcher(finalTopologyId)).build())
            .before("st:block_ch_update_on_0", "st:resume_ch_update_on_0");
      advanceOnGlobalComponentMethod(sequencer, manager(1), LocalTopologyManager.class,
            matchMethodCall("handleTopologyUpdate")
                  .withMatcher(0, CoreMatchers.equalTo(CACHE_NAME))
                  .withMatcher(1, new CacheTopologyMatcher(finalTopologyId)).build())
            .before("st:block_ch_update_on_1", "st:resume_ch_update_on_1");
      // Start cache 1, but the state request will be blocked on cache 0
      AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);
      // Block the remote lock command on cache 1
      advanceOnInboundRpc(sequencer, cache(1, CACHE_NAME),
            matchCommand(LockControlCommand.class).matchCount(0).withCache(CACHE_NAME).build())
            .before("tx:block_remote_lock", "tx:resume_remote_lock");
      // Wait for the rebalance to start
      sequencer.advance("tx:before_lock");
      assertEquals(rebalanceTopologyId, dm0.getCacheTopology().getTopologyId());
      // Start a transaction on cache 0
      MagicKey key = new MagicKey("testkey", cache0);
      tm0.begin();
      cache0.lock(key);
      tm0.commit();
      // Let the rebalance finish
      sequencer.advance("tx:after_commit");
      TestingUtil.waitForNoRebalance(caches(CACHE_NAME));
      assertEquals(finalTopologyId, dm0.getCacheTopology().getTopologyId());
      // Check for stale locks
      final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
      final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
      eventually(() -> tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0);
      sequencer.stop();
   }
   /** Hamcrest matcher: accepts a {@link CacheTopology} with the given topology id. */
   private static class CacheTopologyMatcher extends BaseMatcher<Object> {
      private final int topologyId;
      CacheTopologyMatcher(int topologyId) {
         this.topologyId = topologyId;
      }
      @Override
      public boolean matches(Object item) {
         return (item instanceof CacheTopology) && ((CacheTopology) item).getTopologyId() == topologyId;
      }
      @Override
      public void describeTo(Description description) {
         description.appendText("CacheTopology(" + topologyId + ")");
      }
   }
}
| 6,649
| 47.540146
| 127
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/CommitTimeoutTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInterceptor;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Arrays;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import jakarta.transaction.RollbackException;
import org.infinispan.Cache;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.tx.VersionedCommitCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.Test;
/**
* Test that a commit command that has timed out on a backup owner cannot write entries after the locks have been
* released on the primary owner.
*/
@Test(groups = "functional", testName = "statetransfer.CommitTimeoutTest")
@CleanupAfterMethod
public class CommitTimeoutTest extends MultipleCacheManagersTest {
private static final String TEST_KEY = "key";
private static final String TX1_VALUE = "value1";
private static final String TX2_VALUE = "value2";
@Override
protected void createCacheManagers() throws Throwable {
ControlledConsistentHashFactory consistentHashFactory = new ControlledConsistentHashFactory.Default(1, 2);
ConfigurationBuilder builder = new ConfigurationBuilder();
builder.clustering().cacheMode(CacheMode.DIST_SYNC);
builder.clustering().remoteTimeout(2000);
builder.clustering().hash().numSegments(1).consistentHashFactory(consistentHashFactory);
builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
addClusterEnabledCacheManager(builder);
addClusterEnabledCacheManager(builder);
addClusterEnabledCacheManager(builder);
waitForClusterToForm();
}
   /**
    * Verifies that a commit that timed out on the backup owner cannot write
    * its entry after the tx was rolled back and a second tx has overwritten
    * the key. The step-by-step plan is listed below; the sequencer enforces it.
    */
   public void testCommitDoesntWriteAfterRollback() throws Exception {
      // Start a tx on A: put(k, v1), owners(k) = [B (primary) and C (backup)]
      // Block the commit on C so that it times out
      // Wait for the rollback command to be executed on B and C, and for the tx to end
      // Check that locks are released on B
      // Start another transaction on A: put(k, v2) with the same key
      // Check that the new transaction writes successfully
      // Allow the commit to proceed on C
      // Check that k=v2 everywhere
      StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("tx1", "tx1:begin", "tx1:block_commit_on_backup", "tx1:after_rollback_on_primary",
            "tx1:after_rollback_on_backup", "tx1:resume_commit_on_backup", "tx1:after_commit_on_backup", "tx1:check");
      sequencer.logicalThread("tx2", "tx2:begin", "tx2:end");
      // tx2 must run entirely while tx1's commit is still blocked on the backup.
      sequencer.order("tx1:after_rollback_on_backup", "tx2:begin", "tx2:end", "tx1:resume_commit_on_backup");
      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(VersionedCommitCommand.class).matchCount(0).build())
            .before("tx1:block_commit_on_backup", "tx1:resume_commit_on_backup").after("tx1:after_commit_on_backup");
      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("tx1:after_rollback_on_primary");
      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("tx1:after_rollback_on_backup");
      // Sanity: the controlled CH made node 1 primary and node 2 backup.
      assertEquals(Arrays.asList(address(1), address(2)), cacheTopology(0).getDistribution(TEST_KEY).writeOwners());
      sequencer.advance("tx1:begin");
      tm(0).begin();
      cache(0).put(TEST_KEY, TX1_VALUE);
      try {
         tm(0).commit();
      } catch (RollbackException e) {
         // The backup's blocked commit makes the tx time out and roll back.
         log.debugf("Commit timed out as expected", e);
      }
      sequencer.advance("tx2:begin");
      // tx1's rollback must have released the lock on the primary owner.
      LockManager lockManager1 = TestingUtil.extractLockManager(cache(1));
      assertFalse(lockManager1.isLocked(TEST_KEY));
      tm(0).begin();
      cache(0).put(TEST_KEY, TX2_VALUE);
      tm(0).commit();
      checkValue();
      sequencer.advance("tx2:end");
      // Unblock tx1's stale commit on the backup; it must not overwrite v2.
      sequencer.advance("tx1:check");
      checkValue();
   }
/**
 * Asserts that every cache in the cluster reads {@code TX2_VALUE} for {@code TEST_KEY},
 * i.e. the second transaction's write is visible on all nodes.
 */
private void checkValue() {
   // Wildcard-typed Cache instead of the raw type, consistent with the other tests here.
   for (Cache<?, ?> cache : caches()) {
      assertEquals(TX2_VALUE, cache.get(TEST_KEY));
   }
}
@Test(enabled = false, description = "Fix for this scenario is not implemented yet - rollback is asynchronous")
public void testCommitDoesntWriteAfterTxEnd() throws Exception {
   // Start a tx on A: put(k, v1), owners(k) = [B (primary) and C (backup)]
   // Block the commit on C so that it times out
   // Wait for the rollback command to be executed on B and block before it executes on C
   // Check that k is still locked on B
   // Allow the commit to proceed on C
   // Allow the rollback to proceed on C
   // Check that k=v1 everywhere
   // Check that locks are released on B
   final StateSequencer sequencer = new StateSequencer();
   sequencer.logicalThread("tx1", "tx1:begin", "tx1:block_commit_on_backup", "tx1:after_rollback_on_primary",
         "tx1:block_rollback_on_backup", "tx1:resume_commit_on_backup", "tx1:after_commit_on_backup",
         "tx1:resume_rollback_on_backup", "tx1:after_rollback_on_backup", "tx1:check");
   // Hold the first CommitCommand on the backup owner (cache 2) until the sequencer reaches
   // "tx1:resume_commit_on_backup", then flag "tx1:after_commit_on_backup" once it completes.
   advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
         matchCommand(CommitCommand.class).matchCount(0).build())
         .before("tx1:block_commit_on_backup", "tx1:resume_commit_on_backup").after("tx1:after_commit_on_backup");
   // Track when the RollbackCommand has run on the primary owner (cache 1).
   advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
         matchCommand(RollbackCommand.class).build())
         .after("tx1:after_rollback_on_primary");
   // Hold the RollbackCommand on the backup owner until after the late commit has been replayed.
   advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
         matchCommand(RollbackCommand.class).build())
         .before("tx1:block_rollback_on_backup").after("tx1:after_rollback_on_backup");
   // Sanity check: owners(k) must be [node1 (primary), node2 (backup)].
   assertEquals(Arrays.asList(address(1), address(2)), cacheTopology(0).getDistribution(TEST_KEY).writeOwners());
   // While the rollback is still pending on the backup, the key must remain locked on the primary.
   Future<Object> lockCheckFuture = fork(() -> {
      sequencer.enter("tx1:resume_rollback_on_backup");
      try {
         assertTrue(TestingUtil.extractLockManager(cache(1)).isLocked(TEST_KEY));
      } finally {
         sequencer.exit("tx1:resume_rollback_on_backup");
      }
      return null;
   });
   sequencer.advance("tx1:begin");
   tm(0).begin();
   cache(0).put(TEST_KEY, TX1_VALUE);
   tm(0).commit();
   sequencer.advance("tx1:check");
   // After the rollback has fully executed, the lock must be released on the primary owner.
   assertFalse(TestingUtil.extractLockManager(cache(1)).isLocked(TEST_KEY));
   lockCheckFuture.get(10, TimeUnit.SECONDS);
}
}
| 7,344
| 42.720238
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferTimestampsTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.ControlledTimeService;
import org.testng.annotations.Test;
/**
 * Tests that entry creation and last-used timestamps are preserved during state transfer.
 *
 * @author Dan Berindei
 * @since 10.0
 */
@Test(testName = "statetransfer.StateTransferTimestampsTest", groups = "functional")
public class StateTransferTimestampsTest extends MultipleCacheManagersTest {
   public static final String CACHE_NAME = "cache";

   // Controlled clock injected into every manager so expiration can be driven deterministically.
   private ControlledTimeService timeService;

   @Override
   public Object[] factory() {
      return new Object[]{
            // DIST and REPL are different as REPL has a read optimization
            new StateTransferTimestampsTest().cacheMode(CacheMode.DIST_SYNC),
            new StateTransferTimestampsTest().cacheMode(CacheMode.REPL_SYNC),
            // With other storage types there's an opportunity to change the timestamps before the write
            new StateTransferTimestampsTest().cacheMode(CacheMode.DIST_SYNC).storageType(StorageType.OFF_HEAP),
            new StateTransferTimestampsTest().cacheMode(CacheMode.DIST_SYNC).storageType(StorageType.BINARY),
      };
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      createCluster(new ConfigurationBuilder(), 2);
      timeService = new ControlledTimeService();
      ConfigurationBuilder replConfig = new ConfigurationBuilder();
      replConfig.clustering().cacheMode(cacheMode).hash().numSegments(4);
      if (storageType != null) {
         replConfig.memory().storage(storageType);
      }
      for (EmbeddedCacheManager manager : managers()) {
         // Swap in the controlled TimeService before the cache starts so all
         // created/last-used timestamps come from the test clock.
         TestingUtil.replaceComponent(manager, TimeService.class, timeService, true);
         manager.defineConfiguration(CACHE_NAME, replConfig.build());
      }
   }

   public void testStateTransfer() {
      // Insert a key on node 0
      AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
      cache0.put("lifespan", "value", 2, SECONDS);
      cache0.put("maxidle", "value", -1, SECONDS, 2, SECONDS);
      cache0.put("lifespan+maxidle", "value", 10, SECONDS, 2, SECONDS);
      long created = timeService.wallClockTime();
      assertTimestamps(cache0, created, created);
      // Advance the time service and start node 1 triggering state transfer
      timeService.advance(SECONDS.toMillis(1));
      AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);
      // Check the timestamps on node 1: creation time must be preserved by state transfer,
      // while the max-idle entries report the (advanced) transfer time as last-used.
      long accessed = timeService.wallClockTime();
      assertTimestamps(cache1, created, accessed);
      // Advance the time service to expire the lifespan entry
      timeService.advance(SECONDS.toMillis(2));
      assertNull(cache1.getCacheEntry("lifespan"));
      assertNotNull(cache1.getCacheEntry("maxidle"));
      assertNotNull(cache1.getCacheEntry("lifespan+maxidle"));
      // Advance the time service a final time to expire the maxidle entry
      timeService.advance(SECONDS.toMillis(3));
      assertNull(cache1.getCacheEntry("maxidle"));
      assertNull(cache1.getCacheEntry("lifespan+maxidle"));
   }

   /**
    * Asserts the timestamps of the three test entries on the given node.
    * Lifespan-only entries don't track last-used (-1); max-idle-only entries
    * don't track creation (-1); entries with both track both.
    */
   private void assertTimestamps(AdvancedCache<Object, Object> cache, long created, long accessed) {
      CacheEntry<Object, Object> lifespanEntry = cache.getCacheEntry("lifespan");
      assertEquals(created, lifespanEntry.getCreated());
      assertEquals(-1, lifespanEntry.getLastUsed());
      CacheEntry<Object, Object> maxIdleEntry = cache.getCacheEntry("maxidle");
      assertEquals(-1, maxIdleEntry.getCreated());
      assertEquals(accessed, maxIdleEntry.getLastUsed());
      CacheEntry<Object, Object> lifespanMaxIdleEntry = cache.getCacheEntry("lifespan+maxidle");
      assertEquals(created, lifespanMaxIdleEntry.getCreated());
      assertEquals(accessed, lifespanMaxIdleEntry.getLastUsed());
   }
}
| 4,435
| 43.36
| 108
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ReplStateTransferCacheLoaderTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import java.io.File;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
/**
* Short test to reproduce the scenario from ISPN-2712/MODE-1754 (https://issues.jboss.org/browse/ISPN-2712, https://issues.jboss.org/browse/MODE-1754).
* <p/>
* This test passes on 5.1.x but fails on 5.2.0 without the fix.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.ReplStateTransferCacheLoaderTest")
@CleanupAfterMethod
public class ReplStateTransferCacheLoaderTest extends MultipleCacheManagersTest {
   private static final Log log = LogFactory.getLog(ReplStateTransferCacheLoaderTest.class);

   // Temporary directory holding the per-node single file stores; removed after the class.
   private File tmpDir;
   private GlobalConfigurationBuilder globalBuilder;
   private ConfigurationBuilder builder;

   @Override
   protected void createCacheManagers() {
      tmpDir = new File(CommonsTestingUtil.tmpDirectory(this.getClass()));
      Util.recursiveFileRemove(tmpDir);
      globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      globalBuilder.globalState().persistentLocation(tmpDir.getPath());
      // reproduce the MODE-1754 config as closely as possible
      builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true, true);
      builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL).lockingMode(LockingMode.PESSIMISTIC)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
            .memory().size(1000)
            .locking().lockAcquisitionTimeout(20000)
            .concurrencyLevel(5000) // lowering this to 50 makes the test pass also on 5.2 but it's just a temporary workaround
            .useLockStriping(false).isolationLevel(IsolationLevel.READ_COMMITTED)
            .memory().storageType(StorageType.BINARY)
            .clustering().remoteTimeout(20000)
            .stateTransfer().timeout(240000).fetchInMemoryState(false).chunkSize(10000)
            .persistence().addSingleFileStore().location(new File(tmpDir, "store0").getAbsolutePath());
      createCluster(globalBuilder, builder, 1);
      waitForClusterToForm();
   }

   @AfterClass
   protected void clearTempDir() {
      Util.recursiveFileRemove(tmpDir);
   }

   public void testStateTransfer() {
      final int numKeys = 300;
      for (int i = 0; i < numKeys; i++) {
         cache(0).put(i, i);
      }
      log.info("Finished putting keys");
      for (int i = 0; i < numKeys; i++) {
         assertEquals(i, cache(0).get(i));
      }
      log.info("Adding a new node ..");
      // make sure this node writes in a different location
      // Fixed: the new node must use its own "store1" directory - it previously pointed at
      // node 0's "store0", so both nodes shared the same single file store on disk.
      builder.persistence().clearStores().addSingleFileStore().location(new File(tmpDir, "store1").getAbsolutePath()).fetchPersistentState(true);
      addClusterEnabledCacheManager(globalBuilder, builder);
      log.info("Added a new node");
      for (int i = 0; i < numKeys; i++) {
         // some keys are lost in 5.2
         assertEquals(i, cache(1).get(i));
      }
   }
}
| 3,827
| 38.875
| 152
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ManyTxsDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import jakarta.transaction.TransactionManager;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.executors.BlockingThreadPoolExecutorFactory;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.testng.annotations.Test;
/**
 * Tests that many transactions started while a joiner is still receiving state are all
 * applied on both the old and the new owner, and leave no stale locks or transactions.
 */
@Test(testName = "statetransfer.ManyTxsDuringStateTransferTest", groups = "functional")
@CleanupAfterMethod
public class ManyTxsDuringStateTransferTest extends MultipleCacheManagersTest {
   public static final String CACHE_NAME = "testCache";
   private static final int NUM_TXS = 20;

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder defaultBuilder = new ConfigurationBuilder();
      addClusterEnabledCacheManager(getGlobalConfigurationBuilder(), defaultBuilder);
      addClusterEnabledCacheManager(getGlobalConfigurationBuilder(), defaultBuilder);
      waitForClusterToForm();
   }

   /**
    * Limits the remote-command thread pool to a single thread so replicated commands queue
    * up behind the blocked state transfer instead of executing concurrently.
    */
   private GlobalConfigurationBuilder getGlobalConfigurationBuilder() {
      GlobalConfigurationBuilder globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      BlockingThreadPoolExecutorFactory threadPoolFactory = new BlockingThreadPoolExecutorFactory(1, 1, 0, Thread.NORM_PRIORITY);
      globalBuilder.transport().remoteCommandThreadPool().threadPoolFactory(threadPoolFactory);
      return globalBuilder;
   }

   public void testManyTxs() throws Throwable {
      ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      cfg.clustering().cacheMode(CacheMode.DIST_SYNC)
            .stateTransfer().awaitInitialTransfer(false)
            .transaction().lockingMode(LockingMode.OPTIMISTIC);
      manager(0).defineConfiguration(CACHE_NAME, cfg.build());
      manager(1).defineConfiguration(CACHE_NAME, cfg.build());
      final CheckPoint checkpoint = new CheckPoint();
      final AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
      final TransactionManager tm0 = cache0.getTransactionManager();
      // Block state request commands on cache 0
      StateProvider stateProvider = TestingUtil.extractComponent(cache0, StateProvider.class);
      StateProvider spyProvider = spy(stateProvider);
      doAnswer(invocation -> {
         Object[] arguments = invocation.getArguments();
         Address source = (Address) arguments[0];
         int topologyId = (Integer) arguments[1];
         CompletionStage<?> result = (CompletionStage<?>) invocation.callRealMethod();
         return result.thenApply(transactions -> {
            try {
               // Signal that the transactions were collected, then wait until the test
               // allows the response to be sent to the joiner.
               checkpoint.trigger("post_get_transactions_" + topologyId + "_from_" + source);
               checkpoint.awaitStrict("resume_get_transactions_" + topologyId + "_from_" + source, 10, SECONDS);
               return transactions;
            } catch (InterruptedException | TimeoutException e) {
               throw new TestException(e);
            }
         });
      }).when(spyProvider).getTransactionsForSegments(any(Address.class), anyInt(), any());
      TestingUtil.replaceComponent(cache0, StateProvider.class, spyProvider, true);
      // Start cache 1, but the tx data request will be blocked on cache 0
      DistributionManager dm0 = cache0.getDistributionManager();
      int initialTopologyId = dm0.getCacheTopology().getTopologyId();
      int rebalanceTopologyId = initialTopologyId + 1;
      AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);
      checkpoint.awaitStrict("post_get_transactions_" + rebalanceTopologyId + "_from_" + address(1), 10, SECONDS);
      // Start many transactions on cache 0; they reach cache 1 while it is still
      // waiting for the transaction data from cache 0.
      Future<Object>[] futures = new Future[NUM_TXS];
      for (int i = 0; i < NUM_TXS; i++) {
         final int ii = i;
         futures[i] = fork(() -> {
            tm0.begin();
            cache0.put("testkey" + ii, "v" + ii);
            tm0.commit();
            return null;
         });
      }
      // Wait for all (or at least most of) the txs to be replicated to cache 1
      Thread.sleep(1000);
      // Verify that cache 1 is in fact transferring state and transactional segments were requested
      StateConsumer stateConsumer = TestingUtil.extractComponent(cache1, StateConsumer.class);
      assertTrue(stateConsumer.isStateTransferInProgress());
      assertTrue(stateConsumer.inflightTransactionSegmentCount() > 0);
      // Let cache 1 receive the tx from cache 0.
      checkpoint.trigger("resume_get_transactions_" + rebalanceTopologyId + "_from_" + address(1));
      TestingUtil.waitForNoRebalance(caches(CACHE_NAME));
      // State transfer ended on cache 1 and request for transactional segments were received
      assertFalse(stateConsumer.isStateTransferInProgress());
      assertEquals(stateConsumer.inflightTransactionSegmentCount(), 0);
      // Wait for the txs to finish and check that every write landed in both data containers
      DataContainer dataContainer0 = TestingUtil.extractComponent(cache0, InternalDataContainer.class);
      DataContainer dataContainer1 = TestingUtil.extractComponent(cache1, InternalDataContainer.class);
      for (int i = 0; i < NUM_TXS; i++) {
         futures[i].get(10, SECONDS);
         assertEquals("v" + i, dataContainer0.get("testkey" + i).getValue());
         assertEquals("v" + i, dataContainer1.get("testkey" + i).getValue());
      }
      // Check for stale locks
      final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
      final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
      eventuallyEquals(0, tt0::getLocalTxCount);
      eventuallyEquals(0, tt1::getRemoteTxCount);
   }
}
| 7,014
| 48.401408
| 129
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/PrepareTimeoutTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInterceptor;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.Arrays;
import org.infinispan.Cache;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.tx.VersionedPrepareCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.Test;
/**
* Test that a commit command that has timed out on a backup owner cannot write entries after the locks have been
* released on the primary owner.
*/
@Test(groups = "functional", testName = "statetransfer.PrepareTimeoutTest")
@CleanupAfterMethod
public class PrepareTimeoutTest extends MultipleCacheManagersTest {
   private static final String TEST_KEY = "key";
   private static final String TX1_VALUE = "value1";
   // Declared as String for consistency with TX1_VALUE (was needlessly widened to java.lang.Object).
   private static final String TX2_VALUE = "value2";
   public static final int COMPLETED_TX_TIMEOUT = 2000;

   @Override
   protected void createCacheManagers() throws Throwable {
      // Single segment owned by [node1 (primary), node2 (backup)], so node0 is always the originator.
      ControlledConsistentHashFactory<?> consistentHashFactory = new ControlledConsistentHashFactory.Default(1, 2);
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.clustering().cacheMode(CacheMode.DIST_SYNC);
      builder.clustering().remoteTimeout(2000);
      builder.clustering().hash().numSegments(1).consistentHashFactory(consistentHashFactory);
      builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
      builder.transaction().completedTxTimeout(COMPLETED_TX_TIMEOUT);
      addClusterEnabledCacheManager(builder);
      addClusterEnabledCacheManager(builder);
      addClusterEnabledCacheManager(builder);
      waitForClusterToForm();
   }

   public void testCommitDoesntWriteAfterRollback() throws Exception {
      // Start a tx on A: put(k, v1), owners(k) = [B (primary) and C (backup)]
      // Block the prepare on B and C so that it times out
      // Wait for the rollback command to be executed on B and C
      // Unblock the prepare on B and C
      // Check that there are no locked keys or remote transactions on B and C
      StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("main", "main:start", "main:check");
      sequencer.logicalThread("primary", "primary:block_prepare", "primary:after_rollback", "primary:resume_prepare",
            "primary:after_prepare");
      sequencer.logicalThread("backup", "backup:block_prepare", "backup:after_rollback", "backup:resume_prepare",
            "backup:after_prepare");
      sequencer.order("main:start", "primary:block_prepare", "primary:after_prepare", "main:check");
      sequencer.order("main:start", "backup:block_prepare", "backup:after_prepare", "main:check");
      // Hold the first prepare on the primary owner until the rollback has run there.
      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(VersionedPrepareCommand.class).matchCount(0).build())
            .before("primary:block_prepare", "primary:resume_prepare").after("primary:after_prepare");
      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("primary:after_rollback");
      // Same choreography for the backup owner.
      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(VersionedPrepareCommand.class).matchCount(0).build())
            .before("backup:block_prepare", "backup:resume_prepare").after("backup:after_prepare");
      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("backup:after_rollback");
      assertEquals(Arrays.asList(address(1), address(2)),
            cacheTopology(0).getDistribution(TEST_KEY).writeOwners());
      sequencer.advance("main:start");
      tm(0).begin();
      cache(0).put(TEST_KEY, TX1_VALUE);
      try {
         tm(0).commit();
         fail("Exception expected during commit");
      } catch (Exception e) {
         // expected - the prepare is blocked on both owners until it times out
      }
      // A second transaction on the same key must be able to lock and commit.
      tm(0).begin();
      cache(0).put(TEST_KEY, TX2_VALUE);
      GlobalTransaction gtx1 = transactionTable(0).getLocalTransaction(tm(0).getTransaction()).getGlobalTransaction();
      tm(0).commit();
      // Wait for the 1st tx to be removed from the completed txs table
      Thread.sleep(COMPLETED_TX_TIMEOUT + 1000);
      assertTrue(transactionTable(1).isTransactionCompleted(gtx1));
      assertTrue(transactionTable(2).isTransactionCompleted(gtx1));
      sequencer.advance("main:check");
      // After the late prepares were replayed, no locks or remote txs may remain.
      LockManager lockManager1 = TestingUtil.extractLockManager(cache(1));
      assertFalse(lockManager1.isLocked(TEST_KEY));
      assertFalse(transactionTable(1).containRemoteTx(gtx1));
      assertFalse(transactionTable(2).containRemoteTx(gtx1));
      for (Cache<?, ?> cache : caches()) {
         assertEquals(TX2_VALUE, cache.get(TEST_KEY));
      }
   }
}
| 5,628
| 43.674603
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/PerCacheRebalancePolicyJmxTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.fwk.TestCacheManagerFactory.configureJmx;
import static org.testng.Assert.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Arrays;
import java.util.List;
import javax.management.Attribute;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.infinispan.Cache;
import org.infinispan.commons.jmx.MBeanServerLookup;
import org.infinispan.commons.jmx.TestMBeanServerLookup;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.topology.ClusterTopologyManager;
import org.testng.annotations.Test;
/**
* @author Dan Berindei
* @author Tristan Tarrant
*/
@Test(groups = "functional", testName = "statetransfer.PerCacheRebalancePolicyJmxTest")
@CleanupAfterMethod
@InCacheMode({CacheMode.DIST_SYNC})
public class PerCacheRebalancePolicyJmxTest extends MultipleCacheManagersTest {
   private static final String REBALANCING_ENABLED = "rebalancingEnabled";

   private final MBeanServerLookup mBeanServerLookup = TestMBeanServerLookup.create();

   public void testJoinAndLeaveWithRebalanceSuspended() throws Exception {
      doTest(false);
   }

   public void testJoinAndLeaveWithRebalanceSuspendedAwaitingInitialTransfer() throws Exception {
      doTest(true);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      //no-op - nodes are added individually in doTest()
   }

   private ConfigurationBuilder getConfigurationBuilder(boolean awaitInitialTransfer) {
      ConfigurationBuilder cb = new ConfigurationBuilder();
      cb.clustering().cacheMode(cacheMode)
            .stateTransfer().awaitInitialTransfer(awaitInitialTransfer);
      return cb;
   }

   private GlobalConfigurationBuilder getGlobalConfigurationBuilder(String rackId) {
      int index = cacheManagers.size();
      GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gcb.transport().rackId(rackId);
      // Each manager registers under its own jmx domain, suffixed with its cluster index.
      configureJmx(gcb, getClass().getSimpleName() + index, mBeanServerLookup);
      return gcb;
   }

   // Adds a manager to the cluster and defines the two test caches "a" and "b" on it.
   private void addNode(GlobalConfigurationBuilder gcb, ConfigurationBuilder builder) {
      EmbeddedCacheManager cacheManager = addClusterEnabledCacheManager(gcb, builder);
      cacheManager.defineConfiguration("a", builder.build());
      cacheManager.defineConfiguration("b", builder.build());
   }

   private void doTest(boolean awaitInitialTransfer) throws Exception {
      ConfigurationBuilder builder = getConfigurationBuilder(awaitInitialTransfer);
      addNode(getGlobalConfigurationBuilder("r1"), builder);
      addNode(getGlobalConfigurationBuilder("r1"), builder);
      waitForClusterToForm("a", "b");
      MBeanServer mBeanServer = mBeanServerLookup.getMBeanServer();
      // Fixed: domain0 must come from manager(0), not manager(1) - every manager has a
      // distinct jmx domain, and ltmName0/jmxCacheA/jmxCacheB are meant to target node 0.
      String domain0 = manager(0).getCacheManagerConfiguration().jmx().domain();
      ObjectName ltmName0 = TestingUtil.getCacheManagerObjectName(domain0, "DefaultCacheManager", "LocalTopologyManager");
      String domain1 = manager(1).getCacheManagerConfiguration().jmx().domain();
      ObjectName ltmName1 = TestingUtil.getCacheManagerObjectName(domain1, "DefaultCacheManager", "LocalTopologyManager");
      ObjectName jmxCacheA = TestingUtil.getCacheObjectName(domain0, "a(" + cacheMode.name().toLowerCase() + ")");
      ObjectName jmxCacheB = TestingUtil.getCacheObjectName(domain0, "b(" + cacheMode.name().toLowerCase() + ")");
      // Check initial state
      DistributionManager dm0a = advancedCache(0, "a").getDistributionManager();
      assertEquals(Arrays.asList(address(0), address(1)), dm0a.getCacheTopology().getCurrentCH().getMembers());
      assertNull(dm0a.getCacheTopology().getPendingCH());
      DistributionManager dm0b = advancedCache(0, "b").getDistributionManager();
      assertEquals(Arrays.asList(address(0), address(1)), dm0b.getCacheTopology().getCurrentCH().getMembers());
      assertNull(dm0b.getCacheTopology().getPendingCH());
      assertTrue(mBeanServer.isRegistered(ltmName0));
      assertTrue((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      // Suspend global rebalancing
      mBeanServer.setAttribute(ltmName0, new Attribute(REBALANCING_ENABLED, false));
      assertFalse((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      // Add 2 nodes
      log.debugf("Starting 2 new nodes");
      addNode(getGlobalConfigurationBuilder("r2"), builder);
      addNode(getGlobalConfigurationBuilder("r2"), builder);
      // Ensure the caches are started on all nodes
      TestingUtil.blockUntilViewsReceived(3000, getCaches("a"));
      TestingUtil.blockUntilViewsReceived(3000, getCaches("b"));
      // Check that rebalance is suspended on the new nodes
      ClusterTopologyManager ctm2 = TestingUtil.extractGlobalComponent(manager(2), ClusterTopologyManager.class);
      assertFalse(ctm2.isRebalancingEnabled());
      ClusterTopologyManager ctm3 = TestingUtil.extractGlobalComponent(manager(3), ClusterTopologyManager.class);
      assertFalse(ctm3.isRebalancingEnabled());
      // Check that no rebalance happened after 1 second
      Thread.sleep(1000);
      assertFalse((Boolean) mBeanServer.getAttribute(ltmName1, REBALANCING_ENABLED));
      assertNull(dm0a.getCacheTopology().getPendingCH());
      assertEquals(Arrays.asList(address(0), address(1)), dm0a.getCacheTopology().getCurrentCH().getMembers());
      // Disable rebalancing for cache b
      mBeanServer.setAttribute(jmxCacheB, new Attribute(REBALANCING_ENABLED, false));
      // Re-enable global rebalancing
      log.debugf("Rebalancing with nodes %s %s %s %s", address(0), address(1), address(2), address(3));
      mBeanServer.setAttribute(ltmName0, new Attribute(REBALANCING_ENABLED, true));
      assertTrue((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      checkRehashed(dm0a, getCaches("a"), Arrays.asList(address(0), address(1), address(2), address(3)));
      // Check that cache "b" still has rebalancing disabled
      assertFalse((Boolean) mBeanServer.getAttribute(jmxCacheB, REBALANCING_ENABLED));
      assertEquals(Arrays.asList(address(0), address(1)), dm0b.getCacheTopology().getCurrentCH().getMembers());
      // Enable rebalancing for cache b
      mBeanServer.setAttribute(jmxCacheB, new Attribute(REBALANCING_ENABLED, true));
      // Check that cache "b" now has 4 nodes, and the CH is balanced
      checkRehashed(dm0b, getCaches("b"), Arrays.asList(address(0), address(1), address(2), address(3)));
   }

   // Waits for rebalance to finish, then checks that the CH has exactly the expected
   // members and every segment has the configured number of owners.
   private void checkRehashed(DistributionManager dm, List<Cache<Object,Object>> caches, List<Address> addresses) {
      TestingUtil.waitForNoRebalance(caches);
      assertNull(dm.getCacheTopology().getPendingCH());
      LocalizedCacheTopology topology = dm.getCacheTopology();
      ConsistentHash ch = topology.getCurrentCH();
      assertEquals(addresses, ch.getMembers());
      int numOwners = Math.min(caches.get(0).getCacheConfiguration().clustering().hash().numOwners(), ch.getMembers().size());
      for (int i = 0; i < ch.getNumSegments(); i++) {
         assertEquals(numOwners, ch.locateOwnersForSegment(i).size());
      }
   }
}
| 7,826
| 46.436364
| 126
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/LeaveDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.util.BlockingLocalTopologyManager.confirmTopologyUpdate;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.BlockingLocalTopologyManager;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "statetransfer.LeaveDuringStateTransferTest", description = "One instance of ISPN-5021")
public class LeaveDuringStateTransferTest extends MultipleCacheManagersTest {
private ControlledConsistentHashFactory.Default factory = new ControlledConsistentHashFactory.Default(0, 1);
@Override
protected void createCacheManagers() throws Throwable {
   // Three initial nodes; failure detection is enabled so a discarded member leaves the view.
   createClusteredCaches(3, configuration(), new TransportFlags().withFD(true));
}
// Builds the DIST_SYNC configuration shared by all nodes: a single segment whose
// ownership is fully determined by the controlled consistent hash factory.
private ConfigurationBuilder configuration() {
   ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
   builder.clustering().hash()
         .numSegments(1)
         .consistentHashFactory(factory);
   return builder;
}
@Override
protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
   // Replace the GlobalConfigurationManager with a no-op implementation before start.
   NoOpGlobalConfigurationManager.amendCacheManager(cm);
}
public void test() throws Exception {
int startTopologyId = currentTopologyId(cache(0));
BlockingLocalTopologyManager localTopologyManager0 =
BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache(cacheManagers.get(0));
BlockingLocalTopologyManager localTopologyManager2 =
BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache(cacheManagers.get(2));
try {
factory.setOwnerIndexes(1, 2);
addClusterEnabledCacheManager(configuration(), new TransportFlags().withFD(true));
Future<Cache<Object, Object>> joiner = fork(() -> cacheManagers.get(3).getCache());
// Install READ_OLD, READ_ALL and READ_NEW topologies, but do not confirm READ_NEW (+3)
confirmTopologyUpdate(CacheTopology.Phase.READ_OLD_WRITE_ALL, localTopologyManager0, localTopologyManager2);
confirmTopologyUpdate(CacheTopology.Phase.READ_ALL_WRITE_ALL, localTopologyManager0, localTopologyManager2);
localTopologyManager0.expectTopologyUpdate(CacheTopology.Phase.READ_NEW_WRITE_ALL).unblock();
localTopologyManager2.expectTopologyUpdate(CacheTopology.Phase.READ_NEW_WRITE_ALL).unblock();
BlockingLocalTopologyManager.BlockedConfirmation blockedConfirmation0 =
localTopologyManager0.expectPhaseConfirmation();
BlockingLocalTopologyManager.BlockedConfirmation blockedConfirmation2 =
localTopologyManager2.expectPhaseConfirmation();
log.debug("State transfer almost complete");
eventually(() -> currentTopologyId(cache(2)) == startTopologyId + 3);
// Block rebalance that could follow even if the previous rebalance was not completed
log.debug("Isolating node " + cacheManagers.get(1));
TestingUtil.getDiscardForCache(manager(1)).discardAll(true);
TestingUtil.blockUntilViewsReceived(60000, true, cacheManagers);
log.debug("Waiting for topology update from view change");
// The coordinator sends a READ_NEW topology (+4), but doesn't wait for the confirmation
// before restarting the rebalance with a READ_OLD topology update (+5).
// Since the messages are not ordered, either one can be processed first.
BlockingLocalTopologyManager.BlockedTopology blockedTopology0 = localTopologyManager0.expectTopologyUpdate();
BlockingLocalTopologyManager.BlockedTopology blockedTopology2 = localTopologyManager2.expectTopologyUpdate();
// The LimitedExecutor doesn't allow the new topology to be installed until the old confirmations are unblocked
blockedConfirmation0.unblock();
blockedConfirmation2.unblock();
// Unblock the READ_NEW topology (+4) and keep the READ_OLD one (+5) blocked
blockedTopology0 = blockNewRebalance(localTopologyManager0, blockedTopology0);
blockedTopology2 = blockNewRebalance(localTopologyManager2, blockedTopology2);
// since we blocked confirmation for topology +3, the new topology will be +4
eventually(() -> currentTopologyId(cache(0)) == startTopologyId + 4);
cache(0).put("key", "value");
assertEquals("value", cache(2).get("key"));
// Unblock and confirm READ_OLD topology (+5)
blockedTopology0.unblock();
blockedTopology2.unblock();
localTopologyManager0.expectPhaseConfirmation().unblock();
localTopologyManager2.expectPhaseConfirmation().unblock();
// Finish the rebalance
confirmTopologyUpdate(CacheTopology.Phase.READ_ALL_WRITE_ALL, localTopologyManager0, localTopologyManager2);
confirmTopologyUpdate(CacheTopology.Phase.READ_NEW_WRITE_ALL, localTopologyManager0, localTopologyManager2);
confirmTopologyUpdate(CacheTopology.Phase.NO_REBALANCE, localTopologyManager0, localTopologyManager2);
joiner.get(10, TimeUnit.SECONDS);
} finally {
localTopologyManager2.stopBlocking();
localTopologyManager0.stopBlocking();
}
}
private BlockingLocalTopologyManager.BlockedTopology
blockNewRebalance(BlockingLocalTopologyManager ltm, BlockingLocalTopologyManager.BlockedTopology blockedTopology)
throws InterruptedException {
if (blockedTopology.getCacheTopology().getPhase() == CacheTopology.Phase.READ_NEW_WRITE_ALL) {
// Block the rebalance start first, and only then unblock the previous topology
// Otherwise the order of the rebalance start and topology confirmation wouldn't be deterministic
BlockingLocalTopologyManager.BlockedTopology newTopology =
ltm.expectTopologyUpdate(CacheTopology.Phase.READ_OLD_WRITE_ALL);
blockedTopology.unblock();
ltm.expectPhaseConfirmation().unblock();
return newTopology;
} else {
assertEquals(CacheTopology.Phase.READ_OLD_WRITE_ALL, blockedTopology.getCacheTopology().getPhase());
ltm.confirmTopologyUpdate(CacheTopology.Phase.READ_NEW_WRITE_ALL);
return blockedTopology;
}
}
private int currentTopologyId(Cache cache) {
return TestingUtil.extractComponent(cache, DistributionManager.class).getCacheTopology().getTopologyId();
}
}
| 7,013
| 51.736842
| 122
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ReadAfterLostDataTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.TestingUtil.crashCacheManagers;
import static org.infinispan.test.TestingUtil.installNewView;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.topology.TopologyUpdateCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.functional.EntryView.ReadEntryView;
import org.infinispan.functional.EntryView.ReadWriteEntryView;
import org.infinispan.functional.FunctionalMap.ReadOnlyMap;
import org.infinispan.functional.FunctionalMap.ReadWriteMap;
import org.infinispan.functional.impl.FunctionalMapImpl;
import org.infinispan.functional.impl.ReadOnlyMapImpl;
import org.infinispan.functional.impl.ReadWriteMapImpl;
import org.infinispan.marshall.core.MarshallableFunctions;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.topology.HeartBeatCommand;
import org.infinispan.util.ControlledTransport;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
 * Tests operations executed when the actual owners are lost, with partition handling disabled (AP mode).
 *
 * Four keys are chosen so that, after crashing nodes 2 and 3, the surviving partition {0, 1} holds
 * both owners, only the primary, only the backup, or neither owner of a key. Each operation must
 * then return the surviving values and simply miss the lost one, both with and without the
 * post-crash topology update having been delivered.
 */
@Test(groups = "functional", testName = "statetransfer.ReadAfterLostDataTest")
@InCacheMode(CacheMode.DIST_SYNC)
@CleanupAfterMethod
public class ReadAfterLostDataTest extends MultipleCacheManagersTest {
   // Tear-down actions registered by tests (e.g. ControlledTransport::stopBlocking), run after each method
   private final List<Runnable> cleanup = new ArrayList<>();

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder cb = new ConfigurationBuilder();
      // ALLOW_READ_WRITES == AP mode: no partition gets degraded, operations keep working
      cb.clustering().cacheMode(cacheMode)
        .partitionHandling().whenSplit(PartitionHandling.ALLOW_READ_WRITES);
      createClusteredCaches(4, TestDataSCI.INSTANCE, cb, new TransportFlags().withFD(true).withMerge(true));
   }

   @AfterMethod
   protected void cleanup() {
      cleanup.forEach(Runnable::run);
      cleanup.clear();
   }

   public void testGet() throws Exception {
      test(ReadAfterLostDataTest::get, false, false);
   }

   public void testGetBeforeTopologyUpdate() throws Exception {
      test(ReadAfterLostDataTest::get, false, true);
   }

   public void testGetAll() throws Exception {
      test(ReadAfterLostDataTest::getAll, false, false);
   }

   public void testGetAllBeforeTopologyUpdate() throws Exception {
      test(ReadAfterLostDataTest::getAll, false, true);
   }

   public void testPut() throws Exception {
      test(ReadAfterLostDataTest::put, true, false);
   }

   public void testRemove() throws Exception {
      test(ReadAfterLostDataTest::remove, true, false);
   }

   public void testReplace() throws Exception {
      test(ReadAfterLostDataTest::replace, true, false);
   }

   //TODO: We don't test put/remove/replace/read-write before topology update as with triangle these commands
   // invoke RpcManager.sendTo that does not throw an exception when the target is not in view anymore.
   // These commands rely on next topology (that is blocked) causing the OutdatedTopologyException.
   // We'd need to execute them in parallel, wait until all of them use RpcManager.sendTo and then unblock
   // the topology change. That's rather too much tied to the actual implementation.
   public void testPutMap() throws Exception {
      test(ReadAfterLostDataTest::putMap, true, false);
   }

   public void testPutMapBeforeTopologyUpdate() throws Exception {
      test(ReadAfterLostDataTest::putMap, true, true);
   }

   public void testRead() throws Exception {
      test(ReadAfterLostDataTest::read, false, false);
   }

   public void testReadBeforeTopologyUpdate() throws Exception {
      test(ReadAfterLostDataTest::read, false, true);
   }

   public void testReadMany() throws Exception {
      test(ReadAfterLostDataTest::readMany, false, false);
   }

   public void testReadManyBeforeTopologyUpdate() throws Exception {
      test(ReadAfterLostDataTest::readMany, false, true);
   }

   public void testReadWrite() throws Exception {
      test(ReadAfterLostDataTest::readWrite, false, false);
   }

   public void testReadWriteMany() throws Exception {
      test(ReadAfterLostDataTest::readWriteMany, false, false);
   }

   public void testReadWriteManyBeforeTopologyUpdate() throws Exception {
      test(ReadAfterLostDataTest::readWriteMany, true, true);
   }

   /**
    * Shared driver: populates the four keys, crashes nodes 2 and 3, then invokes the operation.
    *
    * @param operation    the cache operation under test; returns the key->value results it observed
    * @param write        {@code true} if the operation mutates data (skips the second verification pass)
    * @param blockUpdates {@code true} to hold back the coordinator's TopologyUpdateCommands, so the
    *                     operation runs (and retries) on the pre-crash topology first
    */
   protected void test(BiFunction<Cache<Object, Object>, Collection<?>, Map<?, ?>> operation, boolean write, boolean blockUpdates) throws Exception {
      List<Object> keys = new ArrayList<>();
      keys.add(getKeyForCache(cache(0), cache(1))); // both owners in p0
      keys.add(getKeyForCache(cache(0), cache(2))); // primary in p0
      keys.add(getKeyForCache(cache(2), cache(1))); // backup in p0
      keys.add(getKeyForCache(cache(2), cache(3))); // nothing in p0

      for (int i = 0; i < keys.size(); ++i) {
         cache(0).put(keys.get(i), "value" + i);
      }

      AdvancedCache<Object, Object> coordCache = advancedCache(0);
      assertTrue(coordCache.getCacheManager().isCoordinator());
      // Disable rebalancing so the crash only produces a topology update, not a full rebalance
      ClusterTopologyManager clusterTopologyManager = TestingUtil.extractComponent(coordCache, ClusterTopologyManager.class);
      clusterTopologyManager.setRebalancingEnabled(false);
      if (blockUpdates) {
         ControlledTransport controlledTransport = ControlledTransport.replace(coordCache);
         controlledTransport.excludeCacheCommands();
         controlledTransport.excludeCommands(HeartBeatCommand.class);
         cleanup.add(controlledTransport::stopBlocking);
         // One TopologyUpdateCommand is sent per cache, including internal caches
         InternalCacheRegistry icr = coordCache.getCacheManager().getGlobalComponentRegistry().getComponent(InternalCacheRegistry.class);
         int cacheCount = icr.getInternalCacheNames().size() + 1; // include default
         List<CompletableFuture<ControlledTransport.BlockedRequest<TopologyUpdateCommand>>> topologyUpdateRequests = new ArrayList<>();
         // Block the sending of the TopologyUpdateCommand until a command asks for the transaction data future
         for(int i = 0; i < cacheCount; i++) {
            topologyUpdateRequests.add(controlledTransport.expectCommandAsync(TopologyUpdateCommand.class));
         }
         CompletableFuture<Void> firstTransactionDataRequest = new CompletableFuture<>();
         for (Cache<?, ?> c : Arrays.asList(cache(0), cache(1))) {
            int currentTopology = c.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
            // Because all responses are CacheNotFoundResponses, retries will block to wait for a new topology
            // Even reads block, because in general the values might have been copied to the write-only owners
            TestingUtil.wrapComponent(c, StateTransferLock.class,
                                      stl -> new UnblockingStateTransferLock(stl, currentTopology + 1, firstTransactionDataRequest));
         }
         // Release the held topology updates only once some retried command starts waiting for them
         firstTransactionDataRequest.thenAccept(__ -> {
            for(CompletableFuture<ControlledTransport.BlockedRequest<TopologyUpdateCommand>> request : topologyUpdateRequests) {
               request.thenAccept(ControlledTransport.BlockedRequest::send);
            }
         });
      }
      // Kill the owners of keys 1-3 and shrink the view to the surviving partition
      crashCacheManagers(manager(2), manager(3));
      installNewView(manager(0), manager(1));

      invokeOperation(cache(0), operation, keys);
      // Don't do the second check if first operation modified the data
      if (!write) {
         invokeOperation(cache(1), operation, keys);
      }
   }

   /**
    * Runs the operation and checks that exactly the three surviving values are returned;
    * the fully-lost key (index 3) must be absent.
    */
   private void invokeOperation(Cache<Object, Object> cache, BiFunction<Cache<Object, Object>, Collection<?>, Map<?, ?>> operation, List<Object> keys) {
      Map<?, ?> result = operation.apply(cache, keys);
      assertEquals("value0", result.get(keys.get(0)));
      assertEquals("value1", result.get(keys.get(1)));
      assertEquals("value2", result.get(keys.get(2)));
      assertEquals(null, result.get(keys.get(3)));
      assertEquals(result.toString(), 3, result.size());
   }

   // Each static helper below performs one operation per key and collects the non-null results,
   // so invokeOperation can verify them uniformly.

   private static Map<?, ?> get(Cache<Object, Object> cache, Collection<?> keys) {
      Map<Object, Object> map = new HashMap<>();
      for (Object key : keys) {
         Object value = cache.get(key);
         if (value != null) {
            map.put(key, value);
         }
      }
      return map;
   }

   private static Map<?, ?> getAll(Cache<Object, Object> cache, Collection<?> keys) {
      return cache.getAdvancedCache().getAll(new HashSet<>(keys));
   }

   // Write operations return the PREVIOUS values, which is what gets verified
   private static Map<?, ?> put(Cache<Object, Object> cache, Collection<?> keys) {
      Map<Object, Object> map = new HashMap<>();
      int i = 0;
      for (Object key : keys) {
         Object value = cache.put(key, "other" + (i++));
         if (value != null) {
            map.put(key, value);
         }
      }
      return map;
   }

   private static Map<?, ?> putMap(Cache<Object, Object> cache, Collection<?> keys) {
      Map<Object, Object> writeMap = new HashMap<>();
      int i = 0;
      for (Object key : keys) {
         writeMap.put(key, "other" + (i++));
      }
      return cache.getAdvancedCache().getAndPutAll(writeMap);
   }

   private static Map<?, ?> remove(Cache<Object, Object> cache, Collection<?> keys) {
      Map<Object, Object> map = new HashMap<>();
      for (Object key : keys) {
         Object value = cache.remove(key);
         if (value != null) {
            map.put(key, value);
         }
      }
      return map;
   }

   private static Map<?, ?> replace(Cache<Object, Object> cache, Collection<?> keys) {
      Map<Object, Object> map = new HashMap<>();
      int i = 0;
      for (Object key : keys) {
         Object value = cache.replace(key, "other" + (i++));
         if (value != null) {
            map.put(key, value);
         }
      }
      return map;
   }

   // Functional API variants: identity function makes the read-write forms behave as pure reads
   private static Map<?, ?> read(Cache<Object, Object> cache, Collection<?> keys) {
      ReadOnlyMap<Object, Object> ro = ReadOnlyMapImpl.create(FunctionalMapImpl.create(cache.getAdvancedCache()));
      Map<Object, Object> map = new HashMap<>();
      for (Object key : keys) {
         ro.eval(key, MarshallableFunctions.identity()).join().find().ifPresent(value -> map.put(key, value));
      }
      return map;
   }

   private static Map<?, ?> readMany(Cache<Object, Object> cache, Collection<?> keys) {
      ReadOnlyMap<Object, Object> ro = ReadOnlyMapImpl.create(FunctionalMapImpl.create(cache.getAdvancedCache()));
      return ro.evalMany(new HashSet<>(keys), MarshallableFunctions.identity())
               .filter(view -> view.find().isPresent())
               .collect(Collectors.toMap(ReadEntryView::key, ReadEntryView::get));
   }

   private static Map<?, ?> readWrite(Cache<Object, Object> cache, Collection<?> keys) {
      ReadWriteMap<Object, Object> rw = ReadWriteMapImpl.create(FunctionalMapImpl.create(cache.getAdvancedCache()));
      Map<Object, Object> map = new HashMap<>();
      for (Object key : keys) {
         rw.eval(key, MarshallableFunctions.identity()).join().find().ifPresent(value -> map.put(key, value));
      }
      return map;
   }

   private static Map<?, ?> readWriteMany(Cache<Object, Object> cache, Collection<?> keys) {
      ReadWriteMap<Object, Object> ro = ReadWriteMapImpl.create(FunctionalMapImpl.create(cache.getAdvancedCache()));
      return ro.evalMany(new HashSet<>(keys), MarshallableFunctions.identity())
               .filter(view -> view.find().isPresent())
               .collect(Collectors.toMap(ReadWriteEntryView::key, ReadWriteEntryView::get));
   }

   /**
    * StateTransferLock wrapper that completes {@code transactionDataRequestFuture} the first time
    * any command waits for transaction data of the post-crash topology — the signal used by
    * {@link #test} to release the blocked TopologyUpdateCommands.
    */
   private static class UnblockingStateTransferLock extends DelegatingStateTransferLock {
      private final int topologyId;
      private final CompletableFuture<Void> transactionDataRequestFuture;

      public UnblockingStateTransferLock(StateTransferLock delegate, int topologyId,
                                         CompletableFuture<Void> transactionDataRequestFuture) {
         super(delegate);
         this.topologyId = topologyId;
         this.transactionDataRequestFuture = transactionDataRequestFuture;
      }

      @Override
      public CompletionStage<Void> transactionDataFuture(int expectedTopologyId) {
         if (expectedTopologyId >= topologyId) {
            log.tracef("Completing future for transaction data request with topology %d", expectedTopologyId);
            transactionDataRequestFuture.complete(null);
         }
         return super.transactionDataFuture(expectedTopologyId);
      }
   }
}
| 13,263
| 40.842271
| 152
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ReplCommandRetryTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.findInterceptor;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.BaseCustomAsyncInterceptor;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.interceptors.locking.PessimisticLockingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.topology.CacheTopology;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.ReplicatedControlledConsistentHashFactory;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Test that commands are properly retried during/after state transfer.
 *
 * A DelayInterceptor blocks one command class on each node; joining new nodes between unblocks
 * forces the originator to retry on the new topology, and the per-node invocation counters
 * verify exactly how many times each node saw the command.
 *
 * @author Dan Berindei
 * @since 7.2
 */
@Test(groups = "functional", testName = "statetransfer.ReplCommandRetryTest")
@CleanupAfterMethod
public class ReplCommandRetryTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() {
      // do nothing, each test will create its own cache managers
   }

   /**
    * REPL_SYNC configuration with a single segment whose primary owner is always the coordinator,
    * plus a {@link DelayInterceptor} that blocks {@code commandToBlock}.
    *
    * @param lockingMode    {@code null} for a non-transactional cache
    * @param commandToBlock the command class the DelayInterceptor should hold
    * @param isOriginator   LockControlCommand must be intercepted before the pessimistic locking
    *                       interceptor on remote nodes, hence the different placement
    */
   private ConfigurationBuilder buildConfig(LockingMode lockingMode, Class<?> commandToBlock, boolean isOriginator) {
      ConfigurationBuilder configurationBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, lockingMode != null);
      configurationBuilder.transaction().lockingMode(lockingMode);
      // The coordinator will always be the primary owner
      configurationBuilder.clustering().hash().numSegments(1)
            .consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
      configurationBuilder.clustering().remoteTimeout(15000);
      configurationBuilder.clustering().stateTransfer().fetchInMemoryState(true);
      if (commandToBlock == LockControlCommand.class && !isOriginator) {
         configurationBuilder.customInterceptors().addInterceptor()
               .before(PessimisticLockingInterceptor.class).interceptor(new DelayInterceptor(commandToBlock));
      } else {
         configurationBuilder.customInterceptors().addInterceptor()
               .after(EntryWrappingInterceptor.class).interceptor(new DelayInterceptor(commandToBlock));
      }
      configurationBuilder.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      return configurationBuilder;
   }

   public void testRetryAfterJoinNonTransactional() throws Exception {
      EmbeddedCacheManager cm1 = addClusterEnabledCacheManager(buildConfig(null, PutKeyValueCommand.class, true));
      final Cache<Object, Object> c1 = cm1.getCache();
      DelayInterceptor di1 = findInterceptor(c1, DelayInterceptor.class);
      int initialTopologyId = c1.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();

      EmbeddedCacheManager cm2 = addClusterEnabledCacheManager(buildConfig(null, PutKeyValueCommand.class, false));
      final Cache<Object, Object> c2 = cm2.getCache();
      DelayInterceptor di2 = findInterceptor(c2, DelayInterceptor.class);
      // each join advances the topology id by 4 (rebalance phases)
      waitForStateTransfer(initialTopologyId + 4, c1, c2);

      Future<Object> f = fork(() -> {
         log.tracef("Initiating a put command on %s", c1);
         c1.put("k", "v");
         return null;
      });

      // The command is replicated to c2, and blocks in the DelayInterceptor on c2
      di2.waitUntilBlocked(1);

      // c3 joins, topology id changes
      EmbeddedCacheManager cm3 = addClusterEnabledCacheManager(buildConfig(null, PutKeyValueCommand.class, false));
      Cache<Object, Object> c3 = cm3.getCache();
      DelayInterceptor di3 = findInterceptor(c3, DelayInterceptor.class);
      waitForStateTransfer(initialTopologyId + 8, c1, c2, c3);

      // Unblock the replicated command on c2.
      log.tracef("Triggering retry 1");
      di2.unblock(1);

      // c2 will return UnsureResponse, and c1 will retry the command.
      // c1 will send the command to c2 and c3, blocking on both in the DelayInterceptor
      di2.waitUntilBlocked(2);
      di3.waitUntilBlocked(1);

      // Unblock the command with the new topology id on c2
      di2.unblock(2);

      // c4 joins, topology id changes
      EmbeddedCacheManager cm4 = addClusterEnabledCacheManager(buildConfig(null, PutKeyValueCommand.class, false));
      Cache<Object, Object> c4 = cm4.getCache();
      DelayInterceptor di4 = findInterceptor(c4, DelayInterceptor.class);
      waitForStateTransfer(initialTopologyId + 12, c1, c2, c3, c4);

      // Unblock the command with the new topology id on c3.
      log.tracef("Triggering retry 2");
      di3.unblock(1);

      // c3 will send an UnsureResponse, and c1 will retry the command.
      // c1 will send the command to c2, c3, and c4, blocking everywhere in the DelayInterceptor
      // Unblock every node except c1
      di2.unblock(3);
      di3.unblock(2);
      di4.unblock(1);

      // Now c1 blocks
      di1.unblock(1);

      log.tracef("Waiting for the put command to finish on %s", c1);
      Object retval = f.get(10, TimeUnit.SECONDS);
      log.tracef("Put command finished on %s", c1);
      assertNull(retval);

      // 1 for the last retry
      assertEquals(1, di1.getCounter());
      // 1 for the initial invocation + 1 for each retry
      assertEquals(3, di2.getCounter());
      // 1 for each retry
      assertEquals(2, di3.getCounter());
      // just the last retry
      assertEquals(1, di4.getCounter());
   }

   public void testRetryAfterJoinLockControlCommand() throws Exception {
      testRetryAfterJoinTransactional(LockingMode.PESSIMISTIC, LockControlCommand.class);
   }

   public void testRetryAfterJoinOnePhasePrepareCommand() throws Exception {
      testRetryAfterJoinTransactional(LockingMode.PESSIMISTIC, PrepareCommand.class);
   }

   public void testRetryAfterJoinTwoPhasePrepareCommand() throws Exception {
      testRetryAfterJoinTransactional(LockingMode.OPTIMISTIC, PrepareCommand.class);
   }

   public void testRetryAfterJoinCommitCommand() throws Exception {
      testRetryAfterJoinTransactional(LockingMode.OPTIMISTIC, CommitCommand.class);
   }

   private void testRetryAfterJoinTransactional(LockingMode lockingMode, Class<?> commandClass) throws Exception {
      EmbeddedCacheManager cm1 = addClusterEnabledCacheManager(buildConfig(lockingMode, commandClass, false));
      final Cache<Object, Object> c1 = cm1.getCache();
      DelayInterceptor di1 = findInterceptor(c1, DelayInterceptor.class);
      int initialTopologyId = c1.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();

      EmbeddedCacheManager cm2 = addClusterEnabledCacheManager(buildConfig(lockingMode, commandClass, true));
      final Cache<String, String> c2 = cm2.getCache();
      DelayInterceptor di2 = findInterceptor(c2, DelayInterceptor.class);
      waitForStateTransfer(initialTopologyId + 4, c1, c2);

      Future<Object> f = fork(() -> {
         // The LockControlCommand wouldn't be replicated if we initiated the transaction on the primary owner (c1)
         log.tracef("Initiating a transaction on backup owner %s", c2);
         c2.put("k", "v");
         return null;
      });

      // The prepare command is replicated to cache c1, and it blocks in the DelayInterceptor
      di1.waitUntilBlocked(1);

      // c3 joins, topology id changes
      EmbeddedCacheManager cm3 = addClusterEnabledCacheManager(buildConfig(lockingMode, commandClass, false));
      // Parameterized type instead of the raw Cache the original used
      Cache<Object, Object> c3 = cm3.getCache();
      DelayInterceptor di3 = findInterceptor(c3, DelayInterceptor.class);
      waitForStateTransfer(initialTopologyId + 8, c1, c2, c3);

      // Unblock the replicated command on c1.
      // c1 will return an UnsureResponse, and c2 will retry (1)
      log.tracef("Triggering retry 1 from node %s", c1);
      di1.unblock(1);

      // The prepare command will again block on c1 and c3
      di1.waitUntilBlocked(2);
      di3.waitUntilBlocked(1);

      // c4 joins, topology id changes
      EmbeddedCacheManager cm4 = addClusterEnabledCacheManager(buildConfig(lockingMode, commandClass, false));
      Cache<Object, Object> c4 = cm4.getCache();
      DelayInterceptor di4 = findInterceptor(c4, DelayInterceptor.class);
      waitForStateTransfer(initialTopologyId + 12, c1, c2, c3, c4);

      // Unblock the replicated command on c1
      di1.unblock(2);

      // Unblock the replicated command on c3, c2 will retry (2)
      log.tracef("Triggering retry 2 from %s", c3);
      di3.unblock(1);

      // Check that the c1, c3, and c4 all received the retried command
      di1.unblock(3);
      di3.unblock(2);
      di4.unblock(1);

      // Allow the command to finish on the originator (c2).
      log.tracef("Finishing tx on %s", c2);
      di2.unblock(1);

      log.tracef("Waiting for the transaction to finish on %s", c2);
      f.get(10, TimeUnit.SECONDS);
      log.tracef("Transaction finished on %s", c2);

      // AssertJUnit.assertEquals takes (expected, actual); the original had the arguments
      // swapped here (but not in the non-transactional test), producing misleading failure messages.
      // 1 for the initial call + 1 for each retry (2)
      assertEquals(3, di1.getCounter());
      // 1 for the last retry
      assertEquals(1, di2.getCounter());
      // 1 for each retry
      assertEquals(2, di3.getCounter());
      // 1 for the last retry
      assertEquals(1, di4.getCounter());
   }

   /** Waits until rebalancing has finished and every cache reports the expected topology id. */
   private void waitForStateTransfer(int expectedTopologyId, Cache<?, ?>... caches) {
      waitForNoRebalance(caches);
      for (Cache<?, ?> c : caches) {
         CacheTopology cacheTopology = c.getAdvancedCache().getDistributionManager().getCacheTopology();
         assertEquals(String.format("Wrong topology on cache %s, expected %d and got %s", c, expectedTopologyId, cacheTopology),
               expectedTopologyId, cacheTopology.getTopologyId());
      }
   }

   /**
    * Blocks the configured command class after (or before, for remote LockControlCommand) the
    * interception point, counting every invocation and parking each one on a CheckPoint until
    * the test explicitly unblocks it. State-transfer-originated invocations are not blocked.
    */
   class DelayInterceptor extends BaseCustomAsyncInterceptor {
      private final AtomicInteger counter = new AtomicInteger(0);
      private final CheckPoint checkPoint = new CheckPoint();
      private final Class<?> commandToBlock;

      public DelayInterceptor(Class<?> commandToBlock) {
         this.commandToBlock = commandToBlock;
      }

      public int getCounter() {
         return counter.get();
      }

      public void waitUntilBlocked(int count) throws TimeoutException, InterruptedException {
         String event = checkPoint.peek(5, SECONDS, "blocked_" + count + "_on_" + cache);
         assertEquals("blocked_" + count + "_on_" + cache, event);
      }

      public void unblock(int count) throws InterruptedException, TimeoutException, BrokenBarrierException {
         log.tracef("Unblocking command on cache %s", cache);
         checkPoint.awaitStrict("blocked_" + count + "_on_" + cache, 5, SECONDS);
         checkPoint.trigger("resume_" + count + "_on_" + cache);
      }

      @Override
      public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.isInTxScope() && !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
               doBlock(ctx, command);
            }
         });
      }

      @Override
      public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }

      @Override
      public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }

      @Override
      public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }

      private void doBlock(InvocationContext ctx, ReplicableCommand command) throws InterruptedException,
            TimeoutException {
         if (commandToBlock != command.getClass())
            return;

         log.tracef("Delaying command %s originating from %s", command, ctx.getOrigin());
         // int, not Integer: the original autoboxed here for no reason
         int myCount = counter.incrementAndGet();
         checkPoint.trigger("blocked_" + myCount + "_on_" + cache);
         checkPoint.awaitStrict("resume_" + myCount + "_on_" + cache, 15, SECONDS);
         log.tracef("Command unblocked: %s", command);
      }

      @Override
      public String toString() {
         return "DelayInterceptor{counter=" + counter + "}";
      }
   }
}
| 13,807
| 41.881988
| 128
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StaleTxWithCommitDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.fail;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.MagicKey;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.testng.annotations.Test;
/**
 * Reproduces a race where a pessimistic transaction is started before a rebalance and
 * finished (commit or rollback) while the joiner is still receiving transaction data:
 * the commit/rollback must block on the joiner until the transferred transaction is
 * registered there, and no stale locks/transactions may remain afterwards.
 */
@Test(testName = "lock.StaleTxWithCommitDuringStateTransferTest", groups = "functional")
@CleanupAfterMethod
public class StaleTxWithCommitDuringStateTransferTest extends MultipleCacheManagersTest {
   public static final String CACHE_NAME = "testCache";
   @Override
   protected void createCacheManagers() throws Throwable {
      // Two-node cluster; the test cache itself is defined per-test in doTest()
      createCluster(TestDataSCI.INSTANCE, new ConfigurationBuilder(), 2);
      waitForClusterToForm();
   }
   public void testCommit() throws Throwable {
      doTest(true);
   }
   public void testRollback() throws Throwable {
      doTest(false);
   }
   /**
    * Runs the scenario once.
    *
    * @param commit {@code true} to finish the transaction with commit,
    *               {@code false} to finish it with rollback
    */
   private void doTest(final boolean commit) throws Throwable {
      // Pessimistic DIST_SYNC cache; the joiner must not wait for initial state
      // so that cache 1 can start while the transfer is still blocked below.
      ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      cfg.clustering().cacheMode(CacheMode.DIST_SYNC)
            .stateTransfer().awaitInitialTransfer(false)
            .transaction().lockingMode(LockingMode.PESSIMISTIC);
      manager(0).defineConfiguration(CACHE_NAME, cfg.build());
      manager(1).defineConfiguration(CACHE_NAME, cfg.build());
      final CheckPoint checkpoint = new CheckPoint();
      final AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
      final TransactionManager tm0 = cache0.getTransactionManager();
      // Block state request commands on cache 0
      StateProvider stateProvider = TestingUtil.extractComponent(cache0, StateProvider.class);
      StateProvider spyProvider = spy(stateProvider);
      doAnswer(invocation -> {
         Object[] arguments = invocation.getArguments();
         Address source = (Address) arguments[0];
         int topologyId = (Integer) arguments[1];
         CompletionStage<?> result = (CompletionStage<?>) invocation.callRealMethod();
         // Delay completing the transaction-data response until the test triggers
         // the matching "resume_get_transactions_..." checkpoint.
         return result.thenApply(transactions -> {
            try {
               checkpoint.trigger("post_get_transactions_" + topologyId + "_from_" + source);
               checkpoint.awaitStrict("resume_get_transactions_" + topologyId + "_from_" + source, 10, SECONDS);
               return transactions;
            } catch (InterruptedException | TimeoutException e) {
               throw new TestException(e);
            }
         });
      }).when(spyProvider).getTransactionsForSegments(any(Address.class), anyInt(), any());
      TestingUtil.replaceComponent(cache0, StateProvider.class, spyProvider, true);
      // Start a transaction on cache 0, which will block on cache 1
      MagicKey key = new MagicKey("testkey", cache0);
      tm0.begin();
      cache0.put(key, "v0");
      final Transaction tx = tm0.suspend();
      // Start cache 1, but the tx data request will be blocked on cache 0
      DistributionManager dm0 = cache0.getDistributionManager();
      int initialTopologyId = dm0.getCacheTopology().getTopologyId();
      int rebalanceTopologyId = initialTopologyId + 1;
      AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);
      checkpoint.awaitStrict("post_get_transactions_" + rebalanceTopologyId + "_from_" + address(1), 10, SECONDS);
      // The commit/rollback command should be invoked on cache 1 and it should block until the tx is created there
      Future<Object> future = fork(() -> {
         tm0.resume(tx);
         if (commit) {
            tm0.commit();
         } else {
            tm0.rollback();
         }
         return null;
      });
      // Check that the rollback command is blocked on cache 1
      try {
         future.get(1, SECONDS);
         fail("Commit/Rollback command should have been blocked");
      } catch (TimeoutException e) {
         // expected;
      }
      // Let cache 1 receive the tx from cache 0.
      checkpoint.trigger("resume_get_transactions_" + rebalanceTopologyId + "_from_" + address(1));
      TestingUtil.waitForNoRebalance(caches(CACHE_NAME));
      // Wait for the tx finish
      future.get(10, SECONDS);
      // Check the key on all caches
      if (commit) {
         assertEquals("v0", TestingUtil.extractComponent(cache0, InternalDataContainer.class).get(key).getValue());
         assertEquals("v0", TestingUtil.extractComponent(cache1, InternalDataContainer.class).get(key).getValue());
      } else {
         assertNull(TestingUtil.extractComponent(cache0, InternalDataContainer.class).get(key));
         assertNull(TestingUtil.extractComponent(cache1, InternalDataContainer.class).get(key));
      }
      // Check for stale locks
      final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
      final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
      eventually(() -> tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0);
   }
}
| 6,149
| 42.309859
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/JoinInNewThreadTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import java.util.concurrent.Future;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.commons.test.TestResourceTracker;
import org.testng.annotations.Test;
/**
* Test that a node started in a different thread can join the cluster.
*
* @author Dan Berindei
* @since 10.0
*/
@Test(testName = "statetransfer.JoinInNewThreadTest", groups = "functional")
@CleanupAfterMethod
public class JoinInNewThreadTest extends MultipleCacheManagersTest {
   @Override
   protected void createCacheManagers() throws Throwable {
      // Intentionally empty: managers are created inside the test method itself.
   }
   /**
    * Starts a two-node cluster on the test thread, then adds a third node from a
    * forked thread and verifies it manages to join within the timeout.
    */
   public void testJoinInNewThread() throws Exception {
      TestResourceTracker.setThreadTestName(JoinInNewThreadTest.class.getName());
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.clustering()
            .cacheMode(CacheMode.REPL_SYNC)
            .stateTransfer().timeout(30, SECONDS);
      // Bring up the initial two members on the current thread
      addClusterEnabledCacheManager(builder);
      addClusterEnabledCacheManager(builder);
      waitForClusterToForm();
      // The third member joins from a freshly forked thread
      Future<Void> joinTask = fork(() -> {
         TestResourceTracker.testThreadStarted(this.getTestName());
         addClusterEnabledCacheManager(builder);
         waitForClusterToForm();
      });
      joinTask.get(30, SECONDS);
   }
}
| 1,527
| 31.510638
| 95
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferCacheLoaderFunctionalTest.java
|
package org.infinispan.statetransfer;
import static org.testng.Assert.assertEquals;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.data.DelayedMarshallingPojo;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "statetransfer.StateTransferCacheLoaderFunctionalTest")
public class StateTransferCacheLoaderFunctionalTest extends StateTransferFunctionalTest {
   // Monotonically increasing suffix; gives every DummyInMemoryStore a unique store name.
   int id;
   // When true, the next store created by createCacheManager() is flagged as shared.
   // ThreadLocal.withInitial replaces the verbose anonymous-subclass idiom.
   final ThreadLocal<Boolean> sharedCacheLoader = ThreadLocal.withInitial(() -> false);
   public StateTransferCacheLoaderFunctionalTest() {
      super("nbst-with-loader");
   }
   /**
    * Adds a fresh, preloading {@code DummyInMemoryStore} to the configuration before
    * delegating to the superclass; the store is shared iff {@link #sharedCacheLoader} is set.
    */
   @Override
   protected EmbeddedCacheManager createCacheManager(String cacheName) {
      configurationBuilder.persistence().clearStores();
      // increment the DIMCS store id
      DummyInMemoryStoreConfigurationBuilder dimcs = new DummyInMemoryStoreConfigurationBuilder(configurationBuilder.persistence());
      dimcs.storeName("store number " + id++);
      dimcs.shared(sharedCacheLoader.get()).preload(true);
      configurationBuilder.persistence().addStore(dimcs);
      // NOTE: a redundant no-op call to configurationBuilder.persistence() was removed here.
      return super.createCacheManager(cacheName);
   }
   /**
    * Writes the standard data set and then evicts it from memory, so subsequent
    * reads must go through the store.
    */
   @Override
   protected void writeInitialData(final Cache<Object, Object> c) {
      super.writeInitialData(c);
      c.evict(A_B_NAME);
      c.evict(A_B_AGE);
      c.evict(A_C_NAME);
      c.evict(A_C_AGE);
      c.evict(A_D_NAME);
      c.evict(A_D_AGE);
   }
   // Asserts the B and C entries are present in the cache's first store with the expected values.
   protected void verifyInitialDataOnLoader(Cache<Object, Object> c) throws Exception {
      DummyInMemoryStore l = TestingUtil.getFirstStore(c);
      assert l.contains(A_B_AGE);
      assert l.contains(A_B_NAME);
      assert l.contains(A_C_AGE);
      assert l.contains(A_C_NAME);
      assert l.loadEntry(A_B_AGE).getValue().equals(TWENTY);
      assert l.loadEntry(A_B_NAME).getValue().equals(JOE);
      assert l.loadEntry(A_C_AGE).getValue().equals(FORTY);
      assert l.loadEntry(A_C_NAME).getValue().equals(BOB);
   }
   // Asserts the cache holds no entries at all.
   protected void verifyNoData(Cache<Object, Object> c) {
      assert c.isEmpty() : "Cache should be empty!";
   }
   // Asserts none of the known keys are present in the cache's first store.
   protected void verifyNoDataOnLoader(Cache<Object, Object> c) throws Exception {
      DummyInMemoryStore l = TestingUtil.getFirstStore(c);
      assert !l.contains(A_B_AGE);
      assert !l.contains(A_B_NAME);
      assert !l.contains(A_C_AGE);
      assert !l.contains(A_C_NAME);
      assert !l.contains(A_D_AGE);
      assert !l.contains(A_D_NAME);
   }
   /**
    * With a shared store, a joining node receives in-memory state only: its own store
    * stays empty and (since nothing was in memory after the evictions) so does its
    * local data container.
    */
   public void testSharedLoader() throws Exception {
      try {
         sharedCacheLoader.set(true);
         Cache<Object, Object> c1 = createCacheManager(cacheName).getCache(cacheName);
         writeInitialData(c1);
         // starting the second cache would initialize an in-memory state transfer but not a persistent one since the loader is shared
         Cache<Object, Object> c2 = createCacheManager(cacheName).getCache(cacheName);
         TestingUtil.blockUntilViewsReceived(60000, c1, c2);
         TestingUtil.waitForNoRebalance(c1, c2);
         verifyInitialDataOnLoader(c1);
         verifyInitialData(c1);
         verifyNoDataOnLoader(c2);
         // There shouldn't be any data locally since there was no entries in memory and the shared loader doesn't
         // actually share entries
         verifyNoData(c2.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL));
      } finally {
         sharedCacheLoader.set(false);
      }
   }
   /**
    * Regression test for ISPN-2495: a cache whose preload is still running when another
    * node starts must block that node's cache start until state transfer completes, so
    * the second node ends up with all entries.
    */
   public void testInitialSlowPreload() throws Exception {
      // Test for ISPN-2495
      // Preload on cache on node 1 is slow and unfinished at the point, where cache on node 2 starts.
      // Node 2 requests state, got answer that no entries available. Since node 2 is not coordinator,
      // preload is ignored. At the end, node 1 contains REPL cache with all entries, node 2 has same cache without entries.
      try {
         sharedCacheLoader.set(true);
         EmbeddedCacheManager cm1 = createCacheManager(cacheName);
         Cache<Object, Object> cache1 = cm1.getCache(cacheName);
         verifyNoDataOnLoader(cache1);
         verifyNoData(cache1);
         // write initial data; DelayedMarshallingPojo makes (un)marshalling slow, simulating a slow preload
         cache1.put("A", new DelayedMarshallingPojo(0, 2000));
         cache1.put("B", new DelayedMarshallingPojo(0, 2000));
         cache1.put("C", new DelayedMarshallingPojo(0, 2000));
         assertEquals(cache1.size(), 3);
         cm1.stop();
         // this cache is only used to start networking
         final ConfigurationBuilder defaultConfigurationBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
         // now lets start cm and shortly after another cache manager
         final EmbeddedCacheManager cm2 = super.createCacheManager(cacheName);
         cm2.defineConfiguration("initialCache", defaultConfigurationBuilder.build());
         cm2.startCaches("initialCache");
         EmbeddedCacheManager cm3 = super.createCacheManager(cacheName);
         cm3.defineConfiguration("initialCache", defaultConfigurationBuilder.build());
         cm3.startCaches("initialCache");
         // networking is started and cluster has 2 members
         TestingUtil.blockUntilViewsReceived(60000, cm2.getCache("initialCache"), cm3.getCache("initialCache"));
         // now fork start of "slow" cache
         Future<Void> future = fork(() -> {
            cm2.startCaches(cacheName);
            return null;
         });
         // lets wait a bit, cache is started pon cm2, but preload is not finished
         TestingUtil.sleepThread(1000);
         // uncomment this to see failing test
         future.get(10, TimeUnit.SECONDS);
         // at this point node is not alone, so preload is not used
         // the start of the cache must be blocked until state transfer is finished
         cm3.startCaches(cacheName);
         assertEquals(cm3.getCache(cacheName).size(), 3);
      } finally {
         sharedCacheLoader.set(false);
      }
   }
}
| 6,414
| 39.345912
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ClusterTopologyManagerTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.sequence;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.Mocks;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InTransactionMode;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.TransactionMode;
import org.jgroups.protocols.DISCARD;
import org.testng.annotations.Test;
/**
 * Tests cluster topology recovery after network partitions (splits, merges, coordinator
 * departure) and abrupt node departures, checking that rebalancing finishes in bounded
 * time and that new nodes can still join afterwards. Partitions are simulated with the
 * JGroups DISCARD protocol plus manually installed views.
 */
@Test(groups = "functional", testName = "statetransfer.ClusterTopologyManagerTest")
@CleanupAfterMethod
public class ClusterTopologyManagerTest extends MultipleCacheManagersTest {
   public static final String CACHE_NAME = "testCache";
   private static final String OTHER_CACHE_NAME = "other_cache";
   private ConfigurationBuilder defaultConfig;
   // Caches on the three initial nodes
   private Cache<?, ?> c1, c2, c3;
   // DISCARD protocol instances for each node, used to simulate network partitions
   private DISCARD d1, d2, d3;
   @Override
   public Object[] factory() {
      return new Object[] {
            new ClusterTopologyManagerTest().cacheMode(CacheMode.DIST_SYNC).transactional(true),
      };
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      defaultConfig = getDefaultClusteredCacheConfig(cacheMode, transactional);
      createClusteredCaches(3, defaultConfig, new TransportFlags().withFD(true).withMerge(true), CACHE_NAME);
      c1 = cache(0, CACHE_NAME);
      c2 = cache(1, CACHE_NAME);
      c3 = cache(2, CACHE_NAME);
      d1 = TestingUtil.getDiscardForCache(c1.getCacheManager());
      d2 = TestingUtil.getDiscardForCache(c2.getCacheManager());
      d3 = TestingUtil.getDiscardForCache(c3.getCacheManager());
   }
   @Override
   protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
      NoOpGlobalConfigurationManager.amendCacheManager(cm);
   }
   // Isolates node 2 abruptly, waits for both partitions to stabilize, then verifies a
   // fourth node can join the majority partition for every cache.
   public void testNodeAbruptLeave() {
      // Create some more caches to trigger ISPN-2572
      ConfigurationBuilder cfg = defaultConfig;
      defineConfigurationOnAllManagers("cache2", cfg);
      defineConfigurationOnAllManagers("cache3", cfg);
      defineConfigurationOnAllManagers("cache4", cfg);
      defineConfigurationOnAllManagers("cache5", cfg);
      cache(0, "cache2");
      cache(1, "cache2");
      cache(0, "cache3");
      cache(2, "cache3");
      cache(1, "cache4");
      cache(2, "cache4");
      cache(0, "cache5");
      cache(1, "cache5");
      // create the partitions
      log.debugf("Splitting cluster");
      d3.discardAll(true);
      TestingUtil.installNewView(manager(0), manager(1));
      TestingUtil.installNewView(manager(2));
      // wait for the partitions to form
      long startTime = System.currentTimeMillis();
      TestingUtil.blockUntilViewsReceived(30000, false, c1, c2);
      TestingUtil.blockUntilViewsReceived(30000, false, c3);
      TestingUtil.waitForNoRebalance(c1, c2);
      TestingUtil.waitForNoRebalance(c3);
      TestingUtil.waitForNoRebalance(cache(0, "cache2"), cache(1, "cache2"));
      TestingUtil.waitForNoRebalance(cache(0, "cache3"));
      TestingUtil.waitForNoRebalance(cache(1, "cache4"));
      TestingUtil.waitForNoRebalance(cache(0, "cache5"), cache(1, "cache5"));
      long endTime = System.currentTimeMillis();
      log.debugf("Recovery took %s", Util.prettyPrintTime(endTime - startTime));
      assert endTime - startTime < 30000 : "Recovery took too long: " + Util.prettyPrintTime(endTime - startTime);
      // Check that a new node can join
      EmbeddedCacheManager newCm = addClusterEnabledCacheManager(new TransportFlags().withFD(true).withMerge(true));
      newCm.defineConfiguration(CACHE_NAME, defaultConfig.build());
      Cache<Object, Object> c4 = cache(3, CACHE_NAME);
      TestingUtil.blockUntilViewsReceived(30000, true, c1, c2, c4);
      TestingUtil.waitForNoRebalance(c1, c2, c4);
      newCm.defineConfiguration("cache2", defaultConfig.build());
      newCm.defineConfiguration("cache3", defaultConfig.build());
      newCm.defineConfiguration("cache4", defaultConfig.build());
      newCm.defineConfiguration("cache5", defaultConfig.build());
      cache(3, "cache2");
      cache(3, "cache3");
      cache(3, "cache4");
      cache(3, "cache5");
      TestingUtil.waitForNoRebalance(cache(0, "cache2"), cache(1, "cache2"), cache(3, "cache2"));
      TestingUtil.waitForNoRebalance(cache(0, "cache3"), cache(3, "cache3"));
      TestingUtil.waitForNoRebalance(cache(1, "cache4"), cache(3, "cache4"));
      TestingUtil.waitForNoRebalance(cache(0, "cache5"), cache(1, "cache5"), cache(3, "cache5"));
   }
   // Isolates the coordinator (node 0); the remaining partition must elect a new
   // coordinator, recover, and accept a new joiner.
   public void testClusterRecoveryAfterCoordLeave() {
      // create the partitions
      log.debugf("Splitting cluster");
      d1.discardAll(true);
      TestingUtil.installNewView(manager(0));
      TestingUtil.installNewView(manager(1), manager(2));
      // wait for the partitions to form
      long startTime = System.currentTimeMillis();
      TestingUtil.blockUntilViewsReceived(30000, false, c1);
      TestingUtil.blockUntilViewsReceived(30000, false, c2, c3);
      TestingUtil.waitForNoRebalance(c1);
      TestingUtil.waitForNoRebalance(c2, c3);
      long endTime = System.currentTimeMillis();
      log.debugf("Recovery took %s", Util.prettyPrintTime(endTime - startTime));
      assert endTime - startTime < 30000 : "Recovery took too long: " + Util.prettyPrintTime(endTime - startTime);
      // Check that a new node can join
      addClusterEnabledCacheManager(new TransportFlags().withFD(true).withMerge(true));
      manager(3).defineConfiguration(CACHE_NAME, defaultConfig.build());
      Cache<Object, Object> c4 = cache(3, CACHE_NAME);
      TestingUtil.blockUntilViewsReceived(30000, true, c2, c3, c4);
      TestingUtil.waitForNoRebalance(c2, c3, c4);
   }
   // Splits the cluster into three singleton partitions, merges them back, and verifies
   // the merged cluster recovers and accepts a new joiner.
   public void testClusterRecoveryAfterThreeWaySplit() {
      // create the partitions
      log.debugf("Splitting the cluster in three");
      d1.discardAll(true);
      d2.discardAll(true);
      d3.discardAll(true);
      TestingUtil.installNewView(manager(0));
      TestingUtil.installNewView(manager(1));
      TestingUtil.installNewView(manager(2));
      // wait for the partitions to form
      TestingUtil.blockUntilViewsReceived(30000, false, c1);
      TestingUtil.blockUntilViewsReceived(30000, false, c2);
      TestingUtil.blockUntilViewsReceived(30000, false, c3);
      TestingUtil.waitForNoRebalance(c1);
      TestingUtil.waitForNoRebalance(c2);
      TestingUtil.waitForNoRebalance(c3);
      // merge the remaining partitions
      log.debugf("Merging the cluster partitions");
      d1.discardAll(false);
      d2.discardAll(false);
      d3.discardAll(false);
      // wait for the merged cluster to form
      long startTime = System.currentTimeMillis();
      TestingUtil.blockUntilViewsReceived(60000, c1, c2, c3);
      TestingUtil.waitForNoRebalance(c1, c2, c3);
      long endTime = System.currentTimeMillis();
      log.debugf("Merge took %s", Util.prettyPrintTime(endTime - startTime));
      assert endTime - startTime < 30000 : "Merge took too long: " + Util.prettyPrintTime(endTime - startTime);
      // Check that a new node can join
      addClusterEnabledCacheManager(new TransportFlags().withFD(true).withMerge(true));
      manager(3).defineConfiguration(CACHE_NAME, defaultConfig.build());
      Cache<Object, Object> c4 = cache(3, CACHE_NAME);
      TestingUtil.blockUntilViewsReceived(30000, true, c1, c2, c3, c4);
      TestingUtil.waitForNoRebalance(c1, c2, c3, c4);
   }
   // Same three-way split, but the coordinator is killed before the merge; the two
   // surviving partitions must merge and accept a new joiner.
   public void testClusterRecoveryAfterSplitAndCoordLeave() {
      // create the partitions
      log.debugf("Splitting the cluster in three");
      d1.discardAll(true);
      d2.discardAll(true);
      d3.discardAll(true);
      TestingUtil.installNewView(manager(0));
      TestingUtil.installNewView(manager(1));
      TestingUtil.installNewView(manager(2));
      // wait for the partitions to form
      TestingUtil.blockUntilViewsReceived(30000, false, c1);
      TestingUtil.blockUntilViewsReceived(30000, false, c2);
      TestingUtil.blockUntilViewsReceived(30000, false, c3);
      TestingUtil.waitForNoRebalance(c1);
      TestingUtil.waitForNoRebalance(c2);
      TestingUtil.waitForNoRebalance(c3);
      // kill the coordinator
      manager(0).stop();
      // merge the two remaining partitions
      log.debugf("Merging the cluster partitions");
      d2.discardAll(false);
      d3.discardAll(false);
      // wait for the merged cluster to form
      long startTime = System.currentTimeMillis();
      TestingUtil.blockUntilViewsReceived(30000, c2, c3);
      TestingUtil.waitForNoRebalance(c2, c3);
      long endTime = System.currentTimeMillis();
      log.debugf("Merge took %s", Util.prettyPrintTime(endTime - startTime));
      assert endTime - startTime < 30000 : "Merge took too long: " + Util.prettyPrintTime(endTime - startTime);
      // Check that a new node can join
      addClusterEnabledCacheManager(new TransportFlags().withFD(true).withMerge(true));
      manager(3).defineConfiguration(CACHE_NAME, defaultConfig.build());
      Cache<Object, Object> c4 = cache(3, CACHE_NAME);
      TestingUtil.blockUntilViewsReceived(30000, true, c2, c3, c4);
      TestingUtil.waitForNoRebalance(c2, c3, c4);
   }
   // A node joins the future merge coordinator's partition while its REBALANCE_START is
   // blocked, then the full cluster merges; recovery must complete and let another node join.
   public void testClusterRecoveryWithRebalance() throws Exception {
      // Compute the merge coordinator by sorting the JGroups addresses, the same way MERGE2/3 do
      List<Address> members = new ArrayList<>(manager(0).getMembers());
      Collections.sort(members);
      Address mergeCoordAddress = members.get(0);
      log.debugf("The merge coordinator will be %s", mergeCoordAddress);
      EmbeddedCacheManager mergeCoordManager = manager(mergeCoordAddress);
      int mergeCoordIndex = cacheManagers.indexOf(mergeCoordManager);
      // create the partitions
      log.debugf("Splitting the cluster in three");
      d1.discardAll(true);
      d2.discardAll(true);
      d3.discardAll(true);
      TestingUtil.installNewView(manager(0));
      TestingUtil.installNewView(manager(1));
      TestingUtil.installNewView(manager(2));
      // wait for the coordinator to be separated (don't care about the others)
      TestingUtil.blockUntilViewsReceived(30000, false, c1);
      TestingUtil.blockUntilViewsReceived(30000, false, c2);
      TestingUtil.blockUntilViewsReceived(30000, false, c3);
      TestingUtil.waitForNoRebalance(c1);
      TestingUtil.waitForNoRebalance(c2);
      TestingUtil.waitForNoRebalance(c3);
      // Disable DISCARD *only* on the merge coordinator
      if (mergeCoordIndex == 0) d1.discardAll(false);
      if (mergeCoordIndex == 1) d2.discardAll(false);
      if (mergeCoordIndex == 2) d3.discardAll(false);
      int viewIdAfterSplit = mergeCoordManager.getTransport().getViewId();
      final CheckPoint checkpoint = new CheckPoint();
      blockRebalanceStart(mergeCoordManager, checkpoint, 2);
      EmbeddedCacheManager cm4 = addClusterEnabledCacheManager(new TransportFlags().withFD(true).withMerge(true));
      blockRebalanceStart(cm4, checkpoint, 2);
      // Force the initialization of the transport
      cm4.defineConfiguration(CACHE_NAME, defaultConfig.build());
      cm4.defineConfiguration(OTHER_CACHE_NAME, defaultConfig.build());
      cm4.getCache(OTHER_CACHE_NAME);
      TestingUtil.blockUntilViewsReceived(30000, manager(mergeCoordIndex), cm4);
      Future<Cache<Object,Object>> cacheFuture = fork(() -> cm4.getCache(CACHE_NAME));
      log.debugf("Waiting for the REBALANCE_START command to reach the merge coordinator");
      checkpoint.awaitStrict("rebalance_" + Arrays.asList(mergeCoordAddress, cm4.getAddress()), 10, SECONDS);
      // merge the partitions
      log.debugf("Merging the cluster partitions");
      d1.discardAll(false);
      d2.discardAll(false);
      d3.discardAll(false);
      // wait for the JGroups merge
      long startTime = System.currentTimeMillis();
      TestingUtil.blockUntilViewsReceived(30000, cacheManagers);
      TestingUtil.waitForNoRebalance(caches(CACHE_NAME));
      // unblock the REBALANCE_START command
      log.debugf("Unblocking the REBALANCE_START command on the coordinator");
      checkpoint.triggerForever("merge");
      // wait for the 4th cache to finish joining
      Cache<Object, Object> c4 = cacheFuture.get(30, SECONDS);
      TestingUtil.waitForNoRebalance(c1, c2, c3, c4);
      long endTime = System.currentTimeMillis();
      log.debugf("Merge took %s", Util.prettyPrintTime(endTime - startTime));
      assert endTime - startTime < 30000 : "Merge took too long: " + Util.prettyPrintTime(endTime - startTime);
      // Check that another node can join
      EmbeddedCacheManager cm5 = addClusterEnabledCacheManager(new TransportFlags().withFD(true).withMerge(true));
      cm5.defineConfiguration(CACHE_NAME, defaultConfig.build());
      Cache<Object, Object> c5 = cm5.getCache(CACHE_NAME);
      TestingUtil.blockUntilViewsReceived(30000, true, c1, c2, c3, c4, c5);
      TestingUtil.waitForNoRebalance(c1, c2, c3, c4, c5);
   }
   /**
    * Replaces the manager's {@code LocalTopologyManager} with a spy that, for rebalances
    * with exactly {@code numMembers} members, delays handling REBALANCE_START until the
    * "merge" checkpoint is triggered. Every rebalance also triggers a
    * "rebalance_&lt;members&gt;" checkpoint so tests can wait for it.
    */
   protected void blockRebalanceStart(final EmbeddedCacheManager manager, final CheckPoint checkpoint, final int numMembers) {
      final LocalTopologyManager localTopologyManager = TestingUtil.extractGlobalComponent(manager,
            LocalTopologyManager.class);
      LocalTopologyManager spyLocalTopologyManager = spy(localTopologyManager);
      doAnswer(invocation -> {
         CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
         List<Address> members = topology.getMembers();
         checkpoint.trigger("rebalance_" + members);
         if (members.size() == numMembers) {
            log.debugf("Blocking the REBALANCE_START command with members %s on %s", members, manager.getAddress());
            return sequence(checkpoint.future("merge", 30, SECONDS, testExecutor()),
                  () -> Mocks.callRealMethod(invocation));
         }
         return invocation.callRealMethod();
      }).when(spyLocalTopologyManager).handleRebalance(eq(CACHE_NAME), any(CacheTopology.class), anyInt(),
            any(Address.class));
      TestingUtil.replaceComponent(manager, LocalTopologyManager.class, spyLocalTopologyManager, true);
   }
   /*
    * Test that cluster recovery can finish if one of the members leaves before sending the status response.
    */
   public void testAbruptLeaveAfterGetStatus() throws TimeoutException, InterruptedException {
      // Block the GET_STATUS command on node 2
      final LocalTopologyManager localTopologyManager2 = TestingUtil.extractGlobalComponent(manager(1),
            LocalTopologyManager.class);
      final CheckPoint checkpoint = new CheckPoint();
      LocalTopologyManager spyLocalTopologyManager2 = spy(localTopologyManager2);
      final CacheTopology initialTopology = localTopologyManager2.getCacheTopology(CACHE_NAME);
      log.debugf("Starting with topology %d", initialTopology.getTopologyId());
      doAnswer(invocation -> {
         int viewId = (Integer) invocation.getArguments()[0];
         checkpoint.trigger("GET_STATUS_" + viewId);
         log.debugf("Blocking the GET_STATUS command on the new coordinator");
         checkpoint.awaitStrict("3 left", 10, SECONDS);
         return invocation.callRealMethod();
      }).when(spyLocalTopologyManager2).handleStatusRequest(anyInt());
      // There should be no topology update or rebalance with 2 members
      CompletableFuture<Void> update2MembersFuture = new CompletableFuture<>();
      doAnswer(invocation -> {
         CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
         if (topology.getMembers().size() == 2) {
            log.debugf("Found CH update with 2 mem %s", topology);
            update2MembersFuture.completeExceptionally(new TestException());
         }
         return invocation.callRealMethod();
      }).when(spyLocalTopologyManager2).handleTopologyUpdate(eq(CACHE_NAME), any(CacheTopology.class),
            any(AvailabilityMode.class), anyInt(), any(Address.class));
      doAnswer(invocation -> {
         CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
         if (topology.getMembers().size() == 2) {
            log.debugf("Discarding rebalance command %s", topology);
            update2MembersFuture.completeExceptionally(new TestException());
         }
         return invocation.callRealMethod();
      }).when(spyLocalTopologyManager2).handleRebalance(eq(CACHE_NAME), any(CacheTopology.class), anyInt(),
            any(Address.class));
      TestingUtil.replaceComponent(manager(1), LocalTopologyManager.class, spyLocalTopologyManager2, true);
      // Node 1 (the coordinator) dies. Node 2 becomes coordinator and tries to call GET_STATUS
      killNode(manager(0), new EmbeddedCacheManager[]{manager(1), manager(2)});
      // Wait for the GET_STATUS command and stop node 3 abruptly
      int viewId = manager(1).getTransport().getViewId();
      checkpoint.awaitStrict("GET_STATUS_" + viewId, 10, SECONDS);
      killNode(manager(2), new EmbeddedCacheManager[]{manager(1)});
      checkpoint.triggerForever("3 left");
      // Wait for node 2 to install a view with only itself and unblock the GET_STATUS command
      TestingUtil.waitForNoRebalance(c2);
      // Check there was no topology update or rebalance with 2 members
      update2MembersFuture.complete(null);
      update2MembersFuture.join();
   }
   // Stops nodeToKill after isolating it with a manually installed view, then installs
   // the surviving view on the remaining nodes.
   // NOTE(review): this always toggles d1's DISCARD, even when the node being killed is
   // not node 0 — presumably benign because views are installed manually, but verify.
   private void killNode(EmbeddedCacheManager nodeToKill, EmbeddedCacheManager[] nodesToKeep) {
      log.debugf("Killing node %s", nodeToKill);
      d1.discardAll(true);
      TestingUtil.installNewView(nodeToKill);
      nodeToKill.stop();
      TestingUtil.installNewView(nodesToKeep);
      TestingUtil.blockUntilViewsReceived(30000, false, nodesToKeep);
   }
   // Nodes 2 and 3 leave while node 2 is still serving a GET_TRANSACTIONS request;
   // the remaining node must still finish recovery in bounded time.
   @InTransactionMode(TransactionMode.TRANSACTIONAL)
   public void testLeaveDuringGetTransactions() throws InterruptedException, TimeoutException {
      final CheckPoint checkpoint = new CheckPoint();
      StateProvider stateProvider = TestingUtil.extractComponent(c2, StateProvider.class);
      StateProvider spyStateProvider = spy(stateProvider);
      doAnswer(invocation -> {
         int topologyId = (Integer) invocation.getArguments()[1];
         checkpoint.trigger("GET_TRANSACTIONS");
         log.debugf("Blocking the GET_TRANSACTIONS(%d) command on the %s", topologyId, c2);
         checkpoint.awaitStrict("LEAVE", 10, SECONDS);
         return invocation.callRealMethod();
      }).when(spyStateProvider).getTransactionsForSegments(any(Address.class), anyInt(), any());
      TestingUtil.replaceComponent(c2, StateProvider.class, spyStateProvider, true);
      long startTime = System.currentTimeMillis();
      manager(2).stop();
      checkpoint.awaitStrict("GET_TRANSACTIONS", 10, SECONDS);
      manager(1).stop();
      checkpoint.trigger("LEAVE");
      TestingUtil.blockUntilViewsReceived(30000, false, c1);
      TestingUtil.waitForNoRebalance(c1);
      long endTime = System.currentTimeMillis();
      log.debugf("Recovery took %s", Util.prettyPrintTime(endTime - startTime));
      assert endTime - startTime < 30000 : "Recovery took too long: " + Util.prettyPrintTime(endTime - startTime);
   }
}
| 20,414
| 45.292517
| 126
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferLargeObjectTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.data.Value;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Tester for https://jira.jboss.org/browse/ISPN-654.
*
* @author Mircea.Markus@jboss.com
* @since 4.2
*/
@Test(groups = "functional", testName = "statetransfer.StateTransferLargeObjectTest")
public class StateTransferLargeObjectTest extends MultipleCacheManagersTest {
   private static final Log log = LogFactory.getLog(StateTransferLargeObjectTest.class);
   // Caches of the four cluster members, captured after cluster creation.
   private Cache<Integer, Object> cache0;
   private Cache<Integer, Object> cache1;
   private Cache<Integer, Object> cache2;
   private Cache<Integer, Object> cache3;
   // Expected value for every key, used to validate reads after a node leaves.
   private Map<Integer, Object> expectedByKey;
   private final Random random = new Random();
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      builder.clustering().cacheMode(CacheMode.DIST_SYNC);
      builder.l1().disable();
      builder.clustering().stateTransfer().fetchInMemoryState(true);
      builder.locking().useLockStriping(false);
      builder.clustering().hash().numOwners(3).numSegments(60).stateTransfer().chunkSize(50);
      createCluster(TestDataSCI.INSTANCE, builder, 4);
      cache0 = cache(0);
      cache1 = cache(1);
      cache2 = cache(2);
      cache3 = cache(3);
      waitForClusterToForm();
      log.debug("Rehash is complete!");
      expectedByKey = new HashMap<>();
   }
   /**
    * Loads 500 large values, stops the fourth node, and verifies every key is still
    * readable with the expected value on the surviving nodes.
    */
   public void testForFailure() {
      final int num = 500;
      for (int key = 0; key < num; key++) {
         Object payload = createBigObject(key, "prefix");
         expectedByKey.put(key, payload);
         cache0.put(key, payload);
      }
      log.debug("About to stop " + cache3.getAdvancedCache().getRpcManager().getAddress());
      cache3.stop();
      cache3.getCacheManager().stop();
      log.debug("Cache stopped async!");
      for (int key = 0; key < num; key++) {
         log.debug("----Running a get on " + key);
         assertValue(key, cache0.get(key));
         assertValue(key, cache1.get(key));
         assertValue(key, cache2.get(key));
      }
      log.debug("Before stopping cache managers!");
      TestingUtil.killCacheManagers(manager(2));
      log.debug("2 killed");
      TestingUtil.killCacheManagers(manager(1));
      log.debug("1 killed");
      TestingUtil.killCacheManagers(manager(0));
      log.debug("0 killed");
   }
   // The value read for a key must be a non-null Value equal to the one stored.
   private void assertValue(int key, Object actual) {
      assertNotNull(actual);
      assertTrue(actual instanceof Value);
      assertEquals(actual, expectedByKey.get(key));
   }
   // Builds a Value whose name encodes the key and whose payload is a large random string.
   private Object createBigObject(int num, String prefix) {
      String name = "[" + num + "|" + prefix + "|" + (num * 3) + "|" + (num * 7) + "]";
      return new Value(name, generateLargeString());
   }
   private String generateLargeString() {
      byte[] raw = new byte[20 * 100];
      random.nextBytes(raw);
      return new String(raw);
   }
   @AfterMethod
   @Override
   protected void clearContent() {
      // Intentionally empty: the test tears down the cluster itself.
   }
}
| 3,604
| 31.1875
| 111
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/TxReplay2Test.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInboundRpc;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInterceptor;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import java.util.Arrays;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import jakarta.transaction.Status;
import org.infinispan.Cache;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.impl.CallInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransaction;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.ByteString;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
 * Tests that a transaction is replayed only once if the commit command is received twice.
 *
 * @author Dan Berindei
 * @since 7.0
 */
@Test(groups = "functional", testName = "statetransfer.TxReplay2Test")
public class TxReplay2Test extends MultipleCacheManagersTest {
   private static final String VALUE = "value";
   // Pins the single segment's owners to nodes 0 (primary), 1 and 2; node 3 becomes the new backup
   // after node 1 is killed.
   ControlledConsistentHashFactory consistentHashFactory = new ControlledConsistentHashFactory.Default(0, 1, 2);
   public void testReplay() throws Exception {
      // Two logical threads: "tx" drives the real transaction, "sim" injects a duplicate CommitCommand.
      final StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("tx", "tx:before_prepare_replay", "tx:resume_prepare_replay", "tx:mark_tx_completed");
      sequencer.logicalThread("sim", "sim:before_extra_commit", "sim:during_extra_commit", "sim:after_extra_commit");
      sequencer.order("tx:before_prepare_replay", "sim:before_extra_commit");
      sequencer.order("sim:during_extra_commit", "tx:resume_prepare_replay");
      sequencer.order("sim:after_extra_commit", "tx:mark_tx_completed");
      final Object key = "key";
      assertEquals(Arrays.asList(address(0), address(1), address(2)), cacheTopology(0).getDistribution(key).writeOwners());
      Cache<Object, Object> primaryOwnerCache = cache(0);
      final Cache<Object, Object> newBackupOwnerCache = cache(3);
      // Count remote prepare/commit/rollback commands on the nodes of interest.
      final CountingInterceptor newBackupCounter = CountingInterceptor.inject(newBackupOwnerCache);
      final CountingInterceptor primaryCounter = CountingInterceptor.inject(primaryOwnerCache);
      final CountingInterceptor oldBackup2Counter = CountingInterceptor.inject(cache(2));
      // Pause the replayed prepare on the new backup so the duplicate commit can arrive while it is in flight.
      advanceOnInterceptor(sequencer, newBackupOwnerCache, CallInterceptor.class,
            matchCommand(PrepareCommand.class).matchCount(0).build())
            .before("tx:before_prepare_replay", "tx:resume_prepare_replay");
      advanceOnInterceptor(sequencer, newBackupOwnerCache, TransactionSynchronizerInterceptor.class,
            matchCommand(CommitCommand.class).matchCount(1).build())
            .before("sim:during_extra_commit");
      advanceOnInboundRpc(sequencer, newBackupOwnerCache, matchCommand(TxCompletionNotificationCommand.class).build())
            .before("tx:mark_tx_completed");
      final EmbeddedTransactionManager transactionManager = (EmbeddedTransactionManager) tm(0);
      transactionManager.begin();
      primaryOwnerCache.put(key, VALUE);
      final EmbeddedTransaction transaction = transactionManager.getTransaction();
      TransactionTable transactionTable0 = TestingUtil.getTransactionTable(primaryOwnerCache);
      final GlobalTransaction gtx = transactionTable0.getLocalTransaction(transaction).getGlobalTransaction();
      transaction.runPrepare();
      assertEquals("Wrong transaction status before killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());
      // Now, we kill cache(1). the transaction is prepared in cache(1) and it should be transferred to cache(2)
      killMember(1);
      int currentTopologyId = primaryOwnerCache.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
      Future<Object> secondCommitFuture = fork(() -> {
         // Wait for the commit command to block replaying the prepare on the new backup
         sequencer.advance("sim:before_extra_commit");
         // And try to run another commit command
         CommitCommand command = new CommitCommand(ByteString.fromString(newBackupOwnerCache.getName()), gtx);
         command.setTopologyId(currentTopologyId);
         command.markTransactionAsRemote(true);
         ComponentRegistry componentRegistry = TestingUtil.extractComponentRegistry(newBackupOwnerCache);
         try {
            command.invokeAsync(componentRegistry);
         } catch (Throwable throwable) {
            throw new CacheException(throwable);
         }
         sequencer.advance("sim:after_extra_commit");
         return null;
      });
      checkIfTransactionExists(newBackupOwnerCache);
      assertEquals("Wrong transaction status after killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());
      transaction.runCommit(false);
      secondCommitFuture.get(10, SECONDS);
      assertNoTransactions();
      // The new backup sees both the original and replayed prepare, and both commit commands,
      // but the transaction itself must only have been applied once (verified below via the data container).
      assertEquals("Wrong number of prepares!", 2, newBackupCounter.numberPrepares.get());
      assertEquals("Wrong number of commits!", 2, newBackupCounter.numberCommits.get());
      assertEquals("Wrong number of rollbacks!", 0, newBackupCounter.numberRollbacks.get());
      assertEquals("Wrong number of prepares!", 2, oldBackup2Counter.numberPrepares.get());
      assertEquals("Wrong number of commits!", 1, oldBackup2Counter.numberCommits.get());
      assertEquals("Wrong number of rollbacks!", 0, oldBackup2Counter.numberRollbacks.get());
      // We only count remote commands, and there shouldn't be any on the primary/originator
      assertEquals("Wrong number of prepares!", 0, primaryCounter.numberPrepares.get());
      assertEquals("Wrong number of commits!", 0, primaryCounter.numberCommits.get());
      assertEquals("Wrong number of rollbacks!", 0, primaryCounter.numberRollbacks.get());
      checkKeyInDataContainer(key);
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      builder.transaction()
            .useSynchronization(false)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
            .recovery().disable();
      builder.clustering()
            .hash().numOwners(3).numSegments(1).consistentHashFactory(consistentHashFactory)
            .stateTransfer().fetchInMemoryState(true);
      builder.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      createClusteredCaches(4, builder);
   }
   // Asserts that every node stores the expected value for the key once the transaction completes.
   private void checkKeyInDataContainer(Object key) {
      for (Cache<Object, Object> cache : caches()) {
         DataContainer container = cache.getAdvancedCache().getDataContainer();
         InternalCacheEntry entry = container.get(key);
         assertNotNull("Cache '" + address(cache) + "' does not contain key!", entry);
         assertEquals("Cache '" + address(cache) + "' has wrong value!", VALUE, entry.getValue());
      }
   }
   // Asserts that the prepared transaction was transferred to the given node as a remote transaction.
   private void checkIfTransactionExists(Cache<Object, Object> cache) {
      TransactionTable table = TestingUtil.extractComponent(cache, TransactionTable.class);
      assertFalse("Expected a remote transaction.", table.getRemoteTransactions().isEmpty());
   }
   // Interceptor that counts remote prepare/commit/rollback commands seen by a cache.
   static class CountingInterceptor extends DDAsyncInterceptor {
      private static final Log log = LogFactory.getLog(CountingInterceptor.class);
      //counters
      private final AtomicInteger numberPrepares = new AtomicInteger(0);
      private final AtomicInteger numberCommits = new AtomicInteger(0);
      private final AtomicInteger numberRollbacks = new AtomicInteger(0);
      @Override
      public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
         if (!ctx.isOriginLocal()) {
            log.debugf("Received remote prepare for transaction %s", command.getGlobalTransaction());
            numberPrepares.incrementAndGet();
         }
         return invokeNext(ctx, command);
      }
      @Override
      public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
         if (!ctx.isOriginLocal()) {
            log.debugf("Received remote commit for transaction %s", command.getGlobalTransaction());
            numberCommits.incrementAndGet();
         }
         return invokeNext(ctx, command);
      }
      @Override
      public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
         if (!ctx.isOriginLocal()) {
            log.debugf("Received remote rollback for transaction %s", command.getGlobalTransaction());
            numberRollbacks.incrementAndGet();
         }
         return invokeNext(ctx, command);
      }
      // Installs a counter before the CallInterceptor, reusing an existing one if already present.
      public static CountingInterceptor inject(Cache cache) {
         AsyncInterceptorChain chain = cache.getAdvancedCache().getAsyncInterceptorChain();
         if (chain.containsInterceptorType(CountingInterceptor.class)) {
            return chain.findInterceptorWithClass(CountingInterceptor.class);
         }
         CountingInterceptor interceptor = new CountingInterceptor();
         chain.addInterceptorBefore(interceptor, CallInterceptor.class);
         return interceptor;
      }
   }
}
| 10,696
| 48.523148
| 127
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/WriteSkewDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.distribution.DistributionTestHelper.hasOwners;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.infinispan.test.TestingUtil.withTx;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import static org.infinispan.transaction.impl.WriteSkewHelper.versionFromEntry;
import static org.infinispan.util.BlockingLocalTopologyManager.confirmTopologyUpdate;
import static org.infinispan.util.logging.Log.CLUSTER;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.VersionedPrepareCommand;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.infinispan.util.BlockingLocalTopologyManager;
import org.infinispan.util.ControlledRpcManager;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
 * Tests if the entry version is lost during the state transfer in which the primary owner changes.
 *
 * @author Pedro Ruivo
 * @since 6.0
 */
@Test(groups = "functional", testName = "statetransfer.WriteSkewDuringStateTransferTest", singleThreaded = true)
public class WriteSkewDuringStateTransferTest extends MultipleCacheManagersTest {
   // All blocking topology managers created during a test, so they can be unblocked on teardown.
   private final List<BlockingLocalTopologyManager> topologyManagerList =
         Collections.synchronizedList(new ArrayList<>(4));
   @AfterMethod(alwaysRun = true)
   public final void unblockAll() {
      //keep track of all controlled components. In case of failure, we need to unblock all otherwise we have to wait
      //long time until the test is able to stop all cache managers.
      for (BlockingLocalTopologyManager topologyManager : topologyManagerList) {
         topologyManager.stopBlocking();
      }
      topologyManagerList.clear();
   }
   /*
    */
   /**
    * See ISPN-3738
    *
    * Replicated TX cache with WSC, A, B are in cluster, C is joining
    * 0. The current CH already contains A and B as owners, C is joining (is not primary owner of anything yet).
    * B is primary owner of K=V.
    * 1. A sends PrepareCommand to B and C with put(K, V) (V is null on all nodes); //A has already received the
    * rebalance_start
    * 2. C receives PrepareCommand and responds with no versions (it is not primary owner); //C has already received the
    * rebalance_start
    * 3. topology changes on B - primary ownership of K is transferred to C; //B has already received the ch_update
    * 4. B receives PrepareCommand, responds without K's version (it is not primary)
    * 5. --B forwards the Prepare to C as it sees that the command has lower topology ID--
    * 6. C responds to B's prepare with version of K; //at this point, C has received the ch_update
    * 7. K version is not added to B's response, B responds to A
    * 8. A finds out that topology has changed, forwards prepare to C; //A has received the ch_update
    * 9. C responds to C's prepare with version of K
    * 10. A receives C's response, but the versions are not added to transaction
    * 11. A sends out CommitCommand missing version of K
    * 12. all nodes record K=V without version as usual ImmortalCacheEntry
    */
   public void testVersionsAfterStateTransfer() throws Exception {
      assertClusterSize("Wrong cluster size", 2);
      final Object key = "key1";
      assertKeyOwnership(key, cache(1), cache(0));
      final int currentTopologyId = currentTopologyId(cache(0));
      // Take control of node A's RPCs and of topology installation on every node, so each
      // rebalance phase can be released individually per node.
      final ControlledRpcManager nodeARpcManager = ControlledRpcManager.replaceRpcManager(cache(0));
      final NodeController nodeAController = setNodeControllerIn(cache(0));
      setInitialPhaseForNodeA(nodeAController, currentTopologyId);
      final NodeController nodeBController = setNodeControllerIn(cache(1));
      setInitialPhaseForNodeB(nodeBController, currentTopologyId);
      final NewNode nodeC = addNode(currentTopologyId);
      // Start the rebalance everywhere
      confirmTopologyUpdate(CacheTopology.Phase.READ_OLD_WRITE_ALL,
                            nodeAController.topologyManager,
                            nodeBController.topologyManager,
                            nodeC.controller.topologyManager);
      // Install the READ_ALL_WRITE_ALL topology on B
      nodeBController.topologyManager.confirmTopologyUpdate(CacheTopology.Phase.READ_ALL_WRITE_ALL);
      Future<Object> tx = executeTransaction(cache(0), key);
      // Wait until all nodes have replied. then, we change the topology ID and let it collect the responses.
      ControlledRpcManager.BlockedResponseMap blockedPrepare =
            nodeARpcManager.expectCommand(VersionedPrepareCommand.class).send().expectAllResponses();
      assertEquals(0, nodeC.commandLatch.getCount());
      nodeAController.topologyManager.confirmTopologyUpdate(CacheTopology.Phase.READ_ALL_WRITE_ALL);
      nodeC.controller.topologyManager.confirmTopologyUpdate(CacheTopology.Phase.READ_ALL_WRITE_ALL);
      confirmTopologyUpdate(CacheTopology.Phase.READ_NEW_WRITE_ALL, nodeAController.topologyManager,
                            nodeBController.topologyManager, nodeC.controller.topologyManager);
      confirmTopologyUpdate(CacheTopology.Phase.NO_REBALANCE, nodeAController.topologyManager,
                            nodeBController.topologyManager, nodeC.controller.topologyManager);
      awaitForTopology(currentTopologyId + 4, cache(0));
      blockedPrepare.receive();
      // Retry the prepare and then commit
      nodeARpcManager.expectCommand(PrepareCommand.class).send().receiveAll();
      nodeARpcManager.expectCommand(CommitCommand.class).send().receiveAll();
      nodeARpcManager.expectCommand(TxCompletionNotificationCommand.class).send();
      assertNull("Wrong put() return value.", tx.get());
      nodeAController.topologyManager.stopBlocking();
      nodeBController.topologyManager.stopBlocking();
      nodeC.controller.topologyManager.stopBlocking();
      nodeC.joinerFuture.get(30, TimeUnit.SECONDS);
      awaitForTopology(currentTopologyId + 4, cache(0));
      awaitForTopology(currentTopologyId + 4, cache(1));
      awaitForTopology(currentTopologyId + 4, cache(2));
      // The regression in ISPN-3738 stored the entry without a version; verify the version survived.
      assertKeyVersionInDataContainer(key, cache(1), cache(2));
      nodeARpcManager.stopBlocking();
      cache(0).put(key, "v2");
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      createClusteredCaches(2, WriteSkewDuringStateTransferSCI.INSTANCE, configuration());
   }
   // Asserts the entry exists on each owner and carries a (non-null) write-skew version.
   private void assertKeyVersionInDataContainer(Object key, Cache<?, ?>... owners) {
      for (Cache<?, ?> cache : owners) {
         DataContainer<?, ?> dataContainer = extractComponent(cache, InternalDataContainer.class);
         InternalCacheEntry<?, ?> entry = dataContainer.peek(key);
         assertNotNull("Entry cannot be null in " + address(cache) + ".", entry);
         assertNotNull("Version cannot be null.", versionFromEntry(entry));
      }
   }
   private void awaitForTopology(final int expectedTopologyId, final Cache<?, ?> cache) {
      eventually(() -> expectedTopologyId == currentTopologyId(cache));
   }
   private int currentTopologyId(Cache<?, ?> cache) {
      return cache.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
   }
   // Runs put(key, "value") inside a transaction on a forked thread.
   private Future<Object> executeTransaction(final Cache<Object, Object> cache, final Object key) {
      return fork(() -> withTx(cache.getAdvancedCache().getTransactionManager(), () -> cache.put(key, "value")));
   }
   // Starts joiner node C with a controlled interceptor and topology manager; the returned
   // NewNode's latch opens once C has processed A's forwarded prepare.
   private NewNode addNode(final int currentTopologyId) {
      final NewNode newNode = new NewNode();
      ConfigurationBuilder builder = configuration();
      newNode.controller = new NodeController();
      newNode.controller.interceptor = new ControlledCommandInterceptor();
      //noinspection deprecation
      builder.customInterceptors().addInterceptor().index(0).interceptor(newNode.controller.interceptor);
      GlobalConfigurationBuilder globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      globalBuilder.serialization().addContextInitializer(WriteSkewDuringStateTransferSCI.INSTANCE);
      EmbeddedCacheManager embeddedCacheManager = createClusteredCacheManager(false, globalBuilder, builder, new TransportFlags());
      registerCacheManager(embeddedCacheManager);
      newNode.controller.topologyManager = replaceTopologyManager(embeddedCacheManager);
      newNode.controller.interceptor.addAction(new Action() {
         @Override
         public boolean isApplicable(InvocationContext context, VisitableCommand command) {
            return !context.isOriginLocal() && command instanceof PrepareCommand;
         }
         @Override
         public void before(InvocationContext context, VisitableCommand command, Cache<?, ?> cache) {
            log.tracef("Before: command=%s. origin=%s", command, context.getOrigin());
            if (context.getOrigin().equals(address(cache(1)))) {
               //from node B, i.e, it is forwarded. it needs to wait until the topology changes
               try {
                  //noinspection deprecation
                  cache.getAdvancedCache().getComponentRegistry().getStateTransferLock().waitForTopology(currentTopologyId + 2,
                        10, TimeUnit.SECONDS);
               } catch (InterruptedException e) {
                  Thread.currentThread().interrupt();
               } catch (TimeoutException e) {
                  throw CLUSTER.failedWaitingForTopology(currentTopologyId + 2);
               }
            }
         }
         @Override
         public void after(InvocationContext context, VisitableCommand command, Cache<?, ?> cache) {
            log.tracef("After: command=%s. origin=%s", command, context.getOrigin());
            if (context.getOrigin().equals(address(0))) {
               newNode.commandLatch.countDown();
            }
         }
      });
      newNode.joinerFuture = fork(() -> {
         // Starts the default cache
         embeddedCacheManager.start();
         return null;
      });
      return newNode;
   }
   private ConfigurationBuilder configuration() {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
      builder.clustering()
            .stateTransfer().fetchInMemoryState(true)
            .hash().numSegments(1).numOwners(3).consistentHashFactory(new ConsistentHashFactoryImpl());
      builder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      return builder;
   }
   private void assertKeyOwnership(Object key, Cache<?, ?> primaryOwner, Cache<?, ?>... backupOwners) {
      assertTrue("Wrong ownership for " + key + ".", hasOwners(key, primaryOwner, backupOwners));
   }
   private BlockingLocalTopologyManager replaceTopologyManager(EmbeddedCacheManager cacheContainer) {
      BlockingLocalTopologyManager localTopologyManager =
            BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache(cacheContainer);
      topologyManagerList.add(localTopologyManager);
      return localTopologyManager;
   }
   private static NodeController setNodeControllerIn(Cache<Object, Object> cache) {
      NodeController nodeController = new NodeController();
      nodeController.interceptor = new ControlledCommandInterceptor(cache);
      nodeController.topologyManager = BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache(
            cache.getCacheManager());
      return nodeController;
   }
   private static void setInitialPhaseForNodeA(NodeController nodeA, final int currentTopology) {
      //node A initial phase:
      //* Node A sends the prepare for B and C. So, node A will send the prepare after the topologyId+1 is installed.
      nodeA.interceptor.addAction(new Action() {
         @Override
         public boolean isApplicable(InvocationContext context, VisitableCommand command) {
            return context.isOriginLocal() && command instanceof PrepareCommand;
         }
         @Override
         public void before(InvocationContext context, VisitableCommand command, Cache<?, ?> cache) {
            try {
               //noinspection deprecation
               cache.getAdvancedCache().getComponentRegistry().getStateTransferLock().waitForTopology(currentTopology + 1, 10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
               Thread.currentThread().interrupt();
            } catch (TimeoutException e) {
               throw CLUSTER.failedWaitingForTopology(currentTopology + 1);
            }
         }
         @Override
         public void after(InvocationContext context, VisitableCommand command, Cache<?, ?> cache) {
            //no-op
         }
      });
   }
   private static void setInitialPhaseForNodeB(NodeController nodeB, final int currentTopology) {
      //node B initial phase:
      //* Node B receives the prepare after it looses the primary owner to node C
      nodeB.interceptor.addAction(new Action() {
         @Override
         public boolean isApplicable(InvocationContext context, VisitableCommand command) {
            return !context.isOriginLocal() && command instanceof PrepareCommand;
         }
         @Override
         public void before(InvocationContext context, VisitableCommand command, Cache<?, ?> cache) {
            try {
               //noinspection deprecation
               cache.getAdvancedCache().getComponentRegistry().getStateTransferLock().waitForTopology(currentTopology + 2,
                     10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
               Thread.currentThread().interrupt();
            } catch (TimeoutException e) {
               throw CLUSTER.failedWaitingForTopology(currentTopology + 2);
            }
         }
         @Override
         public void after(InvocationContext context, VisitableCommand command, Cache<?, ?> cache) {
            //no-op
         }
      });
   }
   // Hook invoked around matching commands by ControlledCommandInterceptor.
   public interface Action {
      boolean isApplicable(InvocationContext context, VisitableCommand command);
      void before(InvocationContext context, VisitableCommand command, Cache<?, ?> cache);
      void after(InvocationContext context, VisitableCommand command, Cache<?, ?> cache);
   }
   // Single-segment CH where the last member to join becomes the primary owner.
   @SuppressWarnings("deprecation")
   @SerializeWith(ConsistentHashFactoryImpl.Externalizer.class)
   public static class ConsistentHashFactoryImpl extends BaseControlledConsistentHashFactory.Default {
      ConsistentHashFactoryImpl() {
         super(1);
      }
      @Override
      protected final int[][] assignOwners(int numSegments, List<Address> members) {
         //the primary owner is the last member.
         switch (members.size()) {
            case 1:
               return new int[][]{{0}};
            case 2:
               return new int[][]{{1, 0}};
            default:
               return new int[][]{{members.size() - 1, 0, 1}};
         }
      }
      // Stateless factory: nothing to marshal, a fresh instance is equivalent.
      public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<ConsistentHashFactoryImpl> {
         @Override
         public void writeObject(ObjectOutput output, ConsistentHashFactoryImpl object) throws IOException {
         }
         @Override
         public ConsistentHashFactoryImpl readObject(ObjectInput input) throws IOException, ClassNotFoundException {
            return new ConsistentHashFactoryImpl();
         }
      }
   }
   // Interceptor that runs the registered Actions around every command they apply to.
   public static class ControlledCommandInterceptor extends BaseAsyncInterceptor {
      private final List<Action> actionList;
      private Cache<Object, Object> cache;
      public ControlledCommandInterceptor(Cache<Object, Object> cache) {
         actionList = new ArrayList<>(3);
         this.cache = cache;
         this.cacheConfiguration = cache.getCacheConfiguration();
         extractInterceptorChain(cache).addInterceptor(this, 0);
      }
      public ControlledCommandInterceptor() {
         actionList = new ArrayList<>(3);
      }
      public void addAction(Action action) {
         actionList.add(action);
      }
      @Override
      public Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
         List<Action> actions = extractActions(ctx, command);
         if (actions.isEmpty()) {
            return invokeNext(ctx, command);
         }
         for (Action action : actions) {
            action.before(ctx, command, cache);
         }
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            for (Action action : actions) {
               action.after(ctx, command, cache);
            }
         });
      }
      private List<Action> extractActions(InvocationContext context, VisitableCommand command) {
         if (actionList.isEmpty()) {
            return Collections.emptyList();
         }
         List<Action> actions = new ArrayList<>(actionList.size());
         for (Action action : actionList) {
            if (action.isApplicable(context, command)) {
               actions.add(action);
            }
         }
         return actions;
      }
   }
   private static class NodeController {
      ControlledCommandInterceptor interceptor;
      BlockingLocalTopologyManager topologyManager;
   }
   private static class NewNode {
      Future<Void> joinerFuture;
      CountDownLatch commandLatch = new CountDownLatch(1);
      NodeController controller;
   }
   @AutoProtoSchemaBuilder(
         includeClasses = ConsistentHashFactoryImpl.class,
         schemaFileName = "test.core.WriteSkewDuringStateTransferTest.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.WriteSkewDuringStateTransferTest",
         service = false
   )
   public interface WriteSkewDuringStateTransferSCI extends SerializationContextInitializer {
      WriteSkewDuringStateTransferSCI INSTANCE = new WriteSkewDuringStateTransferSCIImpl();
   }
}
| 19,772
| 43.433708
| 145
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StatefulSetRollingUpgradeTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.commons.test.CommonsTestingUtil.tmpDirectory;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import java.io.File;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.Test;
/**
 * Emulates a Kubernetes StatefulSet rolling upgrade: a cluster of {@code numNodes} members is
 * restarted one member at a time, always in descending ordinal order (B before A), waiting for the
 * cluster to re-form after every stop and after every restart.
 *
 * @author Ryan Emerson
 * @since 11.0
 */
@CleanupAfterMethod
@Test(groups = {"functional", "unstable"}, testName = "statetransfer.StatefulSetRollingUpgradeTest")
public class StatefulSetRollingUpgradeTest extends MultipleCacheManagersTest {
   private static final String CACHE_NAME = "testCache";
   private static final int NUM_ROLLING_UPGRADES = 4;
   private int numNodes;
   @Override
   public Object[] factory() {
      // One test variant per cluster size, from 2 through 5 nodes.
      Object[] variants = new Object[4];
      for (int size = 2; size <= 5; size++) {
         variants[size - 2] = new StatefulSetRollingUpgradeTest().setNumNodes(size);
      }
      return variants;
   }
   private StatefulSetRollingUpgradeTest setNumNodes(int clusterSize) {
      this.numNodes = clusterSize;
      return this;
   }
   @Override
   protected String[] parameterNames() {
      return new String[]{"nodes"};
   }
   @Override
   protected Object[] parameterValues() {
      return new Object[]{numNodes};
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      // Start from a clean persistent-state directory, then boot every member of the cluster.
      Util.recursiveFileRemove(tmpDirectory(getClass().getSimpleName()));
      for (int nodeId = 0; nodeId < numNodes; nodeId++) {
         createStatefulCacheManager(nodeId);
      }
      waitForClusterToForm(CACHE_NAME);
   }
   public void testStateTransferRestart() {
      for (int upgrade = 0; upgrade < NUM_ROLLING_UPGRADES; upgrade++) {
         // Restart the highest ordinal first, mirroring StatefulSet rolling-update semantics.
         for (int node = numNodes - 1; node >= 0; node--) {
            manager(node).stop();
            cacheManagers.remove(node);
            waitForClusterToForm(CACHE_NAME);
            createStatefulCacheManager(node);
            waitForClusterToForm(CACHE_NAME);
         }
      }
   }
   private void createStatefulCacheManager(int nodeId) {
      // Each node persists its global state in its own subdirectory, keyed by ordinal.
      String stateDirectory = tmpDirectory(getClass().getSimpleName() + File.separator + nodeId);
      GlobalConfigurationBuilder global = GlobalConfigurationBuilder.defaultClusteredBuilder();
      global.globalState().enable().persistentLocation(stateDirectory);
      ConfigurationBuilder config = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
      config.clustering()
            .partitionHandling().whenSplit(PartitionHandling.DENY_READ_WRITES)
            .stateTransfer().timeout(5 * numNodes, TimeUnit.SECONDS);
      EmbeddedCacheManager manager = createClusteredCacheManager(true, global, null, new TransportFlags().withFD(true));
      manager.defineConfiguration(CACHE_NAME, config.build());
      cacheManagers.add(nodeId, manager);
   }
}
| 3,477
| 34.85567
| 120
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StaleLocksWithCommitDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.TransactionCoordinator;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.concurrent.CompletionStages;
import org.testng.annotations.Test;
@Test(testName = "statetransfer.StaleLocksWithCommitDuringStateTransferTest", groups = "functional")
@CleanupAfterMethod
public class StaleLocksWithCommitDuringStateTransferTest extends MultipleCacheManagersTest {
Cache<MagicKey, String> c1, c2;
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().cacheMode(CacheMode.DIST_SYNC)
.remoteTimeout(5000)
.transaction().transactionMode(TransactionMode.TRANSACTIONAL).cacheStopTimeout(100);
createCluster(TestDataSCI.INSTANCE, cb, 2);
c1 = cache(0);
c2 = cache(1);
waitForClusterToForm();
}
   // Rollback path: commit=false, failure injected on the originator (local) node.
   public void testRollbackLocalFailure() throws Exception {
      doStateTransferInProgressTest(false, true);
   }
   // Commit path: commit=true, failure injected on the originator (local) node.
   public void testCommitLocalFailure() throws Exception {
      doStateTransferInProgressTest(true, true);
   }
   // Rollback path: commit=false, failure injected on the remote node.
   public void testRollbackRemoteFailure() throws Exception {
      doStateTransferInProgressTest(false, false);
   }
   // Commit path: commit=true, failure injected on the remote node.
   public void testCommitRemoteFailure() throws Exception {
      doStateTransferInProgressTest(true, false);
   }
   /**
    * Check that the transaction commit/rollback recovers if we receive a StateTransferInProgressException from the remote node
    *
    * @param commit          {@code true} to commit the prepared transaction, {@code false} to roll it back
    * @param failOnOriginator {@code true} to block the topology lock on the originator first, {@code false} remote-first
    */
   private void doStateTransferInProgressTest(boolean commit, final boolean failOnOriginator) throws Exception {
      MagicKey k1 = new MagicKey("k1", c1);
      MagicKey k2 = new MagicKey("k2", c2);
      tm(c1).begin();
      c1.put(k1, "v1");
      c1.put(k2, "v2");
      // We split the transaction commit in two phases by calling the TransactionCoordinator methods directly
      TransactionTable txTable = TestingUtil.extractComponent(c1, TransactionTable.class);
      TransactionCoordinator txCoordinator = TestingUtil.extractComponent(c1, TransactionCoordinator.class);
      // Execute the prepare on both nodes
      LocalTransaction localTx = txTable.getLocalTransaction(tm(c1).getTransaction());
      CompletionStages.join(txCoordinator.prepare(localTx));
      final CountDownLatch commitLatch = new CountDownLatch(1);
      Thread worker = new Thread("RehasherSim,StaleLocksWithCommitDuringStateTransferTest") {
         @Override
         public void run() {
            try {
               // Before calling commit we block transactions on one of the nodes to simulate a state transfer
               final StateTransferLock blockFirst = TestingUtil.extractComponent(failOnOriginator ? c1 : c2, StateTransferLock.class);
               final StateTransferLock blockSecond = TestingUtil.extractComponent(failOnOriginator ? c2 : c1, StateTransferLock.class);
               try {
                  blockFirst.acquireExclusiveTopologyLock();
                  blockSecond.acquireExclusiveTopologyLock();
                  commitLatch.countDown();
                  // should be much larger than the lock acquisition timeout
                  Thread.sleep(1000);
               } finally {
                  blockSecond.releaseExclusiveTopologyLock();
                  blockFirst.releaseExclusiveTopologyLock();
               }
            } catch (Throwable t) {
               log.errorf(t, "Error blocking/unblocking transactions");
            }
         }
      };
      worker.start();
      commitLatch.await(10, TimeUnit.SECONDS);
      try {
         // finally commit or rollback the transaction
         if (commit) {
            tm(c1).commit();
         } else {
            tm(c1).rollback();
         }
         // make the transaction manager forget about our tx so that we don't get rollback exceptions in the log
         tm(c1).suspend();
      } finally {
         // don't leak threads
         worker.join();
      }
      // test that we don't leak locks
      assertEventuallyNotLocked(c1, k1);
      assertEventuallyNotLocked(c2, k1);
      assertEventuallyNotLocked(c1, k2);
      assertEventuallyNotLocked(c2, k2);
   }
public void testRollbackSuspectFailure() throws Exception {
doTestSuspect(false);
}
public void testCommitSuspectFailure() throws Exception {
doTestSuspect(true);
}
/**
* Check that the transaction commit/rollback recovers if the remote node dies during the RPC
*/
private void doTestSuspect(boolean commit) throws Exception {
MagicKey k1 = new MagicKey("k1", c1);
MagicKey k2 = new MagicKey("k2", c2);
tm(c1).begin();
c1.put(k1, "v1");
c1.put(k2, "v2");
// We split the transaction commit in two phases by calling the TransactionCoordinator methods directly
TransactionTable txTable = TestingUtil.extractComponent(c1, TransactionTable.class);
TransactionCoordinator txCoordinator = TestingUtil.extractComponent(c1, TransactionCoordinator.class);
// Execute the prepare on both nodes
LocalTransaction localTx = txTable.getLocalTransaction(tm(c1).getTransaction());
CompletionStages.join(txCoordinator.prepare(localTx));
// Delay the commit on the remote node. Can't used blockNewTransactions because we don't want a StateTransferInProgressException
AsyncInterceptorChain c2ic = c2.getAdvancedCache().getAsyncInterceptorChain();
c2ic.addInterceptorBefore(new DelayCommandInterceptor(), StateTransferInterceptor.class);
// Schedule the remote node to stop on another thread since the main thread will be busy with the commit call
Thread worker = new Thread("RehasherSim,StaleLocksWithCommitDuringStateTransferTest") {
@Override
public void run() {
try {
// should be much larger than the lock acquisition timeout
Thread.sleep(1000);
manager(c2).stop();
// stLock.unblockNewTransactions(1000);
} catch (InterruptedException e) {
log.errorf(e, "Error stopping cache");
}
}
};
worker.start();
try {
// finally commit or rollback the transaction
if (commit) {
CompletionStages.join(txCoordinator.commit(localTx, false));
} else {
CompletionStages.join(txCoordinator.rollback(localTx));
}
// make the transaction manager forget about our tx so that we don't get rollback exceptions in the log
tm(c1).suspend();
} finally {
// don't leak threads
worker.join();
}
// test that we don't leak locks
assertEventuallyNotLocked(c1, k1);
assertEventuallyNotLocked(c1, k2);
}
static class DelayCommandInterceptor extends BaseAsyncInterceptor {
@Override
public Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
if (command instanceof CommitCommand) {
Thread.sleep(3000);
}
return invokeNext(ctx, command);
}
}
}
| 7,988
| 37.408654
| 135
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ReadAfterLosingOwnershipTest.java
|
package org.infinispan.statetransfer;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoName;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestBlocking;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
* Tests the read when a node loses the ownership of a key.
*
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "statetransfer.ReadAfterLosingOwnershipTest")
@CleanupAfterMethod
public class ReadAfterLosingOwnershipTest extends MultipleCacheManagersTest {
   // When true, the caches are created with L1 enabled so the test also exercises
   // stale-L1 handling when ownership moves to the new joiner.
   private boolean l1 = false;
   @Override
   public Object[] factory() {
      // Run every test method in all four combinations of transactional x L1
      return new Object[] {
         new ReadAfterLosingOwnershipTest().transactional(true),
         new ReadAfterLosingOwnershipTest().transactional(false),
         new ReadAfterLosingOwnershipTest().l1(true).transactional(true),
         new ReadAfterLosingOwnershipTest().l1(true).transactional(false),
      };
   }
   // Fluent setter used by factory() to build the L1-enabled variants
   public ReadAfterLosingOwnershipTest l1(boolean l1) {
      this.l1 = l1;
      return this;
   }
   @Override
   protected String parameters() {
      return "[tx=" + transactional + ", l1=" + l1 + "]";
   }
   public void testOwnershipLostWithPut() throws Exception {
      doOwnershipLostTest(Operation.PUT, false);
   }
   public void testOwnershipLostWithRemove() throws Exception {
      doOwnershipLostTest(Operation.REMOVE, false);
   }
   public void testOwnershipLostWithPutOnOwner() throws Exception {
      doOwnershipLostTest(Operation.PUT, true);
   }
   public void testOwnershipLostWithRemoveOnOwner() throws Exception {
      doOwnershipLostTest(Operation.REMOVE, true);
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      createClusteredCaches(2, ReadAfterLostOwnershipTestSCI.INSTANCE, createConfigurationBuilder());
   }
   protected final ConfigurationBuilder createConfigurationBuilder() {
      // Single segment + controlled CH so the owners of "key" are deterministic
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, transactional);
      builder.clustering()
            .hash().numOwners(2).consistentHashFactory(new SingleKeyConsistentHashFactory()).numSegments(1)
            .l1().enabled(l1)
            .stateTransfer().fetchInMemoryState(true);
      return builder;
   }
   /**
    * Updates the key while cache(1) is blocked just before invalidating the segment it
    * lost to the new joiner, then checks that every cache reads the latest value once
    * the join completes.
    *
    * @param operation PUT (value becomes "value1") or REMOVE (value becomes null)
    * @param onOwner   when true the update goes through the primary owner cache(0),
    *                  otherwise through the old owner cache(1)
    */
   private void doOwnershipLostTest(Operation operation, boolean onOwner) throws ExecutionException, InterruptedException {
      log.debug("Initialize cache");
      cache(0).put("key", "value0");
      assertCachesKeyValue("key", "value0");
      // Hook into cache(1)'s state consumer so we can pause its key invalidation
      StateConsumerImpl stateConsumer = (StateConsumerImpl) TestingUtil.extractComponent(cache(1), StateConsumer.class);
      Listener listener = new Listener();
      stateConsumer.setKeyInvalidationListener(listener);
      log.debug("Add a 3rd node");
      addClusterEnabledCacheManager(ReadAfterLostOwnershipTestSCI.INSTANCE, createConfigurationBuilder());
      // The join blocks until the listener releases the invalidation, so run it async
      Future<Void> join = fork(() -> {
         waitForClusterToForm();
         log.debug("3rd has join");
      });
      log.debug("Waiting for command to block");
      listener.notifier.await();
      log.debug("Set a new value");
      //we change the value in the old owner if onOwner is false
      operation.update(onOwner ? cache(0) : cache(1));
      //we check the value in the primary owner and old owner (cache(2) has not started yet)
      assertCachesKeyValue("key", operation.finalValue(), cache(0), cache(1));
      // release the blocked invalidation and let the join finish
      listener.wait.countDown();
      log.debug("Waiting for the 3rd node to join");
      join.get();
      // now all three caches, including the new owner, must see the final value
      assertCachesKeyValue("key", operation.finalValue());
   }
   // Asserts the key maps to the given value on every cache in the cluster
   private void assertCachesKeyValue(Object key, Object value) {
      assertCachesKeyValue(key, value, caches());
   }
   private void assertCachesKeyValue(Object key, Object value, Cache<Object, Object>... caches) {
      assertCachesKeyValue(key, value, Arrays.asList(caches));
   }
   private void assertCachesKeyValue(Object key, Object value, Collection<Cache<Object, Object>> caches) {
      for (Cache<Object, Object> cache : caches) {
         AssertJUnit.assertEquals("Wrong key value for " + address(cache), value, cache.get(key));
      }
   }
   private enum Operation {
      //only PUT and REMOVE is needed because one updates the key (i.e. the value is not null) and the other removes
      //it (i.e. the value is null)
      PUT,
      REMOVE;
      // Applies this operation to "key" on the given cache
      public void update(Cache<Object, Object> cache) {
         if (this == PUT) {
            cache.put("key", "value1");
         } else {
            cache.remove("key");
         }
      }
      // The value a reader should observe after update() has been applied
      public Object finalValue() {
         return this == PUT ? "value1" : null;
      }
   }
   @ProtoName("ReadAfterSingleKeyConsistentHashFactory")
   public static class SingleKeyConsistentHashFactory extends BaseControlledConsistentHashFactory.Default {
      SingleKeyConsistentHashFactory() {
         super(1);
      }
      protected final int[][] assignOwners(int numSegments, List<Address> members) {
         //the owners will be the first member and the last (numberOfOwners - 1)-th members
         switch (members.size()) {
            case 1:
               return new int[][]{{0}};
            case 2:
               return new int[][]{{0, 1}};
            default:
               return new int[][]{{0, members.size() - 1}};
         }
      }
   }
   // Blocks cache(1)'s state consumer right before it invalidates the segment it lost,
   // so the test can update the key while the old owner still holds its copy.
   public class Listener implements StateConsumerImpl.KeyInvalidationListener {
      // Signalled once the invalidation for segment 0 has been intercepted
      public final CountDownLatch notifier = new CountDownLatch(1);
      // Released by the test to let the invalidation proceed
      final CountDownLatch wait = new CountDownLatch(1);
      @Override
      public void beforeInvalidation(IntSet removedSegments, IntSet staleL1Segments) {
         log.debugf("Before invalidation: removedSegments=%s, staleL1Segments=%s", removedSegments, staleL1Segments);
         if (!removedSegments.contains(0)) {
            //it only matters when it looses the segment 0 and the key is moved to the new owner
            return;
         }
         notifier.countDown();
         try {
            TestBlocking.await(wait, 10, TimeUnit.SECONDS);
         } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
         }
      }
   }
   @AutoProtoSchemaBuilder(
         includeClasses = SingleKeyConsistentHashFactory.class,
         schemaFileName = "test.core.ReadAfterLostOwnershipTest.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.ReadAfterLostOwnershipTest",
         service = false
   )
   interface ReadAfterLostOwnershipTestSCI extends SerializationContextInitializer {
      ReadAfterLostOwnershipTestSCI INSTANCE = new ReadAfterLostOwnershipTestSCIImpl();
   }
}
| 7,402
| 34.591346
| 123
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/BigObject.java
| 0
| 0
| 0
|
java
|
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/OrphanTransactionsCleanupTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Arrays;
import jakarta.transaction.Transaction;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.ReplicatedControlledConsistentHashFactory;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "statetransfer.OrphanTransactionsCleanupTest")
public class OrphanTransactionsCleanupTest extends MultipleCacheManagersTest {
   protected ConfigurationBuilder configurationBuilder;

   public OrphanTransactionsCleanupTest() {
      cleanup = CleanupPhase.AFTER_METHOD;
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      configurationBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
      configurationBuilder.transaction().lockingMode(LockingMode.PESSIMISTIC);
      // Make the coordinator the primary owner of the only segment
      configurationBuilder.clustering().hash().numSegments(1).consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
      configurationBuilder.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(configurationBuilder);
      addClusterEnabledCacheManager(configurationBuilder);
      waitForClusterToForm();
   }

   /**
    * Starts two remote transactions on the coordinator: one originated by a joiner that
    * is not yet in the consistent hash, one by a regular member. After the regular member
    * is killed, only its transaction should be cleaned up as orphan; the joiner's
    * transaction must survive and commit successfully.
    */
   public void testJoinerTransactionSurvives() throws Exception {
      Cache<Object, Object> c0 = manager(0).getCache();
      Cache<Object, Object> c1 = manager(1).getCache();
      final TransactionTable tt0 = TestingUtil.extractComponent(c0, TransactionTable.class);

      // Disable rebalancing so that the joiner is not included in the CH
      LocalTopologyManager ltm0 = TestingUtil.extractGlobalComponent(manager(0), LocalTopologyManager.class);
      ltm0.setRebalancingEnabled(false);

      // Add a new node
      addClusterEnabledCacheManager(configurationBuilder);
      Cache<Object, Object> c2 = manager(2).getCache();

      // Start a transaction from c2, but don't commit yet
      tm(2).begin();
      c2.put("key1", "value1");
      assertEquals(1, tt0.getRemoteGlobalTransaction().size());
      Transaction tx2 = tm(2).suspend();

      // Start another transaction from c1, also without committing it. The suspended tx
      // is intentionally discarded (node 1 is killed below, so it is never resumed);
      // the previously assigned local variable was unused.
      tm(1).begin();
      c1.put("key2", "value2");
      assertEquals(2, tt0.getRemoteGlobalTransaction().size());
      tm(1).suspend();

      // Kill node 1 to trigger the orphan transaction cleanup
      manager(1).stop();
      TestingUtil.blockUntilViewsReceived(60000, false, c0, c2);

      // Cache 2 should not be in the CH yet
      TestingUtil.waitForNoRebalance(c0);
      assertEquals(Arrays.asList(address(0)), c0.getAdvancedCache().getDistributionManager().getWriteConsistentHash().getMembers());
      // Only the joiner's transaction should remain after the orphan cleanup
      eventuallyEquals(1, () -> tt0.getRemoteTransactions().size());

      // Committing the tx on c2 should succeed
      tm(2).resume(tx2);
      tm(2).commit();
   }
}
| 3,470
| 40.321429
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferRestart2Test.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.jgroups.protocols.DISCARD;
import org.testng.annotations.Test;
/**
* Tests scenario for ISPN-7127
*
* - create nodes A, B - start node C - starts state transfer from B to C
* - abruptly kill B before it is able to reply to the {@link StateTransferStartCommand} from C
* - C resends the request to A
* - finally cluster A, C is formed where all entries are properly backed up on both nodes
*
* @author Michal Linhard
* @author Dan Berindei
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.StateTransferRestart2Test")
@CleanupAfterMethod
public class StateTransferRestart2Test extends MultipleCacheManagersTest {
   private ConfigurationBuilder cfgBuilder;
   @Override
   protected void createCacheManagers() throws Throwable {
      // Transactional DIST_SYNC with 2 owners; FD is enabled on the transport so the
      // killed node is suspected and removed from the view quickly
      cfgBuilder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      cfgBuilder.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup());
      cfgBuilder.clustering().hash().numOwners(2);
      cfgBuilder.clustering().stateTransfer().fetchInMemoryState(true);
      cfgBuilder.clustering().stateTransfer().timeout(20000);
      GlobalConfigurationBuilder gcb0 = new GlobalConfigurationBuilder().clusteredDefault();
      addClusterEnabledCacheManager(gcb0, cfgBuilder, new TransportFlags().withFD(true));
      GlobalConfigurationBuilder gcb1 = new GlobalConfigurationBuilder().clusteredDefault();
      addClusterEnabledCacheManager(gcb1, cfgBuilder, new TransportFlags().withFD(true));
   }
   @Override
   protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
      // Use a no-op global configuration manager so this test skips global-state coordination
      NoOpGlobalConfigurationManager.amendCacheManager(cm);
   }
   /**
    * Starts a joiner (c2) whose transport kills c1 just as c1 receives the
    * StateTransferStartCommand; c2 must then re-request the state from c0 and the
    * surviving cluster { c0, c2 } must end up with all entries on both nodes.
    */
   public void testStateTransferRestart() throws Throwable {
      final int numKeys = 100;
      log.info("waiting for cluster { c0, c1 }");
      waitForClusterToForm();
      log.info("putting in data");
      final Cache<Object, Object> c0 = cache(0);
      final Cache<Object, Object> c1 = cache(1);
      for (int k = 0; k < numKeys; k++) {
         c0.put(k, k);
      }
      TestingUtil.waitForNoRebalance(c0, c1);
      assertEquals(numKeys, c0.entrySet().size());
      assertEquals(numKeys, c1.entrySet().size());
      // DISCARD lets the custom transport drop all of c1's traffic once the kill is triggered
      DISCARD d1 = TestingUtil.getDiscardForCache(c1.getCacheManager());
      GlobalConfigurationBuilder gcb2 = new GlobalConfigurationBuilder();
      gcb2.transport().transport(new KillingJGroupsTransport(d1, c1));
      log.info("adding cache c2");
      addClusterEnabledCacheManager(gcb2, cfgBuilder, new TransportFlags().withFD(true));
      log.info("get c2");
      final Cache<Object, Object> c2 = cache(2);
      log.info("waiting for cluster { c0, c2 }");
      TestingUtil.blockUntilViewsChanged(10000, 2, c0, c2);
      log.infof("c0 entrySet size before : %d", c0.entrySet().size());
      log.infof("c2 entrySet size before : %d", c2.entrySet().size());
      // After the restarted state transfer both surviving nodes must hold every entry
      eventuallyEquals(numKeys, () -> c0.entrySet().size());
      eventuallyEquals(numKeys, () -> c2.entrySet().size());
      log.info("Ending the test");
   }
   // Transport installed on the joiner: when it sends StateTransferStartCommand to c1,
   // c1's traffic is discarded (so the reply can never arrive) and c1 is killed on a
   // forked thread, forcing the joiner to restart the transfer from another node.
   class KillingJGroupsTransport extends JGroupsTransport {
      private final DISCARD d1;
      private final Cache<Object, Object> c1;
      public KillingJGroupsTransport(DISCARD d1, Cache<Object, Object> c1) {
         this.d1 = d1;
         this.c1 = c1;
      }
      @Override
      public <T> CompletionStage<T> invokeCommand(Address target, ReplicableCommand command,
                                                  ResponseCollector<T> collector, DeliverOrder deliverOrder,
                                                  long timeout, TimeUnit unit) {
         if (command instanceof StateTransferStartCommand && target.equals(address(1))) {
            // Drop c1's traffic first so its in-flight reply is lost, then kill it async
            d1.discardAll(true);
            fork((Callable<Void>) () -> {
               log.info("KILLING the c1 cache");
               TestingUtil.killCacheManagers(manager(c1));
               return null;
            });
         }
         return super.invokeCommand(target, command, collector, deliverOrder, timeout, unit);
      }
   }
}
| 5,210
| 39.084615
| 108
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateProviderTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.context.Flag.STATE_TRANSFER_PROGRESS;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.SmallIntSet;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.distribution.ch.impl.HashFunctionPartitioner;
import org.infinispan.notifications.cachelistener.cluster.ClusterCacheNotifier;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.reactive.publisher.impl.LocalPublisherManager;
import org.infinispan.reactive.publisher.impl.Notifications;
import org.infinispan.reactive.publisher.impl.SegmentAwarePublisherSupplier;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.PersistentUUIDManager;
import org.infinispan.topology.PersistentUUIDManagerImpl;
import org.infinispan.transaction.impl.TransactionOriginatorChecker;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.ByteString;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import io.reactivex.rxjava3.core.Flowable;
/**
* Test for StateProviderImpl.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.StateProviderTest")
public class StateProviderTest {
   private static final Log log = LogFactory.getLog(StateProviderTest.class);
   // Number of segments must be a power of 2 for keyPartition4Segments to work
   private static final int NUM_SEGMENTS = 4;
   // Fixed member addresses reused by both tests
   private static final TestAddress A = new TestAddress(0, "A");
   private static final TestAddress B = new TestAddress(1, "B");
   private static final TestAddress C = new TestAddress(2, "C");
   private static final TestAddress D = new TestAddress(3, "D");
   private static final TestAddress E = new TestAddress(4, "E");
   private static final TestAddress F = new TestAddress(5, "F");
   private static final TestAddress G = new TestAddress(6, "G");
   private static final PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   static {
      // Every address needs a persistent UUID mapping before cache topologies can be built
      Arrays.asList(A, B, C, D, E, F, G).forEach(address -> persistentUUIDManager.addPersistentAddressMapping(address, PersistentUUID.randomUUID()));
   }
   // Mocked dependencies injected into the StateProviderImpl under test (recreated per method)
   private Configuration configuration;
   private Cache cache;
   private RpcManager rpcManager;
   private CommandsFactory commandsFactory;
   private ClusterCacheNotifier cacheNotifier;
   private PersistenceManager persistenceManager;
   private InternalDataContainer dataContainer;
   private TransactionTable transactionTable;
   private StateTransferLock stateTransferLock;
   private DistributionManager distributionManager;
   // Reassigned by the tests; distributionManager.getCacheTopology() always reflects the current value
   private LocalizedCacheTopology cacheTopology;
   private InternalEntryFactory ef;
   private LocalPublisherManager<?, ?> lpm;
   @BeforeMethod
   public void setUp() {
      // create cache configuration
      ConfigurationBuilder cb = new ConfigurationBuilder();
      cb.clustering().invocationBatching().enable()
            .clustering().cacheMode(CacheMode.DIST_SYNC)
            .clustering().stateTransfer().timeout(30000)
            .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
            .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      configuration = cb.build();
      // all collaborators are Mockito mocks; only the stubs set up in each test are live
      cache = mock(Cache.class);
      when(cache.getName()).thenReturn("testCache");
      rpcManager = mock(RpcManager.class);
      commandsFactory = mock(CommandsFactory.class);
      cacheNotifier = mock(ClusterCacheNotifier.class);
      persistenceManager = mock(PersistenceManager.class);
      dataContainer = mock(InternalDataContainer.class);
      transactionTable = mock(TransactionTable.class);
      stateTransferLock = mock(StateTransferLock.class);
      distributionManager = mock(DistributionManager.class);
      ef = mock(InternalEntryFactory.class);
      lpm = mock(LocalPublisherManager.class);
      when(distributionManager.getCacheTopology()).thenAnswer(invocation -> cacheTopology);
   }
   // Verifies that an outbound transfer in progress is cancelled by a topology update
   // with rebalance (isRebalance=true) and by stop(), and that invalid segment requests fail.
   public void test1() {
      // create list of 6 members
      List<Address> members1 = Arrays.asList(A, B, C, D, E, F);
      List<Address> members2 = new ArrayList<>(members1);
      members2.remove(A);
      members2.remove(F);
      members2.add(G);
      // create CHes
      KeyPartitioner keyPartitioner = new HashFunctionPartitioner(StateProviderTest.NUM_SEGMENTS);
      DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
      DefaultConsistentHash ch1 = chf.create(2, StateProviderTest.NUM_SEGMENTS, members1, null);
      DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
      // create dependencies
      when(rpcManager.getAddress()).thenReturn(A);
      when(rpcManager.invokeCommand(any(Address.class), any(), any(), any())).thenReturn(new CompletableFuture<>());
      // create state provider
      StateProviderImpl stateProvider = new StateProviderImpl();
      TestingUtil.inject(stateProvider, configuration, rpcManager, commandsFactory, cacheNotifier, persistenceManager,
                         dataContainer, transactionTable, stateTransferLock, distributionManager, ef, lpm, keyPartitioner,
                         TransactionOriginatorChecker.LOCAL);
      stateProvider.start();
      final List<InternalCacheEntry> cacheEntries = new ArrayList<>();
      // both keys hash to segment 0 via TestKey
      Object key1 = new TestKey("key1", 0, keyPartitioner);
      Object key2 = new TestKey("key2", 0, keyPartitioner);
      cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
      cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
      when(dataContainer.iterator()).thenAnswer(invocation -> cacheEntries.iterator());
      when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
      when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());
      CacheTopology simpleTopology = new CacheTopology(1, 1, ch1, ch1, ch1,
                                                       CacheTopology.Phase.READ_OLD_WRITE_ALL, ch1.getMembers(),
                                                       persistentUUIDManager.mapAddresses(ch1.getMembers()));
      this.cacheTopology = new LocalizedCacheTopology(CacheMode.DIST_SYNC, simpleTopology, keyPartitioner, A, true);
      stateProvider.onTopologyUpdate(this.cacheTopology, false);
      log.debug("ch1: " + ch1);
      // requesting segments the node owns yields an empty tx list (no transactions stubbed)
      IntSet segmentsToRequest = IntSets.from(ch1.getSegmentsForOwner(members1.get(0)));
      CompletionStage<List<TransactionInfo>> transactionsStage =
         stateProvider.getTransactionsForSegments(members1.get(0), 1, segmentsToRequest);
      List<TransactionInfo> transactions = CompletionStages.join(transactionsStage);
      assertEquals(0, transactions.size());
      // requesting a segment index >= NUM_SEGMENTS must fail with IllegalArgumentException
      CompletionStage<List<TransactionInfo>> transactionsStage2 =
         stateProvider.getTransactionsForSegments(members1.get(0), 1,
                                                  SmallIntSet.of(2, StateProviderTest.NUM_SEGMENTS));
      Exceptions.expectExecutionException(IllegalArgumentException.class, transactionsStage2.toCompletableFuture());
      verifyNoMoreInteractions(stateTransferLock);
      when(dataContainer.iterator(any())).thenReturn(cacheEntries.iterator());
      when(persistenceManager.publishEntries(any(IntSet.class), any(), anyBoolean(), anyBoolean(), any()))
         .thenReturn(Flowable.empty());
      // stub the publisher so the outbound transfer emits both entries then completes segment 0
      SegmentAwarePublisherSupplier<?> supplier = mock(SegmentAwarePublisherSupplier.class);
      when(lpm.entryPublisher(any(), any(), any(), eq(
            EnumUtil.bitSetOf(STATE_TRANSFER_PROGRESS)), any(), any()))
         .thenAnswer(i -> supplier);
      List<SegmentAwarePublisherSupplier.NotificationWithLost<?>> values = cacheEntries.stream()
         .map(ice -> Notifications.value(ice, 0))
         .collect(Collectors.toList());
      values.add(Notifications.segmentComplete(0));
      when(supplier.publisherWithSegments())
         .thenAnswer(i -> Flowable.fromIterable(values));
      stateProvider.startOutboundTransfer(F, 1, IntSets.immutableSet(0), true);
      assertTrue(stateProvider.isStateTransferInProgress());
      log.debug("ch2: " + ch2);
      // a topology update with isRebalance=true cancels the in-progress outbound transfer
      simpleTopology = new CacheTopology(2, 1, ch2, ch2, ch2, CacheTopology.Phase.READ_OLD_WRITE_ALL,
                                         ch2.getMembers(),
                                         persistentUUIDManager.mapAddresses(ch2.getMembers()));
      this.cacheTopology = new LocalizedCacheTopology(CacheMode.DIST_SYNC, simpleTopology, keyPartitioner, A, true);
      stateProvider.onTopologyUpdate(this.cacheTopology, true);
      assertFalse(stateProvider.isStateTransferInProgress());
      // stop() must also cancel any transfer still in progress
      stateProvider.startOutboundTransfer(D, 1, IntSets.immutableSet(0), true);
      assertTrue(stateProvider.isStateTransferInProgress());
      stateProvider.stop();
      assertFalse(stateProvider.isStateTransferInProgress());
   }
   // Same flow as test1 but with 4 entries over 2 segments and a non-rebalance topology
   // update, which must also cancel the outbound transfer (the pushing node left the CH).
   // NOTE(review): most of the setup duplicates test1 — candidate for a shared helper.
   public void test2() {
      // create list of 6 members
      List<Address> members1 = Arrays.asList(A, B, C, D, E, F);
      List<Address> members2 = new ArrayList<>(members1);
      members2.remove(A);
      members2.remove(F);
      members2.add(G);
      // create CHes
      KeyPartitioner keyPartitioner = new HashFunctionPartitioner(StateProviderTest.NUM_SEGMENTS);
      DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
      DefaultConsistentHash ch1 = chf.create(2, NUM_SEGMENTS, members1, null);
      //todo [anistor] it seems that address 6 is not used for un-owned segments
      DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
      // set up dependencies
      when(commandsFactory.buildStateResponseCommand(anyInt(), any(), anyBoolean()))
         .thenAnswer(invocation -> new StateResponseCommand(ByteString.fromString("testCache"),
                                                            (Integer) invocation.getArguments()[0],
                                                            (Collection<StateChunk>) invocation.getArguments()[1],
                                                            true));
      when(rpcManager.getAddress()).thenReturn(A);
      when(rpcManager.invokeCommand(any(Address.class), any(), any(), any())).thenReturn(new CompletableFuture<>());
      // create state provider
      StateProviderImpl stateProvider = new StateProviderImpl();
      TestingUtil.inject(stateProvider, configuration, rpcManager, commandsFactory, cacheNotifier, persistenceManager,
                         dataContainer, transactionTable, stateTransferLock, distributionManager, ef, lpm, keyPartitioner,
                         TransactionOriginatorChecker.LOCAL);
      stateProvider.start();
      final List<InternalCacheEntry> cacheEntries = new ArrayList<>();
      // two keys in segment 0 and two in segment 1
      Object key1 = new TestKey("key1", 0, keyPartitioner);
      Object key2 = new TestKey("key2", 0, keyPartitioner);
      Object key3 = new TestKey("key3", 1, keyPartitioner);
      Object key4 = new TestKey("key4", 1, keyPartitioner);
      cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
      cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
      cacheEntries.add(new ImmortalCacheEntry(key3, "value3"));
      cacheEntries.add(new ImmortalCacheEntry(key4, "value4"));
      when(dataContainer.iterator(any())).thenReturn(cacheEntries.iterator());
      when(persistenceManager.publishEntries(any(IntSet.class), any(), anyBoolean(), anyBoolean(), any()))
         .thenReturn(Flowable.empty());
      when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
      when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());
      CacheTopology simpleTopology = new CacheTopology(1, 1, ch1, ch1, ch1, CacheTopology.Phase.READ_OLD_WRITE_ALL,
                                                       ch1.getMembers(),
                                                       persistentUUIDManager.mapAddresses(ch1.getMembers()));
      this.cacheTopology = new LocalizedCacheTopology(CacheMode.DIST_SYNC, simpleTopology, keyPartitioner, A, true);
      stateProvider.onTopologyUpdate(this.cacheTopology, false);
      log.debug("ch1: " + ch1);
      // owned segments: empty transaction list (no transactions stubbed)
      IntSet segmentsToRequest = IntSets.from(ch1.getSegmentsForOwner(members1.get(0)));
      CompletionStage<List<TransactionInfo>> transactionsStage =
         stateProvider.getTransactionsForSegments(members1.get(0), 1, segmentsToRequest);
      List<TransactionInfo> transactions = CompletionStages.join(transactionsStage);
      assertEquals(0, transactions.size());
      // out-of-range segment index must be rejected
      CompletionStage<List<TransactionInfo>> transactionsStage2 =
         stateProvider.getTransactionsForSegments(members1.get(0), 1,
                                                  SmallIntSet.of(2, StateProviderTest.NUM_SEGMENTS));
      Exceptions.expectExecutionException(IllegalArgumentException.class, transactionsStage2.toCompletableFuture());
      verifyNoMoreInteractions(stateTransferLock);
      SegmentAwarePublisherSupplier<?> supplier = mock(SegmentAwarePublisherSupplier.class);
      when(lpm.entryPublisher(any(), any(), any(),
                              eq(EnumUtil.bitSetOf(STATE_TRANSFER_PROGRESS)), any(), any()))
         .thenAnswer(i -> supplier);
      List<SegmentAwarePublisherSupplier.NotificationWithLost<?>> values = cacheEntries.stream()
         .map(ice -> Notifications.value(ice, 0))
         .collect(Collectors.toList());
      values.add(Notifications.segmentComplete(0));
      when(supplier.publisherWithSegments())
         .thenAnswer(i -> Flowable.fromIterable(values));
      stateProvider.startOutboundTransfer(F, 1, IntSets.immutableSet(0), true);
      assertTrue(stateProvider.isStateTransferInProgress());
      // TestingUtil.sleepThread(15000);
      log.debug("ch2: " + ch2);
      // even a non-rebalance topology update cancels the transfer here (A left members2)
      simpleTopology = new CacheTopology(2, 1, ch2, ch2, ch2, CacheTopology.Phase.READ_OLD_WRITE_ALL,
                                         ch2.getMembers(),
                                         persistentUUIDManager.mapAddresses(ch2.getMembers()));
      this.cacheTopology = new LocalizedCacheTopology(CacheMode.DIST_SYNC, simpleTopology, keyPartitioner, A, true);
      stateProvider.onTopologyUpdate(this.cacheTopology, false);
      assertFalse(stateProvider.isStateTransferInProgress());
      // stop() must cancel a transfer still in progress
      stateProvider.startOutboundTransfer(E, 1, IntSets.immutableSet(0), true);
      assertTrue(stateProvider.isStateTransferInProgress());
      stateProvider.stop();
      assertFalse(stateProvider.isStateTransferInProgress());
   }
}
| 16,714
| 49.805471
| 149
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/TxDuringStateTransferTest.java
|
package org.infinispan.statetransfer;

import static java.lang.String.valueOf;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;

import jakarta.transaction.Status;

import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransaction;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.testng.annotations.Test;

/**
 * Checks if the transactions are forward correctly to the new owners
 *
 * @author Pedro Ruivo
 * @since 6.0
 */
@Test(groups = "functional", testName = "statetransfer.TxDuringStateTransferTest")
@CleanupAfterMethod
public class TxDuringStateTransferTest extends MultipleCacheManagersTest {

   // Value written before the transaction begins; conditional operations match against it.
   private static final String INITIAL_VALUE = "v1";
   // Value every owner must hold after the transaction commits (except for removes).
   private static final String FINAL_VALUE = "v2";

   public void testPut() throws Exception {
      performTest(Operation.PUT);
   }

   public void testRemove() throws Exception {
      performTest(Operation.REMOVE);
   }

   public void testReplace() throws Exception {
      performTest(Operation.REPLACE);
   }

   public void testConditionalPut() throws Exception {
      performTest(Operation.CONDITIONAL_PUT);
   }

   public void testConditionalRemove() throws Exception {
      performTest(Operation.CONDITIONAL_REMOVE);
   }

   public void testConditionalReplace() throws Exception {
      performTest(Operation.CONDITIONAL_REPLACE);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      // Full 2PC (no synchronization-based enlistment) so the prepare can be observed
      // and the commit can be driven manually from the test.
      builder.transaction()
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
            .useSynchronization(false)
            .recovery().disable();
      // 3 owners out of 4 nodes: killing one owner forces the prepared tx to be
      // forwarded to the node that becomes the new owner during rebalance.
      builder.clustering()
            .stateTransfer().fetchInMemoryState(true)
            .hash().numOwners(3);
      createClusteredCaches(4, TestDataSCI.INSTANCE, builder);
   }

   /**
    * Runs one operation inside a transaction that is prepared, survives the crash of a
    * backup owner, and is then committed; finally verifies the value on every node.
    */
   private void performTest(Operation operation) throws Exception {
      assertClusterSize("Wrong number of caches.", 4);
      // Key owned by caches 0, 1 and 2 (cache 3 becomes an owner only after the kill).
      final Object key = new MagicKey(cache(0), cache(1), cache(2));
      //init
      operation.init(cache(0), key);

      final EmbeddedTransactionManager transactionManager = (EmbeddedTransactionManager) tm(0);
      transactionManager.begin();
      operation.perform(cache(0), key);
      final EmbeddedTransaction transaction = transactionManager.getTransaction();
      // Run only the prepare phase; the tx stays in STATUS_PREPARED across the crash.
      transaction.runPrepare();
      assertEquals("Wrong transaction status before killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());

      //now, we kill cache(1). the transaction is prepared in cache(1) and it should be forward to cache(3)
      killMember(1);

      assertEquals("Wrong transaction status after killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());
      transaction.runCommit(false);

      for (Cache<Object, Object> cache : caches()) {
         //all the caches are owner
         operation.check(cache, key, valueOf(address(cache)));
      }
   }

   /**
    * The cache write performed inside the transaction, plus its setup and verification.
    */
   private enum Operation {
      PUT,
      REMOVE,
      REPLACE,
      CONDITIONAL_PUT,
      CONDITIONAL_REMOVE,
      CONDITIONAL_REPLACE;

      // Seed INITIAL_VALUE except for putIfAbsent, which requires the key to be absent.
      public final void init(Cache<Object, Object> cache, Object key) {
         if (this != CONDITIONAL_PUT) {
            cache.put(key, INITIAL_VALUE);
         }
      }

      // Execute the operation under test inside the caller's transaction.
      public final void perform(Cache<Object, Object> cache, Object key) {
         switch (this) {
            case PUT:
               cache.put(key, FINAL_VALUE);
               break;
            case REMOVE:
               cache.remove(key);
               break;
            case REPLACE:
               cache.replace(key, FINAL_VALUE);
               break;
            case CONDITIONAL_PUT:
               cache.putIfAbsent(key, FINAL_VALUE);
               break;
            case CONDITIONAL_REMOVE:
               cache.remove(key, INITIAL_VALUE);
               break;
            case CONDITIONAL_REPLACE:
               cache.replace(key, INITIAL_VALUE, FINAL_VALUE);
               break;
         }
      }

      // Inspect the data container directly (bypassing remote lookups) on each owner.
      public final void check(Cache<Object, Object> cache, Object key, String cacheAddress) {
         //all the caches are owner. So, check in data container.
         DataContainer dataContainer = cache.getAdvancedCache().getDataContainer();
         if (this == REMOVE || this == CONDITIONAL_REMOVE) {
            assertFalse("Key was not removed in '" + cacheAddress + "'!", dataContainer.containsKey(key));
         } else {
            InternalCacheEntry entry = dataContainer.get(key);
            assertNotNull("Cache '" + cacheAddress + "' does not contains entry!", entry);
            assertEquals("Cache '" + cacheAddress + "' has wrong value!", FINAL_VALUE, entry.getValue());
         }
      }
   }
}
| 5,393
| 34.721854
| 107
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ReplStateTransferOnJoinConsistencyTest.java
|
package org.infinispan.statetransfer;

import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;

/**
 * Replicated-mode run of the ISPN-2362 / ISPN-2502 join-consistency scenario: a 2-node
 * cluster receives a third member and state transfer consistency is verified. The parent
 * class drives the operations; this subclass only swaps in a REPL_SYNC configuration,
 * covering both an optimistic (write-skew / REPEATABLE_READ) and a pessimistic tx setup.
 *
 * @author anistor@redhat.com
 * @since 5.2
 */
@Test(groups = "functional", testName = "statetransfer.ReplStateTransferOnJoinConsistencyTest")
@CleanupAfterMethod
public class ReplStateTransferOnJoinConsistencyTest extends DistStateTransferOnJoinConsistencyTest {

   @Override
   protected ConfigurationBuilder createConfigurationBuilder(boolean isOptimistic) {
      ConfigurationBuilder cfg = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true, true);

      // Explicit transactional mode with the embedded (in-VM) transaction manager.
      cfg.transaction()
            .transactionMode(TransactionMode.TRANSACTIONAL)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup());

      if (isOptimistic) {
         // Optimistic locking; REPEATABLE_READ enables the write-skew check.
         cfg.transaction().lockingMode(LockingMode.OPTIMISTIC);
         cfg.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      } else {
         cfg.transaction().lockingMode(LockingMode.PESSIMISTIC);
      }

      // No L1 cache; short lock timeout so lock contention fails fast in tests.
      cfg.clustering().l1().disable();
      cfg.locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());

      // Few segments, in-memory state transfer, and joiners do not block on the
      // initial transfer.
      cfg.clustering().hash().numSegments(10);
      cfg.clustering().stateTransfer().fetchInMemoryState(true).awaitInitialTransfer(false);

      return cfg;
   }
}
| 1,960
| 43.568182
| 121
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ConcurrentStartForkChannelTest.java
|
package org.infinispan.statetransfer;

import static org.infinispan.test.TestingUtil.blockUntilViewsReceived;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;

import java.io.ByteArrayInputStream;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.infinispan.Cache;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.JGroupsConfigBuilder;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.jgroups.BytesMessage;
import org.jgroups.JChannel;
import org.jgroups.Message;
import org.jgroups.blocks.RequestCorrelator;
import org.jgroups.conf.ClassConfigurator;
import org.jgroups.fork.ForkChannel;
import org.jgroups.fork.UnknownForkHandler;
import org.jgroups.protocols.FORK;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;

/**
 * Tests concurrent startup of caches using ForkChannels.
 *
 * @author Dan Berindei
 * @since 9.0
 */
@Test(testName = "statetransfer.ConcurrentStartForkChannelTest", groups = "functional")
@CleanupAfterMethod
public class ConcurrentStartForkChannelTest extends MultipleCacheManagersTest {

   // Empty payload used as the reply body when the fork channel/stack is not yet registered.
   public static final byte[] FORK_NOT_FOUND_BUFFER = Util.EMPTY_BYTE_ARRAY;
   public static final String CACHE_NAME = "repl";

   @Override
   protected void createCacheManagers() throws Throwable {
      // The test method will create the cache managers
   }

   // Run the scenario with each manager starting first, so both "coordinator first"
   // and "coordinator last" orders are covered.
   @DataProvider(name = "startOrder")
   public Object[][] startOrder() {
      return new Object[][]{{0, 1}, {1, 0}};
   }

   @Test(timeOut = 30000, dataProvider = "startOrder")
   public void testConcurrentStart(int eagerManager, int lazyManager) throws Exception {
      TestResourceTracker.testThreadStarted(this.getTestName());
      ConfigurationBuilder replCfg = new ConfigurationBuilder();
      replCfg.clustering().cacheMode(CacheMode.REPL_SYNC).stateTransfer().timeout(30, TimeUnit.SECONDS);

      String name1 = TestResourceTracker.getNextNodeName();
      String name2 = TestResourceTracker.getNextNodeName();

      // Create and connect both channels beforehand
      JChannel ch1 = createChannel(name1);
      JChannel ch2 = createChannel(name2);

      // Create the cache managers, but do not start them yet
      EmbeddedCacheManager cm1 = createCacheManager(replCfg, name1, ch1);
      EmbeddedCacheManager cm2 = createCacheManager(replCfg, name2, ch2);
      cm1.defineConfiguration(CACHE_NAME, replCfg.build());
      cm2.defineConfiguration(CACHE_NAME, replCfg.build());

      try {
         log.debugf("Cache managers created. Starting the caches");
         // When the coordinator starts first, it's ok to just start the caches in sequence.
         // When the coordinator starts last, however, the other node is not able to start before the
         // coordinator has the ClusterTopologyManager running.
         Future<Cache<String, String>> c1rFuture = fork(() -> manager(eagerManager).getCache(CACHE_NAME));
         Thread.sleep(1000);
         Cache<String, String> c2r = manager(lazyManager).getCache(CACHE_NAME);
         Cache<String, String> c1r = c1rFuture.get(10, TimeUnit.SECONDS);

         blockUntilViewsReceived(10000, cm1, cm2);
         waitForNoRebalance(c1r, c2r);
      } finally {
         // Stopping the cache managers isn't enough, because it will only close the ForkChannels
         cm1.stop();
         ch1.close();
         cm2.stop();
         ch2.close();
      }
   }

   /**
    * Wraps the main channel in a ForkChannel and builds an (unstarted) cache manager on it.
    * The FORK protocol gets an UnknownForkHandler that replies with an empty buffer
    * (interpreted as CacheNotFoundResponse) so requests arriving before the fork channel
    * is registered don't block the sender until its RPC timeout.
    */
   private EmbeddedCacheManager createCacheManager(ConfigurationBuilder cacheCfg, String name,
                                                   JChannel channel) throws Exception {
      FORK fork = new FORK();
      fork.setUnknownForkHandler(new UnknownForkHandler() {
         @Override
         public Object handleUnknownForkStack(Message message, String forkStackId) {
            return handle(message);
         }

         @Override
         public Object handleUnknownForkChannel(Message message, String forkChannelId) {
            return handle(message);
         }

         // Echo a response carrying the original FORK header and a matching
         // RequestCorrelator RSP header so the caller's request completes.
         private Object handle(Message message) {
            short id = ClassConfigurator.getProtocolId(RequestCorrelator.class);
            RequestCorrelator.Header header = message.getHeader(id);
            if (header != null) {
               log.debugf("Sending CacheNotFoundResponse reply for %s", header);
               short flags = JGroupsTransport.REPLY_FLAGS;
               Message response = new BytesMessage(message.getSrc()).setFlag(flags, false);
               response.putHeader(FORK.ID, message.getHeader(FORK.ID));
               response.putHeader(id,
                     new RequestCorrelator.Header(RequestCorrelator.Header.RSP, header.req_id, id));
               response.setArray(FORK_NOT_FOUND_BUFFER);
               fork.down(response);
            }
            return null;
         }
      });
      channel.getProtocolStack().addProtocol(fork);
      ForkChannel fch = new ForkChannel(channel, "stack1", "channel1");

      GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
      gcb.transport().transport(new JGroupsTransport(fch));
      gcb.transport().nodeName(channel.getName());
      gcb.transport().distributedSyncTimeout(30, TimeUnit.SECONDS);
      EmbeddedCacheManager cm = TestCacheManagerFactory.newDefaultCacheManager(false, gcb, cacheCfg);
      registerCacheManager(cm);
      return cm;
   }

   // Builds and connects a plain JChannel from the test JGroups configuration.
   private JChannel createChannel(String name) throws Exception {
      String configString = JGroupsConfigBuilder.getJGroupsConfig(ConcurrentStartForkChannelTest.class.getName(),
            new TransportFlags());
      JChannel channel = new JChannel(new ByteArrayInputStream(configString.getBytes()));
      channel.setName(name);
      channel.connect(ConcurrentStartForkChannelTest.class.getSimpleName());
      log.tracef("Channel %s connected: %s", channel, channel.getViewAsString());
      return channel;
   }
}
| 6,477
| 40.793548
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ForkChannelRestartTest.java
|
package org.infinispan.statetransfer;

import static org.infinispan.test.TestingUtil.blockUntilViewsReceived;
import static org.infinispan.test.TestingUtil.getDiscardForCache;
import static org.infinispan.test.TestingUtil.installNewView;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import org.infinispan.Cache;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.JGroupsConfigBuilder;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.jgroups.BytesMessage;
import org.jgroups.JChannel;
import org.jgroups.Message;
import org.jgroups.blocks.RequestCorrelator;
import org.jgroups.conf.ClassConfigurator;
import org.jgroups.fork.ForkChannel;
import org.jgroups.fork.UnknownForkHandler;
import org.jgroups.protocols.FORK;
import org.testng.annotations.Test;

/**
 * Tests restart of nodes using ForkChannels.
 *
 * @author Dan Berindei
 * @since 10.0
 */
@Test(testName = "statetransfer.ForkChannelRestartTest", groups = "functional")
@CleanupAfterMethod
public class ForkChannelRestartTest extends MultipleCacheManagersTest {

   // Empty reply payload used when the fork channel/stack is not (yet/anymore) registered.
   private static final byte[] FORK_NOT_FOUND_BUFFER = Util.EMPTY_BYTE_ARRAY;
   private static final String CACHE_NAME = "repl";
   private static final int CLUSTER_SIZE = 3;

   @Override
   protected void createCacheManagers() throws Throwable {
      // The test method will create the cache managers
   }

   /**
    * Crashes manager 1 while its JGroups channel stays in the view, starts a replacement
    * node, then closes the stale channel and checks that the replacement finishes joining
    * and the rebalance completes.
    */
   public void testRestart() throws Exception {
      TestResourceTracker.testThreadStarted(this.getTestName());
      ConfigurationBuilder replCfg = new ConfigurationBuilder();
      replCfg.clustering().cacheMode(CacheMode.REPL_SYNC).stateTransfer().timeout(30, TimeUnit.SECONDS);
      replCfg.clustering().partitionHandling().whenSplit(PartitionHandling.DENY_READ_WRITES);

      // Slot CLUSTER_SIZE is reserved for the replacement node started mid-test.
      String[] names = new String[CLUSTER_SIZE + 1];
      JChannel[] channels = new JChannel[CLUSTER_SIZE + 1];
      EmbeddedCacheManager[] managers = new EmbeddedCacheManager[CLUSTER_SIZE + 1];
      for (int i = 0; i < CLUSTER_SIZE; i++) {
         configureManager(replCfg, names, channels, managers, i);
      }
      for (int i = 0; i < CLUSTER_SIZE; i++) {
         managers[i].getCache(CACHE_NAME);
      }

      // Crash the manager: discard all its traffic and stop it, but leave its channel
      // connected so JGroups still counts it as a member.
      log.debugf("Cache managers created. Crashing manager %s but keeping the channel in the view", names[1]);
      getDiscardForCache(managers[1]).discardAll(true);
      installNewView(managers[1]);
      managers[1].stop();

      // Start the replacement node; its join cannot finish while the dead member is
      // still in the view, hence the fork + sleep before closing the channel.
      configureManager(replCfg, names, channels, managers, CLUSTER_SIZE);
      Future<Cache<Object, Object>> future = fork(() -> managers[CLUSTER_SIZE].getCache(CACHE_NAME));
      Thread.sleep(1000);

      log.debugf("Stopping channel %s", names[1]);
      channels[1].close();

      List<EmbeddedCacheManager> liveManagers = new ArrayList<>(Arrays.asList(managers));
      liveManagers.remove(1);
      blockUntilViewsReceived(10000, false, liveManagers);
      waitForNoRebalance(liveManagers.stream().map(cm -> cm.getCache(CACHE_NAME)).collect(Collectors.toList()));
      log.debug("Rebalance finished successfully");

      future.get(10, TimeUnit.SECONDS);
   }

   // Creates the channel and the (unstarted) manager for slot i and defines the cache.
   private void configureManager(ConfigurationBuilder replCfg, String[] names,
                                 JChannel[] channels,
                                 EmbeddedCacheManager[] managers, int i) throws Exception {
      // Create the ForkChannels
      names[i] = TestResourceTracker.getNextNodeName();
      channels[i] = createChannel(names[i]);
      // Then start the managers
      managers[i] = createCacheManager(replCfg, names[i], channels[i]);
      managers[i].defineConfiguration(CACHE_NAME, replCfg.build());
   }

   // Builds a cache manager whose transport is a ForkChannel on top of the given channel.
   private EmbeddedCacheManager createCacheManager(ConfigurationBuilder cacheCfg, String name,
                                                   JChannel channel) throws Exception {
      ForkChannel fch = new ForkChannel(channel, "stack1", "channel1");

      GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
      gcb.transport().nodeName(name);
      gcb.transport().transport(new JGroupsTransport(fch));
      gcb.transport().distributedSyncTimeout(40, TimeUnit.SECONDS);
      EmbeddedCacheManager cm = TestCacheManagerFactory.newDefaultCacheManager(false, gcb, cacheCfg);
      registerCacheManager(cm);
      return cm;
   }

   /**
    * Builds and connects a JChannel (with FD enabled) and installs a FORK protocol whose
    * UnknownForkHandler replies with an empty buffer — read as CacheNotFoundResponse — so
    * senders don't wait for their RPC timeout when the fork channel isn't registered.
    */
   private JChannel createChannel(String name) throws Exception {
      String configString = JGroupsConfigBuilder.getJGroupsConfig(ForkChannelRestartTest.class.getName(),
            new TransportFlags().withFD(true));
      JChannel channel = new JChannel(new ByteArrayInputStream(configString.getBytes()));
      // Register for cleanup so the channel is closed even if the test fails early.
      TestResourceTracker.addResource(new TestResourceTracker.Cleaner<JChannel>(channel) {
         @Override
         public void close() {
            ref.close();
         }
      });
      channel.setName(name);
      FORK fork = new FORK();
      fork.setUnknownForkHandler(new UnknownForkHandler() {
         @Override
         public Object handleUnknownForkStack(Message message, String forkStackId) {
            return handle(message);
         }

         @Override
         public Object handleUnknownForkChannel(Message message, String forkChannelId) {
            return handle(message);
         }

         // Echo back a response with the original FORK header and a matching
         // RequestCorrelator RSP header so the pending request completes.
         private Object handle(Message message) {
            short id = ClassConfigurator.getProtocolId(RequestCorrelator.class);
            RequestCorrelator.Header requestHeader = message.getHeader(id);
            if (requestHeader != null) {
               log.debugf("Sending CacheNotFoundResponse reply from %s for %s", name, requestHeader);
               short flags = JGroupsTransport.REPLY_FLAGS;
               Message response = new BytesMessage(message.getSrc()).setFlag(flags, false);

               FORK.ForkHeader forkHeader = message.getHeader(FORK.ID);
               response.putHeader(FORK.ID, forkHeader);
               response.putHeader(id, new RequestCorrelator.Header(RequestCorrelator.Header.RSP, requestHeader.req_id, id));
               response.setArray(FORK_NOT_FOUND_BUFFER);

               fork.down(response);
            }
            return null;
         }
      });
      channel.getProtocolStack().addProtocol(fork);
      channel.connect("FORKISPN");
      log.tracef("Channel %s connected: %s", channel, channel.getViewAsString());
      return channel;
   }
}
| 7,168
| 41.170588
| 124
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferTestingUtil.java
|
package org.infinispan.statetransfer;

import org.infinispan.Cache;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.test.TestingUtil;

/**
 * StateTransferTestingUtil.
 *
 * Well-known keys/values and helpers shared by state transfer tests. The "a_b" and "a_c"
 * entries are written and then evicted (so they live only in the store); the "a_d" keys
 * are never written and must stay absent everywhere.
 *
 * @author Galder Zamarreño
 * @since 4.0
 */
public class StateTransferTestingUtil {

   public static final String A_B_NAME = "a_b_name";
   public static final String A_C_NAME = "a_c_name";
   // BUG FIX: this constant previously duplicated A_D_AGE's value ("a_d_age"), so every
   // A_D_NAME check silently re-tested the age key instead of the name key.
   public static final String A_D_NAME = "a_d_name";
   public static final String A_B_AGE = "a_b_age";
   public static final String A_C_AGE = "a_c_age";
   public static final String A_D_AGE = "a_d_age";
   public static final String JOE = "JOE";
   public static final String BOB = "BOB";
   public static final String JANE = "JANE";
   public static final Integer TWENTY = 20;
   public static final Integer FORTY = 40;

   /** Asserts that none of the well-known keys are present in the cache's first store. */
   public static void verifyNoDataOnLoader(Cache<Object, Object> c) throws Exception {
      DummyInMemoryStore l = TestingUtil.getFirstStore(c);
      assert !l.contains(A_B_AGE);
      assert !l.contains(A_B_NAME);
      assert !l.contains(A_C_AGE);
      assert !l.contains(A_C_NAME);
      assert !l.contains(A_D_AGE);
      assert !l.contains(A_D_NAME);
   }

   /** Asserts the cache holds no in-memory entries at all. */
   public static void verifyNoData(Cache<Object, Object> c) {
      assert c.isEmpty() : "Cache should be empty!";
   }

   /**
    * Writes the "a_b"/"a_c" entries and evicts every well-known key, pushing the written
    * entries into the store and leaving memory empty. The "a_d" keys are only evicted,
    * never written.
    */
   public static void writeInitialData(final Cache<Object, Object> c) {
      c.put(A_B_NAME, JOE);
      c.put(A_B_AGE, TWENTY);
      c.put(A_C_NAME, BOB);
      c.put(A_C_AGE, FORTY);
      c.evict(A_B_NAME);
      c.evict(A_B_AGE);
      c.evict(A_C_NAME);
      c.evict(A_C_AGE);
      c.evict(A_D_NAME);
      c.evict(A_D_AGE);
   }

   /** Asserts the store of the cache holds exactly the values written by {@link #writeInitialData}. */
   public static void verifyInitialDataOnLoader(Cache<Object, Object> c) throws Exception {
      DummyInMemoryStore l = TestingUtil.getFirstStore(c);
      assert l.contains(A_B_AGE);
      assert l.contains(A_B_NAME);
      assert l.contains(A_C_AGE);
      assert l.contains(A_C_NAME);
      assert l.loadEntry(A_B_AGE).getValue().equals(TWENTY);
      assert l.loadEntry(A_B_NAME).getValue().equals(JOE);
      assert l.loadEntry(A_C_AGE).getValue().equals(FORTY);
      assert l.loadEntry(A_C_NAME).getValue().equals(BOB);
   }

   /** Asserts the cache serves the values written by {@link #writeInitialData}. */
   public static void verifyInitialData(Cache<Object, Object> c) {
      assert JOE.equals(c.get(A_B_NAME)) : "Incorrect value for key " + A_B_NAME;
      assert TWENTY.equals(c.get(A_B_AGE)) : "Incorrect value for key " + A_B_AGE;
      assert BOB.equals(c.get(A_C_NAME)) : "Incorrect value for key " + A_C_NAME;
      assert FORTY.equals(c.get(A_C_AGE)) : "Incorrect value for key " + A_C_AGE;
   }
}
| 2,584
| 34.902778
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ConcurrentStartTest.java
|
package org.infinispan.statetransfer;

import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.blockUntilViewsReceived;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.infinispan.test.TestingUtil.extractGlobalComponentRegistry;
import static org.infinispan.test.TestingUtil.replaceComponent;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.testng.AssertJUnit.assertEquals;

import java.util.concurrent.Callable;
import java.util.concurrent.Future;

import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.topology.CacheJoinCommand;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.InboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.XSiteReplicateCommand;
import org.testng.annotations.Test;

/**
 * Tests concurrent startup of replicated and distributed caches
 *
 * @author Dan Berindei
 * @since 7.2
 */
@Test(testName = "statetransfer.ConcurrentStartTest", groups = "functional")
public class ConcurrentStartTest extends MultipleCacheManagersTest {

   public static final String REPL_CACHE_NAME = "repl";
   public static final String DIST_CACHE_NAME = "dist";

   @Override
   protected void createCacheManagers() throws Throwable {
      // The test method will create the cache managers
   }

   /**
    * Starts a repl and a dist cache on two managers concurrently while the inbound
    * handlers briefly block CacheJoinCommand delivery, then verifies that views form,
    * rebalance finishes, and both caches replicate data.
    */
   @Test(timeOut = 60000)
   public void testConcurrentStart() throws Exception {
      TestResourceTracker.testThreadStarted(this.getTestName());
      final CheckPoint checkPoint = new CheckPoint();

      EmbeddedCacheManager cm1 = createCacheManager();
      EmbeddedCacheManager cm2 = createCacheManager();

      // Install the blocking invocation handlers before the managers start so the
      // replacement happens while the global registry is still INSTANTIATED.
      assertEquals(ComponentStatus.INSTANTIATED, extractGlobalComponentRegistry(cm1).getStatus());
      replaceInboundInvocationHandler(cm1, checkPoint, 0);
      assertEquals(ComponentStatus.INSTANTIATED, extractGlobalComponentRegistry(cm2).getStatus());
      replaceInboundInvocationHandler(cm2, checkPoint, 1);

      log.debugf("Cache managers created. Starting the caches");
      Future<Object> repl1Future = fork(new CacheStartCallable(cm1, REPL_CACHE_NAME));
      Future<Object> repl2Future = fork(new CacheStartCallable(cm2, REPL_CACHE_NAME));
      Future<Object> dist1Future = fork(new CacheStartCallable(cm1, DIST_CACHE_NAME));
      Future<Object> dist2Future = fork(new CacheStartCallable(cm2, DIST_CACHE_NAME));

      // The joiner always sends a POLICY_GET_STATUS command to the coordinator.
      // The coordinator may or may not send a GET_STATUS command to the other node,
      // depending on whether the second node joined the cluster quickly enough.
      // Wait for at least one of the POLICY_GET_STATUS/GET_STATUS commands to block
      checkPoint.peek(2, SECONDS, "blocked_0", "blocked_1");
      // Now allow both to continue.
      checkPoint.trigger("unblocked_0", CheckPoint.INFINITE);
      checkPoint.trigger("unblocked_1", CheckPoint.INFINITE);

      repl1Future.get(10, SECONDS);
      repl2Future.get(10, SECONDS);
      dist1Future.get(10, SECONDS);
      dist2Future.get(10, SECONDS);

      Cache<String, String> c1r = cm1.getCache(REPL_CACHE_NAME);
      Cache<String, String> c1d = cm1.getCache(DIST_CACHE_NAME);
      Cache<String, String> c2r = cm2.getCache(REPL_CACHE_NAME);
      Cache<String, String> c2d = cm2.getCache(DIST_CACHE_NAME);
      blockUntilViewsReceived(10000, cm1, cm2);
      waitForNoRebalance(c1r, c2r);
      waitForNoRebalance(c1d, c2d);

      // Smoke-test replication on both caches.
      c1r.put("key", "value");
      assertEquals("value", c2r.get("key"));
      c1d.put("key", "value");
      assertEquals("value", c2d.get("key"));
   }

   // Creates an unstarted manager with "repl" (REPL_SYNC) and "dist" (DIST_SYNC) caches.
   private EmbeddedCacheManager createCacheManager() {
      GlobalConfigurationBuilder gcb = new GlobalConfigurationBuilder();
      gcb.transport().defaultTransport();
      TestCacheManagerFactory.amendGlobalConfiguration(gcb, new TransportFlags());
      ConfigurationBuilder defaultCacheConfig = new ConfigurationBuilder();
      EmbeddedCacheManager cm = TestCacheManagerFactory.newDefaultCacheManager(false, gcb, defaultCacheConfig);
      registerCacheManager(cm);
      Configuration replCfg = new ConfigurationBuilder().clustering().cacheMode(CacheMode.REPL_SYNC).build();
      cm.defineConfiguration(REPL_CACHE_NAME, replCfg);
      Configuration distCfg = new ConfigurationBuilder().clustering().cacheMode(CacheMode.DIST_SYNC).build();
      cm.defineConfiguration(DIST_CACHE_NAME, distCfg);
      return cm;
   }

   // Wraps the manager's InboundInvocationHandler with the blocking decorator.
   private void replaceInboundInvocationHandler(EmbeddedCacheManager cm, CheckPoint checkPoint, int index) {
      InboundInvocationHandler handler = extractGlobalComponent(cm, InboundInvocationHandler.class);
      BlockingInboundInvocationHandler ourHandler =
            new BlockingInboundInvocationHandler(handler, checkPoint, index);
      replaceComponent(cm, InboundInvocationHandler.class, ourHandler, true);
   }

   /** Starts (retrieves) the named cache on the given manager; used as a fork task. */
   private static class CacheStartCallable implements Callable<Object> {
      private final EmbeddedCacheManager cm;
      private final String cacheName;

      public CacheStartCallable(EmbeddedCacheManager cm, String cacheName) {
         this.cm = cm;
         this.cacheName = cacheName;
      }

      @Override
      public Object call() throws Exception {
         cm.getCache(cacheName);
         return null;
      }
   }

   /**
    * Delegating handler that pauses delivery of CacheJoinCommand until the test fires
    * the matching "unblocked_&lt;index&gt;" checkpoint event.
    */
   private static class BlockingInboundInvocationHandler implements InboundInvocationHandler {
      // Loggers are immutable; hold as a per-class constant instead of a mutable instance field.
      private static final Log log = LogFactory.getLog(ConcurrentStartTest.class);
      private final CheckPoint checkPoint;
      private final InboundInvocationHandler delegate;
      private final int index;

      public BlockingInboundInvocationHandler(InboundInvocationHandler delegate, CheckPoint checkPoint,
                                              int index) {
         this.delegate = delegate;
         this.checkPoint = checkPoint;
         this.index = index;
      }

      @Override
      public void handleFromCluster(Address origin, ReplicableCommand command, Reply reply, DeliverOrder order) {
         if (command instanceof CacheJoinCommand) {
            try {
               checkPoint.trigger("blocked_" + index);
               checkPoint.awaitStrict("unblocked_" + index, 10, SECONDS);
            } catch (Exception e) {
               // Best effort: log and deliver anyway rather than dropping the command.
               log.warnf(e, "Error while blocking before command %s", command);
            }
         }
         delegate.handleFromCluster(origin, command, reply, order);
      }

      @Override
      public void handleFromRemoteSite(String origin, XSiteReplicateCommand command, Reply reply, DeliverOrder order) {
         delegate.handleFromRemoteSite(origin, command, reply, order);
      }
   }
}
| 7,553
| 42.165714
| 119
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/InitialStateTransferCompletionTest.java
|
package org.infinispan.statetransfer;

import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.interceptors.impl.InvocationContextInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.testng.annotations.Test;

/**
 * Tests that config option StateTransferConfiguration.awaitInitialTransfer() is honored correctly.
 *
 * @author anistor@redhat.com
 * @since 5.2
 */
@Test(groups = "functional", testName = "statetransfer.InitialStateTransferCompletionTest")
@CleanupAfterMethod
public class InitialStateTransferCompletionTest extends MultipleCacheManagersTest {

   // Kept as a field so the test method can add an interceptor before the third node joins.
   private ConfigurationBuilder cacheConfigBuilder;

   @Override
   protected void createCacheManagers() throws Throwable {
      cacheConfigBuilder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true, true);
      cacheConfigBuilder.transaction().transactionMode(TransactionMode.TRANSACTIONAL)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
            .lockingMode(LockingMode.PESSIMISTIC)
            .clustering().hash().numOwners(10) // a number bigger than actual number of nodes will make this distributed cluster behave as if fully replicated
            .stateTransfer().fetchInMemoryState(true)
            .awaitInitialTransfer(true); // setting this to false will lead to test failure
      createCluster(cacheConfigBuilder, 2);
      waitForClusterToForm();
   }

   public void testStateTransferCompletion() throws Exception {
      final int numKeys = 100;

      // populate cache
      Cache<Object, Object> cache0 = cache(0);
      for (int i = 0; i < numKeys; i++) {
         cache0.put("k" + i, "v" + i);
      }

      final AtomicBoolean ignoreFurtherStateTransfer = new AtomicBoolean();
      final AtomicInteger transferredKeys = new AtomicInteger();
      // Count state-transfer puts on the joiner; the interceptor is only added to the
      // configuration used by the third node created below.
      cacheConfigBuilder.customInterceptors().addInterceptor().before(InvocationContextInterceptor.class)
            .interceptor(new CountInterceptor(ignoreFurtherStateTransfer, transferredKeys));

      // add the third member
      log.trace("Adding new member ...");
      addClusterEnabledCacheManager(cacheConfigBuilder);
      Cache<String, String> cache2 = cache(2); //this must return only when all state was received
      // Freeze the counter: anything after getCache() returned would mean the joiner
      // kept receiving state after claiming the initial transfer had completed.
      ignoreFurtherStateTransfer.set(true);
      log.trace("Successfully added a new member");

      // check number of transferred keys
      int actualTransferredKeys = transferredKeys.get();
      assertEquals(numKeys, actualTransferredKeys);

      // check the current topology
      LocalizedCacheTopology cacheTopology = cache2.getAdvancedCache().getDistributionManager().getCacheTopology();
      assertNull(cacheTopology.getPendingCH());
      ConsistentHash readCh = cacheTopology.getReadConsistentHash();
      assertTrue(readCh.getMembers().contains(address(2)));

      // check number of keys directly in data container
      DataContainer dc2 = cache(2).getAdvancedCache().getDataContainer();
      assertEquals(numKeys, dc2.size());

      // check the expected values of these keys
      for (int i = 0; i < numKeys; i++) {
         String key = "k" + i;
         String expectedValue = "v" + i;
         assertTrue(cacheTopology.isReadOwner(key));
         InternalCacheEntry entry = dc2.get(key);
         assertNotNull(entry);
         assertEquals(expectedValue, entry.getValue());
      }
   }

   /**
    * Counts PutKeyValueCommands flagged PUT_FOR_STATE_TRANSFER; once the ignore flag is
    * set, such commands are dropped (return null) so late transfers don't skew the count.
    */
   static class CountInterceptor extends BaseAsyncInterceptor {
      private final AtomicBoolean ignoreFurtherStateTransfer;
      private final AtomicInteger transferredKeys;

      public CountInterceptor(AtomicBoolean ignoreFurtherStateTransfer, AtomicInteger transferredKeys) {
         this.ignoreFurtherStateTransfer = ignoreFurtherStateTransfer;
         this.transferredKeys = transferredKeys;
      }

      @Override
      public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
         if (cmd instanceof PutKeyValueCommand &&
               ((PutKeyValueCommand) cmd).hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
            if (ignoreFurtherStateTransfer.get()) {
               return null;
            }
            return invokeNextThenAccept(ctx, cmd, (rCtx, rCommand, rv) -> {
               transferredKeys.incrementAndGet();
            });
         }
         return invokeNext(ctx, cmd);
      }
   }
}
| 5,447
| 41.5625
| 159
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/DataRehashedEventTest.java
|
package org.infinispan.statetransfer;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.DataRehashed;
import org.infinispan.notifications.cachelistener.event.DataRehashedEvent;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
/**
* @author Dan Berindei
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.DataRehashedEventTest")
@CleanupAfterMethod
@InCacheMode({ CacheMode.DIST_SYNC })
public class DataRehashedEventTest extends MultipleCacheManagersTest {
private DataRehashedListener rehashListener;
@Override
protected void createCacheManagers() throws Throwable {
createClusteredCaches(1, getDefaultConfig());
}
protected ConfigurationBuilder getDefaultConfig() {
return getDefaultClusteredCacheConfig(cacheMode, false);
}
public void testJoinAndLeave() {
Cache<Object, Object> c1 = cache(0);
rehashListener = new DataRehashedListener();
c1.addListener(rehashListener);
ConsistentHash ch1Node = advancedCache(0).getDistributionManager().getReadConsistentHash();
assertEquals(rehashListener.removeEvents().size(), 0);
// start a second node and wait for the rebalance to end
addClusterEnabledCacheManager(getDefaultConfig());
cache(1);
TestingUtil.waitForNoRebalance(cache(0), cache(1));
ConsistentHash ch2Nodes = advancedCache(0).getDistributionManager().getReadConsistentHash();
rehashListener.waitForEvents(2);
List<DataRehashedEvent<Object, Object>> events = rehashListener.removeEvents();
assertEquals(events.size(), 2);
DataRehashedEvent<Object, Object> pre = events.get(0);
DataRehashedEvent<Object, Object> post = events.get(1);
assertTrue(pre.isPre());
assertEquals(pre.getConsistentHashAtStart(), ch1Node);
// we could get this "intermediate" CH with TopologyChanged events, but this should be enough
assertNotNull(pre.getConsistentHashAtEnd());
assertEquals(pre.getMembersAtEnd(), ch2Nodes.getMembers());
assertFalse(post.isPre());
assertEquals(post.getConsistentHashAtStart(), ch1Node);
assertEquals(post.getConsistentHashAtEnd(), ch2Nodes);
// start a third node and wait for the rebalance to end
addClusterEnabledCacheManager(getDefaultConfig());
cache(2);
TestingUtil.waitForNoRebalance(cache(0), cache(1), cache(2));
ConsistentHash ch3Nodes = advancedCache(0).getDistributionManager().getReadConsistentHash();
rehashListener.waitForEvents(2);
events = rehashListener.removeEvents();
assertEquals(events.size(), 2);
pre = events.get(0);
post = events.get(1);
assertTrue(pre.isPre());
assertEquals(pre.getConsistentHashAtStart(), ch2Nodes);
// we could get this "intermediate" CH with TopologyChanged events, but this should be enough
assertNotNull(pre.getConsistentHashAtEnd());
assertEquals(pre.getMembersAtEnd(), ch3Nodes.getMembers());
assertFalse(post.isPre());
assertEquals(post.getConsistentHashAtStart(), ch2Nodes);
assertEquals(post.getConsistentHashAtEnd(), ch3Nodes);
// stop cache 2 and wait for the rebalance to end
killMember(2);
// this CH might be different than the CH before the 3rd node joined
ConsistentHash chAfterLeave = advancedCache(0).getDistributionManager().getReadConsistentHash();
rehashListener.waitForEvents(2);
events = rehashListener.removeEvents();
assertEquals(events.size(), 2);
pre = events.get(0);
post = events.get(1);
assertTrue(pre.isPre());
// we could get this "intermediate" CH with TopologyChanged events, but this should be enough
assertNotNull(pre.getConsistentHashAtStart());
assertEquals(pre.getMembersAtStart(), chAfterLeave.getMembers());
assertEquals(pre.getConsistentHashAtEnd(), chAfterLeave);
assertFalse(post.isPre());
assertEquals(post.getConsistentHashAtStart(), pre.getConsistentHashAtStart());
assertEquals(post.getConsistentHashAtEnd(), pre.getConsistentHashAtEnd());
// stop cache 1 and wait for the rebalance to end
killMember(1);
// cache 0 was already an owner for all the segments, so there shouldn't be any rebalance
events = rehashListener.removeEvents();
assertEquals(events.size(), 0);
}
public void testPostOnlyEvent() {
Cache<Object, Object> c1 = cache(0);
rehashListener = new DataRehashedListenerPostOnly();
c1.addListener(rehashListener);
assertEquals(rehashListener.removeEvents().size(), 0);
// start a second node and wait for the rebalance to end
addClusterEnabledCacheManager(getDefaultConfig());
cache(1);
TestingUtil.waitForNoRebalance(cache(0), cache(1));
rehashListener.waitForEvents(1);
}
@Listener
public class DataRehashedListener {
private volatile List<DataRehashedEvent<Object, Object>> events = new CopyOnWriteArrayList<DataRehashedEvent<Object, Object>>();
@DataRehashed
public void onDataRehashed(DataRehashedEvent<Object, Object> e) {
log.tracef("New event received: %s", e);
events.add(e);
}
List<DataRehashedEvent<Object, Object>> removeEvents() {
List<DataRehashedEvent<Object, Object>> oldEvents = events;
events = new CopyOnWriteArrayList<DataRehashedEvent<Object, Object>>();
return oldEvents;
}
void waitForEvents(final int count) {
eventually(new Condition() {
@Override
public boolean isSatisfied() throws Exception {
return events.size() >= count;
}
});
}
}
@Listener(observation = Listener.Observation.POST)
public class DataRehashedListenerPostOnly extends DataRehashedListener {
}
}
| 6,505
| 37.046784
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/RemoteGetDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.distribution.DistributionTestHelper.isFirstOwner;
import static org.infinispan.util.BlockingLocalTopologyManager.confirmTopologyUpdate;
import static org.infinispan.util.BlockingLocalTopologyManager.finishRebalance;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import org.infinispan.Cache;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.InterceptorConfiguration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.interceptors.BaseCustomAsyncInterceptor;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoName;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.topology.CacheTopology.Phase;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.infinispan.util.BlockingLocalTopologyManager;
import org.infinispan.util.ControlledRpcManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Test multiple possible situations of interleaving between a remote get and state transfer.
*
* @author Pedro Ruivo
* @since 6.0
*/
@Test(groups = "functional", testName = "statetransfer.RemoteGetDuringStateTransferTest")
@CleanupAfterMethod
public class RemoteGetDuringStateTransferTest extends MultipleCacheManagersTest {
   // Controlled components installed into the caches by the tests; tracked so that
   // unblockAll() can release them after each method even when a test fails mid-scenario.
   private final List<BlockingLocalTopologyManager> topologyManagerList =
         Collections.synchronizedList(new ArrayList<>(4));
   private final List<ControlledRpcManager> rpcManagerList =
         Collections.synchronizedList(new ArrayList<>(4));
/*
Summary
T0 initial topology (NO_REBALANCE, ro = wo = [1])
T1 state transfer started (READ_OLD_WRITE_ALL, ro = [1], wo = [1, 2])
T2 state transfer finished but rebalance not complete (READ_ALL_WRITE_ALL, ro = wo = [1, 2])
T3 read new, write all topology (READ_NEW_WRITE_ALL, ro = [2], wo = [1, 2])
T4 rebalance completed (NO_REBALANCE, ro = wo = [2])
| sc | first request | process request 1 | receive response 1 | retry | process request 2 | receive response 2 |
| 010 | T0 | 1:T1 | T0 | N1 | | |
| 011 | T0 | 1:T1 | T1/T2/T3/T4 | N1 | | |
| [2] | T0 | 1:T2 | T0 | | | |
| [1] | T0 | 1:T2 | T1/T2/T3/T4 | N1 | | |
| [2] | T0 | 1:T3 | T0/T1 | | | |
| [2] | T0 | 1:T3 | T2 | Y* | 2:T0/T1 | |
| [4] | T0 | 1:T3 | T2 | Y* | 2:T2 | T0/T1 |
| 032_22 | T0 | 1:T3 | T2 | Y* | 2:T2 | T2/T3/T4 |
| 032_32 | T0 | 1:T3 | T2 | Y* | 2:T3/T4 | T2/T3/T4 |
| [2] | T0 | 1:T3 | T3 | Y | 2:T0/T1 | |
| [4] | T0 | 1:T3 | T3 | Y | 2:T2 | T0/T1/T2 |
| 033_23 | T0 | 1:T3 | T3 | Y | 2:T2 | T3/T4 |
| [4] | T0 | 1:T3 | T3 | Y | 2:T3/T4 | T0/T1/T2 |
| 033_33 | T0 | 1:T3 | T3 | Y | 2:T3/T4 | T3/T4 |
| [2] | T0 | 1:T3 | T4 | Y | 2:T0/T1/T2 | |
| [4] | T0 | 1:T3 | T4 | Y | 2:T3/T4 | T0/T1/T2/T3 |
| [1] | T0 | 1:T3 | T4 | Y | 2:T3/T4 | T4 |
| [2] | T0 | 1:T4 | T0/T1/T2 | | | |
| [4] | T0 | 1:T4 | T3/T4 | Y | 2:T0/T1/T2 | |
| [2] | T0 | 1:T4 | T3/T4 | Y | 2:T3/T4 | T0/T1/T2 |
| [1] | T0 | 1:T4 | T3/T4 | Y | 2:T3/T4 | T3/T4 |
| [4] | T1 | 1:T0 | T0 | | | |
| 101 | T1 | 1:T0 | T1/T2/T3/T4 | N1 | | |
| [4] | T1 | 1:T1 | T0 | | | |
| 111 | T1 | 1:T1 | T1/T2/T3/T4 | | | |
| [4] | T1 | 1:T2 | T0 | | | |
| [1] | T1 | 1:T2 | T1/T2/T3/T4 | N1 | | |
| [2] | T1 | 1:T3 | T2 | Y* | 2:T0/T1 | |
| [4] | T1 | 1:T3 | T2 | Y* | 2:T2 | T0/T1 |
| 132_22 | T1 | 1:T3 | T2 | Y* | 2:T2 | T2/T3/T4 |
| [4] | T1 | 1:T3 | T2 | Y* | 2:T3 | T0/T1 |
| 132_32 | T1 | 1:T3 | T2 | Y* | 2:T3 | T2/T3/T4 |
| [2] | T1 | 1:T3 | T2 | Y* | 2:T4 | T0/T1/T2 |
| [1] | T1 | 1:T3 | T2 | Y* | 2:T4 | T3/T4 |
| [2] | T1 | 1:T3 | T3 | Y | 2:T0/T1 | |
| [4] | T1 | 1:T3 | T3 | Y | 2:T2 | T0/T1/T2 |
| 133_23 | T1 | 1:T3 | T3 | Y | 2:T2 | T3/T4 |
| [4] | T1 | 1:T3 | T3 | Y | 2:T3 | T0/T1/T2 |
| 133_33 | T1 | 1:T3 | T3 | Y | 2:T3 | T3/T4 |
| [4] | T1 | 1:T3 | T3 | Y | 2:T4 | T0/T1/T2 |
| [1] | T1 | 1:T3 | T3 | Y | 2:T4 | T3/T4 |
| [2] | T1 | 1:T3 | T4 | Y | 2:T0/T1/T2 | |
| [1] | T1 | 1:T3 | T4 | Y | 2:T3/T4 | T4 |
| [4] | T1 | 1:T4 | T0/T1/T2 | | | |
| [2] | T1 | 1:T4 | T3/T4 | Y | 2:T0/T1/T2 | |
| [4] | T1 | 1:T4 | T3/T4 | Y | 2:T3/T4 | T0/T1/T2 |
| [1] | T1 | 1:T4 | T3/T4 | Y | 2:T3/T4 | T3/T4 |
| [2] | T2 | 1:T0 | | | | |
| [4] | T2 | 1: *, 2: * | T0/T1 | | | |
| 2112 | T2 | 1:T1, 2: T1 | T2/T3/T4 | N1 | | |
| 2122 | T2 | 1:T1, 2: T2 | T2/T3/T4 | N1T2 | | |
| 2132 | T2 | 1:T1, 2: T3/T4 | T2/T3/T4 | N1T2 | | |
| 2212 | T2 | 1:T2, 2: T1 | T2/T3/T4 | N1 | | |
| 2222 | T2 | 1:T2, 2: T2 | T2/T3/T4 | N1T2 | | |
| 2232 | T2 | 1:T2, 2: T3/T4 | T2/T3/T4 | N1T2 | | |
| 2312_22| T2 | 1:T3/T4, 2: T1 | T2/T3/T4 | Y | 2: T2 | T2/T3/T4 |
| 2312_32| T2 | 1:T3/T4, 2: T1 | T2/T3/T4 | Y | 2: T3/T4 | T2/T3/T4 |
| 2322 | T2 | 1:T3/T4, 2: T2 | T2/T3/T4 | N2 | | |
| 2332 | T2 | 1:T3/T4, 2: T3/T4 | T2/T3/T4 | N2 | | |
| [2] | T3 | 2: T0/T1 | | | | |
| [4] | T3 | 2: T2 | T0/T1/T2 | | | |
| 323 | T3 | 2: T2 | T3/T4 | N2 | | |
| [4] | T3 | 2: T3/T4 | T0/T1/T2 | | | |
| 333 | T3 | 2: T3/T4 | T3/T4 | N2 | | |
| [2] | T4 | 2: T0/T1/T2 | | | | |
| [4] | T4 | 2:T3 | T0/T1/T2/T3 | N2 | | |
| 434 | T4 | 2:T3 | T4 | N2 | | |
*) The retry will go to both node 1 and 2 but 1 in T3 will respond with UnsureResponse
[1] too similar to the previous scenario
   [2] impossible because the two nodes' topologies can't differ by more than 1 at the same time
[4] impossible, first response was received in later topology than second response
N1/N2/N1T2 We won't do a retry because we got successful response from node 1/2/both 1 and 2
A note for 2312_x2: while the two nodes cannot have topologies 3 and 1 at the same time, the two reads can arrive
at different times there.
*/
@AfterMethod(alwaysRun = true)
public final void unblockAll() {
//keep track of all controlled components. In case of failure, we need to unblock all otherwise we have to wait
//long time until the test is able to stop all cache managers.
for (BlockingLocalTopologyManager topologyManager : topologyManagerList) {
topologyManager.stopBlocking();
}
topologyManagerList.clear();
for (ControlledRpcManager rpcManager : rpcManagerList) {
rpcManager.stopBlocking();
}
rpcManagerList.clear();
}
   /**
    * ISPN-3315: In this scenario, a remote get is triggered and the reply received in a stable state. The old owner
    * receives the request after the rebalance_start command.
    */
   public void testScenario_010() throws Exception {
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = "key_010";
      ownerCheckAndInit(cache(1), key, "v");
      final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));
      // No retry is expected in this scenario; the interceptor fails the test if one happens.
      cache(0).getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptorAfter(new AssertNoRetryInterceptor(), StateTransferInterceptor.class);
      //remote get is sent in topology T0
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      ControlledRpcManager.BlockedRequest blockedGet = rpcManager0.expectCommand(ClusteredGetCommand.class);
      // The joiner must not serve any read in this scenario.
      FailReadsInterceptor fri = new FailReadsInterceptor();
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST).interceptor(fri));
      // Install topology T1 on node 1 and unblock the remote get
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager1);
      awaitForTopology(currentTopologyId + 1, cache(1));
      blockedGet.send().receiveAll();
      //check the value returned and make sure that the requestor is still in currentTopologyId (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      fri.assertNotHit();
      assertTopologyId(currentTopologyId, cache(0));
      // Finish the rebalance
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, joiner.topologyManager);
      finishRebalance(Phase.READ_ALL_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      joiner.joinerFuture.get();
   }
   /**
    * ISPN-3315: similar to scenario 010, the remote get is triggered in stable state but reply is received after the
    * rebalance_start command. As in scenario 010, the owner receives the request after the rebalance_start command.
    */
   public void testScenario_011() throws Exception {
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = "key_011";
      ownerCheckAndInit(cache(1), key, "v");
      final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));
      // No retry is expected in this scenario; the interceptor fails the test if one happens.
      cache(0).getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptorAfter(new AssertNoRetryInterceptor(), StateTransferInterceptor.class);
      //the remote get is triggered in the current topology id.
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      ControlledRpcManager.BlockedRequest blockedGet = rpcManager0.expectCommand(ClusteredGetCommand.class);
      // The joiner must not serve any read in this scenario.
      FailReadsInterceptor fri = new FailReadsInterceptor();
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST).interceptor(fri));
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, topologyManager1);
      //wait until the rebalance start arrives in old owner and in the requestor. then let the remote get go.
      awaitForTopology(currentTopologyId + 1, cache(1));
      awaitForTopology(currentTopologyId + 1, cache(0));
      blockedGet.send().receiveAll();
      //check the value returned and make sure that the requestor is in the correct topology id (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      fri.assertNotHit();
      assertTopologyId(currentTopologyId + 1, cache(1));
      assertTopologyId(currentTopologyId + 1, cache(0));
      // Finish the rebalance
      joiner.topologyManager.confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL);
      finishRebalance(Phase.READ_ALL_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      joiner.joinerFuture.get();
   }
   // Thin wrappers for the 1x1 scenarios; the argument is the topology installed on node 1
   // when it processes the request (see the interleaving table at the top of the class).
   public void testScenario_101() throws Exception {
      testScenario_1x1(0);
   }
   public void testScenario_111() throws Exception {
      testScenario_1x1(1);
   }
   /**
    * Scenarios 1x1: the remote get is sent when the originator is already in T1; the old owner
    * serves it in T0 or T1 and the response is received in T1. No retry is expected.
    *
    * @param topologyOnNode1 0 or 1 — the topology (relative to the initial one) installed on node 1
    *                        when it serves the read
    */
   protected void testScenario_1x1(int topologyOnNode1) throws Exception {
      assertTrue(0 <= topologyOnNode1 && topologyOnNode1 <= 1);
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = String.format("key_1%d1", topologyOnNode1);
      ownerCheckAndInit(cache(1), key, "v");
      final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));
      // No retry is expected in this scenario; the interceptor fails the test if one happens.
      cache(0).getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptorAfter(new AssertNoRetryInterceptor(), StateTransferInterceptor.class);
      // The joiner must not serve any read in this scenario.
      FailReadsInterceptor fri = new FailReadsInterceptor();
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST).interceptor(fri));
      // Install topology T1 on node 0 and maybe on node 1 as well
      topologyManager0.confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL);
      if (topologyOnNode1 > 0) {
         topologyManager1.confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL);
      }
      awaitForTopology(currentTopologyId + 1, cache(0));
      awaitForTopology(currentTopologyId + topologyOnNode1, cache(1));
      //the remote get is triggered after the rebalance_start and before the confirm_rebalance.
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      ControlledRpcManager.BlockedRequest blockedGet = rpcManager0.expectCommand(ClusteredGetCommand.class);
      blockedGet.send().receiveAll();
      //check the value returned and make sure that the requestor is in the correct topology id (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      fri.assertNotHit();
      assertTopologyId(currentTopologyId + 1, cache(0));
      // Let node 1 catch up with T1 if it was held back, then finish the rebalance
      if (topologyOnNode1 < 1) {
         topologyManager1.confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL);
      }
      joiner.topologyManager.confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL);
      finishRebalance(Phase.READ_ALL_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      joiner.joinerFuture.get();
   }
   // Thin wrappers for the 03x_yx scenarios; the arguments decode the scenario id
   // (see the interleaving table at the top of the class).
   public void testScenario_032_22() throws Exception {
      testScenario_03x_yx(2, 2);
   }
   public void testScenario_032_32() throws Exception {
      testScenario_03x_yx(2, 3);
   }
   public void testScenario_033_23() throws Exception {
      testScenario_03x_yx(3, 2);
   }
   public void testScenario_033_33() throws Exception {
      testScenario_03x_yx(3, 3);
   }
   /**
    * Scenarios 03x_yx: the remote get is sent in T0 but the old owner only processes it in T3,
    * so (per the scenario table) the read is retried and served by the joiner.
    *
    * @param topologyOnNode0 2 or 3 — topology (relative to the initial one) on the originator when responses arrive
    * @param topologyOnNode2 2 or 3 — topology the joiner waits for before serving the retried read
    */
   protected void testScenario_03x_yx(int topologyOnNode0, int topologyOnNode2) throws Exception {
      assertTrue(2 <= topologyOnNode0 && topologyOnNode0 <= 3);
      assertTrue(2 <= topologyOnNode2 && topologyOnNode2 <= 3);
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = String.format("key_03%d_%d%d", topologyOnNode0, topologyOnNode2, topologyOnNode0);
      ownerCheckAndInit(cache(1), key, "v");
      final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));
      //consistency check. the remote get is triggered
      assertTopologyId(currentTopologyId, cache(0));
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      ControlledRpcManager.BlockedRequest blockedGet = rpcManager0.expectCommand(ClusteredGetCommand.class);
      // The joiner holds incoming reads until it has reached the requested topology.
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST)
            .interceptor(
                  new WaitForTopologyInterceptor(currentTopologyId + topologyOnNode2)));
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      if (topologyOnNode0 > 2) {
         topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      if (topologyOnNode2 > 2) {
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      // Wait for all the nodes to have the required topology
      awaitForTopology(currentTopologyId + 3, cache(1));
      awaitForTopology(currentTopologyId + topologyOnNode0, cache(0));
      blockedGet.send().receiveAll();
      // Allow the retry to proceed normally
      rpcManager0.expectCommand(ClusteredGetCommand.class).send().receiveAll();
      //check the value returned and make sure that the requestor is in the correct topology id (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      assertTopologyId(currentTopologyId + topologyOnNode0, cache(0));
      // Finish the rebalance
      if (topologyOnNode0 < 3) {
         topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      if (topologyOnNode2 < 3) {
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      finishRebalance(Phase.NO_REBALANCE, topologyManager0, topologyManager1, joiner.topologyManager);
      joiner.joinerFuture.get();
   }
   // Thin wrappers for the 13x_yx scenarios; the arguments decode the scenario id
   // (see the interleaving table at the top of the class).
   public void testScenario_132_22() throws Exception {
      testScenario_13x_yx(2, 2);
   }
   public void testScenario_132_32() throws Exception {
      testScenario_13x_yx(2, 3);
   }
   public void testScenario_133_23() throws Exception {
      testScenario_13x_yx(3, 2);
   }
   public void testScenario_133_33() throws Exception {
      testScenario_13x_yx(3, 3);
   }
   /**
    * Scenarios 13x_yx: like 03x_yx, but the remote get is only sent once the originator has
    * installed T1. The old owner processes the request in T3, so the read is retried and
    * served by the joiner.
    *
    * @param topologyOnNode0 2 or 3 — topology (relative to the initial one) on the originator when responses arrive
    * @param topologyOnNode2 2 or 3 — topology the joiner waits for before serving the retried read
    */
   protected void testScenario_13x_yx(int topologyOnNode0, int topologyOnNode2) throws Exception {
      assertTrue(2 <= topologyOnNode0 && topologyOnNode0 <= 3);
      assertTrue(2 <= topologyOnNode2 && topologyOnNode2 <= 3);
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = String.format("key_13%d_%d%d", topologyOnNode0, topologyOnNode2, topologyOnNode0);
      ownerCheckAndInit(cache(1), key, "v");
      final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));
      // The joiner holds incoming reads until it has reached the requested topology.
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST)
            .interceptor(
                  new WaitForTopologyInterceptor(currentTopologyId + topologyOnNode2)));
      // Install topology T1 everywhere
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      //consistency check. the remote get is triggered
      awaitForTopology(currentTopologyId + 1, cache(0));
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      ControlledRpcManager.BlockedRequest blockedGet = rpcManager0.expectCommand(ClusteredGetCommand.class);
      // Install topology T2 everywhere
      confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      // Install topology T3 where needed
      topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      if (topologyOnNode0 > 2) {
         topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      if (topologyOnNode2 > 2) {
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      //wait until the consistent_hash_update arrives in old owner
      awaitForTopology(currentTopologyId + 3, cache(1));
      awaitForTopology(currentTopologyId + topologyOnNode0, cache(0));
      // Unblock the request and process the responses
      blockedGet.send().receiveAll();
      // Unblock the retry and its responses
      rpcManager0.expectCommand(ClusteredGetCommand.class).send().receiveAll();
      //check the value returned and make sure that the requestor is in the correct topology id (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      assertTopologyId(currentTopologyId + topologyOnNode0, cache(0));
      // Finish the rebalance
      if (topologyOnNode0 < 3) {
         topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      if (topologyOnNode2 < 3) {
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      finishRebalance(Phase.NO_REBALANCE, topologyManager0, topologyManager1, joiner.topologyManager);
      joiner.joinerFuture.get();
   }
   // Thin wrappers for the 2xy2 scenarios (see the interleaving table at the top of the
   // class). The last two arguments give the expected number of successful responses and,
   // where deterministic, the index of the node they must come from (-1 = not checked).
   public void testScenario_2112() throws Exception {
      testScenario_2xy2(1, 1, 1, 1);
   }
   public void testScenario_2212() throws Exception {
      testScenario_2xy2(2, 1, 1, 1);
   }
   public void testScenario_2122() throws Exception {
      testScenario_2xy2(1, 2, 2, -1);
   }
   public void testScenario_2132() throws Exception {
      testScenario_2xy2(1, 3, 2, -1);
   }
   public void testScenario_2222() throws Exception {
      testScenario_2xy2(2, 2, 2, -1);
   }
   public void testScenario_2232() throws Exception {
      testScenario_2xy2(2, 3, 2, -1);
   }
   public void testScenario_2322() throws Exception {
      testScenario_2xy2(3, 2, 1, 2);
   }
   public void testScenario_2332() throws Exception {
      testScenario_2xy2(3, 3, 1, 2);
   }
protected void testScenario_2xy2(int topologyOnNode1, int topologyOnNode2, int expectedSuccessResponses,
int expectSuccessFrom) throws Exception {
assertTrue(1 <= topologyOnNode1 && topologyOnNode1 <= 3);
assertTrue(1 <= topologyOnNode2 && topologyOnNode2 <= 3);
assertClusterSize("Wrong cluster size.", 2);
final Object key = String.format("key_2%d%d2", topologyOnNode1, topologyOnNode2);
ownerCheckAndInit(cache(1), key, "v");
final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
final int currentTopologyId = currentTopologyId(cache(0));
cache(0).getAdvancedCache().getAsyncInterceptorChain()
.addInterceptorAfter(new AssertNoRetryInterceptor(), StateTransferInterceptor.class);
WaitForTopologyInterceptor wfti = new WaitForTopologyInterceptor(currentTopologyId + topologyOnNode2);
NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
.position(InterceptorConfiguration.Position.FIRST).interceptor(wfti));
// Install topology T1 everywhere and T2 on the originator
confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
topologyManager0.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
awaitForTopology(currentTopologyId + 2, cache(0));
// We wouldn't only need to install newer topologies on the new owner for the retry
// but the coordinator will only start with a new topology after all the nodes confirmed the old one
if (topologyOnNode1 > 1) {
topologyManager1.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
}
if (topologyOnNode2 > 1) {
joiner.topologyManager.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
}
if (topologyOnNode1 > 2) {
topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
}
assertTopologyId(currentTopologyId + 2, cache(0));
assertTopologyId(currentTopologyId + topologyOnNode1, cache(1));
// Send the request and receive the response from node 1
Future<Object> remoteGetFuture = remoteGet(cache(0), key);
ControlledRpcManager.SentRequest sentGet = rpcManager0.expectCommand(ClusteredGetCommand.class).send();
sentGet.expectResponse(address(1));
// Now we can install and confirm topology T2 on node 1
// So that node 2 is free to install topology T3 if necessary
if (topologyOnNode1 < 2) {
topologyManager1.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
}
if (topologyOnNode2 > 2) {
joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
}
eventuallyEquals(currentTopologyId + topologyOnNode2,
() -> wfti.distributionManager.getCacheTopology().getTopologyId());
ControlledRpcManager.BlockedResponseMap blockedGet = sentGet.expectAllResponses();
int succesful = 0;
for (Map.Entry<Address, Response> rsp : blockedGet.getResponses().entrySet()) {
if (rsp.getValue().isSuccessful()) {
if (expectSuccessFrom >= 0) {
assertEquals(address(expectSuccessFrom), rsp.getKey());
}
succesful++;
} else {
assertEquals(UnsureResponse.INSTANCE, rsp.getValue());
if (expectSuccessFrom >= 0) {
assertFalse(rsp.getKey().equals(address(expectSuccessFrom)));
}
}
}
assertTrue(succesful == expectedSuccessResponses);
// Unblock the responses and retry if necessary
blockedGet.receive();
if (succesful == 0) {
rpcManager0.expectCommand(ClusteredGetCommand.class).send().receiveAll();
}
//check the value returned and make sure that the requestor is in the correct topology id (consistency check)
assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
if (topologyOnNode2 < 2) {
joiner.topologyManager.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
}
topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
if (topologyOnNode1 < 3) {
topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
}
if (topologyOnNode2 < 3) {
joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
}
finishRebalance(Phase.NO_REBALANCE, topologyManager0, topologyManager1, joiner.topologyManager);
joiner.joinerFuture.get();
}
   /**
    * Scenario 2312_22: node 2 answers the retried read while still on topology T+2.
    * Delegates to {@link #testScenario_2312_x2(int)} with retryTopologyOnNode2 = 2.
    */
   public void testScenario_2312_22() throws Exception {
      testScenario_2312_x2(2);
   }

   /**
    * Scenario 2312_32: node 2 answers the retried read after installing topology T+3.
    * Delegates to {@link #testScenario_2312_x2(int)} with retryTopologyOnNode2 = 3.
    */
   public void testScenario_2312_32() throws Exception {
      testScenario_2312_x2(3);
   }
   /**
    * Remote get scenario where the originator (node 0) is on topology T+2 while node 1
    * is still behind (T+1) and the joiner (node 2) moves ahead; both owners answer
    * {@link UnsureResponse} so the originator must retry the read.
    *
    * @param retryTopologyOnNode2 topology offset (2 or 3) installed on node 2 when the
    *                             retried read is executed there
    */
   private void testScenario_2312_x2(int retryTopologyOnNode2) throws Exception {
      assertTrue(2 <= retryTopologyOnNode2 && retryTopologyOnNode2 <= 3);
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = String.format("key_2312_%d2", retryTopologyOnNode2);
      ownerCheckAndInit(cache(1), key, "v");
      final ControlledRpcManager rpcManager0 = replaceRpcManager(cache(0));
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));

      CyclicBarrier barrier1 = new CyclicBarrier(2);
      CyclicBarrier barrier2 = new CyclicBarrier(2);
      // The joiner blocks GetCacheEntryCommand before execution so the test can
      // control exactly when node 2 processes the read.
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST)
            .interceptor(new BlockingInterceptor<>(barrier2, GetCacheEntryCommand.class,
                  true, false)));

      // Install T1 everywhere and T2 on node 0
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      topologyManager0.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
      awaitForTopology(currentTopologyId + 2, cache(0));

      // Block the command on node 1 so we can install T3 first
      cache(1).getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptor(new BlockingInterceptor<>(barrier1, GetCacheEntryCommand.class, false, false), 0);

      // Send the remote get and wait for the reply from node 2
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      ControlledRpcManager.SentRequest sentGet = rpcManager0.expectCommand(ClusteredGetCommand.class).send();
      // Two awaits per barrier: one releases the command, one confirms it passed the interceptor.
      barrier2.await(10, TimeUnit.SECONDS);
      barrier2.await(10, TimeUnit.SECONDS);
      // Node 2 is still on an older read topology, so it answers UnsureResponse.
      sentGet.expectResponse(address(2), UnsureResponse.INSTANCE).receive();

      // Install T2 on nodes 1 and 2
      topologyManager1.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
      joiner.topologyManager.confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL);
      // Now we can install T3 on node 1
      topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      awaitForTopology(currentTopologyId + 3, cache(1));
      // Also install T3 on node 2 if necessary
      if (retryTopologyOnNode2 > 2) {
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }

      // Unblock read on node1 and receive the responses
      barrier1.await(10, TimeUnit.SECONDS);
      barrier1.await(10, TimeUnit.SECONDS);
      // Node 1 moved past the read topology of the command, so it is unsure as well;
      // with no definite answer the originator must retry.
      sentGet.expectResponse(address(1), UnsureResponse.INSTANCE).receive().finish();

      // Process retry
      ControlledRpcManager.SentRequest sentRetry = rpcManager0.expectCommand(ClusteredGetCommand.class).send();
      barrier1.await(10, TimeUnit.SECONDS);
      barrier1.await(10, TimeUnit.SECONDS);
      barrier2.await(10, TimeUnit.SECONDS);
      barrier2.await(10, TimeUnit.SECONDS);
      sentRetry.receiveAll();

      //check the value returned and make sure that the requestor is in the correct topology id (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      assertTopologyId(currentTopologyId + 2, cache(0));

      // Finish the rebalance
      topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      if (retryTopologyOnNode2 < 3) {
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      finishRebalance(Phase.NO_REBALANCE, topologyManager0, topologyManager1, joiner.topologyManager);
      joiner.joinerFuture.get();
   }
   /** Scenario 323: node 0 on T+3, nodes 1 and 2 on T+2. Delegates to {@link #testScenario_xyx(int, int)}. */
   public void testScenario_323() throws Exception {
      testScenario_xyx(3, 2);
   }

   /** Scenario 333: all nodes on T+3. Delegates to {@link #testScenario_xyx(int, int)}. */
   public void testScenario_333() throws Exception {
      testScenario_xyx(3, 3);
   }

   /** Scenario 434: node 0 on T+4, nodes 1 and 2 on T+3. Delegates to {@link #testScenario_xyx(int, int)}. */
   public void testScenario_434() throws Exception {
      testScenario_xyx(4, 3);
   }
   /**
    * Remote get scenario where the originator (node 0) is ahead of the owners and the
    * read must be served by the joiner (node 2) only, without retry: node 1 fails the
    * test if it ever executes the read ({@link FailReadsInterceptor}) and node 0 fails
    * it if the command is retried ({@link AssertNoRetryInterceptor}).
    *
    * @param topologyOnNode0 topology offset (3 or 4) installed on the originator
    * @param topologyOnNode2 topology offset (2 or 3) installed on nodes 1 and 2;
    *                        at most one behind node 0
    */
   protected void testScenario_xyx(int topologyOnNode0, int topologyOnNode2) throws Exception {
      assertTrue(3 <= topologyOnNode0 && topologyOnNode0 <= 4);
      assertTrue(2 <= topologyOnNode2 && topologyOnNode2 <= 3);
      assertTrue(topologyOnNode0 - topologyOnNode2 <= 1);
      assertClusterSize("Wrong cluster size.", 2);
      final Object key = String.format("key_%d%d%d", topologyOnNode0, topologyOnNode2, topologyOnNode2);
      ownerCheckAndInit(cache(1), key, "v");
      final BlockingLocalTopologyManager topologyManager0 = replaceTopologyManager(manager(0));
      final BlockingLocalTopologyManager topologyManager1 = replaceTopologyManager(manager(1));
      final int currentTopologyId = currentTopologyId(cache(0));

      // Fail the test if the read is retried on node 0 ...
      cache(0).getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptorAfter(new AssertNoRetryInterceptor(), StateTransferInterceptor.class);
      // ... or ever executed on node 1.
      FailReadsInterceptor fri = new FailReadsInterceptor();
      cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(fri, 0);

      // The joiner delays the read until it has installed the expected topology.
      NewNode joiner = addNode(cb -> cb.customInterceptors().addInterceptor()
            .position(InterceptorConfiguration.Position.FIRST)
            .interceptor(new WaitForTopologyInterceptor(currentTopologyId +
                  topologyOnNode2)));

      // Install topology T2 everywhere
      confirmTopologyUpdate(Phase.READ_OLD_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      confirmTopologyUpdate(Phase.READ_ALL_WRITE_ALL, topologyManager0, topologyManager1, joiner.topologyManager);
      // Install T3 on node 0 and the others as necessary
      topologyManager0.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      if (topologyOnNode2 > 2) {
         topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      // Install T4 on node 0 if necessary
      if (topologyOnNode0 > 3) {
         topologyManager0.confirmTopologyUpdate(Phase.NO_REBALANCE);
      }
      awaitForTopology(currentTopologyId + topologyOnNode0, cache(0));

      // Send the remote get and check that it got a single response from node 2
      Future<Object> remoteGetFuture = remoteGet(cache(0), key);
      //check the value returned and make sure that the requestor is in the correct topology id (consistency check)
      assertEquals("Wrong value from remote get.", "v", remoteGetFuture.get());
      fri.assertNotHit();
      assertTopologyId(currentTopologyId + topologyOnNode0, cache(0));

      // Let the remaining topology confirmations through and finish the rebalance.
      if (topologyOnNode2 < 3) {
         topologyManager1.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
         joiner.topologyManager.confirmTopologyUpdate(Phase.READ_NEW_WRITE_ALL);
      }
      if (topologyOnNode0 < 4) {
         topologyManager0.confirmTopologyUpdate(Phase.NO_REBALANCE);
      }
      topologyManager1.confirmTopologyUpdate(Phase.NO_REBALANCE);
      joiner.topologyManager.confirmTopologyUpdate(Phase.NO_REBALANCE);
      joiner.joinerFuture.get();
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      // Start with a 2-node cluster; each scenario adds the third node (the joiner) itself.
      createClusteredCaches(2, RemoteGetDuringStateTransferSCI.INSTANCE, configuration());
   }
   @Override
   protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
      // Install a no-op global configuration manager on every manager before start
      // (presumably to keep global config state out of the blocked-topology scenarios).
      NoOpGlobalConfigurationManager.amendCacheManager(cm);
   }
   // Runs cache.get(key) on a forked thread so the test thread can keep
   // orchestrating topology updates while the read is in flight.
   private Future<Object> remoteGet(Cache cache, Object key) {
      return fork(() -> cache.get(key));
   }
private int currentTopologyId(Cache cache) {
return cache.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
}
private void assertTopologyId(final int expectedTopologyId, final Cache cache) {
assertEquals(expectedTopologyId, currentTopologyId(cache));
}
private void awaitForTopology(final int expectedTopologyId, final Cache cache) {
eventuallyEquals(expectedTopologyId, () -> currentTopologyId(cache));
}
private void awaitUntilNotInDataContainer(final Cache cache, final Object key) {
eventually(() -> !cache.getAdvancedCache().getDataContainer().containsKey(key));
}
   /**
    * Starts a third cache manager joining the cluster on a forked thread.
    *
    * @param modifyConfiguration optional hook to tweak the joiner's configuration
    *                            (e.g. to add a blocking interceptor); may be null
    * @return handle holding the joiner's blocking topology manager and the future
    *         that completes once the cluster has re-formed
    */
   private NewNode addNode(Consumer<ConfigurationBuilder> modifyConfiguration) {
      NewNode newNode = new NewNode();
      ConfigurationBuilder configurationBuilder = configuration();
      if (modifyConfiguration != null) {
         modifyConfiguration.accept(configurationBuilder);
      }
      EmbeddedCacheManager embeddedCacheManager = addClusterEnabledCacheManager(RemoteGetDuringStateTransferSCI.INSTANCE, configurationBuilder);
      // Replace the topology manager before the fork so every topology update of the
      // joiner is already under test control.
      newNode.topologyManager = replaceTopologyManager(embeddedCacheManager);
      newNode.joinerFuture = fork(() -> {
         waitForClusterToForm();
         return null;
      });
      return newNode;
   }
   /**
    * Verifies that the given cache is the first owner of the key, stores the value
    * through it and checks the value is readable from every node.
    */
   private void ownerCheckAndInit(Cache<Object, Object> owner, Object key, Object value) {
      assertTrue(address(owner) + " should be the owner of " + key + ".", isFirstOwner(owner, key));
      owner.put(key, value);
      assertCacheValue(key, value);
   }
   // Asserts that every cache in the cluster reads the same value for the key.
   private void assertCacheValue(Object key, Object value) {
      for (Cache cache : caches()) {
         assertEquals("Wrong value for key " + key + " on " + address(cache) + ".", value, cache.get(key));
      }
   }
private ConfigurationBuilder configuration() {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
builder.clustering()
.hash()
.numSegments(1)
.numOwners(1)
.consistentHashFactory(new SingleKeyConsistentHashFactory())
.stateTransfer()
.timeout(30, TimeUnit.SECONDS);
return builder;
}
   /**
    * Swaps the manager's LocalTopologyManager for a blocking one and records it so
    * it can be released/cleaned up later (see {@code topologyManagerList}).
    */
   private BlockingLocalTopologyManager replaceTopologyManager(EmbeddedCacheManager cacheContainer) {
      BlockingLocalTopologyManager localTopologyManager = BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache(cacheContainer);
      topologyManagerList.add(localTopologyManager);
      return localTopologyManager;
   }
   /**
    * Swaps the cache's RpcManager for a controlled one and records it so it can be
    * released/cleaned up later (see {@code rpcManagerList}).
    */
   private ControlledRpcManager replaceRpcManager(Cache cache) {
      ControlledRpcManager controlledRpcManager = ControlledRpcManager.replaceRpcManager(cache);
      rpcManagerList.add(controlledRpcManager);
      return controlledRpcManager;
   }
   /**
    * Consistent hash factory with a single segment owned exclusively by the last
    * member of the cluster — so the most recent joiner always becomes the sole owner.
    */
   @ProtoName("RemoteGetSingleKeyConsistentHashFactory")
   public static class SingleKeyConsistentHashFactory extends BaseControlledConsistentHashFactory.Default {

      SingleKeyConsistentHashFactory() {
         // Exactly one segment: every key maps to the same owner list.
         super(1);
      }

      @Override
      protected int[][] assignOwners(int numSegments, List<Address> members) {
         // Segment 0 -> last member only.
         return new int[][]{{members.size() - 1}};
      }
   }
   /**
    * Interceptor that delays every {@link GetCacheEntryCommand} until the node has
    * installed the expected topology id, then asserts the topology really matches
    * before letting the command proceed.
    */
   static class WaitForTopologyInterceptor extends DDAsyncInterceptor {
      private static final Log log = LogFactory.getLog(RemoteGetDuringStateTransferTest.class);

      protected final int expectedTopologyId;
      // ugly hooks to be able to access topology from test
      private volatile DistributionManager distributionManager;
      private volatile StateTransferLock stateTransferLock;

      private WaitForTopologyInterceptor(int expectedTopologyId) {
         this.expectedTopologyId = expectedTopologyId;
      }

      @Inject
      public void init(DistributionManager distributionManager, StateTransferLock stateTransferLock) {
         this.distributionManager = distributionManager;
         this.stateTransferLock = stateTransferLock;
      }

      @Override
      public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
         assertNotNull(stateTransferLock);
         log.tracef("Waiting for topology %d before executing %s", expectedTopologyId, command);
         // Block (up to 10s) until the expected topology is installed on this node.
         stateTransferLock.topologyFuture(expectedTopologyId).toCompletableFuture().get(10, TimeUnit.SECONDS);
         assertEquals(expectedTopologyId, distributionManager.getCacheTopology().getTopologyId());
         return invokeNext(ctx, command);
      }
   }
   /**
    * Interceptor that fails any {@link GetCacheEntryCommand} executed on its node and
    * records the hit, so a test can assert the node was never asked to serve a read.
    */
   static class FailReadsInterceptor extends BaseCustomAsyncInterceptor {
      private final AtomicBoolean hit = new AtomicBoolean();

      @Override
      public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
         hit.set(true);
         throw new IllegalStateException("Did not expect the command to be executed on node " + cache.getCacheManager().getAddress());
      }

      // Asserts that no read command ever reached this interceptor.
      public void assertNotHit() {
         assertFalse(hit.get());
      }
   }
   /**
    * Interceptor asserting that a {@link GetCacheEntryCommand} is neither flagged as a
    * retry nor fails with {@link OutdatedTopologyException} — i.e. the read succeeds
    * on the first attempt.
    */
   static class AssertNoRetryInterceptor extends DDAsyncInterceptor {
      @Override
      public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) {
         assertFalse(command.hasAnyFlag(FlagBitSets.COMMAND_RETRY));
         return invokeNextAndExceptionally(ctx, command, (rCtx, rCommand, t) -> {
            // An OutdatedTopologyException would mean the command is about to be retried.
            assertFalse(t instanceof OutdatedTopologyException);
            throw t;
         });
      }
   }
private class NewNode {
Future<Void> joinerFuture;
BlockingLocalTopologyManager topologyManager;
}
   /**
    * ProtoStream serialization context for this test; generates marshalling code for
    * {@link SingleKeyConsistentHashFactory} so it can travel with topology updates.
    */
   @AutoProtoSchemaBuilder(
         includeClasses = SingleKeyConsistentHashFactory.class,
         schemaFileName = "test.core.RemoteGetDuringStateTransferTest.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.RemoteGetDuringStateTransferTest",
         service = false
   )
   interface RemoteGetDuringStateTransferSCI extends SerializationContextInitializer {
      RemoteGetDuringStateTransferSCI INSTANCE = new RemoteGetDuringStateTransferSCIImpl();
   }
}
| 46,149
| 50.050885
| 144
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/DistStateTransferOnJoinConsistencyTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.interceptors.impl.InvocationContextInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Test for ISPN-2362 and ISPN-2502 in distributed mode. Uses a cluster which initially has 2 nodes
* and then a third is added to test consistency of state transfer.
* Tests several operations both in an optimistic tx cluster (with write-skew check enabled) and in a pessimistic tx one.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.DistStateTransferOnJoinConsistencyTest")
@CleanupAfterMethod
public class DistStateTransferOnJoinConsistencyTest extends MultipleCacheManagersTest {

   private static final Log log = LogFactory.getLog(DistStateTransferOnJoinConsistencyTest.class);

   // Cache operation executed while the joiner's state transfer is suspended.
   private enum Operation {
      REMOVE, CLEAR, PUT, PUT_MAP, PUT_IF_ABSENT, REPLACE
   }

   @Override
   protected final void createCacheManagers() {
      // cache managers will be created by each test
   }

   /**
    * DIST_SYNC transactional configuration with 3 owners / 2 segments, optimistic
    * (REPEATABLE_READ) or pessimistic locking, no L1, and non-blocking initial transfer.
    */
   protected ConfigurationBuilder createConfigurationBuilder(boolean isOptimistic) {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true, true);
      builder.clustering().hash().numOwners(3).numSegments(2);
      builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup());
      if (isOptimistic) {
         builder.transaction().lockingMode(LockingMode.OPTIMISTIC)
               .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      } else {
         builder.transaction().lockingMode(LockingMode.PESSIMISTIC);
      }
      builder.clustering().l1().disable().locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
      builder.clustering().stateTransfer().fetchInMemoryState(true).awaitInitialTransfer(false);
      return builder;
   }

   public void testRemoveOptimistic() throws Exception {
      testOperationDuringJoin(Operation.REMOVE, true);
   }

   public void testRemovePessimistic() throws Exception {
      testOperationDuringJoin(Operation.REMOVE, false);
   }

   public void testClearOptimistic() throws Exception {
      testOperationDuringJoin(Operation.CLEAR, true);
   }

   public void testClearPessimistic() throws Exception {
      testOperationDuringJoin(Operation.CLEAR, false);
   }

   public void testPutOptimistic() throws Exception {
      testOperationDuringJoin(Operation.PUT, true);
   }

   public void testPutPessimistic() throws Exception {
      testOperationDuringJoin(Operation.PUT, false);
   }

   public void testPutMapOptimistic() throws Exception {
      testOperationDuringJoin(Operation.PUT_MAP, true);
   }

   public void testPutMapPessimistic() throws Exception {
      testOperationDuringJoin(Operation.PUT_MAP, false);
   }

   public void testPutIfAbsentOptimistic() throws Exception {
      testOperationDuringJoin(Operation.PUT_IF_ABSENT, true);
   }

   public void testPutIfAbsentPessimistic() throws Exception {
      testOperationDuringJoin(Operation.PUT_IF_ABSENT, false);
   }

   public void testReplaceOptimistic() throws Exception {
      testOperationDuringJoin(Operation.REPLACE, true);
   }

   public void testReplacePessimistic() throws Exception {
      testOperationDuringJoin(Operation.REPLACE, false);
   }

   /**
    * Core scenario: seed a 2-node cluster, start a joiner whose state application is
    * held on a latch, run the given operation against live data, then release state
    * transfer and verify the incoming (stale) state did not overwrite the operation's
    * effect on any node.
    */
   private void testOperationDuringJoin(Operation op, boolean isOptimistic) throws Exception {
      ConfigurationBuilder builder = createConfigurationBuilder(isOptimistic);

      createCluster(builder, 2);
      waitForClusterToForm();

      final int numKeys = 5;
      log.infof("Putting %d keys into cache ..", numKeys);
      for (int i = 0; i < numKeys; i++) {
         cache(0).put(i, "before_st_" + i);
      }
      log.info("Finished putting keys");

      for (int i = 0; i < numKeys; i++) {
         String expected = "before_st_" + i;
         assertValue(0, i, expected);
         assertValue(1, i, expected);
      }

      // Latches coordinating the joiner's state application: "started" fires when the
      // first state-transfer PUT arrives, "proceed" releases it (see LatchInterceptor).
      final CountDownLatch applyStateProceedLatch = new CountDownLatch(1);
      final CountDownLatch applyStateStartedLatch = new CountDownLatch(1);
      builder.customInterceptors().addInterceptor().before(InvocationContextInterceptor.class)
            .interceptor(new LatchInterceptor(applyStateStartedLatch, applyStateProceedLatch));

      log.info("Adding a new node ..");
      addClusterEnabledCacheManager(builder);
      log.info("Added a new node");

      DataContainer<Object, Object> dc0 = advancedCache(0).getDataContainer();
      DataContainer<Object, Object> dc1 = advancedCache(1).getDataContainer();
      DataContainer<Object, Object> dc2 = advancedCache(2).getDataContainer();

      // wait for state transfer on node C to progress to the point where data segments are about to be applied
      if (!applyStateStartedLatch.await(15, TimeUnit.SECONDS)) {
         throw new TimeoutException();
      }

      // Run the operation while the joiner still holds pre-operation state in transit.
      if (op == Operation.CLEAR) {
         log.info("Clearing cache ..");
         cache(0).clear();
         log.info("Finished clearing cache");

         assertEquals(0, dc0.size());
         assertEquals(0, dc1.size());
      } else if (op == Operation.REMOVE) {
         log.info("Removing all keys one by one ..");
         for (int i = 0; i < numKeys; i++) {
            cache(0).remove(i);
         }
         log.info("Finished removing keys");

         assertEquals(0, dc0.size());
         assertEquals(0, dc1.size());
      } else if (op == Operation.PUT || op == Operation.PUT_MAP || op == Operation.REPLACE || op == Operation.PUT_IF_ABSENT) {
         log.info("Updating all keys ..");
         if (op == Operation.PUT) {
            for (int i = 0; i < numKeys; i++) {
               cache(0).put(i, "after_st_" + i);
            }
         } else if (op == Operation.PUT_MAP) {
            Map<Integer, String> toPut = new HashMap<>();
            for (int i = 0; i < numKeys; i++) {
               toPut.put(i, "after_st_" + i);
            }
            cache(0).putAll(toPut);
         } else if (op == Operation.REPLACE) {
            for (int i = 0; i < numKeys; i++) {
               String expectedOldValue = "before_st_" + i;
               boolean replaced = cache(0).replace(i, expectedOldValue, "after_st_" + i);
               assertTrue(replaced);
            }
         } else { // PUT_IF_ABSENT
            // Keys already exist, so putIfAbsent must be a no-op returning the old value.
            for (int i = 0; i < numKeys; i++) {
               String expectedOldValue = "before_st_" + i;
               Object prevValue = cache(0).putIfAbsent(i, "after_st_" + i);
               assertEquals(expectedOldValue, prevValue);
            }
         }
         log.info("Finished updating keys");
      }

      // allow state transfer to apply state
      applyStateProceedLatch.countDown();

      // wait for apply state to end
      TestingUtil.waitForNoRebalance(cache(0), cache(1), cache(2));

      // at this point state transfer is fully done
      log.tracef("Data container of NodeA has %d keys: %s", dc0.size(), StreamSupport.stream(dc0.spliterator(), false).map(ice -> ice.getKey().toString()).collect(Collectors.joining(",")));
      log.tracef("Data container of NodeB has %d keys: %s", dc1.size(), StreamSupport.stream(dc1.spliterator(), false).map(ice -> ice.getKey().toString()).collect(Collectors.joining(",")));
      log.tracef("Data container of NodeC has %d keys: %s", dc2.size(), StreamSupport.stream(dc2.spliterator(), false).map(ice -> ice.getKey().toString()).collect(Collectors.joining(",")));

      if (op == Operation.CLEAR || op == Operation.REMOVE) {
         // caches should be empty. check that no keys were revived by an inconsistent state transfer
         for (int i = 0; i < numKeys; i++) {
            assertNull(dc0.get(i));
            assertNull(dc1.get(i));
            assertNull(dc2.get(i));
         }
      } else if (op == Operation.PUT || op == Operation.PUT_MAP || op == Operation.REPLACE) {
         // check that all values are the ones expected after state transfer and were not overwritten with old values carried by state transfer
         for (int i = 0; i < numKeys; i++) {
            String expectedValue = "after_st_" + i;
            assertValue(0, i, expectedValue);
            assertValue(1, i, expectedValue);
            assertValue(2, i, expectedValue);
         }
      } else { // PUT_IF_ABSENT
         // check that all values are the ones before state transfer
         for (int i = 0; i < numKeys; i++) {
            String expectedValue = "before_st_" + i;
            assertValue(0, i, expectedValue);
            assertValue(1, i, expectedValue);
            assertValue(2, i, expectedValue);
         }
      }
   }

   /**
    * Asserts the expected value both directly in the data container and through the
    * regular cache read path of the given node.
    */
   private void assertValue(int cacheIndex, int key, String expectedValue) {
      InternalCacheEntry ice = cache(cacheIndex).getAdvancedCache().getDataContainer().get(key);
      assertNotNull("Found null on cache " + cacheIndex, ice);
      assertEquals("Did not find the expected value on cache " + cacheIndex, expectedValue, ice.getValue());
      assertEquals("Did not find the expected value on cache " + cacheIndex, expectedValue, cache(cacheIndex).get(key));
   }

   /**
    * Interceptor that pauses the first state-transfer PUT (flagged
    * PUT_FOR_STATE_TRANSFER): it signals the test via one latch and waits on the
    * other, giving the test a window to run operations before state is applied.
    */
   static class LatchInterceptor extends BaseAsyncInterceptor {
      private final CountDownLatch applyStateStartedLatch;
      private final CountDownLatch applyStateProceedLatch;

      public LatchInterceptor(CountDownLatch applyStateStartedLatch, CountDownLatch applyStateProceedLatch) {
         this.applyStateStartedLatch = applyStateStartedLatch;
         this.applyStateProceedLatch = applyStateProceedLatch;
      }

      @Override
      public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
         // if this 'put' command is caused by state transfer we delay it to ensure other cache operations
         // are performed first and create opportunity for inconsistencies
         if (cmd instanceof PutKeyValueCommand && ((PutKeyValueCommand) cmd).hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
            // signal we encounter a state transfer PUT
            applyStateStartedLatch.countDown();
            // wait until it is ok to apply state
            if (!applyStateProceedLatch.await(15, TimeUnit.SECONDS)) {
               throw new TimeoutException();
            }
         }
         return invokeNext(ctx, cmd);
      }
   }
}
| 11,720
| 41.467391
| 189
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/OperationsDuringStateTransferTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.impl.CallInterceptor;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.interceptors.impl.InvocationContextInterceptor;
import org.infinispan.interceptors.impl.VersionedEntryWrappingInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.OperationsDuringStateTransferTest")
@CleanupAfterMethod
public class OperationsDuringStateTransferTest extends MultipleCacheManagersTest {
private static final Log log = LogFactory.getLog(OperationsDuringStateTransferTest.class);
private ConfigurationBuilder cacheConfigBuilder;
   @Override
   public Object[] factory() {
      // Run every test in six flavors: {DIST_SYNC, REPL_SYNC} x {non-tx, pessimistic tx, optimistic tx}.
      return new Object[] {
         new OperationsDuringStateTransferTest().cacheMode(CacheMode.DIST_SYNC).transactional(false),
         new OperationsDuringStateTransferTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
         new OperationsDuringStateTransferTest().cacheMode(CacheMode.DIST_SYNC).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
         new OperationsDuringStateTransferTest().cacheMode(CacheMode.REPL_SYNC).transactional(false),
         new OperationsDuringStateTransferTest().cacheMode(CacheMode.REPL_SYNC).transactional(true).lockingMode(LockingMode.PESSIMISTIC),
         new OperationsDuringStateTransferTest().cacheMode(CacheMode.REPL_SYNC).transactional(true).lockingMode(LockingMode.OPTIMISTIC),
      };
   }
   @Override
   protected void createCacheManagers() {
      // Build the shared configuration from the factory flags; the same builder is
      // amended later by each test before adding the second node.
      cacheConfigBuilder = getDefaultClusteredCacheConfig(cacheMode, transactional, true);
      if (transactional) {
         cacheConfigBuilder.transaction().transactionMode(TransactionMode.TRANSACTIONAL)
               .transactionManagerLookup(new EmbeddedTransactionManagerLookup());

         cacheConfigBuilder.transaction().lockingMode(lockingMode);
         if (lockingMode == LockingMode.OPTIMISTIC) {
            cacheConfigBuilder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
         }
      }
      cacheConfigBuilder.clustering().hash().numSegments(10)
            .l1().disable()
            .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
      cacheConfigBuilder.clustering().stateTransfer().fetchInMemoryState(true).awaitInitialTransfer(false);

      addClusterEnabledCacheManager(cacheConfigBuilder);
      waitForClusterToForm();
   }
public void testRemove() throws Exception {
cache(0).put("myKey", "myValue");
// add an interceptor on second node that will block REMOVE commands right after EntryWrappingInterceptor until we are ready
final CountDownLatch removeStartedLatch = new CountDownLatch(1);
final CountDownLatch removeProceedLatch = new CountDownLatch(1);
cacheConfigBuilder.customInterceptors().addInterceptor().after(ewi())
.interceptor(new RemoveLatchInterceptor(removeStartedLatch, removeProceedLatch));
// do not allow coordinator to send topology updates to node B
final ClusterTopologyManager ctm0 = TestingUtil.extractGlobalComponent(manager(0), ClusterTopologyManager.class);
ctm0.setRebalancingEnabled(false);
log.info("Adding a new node ..");
addClusterEnabledCacheManager(cacheConfigBuilder);
log.info("Added a new node");
// node B is not a member yet and rebalance has not started yet
CacheTopology cacheTopology = advancedCache(1).getDistributionManager().getCacheTopology();
assertNull(cacheTopology.getPendingCH());
assertTrue(cacheTopology.getMembers().contains(address(0)));
assertFalse(cacheTopology.getMembers().contains(address(1)));
assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));
// no keys should be present on node B yet because state transfer is blocked
assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());
// initiate a REMOVE
Future<Object> getFuture = fork(() -> {
try {
return cache(1).remove("myKey");
} catch (Exception e) {
log.errorf(e, "PUT failed: %s", e.getMessage());
throw e;
}
});
// wait for REMOVE command on node B to reach beyond *EntryWrappingInterceptor, where it will block.
// the value seen so far is null
if (!removeStartedLatch.await(10, TimeUnit.SECONDS)) {
throw new TimeoutException();
}
// paranoia, yes the value is still missing from data container
assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());
// allow rebalance to start
ctm0.setRebalancingEnabled(true);
// wait for state transfer to end
TestingUtil.waitForNoRebalance(cache(0), cache(1));
// the state should be already transferred now
assertEquals(1, cache(1).keySet().size());
// allow REMOVE to continue
removeProceedLatch.countDown();
Object oldVal = getFuture.get(10, TimeUnit.SECONDS);
assertNotNull(oldVal);
assertEquals("myValue", oldVal);
assertNull(cache(0).get("myKey"));
assertNull(cache(1).get("myKey"));
}
public Class<? extends DDAsyncInterceptor> ewi() {
Class<? extends DDAsyncInterceptor> after;
if (Configurations.isTxVersioned(cache(0).getCacheConfiguration())) {
after = VersionedEntryWrappingInterceptor.class;
} else {
after = EntryWrappingInterceptor.class;
}
return after;
}
   /**
    * Verifies that a put() started on the joiner while its state transfer is blocked
    * still observes the transferred value once rebalance completes: the put returns
    * the old value and the new value is visible on every node.
    */
   public void testPut() throws Exception {
      cache(0).put("myKey", "myValue");

      // add an interceptor on second node that will block PUT commands right after EntryWrappingInterceptor until we are ready
      final CountDownLatch putStartedLatch = new CountDownLatch(1);
      final CountDownLatch putProceedLatch = new CountDownLatch(1);
      cacheConfigBuilder.customInterceptors().addInterceptor().after(ewi())
            .interceptor(new PutLatchInterceptor(putStartedLatch, putProceedLatch));

      // do not allow coordinator to send topology updates to node B
      final ClusterTopologyManager ctm0 = TestingUtil.extractGlobalComponent(manager(0), ClusterTopologyManager.class);
      ctm0.setRebalancingEnabled(false);

      log.info("Adding a new node ..");
      addClusterEnabledCacheManager(cacheConfigBuilder);
      log.info("Added a new node");

      // node B is not a member yet and rebalance has not started yet
      CacheTopology cacheTopology = advancedCache(1).getDistributionManager().getCacheTopology();
      assertNull(cacheTopology.getPendingCH());
      assertTrue(cacheTopology.getMembers().contains(address(0)));
      assertFalse(cacheTopology.getMembers().contains(address(1)));
      assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

      // no keys should be present on node B yet because state transfer is blocked
      assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

      // initiate a PUT
      Future<Object> putFuture = fork(() -> {
         try {
            return cache(1).put("myKey", "newValue");
         } catch (Exception e) {
            log.errorf(e, "PUT failed: %s", e.getMessage());
            throw e;
         }
      });

      // wait for PUT command on node B to reach beyond *EntryWrappingInterceptor, where it will block.
      // the value seen so far is null
      if (!putStartedLatch.await(10, TimeUnit.SECONDS)) {
         throw new TimeoutException();
      }

      // paranoia, yes the value is still missing from data container
      assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

      // allow rebalance to start
      ctm0.setRebalancingEnabled(true);

      // wait for state transfer to end
      TestingUtil.waitForNoRebalance(cache(0), cache(1));

      // the state should be already transferred now
      assertEquals(1, cache(1).keySet().size());

      // allow PUT to continue
      putProceedLatch.countDown();

      Object oldVal = putFuture.get(10, TimeUnit.SECONDS);
      assertNotNull(oldVal);
      assertEquals("myValue", oldVal);

      assertEquals("newValue", cache(0).get("myKey"));
      assertEquals("newValue", cache(1).get("myKey"));
   }
/**
 * Same scenario as {@code testPut}, but for REPLACE: a REPLACE issued on the joining
 * node before state arrives is parked after EntryWrappingInterceptor, rebalance then
 * completes, and the released REPLACE must return the transferred "myValue".
 *
 * @throws Exception on latch timeout or if the forked REPLACE fails
 */
public void testReplace() throws Exception {
   cache(0).put("myKey", "myValue");

   // add an interceptor on second node that will block REPLACE commands right after EntryWrappingInterceptor until we are ready
   final CountDownLatch replaceStartedLatch = new CountDownLatch(1);
   final CountDownLatch replaceProceedLatch = new CountDownLatch(1);
   cacheConfigBuilder.customInterceptors().addInterceptor().after(ewi())
         .interceptor(new ReplaceLatchInterceptor(replaceStartedLatch, replaceProceedLatch));

   // do not allow coordinator to send topology updates to node B
   final ClusterTopologyManager ctm0 = TestingUtil.extractGlobalComponent(manager(0), ClusterTopologyManager.class);
   ctm0.setRebalancingEnabled(false);

   log.info("Adding a new node ..");
   addClusterEnabledCacheManager(cacheConfigBuilder);
   log.info("Added a new node");

   // node B is not a member yet and rebalance has not started yet
   CacheTopology cacheTopology = advancedCache(1).getDistributionManager().getCacheTopology();
   assertNull(cacheTopology.getPendingCH());
   assertTrue(cacheTopology.getMembers().contains(address(0)));
   assertFalse(cacheTopology.getMembers().contains(address(1)));
   assertFalse(cacheTopology.getCurrentCH().getMembers().contains(address(1)));

   // no keys should be present on node B yet because state transfer is blocked
   assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

   // initiate a REPLACE (note: the future holds the REPLACE result despite the "get" name)
   Future<Object> getFuture = fork(() -> {
      try {
         return cache(1).replace("myKey", "newValue");
      } catch (Exception e) {
         log.errorf(e, "REPLACE failed: %s", e.getMessage());
         throw e;
      }
   });

   // wait for REPLACE command on node B to reach beyond *EntryWrappingInterceptor, where it will block.
   // the value seen so far is null
   if (!replaceStartedLatch.await(10, TimeUnit.SECONDS)) {
      throw new TimeoutException();
   }

   // paranoia, yes the value is still missing from data container
   assertTrue(cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().isEmpty());

   // allow rebalance to start
   ctm0.setRebalancingEnabled(true);

   // wait for state transfer to end
   TestingUtil.waitForNoRebalance(cache(0), cache(1));

   // the state should be already transferred now
   assertEquals(1, cache(1).keySet().size());

   // allow REPLACE to continue
   replaceProceedLatch.countDown();

   // the REPLACE must have seen the transferred value, not null
   Object oldVal = getFuture.get(10, TimeUnit.SECONDS);
   assertNotNull(oldVal);
   assertEquals("myValue", oldVal);

   assertEquals("newValue", cache(0).get("myKey"));
   assertEquals("newValue", cache(1).get("myKey"));
}
/**
 * Verifies that a GET issued on the joining node while state transfer is held back
 * still returns the transferred value: state application is blocked before
 * InvocationContextInterceptor, the GET is blocked before CallInterceptor, state is
 * then allowed to apply, and the released GET must see "myValue".
 *
 * @throws Exception on latch timeout or if the forked GET fails
 */
public void testGet() throws Exception {
   cache(0).put("myKey", "myValue");

   // add an interceptor on node B that will block state transfer until we are ready
   final CountDownLatch applyStateProceedLatch = new CountDownLatch(1);
   final CountDownLatch applyStateStartedLatch = new CountDownLatch(1);
   cacheConfigBuilder.customInterceptors().addInterceptor().before(InvocationContextInterceptor.class)
         .interceptor(new StateTransferLatchInterceptor(applyStateStartedLatch, applyStateProceedLatch));

   // add an interceptor on node B that will block GET commands until we are ready
   final CountDownLatch getKeyStartedLatch = new CountDownLatch(1);
   final CountDownLatch getKeyProceedLatch = new CountDownLatch(1);
   cacheConfigBuilder.customInterceptors().addInterceptor().before(CallInterceptor.class)
         .interceptor(new GetLatchInterceptor(getKeyStartedLatch, getKeyProceedLatch));

   log.info("Adding a new node ..");
   addClusterEnabledCacheManager(cacheConfigBuilder);
   log.info("Added a new node");

   // Note: We have to access DC instead of cache with LOCAL_MODE flag
   // state transfer is blocked, no keys should be present on node B yet
   assertEquals(0, cache(1).getAdvancedCache().getDataContainer().size());

   // wait for state transfer on node B to progress to the point where data segments are about to be applied
   if (!applyStateStartedLatch.await(10, TimeUnit.SECONDS)) {
      throw new TimeoutException();
   }

   // state transfer is blocked, no keys should be present on node B yet
   assertEquals(0, cache(1).getAdvancedCache().getDataContainer().size());

   // initiate a GET; it will be held by GetLatchInterceptor
   Future<Object> getFuture = fork(() -> cache(1).get("myKey"));

   // wait for GET command on node B to reach beyond *DistributionInterceptor, where it will block.
   // the value seen so far is null
   if (!getKeyStartedLatch.await(10, TimeUnit.SECONDS)) {
      throw new TimeoutException();
   }

   // allow state transfer to apply state
   applyStateProceedLatch.countDown();

   // wait for state transfer to end
   TestingUtil.waitForNoRebalance(cache(0), cache(1));
   assertEquals(1, cache(1).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).keySet().size());

   // allow GET to continue; it must now observe the transferred value
   getKeyProceedLatch.countDown();

   Object value = getFuture.get(10, TimeUnit.SECONDS);
   assertEquals("myValue", value);
}
/**
 * Parks the first REMOVE command seen by this node until the test releases it,
 * signalling its arrival through a latch so the test can synchronize with it.
 */
static class RemoveLatchInterceptor extends BaseAsyncInterceptor {
   /** Opened once a REMOVE reaches this interceptor. */
   private final CountDownLatch started;
   /** The parked REMOVE resumes when the test opens this latch. */
   private final CountDownLatch proceed;

   public RemoveLatchInterceptor(CountDownLatch removeStartedLatch, CountDownLatch removeProceedLatch) {
      this.started = removeStartedLatch;
      this.proceed = removeProceedLatch;
   }

   @Override
   public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
      if (!(cmd instanceof RemoveCommand)) {
         return invokeNext(ctx, cmd);
      }
      // Tell the test a REMOVE arrived, then park until it lets us continue.
      started.countDown();
      if (!proceed.await(10, TimeUnit.SECONDS)) {
         throw new TimeoutException();
      }
      return invokeNext(ctx, cmd);
   }
}
/**
 * Parks application-issued PUTs (state-transfer PUTs pass through untouched)
 * until the test releases them, signalling their arrival through a latch.
 */
static class PutLatchInterceptor extends BaseAsyncInterceptor {
   /** Opened once an application PUT reaches this interceptor. */
   private final CountDownLatch started;
   /** The parked PUT resumes when the test opens this latch. */
   private final CountDownLatch proceed;

   public PutLatchInterceptor(CountDownLatch putStartedLatch, CountDownLatch putProceedLatch) {
      this.started = putStartedLatch;
      this.proceed = putProceedLatch;
   }

   @Override
   public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
      if (isApplicationPut(cmd)) {
         // Signal the test, then wait for permission to continue.
         started.countDown();
         if (!proceed.await(10, TimeUnit.SECONDS)) {
            throw new TimeoutException();
         }
      }
      return invokeNext(ctx, cmd);
   }

   /** A PUT issued by the application, i.e. not one generated by state transfer. */
   private static boolean isApplicationPut(VisitableCommand cmd) {
      return cmd instanceof PutKeyValueCommand &&
            !((PutKeyValueCommand) cmd).hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER);
   }
}
/**
 * Parks the first REPLACE command seen by this node until the test releases it,
 * signalling its arrival through a latch so the test can synchronize with it.
 */
static class ReplaceLatchInterceptor extends BaseAsyncInterceptor {
   /** Opened once a REPLACE reaches this interceptor. */
   private final CountDownLatch started;
   /** The parked REPLACE resumes when the test opens this latch. */
   private final CountDownLatch proceed;

   public ReplaceLatchInterceptor(CountDownLatch replaceStartedLatch, CountDownLatch replaceProceedLatch) {
      this.started = replaceStartedLatch;
      this.proceed = replaceProceedLatch;
   }

   @Override
   public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
      if (!(cmd instanceof ReplaceCommand)) {
         return invokeNext(ctx, cmd);
      }
      // Tell the test a REPLACE arrived, then park until it lets us continue.
      started.countDown();
      if (!proceed.await(10, TimeUnit.SECONDS)) {
         throw new TimeoutException();
      }
      return invokeNext(ctx, cmd);
   }
}
/**
 * Parks PUTs generated by state transfer (flagged PUT_FOR_STATE_TRANSFER) until the
 * test releases them; ordinary application commands pass through untouched.
 */
static class StateTransferLatchInterceptor extends BaseAsyncInterceptor {
   /** Opened once a state-transfer PUT reaches this interceptor. */
   private final CountDownLatch started;
   /** The parked state-transfer PUT resumes when the test opens this latch. */
   private final CountDownLatch proceed;

   public StateTransferLatchInterceptor(CountDownLatch applyStateStartedLatch,
                                        CountDownLatch applyStateProceedLatch) {
      this.started = applyStateStartedLatch;
      this.proceed = applyStateProceedLatch;
   }

   @Override
   public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
      if (isStateTransferPut(cmd)) {
         // Signal the test that state is about to be applied, then wait for permission.
         started.countDown();
         if (!proceed.await(10, TimeUnit.SECONDS)) {
            throw new TimeoutException();
         }
      }
      return invokeNext(ctx, cmd);
   }

   /** A PUT generated by state transfer, as opposed to an application write. */
   private static boolean isStateTransferPut(VisitableCommand cmd) {
      return cmd instanceof PutKeyValueCommand &&
            ((PutKeyValueCommand) cmd).hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER);
   }
}
/**
 * Parks the first GET command until the test releases it; once the start latch has
 * been counted down, subsequent GETs pass through without blocking.
 */
static class GetLatchInterceptor extends BaseAsyncInterceptor {
   /** Opened once the first GET reaches this interceptor. */
   private final CountDownLatch started;
   /** The parked GET resumes when the test opens this latch. */
   private final CountDownLatch proceed;

   public GetLatchInterceptor(CountDownLatch getKeyStartedLatch, CountDownLatch getKeyProceedLatch) {
      this.started = getKeyStartedLatch;
      this.proceed = getKeyProceedLatch;
   }

   @Override
   public Object visitCommand(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
      // Only gate the very first GET - GETs are not concurrent in this test, so
      // checking the latch count before counting down is race-free.
      if (cmd instanceof GetKeyValueCommand && started.getCount() != 0) {
         started.countDown();
         if (!proceed.await(10, TimeUnit.SECONDS)) {
            throw new TimeoutException();
         }
      }
      return invokeNext(ctx, cmd);
   }
}
}
| 20,536
| 42.327004
| 137
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferDistSharedCacheLoaderFunctionalTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Added tests that verify when a rehash occurs that the store contents are updated properly.
 *
 * @author William Burns
 * @since 6.0
 */
@Test(groups = "functional", testName = "statetransfer.StateTransferDistSharedCacheLoaderFunctionalTest")
public class StateTransferDistSharedCacheLoaderFunctionalTest extends StateTransferFunctionalTest {

   // Per-thread flag deciding whether the next created cache manager uses a shared store.
   // ThreadLocal.withInitial replaces the verbose anonymous-subclass form.
   final ThreadLocal<Boolean> sharedCacheLoader = ThreadLocal.withInitial(() -> true);

   // Suffix used to give each non-shared store a unique name.
   int id;

   static final int INSERTION_COUNT = 500;

   @BeforeMethod
   public void beforeEachMethod() {
      // Reset to the default (shared) mode before every test method.
      sharedCacheLoader.set(true);
   }

   @Override
   protected void createCacheManagers() throws Throwable {
      super.createCacheManagers();
      configurationBuilder.clustering().cacheMode(CacheMode.DIST_SYNC);
   }

   /**
    * Creates a cache manager backed by a dummy in-memory store. The store is shared
    * (same store name for all managers) or private (unique name per manager) depending
    * on the current {@link #sharedCacheLoader} flag.
    */
   @Override
   protected EmbeddedCacheManager createCacheManager(String cacheName) {
      configurationBuilder.persistence().clearStores();
      DummyInMemoryStoreConfigurationBuilder dimcs = new DummyInMemoryStoreConfigurationBuilder(configurationBuilder.persistence());
      if (sharedCacheLoader.get()) {
         // all managers resolve to the same named store instance
         dimcs.storeName(getClass().getName());
      } else {
         // each manager gets its own, uniquely named store
         dimcs.storeName(getClass().getName() + id++);
      }
      dimcs.fetchPersistentState(false).purgeOnStartup(false).shared(sharedCacheLoader.get()).preload(true);
      configurationBuilder.persistence().passivation(false).addStore(dimcs);
      // Want to enable eviction, but don't actually evict anything
      configurationBuilder.memory().size(INSERTION_COUNT * 10);
      EmbeddedCacheManager cm = addClusterEnabledCacheManager(sci, configurationBuilder, new TransportFlags().withMerge(true));
      cm.defineConfiguration(cacheName, configurationBuilder.build());
      return cm;
   }

   public void testUnsharedNotFetchedStoreEntriesRemovedProperly() throws Exception {
      sharedCacheLoader.set(false);
      Cache<Object, Object> cache1, cache2, cache3;
      EmbeddedCacheManager cm1 = createCacheManager(cacheName);
      cache1 = cm1.getCache(cacheName);
      writeLargeInitialData(cache1);

      assertEquals(INSERTION_COUNT, getDataContainerSize(cache1));
      verifyInitialDataOnLoader(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2);

      assertEquals(INSERTION_COUNT, getDataContainerSize(cache1));
      assertEquals(INSERTION_COUNT, getDataContainerSize(cache2));
      verifyCacheLoaderCount(INSERTION_COUNT, cache2);

      EmbeddedCacheManager cm3 = createCacheManager(cacheName);
      cache3 = cm3.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2, cache3);

      // Need an additional wait for the non-owned entries to be deleted from the data containers
      eventuallyEquals(INSERTION_COUNT * 2, () -> getDataContainerSize(cache1, cache2, cache3));
      // TODO Shouldn't this work?
      //verifyCacheLoaderCount(INSERTION_COUNT * 2, cache1, cache2, cache3);
   }

   public void testUnsharedFetchedStoreEntriesRemovedProperly() throws Exception {
      sharedCacheLoader.set(false);
      Cache<Object, Object> cache1, cache2, cache3;
      EmbeddedCacheManager cm1 = createCacheManager(cacheName);
      cache1 = cm1.getCache(cacheName);
      writeLargeInitialData(cache1);

      assertEquals(INSERTION_COUNT, getDataContainerSize(cache1));
      verifyInitialDataOnLoader(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2);

      assertEquals(INSERTION_COUNT, getDataContainerSize(cache1));
      assertEquals(INSERTION_COUNT, getDataContainerSize(cache2));
      verifyCacheLoaderCount(INSERTION_COUNT, cache2);

      EmbeddedCacheManager cm3 = createCacheManager(cacheName);
      cache3 = cm3.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2, cache3);

      // Need an additional wait for the non-owned entries to be deleted from the data containers
      eventuallyEquals(INSERTION_COUNT * 2, () -> getDataContainerSize(cache1, cache2, cache3));
      // TODO Shouldn't this work?
      //verifyCacheLoaderCount(INSERTION_COUNT * 2, cache1, cache2, cache3);
   }

   public void testSharedNotFetchedStoreEntriesRemovedProperly() throws Exception {
      Cache<Object, Object> cache1, cache2, cache3;
      EmbeddedCacheManager cm1 = createCacheManager(cacheName);
      cache1 = cm1.getCache(cacheName);
      writeLargeInitialData(cache1);

      assertEquals(INSERTION_COUNT, getDataContainerSize(cache1));
      verifyInitialDataOnLoader(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2);

      assertEquals(INSERTION_COUNT, getDataContainerSize(cache1));
      assertEquals(INSERTION_COUNT, getDataContainerSize(cache2));
      verifyCacheLoaderCount(INSERTION_COUNT, cache2);

      EmbeddedCacheManager cm3 = createCacheManager(cacheName);
      cache3 = cm3.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2, cache3);

      // Shared cache loader should have all the contents still
      verifyInitialDataOnLoader(cache3);

      // Need an additional wait for the non-owned entries to be deleted from the data containers
      eventuallyEquals(INSERTION_COUNT * 2, () -> getDataContainerSize(cache1, cache2, cache3));
   }

   /** Sum of the in-memory data container sizes of the given caches. */
   protected int getDataContainerSize(Cache<?, ?>... caches) {
      int count = 0;
      for (Cache<?, ?> c : caches) {
         count += c.getAdvancedCache().getDataContainer().size();
      }
      return count;
   }

   /** Inserts {@link #INSERTION_COUNT} entries "key i" -> "value i". */
   protected void writeLargeInitialData(final Cache<Object, Object> c) {
      for (int i = 0; i < INSERTION_COUNT; ++i) {
         c.put("key " + i, "value " + i);
      }
   }

   /** Asserts the combined entry count across the first store of each cache. */
   private void verifyCacheLoaderCount(int expectedCount, Cache<?, ?>... caches) {
      int count = 0;
      for (Cache<?, ?> cache : caches) {
         count += ((DummyInMemoryStore) TestingUtil.getFirstStore(cache)).size();
      }
      assertEquals(expectedCount, count);
   }

   /** Asserts the initial data set is fully present in the cache's first store. */
   protected void verifyInitialDataOnLoader(Cache<Object, Object> c) {
      DummyInMemoryStore l = TestingUtil.getFirstStore(c);
      for (int i = 0; i < INSERTION_COUNT; ++i) {
         assertTrue("Didn't contain key " + i, l.contains("key " + i));
      }
      for (int i = 0; i < INSERTION_COUNT; ++i) {
         assertEquals("value " + i, l.loadEntry("key " + i).getValue());
      }
   }
}
| 7,371
| 37.596859
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/TestKey.java
|
package org.infinispan.statetransfer;
import java.io.Serializable;
import java.util.Random;
import org.infinispan.distribution.ch.KeyPartitioner;
/**
 * A key that maps to a given data segment of the ConsistentHash.
 *
 * @author anistor@redhat.com
 * @since 5.2
 */
final class TestKey implements Serializable {

   private static final long serialVersionUID = -42;

   /**
    * A name used for easier debugging. This is not relevant for equals() and hashCode().
    */
   private final String name;

   /**
    * A carefully crafted hash code that places this key in the requested segment.
    */
   private final int hashCode;

   /**
    * @param name           debug-friendly label, not part of equality
    * @param segmentId      target segment; probing continues until a matching hash is found
    * @param keyPartitioner maps candidate hash codes to segments
    * @throws IllegalArgumentException if segmentId is negative
    */
   public TestKey(String name, int segmentId, KeyPartitioner keyPartitioner) {
      if (segmentId < 0) {
         throw new IllegalArgumentException("segmentId is out of range");
      }
      this.name = name;
      Random rnd = new Random();
      // Probe random values until one lands in the desired segment. A primitive avoids
      // the needless Integer boxing of the original loop variable.
      int candidate;
      do {
         candidate = rnd.nextInt();
      } while (segmentId != keyPartitioner.getSegment(candidate));
      hashCode = candidate;
   }

   @Override
   public int hashCode() {
      return hashCode;
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || o.getClass() != TestKey.class) return false;
      TestKey other = (TestKey) o;
      // Equality is defined solely by the crafted hash code; 'name' is cosmetic.
      return hashCode == other.hashCode;
   }

   @Override
   public String toString() {
      return "TestKey{name=" + name + ", hashCode=" + hashCode + '}';
   }
}
| 1,400
| 21.967213
| 89
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferSegmentMetricsTest.java
|
package org.infinispan.statetransfer;
import org.infinispan.Cache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.topology.CacheTopology;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;
@Test(groups = "functional", testName = "statetransfer.StateTransferSegmentMetricsTest")
@CleanupAfterMethod
public class StateTransferSegmentMetricsTest extends BaseDistFunctionalTest<String, String> {

   // A true constant: never varies per instance, so static final.
   private static final int MAX_NUM_SEGMENTS = 6;

   // All segments owned by nodes {0,1} resp. {0,2}; switching between the two forces
   // the third node to request every segment during rebalance.
   private final int[][] owners1And2 = new int[][]{{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}};
   private final int[][] owners1And3 = new int[][]{{0, 2}, {0, 2}, {0, 2}, {0, 2}, {0, 2}, {0, 2}};
   private final ControlledConsistentHashFactory factory = new ControlledConsistentHashFactory.Default(owners1And2);

   public StateTransferSegmentMetricsTest() {
      INIT_CLUSTER_SIZE = 3;
      numOwners = 2;
      performRehashing = true;
      cleanup = CleanupPhase.AFTER_METHOD;
      // Folded in from a separate instance-initializer block; nothing in this
      // constructor reads the field, so the effect is unchanged.
      transactional = true;
   }

   @AfterMethod
   public void resetFactory() {
      // Restore the default ownership so later test methods start from a known topology.
      factory.setOwnerIndexes(owners1And2);
   }

   @Override
   protected ConfigurationBuilder buildConfiguration() {
      ConfigurationBuilder builder = super.buildConfiguration();
      builder.clustering().hash().consistentHashFactory(factory).numSegments(MAX_NUM_SEGMENTS)
            .transaction().transactionMode(TransactionMode.TRANSACTIONAL).lockingMode(LockingMode.OPTIMISTIC);
      return builder;
   }

   /**
    * Verifies the in-flight segment counters exposed by {@link StateTransferManager}
    * while a rebalance is held at several checkpoints: all segments in flight while
    * transactions are being requested, zero once they arrive, all segments in flight
    * for the data transfer, and zero again once the state batch has been applied.
    */
   @Test
   public void testSegmentCounterDuringStateTransfer() throws Exception {
      final StateTransferManager manager = TestingUtil.extractComponent(c3, StateTransferManager.class);
      final CheckPoint checkPoint = new CheckPoint();

      // Node 3 will request node 1 for transactions
      waitTransactionRequest(c1, checkPoint);

      // We have to wait until non owner has the new topology installed before transferring state
      waitRequestingSegments(c3, checkPoint);

      // Wait to receive segment batch.
      waitApplyingSegmentBatch(c3, checkPoint);

      // Change the ownership.
      factory.setOwnerIndexes(owners1And3);

      // New node joins the cluster to trigger the topology change.
      EmbeddedCacheManager cm = addClusterEnabledCacheManager();
      cm.defineConfiguration(cacheName, configuration.build());
      Future<Void> join = fork(() -> {
         waitForClusterToForm(cacheName);
         log.debug("4th has joined");
         return null;
      });

      // Node 3 is notified about topology change and request all the segments since the ownership changed.
      // This also involves requesting the transactions, we wait to the provider receive the request
      checkPoint.awaitStrict("topology_update_notify_invoked_" + c3, 10, TimeUnit.SECONDS);
      checkPoint.awaitStrict("transactions_requested_invoked_" + c1, 10, TimeUnit.SECONDS);
      assertEquals(manager.getInflightTransactionalSegmentCount(), MAX_NUM_SEGMENTS);

      // Transferring states should be in progress.
      assertTrue(manager.isStateTransferInProgress());

      checkPoint.triggerForever("transactions_requested_released_" + c1);
      checkPoint.awaitStrict("topology_update_notify_executed_" + c3, 10, TimeUnit.SECONDS);

      // Node 3 already received the transactional segments
      assertEquals(manager.getInflightTransactionalSegmentCount(), 0);

      // Node 3 ask for data from all the segments he currently owns.
      assertEquals(manager.getInflightSegmentTransferCount(), MAX_NUM_SEGMENTS);

      checkPoint.triggerForever("topology_update_notify_released_" + c3);
      checkPoint.awaitStrict("state_installed_invoked_" + c3, 10, TimeUnit.SECONDS);
      assertEquals(manager.getInflightSegmentTransferCount(), MAX_NUM_SEGMENTS);
      checkPoint.triggerForever("state_installed_invoked_release_" + c3);

      // Wait until the batch is applied. If, for any reason, the batch does not have all the segments this will fail.
      checkPoint.awaitStrict("state_applied_" + c3, 10, TimeUnit.SECONDS);
      assertEquals(manager.getInflightSegmentTransferCount(), 0);

      // We do not actually care about the new node.
      join.cancel(true);
   }

   /** Wraps the cache's StateProvider so transaction requests signal and then block on the checkpoint. */
   private void waitTransactionRequest(final Cache<?, ?> cache, final CheckPoint checkPoint) {
      StateProvider sp = TestingUtil.extractComponent(cache, StateProvider.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(sp);
      StateProvider mockProvider = mock(StateProvider.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         checkPoint.trigger("transactions_requested_invoked_" + cache);
         Object response = forwardedAnswer.answer(invocation);
         try {
            checkPoint.awaitStrict("transactions_requested_released_" + cache, 10, TimeUnit.SECONDS);
            return response;
         } catch (InterruptedException | TimeoutException e) {
            throw new TestException(e);
         }
      }).when(mockProvider).getTransactionsForSegments(any(Address.class), anyInt(), any(IntSet.class));
      TestingUtil.replaceComponent(cache, StateProvider.class, mockProvider, true);
   }

   /** Wraps the cache's StateConsumer so topology updates signal before and after executing. */
   private void waitRequestingSegments(final Cache<?, ?> cache, final CheckPoint checkPoint) {
      StateConsumer sc = TestingUtil.extractComponent(cache, StateConsumer.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(sc);
      StateConsumer mockConsumer = mock(StateConsumer.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         // Wait for main thread to sync up
         checkPoint.trigger("topology_update_notify_invoked_" + cache);
         return ((CompletionStage<?>) forwardedAnswer.answer(invocation)).thenRun(() -> {
            // Now wait until main thread lets us through
            checkPoint.trigger("topology_update_notify_executed_" + cache);
            try {
               checkPoint.awaitStrict("topology_update_notify_released_" + cache, 10, TimeUnit.SECONDS);
            } catch (InterruptedException | TimeoutException e) {
               Thread.currentThread().interrupt();
            }
         });
      }).when(mockConsumer).onTopologyUpdate(any(CacheTopology.class), anyBoolean());
      TestingUtil.replaceComponent(cache, StateConsumer.class, mockConsumer, true);
   }

   /** Wraps the cache's StateConsumer so applying a state batch is gated by the checkpoint. */
   private void waitApplyingSegmentBatch(final Cache<?, ?> cache, final CheckPoint checkPoint) {
      StateConsumer sc = TestingUtil.extractComponent(cache, StateConsumer.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(sc);
      StateConsumer mockConsumer = mock(StateConsumer.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         // Sync with main thread
         checkPoint.trigger("state_installed_invoked_" + cache);
         // Proceed when main thread allows
         checkPoint.awaitStrict("state_installed_invoked_release_" + cache, 10, TimeUnit.SECONDS);
         // Apply the whole batch of segments and then issue a signal back to main thread.
         return ((CompletionStage<?>) forwardedAnswer.answer(invocation))
               .thenRun(() -> checkPoint.trigger("state_applied_" + cache));
      }).when(mockConsumer).applyState(any(Address.class), anyInt(), anyCollection());
      TestingUtil.replaceComponent(cache, StateConsumer.class, mockConsumer, true);
   }
}
| 8,654
| 46.295082
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ConcurrentStartChanelLookupTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.blockUntilViewsReceived;
import static org.infinispan.test.TestingUtil.extractGlobalComponentRegistry;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.testng.AssertJUnit.assertEquals;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Future;
import org.infinispan.Cache;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.JGroupsConfigBuilder;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.jgroups.JChannel;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Tests concurrent startup of cache managers when the channel is started externally
 * and injected with a JGroupsChannelLookup.
 *
 * @author Dan Berindei
 * @since 8.2
 */
@Test(testName = "statetransfer.ConcurrentStartChanelLookupTest", groups = "functional")
@CleanupAfterMethod
public class ConcurrentStartChanelLookupTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() throws Throwable {
      // The test method will create the cache managers
   }

   @DataProvider(name = "startOrder")
   public Object[][] startOrder() {
      // Run the scenario with either manager starting its cache first.
      return new Object[][]{{0, 1}, {1, 0}};
   }

   /**
    * Starts one cache eagerly, delays the other, and verifies both caches come up and
    * replicate correctly despite the staggered start.
    */
   @Test(timeOut = 60000, dataProvider = "startOrder")
   public void testConcurrentStart(int eagerManager, int lazyManager) throws Exception {
      TestResourceTracker.testThreadStarted(this.getTestName());
      String name1 = TestResourceTracker.getNextNodeName();
      String name2 = TestResourceTracker.getNextNodeName();

      // Create and connect both channels beforehand
      // We need both nodes in the view when the coordinator's ClusterTopologyManagerImpl starts
      // in order to reproduce the ISPN-5106 deadlock
      JChannel ch1 = createChannel(name1, 0);
      JChannel ch2 = createChannel(name2, 1);

      // Use a JGroupsChannelLookup to pass the created channels to the transport
      EmbeddedCacheManager cm1 = createCacheManager(name1, ch1);
      EmbeddedCacheManager cm2 = createCacheManager(name2, ch2);

      try {
         assertEquals(ComponentStatus.INSTANTIATED, extractGlobalComponentRegistry(cm1).getStatus());
         assertEquals(ComponentStatus.INSTANTIATED, extractGlobalComponentRegistry(cm2).getStatus());

         log.debugf("Channels created. Starting the caches");
         Future<Object> repl1Future = fork(() -> manager(eagerManager).getCache());

         // If eagerManager == 0, the coordinator broadcasts a GET_STATUS command.
         // If eagerManager == 1, the non-coordinator sends a JOIN command to the coordinator.
         // We want to start the lazyManager without receiving these commands, then the eagerManager should
         // retry and succeed.
         // Bundling and retransmission in NAKACK2 mean we need an extra wait after lazyManager sent its
         // command, so we don't try to wait for a precise amount of time.
         Thread.sleep(1000);
         Future<Object> repl2Future = fork(() -> manager(lazyManager).getCache());
         repl1Future.get(10, SECONDS);
         repl2Future.get(10, SECONDS);

         Cache<String, String> c1r = cm1.getCache();
         Cache<String, String> c2r = cm2.getCache();
         blockUntilViewsReceived(10000, cm1, cm2);
         waitForNoRebalance(c1r, c2r);

         c1r.put("key", "value");
         assertEquals("value", c2r.get("key"));
      } finally {
         cm1.stop();
         cm2.stop();
         ch1.close();
         ch2.close();
      }
   }

   /**
    * Builds a cache manager that adopts the given pre-connected channel via
    * CustomChannelLookup, without starting it (registered for cleanup).
    */
   private EmbeddedCacheManager createCacheManager(String name1, JChannel ch1) {
      GlobalConfigurationBuilder gcb1 = new GlobalConfigurationBuilder();
      gcb1.transport().nodeName(ch1.getName()).distributedSyncTimeout(10, SECONDS);
      CustomChannelLookup.configureTransportWithChannel(gcb1, ch1, name1, false);

      ConfigurationBuilder replCfg = new ConfigurationBuilder();
      replCfg.clustering().cacheMode(CacheMode.REPL_SYNC);
      replCfg.clustering().stateTransfer().timeout(30, SECONDS);

      EmbeddedCacheManager cm1 = TestCacheManagerFactory.newDefaultCacheManager(false, gcb1, replCfg);
      registerCacheManager(cm1);
      return cm1;
   }

   /**
    * Creates and connects a JGroups channel outside of any cache manager.
    *
    * @param name      node name assigned to the channel
    * @param portRange currently unused — NOTE(review): looks like it was meant to feed
    *                  the TransportFlags below; confirm before removing.
    */
   private JChannel createChannel(String name, int portRange) throws Exception {
      String configString = JGroupsConfigBuilder.getJGroupsConfig(ConcurrentStartChanelLookupTest.class.getName(),
            new TransportFlags());
      // Be explicit about the charset instead of relying on the platform default.
      JChannel channel = new JChannel(new ByteArrayInputStream(configString.getBytes(StandardCharsets.UTF_8)));
      channel.setName(name);
      channel.connect(ConcurrentStartChanelLookupTest.class.getSimpleName());
      log.tracef("Channel %s connected: %s", channel, channel.getViewAsString());
      return channel;
   }
}
| 5,365
| 41.587302
| 114
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferRestartTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.jgroups.protocols.DISCARD;
import org.testng.annotations.Test;
/**
 * Tests the scenario for ISPN-2574:
 * <ul>
 * <li>create nodes A, B - start node C - starts state transfer from B to C</li>
 * <li>abruptly kill B before it is able to send StateResponse to C</li>
 * <li>C resends the request to A</li>
 * <li>finally cluster A, C is formed where all entries are properly backed up on both nodes</li>
 * </ul>
 *
 * @author Michal Linhard
 * @since 5.2
 */
@Test(groups = "functional", testName = "statetransfer.StateTransferRestartTest")
@CleanupAfterMethod
public class StateTransferRestartTest extends MultipleCacheManagersTest {
   // Cache / global configuration shared by all managers started in the test.
   private ConfigurationBuilder cfgBuilder;
   private GlobalConfigurationBuilder gcfgBuilder;
   /**
    * Transport wrapper that lets the test swallow outgoing {@code StateResponseCommand}s:
    * when {@link #callOnStateResponseCommand} is non-null, the command is dropped (a
    * completed future is returned instead of invoking it) and the callback runs.
    */
   class MockTransport extends JGroupsTransport {
      // Set by the test right before the joiner starts; null means pass-through.
      volatile Callable<Void> callOnStateResponseCommand;
      @Override
      public <T> CompletionStage<T> invokeCommand(Address target,
                                                  ReplicableCommand command,
                                                  ResponseCollector<T> collector,
                                                  DeliverOrder deliverOrder, long timeout,
                                                  TimeUnit unit) {
         // Intercept only StateResponseCommand; all other commands use the real transport.
         if (callOnStateResponseCommand != null && command.getClass() == StateResponseCommand.class) {
            log.trace("Ignoring StateResponseCommand");
            try {
               callOnStateResponseCommand.call();
            } catch (Exception e) {
               log.error("Error in callOnStateResponseCommand", e);
            }
            // Pretend the command completed without ever being sent.
            return CompletableFuture.completedFuture(null);
         }
         return super.invokeCommand(target, command, collector, deliverOrder, timeout, unit);
      }
   }
   private MockTransport mockTransport = new MockTransport();
   @Override
   protected void createCacheManagers() throws Throwable {
      // DIST_SYNC, transactional, numOwners=2; a generous state-transfer timeout so the
      // deliberately stalled transfer below is not aborted prematurely.
      cfgBuilder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      cfgBuilder.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup());
      cfgBuilder.clustering().hash().numOwners(2);
      cfgBuilder.clustering().stateTransfer().fetchInMemoryState(true);
      cfgBuilder.clustering().stateTransfer().timeout(20000);
      gcfgBuilder = new GlobalConfigurationBuilder();
      // Only managers started with gcfgBuilder (node c1 below) get the mock transport.
      gcfgBuilder.transport().transport(mockTransport);
   }
   @Override
   protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
      // Avoid reading/writing persistent global state between runs.
      NoOpGlobalConfigurationManager.amendCacheManager(cm);
   }
   public void testStateTransferRestart() throws Throwable {
      final int numKeys = 100;
      // c0 uses the regular transport; c1 is created with gcfgBuilder, i.e. MockTransport.
      addClusterEnabledCacheManager(cfgBuilder, new TransportFlags().withFD(true));
      addClusterEnabledCacheManager(gcfgBuilder, cfgBuilder, new TransportFlags().withFD(true));
      log.info("waiting for cluster { c0, c1 }");
      waitForClusterToForm();
      log.info("putting in data");
      final Cache<Object, Object> c0 = cache(0);
      final Cache<Object, Object> c1 = cache(1);
      for (int k = 0; k < numKeys; k++) {
         c0.put(k, k);
      }
      TestingUtil.waitForNoRebalance(c0, c1);
      // With numOwners=2 and two members, each node owns every entry.
      assertEquals(numKeys, c0.entrySet().size());
      assertEquals(numKeys, c1.entrySet().size());
      // Arrange for c1 to be killed right when it would send its state response to the
      // joiner: the response is dropped by MockTransport and the manager is torn down
      // from a forked thread, forcing the joiner to re-request state from c0.
      mockTransport.callOnStateResponseCommand = () -> {
         fork((Callable<Void>) () -> {
            log.info("KILLING the c1 cache");
            try {
               // Discard all network traffic first so the kill looks like an abrupt crash.
               DISCARD d3 = TestingUtil.getDiscardForCache(c1.getCacheManager());
               d3.discardAll(true);
               TestingUtil.killCacheManagers(manager(c1));
            } catch (Exception e) {
               log.info("there was some exception while killing cache");
            }
            return null;
         });
         try {
            // sleep and wait to be killed
            Thread.sleep(25000);
         } catch (InterruptedException e) {
            log.info("Interrupted as expected.");
            Thread.currentThread().interrupt();
         }
         return null;
      };
      log.info("adding cache c2");
      addClusterEnabledCacheManager(cfgBuilder, new TransportFlags().withFD(true));
      log.info("get c2");
      final Cache<Object, Object> c2 = cache(2);
      log.info("waiting for cluster { c0, c2 }");
      TestingUtil.blockUntilViewsChanged(10000, 2, c0, c2);
      log.infof("c0 entrySet size before : %d", c0.entrySet().size());
      log.infof("c2 entrySet size before : %d", c2.entrySet().size());
      // Eventually every entry must be backed up on both surviving nodes.
      eventuallyEquals(numKeys, () -> c0.entrySet().size());
      eventuallyEquals(numKeys, () -> c2.entrySet().size());
      log.info("Ending the test");
   }
}
| 5,809
| 38.52381
| 102
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/RebalancePolicyJmxTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.TestingUtil.killCacheManagers;
import static org.infinispan.test.fwk.TestCacheManagerFactory.configureJmx;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Arrays;
import javax.management.Attribute;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.infinispan.commons.jmx.MBeanServerLookup;
import org.infinispan.commons.jmx.TestMBeanServerLookup;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.topology.RebalancingStatus;
import org.testng.annotations.Test;
/**
 * Tests suspending and re-enabling rebalancing through the {@code LocalTopologyManager}
 * JMX mbean while nodes join and leave the cluster, verifying that no rebalance happens
 * while suspended and that the consistent hash rebalances once re-enabled.
 *
 * @author Dan Berindei
 */
@Test(groups = "functional", testName = "statetransfer.RebalancePolicyJmxTest")
@CleanupAfterMethod
@InCacheMode({ CacheMode.DIST_SYNC })
public class RebalancePolicyJmxTest extends MultipleCacheManagersTest {
   private static final String REBALANCING_ENABLED = "rebalancingEnabled";
   private final MBeanServerLookup mBeanServerLookup = TestMBeanServerLookup.create();
   public void testJoinAndLeaveWithRebalanceSuspended() throws Exception {
      doTest(false);
   }
   public void testJoinAndLeaveWithRebalanceSuspendedAwaitingInitialTransfer() throws Exception {
      doTest(true);
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      //no-op - managers are created on demand inside doTest()
   }
   /**
    * @param awaitInitialTransfer whether joiners block until they receive initial state
    */
   private ConfigurationBuilder getConfigurationBuilder(boolean awaitInitialTransfer) {
      ConfigurationBuilder cb = new ConfigurationBuilder();
      cb.clustering().cacheMode(cacheMode)
            .stateTransfer().awaitInitialTransfer(awaitInitialTransfer);
      return cb;
   }
   private GlobalConfigurationBuilder getGlobalConfigurationBuilder(String rackId) {
      int index = cacheManagers.size();
      GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gcb.transport().rackId(rackId);
      // Each manager gets its own JMX domain: <SimpleName><index>.
      configureJmx(gcb, getClass().getSimpleName() + index, mBeanServerLookup);
      return gcb;
   }
   private void doTest(boolean awaitInitialTransfer) throws Exception {
      addClusterEnabledCacheManager(getGlobalConfigurationBuilder("r1"), getConfigurationBuilder(awaitInitialTransfer));
      addClusterEnabledCacheManager(getGlobalConfigurationBuilder("r1"), getConfigurationBuilder(awaitInitialTransfer));
      waitForClusterToForm();
      MBeanServer mBeanServer = mBeanServerLookup.getMBeanServer();
      // FIX: domain0 was previously read from manager(1); since every manager has its own
      // JMX domain (see getGlobalConfigurationBuilder), ltmName0 targeted node 1's mbean.
      String domain0 = manager(0).getCacheManagerConfiguration().jmx().domain();
      ObjectName ltmName0 = TestingUtil.getCacheManagerObjectName(domain0, "DefaultCacheManager", "LocalTopologyManager");
      String domain1 = manager(1).getCacheManagerConfiguration().jmx().domain();
      ObjectName ltmName1 = TestingUtil.getCacheManagerObjectName(domain1, "DefaultCacheManager", "LocalTopologyManager");
      // Check initial state
      DistributionManager dm0 = advancedCache(0).getDistributionManager();
      assertEquals(Arrays.asList(address(0), address(1)), dm0.getCacheTopology().getCurrentCH().getMembers());
      assertNull(dm0.getCacheTopology().getPendingCH());
      assertTrue(mBeanServer.isRegistered(ltmName0));
      assertTrue((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      // Suspend rebalancing
      mBeanServer.setAttribute(ltmName0, new Attribute(REBALANCING_ENABLED, false));
      assertFalse((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      // Add 2 nodes
      log.debugf("Starting 2 new nodes");
      addClusterEnabledCacheManager(getGlobalConfigurationBuilder("r2"), getConfigurationBuilder(awaitInitialTransfer));
      addClusterEnabledCacheManager(getGlobalConfigurationBuilder("r2"), getConfigurationBuilder(awaitInitialTransfer));
      cache(2);
      cache(3);
      // Check that rebalance is suspended on the new nodes
      ClusterTopologyManager ctm2 = TestingUtil.extractGlobalComponent(manager(2), ClusterTopologyManager.class);
      assertFalse(ctm2.isRebalancingEnabled());
      ClusterTopologyManager ctm3 = TestingUtil.extractGlobalComponent(manager(3), ClusterTopologyManager.class);
      assertFalse(ctm3.isRebalancingEnabled());
      StateTransferManager stm0 = TestingUtil.extractComponent(cache(0), StateTransferManager.class);
      assertEquals(RebalancingStatus.SUSPENDED.toString(), stm0.getRebalancingStatus());
      // Check that no rebalance happened after 1 second
      Thread.sleep(1000);
      assertFalse((Boolean) mBeanServer.getAttribute(ltmName1, REBALANCING_ENABLED));
      assertNull(dm0.getCacheTopology().getPendingCH());
      assertEquals(Arrays.asList(address(0), address(1)), dm0.getCacheTopology().getCurrentCH().getMembers());
      // Re-enable rebalancing
      log.debugf("Rebalancing with nodes %s %s %s %s", address(0), address(1), address(2), address(3));
      mBeanServer.setAttribute(ltmName0, new Attribute(REBALANCING_ENABLED, true));
      assertTrue((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      // Duplicate request to enable rebalancing - should be ignored
      mBeanServer.setAttribute(ltmName0, new Attribute(REBALANCING_ENABLED, true));
      // Check that the cache now has 4 nodes, and the CH is balanced
      TestingUtil.waitForNoRebalance(cache(0), cache(1), cache(2), cache(3));
      assertNull(dm0.getCacheTopology().getPendingCH());
      assertEquals(RebalancingStatus.COMPLETE.toString(), stm0.getRebalancingStatus());
      ConsistentHash ch = dm0.getCacheTopology().getCurrentCH();
      assertEquals(Arrays.asList(address(0), address(1), address(2), address(3)), ch.getMembers());
      int numOwners = Math.min(cache(0).getCacheConfiguration().clustering().hash().numOwners(), ch.getMembers().size());
      for (int i = 0; i < ch.getNumSegments(); i++) {
         assertEquals(numOwners, ch.locateOwnersForSegment(i).size());
      }
      // Suspend rebalancing again
      mBeanServer.setAttribute(ltmName1, new Attribute(REBALANCING_ENABLED, false));
      assertFalse((Boolean) mBeanServer.getAttribute(ltmName0, REBALANCING_ENABLED));
      assertFalse((Boolean) mBeanServer.getAttribute(ltmName1, REBALANCING_ENABLED));
      // Duplicate request to disable rebalancing - should be ignored
      mBeanServer.setAttribute(ltmName1, new Attribute(REBALANCING_ENABLED, false));
      // Kill the first 2 nodes
      log.debugf("Stopping nodes %s %s", address(0), address(1));
      killCacheManagers(manager(0), manager(1));
      // Check that the nodes are no longer in the CH, but every segment only has one copy
      // Implicitly, this also checks that no data has been lost - if both a segment's owners had left,
      // the CH factory would have assigned 2 owners.
      Thread.sleep(1000);
      DistributionManager dm2 = advancedCache(2).getDistributionManager();
      assertNull(dm2.getCacheTopology().getPendingCH());
      ch = dm2.getCacheTopology().getCurrentCH();
      assertEquals(Arrays.asList(address(2), address(3)), ch.getMembers());
      // Scattered cache cannot reliably tolerate failure of two nodes, some segments may get lost
      if (cacheMode.isDistributed()) {
         for (int i = 0; i < ch.getNumSegments(); i++) {
            assertEquals(1, ch.locateOwnersForSegment(i).size());
         }
      }
      StateTransferManager stm2 = TestingUtil.extractComponent(cache(2), StateTransferManager.class);
      assertEquals(RebalancingStatus.SUSPENDED.toString(), stm2.getRebalancingStatus());
      // Enable rebalancing again
      log.debugf("Rebalancing with nodes %s %s", address(2), address(3));
      String domain2 = manager(2).getCacheManagerConfiguration().jmx().domain();
      ObjectName ltmName2 = TestingUtil.getCacheManagerObjectName(domain2, "DefaultCacheManager", "LocalTopologyManager");
      // FIX: domain3 was previously read from manager(2), pointing ltmName3 at node 2's domain.
      String domain3 = manager(3).getCacheManagerConfiguration().jmx().domain();
      ObjectName ltmName3 = TestingUtil.getCacheManagerObjectName(domain3, "DefaultCacheManager", "LocalTopologyManager");
      mBeanServer.setAttribute(ltmName2, new Attribute(REBALANCING_ENABLED, true));
      assertTrue((Boolean) mBeanServer.getAttribute(ltmName2, REBALANCING_ENABLED));
      assertTrue((Boolean) mBeanServer.getAttribute(ltmName3, REBALANCING_ENABLED));
      // Check that the CH is now balanced (and every segment has 2 copies)
      TestingUtil.waitForNoRebalance(cache(2), cache(3));
      assertEquals(RebalancingStatus.COMPLETE.toString(), stm2.getRebalancingStatus());
      assertNull(dm2.getCacheTopology().getPendingCH());
      ch = dm2.getCacheTopology().getCurrentCH();
      assertEquals(Arrays.asList(address(2), address(3)), ch.getMembers());
      for (int i = 0; i < ch.getNumSegments(); i++) {
         assertEquals(numOwners, ch.locateOwnersForSegment(i).size());
      }
   }
}
| 9,402
| 50.664835
| 122
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/CustomChannelLookup.java
|
package org.infinispan.statetransfer;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.global.TransportConfigurationBuilder;
import org.infinispan.remoting.transport.jgroups.JGroupsChannelLookup;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.jgroups.JChannel;
/**
 * A {@link JGroupsChannelLookup} that hands out channels registered up-front per node
 * name, so a test can inject a pre-built {@link JChannel} into a cache manager's
 * transport configuration.
 *
 * @author Dan Berindei
 * @since 8.2
 */
public class CustomChannelLookup implements JGroupsChannelLookup {
   // Channels registered via configureTransportWithChannel, keyed by node name.
   // Each entry is consumed (removed) exactly once by getJGroupsChannel.
   private static final Map<String, JChannel> registeredChannels = new ConcurrentHashMap<>();
   // Whether the transport should connect the channel returned by this lookup;
   // populated from the "customConnect" property during lookup.
   private boolean connectOnReturn;
   /**
    * Registers {@code channel} under {@code nodeName} and wires {@code gcb}'s transport
    * to retrieve it through this lookup class.
    */
   public static void configureTransportWithChannel(GlobalConfigurationBuilder gcb, JChannel channel, String nodeName,
                                                    boolean connect) {
      TransportConfigurationBuilder tcb = gcb.transport();
      tcb.defaultTransport();
      tcb.addProperty(JGroupsTransport.CHANNEL_LOOKUP, CustomChannelLookup.class.getName());
      tcb.addProperty("customNodeName", nodeName);
      tcb.addProperty("customConnect", Boolean.toString(connect));
      registeredChannels.put(nodeName, channel);
   }
   @Override
   public JChannel getJGroupsChannel(Properties p) {
      connectOnReturn = Boolean.parseBoolean(p.getProperty("customConnect"));
      // Remove so the same channel cannot be handed out twice.
      return registeredChannels.remove(p.getProperty("customNodeName"));
   }
   @Override
   public boolean shouldConnect() {
      return connectOnReturn;
   }
   @Override
   public boolean shouldDisconnect() {
      return true;
   }
   @Override
   public boolean shouldClose() {
      return true;
   }
}
| 1,755
| 30.927273
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferExpiredStoreTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.time.ControlledTimeService;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Reproducer for ISPN-13191 "Deadlock: expiration blocks state transfer": entries with
 * expiration metadata are loaded into a store, the clock is advanced so some of them
 * expire, and then a second node joins, triggering state transfer of the (partly
 * expired) persisted state.
 */
@Test(testName = "statetransfer.StateTransferExpiredStoreTest", groups = "functional")
public class StateTransferExpiredStoreTest extends MultipleCacheManagersTest {
   // Injected into both managers so the test controls time deterministically.
   private ControlledTimeService timeService;
   @Override
   protected void createCacheManagers() throws Throwable {
      timeService = new ControlledTimeService();
      createCluster(TestDataSCI.INSTANCE, null, 2);
      TestingUtil.replaceComponent(manager(0), TimeService.class, timeService, true);
      TestingUtil.replaceComponent(manager(1), TimeService.class, timeService, true);
   }
   /** Store-backed configuration for the given {@code cacheMode}, with persistent-state fetch enabled. */
   private ConfigurationBuilder getConfigurationBuilder(CacheMode cacheMode) {
      ConfigurationBuilder cfg = new ConfigurationBuilder();
      cfg.persistence()
         .addStore(DummyInMemoryStoreConfigurationBuilder.class)
         .fetchPersistentState(true);
      cfg.clustering().cacheMode(cacheMode);
      cfg.clustering().stateTransfer().timeout(30_000);
      return cfg;
   }
   @DataProvider
   Object[][] cacheModes() {
      return new Object[][] {
            {CacheMode.DIST_SYNC},
            {CacheMode.REPL_SYNC},
      };
   }
   @Test(dataProvider = "cacheModes")
   public void testStateTransfer(CacheMode cacheMode) {
      String cacheName = "cache_" + cacheMode;
      Configuration configuration = getConfigurationBuilder(cacheMode).build();
      manager(0).defineConfiguration(cacheName, configuration);
      manager(1).defineConfiguration(cacheName, configuration);
      AdvancedCache<Object, Object> cache0 = advancedCache(0, cacheName);
      String value = "value";
      cache0.put("immortal", value);
      for (int i = 1; i <= 3; i++) {
         // One entry per expiration flavour. (The original loop wrote "lifespan+maxidle"
         // twice per iteration; with the controlled clock the second put was a no-op
         // duplicate and has been removed.)
         cache0.put("lifespan+maxidle" + i, value, i, SECONDS, i, SECONDS);
         cache0.put("lifespan" + i, value, i, SECONDS);
         cache0.put("maxidle" + i, value, -1, SECONDS, i, SECONDS);
      }
      log.info("timeService.advance");
      // Advance past the 1-second expirations so some persisted entries are expired.
      timeService.advance(SECONDS.toMillis(2));
      // Starting the cache on node 1 triggers the state transfer under test.
      AdvancedCache<Object, Object> cache1 = advancedCache(1, cacheName);
      assertEquals(value, cache1.get("immortal"));
      for (int i = 2; i <= 3; i++) {
         // NOTE(review): these reads intentionally-or-not target cache0, not the freshly
         // started cache1 - presumably to check the originals survived; confirm whether
         // cache1 was intended. (A duplicate "lifespan+maxidle" assertion was removed.)
         assertEquals(value, cache0.get("lifespan+maxidle" + i));
         assertEquals(value, cache0.get("lifespan" + i));
         assertEquals(value, cache0.get("maxidle" + i));
      }
   }
}
| 3,280
| 38.059524
| 86
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/TxReplay3Test.java
|
package org.infinispan.statetransfer;
import static org.infinispan.test.TestingUtil.wrapComponent;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.AbstractDelegatingRpcManager;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
 * Test for https://issues.jboss.org/browse/ISPN-6047
 *
 * Orchestrates two concurrent transactions on the same key while a new node joins,
 * forcing one prepare to receive an {@code UnsureResponse} and be replayed, and then
 * asserts that the last committed value wins.
 *
 * @author Pedro Ruivo
 * @since 8.2
 */
@Test(groups = "functional", testName = "statetransfer.TxReplay3Test")
public class TxReplay3Test extends MultipleCacheManagersTest {
   private static final Log log = LogFactory.getLog(TxReplay3Test.class);
   private static final String VALUE_1 = "v1";
   private static final String VALUE_2 = "v2";
   // StateSequencer event names; the order() call below defines the global interleaving.
   private static final String TX1_LOCKED = "tx1:acquired_lock";
   private static final String TX1_UNSURE = "tx1:unsure_response";
   private static final String TX2_PENDING = "tx2:waiting_tx1";
   private static final String MAIN_ADVANCE = "main:advance";
   private static final String JOIN_NEW_NODE = "join:add_new_node";
   public void testReplay() throws Exception {
      // Key owned by cache(0) so both transactions contend on the same primary owner.
      final Object key = new MagicKey("TxReplay3Test", cache(0));
      final StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("tx1", TX1_LOCKED, TX1_UNSURE);
      sequencer.logicalThread("tx2", TX2_PENDING);
      sequencer.logicalThread("join", JOIN_NEW_NODE);
      sequencer.logicalThread("main", MAIN_ADVANCE);
      // tx1 locks, main proceeds, tx2 blocks on the lock, the new node joins, then tx1's
      // prepare response is converted into an UnsureResponse (triggering replay).
      sequencer.order(TX1_LOCKED, MAIN_ADVANCE, TX2_PENDING, JOIN_NEW_NODE, TX1_UNSURE);
      // cache(1) originates tx1: its RpcManager rewrites the prepare responses.
      wrapComponent(cache(1), RpcManager.class,
                    (wrapOn, current) -> new UnsureResponseRpcManager(current, sequencer), true);
      // cache(0) is the lock owner: its inbound handler signals when tx2's prepare arrives.
      Handler handler = wrapInboundInvocationHandler(cache(0), current -> new Handler(current, sequencer));
      handler.setOrigin(address(cache(2)));
      Future<Void> tx1 = fork(() -> {
         cache(1).put(key, VALUE_1);
         return null;
      });
      sequencer.advance(MAIN_ADVANCE);
      Future<Void> tx2 = fork(() -> {
         cache(2).put(key, VALUE_2);
         return null;
      });
      sequencer.enter(JOIN_NEW_NODE);
      // Topology change while both transactions are in flight.
      addClusterEnabledCacheManager(config()).getCache();
      waitForClusterToForm();
      sequencer.exit(JOIN_NEW_NODE);
      tx1.get(30, TimeUnit.SECONDS);
      tx2.get(30, TimeUnit.SECONDS);
      // tx2 committed after tx1, so its value must be the one that sticks.
      assertEquals(VALUE_2, cache(0).get(key));
   }
   @Override
   protected void createCacheManagers() throws Throwable {
      createClusteredCaches(3, TestDataSCI.INSTANCE, config());
   }
   // Single owner / single segment pinned to node 0 so lock ownership is deterministic.
   private static ConfigurationBuilder config() {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
      builder.transaction()
             .useSynchronization(false)
             .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
             .recovery().disable();
      builder.locking().lockAcquisitionTimeout(1, TimeUnit.MINUTES).isolationLevel(IsolationLevel.READ_COMMITTED);
      builder.clustering()
             .remoteTimeout(1, TimeUnit.MINUTES)
             .hash().numOwners(1).numSegments(1)
             .consistentHashFactory(new ControlledConsistentHashFactory.Default(0))
             .stateTransfer().fetchInMemoryState(false);
      return builder;
   }
   /**
    * RpcManager wrapper for the tx1 originator: after the first PrepareCommand completes,
    * it advances the TX1_LOCKED / TX1_UNSURE states and replaces every response with
    * {@link UnsureResponse#INSTANCE}, forcing the transaction to be replayed.
    */
   private static class UnsureResponseRpcManager extends AbstractDelegatingRpcManager {
      private final StateSequencer sequencer;
      // Ensures only the first prepare is rewritten.
      private volatile boolean triggered = false;
      public UnsureResponseRpcManager(RpcManager realOne, StateSequencer sequencer) {
         super(realOne);
         this.sequencer = sequencer;
      }
      @Override
      protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
                                                      ResponseCollector<T> collector,
                                                      Function<ResponseCollector<T>, CompletionStage<T>> invoker,
                                                      RpcOptions rpcOptions) {
         return super.performRequest(targets, command, collector, invoker, rpcOptions)
               .thenApply(result -> {
                  log.debugf("After invoke remotely %s. Responses=%s", command, result);
                  // Only intercept the first PrepareCommand; pass everything else through.
                  if (triggered || !(command instanceof PrepareCommand))
                     return result;
                  log.debugf("Triggering %s and %s", TX1_LOCKED, TX1_UNSURE);
                  triggered = true;
                  try {
                     sequencer.advance(TX1_LOCKED);
                     sequencer.advance(TX1_UNSURE);
                  } catch (TimeoutException | InterruptedException e) {
                     throw new CacheException(e);
                  }
                  // Replace every node's response with UnsureResponse to force a replay.
                  Map<Address, Response> newResult = new HashMap<>();
                  ((Map<Address, Response>) result).forEach((address, response) -> newResult.put(address, UnsureResponse.INSTANCE));
                  log.debugf("After invoke remotely %s. New Responses=%s", command, newResult);
                  return (T) newResult;
               });
      }
   }
   /**
    * Inbound handler wrapper for the lock owner: advances TX2_PENDING once the
    * PrepareCommand originating from the expected node (tx2's originator) is handled,
    * i.e. tx2 is now waiting for tx1's lock.
    */
   private static class Handler extends AbstractDelegatingHandler {
      private final StateSequencer sequencer;
      // Fire the TX2_PENDING state only once.
      private volatile boolean triggered = false;
      // The address tx2's prepare is expected to originate from; set by the test.
      private volatile Address origin;
      public Handler(PerCacheInboundInvocationHandler delegate, StateSequencer sequencer) {
         super(delegate);
         this.sequencer = sequencer;
      }
      public void setOrigin(Address origin) {
         this.origin = origin;
      }
      @Override
      protected boolean beforeHandle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
         log.debugf("Before invoking %s. expected origin=%s", command, origin);
         return super.beforeHandle(command, reply, order);
      }
      @Override
      protected void afterHandle(CacheRpcCommand command, DeliverOrder order, boolean delegated) {
         super.afterHandle(command, order, delegated);
         log.debugf("After invoking %s. expected origin=%s", command, origin);
         if (!triggered && command instanceof PrepareCommand && command.getOrigin().equals(origin)) {
            log.debugf("Triggering %s.", TX2_PENDING);
            triggered = true;
            try {
               sequencer.advance(TX2_PENDING);
            } catch (TimeoutException | InterruptedException e) {
               throw new CacheException(e);
            }
         }
      }
   }
}
| 8,050
| 40.076531
| 129
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/ReplCommandForwardingTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.findInterceptor;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.BaseCustomAsyncInterceptor;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.ReplicatedControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
 * Test that commands are properly forwarded during/after state transfer.
 *
 * Uses a {@code DelayInterceptor} to block replicated commands at a CheckPoint, lets the
 * topology change while they are blocked, and verifies the commands are NOT forwarded to
 * the joiner after being unblocked.
 *
 * @author Dan Berindei
 * @since 5.2
 */
@Test(groups = "functional", testName = "statetransfer.ReplCommandForwardingTest")
@CleanupAfterMethod
public class ReplCommandForwardingTest extends MultipleCacheManagersTest {
   private static final String CACHE_NAME = "testCache";
   @Override
   protected void createCacheManagers() {
      // do nothing, each test will create its own cache managers
   }
   /**
    * REPL_ASYNC config whose interceptor chain blocks {@code commandToBlock} instances
    * after EntryWrappingInterceptor (i.e. after replication, before commit of entries).
    */
   private ConfigurationBuilder buildConfig(Class<?> commandToBlock) {
      ConfigurationBuilder configurationBuilder = getDefaultClusteredCacheConfig( CacheMode.REPL_ASYNC, false);
      configurationBuilder.clustering().remoteTimeout(15000);
      // The coordinator will always be the primary owner
      configurationBuilder.clustering().hash().numSegments(1).consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
      configurationBuilder.clustering().stateTransfer().fetchInMemoryState(true);
      // We must block after the commit was replicated, but before the entries are committed
      configurationBuilder.customInterceptors()
            .addInterceptor().after(EntryWrappingInterceptor.class).interceptor(new DelayInterceptor(commandToBlock));
      return configurationBuilder;
   }
   public void testForwardToJoinerNonTransactional() throws Exception {
      EmbeddedCacheManager cm1 = addClusterEnabledCacheManager();
      final Cache<Object, Object> c1 = cm1.createCache(CACHE_NAME, buildConfig(PutKeyValueCommand.class).build());
      DelayInterceptor di1 = findInterceptor(c1, DelayInterceptor.class);
      int initialTopologyId = c1.getAdvancedCache().getDistributionManager().getCacheTopology().getTopologyId();
      EmbeddedCacheManager cm2 = addClusterEnabledCacheManager();
      Cache<Object, Object> c2 = cm2.createCache(CACHE_NAME, buildConfig(PutKeyValueCommand.class).build());
      DelayInterceptor di2 = findInterceptor(c2, DelayInterceptor.class);
      // Each join bumps the topology id by 4 (join + rebalance phases).
      waitForStateTransfer(initialTopologyId + 4, c1, c2);
      // Start a 3rd node, but start a different cache there so that the topology stays the same.
      // Otherwise the put command blocked on node 1 could block the view message (as both are broadcast by node 0).
      EmbeddedCacheManager cm3 = addClusterEnabledCacheManager();
      cm3.createCache("differentCache", getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC).build());
      Future<Object> f = fork(() -> {
         log.tracef("Initiating a put command on %s", c1);
         c1.put("k", "v");
         return null;
      });
      // The put command is replicated to cache c2, and it blocks in the DelayInterceptor on both c1 and c2.
      di1.waitUntilBlocked(1);
      di2.waitUntilBlocked(1);
      // c3 joins the cache, topology id changes
      Cache<Object, Object> c3 = cm3.createCache(CACHE_NAME, buildConfig(PutKeyValueCommand.class).build());
      DelayInterceptor di3 = findInterceptor(c3, DelayInterceptor.class);
      waitForStateTransfer(initialTopologyId + 8, c1, c2, c3);
      // Unblock the replicated command on c2 and c1
      // Neither cache will forward the command to c3
      di2.unblock(1);
      di1.unblock(1);
      // Give any (incorrect) forwarding a chance to happen before checking di3.
      Thread.sleep(2000);
      assertEquals("The command shouldn't have been forwarded to " + c3, 0, di3.getCounter());
      log.tracef("Waiting for the put command to finish on %s", c1);
      Object retval = f.get(10, SECONDS);
      log.tracef("Put command finished on %s", c1);
      assertNull(retval);
      // 1 direct invocation
      assertEquals(1, di1.getCounter());
      // 1 from c1
      assertEquals(1, di2.getCounter());
      // 0 invocations
      assertEquals(0, di3.getCounter());
   }
   /** Waits until rebalance finishes and asserts every cache reached {@code expectedTopologyId}. */
   private void waitForStateTransfer(int expectedTopologyId, Cache... caches) {
      waitForNoRebalance(caches);
      for (Cache c : caches) {
         CacheTopology cacheTopology = c.getAdvancedCache().getDistributionManager().getCacheTopology();
         // NOTE(review): the assertEquals arguments are in (actual, expected) order here,
         // reversed from the usual (expected, actual) convention - harmless for equality.
         assertEquals(String.format("Wrong topology on cache %s, expected %d and got %s", c, expectedTopologyId,
               cacheTopology), cacheTopology.getTopologyId(), expectedTopologyId);
      }
   }
   /**
    * Interceptor that parks commands of type {@code commandToBlock} at a CheckPoint after
    * the rest of the chain has run, and counts how many times it blocked. State-transfer
    * originated invocations are never blocked.
    */
   class DelayInterceptor extends BaseCustomAsyncInterceptor {
      // Number of commands that reached doBlock(); used both for assertions and as the
      // per-command CheckPoint event id.
      private final AtomicInteger counter = new AtomicInteger(0);
      private final CheckPoint checkPoint = new CheckPoint();
      private final Class<?> commandToBlock;
      public DelayInterceptor(Class<?> commandToBlock) {
         this.commandToBlock = commandToBlock;
      }
      public int getCounter() {
         return counter.get();
      }
      /** Waits (up to 5s) until the {@code count}-th command is parked on this cache. */
      public void waitUntilBlocked(int count) throws TimeoutException, InterruptedException {
         String event = checkPoint.peek(5, SECONDS, "blocked_" + count + "_on_" + cache);
         assertEquals("blocked_" + count + "_on_" + cache, event);
      }
      /** Releases the {@code count}-th parked command on this cache. */
      public void unblock(int count) throws InterruptedException, TimeoutException, BrokenBarrierException {
         log.tracef("Unblocking command on cache %s", cache);
         checkPoint.awaitStrict("blocked_" + count + "_on_" + cache, 5, SECONDS);
         checkPoint.trigger("resume_" + count + "_on_" + cache);
      }
      @Override
      public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            // Skip transactional invocations and state-transfer puts.
            if (!ctx.isInTxScope() && !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
               doBlock(ctx, command);
            }
         });
      }
      @Override
      public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }
      @Override
      public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }
      @Override
      public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }
      /** Signals "blocked" and parks until the matching "resume" event (15s max). */
      private void doBlock(InvocationContext ctx, ReplicableCommand command) throws InterruptedException,
            TimeoutException {
         // Only the configured command type is delayed.
         if (commandToBlock != command.getClass())
            return;
         log.tracef("Delaying command %s originating from %s", command, ctx.getOrigin());
         Integer myCount = counter.incrementAndGet();
         checkPoint.trigger("blocked_" + myCount + "_on_" + cache);
         checkPoint.awaitStrict("resume_" + myCount + "_on_" + cache, 15, SECONDS);
         log.tracef("Command unblocked: %s", command);
      }
      @Override
      public String toString() {
         return "DelayInterceptor{counter=" + counter + "}";
      }
   }
}
| 8,860
| 42.650246
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/DistStateTransferOnLeaveConsistencyTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.infinispan.Cache;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Test for ISPN-2362 and ISPN-2502 in distributed mode. Uses a cluster which initially has 3 nodes and
* the second node is killed in order to cause a state transfer and then test consistency.
* Tests several operations both in an optimistic tx cluster (with write-skew check enabled) and in a pessimistic tx one.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.DistStateTransferOnLeaveConsistencyTest")
@CleanupAfterMethod
public class DistStateTransferOnLeaveConsistencyTest extends MultipleCacheManagersTest {
private static final Log log = LogFactory.getLog(DistStateTransferOnLeaveConsistencyTest.class);
private ControlledConsistentHashFactory consistentHashFactory;
private enum Operation {
REMOVE, CLEAR, PUT, PUT_MAP, PUT_IF_ABSENT, REPLACE
}
   @Override
   protected final void createCacheManagers() {
      // cache managers will be created by each test
   }

   /**
    * Builds a DIST_SYNC transactional configuration with 2 segments / 2 owners and a
    * controlled consistent-hash factory so the test can dictate which nodes own each segment.
    *
    * @param isOptimistic {@code true} for optimistic locking with REPEATABLE_READ isolation,
    *                     {@code false} for pessimistic locking
    */
   protected ConfigurationBuilder createConfigurationBuilder(boolean isOptimistic) {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true, true);
      builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup());
      if (isOptimistic) {
         builder.transaction().lockingMode(LockingMode.OPTIMISTIC)
               .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      } else {
         builder.transaction().lockingMode(LockingMode.PESSIMISTIC);
      }
      // Make it impossible for a key to be owned by nodes 0 and 2
      consistentHashFactory = new ControlledConsistentHashFactory.Default(new int[][]{{0, 1}, {1, 2}});
      builder.clustering().hash().numOwners(2).numSegments(2).consistentHashFactory(consistentHashFactory);
      builder.clustering().stateTransfer().fetchInMemoryState(true).awaitInitialTransfer(false);
      builder.clustering().l1().disable().locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
      return builder;
   }
   // Each scenario below exercises testOperationDuringLeave with one cache operation,
   // once under optimistic and once under pessimistic transactions.
   public void testRemoveOptimistic() throws Exception {
      testOperationDuringLeave(Operation.REMOVE, true);
   }
   public void testRemovePessimistic() throws Exception {
      testOperationDuringLeave(Operation.REMOVE, false);
   }
   public void testClearOptimistic() throws Exception {
      testOperationDuringLeave(Operation.CLEAR, true);
   }
   public void testClearPessimistic() throws Exception {
      testOperationDuringLeave(Operation.CLEAR, false);
   }
   public void testPutOptimistic() throws Exception {
      testOperationDuringLeave(Operation.PUT, true);
   }
   public void testPutPessimistic() throws Exception {
      testOperationDuringLeave(Operation.PUT, false);
   }
   public void testPutMapOptimistic() throws Exception {
      testOperationDuringLeave(Operation.PUT_MAP, true);
   }
   public void testPutMapPessimistic() throws Exception {
      testOperationDuringLeave(Operation.PUT_MAP, false);
   }
   public void testPutIfAbsentOptimistic() throws Exception {
      testOperationDuringLeave(Operation.PUT_IF_ABSENT, true);
   }
   public void testPutIfAbsentPessimistic() throws Exception {
      testOperationDuringLeave(Operation.PUT_IF_ABSENT, false);
   }
   public void testReplaceOptimistic() throws Exception {
      testOperationDuringLeave(Operation.REPLACE, true);
   }
   public void testReplacePessimistic() throws Exception {
      testOperationDuringLeave(Operation.REPLACE, false);
   }
private void testOperationDuringLeave(Operation op, boolean isOptimistic) throws Exception {
ConfigurationBuilder builder = createConfigurationBuilder(isOptimistic);
createCluster(builder, 3);
waitForClusterToForm();
final int numKeys = 5;
log.infof("Putting %d keys into cache ..", numKeys);
for (int i = 0; i < numKeys; i++) {
cache(0).put(i, "before_st_" + i);
}
log.info("Finished putting keys");
for (int i = 0; i < numKeys; i++) {
assertEquals("before_st_" + i, cache(0).get(i));
assertEquals("before_st_" + i, cache(1).get(i));
assertEquals("before_st_" + i, cache(2).get(i));
}
CountDownLatch applyStateProceedLatch = new CountDownLatch(1);
CountDownLatch applyStateStartedLatch1 = new CountDownLatch(1);
blockStateTransfer(advancedCache(0), applyStateStartedLatch1, applyStateProceedLatch);
CountDownLatch applyStateStartedLatch2 = new CountDownLatch(1);
blockStateTransfer(advancedCache(2), applyStateStartedLatch2, applyStateProceedLatch);
// The indexes will only be used after node 1 is killed
consistentHashFactory.setOwnerIndexes(new int[][]{{0, 1}, {1, 0}});
log.info("Killing node 1 ..");
TestingUtil.killCacheManagers(manager(1));
log.info("Node 1 killed");
DataContainer<Object, Object> dc0 = advancedCache(0).getDataContainer();
DataContainer<Object, Object> dc2 = advancedCache(2).getDataContainer();
// wait for state transfer on nodes A and C to progress to the point where data segments are about to be applied
if (!applyStateStartedLatch1.await(15, TimeUnit.SECONDS)) {
throw new TimeoutException();
}
if (!applyStateStartedLatch2.await(15, TimeUnit.SECONDS)) {
throw new TimeoutException();
}
if (op == Operation.CLEAR) {
log.info("Clearing cache ..");
cache(0).clear();
log.info("Finished clearing cache");
assertEquals(0, dc0.size());
assertEquals(0, dc2.size());
} else if (op == Operation.REMOVE) {
log.info("Removing all keys one by one ..");
for (int i = 0; i < numKeys; i++) {
cache(0).remove(i);
}
log.info("Finished removing keys");
assertEquals(0, dc0.size());
assertEquals(0, dc2.size());
} else if (op == Operation.PUT || op == Operation.PUT_MAP || op == Operation.REPLACE || op == Operation.PUT_IF_ABSENT) {
log.info("Updating all keys ..");
if (op == Operation.PUT) {
for (int i = 0; i < numKeys; i++) {
cache(0).put(i, "after_st_" + i);
}
} else if (op == Operation.PUT_MAP) {
Map<Integer, String> toPut = new HashMap<>();
for (int i = 0; i < numKeys; i++) {
toPut.put(i, "after_st_" + i);
}
cache(0).putAll(toPut);
} else if (op == Operation.REPLACE) {
for (int i = 0; i < numKeys; i++) {
String expectedOldValue = "before_st_" + i;
boolean replaced = cache(0).replace(i, expectedOldValue, "after_st_" + i);
assertTrue(replaced);
}
} else { // PUT_IF_ABSENT
for (int i = 0; i < numKeys; i++) {
String expectedOldValue = "before_st_" + i;
Object prevValue = cache(0).putIfAbsent(i, "after_st_" + i);
assertEquals(expectedOldValue, prevValue);
}
}
log.info("Finished updating keys");
}
// allow state transfer to apply state
applyStateProceedLatch.countDown();
// wait for apply state to end
TestingUtil.waitForNoRebalance(cache(0), cache(2));
// at this point state transfer is fully done
log.tracef("Data container of NodeA has %d keys: %s", dc0.size(), StreamSupport.stream(dc0.spliterator(), false).map(ice -> ice.getKey().toString()).collect(Collectors.joining(",")));
log.tracef("Data container of NodeC has %d keys: %s", dc2.size(), StreamSupport.stream(dc2.spliterator(), false).map(ice -> ice.getKey().toString()).collect(Collectors.joining(",")));
if (op == Operation.CLEAR || op == Operation.REMOVE) {
// caches should be empty. check that no keys were revived by an inconsistent state transfer
for (int i = 0; i < numKeys; i++) {
assertNull(dc0.get(i));
assertNull(dc2.get(i));
}
} else if (op == Operation.PUT || op == Operation.PUT_MAP || op == Operation.REPLACE) {
LocalizedCacheTopology cacheTopology = advancedCache(0).getDistributionManager().getCacheTopology();
// check that all values are the ones expected after state transfer
for (int i = 0; i < numKeys; i++) {
// check number of owners
int owners = 0;
if (dc0.get(i) != null) {
owners++;
}
if (dc2.get(i) != null) {
owners++;
}
assertEquals("Wrong number of owners", cacheTopology.getDistribution(i).readOwners().size(), owners);
// check values were not overwritten with old values carried by state transfer
String expected = "after_st_" + i;
assertEquals(expected, cache(0).get(i));
assertEquals("after_st_" + i, cache(2).get(i));
}
} else { // PUT_IF_ABSENT
LocalizedCacheTopology cacheTopology = advancedCache(0).getDistributionManager().getCacheTopology();
for (int i = 0; i < numKeys; i++) {
// check number of owners
int owners = 0;
if (dc0.get(i) != null) {
owners++;
}
if (dc2.get(i) != null) {
owners++;
}
assertEquals("Wrong number of owners", cacheTopology.getDistribution(i).readOwners().size(), owners);
String expected = "before_st_" + i;
assertEquals(expected, cache(0).get(i));
assertEquals(expected, cache(2).get(i));
}
}
}
   // Wraps the cache's StateConsumer with a BlockingStateConsumer (reusing an existing wrapper
   // if one is already installed) and points it at the given latches: 'started' is counted down
   // when applyState begins, 'proceed' must be counted down by the test to let it continue.
   private static void blockStateTransfer(Cache<?,?> cache, CountDownLatch started, CountDownLatch proceed) {
      TestingUtil.wrapComponent(cache, StateConsumer.class, (current) -> {
         BlockingStateConsumer stateConsumer;
         if (current instanceof BlockingStateConsumer) {
            stateConsumer = (BlockingStateConsumer) current;
         } else {
            stateConsumer = new BlockingStateConsumer(current);
         }
         stateConsumer.startedLatch = started;
         stateConsumer.proceedLatch = proceed;
         return stateConsumer;
      });
   }
   /**
    * StateConsumer wrapper that signals {@code startedLatch} when state is about to be applied
    * and then waits (up to 15s) on {@code proceedLatch} before delegating to the real consumer.
    * The wait runs on the blocking manager so no non-blocking thread is held up.
    */
   @Scope(Scopes.NAMED_CACHE)
   public static class BlockingStateConsumer extends DelegatingStateConsumer {
      @Inject BlockingManager blockingManager;
      // volatile: latches are assigned by the test thread and read by the state-transfer thread
      volatile CountDownLatch startedLatch;
      volatile CountDownLatch proceedLatch;
      BlockingStateConsumer(StateConsumer delegate) {
         super(delegate);
      }
      @Override
      public CompletionStage<?> applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
         return blockingManager.runBlocking(() -> {
            // signal we encounter a state transfer PUT
            startedLatch.countDown();
            // wait until it is ok to apply state
            try {
               if (!proceedLatch.await(15, TimeUnit.SECONDS)) {
                  throw CompletableFutures.asCompletionException(new TimeoutException());
               }
            } catch (InterruptedException e) {
               Thread.currentThread().interrupt();
            }
            CompletionStages.join(super.applyState(sender, topologyId, stateChunks));
         }, "state-" + sender);
      }
   }
}
| 13,027
| 39.968553
| 189
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/DelegatingStateTransferLock.java
|
package org.infinispan.statetransfer;
import java.util.concurrent.CompletionStage;
/**
 * {@link StateTransferLock} implementation that forwards every call unchanged to a wrapped
 * delegate. Subclasses can override individual methods to intercept specific operations
 * while leaving the rest of the behavior intact.
 */
public class DelegatingStateTransferLock implements StateTransferLock {
   private final StateTransferLock delegate;
   public DelegatingStateTransferLock(StateTransferLock delegate) {
      this.delegate = delegate;
   }
   @Override
   public void acquireExclusiveTopologyLock() {
      delegate.acquireExclusiveTopologyLock();
   }
   @Override
   public void releaseExclusiveTopologyLock() {
      delegate.releaseExclusiveTopologyLock();
   }
   @Override
   public void acquireSharedTopologyLock() {
      delegate.acquireSharedTopologyLock();
   }
   @Override
   public void releaseSharedTopologyLock() {
      delegate.releaseSharedTopologyLock();
   }
   @Override
   public void notifyTransactionDataReceived(int topologyId) {
      delegate.notifyTransactionDataReceived(topologyId);
   }
   @Override
   public CompletionStage<Void> transactionDataFuture(int expectedTopologyId) {
      return delegate.transactionDataFuture(expectedTopologyId);
   }
   @Override
   public boolean transactionDataReceived(int expectedTopologyId) {
      return delegate.transactionDataReceived(expectedTopologyId);
   }
   @Override
   public CompletionStage<Void> topologyFuture(int expectedTopologyId) {
      return delegate.topologyFuture(expectedTopologyId);
   }
   @Override
   public void notifyTopologyInstalled(int topologyId) {
      delegate.notifyTopologyInstalled(topologyId);
   }
   @Override
   public boolean topologyReceived(int expectedTopologyId) {
      return delegate.topologyReceived(expectedTopologyId);
   }
}
| 1,630
| 25.306452
| 79
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferPessimisticTest.java
|
package org.infinispan.statetransfer;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestBlocking;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
/**
* Test if state transfer happens properly on a cache with pessimistic transactions.
* See https://issues.jboss.org/browse/ISPN-2408.
*
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.StateTransferPessimisticTest")
@CleanupAfterMethod
public class StateTransferPessimisticTest extends MultipleCacheManagersTest {
public static final int NUM_KEYS = 100;
public static final int CLUSTER_SIZE = 2;
private ConfigurationBuilder dccc;
   @Override
   protected void createCacheManagers() throws Throwable {
      // DIST_SYNC, pessimistic transactions, single owner, no L1 — two-node starting cluster.
      dccc = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true, true);
      dccc.transaction()
            .transactionMode(TransactionMode.TRANSACTIONAL)
            .transactionManagerLookup(new EmbeddedTransactionManagerLookup())
            .lockingMode(LockingMode.PESSIMISTIC);
      dccc.clustering().hash().numOwners(1).l1().disable();
      dccc.locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
      createCluster(TestDataSCI.INSTANCE, dccc, CLUSTER_SIZE);
      waitForClusterToForm();
   }

   /**
    * Populates the cluster, blocks the stale-entry invalidation on node 0 via a CyclicBarrier,
    * joins a third node, verifies the old owner still has its copies while invalidation is
    * blocked, then releases it and checks entries converge to exactly one copy cluster-wide.
    */
   public void testStateTransfer() throws Exception {
      CyclicBarrier barrier = new CyclicBarrier(2);
      blockDataContainerIteration(cache(0), barrier);
      Set<Object> keys = new HashSet<>();
      for (int i = 0; i < NUM_KEYS; i++) {
         Object key = getKeyForCache(0);
         if (!keys.add(key)) continue;

         // put a key to have some data in cache
         cache(0).put(key, key);
      }
      log.trace("State transfer happens here");
      // add a third node
      addClusterEnabledCacheManager(TestDataSCI.INSTANCE, dccc);
      waitForClusterToForm();
      // Wait for the stale entries invalidation to block
      barrier.await(10, TimeUnit.SECONDS);
      log.trace("Checking the values from caches...");
      for (Object key : keys) {
         // Expect one copy of each entry on the old owner, cache 0
         assertEquals(1, checkKey(key, cache(0)));
      }
      // Unblock the stale entries invalidation
      barrier.await(10, TimeUnit.SECONDS);
      cache(0).getAdvancedCache().getAsyncInterceptorChain().removeInterceptor(BlockingInterceptor.class);
      for (Object key : keys) {
         // Check that the stale entries on the old nodes are eventually deleted
         eventuallyEquals(1, () -> checkKey(key, cache(0), cache(1), cache(2)));
      }
   }
public int checkKey(Object key, Cache... caches) {
log.tracef("Checking key: %s", key);
int c = 0;
// check them directly in data container
for (Cache cache : caches) {
InternalCacheEntry e = cache.getAdvancedCache().getDataContainer().get(key);
if (e != null) {
assertEquals(key, e.getValue());
c++;
}
}
// look at them also via cache API
for (Cache cache : caches) {
assertEquals(key, cache.get(key));
}
return c;
}
   // Replaces the cache's InternalDataContainer with a Mockito mock that delegates every call
   // to the real container, except removeSegments(): that call first syncs twice on the barrier
   // (once to announce it is blocked, once to wait for permission) before delegating.
   protected void blockDataContainerIteration(final Cache<?, ?> cache, final CyclicBarrier barrier) {
      InternalDataContainer dataContainer = TestingUtil.extractComponent(cache, InternalDataContainer.class);
      final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(dataContainer);
      InternalDataContainer mockContainer = mock(InternalDataContainer.class, withSettings().defaultAnswer(forwardedAnswer));
      doAnswer(invocation -> {
         // Wait for main thread to sync up
         TestBlocking.await(barrier, 10, TimeUnit.SECONDS);
         // Now wait until main thread lets us through
         TestBlocking.await(barrier, 10, TimeUnit.SECONDS);
         return forwardedAnswer.answer(invocation);
      }).when(mockContainer).removeSegments(any());
      TestingUtil.replaceComponent(cache, InternalDataContainer.class, mockContainer, true);
   }
}
| 5,043
| 37.8
| 125
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateConsumerTest.java
|
package org.infinispan.statetransfer;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import io.reactivex.rxjava3.core.Flowable;
import org.infinispan.Cache;
import org.infinispan.commands.CommandInvocationId;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.statetransfer.StateTransferCancelCommand;
import org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.conflict.impl.InternalConflictManager;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.SingleKeyNonTxInvocationContext;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.TriangleOrderManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.distribution.ch.impl.HashFunctionPartitioner;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.reactive.publisher.impl.LocalPublisherManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.PersistentUUIDManager;
import org.infinispan.topology.PersistentUUIDManagerImpl;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.ByteString;
import org.infinispan.util.concurrent.CommandAckCollector;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.statetransfer.XSiteStateTransferManager;
import org.mockito.stubbing.Answer;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
* Tests StateConsumerImpl.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Test(groups = "functional", testName = "statetransfer.StateConsumerTest")
public class StateConsumerTest extends AbstractInfinispanTest {
private static final Log log = LogFactory.getLog(StateConsumerTest.class);
private static final ByteString CACHE_NAME = ByteString.fromString("test-cache");
private ExecutorService pooledExecutorService;
   // Fresh per-test pool: up to 20 threads, no queueing (SynchronousQueue), and caller-runs
   // as the saturation fallback so submissions never get rejected.
   @BeforeMethod
   public void createExecutorService() {
      pooledExecutorService = new ThreadPoolExecutor(0, 20, 0L,
                                                     TimeUnit.MILLISECONDS, new SynchronousQueue<>(),
                                                     getTestThreadFactory("Worker"),
                                                     new ThreadPoolExecutor.CallerRunsPolicy());
   }

   // Tears the pool down after each test; idempotent thanks to the null guard/reset.
   @AfterMethod
   public void shutdownExecutorService() {
      if (pooledExecutorService != null) {
         pooledExecutorService.shutdownNow();
         pooledExecutorService = null;
      }
   }
   // Creates 4 cluster members and registers a random persistent UUID for each.
   private static Address[] createMembers(PersistentUUIDManager persistentUUIDManager) {
      Address[] addresses = new Address[4];
      for (int i = 0; i < 4; i++) {
         addresses[i] = new TestAddress(i);
         persistentUUIDManager.addPersistentAddressMapping(addresses[i], PersistentUUID.randomUUID());
      }
      return addresses;
   }

   // Mocked XSiteStateTransferManager whose onTopologyUpdated is a no-op.
   private static XSiteStateTransferManager mockXSiteStateTransferManager() {
      XSiteStateTransferManager mock = mock(XSiteStateTransferManager.class);
      doNothing().when(mock).onTopologyUpdated(any(CacheTopology.class), anyBoolean());
      return mock;
   }

   // CommandsFactory mock that builds real command instances from the mocked builder calls,
   // so the StateConsumer under test sends genuine state-transfer commands.
   private static CommandsFactory mockCommandsFactory() {
      CommandsFactory mock = mock(CommandsFactory.class);
      when(mock.buildStateTransferStartCommand(anyInt(), any(IntSet.class)))
            .thenAnswer(invocation -> new StateTransferStartCommand(CACHE_NAME, invocation.getArgument(0),
                                                                    invocation.getArgument(1)));
      when(mock.buildStateTransferGetTransactionsCommand(anyInt(), any(IntSet.class)))
            .thenAnswer(invocation -> new StateTransferGetTransactionsCommand(CACHE_NAME, invocation.getArgument(0),
                                                                              invocation.getArgument(1)));
      when(mock.buildStateTransferCancelCommand(anyInt(), any(IntSet.class)))
            .thenAnswer(invocation -> new StateTransferCancelCommand(CACHE_NAME, invocation.getArgument(0),
                                                                     invocation.getArgument(1)));
      when(mock.buildPutKeyValueCommand(any(), any(), anyInt(), any(Metadata.class), anyLong()))
            .thenAnswer(invocation -> new PutKeyValueCommand(invocation.getArgument(0), invocation.getArgument(1),
                                                             false, false, invocation.getArgument(3), invocation.getArgument(2),
                                                             invocation.getArgument(4), CommandInvocationId.DUMMY_INVOCATION_ID));
      return mock;
   }

   // DIST_SYNC configuration with batching enabled and a short lock timeout, used by the consumer.
   private static Configuration createConfiguration() {
      ConfigurationBuilder cb = new ConfigurationBuilder();
      cb.invocationBatching().enable()
            .clustering().cacheMode(CacheMode.DIST_SYNC)
            .clustering().stateTransfer().timeout(30000)
            .locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis())
            .locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      return cb.build();
   }
   // Minimal Cache mock: only name and RUNNING status are needed by the StateConsumer.
   private static Cache<?, ?> mockCache() {
      Cache<?, ?> cache = mock(Cache.class);
      when(cache.getName()).thenReturn(CACHE_NAME.toString());
      when(cache.getStatus()).thenReturn(ComponentStatus.RUNNING);
      return cache;
   }

   // RpcManager mock that records which segments were requested from which node
   // (into requestedSegments / flatRequestedSegments) and answers every state-transfer
   // command with a successful response.
   private static RpcManager mockRpcManager(Map<Address, Set<Integer>> requestedSegments, Set<Integer> flatRequestedSegments, Address address) {
      Transport transport = mock(Transport.class);
      when(transport.getViewId()).thenReturn(1);
      RpcManager rpcManager = mock(RpcManager.class);
      Answer<?> successfulResponse = invocation -> CompletableFuture.completedFuture(
            SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
      when(rpcManager.invokeCommand(any(Address.class), any(StateTransferGetTransactionsCommand.class),
                                    any(ResponseCollector.class),
                                    any(RpcOptions.class)))
            .thenAnswer(invocation -> {
               Address recipient = invocation.getArgument(0);
               StateTransferGetTransactionsCommand cmd = invocation.getArgument(1);
               Set<Integer> segments = cmd.getSegments();
               requestedSegments.put(recipient, segments);
               flatRequestedSegments.addAll(segments);
               return CompletableFuture.completedFuture(SuccessfulResponse.create(new ArrayList<TransactionInfo>()));
            });
      when(rpcManager.invokeCommand(any(Address.class), any(StateTransferStartCommand.class),
                                    any(ResponseCollector.class),
                                    any(RpcOptions.class)))
            .thenAnswer(successfulResponse);
      when(rpcManager.invokeCommand(any(Address.class), any(StateTransferCancelCommand.class),
                                    any(ResponseCollector.class),
                                    any(RpcOptions.class)))
            .thenAnswer(successfulResponse);
      when(rpcManager.getSyncRpcOptions()).thenReturn(new RpcOptions(DeliverOrder.NONE, 10000, TimeUnit.MILLISECONDS));
      when(rpcManager.blocking(any())).thenAnswer(invocation -> ((CompletionStage<?>) invocation
            .getArgument(0)).toCompletableFuture().join());
      when(rpcManager.getAddress()).thenReturn(address);
      when(rpcManager.getTransport()).thenReturn(transport);
      return rpcManager;
   }

   // PersistenceManager mock: segment add/remove report no change, no persisted keys.
   private static PersistenceManager mockPersistenceManager() {
      PersistenceManager persistenceManager = mock(PersistenceManager.class);
      when(persistenceManager.removeSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
      when(persistenceManager.addSegments(any())).thenReturn(CompletableFuture.completedFuture(false));
      when(persistenceManager.publishKeys(any(), any())).thenReturn(Flowable.empty());
      return persistenceManager;
   }

   // TransactionTable mock with no local or remote transactions.
   private static TransactionTable mockTransactionTable() {
      TransactionTable transactionTable = mock(TransactionTable.class);
      when(transactionTable.getLocalTransactions()).thenReturn(Collections.emptyList());
      when(transactionTable.getRemoteTransactions()).thenReturn(Collections.emptyList());
      return transactionTable;
   }

   // InvocationContextFactory mock returning a fresh single-key non-tx context per call.
   private static InvocationContextFactory mockInvocationContextFactory() {
      InvocationContextFactory icf = mock(InvocationContextFactory.class);
      when(icf.createSingleKeyNonTxInvocationContext()).thenAnswer(
            invocationOnMock -> new SingleKeyNonTxInvocationContext(null));
      return icf;
   }
   // Pushes a NO_REBALANCE topology (single CH, no pending) into the consumer.
   private static void noRebalance(StateConsumer stateConsumer, PersistentUUIDManager persistentUUIDManager, int topologyId, int rebalanceId, ConsistentHash ch) {
      stateConsumer.onTopologyUpdate(
            new CacheTopology(topologyId, rebalanceId, ch, null, CacheTopology.Phase.NO_REBALANCE,
                              ch.getMembers(), persistentUUIDManager.mapAddresses(ch.getMembers())), false);
   }

   // Pushes a READ_OLD_WRITE_ALL rebalance-start topology (current + pending + union CH).
   private static void rebalanceStart(StateConsumer stateConsumer, PersistentUUIDManager persistentUUIDManager, int topologyId, int rebalanceId, ConsistentHash current, ConsistentHash pending, ConsistentHash union) {
      stateConsumer.onTopologyUpdate(
            new CacheTopology(topologyId, rebalanceId, current, pending, union, CacheTopology.Phase.READ_OLD_WRITE_ALL,
                              union.getMembers(), persistentUUIDManager.mapAddresses(union.getMembers())), true);
   }

   // Asserts that exactly the segments newly assigned to 'member' (pending minus current
   // ownership) were requested, and that the in-flight request count matches.
   private static void assertRebalanceStart(StateConsumerImpl stateConsumer, ConsistentHash current, ConsistentHash pending, Address member, Set<Integer> flatRequestedSegments) {
      // check that all segments have been requested
      Set<Integer> oldSegments = current.getSegmentsForOwner(member);
      Set<Integer> newSegments = pending.getSegmentsForOwner(member);
      newSegments.removeAll(oldSegments);
      log.debugf("Rebalancing. Added segments=%s, old segments=%s", newSegments, oldSegments);
      assertTrue(stateConsumer.hasActiveTransfers());
      assertEquals(flatRequestedSegments, newSegments);
      assertEquals(stateConsumer.inflightRequestCount(), newSegments.size());
   }

   // Applies one single-segment chunk per requested segment, asserting the in-flight counter
   // decrements one by one and that all transfers eventually finish.
   private static void completeAndCheckRebalance(StateConsumerImpl stateConsumer, Map<Address, Set<Integer>> requestedSegments, int topologyId) throws ExecutionException, InterruptedException, TimeoutException {
      // We count how many segments were requested and then start to apply the state individually, to assert that the
      // number of in-flight requests will decrease accordingly. During real usage, the state chunk collections can
      // have more than a single segment.
      long inflightCounter = requestedSegments.values().stream().mapToLong(Collection::size).sum();
      assertEquals(stateConsumer.inflightRequestCount(), inflightCounter);
      for (Map.Entry<Address, Set<Integer>> entry : requestedSegments.entrySet()) {
         for (Integer segment : entry.getValue()) {
            Collection<StateChunk> chunks = Collections.singletonList(
                  new StateChunk(segment, Collections.emptyList(), true));
            stateConsumer.applyState(entry.getKey(), topologyId, chunks)
                         .toCompletableFuture()
                         .get(10, TimeUnit.SECONDS);
            inflightCounter -= 1;
            assertEquals(stateConsumer.inflightRequestCount(), inflightCounter);
         }
      }
      assertEquals(stateConsumer.inflightRequestCount(), 0);
      eventually(() -> !stateConsumer.hasActiveTransfers());
   }

   // Applies the given entries as one chunk for the first requested segment (topology id 22).
   private static void applyState(StateConsumer stateConsumer, Map<Address, Set<Integer>> requestedSegments, Collection<InternalCacheEntry<?, ?>> cacheEntries) {
      Map.Entry<Address, Set<Integer>> entry = requestedSegments.entrySet().iterator().next();
      Collection<StateChunk> chunks = Collections.singletonList(
            new StateChunk(entry.getValue().iterator().next(), cacheEntries, true));
      stateConsumer.applyState(entry.getKey(), 22, chunks);
   }
   // Wires the StateConsumer under test with the full set of (mostly mocked) dependencies,
   // using the per-test executor as the non-blocking executor.
   private void injectComponents(StateConsumer stateConsumer, AsyncInterceptorChain interceptorChain, RpcManager rpcManager) {
      TestingUtil.inject(stateConsumer,
                         mockCache(),
                         TestingUtil.named(NON_BLOCKING_EXECUTOR, pooledExecutorService),
                         mock(LocalTopologyManager.class),
                         interceptorChain,
                         mockInvocationContextFactory(),
                         createConfiguration(),
                         rpcManager,
                         mockCommandsFactory(),
                         mockPersistenceManager(),
                         mock(InternalDataContainer.class),
                         mockTransactionTable(),
                         mock(StateTransferLock.class),
                         mock(CacheNotifier.class),
                         new CommitManager(),
                         new CommandAckCollector(),
                         new TriangleOrderManager(0),
                         new HashFunctionPartitioner(),
                         mock(InternalConflictManager.class),
                         mock(DistributionManager.class),
                         mock(LocalPublisherManager.class),
                         mock(PerCacheInboundInvocationHandler.class),
                         mockXSiteStateTransferManager());
   }
   /**
    * Drives the consumer through scripted topology updates: a node leaves, a
    * rebalance starts, the coordinator recovers the cluster state back to the
    * pre-rebalance CH (delivered concurrently from two threads), and the rebalance
    * is then restarted and completed. Verifies no transfers leak across phases.
    */
   public void testClusterRecoverDuringStateTransfer() throws Exception {
      PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
      // create list of 4 members
      Address[] addresses = createMembers(persistentUUIDManager);
      List<Address> members1 = Arrays.asList(addresses[0], addresses[1], addresses[2], addresses[3]);
      List<Address> members2 = Arrays.asList(addresses[0], addresses[1], addresses[2]);
      // create CHes
      DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
      DefaultConsistentHash ch1 = chf.create(2, 40, members1, null);
      final DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
      DefaultConsistentHash ch3 = chf.rebalance(ch2);
      DefaultConsistentHash ch23 = chf.union(ch2, ch3);
      log.debug(ch1);
      log.debug(ch2);
      final Map<Address, Set<Integer>> requestedSegments = new ConcurrentHashMap<>();
      final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<>();
      // create state consumer
      final StateConsumerImpl stateConsumer = new StateConsumerImpl();
      injectComponents(stateConsumer, mock(AsyncInterceptorChain.class),
            mockRpcManager(requestedSegments, flatRequestedSegments, addresses[0]));
      stateConsumer.start();
      assertFalse(stateConsumer.hasActiveTransfers());
      // node 4 leaves
      noRebalance(stateConsumer, persistentUUIDManager, 1, 1, ch2);
      assertFalse(stateConsumer.hasActiveTransfers());
      // start a rebalance
      rebalanceStart(stateConsumer, persistentUUIDManager, 2, 2, ch2, ch3, ch23);
      assertRebalanceStart(stateConsumer, ch2, ch3, addresses[0], flatRequestedSegments);
      // simulate a cluster state recovery and return to ch2; the same topology is
      // delivered from a forked thread and the test thread to exercise concurrency
      Future<Object> future = fork(() -> {
         noRebalance(stateConsumer, persistentUUIDManager, 3, 2, ch2);
         return null;
      });
      noRebalance(stateConsumer, persistentUUIDManager, 3, 2, ch2);
      future.get();
      assertFalse(stateConsumer.hasActiveTransfers());
      // restart the rebalance
      requestedSegments.clear();
      flatRequestedSegments.clear();
      rebalanceStart(stateConsumer, persistentUUIDManager, 4, 4, ch2, ch3, ch23);
      assertRebalanceStart(stateConsumer, ch2, ch3, addresses[0], flatRequestedSegments);
      completeAndCheckRebalance(stateConsumer, requestedSegments, 4);
      stateConsumer.stop();
   }
   /**
    * Reproducer for ISPN-14982: a merge view update arrives while a state chunk is
    * still being applied (the interceptor chain future is held open), then the
    * rebalance is restarted; the restarted transfer must still complete once the
    * in-flight apply finishes. Topology ids/CHes are copied from the original logs.
    */
   public void testJoinDuringStateTransfer() throws Exception {
      PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
      // create list of 3 members (node 0 later drops out of members2)
      Address[] addresses = createMembers(persistentUUIDManager);
      List<Address> members1 = Arrays.asList(addresses[0], addresses[1], addresses[2]);
      List<Address> members2 = Arrays.asList(addresses[1], addresses[2]);
      // create CHes
      DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
      DefaultConsistentHash ch1 = chf.create(2, 40, members1, null);
      final DefaultConsistentHash ch2 = chf.updateMembers(ch1, members2, null);
      DefaultConsistentHash ch3 = chf.rebalance(ch2);
      DefaultConsistentHash ch23 = chf.union(ch2, ch3);
      log.debug(ch1);
      log.debug(ch2);
      log.debug(ch23);
      final Map<Address, Set<Integer>> requestedSegments = new ConcurrentHashMap<>();
      final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<>();
      final CompletableFuture<Object> putFuture = new CompletableFuture<>();
      // create dependencies; the unresolved putFuture keeps the applied state
      // "in flight" until the test completes it below
      AsyncInterceptorChain interceptorChain = mock(AsyncInterceptorChain.class);
      when(interceptorChain.invokeAsync(any(), any())).thenReturn(putFuture);
      // create state consumer
      final StateConsumerImpl stateConsumer = new StateConsumerImpl();
      injectComponents(stateConsumer, interceptorChain,
            mockRpcManager(requestedSegments, flatRequestedSegments, addresses[1]));
      stateConsumer.start();
      // initial topology
      noRebalance(stateConsumer, persistentUUIDManager, 21, 7, ch1);
      assertFalse(stateConsumer.hasActiveTransfers());
      // start a rebalance (copied form logs)
      //CacheTopology{id=22, phase=READ_OLD_WRITE_ALL, rebalanceId=8, currentCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 29+10, node-5: 31+10]}, pendingCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 30+30, node-5: 30+30]}, unionCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 29+31, node-5: 31+29]}, actualMembers=[node-3, node-5], persistentUUIDs=...}
      rebalanceStart(stateConsumer, persistentUUIDManager, 22, 8, ch2, ch3, ch23);
      assertRebalanceStart(stateConsumer, ch2, ch3, addresses[1], flatRequestedSegments);
      applyState(stateConsumer, requestedSegments, Collections.singletonList(new ImmortalCacheEntry("a", "b")));
      // merge view update
      //CacheTopology{id=23, phase=NO_REBALANCE, rebalanceId=9, currentCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 29+10, node-5: 31+10]}, pendingCH=null, unionCH=null, actualMembers=[node-3, node-5], persistentUUIDs=...}
      noRebalance(stateConsumer, persistentUUIDManager, 23, 9, ch2);
      // restart the rebalance
      requestedSegments.clear();
      flatRequestedSegments.clear();
      // CacheTopology{id=24, phase=READ_OLD_WRITE_ALL, rebalanceId=10, currentCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 29+10, node-5: 31+10]}, pendingCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 30+30, node-5: 30+30]}, unionCH=DefaultConsistentHash{ns=60, owners = (2)[node-3: 29+31, node-5: 31+29]}, actualMembers=[node-3, node-5], persistentUUIDs=...}
      rebalanceStart(stateConsumer, persistentUUIDManager, 24, 10, ch2, ch3, ch23);
      assertRebalanceStart(stateConsumer, ch2, ch3, addresses[1], flatRequestedSegments);
      // let the apply state complete
      putFuture.complete(null);
      completeAndCheckRebalance(stateConsumer, requestedSegments, 24);
      stateConsumer.stop();
   }
}
| 21,614
| 48.919169
| 374
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/StateTransferFunctionalTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.sequence;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import static org.testng.AssertJUnit.assertEquals;
import java.lang.reflect.Method;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.Mocks;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.data.DelayedMarshallingPojo;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
import jakarta.transaction.SystemException;
import jakarta.transaction.TransactionManager;
@Test(groups = "functional", testName = "statetransfer.StateTransferFunctionalTest")
public class StateTransferFunctionalTest extends MultipleCacheManagersTest {

   // Keys and values written by writeInitialData and checked by verifyInitialData.
   // Public so that subclasses can reuse them.
   public static final String A_B_NAME = "a_b_name";
   public static final String A_C_NAME = "a_c_name";
   // NOTE(review): previously "a_d_age" — a copy-paste of A_D_AGE's value, so the
   // two constants collided on the same key. Fixed to follow the A_*_NAME pattern;
   // confirm no subclass relied on the old collision.
   public static final String A_D_NAME = "a_d_name";
   public static final String A_B_AGE = "a_b_age";
   public static final String A_C_AGE = "a_c_age";
   public static final String A_D_AGE = "a_d_age";
   public static final String JOE = "JOE";
   public static final String BOB = "BOB";
   public static final String JANE = "JANE";
   public static final Integer TWENTY = 20;
   public static final Integer FORTY = 40;

   protected SerializationContextInitializer sci;
   protected ConfigurationBuilder configurationBuilder;
   protected final String cacheName;

   // Number of test methods started on this instance; used only for log correlation.
   private volatile int testCount = 0;

   private static final Log log = LogFactory.getLog(StateTransferFunctionalTest.class);

   public StateTransferFunctionalTest() {
      this("nbst");
   }

   public StateTransferFunctionalTest(String testCacheName) {
      cacheName = testCacheName;
      cleanup = CleanupPhase.AFTER_METHOD;
   }

   protected void createCacheManagers() throws Throwable {
      sci = new StateTransferFunctionalSCIImpl();
      configurationBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
      configurationBuilder.transaction()
            .lockingMode(LockingMode.PESSIMISTIC)
            .useSynchronization(false)
            .recovery().disable();
      configurationBuilder.clustering().remoteTimeout(30000);
      // Small chunks so state transfer needs several round trips.
      configurationBuilder.clustering().stateTransfer().chunkSize(20);
      configurationBuilder.locking().useLockStriping(false); // reduces the odd chance of a key collision and deadlock
   }

   /** Starts a new cluster member and defines {@code cacheName} on it. */
   protected EmbeddedCacheManager createCacheManager(String cacheName) {
      EmbeddedCacheManager cm = addClusterEnabledCacheManager(sci, configurationBuilder, new TransportFlags().withMerge(true));
      cm.defineConfiguration(cacheName, configurationBuilder.build());
      return cm;
   }

   /**
    * A value whose marshalling can be artificially slowed down (1s per write once
    * {@link #enableDelay()} is called), so a state transfer containing it stays in
    * progress long enough for the test to observe it.
    */
   public static class DelayTransfer {
      volatile boolean doDelay = false;

      DelayTransfer() {}

      void enableDelay() {
         doDelay = true;
      }

      // Should only be called by protostream when marshalling
      @ProtoField(number = 1, defaultValue = "false")
      public boolean isIgnore() {
         if (doDelay)
            TestingUtil.sleepThread(1000);
         return false;
      }

      public void setIgnore(boolean ignore) {
      }
   }

   /**
    * Continuously writes incrementing {@code testN -> N} entries (optionally in a
    * transaction each) until {@link #stop()} is called; returns the number of
    * successful writes.
    */
   private static class WritingTask implements Callable<Integer> {
      private final Cache<Object, Object> cache;
      private final boolean tx;
      private volatile boolean stop;
      private TransactionManager tm;

      WritingTask(Cache<Object, Object> cache, boolean tx) {
         this.cache = cache;
         this.tx = tx;
         if (tx) tm = TestingUtil.getTransactionManager(cache);
      }

      @Override
      public Integer call() throws Exception {
         int c = 0;
         while (!stop) {
            boolean success = false;
            try {
               if (tx)
                  tm.begin();
               cache.put("test" + c, c);
               if (tx)
                  tm.commit();
               success = true;
               c++;

               // Without this, the writing thread would occupy 1 core completely before the 2nd node joins.
               Thread.sleep(1);
            } catch (Exception e) {
               log.errorf(e, "Error writing key test%s", c);
               stop();
            } finally {
               if (tx && !success) {
                  try {
                     tm.rollback();
                  } catch (SystemException e) {
                     log.error(e);
                  }
               }
            }
         }
         return c;
      }

      public void stop() {
         stop = true;
      }
   }

   public void testInitialStateTransfer(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      Cache<Object, Object> cache1, cache2;
      EmbeddedCacheManager cm1 = createCacheManager(cacheName);
      cache1 = cm1.getCache(cacheName);
      writeInitialData(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2);

      verifyInitialData(cache2);
      logTestEnd(m);
   }

   public void testInitialStateTransferCacheNotPresent(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      Cache<Object, Object> cache1, cache2;
      EmbeddedCacheManager cacheManager1 = createCacheManager(cacheName);
      cache1 = cacheManager1.getCache(cacheName);
      writeInitialData(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2);

      verifyInitialData(cache2);

      // Defining and starting another cache must not disturb the existing one.
      cacheManager1.defineConfiguration("otherCache", configurationBuilder.build());
      cacheManager1.getCache("otherCache");
      logTestEnd(m);
   }

   public void testConcurrentStateTransfer(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      Cache<Object, Object> cache1, cache2, cache3, cache4;
      cache1 = createCacheManager(cacheName).getCache(cacheName);
      writeInitialData(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);

      cache1.put("delay", new DelayTransfer());

      TestingUtil.waitForNoRebalance(cache1, cache2);
      verifyInitialData(cache2);

      // Two more nodes join at the same time.
      EmbeddedCacheManager cm3 = createCacheManager(cacheName);
      EmbeddedCacheManager cm4 = createCacheManager(cacheName);

      Future<Cache> joinFuture1 = fork(() -> cm3.getCache(cacheName));
      Future<Cache> joinFuture2 = fork(() -> cm4.getCache(cacheName));

      joinFuture1.get(30, SECONDS);
      joinFuture2.get(30, SECONDS);

      cache3 = cm3.getCache(cacheName);
      cache4 = cm4.getCache(cacheName);

      // NOTE(review): this call was previously duplicated on the next line.
      TestingUtil.waitForNoRebalance(cache1, cache2, cache3, cache4);
      verifyInitialData(cache3);
      verifyInitialData(cache4);
      logTestEnd(m);
   }

   public void testSTWithThirdWritingNonTxCache(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      thirdWritingCacheTest(false);
      logTestEnd(m);
   }

   public void testSTWithThirdWritingTxCache(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      thirdWritingCacheTest(true);
      logTestEnd(m);
   }

   public void testSTWithWritingNonTxThread(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      writingThreadTest(false);
      logTestEnd(m);
   }

   public void testSTWithWritingTxThread(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      writingThreadTest(true);
      logTestEnd(m);
   }

   public void testInitialStateTransferAfterRestart(Method m) throws Exception {
      testCount++;
      logTestStart(m);
      Cache<Object, Object> cache1, cache2;
      cache1 = createCacheManager(cacheName).getCache(cacheName);
      writeInitialData(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);
      TestingUtil.waitForNoRebalance(cache1, cache2);
      verifyInitialData(cache2);

      // A restarted cache must receive the state again.
      cache2.stop();
      cache2.start();

      verifyInitialData(cache2);
      logTestEnd(m);
   }

   /**
    * A joiner whose rebalance is blocked and whose state-transfer timeout is 1ns
    * must fail its join with a {@link TimeoutException}, and the cluster must
    * recover to the original single member once the block is released.
    */
   public void testStateTransferException(Method m) throws InterruptedException, java.util.concurrent.TimeoutException, ExecutionException {
      testCount++;
      logTestStart(m);
      Cache<Object, Object> cache1;
      cache1 = createCacheManager(cacheName).getCache(cacheName);
      writeInitialData(cache1);

      GlobalConfigurationBuilder globalBuilder = defaultGlobalConfigurationBuilder();
      if (sci != null) globalBuilder.serialization().addContextInitializer(sci);
      EmbeddedCacheManager embeddedCacheManager = TestCacheManagerFactory.createClusteredCacheManager(false,
            // Use an empty configuration builder
            globalBuilder, new ConfigurationBuilder(), new TransportFlags().withMerge(true));
      amendCacheManagerBeforeStart(embeddedCacheManager);
      embeddedCacheManager.start();

      CheckPoint checkPoint = new CheckPoint();
      blockRebalanceStart(embeddedCacheManager, checkPoint, 2);

      ConfigurationBuilder configToUse = new ConfigurationBuilder();
      configToUse.read(configurationBuilder.build(), Combine.DEFAULT)
            .clustering().remoteTimeout(1, TimeUnit.NANOSECONDS).stateTransfer().timeout(1, TimeUnit.NANOSECONDS);

      assertEquals(1, cache1.getAdvancedCache().getDistributionManager().getCacheTopology().getMembers().size());

      embeddedCacheManager.defineConfiguration(cacheName, configToUse.build());
      Future<Cache<Object, Object>> future = fork(() -> embeddedCacheManager.getCache(cacheName));

      // This guarantees the timeout will hit
      checkPoint.awaitStrict("rebalance_begin", 10, SECONDS);
      Exceptions.expectException(ExecutionException.class, TimeoutException.class, () -> future.get(10, SECONDS));

      // Let the operation finally complete
      checkPoint.triggerForever("merge");

      eventuallyEquals(1, () -> cache1.getAdvancedCache().getDistributionManager().getCacheTopology().getMembers().size());
   }

   /**
    * Spies on the manager's {@link LocalTopologyManager} so that the first
    * REBALANCE_START for a topology with {@code numMembers} members triggers
    * "rebalance_begin" and then waits on "merge" before proceeding.
    */
   protected void blockRebalanceStart(final EmbeddedCacheManager manager, final CheckPoint checkpoint, final int numMembers) {
      final LocalTopologyManager localTopologyManager = TestingUtil.extractGlobalComponent(manager,
            LocalTopologyManager.class);
      LocalTopologyManager spyLocalTopologyManager = spy(localTopologyManager);
      doAnswer(invocation -> {
         CacheTopology topology = (CacheTopology) invocation.getArguments()[1];
         List<Address> members = topology.getMembers();
         checkpoint.trigger("rebalance_begin");
         if (members.size() == numMembers) {
            log.debugf("Blocking the REBALANCE_START command with members %s on %s", members, manager.getAddress());
            return sequence(checkpoint.future("merge", 30, SECONDS, testExecutor()),
                            () -> Mocks.callRealMethod(invocation));
         }
         return invocation.callRealMethod();
      }).when(spyLocalTopologyManager).handleRebalance(eq(cacheName), any(CacheTopology.class), anyInt(),
                                                       any(Address.class));
      TestingUtil.replaceComponent(manager, LocalTopologyManager.class, spyLocalTopologyManager, true);
   }

   private void logTestStart(Method m) {
      logTestLifecycle(m, "start");
   }

   private void logTestEnd(Method m) {
      logTestLifecycle(m, "end");
   }

   private void logTestLifecycle(Method m, String lifecycle) {
      log.infof("%s %s - %s", m.getName(), lifecycle, testCount);
   }

   /**
    * Starts two caches, has a third concurrent writer (optionally transactional)
    * keep writing while a new node joins, then verifies the joiner received both
    * the initial data and every key the writer managed to put.
    */
   private void thirdWritingCacheTest(boolean tx) throws Exception {
      Cache<Object, Object> cache1, cache2, cache3;
      cache1 = createCacheManager(cacheName).getCache(cacheName);
      cache3 = createCacheManager(cacheName).getCache(cacheName);

      TestingUtil.blockUntilViewsReceived(60000, cache1, cache3);

      writeInitialData(cache1);

      // Delay the transient copy, so that we get a more thorough log test
      DelayTransfer value = new DelayTransfer();
      cache1.put("delay", value);
      value.enableDelay();

      WritingTask writingTask = new WritingTask(cache3, tx);
      Future<Integer> future = fork(writingTask);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);

      TestingUtil.waitForNoRebalance(cache1, cache2, cache3);

      writingTask.stop();
      int count = future.get(60, SECONDS);

      verifyInitialData(cache2);

      for (int c = 0; c < count; c++) {
         assertEquals(c, cache2.get("test" + c));
      }
   }

   protected void verifyInitialData(Cache<Object, Object> c) {
      Address address = c.getAdvancedCache().getRpcManager().getAddress();
      // Use a format argument instead of string concatenation with a *f logger method.
      log.debugf("Checking values on cache %s", address);
      assertEquals("Incorrect value for key " + A_B_NAME, JOE, c.get(A_B_NAME));
      assertEquals("Incorrect value for key " + A_B_AGE, TWENTY, c.get(A_B_AGE));
      assertEquals("Incorrect value for key " + A_C_NAME, BOB, c.get(A_C_NAME));
      assertEquals("Incorrect value for key " + A_C_AGE, FORTY, c.get(A_C_AGE));
   }

   protected void writeInitialData(final Cache<Object, Object> c) {
      c.put(A_B_NAME, JOE);
      c.put(A_B_AGE, TWENTY);
      c.put(A_C_NAME, BOB);
      c.put(A_C_AGE, FORTY);
   }

   /**
    * Like {@link #thirdWritingCacheTest(boolean)}, but the writer runs on the
    * existing member itself while the second node joins.
    */
   private void writingThreadTest(boolean tx) throws Exception {
      Cache<Object, Object> cache1, cache2;
      cache1 = createCacheManager(cacheName).getCache(cacheName);

      assertEquals(0, cache1.getAdvancedCache().getDataContainer().size());
      writeInitialData(cache1);

      // Delay the transient copy, so that we get a more thorough log test
      DelayTransfer value = new DelayTransfer();
      cache1.put("delay", value);
      value.enableDelay();

      WritingTask writingTask = new WritingTask(cache1, tx);
      Future<Integer> future = fork(writingTask);
      verifyInitialData(cache1);

      EmbeddedCacheManager cm2 = createCacheManager(cacheName);
      cache2 = cm2.getCache(cacheName);

      TestingUtil.waitForNoRebalance(cache1, cache2);

      writingTask.stop();
      int count = future.get(60, SECONDS);

      verifyInitialData(cache1);
      verifyInitialData(cache2);

      for (int c = 0; c < count; c++) {
         assertEquals(c, cache2.get("test" + c));
      }
   }

   @AutoProtoSchemaBuilder(
         includeClasses = {
               DelayedMarshallingPojo.class,
               DelayTransfer.class
         },
         schemaFileName = "test.core.StateTransferFunctionalTest.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.StateTransferFunctionalTest",
         service = false
   )
   interface StateTransferFunctionalSCI extends SerializationContextInitializer {
   }
}
| 16,231
| 35.476404
| 140
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/GetWithForceWriteLockRetryTest.java
|
package org.infinispan.statetransfer;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.MagicKey;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.locking.PessimisticLockingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
/**
* Test that commands are properly retried during/after state transfer.
*
* @author Dan Berindei
* @since 8.2
*/
@Test(groups = "functional", testName = "statetransfer.GetWithForceWriteLockRetryTest")
@CleanupAfterMethod
public class GetWithForceWriteLockRetryTest extends MultipleCacheManagersTest {

   @Override
   protected void createCacheManagers() {
      createCluster(TestDataSCI.INSTANCE, buildConfig(), 3);
      waitForClusterToForm();
   }

   private ConfigurationBuilder buildConfig() {
      // The coordinator will always be the primary owner
      ConfigurationBuilder configurationBuilder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
      configurationBuilder.clustering().hash().numSegments(60);
      configurationBuilder.transaction().lockingMode(LockingMode.PESSIMISTIC);
      return configurationBuilder;
   }

   /**
    * A FORCE_WRITE_LOCK read sends a remote lock command; if the node processing
    * it leaves mid-request, the command must be retried on the surviving members
    * and the transaction must still commit.
    */
   public void testRetryAfterLeave() throws Exception {
      EmbeddedCacheManager cm1 = manager(0);
      Cache<Object, Object> c1 = cm1.getCache();
      EmbeddedCacheManager cm2 = manager(1);
      // Parameterized (was a raw Cache type).
      Cache<Object, Object> c2 = cm2.getCache();
      EmbeddedCacheManager cm3 = manager(2);
      Cache<Object, Object> c3 = cm3.getCache();

      // Block the remote LockControlCommand on c3 before the locking interceptor.
      DelayInterceptor di3 = new DelayInterceptor(LockControlCommand.class, c3);
      extractInterceptorChain(c3).addInterceptorBefore(di3, PessimisticLockingInterceptor.class);

      Object key = new MagicKey(c3);
      TransactionManager tm1 = tm(c1);
      Future<Object> f = fork(() -> {
         // Fixed: the transaction runs on c1 (the message previously logged c2).
         log.tracef("Initiating a transaction on %s", c1);
         tm1.begin();
         try {
            c1.getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK).get(key);
         } finally {
            // Even if the remote lock failed, this will remove the transaction
            tm1.commit();
         }
         return null;
      });

      // The lock control command is replicated to cache c3, and it blocks in the DelayInterceptor
      di3.waitUntilBlocked(1);

      // Kill c3
      killMember(2);
      waitForNoRebalance(c1, c2);

      // Check that the lock succeeded
      f.get(10, SECONDS);

      // Unblock the remote command on c3 - shouldn't make any difference
      di3.unblock(1);
   }

   /**
    * Interceptor that blocks the Nth invocation of a given command class (for
    * non-state-transfer invocations) until {@link #unblock(int)} is called,
    * coordinating through a {@link CheckPoint}.
    */
   class DelayInterceptor extends DDAsyncInterceptor {
      private final AtomicInteger counter = new AtomicInteger(0);
      private final CheckPoint checkPoint = new CheckPoint();
      private final Class<?> commandToBlock;
      private final Cache<?, ?> cache;

      public DelayInterceptor(Class<?> commandToBlock, Cache<?, ?> cache) {
         this.commandToBlock = commandToBlock;
         this.cache = cache;
      }

      public int getCounter() {
         return counter.get();
      }

      public void waitUntilBlocked(int count) throws TimeoutException, InterruptedException {
         String event = checkPoint.peek(5, SECONDS, "blocked_" + count + "_on_" + cache);
         assertEquals("blocked_" + count + "_on_" + cache, event);
      }

      public void unblock(int count) throws InterruptedException, TimeoutException, BrokenBarrierException {
         log.tracef("Unblocking command on cache %s", cache);
         checkPoint.awaitStrict("blocked_" + count + "_on_" + cache, 5, SECONDS);
         checkPoint.trigger("resume_" + count + "_on_" + cache);
      }

      @Override
      public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
            throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            // Skip state-transfer puts: only user operations should block.
            if (!ctx.isInTxScope() && !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER)) {
               doBlock(ctx, command);
            }
         });
      }

      @Override
      public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command)
            throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }

      @Override
      public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }

      @Override
      public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
         return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
            if (!ctx.getCacheTransaction().isFromStateTransfer()) {
               doBlock(ctx, command);
            }
         });
      }

      private void doBlock(InvocationContext ctx, ReplicableCommand command)
            throws InterruptedException, TimeoutException {
         if (commandToBlock != command.getClass())
            return;

         log.tracef("Delaying command %s originating from %s", command, ctx.getOrigin());
         // Primitive int (was a needlessly boxed Integer).
         int myCount = counter.incrementAndGet();
         checkPoint.trigger("blocked_" + myCount + "_on_" + cache);
         checkPoint.awaitStrict("resume_" + myCount + "_on_" + cache, 15, SECONDS);
         log.tracef("Command unblocked: %s", command);
      }

      @Override
      public String toString() {
         return "DelayInterceptor{counter=" + counter + "}";
      }
   }
}
| 7,018
| 37.146739
| 108
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/MergeDuringReplaceTest.java
|
package org.infinispan.statetransfer;
import static org.testng.Assert.assertEquals;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.util.ControlledRpcManager;
import org.jgroups.protocols.DISCARD;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "statetransfer.MergeDuringReplaceTest")
@CleanupAfterMethod
@InCacheMode({ CacheMode.DIST_SYNC })
public class MergeDuringReplaceTest extends MultipleCacheManagersTest {
private DISCARD[] discard;
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder defaultConfig = getDefaultClusteredCacheConfig(cacheMode, false);
createClusteredCaches(3, defaultConfig, new TransportFlags().withFD(true).withMerge(true));
DISCARD d1 = TestingUtil.getDiscardForCache(manager(0));
DISCARD d2 = TestingUtil.getDiscardForCache(manager(1));
DISCARD d3 = TestingUtil.getDiscardForCache(manager(2));
discard = new DISCARD[]{d1, d2, d3};
}
public void testMergeDuringReplace() throws Exception {
final String key = "myKey";
final String value = "myValue";
cache(0).put(key, value);
int nonOwner;
final Cache<Object, Object> c;
LocalizedCacheTopology cacheTopology = advancedCache(0).getDistributionManager().getCacheTopology();
List<Address> members = new ArrayList<>(cacheTopology.getMembers());
List<Address> owners = cacheTopology.getDistribution(key).readOwners();
members.removeAll(owners);
nonOwner = cacheTopology.getMembers().indexOf(members.get(0));
c = cache(nonOwner);
List<Cache<Object, Object>> partition1 = caches();
partition1.remove(c);
ControlledRpcManager controlledRpcManager = ControlledRpcManager.replaceRpcManager(c);
controlledRpcManager.excludeCommands(StateTransferStartCommand.class, StateResponseCommand.class);
Future<Boolean> future = fork(() -> c.replace(key, value, "myNewValue"));
ControlledRpcManager.BlockedRequest blockedReplace = controlledRpcManager.expectCommand(ReplaceCommand.class);
discard[nonOwner].discardAll(true);
// wait for the partitions to form
TestingUtil.blockUntilViewsReceived(30000, false, partition1.get(0), partition1.get(1));
TestingUtil.blockUntilViewsReceived(30000, false, c);
TestingUtil.waitForNoRebalance(partition1.get(0), partition1.get(1));
TestingUtil.waitForNoRebalance(c);
blockedReplace.send().receiveAll();
// Since the non owner didn't have the value before the split it can't do the replace correctly
assertEquals(future.get(10, TimeUnit.SECONDS), Boolean.FALSE);
controlledRpcManager.stopBlocking();
}
public int findNonOwner(String key) {
for (Cache cache : caches()) {
if (!cache.getAdvancedCache().getDataContainer().containsKey(key)) {
return caches().indexOf(cache);
}
}
throw new IllegalStateException();
}
}
| 3,747
| 38.452632
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/OutboundTransferTaskTest.java
|
package org.infinispan.statetransfer;
import io.reactivex.rxjava3.core.Flowable;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.reactive.publisher.impl.Notifications;
import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.LocalModeAddress;
import org.infinispan.test.TestException;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.mockito.ArgumentCaptor;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
@Test(groups = "functional", testName = "statetransfer.OutboundTransferTaskTest")
@CleanupAfterMethod
public class OutboundTransferTaskTest {

   /**
    * Feeds 30 segments (one value notification plus one segment-complete each)
    * through an {@link OutboundTransferTask} with chunk size 30 and verifies the
    * task issues exactly 2 StateResponseCommands of 15 chunks each, covering
    * every segment exactly once.
    */
   public void shouldNotifyForAllSegments() throws InterruptedException {
      int numSegments = 30;
      IntSet segments = IntSets.from(IntStream.range(0, numSegments).iterator());
      RpcManager rpcManager = mock(RpcManager.class);
      CommandsFactory commandsFactory = mock(CommandsFactory.class);
      OutboundTransferTask task = new OutboundTransferTask(
            LocalModeAddress.INSTANCE,
            segments,
            numSegments,
            numSegments,
            1,
            chunks -> {},
            rpcManager,
            commandsFactory,
            10_000,
            "mock-cache",
            true
      );

      // Capture every chunk collection handed to the commands factory.
      ArgumentCaptor<Collection<StateChunk>> cmdCaptor = ArgumentCaptor.forClass(Collection.class);
      when(commandsFactory.buildStateResponseCommand(anyInt(), cmdCaptor.capture(), anyBoolean()))
            .thenReturn(mock(StateResponseCommand.class));
      when(rpcManager.invokeCommand(any(Address.class), any(), any(), any()))
            .thenAnswer(i -> CompletableFutures.completedNull());

      List<SegmentPublisherSupplier.Notification<InternalCacheEntry<?, ?>>> entries = new ArrayList<>();
      for (int i = 0; i < numSegments; i++) {
         ImmortalCacheEntry entry = new ImmortalCacheEntry("key", "value");
         entries.add(Notifications.value(entry, i));
         entries.add(Notifications.segmentComplete(i));
      }

      // Wait directly on the task's completion stage. The previous version threw
      // from inside whenComplete(), which CompletableFuture swallows: on failure
      // the latch never counted down and the test timed out, losing the cause.
      try {
         task.execute(Flowable.fromIterable(entries))
               .toCompletableFuture()
               .get(15, TimeUnit.SECONDS);
      } catch (ExecutionException e) {
         throw new TestException("State transfer task failed", e.getCause());
      } catch (TimeoutException e) {
         throw new TestException("Did not receive all segment notifications", e);
      }

      // We have 30 segments, the flowable contains 1 notification for data and another for segment complete.
      // Since the chunk size is 30, we will issue 2 requests containing 15 chunks each.
      IntSet transferred = IntSets.mutableEmptySet(numSegments);
      assertEquals(cmdCaptor.getAllValues().size(), 2);
      for (Collection<StateChunk> chunks : cmdCaptor.getAllValues()) {
         assertEquals(chunks.size(), 15);
         transferred.addAll(chunks.stream().map(StateChunk::getSegmentId).collect(Collectors.toList()));
      }
      assertEquals(transferred, segments);
   }
}
| 4,004
| 39.05
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/DelegatingStateConsumer.java
|
package org.infinispan.statetransfer;
import java.util.Collection;
import java.util.concurrent.CompletionStage;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
/**
 * A {@link StateConsumer} that forwards every call verbatim to another instance.
 * Tests extend this class and override just the operations they want to intercept
 * or observe, inheriting pass-through behavior for the rest of the contract.
 */
public class DelegatingStateConsumer implements StateConsumer {
   private final StateConsumer target;

   public DelegatingStateConsumer(StateConsumer target) {
      this.target = target;
   }

   @Override
   public boolean isStateTransferInProgress() {
      return target.isStateTransferInProgress();
   }

   @Override
   public boolean isStateTransferInProgressForKey(Object key) {
      return target.isStateTransferInProgressForKey(key);
   }

   @Override
   public long inflightRequestCount() {
      return target.inflightRequestCount();
   }

   @Override
   public long inflightTransactionSegmentCount() {
      return target.inflightTransactionSegmentCount();
   }

   @Override
   public CompletionStage<CompletionStage<Void>> onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
      return target.onTopologyUpdate(cacheTopology, isRebalance);
   }

   @Override
   public CompletionStage<?> applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
      return target.applyState(sender, topologyId, stateChunks);
   }

   @Override
   public void stop() {
      target.stop();
   }

   @Override
   public void stopApplyingState(int topologyId) {
      target.stopApplyingState(topologyId);
   }

   @Override
   public boolean ownsData() {
      return target.ownsData();
   }
}
| 1,577
| 24.451613
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/NonTxStateTransferInvalidationTest.java
|
package org.infinispan.statetransfer;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.BaseCustomAsyncInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.annotations.Test;
/**
* Test that state transfer does not transfer anything on a non-tx invalidation cache.
*
* @since 7.0
*/
@Test(groups = "functional", testName = "statetransfer.NonTxStateTransferInvalidationTest")
@CleanupAfterMethod
public class NonTxStateTransferInvalidationTest extends MultipleCacheManagersTest {
   public static final int NUM_KEYS = 10;
   private ConfigurationBuilder dccc;

   @Override
   protected void createCacheManagers() throws Throwable {
      dccc = getDefaultClusteredCacheConfig(CacheMode.INVALIDATION_SYNC, false, true);
      createCluster(dccc, 2);
      waitForClusterToForm();
   }

   /**
    * A joiner must not receive any state on an invalidation cache: after a third node
    * joins, only the originator's data container holds the entries.
    */
   public void testStateTransferDisabled() throws Exception {
      // Insert initial data in the cache
      Set<Object> keys = new HashSet<>();
      for (int i = 0; i < NUM_KEYS; i++) {
         Object key = "key" + i;
         keys.add(key);
         cache(0).put(key, key);
      }

      log.trace("State transfer happens here");
      // add a third node
      addClusterEnabledCacheManager(dccc);
      waitForClusterToForm();

      log.trace("Checking the values from caches...");
      for (Object key : keys) {
         log.tracef("Checking key: %s", key);
         // Check the data containers directly, bypassing interceptors and remote lookups.
         InternalCacheEntry<?, ?> d0 = advancedCache(0).getDataContainer().get(key);
         InternalCacheEntry<?, ?> d1 = advancedCache(1).getDataContainer().get(key);
         InternalCacheEntry<?, ?> d2 = advancedCache(2).getDataContainer().get(key);
         assertEquals(key, d0.getValue());
         assertNull(d1);
         assertNull(d2);
      }
   }

   /**
    * Invalidation caches must reject fetchInMemoryState=true but accept the default
    * state-transfer configuration and a persistent store.
    */
   public void testConfigValidation() {
      ConfigurationBuilder builder1 = new ConfigurationBuilder();
      builder1.clustering().cacheMode(CacheMode.INVALIDATION_ASYNC).stateTransfer();
      builder1.validate();

      ConfigurationBuilder builder2 = new ConfigurationBuilder();
      builder2.clustering().cacheMode(CacheMode.INVALIDATION_ASYNC).stateTransfer().fetchInMemoryState(true);
      Exceptions.expectException(CacheConfigurationException.class, builder2::validate);

      ConfigurationBuilder builder3 = new ConfigurationBuilder();
      builder3.clustering().cacheMode(CacheMode.INVALIDATION_ASYNC).persistence()
              .addStore(DummyInMemoryStoreConfigurationBuilder.class);
      builder3.validate();
   }

   /**
    * A put/remove pair racing with a node joining must leave the key absent everywhere,
    * even when the invalidation command is delayed on node 2 until after the join.
    */
   public void testInvalidationDuringStateTransfer() throws Exception {
      EmbeddedCacheManager node1 = manager(0);
      Cache<String, Object> node1Cache = node1.getCache();
      EmbeddedCacheManager node2 = manager(1);
      Cache<String, Object> node2Cache = node2.getCache();

      // Delay invalidation commands on node 2 until the latch is released (or 10s pass).
      CountDownLatch latch = new CountDownLatch(1);
      node2Cache.getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new BaseCustomAsyncInterceptor() {
         @Override
         public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand command) throws
               Throwable {
            latch.await(10, TimeUnit.SECONDS);
            return super.visitInvalidateCommand(ctx, command);
         }
      }, 0);

      String key = "key";
      Future<?> future = fork(() -> {
         node1Cache.putForExternalRead(key, new Object());
         node1Cache.remove(key);
      });

      EmbeddedCacheManager node3 = addClusterEnabledCacheManager(dccc);
      Cache<Object, Object> node3Cache = node3.getCache();
      TestingUtil.waitForNoRebalance(caches());
      log.info("Node 3 started");

      latch.countDown();
      future.get(30, TimeUnit.SECONDS);

      assertNull(node1Cache.get(key));
      assertNull(node2Cache.get(key));
      assertNull(node3Cache.get(key));
   }
}
| 4,724
| 37.414634
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/statetransfer/CommitManagerTest.java
|
package org.infinispan.statetransfer;
import org.infinispan.commons.time.TimeService;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ReadCommittedEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.IntPredicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.mockito.Mockito.mock;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
/**
 * Unit tests for {@code CommitManager}: enabling/disabling write tracking for
 * state transfer and cross-site state transfer, and the interaction between the
 * two when both are tracked at once.
 */
@Test(groups = "functional", testName = "statetransfer.CommitManagerTest")
public class CommitManagerTest {

   /** Tracking for each flag can be switched on and off independently. */
   public void shouldStartAndStopTrackingCorrectly() {
      final CommitManager manager = new CommitManager();
      // A freshly created manager is not tracking anything.
      assertFalse(manager.isTracking(Flag.PUT_FOR_STATE_TRANSFER));
      assertFalse(manager.isTracking(Flag.PUT_FOR_X_SITE_STATE_TRANSFER));
      // Start tracking state transfer.
      manager.startTrack(Flag.PUT_FOR_STATE_TRANSFER);
      assertTrue(manager.isTracking(Flag.PUT_FOR_STATE_TRANSFER));
      // Stop tracking state transfer.
      manager.stopTrack(Flag.PUT_FOR_STATE_TRANSFER);
      assertFalse(manager.isTracking(Flag.PUT_FOR_STATE_TRANSFER));
      // Start tracking cross site state transfer.
      manager.startTrack(Flag.PUT_FOR_X_SITE_STATE_TRANSFER);
      assertTrue(manager.isTracking(Flag.PUT_FOR_X_SITE_STATE_TRANSFER));
      // Stop tracking cross site state transfer.
      manager.stopTrack(Flag.PUT_FOR_X_SITE_STATE_TRANSFER);
      assertFalse(manager.isTracking(Flag.PUT_FOR_X_SITE_STATE_TRANSFER));
   }

   /**
    * When only state transfer is tracked, committing state-transfer entries must not
    * populate the internal tracker, and stopping tracking per segment is a no-op.
    */
   public void shouldCommitEntriesForSegment() throws Exception {
      final int numberOfSegments = 10;
      // Collaborators are mocked; the test only exercises the tracking bookkeeping.
      final InternalDataContainer container = mock(InternalDataContainer.class);
      final PersistenceManager persistenceManager = mock(PersistenceManager.class);
      final TimeService timeService = mock(TimeService.class);
      final CommitManager manager = new CommitManager();
      TestingUtil.inject(manager, container, persistenceManager, timeService);

      // Start tracking for state transfer.
      manager.startTrack(Flag.PUT_FOR_STATE_TRANSFER);

      // Create some entries associated with segments.
      for (int i = 0; i < numberOfSegments; i++) {
         for (int j = 0; j < 10; j++) {
            String formatted = String.format("value-%d-%d", i, j);
            final CacheEntry<String, String> entry = new ReadCommittedEntry<>(formatted, formatted, null);
            manager.commit(entry, Flag.PUT_FOR_STATE_TRANSFER, i, false, null)
                  .toCompletableFuture().get(1, TimeUnit.SECONDS);
         }
      }

      // The map should not store any entries since we are tracking only state transfer and
      // the manager was fed with only state transfer entries.
      assertEquals(manager.tracker.size(), 0);
      assertTrue(manager.isEmpty());

      // Stop tracking some segments does not raise any problems.
      manager.stopTrackFor(Flag.PUT_FOR_STATE_TRANSFER, 0);
      manager.stopTrackFor(Flag.PUT_FOR_STATE_TRANSFER, 1);
      manager.stopTrackFor(Flag.PUT_FOR_STATE_TRANSFER, 2);

      // Verify that still tracking for state transfer and not entries were stored.
      assertTrue(manager.isTracking(Flag.PUT_FOR_STATE_TRANSFER));
      assertTrue(manager.isEmpty());
   }

   /**
    * When both state transfer and cross-site state transfer are tracked, stopping the
    * state-transfer tracking for a segment must only clear it if no cross-site entries
    * were committed for that segment.
    */
   public void onlyClearSegmentIfNoXSiteST() throws Exception {
      final int numberOfSegments = 10;
      // Odd segments receive cross-site entries, even segments receive state-transfer entries.
      final IntPredicate isXSiteSegment = segment -> segment % 2 != 0;
      final InternalDataContainer container = mock(InternalDataContainer.class);
      final PersistenceManager persistenceManager = mock(PersistenceManager.class);
      final TimeService timeService = mock(TimeService.class);
      final CommitManager manager = new CommitManager();
      TestingUtil.inject(manager, container, persistenceManager, timeService);

      // Start tracking for state transfer.
      manager.startTrack(Flag.PUT_FOR_STATE_TRANSFER);
      manager.startTrack(Flag.PUT_FOR_X_SITE_STATE_TRANSFER);

      // Create some entries associated with segments.
      for (int i = 0; i < numberOfSegments; i++) {
         for (int j = 0; j < 10; j++) {
            String formatted = String.format("value-%d-%d", i, j);
            final CacheEntry<String, String> entry = new ReadCommittedEntry<>(formatted, formatted, null);
            CompletionStage<?> future;
            if (isXSiteSegment.test(i)) {
               future = manager.commit(entry, Flag.PUT_FOR_X_SITE_STATE_TRANSFER, i, false, null);
            } else {
               future = manager.commit(entry, Flag.PUT_FOR_STATE_TRANSFER, i, false, null);
            }
            future.toCompletableFuture().get(1, TimeUnit.SECONDS);
         }
      }

      // Verify that we are tracking numberOfSegments segments. This is a different scenario because we are dealing
      // with both types simultaneously.
      assertEquals(manager.tracker.size(), numberOfSegments);

      // We trigger the stop track for all segments, but only the even ones should be cleared.
      for (int i = 0; i < numberOfSegments; i++) {
         manager.stopTrackFor(Flag.PUT_FOR_STATE_TRANSFER, i);
      }

      // Leaving us with 5 segments on tracker.
      assertEquals(manager.tracker.size(), 5);

      // Verify that we are left only with the even ones.
      // This happens because we keep entries to discard the "other way around", we are left with entries telling to
      // discard for state transfers.
      Set<Integer> expectedSegments = IntStream.range(0, numberOfSegments)
            .filter(i -> !isXSiteSegment.test(i)).boxed().collect(Collectors.toSet());
      assertEquals(manager.tracker.keySet(), expectedSegments);
   }
}
| 6,094
| 42.535714
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/encoding/impl/StorageConfigurationManagerTest.java
|
package org.infinispan.encoding.impl;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.testng.AssertJUnit.assertEquals;
import java.time.Instant;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "encoding.impl.StorageConfigurationManagerTest")
public class StorageConfigurationManagerTest extends SingleCacheManagerTest {
   public static final String CACHE_NAME = "testCache";

   @Override
   protected EmbeddedCacheManager createCacheManager() throws Exception {
      return TestCacheManagerFactory.createCacheManager();
   }

   // NOTE(review): the static TIME_SERVICE delegates below look unrelated to storage
   // configuration and appear pasted in from a time-service helper. They are kept
   // because they are public and may be referenced elsewhere -- confirm and clean up.
   public static long wallClockTime() {
      return TIME_SERVICE.wallClockTime();
   }

   public static long time() {
      return TIME_SERVICE.time();
   }

   public static Instant instant() {
      return TIME_SERVICE.instant();
   }

   public static long timeDuration(long startTimeNanos, TimeUnit outputTimeUnit) {
      return TIME_SERVICE.timeDuration(startTimeNanos, outputTimeUnit);
   }

   public static long timeDuration(long startTimeNanos, long endTimeNanos, TimeUnit outputTimeUnit) {
      return TIME_SERVICE.timeDuration(startTimeNanos, endTimeNanos, outputTimeUnit);
   }

   public static boolean isTimeExpired(long endTimeNanos) {
      return TIME_SERVICE.isTimeExpired(endTimeNanos);
   }

   public static long remainingTime(long endTimeNanos, TimeUnit outputTimeUnit) {
      return TIME_SERVICE.remainingTime(endTimeNanos, outputTimeUnit);
   }

   public static long expectedEndTime(long duration, TimeUnit inputTimeUnit) {
      return TIME_SERVICE.expectedEndTime(duration, inputTimeUnit);
   }

   /**
    * Checks the storage media type resolved for each storage()/storageType() setting:
    * HEAP and OBJECT store plain objects, BINARY and OFF_HEAP store protostream.
    */
   public void testDefaultMediaType() {
      // No explicit storage configured: defaults to HEAP + application/x-java-object.
      ConfigurationBuilder configurationBuilder = new ConfigurationBuilder();
      assertStorageMediaTypes(configurationBuilder, StorageType.HEAP, StorageType.HEAP,
                              MediaType.APPLICATION_OBJECT);

      // HEAP via the new storage() setter and the legacy storageType() setter.
      configurationBuilder = new ConfigurationBuilder();
      configurationBuilder.memory().storage(StorageType.HEAP);
      assertStorageMediaTypes(configurationBuilder, StorageType.HEAP, StorageType.HEAP,
                              MediaType.APPLICATION_OBJECT);

      configurationBuilder = new ConfigurationBuilder();
      configurationBuilder.memory().storageType(StorageType.HEAP);
      assertStorageMediaTypes(configurationBuilder, StorageType.HEAP, StorageType.HEAP,
                              MediaType.APPLICATION_OBJECT);

      // OBJECT via both setters.
      configurationBuilder = new ConfigurationBuilder();
      configurationBuilder.memory().storage(StorageType.OBJECT);
      assertStorageMediaTypes(configurationBuilder, StorageType.OBJECT, StorageType.OBJECT,
                              MediaType.APPLICATION_OBJECT);

      configurationBuilder = new ConfigurationBuilder();
      configurationBuilder.memory().storageType(StorageType.OBJECT);
      assertStorageMediaTypes(configurationBuilder, StorageType.OBJECT, StorageType.OBJECT,
                              MediaType.APPLICATION_OBJECT);

      // BINARY and OFF_HEAP use protostream storage. Each was previously asserted twice
      // with identical storageType() calls (copy-paste); the exact duplicates were removed.
      configurationBuilder = new ConfigurationBuilder();
      configurationBuilder.memory().storageType(StorageType.BINARY);
      assertStorageMediaTypes(configurationBuilder, StorageType.BINARY, StorageType.BINARY,
                              MediaType.APPLICATION_PROTOSTREAM);

      configurationBuilder = new ConfigurationBuilder();
      configurationBuilder.memory().storageType(StorageType.OFF_HEAP);
      assertStorageMediaTypes(configurationBuilder, StorageType.OFF_HEAP, StorageType.OFF_HEAP,
                              MediaType.APPLICATION_PROTOSTREAM);
   }

   /**
    * Defines a temporary cache with the given builder and asserts the resolved storage,
    * storageType and key/value media types, then removes the cache.
    */
   private void assertStorageMediaTypes(ConfigurationBuilder configurationBuilder, StorageType storage,
                                        StorageType storageType, MediaType mediaType) {
      cacheManager.defineConfiguration(CACHE_NAME, configurationBuilder.build());
      Cache<Object, Object> cache = cacheManager.getCache(CACHE_NAME);
      Configuration cacheConfiguration = cache.getCacheConfiguration();
      assertEquals("Wrong storage", storage, cacheConfiguration.memory().storage());
      assertEquals("Wrong storageType", storageType, cacheConfiguration.memory().storageType());
      assertEquals("Wrong heapConfiguration.storageType", storageType,
                   cacheConfiguration.memory().heapConfiguration().storageType());
      StorageConfigurationManager scm = extractComponent(cache, StorageConfigurationManager.class);
      assertEquals("Wrong key media type", mediaType, scm.getKeyStorageMediaType());
      assertEquals("Wrong value media type", mediaType, scm.getValueStorageMediaType());
      cacheManager.administration().removeCache(CACHE_NAME);
   }
}
| 5,701
| 43.897638
| 103
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/topology/AsymmetricClusterTest.java
|
package org.infinispan.topology;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TransportFlags;
import org.jgroups.protocols.DISCARD;
import org.testng.annotations.Test;
/**
 * Tests cluster topology handling when a cache runs only on a subset of the cluster
 * members (an asymmetric cluster), including restarting the sole cache member and a
 * coordinator crash during join.
 */
@Test(groups = "functional", testName = "topology.AsymmetricClusterTest")
@CleanupAfterMethod
public class AsymmetricClusterTest extends MultipleCacheManagersTest {
   public static final String CACHE_NAME = "testCache";
   private ConfigurationBuilder localConfig;
   private ConfigurationBuilder clusteredConfig;
   // DISCARD protocols for nodes 0 and 1; d1 is kept for symmetry even though only d2 is used.
   DISCARD d1, d2;

   @Override
   protected void createCacheManagers() throws Throwable {
      localConfig = new ConfigurationBuilder();
      clusteredConfig = new ConfigurationBuilder();
      clusteredConfig.clustering().cacheMode(CacheMode.REPL_SYNC).stateTransfer().timeout(30, TimeUnit.SECONDS);
      for (int i = 0; i < 2; i++) {
         addClusterEnabledCacheManager(localConfig, new TransportFlags().withFD(true));
      }
      d1 = TestingUtil.getDiscardForCache(manager(0));
      d2 = TestingUtil.getDiscardForCache(manager(1));
   }

   public void testCrashAndRestartOnlyMember() throws Exception {
      testRestartOnlyMember(true);
   }

   public void testStopAndRestartOnlyMember() throws Exception {
      testRestartOnlyMember(false);
   }

   /**
    * Starts the cache on node 1 only, takes node 1 down (either crashed via DISCARD or
    * stopped gracefully), then starts the cache on a fresh node 2.
    */
   private void testRestartOnlyMember(boolean simulateCrash) {
      // The coordinator stays up throughout the test, but the cache only runs on node 1 and then 2
      manager(1).defineConfiguration(CACHE_NAME, clusteredConfig.build());
      manager(1).getCache(CACHE_NAME);

      if (simulateCrash) {
         // Drop all traffic from node 1 so its departure looks like a crash.
         d2.discardAll(true);
      }
      manager(1).stop();
      TestingUtil.blockUntilViewsReceived(30000, false, manager(0));

      // Bring up a replacement node and start the cache there.
      addClusterEnabledCacheManager(new TransportFlags().withFD(true));
      manager(2).defineConfiguration(CACHE_NAME, clusteredConfig.build());
      manager(2).getCache(CACHE_NAME);
   }

   public void testCoordinatorCrashesDuringJoin() {
      // Isolate node 1 from the coordinator, then start its join on a forked thread.
      d2.discardAll(true);
      manager(1).defineConfiguration(CACHE_NAME, clusteredConfig.build());
      fork((Callable<Object>) () -> cache(1, CACHE_NAME));

      // Each partition installs its own view; the join must still complete without rebalance.
      TestingUtil.blockUntilViewsReceived(30000, false, manager(0));
      TestingUtil.blockUntilViewsReceived(30000, false, manager(1));
      TestingUtil.waitForNoRebalance(cache(1, CACHE_NAME));
   }
}
| 2,600
| 33.223684
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/topology/ClusterCacheStatusTest.java
|
package org.infinispan.topology;
import static java.util.Collections.singletonList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertEquals;
import java.util.List;
import java.util.Optional;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PreferAvailabilityStrategy;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.statetransfer.RebalanceType;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.logging.events.EventLogManager;
import org.infinispan.util.logging.events.TestingEventLogManager;
import org.mockito.Mockito;
import org.mockito.MockitoSession;
import org.mockito.quality.Strictness;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Unit tests for {@code ClusterCacheStatus} rebalance queuing, driven with strict-stub
 * Mockito mocks for the topology manager and transport.
 */
@Test(groups = "unit", testName = "topology.ClusterCacheStatusTest")
public class ClusterCacheStatusTest extends AbstractInfinispanTest {
   private static final String CACHE_NAME = "test";
   // Shared join parameters; per-node join infos reuse these with a node-specific UUID.
   private static final CacheJoinInfo JOIN_INFO =
         new CacheJoinInfo(new DefaultConsistentHashFactory(), 8, 2, 1000,
                           CacheMode.DIST_SYNC, 1.0f, null, Optional.empty());
   private static final Address A = new TestAddress(1, "A");
   private static final Address B = new TestAddress(2, "B");
   private static final Address C = new TestAddress(3, "C");

   private ClusterCacheStatus status;
   private ClusterTopologyManagerImpl topologyManager;
   private MockitoSession mockitoSession;
   private Transport transport;

   /**
    * Builds the {@code ClusterCacheStatus} under test with mocked collaborators.
    * STRICT_STUBS makes any unused or unexpected stubbing fail the test.
    */
   @BeforeMethod(alwaysRun = true)
   public void setup() {
      mockitoSession = Mockito.mockitoSession().strictness(Strictness.STRICT_STUBS).startMocking();
      EventLogManager eventLogManager = new TestingEventLogManager();
      PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
      EmbeddedCacheManager cacheManager = mock(EmbeddedCacheManager.class);
      topologyManager = mock(ClusterTopologyManagerImpl.class);
      transport = mock(Transport.class);
      PreferAvailabilityStrategy availabilityStrategy =
            new PreferAvailabilityStrategy(eventLogManager, persistentUUIDManager,
                                           ClusterTopologyManagerImpl::distLostDataCheck);
      status = new ClusterCacheStatus(cacheManager, null, CACHE_NAME, availabilityStrategy, RebalanceType.FOUR_PHASE,
                                      topologyManager, transport, persistentUUIDManager, eventLogManager,
                                      Optional.empty(), false);
   }

   @AfterMethod(alwaysRun = true)
   public void teardown() {
      // Also verifies strict-stub expectations registered in setup().
      mockitoSession.finishMocking();
   }

   /**
    * Joins A, B and C one at a time (each join triggers a rebalance), then shrinks the
    * view to C alone and checks the resulting current/stable topologies and broadcasts.
    */
   @Test
   public void testQueueRebalanceSingleNode() throws Exception {
      when(topologyManager.isRebalancingEnabled()).thenReturn(true);

      status.doJoin(A, makeJoinInfo(A));
      verifyStableTopologyUpdate();

      status.doJoin(B, makeJoinInfo(B));
      verifyRebalanceStart();
      completeRebalance(status);
      verifyStableTopologyUpdate();

      status.doJoin(C, makeJoinInfo(C));
      verifyRebalanceStart();
      completeRebalance(status);
      verifyStableTopologyUpdate();

      // Shrink the cluster view to just C; A and B are considered lost.
      when(transport.getMembers()).thenReturn(singletonList(C));
      when(transport.getViewId()).thenReturn(1);
      status.doHandleClusterView(1);

      // Expected topology ids: 9 increments and 2 rebalances accumulated above.
      TestClusterCacheStatus cache = TestClusterCacheStatus.start(JOIN_INFO, C);
      cache.incrementIds(9, 2);
      cache.incrementStableIds(9, 2);
      assertEquals(cache.topology(), status.getCurrentTopology());
      assertEquals(cache.stableTopology(), status.getStableTopology());
      verifyTopologyUpdate();
      verifyStableTopologyUpdate();
      verifyNoMoreInteractions(topologyManager);
   }

   // Asserts a rebalance-start broadcast for the current topology.
   private void verifyRebalanceStart() {
      verify(topologyManager).broadcastRebalanceStart(CACHE_NAME, status.getCurrentTopology());
   }

   // Asserts a stable-topology broadcast for the current stable topology.
   private void verifyStableTopologyUpdate() {
      verify(topologyManager).broadcastStableTopologyUpdate(CACHE_NAME, status.getStableTopology());
   }

   // Asserts a topology-update broadcast in AVAILABLE mode.
   private void verifyTopologyUpdate() {
      verify(topologyManager).broadcastTopologyUpdate(CACHE_NAME, status.getCurrentTopology(),
                                                      AvailabilityMode.AVAILABLE);
   }

   /**
    * Drives the four-phase rebalance from READ_OLD_WRITE_ALL through to NO_REBALANCE by
    * confirming each phase for every member.
    */
   private void completeRebalance(ClusterCacheStatus status) throws Exception {
      advanceRebalance(status, CacheTopology.Phase.READ_OLD_WRITE_ALL, CacheTopology.Phase.READ_ALL_WRITE_ALL,
                       CacheTopology.Phase.READ_NEW_WRITE_ALL, CacheTopology.Phase.NO_REBALANCE);
   }

   // Confirms the current phase for all members and checks each expected phase transition.
   private void advanceRebalance(ClusterCacheStatus status, CacheTopology.Phase initialPhase,
                                 CacheTopology.Phase... phases) throws Exception {
      assertEquals(initialPhase, status.getCurrentTopology().getPhase());
      for (CacheTopology.Phase phase : phases) {
         confirmRebalancePhase(status, status.getCurrentTopology().getMembers());
         assertEquals(phase, status.getCurrentTopology().getPhase());
         verifyTopologyUpdate();
      }
   }

   // Confirms the current phase on behalf of every member; the topology id must advance by one.
   private void confirmRebalancePhase(ClusterCacheStatus status, List<Address> members) throws Exception {
      int topologyId = status.getCurrentTopology().getTopologyId();
      for (Address a : members) {
         status.confirmRebalancePhase(a, topologyId);
      }
      assertEquals(topologyId + 1, status.getCurrentTopology().getTopologyId());
   }

   // Builds a join info identical to JOIN_INFO except for a node-derived persistent UUID.
   private CacheJoinInfo makeJoinInfo(Address a) {
      PersistentUUID persistentUUID = new PersistentUUID(a.hashCode(), a.hashCode());
      return new CacheJoinInfo(JOIN_INFO.getConsistentHashFactory(), JOIN_INFO.getNumSegments(), JOIN_INFO.getNumOwners(),
                               JOIN_INFO.getTimeout(), JOIN_INFO.getCacheMode(), JOIN_INFO.getCapacityFactor(),
                               persistentUUID, Optional.empty());
   }
}
| 6,280
| 42.618056
| 122
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/topology/ClusterTopologyManagerImplTest.java
|
package org.infinispan.topology;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.mockito.Mockito.mock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.topology.CacheStatusRequestCommand;
import org.infinispan.commands.topology.RebalanceStartCommand;
import org.infinispan.commands.topology.RebalanceStatusRequestCommand;
import org.infinispan.commands.topology.TopologyUpdateCommand;
import org.infinispan.commands.topology.TopologyUpdateStableCommand;
import org.infinispan.configuration.ConfigurationManager;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHashFactory;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.TestModuleRepository;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifierImpl;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.MockTransport;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.events.EventLogManager;
import org.infinispan.util.logging.events.TestingEventLogManager;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "topology.ClusterTopologyManagerImplTest")
public class ClusterTopologyManagerImplTest extends AbstractInfinispanTest {
private static final String CACHE_NAME = "testCache";
private ExecutorService executor = Executors.newFixedThreadPool(2, getTestThreadFactory("Executor"));
private ExecutorService scheduledExecutor = Executors.newSingleThreadScheduledExecutor(getTestThreadFactory("Executor"));
private static final Address A = new TestAddress(0, "A");
private static final Address B = new TestAddress(1, "B");
private final ConsistentHashFactory<?> replicatedChf = new ReplicatedConsistentHashFactory();
// The persistent UUIDs are different, the rest of the join info is the same
private final CacheJoinInfo joinInfoA = makeJoinInfo();
private final CacheJoinInfo joinInfoB = makeJoinInfo();
private CacheJoinInfo makeJoinInfo() {
return new CacheJoinInfo(replicatedChf, 16, 1, 1000,
CacheMode.REPL_SYNC, 1.0f, PersistentUUID.randomUUID(), Optional.empty());
}
/**
* Start two nodes and make both join the cache.
*/
public void testClusterStartupWith2Nodes() throws Exception {
// Create global component registry with dependencies
GlobalConfiguration gc = GlobalConfigurationBuilder.defaultClusteredBuilder().build();
EmbeddedCacheManager cacheManager = mock(EmbeddedCacheManager.class);
GlobalComponentRegistry gcr = new GlobalComponentRegistry(gc, cacheManager, Collections.emptySet(),
TestModuleRepository.defaultModuleRepository(),
mock(ConfigurationManager.class));
BasicComponentRegistry gbcr = gcr.getComponent(BasicComponentRegistry.class);
gbcr.replaceComponent(EventLogManager.class.getName(), new TestingEventLogManager(), false);
CacheManagerNotifierImpl managerNotifier = new CacheManagerNotifierImpl();
gbcr.replaceComponent(CacheManagerNotifier.class.getName(), managerNotifier, false);
managerNotifier.start();
MockTransport transport = new MockTransport(A);
gbcr.replaceComponent(Transport.class.getName(), transport, false);
PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
gbcr.replaceComponent(PersistentUUIDManager.class.getName(), persistentUUIDManager, false);
gbcr.replaceComponent(KnownComponentNames.NON_BLOCKING_EXECUTOR, executor, false);
gbcr.replaceComponent(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR, scheduledExecutor, false);
MockLocalTopologyManager ltm = new MockLocalTopologyManager(CACHE_NAME);
gbcr.replaceComponent(LocalTopologyManager.class.getName(), ltm, false);
// Initial conditions
transport.init(1, singletonList(A));
ltm.init(null, null, null, null);
// Component under test: ClusterTopologyManagerImpl on the coordinator (A)
ClusterTopologyManagerImpl ctm = new ClusterTopologyManagerImpl();
gbcr.replaceComponent(ClusterTopologyManager.class.getName(), ctm, false);
gcr.rewire();
ctm.start();
// CTMI becomes coordinator and fetches the cluster status
transport.expectCommand(CacheStatusRequestCommand.class).finish();
// No caches, so no topology update is expected here
Thread.sleep(1);
transport.verifyNoErrors();
// First node joins the cache
CacheStatusResponse joinResponseA = CompletionStages.join(ctm.handleJoin(CACHE_NAME, A, joinInfoA, 1));
assertEquals(1, joinResponseA.getCacheTopology().getTopologyId());
assertCHMembers(joinResponseA.getCacheTopology().getCurrentCH(), A);
assertNull(joinResponseA.getCacheTopology().getPendingCH());
// LTMI normally updates the topology when receiving the join response
ltm.handleTopologyUpdate(CACHE_NAME, joinResponseA.getCacheTopology(), joinResponseA.getAvailabilityMode(), 1, A);
ltm.expectTopology(1, singletonList(A), null, CacheTopology.Phase.NO_REBALANCE);
// CTMI replies to the initial stable topology broadcast
transport.expectCommand(TopologyUpdateStableCommand.class, c -> {
assertCHMembers(c.getCurrentCH(), A);
assertNull(c.getPendingCH());
}).finish();
// Add a second node
transport.updateView(2, asList(A, B));
managerNotifier.notifyViewChange(asList(A, B), singletonList(A), A, 2);
// CTMI confirms availability
transport.expectHeartBeatCommand().finish();
// Second node tries to join with old view and is rejected
CacheStatusResponse joinResponseB1 = CompletionStages.join(ctm.handleJoin(CACHE_NAME, B, joinInfoB, 1));
assertNull(joinResponseB1);
// Second node joins the cache with correct view id, receives the initial topology
CacheStatusResponse joinResponseB = CompletionStages.join(ctm.handleJoin(CACHE_NAME, B, joinInfoB, 2));
assertEquals(1, joinResponseB.getCacheTopology().getTopologyId());
assertCHMembers(joinResponseB.getCacheTopology().getCurrentCH(), A);
assertNull(joinResponseB.getCacheTopology().getPendingCH());
verifyRebalance(transport, ltm, ctm, 2, 1, singletonList(A), asList(A, B));
transport.verifyNoErrors();
gcr.stop();
}
/**
* Assume there are already 2 nodes and the coordinator leaves during rebalance
*/
public void testCoordinatorLostDuringRebalance() throws Exception {
   // Create global component registry with dependencies
   GlobalConfiguration gc = GlobalConfigurationBuilder.defaultClusteredBuilder().build();
   EmbeddedCacheManager cacheManager = mock(EmbeddedCacheManager.class);
   GlobalComponentRegistry gcr = new GlobalComponentRegistry(gc, cacheManager, Collections.emptySet(),
                                                             TestModuleRepository.defaultModuleRepository(),
                                                             mock(ConfigurationManager.class));
   BasicComponentRegistry gbcr = gcr.getComponent(BasicComponentRegistry.class);
   gbcr.replaceComponent(EventLogManager.class.getName(), new TestingEventLogManager(), false);
   CacheManagerNotifierImpl managerNotifier = new CacheManagerNotifierImpl();
   gbcr.replaceComponent(CacheManagerNotifier.class.getName(), managerNotifier, false);
   managerNotifier.start();
   // The transport is mocked on node B, which will become the new coordinator
   MockTransport transport = new MockTransport(B);
   gbcr.replaceComponent(Transport.class.getName(), transport, false);
   PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
   gbcr.replaceComponent(PersistentUUIDManager.class.getName(), persistentUUIDManager, false);
   gbcr.replaceComponent(KnownComponentNames.NON_BLOCKING_EXECUTOR, executor, false);
   gbcr.replaceComponent(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR, scheduledExecutor, false);
   MockLocalTopologyManager ltm = new MockLocalTopologyManager(CACHE_NAME);
   gbcr.replaceComponent(LocalTopologyManager.class.getName(), ltm, false);
   // Initial conditions (rebalance in phase 3, READ_NEW_WRITE_ALL)
   transport.init(2, asList(A, B));
   ConsistentHash stableCH = replicatedChf.create(joinInfoA.getNumOwners(),
                                                  joinInfoA.getNumSegments(), singletonList(A), null);
   ConsistentHash pendingCH = replicatedChf.create(joinInfoA.getNumOwners(),
                                                   joinInfoA.getNumSegments(), asList(A, B), null);
   CacheTopology initialTopology = new CacheTopology(4, 2, stableCH, pendingCH,
                                                     CacheTopology.Phase.READ_NEW_WRITE_ALL, asList(A, B),
                                                     asList(joinInfoA.getPersistentUUID(),
                                                            joinInfoB.getPersistentUUID()));
   CacheTopology stableTopology = new CacheTopology(1, 1, stableCH, null,
                                                    CacheTopology.Phase.NO_REBALANCE, singletonList(A),
                                                    singletonList(joinInfoA.getPersistentUUID()));
   ltm.init(joinInfoA, initialTopology, stableTopology, AvailabilityMode.AVAILABLE);
   // Normally LocalTopologyManagerImpl.start()/doHandleTopologyUpdate() registers the persistent UUIDs
   // TODO Write test with asymmetric caches leaving the PersistentUUIDManager cache incomplete
   persistentUUIDManager.addPersistentAddressMapping(A, joinInfoA.getPersistentUUID());
   persistentUUIDManager.addPersistentAddressMapping(B, joinInfoB.getPersistentUUID());
   // Component under test: ClusterTopologyManagerImpl on the new coordinator (B)
   ClusterTopologyManagerImpl ctm = new ClusterTopologyManagerImpl();
   gbcr.replaceComponent(ClusterTopologyManager.class.getName(), ctm, false);
   gcr.rewire();
   // When CTMI starts as regular member it requests the rebalancing status from the coordinator.
   // Run the start and the mock response concurrently, because the request blocks the starting thread.
   runConcurrently(
         ctm::start,
         () -> transport.expectCommand(RebalanceStatusRequestCommand.class)
                        .singleResponse(A, SuccessfulResponse.create(true)));
   // Wait for the initial view update in CTMI to finish
   eventuallyEquals(ClusterTopologyManager.ClusterManagerStatus.REGULAR_MEMBER, ctm::getStatus);
   // The coordinator (node A) leaves the cluster
   transport.updateView(3, singletonList(B));
   managerNotifier.notifyViewChange(singletonList(B), asList(A, B), B, 3);
   // Node B becomes coordinator and CTMI tries to recover the cluster status
   transport.expectCommand(CacheStatusRequestCommand.class).finish();
   // CTMI gets a single cache topology with READ_NEW and broadcasts a new topology with only the read CH
   ltm.expectTopology(5, asList(A, B), null, CacheTopology.Phase.NO_REBALANCE);
   transport.expectCommand(TopologyUpdateCommand.class, c -> {
      assertEquals(5, c.getTopologyId());
      assertCHMembers(c.getCurrentCH(), A, B);
      assertNull(c.getPendingCH());
   });
   transport.expectCommand(TopologyUpdateStableCommand.class, c -> {
      assertEquals(1, c.getTopologyId());
      assertCHMembers(c.getCurrentCH(), A);
      assertNull(c.getPendingCH());
   });
   // CTMI broadcasts a new cache topology with only node B
   ltm.expectTopology(6, singletonList(B), null, CacheTopology.Phase.NO_REBALANCE);
   transport.expectCommand(TopologyUpdateCommand.class, c -> {
      assertEquals(6, c.getTopologyId());
      assertCHMembers(c.getCurrentCH(), B);
      assertNull(c.getPendingCH());
   });
   // The new topology doesn't need rebalancing, so CTMI updates the stable topology
   transport.expectCommand(TopologyUpdateStableCommand.class, c -> {
      assertEquals(6, c.getTopologyId());
      assertCHMembers(c.getCurrentCH(), B);
      assertNull(c.getPendingCH());
   });
   // Shouldn't send any more commands here
   Thread.sleep(1);
   transport.verifyNoErrors();
   // Node A restarts
   transport.updateView(4, asList(B, A));
   managerNotifier.notifyViewChange(asList(B, A), singletonList(B), A, 4);
   // CTMI confirms members are available in case it needs to starts a rebalance
   transport.expectHeartBeatCommand().finish();
   // Node A rejoins
   ctm.handleJoin(CACHE_NAME, A, joinInfoA, 4);
   // The rejoin triggers a full rebalance from {B} to {B, A} with topology ids 7..10
   verifyRebalance(transport, ltm, ctm, 7, 4, singletonList(B), asList(B, A));
   transport.verifyNoErrors();
   gcr.stop();
}
/**
 * Drives a full 4-phase rebalance from {@code initialMembers} to {@code finalMembers} and
 * verifies the commands broadcast by the cluster topology manager at each step.
 * Topology ids {@code rebalanceTopologyId}..{@code rebalanceTopologyId + 3} are consumed.
 */
private void verifyRebalance(MockTransport transport, MockLocalTopologyManager ltm, ClusterTopologyManagerImpl ctm,
                             int rebalanceTopologyId, int rebalanceViewId, List<Address> initialMembers,
                             List<Address> finalMembers) throws Exception {
   // Phase 1 (READ_OLD_WRITE_ALL): CTMI starts the rebalance with a RebalanceStartCommand
   ltm.expectTopology(rebalanceTopologyId, initialMembers, finalMembers,
                      CacheTopology.Phase.READ_OLD_WRITE_ALL);
   transport.expectCommand(RebalanceStartCommand.class, c -> {
      assertEquals(rebalanceTopologyId, c.getTopologyId());
      assertEquals(CacheTopology.Phase.READ_OLD_WRITE_ALL, c.getPhase());
      assertEquals(initialMembers, c.getCurrentCH().getMembers());
      assertEquals(finalMembers, c.getPendingCH().getMembers());
   }).finish();
   // Both members confirm phase 1
   ctm.handleRebalancePhaseConfirm(CACHE_NAME, A, rebalanceTopologyId, null, rebalanceViewId);
   ctm.handleRebalancePhaseConfirm(CACHE_NAME, B, rebalanceTopologyId, null, rebalanceViewId);
   // Phases 2 and 3 (READ_ALL_WRITE_ALL, then READ_NEW_WRITE_ALL) follow the same pattern:
   // a TopologyUpdateCommand with an incremented topology id, then confirmations from both members
   CacheTopology.Phase[] middlePhases = {CacheTopology.Phase.READ_ALL_WRITE_ALL,
                                         CacheTopology.Phase.READ_NEW_WRITE_ALL};
   for (int phaseIndex = 0; phaseIndex < middlePhases.length; phaseIndex++) {
      CacheTopology.Phase phase = middlePhases[phaseIndex];
      int topologyId = rebalanceTopologyId + 1 + phaseIndex;
      ltm.expectTopology(topologyId, initialMembers, finalMembers, phase);
      transport.expectCommand(TopologyUpdateCommand.class, c -> {
         assertEquals(topologyId, c.getTopologyId());
         assertEquals(phase, c.getPhase());
         assertEquals(initialMembers, c.getCurrentCH().getMembers());
         assertEquals(finalMembers, c.getPendingCH().getMembers());
      }).finish();
      ctm.handleRebalancePhaseConfirm(CACHE_NAME, A, topologyId, null, rebalanceViewId);
      ctm.handleRebalancePhaseConfirm(CACHE_NAME, B, topologyId, null, rebalanceViewId);
   }
   // Rebalance finished: the final topology has only finalMembers, no pending CH, and
   // is also broadcast as the new stable topology
   int finalTopologyId = rebalanceTopologyId + 3;
   ltm.expectTopology(finalTopologyId, finalMembers, null, CacheTopology.Phase.NO_REBALANCE);
   transport.expectCommand(TopologyUpdateCommand.class, c -> {
      assertEquals(finalTopologyId, c.getTopologyId());
      assertEquals(CacheTopology.Phase.NO_REBALANCE, c.getPhase());
      assertEquals(finalMembers, c.getCurrentCH().getMembers());
      assertNull(c.getPendingCH());
   }).finish();
   transport.expectCommand(TopologyUpdateStableCommand.class, c -> {
      assertEquals(finalTopologyId, c.getTopologyId());
      assertEquals(finalMembers, c.getCurrentCH().getMembers());
      assertNull(c.getPendingCH());
   }).finish();
}
/**
 * Asserts that the consistent hash's member list is exactly {@code members}, in order.
 */
private void assertCHMembers(ConsistentHash ch, Address... members) {
   List<Address> expectedMembers = asList(members);
   assertEquals(expectedMembers, ch.getMembers());
}
@AfterClass(alwaysRun = true)
public void shutdownExecutors() throws InterruptedException {
   // Force-stop the shared executors and verify all tasks terminate within the timeout,
   // so leaked threads cannot interfere with other tests in the suite.
   executor.shutdownNow();
   assertTrue(executor.awaitTermination(10, TimeUnit.SECONDS));
   scheduledExecutor.shutdownNow();
   assertTrue(scheduledExecutor.awaitTermination(10, TimeUnit.SECONDS));
}
}
| 17,889
| 51.157434
| 124
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/topology/MockLocalTopologyManager.java
|
package org.infinispan.topology;
import static java.util.Collections.singletonMap;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Mock implementation of {@link LocalTopologyManager} with a single cache.
*
* @author Dan Berindei
* @since 9.2
*/
class MockLocalTopologyManager implements LocalTopologyManager {
   private static final Log log = LogFactory.getLog(MockLocalTopologyManager.class);
   // The single cache this mock tracks; the cacheName parameters of the interface methods are ignored.
   private final String cacheName;
   // Topologies received via handleTopologyUpdate()/handleRebalance(), in arrival order,
   // consumed by expectTopology().
   private final BlockingQueue<CacheTopology> topologies = new LinkedBlockingDeque<>();
   // Latest known status; replaced wholesale on every update.
   // NOTE(review): not synchronized — presumably updates and reads happen on the test thread
   // or are safely published via the queue; confirm before using from multiple threads.
   private CacheStatusResponse status;
   MockLocalTopologyManager(String cacheName) {
      this.cacheName = cacheName;
   }
   // Sets the initial status. A null joinInfo means this node has not joined the cache,
   // so handleStatusRequest() reports no caches.
   public void init(CacheJoinInfo joinInfo, CacheTopology topology, CacheTopology stableTopology,
                    AvailabilityMode availabilityMode) {
      this.status = new CacheStatusResponse(joinInfo, topology, stableTopology, availabilityMode, Collections.emptyList());
   }
   // Asserts the topology's id, phase, and current/pending members.
   // A null pendingMembers means the topology must have no pending CH.
   public void verifyTopology(CacheTopology topology, int topologyId, List<Address> currentMembers,
                              List<Address> pendingMembers, CacheTopology.Phase phase) {
      log.debugf("Verifying topology %s", topology);
      assertEquals(topologyId, topology.getTopologyId());
      assertEquals(phase, topology.getPhase());
      assertEquals(currentMembers, topology.getCurrentCH().getMembers());
      if (pendingMembers != null) {
         assertEquals(pendingMembers, topology.getPendingCH().getMembers());
      } else {
         assertNull(topology.getPendingCH());
      }
   }
   // Takes the next received topology (waiting up to 10s) and verifies it matches the expectation.
   public void expectTopology(int topologyId, List<Address> currentMembers, List<Address> pendingMembers,
                              CacheTopology.Phase phase) throws Exception {
      CacheTopology topology = topologies.poll(10, TimeUnit.SECONDS);
      assertNotNull("Timed out waiting for topology " + topologyId, topology);
      verifyTopology(topology, topologyId, currentMembers, pendingMembers, phase);
   }
   // Methods below that throw UnsupportedOperationException are not expected to be called by
   // the component under test; a call indicates a test or implementation bug.
   @Override
   public CompletionStage<CacheTopology> join(String cacheName, CacheJoinInfo joinInfo, CacheTopologyHandler stm,
                                              PartitionHandlingManager phm) {
      throw new UnsupportedOperationException();
   }
   @Override
   public void leave(String cacheName, long timeout) {
      throw new UnsupportedOperationException();
   }
   @Override
   public void confirmRebalancePhase(String cacheName, int topologyId, int rebalanceId, Throwable throwable) {
      throw new UnsupportedOperationException();
   }
   // Reports the single cache's status if init() provided a join info, otherwise an empty map.
   @Override
   public CompletionStage<ManagerStatusResponse> handleStatusRequest(int viewId) {
      Map<String, CacheStatusResponse> caches = status.getCacheJoinInfo() != null ?
            singletonMap(cacheName, status) :
            Collections.emptyMap();
      return CompletableFuture.completedFuture(new ManagerStatusResponse(caches, true));
   }
   // Records the new topology/availability and queues the topology for expectTopology().
   @Override
   public CompletionStage<Void> handleTopologyUpdate(String cacheName, CacheTopology cacheTopology, AvailabilityMode availabilityMode,
                                                     int viewId, Address sender) {
      status = new CacheStatusResponse(status.getCacheJoinInfo(), cacheTopology,
                                       status.getStableTopology(), availabilityMode, status.joinedMembers());
      topologies.add(cacheTopology);
      return CompletableFutures.completedNull();
   }
   // Records the new stable topology; stable updates are not queued for expectTopology().
   @Override
   public CompletionStage<Void> handleStableTopologyUpdate(String cacheName, CacheTopology cacheTopology, Address sender, int viewId) {
      status = new CacheStatusResponse(status.getCacheJoinInfo(), status.getCacheTopology(),
                                       cacheTopology, status.getAvailabilityMode(), status.joinedMembers());
      return CompletableFutures.completedNull();
   }
   // Records the rebalance topology and queues it, like handleTopologyUpdate() but keeping
   // the previous availability mode.
   @Override
   public CompletionStage<Void> handleRebalance(String cacheName, CacheTopology cacheTopology, int viewId, Address sender) {
      status = new CacheStatusResponse(status.getCacheJoinInfo(), cacheTopology,
                                       status.getStableTopology(), status.getAvailabilityMode(), status.joinedMembers());
      topologies.add(cacheTopology);
      return CompletableFutures.completedNull();
   }
   @Override
   public CacheTopology getCacheTopology(String cacheName) {
      return status.getCacheTopology();
   }
   @Override
   public CacheTopology getStableCacheTopology(String cacheName) {
      return status.getStableTopology();
   }
   // Rebalancing is always reported as enabled by this mock.
   @Override
   public boolean isRebalancingEnabled() {
      return true;
   }
   @Override
   public boolean isCacheRebalancingEnabled(String cacheName) {
      return true;
   }
   @Override
   public void setRebalancingEnabled(boolean enabled) {
      throw new UnsupportedOperationException();
   }
   @Override
   public void setCacheRebalancingEnabled(String cacheName, boolean enabled) {
      throw new UnsupportedOperationException();
   }
   @Override
   public RebalancingStatus getRebalancingStatus(String cacheName) {
      throw new UnsupportedOperationException();
   }
   @Override
   public AvailabilityMode getCacheAvailability(String cacheName) {
      return status.getAvailabilityMode();
   }
   @Override
   public void setCacheAvailability(String cacheName, AvailabilityMode availabilityMode) {
      throw new UnsupportedOperationException();
   }
   // This mock has no persistent UUID of its own.
   @Override
   public PersistentUUID getPersistentUUID() {
      return null;
   }
   @Override
   public void cacheShutdown(String name) {
      throw new UnsupportedOperationException();
   }
   @Override
   public CompletionStage<Void> handleCacheShutdown(String cacheName) {
      throw new UnsupportedOperationException();
   }
   // The stable topology is always considered already reached.
   @Override
   public CompletionStage<Void> stableTopologyCompletion(String cacheName) {
      return CompletableFutures.completedNull();
   }
}
| 6,668
| 35.642857
| 135
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/topology/TestClusterCacheStatus.java
|
package org.infinispan.topology;
import static java.util.Arrays.asList;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotSame;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertSame;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.partitionhandling.impl.AvailabilityStrategy;
import org.infinispan.partitionhandling.impl.AvailabilityStrategyContext;
import org.infinispan.remoting.transport.Address;
/**
* Mock {@link org.infinispan.topology.ClusterCacheStatus} for unit tests.
*
* It only maintains the current/stable topologies, verifying {@link AvailabilityStrategyContext} calls
* requires a proper mock.
*
* @author Dan Berindei
* @since 9.2
*/
public class TestClusterCacheStatus {
   // Join info shared by all topologies built here; supplies the CH factory, owners and segments.
   private final CacheJoinInfo joinInfo;
   // Current topology; mutated by the state-transition helpers below.
   private CacheTopology topology;
   // Last stable (NO_REBALANCE, no pending CH) topology.
   private CacheTopology stableTopology;
   public TestClusterCacheStatus(CacheJoinInfo joinInfo, CacheTopology topology, CacheTopology stableTopology) {
      this.joinInfo = joinInfo;
      this.topology = topology;
      // A stable topology must not have a pending CH
      assertNull(stableTopology.getPendingCH());
      this.stableTopology = stableTopology;
   }
   public static TestClusterCacheStatus start(CacheJoinInfo joinInfo, Address... members) {
      List<Address> membersList = asList(members);
      return start(joinInfo, membersList);
   }
   // Creates a status with topology id 1 / rebalance id 1 and identical current and stable topologies.
   public static TestClusterCacheStatus start(CacheJoinInfo joinInfo, List<Address> members) {
      ConsistentHash currentCH = joinInfo.getConsistentHashFactory()
                                         .create(joinInfo.getNumOwners(),
                                                 joinInfo.getNumSegments(), members, null);
      CacheTopology topology = new CacheTopology(1, 1, currentCH, null, null, CacheTopology.Phase.NO_REBALANCE, members,
                                                 persistentUUIDs(members));
      return new TestClusterCacheStatus(joinInfo, topology, topology);
   }
   // Shallow copy: CacheTopology instances are shared, which is fine as they are not mutated.
   public TestClusterCacheStatus copy() {
      return new TestClusterCacheStatus(joinInfo, topology, stableTopology);
   }
   public void startRebalance(CacheTopology.Phase phase, Address... targetMembers) {
      startRebalance(phase, asList(targetMembers));
   }
   // Starts a rebalance towards targetMembers: computes a rebalanced pending CH and bumps
   // both the topology id and the rebalance id.
   public void startRebalance(CacheTopology.Phase phase, List<Address> targetMembers) {
      assertNull(topology.getPendingCH());
      assertTrue(targetMembers.containsAll(topology.getCurrentCH().getMembers()));
      ConsistentHash pendingCH = joinInfo.getConsistentHashFactory().updateMembers(topology.getCurrentCH(), targetMembers,
                                                                                   null);
      pendingCH = joinInfo.getConsistentHashFactory().rebalance(pendingCH);
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId() + 1, topology.getCurrentCH(),
                                   pendingCH, null, phase, targetMembers, persistentUUIDs(targetMembers));
   }
   // Moves the in-progress rebalance to the given phase; only the topology id is incremented.
   public void advanceRebalance(CacheTopology.Phase phase) {
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId(), topology.getCurrentCH(),
                                   topology.getPendingCH(), topology.getUnionCH(), phase, topology.getActualMembers(),
                                   persistentUUIDs(topology.getMembers()));
   }
   // Completes the rebalance: the pending CH becomes the current CH.
   public void finishRebalance() {
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId(), topology.getPendingCH(),
                                   null, null, CacheTopology.Phase.NO_REBALANCE,
                                   topology.getActualMembers(), persistentUUIDs(topology.getActualMembers()));
   }
   // Aborts the rebalance, keeping the read CH as the current CH; bumps both ids.
   public void cancelRebalance() {
      assertNotSame(CacheTopology.Phase.NO_REBALANCE, topology.getPhase());
      assertNotSame(CacheTopology.Phase.CONFLICT_RESOLUTION, topology.getPhase());
      // Use the read CH as the current CH
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId() + 1,
                                   readConsistentHash(), null, null, CacheTopology.Phase.NO_REBALANCE,
                                   topology.getActualMembers(), persistentUUIDs(topology.getActualMembers()));
   }
   /**
    * The topologies included in the status responses do not have a union CH, so
    * {@link CacheTopology#getReadConsistentHash()} doesn't work.
    */
   public ConsistentHash readConsistentHash() {
      return AvailabilityStrategy.ownersConsistentHash(topology, joinInfo.getConsistentHashFactory());
   }
   // Promotes the current topology to stable; only valid when no rebalance is in progress.
   public void updateStableTopology() {
      assertEquals(CacheTopology.Phase.NO_REBALANCE, topology.getPhase());
      stableTopology = topology;
   }
   public void removeMembers(Address... leavers) {
      removeMembers(asList(leavers));
   }
   // Removes leavers from the member list and from all CHs (current, pending, union) that exist.
   public void removeMembers(List<Address> leavers) {
      List<Address> updatedMembers = new ArrayList<>(topology.getActualMembers());
      updatedMembers.removeAll(leavers);
      // Every leaver must have been a member
      assertEquals(topology.getActualMembers().size(), leavers.size() + updatedMembers.size());
      ConsistentHash updatedCH = joinInfo.getConsistentHashFactory()
                                         .updateMembers(topology.getCurrentCH(), updatedMembers, null);
      ConsistentHash updatedPendingCH = topology.getPendingCH() != null ?
            joinInfo.getConsistentHashFactory()
                    .updateMembers(topology.getPendingCH(), updatedMembers, null) :
            null;
      ConsistentHash updatedUnionCH = topology.getUnionCH() != null ?
            joinInfo.getConsistentHashFactory()
                    .updateMembers(topology.getUnionCH(), updatedMembers, null) :
            null;
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId(), updatedCH, updatedPendingCH,
                                   updatedUnionCH, topology.getPhase(), updatedMembers,
                                   persistentUUIDs(updatedMembers));
   }
   public void startConflictResolution(ConsistentHash conflictCH, Address... mergeMembers) {
      startConflictResolution(conflictCH, asList(mergeMembers));
   }
   // Enters the CONFLICT_RESOLUTION phase with the given merged CH and member list.
   public void startConflictResolution(ConsistentHash conflictCH, List<Address> mergeMembers) {
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId() + 1,
                                   conflictCH, null, CacheTopology.Phase.CONFLICT_RESOLUTION, mergeMembers,
                                   persistentUUIDs(mergeMembers));
   }
   // Builds the CH used during conflict resolution: the union of all partitions' read CHs,
   // further unioned with its own rebalanced form.
   public static ConsistentHash conflictResolutionConsistentHash(TestClusterCacheStatus... caches) {
      ConsistentHashFactory chf = caches[0].joinInfo.getConsistentHashFactory();
      ConsistentHash hash = Stream.of(caches)
                                  .map(TestClusterCacheStatus::readConsistentHash)
                                  .reduce(chf::union)
                                  .orElseThrow(IllegalStateException::new);
      return chf.union(hash, chf.rebalance(hash));
   }
   // Convenience factory for a fresh CH over the given addresses.
   public ConsistentHash ch(Address... addresses) {
      return joinInfo.getConsistentHashFactory()
                     .create(joinInfo.getNumOwners(), joinInfo.getNumSegments(),
                             asList(addresses), null);
   }
   // Deterministic fake UUID derived from the address, so tests can recompute it.
   public static PersistentUUID persistentUUID(Address a) {
      return new PersistentUUID(a.hashCode(), a.hashCode());
   }
   private static List<PersistentUUID> persistentUUIDs(List<Address> members) {
      return members.stream()
                    .map(TestClusterCacheStatus::persistentUUID)
                    .collect(Collectors.toList());
   }
   public CacheJoinInfo joinInfo(Address a) {
      // Copy the generic CacheJoinInfo and replace the persistent UUID
      return new CacheJoinInfo(joinInfo.getConsistentHashFactory(), joinInfo.getNumSegments(), joinInfo.getNumOwners(),
                               joinInfo.getTimeout(), joinInfo.getCacheMode(), joinInfo.getCapacityFactor(),
                               persistentUUID(a), joinInfo.getPersistentStateChecksum());
   }
   public CacheTopology topology() {
      return topology;
   }
   public CacheTopology stableTopology() {
      return stableTopology;
   }
   // Shifts the current topology's ids without changing anything else.
   public void incrementIds(int topologyIdDelta, int rebalanceIdDelta) {
      topology = new CacheTopology(topology.getTopologyId() + topologyIdDelta,
                                   topology.getRebalanceId() + rebalanceIdDelta,
                                   topology.getCurrentCH(), topology.getPendingCH(), topology.getUnionCH(),
                                   topology.getPhase(), topology.getActualMembers(),
                                   topology.getMembersPersistentUUIDs());
   }
   // Shifts the stable topology's ids; valid only while it really is stable.
   public void incrementStableIds(int topologyIdDelta, int rebalanceIdDelta) {
      assertSame(CacheTopology.Phase.NO_REBALANCE, stableTopology.getPhase());
      assertNull(stableTopology.getPendingCH());
      assertNull(stableTopology.getUnionCH());
      stableTopology = new CacheTopology(stableTopology.getTopologyId() + topologyIdDelta,
                                         stableTopology.getRebalanceId() + rebalanceIdDelta,
                                         stableTopology.getCurrentCH(), null, null,
                                         stableTopology.getPhase(), stableTopology.getActualMembers(),
                                         stableTopology.getMembersPersistentUUIDs());
   }
   public void incrementIds() {
      incrementIds(1, 1);
   }
   public void incrementIdsIfNeeded(TestClusterCacheStatus... otherPartitions) {
      // If there is any other partition with the same topology id, use that topology id + 1
      // Same with the rebalance id
      int newTopologyId = topology.getTopologyId();
      int newRebalanceId = topology.getRebalanceId();
      for (TestClusterCacheStatus cache : otherPartitions) {
         newTopologyId = Math.max(cache.topology.getTopologyId() + 1, newTopologyId);
         newRebalanceId = Math.max(cache.topology.getRebalanceId() + 1, newRebalanceId);
      }
      topology = new CacheTopology(newTopologyId, newRebalanceId,
                                   topology.getCurrentCH(), topology.getPendingCH(), topology.getUnionCH(),
                                   topology.getPhase(), topology.getActualMembers(),
                                   topology.getMembersPersistentUUIDs());
   }
   public void updateActualMembers(Address... actualMembers) {
      updateActualMembers(asList(actualMembers));
   }
   // Shrinks the actual-member list (e.g. after a partition) without touching the CHs.
   public void updateActualMembers(List<Address> actualMembers) {
      assertTrue(topology.getMembers().containsAll(actualMembers));
      topology = new CacheTopology(topology.getTopologyId() + 1, topology.getRebalanceId() + 1,
                                   topology.getCurrentCH(), topology.getPendingCH(), topology.getUnionCH(),
                                   topology.getPhase(), actualMembers, persistentUUIDs(actualMembers));
   }
}
| 11,367
| 48.212121
| 122
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/RemoteExceptionHandlingWithStateTransferTest.java
|
package org.infinispan.remoting;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
* Verifies remote exception handling when state transfer is enabled.
*
* @author Galder Zamarreño
* @since 5.1
*/
@Test(groups = "functional", testName = "remoting.RemoteExceptionHandlingWithStateTransferTest")
public class RemoteExceptionHandlingWithStateTransferTest extends TransportSenderExceptionHandlingTest {
   /**
    * Same cluster as the base class, but with in-memory state transfer enabled so the
    * exception-handling paths are exercised while state transfer is active.
    */
   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
      builder.clustering().stateTransfer().fetchInMemoryState(true);
      createClusteredCaches(2, "replSync", FailureTypeSCI.INSTANCE, builder);
   }
}
| 834
| 35.304348
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/RaftTest.java
|
package org.infinispan.remoting;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.io.ByteBufferImpl;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.EmbeddedCacheManagerStartupException;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.raft.RaftChannel;
import org.infinispan.remoting.transport.raft.RaftChannelConfiguration;
import org.infinispan.remoting.transport.raft.RaftManager;
import org.infinispan.remoting.transport.raft.RaftStateMachine;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
* Basic test for RAFT protocol.
*
* @since 14.0
*/
@Test(groups = "functional", testName = "remoting.RaftTest")
public class RaftTest extends MultipleCacheManagersTest {
// Channel configuration with a volatile (in-memory, non-persistent) RAFT log.
private static final RaftChannelConfiguration DEFAULT_CONFIGURATION = new RaftChannelConfiguration.Builder()
      .logMode(RaftChannelConfiguration.RaftLogMode.VOLATILE)
      .build();
// Number of forked writer threads racing to send entries in each test.
private static final int CONCURRENT_THREADS = 16;
// Number of cache managers started up front; the remaining RAFT member name is used by
// nodes that join mid-test.
private static final int CLUSTER_SIZE = 3;
// NOTE: each node name must contain the test class name ("RaftTest") for the test framework.
private static final String[] RAFT_MEMBERS = new String[]{"RaftTest-A", "RaftTest-B", "RaftTest-C", "RaftTest-D"};
@Override
protected void createCacheManagers() throws Throwable {
   // Start the first CLUSTER_SIZE raft members; the remaining configured member names
   // stay free for nodes that join later in individual tests.
   for (int node = 0; node < CLUSTER_SIZE; ++node) {
      GlobalConfigurationBuilder builder = defaultGlobalConfigurationBuilder();
      builder.transport().nodeName(RAFT_MEMBERS[node]);
      builder.transport().raftMembers(RAFT_MEMBERS);
      addClusterEnabledCacheManager(builder, null);
   }
}
// Sends CONCURRENT_THREADS entries concurrently through the RAFT channel and verifies that
// every node's state machine ends up with the same entries in the same (total) order.
public void testRaft(Method method) throws ExecutionException, InterruptedException, TimeoutException {
   List<RaftManager> raftManagerList = raftManagers();
   for (RaftManager m : raftManagerList) {
      AssertJUnit.assertTrue(m.isRaftAvailable());
   }
   List<RaftQueueStateMachine> stateMachines = registerStateMachine(raftManagerList, RaftQueueStateMachine::new, method.getName());
   awaitForLeader(raftManagerList, method.getName());
   List<Future<CompletionStage<ByteBuffer>>> futures = new ArrayList<>(CONCURRENT_THREADS);
   // Barrier makes all forked senders race, distributing sends round-robin over the nodes
   CyclicBarrier barrier = new CyclicBarrier(CONCURRENT_THREADS);
   for (int i = 0; i < CONCURRENT_THREADS; ++i) {
      int idx = i % stateMachines.size();
      byte b = (byte) i;
      futures.add(fork(() -> {
         barrier.await(10, TimeUnit.SECONDS);
         return stateMachines.get(idx).raftChannel.send(ByteBufferImpl.create(b));
      }));
   }
   // Each send resolves to a single-byte response of 0 (the state machine's reply)
   for (Future<CompletionStage<ByteBuffer>> f : futures) {
      CompletionStage<ByteBuffer> cf = f.get(10, TimeUnit.SECONDS);
      ByteBuffer buffer = cf.toCompletableFuture().get(10, TimeUnit.SECONDS);
      AssertJUnit.assertEquals(1, buffer.getLength());
      AssertJUnit.assertEquals(0, buffer.getBuf()[0]);
   }
   List<Byte> expectedState = null;
   // wait until all bytes are applied
   for (int i = 0; i < stateMachines.size(); ++i) {
      RaftQueueStateMachine m = stateMachines.get(i);
      eventually(() -> m.state.size() == CONCURRENT_THREADS);
      // The first node's state becomes the reference; every other node must match it exactly
      if (expectedState == null) {
         expectedState = new ArrayList<>(m.state);
      } else {
         AssertJUnit.assertEquals("State is different for node " + i, expectedState, m.state);
      }
   }
}
// Like testRaft, but afterwards a fourth member joins and must receive the complete
// replicated state (all previously applied entries, in the same order).
public void testRaftStateTransfer(Method method) throws ExecutionException, InterruptedException, TimeoutException {
   List<RaftManager> raftManagerList = raftManagers();
   for (RaftManager m : raftManagerList) {
      AssertJUnit.assertTrue(m.isRaftAvailable());
   }
   List<RaftQueueStateMachine> stateMachines = registerStateMachine(raftManagerList, RaftQueueStateMachine::new, method.getName());
   awaitForLeader(raftManagerList, method.getName());
   List<Future<CompletionStage<ByteBuffer>>> futures = new ArrayList<>(CONCURRENT_THREADS);
   // Barrier makes all forked senders race, distributing sends round-robin over the nodes
   CyclicBarrier barrier = new CyclicBarrier(CONCURRENT_THREADS);
   for (int i = 0; i < CONCURRENT_THREADS; ++i) {
      int idx = i % stateMachines.size();
      byte b = (byte) i;
      futures.add(fork(() -> {
         barrier.await(10, TimeUnit.SECONDS);
         return stateMachines.get(idx).raftChannel.send(ByteBufferImpl.create(b));
      }));
   }
   // Each send resolves to a single-byte response of 0 (the state machine's reply)
   for (Future<CompletionStage<ByteBuffer>> f : futures) {
      CompletionStage<ByteBuffer> cf = f.get(10, TimeUnit.SECONDS);
      ByteBuffer buffer = cf.toCompletableFuture().get(10, TimeUnit.SECONDS);
      AssertJUnit.assertEquals(1, buffer.getLength());
      AssertJUnit.assertEquals(0, buffer.getBuf()[0]);
   }
   List<Byte> expectedState = null;
   // wait until all bytes are applied
   for (int i = 0; i < stateMachines.size(); ++i) {
      RaftQueueStateMachine m = stateMachines.get(i);
      eventually(() -> m.state.size() == CONCURRENT_THREADS);
      if (expectedState == null) {
         expectedState = new ArrayList<>(m.state);
      } else {
         AssertJUnit.assertEquals("State is different for node " + i, expectedState, m.state);
      }
   }
   try {
      // Start the 4th configured member; it must catch up with the existing RAFT log
      GlobalConfigurationBuilder builder = defaultGlobalConfigurationBuilder();
      builder.transport().raftMembers(RAFT_MEMBERS);
      builder.transport().nodeName(RAFT_MEMBERS[3]);
      EmbeddedCacheManager cm = addClusterEnabledCacheManager(builder, null);
      RaftManager raftManager = raftManager(cm);
      RaftQueueStateMachine sm = registerStateMachine(raftManager, RaftQueueStateMachine::new, method.getName());
      awaitForLeader(raftManager, method.getName());
      // eventually, receives all entries!
      eventuallyEquals(CONCURRENT_THREADS, sm.state::size);
      AssertJUnit.assertEquals("State is different for node 3", expectedState, sm.state);
   } finally {
      // kill the new member
      if (cacheManagers.size() == 4) {
         TestingUtil.killCacheManagers(cacheManagers.remove(3));
      }
   }
}
// Verifies normal RAFT operation, then checks that a node reusing an already-taken RAFT
// member name is rejected at startup.
public void testNoDupes(Method method) throws ExecutionException, InterruptedException, TimeoutException {
   List<RaftManager> raftManagerList = raftManagers();
   for (RaftManager m : raftManagerList) {
      AssertJUnit.assertTrue(m.isRaftAvailable());
   }
   List<RaftQueueStateMachine> stateMachines = registerStateMachine(raftManagerList, RaftQueueStateMachine::new, method.getName());
   awaitForLeader(raftManagerList, method.getName());
   List<Future<CompletionStage<ByteBuffer>>> futures = new ArrayList<>(CONCURRENT_THREADS);
   // Barrier makes all forked senders race, distributing sends round-robin over the nodes
   CyclicBarrier barrier = new CyclicBarrier(CONCURRENT_THREADS);
   for (int i = 0; i < CONCURRENT_THREADS; ++i) {
      int idx = i % stateMachines.size();
      byte b = (byte) i;
      futures.add(fork(() -> {
         barrier.await(10, TimeUnit.SECONDS);
         return stateMachines.get(idx).raftChannel.send(ByteBufferImpl.create(b));
      }));
   }
   // Each send resolves to a single-byte response of 0 (the state machine's reply)
   for (Future<CompletionStage<ByteBuffer>> f : futures) {
      CompletionStage<ByteBuffer> cf = f.get(10, TimeUnit.SECONDS);
      ByteBuffer buffer = cf.toCompletableFuture().get(10, TimeUnit.SECONDS);
      AssertJUnit.assertEquals(1, buffer.getLength());
      AssertJUnit.assertEquals(0, buffer.getBuf()[0]);
   }
   List<Byte> expectedState = null;
   // wait until all bytes are applied
   for (int i = 0; i < stateMachines.size(); ++i) {
      RaftQueueStateMachine m = stateMachines.get(i);
      eventually(() -> m.state.size() == CONCURRENT_THREADS);
      if (expectedState == null) {
         expectedState = new ArrayList<>(m.state);
      } else {
         AssertJUnit.assertEquals("State is different for node " + i, expectedState, m.state);
      }
   }
   try {
      GlobalConfigurationBuilder builder = defaultGlobalConfigurationBuilder();
      builder.transport().raftMembers(RAFT_MEMBERS);
      // duplicated node name! the start should fail
      builder.transport().nodeName(RAFT_MEMBERS[2]);
      Exceptions.expectException(EmbeddedCacheManagerStartupException.class, CacheException.class, SecurityException.class, () -> addClusterEnabledCacheManager(builder, null));
   } finally {
      // kill the new member
      if (cacheManagers.size() == 4) {
         TestingUtil.killCacheManagers(cacheManagers.remove(3));
      }
   }
}
private List<RaftManager> raftManagers() {
return cacheManagers.stream()
.map(RaftTest::raftManager)
.collect(Collectors.toList());
}
private static RaftManager raftManager(EmbeddedCacheManager cacheManager) {
return TestingUtil.extractGlobalComponent(cacheManager, Transport.class).raftManager();
}
private static <T extends RaftStateMachine> List<T> registerStateMachine(List<? extends RaftManager> raftManagers, Supplier<? extends T> supplier, String name) {
return raftManagers.stream()
.map(m -> registerStateMachine(m, supplier, name))
.collect(Collectors.toList());
}
   // Registers (or retrieves, if already registered) the named state machine using the default configuration.
   private static <T extends RaftStateMachine> T registerStateMachine(RaftManager manager, Supplier<T> supplier, String name) {
      return manager.getOrRegisterStateMachine(name, supplier, DEFAULT_CONFIGURATION);
   }
private static void awaitForLeader(List<? extends RaftManager> raftManagers, String name) {
for (RaftManager manager : raftManagers) {
awaitForLeader(manager, name);
}
}
   // Polls until the manager sees a leader for the raft channel identified by name.
   private static void awaitForLeader(RaftManager manager, String name) {
      eventually(() -> manager.hasLeader(name));
   }
private static class RaftQueueStateMachine implements RaftStateMachine {
private volatile RaftChannel raftChannel;
final List<Byte> state = Collections.synchronizedList(new LinkedList<>());
@Override
public void init(RaftChannel raftChannel) {
this.raftChannel = raftChannel;
}
@Override
public ByteBuffer apply(ByteBuffer buffer) throws Exception {
AssertJUnit.assertEquals(1, buffer.getLength());
state.add(buffer.getBuf()[0]);
log.debugf("[%s | %s] apply: %d", raftChannel.channelName(), raftChannel.raftId(), state.size());
return ByteBufferImpl.create((byte) 0);
}
@Override
public void readStateFrom(DataInput dataInput) throws IOException {
int size = dataInput.readInt();
state.clear();
for (int i = 0; i < size; ++i) {
state.add(dataInput.readByte());
}
log.debugf("[%s | %s] received state: %d", raftChannel.channelName(), raftChannel.raftId(), state.size());
}
@Override
public void writeStateTo(DataOutput dataOutput) throws IOException {
List<Byte> copy = new ArrayList<>(state);
dataOutput.writeInt(copy.size());
for (byte b : copy) {
dataOutput.writeByte(b);
}
log.debugf("[%s | %s] sent state: %d", raftChannel.channelName(), raftChannel.raftId(), copy.size());
}
}
}
| 11,983
| 40.611111
| 179
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/AsynchronousInvocationTest.java
|
package org.infinispan.remoting;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import static org.infinispan.test.fwk.TestCacheManagerFactory.getDefaultCacheConfiguration;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.List;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.GlobalRpcCommand;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.module.TestGlobalConfigurationBuilder;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.InboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestException;
import org.infinispan.util.ByteString;
import org.infinispan.util.concurrent.BlockingTaskAwareExecutorService;
import org.infinispan.util.concurrent.BlockingTaskAwareExecutorServiceImpl;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Tests the Asynchronous Invocation API and checks if the commands are correctly processed (or JGroups or Infinispan
* thread pool)
*
* @author Pedro Ruivo
* @since 5.3
*/
@Test(groups = "functional", testName = "remoting.AsynchronousInvocationTest")
public class AsynchronousInvocationTest extends AbstractInfinispanTest {

   public static final String CACHE_NAME = "testCache";
   public static final ByteString CACHE_NAME_BYTES = ByteString.fromString(CACHE_NAME);
   private EmbeddedCacheManager cacheManager;
   // counting wrappers installed as the manager's non-blocking/blocking executors
   private DummyTaskCountExecutorService nonBlockingExecutorService;
   private DummyTaskCountExecutorService blockingExecutorService;
   private InboundInvocationHandler invocationHandler;
   private Address address;

   /** Mocks a {@link CacheRpcCommand} that completes immediately with the given blocking behavior. */
   private static CacheRpcCommand mockCacheRpcCommand(boolean blocking) throws Throwable {
      CacheRpcCommand mock = mock(CacheRpcCommand.class);
      when(mock.canBlock()).thenReturn(blocking);
      when(mock.getCacheName()).thenReturn(CACHE_NAME_BYTES);
      when(mock.invokeAsync(any())).thenReturn(CompletableFutures.completedNull());
      return mock;
   }

   /** Mocks a {@link GlobalRpcCommand} that completes immediately with the given blocking behavior. */
   private static GlobalRpcCommand mockGlobalRpcCommand(boolean blocking) throws Throwable {
      GlobalRpcCommand mock = mock(GlobalRpcCommand.class);
      when(mock.canBlock()).thenReturn(blocking);
      when(mock.invokeAsync(any())).thenReturn(CompletableFutures.completedNull());
      return mock;
   }

   /** Mocks a {@link ReplicableCommand} that completes immediately with the given blocking behavior. */
   private static ReplicableCommand mockReplicableCommand(boolean blocking) throws Throwable {
      ReplicableCommand mock = mock(ReplicableCommand.class);
      when(mock.canBlock()).thenReturn(blocking);
      when(mock.invokeAsync()).thenReturn(CompletableFutures.completedNull());
      return mock;
   }

   /** Wraps a mocked {@link VisitableCommand} with the given blocking behavior in a {@link SingleRpcCommand}. */
   private static SingleRpcCommand mockSingleRpcCommand(boolean blocking) {
      VisitableCommand mock = mock(VisitableCommand.class);
      when(mock.canBlock()).thenReturn(blocking);
      return new SingleRpcCommand(CACHE_NAME_BYTES, mock);
   }

   @BeforeClass
   public void setUp() throws Throwable {
      // We need to use an actual thread pool - due to a circular dependency in ClusterTopologyManagerImpl invoking
      // a command via the non blocking executor that loads up the LocalTopologyManagerImpl that Injects the ClusterTopologyManagerImpl
      ExecutorService realExecutor = Executors.newSingleThreadExecutor();
      nonBlockingExecutorService = new DummyTaskCountExecutorService(realExecutor);
      blockingExecutorService = new DummyTaskCountExecutorService(realExecutor);
      BlockingTaskAwareExecutorService nonBlockingExecutor =
            new BlockingTaskAwareExecutorServiceImpl(nonBlockingExecutorService,
                                                     TIME_SERVICE);
      BlockingTaskAwareExecutorService blockingExecutor =
            new BlockingTaskAwareExecutorServiceImpl(blockingExecutorService,
                                                     TIME_SERVICE);
      GlobalConfigurationBuilder globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      globalBuilder.defaultCacheName(CACHE_NAME);
      // substitute the real executors with the counting wrappers so dispatch can be observed
      globalBuilder.addModule(TestGlobalConfigurationBuilder.class)
                   .testGlobalComponent(KnownComponentNames.NON_BLOCKING_EXECUTOR, nonBlockingExecutor)
                   .testGlobalComponent(KnownComponentNames.BLOCKING_EXECUTOR, blockingExecutor);
      ConfigurationBuilder builder = getDefaultCacheConfiguration(false);
      builder.clustering().cacheMode(CacheMode.DIST_SYNC);
      cacheManager = createClusteredCacheManager(globalBuilder, builder);
      Transport transport = extractGlobalComponent(cacheManager, Transport.class);
      address = transport.getAddress();
      invocationHandler = extractGlobalComponent(cacheManager, InboundInvocationHandler.class);
      // Start the cache
      cacheManager.getCache();
   }

   @AfterClass
   public void tearDown() {
      if (cacheManager != null) {
         // BlockingTaskAwareExecutorServiceImpl doesn't have a @Stop annotation so we need to stop it manually
         extractGlobalComponent(cacheManager, ExecutorService.class, KnownComponentNames.NON_BLOCKING_EXECUTOR).shutdownNow();
         extractGlobalComponent(cacheManager, ExecutorService.class, KnownComponentNames.BLOCKING_EXECUTOR).shutdownNow();
         cacheManager.stop();
      }
   }

   public void testCacheRpcCommands() throws Throwable {
      CacheRpcCommand blockingCacheRpcCommand = mockCacheRpcCommand(true);
      assertDispatchForCommand(blockingCacheRpcCommand, true);
      CacheRpcCommand nonBlockingCacheRpcCommand = mockCacheRpcCommand(false);
      assertDispatchForCommand(nonBlockingCacheRpcCommand, false);
   }

   public void testGlobalRpcCommands() throws Throwable {
      GlobalRpcCommand blockingGlobalRpcCommand = mockGlobalRpcCommand(true);
      assertDispatchForCommand(blockingGlobalRpcCommand, true);
      GlobalRpcCommand nonBlockingGlobalRpcCommand = mockGlobalRpcCommand(false);
      assertDispatchForCommand(nonBlockingGlobalRpcCommand, false);
   }

   public void testReplicableCommands() throws Throwable {
      ReplicableCommand blockingReplicableCommand = mockReplicableCommand(true);
      assertDispatchForCommand(blockingReplicableCommand, true);
      ReplicableCommand nonBlockingReplicableCommand = mockReplicableCommand(false);
      assertDispatchForCommand(nonBlockingReplicableCommand, false);
   }

   public void testSingleRpcCommand() throws Throwable {
      SingleRpcCommand blockingSingleRpcCommand = mockSingleRpcCommand(true);
      assertDispatchForCommand(blockingSingleRpcCommand, true);
      SingleRpcCommand nonBlockingSingleRpcCommand = mockSingleRpcCommand(false);
      assertDispatchForCommand(nonBlockingSingleRpcCommand, false);
   }

   /**
    * Delivers the command through the inbound handler and asserts which executor (if any) was used:
    * with {@link DeliverOrder#NONE}, only a blocking command is expected to hit its executor;
    * with {@link DeliverOrder#PER_SENDER}, neither executor is expected to be used.
    */
   private void assertDispatchForCommand(ReplicableCommand command, boolean isBlocking) throws Exception {
      Assert.assertEquals(isBlocking, command.canBlock());
      // use a parameterized format instead of eager string concatenation
      log.debugf("Testing %s", command.getClass().getCanonicalName());
      DummyTaskCountExecutorService executorToUse = isBlocking ? blockingExecutorService : nonBlockingExecutorService;
      executorToUse.reset();
      CompletableFutureResponse response = new CompletableFutureResponse();
      invocationHandler.handleFromCluster(address, command, response, DeliverOrder.NONE);
      response.await(30, TimeUnit.SECONDS);
      Assert.assertEquals(executorToUse.hasExecutedCommand, isBlocking,
                          "Command " + command.getClass() + " dispatched wrongly.");
      executorToUse.reset();
      response = new CompletableFutureResponse();
      invocationHandler.handleFromCluster(address, command, response, DeliverOrder.PER_SENDER);
      response.await(30, TimeUnit.SECONDS);
      Assert.assertFalse(executorToUse.hasExecutedCommand, "Command " + command.getClass() + " dispatched wrongly.");
   }

   /**
    * Delegating executor that records whether {@link #execute(Runnable)} was ever invoked
    * since the last {@link #reset()}.
    */
   private class DummyTaskCountExecutorService extends AbstractExecutorService {
      private final ExecutorService realExecutor;
      private volatile boolean hasExecutedCommand;

      private DummyTaskCountExecutorService(ExecutorService realExecutor) {
         this.realExecutor = realExecutor;
      }

      @Override
      public void execute(Runnable command) {
         hasExecutedCommand = true;
         realExecutor.execute(command);
      }

      public void reset() {
         hasExecutedCommand = false;
      }

      @Override
      public void shutdown() {
         realExecutor.shutdown();
      }

      @Override
      public List<Runnable> shutdownNow() {
         return realExecutor.shutdownNow();
      }

      @Override
      public boolean isShutdown() {
         return realExecutor.isShutdown();
      }

      @Override
      public boolean isTerminated() {
         return realExecutor.isTerminated();
      }

      @Override
      public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
         return realExecutor.awaitTermination(timeout, unit);
      }
   }

   /**
    * {@link Reply} backed by a {@link CompletableFuture}; {@link #await(long, TimeUnit)} rethrows
    * a received {@link ExceptionResponse} as a {@link TestException}.
    */
   private static class CompletableFutureResponse implements Reply {

      private final CompletableFuture<Response> responseFuture = new CompletableFuture<>();

      public void await(long time, TimeUnit unit) throws Exception {
         Response response = responseFuture.get(time, unit);
         if (response instanceof ExceptionResponse) {
            throw new TestException(((ExceptionResponse) response).getException());
         }
      }

      @Override
      public void reply(Response response) {
         responseFuture.complete(response);
      }
   }
}
| 10,711
| 43.082305
| 135
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/TransportSenderExceptionHandlingTest.java
|
package org.infinispan.remoting;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import java.lang.reflect.InvocationTargetException;
import java.util.EmptyStackException;
import org.infinispan.Cache;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.locking.NonTransactionalLockingInterceptor;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
import org.infinispan.notifications.cachelistener.event.CacheEntryEvent;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.data.BrokenMarshallingPojo;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "remoting.TransportSenderExceptionHandlingTest")
public class TransportSenderExceptionHandlingTest extends MultipleCacheManagersTest {
   private final String key = "k-illyria";

   @Override
   protected void createCacheManagers() throws Throwable {
      createClusteredCaches(2, "replSync", FailureTypeSCI.INSTANCE,
                            getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false));
   }

   /**
    * A value that fails to marshall must surface on the caller as a {@link MarshallingException}
    * wrapped in a {@link RemoteException}.
    */
   public void testInvokeAndExceptionWhileUnmarshalling() throws Exception {
      Cache<Object, Object> cache1 = cache(0, "replSync");
      Exceptions.expectException(RemoteException.class, MarshallingException.class, () -> cache1.put(key, new BrokenMarshallingPojo(false)));
   }

   @Test(expectedExceptions = ArrayStoreException.class)
   public void testThrowExceptionFromRemoteListener() throws Throwable {
      induceListenerMalfunctioning(false, FailureType.EXCEPTION_FROM_LISTENER);
   }

   @Test(expectedExceptions = NoClassDefFoundError.class)
   public void testThrowErrorFromRemoteListener() throws Throwable {
      induceListenerMalfunctioning(true, FailureType.ERROR_FROM_LISTENER);
   }

   @Test(expectedExceptions = EmptyStackException.class)
   public void testThrowExceptionFromRemoteInterceptor() throws Throwable {
      induceInterceptorMalfunctioning(FailureType.EXCEPTION_FROM_INTERCEPTOR);
   }

   @Test(expectedExceptions = ClassCircularityError.class)
   public void testThrowErrorFromRemoteInterceptor() throws Throwable {
      induceInterceptorMalfunctioning(FailureType.ERROR_FROM_INTERCEPTOR);
   }

   /**
    * Installs a failing interceptor on the remote node, performs a put on the local node and
    * rethrows the unwrapped cause so the {@code @Test(expectedExceptions)} declarations above match.
    */
   private void induceInterceptorMalfunctioning(FailureType failureType) throws Throwable {
      Cache<Object, Object> cache1 = cache(0, "replSync");
      Cache<Object, Object> cache2 = cache(1, "replSync");
      extractInterceptorChain(cache2).addInterceptorAfter(
            new ErrorInducingInterceptor(), NonTransactionalLockingInterceptor.class);
      log.info("Before put.");
      try {
         cache1.put(failureType, 1);
      } catch (CacheException e) {
         // unwrap to the original interceptor throwable
         Throwable cause = e.getCause();
         if (cause.getCause() == null)
            throw cause;
         else
            throw cause.getCause();
      } finally {
         extractInterceptorChain(cache2).removeInterceptor(ErrorInducingInterceptor.class);
      }
   }

   /**
    * Registers a failing listener on the remote node, performs a put on the local node and
    * rethrows the unwrapped cause of the resulting {@link RemoteException}.
    */
   private void induceListenerMalfunctioning(boolean throwError, FailureType failureType) throws Throwable {
      Cache<Object, Object> cache1 = cache(0, "replSync");
      Cache<Object, Object> cache2 = cache(1, "replSync");
      ErrorInducingListener listener = new ErrorInducingListener(throwError);
      cache2.addListener(listener);
      try {
         cache1.put(failureType, 1);
      } catch (RemoteException e) {
         Throwable cause = e.getCause(); // get the exception behind the remote one
         if (throwError && cause.getCause() instanceof InvocationTargetException)
            throw cause.getCause().getCause();
         else
            throw cause.getCause();
      } finally {
         cache2.removeListener(listener);
      }
   }

   /** Listener that throws from the pre-create event when the key is one of the LISTENER failure types. */
   @Listener
   public static class ErrorInducingListener {
      final boolean throwError;

      public ErrorInducingListener(boolean throwError) {
         this.throwError = throwError;
      }

      @CacheEntryCreated
      public void entryCreated(CacheEntryEvent event) throws Exception {
         if (event.isPre() && shouldFail(event)) {
            if (throwError)
               throw new NoClassDefFoundError("Simulated error...");
            else
               throw new ArrayStoreException("A failure...");
         }
      }

      private boolean shouldFail(CacheEntryEvent event) {
         Object key = event.getKey();
         return key == FailureType.EXCEPTION_FROM_LISTENER
               || key == FailureType.ERROR_FROM_LISTENER;
      }
   }

   /** Interceptor that throws on puts whose key is one of the INTERCEPTOR failure types. */
   static class ErrorInducingInterceptor extends DDAsyncInterceptor {
      @Override
      public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
         Object k = command.getKey();
         if (k == FailureType.EXCEPTION_FROM_INTERCEPTOR)
            throw new EmptyStackException();
         else if (k == FailureType.ERROR_FROM_INTERCEPTOR)
            throw new ClassCircularityError();
         else
            return super.visitPutKeyValueCommand(ctx, command);
      }
   }

   @AutoProtoSchemaBuilder(
         includeClasses = {
               BrokenMarshallingPojo.class,
               FailureType.class
         },
         schemaFileName = "test.core.TransportSenderExceptionHandlingTest.proto",
         schemaFilePath = "proto/generated",
         schemaPackageName = "org.infinispan.test.core.TransportSenderExceptionHandlingTest",
         service = false
   )
   interface FailureTypeSCI extends SerializationContextInitializer {
      FailureTypeSCI INSTANCE = new FailureTypeSCIImpl();
   }
}
| 6,049
| 39.066225
| 141
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/MessageSentToLeaverTest.java
|
package org.infinispan.remoting;
import java.util.Collection;
import java.util.Map;
import org.infinispan.Cache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* Test that CommandAwareRpcManager detects members who left the cluster and throws an exception.
*
* @author Dan Berindei <dan@infinispan.org>
*/
@Test (testName = "remoting.MessageSentToLeaverTest", groups = "functional")
public class MessageSentToLeaverTest extends AbstractInfinispanTest {

   public void testGroupRequestSentToMemberAfterLeaving() {
      EmbeddedCacheManager cm1 = null, cm2 = null, cm3 = null;
      try {
         ConfigurationBuilder c = new ConfigurationBuilder();
         c
               .clustering().cacheMode(CacheMode.REPL_SYNC)
               .hash().numOwners(3);
         cm1 = TestCacheManagerFactory.createClusteredCacheManager(c);
         cm2 = TestCacheManagerFactory.createClusteredCacheManager(c);
         cm3 = TestCacheManagerFactory.createClusteredCacheManager(c);
         Cache<Object,Object> c1 = cm1.getCache();
         Cache<Object, Object> c2 = cm2.getCache();
         Cache<Object, Object> c3 = cm3.getCache();
         TestingUtil.blockUntilViewsReceived(30000, c1, c2, c3);
         c2.put("k", "v1");
         RpcManager rpcManager = TestingUtil.extractComponent(c1, RpcManager.class);
         // snapshot of the current members; becomes stale once cm2 is killed below
         Collection<Address> addresses = cm1.getMembers();
         CommandsFactory cf = TestingUtil.extractCommandsFactory(c1);
         PutKeyValueCommand cmd = cf.buildPutKeyValueCommand("k", "v2", 0,
               new EmbeddedMetadata.Builder().build(), EnumUtil.EMPTY_BIT_SET);
         RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
         cmd.setTopologyId(rpcManager.getTopologyId());
         // first invocation targets all live members and must succeed
         Map<Address,Response> responseMap = rpcManager.blocking(rpcManager.invokeCommand(addresses, cmd, MapResponseCollector.validOnly(), rpcOptions));
         assert responseMap.size() == 2;
         TestingUtil.killCacheManagers(cm2);
         TestingUtil.blockUntilViewsReceived(30000, false, c1, c3);
         try {
            // the stale address list still contains the leaver -> the invocation must fail
            rpcManager.blocking(rpcManager.invokeCommand(addresses, cmd, MapResponseCollector.validOnly(), rpcOptions));
            assert false: "invokeRemotely should have thrown an exception";
         } catch (SuspectException e) {
            // expected
         }
      } finally {
         // clean up all managers (cm2 was already killed above)
         TestingUtil.killCacheManagers(cm1, cm2, cm3);
      }
   }
}
| 3,252
| 39.6625
| 153
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/FailureType.java
|
package org.infinispan.remoting;
import org.infinispan.protostream.annotations.ProtoEnumValue;
/**
 * Identifies which component should fail (a listener or an interceptor) and whether it throws
 * an {@code Exception} or an {@code Error}. Used as a cache key by
 * {@code TransportSenderExceptionHandlingTest} to trigger failures on the remote node.
 */
public enum FailureType {
   @ProtoEnumValue(number = 0)
   EXCEPTION_FROM_LISTENER,
   @ProtoEnumValue(number = 1)
   ERROR_FROM_LISTENER,
   @ProtoEnumValue(number = 2)
   EXCEPTION_FROM_INTERCEPTOR,
   @ProtoEnumValue(number = 3)
   ERROR_FROM_INTERCEPTOR
}
| 361
| 19.111111
| 61
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/NonExistentCacheTest.java
|
package org.infinispan.remoting;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
@Test (testName = "remoting.NonExistentCacheTest", groups = "functional")
public class NonExistentCacheTest extends AbstractInfinispanTest {

   /**
    * Creates a clustered, non-transactional REPL_SYNC cache manager for this test.
    */
   private EmbeddedCacheManager createCacheManager() {
      GlobalConfigurationBuilder global = GlobalConfigurationBuilder.defaultClusteredBuilder();
      ConfigurationBuilder cacheConfig = TestCacheManagerFactory.getDefaultCacheConfiguration(false);
      cacheConfig.clustering().cacheMode(CacheMode.REPL_SYNC)
            .transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
      return TestCacheManagerFactory.createClusteredCacheManager(global, cacheConfig);
   }

   public void testPutWithNonExistentCache() {
      EmbeddedCacheManager managerA = null;
      EmbeddedCacheManager managerB = null;
      try {
         managerA = createCacheManager();
         managerB = createCacheManager();
         managerA.getCache();
         managerB.getCache();
         // write through one node, read back from both
         managerA.getCache().put("k", "v");
         assertEquals("v", managerA.getCache().get("k"));
         assertEquals("v", managerB.getCache().get("k"));
         // define a cache only on the first node and use it there
         managerA.defineConfiguration("newCache", managerA.getDefaultCacheConfiguration());
         managerA.getCache("newCache").put("k", "v");
         assertEquals("v", managerA.getCache("newCache").get("k"));
      } finally {
         TestingUtil.killCacheManagers(managerA, managerB);
      }
   }
}
| 1,815
| 34.607843
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/jgroups/NonExistingJGroupsConfigTest.java
|
package org.infinispan.remoting.jgroups;
import java.io.ByteArrayInputStream;
import java.io.FileNotFoundException;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ParserRegistry;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.EmbeddedCacheManagerStartupException;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(testName = "remoting.jgroups.NonExistingJGroupsConfigTest", groups = "functional")
public class NonExistingJGroupsConfigTest extends AbstractInfinispanTest {

   /**
    * Builds the XML configuration shared by both tests; only the JGroups stack-file path differs.
    */
   private static String configWithStackFile(String path) {
      return "<infinispan>\n" +
            "<jgroups>\n" +
            " <stack-file name=\"dummy\" path=\"" + path + "\"/>\n" +
            "</jgroups>\n" +
            "<cache-container default-cache=\"default\">" +
            " <transport stack=\"dummy\" cluster=\"demoCluster\" />\n" +
            " <replicated-cache name=\"default\" />\n" +
            "</cache-container>\n" +
            "</infinispan>";
   }

   /**
    * A stack file that cannot be resolved must fail parsing with ISPN000365.
    */
   public void channelLookupTest() {
      String config = configWithStackFile("nosuchfile.xml");
      Exceptions.expectException(CacheConfigurationException.class,
            "ISPN000365:.*", () -> {
               EmbeddedCacheManager cm = null;
               try {
                  // the resource resolver always fails, simulating a missing file
                  ConfigurationBuilderHolder cbh = new ParserRegistry().parse(
                        new ByteArrayInputStream(config.getBytes()), Void -> {
                           throw new FileNotFoundException();
                        }, MediaType.APPLICATION_XML);
                  cm = new DefaultCacheManager(cbh, true);
               } finally {
                  TestingUtil.killCacheManagers(cm);
               }
            });
   }

   /**
    * A stack file that exists but is invalid must fail startup with ISPN000541.
    */
   public void brokenJGroupsConfigTest() {
      String config = configWithStackFile("stacks/broken-tcp.xml");
      Exceptions.expectException(EmbeddedCacheManagerStartupException.class,
            CacheConfigurationException.class,
            "ISPN000541:.*", () -> {
               EmbeddedCacheManager cm = null;
               try {
                  cm = new DefaultCacheManager(new ByteArrayInputStream(config.getBytes()));
               } finally {
                  TestingUtil.killCacheManagers(cm);
               }
            });
   }
}
| 2,911
| 41.202899
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/jgroups/ChannelLookupTest.java
|
package org.infinispan.remoting.jgroups;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.isA;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertSame;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collections;
import java.util.Properties;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.jgroups.JGroupsChannelLookup;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.jgroups.Address;
import org.jgroups.Event;
import org.jgroups.JChannel;
import org.jgroups.View;
import org.jgroups.protocols.UDP;
import org.jgroups.stack.ProtocolStack;
import org.jgroups.util.UUID;
import org.testng.annotations.Test;
@Test(testName = "remoting.jgroups.ChannelLookupTest", groups = "functional")
/**
 * Verifies that a JChannel obtained through a {@code JGroupsChannelLookup} (the {@code channelLookup}
 * transport property) is the one actually used by the {@code JGroupsTransport}.
 */
public class ChannelLookupTest extends AbstractInfinispanTest {
   // static so the DummyLookup instance created by reflection can return it
   static JChannel mockChannel = mock(JChannel.class);
   private ProtocolStack ps = mock(ProtocolStack.class);
   private Address a = new UUID(1, 1);
   private View v = new View(a, 1, Collections.singletonList(a));

   public void channelLookupTest() {
      // stub just enough of the channel for the transport to start
      when(mockChannel.getAddress()).thenReturn(a);
      when(mockChannel.down(isA(Event.class))).thenReturn(a);
      when(mockChannel.getView()).thenReturn(v);
      when(mockChannel.getProtocolStack()).thenReturn(ps);
      UDP mockUDPTransport = mock(UDP.class);
      when(mockUDPTransport.registerProbeHandler(any())).thenReturn(mockUDPTransport);
      when(ps.getTransport()).thenReturn(mockUDPTransport);
      EmbeddedCacheManager cm = null;
      try {
         GlobalConfigurationBuilder gc = GlobalConfigurationBuilder.defaultClusteredBuilder();
         gc.transport().defaultTransport().addProperty("channelLookup", DummyLookup.class.getName());
         cm = TestCacheManagerFactory.createClusteredCacheManager(gc, new ConfigurationBuilder());
         cm.start();
         cm.getCache();
         GlobalComponentRegistry gcr = TestingUtil.extractGlobalComponentRegistry(cm);
         Transport t = gcr.getComponent(Transport.class);
         assertNotNull(t);
         assertTrue(t instanceof JGroupsTransport);
         // the transport must use the exact channel instance produced by the lookup
         assertSame(mockChannel, ((JGroupsTransport) t).getChannel());
      } finally {
         TestingUtil.killCacheManagers(cm);
      }
   }

   /**
    * Lookup that hands out the mocked channel and tells the transport not to
    * connect/disconnect/close it (the mock cannot perform real channel operations).
    */
   public static class DummyLookup implements JGroupsChannelLookup {

      public DummyLookup() {
      }

      @Override
      public JChannel getJGroupsChannel(Properties p) {
         return mockChannel;
      }

      @Override
      public boolean shouldConnect() {
         return false;
      }

      @Override
      public boolean shouldDisconnect() {
         return false;
      }

      @Override
      public boolean shouldClose() {
         return false;
      }
   }
}
| 3,323
| 33.989474
| 101
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/jgroups/MissingUpHandlerTest.java
|
package org.infinispan.remoting.jgroups;
import static org.testng.AssertJUnit.assertEquals;
import java.io.ByteArrayInputStream;
import java.util.Properties;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.jgroups.JChannel;
import org.testng.annotations.Test;
/**
* When the JGroups channel is started externally and injected via {@code ChannelLookup},
* there is a small window where incoming messages will be silently discarded:
* between the time the channel is started externally and the time JGroupsTransport attaches the UpHandler.
*
* In replication mode a put operation would wait for a success response from all the members
* of the cluster, and if the RPC was initiated during this time window it would never get a response.
*
* This test checks that the caller doesn't get a (@code TimeoutException} waiting for a response.
*
* @author Dan Berindei <dan@infinispan.org>
* @since 5.1
*/
@Test(groups = "functional", testName = "remoting.jgroups.MissingUpHandlerTest")
@CleanupAfterMethod
public class MissingUpHandlerTest extends MultipleCacheManagersTest {
   protected String cacheName = "replSync";
   protected CacheMode cacheMode = CacheMode.REPL_SYNC;

   @Override
   protected void createCacheManagers() throws Exception {
      ConfigurationBuilder c = getDefaultClusteredCacheConfig(cacheMode, false);
      c.clustering().stateTransfer().fetchInMemoryState(true);
      createClusteredCaches(1, cacheName, c);
   }

   /**
    * Joins a bare JGroups channel (no Infinispan UpHandler) to the cluster and verifies that
    * cache operations still complete and that a later, properly-started member joins cleanly.
    */
   public void testExtraChannelWithoutRpcDispatcher() throws Exception {
      // start with a single cache
      Cache<String, String> cache1 = cache(0, cacheName);
      cache1.put("k1", "v1");
      assertEquals("v1", cache1.get("k1"));
      // create a new jgroups channel that will join the cluster
      // but without attaching the Infinispan UpHandler
      try (JChannel channel2 = createJGroupsChannel(manager(0).getCacheManagerConfiguration())) {
         // try the put operation again; it must not time out waiting on the handler-less member
         cache1.put("k2", "v2");
         assertEquals("v2", cache1.get("k2"));
         // create a new cache, make sure it joins properly
         ConfigurationBuilder c = getDefaultClusteredCacheConfig(cacheMode, false);
         c.clustering().stateTransfer().fetchInMemoryState(true);
         EmbeddedCacheManager cm = addClusterEnabledCacheManager(new TransportFlags());
         cm.defineConfiguration(cacheName, c.build());
         Cache<String, String> cache2 = cm.getCache(cacheName);
         assertEquals(2, cache2.getAdvancedCache().getRpcManager().getTransport().getMembers().size());
         assertEquals("v1", cache1.get("k1"));
         assertEquals("v2", cache1.get("k2"));
         // writes from both members replicate in both directions
         cache1.put("k1", "v1_2");
         cache2.put("k2", "v2_2");
         assertEquals("v1_2", cache1.get("k1"));
         assertEquals("v2_2", cache1.get("k2"));
      }
   }

   /**
    * Builds and connects a raw JChannel from the manager's transport configuration,
    * without attaching any UpHandler.
    */
   private JChannel createJGroupsChannel(GlobalConfiguration oldGC) {
      GlobalConfigurationBuilder builder = new GlobalConfigurationBuilder().read(oldGC);
      TestCacheManagerFactory.amendTransport(builder);
      GlobalConfiguration gc = builder.build();
      Properties p = gc.transport().properties();
      String jgroupsCfg = p.getProperty(JGroupsTransport.CONFIGURATION_STRING);
      try {
         JChannel channel = new JChannel(new ByteArrayInputStream(jgroupsCfg.getBytes()));
         channel.setName(gc.transport().nodeName());
         channel.connect(gc.transport().clusterName());
         return channel;
      } catch (Exception e) {
         throw new CacheException(e);
      }
   }
}
| 4,167
| 42.873684
| 107
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/inboundhandler/BlockingInboundInvocationHandler.java
|
package org.infinispan.remoting.inboundhandler;
import static org.infinispan.test.TestingUtil.wrapGlobalComponent;
import java.util.function.Predicate;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.NotifierLatch;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.xsite.XSiteReplicateCommand;
/**
 * An {@link InboundInvocationHandler} decorator used by tests: commands matching a
 * configurable predicate are parked on a {@link NotifierLatch} (off the JGroups thread,
 * via {@link BlockingManager}) until {@link #stopBlocking()} is called. All other
 * traffic is forwarded to the wrapped handler untouched.
 */
@Scope(Scopes.GLOBAL)
public class BlockingInboundInvocationHandler implements InboundInvocationHandler {
   private final Address address;
   private final NotifierLatch latch;
   private final InboundInvocationHandler delegate;
   @Inject BlockingManager blockingManager;
   private volatile Predicate<ReplicableCommand> predicate;

   /**
    * Replaces the global {@link InboundInvocationHandler} of the given manager with a
    * blocking wrapper and returns it.
    */
   public static BlockingInboundInvocationHandler replace(EmbeddedCacheManager manager) {
      return wrapGlobalComponent(manager, InboundInvocationHandler.class,
            iih -> new BlockingInboundInvocationHandler(iih, manager.getAddress()), true);
   }

   public BlockingInboundInvocationHandler(InboundInvocationHandler delegate, Address address) {
      this.delegate = delegate;
      this.address = address;
      latch = new NotifierLatch(toString());
   }

   @Override
   public void handleFromCluster(Address origin, ReplicableCommand command,
                                 Reply reply, DeliverOrder order) {
      if (shouldBlock(command)) {
         // park matching commands on a blocking thread; delivery resumes when the latch opens
         blockingManager.runBlocking(() -> {
            latch.blockIfNeeded();
            delegate.handleFromCluster(origin, command, reply, order);
         }, "blocking-inbound-handler");
      } else {
         delegate.handleFromCluster(origin, command, reply, order);
      }
   }

   // Snapshot the volatile predicate once so the null-check and test see the same value.
   private boolean shouldBlock(ReplicableCommand command) {
      Predicate<ReplicableCommand> current = predicate;
      return current != null && current.test(command);
   }

   @Override
   public void handleFromRemoteSite(String origin, XSiteReplicateCommand<?> command,
                                    Reply reply, DeliverOrder order) {
      // cross-site traffic is never blocked
      delegate.handleFromRemoteSite(origin, command, reply, order);
   }

   public NotifierLatch latch() {
      return latch;
   }

   /**
    * Blocks commands of the given class that also satisfy {@code predicate}.
    */
   public <T extends ReplicableCommand> void blockBefore(Class<T> commandClass, Predicate<T> predicate) {
      this.predicate = cmd -> commandClass.isInstance(cmd) && predicate.test(commandClass.cast(cmd));
      latch.startBlocking();
   }

   /**
    * Blocks every command of the given class.
    */
   public void blockBefore(Class<? extends ReplicableCommand> commandClass) {
      this.predicate = commandClass::isInstance;
      latch.startBlocking();
   }

   /**
    * Releases the latch so parked commands proceed; the predicate is left in place.
    */
   public void stopBlocking() {
      latch.stopBlocking();
   }

   @Override
   public String toString() {
      return "BlockingInboundInvocationHandler@" + address;
   }
}
| 2,872
| 35.367089
| 111
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/CustomReplicableCommand.java
|
package org.infinispan.remoting.rpc;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.Visitor;
import org.infinispan.context.InvocationContext;
/**
 * A test command that echoes its single argument back to the caller, or — when the
 * argument is itself a {@link Throwable} — rethrows it on the invoking node.
 *
 * @author anistor@redhat.com
 * @since 5.3
 */
public class CustomReplicableCommand implements VisitableCommand, Serializable {

   public static final byte COMMAND_ID = 127;
   private static final long serialVersionUID = -1L;

   // the payload to echo back, or a Throwable to rethrow on invoke()
   private Object arg;

   public CustomReplicableCommand() {
      // For command id uniqueness test
   }

   public CustomReplicableCommand(Object arg) {
      this.arg = arg;
   }

   /**
    * Rethrows {@code arg} when it is a {@link Throwable}; otherwise returns it unchanged.
    */
   @Override
   public Object invoke() throws Throwable {
      Object value = arg;
      if (value instanceof Throwable) {
         throw (Throwable) value;
      }
      return value;
   }

   @Override
   public byte getCommandId() {
      return COMMAND_ID;
   }

   @Override
   public void writeTo(ObjectOutput output) throws IOException {
      output.writeObject(arg);
   }

   @Override
   public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
      arg = input.readObject();
   }

   @Override
   public boolean isReturnValueExpected() {
      // the caller always waits for the echoed value (or the rethrown exception)
      return true;
   }

   @Override
   public Object acceptVisitor(InvocationContext ctx, Visitor visitor) throws Throwable {
      return visitor.visitUnknownCommand(ctx, this);
   }

   @Override
   public LoadType loadType() {
      throw new UnsupportedOperationException();
   }
}
| 1,589
| 20.780822
| 89
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/RpcManagerCustomCacheRpcCommandTest.java
|
package org.infinispan.remoting.rpc;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.util.ByteString;
import org.testng.annotations.Test;
/**
 * Runs the {@link RpcManagerCustomReplicableCommandTest} scenarios against a
 * cache-scoped command ({@link CustomCacheRpcCommand}) instead of a plain
 * replicable command.
 *
 * @author anistor@redhat.com
 * @since 5.3
 */
@Test(testName = "remoting.rpc.RpcManagerCustomCacheRpcCommandTest", groups = "functional")
public class RpcManagerCustomCacheRpcCommandTest extends RpcManagerCustomReplicableCommandTest {

   @Override
   protected ReplicableCommand createReplicableCommandForTest(Object arg) {
      ByteString cacheName = ByteString.fromString(TEST_CACHE);
      return new CustomCacheRpcCommand(cacheName, arg);
   }
}
| 582
| 29.684211
| 96
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/RpcManagerCustomReplicableCommandTest.java
|
package org.infinispan.remoting.rpc;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.Map;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
/**
 * Verifies that {@code RpcManager} propagates both successful return values and remote
 * exceptions for custom replicable commands.
 *
 * @author William Burns
 * @author anistor@redhat.com
 * @since 5.3
 */
@Test(testName = "remoting.rpc.RpcManagerCustomReplicableCommandTest", groups = "functional")
public class RpcManagerCustomReplicableCommandTest extends MultipleCacheManagersTest {

   protected static final String TEST_CACHE = "testCache";
   protected static final String EXPECTED_RETURN_VALUE = "the-return-value";

   @Override
   protected void createCacheManagers() throws Throwable {
      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
      createClusteredCaches(2, TEST_CACHE, builder);
   }

   /**
    * Overridden by subclasses to exercise other command types with the same scenarios.
    */
   protected ReplicableCommand createReplicableCommandForTest(Object arg) {
      return new CustomReplicableCommand(arg);
   }

   /**
    * Test to make sure that invokeRemotely returns the result from the remote side.
    */
   public void testInvokeRemotely() {
      assertSuccessfulInvocation();
   }

   /**
    * Test to make sure that invokeRemotely results in a RemoteException.
    */
   public void testInvokeRemotelyWithRemoteException() {
      assertRemoteExceptionPropagated();
   }

   /**
    * Test to make sure that invokeRemotely with a ResponseMode argument returns the result from the remote side.
    */
   public void testInvokeRemotelyWithResponseMode() {
      assertSuccessfulInvocation();
   }

   /**
    * Test to make sure that invokeRemotely with a ResponseMode argument results in a RemoteException.
    */
   public void testInvokeRemotelyWithResponseModeWithRemoteException() {
      assertRemoteExceptionPropagated();
   }

   // Invokes an echo command on the cluster and asserts the single remote response
   // carries the expected value. Uses TEST_CACHE (was a hard-coded "testCache" literal).
   private void assertSuccessfulInvocation() {
      RpcManager rpcManager = cache(0, TEST_CACHE).getAdvancedCache().getRpcManager();
      ReplicableCommand command = createReplicableCommandForTest(EXPECTED_RETURN_VALUE);
      Map<Address, Response> remoteResponses = invoke(rpcManager, command);
      log.tracef("Responses were: %s", remoteResponses);
      // two-node cluster: exactly one remote responder
      assertEquals(1, remoteResponses.size());
      Response response = remoteResponses.values().iterator().next();
      assertNotNull(response);
      assertTrue(response.isValid());
      assertTrue(response.isSuccessful());
      assertTrue(response instanceof SuccessfulResponse);
      Object value = ((SuccessfulResponse) response).getResponseValue();
      assertEquals(EXPECTED_RETURN_VALUE, value);
   }

   // Invokes a command whose payload is a Throwable and asserts the remote failure
   // surfaces locally as a RemoteException wrapping the original cause.
   private void assertRemoteExceptionPropagated() {
      RpcManager rpcManager = cache(0, TEST_CACHE).getAdvancedCache().getRpcManager();
      ReplicableCommand command = createReplicableCommandForTest(new IllegalArgumentException("exception!"));
      try {
         invoke(rpcManager, command);
         fail("Expected RemoteException not thrown");
      } catch (RemoteException e) {
         assertTrue(e.getCause() instanceof IllegalArgumentException);
         assertEquals("exception!", e.getCause().getMessage());
      } catch (Exception ex) {
         fail("Expected exception not thrown but instead we got : " + ex);
      }
   }

   // Synchronous broadcast of the command to all members, ignoring leavers.
   private Map<Address, Response> invoke(RpcManager rpcManager, ReplicableCommand command) {
      return rpcManager.blocking(
            rpcManager.invokeCommandOnAll(command, MapResponseCollector.ignoreLeavers(), rpcManager.getSyncRpcOptions())
      );
   }
}
| 5,118
| 40.282258
| 120
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.