infinispan-main/core/src/test/java/org/infinispan/stream/ReplicatedStreamIteratorEvictionTest.java

package org.infinispan.stream;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* Eviction test to verify that streams work properly when using a replicated cache
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.ReplicatedStreamIteratorEvictionTest")
public class ReplicatedStreamIteratorEvictionTest extends BaseStreamIteratorEvictionTest {
public ReplicatedStreamIteratorEvictionTest() {
super(false, CacheMode.REPL_SYNC);
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/BaseClusteredStreamIteratorTest.java

package org.infinispan.stream;
import static org.testng.Assert.assertEquals;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.distribution.MagicKey;
import org.testng.annotations.Test;
/**
* Base class for stream iterator tests that are run on clustered caches
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.BaseClusteredStreamIteratorTest")
public abstract class BaseClusteredStreamIteratorTest extends BaseStreamIteratorTest {
public BaseClusteredStreamIteratorTest(boolean tx, CacheMode mode) {
super(tx, mode);
}
protected Map<Object, String> putValuesInCache() {
return putValueInEachCache(3);
}
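// Inserts one entry into each of the first 'cacheNumber' caches, using a key
// tied to that cache (via getKeyTiedToCache), and returns the inserted
// key/value pairs in insertion order.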
protected Map<Object, String> putValueInEachCache(int cacheNumber) {
// This is linked to keep insertion order
Map<Object, String> valuesInserted = new LinkedHashMap<Object, String>();
for (int i = 0; i < cacheNumber; ++i) {
Cache<Object, String> cache = cache(i, CACHE_NAME);
Object key = getKeyTiedToCache(cache);
cache.put(key, key.toString());
valuesInserted.put(key, key.toString());
}
return valuesInserted;
}
@Test
public void simpleTestIteratorFromOtherNode() {
Map<Object, String> values = putValuesInCache();
Cache<MagicKey, String> cache = cache(1, CACHE_NAME);
Iterator<CacheEntry<MagicKey, String>> iterator = cache.getAdvancedCache().cacheEntrySet().stream().iterator();
Map<MagicKey, String> results = mapFromIterator(iterator);
assertEquals(values, results);
}
@Test
public void simpleTestRemoteFilter() {
Map<Object, String> values = putValuesInCache();
Iterator<Map.Entry<Object, String>> iter = values.entrySet().iterator();
Object excludedKey = iter.next().getKey();
// Remove it so comparison below will be correct
iter.remove();
Cache<MagicKey, String> cache = cache(1, CACHE_NAME);
Iterator<CacheEntry<MagicKey, String>> iterator = cache.getAdvancedCache().cacheEntrySet().stream()
.filter(entry -> !Objects.equals(excludedKey, entry.getKey()))
.iterator();
Map<MagicKey, String> results = mapFromIterator(iterator);
assertEquals(results, values);
}
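// mapFromIterator is inherited from BaseStreamIteratorTest, which is not part
// of this excerpt. A minimal, hypothetical sketch of what such a helper might
// look like (assuming java.util.HashMap is imported):
// protected <K, V> Map<K, V> mapFromIterator(Iterator<? extends Map.Entry<K, V>> iterator) {
//    Map<K, V> map = new HashMap<>();
//    while (iterator.hasNext()) {
//       Map.Entry<K, V> entry = iterator.next();
//       map.put(entry.getKey(), entry.getValue());
//    }
//    return map;
// }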
}

infinispan-main/core/src/test/java/org/infinispan/stream/ReplicatedStreamIteratorTest.java

package org.infinispan.stream;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.MagicKey;
import org.testng.annotations.Test;
/**
* Test to verify stream behavior for a replicated cache
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.ReplicatedStreamIteratorTest")
public class ReplicatedStreamIteratorTest extends BaseClusteredStreamIteratorTest {
public ReplicatedStreamIteratorTest() {
super(false, CacheMode.REPL_SYNC);
}
@Override
protected Object getKeyTiedToCache(Cache<?, ?> cache) {
return new MagicKey(cache);
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamTest.java

package org.infinispan.stream;
import org.infinispan.CacheCollection;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* Verifies that stream tests work on a regular distributed stream
*/
@Test(groups = "functional", testName = "streams.DistributedStreamTest")
public class DistributedStreamTest extends BaseStreamTest {
public DistributedStreamTest() {
super(false);
cacheMode(CacheMode.DIST_SYNC);
}
@Override
protected <E> CacheStream<E> createStream(CacheCollection<E> entries) {
// This forces parallel distribution since iterator defaults to sequential
return entries.stream().parallelDistribution();
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamIteratorExceptionTest.java

package org.infinispan.stream;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.fail;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* Test to verify stream iterator exception propagation behavior for a distributed cache.
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.DistributedStreamIteratorExceptionTest")
public class DistributedStreamIteratorExceptionTest extends BaseSetupStreamIteratorTest {
protected DistributedStreamIteratorExceptionTest(CacheMode cacheMode) {
super(false, cacheMode);
}
public DistributedStreamIteratorExceptionTest() {
this(CacheMode.DIST_SYNC);
}
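// Builds an InternalDataContainer mock whose publisher(segment) invocation
// throws the given Throwable, simulating a container failure during iteration.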
protected InternalDataContainer mockContainer(Throwable t) {
return when(mock(InternalDataContainer.class).publisher(anyInt())).thenThrow(t).getMock();
}
public void ensureDataContainerRemoteExceptionPropagated() {
Cache cache0 = cache(0, CACHE_NAME);
Cache cache1 = cache(1, CACHE_NAME);
// Extract the real one so it can be restored afterwards
InternalDataContainer dataContainer = TestingUtil.extractComponent(cache1, InternalDataContainer.class);
try {
Throwable t = new AssertionError();
InternalDataContainer mockContainer = mockContainer(t);
TestingUtil.replaceComponent(cache1, InternalDataContainer.class, mockContainer, true);
try {
cache0.entrySet().stream().iterator().hasNext();
fail("We should have gotten a CacheException");
} catch (CacheException e) {
Throwable cause = e;
while ((cause = cause.getCause()) != null) {
if (t.getClass().isInstance(cause)) {
break;
}
}
assertNotNull("We should have found the throwable as a cause", cause);
}
} finally {
TestingUtil.replaceComponent(cache1, InternalDataContainer.class, dataContainer, true);
}
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamIteratorWithPassivationTest.java

package org.infinispan.stream;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.distribution.MagicKey;
import org.infinispan.eviction.impl.PassivationManager;
import org.infinispan.filter.CacheFilters;
import org.infinispan.filter.KeyValueFilter;
import org.infinispan.marshall.TestObjectStreamMarshaller;
import org.infinispan.marshall.persistence.impl.MarshalledEntryUtil;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.util.concurrent.CompletionStages;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
/**
* Test to verify distributed stream behavior when a loader with passivation is present
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.DistributedStreamIteratorWithPassivationTest")
public class DistributedStreamIteratorWithPassivationTest extends BaseSetupStreamIteratorTest {
public DistributedStreamIteratorWithPassivationTest() {
this(false, CacheMode.DIST_SYNC);
}
protected DistributedStreamIteratorWithPassivationTest(boolean tx, CacheMode mode) {
super(tx, mode);
}
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
builder.clustering().hash().numOwners(1);
builder.persistence().passivation(true).addStore(DummyInMemoryStoreConfigurationBuilder.class).storeName(getTestName());
}
@Test(enabled = false, description = "This requires supporting concurrent activation in cache loader interceptor")
public void testConcurrentActivation() throws InterruptedException, ExecutionException, TimeoutException {
final Cache<MagicKey, String> cache0 = cache(0, CACHE_NAME);
Cache<MagicKey, String> cache1 = cache(1, CACHE_NAME);
Cache<MagicKey, String> cache2 = cache(2, CACHE_NAME);
Map<MagicKey, String> originalValues = new HashMap<>();
originalValues.put(new MagicKey(cache0), "cache0");
originalValues.put(new MagicKey(cache1), "cache1");
originalValues.put(new MagicKey(cache2), "cache2");
final MagicKey loaderKey = new MagicKey(cache0);
final String loaderValue = "loader0";
cache0.putAll(originalValues);
// Put this in after the cache has been updated
originalValues.put(loaderKey, loaderValue);
PersistenceManager persistenceManager = TestingUtil.extractComponent(cache0, PersistenceManager.class);
DummyInMemoryStore store = persistenceManager.getStores(DummyInMemoryStore.class).iterator().next();
TestObjectStreamMarshaller sm = new TestObjectStreamMarshaller();
PersistenceManager pm = null;
try {
store.write(MarshalledEntryUtil.create(loaderKey, loaderValue, sm));
final CheckPoint checkPoint = new CheckPoint();
pm = waitUntilAboutToProcessStoreTask(cache0, checkPoint);
Future<Void> future = fork(() -> {
// Wait until loader is invoked
checkPoint.awaitStrict("pre_process_on_all_stores_invoked", 10, TimeUnit.SECONDS);
// Now force the entry to be moved into memory
assertEquals(loaderValue, cache0.get(loaderKey));
checkPoint.triggerForever("pre_process_on_all_stores_released");
return null;
});
Iterator<Map.Entry<MagicKey, String>> iterator = cache0.entrySet().stream().iterator();
// we need this count since the map would overwrite values with the same key
int count = 0;
Map<MagicKey, String> results = new HashMap<>();
while (iterator.hasNext()) {
Map.Entry<MagicKey, String> entry = iterator.next();
results.put(entry.getKey(), entry.getValue());
count++;
}
assertEquals(count, 4);
assertEquals(originalValues, results);
future.get(10, TimeUnit.SECONDS);
} finally {
if (pm != null) {
TestingUtil.replaceComponent(cache0, PersistenceManager.class, pm, true, true);
}
sm.stop();
}
}
@Test(enabled = false, description = "This requires supporting concurrent activation in cache loader interceptor")
// TODO: this test needs to be redone to take into account filtering as well, after we support activated entries
public void testConcurrentActivationWithFilter() throws InterruptedException, ExecutionException, TimeoutException {
final Cache<MagicKey, String> cache0 = cache(0, CACHE_NAME);
Cache<MagicKey, String> cache1 = cache(1, CACHE_NAME);
Cache<MagicKey, String> cache2 = cache(2, CACHE_NAME);
Map<MagicKey, String> originalValues = new HashMap<>();
originalValues.put(new MagicKey(cache0), "cache0");
originalValues.put(new MagicKey(cache1), "cache1");
originalValues.put(new MagicKey(cache2), "cache2");
final MagicKey loaderKey = new MagicKey(cache0);
final String loaderValue = "loader0";
cache0.putAll(originalValues);
PersistenceManager persistenceManager = TestingUtil.extractComponent(cache0, PersistenceManager.class);
DummyInMemoryStore store = persistenceManager.getStores(DummyInMemoryStore.class).iterator().next();
TestObjectStreamMarshaller sm = new TestObjectStreamMarshaller();
PersistenceManager pm = null;
try {
store.write(MarshalledEntryUtil.create(loaderKey, loaderValue, sm));
final CheckPoint checkPoint = new CheckPoint();
pm = waitUntilAboutToProcessStoreTask(cache0, checkPoint);
Future<Void> future = fork(() -> {
// Wait until loader is invoked
checkPoint.awaitStrict("pre_process_on_all_stores_invoked", 10, TimeUnit.SECONDS);
// Now force the entry to be moved into memory
assertEquals(loaderValue, cache0.get(loaderKey));
checkPoint.triggerForever("pre_process_on_all_stores_released");
return null;
});
KeyValueFilter<MagicKey, String> filter = (Serializable & KeyValueFilter<MagicKey, String>)(k, v, m) -> originalValues.containsKey(k);
Iterator<CacheEntry<MagicKey, String>> iterator = cache0.getAdvancedCache().cacheEntrySet().stream()
.filter(CacheFilters.predicate(filter))
.iterator();
// we need this count since the map would overwrite values with the same key
int count = 0;
Map<MagicKey, String> results = new HashMap<>();
while (iterator.hasNext()) {
Map.Entry<MagicKey, String> entry = iterator.next();
results.put(entry.getKey(), entry.getValue());
count++;
}
// We shouldn't have found the value in the loader
assertEquals(count, 3);
assertEquals(originalValues, results);
future.get(10, TimeUnit.SECONDS);
} finally {
if (pm != null) {
TestingUtil.replaceComponent(cache0, PersistenceManager.class, pm, true, true);
}
sm.stop();
}
}
@Test(enabled = false, description = "This requires supporting concurrent activation in cache loader interceptor")
public void testConcurrentActivationWithConverter() throws InterruptedException, ExecutionException, TimeoutException {
final Cache<MagicKey, String> cache0 = cache(0, CACHE_NAME);
Cache<MagicKey, String> cache1 = cache(1, CACHE_NAME);
Cache<MagicKey, String> cache2 = cache(2, CACHE_NAME);
Map<MagicKey, String> originalValues = new HashMap<>();
originalValues.put(new MagicKey(cache0), "cache0");
originalValues.put(new MagicKey(cache1), "cache1");
originalValues.put(new MagicKey(cache2), "cache2");
final MagicKey loaderKey = new MagicKey(cache0);
final String loaderValue = "loader0";
cache0.putAll(originalValues);
// Put this in after the cache has been updated
originalValues.put(loaderKey, loaderValue);
PersistenceManager persistenceManager = TestingUtil.extractComponent(cache0, PersistenceManager.class);
DummyInMemoryStore store = persistenceManager.getStores(DummyInMemoryStore.class).iterator().next();
TestObjectStreamMarshaller sm = new TestObjectStreamMarshaller();
PersistenceManager pm = null;
try {
store.write(MarshalledEntryUtil.create(loaderKey, loaderValue, sm));
final CheckPoint checkPoint = new CheckPoint();
pm = waitUntilAboutToProcessStoreTask(cache0, checkPoint);
Future<Void> future = fork(() -> {
// Wait until loader is invoked
checkPoint.awaitStrict("pre_process_on_all_stores_invoked", 10, TimeUnit.SECONDS);
// Now force the entry to be moved into memory
assertEquals(loaderValue, cache0.get(loaderKey));
checkPoint.triggerForever("pre_process_on_all_stores_released");
return null;
});
Iterator<CacheEntry<MagicKey, String>> iterator = cache0.getAdvancedCache().cacheEntrySet().stream().map(
CacheFilters.function(new StringTruncator(1, 3))).iterator();
// we need this count since the map would overwrite values with the same key
int count = 0;
Map<MagicKey, String> results = new HashMap<>();
while (iterator.hasNext()) {
Map.Entry<MagicKey, String> entry = iterator.next();
results.put(entry.getKey(), entry.getValue());
count++;
}
// We shouldn't have found the value in the loader
assertEquals(count, 4);
for (Map.Entry<MagicKey, String> entry : originalValues.entrySet()) {
assertEquals(entry.getValue().substring(1, 4), results.get(entry.getKey()));
}
future.get(10, TimeUnit.SECONDS);
} finally {
if (pm != null) {
TestingUtil.replaceComponent(cache0, PersistenceManager.class, pm, true, true);
}
sm.stop();
}
}
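// Replaces the cache's PersistenceManager with a mock that, on publishEntries,
// signals the CheckPoint and waits to be released before delegating to the
// real manager. Returns the original manager so callers can restore it.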
protected PersistenceManager waitUntilAboutToProcessStoreTask(final Cache<?, ?> cache, final CheckPoint checkPoint) {
PersistenceManager pm = TestingUtil.extractComponent(cache, PersistenceManager.class);
final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(pm);
PersistenceManager mockManager = mock(PersistenceManager.class, withSettings().defaultAnswer(forwardedAnswer));
doAnswer(invocation -> {
// Wait for main thread to sync up
checkPoint.trigger("pre_process_on_all_stores_invoked");
// Now wait until main thread lets us through
checkPoint.awaitStrict("pre_process_on_all_stores_released", 10, TimeUnit.SECONDS);
return forwardedAnswer.answer(invocation);
}).when(mockManager).publishEntries(any(), any(), anyBoolean(), anyBoolean(), any());
TestingUtil.replaceComponent(cache, PersistenceManager.class, mockManager, true);
return pm;
}
/**
* This test verifies that, if a concurrent passivation occurs while switching between the data container and loader(s),
* we don't return the same key/value twice
*/
public void testConcurrentPassivation() throws InterruptedException, ExecutionException, TimeoutException {
final Cache<MagicKey, String> cache0 = cache(0, CACHE_NAME);
Cache<MagicKey, String> cache1 = cache(1, CACHE_NAME);
Cache<MagicKey, String> cache2 = cache(2, CACHE_NAME);
Map<MagicKey, String> originalValues = new HashMap<>();
originalValues.put(new MagicKey(cache0), "cache0");
originalValues.put(new MagicKey(cache1), "cache1");
originalValues.put(new MagicKey(cache2), "cache2");
final MagicKey loaderKey = new MagicKey(cache0);
final String loaderValue = "loader0";
// Make sure this is in the cache to begin with
originalValues.put(loaderKey, loaderValue);
cache0.putAll(originalValues);
PersistenceManager pm = null;
try {
final CheckPoint checkPoint = new CheckPoint();
pm = waitUntilAboutToProcessStoreTask(cache0, checkPoint);
Future<Void> future = fork(() -> {
// Wait until loader is invoked
checkPoint.awaitStrict("pre_process_on_all_stores_invoked", 10, TimeUnit.SECONDS);
// Now force the entry to be moved to loader
CompletionStages.join(TestingUtil.extractComponent(cache0, PassivationManager.class).passivateAsync(new ImmortalCacheEntry(loaderKey, loaderValue)));
checkPoint.triggerForever("pre_process_on_all_stores_released");
return null;
});
Iterator<Map.Entry<MagicKey, String>> iterator = cache1.entrySet().stream().iterator();
// collect into a map so we can detect if the same key is returned twice
Map<MagicKey, String> results = new HashMap<>();
while (iterator.hasNext()) {
Map.Entry<MagicKey, String> entry = iterator.next();
String prev = results.put(entry.getKey(), entry.getValue());
assertNull("Entry " + entry + " replaced an existing value of " + prev, prev);
}
assertEquals(originalValues, results);
future.get(10, TimeUnit.SECONDS);
} finally {
if (pm != null) {
TestingUtil.replaceComponent(cache0, PersistenceManager.class, pm, true, true);
}
}
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/SimpleStreamTest.java

package org.infinispan.stream;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
@Test(groups = "functional", testName = "streams.SimpleStreamTest")
public class SimpleStreamTest extends LocalStreamTest {
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
builder.simpleCache(true);
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/InvalidationStreamIteratorTest.java

package org.infinispan.stream;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
/**
* Test to verify stream behavior for an invalidation cache
*
* @author wburns
* @since 7.0
*/
@Test(groups = "functional", testName = "stream.InvalidationStreamIteratorTest")
public class InvalidationStreamIteratorTest extends BaseStreamIteratorTest {
public InvalidationStreamIteratorTest() {
super(false, CacheMode.INVALIDATION_SYNC);
}
@Override
protected void createCacheManagers() throws Throwable {
builderUsed = new ConfigurationBuilder();
builderUsed.clustering().cacheMode(cacheMode);
if (transactional) {
builderUsed.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
}
builderUsed.clustering().hash().numOwners(2);
builderUsed.clustering().stateTransfer().chunkSize(50);
createClusteredCaches(1, CACHE_NAME, sci, builderUsed);
}
protected final AtomicInteger counter = new AtomicInteger();
@Override
protected Object getKeyTiedToCache(Cache<?, ?> cache) {
return cache.toString() + "-" + counter.getAndIncrement();
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamIteratorTest.java

package org.infinispan.stream;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.distribution.MagicKey;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.reactive.publisher.impl.ClusterPublisherManager;
import org.infinispan.reactive.publisher.impl.LocalPublisherManager;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier;
import org.infinispan.reactive.publisher.impl.commands.batch.InitialPublisherCommand;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.Mocks;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.fwk.TransportFlags;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
import io.reactivex.rxjava3.core.Flowable;
/**
* Test to verify distributed stream iterator
*
* @author wburns
* @since 8.0
*/
@Test(groups = {"functional", "smoke"}, testName = "iteration.DistributedStreamIteratorTest")
public class DistributedStreamIteratorTest extends BaseClusteredStreamIteratorTest {
public DistributedStreamIteratorTest() {
this(false, CacheMode.DIST_SYNC);
}
public DistributedStreamIteratorTest(boolean tx, CacheMode cacheMode) {
super(tx, cacheMode);
// This is needed since we kill nodes
cleanup = CleanupPhase.AFTER_METHOD;
}
protected Object getKeyTiedToCache(Cache<?, ?> cache) {
return new MagicKey(cache);
}
@Test
public void testIterationDuringInitialTransfer() throws Exception {
Map<Object, String> values = putValueInEachCache(3);
// Go back to 2 caches, because we assign all 3 segments to the first 3 nodes
// And we need the joiner to request some state in order to block it
killMember(2, CACHE_NAME);
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
CheckPoint checkPoint = new CheckPoint();
checkPoint.triggerForever(Mocks.AFTER_RELEASE);
blockStateTransfer(cache0, checkPoint);
EmbeddedCacheManager joinerManager =
addClusterEnabledCacheManager(sci, new ConfigurationBuilder(), new TransportFlags().withFD(true));
ConfigurationBuilder builderNoAwaitInitialTransfer = new ConfigurationBuilder();
builderNoAwaitInitialTransfer.read(builderUsed.build(), Combine.DEFAULT);
builderNoAwaitInitialTransfer.clustering().stateTransfer().awaitInitialTransfer(false);
joinerManager.defineConfiguration(CACHE_NAME, builderNoAwaitInitialTransfer.build());
Cache<String, String> joinerCache = joinerManager.getCache(CACHE_NAME, true);
// Not required, but it should make the logs clearer
checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);
Set<String> iteratorValues = new HashSet<>();
try {
Iterator<String> iter = joinerCache.entrySet().stream().map(Map.Entry::getValue).iterator();
while (iter.hasNext()) {
String value = iter.next();
iteratorValues.add(value);
}
} finally {
checkPoint.triggerForever(Mocks.BEFORE_RELEASE);
}
for (Map.Entry<Object, String> entry : values.entrySet()) {
assertTrue("Entry wasn't found:" + entry, iteratorValues.contains(entry.getValue()));
}
}
@Test
public void verifyNodeLeavesBeforeGettingData() throws Exception {
Map<Object, String> values = putValueInEachCache(3);
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
CheckPoint checkPoint = new CheckPoint();
checkPoint.triggerForever(Mocks.AFTER_RELEASE);
waitUntilSendingResponse(cache1, checkPoint);
final BlockingQueue<String> returnQueue = new LinkedBlockingQueue<>();
Future<Void> future = fork(() -> {
Iterator<String> iter = cache0.values().stream().iterator();
while (iter.hasNext()) {
String entry = iter.next();
returnQueue.add(entry);
}
return null;
});
// Make sure the thread is waiting for the response
checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);
// Now kill the cache - we should recover
killMember(1, CACHE_NAME);
checkPoint.trigger(Mocks.BEFORE_RELEASE);
future.get(10, TimeUnit.SECONDS);
for (Map.Entry<Object, String> entry : values.entrySet()) {
assertTrue("Entry wasn't found:" + entry, returnQueue.contains(entry.getValue()));
}
}
/**
* This test is to verify proper behavior when a node dies after sending a batch to the requestor
*/
@Test
public void verifyNodeLeavesAfterSendingBackSomeData() throws TimeoutException, InterruptedException, ExecutionException {
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
Map<Object, String> values = new HashMap<>();
int chunkSize = cache0.getCacheConfiguration().clustering().stateTransfer().chunkSize();
// Now insert 2 more values than the chunk size into the node we will kill
for (int i = 0; i < chunkSize + 2; ++i) {
MagicKey key = new MagicKey(cache1);
cache1.put(key, key.toString());
values.put(key, key.toString());
}
CheckPoint checkPoint = new CheckPoint();
// Let the first request come through fine
checkPoint.trigger(Mocks.BEFORE_RELEASE);
waitUntilSendingResponse(cache1, checkPoint);
final BlockingQueue<Map.Entry<Object, String>> returnQueue = new LinkedBlockingQueue<>();
Future<Void> future = fork(() -> {
Iterator<Map.Entry<Object, String>> iter = cache0.entrySet().stream().iterator();
while (iter.hasNext()) {
Map.Entry<Object, String> entry = iter.next();
returnQueue.add(entry);
}
return null;
});
// Now wait for them to send back first results
checkPoint.awaitStrict(Mocks.AFTER_INVOCATION, 10, TimeUnit.SECONDS);
checkPoint.trigger(Mocks.AFTER_RELEASE);
// We should get a value now, note all values are currently residing on cache1 as primary
Map.Entry<Object, String> value = returnQueue.poll(10, TimeUnit.SECONDS);
// Now kill the cache - we should recover
killMember(1, CACHE_NAME);
future.get(10, TimeUnit.SECONDS);
for (Map.Entry<Object, String> entry : values.entrySet()) {
assertTrue("Entry wasn't found:" + entry, returnQueue.contains(entry) || entry.equals(value));
}
}
@Test
public void waitUntilProcessingResults() throws TimeoutException, InterruptedException, ExecutionException {
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
Map<Object, String> values = new HashMap<>();
for (int i = 0; i < 9; ++i) {
MagicKey key = new MagicKey(cache1);
cache1.put(key, key.toString());
values.put(key, key.toString());
}
CheckPoint checkPoint = new CheckPoint();
checkPoint.triggerForever(Mocks.AFTER_RELEASE);
ClusterPublisherManager<Object, String> spy = Mocks.replaceComponentWithSpy(cache0, ClusterPublisherManager.class);
doAnswer(invocation -> {
SegmentPublisherSupplier<?> result = (SegmentPublisherSupplier<?>) invocation.callRealMethod();
return Mocks.blockingPublisher(result, checkPoint);
}).when(spy).entryPublisher(any(), any(), any(), anyLong(), any(), anyInt(), any());
final BlockingQueue<Map.Entry<Object, String>> returnQueue = new LinkedBlockingQueue<>();
Future<Void> future = fork(() -> {
Iterator<Map.Entry<Object, String>> iter = cache0.entrySet().stream().iterator();
while (iter.hasNext()) {
Map.Entry<Object, String> entry = iter.next();
returnQueue.add(entry);
}
return null;
});
// Now wait for them to send back first results but don't let them process
checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);
// Now let them process the results
checkPoint.triggerForever(Mocks.BEFORE_RELEASE);
// Now kill the cache - we should recover and get appropriate values
killMember(1, CACHE_NAME);
future.get(10, TimeUnit.SECONDS);
KeyPartitioner keyPartitioner = TestingUtil.extractComponent(cache0, KeyPartitioner.class);
Map<Integer, Set<Map.Entry<Object, String>>> expected = generateEntriesPerSegment(keyPartitioner, values.entrySet());
Map<Integer, Set<Map.Entry<Object, String>>> answer = generateEntriesPerSegment(keyPartitioner, returnQueue);
for (Map.Entry<Integer, Set<Map.Entry<Object, String>>> entry : expected.entrySet()) {
Integer segment = entry.getKey();
Set<Map.Entry<Object, String>> answerForSegment = answer.get(segment);
if (answerForSegment != null) {
for (Map.Entry<Object, String> exp : entry.getValue()) {
if (!answerForSegment.contains(exp)) {
log.errorf("Segment %d, missing %s", segment, exp);
}
}
for (Map.Entry<Object, String> ans : answerForSegment) {
if (!entry.getValue().contains(ans)) {
log.errorf("Segment %d, extra %s", segment, ans);
}
}
assertEquals(entry.getValue().size(), answerForSegment.size());
}
assertEquals("Segment " + segment + " had a mismatch", entry.getValue(), answerForSegment);
}
}
@Test
public void testNodeLeavesWhileIteratingOverContainerCausingRehashToLoseValues() throws Exception {
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
Cache<Object, String> cache2 = cache(2, CACHE_NAME);
// We put some entries into cache1, which will be shut down below. The batch size is only 2 so we won't be able
// to get them all in 1 remote call - this way we can block until we know we touch the data container, so at least
// the second request will give us an issue
Map<Object, String> values = new HashMap<>();
values.put(new MagicKey(cache0), "ignore");
values.put(new MagicKey(cache0), "ignore");
values.put(new MagicKey(cache0), "ignore");
values.put(new MagicKey(cache1), "ignore");
cache1.putAll(values);
CheckPoint checkPoint = new CheckPoint();
checkPoint.triggerForever("post_iterator_released");
waitUntilDataContainerWillBeIteratedOn(cache0, checkPoint);
final BlockingQueue<Map.Entry<Object, String>> returnQueue = new LinkedBlockingQueue<>();
Future<Void> future = fork(() -> {
// Set the batch size to a low number just to make sure it doesn't retrieve them all in one go
Iterator<Map.Entry<Object, String>> iter = cache2.entrySet().stream().distributedBatchSize(2).iterator();
while (iter.hasNext()) {
Map.Entry<Object, String> entry = iter.next();
returnQueue.add(entry);
}
return null;
});
// Now wait for them to send back first results but don't let them process
checkPoint.awaitStrict("pre_iterator_invoked", 10, TimeUnit.SECONDS);
// Now kill the cache - we should recover and get appropriate values
killMember(0, CACHE_NAME, false);
// Now let them process the results
checkPoint.triggerForever("pre_iterator_released");
future.get(10, TimeUnit.SECONDS);
KeyPartitioner keyPartitioner = TestingUtil.extractComponent(cache1, KeyPartitioner.class);
Map<Integer, Set<Map.Entry<Object, String>>> expected = generateEntriesPerSegment(keyPartitioner, values.entrySet());
Map<Integer, Set<Map.Entry<Object, String>>> answer = generateEntriesPerSegment(keyPartitioner, returnQueue);
for (Map.Entry<Integer, Set<Map.Entry<Object, String>>> entry : expected.entrySet()) {
try {
assertEquals("Segment " + entry.getKey() + " had a mismatch", entry.getValue(), answer.get(entry.getKey()));
} catch (AssertionError e) {
log.fatal("TEST ENDED");
throw e;
}
}
}
@Test
public void testLocallyForcedStream() {
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
Cache<Object, String> cache2 = cache(2, CACHE_NAME);
Map<Object, String> values = new HashMap<>();
MagicKey key1 = new MagicKey(cache0);
cache0.put(key1, key1.toString());
values.put(key1, key1.toString());
// Force it so only cache0 has its primary-owned keys
MagicKey key2 = magicKey(cache1, cache2);
cache2.put(key2, key2.toString());
// Force it so only cache0 has its primary-owned keys
MagicKey key3 = magicKey(cache2, cache1);
cache1.put(key3, key3.toString());
int count = 0;
Iterator<Map.Entry<Object, String>> iter = cache0.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).entrySet().
stream().iterator();
while (iter.hasNext()) {
Map.Entry<Object, String> entry = iter.next();
String cacheValue = cache0.get(entry.getKey());
assertNotNull(cacheValue);
assertEquals(cacheValue, entry.getValue());
count++;
}
assertEquals(values.size(), count);
}
/**
* Tests an iteration that isn't fully consumed but is instead closed, to ensure it properly releases all resources
*
* @param dataOwnerCache cache that will own the data (must not be cache 2)
* @param iteratorCache cache that will iterate (must not be cache 2)
*/
private void testIteratorClosedProperlyOnClose(Cache<Object, String> dataOwnerCache, Cache<Object, String> iteratorCache) {
Cache<Object, String> cache2 = cache(2, CACHE_NAME);
// We have to insert more entries than the default buffer size, which the iterator uses
for (int i = 0; i < Flowable.bufferSize() + 2; ++i) {
dataOwnerCache.put(magicKey(dataOwnerCache, cache2), "value");
}
PublisherHandler handler = TestingUtil.extractComponent(dataOwnerCache, PublisherHandler.class);
assertEquals(0, handler.openPublishers());
try (CacheStream<Map.Entry<Object, String>> stream = iteratorCache.entrySet().stream()) {
Iterator<Map.Entry<Object, String>> iter = stream.distributedBatchSize(1).iterator();
assertTrue(iter.hasNext());
assertEquals(1, handler.openPublishers());
}
// The close is done asynchronously
eventuallyEquals(0, handler::openPublishers);
}
public void testIteratorClosedWhenPartiallyIteratedLocal() {
testIteratorClosedProperlyOnClose(cache(1, CACHE_NAME), cache(1, CACHE_NAME));
}
public void testIteratorClosedWhenPartiallyIteratedRemote() {
testIteratorClosedProperlyOnClose(cache(1, CACHE_NAME), cache(0, CACHE_NAME));
}
public void testIteratorClosedWhenIteratedFully() {
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
Cache<Object, String> cache2 = cache(2, CACHE_NAME);
// We have to insert more entries than the default buffer size, which the iterator uses
for (int i = 0; i < Flowable.bufferSize() + 2; ++i) {
// We insert values into caches where we aren't the owner (they have to be in the same node or else the
// iterator will finish early)
cache0.put(magicKey(cache1, cache2), "not-local");
}
PublisherHandler handler = TestingUtil.extractComponent(cache1, PublisherHandler.class);
assertEquals(0, handler.openPublishers());
Iterator<Map.Entry<Object, String>> iter = cache0.entrySet().stream().distributedBatchSize(1).iterator();
assertTrue(iter.hasNext());
assertEquals(1, handler.openPublishers());
iter.forEachRemaining(ignore -> {});
// The close is done asynchronously
eventuallyEquals(0, handler::openPublishers);
}
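// Creates a key primary-owned by cache1, adding cache2 as backup owner only
// when the configuration uses more than one owner.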
protected MagicKey magicKey(Cache<Object, String> cache1, Cache<Object, String> cache2) {
if (cache1.getCacheConfiguration().clustering().hash().numOwners() < 2) {
return new MagicKey(cache1);
} else {
return new MagicKey(cache1, cache2);
}
}
@Test
public void testStayLocalIfAllSegmentsPresentLocallyWithReHash() throws Exception {
testStayLocalIfAllSegmentsPresentLocally(true);
}
@Test
public void testStayLocalIfAllSegmentsPresentLocallyWithoutRehash() throws Exception {
testStayLocalIfAllSegmentsPresentLocally(false);
}
private void testStayLocalIfAllSegmentsPresentLocally(boolean rehashAware) {
Cache<Object, String> cache0 = cache(0, CACHE_NAME);
RpcManager rpcManager = Mocks.replaceComponentWithSpy(cache0, RpcManager.class);
putValueInEachCache(3);
KeyPartitioner keyPartitioner = TestingUtil.extractComponent(cache0, KeyPartitioner.class);
ConsistentHash ch = cache0.getAdvancedCache().getDistributionManager().getWriteConsistentHash();
IntSet segmentsCache0 = IntSets.from(ch.getSegmentsForOwner(address(0)));
CacheStream<Map.Entry<Object, String>> stream = cache0.entrySet().stream();
if (!rehashAware) stream = stream.disableRehashAware();
Map<Object, String> entries = mapFromIterator(stream.filterKeySegments(segmentsCache0).iterator());
Map<Integer, Set<Map.Entry<Object, String>>> entriesPerSegment = generateEntriesPerSegment(keyPartitioner, entries.entrySet());
// We should not see keys from other segments, but there may be segments without any keys
assertTrue(segmentsCache0.containsAll(entriesPerSegment.keySet()));
verify(rpcManager, never()).invokeCommand(any(Address.class), any(InitialPublisherCommand.class), any(), any());
}
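// Blocks the LocalPublisherManager's entryPublisher call on the given cache,
// pausing it at the CheckPoint just as it is about to send entries back.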
protected void waitUntilSendingResponse(final Cache<?, ?> cache, final CheckPoint checkPoint) {
Mocks.blockingMock(checkPoint, LocalPublisherManager.class, cache, (stub, m) -> {
stub.when(m).entryPublisher(any(), any(), any(), anyLong(), any(), any());
}
);
}
protected <K> void blockStateTransfer(final Cache<?, ?> cache, final CheckPoint checkPoint) {
Mocks.blockInboundCacheRpcCommand(cache, checkPoint, command -> command instanceof StateTransferStartCommand);
}
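// Replaces the data container with a delegating mock that pauses the first
// publisher(segment) invocation at the CheckPoint, both before it runs and
// again after it completes.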
protected void waitUntilDataContainerWillBeIteratedOn(final Cache<?, ?> cache, final CheckPoint checkPoint) {
InternalDataContainer<?, ?> dataContainer = TestingUtil.extractComponent(cache, InternalDataContainer.class);
final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(dataContainer);
InternalDataContainer<?, ?> mockContainer = mock(InternalDataContainer.class,
withSettings().defaultAnswer(forwardedAnswer));
final AtomicInteger invocationCount = new AtomicInteger();
Answer<?> blockingAnswer = invocation -> {
boolean waiting = false;
if (invocationCount.getAndIncrement() == 0) {
waiting = true;
// Wait for main thread to sync up
checkPoint.trigger("pre_iterator_invoked");
// Now wait until main thread lets us through
checkPoint.awaitStrict("pre_iterator_released", 10, TimeUnit.SECONDS);
}
try {
return forwardedAnswer.answer(invocation);
} finally {
invocationCount.getAndDecrement();
if (waiting) {
// Wait for main thread to sync up
checkPoint.trigger("post_iterator_invoked");
// Now wait until main thread lets us through
checkPoint.awaitStrict("post_iterator_released", 10, TimeUnit.SECONDS);
}
}
};
doAnswer(blockingAnswer).when(mockContainer).publisher(anyInt());
TestingUtil.replaceComponent(cache, InternalDataContainer.class, mockContainer, true);
}
}

infinispan-main/core/src/test/java/org/infinispan/stream/BaseStreamTest.java

package org.infinispan.stream;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.DoubleSummaryStatistics;
import java.util.HashSet;
import java.util.IntSummaryStatistics;
import java.util.Iterator;
import java.util.List;
import java.util.LongSummaryStatistics;
import java.util.Map;
import java.util.Optional;
import java.util.PrimitiveIterator;
import java.util.Queue;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.StringJoiner;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.IntConsumer;
import java.util.function.LongConsumer;
import java.util.function.Supplier;
import java.util.function.ToIntFunction;
import java.util.stream.Collectors;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.infinispan.Cache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.CacheStream;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.function.SerializableToDoubleFunction;
import org.infinispan.util.function.SerializableToIntFunction;
import org.infinispan.util.function.SerializableToLongFunction;
import org.testng.annotations.Test;
/**
* Base test class for streams, verifying proper behavior of the terminal operations across the various
* stream classes
*/
@Test(groups = "functional")
public abstract class BaseStreamTest extends MultipleCacheManagersTest {
protected final String CACHE_NAME = "testCache";
protected ConfigurationBuilder builderUsed;
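// forEach callbacks may be marshalled and executed away from the caller, so
// captured local references would not survive serialization. Results are
// instead exchanged through this static structure, keyed by per-test offsets.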
static final Map<Integer, Object> forEachStructure = new ConcurrentHashMap<>();
static final AtomicInteger forEachOffset = new AtomicInteger();
static int populateNextForEachStructure(Object obj) {
int offset = forEachOffset.getAndIncrement();
forEachStructure.put(offset, obj);
return offset;
}
static <R> R getForEachObject(int offset) {
return (R) forEachStructure.get(offset);
}
static void clearForEachObject(int offset) {
forEachStructure.remove(offset);
}
public BaseStreamTest(boolean tx) {
this.transactional = tx;
}
protected void enhanceConfiguration(ConfigurationBuilder builder) {
// Does nothing to the config by default; intended to be overridden by subclasses
}
protected abstract <E> CacheStream<E> createStream(CacheCollection<E> cacheCollection);
@Override
protected void createCacheManagers() throws Throwable {
builderUsed = new ConfigurationBuilder();
builderUsed.clustering().cacheMode(cacheMode);
if (transactional) {
builderUsed.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
}
if (cacheMode.isClustered()) {
builderUsed.clustering().stateTransfer().chunkSize(50);
enhanceConfiguration(builderUsed);
createClusteredCaches(3, CACHE_NAME, builderUsed);
} else {
enhanceConfiguration(builderUsed);
EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(builderUsed);
cacheManagers.add(cm);
cm.defineConfiguration(CACHE_NAME, builderUsed.build());
}
}
protected <K, V> Cache<K, V> getCache(int index) {
return cache(index, CACHE_NAME);
}
public void testObjAllMatch() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).allMatch(e -> e.getValue().endsWith("-value")));
assertFalse(createStream(entrySet).allMatch(e -> e.getKey() % 2 == 0));
assertTrue(createStream(entrySet).allMatch(e -> e.getKey() < 10 && e.getKey() >= 0));
assertTrue(createStream(entrySet).allMatch(e -> e.getKey().toString().equals(e.getValue().substring(0, 1))));
}
public void testObjAnyMatch() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).anyMatch(e -> e.getValue().endsWith("-value")));
assertTrue(createStream(entrySet).anyMatch(e -> e.getKey() % 2 == 0));
assertTrue(createStream(entrySet).anyMatch(e -> e.getKey() < 10 && e.getKey() >= 0));
assertTrue(createStream(entrySet).anyMatch(e -> e.getValue().equals("4-value")));
assertFalse(createStream(entrySet).anyMatch(e -> e.getKey() > 12));
}
public void testObjCollectorIntAverager() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(4.5, createStream(entrySet).collect(
() -> Collectors.averagingInt(Map.Entry::getKey)));
}
public void testObjCollectorIntStatistics() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
IntSummaryStatistics stats = createStream(entrySet).collect(
() -> Collectors.summarizingInt(Map.Entry::getKey));
assertEquals(10, stats.getCount());
assertEquals(4.5, stats.getAverage());
assertEquals(0, stats.getMin());
assertEquals(9, stats.getMax());
assertEquals(45, stats.getSum());
}
public void testObjCollectorGroupBy() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
ConcurrentMap<Boolean, List<Map.Entry<Integer, String>>> grouped = createStream(entrySet).collect(
() -> Collectors.groupingByConcurrent(k -> k.getKey() % 2 == 0));
grouped.get(true).parallelStream().forEach(e -> assertTrue(e.getKey() % 2 == 0));
grouped.get(false).parallelStream().forEach(e -> assertTrue(e.getKey() % 2 == 1));
}
public void testObjCollect() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
List<Map.Entry<Integer, String>> list = createStream(entrySet).collect(ArrayList::new,
ArrayList::add, ArrayList::addAll);
assertEquals(cache.size(), list.size());
list.parallelStream().forEach(e -> assertEquals(cache.get(e.getKey()), e.getValue()));
}
public void testObjSortedCollector() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
List<Map.Entry<Integer, String>> list = createStream(entrySet).sorted(
(e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).collect(
Collectors::<Map.Entry<Integer, String>>toList);
assertEquals(cache.size(), list.size());
AtomicInteger i = new AtomicInteger();
list.forEach(e -> {
assertEquals(i.getAndIncrement(), e.getKey().intValue());
assertEquals(cache.get(e.getKey()), e.getValue());
});
}
public void testObjCount() {
Cache<Integer, String> cache = getCache(0);
int range = 12;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(range, createStream(entrySet).count());
}
public void testObjFindAny() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).findAny().isPresent());
assertTrue(createStream(entrySet).filter(e -> e.getValue().endsWith("-value")).findAny().isPresent());
assertTrue(createStream(entrySet).filter(e -> e.getKey() % 2 == 0).findAny().isPresent());
assertTrue(createStream(entrySet).filter(e -> e.getKey() < 10 && e.getKey() >= 0).findAny().isPresent());
assertTrue(createStream(entrySet).filter(e -> e.getValue().equals("4-value")).findAny().isPresent());
assertFalse(createStream(entrySet).filter(e -> e.getKey() > 12).findAny().isPresent());
}
public void testObjFindFirst() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(0, createStream(entrySet).sorted(
(e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).findFirst().get().getKey().intValue());
}
public void testObjForEach() {
Cache<Integer, String> cache = getCache(0);
int offset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.entrySet()).forEach(e -> {
AtomicInteger atomic = getForEachObject(offset);
atomic.addAndGet(e.getKey());
});
return ((AtomicInteger) getForEachObject(offset)).get();
}, cache);
} finally {
clearForEachObject(offset);
}
}
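// Consumer marshalled via the Externalizer below. CacheAware injection supplies
// the executing cache, which accept() verifies against the cache registered at
// cacheOffset before accumulating function results into the AtomicInteger at
// atomicOffset.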
@SerializeWith(ForEachInjected.Externalizer.class)
public static class ForEachInjected<E> implements Consumer<E>, CacheAware<Integer, String> {
private Cache<?, ?> cache;
private final int cacheOffset;
private final int atomicOffset;
private final ToIntFunction<? super E> function;
private ForEachInjected(int cacheOffset, int atomicOffset, SerializableToIntFunction<? super E> function) {
this.cacheOffset = cacheOffset;
this.atomicOffset = atomicOffset;
this.function = function;
}
@Override
public void injectCache(Cache<Integer, String> cache) {
this.cache = cache;
}
@Override
public void accept(E entry) {
Cache<?, ?> cache = getForEachObject(cacheOffset);
if (cache != null && this.cache != null && cache.getName().equals(this.cache.getName())) {
((AtomicInteger) getForEachObject(atomicOffset)).addAndGet(function.applyAsInt(entry));
} else {
fail("Did not receive correct cache!");
}
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<ForEachInjected> {
@Override
public void writeObject(ObjectOutput output, ForEachInjected object) throws IOException {
output.writeInt(object.cacheOffset);
output.writeInt(object.atomicOffset);
output.writeObject(object.function);
}
@Override
public ForEachInjected readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int cacheOffset = input.readInt();
int atomicOffset = input.readInt();
SerializableToIntFunction f = (SerializableToIntFunction) input.readObject();
return new ForEachInjected<>(cacheOffset, atomicOffset, f);
}
}
}
public void testObjForEachCacheInjected() {
Cache<Integer, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.entrySet()).forEach(new ForEachInjected<>(cacheOffset, atomicOffset, Map.Entry::getKey));
return ((AtomicInteger) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testObjForEachBiConsumer() {
Cache<Integer, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.entrySet()).forEach((c, e) -> {
Cache<?, ?> localCache = getForEachObject(cacheOffset);
if (c != null && localCache != null && c.getName().equals(localCache.getName())) {
((AtomicInteger) getForEachObject(atomicOffset)).addAndGet(e.getKey());
} else {
fail("Did not receive correct cache!");
}
});
return ((AtomicInteger) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testObjKeySetForEachCacheInjected() {
Cache<Integer, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.keySet()).forEach(new ForEachInjected<>(cacheOffset, atomicOffset, Integer::intValue));
return ((AtomicInteger) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testObjValuesForEachCacheInjected() {
Cache<Integer, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.values()).forEach(new ForEachInjected<>(cacheOffset, atomicOffset,
e -> Integer.valueOf(e.substring(0, 1))));
return ((AtomicInteger) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testObjFlatMapForEach() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
Queue<String> queue = new ConcurrentLinkedQueue<>();
int queueOffset = populateNextForEachStructure(queue);
try {
createStream(entrySet).distributedBatchSize(5)
.flatMap(e -> Arrays.stream(e.getValue().split("a")))
.forEach(e -> {
Queue<String> localQueue = getForEachObject(queueOffset);
localQueue.add(e);
});
assertEquals(range * 2, queue.size());
int lueCount = 0;
for (String string : queue) {
if (string.equals("lue")) lueCount++;
}
assertEquals(10, lueCount);
} finally {
clearForEachObject(queueOffset);
}
}
public void testObjForEachOrdered() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
List<Map.Entry<Integer, String>> list = new ArrayList<>(range);
// we sort in inverted order
createStream(entrySet).sorted((e1, e2) -> Integer.compare(e2.getKey(), e1.getKey())).forEachOrdered(
list::add);
assertEquals(range, list.size());
for (int i = 0; i < range; ++i) {
// 0-based, so we also have to subtract 1
assertEquals(range - i - 1, list.get(i).getKey().intValue());
}
}
public void testObjMax() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(Integer.valueOf(9),
createStream(entrySet).max((e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).get().getKey());
}
public void testObjMin() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(Integer.valueOf(0),
createStream(entrySet).min((e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).get().getKey());
}
public void testObjNoneMatch() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertFalse(createStream(entrySet).noneMatch(e -> e.getValue().endsWith("-value")));
assertFalse(createStream(entrySet).noneMatch(e -> e.getKey() % 2 == 0));
assertFalse(createStream(entrySet).noneMatch(e -> e.getKey() < 10 && e.getKey() >= 0));
assertFalse(createStream(entrySet).noneMatch(e -> e.getKey().toString().equals(e.getValue().substring(0, 1))));
assertTrue(createStream(entrySet).noneMatch(e -> e.getKey() > 12));
}
public void testObjReduce1() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
Optional<Map.Entry<Integer, String>> optional = createStream(entrySet).reduce(
(e1, e2) -> new ImmortalCacheEntry(e1.getKey() + e2.getKey(), e1.getValue() + e2.getValue()));
assertTrue(optional.isPresent());
Map.Entry<Integer, String> result = optional.get();
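// Keys 0..9 sum to (range - 1) * (range / 2) == 45, and each entry contributes a 7-character value to the concatenation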
assertEquals((range - 1) * (range / 2), result.getKey().intValue());
assertEquals(range * 7, result.getValue().length());
}
public void testObjReduce2() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
Map.Entry<Integer, String> result = createStream(entrySet).reduce(new ImmortalCacheEntry(0, ""),
(e1, e2) -> new ImmortalCacheEntry(e1.getKey() + e2.getKey(), e1.getValue() + e2.getValue()));
assertEquals((range - 1) * (range / 2), result.getKey().intValue());
assertEquals(range * 7, result.getValue().length());
}
public void testObjReduce2WithMap() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
Integer result = createStream(entrySet).map(Map.Entry::getKey).reduce(0, (e1, e2) -> e1 + e2);
assertEquals((range - 1) * (range / 2), result.intValue());
}
public void testObjReduce3() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
Integer result = createStream(entrySet).reduce(0, (e1, e2) -> e1 + e2.getKey(), (i1, i2) -> i1 + i2);
assertEquals((range - 1) * (range / 2), result.intValue());
}
public void testObjIterator() {
Cache<Integer, String> cache = getCache(0);
testIntOperation(() -> {
Iterator<Map.Entry<Integer, String>> iterator = createStream(cache.entrySet()).iterator();
AtomicInteger count = new AtomicInteger();
iterator.forEachRemaining(e -> {
assertEquals(cache.get(e.getKey()), e.getValue());
count.addAndGet(e.getKey());
});
return count.get();
}, cache);
}
public void testObjSortedIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
Iterator<Map.Entry<Integer, String>> iterator = createStream(entrySet).sorted(
(e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).iterator();
AtomicInteger i = new AtomicInteger();
iterator.forEachRemaining(e -> {
assertEquals(i.getAndIncrement(), e.getKey().intValue());
assertEquals(cache.get(e.getKey()), e.getValue());
});
}
public void testObjMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
Iterator<String> iterator = createStream(entrySet).map(Map.Entry::getValue).iterator();
Set<String> set = new HashSet<>(range);
iterator.forEachRemaining(set::add);
assertEquals(range, set.size());
IntStream.range(0, range).forEach(i -> assertTrue(set.contains(i + "-value")));
}
public void testObjFlatMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
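// When trace logging is on, record which segment owns each key to help diagnose missing or duplicated results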
Map<Integer, IntSet> keysBySegment = log.isTraceEnabled() ? new TreeMap<>() : null;
KeyPartitioner kp = TestingUtil.extractComponent(cache, KeyPartitioner.class);
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> {
if (keysBySegment != null) {
int segment = kp.getSegment(i);
IntSet keys = keysBySegment.computeIfAbsent(segment, IntSets::mutableEmptySet);
keys.set(i);
}
cache.put(i, i + "-value" + i);
});
if (keysBySegment != null) {
log.tracef("Keys by segment are: " + keysBySegment);
}
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
StringJoiner stringJoiner = new StringJoiner(" ");
// RxJava requests 128 by default for many operations - thus we use a number larger than that
int explosionCount = 293;
for (int i = 0; i < explosionCount; ++i) {
stringJoiner.add("special-" + String.valueOf(i));
}
String specialString = stringJoiner.toString();
Iterator<String> iterator = createStream(entrySet)
.distributedBatchSize(1)
.flatMap(e -> {
if (e.getKey() == 2) {
// Make sure to test an empty stream as well
return Stream.empty();
}
if (e.getKey() == 5) {
// Make sure we also test a very large resulting stream without the key in it
return Arrays.stream(specialString.split(" "));
}
return Arrays.stream(e.getValue().split("a"));
})
.iterator();
List<String> list = new ArrayList<>(range * 2);
iterator.forEachRemaining(list::add);
if (keysBySegment != null) {
log.tracef("Returned values are: %s", list);
}
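// Key 2 yields nothing and key 5 yields the explosionCount special strings; the other range - 2 entries split into 2 parts each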
assertEquals((range - 2) * 2 + explosionCount, list.size());
}
public void testObjToArray1() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
Object[] array = createStream(entrySet).toArray();
assertEquals(cache.size(), array.length);
Spliterator<Map.Entry<Integer, String>> spliterator = Spliterators.spliterator(array, Spliterator.DISTINCT |
Spliterator.NONNULL);
StreamSupport.stream(spliterator, false).forEach(e -> assertEquals(cache.get(e.getKey()), e.getValue()));
}
public void testObjToArray2() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
Map.Entry<Integer, String>[] array = createStream(entrySet).toArray(Map.Entry[]::new);
assertEquals(cache.size(), array.length);
Spliterator<Map.Entry<Integer, String>> spliterator = Spliterators.spliterator(array, Spliterator.DISTINCT |
Spliterator.NONNULL);
StreamSupport.stream(spliterator, false).forEach(e -> assertEquals(cache.get(e.getKey()), e.getValue()));
}
public void testObjSortedSkipIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
for (int i = 0; i < range; ++i) {
Iterator<Map.Entry<Integer, String>> iterator = createStream(entrySet).sorted(
(e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).skip(i).iterator();
AtomicInteger atomicInteger = new AtomicInteger(i);
iterator.forEachRemaining(e -> {
assertEquals(atomicInteger.getAndIncrement(), e.getKey().intValue());
assertEquals(cache.get(e.getKey()), e.getValue());
});
assertEquals(range, atomicInteger.get());
}
}
public void testObjSortedLimitIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
for (int i = 1; i < range; ++i) {
Iterator<Map.Entry<Integer, String>> iterator = createStream(entrySet).sorted(
(e1, e2) -> Integer.compare(e1.getKey(), e2.getKey())).limit(i).iterator();
AtomicInteger atomicInteger = new AtomicInteger();
iterator.forEachRemaining(e -> {
assertEquals(atomicInteger.getAndIncrement(), e.getKey().intValue());
assertEquals(cache.get(e.getKey()), e.getValue());
});
assertEquals(i, atomicInteger.get());
}
}
public void testObjPointlessSortMap() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
IntSummaryStatistics stats = createStream(entrySet).sorted((e1, e2) -> Integer.compare(e1.getKey(), e2.getKey()))
.mapToInt(Map.Entry::getKey).summaryStatistics();
assertEquals(range, stats.getCount());
assertEquals(0, stats.getMin());
assertEquals(9, stats.getMax());
}
// IntStream tests
static final SerializableToIntFunction<Map.Entry<Integer, String>> toInt = Map.Entry::getKey;
public void testIntAllMatch() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertFalse(createStream(entrySet).mapToInt(toInt).allMatch(i -> i % 2 == 0));
assertFalse(createStream(entrySet).mapToInt(toInt).allMatch(i -> i > 10 && i < 0));
assertTrue(createStream(entrySet).mapToInt(toInt).allMatch(i -> i < 12));
}
public void testIntAnyMatch() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).mapToInt(toInt).anyMatch(i -> i % 2 == 0));
assertFalse(createStream(entrySet).mapToInt(toInt).anyMatch(i -> i > 10 && i < 0));
assertTrue(createStream(entrySet).mapToInt(toInt).anyMatch(i -> i < 12));
}
public void testIntAverage() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(4.5, createStream(entrySet).mapToInt(toInt).average().getAsDouble());
}
public void testIntCollect() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
HashSet<Integer> set = createStream(entrySet).mapToInt(toInt).collect(HashSet::new, Set::add, Set::addAll);
assertEquals(10, set.size());
}
public void testIntCount() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(10, createStream(entrySet).mapToInt(toInt).count());
}
public void testIntFindAny() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).mapToInt(toInt).findAny().isPresent());
assertTrue(createStream(entrySet).mapToInt(toInt).filter(e -> e % 2 == 0).findAny().isPresent());
assertTrue(createStream(entrySet).mapToInt(toInt).filter(e -> e < 10 && e >= 0).findAny().isPresent());
assertFalse(createStream(entrySet).mapToInt(toInt).filter(e -> e > 12).findAny().isPresent());
}
public void testIntFindFirst() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(0, createStream(entrySet).mapToInt(toInt).sorted().findFirst().getAsInt());
}
public void testIntForEach() {
Cache<Integer, String> cache = getCache(0);
int offset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.entrySet()).mapToInt(toInt).forEach(e -> {
AtomicInteger atomic = getForEachObject(offset);
atomic.addAndGet(e);
});
return ((AtomicInteger) getForEachObject(offset)).get();
}, cache);
} finally {
clearForEachObject(offset);
}
}
public void testIntFlatMapForEach() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
int offset = populateNextForEachStructure(new AtomicInteger());
try {
createStream(entrySet).distributedBatchSize(5).mapToInt(toInt).flatMap(i -> IntStream.of(i, 2))
.forEach(e -> {
AtomicInteger atomic = getForEachObject(offset);
atomic.addAndGet(e);
});
AtomicInteger atomic = getForEachObject(offset);
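// Keys 0..9 sum to 45 and the flatMap adds a literal 2 per entry, giving 45 + 2 * range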
assertEquals((range - 1) * (range / 2) + 2 * range, atomic.get());
} finally {
clearForEachObject(offset);
}
}
public void testIntForEachOrdered() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
List<Integer> list = new ArrayList<>(range);
// sorted() uses natural ascending order
createStream(entrySet).mapToInt(toInt).sorted().forEachOrdered(
list::add);
assertEquals(range, list.size());
for (int i = 0; i < range; ++i) {
// ascending order starting at 0, so the index equals the value
assertEquals(i, list.get(i).intValue());
}
}
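// IntConsumer that verifies the injected Cache matches the one registered at cacheOffset before accumulating the value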
@SerializeWith(ForEachIntInjected.Externalizer.class)
public static class ForEachIntInjected implements IntConsumer, CacheAware<Integer, String> {
private Cache<?, ?> cache;
private final int cacheOffset;
private final int atomicOffset;
private ForEachIntInjected(int cacheOffset, int atomicOffset) {
this.cacheOffset = cacheOffset;
this.atomicOffset = atomicOffset;
}
@Override
public void injectCache(Cache<Integer, String> cache) {
this.cache = cache;
}
@Override
public void accept(int value) {
Cache<?, ?> cache = getForEachObject(cacheOffset);
if (cache != null && this.cache != null && cache.getName().equals(this.cache.getName())) {
((AtomicInteger) getForEachObject(atomicOffset)).addAndGet(value);
} else {
fail("Did not receive correct cache!");
}
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<ForEachIntInjected> {
@Override
public void writeObject(ObjectOutput output, ForEachIntInjected object) throws IOException {
output.writeInt(object.cacheOffset);
output.writeInt(object.atomicOffset);
}
@Override
public ForEachIntInjected readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int cacheOffset = input.readInt();
int atomicOffset = input.readInt();
return new ForEachIntInjected(cacheOffset, atomicOffset);
}
}
}
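// Populates keys 0..9 and asserts the supplier computes their sum: (range - 1) * (range / 2) == 45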
private void testIntOperation(Supplier<Integer> intSupplier, Cache<Integer, String> cache) {
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
assertEquals((range - 1) * (range / 2), intSupplier.get().intValue());
}
public void testIntForEachCacheInjected() {
Cache<Integer, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.entrySet()).mapToInt(toInt).forEach(new ForEachIntInjected(cacheOffset, atomicOffset));
return ((AtomicInteger) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testIntForEachBiConsumer() {
Cache<Integer, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicInteger());
try {
testIntOperation(() -> {
createStream(cache.entrySet()).mapToInt(toInt).forEach((c, i) -> {
Cache<?, ?> localCache = getForEachObject(cacheOffset);
if (c != null && localCache != null && c.getName().equals(localCache.getName())) {
AtomicInteger atomicInteger = getForEachObject(atomicOffset);
atomicInteger.addAndGet(i);
}
});
return ((AtomicInteger) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testIntFlatMapObjConsumerForEach() {
Cache<Integer, String> cache = getCache(0);
String cacheName = cache.getName();
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
int offset = populateNextForEachStructure(new AtomicInteger());
try {
createStream(entrySet).distributedBatchSize(5).mapToInt(toInt).flatMap(i -> IntStream.of(i, 2))
.forEach((c, e) -> {
assertEquals(cacheName, c.getName());
AtomicInteger atomic = getForEachObject(offset);
atomic.addAndGet(e);
});
AtomicInteger atomic = getForEachObject(offset);
assertEquals((range - 1) * (range / 2) + 2 * range, atomic.get());
} finally {
clearForEachObject(offset);
}
}
public void testIntIterator() {
Cache<Integer, String> cache = getCache(0);
testIntOperation(() -> {
PrimitiveIterator.OfInt iterator = createStream(cache.entrySet()).mapToInt(toInt).iterator();
AtomicInteger count = new AtomicInteger();
iterator.forEachRemaining((int e) -> {
assertTrue(cache.containsKey(e));
count.addAndGet(e);
});
return count.get();
}, cache);
}
public void testIntSortedIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
PrimitiveIterator.OfInt iterator = createStream(entrySet).mapToInt(toInt).sorted().iterator();
AtomicLong i = new AtomicLong();
iterator.forEachRemaining((int e) -> assertEquals(i.getAndIncrement(), e));
}
public void testIntFlatMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
PrimitiveIterator.OfInt iterator = createStream(entrySet).flatMapToInt(
e -> IntStream.of(e.getKey(), e.getValue().length())).iterator();
int pos = 0;
// every value "N-value" has length 7, and 7 is also a key, so 7 appears range + 1 times
int sevenCount = 0;
while (iterator.hasNext()) {
int next = iterator.nextInt();
pos++;
if (next == 7) {
sevenCount++;
}
assertTrue(cache.containsKey(next));
}
assertEquals(range + 1, sevenCount);
assertEquals(range * 2, pos);
}
public void testIntNoneMatch() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
assertFalse(createStream(entrySet).mapToInt(toInt).noneMatch(i -> i % 2 == 0));
assertTrue(createStream(entrySet).mapToInt(toInt).noneMatch(i -> i > 10 && i < 0));
assertFalse(createStream(entrySet).mapToInt(toInt).noneMatch(i -> i < 12));
}
public void testIntReduce1() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// One value is 0 so multiplying them together should be 0
assertEquals(0, createStream(entrySet).mapToInt(toInt).reduce(1, (i1, i2) -> i1 * i2));
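// 9! == 362880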
assertEquals(362880, createStream(entrySet).mapToInt(toInt).filter(i -> i != 0).reduce(1, (i1, i2) -> i1 * i2));
}
public void testIntReduce2() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// One value is 0 so multiplying them together should be 0
assertEquals(0, createStream(entrySet).mapToInt(toInt).reduce((i1, i2) -> i1 * i2).getAsInt());
assertEquals(362880, createStream(entrySet).mapToInt(toInt).filter(i -> i != 0).reduce((i1, i2) -> i1 * i2)
.getAsInt());
}
public void testIntSummaryStatistics() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
IntSummaryStatistics statistics = createStream(entrySet).mapToInt(toInt).summaryStatistics();
assertEquals(0, statistics.getMin());
assertEquals(9, statistics.getMax());
assertEquals(4.5, statistics.getAverage());
assertEquals((range - 1) * (range / 2), statistics.getSum());
assertEquals(10, statistics.getCount());
}
public void testIntToArray() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
int[] array = createStream(entrySet).mapToInt(toInt).toArray();
assertEquals(cache.size(), array.length);
Spliterator.OfInt spliterator = Spliterators.spliterator(array, Spliterator.DISTINCT);
StreamSupport.intStream(spliterator, true).forEach(e -> assertTrue(cache.containsKey(e)));
}
public void testIntSum() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
int result = createStream(entrySet).mapToInt(toInt).sum();
assertEquals((range - 1) * (range / 2), result);
}
public void testIntMax() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(9, createStream(entrySet).mapToInt(toInt).max().getAsInt());
}
public void testIntMin() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
assertEquals(0, createStream(entrySet).mapToInt(toInt).min().getAsInt());
}
public void testIntSortedSkip() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
for (int i = 0; i < range; i++) {
IntSummaryStatistics stats = createStream(entrySet).mapToInt(toInt)
.sorted().skip(i).summaryStatistics();
assertEquals(range - i, stats.getCount());
assertEquals(i, stats.getMin());
assertEquals(range - 1, stats.getMax());
assertEquals(IntStream.range(i, range).sum(), stats.getSum());
}
}
public void testIntSortedLimit() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
for (int i = 1; i < range; i++) {
IntSummaryStatistics stats = createStream(entrySet).mapToInt(toInt)
.sorted().limit(i).summaryStatistics();
assertEquals(i, stats.getCount());
assertEquals(0, stats.getMin());
assertEquals(i - 1, stats.getMax());
assertEquals(IntStream.range(0, i).sum(), stats.getSum());
}
}
// LongStream tests
static final SerializableToLongFunction<Map.Entry<Long, String>> toLong = Map.Entry::getKey;
public void testLongAllMatch() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertFalse(createStream(entrySet).mapToLong(toLong).allMatch(i -> i % 2 == 0));
assertFalse(createStream(entrySet).mapToLong(toLong).allMatch(i -> i > 10 && i < 0));
assertTrue(createStream(entrySet).mapToLong(toLong).allMatch(i -> i < 12));
}
public void testLongAnyMatch() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).mapToLong(toLong).anyMatch(i -> i % 2 == 0));
assertFalse(createStream(entrySet).mapToLong(toLong).anyMatch(i -> i > 10 && i < 0));
assertTrue(createStream(entrySet).mapToLong(toLong).anyMatch(i -> i < 12));
}
public void testLongAverage() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertEquals(4.5, createStream(entrySet).mapToLong(toLong).average().getAsDouble());
}
public void testLongCollect() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
HashSet<Long> set = createStream(entrySet).mapToLong(toLong).collect(HashSet::new, Set::add, Set::addAll);
assertEquals(10, set.size());
}
public void testLongCount() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertEquals(10, createStream(entrySet).mapToLong(toLong).count());
}
public void testLongFindAny() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).mapToLong(toLong).findAny().isPresent());
assertTrue(createStream(entrySet).mapToLong(toLong).filter(e -> e % 2 == 0).findAny().isPresent());
assertTrue(createStream(entrySet).mapToLong(toLong).filter(e -> e < 10 && e >= 0).findAny().isPresent());
assertFalse(createStream(entrySet).mapToLong(toLong).filter(e -> e > 12).findAny().isPresent());
}
public void testLongFindFirst() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertEquals(0, createStream(entrySet).mapToLong(toLong).sorted().findFirst().getAsLong());
}
public void testLongForEach() {
Cache<Long, String> cache = getCache(0);
int offset = populateNextForEachStructure(new AtomicLong());
try {
testLongOperation(() -> {
createStream(cache.entrySet()).mapToLong(toLong).forEach(e -> {
AtomicLong atomic = getForEachObject(offset);
atomic.addAndGet(e);
});
return ((AtomicLong) getForEachObject(offset)).get();
}, cache);
} finally {
clearForEachObject(offset);
}
}
public void testLongFlatMapForEach() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
int offset = populateNextForEachStructure(new AtomicLong());
try {
createStream(entrySet).distributedBatchSize(5).mapToLong(toLong).flatMap(i -> LongStream.of(i, 2))
.forEach(e -> {
AtomicLong atomic = getForEachObject(offset);
atomic.addAndGet(e);
});
AtomicLong atomic = getForEachObject(offset);
assertEquals((range - 1) * (range / 2) + 2 * range, atomic.get());
} finally {
clearForEachObject(offset);
}
}
public void testLongForEachOrdered() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
List<Long> list = new ArrayList<>(range);
// sorted() uses natural ascending order
createStream(entrySet).mapToLong(toLong).sorted().forEachOrdered(
list::add);
assertEquals(range, list.size());
for (int i = 0; i < range; ++i) {
// ascending order starting at 0, so the index equals the value
assertEquals(i, list.get(i).longValue());
}
}
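// LongConsumer counterpart of ForEachIntInjected: verifies the injected Cache before accumulating the value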
@SerializeWith(ForEachLongInjected.Externalizer.class)
public static class ForEachLongInjected implements LongConsumer, CacheAware<Long, String> {
private Cache<?, ?> cache;
private final int cacheOffset;
private final int atomicOffset;
private ForEachLongInjected(int cacheOffset, int atomicOffset) {
this.cacheOffset = cacheOffset;
this.atomicOffset = atomicOffset;
}
@Override
public void injectCache(Cache<Long, String> cache) {
this.cache = cache;
}
@Override
public void accept(long value) {
Cache<?, ?> cache = getForEachObject(cacheOffset);
if (cache != null && this.cache != null && cache.getName().equals(this.cache.getName())) {
((AtomicLong) getForEachObject(atomicOffset)).addAndGet(value);
} else {
fail("Did not receive correct cache!");
}
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<ForEachLongInjected> {
@Override
public void writeObject(ObjectOutput output, ForEachLongInjected object) throws IOException {
output.writeInt(object.cacheOffset);
output.writeInt(object.atomicOffset);
}
@Override
public ForEachLongInjected readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int cacheOffset = input.readInt();
int atomicOffset = input.readInt();
return new ForEachLongInjected(cacheOffset, atomicOffset);
}
}
}
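// Populates keys 0..9 and asserts the supplier computes their sum: (range - 1) * (range / 2) == 45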
private void testLongOperation(Supplier<Long> longSupplier, Cache<Long, String> cache) {
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
assertEquals((range - 1) * (range / 2), longSupplier.get().longValue());
}
public void testLongForEachCacheInjected() {
Cache<Long, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicLong());
try {
testLongOperation(() -> {
createStream(cache.entrySet()).mapToLong(toLong).forEach(new ForEachLongInjected(cacheOffset, atomicOffset));
return ((AtomicLong) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testLongForEachBiConsumer() {
Cache<Long, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int atomicOffset = populateNextForEachStructure(new AtomicLong());
try {
testLongOperation(() -> {
createStream(cache.entrySet()).mapToLong(toLong).forEach((c, i) -> {
Cache<?, ?> localCache = getForEachObject(cacheOffset);
if (c != null && localCache != null && c.getName().equals(localCache.getName())) {
AtomicLong atomicLong = getForEachObject(atomicOffset);
atomicLong.addAndGet(i);
}
});
return ((AtomicLong) getForEachObject(atomicOffset)).get();
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(atomicOffset);
}
}
public void testLongFlatMapObjConsumerForEach() {
Cache<Long, String> cache = getCache(0);
String cacheName = cache.getName();
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
int offset = populateNextForEachStructure(new AtomicLong());
try {
createStream(entrySet).distributedBatchSize(5).mapToLong(toLong).flatMap(i -> LongStream.of(i, 2))
.forEach((c, e) -> {
assertEquals(cacheName, c.getName());
AtomicLong atomic = getForEachObject(offset);
atomic.addAndGet(e);
});
AtomicLong atomic = getForEachObject(offset);
assertEquals((range - 1) * (range / 2) + 2 * range, atomic.get());
} finally {
clearForEachObject(offset);
}
}
public void testLongIterator() {
Cache<Long, String> cache = getCache(0);
testLongOperation(() -> {
PrimitiveIterator.OfLong iterator = createStream(cache.entrySet()).mapToLong(toLong).iterator();
AtomicLong count = new AtomicLong();
iterator.forEachRemaining((long e) -> {
assertTrue(cache.containsKey(e));
count.addAndGet(e);
});
return count.get();
}, cache);
}
public void testLongSortedIterator() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
PrimitiveIterator.OfLong iterator = createStream(entrySet).mapToLong(toLong).sorted().iterator();
AtomicLong i = new AtomicLong();
iterator.forEachRemaining((long e) -> assertEquals(i.getAndIncrement(), e));
}
public void testLongFlatMapIterator() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
PrimitiveIterator.OfLong iterator = createStream(entrySet).flatMapToLong(
e -> LongStream.of(e.getKey(), e.getValue().length())).iterator();
int pos = 0;
// every value "N-value" has length 7, and 7 is also a key, so 7 appears range + 1 times
int sevenCount = 0;
while (iterator.hasNext()) {
long next = iterator.nextLong();
pos++;
if (next == 7) {
sevenCount++;
}
assertTrue(cache.containsKey(next));
}
assertEquals(range + 1, sevenCount);
assertEquals(range * 2, pos);
}
public void testLongNoneMatch() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
assertFalse(createStream(entrySet).mapToLong(toLong).noneMatch(i -> i % 2 == 0));
assertTrue(createStream(entrySet).mapToLong(toLong).noneMatch(i -> i > 10 && i < 0));
assertFalse(createStream(entrySet).mapToLong(toLong).noneMatch(i -> i < 12));
}
public void testLongReduce1() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
// One value is 0 so multiplying them together should be 0
assertEquals(0, createStream(entrySet).mapToLong(toLong).reduce(1, (i1, i2) -> i1 * i2));
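// 9! == 362880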
assertEquals(362880, createStream(entrySet).mapToLong(toLong).filter(i -> i != 0).reduce(1, (i1, i2) -> i1 * i2));
}
public void testLongReduce2() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
// One value is 0 so multiplying them together should be 0
assertEquals(0, createStream(entrySet).mapToLong(toLong).reduce((i1, i2) -> i1 * i2).getAsLong());
assertEquals(362880, createStream(entrySet).mapToLong(toLong).filter(i -> i != 0).reduce((i1, i2) -> i1 * i2)
.getAsLong());
}
public void testLongSummaryStatistics() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
LongSummaryStatistics statistics = createStream(entrySet).mapToLong(toLong).summaryStatistics();
assertEquals(0, statistics.getMin());
assertEquals(9, statistics.getMax());
assertEquals(4.5, statistics.getAverage());
assertEquals((range - 1) * (range / 2), statistics.getSum());
assertEquals(10, statistics.getCount());
}
public void testLongToArray() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
long[] array = createStream(entrySet).mapToLong(toLong).toArray();
assertEquals(cache.size(), array.length);
Spliterator.OfLong spliterator = Spliterators.spliterator(array, Spliterator.DISTINCT);
StreamSupport.longStream(spliterator, true).forEach(e -> assertTrue(cache.containsKey(e)));
}
public void testLongSum() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
long result = createStream(entrySet).mapToLong(toLong).sum();
assertEquals((range - 1) * (range / 2), result);
}
public void testLongMax() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertEquals(9, createStream(entrySet).mapToLong(toLong).max().getAsLong());
}
public void testLongMin() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
assertEquals(0, createStream(entrySet).mapToLong(toLong).min().getAsLong());
}
public void testLongSortedSkip() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
for (int i = 0; i < range; i++) {
LongSummaryStatistics stats = createStream(entrySet).mapToLong(toLong)
.sorted().skip(i).summaryStatistics();
assertEquals(range - i, stats.getCount());
assertEquals(i, stats.getMin());
assertEquals(range - 1, stats.getMax());
assertEquals(IntStream.range(i, range).sum(), stats.getSum());
}
}
public void testLongSortedLimit() {
Cache<Long, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
LongStream.range(0, range).forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Long, String>> entrySet = cache.entrySet();
for (int i = 1; i < range; i++) {
LongSummaryStatistics stats = createStream(entrySet).mapToLong(toLong)
.sorted().limit(i).summaryStatistics();
assertEquals(i, stats.getCount());
assertEquals(0, stats.getMin());
assertEquals(i - 1, stats.getMax());
assertEquals(IntStream.range(0, i).sum(), stats.getSum());
}
}
// DoubleStream tests
static final SerializableToDoubleFunction<Map.Entry<Double, String>> toDouble = Map.Entry::getKey;
public void testDoubleAllMatch() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertFalse(createStream(entrySet).mapToDouble(toDouble).allMatch(i -> i % 2 == 0));
assertFalse(createStream(entrySet).mapToDouble(toDouble).allMatch(i -> i > 5 && i < 0));
assertTrue(createStream(entrySet).mapToDouble(toDouble).allMatch(i -> i < 5));
assertFalse(createStream(entrySet).mapToDouble(toDouble).allMatch(i -> Math.floor(i) == i));
}
public void testDoubleAnyMatch() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).mapToDouble(toDouble).anyMatch(i -> i % 2 == 0));
assertFalse(createStream(entrySet).mapToDouble(toDouble).anyMatch(i -> i > 5 && i < 0));
assertTrue(createStream(entrySet).mapToDouble(toDouble).anyMatch(i -> i < 5));
assertTrue(createStream(entrySet).mapToDouble(toDouble).anyMatch(i -> Math.floor(i) == i));
}
public void testDoubleAverage() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertEquals(2.25, createStream(entrySet).mapToDouble(toDouble).average().getAsDouble());
}
public void testDoubleCollect() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
HashSet<Double> set = createStream(entrySet).mapToDouble(toDouble).collect(HashSet::new,
Set::add, Set::addAll);
assertEquals(10, set.size());
}
public void testDoubleCount() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertEquals(10, createStream(entrySet).mapToDouble(toDouble).count());
}
public void testDoubleFindAny() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertTrue(createStream(entrySet).mapToDouble(toDouble).findAny().isPresent());
assertTrue(createStream(entrySet).mapToDouble(toDouble).filter(e -> e % 2 == 0).findAny().isPresent());
assertTrue(createStream(entrySet).mapToDouble(toDouble).filter(e -> e < 5 && e >= 0).findAny().isPresent());
assertFalse(createStream(entrySet).mapToDouble(toDouble).filter(e -> e > 5).findAny().isPresent());
}
public void testDoubleFindFirst() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertEquals(0.0, createStream(entrySet).mapToDouble(toDouble).sorted().findFirst().getAsDouble());
}
public void testDoubleForEach() {
Cache<Double, String> cache = getCache(0);
int offset = populateNextForEachStructure(new DoubleSummaryStatistics());
try {
testDoubleOperation(() -> {
createStream(cache.entrySet()).mapToDouble(toDouble).forEach(e -> {
DoubleSummaryStatistics stats = getForEachObject(offset);
synchronized (stats) {
stats.accept(e);
}
});
return getForEachObject(offset);
}, cache);
} finally {
clearForEachObject(offset);
}
}
public void testDoubleFlatMapForEach() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
int offset = populateNextForEachStructure(new DoubleSummaryStatistics());
try {
createStream(entrySet).distributedBatchSize(5).mapToDouble(toDouble).flatMap(e -> DoubleStream.of(e, 2.25))
.forEach(e -> {
DoubleSummaryStatistics stats = getForEachObject(offset);
synchronized (stats) {
stats.accept(e);
}
});
DoubleSummaryStatistics stats = getForEachObject(offset);
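// Ten keys (0.0 .. 4.5, sum 22.5) plus ten injected 2.25 values (sum 22.5) -> count 20, sum 45.0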
assertEquals(2.25, stats.getAverage());
assertEquals(0.0, stats.getMin());
assertEquals(4.5, stats.getMax());
assertEquals(20, stats.getCount());
assertEquals(45.0, stats.getSum());
} finally {
clearForEachObject(offset);
}
}
public void testDoubleForEachOrdered() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
List<Double> list = new ArrayList<>(range);
// sorted() uses natural ascending order
createStream(entrySet).mapToDouble(toDouble).sorted().forEachOrdered(
list::add);
assertEquals(range, list.size());
for (int i = 0; i < range; ++i) {
// ascending in 0.5 increments, so the value at index i is i / 2.0
assertEquals((double) i / 2, list.get(i));
}
}
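// DoubleConsumer counterpart: verifies the injected Cache before recording the value in the shared statistics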
@SerializeWith(ForEachDoubleInjected.Externalizer.class)
public static class ForEachDoubleInjected implements DoubleConsumer, CacheAware<Double, String> {
private Cache<?, ?> cache;
private final int cacheOffset;
private final int atomicOffset;
private ForEachDoubleInjected(int cacheOffset, int atomicOffset) {
this.cacheOffset = cacheOffset;
this.atomicOffset = atomicOffset;
}
@Override
public void injectCache(Cache<Double, String> cache) {
this.cache = cache;
}
@Override
public void accept(double value) {
Cache<?, ?> cache = getForEachObject(cacheOffset);
if (cache != null && this.cache != null && cache.getName().equals(this.cache.getName())) {
DoubleSummaryStatistics stats = getForEachObject(atomicOffset);
synchronized (stats) {
stats.accept(value);
}
} else {
fail("Did not receive correct cache!");
}
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<ForEachDoubleInjected> {
@Override
public void writeObject(ObjectOutput output, ForEachDoubleInjected object) throws IOException {
output.writeInt(object.cacheOffset);
output.writeInt(object.atomicOffset);
}
@Override
public ForEachDoubleInjected readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int cacheOffset = input.readInt();
int atomicOffset = input.readInt();
return new ForEachDoubleInjected(cacheOffset, atomicOffset);
}
}
}
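// Populates keys 0.0 .. 4.5 in 0.5 increments and asserts the statistics the supplier computes over them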
private void testDoubleOperation(Supplier<DoubleSummaryStatistics> statisticsSupplier, Cache<Double, String> cache) {
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
DoubleSummaryStatistics stats = statisticsSupplier.get();
assertEquals(2.25, stats.getAverage());
assertEquals(0.0, stats.getMin());
assertEquals(4.5, stats.getMax());
assertEquals(10, stats.getCount());
assertEquals(22.5, stats.getSum());
}
public void testDoubleForEachCacheInjected() {
Cache<Double, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int offset = populateNextForEachStructure(new DoubleSummaryStatistics());
try {
testDoubleOperation(() -> {
createStream(cache.entrySet()).mapToDouble(toDouble).forEach(new ForEachDoubleInjected(cacheOffset, offset));
return getForEachObject(offset);
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(offset);
}
}
public void testDoubleForEachBiConsumer() {
Cache<Double, String> cache = getCache(0);
int cacheOffset = populateNextForEachStructure(cache);
int offset = populateNextForEachStructure(new DoubleSummaryStatistics());
try {
testDoubleOperation(() -> {
createStream(cache.entrySet()).mapToDouble(toDouble).forEach((c, d) -> {
Cache<?, ?> localCache = getForEachObject(cacheOffset);
if (c != null && localCache != null && c.getName().equals(localCache.getName())) {
DoubleSummaryStatistics stats = getForEachObject(offset);
synchronized (stats) {
stats.accept(d);
}
}
});
return getForEachObject(offset);
}, cache);
} finally {
clearForEachObject(cacheOffset);
clearForEachObject(offset);
}
}
public void testDoubleFlatMapObjConsumerForEach() {
Cache<Double, String> cache = getCache(0);
String cacheName = cache.getName();
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
int offset = populateNextForEachStructure(new DoubleSummaryStatistics());
try {
createStream(entrySet).distributedBatchSize(5).mapToDouble(toDouble).flatMap(e -> DoubleStream.of(e, 2.25))
.forEach((c, e) -> {
assertEquals(cacheName, c.getName());
DoubleSummaryStatistics stats = getForEachObject(offset);
synchronized (stats) {
stats.accept(e);
}
});
DoubleSummaryStatistics stats = getForEachObject(offset);
assertEquals(2.25, stats.getAverage());
assertEquals(0.0, stats.getMin());
assertEquals(4.5, stats.getMax());
assertEquals(20, stats.getCount());
assertEquals(45.0, stats.getSum());
} finally {
clearForEachObject(offset);
}
}
public void testDoubleIterator() {
Cache<Double, String> cache = getCache(0);
testDoubleOperation(() -> {
PrimitiveIterator.OfDouble iterator = createStream(cache.entrySet()).mapToDouble(toDouble).iterator();
DoubleSummaryStatistics doubleSummaryStatistics = new DoubleSummaryStatistics();
iterator.forEachRemaining((double e) -> {
assertTrue(cache.containsKey(e));
doubleSummaryStatistics.accept(e);
});
return doubleSummaryStatistics;
}, cache);
}
public void testDoubleSortedIterator() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
PrimitiveIterator.OfDouble iterator = createStream(entrySet).mapToDouble(toDouble).sorted().iterator();
AtomicInteger i = new AtomicInteger();
iterator.forEachRemaining((double e) -> assertEquals((double) i.getAndIncrement() / 2, e));
}
public void testDoubleFlatMapIterator() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
PrimitiveIterator.OfDouble iterator = createStream(entrySet).flatMapToDouble(
e -> DoubleStream.of(e.getKey(), .5)).iterator();
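// 0.5 appears once as a key and once per entry from the flatMap, hence range + 1 occurrences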
int pos = 0;
int halfCount = 0;
while (iterator.hasNext()) {
double next = iterator.nextDouble();
pos++;
if (next == 0.5) {
halfCount++;
}
assertTrue(cache.containsKey(next));
}
assertEquals(range + 1, halfCount);
assertEquals(range * 2, pos);
}
public void testDoubleNoneMatch() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
assertFalse(createStream(entrySet).mapToDouble(toDouble).noneMatch(i -> i % 2 == 0));
assertTrue(createStream(entrySet).mapToDouble(toDouble).noneMatch(i -> i > 5 && i < 0));
assertFalse(createStream(entrySet).mapToDouble(toDouble).noneMatch(i -> i < 5));
}
public void testDoubleReduce1() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
// One value is 0.0 so multiplying them together should be 0.0
assertEquals(0.0, createStream(entrySet).mapToDouble(toDouble).reduce(1.0, (i1, i2) -> i1 * i2));
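// Product of the non-zero keys 0.5 * 1.0 * ... * 4.5 == 708.75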
assertEquals(708.75, createStream(entrySet).mapToDouble(toDouble).filter(i -> i != 0).reduce(1.0,
(i1, i2) -> i1 * i2));
}
public void testDoubleReduce2() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
// One value is 0.0 so multiplying them together should be 0.0
assertEquals(0.0, createStream(entrySet).mapToDouble(toDouble).reduce((i1, i2) -> i1 * i2).getAsDouble());
assertEquals(708.75, createStream(entrySet).mapToDouble(toDouble).filter(i -> i != 0)
.reduce((i1, i2) -> i1 * i2).getAsDouble());
}
public void testDoubleSummaryStatistics() {
Cache<Double, String> cache = getCache(0);
testDoubleOperation(() -> createStream(cache.entrySet()).mapToDouble(toDouble).summaryStatistics(), cache);
}
public void testDoubleToArray() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
double[] array = createStream(entrySet).mapToDouble(toDouble).toArray();
assertEquals(cache.size(), array.length);
Spliterator.OfDouble spliterator = Spliterators.spliterator(array, Spliterator.DISTINCT);
StreamSupport.doubleStream(spliterator, true).forEach(e -> assertTrue(cache.containsKey(e)));
}
public void testDoubleSum() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
// This isn't the best usage of this, but should be a usable example
double result = createStream(entrySet).mapToDouble(toDouble).sum();
assertEquals((double) (range - 1) * (range / 2) / 2, result);
}
public void testDoubleMax() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertEquals(4.5, createStream(entrySet).mapToDouble(toDouble).max().getAsDouble());
}
public void testDoubleMin() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
DoubleStream.iterate(0.0, d -> d + .5).limit(range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
assertEquals(0.0, createStream(entrySet).mapToDouble(toDouble).min().getAsDouble());
}
public void testDoubleSortedSkip() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).mapToDouble(value -> value).forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
for (int i = 0; i < range; i++) {
DoubleSummaryStatistics stats = createStream(entrySet).mapToDouble(toDouble)
.sorted().skip(i).summaryStatistics();
assertEquals(range - i, stats.getCount());
assertEquals((double) i, stats.getMin());
assertEquals((double) range - 1, stats.getMax());
assertEquals((double) IntStream.range(i, range).sum(), stats.getSum());
}
}
public void testDoubleSortedLimit() {
Cache<Double, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).mapToDouble(value -> value).forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Double, String>> entrySet = cache.entrySet();
for (int i = 1; i < range; i++) {
DoubleSummaryStatistics stats = createStream(entrySet).mapToDouble(toDouble)
.sorted().limit(i).summaryStatistics();
assertEquals(i, stats.getCount());
assertEquals(0d, stats.getMin());
assertEquals((double) i - 1, stats.getMax());
assertEquals((double) IntStream.range(0, i).sum(), stats.getSum());
}
}
// KeySet Tests
public void testObjKeySetMax() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Integer> keySet = cache.keySet();
assertEquals(9, createStream(keySet).max(Integer::compare).get().intValue());
}
public void testKeySetIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Integer> keySet = cache.keySet();
Iterator<Integer> iterator = createStream(keySet).iterator();
AtomicInteger count = new AtomicInteger();
iterator.forEachRemaining(e -> {
assertTrue(cache.containsKey(e));
count.addAndGet(e);
});
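      // Keys 0..9 sum to 45, i.e. (range - 1) * (range / 2)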
assertEquals((range - 1) * (range / 2), count.get());
}
public void testKeySetMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Integer> keySet = cache.keySet();
Iterator<String> iterator = createStream(keySet).map(i -> i + "-value").iterator();
AtomicInteger count = new AtomicInteger();
iterator.forEachRemaining(e -> {
Integer key = Integer.valueOf(e.substring(0, 1));
assertEquals(cache.get(key), e);
count.addAndGet(key);
});
assertEquals((range - 1) * (range / 2), count.get());
}
public void testKeySetFlatMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Integer> keySet = cache.keySet();
PrimitiveIterator.OfInt iterator = createStream(keySet).flatMapToInt(
i -> IntStream.of(i, 3)).iterator();
int pos = 0;
int halfCount = 0;
while (iterator.hasNext()) {
int next = iterator.nextInt();
if (next == 3) {
halfCount++;
}
pos++;
assertTrue(cache.containsKey(next));
}
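      // Every key emits the constant 3 (range occurrences) and the key 3 also emits itself,
      // hence range + 1 threes among range * 2 total elements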
assertEquals(range + 1, halfCount);
assertEquals(range * 2, pos);
}
// Values Tests
public void testObjValuesMax() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheCollection<String> keySet = cache.values();
assertEquals("9-value",
createStream(keySet).max((e1, e2) -> Integer.compare(
Integer.valueOf(e1.substring(0, 1)),
Integer.valueOf(e2.substring(0, 1)))).get());
}
   // Test to make sure max works correctly when it returns an empty result
public void testObjMaxEmpty() {
Cache<Integer, String> cache = getCache(0);
assertEquals(0, cache.size());
CacheCollection<String> keySet = cache.values();
assertFalse(
createStream(keySet).max((e1, e2) -> Integer.compare(
Integer.valueOf(e1.substring(0, 1)),
Integer.valueOf(e2.substring(0, 1)))).isPresent());
}
public void testObjValuesIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheCollection<String> values = cache.values();
Iterator<String> iterator = createStream(values).iterator();
AtomicInteger count = new AtomicInteger();
iterator.forEachRemaining(e -> {
Integer key = Integer.valueOf(e.substring(0, 1));
assertEquals(cache.get(key), e);
count.addAndGet(key);
});
assertEquals((range - 1) * (range / 2), count.get());
}
public void testValuesMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheCollection<String> values = cache.values();
PrimitiveIterator.OfInt iterator = createStream(values).mapToInt(
e -> Integer.valueOf(e.substring(0, 1))).iterator();
AtomicInteger count = new AtomicInteger();
iterator.forEachRemaining((int e) -> {
assertTrue(cache.containsKey(e));
count.addAndGet(e);
});
assertEquals((range - 1) * (range / 2), count.get());
}
public void testValuesFlatMapIterator() {
Cache<Integer, String> cache = getCache(0);
int range = 10;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheCollection<String> values = cache.values();
PrimitiveIterator.OfInt iterator = createStream(values).flatMapToInt(
e -> IntStream.of(Integer.valueOf(e.substring(0, 1)), e.length())).iterator();
int pos = 0;
int halfCount = 0;
while (iterator.hasNext()) {
int next = iterator.nextInt();
if (next == 7) {
halfCount++;
}
pos++;
assertTrue(cache.containsKey(next));
}
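      // Every value "N-value" has length 7 (range occurrences) and the value for key 7 also
      // emits 7 itself, hence range + 1 sevens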
assertEquals(range + 1, halfCount);
assertEquals(range * 2, pos);
}
public void testKeySegmentFilter() {
Cache<Integer, String> cache = getCache(0);
int range = 12;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
// Take the first half of the segments
int segments = cache.getCacheConfiguration().clustering().hash().numSegments() / 2;
AtomicInteger realCount = new AtomicInteger();
KeyPartitioner keyPartitioner = cache.getAdvancedCache().getComponentRegistry().getComponent(KeyPartitioner.class);
cache.forEach((k, v) -> {
         // Only segments 0..segments-1 are requested below, so count only those
         if (segments > keyPartitioner.getSegment(k)) {
realCount.incrementAndGet();
}
});
assertEquals(realCount.get(), createStream(entrySet).filterKeySegments(
IntStream.range(0, segments).boxed().collect(Collectors.toSet())).count());
}
public void testKeyFilter() {
Cache<Integer, String> cache = getCache(0);
int range = 12;
// First populate the cache with a bunch of values
IntStream.range(0, range).boxed().forEach(i -> cache.put(i, i + "-value"));
CacheSet<Map.Entry<Integer, String>> entrySet = cache.entrySet();
Set<Integer> keys = IntStream.of(2, 5, 8, 3, 1, range + 2).boxed().collect(Collectors.toSet());
assertEquals(keys.size() - 1, createStream(entrySet).filterKeys(keys).count());
}
}
| 92,999
| 39.15544
| 121
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamOffHeapTest.java
|
package org.infinispan.stream;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.testng.annotations.Test;
/**
 * Verifies stream tests work on a regular distributed stream with off heap enabled
*/
@Test(groups = "functional", testName = "streams.DistributedStreamOffHeapTest")
public class DistributedStreamOffHeapTest extends DistributedStreamTest {
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
builder.memory().storageType(StorageType.OFF_HEAP);
}
   // Test is disabled: it assumes specific keys tie to specific segments, which doesn't
   // hold with off heap
@Test(enabled = false)
@Override
public void testKeySegmentFilter() {
super.testKeySegmentFilter();
}
}
| 815
| 31.64
| 96
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamIteratorRepeatableReadTxTest.java
|
package org.infinispan.stream;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import jakarta.transaction.NotSupportedException;
import jakarta.transaction.SystemException;
import jakarta.transaction.TransactionManager;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.distribution.MagicKey;
import org.infinispan.filter.AcceptAllKeyValueFilter;
import org.infinispan.filter.CacheFilters;
import org.infinispan.filter.CompositeKeyValueFilterConverter;
import org.infinispan.filter.KeyValueFilter;
import org.infinispan.filter.KeyValueFilterConverter;
import org.testng.annotations.Test;
/**
 * Test to verify the distributed stream iterator when used inside a transaction
*
* @author wburns
* @since 8.0
*/
@Test(groups = {"functional", "smoke"}, testName = "stream.DistributedStreamIteratorRepeatableReadTxTest")
public class DistributedStreamIteratorRepeatableReadTxTest extends DistributedStreamIteratorTest {
public DistributedStreamIteratorRepeatableReadTxTest() {
super(true, CacheMode.DIST_SYNC);
}
/**
* See ISPN-12731 Cache collections ignore values added in transaction
*/
public void testCacheCollectionsIncludesEntriesNotYetCommitted() throws Exception {
Cache<Object, String> cache = cache(0, CACHE_NAME);
TransactionManager tm = tm(cache);
tm.begin();
try {
Map<Object, String> inserted = new LinkedHashMap<>();
for (int i = 0; i < 3; ++i) {
Object key = new MagicKey(cache(i, CACHE_NAME));
cache.put(key, key.toString());
inserted.put(key, key.toString());
}
// cache collections use streams internally
Set<Object> expectedKeys = inserted.keySet();
Set<Object> keySetResults = new HashSet<>(cache.keySet());
assertEquals(expectedKeys, keySetResults);
Set<String> expectedValues = new HashSet<>(inserted.values());
Set<String> valuesResults = new HashSet<>(cache.values());
assertEquals(expectedValues, valuesResults);
Set<Map.Entry<Object, String>> expectedEntries = inserted.entrySet();
Set<Map.Entry<Object, String>> entrySetResults = new HashSet<>(cache.entrySet());
assertEquals(expectedEntries, entrySetResults);
} finally {
tm.rollback();
}
}
public void testFilterWithExistingTransaction() throws Exception {
Map<Object, String> values = putValueInEachCache(3);
Cache<Object, String> cache = cache(0, CACHE_NAME);
TransactionManager tm = tm(cache);
tm.begin();
try {
Object key = "filtered-key";
cache.put(key, "filtered-value");
Iterator<CacheEntry<Object, String>> iterator = cache.getAdvancedCache().cacheEntrySet().stream()
.filter(entry -> !Objects.equals(key, entry.getKey()))
.iterator();
Map<Object, String> results = mapFromIterator(iterator);
assertEquals(values, results);
} finally {
tm.rollback();
}
}
@Test
public void testConverterWithExistingTransaction() throws NotSupportedException, SystemException {
Map<Object, String> values = putValuesInCache();
Cache<Object, String> cache = cache(0, CACHE_NAME);
TransactionManager tm = tm(cache);
tm.begin();
try {
Object key = "converted-key";
String value = "converted-value";
values.put(key, value);
cache.put(key, "converted-value");
try (CacheStream<CacheEntry<Object, String>> stream = cache.getAdvancedCache().cacheEntrySet().stream().
filter(CacheFilters.predicate(AcceptAllKeyValueFilter.getInstance())).
map(CacheFilters.function(new StringTruncator(2, 5)))) {
Map<Object, String> results = mapFromStream(stream);
assertEquals(values.size(), results.size());
for (Map.Entry<Object, String> entry : values.entrySet()) {
assertEquals(entry.getValue().substring(2, 7), results.get(entry.getKey()));
}
}
} finally {
tm.rollback();
}
}
@Test
public void testKeyFilterConverterWithExistingTransaction() throws NotSupportedException, SystemException {
Map<Object, String> values = putValuesInCache();
Cache<Object, String> cache = cache(0, CACHE_NAME);
TransactionManager tm = tm(cache);
tm.begin();
try {
Iterator<Map.Entry<Object, String>> iter = values.entrySet().iterator();
Map.Entry<Object, String> extraEntry = iter.next();
while (iter.hasNext()) {
iter.next();
iter.remove();
}
Object key = "converted-key";
String value = "converted-value";
values.put(key, value);
cache.put(key, "converted-value");
Collection<Object> acceptedKeys = new ArrayList<>();
acceptedKeys.add(key);
acceptedKeys.add(extraEntry.getKey());
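         // The intersection cast makes the lambda Serializable so the filter can be marshalled
         // to the remote nodes that own the data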
KeyValueFilter<Object, String> filter = (Serializable & KeyValueFilter<Object, String>)(k, v, m) -> acceptedKeys.contains(k);
KeyValueFilterConverter<Object, String, String> filterConverter = new CompositeKeyValueFilterConverter<>(filter,
new StringTruncator(2, 5));
try (CacheStream<CacheEntry<Object, String>> stream = CacheFilters.filterAndConvert(
cache.getAdvancedCache().cacheEntrySet().stream(), filterConverter)) {
Map<Object, String> results = mapFromStream(stream);
assertEquals(values.size(), results.size());
for (Map.Entry<Object, String> entry : values.entrySet()) {
assertEquals(entry.getValue().substring(2, 7), results.get(entry.getKey()));
}
}
} finally {
tm.rollback();
}
}
public void testStreamWithMissedKeyInTransaction() throws Exception {
AdvancedCache<Object, String> cache = advancedCache(0, CACHE_NAME);
TransactionManager tm = tm(cache);
tm.begin();
try {
Object localMissingKey = new MagicKey("key1", cache);
Object remoteMissingKey = new MagicKey("key2", cache(1, CACHE_NAME));
assertFalse(cache.containsKey(localMissingKey));
assertFalse(cache.containsKey(remoteMissingKey));
Iterator<CacheEntry<Object, String>> iterator = cache.getAdvancedCache().cacheEntrySet().stream().iterator();
Map<Object, String> results = mapFromIterator(iterator);
assertEquals(Collections.emptyMap(), results);
// size() also uses streams internally
assertEquals(0, cache.size());
} finally {
tm.rollback();
}
}
}
| 7,157
| 37.278075
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/SimpleParallelStreamTest.java
|
package org.infinispan.stream;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
@Test(groups = "functional", testName = "streams.SimpleParallelStreamTest")
public class SimpleParallelStreamTest extends LocalParallelStreamTest {
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
builder.simpleCache(true);
}
}
| 462
| 27.9375
| 75
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/SimpleStreamOffHeapTest.java
|
package org.infinispan.stream;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.testng.annotations.Test;
/**
* @author William Burns
*/
@Test(groups = "functional", testName = "streams.SimpleStreamOffHeapTest")
public class SimpleStreamOffHeapTest extends SimpleStreamTest {
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
super.enhanceConfiguration(builder);
builder.memory().storageType(StorageType.OFF_HEAP);
}
}
| 552
| 29.722222
| 74
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/LocalStreamTest.java
|
package org.infinispan.stream;
import org.infinispan.CacheCollection;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* Verifies stream tests work on a local stream
*/
@Test(groups = "functional", testName = "streams.LocalStreamTest")
public class LocalStreamTest extends BaseStreamTest {
public LocalStreamTest() {
super(false);
cacheMode(CacheMode.LOCAL);
}
@Override
protected <E> CacheStream<E> createStream(CacheCollection<E> entries) {
return entries.stream();
}
@Test(enabled = false)
@Override
public void testKeySegmentFilter() {
}
}
| 675
| 22.310345
| 74
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedWriteBehindStreamIteratorTest.java
|
package org.infinispan.stream;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Map;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.MagicKey;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.reactive.publisher.impl.commands.batch.InitialPublisherCommand;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.Mocks;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "iteration.DistributedWriteBehindStreamIteratorTest")
public class DistributedWriteBehindStreamIteratorTest extends BaseSetupStreamIteratorTest {
private boolean asyncStore;
private boolean sharedStore;
public DistributedWriteBehindStreamIteratorTest() {
// Cache Mode is provided in factory methods
super(false, null);
}
DistributedWriteBehindStreamIteratorTest async(boolean asyncStore) {
this.asyncStore = asyncStore;
return this;
}
DistributedWriteBehindStreamIteratorTest shared(boolean sharedStore) {
this.sharedStore = sharedStore;
return this;
}
@Override
public Object[] factory() {
return new Object[]{
new DistributedWriteBehindStreamIteratorTest().async(true).shared(true).cacheMode(CacheMode.REPL_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(false).shared(true).cacheMode(CacheMode.REPL_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(true).shared(false).cacheMode(CacheMode.REPL_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(false).shared(false).cacheMode(CacheMode.REPL_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(true).shared(true).cacheMode(CacheMode.DIST_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(false).shared(true).cacheMode(CacheMode.DIST_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(true).shared(false).cacheMode(CacheMode.DIST_SYNC),
new DistributedWriteBehindStreamIteratorTest().async(false).shared(false).cacheMode(CacheMode.DIST_SYNC),
};
}
@Override
protected Object[] parameterValues() {
return concat(super.parameterValues(), asyncStore, sharedStore);
}
@Override
protected String[] parameterNames() {
return concat(super.parameterNames(), "asyncStore", "sharedStore");
}
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
DummyInMemoryStoreConfigurationBuilder dimscb = builder.persistence().addStore(DummyInMemoryStoreConfigurationBuilder.class);
if (sharedStore) {
dimscb.shared(true);
}
if (asyncStore) {
dimscb.storeName(getTestName())
.async().enable();
}
}
@DataProvider(name = "rehashAware")
public Object[][] dataProvider() {
return new Object[][] {
{ Boolean.TRUE } , { Boolean.FALSE }
};
}
@Test(dataProvider = "rehashAware")
public void testBackupSegmentsOptimizationWithWriteBehindStore (boolean rehashAware) {
Cache<Object, String> cache1 = cache(1, CACHE_NAME);
RpcManager rpcManager = Mocks.replaceComponentWithSpy(cache1, RpcManager.class);
for (Cache<Object, String> cache : this.<Object, String>caches(CACHE_NAME)) {
MagicKey key = new MagicKey(cache);
cache.put(key, key.toString());
}
// Remember that segment ownership is as {0, 1}, {1, 2}, {2, 1}
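      // (segment ownership comes from TestDefaultConsistentHashFactory in BaseSetupStreamIteratorTest
      // when the cache is distributed)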
CacheStream<Map.Entry<Object, String>> stream = cache1.entrySet().stream();
if (!rehashAware) stream = stream.disableRehashAware();
int invocationCount;
if (cacheMode.isReplicated()) {
Map<Object, String> entries = mapFromIterator(stream.iterator());
assertEquals(caches(CACHE_NAME).size(), entries.size());
invocationCount = cacheManagers.size() - 1;
} else {
// Distributed cache
// Cache1 owns 1 (primary) and 2 (backup)
// When it is a write behind shared store it will have to go remote otherwise will stay local
Map<Object, String> entries = mapFromIterator(stream.filterKeySegments(IntSets.immutableSet(2)).iterator());
assertEquals(1, entries.size());
invocationCount = 1;
}
// We can't stay local if we have a shared and async store - this is because write modifications are stored
// on the primary owner, so we could miss updates
if (asyncStore && sharedStore) {
verify(rpcManager, times(invocationCount)).invokeCommand(any(Address.class), any(InitialPublisherCommand.class), any(), any());
} else {
verify(rpcManager, never()).invokeCommand(any(Address.class), any(InitialPublisherCommand.class), any(), any());
}
}
}
| 5,276
| 39.906977
| 136
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedSequentialNonRehashStreamTest.java
|
package org.infinispan.stream;
import org.infinispan.CacheCollection;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
/**
* Verifies stream tests work when rehash is disabled on a sequential stream
*/
@Test(groups = "functional", testName = "streams.DistributedSequentialNonRehashStreamTest")
@InCacheMode({ CacheMode.DIST_SYNC })
public class DistributedSequentialNonRehashStreamTest extends DistributedStreamTest {
@Override
protected <E> CacheStream<E> createStream(CacheCollection<E> entries) {
return entries.stream().sequentialDistribution().disableRehashAware();
}
}
| 714
| 33.047619
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamIteratorReadCommittedTxTest.java
|
package org.infinispan.stream;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* Test streams with READ_COMMITTED isolation level (reproducer for ISPN-9305)
*
* @author Dan Berindei
* @since 9.3
*/
@Test(groups = {"functional", "smoke"}, testName = "stream.DistributedStreamIteratorReadCommittedTxTest")
public class DistributedStreamIteratorReadCommittedTxTest extends DistributedStreamIteratorRepeatableReadTxTest {
@Override
protected void enhanceConfiguration(ConfigurationBuilder builder) {
builder.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
}
}
| 697
| 33.9
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedParallelStreamTest.java
|
package org.infinispan.stream;
import org.infinispan.CacheCollection;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
/**
* Verifies stream tests work when stream is parallel with parallel distribution
*/
@Test(groups = "functional", testName = "streams.DistributedParallelStreamTest")
@InCacheMode({ CacheMode.DIST_SYNC })
public class DistributedParallelStreamTest extends DistributedStreamTest {
@Override
protected <E> CacheStream<E> createStream(CacheCollection<E> entries) {
return entries.parallelStream().parallelDistribution();
}
}
| 681
| 31.47619
| 80
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/LocalStreamIteratorTest.java
|
package org.infinispan.stream;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* Test to verify stream behavior for a local cache.
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.LocalStreamIteratorTest")
public class LocalStreamIteratorTest extends BaseStreamIteratorTest {
public LocalStreamIteratorTest() {
super(false, CacheMode.LOCAL);
}
protected final AtomicInteger counter = new AtomicInteger();
@Override
protected Object getKeyTiedToCache(Cache<?, ?> cache) {
return cache.toString() + "-" + counter.getAndIncrement();
}
}
| 732
| 25.178571
| 73
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/BaseStreamIteratorTest.java
|
package org.infinispan.stream;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.TransientMortalCacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.MagicKey;
import org.infinispan.filter.CacheFilters;
import org.infinispan.filter.CompositeKeyValueFilterConverter;
import org.infinispan.filter.KeyValueFilter;
import org.infinispan.filter.KeyValueFilterConverter;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Base class for stream iterator tests
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.BaseStreamIteratorTest")
public abstract class BaseStreamIteratorTest extends BaseSetupStreamIteratorTest {
public BaseStreamIteratorTest(boolean tx, CacheMode mode) {
super(tx, mode);
}
protected abstract Object getKeyTiedToCache(Cache<?, ?> cache);
protected Map<Object, String> putValuesInCache() {
// This is linked to keep insertion order
Map<Object, String> valuesInserted = new LinkedHashMap<>();
Cache<Object, String> cache = cache(0, CACHE_NAME);
Object key = getKeyTiedToCache(cache);
cache.put(key, key.toString());
valuesInserted.put(key, key.toString());
return valuesInserted;
}
@AfterMethod
public void removeInterceptor() {
advancedCache(0, CACHE_NAME).getAsyncInterceptorChain().removeInterceptor(AssertSkipCacheStoreInterceptor.class);
}
@Test
public void simpleTest() {
Map<Object, String> values = putValuesInCache();
Cache<MagicKey, String> cache = cache(0, CACHE_NAME);
Iterator<Map.Entry<MagicKey, String>> iterator = cache.entrySet().iterator();
Map<MagicKey, String> results = mapFromIterator(iterator);
assertEquals(values, results);
}
@Test
public void simpleTestIteratorWithMetadata() {
      Set<CacheEntry<Object, String>> valuesInserted = new HashSet<>();
Cache<Object, String> cache = cache(0, CACHE_NAME);
for (int i = 0; i < 3; ++i) {
Object key = getKeyTiedToCache(cache);
TimeUnit unit = TimeUnit.MINUTES;
cache.put(key, key.toString(), 10, unit, i + 1, unit);
valuesInserted.add(new TransientMortalCacheEntry(key, key.toString(), unit.toMillis(i + 1), unit.toMillis(10),
System.currentTimeMillis()));
}
Set<CacheEntry<Object, String>> retrievedValues = new HashSet<>();
Iterator<CacheEntry<Object, String>> iterator = cache.getAdvancedCache().cacheEntrySet().stream().iterator();
while (iterator.hasNext()) {
CacheEntry<Object, String> entry = iterator.next();
retrievedValues.add(entry);
}
assertEquals(retrievedValues.size(), valuesInserted.size());
      // Have to do our own equality check since the transient entry's creation time can't be guaranteed to match
for (CacheEntry<Object, String> inserted : valuesInserted) {
CacheEntry<Object, String> found = null;
for (CacheEntry<Object, String> retrieved : retrievedValues) {
if (retrieved.getKey().equals(inserted.getKey())) {
found = retrieved;
break;
}
}
assertNotNull("No retrieved Value matching" + inserted, found);
assertEquals(found.getValue(), inserted.getValue());
assertEquals(found.getMaxIdle(), inserted.getMaxIdle());
assertEquals(found.getLifespan(), inserted.getLifespan());
}
}
@Test
public void simpleTestLocalFilter() {
Map<Object, String> values = putValuesInCache();
Iterator<Map.Entry<Object, String>> iter = values.entrySet().iterator();
Object excludedKey = iter.next().getKey();
// Remove it so comparison below will be correct
iter.remove();
Cache<MagicKey, String> cache = cache(0, CACHE_NAME);
Iterator<CacheEntry<MagicKey, String>> iterator = cache.getAdvancedCache().cacheEntrySet().stream()
.filter(entry -> !Objects.equals(excludedKey, entry.getKey()))
.iterator();
Map<MagicKey, String> results = mapFromIterator(iterator);
assertEquals(values, results);
}
@Test
public void testFilterAndConverterCombined() {
Map<Object, String> values = putValuesInCache();
Iterator<Map.Entry<Object, String>> iter = values.entrySet().iterator();
Object excludedKey = iter.next().getKey();
// Remove it so comparison below will be correct
iter.remove();
Cache<MagicKey, String> cache = cache(0, CACHE_NAME);
KeyValueFilter<Object, String> filter = (Serializable & KeyValueFilter<Object, String>)(k, v, m) -> !Objects.equals(k, excludedKey);
KeyValueFilterConverter<MagicKey, String, String> filterConverter = new CompositeKeyValueFilterConverter<>(filter,
new StringTruncator(2, 5));
try (CacheStream<CacheEntry<MagicKey, String>> stream = CacheFilters.filterAndConvert(
cache.getAdvancedCache().cacheEntrySet().stream(), filterConverter)) {
Map<MagicKey, String> results = mapFromStream(stream);
assertEquals(values.size(), results.size());
for (Map.Entry<Object, String> entry : values.entrySet()) {
assertEquals(entry.getValue().substring(2, 7), results.get(entry.getKey()));
}
}
}
@Test
public void testKeySetRemove() {
Map<Object, String> values = putValuesInCache();
final Cache<Object, Object> cache = cache(0, CACHE_NAME);
extractInterceptorChain(cache).addInterceptor(new AssertSkipCacheStoreInterceptor(), 0);
for (Iterator<Object> it = cache(0, CACHE_NAME).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).keySet().iterator();
it.hasNext();) {
assertTrue(values.containsKey(it.next()));
it.remove();
}
assertEquals(0, cache.size());
}
@Test
public void testKeySetStreamRemove() {
Map<Object, String> values = putValuesInCache();
final Cache<Object, Object> cache = cache(0, CACHE_NAME);
extractInterceptorChain(cache).addInterceptor(new AssertSkipCacheStoreInterceptor(), 0);
Iterator<Object> it = cache(0, CACHE_NAME).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE)
.keySet()
.stream()
.iterator();
assertTrue(it.hasNext());
assertTrue(values.containsKey(it.next()));
// We don't support remove on stream iterator
Exceptions.expectException(UnsupportedOperationException.class, it::remove);
}
@Test
public void testValuesRemove() {
Map<Object, String> values = putValuesInCache();
final Cache<Object, Object> cache = cache(0, CACHE_NAME);
extractInterceptorChain(cache).addInterceptor(new AssertSkipCacheStoreInterceptor(), 0);
for (Iterator<Object> it = cache(0, CACHE_NAME).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).values().iterator();
it.hasNext();) {
assertTrue(values.containsValue(it.next()));
it.remove();
}
assertEquals(0, cache.size());
}
@Test
public void testValuesStreamRemove() {
Map<Object, String> values = putValuesInCache();
final Cache<Object, Object> cache = cache(0, CACHE_NAME);
extractInterceptorChain(cache).addInterceptor(new AssertSkipCacheStoreInterceptor(), 0);
Iterator<Object> it = cache(0, CACHE_NAME).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE)
.values()
.stream()
.iterator();
assertTrue(it.hasNext());
assertTrue(values.containsValue(it.next()));
// We don't support remove on stream iterator
Exceptions.expectException(UnsupportedOperationException.class, it::remove);
}
@Test
public void testEntrySetRemove() {
Map<Object, String> values = putValuesInCache();
final Cache<Object, Object> cache = cache(0, CACHE_NAME);
extractInterceptorChain(cache).addInterceptor(new AssertSkipCacheStoreInterceptor(), 0);
for (Iterator<Map.Entry<Object, Object>> it = cache(0, CACHE_NAME).getAdvancedCache().withFlags(
Flag.SKIP_CACHE_STORE).entrySet().iterator(); it.hasNext();) {
Map.Entry<Object, Object> entry = it.next();
Object key = entry.getKey();
assertEquals(values.get(key), entry.getValue());
it.remove();
}
assertEquals(0, cache.size());
}
@Test
public void testEntrySetStreamRemove() {
Map<Object, String> values = putValuesInCache();
final Cache<Object, Object> cache = cache(0, CACHE_NAME);
extractInterceptorChain(cache).addInterceptor(new AssertSkipCacheStoreInterceptor(), 0);
Iterator<Map.Entry<Object, Object>> it = cache(0, CACHE_NAME).getAdvancedCache().withFlags(
Flag.SKIP_CACHE_STORE)
.entrySet()
.stream()
.iterator();
assertTrue(it.hasNext());
Map.Entry<Object, Object> entry = it.next();
Object key = entry.getKey();
assertEquals(values.get(key), entry.getValue());
// We don't support remove on stream iterator
Exceptions.expectException(UnsupportedOperationException.class, it::remove);
}
static class AssertSkipCacheStoreInterceptor extends DDAsyncInterceptor {
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
assertTrue(command.hasAnyFlag(FlagBitSets.SKIP_CACHE_STORE));
return super.visitRemoveCommand(ctx, command);
}
}
}
| 10,464
| 37.759259
| 138
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/ReplicatedStreamIteratorWithLoaderTest.java
|
package org.infinispan.stream;
import org.infinispan.configuration.cache.CacheMode;
import org.testng.annotations.Test;
/**
* Test to verify replicated stream behavior when a loader is present
*
* @author afield
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.ReplicatedStreamIteratorWithLoaderTest")
public class ReplicatedStreamIteratorWithLoaderTest extends BaseStreamIteratorWithLoaderTest {
public ReplicatedStreamIteratorWithLoaderTest() {
super(false, CacheMode.REPL_SYNC);
}
}
| 539
| 26
| 94
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/BaseSetupStreamIteratorTest.java
|
package org.infinispan.stream;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.HashConfigurationBuilder;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.distribution.MagicKey;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.filter.Converter;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.Metadata;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoName;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.BaseControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* Base class used solely for setting up cluster configuration for use with stream iterators
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.BaseSetupStreamIteratorTest")
public abstract class BaseSetupStreamIteratorTest extends MultipleCacheManagersTest {
public static final int NUM_NODES = 3;
protected final String CACHE_NAME = "testCache";
protected ConfigurationBuilder builderUsed;
protected SerializationContextInitializer sci;
public BaseSetupStreamIteratorTest(boolean tx, CacheMode mode) {
transactional = tx;
cacheMode = mode;
}
protected void enhanceConfiguration(ConfigurationBuilder builder) {
      // Do nothing to the config by default; subclasses override this to customize it
}
@Override
protected void createCacheManagers() throws Throwable {
builderUsed = new ConfigurationBuilder();
sci = new StreamSerializationContextImpl();
HashConfigurationBuilder hashConfiguration = builderUsed.clustering().cacheMode(cacheMode).hash().numSegments(3);
if (!cacheMode.isReplicated()) {
BaseControlledConsistentHashFactory<? extends ConsistentHash> chf = new TestDefaultConsistentHashFactory();
hashConfiguration.consistentHashFactory(chf);
}
if (transactional) {
builderUsed.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
}
if (cacheMode.isClustered()) {
builderUsed.clustering().stateTransfer().chunkSize(5);
enhanceConfiguration(builderUsed);
createClusteredCaches(NUM_NODES, CACHE_NAME, sci, builderUsed, new TransportFlags().withFD(true));
} else {
enhanceConfiguration(builderUsed);
EmbeddedCacheManager cm = TestCacheManagerFactory.createCacheManager(sci, builderUsed);
cacheManagers.add(cm);
cm.defineConfiguration(CACHE_NAME, builderUsed.build());
}
}
protected static <K, V> Map<K, V> mapFromIterator(Iterator<? extends Map.Entry<K, V>> iterator) {
Map<K, V> map = new HashMap<>();
while (iterator.hasNext()) {
Map.Entry<K, V> entry = iterator.next();
map.put(entry.getKey(), entry.getValue());
}
return map;
}
protected static <K, V> Map<K, V> mapFromStream(CacheStream<CacheEntry<K, V>> stream) {
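      // CacheStream#collect takes a Supplier rather than a Collector so that the Collector
      // can be recreated after marshalling, e.g. on the remote nodes of a distributed stream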
return stream.collect(() -> Collectors.toMap(CacheEntry::getKey, CacheEntry::getValue));
}
@ProtoName("BaseSetupStreamStringTrunctator")
public static class StringTruncator implements Converter<Object, String, String> {
@ProtoField(number = 1, defaultValue = "0")
final int beginning;
@ProtoField(number = 2, defaultValue = "0")
final int length;
@ProtoFactory
StringTruncator(int beginning, int length) {
this.beginning = beginning;
this.length = length;
}
@Override
public String convert(Object key, String value, Metadata metadata) {
if (value != null && value.length() > beginning + length) {
return value.substring(beginning, beginning + length);
} else {
throw new IllegalStateException("String should be longer than truncation size! Possible double conversion performed!");
}
}
}
public static class TestDefaultConsistentHashFactory
extends BaseControlledConsistentHashFactory<DefaultConsistentHash> {
TestDefaultConsistentHashFactory() {
super(new DefaultTrait(), 3);
}
@Override
protected int[][] assignOwners(int numSegments, List<Address> members) {
// The test needs a segment owned by nodes 01, 12, and 21 when there are 3 nodes in the cluster.
// There are no restrictions for before/after, so we make the coordinator the primary owner of all segments.
switch (members.size()) {
case 1:
return new int[][]{{0}, {0}, {0}};
case 2:
return new int[][]{{0, 0}, {0, 1}, {0, 1}};
default:
return new int[][]{{0, 1}, {1, 2}, {2, 1}};
}
}
}
protected Map<Integer, Set<Map.Entry<Object, String>>> generateEntriesPerSegment(KeyPartitioner keyPartitioner,
Iterable<Map.Entry<Object, String>> entries) {
Map<Integer, Set<Map.Entry<Object, String>>> returnMap = new HashMap<>();
for (Map.Entry<Object, String> value : entries) {
int segment = keyPartitioner.getSegment(value.getKey());
Set<Map.Entry<Object, String>> set = returnMap.computeIfAbsent(segment, k -> new LinkedHashSet<>());
set.add(new ImmortalCacheEntry(value.getKey(), value.getValue()));
}
return returnMap;
}
@AutoProtoSchemaBuilder(
// TODO use this or just explicitly add required classes?
// dependsOn = org.infinispan.test.TestDataSCI.class,
includeClasses = {
BaseSetupStreamIteratorTest.StringTruncator.class,
BaseSetupStreamIteratorTest.TestDefaultConsistentHashFactory.class,
MagicKey.class
},
schemaFileName = "core.stream.proto",
schemaFilePath = "proto/generated",
schemaPackageName = "org.infinispan.test.core.stream",
service = false
)
interface StreamSerializationContext extends SerializationContextInitializer {
}
}
| 6,985
| 40.337278
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedSequentialStreamTest.java
|
package org.infinispan.stream;
import org.infinispan.CacheCollection;
import org.infinispan.CacheStream;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
/**
* Verifies stream tests work on a sequential stream
*/
@Test(groups = "functional", testName = "streams.DistributedSequentialStreamTest")
@InCacheMode({CacheMode.DIST_SYNC})
public class DistributedSequentialStreamTest extends DistributedStreamTest {
@Override
protected <E> CacheStream<E> createStream(CacheCollection<E> entries) {
return entries.stream().sequentialDistribution();
}
}
| 649
| 29.952381
| 82
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DistributedStreamIteratorWithStoreAsBinaryTest.java
|
package org.infinispan.stream;
import static org.testng.Assert.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.distribution.MagicKey;
import org.infinispan.filter.CacheFilters;
import org.infinispan.filter.KeyValueFilter;
import org.infinispan.metadata.Metadata;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
* Test to verify distributed entry behavior when store as binary is used
*
* @author wburns
* @since 8.0
*/
@Test(groups = "functional", testName = "stream.DistributedStreamIteratorWithStoreAsBinaryTest")
@InCacheMode({ CacheMode.DIST_SYNC })
public class DistributedStreamIteratorWithStoreAsBinaryTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builderUsed = new ConfigurationBuilder();
builderUsed.clustering().cacheMode(cacheMode);
builderUsed.clustering().hash().numOwners(1);
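      // With numOwners(1) each entry lives on exactly one node, forcing iteration to pull
      // entries from remote owners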
builderUsed.memory().storageType(StorageType.BINARY);
createClusteredCaches(3, new StreamStoreAsBinarySerializationContextImpl(), builderUsed);
}
@Test
public void testFilterWithStoreAsBinary() {
Cache<MagicKey, String> cache0 = cache(0);
Cache<MagicKey, String> cache1 = cache(1);
Cache<MagicKey, String> cache2 = cache(2);
Map<MagicKey, String> originalValues = new HashMap<>();
originalValues.put(new MagicKey(cache0), "cache0");
originalValues.put(new MagicKey(cache1), "cache1");
originalValues.put(new MagicKey(cache2), "cache2");
cache0.putAll(originalValues);
// Try filter for all values
Iterator<CacheEntry<MagicKey, String>> iterator = cache1.getAdvancedCache().cacheEntrySet().stream().
filter(CacheFilters.predicate(new MagicKeyStringFilter(originalValues))).iterator();
      // We need this count because the map would silently collapse entries with the same key
int count = 0;
Map<MagicKey, String> results = new HashMap<MagicKey, String>();
while (iterator.hasNext()) {
Map.Entry<MagicKey, String> entry = iterator.next();
results.put(entry.getKey(), entry.getValue());
count++;
}
assertEquals(count, 3);
assertEquals(originalValues, results);
}
@Test
public void testFilterWithStoreAsBinaryPartialKeys() {
Cache<MagicKey, String> cache0 = cache(0);
Cache<MagicKey, String> cache1 = cache(1);
Cache<MagicKey, String> cache2 = cache(2);
MagicKey findKey = new MagicKey(cache1);
Map<MagicKey, String> originalValues = new HashMap<>();
originalValues.put(new MagicKey(cache0), "cache0");
originalValues.put(findKey, "cache1");
originalValues.put(new MagicKey(cache2), "cache2");
cache0.putAll(originalValues);
// Try filter for all values
Iterator<CacheEntry<MagicKey, String>> iterator = cache1.getAdvancedCache().cacheEntrySet().stream().
filter(CacheFilters.predicate(new MagicKeyStringFilter(Collections.singletonMap(findKey, "cache1")))).iterator();
CacheEntry<MagicKey, String> entry = iterator.next();
AssertJUnit.assertEquals(findKey, entry.getKey());
AssertJUnit.assertEquals("cache1", entry.getValue());
assertFalse(iterator.hasNext());
}
static class MagicKeyStringFilter implements KeyValueFilter<MagicKey, String> {
Map<MagicKey, String> allowedEntries;
MagicKeyStringFilter() {}
MagicKeyStringFilter(Map<MagicKey, String> allowedEntries) {
this.allowedEntries = allowedEntries;
}
@ProtoField(number = 1, collectionImplementation = ArrayList.class)
public List<MapPair> getMapEntries() {
return allowedEntries.entrySet().stream().map(MapPair::new).collect(Collectors.toCollection(ArrayList::new));
}
public void setMapEntries(List<MapPair> entries) {
this.allowedEntries = entries.stream().collect(Collectors.toMap(m -> m.key, m -> m.value));
}
@Override
public boolean accept(MagicKey key, String value, Metadata metadata) {
String allowedValue = allowedEntries.get(key);
return allowedValue != null && allowedValue.equals(value);
}
}
static class MapPair {
@ProtoField(1)
MagicKey key;
@ProtoField(2)
String value;
MapPair() {}
MapPair(Map.Entry<MagicKey, String> entry) {
this.key = entry.getKey();
this.value = entry.getValue();
}
}
@AutoProtoSchemaBuilder(
includeClasses = {
MagicKey.class,
MagicKeyStringFilter.class,
MapPair.class,
},
schemaFileName = "core.stream.binary.proto",
schemaFilePath = "proto/generated",
schemaPackageName = "org.infinispan.test.core.stream.binary",
service = false
)
interface StreamStoreAsBinarySerializationContext extends SerializationContextInitializer {
}
}
| 5,707
| 34.899371
| 127
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/LocalStreamIteratorExceptionTest.java
|
package org.infinispan.stream;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertSame;
import static org.testng.AssertJUnit.fail;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* Test to verify stream exception propagation behavior for a local cache.
*
* @author wburns
* @since 7.0
*/
@Test(groups = "functional", testName = "stream.LocalStreamIteratorExceptionTest")
public class LocalStreamIteratorExceptionTest extends BaseSetupStreamIteratorTest {
public LocalStreamIteratorExceptionTest() {
super(false, CacheMode.LOCAL);
}
public void ensureDataContainerExceptionPropagated() {
Cache cache = cache(0, CACHE_NAME);
// Extract real one to replace after
InternalDataContainer dataContainer = TestingUtil.extractComponent(cache, InternalDataContainer.class);
try {
Throwable t = new CacheException();
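         // Create a mock container whose publisher(IntSet) always throws, then swap it in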
InternalDataContainer mockContainer = when(mock(InternalDataContainer.class).publisher(any(IntSet.class))).thenThrow(t).getMock();
TestingUtil.replaceComponent(cache, InternalDataContainer.class, mockContainer, true);
try {
cache.entrySet().stream().iterator().hasNext();
fail("We should have gotten a CacheException");
} catch (CacheException e) {
assertSame("We should have found the throwable as a cause", t, e);
}
} finally {
TestingUtil.replaceComponent(cache, InternalDataContainer.class, dataContainer, true);
}
}
}
| 1,875
| 37.285714
| 139
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/DenyReadWritesStreamTest.java
|
package org.infinispan.stream;
import java.util.Map;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Test the handling of backpressure when partition handling is enabled.
*
* See ISPN-12594.
*
* @author Wolf-Dieter Fink
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "stream.DenyReadWritesStreamTest")
public class DenyReadWritesStreamTest extends SingleCacheManagerTest {
private static final Log log = LogFactory.getLog(DenyReadWritesStreamTest.class);
public static final int CHUNK_SIZE = 2;
public static final int NUM_KEYS = 20;
public DenyReadWritesStreamTest() {
}
@Override
protected EmbeddedCacheManager createCacheManager() throws Exception {
      // Set up a clustered cache manager
GlobalConfigurationBuilder global = GlobalConfigurationBuilder.defaultClusteredBuilder();
cacheManager = new DefaultCacheManager(global.build());
ConfigurationBuilder builder = new ConfigurationBuilder();
// With a dist cache
builder.clustering().cacheMode(CacheMode.DIST_SYNC)
.stateTransfer().chunkSize(CHUNK_SIZE)
.hash().numOwners(4);
      // DENY_READ_WRITES used to cause iteration to fail (see ISPN-12594);
      // ALLOW_READS and ALLOW_READ_WRITES worked as expected
builder.clustering().partitionHandling().whenSplit(PartitionHandling.DENY_READ_WRITES);
cacheManager.defineConfiguration("testCache", builder.build());
cache = cacheManager.getCache("testCache");
for (int i = 0; i < NUM_KEYS; i++) {
cache.put(String.valueOf(i), String.valueOf(i));
}
return cacheManager;
}
@Override
protected void clearCacheManager() {
// Do nothing
}
public void testValuesForEachNoBatchSize() {
try (CacheStream<Object> cacheStream = cache.values().stream()) {
cacheStream.forEach(v -> {
log.tracef("foreach: %s", v);
});
}
}
public void testEntriesIteratorNoBatchSize() {
try (CloseableIterator<Map.Entry<Object, Object>> it = cache.entrySet().iterator()) {
while (it.hasNext()) {
Object key = it.next();
log.tracef("iterator: %s", key);
}
}
}
public void testKeysForEachBatchSizeEqualsCacheSize() {
try (CacheStream<Object> cacheStream = cache.keySet().stream().distributedBatchSize(NUM_KEYS)) {
cacheStream.forEach(k -> {
log.tracef("foreach: %s", k);
});
}
}
public void testKeysForEachBatchSizeIsLessThanCacheSize() {
try (CacheStream<Object> cacheStream = cache.keySet().stream().distributedBatchSize(NUM_KEYS - 2)) {
cacheStream.forEach(k -> {
log.tracef("foreach %s", k);
});
}
}
}
| 3,317
| 32.18
| 106
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/stream/stress/DistributedStreamRehashStressTest.java
|
package org.infinispan.stream.stress;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.testng.Assert.assertEquals;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.StressTest;
import org.infinispan.commons.executors.BlockingThreadPoolExecutorFactory;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.util.function.SerializablePredicate;
import org.testng.annotations.Test;
/**
 * Stress test to verify that distributed streams work properly when constant rehashes occur
*
* @author wburns
* @since 8.0
*/
@Test(groups = "stress", testName = "stream.stress.DistributedStreamRehashStressTest", timeOut = 15*60*1000)
@InCacheMode({CacheMode.DIST_SYNC, CacheMode.REPL_SYNC })
public class DistributedStreamRehashStressTest extends StressTest {
protected final String CACHE_NAME = "testCache";
protected final static int CACHE_COUNT = 5;
protected final static int THREAD_MULTIPLIER = 5;
protected final static long CACHE_ENTRY_COUNT = 250000;
@Override
protected void createCacheManagers() throws Throwable {
builderUsed = new ConfigurationBuilder();
builderUsed.clustering().cacheMode(cacheMode);
builderUsed.clustering().hash().numOwners(3);
builderUsed.clustering().stateTransfer().chunkSize(25000);
// This is increased just for the put all command when doing full tracing
builderUsed.clustering().remoteTimeout(12000000);
// This way if an iterator gets stuck we know earlier
builderUsed.clustering().stateTransfer().timeout(240, TimeUnit.SECONDS);
createClusteredCaches(CACHE_COUNT, CACHE_NAME, builderUsed);
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(TransportFlags flags) {
GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
// Amend first so we can increase the transport thread pool
TestCacheManagerFactory.amendGlobalConfiguration(gcb, flags);
// we need to increase the transport and remote thread pools to default values
BlockingThreadPoolExecutorFactory executorFactory = new BlockingThreadPoolExecutorFactory(
25, 25, 10000, 30000);
gcb.transport().transportThreadPool().threadPoolFactory(executorFactory);
gcb.transport().remoteCommandThreadPool().threadPoolFactory(executorFactory);
EmbeddedCacheManager cm = TestCacheManagerFactory.newDefaultCacheManager(true, gcb, new ConfigurationBuilder());
cacheManagers.add(cm);
return cm;
}
public void testStressNodesLeavingWhileMultipleCollectors() throws Throwable {
testStressNodesLeavingWhilePerformingCallable((cache, masterValues, iteration) -> {
SerializablePredicate<Map.Entry<Integer, Integer>> predicate = e -> (e.getKey() & 1) == 1;
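         // The predicate keeps odd keys only, i.e. exactly half of the keys 0..CACHE_ENTRY_COUNT-1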
// Remote invocation with data from cache
Map<Integer, Integer> results = cache.entrySet().stream()
.filter(predicate)
.collect(() -> Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
// Local invocation
Map<Integer, Integer> filteredMasterValues = masterValues.entrySet().stream()
.filter(predicate)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
findMismatchedSegments(keyPartitioner, filteredMasterValues, results, iteration);
assertEquals(CACHE_ENTRY_COUNT / 2, results.size());
});
}
public void testStressNodesLeavingWhileMultipleCount() throws Throwable {
testStressNodesLeavingWhilePerformingCallable(((cache, masterValues, iteration) -> {
long size;
assertEquals(CACHE_ENTRY_COUNT, (size = cache.entrySet().stream().count()),
"We didn't get a matching size! Expected " + CACHE_ENTRY_COUNT + " but was " + size);
}));
}
// TODO: this fails still for some reason - NEED to find out why!
public void testStressNodesLeavingWhileMultipleIterators() throws Throwable {
testStressNodesLeavingWhilePerformingCallable((cache, masterValues, iteration) -> {
Map<Integer, Integer> seenValues = new HashMap<>();
Iterator<Map.Entry<Integer, Integer>> iterator = cache.entrySet().stream()
.distributedBatchSize(50000)
.iterator();
while (iterator.hasNext()) {
Map.Entry<Integer, Integer> entry = iterator.next();
if (seenValues.containsKey(entry.getKey())) {
log.tracef("Seen values were: %s", seenValues);
throw new IllegalArgumentException(Thread.currentThread() + "-Found duplicate value: " + entry.getKey() + " on iteration " + iteration);
} else if (!masterValues.get(entry.getKey()).equals(entry.getValue())) {
log.tracef("Seen values were: %s", seenValues);
throw new IllegalArgumentException(Thread.currentThread() + "-Found incorrect value: " + entry.getKey() + " with value " + entry.getValue() + " on iteration " + iteration);
}
seenValues.put(entry.getKey(), entry.getValue());
}
if (seenValues.size() != masterValues.size()) {
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
findMismatchedSegments(keyPartitioner, masterValues, seenValues, iteration);
}
});
}
public void testStressNodesLeavingWhileMultipleIteratorsLocalSegments() throws Throwable {
testStressNodesLeavingWhilePerformingCallable((cache, masterValues, iteration) -> {
Map<Integer, Integer> seenValues = new HashMap<>();
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
AdvancedCache<Integer, Integer> advancedCache = cache.getAdvancedCache();
LocalizedCacheTopology cacheTopology = advancedCache.getDistributionManager().getCacheTopology();
Set<Integer> targetSegments = cacheTopology.getWriteConsistentHash().getSegmentsForOwner(cacheTopology.getLocalAddress());
masterValues = masterValues.entrySet().stream()
.filter(e -> targetSegments.contains(keyPartitioner.getSegment(e.getKey())))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
Iterator<Map.Entry<Integer, Integer>> iterator = cache.entrySet().stream()
.distributedBatchSize(50000)
.filterKeySegments(targetSegments)
.iterator();
while (iterator.hasNext()) {
Map.Entry<Integer, Integer> entry = iterator.next();
if (seenValues.containsKey(entry.getKey())) {
log.tracef("Seen values were: %s", seenValues);
throw new IllegalArgumentException(Thread.currentThread() + "-Found duplicate value: " + entry.getKey() + " on iteration " + iteration);
} else if (!masterValues.get(entry.getKey()).equals(entry.getValue())) {
log.tracef("Seen values were: %s", seenValues);
throw new IllegalArgumentException(Thread.currentThread() + "-Found incorrect value: " + entry.getKey() + " with value " + entry.getValue() + " on iteration " + iteration);
}
seenValues.put(entry.getKey(), entry.getValue());
}
if (seenValues.size() != masterValues.size()) {
findMismatchedSegments(keyPartitioner, masterValues, seenValues, iteration);
}
});
}
private void findMismatchedSegments(KeyPartitioner keyPartitioner, Map<Integer, Integer> masterValues,
Map<Integer, Integer> seenValues, int iteration) {
Map<Integer, Set<Map.Entry<Integer, Integer>>> target = generateEntriesPerSegment(keyPartitioner,
masterValues.entrySet());
Map<Integer, Set<Map.Entry<Integer, Integer>>> actual = generateEntriesPerSegment(keyPartitioner, seenValues.entrySet());
for (Map.Entry<Integer, Set<Map.Entry<Integer, Integer>>> entry : target.entrySet()) {
Set<Map.Entry<Integer, Integer>> entrySet = entry.getValue();
Set<Map.Entry<Integer, Integer>> actualEntries = actual.get(entry.getKey());
if (actualEntries != null) {
entrySet.removeAll(actualEntries);
}
if (!entrySet.isEmpty()) {
            throw new IllegalArgumentException(Thread.currentThread() + "-Found only " +
                  (actualEntries != null ? actualEntries.size() : 0) + " entries for segment " +
                  entry.getKey() + "; missing " + entrySet.size() + " entries: " + entrySet
                  + " on iteration " + iteration);
}
}
}
void testStressNodesLeavingWhilePerformingCallable(final PerformOperation operation)
throws Throwable {
final Map<Integer, Integer> masterValues = new HashMap<>();
// First populate our caches
for (int i = 0; i < CACHE_ENTRY_COUNT; ++i) {
masterValues.put(i, i);
}
cache(0, CACHE_NAME).putAll(masterValues);
System.out.println("Done with inserts!");
List<Future<Void>> futures = forkWorkerThreads(CACHE_NAME, THREAD_MULTIPLIER, CACHE_COUNT, new Object[THREAD_MULTIPLIER * CACHE_COUNT],
(cache, args, iteration) -> operation.perform(cache, masterValues, iteration));
futures.add(forkRestartingThread(CACHE_COUNT));
waitAndFinish(futures, 1, TimeUnit.MINUTES);
}
interface PerformOperation {
void perform(Cache<Integer, Integer> cacheToUse, Map<Integer, Integer> masterValues, int iteration);
}
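   // A minimal sketch (not exercised by the tests above; the field name is
   // purely illustrative): since PerformOperation is a functional interface,
   // a consistency check can be written inline as a lambda.
   final PerformOperation exampleReadCheck =
         (cache, masterValues, iteration) ->
               masterValues.forEach((key, value) -> assertEquals(value, cache.get(key)));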
private <K, V> Map<Integer, Set<Map.Entry<K, V>>> generateEntriesPerSegment(KeyPartitioner keyPartitioner,
Iterable<Map.Entry<K, V>> entries) {
Map<Integer, Set<Map.Entry<K, V>>> returnMap = new HashMap<>();
for (Map.Entry<K, V> value : entries) {
int segment = keyPartitioner.getSegment(value.getKey());
Set<Map.Entry<K, V>> set = returnMap.computeIfAbsent(segment, k -> new HashSet<>());
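         // Wrap in ImmortalCacheEntry so entries from the expected and the seen
         // maps compare equal in the removeAll diff in findMismatchedSegments
         // above, regardless of the original Map.Entry implementation.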
set.add(new ImmortalCacheEntry(value.getKey(), value.getValue()));
}
return returnMap;
}
}
| 11,169
| 51.688679
| 187
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncUnsafeFuncTest.java
|
package org.infinispan.distribution;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncUnsafeFuncTest")
public class DistSyncUnsafeFuncTest extends DistSyncFuncTest {
@Override
public Object[] factory() {
return new Object[] {
new DistSyncUnsafeFuncTest(),
new DistSyncUnsafeFuncTest().groupers(true)
};
}
public DistSyncUnsafeFuncTest() {
testRetVals = false;
}
}
| 467
| 23.631579
| 78
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistStoreTxDisjointSetTest.java
|
package org.infinispan.distribution;
import static java.lang.String.format;
import static org.infinispan.distribution.DistributionTestHelper.addressOf;
import static org.infinispan.distribution.DistributionTestHelper.isOwner;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
/**
* This tests the access pattern where a Tx touches multiple keys such that: K1: {A, B} K2: {A, C}
* <p/>
* The tx starts and runs on A, and the TX must succeed even though each node only gets a subset of data. Particularly,
* needs to be checked when using a cache store.
*/
@Test(testName = "distribution.DistStoreTxDisjointSetTest", groups = "functional")
public class DistStoreTxDisjointSetTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
SerializationContextInitializer sci = TestDataSCI.INSTANCE;
addClusterEnabledCacheManager(sci, buildCacheConfig("DistCacheStoreTxDisjointSetTest0"));
addClusterEnabledCacheManager(sci, buildCacheConfig("DistCacheStoreTxDisjointSetTest1"));
addClusterEnabledCacheManager(sci, buildCacheConfig("DistCacheStoreTxDisjointSetTest2"));
waitForClusterToForm();
}
private ConfigurationBuilder buildCacheConfig(String storeName) {
ConfigurationBuilder cb = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
cb.persistence().addStore(DummyInMemoryStoreConfigurationBuilder.class).storeName(storeName);
return cb;
}
public void testDisjointSetTransaction() throws Exception {
MagicKey k1 = new MagicKey(cache(0), cache(1));
MagicKey k2 = new MagicKey(cache(1), cache(2));
// make sure the owners of k1 and k2 are NOT the same!
Set<Address> k1Owners = new HashSet<Address>();
Set<Address> k2Owners = new HashSet<Address>();
for (Cache<?, ?> cache: caches()) {
if (isOwner(cache, k1)) k1Owners.add(addressOf(cache));
if (isOwner(cache, k2)) k2Owners.add(addressOf(cache));
}
assert k1Owners.size() == 2: "Expected 2 owners for k1; was " + k1Owners;
      assert k2Owners.size() == 2: "Expected 2 owners for k2; was " + k2Owners;
assert !k1Owners.equals(k2Owners) : format("k1 and k2 should have different ownership set. Was %s and %s", k1Owners, k2Owners);
tm(0).begin();
cache(0).put(k1, "v1");
cache(0).put(k2, "v2");
tm(0).commit();
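      // Illustrative read-back (a sketch, assuming standard DIST_SYNC remote-get
      // behaviour): after the disjoint-set commit, both keys should be visible
      // from every node, owner or not.
      for (Cache<?, ?> c : caches()) {
         assert "v1".equals(c.get(k1)) : "k1 not visible on " + addressOf(c);
         assert "v2".equals(c.get(k2)) : "k2 not visible on " + addressOf(c);
      }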
}
}
| 2,871
| 41.235294
| 134
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncTx1PCL1FuncTest.java
|
package org.infinispan.distribution;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncTx1PCL1FuncTest")
public class DistSyncTx1PCL1FuncTest extends DistSyncTxL1FuncTest {
public DistSyncTx1PCL1FuncTest() {
isolationLevel = IsolationLevel.READ_COMMITTED;
onePhaseCommitOptimization = true;
}
@Override
protected Class<? extends VisitableCommand> getCommitCommand() {
return PrepareCommand.class;
}
@Test(groups = "unstable")
@Override
public void testBackupOwnerInvalidatesL1WhenPrimaryIsUnaware() throws InterruptedException, TimeoutException, BrokenBarrierException, ExecutionException {
super.testBackupOwnerInvalidatesL1WhenPrimaryIsUnaware();
}
}
| 1,057
| 33.129032
| 157
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/UnicastInvalidationFuncTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.ArrayList;
import java.util.Collection;
import org.infinispan.Cache;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.test.ReplListener;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.UnicastInvalidationFuncTest")
public class UnicastInvalidationFuncTest extends BaseDistFunctionalTest<Object, String> {
public static final String KEY1 = "k1";
public UnicastInvalidationFuncTest() {
testRetVals = true;
l1Threshold = -1;
}
public void testPut() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner(KEY1);
Cache<Object, String> owner = getOwners(KEY1)[0];
Cache<Object, String> secondNonOwner = getSecondNonOwner(KEY1);
Collection<ReplListener> listeners = new ArrayList<ReplListener>();
      // Put an object in from a non-owner; this will cause an L1 record to be created there
nonOwner.put(KEY1, "foo");
assertNull(nonOwner.getAdvancedCache().getDataContainer().get(KEY1));
assertEquals(owner.getAdvancedCache().getDataContainer().get(KEY1).getValue(), "foo");
// Request from another non-owner so that we can get an invalidation command there
assertEquals(secondNonOwner.get(KEY1), "foo");
assertEquals(secondNonOwner.getAdvancedCache().getDataContainer().get(KEY1).getValue(), "foo");
// Check that the non owners are notified
ReplListener rl = new ReplListener(nonOwner);
rl.expect(InvalidateL1Command.class);
listeners.add(rl);
rl = new ReplListener(secondNonOwner);
rl.expect(InvalidateL1Command.class);
listeners.add(rl);
      // Put an object into an owner; this will cause the L1 records for this key to be invalidated
owner.put(KEY1, "bar");
for (ReplListener r : listeners) {
r.waitForRpc();
}
Assert.assertNull(secondNonOwner.getAdvancedCache().getDataContainer().get(KEY1));
Assert.assertNull(nonOwner.getAdvancedCache().getDataContainer().get(KEY1));
}
}
| 2,231
| 33.875
| 101
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ZeroCapacityNodeTest.java
|
package org.infinispan.distribution;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.commons.test.Exceptions.expectCompletionException;
import static org.infinispan.test.TestingUtil.extractCacheTopology;
import static org.infinispan.test.TestingUtil.installNewView;
import static org.infinispan.test.TestingUtil.waitForNoRebalance;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Queue;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHashFactory;
import org.infinispan.distribution.ch.impl.SyncConsistentHashFactory;
import org.infinispan.distribution.ch.impl.SyncReplicatedConsistentHashFactory;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
import org.infinispan.notifications.cachelistener.event.Event;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.op.TestFunctionalWriteOperation;
import org.infinispan.test.op.TestOperation;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.topology.CacheStatusResponse;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.util.concurrent.TimeoutException;
import org.mockito.stubbing.Answer;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Tests the capacity factor for a zero-capacity (lite) node
*
* @author Katia Aresti
* @since 9.4
*/
@Test(groups = "functional", testName = "distribution.ch.ZeroCapacityNodeTest")
public class ZeroCapacityNodeTest extends MultipleCacheManagersTest {
public static final int NUM_SEGMENTS = 60;
private EmbeddedCacheManager node1;
private EmbeddedCacheManager node2;
private EmbeddedCacheManager zeroCapacityNode;
@Override
protected void createCacheManagers() throws Throwable {
node1 = addClusterEnabledCacheManager();
node2 = addClusterEnabledCacheManager();
GlobalConfigurationBuilder zeroCapacityBuilder =
GlobalConfigurationBuilder.defaultClusteredBuilder().zeroCapacityNode(true);
zeroCapacityNode = addClusterEnabledCacheManager(zeroCapacityBuilder, null);
}
@DataProvider(name = "cm_chf")
protected Object[][] consistentHashFactory() {
return new Object[][]{
{CacheMode.DIST_SYNC, new DefaultConsistentHashFactory()},
{CacheMode.DIST_SYNC, new SyncConsistentHashFactory()},
{CacheMode.REPL_SYNC, new ReplicatedConsistentHashFactory()},
{CacheMode.REPL_SYNC, new SyncReplicatedConsistentHashFactory()},
};
}
@Test(dataProvider = "cm_chf")
public void testCapacityFactors(CacheMode cacheMode, ConsistentHashFactory<?> consistentHashFactory) {
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().cacheMode(cacheMode);
cb.clustering().hash().numSegments(NUM_SEGMENTS).consistentHashFactory(consistentHashFactory);
cb.clustering().hash().capacityFactor(1f);
String cacheName = "" + cacheMode + consistentHashFactory;
createCache(cb, cacheName);
Cache<Object, Object> cache1 = node1.getCache(cacheName);
Cache<Object, Object> cache2 = node2.getCache(cacheName);
Cache<Object, Object> zeroCapacityCache = zeroCapacityNode.getCache(cacheName);
ConsistentHash ch = extractCacheTopology(cache1).getReadConsistentHash();
assertEquals(1f, capacityFactor(ch, node1), 0.0);
assertEquals(1f, capacityFactor(ch, node2), 0.0);
assertEquals(0f, capacityFactor(ch, zeroCapacityNode), 0.0);
assertEquals(Collections.emptySet(), ch.getPrimarySegmentsForOwner(zeroCapacityNode.getAddress()));
assertEquals(Collections.emptySet(), ch.getSegmentsForOwner(zeroCapacityNode.getAddress()));
cache1.stop();
ConsistentHash ch2 = extractCacheTopology(cache2).getReadConsistentHash();
assertEquals(Collections.emptySet(), ch2.getPrimarySegmentsForOwner(zeroCapacityNode.getAddress()));
assertEquals(Collections.emptySet(), ch2.getSegmentsForOwner(zeroCapacityNode.getAddress()));
// Test simple put and get
zeroCapacityCache.put("key", "value");
assertEquals("value", zeroCapacityCache.get("key"));
}
public void testReplicatedWriteOperations() {
String cacheName = "replConditional";
ConfigurationBuilder builder = new ConfigurationBuilder();
builder.clustering().cacheMode(CacheMode.REPL_SYNC);
createCache(builder, cacheName);
for (TestOperation op : TestWriteOperation.values()) {
doTestReplicatedWriteOperation(cacheName, op);
}
for (TestFunctionalWriteOperation op : TestFunctionalWriteOperation.values()) {
doTestReplicatedWriteOperation(cacheName, op);
}
}
private void doTestReplicatedWriteOperation(String cacheName, TestOperation op) {
log.debugf("Testing %s", op);
for (Cache<Object, Object> cache : caches(cacheName)) {
String key = String.format("key-%s-%s", op, address(cache));
op.insertPreviousValue(cache.getAdvancedCache(), key);
Object result = op.perform(cache.getAdvancedCache(), key);
assertEquals(op.getReturnValue(), result);
cache.clear();
assertTrue(cache.isEmpty());
}
}
public void testReplicatedClusteredListener() {
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().cacheMode(CacheMode.REPL_SYNC);
cb.clustering().hash().numSegments(NUM_SEGMENTS);
cb.clustering().hash().capacityFactor(1f);
String cacheName = "replicated_clustered_listener";
createCache(cb, cacheName);
ClusteredListener listener = new ClusteredListener();
zeroCapacityNode.getCache(cacheName).addListener(listener);
zeroCapacityNode.getCache(cacheName).put("key1", "value1");
assertEquals(1, listener.events.get());
node1.getCache(cacheName).put("key2", "value2");
assertEquals(2, listener.events.get());
}
private void createCache(ConfigurationBuilder cb, String cacheName) {
node1.createCache(cacheName, cb.build());
node2.createCache(cacheName, cb.build());
zeroCapacityNode.createCache(cacheName, cb.build());
waitForClusterToForm(cacheName);
}
public void testZeroCapacityFactorNodeStartsFirst(Method m) throws Exception {
String cacheName = m.getName();
Queue<CacheStatusResponse> joinResponses = new LinkedBlockingQueue<>();
assertTrue(node1.isCoordinator());
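      // Wrap the coordinator's ClusterTopologyManager in a delegating mock so
      // that join responses can be captured without changing its behaviour.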
ClusterTopologyManager originalCTM = TestingUtil.extractGlobalComponent(node1, ClusterTopologyManager.class);
Answer<?> delegateAnswer = invocation -> invocation.getMethod().invoke(originalCTM, invocation.getArguments());
ClusterTopologyManager trackingCTM = mock(ClusterTopologyManager.class, delegateAnswer);
when(trackingCTM.handleJoin(eq(cacheName), any(), any(), anyInt()))
.thenAnswer(invocation -> {
return originalCTM.handleJoin(cacheName, invocation.getArgument(1),
invocation.getArgument(2), invocation.getArgument(3))
.thenApply(r -> {
joinResponses.offer(r);
return r;
});
});
TestingUtil.replaceComponent(node1, ClusterTopologyManager.class, trackingCTM, true);
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numSegments(NUM_SEGMENTS);
ConfigurationBuilder cbZero = new ConfigurationBuilder();
cbZero.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numSegments(NUM_SEGMENTS).capacityFactor(0f);
Future<Cache<Object, Object>> zeroCapacityNodeFuture =
fork(() -> zeroCapacityNode.createCache(cacheName, cb.build()));
Future<Cache<Object, Object>> node1Future =
fork(() -> node1.createCache(cacheName, cbZero.build()));
assertFalse(zeroCapacityNodeFuture.isDone());
assertFalse(node1Future.isDone());
assertEquals(0, joinResponses.size());
// Node2 is the only one that can create the initial topology
node2.createCache(cacheName, cb.build());
node1Future.get(10, SECONDS);
zeroCapacityNodeFuture.get(10, SECONDS);
      // 3 join responses: one each for node1, node2 and zeroCapacityNode
assertEquals(3, joinResponses.size());
while (!joinResponses.isEmpty()) {
CacheStatusResponse joinResponse = joinResponses.poll();
assertTrue(joinResponse.getCacheTopology().getMembers().contains(node2.getAddress()));
}
waitForClusterToForm(cacheName);
ConsistentHash ch3 = consistentHash(0, cacheName);
assertEquals(0f, capacityFactor(ch3, zeroCapacityNode), 0.0);
assertEquals(0f, capacityFactor(ch3, node1), 0.0);
assertEquals(1f, capacityFactor(ch3, node2), 0.0);
cache(0, cacheName).put("key", "value");
assertEquals("value", cache(0, cacheName).get("key"));
TestingUtil.replaceComponent(node1, ClusterTopologyManager.class, originalCTM, true);
}
public void testOnlyZeroCapacityNodesRemain(Method m) {
String cacheName = m.getName();
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numSegments(NUM_SEGMENTS);
ConfigurationBuilder cbZero = new ConfigurationBuilder();
cbZero.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numSegments(NUM_SEGMENTS).capacityFactor(0f);
node2.createCache(cacheName, cb.build());
node1.createCache(cacheName, cbZero.build());
zeroCapacityNode.createCache(cacheName, cb.build());
waitForClusterToForm(cacheName);
// Stop the only non-zero-capacity node
node2.stop();
cacheManagers.remove(1);
// There is no new cache topology, so any operation will time out
// Lower the remote timeout just for this operation
zeroCapacityNode.getCache(cacheName).getCacheConfiguration().clustering().remoteTimeout(10);
expectCompletionException(TimeoutException.class, zeroCapacityNode.getCache(cacheName).getAsync("key"));
// Start a new node with capacity
node2 = addClusterEnabledCacheManager();
node2.defineConfiguration(cacheName, cb.build());
node2.getCache(cacheName);
// Operations succeed again
zeroCapacityNode.getCache(cacheName).getCacheConfiguration().clustering().remoteTimeout(10_000);
zeroCapacityNode.getCache(cacheName).get("key");
}
public void testDenyReadWritesCacheStaysAvailableAfterZeroCapacityNodeCrash(Method m) {
String cacheName = m.getName();
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering().cacheMode(CacheMode.DIST_SYNC)
.partitionHandling().whenSplit(PartitionHandling.DENY_READ_WRITES)
.hash().numSegments(NUM_SEGMENTS);
ConfigurationBuilder cbZero = new ConfigurationBuilder();
cbZero.clustering().cacheMode(CacheMode.DIST_SYNC)
.partitionHandling().whenSplit(PartitionHandling.DENY_READ_WRITES)
.hash().numSegments(NUM_SEGMENTS).capacityFactor(0f);
node1.createCache(cacheName, cb.build());
node2.createCache(cacheName, cbZero.build());
zeroCapacityNode.createCache(cacheName, cb.build());
waitForClusterToForm(cacheName);
installNewView(node1);
installNewView(node2, zeroCapacityNode);
waitForNoRebalance(node1.getCache(cacheName));
cache(0, cacheName).get("key");
installNewView(node1, node2, zeroCapacityNode);
waitForNoRebalance(caches(cacheName));
cache(0, cacheName).get("key");
}
private ConsistentHash consistentHash(int managerIndex, String cacheName) {
return cache(managerIndex, cacheName).getAdvancedCache().getDistributionManager()
.getCacheTopology().getReadConsistentHash();
}
private Float capacityFactor(ConsistentHash ch, EmbeddedCacheManager node) {
return ch.getCapacityFactors().get(node.getAddress());
}
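   // Worked example (values as configured in testCapacityFactors above):
   // ConsistentHash#getCapacityFactors maps each member address to its factor,
   //    node1 -> 1.0, node2 -> 1.0, zeroCapacityNode -> 0.0
   // which is why getSegmentsForOwner and getPrimarySegmentsForOwner both
   // return the empty set for the zero-capacity member.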
@Listener(clustered = true)
private class ClusteredListener {
AtomicInteger events = new AtomicInteger();
@CacheEntryCreated
public void event(Event event) throws Throwable {
log.tracef("Received event %s", event);
events.incrementAndGet();
}
}
}
| 13,557
| 41.63522
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/BaseDistSyncL1Test.java
|
package org.infinispan.distribution;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.distribution.L1WriteSynchronizer;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.transaction.TransactionMode;
import org.mockito.AdditionalAnswers;
import org.mockito.Mockito;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
/**
* Base class for various L1 tests for use with distributed cache. Note these only currently work for synchronous based
* caches
*
* @author wburns
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.BaseDistSyncL1Test")
public abstract class BaseDistSyncL1Test extends BaseDistFunctionalTest<Object, String> {
protected static final String key = "key-to-the-cache";
protected static final String firstValue = "first-put";
protected static final String secondValue = "second-put";
@Override
protected ConfigurationBuilder buildConfiguration() {
ConfigurationBuilder builder = super.buildConfiguration();
builder.locking().isolationLevel(isolationLevel);
return builder;
}
@Override
protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
NoOpGlobalConfigurationManager.amendCacheManager(cm);
}
protected BlockingInterceptor addBlockingInterceptorBeforeTx(Cache<?, ?> cache,
final CyclicBarrier barrier,
Class<? extends VisitableCommand> commandClass) {
return addBlockingInterceptorBeforeTx(cache, barrier, commandClass, true);
}
protected BlockingInterceptor addBlockingInterceptorBeforeTx(Cache<?, ?> cache, final CyclicBarrier barrier,
Class<? extends VisitableCommand> commandClass,
boolean blockAfterCommand) {
return addBlockingInterceptor(cache, barrier, commandClass, getDistributionInterceptorClass(),
blockAfterCommand);
}
protected BlockingInterceptor addBlockingInterceptor(Cache<?, ?> cache, final CyclicBarrier barrier,
Class<? extends VisitableCommand> commandClass,
Class<? extends AsyncInterceptor> interceptorPosition,
boolean blockAfterCommand) {
BlockingInterceptor bi = new BlockingInterceptor<>(barrier, commandClass, blockAfterCommand, false);
AsyncInterceptorChain interceptorChain = cache.getAdvancedCache().getAsyncInterceptorChain();
assertTrue(interceptorChain.addInterceptorBefore(bi, interceptorPosition));
return bi;
}
protected abstract Class<? extends AsyncInterceptor> getDistributionInterceptorClass();
protected abstract Class<? extends AsyncInterceptor> getL1InterceptorClass();
protected <K> void assertL1StateOnLocalWrite(Cache<? super K,?> cache, Cache<?, ?> updatingCache, K key, Object valueWrite) {
      // The default implementation just assumes the write invalidated the cache
assertIsNotInL1(cache, key);
}
protected void assertL1GetWithConcurrentUpdate(final Cache<Object, String> nonOwnerCache, Cache<Object, String> ownerCache,
final Object key, String originalValue, String updateValue)
throws InterruptedException, ExecutionException, TimeoutException, BrokenBarrierException {
CyclicBarrier barrier = new CyclicBarrier(2);
addBlockingInterceptorBeforeTx(nonOwnerCache, barrier, GetKeyValueCommand.class);
try {
Future<String> future = fork(() -> nonOwnerCache.get(key));
// Now wait for the get to return and block it for now
barrier.await(5, TimeUnit.SECONDS);
assertEquals(originalValue, ownerCache.put(key, updateValue));
// Now let owner key->updateValue go through
barrier.await(5, TimeUnit.SECONDS);
         // This should still be originalValue since the get started before the put
assertEquals(originalValue, future.get(5, TimeUnit.SECONDS));
         // Remove the interceptor now since we don't want to block ourselves - if using a phaser this wouldn't be required
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
assertL1StateOnLocalWrite(nonOwnerCache, ownerCache, key, updateValue);
// The nonOwnerCache should retrieve new value as it isn't in L1
assertEquals(updateValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
}
finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
}
}
@Test
public void testNoEntryInL1GetWithConcurrentInvalidation() throws InterruptedException, ExecutionException, TimeoutException, BrokenBarrierException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in the owner, so the L1 is empty
ownerCache.put(key, firstValue);
assertL1GetWithConcurrentUpdate(nonOwnerCache, ownerCache, key, firstValue, secondValue);
}
@Test
public void testEntryInL1GetWithConcurrentInvalidation() throws InterruptedException, ExecutionException, TimeoutException, BrokenBarrierException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1GetWithConcurrentUpdate(nonOwnerCache, ownerCache, key, firstValue, secondValue);
}
@Test
public void testEntryInL1GetWithConcurrentPut() throws InterruptedException, ExecutionException, TimeoutException, BrokenBarrierException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1GetWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, key, firstValue, secondValue);
}
@Test
public void testNoEntryInL1GetWithConcurrentPut() throws InterruptedException, ExecutionException, TimeoutException, BrokenBarrierException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in the owner, so the L1 is empty
ownerCache.put(key, firstValue);
assertL1GetWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, key, firstValue, secondValue);
}
@Test
public void testNoEntryInL1MultipleConcurrentGetsWithInvalidation() throws TimeoutException, InterruptedException, ExecutionException, BrokenBarrierException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
CyclicBarrier invalidationBarrier = new CyclicBarrier(2);
      // We want to block right before the invalidation would hit the L1 interceptor, to prevent it from invalidating until we allow it
addBlockingInterceptor(nonOwnerCache, invalidationBarrier, InvalidateL1Command.class, getL1InterceptorClass(), false);
try {
assertEquals(firstValue, nonOwnerCache.get(key));
Future<String> futurePut = fork(() -> ownerCache.put(key, secondValue));
         // Wait for the invalidation to start being processed
invalidationBarrier.await(5, TimeUnit.SECONDS);
         // Now remove the value - this simulates that an earlier get reached the owner and registered as an
         // invalidatee, but the invalidation blocked the update from going through
nonOwnerCache.getAdvancedCache().getDataContainer().remove(key);
         // Hack: we remove the blocking interceptor while a call is still inside it; the call retains a reference
         // to the next interceptor to invoke, so when we unblock it, it will continue forward.
         // This is done because we can't have 2 interceptors of the same class.
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
CyclicBarrier getBarrier = new CyclicBarrier(2);
addBlockingInterceptorBeforeTx(nonOwnerCache, getBarrier, GetKeyValueCommand.class);
Future<String> futureGet = fork(() -> nonOwnerCache.get(key));
// Wait for the get to retrieve the remote value but not try to update L1 yet
getBarrier.await(5, TimeUnit.SECONDS);
// Let the invalidation unblock now
invalidationBarrier.await(5, TimeUnit.SECONDS);
         // Wait for the invalidation to complete fully
assertEquals(firstValue, futurePut.get(5, TimeUnit.SECONDS));
// Now let our get go through
getBarrier.await(5, TimeUnit.SECONDS);
         // Technically this could be firstValue or secondValue depending on whether the put has already updated
         // its in-memory contents (since the L1 invalidation is sent asynchronously with the update) - for Tx this
         // is always firstValue - the point though is to ensure it doesn't write to the L1
assertNotNull(futureGet.get(5, TimeUnit.SECONDS));
         // Remove the interceptor now since we don't want to block ourselves - if using a phaser this wouldn't be required
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
// The value shouldn't be in the L1 still
assertIsNotInL1(nonOwnerCache, key);
// It is possible that the async L1LastChance will blow away this get, so we have to make sure to check
// it eventually
eventually(() -> {
// The nonOwnerCache should retrieve new value as it isn't in L1
assertEquals(secondValue, nonOwnerCache.get(key));
return isInL1(nonOwnerCache, key);
});
}
finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
}
}
/**
* See ISPN-3657
*/
@Test
public void testGetAfterWriteAlreadyInvalidatedCurrentGet() throws InterruptedException, TimeoutException,
BrokenBarrierException, ExecutionException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
CyclicBarrier nonOwnerGetBarrier = new CyclicBarrier(2);
// We want to block after it retrieves the value from remote owner so the L1 value will be invalidated
BlockingInterceptor blockingInterceptor =
addBlockingInterceptor(nonOwnerCache, nonOwnerGetBarrier, GetKeyValueCommand.class,
getDistributionInterceptorClass(), true);
try {
Future<String> future = fork(() -> nonOwnerCache.get(key));
         // Wait for the get to retrieve the remote value, but block it before it is stored in L1
nonOwnerGetBarrier.await(10, TimeUnit.SECONDS);
blockingInterceptor.suspend(true);
// Now force the L1 sync to be blown away by an update
ownerCache.put(key, secondValue);
assertEquals(secondValue, nonOwnerCache.get(key));
// It should be in L1 now with the second value
assertIsInL1(nonOwnerCache, key);
assertEquals(secondValue, nonOwnerCache.getAdvancedCache().getDataContainer().get(key).getValue());
// Now let the original get complete
nonOwnerGetBarrier.await(10, TimeUnit.SECONDS);
assertEquals(firstValue, future.get(10, TimeUnit.SECONDS));
// It should STILL be in L1 now with the second value
assertIsInL1(nonOwnerCache, key);
assertEquals(secondValue, nonOwnerCache.getAdvancedCache().getDataContainer().get(key).getValue());
} finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
}
}
/**
* See ISPN-3364
*/
@Test
public void testRemoteGetArrivesButWriteOccursBeforeRegistration() throws Throwable {
final Cache<Object, String>[] owners = getOwners(key, 2);
final Cache<Object, String> ownerCache = owners[0];
final Cache<Object, String> backupOwnerCache = owners[1];
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
      // Add a barrier so that the owner/backup owner, after retrieving the value,
      // is blocked before coming back up into the L1 interceptor
CyclicBarrier getBarrier = new CyclicBarrier(3);
addBlockingInterceptor(ownerCache, getBarrier, GetCacheEntryCommand.class,
getL1InterceptorClass(), true);
addBlockingInterceptor(backupOwnerCache, getBarrier, GetCacheEntryCommand.class,
getL1InterceptorClass(), true);
try {
Future<String> future = fork(() -> nonOwnerCache.get(key));
// Wait until get goes remote and retrieves value before going back into L1 interceptor
getBarrier.await(10, TimeUnit.SECONDS);
assertEquals(firstValue, ownerCache.put(key, secondValue));
// Let the get complete finally
getBarrier.await(10, TimeUnit.SECONDS);
         final String expectedValue = firstValue;
assertEquals(expectedValue, future.get(10, TimeUnit.SECONDS));
assertIsNotInL1(nonOwnerCache, key);
} finally {
removeAllBlockingInterceptorsFromCache(ownerCache);
removeAllBlockingInterceptorsFromCache(backupOwnerCache);
}
}
@Test
public void testGetBlockedInvalidation() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
CheckPoint checkPoint = new CheckPoint();
waitUntilAboutToAcquireLock(nonOwnerCache, checkPoint);
log.warn("Doing get here - ignore all previous");
Future<String> getFuture = fork(() -> nonOwnerCache.get(key));
// Wait until we are about to write value into data container on non owner
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_invoked", 10, TimeUnit.SECONDS);
Future<String> putFuture = fork(() -> ownerCache.put(key, secondValue));
Exceptions.expectException(TimeoutException.class, () -> putFuture.get(1, TimeUnit.SECONDS));
// Let the get complete finally
checkPoint.triggerForever("pre_acquire_shared_topology_lock_released");
assertEquals(firstValue, getFuture.get(10, TimeUnit.SECONDS));
assertEquals(firstValue, putFuture.get(10, TimeUnit.SECONDS));
assertIsNotInL1(nonOwnerCache, key);
}
@Test
public void testGetBlockingAnotherGet() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
CheckPoint checkPoint = new CheckPoint();
StateTransferLock lock = waitUntilAboutToAcquireLock(nonOwnerCache, checkPoint);
try {
log.warn("Doing get here - ignore all previous");
Future<String> getFuture = fork(() -> nonOwnerCache.get(key));
// Wait until we are about to write value into data container on non owner
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_invoked", 10, TimeUnit.SECONDS);
Future<String> getFuture2 = fork(() -> nonOwnerCache.get(key));
Exceptions.expectException(TimeoutException.class, () -> getFuture2.get(1, TimeUnit.SECONDS));
// Let the get complete finally
checkPoint.triggerForever("pre_acquire_shared_topology_lock_released");
assertEquals(firstValue, getFuture.get(10, TimeUnit.SECONDS));
assertEquals(firstValue, getFuture2.get(10, TimeUnit.SECONDS));
assertIsInL1(nonOwnerCache, key);
} finally {
TestingUtil.replaceComponent(nonOwnerCache, StateTransferLock.class, lock, true);
}
}
@Test
public void testGetBlockingAnotherGetWithMiss() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
assertIsNotInL1(nonOwnerCache, key);
CheckPoint checkPoint = new CheckPoint();
L1Manager l1Manager = waitUntilL1Registration(nonOwnerCache, checkPoint);
try {
log.warn("Doing get here - ignore all previous");
Future<String> getFuture = fork(() -> nonOwnerCache.get(key));
// Wait until we are about to write value into data container on non owner
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_invoked", 10, TimeUnit.SECONDS);
Future<String> getFuture2 = fork(() -> nonOwnerCache.get(key));
Exceptions.expectException(TimeoutException.class, () -> getFuture2.get(1, TimeUnit.SECONDS));
// Let the get complete finally
checkPoint.triggerForever("pre_acquire_shared_topology_lock_released");
assertNull(getFuture.get(10, TimeUnit.SECONDS));
assertNull(getFuture2.get(10, TimeUnit.SECONDS));
} finally {
TestingUtil.replaceComponent(nonOwnerCache, L1Manager.class, l1Manager, true);
}
}
@Test
public void testGetBlockingLocalPut() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
CheckPoint checkPoint = new CheckPoint();
waitUntilAboutToAcquireLock(nonOwnerCache, checkPoint);
log.warn("Doing get here - ignore all previous");
Future<String> getFuture = fork(() -> nonOwnerCache.get(key));
// Wait until we are about to write value into data container on non owner
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_invoked", 10, TimeUnit.SECONDS);
Future<String> putFuture = fork(() -> nonOwnerCache.put(key, secondValue));
Exceptions.expectException(TimeoutException.class, () -> putFuture.get(1, TimeUnit.SECONDS));
// Let the get complete finally
checkPoint.triggerForever("pre_acquire_shared_topology_lock_released");
assertEquals(firstValue, getFuture.get(10, TimeUnit.SECONDS));
assertEquals(firstValue, putFuture.get(10, TimeUnit.SECONDS));
if (nonOwnerCache.getCacheConfiguration().transaction().transactionMode() == TransactionMode.TRANSACTIONAL) {
assertIsInL1(nonOwnerCache, key);
} else {
assertIsNotInL1(nonOwnerCache, key);
}
}
public void testL1GetAndCacheEntryGet() {
final Cache<Object, String>[] owners = getOwners(key, 2);
final Cache<Object, String> ownerCache = owners[0];
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
ownerCache.put(key, firstValue);
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
CacheEntry<Object, String> entry = nonOwnerCache.getAdvancedCache().getCacheEntry(key);
assertEquals(key, entry.getKey());
assertEquals(firstValue, entry.getValue());
}
@Test
public void testGetBlockingAnotherGetCacheEntry() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
CheckPoint checkPoint = new CheckPoint();
StateTransferLock lock = waitUntilAboutToAcquireLock(nonOwnerCache, checkPoint);
try {
log.warn("Doing get here - ignore all previous");
Future<String> getFuture = fork(() -> nonOwnerCache.get(key));
// Wait until we are about to write value into data container on non owner
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_invoked", 10, TimeUnit.SECONDS);
Future<CacheEntry<Object, String>> getFuture2 = fork(() -> nonOwnerCache.getAdvancedCache().getCacheEntry(key));
Exceptions.expectException(TimeoutException.class, () -> getFuture2.get(1, TimeUnit.SECONDS));
// Let the get complete finally
checkPoint.triggerForever("pre_acquire_shared_topology_lock_released");
assertEquals(firstValue, getFuture.get(10, TimeUnit.SECONDS));
CacheEntry<Object, String> entry = getFuture2.get(10, TimeUnit.SECONDS);
assertEquals(key, entry.getKey());
assertEquals(firstValue, entry.getValue());
assertIsInL1(nonOwnerCache, key);
} finally {
TestingUtil.replaceComponent(nonOwnerCache, StateTransferLock.class, lock, true);
}
}
/**
* Replaces StateTransferLock in cache with a proxy one that will block on
    * {@link StateTransferLock#acquireSharedTopologyLock} until the checkpoint is triggered
* @param cache The cache to replace the StateTransferLock on
* @param checkPoint The checkpoint to use to trigger blocking
* @return The original real StateTransferLock
*/
protected StateTransferLock waitUntilAboutToAcquireLock(final Cache<?, ?> cache, final CheckPoint checkPoint) {
StateTransferLock stl = TestingUtil.extractComponent(cache, StateTransferLock.class);
final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(stl);
StateTransferLock mockLock = mock(StateTransferLock.class, withSettings().defaultAnswer(forwardedAnswer));
doAnswer(invocation -> {
// Wait for main thread to sync up
checkPoint.trigger("pre_acquire_shared_topology_lock_invoked");
// Now wait until main thread lets us through
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_released", 10, TimeUnit.SECONDS);
return forwardedAnswer.answer(invocation);
}).when(mockLock).acquireSharedTopologyLock();
TestingUtil.replaceComponent(cache, StateTransferLock.class, mockLock, true);
return stl;
}
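   // Usage sketch (mirrors testGetBlockedInvalidation above); 'cache' and 'key'
   // stand in for whatever the caller is testing:
   //    CheckPoint cp = new CheckPoint();
   //    StateTransferLock original = waitUntilAboutToAcquireLock(cache, cp);
   //    Future<String> f = fork(() -> cache.get(key));
   //    cp.awaitStrict("pre_acquire_shared_topology_lock_invoked", 10, TimeUnit.SECONDS);
   //    // ... perform the concurrent operation under test ...
   //    cp.triggerForever("pre_acquire_shared_topology_lock_released");
   //    f.get(10, TimeUnit.SECONDS);
   //    TestingUtil.replaceComponent(cache, StateTransferLock.class, original, true);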
/**
* Replaces L1Manager in cache with a proxy one that will block on
    * {@link L1Manager#registerL1WriteSynchronizer} until the checkpoint is triggered
* @param cache The cache to replace the L1Manager on
* @param checkPoint The checkpoint to use to trigger blocking
* @return The original real L1Manager
*/
protected L1Manager waitUntilL1Registration(final Cache<?, ?> cache, final CheckPoint checkPoint) {
L1Manager l1Manager = TestingUtil.extractComponent(cache, L1Manager.class);
final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(l1Manager);
L1Manager mockL1 = mock(L1Manager.class, withSettings().defaultAnswer(forwardedAnswer).extraInterfaces(RemoteValueRetrievedListener.class));
doAnswer(invocation -> {
// Wait for main thread to sync up
checkPoint.trigger("pre_acquire_shared_topology_lock_invoked");
// Now wait until main thread lets us through
checkPoint.awaitStrict("pre_acquire_shared_topology_lock_released", 10, TimeUnit.SECONDS);
return forwardedAnswer.answer(invocation);
}).when(mockL1).registerL1WriteSynchronizer(Mockito.notNull(), Mockito.any(L1WriteSynchronizer.class));
TestingUtil.replaceComponent(cache, L1Manager.class, mockL1, true);
return l1Manager;
}
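   // Note: as with the lock proxy above, callers should restore the original
   // component afterwards, e.g. as testGetBlockingAnotherGetWithMiss does:
   //    TestingUtil.replaceComponent(cache, L1Manager.class, l1Manager, true);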
}
| 25,123
| 41.947009
| 162
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/InvalidationNoReplicationTest.java
|
package org.infinispan.distribution;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import java.util.Collections;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
/**
* @author Mircea.Markus@jboss.com
* @since 4.2
*/
@Test(groups = "functional", testName = "distribution.InvalidationNoReplicationTest")
public class InvalidationNoReplicationTest extends MultipleCacheManagersTest {
protected Object k0;
{
transactional = true;
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder config = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, transactional);
config.clustering().l1().enable().hash().numOwners(1);
createCluster(TestDataSCI.INSTANCE, config, 2);
waitForClusterToForm();
k0 = getKeyForCache(0);
}
public void testInvalidation() throws Exception {
assertEquals(Collections.singletonList(address(0)), cacheTopology(0).getDistribution(k0).writeOwners());
assertEquals(Collections.singletonList(address(0)), cacheTopology(1).getDistribution(k0).writeOwners());
advancedCache(1).put(k0, "k1");
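      // The put from the non-owner (node 1) leaves an L1 copy there, while the
      // real entry lives on the single owner, node 0.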
assertTrue(advancedCache(1).getDataContainer().containsKey(k0));
assertTrue(advancedCache(0).getDataContainer().containsKey(k0));
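      // A transactional write on the owner must invalidate the L1 copy on node 1.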
tm(0).begin();
cache(0).put(k0, "v2");
tm(0).commit();
assertFalse(advancedCache(1).getDataContainer().containsKey(k0));
}
}
| 1,709
| 30.666667
| 110
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/RemoteGetPerfTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
@Test(groups = "profiling", testName = "distribution.RemoteGetPerfTest")
public class RemoteGetPerfTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
createCluster(getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false), 3);
waitForClusterToForm();
}
public void testRepeatedRemoteGet() {
String key = "key";
List<Address> owners = cacheTopology(0).getDistribution(key).writeOwners();
Cache<Object, Object> nonOwnerCache = caches().stream()
.filter(c -> !owners.contains(address(c)))
.findAny()
.orElse(null);
cache(0).put(key, "value");
for (int i = 0; i < 50000; i++) {
assertEquals("value", nonOwnerCache.get(key));
}
}
}
| 1,250
| 35.794118
| 94
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncStoreSharedTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.Cache;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.marshall.persistence.impl.MarshalledEntryUtil;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* DistSyncStoreSharedTest.
*
* @author Galder Zamarreño
* @since 4.0
*/
@Test(groups = "functional", testName = "distribution.DistSyncStoreSharedTest")
public class DistSyncStoreSharedTest<D extends DistSyncStoreSharedTest> extends BaseDistStoreTest<Object, String, D> {
public DistSyncStoreSharedTest() {
testRetVals = true;
shared = true;
}
@Override
protected ConfigurationBuilder buildConfiguration() {
ConfigurationBuilder configurationBuilder = super.buildConfiguration();
// So we can track persistence manager writes with a shared store
configurationBuilder.statistics().enable();
return configurationBuilder;
}
@AfterMethod
@Override
protected void clearContent() throws Throwable {
super.clearContent();
// Make sure to clear stats after clearing content
for (Cache<?, ?> c: caches) {
log.trace("Clearing stats for cache store on cache "+ c);
clearStats(c);
}
}
@Override
public Object[] factory() {
return new Object[] {
new DistSyncStoreSharedTest().segmented(true),
new DistSyncStoreSharedTest().segmented(false),
};
}
public void testPutFromNonOwner() throws Exception {
String key = "k4", value = "value4";
for (Cache<Object, String> c : caches) assert c.isEmpty();
Cache<Object, String> nonOwner = getFirstNonOwner(key);
DummyInMemoryStore nonOwnerStore = TestingUtil.getFirstStore(nonOwner);
assert !nonOwnerStore.contains(key);
Object retval = nonOwner.put(key, value);
asyncWait(key, PutKeyValueCommand.class);
Cache[] owners = getOwners(key);
DummyInMemoryStore store = TestingUtil.<DummyInMemoryStore, Object, Object>getFirstStore(owners[0]);
assertIsInContainerImmortal(owners[0], key);
assert store.contains(key);
for (int i = 1; i < owners.length; i++) {
store = TestingUtil.<DummyInMemoryStore, Object, Object>getFirstStore(owners[i]);
assertIsInContainerImmortal(owners[i], key);
assert store.contains(key);
}
for (Cache<Object, String> c : caches) {
store = TestingUtil.getFirstStore(c);
assert store.contains(key);
assertNumberOfInvocations(store, "write", 1);
}
if (testRetVals) assert retval == null;
assertOnAllCachesAndOwnership(key, value);
}
public void testPutFromOwner() throws Exception {
String key = "k5", value = "value5";
for (Cache<Object, String> c : caches) assert c.isEmpty();
Cache[] owners = getOwners(key);
Object retval = owners[0].put(key, value);
asyncWait(key, PutKeyValueCommand.class);
DummyInMemoryStore store = TestingUtil.<DummyInMemoryStore, Object, Object>getFirstStore(owners[0]);
assertIsInContainerImmortal(owners[0], key);
assert store.contains(key);
for (int i = 1; i < owners.length; i++) {
store = TestingUtil.<DummyInMemoryStore, Object, Object>getFirstStore(owners[i]);
assertIsInContainerImmortal(owners[i], key);
assert store.contains(key);
}
for (Cache<Object, String> c : caches) {
store = TestingUtil.getFirstStore(c);
if (isOwner(c, key)) {
assertIsInContainerImmortal(c, key);
}
assert store.contains(key);
assertNumberOfInvocations(store, "write", 1);
}
if (testRetVals) assert retval == null;
assertOnAllCachesAndOwnership(key, value);
}
public void testPutAll() throws Exception {
log.trace("Here it begins");
String k1 = "1", v1 = "one", k2 = "2", v2 = "two", k3 = "3", v3 = "three", k4 = "4", v4 = "four";
String[] keys = new String[]{k1, k2, k3, k4};
Map<String, String> data = new HashMap<String, String>();
data.put(k1, v1);
data.put(k2, v2);
data.put(k3, v3);
data.put(k4, v4);
c1.putAll(data);
for (String key : keys) {
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
if (isFirstOwner(c, key)) {
assertIsInContainerImmortal(c, key);
}
log.debug("Testing " + c);
assertNumberOfInvocations(store, "write", 4);
assertTrue(store.contains(key));
}
}
long persistenceManagerInserts = 0;
for (Cache<Object, String> c : caches) {
persistenceManagerInserts += getCacheWriterInterceptor(c).getWritesToTheStores();
}
assertEquals(expectedWriteCount(), persistenceManagerInserts);
}
protected int expectedWriteCount() {
return 4;
}
public void testRemoveFromNonOwner() throws Exception {
String key = "k1", value = "value";
initAndTest();
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
if (isFirstOwner(c, key)) {
assertIsInContainerImmortal(c, key);
assert store.loadEntry(key).getValue().equals(value);
}
}
Object retval = getFirstNonOwner(key).remove(key);
asyncWait("k1", RemoveCommand.class);
if (testRetVals) assert value.equals(retval);
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
MarshallableEntry me = store.loadEntry(key);
if (me == null) {
assertNumberOfInvocations(store, "delete", 1);
assertNumberOfInvocations(store, "write", 1);
} else {
assertNumberOfInvocations(store, "write", 2);
}
}
}
public void testReplaceFromNonOwner() throws Exception {
String key = "k1", value = "value", value2 = "v2";
initAndTest();
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
if (isFirstOwner(c, key)) {
assertIsInContainerImmortal(c, key);
assert store.loadEntry(key).getValue().equals(value);
}
}
Object retval = getFirstNonOwner(key).replace(key, value2);
asyncWait(key, ReplaceCommand.class);
if (testRetVals) assert value.equals(retval);
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
if (isFirstOwner(c, key)) {
assertIsInContainerImmortal(c, key);
}
assert store.loadEntry(key).getValue().equals(value2);
assertNumberOfInvocations(store, "write", 2);
}
}
public void testClear() throws Exception {
for (Cache<Object, String> c : caches) assert c.isEmpty();
for (int i = 0; i < 5; i++) {
getOwners("k" + i)[0].put("k" + i, "value" + i);
asyncWait("k" + i, PutKeyValueCommand.class);
}
// this will fill up L1 as well
for (int i = 0; i < 5; i++) assertOnAllCachesAndOwnership("k" + i, "value" + i);
for (Cache<Object, String> c : caches) assert !c.isEmpty();
c1.clear();
asyncWait(null, ClearCommand.class);
for (Cache<Object, String> c : caches) assert c.isEmpty();
      /* We only check c1 because in a shared situation, no matter where the clear is called,
       * it should clear the whole store regardless. Bear in mind that in this test, even though
       * the cache store is logically shared, each cache has its own cache store instance, which
       * allows for checking who executes puts, removes...etc. */
DummyInMemoryStore store = TestingUtil.getFirstStore(c1);
// DummyInMemoryStore is segmented, so only 1 clear should be invoked
assertNumberOfInvocations(store, "clear", 1);
for (int i = 0; i < 5; i++) {
String key = "k" + i;
assert !store.contains(key);
}
}
public void testGetOnlyQueriesCacheOnOwners() throws PersistenceException {
      // Make a key whose owners are c1 and c2
final MagicKey k = getMagicKey();
final String v1 = "real-data";
final String v2 = "stale-data";
      // Simulate that c3 was on its own and someone wrote a value that is now stale
DummyInMemoryStore store = TestingUtil.getFirstStore(c3);
store.write(MarshalledEntryUtil.create(k, v2, c3));
c1.put(k, v1);
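      // The get from c3 must go remote to the owners (c1/c2) rather than read
      // its own, now stale, store entry.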
assertEquals(v1, c3.get(k));
}
}
| 9,192
| 35.335968
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/RemoteGetTest.java
|
package org.infinispan.distribution;
import static org.infinispan.test.TestingUtil.extractCacheTopology;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.RemoteGetTest")
public class RemoteGetTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
createCluster(TestDataSCI.INSTANCE, getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false), 3);
// make sure all caches are started...
cache(0);
cache(1);
cache(2);
waitForClusterToForm();
}
@SuppressWarnings("unchecked")
private Cache<MagicKey, String> getCacheForAddress(Address a) {
for (Cache<?, ?> c: caches())
if (c.getAdvancedCache().getRpcManager().getAddress().equals(a)) return (Cache<MagicKey, String>) c;
return null;
}
@SuppressWarnings("unchecked")
private Cache<MagicKey, String> getNonOwner(List<Address> a) {
for (Cache<?, ?> c: caches())
if (!a.contains(c.getAdvancedCache().getRpcManager().getAddress())) return (Cache<MagicKey, String>) c;
return null;
}
public void testRemoteGet() {
Cache<MagicKey, String> c1 = cache(0);
Cache<MagicKey, String> c2 = cache(1);
Cache<MagicKey, String> c3 = cache(2);
MagicKey k = new MagicKey(c1, c2);
List<Address> owners = extractCacheTopology(c1).getDistribution(k).writeOwners();
assert owners.size() == 2: "Key should have 2 owners";
Cache<MagicKey, String> owner1 = getCacheForAddress(owners.get(0));
assert owner1 == c1;
Cache<MagicKey, String> owner2 = getCacheForAddress(owners.get(1));
assert owner2 == c2;
Cache<MagicKey, String> nonOwner = getNonOwner(owners);
assert nonOwner == c3;
owner1.put(k, "value");
assert "value".equals(nonOwner.get(k));
}
public void testGetOfNonexistentKey() {
Object v = cache(0).get("__ doesn't exist ___");
assert v == null : "Should get a null response";
}
public void testGetOfNonexistentKeyOnOwner() {
MagicKey mk = new MagicKey("does not exist", cache(0));
Object v = cache(0).get(mk);
assert v == null : "Should get a null response";
}
}
| 2,485
| 33.527778
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ZeroCapacityAdministrationTest.java
|
package org.infinispan.distribution;
import static org.infinispan.commons.test.CommonsTestingUtil.tmpDirectory;
import static org.testng.AssertJUnit.assertNotNull;
import java.nio.file.Paths;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.globalstate.ConfigurationStorage;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
/**
* Tests that it's possible to perform operations via EmbeddedCacheManagerAdmin when zero-capacity-node=true
*
* @author Ryan Emerson
* @since 12.0
*/
@Test(groups = "functional", testName = "distribution.ch.ZeroCapacityAdministrationTest")
public class ZeroCapacityAdministrationTest extends MultipleCacheManagersTest {
private static final String TEST_DIR = tmpDirectory(ZeroCapacityAdministrationTest.class.getSimpleName());
private EmbeddedCacheManager node1;
private EmbeddedCacheManager zeroCapacityNode;
@Override
protected void createCacheManagers() throws Throwable {
String state1 = Paths.get(TEST_DIR, "1").toString();
GlobalConfigurationBuilder global1 = statefulGlobalBuilder(state1);
node1 = addClusterEnabledCacheManager(global1, new ConfigurationBuilder());
String zeroState = Paths.get(TEST_DIR, "zero").toString();
GlobalConfigurationBuilder globalZero = statefulGlobalBuilder(zeroState).zeroCapacityNode(true);
zeroCapacityNode = addClusterEnabledCacheManager(globalZero, new ConfigurationBuilder());
waitForClusterToForm();
}
public void testDefineClusterConfiguration() {
Configuration config = new ConfigurationBuilder().build();
zeroCapacityNode.administration().createCache("zero-cache", config);
zeroCapacityNode.administration().createTemplate("zero-template", config);
assertNotNull(node1.getCache("zero-cache"));
assertNotNull(node1.getCacheConfiguration("zero-template"));
}
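   // Minimal sketch (using the same GlobalConfigurationBuilder API exercised below) of the
   // single switch that makes a node zero-capacity; it still joins the cluster and can
   // administer caches, but the consistent hash assigns it no segments:
   //   GlobalConfigurationBuilder g = GlobalConfigurationBuilder.defaultClusteredBuilder();
   //   g.zeroCapacityNode(true);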
private GlobalConfigurationBuilder statefulGlobalBuilder(String stateDirectory) {
Util.recursiveFileRemove(stateDirectory);
GlobalConfigurationBuilder global = GlobalConfigurationBuilder.defaultClusteredBuilder();
global.globalState().enable().persistentLocation(stateDirectory).sharedPersistentLocation(stateDirectory).configurationStorage(ConfigurationStorage.OVERLAY);
return global;
}
}
| 2,561
| 43.947368
| 163
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/MagicKeyTest.java
|
package org.infinispan.distribution;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "distribution.MagicKeyTest")
public class MagicKeyTest extends BaseDistFunctionalTest<Object, String> {
public void testMagicKeys() {
MagicKey k1 = new MagicKey(c1, c2);
assert getCacheTopology(c1).isWriteOwner(k1);
assert getCacheTopology(c2).isWriteOwner(k1);
assert !getCacheTopology(c3).isWriteOwner(k1);
assert !getCacheTopology(c4).isWriteOwner(k1);
MagicKey k2 = new MagicKey(c2, c3);
assert !getCacheTopology(c1).isWriteOwner(k2);
assert getCacheTopology(c2).isWriteOwner(k2);
assert getCacheTopology(c3).isWriteOwner(k2);
assert !getCacheTopology(c4).isWriteOwner(k2);
MagicKey k3 = new MagicKey(c3, c4);
assert !getCacheTopology(c1).isWriteOwner(k3);
assert !getCacheTopology(c2).isWriteOwner(k3);
assert getCacheTopology(c3).isWriteOwner(k3);
assert getCacheTopology(c4).isWriteOwner(k3);
MagicKey k4 = new MagicKey(c4, c1);
assert getCacheTopology(c1).isWriteOwner(k4);
assert !getCacheTopology(c2).isWriteOwner(k4);
assert !getCacheTopology(c3).isWriteOwner(k4);
assert getCacheTopology(c4).isWriteOwner(k4);
}
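   // Note (sketch of intent, not part of the original test): MagicKey computes a key whose
   // consistent-hash write owners are exactly the caches passed to its constructor, which is
   // what makes the ownership assertions above deterministic.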
}
| 1,264
| 37.333333
| 74
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/TestTopologyAwareAddress.java
|
package org.infinispan.distribution;
import org.infinispan.remoting.transport.TopologyAwareAddress;
/**
* Mock TopologyAwareAddress to be used in tests.
* We only care about the addressNum for equality, so we don't override compareTo(), equals() and hashCode().
*
* @author Dan Berindei <dberinde@redhat.com>
* @since 5.0
*/
public class TestTopologyAwareAddress extends TestAddress implements TopologyAwareAddress {
String siteId, rackId, machineId;
public TestTopologyAwareAddress(int addressNum, String siteId, String rackId, String machineId) {
super(addressNum);
this.siteId = siteId;
this.rackId = rackId;
this.machineId = machineId;
}
public TestTopologyAwareAddress(int addressNum) {
this(addressNum, null, null, null);
}
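   // Usage sketch (hypothetical values): two addresses on the same rack but on different machines:
   //   TestTopologyAwareAddress a1 = new TestTopologyAwareAddress(1, "s1", "r1", "m1");
   //   TestTopologyAwareAddress a2 = new TestTopologyAwareAddress(2, "s1", "r1", "m2");
   //   assert a1.isSameRack(a2) && !a1.isSameMachine(a2);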
@Override
public String toString() {
return super.toString() + "|" + machineId + "|" + rackId + "|" + siteId;
}
@Override
public boolean isSameSite(TopologyAwareAddress addr) {
return siteId != null ? siteId.equals(addr.getSiteId()) : addr.getSiteId() == null;
}
@Override
public boolean isSameRack(TopologyAwareAddress addr) {
if (!isSameSite(addr))
return false;
return rackId != null ? rackId.equals(addr.getRackId()) : addr.getRackId() == null;
}
@Override
public boolean isSameMachine(TopologyAwareAddress addr) {
if (!isSameSite(addr) || !isSameRack(addr))
return false;
return machineId != null ? machineId.equals(addr.getMachineId()) : addr.getMachineId() == null;
}
public String getSiteId() {
return siteId;
}
public void setSiteId(String siteId) {
this.siteId = siteId;
}
public String getRackId() {
return rackId;
}
public void setRackId(String rackId) {
this.rackId = rackId;
}
public String getMachineId() {
return machineId;
}
public void setMachineId(String machineId) {
this.machineId = machineId;
}
}
| 1,983
| 25.453333
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DisabledL1WithRetValsTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertFalse;
import org.infinispan.Cache;
import org.testng.annotations.Test;
/**
* Test distribution when L1 is disabled and return values are needed.
*
* @author Galder Zamarreño
* @author Manik Surtani
* @since 5.0
*/
@Test(groups = "functional", testName = "distribution.DisabledL1WithRetValsTest")
public class DisabledL1WithRetValsTest extends BaseDistFunctionalTest<Object, String> {
public DisabledL1WithRetValsTest() {
l1CacheEnabled = false;
testRetVals = true;
numOwners = 1;
INIT_CLUSTER_SIZE = 2;
}
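   // With a single owner, no L1 and testRetVals enabled, every operation below that runs on
   // the non-owner must fetch the previous value remotely before applying the write.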
public void testReplaceFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
Object retval = nonOwner.replace("k1", "value2");
assert "value".equals(retval);
assertOnAllCachesAndOwnership("k1", "value2");
}
public void testConditionalReplaceFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
boolean success = nonOwner.replace("k1", "blah", "value2");
assert !success;
assertOnAllCachesAndOwnership("k1", "value");
success = nonOwner.replace("k1", "value", "value2");
assert success;
assertOnAllCachesAndOwnership("k1", "value2");
}
public void testPutFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
Object retval = nonOwner.put("k1", "value2");
assert "value".equals(retval);
assertOnAllCachesAndOwnership("k1", "value2");
}
public void testRemoveFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
Object retval = nonOwner.remove("k1");
assert "value".equals(retval);
assertRemovedOnAllCaches("k1");
}
public void testConditionalRemoveFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
boolean removed = nonOwner.remove("k1", "blah");
assert !removed;
assertOnAllCachesAndOwnership("k1", "value");
removed = nonOwner.remove("k1", "value");
assert removed;
assertRemovedOnAllCaches("k1");
}
public void testPutIfAbsentFromNonOwner() {
initAndTest();
Object retval = getFirstNonOwner("k1").putIfAbsent("k1", "value2");
assert "value".equals(retval);
assertOnAllCachesAndOwnership("k1", "value");
c1.clear();
assertFalse(c1.getAdvancedCache().getLockManager().isLocked("k1"));
assertFalse(c2.getAdvancedCache().getLockManager().isLocked("k1"));
retval = getFirstNonOwner("k1").putIfAbsent("k1", "value2");
assert null == retval;
assertOnAllCachesAndOwnership("k1", "value2");
}
}
| 2,802
| 25.951923
| 87
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/PessimisticDistSyncTxStoreSharedTest.java
|
package org.infinispan.distribution;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.PersistenceUtil;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* Tests distributed caches with shared cache stores under transactional
* environments.
*
* @author Thomas Fromm
* @since 5.1
*/
@Test(groups = "functional", testName = "distribution.PessimisticDistSyncTxStoreSharedTest")
public class PessimisticDistSyncTxStoreSharedTest extends MultipleCacheManagersTest {
private ConfigurationBuilder getCB(){
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.clustering()
.cacheMode(CacheMode.DIST_SYNC)
.remoteTimeout(60000)
.stateTransfer().timeout(180000).fetchInMemoryState(true)
.hash().numOwners(1);
// transactions
cb.transaction()
.transactionMode(TransactionMode.TRANSACTIONAL)
.lockingMode(LockingMode.PESSIMISTIC);
// cb.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
cb.persistence().passivation(false);
// Make it really shared by adding the test's name as store name
cb.persistence()
.addStore(DummyInMemoryStoreConfigurationBuilder.class).preload(true).shared(true)
.storeName(getClass().getSimpleName()).async()
.disable();
return cb;
}
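   // Note: preload(true) repopulates the data container from the shared store at startup,
   // and the trailing async().disable() keeps the store write path synchronous.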
@Override
protected void createCacheManagers() throws Throwable {
createCluster(getCB(), 1);
defineConfigurationOnAllManagers("P006", getCB());
waitForClusterToForm();
}
@Test
public void testInvalidPut() throws Exception {
Cache<String, String> cache = cacheManagers.get(0).getCache("P006");
IntSet allSegments = IntSets.immutableRangeSet(cache.getCacheConfiguration().clustering().hash().numSegments());
// add 1st 4 elements
for(int i = 0; i < 4; i++){
cache.put(cacheManagers.get(0).getAddress().toString()+"-"+i, "42");
}
// lets check if all elements arrived
DummyInMemoryStore cs1 = TestingUtil.getFirstStore(cache);
Set<Object> keys = PersistenceUtil.toKeySet(cs1, allSegments, null);
Assert.assertEquals(keys.size(), 4);
// now start 2nd node
addClusterEnabledCacheManager(getCB()).defineConfiguration("P006", getCB().build());
waitForClusterToForm("P006");
cache = cacheManagers.get(1).getCache("P006");
// add next 4 elements
for(int i = 0; i < 4; i++){
cache.put(cacheManagers.get(1).getAddress().toString()+"-"+i, "42");
}
      Set<Object> mergedKeys = new HashSet<>();
// add keys from all cache stores
DummyInMemoryStore cs2 = TestingUtil.getFirstStore(cache);
log.debugf("Load from cache store via cache 1");
mergedKeys.addAll(PersistenceUtil.toKeySet(cs1, allSegments, null));
log.debugf("Load from cache store via cache 2");
mergedKeys.addAll(PersistenceUtil.toKeySet(cs2, allSegments, null));
Assert.assertEquals(mergedKeys.size(), 8);
}
}
| 3,620
| 34.851485
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncTxL1FuncTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.VersionedCommitCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.distribution.L1TxInterceptor;
import org.infinispan.interceptors.distribution.TxDistributionInterceptor;
import org.infinispan.interceptors.distribution.VersionedDistributionInterceptor;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.ControlledRpcManager;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncTxL1FuncTest")
public class DistSyncTxL1FuncTest extends BaseDistSyncL1Test {
@Override
public Object[] factory() {
return new Object[] {
new DistSyncTxL1FuncTest().isolationLevel(IsolationLevel.READ_COMMITTED),
new DistSyncTxL1FuncTest().isolationLevel(IsolationLevel.REPEATABLE_READ)
};
}
public DistSyncTxL1FuncTest() {
transactional = true;
testRetVals = true;
}
@Override
protected Class<? extends AsyncInterceptor> getDistributionInterceptorClass() {
return isVersioned() ? VersionedDistributionInterceptor.class : TxDistributionInterceptor.class;
}
@Override
protected Class<? extends AsyncInterceptor> getL1InterceptorClass() {
return L1TxInterceptor.class;
}
protected Class<? extends VisitableCommand> getCommitCommand() {
return isVersioned() ? VersionedCommitCommand.class : CommitCommand.class;
}
private boolean isVersioned() {
return (lockingMode == null || lockingMode == LockingMode.OPTIMISTIC) &&
(isolationLevel == null || isolationLevel == IsolationLevel.REPEATABLE_READ);
}
@Override
protected <K> void assertL1StateOnLocalWrite(Cache<? super K, ?> cache, Cache<?, ?> updatingCache, K key, Object valueWrite) {
if (cache != updatingCache) {
super.assertL1StateOnLocalWrite(cache, updatingCache, key, valueWrite);
}
else {
InternalCacheEntry ice = cache.getAdvancedCache().getDataContainer().get(key);
assertNotNull(ice);
assertEquals(valueWrite, ice.getValue());
}
}
@Test
public void testL1UpdatedOnReplaceOperationFailure() {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
assertFalse(nonOwnerCache.replace(key, "not-same", secondValue));
assertIsInL1(nonOwnerCache, key);
}
@Test
public void testL1UpdatedOnRemoveOperationFailure() {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
assertFalse(nonOwnerCache.remove(key, "not-same"));
assertIsInL1(nonOwnerCache, key);
}
@Test
public void testL1UpdatedBeforePutCommits() throws Exception {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
nonOwnerCache.getAdvancedCache().getTransactionManager().begin();
assertEquals(firstValue, nonOwnerCache.put(key, secondValue));
InternalCacheEntry ice = nonOwnerCache.getAdvancedCache().getDataContainer().get(key);
assertNotNull(ice);
assertEquals(firstValue, ice.getValue());
// Commit the put which should now update
nonOwnerCache.getAdvancedCache().getTransactionManager().commit();
ice = nonOwnerCache.getAdvancedCache().getDataContainer().get(key);
assertNotNull(ice);
assertEquals(secondValue, ice.getValue());
}
@Test
public void testL1UpdatedBeforeRemoveCommits() throws Exception {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
assertIsNotInL1(nonOwnerCache, key);
nonOwnerCache.getAdvancedCache().getTransactionManager().begin();
assertEquals(firstValue, nonOwnerCache.remove(key));
InternalCacheEntry ice = nonOwnerCache.getAdvancedCache().getDataContainer().get(key);
assertNotNull(ice);
assertEquals(firstValue, ice.getValue());
// Commit the put which should now update
nonOwnerCache.getAdvancedCache().getTransactionManager().commit();
assertIsNotInL1(nonOwnerCache, key);
}
@Test
public void testGetOccursAfterReplaceRunningBeforeRetrievedRemote() throws ExecutionException, InterruptedException, BrokenBarrierException, TimeoutException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
CyclicBarrier barrier = new CyclicBarrier(2);
addBlockingInterceptorBeforeTx(nonOwnerCache, barrier, ReplaceCommand.class, false);
try {
// The replace will internally block the get until it gets the remote value
Future<Boolean> futureReplace = fork(() -> nonOwnerCache.replace(key, firstValue, secondValue));
barrier.await(5, TimeUnit.SECONDS);
Future<String> futureGet = fork(() -> nonOwnerCache.get(key));
TestingUtil.assertNotDone(futureGet);
// Let the replace now finish
barrier.await(5, TimeUnit.SECONDS);
assertTrue(futureReplace.get(5, TimeUnit.SECONDS));
assertEquals(firstValue, futureGet.get(5, TimeUnit.SECONDS));
} finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
}
}
@Test
public void testGetOccursAfterReplaceRunningBeforeWithRemoteException() throws Exception {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
CyclicBarrier barrier = new CyclicBarrier(2);
addBlockingInterceptorBeforeTx(nonOwnerCache, barrier, ReplaceCommand.class, false);
ControlledRpcManager controlledRpcManager = ControlledRpcManager.replaceRpcManager(nonOwnerCache);
try {
// The replace will internally block the get until it gets the remote value
Future<Boolean> futureReplace = fork(() -> nonOwnerCache.replace(key, firstValue, secondValue));
barrier.await(5, TimeUnit.SECONDS);
Future<String> futureGet = fork(() -> nonOwnerCache.get(key));
         // The get will block locally on the L1WriteSynchronizer registered by the replace command
TestingUtil.assertNotDone(futureGet);
controlledRpcManager.expectNoCommand();
// Continue the replace
barrier.await(5, TimeUnit.SECONDS);
// That also unblocks the get command and allows it to perform the remote get
controlledRpcManager.expectCommand(ClusteredGetCommand.class)
.skipSend()
.receive(address(ownerCache), new ExceptionResponse(new TestException()));
Exceptions.expectExecutionException(RemoteException.class, TestException.class, futureReplace);
Exceptions.expectExecutionException(RemoteException.class, TestException.class, futureGet);
} finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
controlledRpcManager.revertRpcManager();
}
}
@Test
public void testGetOccursBeforePutCompletesButRetrievesRemote() throws InterruptedException, TimeoutException, BrokenBarrierException, ExecutionException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ownerCache.put(key, firstValue);
CyclicBarrier barrier = new CyclicBarrier(2);
      // This way the put should retrieve the remote value, but block before it has actually tried to update it
addBlockingInterceptorBeforeTx(nonOwnerCache, barrier, PutKeyValueCommand.class, true);
try {
         // The put will internally block the get until it gets the remote value
         Future<String> futurePut = fork(() -> nonOwnerCache.put(key, secondValue));
barrier.await(5, TimeUnit.SECONDS);
Future<String> futureGet = fork(() -> nonOwnerCache.get(key));
         // If this times out, it means the get was blocked by the write operation even though the
         // write already retrieved the remote value and should have unblocked any other waiters
assertEquals(firstValue, futureGet.get(3, TimeUnit.SECONDS));
// Just make sure it was put into L1 properly as well
assertIsInL1(nonOwnerCache, key);
// Let the put now finish
barrier.await(5, TimeUnit.SECONDS);
         assertEquals(firstValue, futurePut.get());
assertEquals(firstValue, futureGet.get(5, TimeUnit.SECONDS));
} finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
}
}
/**
* See ISPN-3648
*/
public void testBackupOwnerInvalidatesL1WhenPrimaryIsUnaware() throws InterruptedException, TimeoutException,
BrokenBarrierException, ExecutionException {
final Cache<Object, String>[] owners = getOwners(key, 2);
final Cache<Object, String> ownerCache = owners[0];
final Cache<Object, String> backupOwnerCache = owners[1];
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
ownerCache.put(key, firstValue);
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
// Add a barrier to block the commit on the backup owner so it doesn't yet update the value. Note this
// will also block the primary owner since it is a sync call
CyclicBarrier backupPutBarrier = new CyclicBarrier(2);
addBlockingInterceptor(backupOwnerCache, backupPutBarrier, getCommitCommand(), getL1InterceptorClass(),
false);
try {
Future<String> future = fork(() -> ownerCache.put(key, secondValue));
// Wait until owner has tried to replicate to backup owner
backupPutBarrier.await(10, TimeUnit.SECONDS);
assertEquals(firstValue, ownerCache.getAdvancedCache().getDataContainer().get(key).getValue());
assertEquals(firstValue, backupOwnerCache.getAdvancedCache().getDataContainer().get(key).getValue());
// Now remove the interceptor, just so we can add another. This is okay since it still retains the next
// interceptor reference properly
removeAllBlockingInterceptorsFromCache(ownerCache);
// Add a barrier to block the get from being retrieved on the primary owner
CyclicBarrier ownerGetBarrier = new CyclicBarrier(2);
addBlockingInterceptor(ownerCache, ownerGetBarrier, GetCacheEntryCommand.class, getL1InterceptorClass(),
false);
// This should be retrieved from the backup owner
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
// Just let the owner put and backup puts complete now
backupPutBarrier.await(10, TimeUnit.SECONDS);
         // Now wait for the owner's put to complete, which must happen before the owner serves the get from the non-owner
assertEquals(firstValue, future.get(10, TimeUnit.SECONDS));
// Finally let the get to go to the owner
ownerGetBarrier.await(10, TimeUnit.SECONDS);
ownerGetBarrier.await(10, TimeUnit.SECONDS);
// This is async in the LastChance interceptor
eventually(() -> !isInL1(nonOwnerCache, key));
assertEquals(secondValue, ownerCache.getAdvancedCache().getDataContainer().get(key).getValue());
} finally {
removeAllBlockingInterceptorsFromCache(ownerCache);
removeAllBlockingInterceptorsFromCache(backupOwnerCache);
}
}
/**
* See ISPN-3518
*/
public void testInvalidationSynchronous() throws Exception {
final Cache<Object, String>[] owners = getOwners(key, 2);
final Cache<Object, String> ownerCache = owners[0];
final Cache<Object, String> backupOwnerCache = owners[1];
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
ownerCache.put(key, firstValue);
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
ControlledRpcManager crm = ControlledRpcManager.replaceRpcManager(ownerCache);
ControlledRpcManager crm2 = ControlledRpcManager.replaceRpcManager(backupOwnerCache);
try {
Future<String> future = fork(() -> ownerCache.put(key, secondValue));
if (!onePhaseCommitOptimization) {
// With 2PC the invalidation is sent before the commit but after the prepare
crm.expectCommand(PrepareCommand.class).send().receiveAll();
}
// Wait for the L1 invalidation commands and block them
ControlledRpcManager.BlockedRequest blockedInvalidate1 = crm.expectCommand(InvalidateL1Command.class);
crm2.expectNoCommand(100, TimeUnit.MILLISECONDS);
try {
future.get(1, TimeUnit.SECONDS);
fail("This should have timed out since, they cannot invalidate L1");
} catch (TimeoutException e) {
// We should get a timeout exception as the L1 invalidation commands are blocked and it should be sync
// so the invalidations are completed before the write completes
}
// Now we should let the L1 invalidations go through
blockedInvalidate1.send().receiveAll();
if (onePhaseCommitOptimization) {
// With 1PC the invalidation command is sent before the prepare command
crm.expectCommand(PrepareCommand.class).send().receiveAll();
} else {
crm.expectCommand(CommitCommand.class).send().receiveAll();
crm.expectCommand(TxCompletionNotificationCommand.class).send();
}
assertEquals(firstValue, future.get(10, TimeUnit.SECONDS));
assertIsNotInL1(nonOwnerCache, key);
assertEquals(secondValue, nonOwnerCache.get(key));
} finally {
removeAllBlockingInterceptorsFromCache(ownerCache);
removeAllBlockingInterceptorsFromCache(backupOwnerCache);
crm.revertRpcManager();
crm2.revertRpcManager();
}
}
}
| 16,087
| 39.936387
| 162
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncTxStoreSharedTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import org.infinispan.Cache;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* Distributed, transactional, shared cache store tests.
*
* @author Galder Zamarreño
* @since 5.1
*/
@Test(groups = "functional", testName = "distribution.DistSyncTxStoreSharedTest")
public class DistSyncTxStoreSharedTest extends BaseDistStoreTest {
public DistSyncTxStoreSharedTest() {
transactional = true;
testRetVals = true;
shared = true;
}
@Override
public Object[] factory() {
return new Object[] {
new DistSyncTxStoreSharedTest().segmented(true),
new DistSyncTxStoreSharedTest().segmented(false),
};
}
public void testPutFromNonOwner() throws Exception {
Cache<Object, String> cacheX = getFirstNonOwner("key1");
DummyInMemoryStore storeX = TestingUtil.getFirstStore(cacheX);
cacheX.put("key1", "v1");
assertEquals("v1", cacheX.get("key1"));
assertNotNull(storeX.loadEntry("key1"));
assertEquals("v1", storeX.loadEntry("key1").getValue());
}
}
| 1,283
| 28.181818
| 81
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/IllegalMonitorTest.java
|
package org.infinispan.distribution;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.commons.test.TestResourceTracker;
import org.testng.annotations.Test;
/**
* See ISPN-919 : It's possible we try to release a lock we didn't acquire.
* This is by design, so that we don't have to keep track of them:
* @see org.infinispan.util.concurrent.locks.LockManager#possiblyLocked(org.infinispan.container.entries.CacheEntry)
*
* @author Sanne Grinovero <sanne@hibernate.org> (C) 2011 Red Hat Inc.
* @since 5.0
*/
@Test(groups = "functional", testName = IllegalMonitorTest.TEST_NAME)
public class IllegalMonitorTest extends BaseDistFunctionalTest<Object, String> {
protected static final String TEST_NAME = "distribution.IllegalMonitorTest";
private static final AtomicInteger sequencer = new AtomicInteger();
private final String key = TEST_NAME;
public IllegalMonitorTest() {
testRetVals = true;
l1CacheEnabled = true;
}
/**
    * This test would throw many IllegalMonitorStateExceptions if they were not hidden by the
* implementation of the LockManager
*
* @throws InterruptedException
*/
@Test(threadPoolSize = 7, invocationCount = 21)
public void testScenario() throws InterruptedException {
TestResourceTracker.testThreadStarted(this.getTestName());
int myId = sequencer.incrementAndGet();
      AdvancedCache<Object, String> cache = this.caches.get(myId % this.INIT_CLUSTER_SIZE).getAdvancedCache();
for (int i = 0; i < 100; i++) {
if (i % 4 == 0)
cache.withFlags(Flag.SKIP_LOCKING).put(key, "value");
cache.withFlags(Flag.SKIP_LOCKING).remove(key);
}
cache.clear();
}
}
| 1,785
| 34.72
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/InvalidationNoReplicationNoTxTest.java
|
package org.infinispan.distribution;
import static org.testng.Assert.assertEquals;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.1
*/
@Test (groups = "functional", testName = "distribution.InvalidationNoReplicationNoTxTest")
public class InvalidationNoReplicationNoTxTest extends InvalidationNoReplicationTest {
public InvalidationNoReplicationNoTxTest() {
transactional = false;
}
public void testInvalidation() throws Exception {
cache(1).put(k0, "v0");
assert advancedCache(0).getDataContainer().containsKey(k0);
assert !advancedCache(1).getDataContainer().containsKey(k0);
assertEquals(cache(1).get(k0), "v0");
assert advancedCache(0).getDataContainer().containsKey(k0);
assert advancedCache(1).getDataContainer().containsKey(k0);
log.info("Here is the put!");
log.infof("Cache 0=%s cache 1=%s", address(0), address(1));
cache(0).put(k0, "v1");
log.info("before assertions!");
assertEquals(advancedCache(1).getDataContainer().get(k0), null);
assertEquals(advancedCache(0).getDataContainer().get(k0).getValue(), "v1");
}
}
| 1,157
| 30.297297
| 90
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncL1PessimisticFuncTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncL1PessimisticFuncTest")
public class DistSyncL1PessimisticFuncTest extends BaseDistFunctionalTest {
public DistSyncL1PessimisticFuncTest() {
transactional = true;
testRetVals = true;
lockingMode = LockingMode.PESSIMISTIC;
}
public void testWriteLockBlockingForceWriteL1Update() throws Exception {
final String key = "some-key";
String value = "some-value";
final String otherValue = "some-new-value";
final Cache<Object, String> nonOwner = getFirstNonOwner(key);
final Cache<Object, String> owner = getFirstOwner(key);
owner.put(key, value);
// Get put in L1
nonOwner.get(key);
assertIsInL1(nonOwner, key);
try {
// Owner now does a write
TransactionManager ownerManger = TestingUtil.getTransactionManager(owner);
ownerManger.begin();
// This should lock the key
owner.put(key, otherValue);
// Now non owner tries to lock the key, but should get blocked
         Future<String> futureGet = fork(() -> {
            TransactionManager mgr = TestingUtil.getTransactionManager(nonOwner);
            mgr.begin();
            try {
               return nonOwner.getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK).get(key);
            } finally {
               mgr.commit();
            }
         });
// Get should not be able to complete
try {
futureGet.get(1, TimeUnit.SECONDS);
fail("Get command should have blocked waiting");
} catch (TimeoutException e) {
}
ownerManger.commit();
assertEquals(otherValue, futureGet.get(1, TimeUnit.SECONDS));
assertIsInL1(nonOwner, key);
} finally {
nonOwner.getAdvancedCache().getAsyncInterceptorChain().removeInterceptor(BlockingInterceptor.class);
}
}
public void testForceWriteLockWithL1Invalidation() throws Exception {
final String key = "some-key";
String value = "some-value";
final String otherValue = "some-new-value";
final Cache<Object, String> nonOwner = getFirstNonOwner(key);
final Cache<Object, String> owner = getFirstOwner(key);
owner.put(key, value);
// Get put in L1
nonOwner.get(key);
assertIsInL1(nonOwner, key);
try {
// Owner now does a write
TransactionManager ownerManger = TestingUtil.getTransactionManager(owner);
ownerManger.begin();
// This should lock the key
assertEquals(value, nonOwner.getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK).get(key));
         // Now the key is written through the owner, but the put should block on the lock acquired above
         Future<String> futurePut = fork(() -> {
            TransactionManager mgr = TestingUtil.getTransactionManager(nonOwner);
            mgr.begin();
            try {
               return owner.put(key, otherValue);
            } finally {
               mgr.commit();
            }
         });
// Put should not be able to complete
try {
futurePut.get(1, TimeUnit.SECONDS);
fail("Get command should have blocked waiting");
} catch (TimeoutException e) {
}
ownerManger.commit();
assertEquals(value, futurePut.get(1, TimeUnit.SECONDS));
         // Value should be removed from L1 eventually
         eventually(() -> !isInL1(nonOwner, key));
assertIsNotInL1(nonOwner, key);
} finally {
nonOwner.getAdvancedCache().getAsyncInterceptorChain().removeInterceptor(BlockingInterceptor.class);
}
}
}
| 4,583
| 30.833333
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistributionTestHelper.java
|
package org.infinispan.distribution;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Test helper class
*
* @author Manik Surtani
* @since 4.2.1
*/
public class DistributionTestHelper {
public static String safeType(Object o) {
if (o == null) return "null";
return o.getClass().getSimpleName();
}
public static void assertIsInL1(Cache<?, ?> cache, Object key) {
DataContainer<?, ?> dc = cache.getAdvancedCache().getDataContainer();
InternalCacheEntry<?, ?> ice = dc.peek(key);
assert ice != null : "Entry for key [" + key + "] should be in L1 on cache at [" + addressOf(cache) + "]!";
assert !(ice instanceof ImmortalCacheEntry) : "Entry for key [" + key + "] should have a lifespan on cache at [" + addressOf(cache) + "]!";
}
public static void assertIsNotInL1(Cache<?, ?> cache, Object key) {
DataContainer<?, ?> dc = cache.getAdvancedCache().getDataContainer();
InternalCacheEntry<?, ?> ice = dc.peek(key);
assert ice == null : "Entry for key [" + key + "] should not be in data container at all on cache at [" + addressOf(cache) + "]!";
}
public static void assertIsInContainerImmortal(Cache<?, ?> cache, Object key) {
Log log = LogFactory.getLog(BaseDistFunctionalTest.class);
DataContainer<?, ?> dc = cache.getAdvancedCache().getDataContainer();
InternalCacheEntry<?, ?> ice = dc.peek(key);
if (ice == null) {
String msg = "Entry for key [" + key + "] should be in data container on cache at [" + addressOf(cache) + "]!";
log.fatal(msg);
assert false : msg;
}
if (!(ice instanceof ImmortalCacheEntry)) {
String msg = "Entry for key [" + key + "] on cache at [" + addressOf(cache) + "] should be immortal but was [" + ice + "]!";
log.fatal(msg);
assert false : msg;
}
}
public static void assertIsInL1OrNull(Cache<?, ?> cache, Object key) {
Log log = LogFactory.getLog(BaseDistFunctionalTest.class);
DataContainer<?, ?> dc = cache.getAdvancedCache().getDataContainer();
InternalCacheEntry<?, ?> ice = dc.peek(key);
if (ice instanceof ImmortalCacheEntry) {
String msg = "Entry for key [" + key + "] on cache at [" + addressOf(cache) + "] should be mortal or null but was [" + ice + "]!";
log.fatal(msg);
assert false : msg;
}
}
public static boolean isOwner(Cache<?, ?> c, Object key) {
DistributionManager dm = c.getAdvancedCache().getDistributionManager();
return dm.getCacheTopology().isWriteOwner(key);
}
public static boolean isFirstOwner(Cache<?, ?> c, Object key) {
DistributionManager dm = c.getAdvancedCache().getDistributionManager();
return dm.getCacheTopology().getDistribution(key).isPrimary();
}
public static boolean hasOwners(Object key, Cache<?, ?> primaryOwner, Cache<?, ?>... backupOwners) {
DistributionManager dm = primaryOwner.getAdvancedCache().getDistributionManager();
List<Address> ownerAddresses = dm.getCacheTopology().getDistribution(key).writeOwners();
if (!addressOf(primaryOwner).equals(ownerAddresses.get(0)))
return false;
for (Cache<?, ?> backupOwner : backupOwners) {
if (!ownerAddresses.contains(addressOf(backupOwner)))
return false;
}
return true;
}
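   // Usage sketch (hypothetical caches/key): assert that c1 is the primary owner of "k1"
   // and c2 is a backup:
   //   assert DistributionTestHelper.hasOwners("k1", c1, c2);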
public static <K, V> Collection<Cache<K, V>> getOwners(Object key, List<Cache<K, V>> caches) {
List<Cache<K, V>> owners = new ArrayList<>();
for (Cache<K, V> c : caches) {
if (isFirstOwner(c, key)) {
owners.add(c);
break;
}
}
for (Cache<K, V> c : caches)
if (isOwner(c, key) && !isFirstOwner(c, key)) owners.add(c);
return owners;
}
public static <K, V> Cache<K, V> getFirstOwner(Object key, List<Cache<K, V>> caches) {
return getOwners(key, caches).iterator().next();
}
public static <K, V> Cache<K, V> getFirstBackupOwner(Object key, List<Cache<K, V>> caches) {
for (Cache<K, V> c : caches) {
if (isOwner(c, key) && !isFirstOwner(c, key)) {
return c;
}
}
return null;
}
public static <K, V> Collection<Cache<K, V>> getNonOwners(Object key, List<Cache<K, V>> caches) {
List<Cache<K, V>> nonOwners = new ArrayList<>();
for (Cache<K, V> c : caches)
if (!isOwner(c, key)) nonOwners.add(c);
return nonOwners;
}
public static <K, V> Cache<K, V> getFirstNonOwner(Object key, List<Cache<K, V>> caches) {
return getNonOwners(key, caches).iterator().next();
}
public static Address addressOf(Cache<?, ?> cache) {
EmbeddedCacheManager cacheManager = cache.getCacheManager();
return cacheManager.getAddress();
}
}
| 5,206
| 37.007299
| 145
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/HashFunctionComparisonTest.java
|
package org.infinispan.distribution;
import static org.infinispan.commons.util.Util.padString;
import static org.infinispan.commons.util.Util.prettyPrintTime;
import static org.infinispan.profiling.testinternals.Generator.generateAddress;
import static org.infinispan.profiling.testinternals.Generator.getRandomByteArray;
import static org.infinispan.profiling.testinternals.Generator.getRandomString;
import java.nio.charset.StandardCharsets;
import java.text.NumberFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
import org.infinispan.commons.hash.Hash;
import org.infinispan.commons.hash.MurmurHash3;
import org.infinispan.remoting.transport.Address;
import org.testng.annotations.Test;
/**
* This test benchmarks different hash functions.
*/
@Test (groups = "manual", testName = "distribution.HashFunctionComparisonTest")
public class HashFunctionComparisonTest {
private static final int MAX_STRING_SIZE = 16;
private static final int MAX_BYTE_ARRAY_SIZE = 16;
private static final int NUM_KEYS_PER_TYPE = 1000 * 100;
private static final int MODULUS_BASE = 1024;
private static final NumberFormat nf = NumberFormat.getInstance();
private static List<Hash> getHashFunctions() {
List<Hash> functions = new LinkedList<Hash>();
functions.add(MurmurHash3.getInstance());
functions.add(new SuperFastHash());
return functions;
}
public void doTest() {
for (int i : Arrays.asList(10, 50, 100, 500, 1000)) {
System.out.printf("----------------- Testing with %s nodes -----------------%n", i);
addressDistribution(i);
}
}
/**
* Tests how well JGroupsAddresses are distributed on a hash wheel.
*/
private void addressDistribution(int numAddresses) {
int hashSpace = 10240;
Collection<Hash> functions = getHashFunctions();
System.out.printf("%s %s %s %s %s %n%n", padString("Function", 25), padString("Greatest dist", 15), padString("Smallest dist", 15), padString("Mean dist", 15), padString("Positions", 15));
for (Hash f : functions) {
List<Address> addresses = new LinkedList<Address>();
for (int i=0; i<numAddresses; i++) addresses.add(generateAddress());
SortedMap<Integer, Address> positions = new TreeMap<Integer, Address>();
         // Math.floorMod keeps positions in [0, hashSpace) even when the hash is negative
         for (Address a : addresses) positions.put(Math.floorMod(f.hash(a.hashCode()), hashSpace), a);
System.out.printf("%s %s %s %s %s %n%n",
padString(f.getClass().getSimpleName(), 25),
padString(greatestDist(positions, hashSpace), 15),
padString(smallestDist(positions, hashSpace), 15),
padString(meanDist(positions, hashSpace), 15),
// positions);
"-");
}
System.out.printf("%s %s %s %s %s %n%n",
padString("Perfectly Balanced", 25),
padString("-", 15),
padString("-", 15),
padString(Integer.toString(hashSpace / numAddresses), 15),
"-");
}
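   // Note: the *Dist helpers below measure gaps between consecutive positions on the wheel;
   // the first iteration compares the first and last positions directly, so the wrap-around
   // gap is approximated by |first - last| rather than the true circular distance.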
private String greatestDist(SortedMap<Integer, Address> pos, int hashSpace) {
// calc distances between entries 0 and n - 1 first.
int largest = 0;
int lastPos = lastEntry(pos).getKey();
int firstPos = -1;
for (int currentPos: pos.keySet()) {
if (firstPos == -1) firstPos = currentPos;
largest = Math.max(largest, Math.abs(currentPos - lastPos));
lastPos = currentPos;
}
return String.valueOf(largest);
}
private String smallestDist(SortedMap<Integer, Address> pos, int hashSpace) {
// calc distances between entries 0 and n - 1 first.
int smallest = Integer.MAX_VALUE;
int lastPos = lastEntry(pos).getKey();
int firstPos = -1;
for (int currentPos: pos.keySet()) {
if (firstPos == -1) firstPos = currentPos;
smallest = Math.min(smallest, Math.abs(currentPos - lastPos));
lastPos = currentPos;
}
return String.valueOf(smallest);
}
private String meanDist(SortedMap<Integer, Address> pos, int hashSpace) {
// calc distances between entries 0 and n - 1 first.
int totalDist = 0;
int lastPos = lastEntry(pos).getKey();
int firstPos = -1;
for (int currentPos: pos.keySet()) {
if (firstPos == -1) firstPos = currentPos;
totalDist += Math.abs(currentPos - lastPos);
lastPos = currentPos;
}
return String.valueOf(totalDist / pos.size());
}
private Map.Entry<Integer, Address> lastEntry(SortedMap<Integer, Address> m) {
Map.Entry<Integer, Address> last = null;
for (Map.Entry<Integer, Address> e: m.entrySet()) last = e;
return last;
}
public void testHashs() {
Collection<Hash> functions = getHashFunctions();
Set<Object> objectKeys = new HashSet<Object>(NUM_KEYS_PER_TYPE);
Set<String> stringKeys = new HashSet<String>(NUM_KEYS_PER_TYPE);
Set<byte[]> byteArrayKeys = new HashSet<byte[]>(NUM_KEYS_PER_TYPE);
// generate keys
for (int i = 0; i < NUM_KEYS_PER_TYPE; i++) {
String s = getRandomString(MAX_STRING_SIZE);
objectKeys.add(s);
stringKeys.add(s);
byteArrayKeys.add(getRandomByteArray(MAX_BYTE_ARRAY_SIZE));
}
perform(functions, objectKeys, stringKeys, byteArrayKeys, false);
perform(functions, objectKeys, stringKeys, byteArrayKeys, true);
}
private void captureStats(int hash, DescriptiveStatistics stats) {
// comment this impl out if measuring raw performance
      stats.addValue(Math.floorMod(hash, MODULUS_BASE)); // floorMod avoids negative buckets
}
private void perform(Collection<Hash> functions, Set<Object> objectKeys, Set<String> stringKeys, Set<byte[]> byteArrayKeys, boolean warmup) {
if (!warmup)
System.out.printf("%s %s %s %s%n", padString("Function Impl", 25), padString("String keys", 18), padString("Byte array keys", 18), padString("Object keys", 18));
for (Hash f : functions) {
long oRes = 0, sRes = 0, bRes = 0;
DescriptiveStatistics oStats = new DescriptiveStatistics();
DescriptiveStatistics sStats = new DescriptiveStatistics();
DescriptiveStatistics bStats = new DescriptiveStatistics();
long st = System.currentTimeMillis();
for (Object o : objectKeys) captureStats(f.hash(o.hashCode()), oStats);
oRes = System.currentTimeMillis() - st;
st = System.currentTimeMillis();
for (String s : stringKeys) captureStats(f.hash(s), sStats);
sRes = System.currentTimeMillis() - st;
st = System.currentTimeMillis();
for (byte[] b : byteArrayKeys) captureStats(f.hash(b), bStats);
bRes = System.currentTimeMillis() - st;
if (!warmup) {
System.out.printf("%s %s %s %s%n",
padString(f.getClass().getSimpleName(), 25),
padString(prettyPrintTime(sRes), 18),
padString(prettyPrintTime(bRes), 18),
padString(prettyPrintTime(oRes), 18)
);
System.out.printf("%s %s %s %s%n",
padString(" mean", 25),
padDouble(sStats.getMean()),
padDouble(bStats.getMean()),
padDouble(oStats.getMean())
);
System.out.printf("%s %s %s %s%n",
padString(" median", 25),
padDouble(sStats.getPercentile(50.0)),
padDouble(bStats.getPercentile(50.0)),
padDouble(oStats.getPercentile(50.0))
);
System.out.printf("%s %s %s %s%n",
padString(" deviation", 25),
padDouble(sStats.getStandardDeviation()),
padDouble(bStats.getStandardDeviation()),
padDouble(oStats.getStandardDeviation())
);
System.out.printf("%s %s %s %s%n",
padString(" variance", 25),
padDouble(sStats.getVariance()),
padDouble(bStats.getVariance()),
padDouble(oStats.getVariance())
);
}
}
}
private String padDouble(double d) {
return padString(nf.format(d), 18);
}
}
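/*
 * SuperFastHash below is a Java port (sketch) of Paul Hsieh's SuperFastHash, used here
 * only as a benchmark baseline against MurmurHash3, not as a production-quality hash.
 */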
class SuperFastHash implements Hash {
@Override
public int hash(int hashcode) {
byte[] b = new byte[4];
b[0] = (byte) hashcode;
b[1] = (byte) (hashcode >> 8);
b[2] = (byte) (hashcode >> 16);
b[3] = (byte) (hashcode >> 24);
return hash(b);
}
@Override
public int hash(Object o) {
if (o instanceof byte[])
return hash((byte[]) o);
else if (o instanceof String)
return hash(((String) o).getBytes(StandardCharsets.UTF_8));
else
return hash(o.hashCode());
}
@Override
public int hash(byte[] data) {
if (data == null || data.length == 0) return 0;
int len = data.length;
int hash = len;
int rem = len & 3;
len >>= 2;
int tmp;
int offset = 0;
/* Main loop */
for (; len > 0; len--) {
hash += get16bits(data, offset);
tmp = (get16bits(data, offset + 2) << 11) ^ hash;
hash = (hash << 16) ^ tmp;
offset += 4;
hash += hash >> 11;
}
/* Handle end cases */
switch (rem) {
case 3:
hash += get16bits(data, offset);
hash ^= hash << 16;
            hash ^= data[offset + 2] << 18; // trailing byte after the last 16-bit chunk
hash += hash >> 11;
break;
case 2:
hash += get16bits(data, offset);
hash ^= hash << 11;
hash += hash >> 17;
break;
case 1:
            hash += data[offset]; // single trailing byte
hash ^= hash << 10;
hash += hash >> 1;
}
/* Force "avalanching" of final 127 bits */
hash ^= hash << 3;
hash += hash >> 5;
hash ^= hash << 4;
hash += hash >> 17;
hash ^= hash << 25;
hash += hash >> 6;
return hash;
}
   private int get16bits(byte[] bytes, int offset) {
      // Combine two bytes into an unsigned little-endian 16-bit value; the bytes must be
      // masked to avoid sign extension and OR-ed (not AND-ed) together.
      return (bytes[offset] & 0xFF) | ((bytes[offset + 1] & 0xFF) << 8);
   }
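   // Quick sanity sketch (not part of the original benchmark):
   // get16bits(new byte[]{0x01, 0x02}, 0) yields 0x0201 (little-endian).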
}
| 10,421
| 33.509934
| 194
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/SingleOwnerAndAsyncMethodsTest.java
|
package org.infinispan.distribution;
import static org.infinispan.test.TestingUtil.k;
import static org.infinispan.test.TestingUtil.v;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNotNull;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.testng.annotations.Test;
/**
* Non-transactional tests for asynchronous methods in a distributed
* environment and a single owner.
*
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
* @since 4.2
*/
@Test(groups = "functional", testName = "distribution.SingleOwnerAndAsyncMethodsTest")
public class SingleOwnerAndAsyncMethodsTest extends BaseDistFunctionalTest<Object, String> {
public SingleOwnerAndAsyncMethodsTest() {
INIT_CLUSTER_SIZE = 2;
numOwners = 1;
l1CacheEnabled = false;
}
public void testAsyncPut(Method m) throws Exception {
Cache<Object, String> ownerCache = getOwner(k(m));
ownerCache.put(k(m), v(m));
CompletableFuture<String> f = ownerCache.putAsync(k(m), v(m, 1));
assert f != null;
assertEquals(v(m), f.get());
}
public void testAsyncGet(Method m) throws Exception {
final String key = k(m);
final String value = v(m);
Cache<Object, String> ownerCache = getOwner(key);
ownerCache.put(key, value);
AdvancedCache<Object, String> nonOwnerCache = getNonOwner(key).getAdvancedCache();
// Make the cache getAsync call go remote to verify it gets it correctly
CompletableFuture<String> f = nonOwnerCache.getAsync(key);
assert f != null;
assert f.get().equals(value);
f = nonOwnerCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).getAsync(key);
assert f != null;
assert f.get() == null;
f = nonOwnerCache.getAsync(key);
assert f != null;
assert f.get().equals(value);
f = nonOwnerCache.withFlags(Flag.CACHE_MODE_LOCAL).getAsync(key);
assert f != null;
assert f.get() == null;
f = nonOwnerCache.getAsync(key);
assert f != null;
assert f.get().equals(value);
}
public void testAsyncGetAll(Method m) throws Exception {
final String k1 = k(m, 1);
final String v1 = v(m, 1), v2 = v(m, 2);
Cache<Object, String> ownerCache = getOwner(k1);
AdvancedCache<Object, String> nonOwnerCache = getNonOwner(k1).getAdvancedCache();
String k2;
for (int counter = 2;; ++counter) {
k2 = k(m, counter);
if (getOwner(k2).equals(nonOwnerCache)) break;
if (counter > 1000) {
// unlucky, don't run the test further
throw new IllegalStateException("Cannot find suitable key");
}
}
Set<String> keys = new HashSet<>(Arrays.asList(k1, k2));
Map<String, String> entries = new HashMap<>();
entries.put(k1, v1);
entries.put(k2, v2);
ownerCache.put(k1, v1);
nonOwnerCache.put(k2, v2);
// Make the cache getAsync call go remote to verify it gets it correctly
CompletableFuture<Map<Object, String>> f = nonOwnerCache.getAllAsync(keys);
assertNotNull(f);
assertEquals(entries, f.get());
f = nonOwnerCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).getAllAsync(keys);
assertNotNull(f);
assertEquals(Collections.singletonMap(k2, v2), f.get());
f = nonOwnerCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).getAllAsync(Collections.singleton(k1));
assertNotNull(f);
assertEquals(Collections.emptyMap(), f.get());
f = nonOwnerCache.getAllAsync(keys);
assertNotNull(f);
assertEquals(entries, f.get());
f = nonOwnerCache.withFlags(Flag.CACHE_MODE_LOCAL).getAllAsync(keys);
assertNotNull(f);
assertEquals(Collections.singletonMap(k2, v2), f.get());
f = nonOwnerCache.withFlags(Flag.CACHE_MODE_LOCAL).getAllAsync(Collections.singleton(k1));
assertNotNull(f);
assertEquals(Collections.emptyMap(), f.get());
f = nonOwnerCache.getAllAsync(keys);
assertNotNull(f);
assertEquals(entries, f.get());
}
public void testAsyncReplace(Method m) throws Exception {
      // replaceAsync() must first find out whether the key is associated with any value;
      // called on a cache that does not own the key, this forces a remote get.
CompletableFuture<String> f = getOwner(k(m)).replaceAsync(k(m), v(m));
assert f != null;
      // The key is not yet present anywhere, so the replace should return null
assert f.get() == null;
      // Now let's put the key in the owner cache and then verify
// that a replace call from the non-owner cache resolves correctly.
getOwner(k(m)).put(k(m), v(m));
f = getNonOwner(k(m)).replaceAsync(k(m), v(m, 1));
assert f != null;
assert f.get().equals(v(m));
}
public void testAsyncGetThenPutOnSameNode(Method m) throws Exception {
Cache<Object, String> ownerCache = getOwner(k(m));
Cache<Object, String> nonOwnerCache = getNonOwner(k(m));
ownerCache.put(k(m), v(m));
// Make the cache getAsync call go remote to verify it gets it correctly
CompletableFuture<String> f = nonOwnerCache.getAsync(k(m));
assert f != null;
assert f.get().equals(v(m));
nonOwnerCache.put(k(m, 1), v(m, 1));
}
public void testParallelAsyncGets(Method m) throws Exception {
getOwner(k(m, 1)).put(k(m, 1), v(m, 1));
getOwner(k(m, 2)).put(k(m, 2), v(m, 2));
getOwner(k(m, 3)).put(k(m, 3), v(m, 3));
CompletableFuture<String> f1 = getNonOwner(k(m, 1)).getAsync(k(m, 1));
CompletableFuture<String> f2 = getNonOwner(k(m, 2)).getAsync(k(m, 2));
CompletableFuture<String> f3 = getNonOwner(k(m, 3)).getAsync(k(m, 3));
assert f1 != null;
assert f1.get().equals(v(m, 1));
assert f2 != null;
assert f2.get().equals(v(m, 2));
assert f3 != null;
assert f3.get().equals(v(m, 3));
getNonOwner(k(m, 1)).put(k(m, 1), v(m, 11));
getNonOwner(k(m, 2)).put(k(m, 2), v(m, 22));
getNonOwner(k(m, 3)).put(k(m, 3), v(m, 33));
f1 = getOwner(k(m, 1)).getAsync(k(m, 1));
f2 = getOwner(k(m, 2)).getAsync(k(m, 2));
f3 = getOwner(k(m, 3)).getAsync(k(m, 3));
assert f1 != null;
assert f1.get().equals(v(m, 11));
assert f2 != null;
assert f2.get().equals(v(m, 22));
assert f3 != null;
assert f3.get().equals(v(m, 33));
}
public void testLocalAsyncGet(Method m) throws Exception {
Cache<Object, String> ownerCache = getOwner(k(m));
ownerCache.put(k(m), v(m));
CompletableFuture<String> f = getNonOwner(k(m)).getAdvancedCache().withFlags(Flag.SKIP_REMOTE_LOOKUP).getAsync(k(m));
assert f != null;
assert f.get() == null;
}
protected Cache<Object, String> getOwner(Object key) {
return getOwners(key)[0];
}
protected Cache<Object, String> getNonOwner(Object key) {
return getNonOwners(key)[0];
}
public Cache<Object, String>[] getOwners(Object key) {
return getOwners(key, 1);
}
public Cache<Object, String>[] getNonOwners(Object key) {
return getNonOwners(key, 1);
}
}
| 7,412
| 33.640187
| 123
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/BaseDistStoreTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.cache.StoreConfigurationBuilder;
import org.infinispan.interceptors.impl.CacheWriterInterceptor;
import org.infinispan.interceptors.impl.DistCacheWriterInterceptor;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.TestingUtil;
/**
* DistSyncCacheStoreTest.
*
* @author Galder Zamarreño
* @since 4.0
*/
public abstract class BaseDistStoreTest<K, V, C extends BaseDistStoreTest> extends BaseDistFunctionalTest<K, V> {
protected boolean shared;
protected boolean preload;
protected boolean segmented;
protected C shared(boolean shared) {
this.shared = shared;
return (C) this;
}
protected C preload(boolean preload) {
this.preload = preload;
return (C) this;
}
protected C segmented(boolean segmented) {
this.segmented = segmented;
return (C) this;
}
@Override
protected String[] parameterNames() {
return concat(super.parameterNames(), "shared", "preload", "segmented");
}
@Override
protected Object[] parameterValues() {
return concat(super.parameterValues(), shared, preload, segmented);
}
@Override
protected ConfigurationBuilder buildConfiguration() {
ConfigurationBuilder cfg = super.buildConfiguration();
StoreConfigurationBuilder<?, ?> storeConfigurationBuilder;
storeConfigurationBuilder = addStore(cfg.persistence(), shared);
storeConfigurationBuilder
.shared(shared)
.preload(preload)
.segmented(segmented);
return cfg;
}
protected StoreConfigurationBuilder addStore(PersistenceConfigurationBuilder persistenceConfigurationBuilder, boolean shared) {
if (shared) {
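         // A fixed store name makes every node attach to the same shared dummy store instance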
return persistenceConfigurationBuilder.addStore(new DummyInMemoryStoreConfigurationBuilder(
persistenceConfigurationBuilder).storeName(getClass().getSimpleName()));
} else {
return persistenceConfigurationBuilder.addStore(new DummyInMemoryStoreConfigurationBuilder(
persistenceConfigurationBuilder));
}
}
protected int getCacheStoreStats(Cache<?, ?> cache, String cacheStoreMethod) {
DummyInMemoryStore dummyInMemoryStore = TestingUtil.getFirstStore(cache);
return dummyInMemoryStore.stats().get(cacheStoreMethod);
}
protected void assertNumberOfInvocations(DummyInMemoryStore dims, String method, int expected) {
assertEquals(expected, dims.stats().get(method).intValue());
}
protected void clearStats(Cache<?, ?> cache) {
DummyInMemoryStore store = TestingUtil.getFirstStore(cache);
store.clearStats();
CacheWriterInterceptor cacheWriterInterceptor = getCacheWriterInterceptor(cache);
if (cacheWriterInterceptor != null) {
cacheWriterInterceptor.resetStatistics();
}
}
protected CacheWriterInterceptor getCacheWriterInterceptor(Cache<?, ?> cache) {
return TestingUtil.extractComponent(cache, DistCacheWriterInterceptor.class);
}
}
| 3,361
| 34.389474
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncTxUnsafeFuncTest.java
|
package org.infinispan.distribution;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncTxUnsafeFuncTest")
public class DistSyncTxUnsafeFuncTest extends DistSyncTxFuncTest {
public DistSyncTxUnsafeFuncTest() {
testRetVals = false;
cleanup = CleanupPhase.AFTER_METHOD; // ensure any stale TXs are wiped
}
}
| 373
| 30.166667
| 80
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/SingleOwnerAndAsyncMethodsWithTxTest.java
|
package org.infinispan.distribution;
import static org.infinispan.test.TestingUtil.k;
import static org.infinispan.test.TestingUtil.v;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import jakarta.transaction.Status;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.context.Flag;
import org.infinispan.transaction.LockingMode;
import org.infinispan.util.concurrent.TimeoutException;
import org.testng.annotations.Test;
/**
* Transactional tests for asynchronous methods in a distributed
* environment and a single owner.
*
* @author Galder Zamarreño
* @since 5.0
*/
@Test(groups = "functional", testName = "distribution.SingleOwnerAndAsyncMethodsWithTxTest")
public class SingleOwnerAndAsyncMethodsWithTxTest extends BaseDistFunctionalTest<Object, String> {
public SingleOwnerAndAsyncMethodsWithTxTest() {
INIT_CLUSTER_SIZE = 2;
numOwners = 1;
transactional = true;
l1CacheEnabled = true;
lockTimeout = 5;
lockingMode = LockingMode.PESSIMISTIC;
}
public void testAsyncGetsWithinTx(Method m) throws Exception {
String k = k(m);
String v = v(m);
Cache<Object, String> ownerCache = getOwner(k);
Cache<Object, String> nonOwnerCache = getNonOwner(k);
ownerCache.put(k, v);
TransactionManager tm = getTransactionManager(nonOwnerCache);
tm.begin();
CompletableFuture<String> f = nonOwnerCache.getAsync(k);
assertNotNull(f);
assertEquals(v, f.get());
CompletableFuture<Map<Object, String>> allF = nonOwnerCache.getAllAsync(Collections.singleton(k));
assertNotNull(allF);
assertEquals(Collections.singletonMap(k, v), allF.get());
nonOwnerCache.put(k, v(m, 2));
tm.commit();
f = nonOwnerCache.getAsync(k);
assertNotNull(f);
assertEquals(v(m, 2), f.get());
allF = nonOwnerCache.getAllAsync(Collections.singleton(k));
assertNotNull(allF);
assertEquals(Collections.singletonMap(k, v(m, 2)), allF.get());
}
public void testAsyncGetToL1AndConcurrentModification(final Method m) throws Throwable {
// The storage to L1 should fail "silently" and not affect other transactions.
modifyConcurrently(m, getNonOwner(k(m)), false);
}
public void testAsyncGetWithForceWriteLockFlag(final Method m) throws Throwable {
modifyConcurrently(m, getOwner(k(m)), true);
}
private void modifyConcurrently(final Method m, final Cache cache, final boolean withFlag) throws Throwable {
final String k = k(m);
final String v = v(m);
Cache<Object, String> ownerCache = getOwner(k);
ownerCache.put(k, v);
final CountDownLatch getAsynclatch = new CountDownLatch(1);
final CountDownLatch putLatch = new CountDownLatch(1);
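      // Choreography: c1 reads k in a tx (optionally with FORCE_WRITE_LOCK) and holds the tx open;
      // c2 then writes k and should time out only if the read acquired a write lock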
Callable<Void> c1 = () -> {
Cache localCache = cache;
TransactionManager tm = getTransactionManager(localCache);
tm.begin();
// This brings k,v to L1 in non-owner cache
if (withFlag)
localCache = cache.getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK);
CompletableFuture<String> f = localCache.getAsync(k);
CompletableFuture<Map<String, String>> allF = localCache.getAllAsync(Collections.singleton(k));
assertNotNull(f);
assertEquals(v, f.get());
assertNotNull(allF);
assertEquals(Collections.singletonMap(k, v), allF.get());
putLatch.countDown();
getAsynclatch.await();
tm.commit();
return null;
};
Callable<Void> c2 = () -> {
putLatch.await();
TransactionManager tm = getTransactionManager(cache);
tm.begin();
try {
// If getAsync was done within a tx, k should be locked
            // and put() should time out
cache.put(k, v(m, 1));
getAsynclatch.countDown();
assertFalse("Put operation should have timed out if the get operation acquires a write lock", withFlag);
} catch (TimeoutException e) {
tm.setRollbackOnly();
getAsynclatch.countDown();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE)
tm.commit();
else
tm.rollback();
}
return null;
};
Future f1 = fork(c1);
Future f2 = fork(c2);
f1.get();
try {
f2.get();
assert !withFlag : "Should throw a TimeoutException if the get operation acquired a lock";
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof AssertionError)
throw cause; // Assert failed so rethrow as is
else
assert e.getCause() instanceof TimeoutException : String.format(
"The exception should be a TimeoutException but instead was %s",
e.getCause());
}
}
protected Cache<Object, String> getOwner(Object key) {
return getOwners(key)[0];
}
protected Cache<Object, String> getNonOwner(Object key) {
return getNonOwners(key)[0];
}
public Cache<Object, String>[] getOwners(Object key) {
return getOwners(key, 1);
}
public Cache<Object, String>[] getNonOwners(Object key) {
return getNonOwners(key, 1);
}
}
| 5,754
| 32.654971
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/MagicKey.java
|
package org.infinispan.distribution;
import static org.infinispan.distribution.DistributionTestHelper.addressOf;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.Random;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.remoting.transport.Address;
/**
* A special type of key that if passed a cache in its constructor, will ensure it will always be assigned to that cache
* (plus however many additional caches in the hash space).
*
* Note that this only works if all the caches have joined a single cluster before creating the key.
* If the cluster membership changes then the keys may move to other servers.
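 *
 * <p>A minimal usage sketch (illustrative only; assumes a formed cluster and a {@code cache(1)}
 * handle from a test base class):</p>
 * <pre>
 *    MagicKey k = new MagicKey("k1", cache(1));   // cache(1) is guaranteed to be the primary owner
 *    cache(1).put(k, "value");                    // the write is handled locally by the primary
 * </pre>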
*/
public class MagicKey implements Serializable {
private static final WeakHashMap<Integer, int[]> hashCodes = new WeakHashMap<>();
private static final AtomicLong counter = new AtomicLong();
/**
* The name is used only for easier debugging and may be null. It is not part of equals()/hashCode().
*/
@ProtoField(1)
final String name;
@ProtoField(number = 2, defaultValue = "0")
final int hashcode;
/**
* As hash codes can collide, using counter makes the key unique.
*/
@ProtoField(number = 3, defaultValue = "0")
final long unique;
@ProtoField(number = 4, defaultValue = "0")
final int segment;
@ProtoField(5)
final String address;
@ProtoFactory
MagicKey(String name, int hashcode, long unique, int segment, String address) {
this.name = name;
this.hashcode = hashcode;
this.unique = unique;
this.segment = segment;
this.address = address;
}
public MagicKey(String name, Cache<?, ?> primaryOwner) {
this.name = name;
Address primaryAddress = addressOf(primaryOwner);
this.address = primaryAddress.toString();
LocalizedCacheTopology cacheTopology = primaryOwner.getAdvancedCache().getDistributionManager().getCacheTopology();
ConsistentHash ch = cacheTopology.getWriteConsistentHash();
int segment = findSegment(ch.getNumSegments(), s -> primaryAddress.equals(ch.locatePrimaryOwnerForSegment(s)));
if (segment < 0) {
throw new IllegalStateException("Could not find any segment owned by " + primaryOwner +
", primary segments: " + segments(primaryOwner));
}
this.segment = segment;
hashcode = getHashCodeForSegment(cacheTopology, segment);
unique = counter.getAndIncrement();
}
public MagicKey(String name, Cache<?, ?> primaryOwner, Cache<?, ?>... backupOwners) {
this.name = name;
Address primaryAddress = addressOf(primaryOwner);
this.address = primaryAddress.toString();
LocalizedCacheTopology cacheTopology = primaryOwner.getAdvancedCache().getDistributionManager().getCacheTopology();
ConsistentHash ch = cacheTopology.getWriteConsistentHash();
segment = findSegment(ch.getNumSegments(), s -> {
List<Address> owners = ch.locateOwnersForSegment(s);
if (!primaryAddress.equals(owners.get(0))) return false;
for (Cache<?, ?> backup : backupOwners) {
if (!owners.contains(addressOf(backup))) return false;
}
return true;
});
if (segment < 0) {
throw new IllegalStateException("Could not find any segment owned by " + primaryOwner + ", "
+ Arrays.toString(backupOwners) + ", primary segments: " + segments(primaryOwner)
+ ", backup segments: " + Stream.of(backupOwners).collect(Collectors.toMap(Function.identity(), this::segments)));
}
hashcode = getHashCodeForSegment(cacheTopology, segment);
unique = counter.getAndIncrement();
}
private int findSegment(int numSegments, Predicate<Integer> predicate) {
// use random offset so that we don't use only lower segments
int offset = ThreadLocalRandom.current().nextInt(numSegments);
for (int i = 0; i < numSegments; ++i) {
int segment = (offset + i) % numSegments;
if (predicate.test(segment)) {
return segment;
}
}
return -1;
}
private static synchronized int getHashCodeForSegment(LocalizedCacheTopology cacheTopology, int segment) {
int numSegments = cacheTopology.getReadConsistentHash().getNumSegments();
// Caching the hash codes prevents random failures in tests where we create many magic keys
int[] hcs = hashCodes.computeIfAbsent(numSegments, k -> new int[numSegments]);
int hc = hcs[segment];
if (hc != 0) {
return hc;
}
Random r = new Random();
int attemptsLeft = 100 * numSegments;
int dummy;
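      // Rejection sampling: draw random ints until one hashes to the requested segment; with
      // roughly even segments each draw succeeds with probability ~1/numSegments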
do {
dummy = r.nextInt();
attemptsLeft--;
if (attemptsLeft < 0) {
throw new IllegalStateException("Could not find any key in segment " + segment);
}
} while (cacheTopology.getSegment(dummy) != segment);
return hcs[segment] = dummy;
}
private Set<Integer> segments(Cache<?, ?> owner) {
return owner.getAdvancedCache().getDistributionManager().getWriteConsistentHash()
.getPrimarySegmentsForOwner(owner.getCacheManager().getAddress());
}
public MagicKey(Cache<?, ?> primaryOwner) {
this(null, primaryOwner);
}
public MagicKey(Cache<?, ?> primaryOwner, Cache<?, ?>... backupOwners) {
this(null, primaryOwner, backupOwners);
}
@Override
   public int hashCode() {
return hashcode;
}
public int getSegment() {
return segment;
}
@Override
   public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MagicKey magicKey = (MagicKey) o;
return hashcode == magicKey.hashcode && address.equals(magicKey.address) &&
Objects.equals(name, magicKey.name) && unique == magicKey.unique;
}
@Override
public String toString() {
return String.format("MagicKey%s{%X/%08X/%d@%s}", name == null ? "" : "#" + name,
unique, hashcode, segment, address);
}
}
| 6,530
| 35.283333
| 126
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/PutMapReturnValueTest.java
|
package org.infinispan.distribution;
import static org.testng.Assert.assertNotNull;
import static org.testng.AssertJUnit.assertEquals;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.fwk.InCacheMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.PutMapReturnValueTest")
@InCacheMode({ CacheMode.DIST_SYNC, CacheMode.REPL_SYNC })
public class PutMapReturnValueTest extends MultipleCacheManagersTest {
private AdvancedCache<Object, String> c1;
private AdvancedCache<Object, String> c2;
@Override
protected void createCacheManagers() throws Throwable {
createCluster(TestDataSCI.INSTANCE, getDefaultClusteredCacheConfig(cacheMode), 2);
c1 = this.<Object, String>cache(0).getAdvancedCache();
c2 = this.<Object, String>cache(1).getAdvancedCache();
}
public void testGetAndPutAll() {
MagicKey k1 = new MagicKey(c1);
MagicKey k2 = new MagicKey(c1);
MagicKey k3 = new MagicKey(c2);
MagicKey k4 = new MagicKey(c2);
c1.put(k1, "v1-0");
c2.put(k3, "v3-0");
Map<Object, String> map = new HashMap<>();
map.put(k1, "v1-1");
map.put(k2, "v2-1");
map.put(k3, "v3-1");
map.put(k4, "v4-1");
Map<Object, String> result = c1.getAndPutAll(map);
assertNotNull(result);
assertEquals(2, result.size());
assertEquals("v1-0", result.get(k1));
assertEquals("v3-0", result.get(k3));
map.put(k1, "v1-2");
map.put(k2, "v2-2");
map.put(k3, "v3-2");
map.put(k4, "v4-2");
result = c1.getAndPutAll(map);
assertNotNull(result);
assertEquals(4, result.size());
assertEquals("v1-1", result.get(k1));
assertEquals("v2-1", result.get(k2));
assertEquals("v3-1", result.get(k3));
assertEquals("v4-1", result.get(k4));
result = c1.getAll(map.keySet());
assertEquals(4, result.size());
assertEquals("v1-2", result.get(k1));
assertEquals("v2-2", result.get(k2));
assertEquals("v3-2", result.get(k3));
assertEquals("v4-2", result.get(k4));
}
}
| 2,303
| 31.450704
| 88
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncTxCommitDiffThreadTest.java
|
package org.infinispan.distribution;
import static org.infinispan.test.TestingUtil.k;
import static org.infinispan.test.TestingUtil.v;
import static org.testng.AssertJUnit.assertEquals;
import java.lang.reflect.Method;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.testng.annotations.Test;
/**
* Test that emulates transactions being started in a thread and then being
* committed in a different thread.
*
* @author Galder Zamarreño
* @since 4.2
*/
@Test(groups = "functional", testName = "distribution.DistSyncTxCommitDiffThreadTest")
public class DistSyncTxCommitDiffThreadTest extends BaseDistFunctionalTest<Object, String> {
public DistSyncTxCommitDiffThreadTest() {
cacheName = this.getClass().getSimpleName();
INIT_CLUSTER_SIZE = 2;
transactional = true;
l1CacheEnabled = false;
numOwners = 1;
}
public void testCommitInDifferentThread(Method m) throws Exception {
final String key = k(m), value = v(m);
final Cache nonOwnerCache = getNonOwners(key, 1)[0];
final Cache ownerCache = getOwners(key, 1)[0];
final TransactionManager tmNonOwner = getTransactionManager(nonOwnerCache);
final CountDownLatch commitLatch = new CountDownLatch(1);
tmNonOwner.begin();
final Transaction tx = tmNonOwner.getTransaction();
Callable<Void> commitCallable = new Callable<Void>() {
@Override
public Void call() throws Exception {
tmNonOwner.resume(tx);
commitLatch.await();
tmNonOwner.commit();
return null;
}
};
Future commitFuture = fork(commitCallable);
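      // give the forked thread time to resume the transaction before proceeding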
Thread.sleep(500);
nonOwnerCache.put(key, value);
commitLatch.countDown();
commitFuture.get();
Callable<Void> getCallable = new Callable<Void>() {
@Override
public Void call() throws Exception {
TransactionManager tmOwner = getTransactionManager(ownerCache);
tmOwner.begin();
assertEquals(value, ownerCache.get(key));
tmOwner.commit();
return null;
}
};
Future getFuture = fork(getCallable);
getFuture.get();
}
}
| 2,387
| 30.421053
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSkipRemoteLookupTest.java
|
package org.infinispan.distribution;
import static org.infinispan.context.Flag.SKIP_REMOTE_LOOKUP;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;
import java.util.concurrent.ExecutionException;
import org.infinispan.context.Flag;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSkipRemoteLookupTest")
public class DistSkipRemoteLookupTest extends BaseDistFunctionalTest<Object, String> {
@Override
public Object[] factory() {
return new Object[] {
new DistSkipRemoteLookupTest(),
new DistSkipRemoteLookupTest().l1(false),
};
}
public DistSkipRemoteLookupTest() {
cleanup = CleanupPhase.AFTER_METHOD;
}
public void testSkipLookupOnGet() {
MagicKey k1 = getMagicKey();
c1.put(k1, "value");
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
assertIsNotInL1(c4, k1);
assertNull(c4.getAdvancedCache().withFlags(SKIP_REMOTE_LOOKUP).get(k1));
assertOwnershipAndNonOwnership(k1, false);
}
@Test(enabled = false, description = "does it make sense to have skip_remote_lookup with conditional commands?")
public void testCorrectFunctionalityOnConditionalWrite() {
MagicKey k1 = getMagicKey();
c1.put(k1, "value");
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
assertIsNotInL1(c4, k1);
assertNull(c4.getAdvancedCache().withFlags(SKIP_REMOTE_LOOKUP).putIfAbsent(k1, "new_val"));
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
if (l1CacheEnabled) assertIsNotInL1(c4, k1);
}
public void testCorrectFunctionalityOnUnconditionalWrite() {
MagicKey k1 = getMagicKey();
c1.put(k1, "value");
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
assertIsNotInL1(c4, k1);
assertNull(c4.getAdvancedCache().withFlags(SKIP_REMOTE_LOOKUP).put(k1, "new_val"));
assertEquals(c3.get(k1), "new_val");
assertOnAllCachesAndOwnership(k1, "new_val");
}
@Test
public void testSkipLookupOnRemove() {
MagicKey k1 = getMagicKey();
final String value = "SomethingToSayHere";
assert null == c1.put(k1, value);
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
assertIsNotInL1(c4, k1);
assert value.equals(c1.get(k1));
assert value.equals(c1.remove(k1));
assert null == c1.put(k1, value);
assertIsNotInL1(c3, k1);
assert value.equals(c3.remove(k1));
assert null == c1.put(k1, value);
assert null == c4.getAdvancedCache().withFlags(Flag.SKIP_REMOTE_LOOKUP).remove(k1);
}
@Test
public void testSkipLookupOnAsyncRemove() throws InterruptedException, ExecutionException {
MagicKey k1 = getMagicKey();
final String value = "SomethingToSayHere-async";
assert null == c1.put(k1, value);
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
assertIsNotInL1(c4, k1);
assert value.equals(c1.get(k1));
assert value.equals(c1.remove(k1));
assert null == c1.put(k1, value);
assertIsNotInL1(c3, k1);
log.trace("here it is");
assertEquals(value, c3.remove(k1));
assert null == c1.put(k1, value);
assert null == c4.getAdvancedCache().withFlags(Flag.SKIP_REMOTE_LOOKUP).removeAsync(k1).get();
}
}
| 3,682
| 29.94958
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/TriangleExceptionDuringMarshallingTest.java
|
package org.infinispan.distribution;
import static java.util.Collections.emptyList;
import static java.util.Collections.singleton;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.infinispan.test.TestingUtil.extractCacheTopology;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.Cache;
import org.infinispan.commons.marshall.JavaSerializationMarshaller;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.distribution.TriangleDistributionInterceptor;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.MarshallingExceptionGenerator;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.CommandAckCollector;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Test that ack collectors are closed properly after a marshalling exception.
*
* <p>See ISPN-12435</p>
*
* @author Dan Berindei
* @since 12.1
*/
@Test(groups = "unit", testName = "distribution.TriangleExceptionDuringMarshallingTest")
public class TriangleExceptionDuringMarshallingTest extends MultipleCacheManagersTest {
public static final int NUM_SEGMENTS = 3;
@Override
protected void createCacheManagers() throws Throwable {
GlobalConfigurationBuilder globalBuilder = GlobalConfigurationBuilder.defaultClusteredBuilder();
globalBuilder.serialization().marshaller(new JavaSerializationMarshaller());
globalBuilder.serialization().allowList()
.addClasses(MagicKey.class, MarshallingExceptionGenerator.class);
ConfigurationBuilder cacheBuilder = new ConfigurationBuilder();
ControlledConsistentHashFactory<?> chf =
new ControlledConsistentHashFactory.Default(new int[][]{{0, 1}, {1, 2}, {2, 0}});
cacheBuilder.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numSegments(NUM_SEGMENTS).consistentHashFactory(chf);
createCluster(globalBuilder, cacheBuilder, 3);
// Make sure we're using the triangle algorithm
AsyncInterceptorChain asyncInterceptorChain = extractInterceptorChain(cache(0));
assertTrue(asyncInterceptorChain.containsInterceptorType(TriangleDistributionInterceptor.class));
}
public void testExceptionDuringMarshallingOnOriginator() {
// fail during the first serialization (i.e. on the originator)
Object value = MarshallingExceptionGenerator.failOnSerialization(0);
Cache<Object, Object> originCache = cache(0);
MagicKey primaryKey = new MagicKey("primary", cache(0));
expectException(MarshallingException.class, () -> originCache.put(primaryKey, value));
assertCleanFailure(originCache, primaryKey);
MagicKey nonOwnerKey = new MagicKey("non-owner", cache(1));
expectException(MarshallingException.class, () -> originCache.put(nonOwnerKey, value));
assertCleanFailure(originCache, nonOwnerKey);
MagicKey backupKey = new MagicKey("backup", cache(2));
expectException(MarshallingException.class, () -> originCache.put(backupKey, value));
assertCleanFailure(originCache, backupKey);
}
public void testExceptionDuringMarshallingOnRemote() {
// fail during the second serialization, i.e. remotely
Object value = MarshallingExceptionGenerator.failOnSerialization(1);
Cache<Object, Object> originCache = cache(0);
// does not fail when the originator is the primary, as there is no remote serialization
MagicKey primaryKey = new MagicKey("primary", cache(0));
originCache.put(primaryKey, value);
originCache.remove(primaryKey);
// fails when the originator is not an owner
MagicKey nonOwnerKey = new MagicKey("non-owner", cache(1));
expectException(RemoteException.class, MarshallingException.class, () -> originCache.put(nonOwnerKey, value));
assertCleanFailure(originCache, nonOwnerKey);
// fails when the originator is a backup
MagicKey backupKey = new MagicKey("backup", cache(2));
expectException(RemoteException.class, MarshallingException.class, () -> originCache.put(backupKey, value));
assertCleanFailure(originCache, backupKey);
}
@Test(enabled = false, description = "See ISPN-12770")
public void testExceptionDuringUnmarshalling() {
      // fail during the first deserialization, i.e. when the value is unmarshalled remotely
Object value = MarshallingExceptionGenerator.failOnDeserialization(0);
Cache<Object, Object> originCache = cache(0);
MagicKey primaryKey = new MagicKey("primary", cache(0));
expectException(MarshallingException.class, () -> originCache.put(primaryKey, value));
assertCleanFailure(originCache, primaryKey);
MagicKey nonOwnerKey = new MagicKey("non-owner", cache(1));
expectException(MarshallingException.class, () -> originCache.put(nonOwnerKey, value));
assertCleanFailure(originCache, nonOwnerKey);
MagicKey backupKey = new MagicKey("backup", cache(2));
expectException(MarshallingException.class, () -> originCache.put(backupKey, value));
assertCleanFailure(originCache, backupKey);
}
private void assertCleanFailure(Cache<Object, Object> originCache, MagicKey key) {
      // verify that the ack collector is cleaned up and the value is not inserted
assertInvocationIsDone(singleton(key));
assertCacheIsEmpty();
      // verify that a put and a remove with the same key and a marshallable value succeed
originCache.put(key, "good_value");
originCache.remove(key);
}
public void testExceptionDuringMarshallingOnOriginatorMultiKey() {
MarshallingExceptionGenerator value = MarshallingExceptionGenerator.failOnSerialization(0);
Map<Object, Object> values = new HashMap<>();
values.put(new MagicKey(cache(0)), value);
values.put(new MagicKey(cache(1)), value);
values.put(new MagicKey(cache(2)), value);
for (Cache<Object, Object> cache : caches()) {
expectException(MarshallingException.class, () -> cache.putAll(values));
assertInvocationIsDone(values.keySet());
assertCacheIsEmpty();
}
}
public void testExceptionDuringMarshallingOnRemoteMultiKey() {
MarshallingExceptionGenerator value = MarshallingExceptionGenerator.failOnSerialization(1);
Map<Object, Object> values = new HashMap<>();
values.put(new MagicKey(cache(0)), value);
values.put(new MagicKey(cache(1)), value);
values.put(new MagicKey(cache(2)), value);
for (Cache<Object, Object> cache : caches()) {
expectException(RemoteException.class, MarshallingException.class, () -> cache.putAll(values));
assertInvocationIsDone(values.keySet());
for (Object key : values.keySet()) {
cache.remove(key);
}
}
}
private void assertCacheIsEmpty() {
for (Cache<Object, Object> cache : caches()) {
assertEquals(0, cache.getAdvancedCache().getDataContainer().sizeIncludingExpired());
}
}
private void assertInvocationIsDone(Collection<?> keys) {
for (Cache<Object, Object> cache : caches()) {
CommandAckCollector ackCollector = extractComponent(cache, CommandAckCollector.class);
assertEquals(emptyList(), ackCollector.getPendingCommands());
LockManager lm = TestingUtil.extractLockManager(cache);
for (Object key : keys) {
assert !lm.isLocked(key);
}
}
}
@AfterMethod(alwaysRun = true)
public void cleanup() {
LocalizedCacheTopology cacheTopology = extractCacheTopology(cache(0));
int topologyId = cacheTopology.getTopologyId();
for (Cache<Object, Object> cache : caches()) {
// Complete pending commands
CommandAckCollector ackCollector = extractComponent(cache, CommandAckCollector.class);
for (Long pendingCommand : ackCollector.getPendingCommands()) {
ackCollector.completeExceptionally(pendingCommand, new TestException(), topologyId);
}
         // Verify that no locks are still held
LockManager lockManager = extractComponent(cache, LockManager.class);
assertEquals(0, lockManager.getNumberOfLocksHeld());
}
// Mark all sequence ids as delivered
for (int segment = 0; segment < NUM_SEGMENTS; segment++) {
DistributionInfo segmentDistribution = cacheTopology.getSegmentDistribution(segment);
Address primary = segmentDistribution.primary();
Cache<?, ?> primaryCache = manager(primary).getCache();
long latestSequenceId = extractComponent(primaryCache, TriangleOrderManager.class)
.latestSent(segment, topologyId);
for (int i = 0; i <= latestSequenceId; i++) {
for (Address backup : segmentDistribution.writeBackups()) {
Cache<Object, Object> backupCache = manager(backup).getCache();
extractComponent(backupCache, TriangleOrderManager.class)
.markDelivered(segment, i, topologyId);
}
}
}
}
}
| 9,837
| 43.515837
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistStorePreloadTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commons.time.ControlledTimeService;
import org.infinispan.commons.time.TimeService;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
* Test preloading with a distributed cache.
*
* @author Dan Berindei <dan@infinispan.org>
* @since 5.1
*/
@Test(groups = "functional", testName = "distribution.DistStorePreloadTest")
public class DistStorePreloadTest<D extends DistStorePreloadTest<?>> extends BaseDistStoreTest<String, String, D> {
public static final int NUM_KEYS = 10;
public DistStorePreloadTest() {
INIT_CLUSTER_SIZE = 1;
testRetVals = true;
      // The store has to be shared and preloading enabled
shared = true;
preload = true;
}
@Override
public Object[] factory() {
return new Object[] {
new DistStorePreloadTest<D>().segmented(true).transactional(false),
new DistStorePreloadTest<D>().segmented(true).transactional(true),
new DistStorePreloadTest<D>().segmented(false).transactional(false),
new DistStorePreloadTest<D>().segmented(false).transactional(true),
};
}
@AfterMethod
public void clearStats() {
for (Cache<String, String> c: caches) {
log.trace("Clearing stats for cache store on cache "+ c);
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
store.clear();
}
// Make sure to clean up any extra caches
if (managers().length > 1) {
killMember(1, cacheName);
}
}
public void testPreloadOnStart() throws PersistenceException {
for (int i = 0; i < NUM_KEYS; i++) {
c1.put("k" + i, "v" + i);
}
DataContainer<String, String> dc1 = c1.getAdvancedCache().getDataContainer();
assert dc1.size() == NUM_KEYS;
DummyInMemoryStore store = TestingUtil.getFirstStore(c1);
assertEquals(NUM_KEYS, store.size());
addClusterEnabledCacheManager(TestDataSCI.INSTANCE, null, new TransportFlags().withFD(false));
EmbeddedCacheManager cm2 = cacheManagers.get(1);
cm2.defineConfiguration(cacheName, buildConfiguration().build());
c2 = cache(1, cacheName);
caches.add(c2);
waitForClusterToForm(cacheName);
DataContainer<String, String> dc2 = c2.getAdvancedCache().getDataContainer();
assertEquals("Expected all the cache store entries to be preloaded on the second cache", NUM_KEYS, dc2.size());
for (int i = 0; i < NUM_KEYS; i++) {
assertOwnershipAndNonOwnership("k" + i, true);
}
}
public void testPreloadExpirationMemoryPresent() {
testPreloadExpiration(true);
}
public void testPreloadExpirationNoMemoryPresent() {
testPreloadExpiration(false);
}
private void testPreloadExpiration(boolean hasMemoryContents) {
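      // ControlledTimeService lets the test advance the clock deterministically instead of sleeping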
ControlledTimeService timeService = new ControlledTimeService();
TestingUtil.replaceComponent(c1.getCacheManager(), TimeService.class, timeService, true);
long createdTime = timeService.wallClockTime();
String key = "key";
String value = "value";
c1.put(key, value, 10, TimeUnit.MINUTES);
DataContainer<String, String> dc1 = c1.getAdvancedCache().getDataContainer();
CacheEntry<?, ?> entry = dc1.get(key);
assertNotNull(entry);
assertEquals(createdTime, entry.getCreated());
if (!hasMemoryContents) {
dc1.clear();
}
timeService.advance(1000);
DummyInMemoryStore store = TestingUtil.getFirstStore(c1);
assertEquals(1, store.getStoreDataSize());
addClusterEnabledCacheManager();
EmbeddedCacheManager cm2 = cacheManagers.get(1);
TestingUtil.replaceComponent(cm2, TimeService.class, timeService, true);
cm2.defineConfiguration(cacheName, buildConfiguration().build());
c2 = cache(1, cacheName);
caches.add(c2);
waitForClusterToForm(cacheName);
DataContainer<String, String> dc2 = c2.getAdvancedCache().getDataContainer();
entry = dc2.peek(key);
assertNotNull(entry);
      // The created time should be unchanged, not shifted by the clock advance
assertEquals(createdTime, entry.getCreated());
}
}
| 4,764
| 33.781022
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ConsistentHashPerfTest.java
|
package org.infinispan.distribution;
import static java.lang.Math.abs;
import static java.lang.Math.min;
import static java.lang.Math.sqrt;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.util.Util;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.distribution.ch.impl.HashFunctionPartitioner;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.Test;
/**
* Tests the uniformity of the distribution hash algo.
*
* @author Manik Surtani
* @since 4.0
*/
@Test(testName = "distribution.ConsistentHashPerfTest", groups = "manual", description = "Disabled until we can configure Surefire to skip manual tests")
public class ConsistentHashPerfTest extends AbstractInfinispanTest {
private List<Address> createAddresses(int numNodes) {
Random r = new Random();
List<Address> addresses = new ArrayList<>(numNodes);
while (addresses.size() < numNodes)
addresses.add(new JGroupsAddress(new org.jgroups.util.UUID(r.nextLong(), r.nextLong())));
return addresses;
}
private ConsistentHash createNewConsistentHash(List<Address> servers) {
try {
// TODO Revisit after we have replaced the CH with the CHFactory in the configuration
return new DefaultConsistentHashFactory().create(2, 10,
servers, null);
} catch (RuntimeException re) {
throw re;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void testSpeed() {
int[] numNodes = {1, 2, 3, 4, 10, 100, 1000};
int iterations = 100000;
// warmup
doPerfTest(10, 2, iterations);
for (int numOwners = 1; numOwners < 5; numOwners++) {
System.out.println("numOwners=" + numOwners);
for (int nn: numNodes) {
Long duration = doPerfTest(nn, numOwners, iterations);
System.out.println("With "+nn+" cache(s), time to do " + iterations + " lookups was " + Util.prettyPrintTime(TimeUnit.NANOSECONDS.toMillis(duration)));
}
}
}
private Long doPerfTest(int numNodes, int numOwners, int iterations) {
ConsistentHash ch = createNewConsistentHash(createAddresses(numNodes));
int dummy = 0;
long start = System.nanoTime();
KeyPartitioner keyPartitioner = new HashFunctionPartitioner(ch.getNumSegments());
for (int i = 0; i < iterations; i++) {
Object key = i;
int segment = keyPartitioner.getSegment(key);
dummy += ch.locateOwnersForSegment(segment).size();
}
long duration = System.nanoTime() - start;
assertEquals(dummy, iterations * min(numOwners, numNodes));
return duration;
}
public void testDistribution() {
final int numKeys = 10000;
final int[] numNodes = {1, 2, 3, 4, 10, 100, 1000};
List<Object> keys = new ArrayList<Object>(numKeys);
for (int i = 0; i < numKeys; i++) keys.add(i);
for (int nn : numNodes) {
doTestDistribution(numKeys, nn, keys);
}
}
private void doTestDistribution(int numKeys, int numNodes, List<Object> keys) {
ConsistentHash ch = createNewConsistentHash(createAddresses(numNodes));
Map<Address, Integer> distribution = new HashMap<>();
KeyPartitioner keyPartitioner = new HashFunctionPartitioner(ch.getNumSegments());
for (Object key : keys) {
int segment = keyPartitioner.getSegment(key);
Address a = ch.locateOwnersForSegment(segment).get(0);
if (distribution.containsKey(a)) {
int i = distribution.get(a);
distribution.put(a, i + 1);
} else {
distribution.put(a, 1);
}
}
System.out.printf("\nTesting distribution with %d keys, %d nodes\n", numKeys, numNodes);
//System.out.println("" + distribution);
      // compute distribution statistics
ArrayList<Integer> counts = new ArrayList<Integer>(distribution.values());
Collections.sort(counts);
// When we go to 100 nodes, one or two nodes may not receive any keys and would cause the next assertion to fail
//assert numNodes == counts.size() : "Only reached " + distribution.size() + " nodes : " + distribution;
      // instead we add a 0 count for every node that is missing from the distribution map and do the calculations
for (int i = 0; i < numNodes - counts.size(); i++)
counts.add(0, 0);
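      // Distribution statistics over the per-node key counts:
      //   mean      = sum(count) / numNodes
      //   stdDev    = sqrt(sum((count - mean)^2) / numNodes)
      //   avgAbsDev = sum(|count - mean|) / numNodes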
      int sum = 0;
      for (Integer count : counts) sum += count;
      assert sum == numKeys;
      // floating-point division, otherwise the mean is truncated
      double mean = (double) sum / numNodes;
      double variance = 0;
      for (Integer count : counts) variance += (count - mean) * (count - mean);
      variance /= numNodes;
      double stdDev = sqrt(variance);
double avgAbsDev = 0;
for (Integer count : counts) avgAbsDev += abs(count - mean);
avgAbsDev /= numNodes;
int median = counts.get(numNodes / 2);
ArrayList<Integer> medianDevs = new ArrayList<Integer>(numNodes);
for (Integer count : counts) medianDevs.add(abs(count - median));
Collections.sort(medianDevs);
int medianAbsDev = medianDevs.get(numNodes / 2);
System.out.printf("Mean = %f, median = %d\n", mean, median);
System.out.printf("Standard deviation = %.3f, or %.3f%%\n", stdDev, stdDev / mean * 100);
System.out.printf("Average absolute deviation = %.3f, or %.3f%%\n", avgAbsDev, avgAbsDev / mean * 100);
System.out.printf("Median absolute deviation = %d or %.3f%%\n", medianAbsDev, (double)medianAbsDev / mean * 100);
}
}
| 5,951
| 37.4
| 163
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/UnknownCacheStartTest.java
|
package org.infinispan.distribution;
import static org.infinispan.test.AbstractCacheTest.getDefaultClusteredCacheConfig;
import static org.infinispan.test.TestingUtil.killCacheManagers;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createCacheManager;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.commons.test.TestResourceTracker;
import org.testng.TestException;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.UnknownCacheStartTest")
public class UnknownCacheStartTest extends AbstractInfinispanTest {
ConfigurationBuilder configuration;
EmbeddedCacheManager cm1, cm2;
@BeforeClass(alwaysRun = true)
public void setUp() {
configuration = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
}
@AfterClass(alwaysRun = true)
public void tearDown() {
killCacheManagers(cm1, cm2);
}
@Test (expectedExceptions = {CacheException.class, TestException.class}, timeOut = 60000)
public void testStartingUnknownCaches() throws Throwable {
TestResourceTracker.testThreadStarted(this.getTestName());
cm1 = createCacheManager(configuration);
cm1.defineConfiguration("new_1", configuration.build());
Cache<String, String> c1 = cm1.getCache();
Cache<String, String> c1_new = cm1.getCache("new_1");
c1.put("k", "v");
c1_new.put("k", "v");
assertEquals("v", c1.get("k"));
assertEquals("v", c1_new.get("k"));
cm2 = createCacheManager(configuration);
cm2.defineConfiguration("new_2", configuration.build());
Cache<String, String> c2 = cm2.getCache();
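      // this cache was never defined on cm2, so starting it is expected to fail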
Cache<String, String> c2_new = cm2.getCache("new_AND_DEFINITELY_UNKNOWN_cache_2");
c2.put("k", "v");
c2_new.put("k", "v");
assertEquals("v", c2.get("k"));
assertEquals("v", c2_new.get("k"));
TestingUtil.blockUntilViewsReceived(60000, false, c2, c2_new);
TestingUtil.waitForNoRebalance(c2, c2_new);
fail("Should have thrown an exception!");
}
}
| 2,516
| 33.479452
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/SingleOwnerTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.transport.Address;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.data.BrokenMarshallingPojo;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* Test single owner distributed cache configurations.
*
* @author Galder Zamarreño
* @since 4.0
*/
@Test(groups = "functional", testName = "distribution.SingleOwnerTest")
public class SingleOwnerTest extends BaseDistFunctionalTest<Object, String> {
@Override
protected void createCacheManagers() throws Throwable {
cacheName = "dist";
configuration = getDefaultClusteredCacheConfig(cacheMode, transactional);
if (!testRetVals) {
configuration.unsafe().unreliableReturnValues(true);
         // we also need to use repeatable read for the tests to work when we don't have reliable return values,
         // since the tests repeatedly query changes
configuration.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
}
configuration.clustering().remoteTimeout(3, TimeUnit.SECONDS);
configuration.clustering().hash().numOwners(1);
configuration.locking().lockAcquisitionTimeout(45, TimeUnit.SECONDS);
createClusteredCaches(2, cacheName, configuration);
caches = caches(cacheName);
c1 = caches.get(0);
c2 = caches.get(1);
cacheAddresses = new ArrayList<Address>(2);
for (Cache cache : caches) {
EmbeddedCacheManager cacheManager = cache.getCacheManager();
cacheAddresses.add(cacheManager.getAddress());
}
waitForClusterToForm(cacheName);
}
public void testPutOnKeyOwner() {
Cache[] caches = getOwners("mykey", 1);
assert caches.length == 1;
Cache ownerCache = caches[0];
ownerCache.put("mykey", new Object());
}
public void testClearOnKeyOwner() {
Cache[] caches = getOwners("mykey", 1);
assert caches.length == 1;
Cache ownerCache = caches[0];
ownerCache.clear();
}
public void testRetrieveNonSerializableValueFromNonOwner() {
Cache[] owners = getOwners("yourkey", 1);
Cache[] nonOwners = getNonOwners("yourkey", 1);
assert owners.length == 1;
assert nonOwners.length == 1;
Cache ownerCache = owners[0];
Cache nonOwnerCache = nonOwners[0];
ownerCache.put("yourkey", new Object());
try {
nonOwnerCache.get("yourkey");
fail("Should have failed with a org.infinispan.commons.marshall.MarshallingException");
} catch (RemoteException e) {
assertTrue(e.getCause() instanceof MarshallingException);
}
}
public void testErrorWhenRetrievingKeyFromNonOwner() {
log.trace("Before test");
Cache[] owners = getOwners("diffkey", 1);
Cache[] nonOwners = getNonOwners("diffkey", 1);
assert owners.length == 1;
assert nonOwners.length == 1;
Cache ownerCache = owners[0];
Cache nonOwnerCache = nonOwners[0];
ownerCache.put("diffkey", new BrokenMarshallingPojo());
Exceptions.expectException(RemoteException.class, MarshallingException.class, () -> nonOwnerCache.get("diffkey"));
}
}
| 3,562
| 34.989899
| 120
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/RemoteGetFailureTest.java
|
package org.infinispan.distribution;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.ImmortalCacheValue;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.statetransfer.StateTransferInterceptor;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.util.ByteString;
import org.infinispan.util.concurrent.TimeoutException;
import org.jgroups.JChannel;
import org.jgroups.View;
import org.jgroups.protocols.pbcast.GMS;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.RemoteGetFailureTest")
@CleanupAfterMethod
public class RemoteGetFailureTest extends MultipleCacheManagersTest {
private Object key;
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
builder.clustering().stateTransfer().timeout(10, TimeUnit.SECONDS);
builder.clustering().remoteTimeout(5, TimeUnit.SECONDS);
createClusteredCaches(3, TestDataSCI.INSTANCE, builder, new TransportFlags().withFD(true));
waitForClusterToForm();
key = getKeyForCache(cache(1), cache(2));
}
@AfterMethod(alwaysRun = true)
@Override
protected void clearContent() throws Throwable {
// Merge the cluster back so that the leave requests don't have to time out
for (Cache<Object, Object> cache : caches()) {
installNewView(cache, caches().toArray(new Cache[0]));
}
super.clearContent();
}
public void testDelayed(Method m) {
initAndCheck(m);
CountDownLatch release = new CountDownLatch(1);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(null, release), 0);
long requestStart = System.nanoTime();
assertEquals(m.getName(), cache(0).get(key));
long requestEnd = System.nanoTime();
long remoteTimeout = cache(0).getCacheConfiguration().clustering().remoteTimeout();
long delay = TimeUnit.NANOSECONDS.toMillis(requestEnd - requestStart);
assertTrue(delay < remoteTimeout);
release.countDown();
}
public void testExceptionFromBothOwners(Method m) {
initAndCheck(m);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new FailingInterceptor(), 0);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new FailingInterceptor(), 0);
expectException(RemoteException.class, CacheException.class, "Injected", () -> cache(0).get(key));
}
public void testExceptionFromOneOwnerOtherTimeout(Method m) {
initAndCheck(m);
CountDownLatch release = new CountDownLatch(1);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new FailingInterceptor(), 0);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(null, release), 0);
      // It's not enough to test that the exception is TimeoutException, as we want the remote get to fail
      // immediately upon the exception.
      // We cannot mock TimeService in ScheduledExecutor, so we have to measure whether the response was fast;
      // remoteTimeout is generous enough not to cause false positives
long requestStart = System.nanoTime();
try {
expectException(RemoteException.class, CacheException.class, "Injected", () -> cache(0).get(key));
long exceptionThrown = System.nanoTime();
long remoteTimeout = cache(0).getCacheConfiguration().clustering().remoteTimeout();
long delay = TimeUnit.NANOSECONDS.toMillis(exceptionThrown - requestStart);
assertTrue(delay < remoteTimeout);
} finally {
release.countDown();
}
}
public void testBothOwnersSuspected(Method m) throws ExecutionException, InterruptedException {
initAndCheck(m);
CountDownLatch arrival = new CountDownLatch(2);
CountDownLatch release = new CountDownLatch(1);
AtomicInteger thrown = new AtomicInteger();
AtomicInteger retried = new AtomicInteger();
cache(0).getAdvancedCache().getAsyncInterceptorChain().addInterceptorAfter(new CheckOTEInterceptor(thrown, retried), StateTransferInterceptor.class);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release), 0);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release), 0);
Future<Object> future = fork(() -> cache(0).get(key));
assertTrue(arrival.await(10, TimeUnit.SECONDS));
installNewView(cache(0), cache(0));
// The entry was lost, so we'll get null
assertNull(future.get());
// Since we've lost all owners, we get an OutdatedTopologyException and we retry
assertEquals(1, thrown.get());
assertEquals(1, retried.get());
release.countDown();
}
public void testOneOwnerSuspected(Method m) throws ExecutionException, InterruptedException {
initAndCheck(m);
CountDownLatch arrival = new CountDownLatch(2);
CountDownLatch release1 = new CountDownLatch(1);
CountDownLatch release2 = new CountDownLatch(1);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release1), 0);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release2), 0);
Future<?> future = fork(() -> {
assertEquals(cache(0).get(key), m.getName());
});
assertTrue(arrival.await(10, TimeUnit.SECONDS));
installNewView(cache(0), cache(0), cache(1));
      // suspecting the node should not fail the operation
assertFalse(future.isDone());
release1.countDown();
future.get();
release2.countDown();
}
public void testOneOwnerSuspectedNoFilter(Method m) throws ExecutionException, InterruptedException {
initAndCheck(m);
CountDownLatch arrival = new CountDownLatch(2);
CountDownLatch release1 = new CountDownLatch(1);
CountDownLatch release2 = new CountDownLatch(1);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release1), 0);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release2), 0);
Address address1 = address(1);
Address address2 = address(2);
List<Address> owners = Arrays.asList(address1, address2);
ClusteredGetCommand clusteredGet = new ClusteredGetCommand(key, ByteString.fromString(cache(0).getName()),
TestingUtil.getSegmentForKey(key, cache(1)), 0);
final int timeout = 15;
RpcOptions rpcOptions = new RpcOptions(DeliverOrder.NONE, timeout, TimeUnit.SECONDS);
RpcManager rpcManager = cache(0).getAdvancedCache().getRpcManager();
clusteredGet.setTopologyId(rpcManager.getTopologyId());
CompletableFuture<Map<Address, Response>> future = rpcManager.invokeCommand(owners, clusteredGet, MapResponseCollector.ignoreLeavers(), rpcOptions).toCompletableFuture();
assertTrue(arrival.await(10, TimeUnit.SECONDS));
installNewView(cache(0), cache(0), cache(1));
// RequestCorrelator processes the view asynchronously, so we need to wait a bit for node 2 to be suspected
Thread.sleep(100);
      // suspecting the node should not fail the operation
assertFalse(future.isDone());
long requestAllowed = System.nanoTime();
release1.countDown();
Map<Address, Response> responses = future.get();
long requestCompleted = System.nanoTime();
long requestSeconds = TimeUnit.NANOSECONDS.toSeconds(requestCompleted - requestAllowed);
assertTrue("Request took too long: " + requestSeconds, requestSeconds < timeout / 2);
assertEquals(SuccessfulResponse.create(new ImmortalCacheValue(m.getName())), responses.get(address1));
assertEquals(CacheNotFoundResponse.INSTANCE, responses.get(address2));
release2.countDown();
}
public void testOneOwnerSuspectedOtherTimeout(Method m) throws ExecutionException, InterruptedException {
initAndCheck(m);
CountDownLatch arrival = new CountDownLatch(2);
CountDownLatch release = new CountDownLatch(1);
cache(1).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release), 0);
cache(2).getAdvancedCache().getAsyncInterceptorChain().addInterceptor(new DelayingInterceptor(arrival, release), 0);
Future<?> future = fork(() -> {
long start = System.nanoTime();
Exceptions.expectException(TimeoutException.class, () -> cache(0).get(key));
long end = System.nanoTime();
long duration = TimeUnit.NANOSECONDS.toMillis(end - start);
assertTrue("Request did not wait for long enough: " + duration,
duration >= cache(0).getCacheConfiguration().clustering().remoteTimeout());
});
assertTrue(arrival.await(10, TimeUnit.SECONDS));
installNewView(cache(0), cache(0), cache(1));
      // suspecting the node should not fail the operation
assertFalse(future.isDone());
future.get();
release.countDown();
}
private void initAndCheck(Method m) {
cache(0).put(key, m.getName());
assertEquals(m.getName(), cache(1).get(key));
assertEquals(m.getName(), cache(2).get(key));
}
private void installNewView(Cache installing, Cache... cachesInView) {
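      // Installs a synthetic JGroups view directly through GMS, simulating member removal
      // without waiting for failure detection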
JGroupsTransport transport = (JGroupsTransport) installing.getCacheManager().getTransport();
JChannel channel = transport.getChannel();
org.jgroups.Address[] members = Stream.of(cachesInView)
.map(c -> ((JGroupsAddress) address(c)).getJGroupsAddress())
.toArray(org.jgroups.Address[]::new);
View view = View.create(members[0], transport.getViewId() + 1, members);
((GMS) channel.getProtocolStack().findProtocol(GMS.class)).installView(view);
}
static class FailingInterceptor extends DDAsyncInterceptor {
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
throw new CacheException("Injected");
}
}
static class DelayingInterceptor extends DDAsyncInterceptor {
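      // Blocks remote get commands until released, optionally signalling their arrival via a latch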
private final CountDownLatch arrival;
private final CountDownLatch release;
private DelayingInterceptor(CountDownLatch arrival, CountDownLatch release) {
this.arrival = arrival;
this.release = release;
}
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
if (arrival != null) arrival.countDown();
// the timeout has to be longer than remoteTimeout!
release.await(30, TimeUnit.SECONDS);
return super.visitGetCacheEntryCommand(ctx, command);
}
}
class CheckOTEInterceptor extends DDAsyncInterceptor {
private final AtomicInteger thrown;
private final AtomicInteger retried;
public CheckOTEInterceptor(AtomicInteger thrown, AtomicInteger retried) {
this.thrown = thrown;
this.retried = retried;
}
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
if (command.hasAnyFlag(FlagBitSets.COMMAND_RETRY)) {
retried.incrementAndGet();
}
return invokeNextAndExceptionally(ctx, command, (rCtx, rCommand, t) -> {
thrown.incrementAndGet();
throw t;
});
}
}
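   // --- Illustrative sketch (added for exposition; not part of the original test) ---
   // Minimal shape of the latch handshake used by the tests above, assuming the remote read is
   // served by cache(1): DelayingInterceptor counts down `arrival` when the command reaches the
   // owner and parks until the test counts down `release`, leaving a window in which the test
   // can inject faults (e.g. installNewView) while the command is in flight.
   private Object sketchBlockedRemoteRead() throws Exception {
      CountDownLatch arrival = new CountDownLatch(1);
      CountDownLatch release = new CountDownLatch(1);
      cache(1).getAdvancedCache().getAsyncInterceptorChain()
            .addInterceptor(new DelayingInterceptor(arrival, release), 0);
      Future<Object> read = fork(() -> cache(0).get(key));
      assertTrue(arrival.await(10, TimeUnit.SECONDS)); // the read is now parked on the owner
      // ... fault injection would happen here, while the command is held ...
      release.countDown();                             // unblock the owner
      return read.get();                               // completes once the command proceeds
   }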
}
| 13,709
| 43.083601
| 176
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/AsyncAPITxSyncDistTest.java
|
package org.infinispan.distribution;
import static org.infinispan.context.Flag.SKIP_REMOTE_LOOKUP;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.replication.AsyncAPITxSyncReplTest;
import org.infinispan.test.data.Key;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.AsyncAPITxSyncDistTest")
public class AsyncAPITxSyncDistTest extends AsyncAPITxSyncReplTest {
@Override
protected ConfigurationBuilder getConfig() {
return getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
}
@Override
protected void assertOnAllCaches(Key k, String v, Cache c1, Cache c2) {
assertEquals("Error in cache 1.", v, c1.getAdvancedCache().withFlags(SKIP_REMOTE_LOOKUP).get(k));
assertEquals("Error in cache 2,", v, c2.getAdvancedCache().withFlags(SKIP_REMOTE_LOOKUP).get(k));
}
}
| 1,021
| 36.851852
| 103
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/MultipleNodesLeavingTest.java
|
package org.infinispan.distribution;
import java.util.List;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.0
*/
@Test (groups = "functional", testName = "distribution.MultipleNodesLeavingTest")
public class MultipleNodesLeavingTest extends MultipleCacheManagersTest {
@Override
public Object[] factory() {
return new Object[] {
new MultipleNodesLeavingTest().cacheMode(CacheMode.DIST_SYNC),
};
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(cacheMode, false);
createCluster(builder, 4);
waitForClusterToForm();
}
public void testMultipleLeaves() throws Exception {
//kill 3 caches at once
fork(() -> manager(3).stop());
fork(() -> manager(2).stop());
fork(() -> manager(1).stop());
eventuallyEquals(1, () -> advancedCache(0).getRpcManager().getTransport().getMembers().size());
log.trace("MultipleNodesLeavingTest.testMultipleLeaves");
TestingUtil.blockUntilViewsReceived(60000, false, cache(0));
TestingUtil.waitForNoRebalance(cache(0));
List<Address> caches = advancedCache(0).getDistributionManager().getWriteConsistentHash().getMembers();
log.tracef("caches = %s", caches);
int size = caches.size();
assert size == 1;
}
}
| 1,648
| 31.333333
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/ConcurrentStartWithReplTest.java
|
package org.infinispan.distribution;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.infinispan.Cache;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.CacheContainer;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Tests concurrent startup of replicated and distributed caches
*
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
@Test(testName = "distribution.ConcurrentStartWithReplTest", groups = "functional")
public class ConcurrentStartWithReplTest extends AbstractInfinispanTest {
private ConfigurationBuilder replCfg, distCfg;
@BeforeClass(alwaysRun = true)
public void setUp() {
replCfg = MultipleCacheManagersTest.getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
replCfg.clustering().stateTransfer().fetchInMemoryState(true);
distCfg = MultipleCacheManagersTest.getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
distCfg.clustering().stateTransfer().fetchInMemoryState(true);
}
@Test(timeOut = 60000)
public void testSequence1() throws ExecutionException, InterruptedException {
TestResourceTracker.testThreadStarted(this.getTestName());
/*
Sequence 1:
C1 (repl) (becomes coord)
C2 (dist)
C1 (repl)
C2 (dist)
in the same thread.
*/
doTest(true, false);
}
@Test(timeOut = 60000)
public void testSequence2() throws ExecutionException, InterruptedException {
TestResourceTracker.testThreadStarted(this.getTestName());
/*
Sequence 2:
C1 (repl) (becomes coord)
C2 (repl)
C2 (dist)
C1 (dist)
in the same thread.
*/
doTest(false, false);
}
@Test(timeOut = 60000)
public void testSequence3() throws ExecutionException, InterruptedException {
TestResourceTracker.testThreadStarted(this.getTestName());
/*
Sequence 3:
C1 (repl) (becomes coord)
C2 (repl)
C1 (dist) (async thread)
C2 (dist) (async thread)
in the same thread, except the last two which are in separate threads
*/
doTest(true, true);
}
@Test(timeOut = 60000)
public void testSequence4() throws ExecutionException, InterruptedException {
TestResourceTracker.testThreadStarted(this.getTestName());
/*
Sequence 4:
C1 (repl) (becomes coord)
C2 (repl)
C2 (dist) (async thread)
C1 (dist) (async thread)
in the same thread, except the last two which are in separate threads
*/
doTest(false, true);
}
private void doTest(boolean inOrder, boolean nonBlockingStartupForDist) throws ExecutionException, InterruptedException {
EmbeddedCacheManager cm1 = TestCacheManagerFactory.createClusteredCacheManager(new ConfigurationBuilder());
EmbeddedCacheManager cm2 = TestCacheManagerFactory.createClusteredCacheManager(new ConfigurationBuilder());
try {
cm1.defineConfiguration("r", replCfg.build());
cm1.defineConfiguration("d", distCfg.build());
cm2.defineConfiguration("r", replCfg.build());
cm2.defineConfiguration("d", distCfg.build());
// first start the repl caches
Cache<String, String> c1r = startCache(cm1, "r", false).get();
c1r.put("key", "value");
Cache<String, String> c2r = startCache(cm2, "r", false).get();
TestingUtil.blockUntilViewsReceived(10000, c1r, c2r);
TestingUtil.waitForNoRebalance(c1r, c2r);
assert "value".equals(c2r.get("key"));
// now the dist ones
Future<Cache<String, String>> c1df = startCache(inOrder ? cm1 : cm2, "d", nonBlockingStartupForDist);
Future<Cache<String, String>> c2df = startCache(inOrder ? cm2 : cm1, "d", nonBlockingStartupForDist);
Cache<String, String> c1d = c1df.get();
Cache<String, String> c2d = c2df.get();
c1d.put("key", "value");
assert "value".equals(c2d.get("key"));
} finally {
TestingUtil.killCacheManagers(cm1, cm2);
}
}
private Future<Cache<String, String>> startCache(final CacheContainer cm, final String cacheName, boolean nonBlockingStartup) {
final Callable<Cache<String, String>> cacheCreator = () -> cm.getCache(cacheName);
if (nonBlockingStartup) {
return fork(cacheCreator);
} else {
try {
Cache<String, String> cache = cacheCreator.call();
return CompletableFuture.completedFuture(cache);
} catch (Exception e) {
return CompletableFuture.failedFuture(e);
}
}
}
}
| 5,205
| 31.335404
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/BlockingInterceptor.java
|
package org.infinispan.distribution;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Interceptor that allows for waiting for a command to be invoked, blocking that command and subsequently
* allowing that command to be released.
*
* @author William Burns
* @since 6.0
*/
public class BlockingInterceptor<T extends VisitableCommand> extends DDAsyncInterceptor {
private static final Log log = LogFactory.getLog(BlockingInterceptor.class);
private final CyclicBarrier barrier;
private final boolean blockAfter;
private final boolean originLocalOnly;
private final AtomicBoolean suspended = new AtomicBoolean();
private final Predicate<VisitableCommand> acceptCommand;
public BlockingInterceptor(CyclicBarrier barrier, Class<T> commandClass,
boolean blockAfter, boolean originLocalOnly) {
this(barrier, commandClass, blockAfter, originLocalOnly, t -> t != null && commandClass.equals(t.getClass()));
}
public BlockingInterceptor(CyclicBarrier barrier, Class<T> commandClass,
boolean blockAfter, boolean originLocalOnly, Predicate<T> acceptCommand) {
this(barrier, blockAfter, originLocalOnly,
t -> t != null && commandClass.equals(t.getClass()) && acceptCommand.test(commandClass.cast(t)));
}
public BlockingInterceptor(CyclicBarrier barrier, boolean blockAfter, boolean originLocalOnly,
Predicate<VisitableCommand> acceptCommand) {
this.barrier = barrier;
this.blockAfter = blockAfter;
this.originLocalOnly = originLocalOnly;
this.acceptCommand = acceptCommand;
}
public void suspend(boolean s) {
this.suspended.set(s);
}
public void proceed() throws Exception {
barrier.await(30, TimeUnit.SECONDS);
}
private void blockIfNeeded(InvocationContext ctx, VisitableCommand command) throws Exception {
if (suspended.get()) {
log.tracef("Suspended, not blocking command %s", command);
return;
}
if ((!originLocalOnly || ctx.isOriginLocal()) && acceptCommand.test(command)) {
log.tracef("Command blocking %s completion of %s", blockAfter ? "after" : "before", command);
// The first arrive and await is to sync with main thread
barrier.await(30, TimeUnit.SECONDS);
// Now we actually block until main thread lets us go
barrier.await(30, TimeUnit.SECONDS);
log.tracef("Command completed blocking completion of %s", command);
} else {
log.tracef("Not blocking command %s", command);
}
}
@Override
protected Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable {
if (!blockAfter) {
blockIfNeeded(ctx, command);
}
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (blockAfter) {
blockIfNeeded(rCtx, rCommand);
}
});
}
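   // --- Illustrative usage sketch (added for exposition; not part of the original file) ---
   // Tests perform the barrier handshake twice: the first await() tells the test the command
   // has arrived and is blocked, the second await() releases it. For a concrete instance see
   // CacheStoppedDuringReadTest; in outline:
   //
   //    CyclicBarrier barrier = new CyclicBarrier(2);
   //    cache.getAdvancedCache().getAsyncInterceptorChain().addInterceptorBefore(
   //          new BlockingInterceptor<>(barrier, GetCacheEntryCommand.class, false, false),
   //          EntryWrappingInterceptor.class);
   //    Future<Object> f = fork(() -> remoteCache.get(key));
   //    barrier.await(10, TimeUnit.SECONDS); // command arrived and is now held
   //    // ... run the concurrent action under test while the command is blocked ...
   //    barrier.await(10, TimeUnit.SECONDS); // release the command
   //    f.get(10, TimeUnit.SECONDS);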
}
| 3,293
| 37.302326
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncStoreNotSharedTest.java
|
package org.infinispan.distribution;
import static org.infinispan.test.TestingUtil.k;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Future;
import org.infinispan.Cache;
import org.infinispan.container.DataContainer;
import org.infinispan.context.Flag;
import org.infinispan.marshall.persistence.impl.MarshalledEntryUtil;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.annotations.Test;
/**
 * DistSyncStoreNotSharedTest.
*
* @author Galder Zamarreño
* @author Sanne Grinovero <sanne@hibernate.org> (C) 2011 Red Hat Inc.
* @since 4.0
*/
@Test(groups = "functional", testName = "distribution.DistSyncStoreNotSharedTest")
@CleanupAfterMethod
public class DistSyncStoreNotSharedTest<D extends DistSyncStoreNotSharedTest> extends BaseDistStoreTest<Object, String, D> {
private static final String k1 = "1", v1 = "one", k2 = "2", v2 = "two", k3 = "3", v3 = "three", k4 = "4", v4 = "four";
private static final String[] keys = new String[]{k1, k2, k3, k4};
private static final String[] values = new String[] { v1, v2, v3, v4 };
public DistSyncStoreNotSharedTest() {
testRetVals = true;
shared = false;
}
@Override
public Object[] factory() {
return new Object[] {
new DistSyncStoreNotSharedTest().segmented(true),
new DistSyncStoreNotSharedTest().segmented(false),
};
}
public void testPutFromNonOwner(Method m) throws Exception {
String key = k(m), value = "value2";
Cache<Object, String> nonOwner = getFirstNonOwner(key);
Cache<Object, String> owner = getFirstOwner(key);
DummyInMemoryStore nonOwnerLoader = TestingUtil.getFirstStore(nonOwner);
DummyInMemoryStore ownerLoader = TestingUtil.getFirstStore(owner);
assertFalse(nonOwnerLoader.contains(key));
assertFalse(ownerLoader.contains(key));
Object retval = nonOwner.put(key, value);
assertInStores(key, value, true);
if (testRetVals) assert retval == null;
assertOnAllCachesAndOwnership(key, value);
}
public void testGetFromNonOwnerWithFlags(Method m) throws Exception {
String key = k(m), value = "value2";
Cache<Object, String> nonOwner = getFirstNonOwner(key);
Cache<Object, String> owner = getFirstOwner(key);
DummyInMemoryStore ownerLoader = TestingUtil.getFirstStore(owner);
owner.put(key, value);
assertEquals(value, ownerLoader.loadEntry(key).getValue());
owner.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).clear();
assertEquals(value, ownerLoader.loadEntry(key).getValue());
assertNull(owner.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD).get(key));
assertNull(nonOwner.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD).get(key));
assertEquals(value, nonOwner.get(key));
// need to do the get() on all the owners first to load the values, otherwise assertOwnershipAndNonOwnership might fail
assertOnAllCaches(key, value);
assertOwnershipAndNonOwnership(key, true);
}
public void testAsyncGetCleansContextFlags(Method m) throws Exception {
String key = k(m), value = "value2";
Cache<Object, String> nonOwner = getFirstNonOwner(key);
Cache<Object, String> owner = getFirstOwner(key);
owner.put(key, value);
owner.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).clear();
Future<String> async = nonOwner.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD).getAsync(key);
assertNotNull(async);
assertNull(async.get());
async = nonOwner.getAdvancedCache().getAsync(key);
assertNotNull(async);
String returnedValue = async.get();
assertEquals(value, returnedValue);
}
public void testPutFromNonOwnerWithFlags(Method m) throws Exception {
String key = k(m), value = "value2";
Cache<Object, String> nonOwner = getFirstNonOwner(key);
Cache<Object, String> owner = getFirstOwner(key);
DummyInMemoryStore nonOwnerLoader = TestingUtil.getFirstStore(nonOwner);
DummyInMemoryStore ownerLoader = TestingUtil.getFirstStore(owner);
assertFalse(ownerLoader.contains(key));
assertFalse(nonOwnerLoader.contains(key));
Object retval = nonOwner.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).put(key, value);
assertFalse(ownerLoader.contains(key));
assertFalse(nonOwnerLoader.contains(key));
if (testRetVals) assert retval == null;
assertOnAllCachesAndOwnership(key, value);
}
public void testPutFromOwner(Method m) throws Exception {
String key = k(m), value = "value3";
getOwners(key)[0].put(key, value);
assertInStores(key, value, false);
}
protected void assertInStores(String key, String value, boolean allowL1) {
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
if (isOwner(c, key)) {
assertIsInContainerImmortal(c, key);
assertEquals(value, store.loadEntry(key).getValue());
} else {
if (!allowL1) {
assertIsNotInL1(c, key);
}
assertFalse(store.contains(key));
}
}
}
public void testPutForStateTransfer() throws Exception {
MagicKey k1 = getMagicKey();
DummyInMemoryStore store2 = TestingUtil.getFirstStore(c2);
c2.put(k1, v1);
assertTrue(store2.contains(k1));
assertEquals(v1, store2.loadEntry(k1).getValue());
c2.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).put(k1, v2);
assertEquals(v2, store2.loadEntry(k1).getValue());
}
public void testPutAll() throws Exception {
c1.putAll(makePutAllTestData());
for (int i = 0; i < keys.length; ++i) {
assertInStores(keys[i], values[i], true);
}
}
public void testPutAllWithFlags() throws Exception {
Map<String, String> data = makePutAllTestData();
c1.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).putAll(data);
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
for (String key : keys) {
assertFalse(store.contains(key));
if (isOwner(c, key)) {
assertIsInContainerImmortal(c, key);
}
}
}
}
public void testRemoveFromNonOwner() throws Exception {
String key = "k1", value = "value";
initAndTest();
assertInStores(key, value, true);
Object retval = getFirstNonOwner(key).remove(key);
if (testRetVals) assert "value".equals(retval);
assertRemovedFromStores(key);
}
protected void assertRemovedFromStores(String key) {
for (Cache<Object, String> c : caches) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
MarshallableEntry me = store.loadEntry(key);
// handle possible tombstones
assert me == null || me.getValue() == null;
}
}
public void testRemoveFromNonOwnerWithFlags() throws Exception {
String key = "k1", value = "value";
initAndTest();
Object retval = getFirstNonOwner(key).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).remove(key);
if (testRetVals) assert value.equals(retval);
for (Cache<Object, String> c : caches) {
if (isOwner(c, key)) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
assertTrue(store.contains(key));
}
}
}
public void testReplaceFromNonOwner() throws Exception {
String key = "k1", value = "value", value2 = "v2";
initAndTest();
assertInStores(key, value, true);
Object retval = getFirstNonOwner(key).replace(key, value2);
if (testRetVals) assert value.equals(retval);
assertInStores(key, value2, true);
}
public void testReplaceFromNonOwnerWithFlag() throws Exception {
String key = "k1", value = "value", value2 = "v2";
initAndTest();
Object retval = getFirstNonOwner(key).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).replace(key, value2);
if (testRetVals) assert value.equals(retval);
assertEquals(getFirstOwner(key).get(key), value2);
assertInStores(key, value, true);
}
public void testAtomicReplaceFromNonOwner() throws Exception {
String key = "k1", value = "value", value2 = "v2";
initAndTest();
boolean replaced = getFirstNonOwner(key).replace(key, value2, value);
assertFalse(replaced);
replaced = getFirstNonOwner(key).replace(key, value, value2);
assertTrue(replaced);
for (Cache<Object, String> c : caches) {
assertEquals(value2, c.get(key));
if (isOwner(c, key)) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
assertTrue(store.contains(key));
assertEquals(value2, store.loadEntry(key).getValue());
}
}
}
public void testAtomicReplaceFromNonOwnerWithFlag() throws Exception {
String key = "k1", value = "value", value2 = "v2";
initAndTest();
boolean replaced = getFirstNonOwner(key).replace(key, value2, value);
assertFalse(replaced);
replaced = getFirstNonOwner(key).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).replace(key, value, value2);
assertTrue(replaced);
for (Cache<Object, String> c : caches) {
assertEquals(value2, c.get(key));
if (isOwner(c, key)) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
assertTrue(store.contains(key));
assertEquals(value, store.loadEntry(key).getValue());
}
}
}
public void testAtomicPutIfAbsentFromNonOwner(Method m) throws Exception {
String key = k(m), value = "value", value2 = "v2";
String replaced = getFirstNonOwner(key).putIfAbsent(key, value);
assertNull(replaced);
replaced = getFirstNonOwner(key).putIfAbsent(key, value2);
assertEquals(replaced, value);
for (Cache<Object, String> c : caches) {
assertEquals(replaced, c.get(key));
if (isOwner(c, key)) {
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
assertTrue(store.contains(key));
assertEquals(value, store.loadEntry(key).getValue());
}
}
}
public void testAtomicPutIfAbsentFromNonOwnerWithFlag(Method m) throws Exception {
String key = k(m), value = "value";
String replaced = getFirstNonOwner(key).getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).putIfAbsent(key, value);
assertNull(replaced);
//interesting case: putIfAbsent fails because the value exists in memory, yet the value is actually missing from the store
replaced = getFirstNonOwner(key).putIfAbsent(key, value);
assertEquals(replaced, value);
for (Cache<Object, String> c : caches) {
assertEquals(replaced, c.get(key));
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
assertFalse(store.contains(key));
}
}
public void testClear() throws Exception {
prepareClearTest();
c1.clear();
for (Cache<Object, String> c : caches) assert c.isEmpty();
for (int i = 0; i < 5; i++) {
String key = "k" + i;
assertRemovedFromStores(key);
}
}
public void testClearWithFlag() throws Exception {
prepareClearTest();
c1.getAdvancedCache().withFlags(Flag.SKIP_CACHE_STORE).clear();
for (Cache<Object, String> c : caches) {
DataContainer dc = c.getAdvancedCache().getDataContainer();
assertEquals(0, dc.size());
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
for (int i = 0; i < 5; i++) {
String key = "k" + i;
if (isOwner(c, key)) {
assertTrue(store.contains(key));
}
}
}
}
public void testGetOnlyQueriesCacheOnOwners() throws PersistenceException {
// Make a key whose owners are c1 and c2
final MagicKey k = getMagicKey();
final String v1 = "real-data";
final String v2 = "stale-data";
// Simulate that a cache held the entry on its own and someone wrote a value that is now stale
Cache<Object, String> c = getFirstOwner(k);
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
store.write(MarshalledEntryUtil.create(k, v2, c3));
getFirstNonOwner(k).put(k, v1);
assertEquals(v1, c.get(k));
}
/*--- test helpers ---*/
private Map<String, String> makePutAllTestData() {
Map<String, String> data = new HashMap<String, String>();
data.put(k1, v1);
data.put(k2, v2);
data.put(k3, v3);
data.put(k4, v4);
return data;
}
private void prepareClearTest() throws PersistenceException {
for (Cache<Object, String> c : caches) assert c.isEmpty() : "Data container " + c + " should be empty, instead it contains keys " + c.keySet();
for (int i = 0; i < 5; i++) {
getOwners("k" + i)[0].put("k" + i, "value" + i);
}
// this will fill up L1 as well
for (int i = 0; i < 5; i++) assertOnAllCachesAndOwnership("k" + i, "value" + i);
for (Cache<Object, String> c : caches) {
assertFalse(c.isEmpty());
DummyInMemoryStore store = TestingUtil.getFirstStore(c);
for (int i = 0; i < 5; i++) {
String key = "k" + i;
if (isOwner(c, key)) {
assertTrue("Cache store " + c + " does not contain key " + key, store.contains(key));
}
}
}
}
}
| 14,025
| 37.322404
| 149
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncTxFuncTest.java
|
package org.infinispan.distribution;
import static java.lang.String.format;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncTxFuncTest")
public class DistSyncTxFuncTest extends BaseDistFunctionalTest<Object, String> {
public DistSyncTxFuncTest() {
transactional = true;
testRetVals = true;
cleanup = CleanupPhase.AFTER_METHOD; // ensure any stale TXs are wiped
}
public void testTransactionsSpanningKeysCommit() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
// now test a transaction that spans both keys.
TransactionManager tm4 = getTransactionManager(c4);
assertNotLocked(c3, k1);
tm4.begin();
c4.put(k1, "new_value1");
c4.put(k2, "new_value2");
tm4.commit();
assertNotLocked(c3, k1);
assertNotLocked(c3, k2);
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertIsInL1(c4, k1);
assertIsInL1(c4, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
assertNotLocked(c4, k1, k2);
assertNotLocked(c3, k1);
assertNotLocked(c3, k2);
assertNotLocked(c1, k1, k2);
assertNotLocked(c2, k1, k2);
checkOwnership(k1, k2, "new_value1", "new_value2");
}
public void testTransactionsSpanningKeysRollback() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
// now test a transaction that spans both keys.
TransactionManager tm4 = getTransactionManager(c4);
tm4.begin();
c4.put(k1, "new_value1");
c4.put(k2, "new_value2");
tm4.rollback();
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertTransactionOriginatorDataAfterRollback(c4, k1, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
checkOwnership(k1, k2, "value1", "value2");
}
public void testPutFromNonOwner() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
TransactionManager tm4 = getTransactionManager(c4);
tm4.begin();
assertRetVal("value1", c4.put(k1, "new_value"));
assertRetVal("value2", c4.put(k2, "new_value"));
tm4.rollback();
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertTransactionOriginatorDataAfterRollback(c4, k1, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
checkOwnership(k1, k2, "value1", "value2");
}
public void testPutIfAbsentFromNonOwner() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
TransactionManager tm4 = getTransactionManager(c4);
LockManager lockManager4 = extractComponent(c4, LockManager.class);
tm4.begin();
assertRetVal("value1", c4.putIfAbsent(k1, "new_value"));
assertRetVal("value2", c4.putIfAbsent(k2, "new_value"));
assertEquals("value1", c4.get(k1));
assertEquals("value2", c4.get(k2));
tm4.rollback();
assertFalse(lockManager4.isLocked(k1));
assertFalse(lockManager4.isLocked(k2));
assertEquals("value1", c2.get(k1));
assertEquals("value2", c2.get(k2));
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertTransactionOriginatorDataAfterRollback(c4, k1, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
checkOwnership(k1, k2, "value1", "value2");
}
public void testRemoveFromNonOwner() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
assertNotLocked(c1, k1, k2);
assertNotLocked(c2, k1, k2);
assertNotLocked(c3, k1, k2);
assertNotLocked(c4, k1, k2);
log.info("***** Here it starts!");
TransactionManager tm4 = getTransactionManager(c4);
tm4.begin();
assertRetVal("value1", c4.remove(k1));
assertRetVal("value2", c4.remove(k2));
assertFalse(c4.containsKey(k1));
assertFalse(c4.containsKey(k2));
tm4.rollback();
log.info("----- Here it ends!");
assertNotLocked(c1, k1, k2);
assertNotLocked(c2, k1, k2);
assertNotLocked(c3, k1, k2);
assertNotLocked(c4, k1);
assertNotLocked(c4, k2);
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertNotLocked(c1, k1, k2);
assertNotLocked(c2, k1, k2);
assertNotLocked(c3, k1, k2);
assertNotLocked(c4, k1, k2);
assertTransactionOriginatorDataAfterRollback(c4, k1, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
assertNotLocked(c1, k1, k2);
assertNotLocked(c2, k1, k2);
assertNotLocked(c3, k1, k2);
assertNotLocked(c4, k1, k2);
checkOwnership(k1, k2, "value1", "value2");
}
public void testConditionalRemoveFromNonOwner() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
TransactionManager tm4 = getTransactionManager(c4);
tm4.begin();
assertRetVal(false, c4.remove(k1, "valueX"));
assertRetVal(false, c4.remove(k1, "valueX"));
assertTrue(c4.containsKey(k1));
assertTrue(c4.containsKey(k2));
assertRetVal(true, c4.remove(k1, "value1"));
assertRetVal(true, c4.remove(k2, "value2"));
assertFalse(c4.containsKey(k1));
assertFalse(c4.containsKey(k2));
tm4.rollback();
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertIsInL1(c4, k1);
assertIsInL1(c4, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
checkOwnership(k1, k2, "value1", "value2");
}
public void testReplaceFromNonOwner() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
TransactionManager tm4 = getTransactionManager(c4);
tm4.begin();
assertRetVal("value1", c4.replace(k1, "new_value"));
assertRetVal("value2", c4.replace(k2, "new_value"));
assertEquals("new_value", c4.get(k1));
assertEquals("new_value", c4.get(k2));
tm4.rollback();
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertTransactionOriginatorDataAfterRollback(c4, k1, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
checkOwnership(k1, k2, "value1", "value2");
}
public void testConditionalReplaceFromNonOwner() throws Exception {
// we need 2 keys that reside on different caches...
MagicKey k1 = new MagicKey("k1", c1, c2); // maps on to c1 and c2
MagicKey k2 = new MagicKey("k2", c2, c3); // maps on to c2 and c3
init(k1, k2);
TransactionManager tm4 = getTransactionManager(c4);
tm4.begin();
assertRetVal(false, c4.replace(k1, "valueX", "new_value"));
assertRetVal(false, c4.replace(k2, "valueX", "new_value"));
assertEquals("value1", c4.get(k1));
assertEquals("value2", c4.get(k2));
assertRetVal(true, c4.replace(k1, "value1", "new_value"));
assertRetVal(true, c4.replace(k2, "value2", "new_value"));
assertEquals("new_value", c4.get(k1));
assertEquals("new_value", c4.get(k2));
tm4.rollback();
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
//conditional operations always fetch the entry from the remote owner.
assertIsInL1(c4, k1);
assertIsInL1(c4, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
checkOwnership(k1, k2, "value1", "value2");
}
public void testMergeFromNonOwner() {
initAndTest();
// merge function applied
Object retval = getFirstNonOwner("k1").merge("k1", "value2", (v1, v2) -> "merged_" + v1 + "_" + v2);
asyncWait("k1", ReadWriteKeyCommand.class);
if (testRetVals) assertEquals("merged_value_value2", retval);
assertOnAllCachesAndOwnership("k1", "merged_value_value2");
}
@Override
protected ConfigurationBuilder buildConfiguration() {
ConfigurationBuilder builder = super.buildConfiguration();
ControlledConsistentHashFactory.Default chf = new ControlledConsistentHashFactory.Default(
new int[][]{{0, 1}, {1, 2}});
builder.clustering().hash().numOwners(2).numSegments(2).consistentHashFactory(chf);
return builder;
}
private void checkOwnership(MagicKey k1, MagicKey k2, String v1, String v2) {
assertOnAllCachesAndOwnership(k1, v1);
assertOnAllCachesAndOwnership(k2, v2);
assertIsInL1(c4, k1);
assertIsInL1(c4, k2);
assertIsInL1(c1, k2);
assertIsInL1(c3, k1);
}
private void assertNotLocked(Cache c, Object... keys) {
LockManager lm = extractComponent(c, LockManager.class);
for (Object key : keys) {
//the keys are unlocked asynchronously
eventually(() -> format("Expected unlocked key '%s' (lock-owner='%s')", key, lm.getOwner(key)),
() -> !lm.isLocked(key));
}
}
private void init(MagicKey k1, MagicKey k2) {
// neither key maps on to c4
c2.put(k1, "value1");
c2.put(k2, "value2");
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsInContainerImmortal(c2, k2);
assertIsInContainerImmortal(c3, k2);
assertIsNotInL1(c4, k1);
assertIsNotInL1(c4, k2);
assertIsNotInL1(c1, k2);
assertIsNotInL1(c3, k1);
}
private <T> void assertRetVal(T expected, T retVal) {
if (testRetVals) {
assertEquals(expected, retVal);
}
}
private void assertTransactionOriginatorDataAfterRollback(Cache<Object, String> cache, MagicKey k1, MagicKey k2) {
if (testRetVals) {
//entry is fetched and stored in L1 even if the TX rolls back
assertIsInL1(cache, k1);
assertIsInL1(cache, k2);
} else {
//unsafe is enabled and the entry isn't fetched remotely.
assertIsNotInL1(cache, k1);
assertIsNotInL1(cache, k2);
}
}
}
| 12,385
| 31.941489
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DisabledL1Test.java
|
package org.infinispan.distribution;
import static org.infinispan.test.TestingUtil.k;
import static org.infinispan.test.TestingUtil.v;
import java.lang.reflect.Method;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DisabledL1Test")
public class DisabledL1Test extends BaseDistFunctionalTest<Object, String> {
public DisabledL1Test() {
testRetVals = false;
l1CacheEnabled = false;
}
public void testRemoveFromNonOwner() {
Object retval = getFirstNonOwner("k1").put("k1", "value");
asyncWait("k1", PutKeyValueCommand.class);
if (testRetVals) assert retval == null;
retval = getOwners("k1")[0].remove("k1");
asyncWait("k1", RemoveCommand.class);
if (testRetVals) assert "value".equals(retval);
assertRemovedOnAllCaches("k1");
}
public void testReplaceFromNonOwner(Method m) {
final String k = k(m);
final String v = v(m);
getOwners(k)[0].put(k, v);
getNonOwners(k)[0].replace(k, v(m, 1));
}
}
| 1,148
| 28.461538
| 76
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistAsyncFuncTest.java
|
package org.infinispan.distribution;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.test.ReplListener;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.AbstractDelegatingRpcManager;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
@Test(groups = {"functional"}, testName = "distribution.DistAsyncFuncTest")
public class DistAsyncFuncTest extends DistSyncFuncTest {
ReplListener r1, r2, r3, r4;
ReplListener[] r;
Map<Address, ReplListener> listenerLookup;
ConcurrentMap<Address, List<InvalidateL1Command>> expectedL1Invalidations = new ConcurrentHashMap<>();
@Override
public Object[] factory() {
return new Object[] {
new DistAsyncFuncTest(),
new DistAsyncFuncTest().groupers(true)
};
}
public DistAsyncFuncTest() {
cacheMode = CacheMode.DIST_ASYNC;
testRetVals = false;
}
@Override
protected void createCacheManagers() throws Throwable {
super.createCacheManagers();
r1 = new ReplListener(c1, true, true);
r2 = new ReplListener(c2, true, true);
r3 = new ReplListener(c3, true, true);
r4 = new ReplListener(c4, true, true);
r = new ReplListener[]{r1, r2, r3, r4};
listenerLookup = new HashMap<>();
for (ReplListener rl : r) listenerLookup.put(rl.getCache().getCacheManager().getAddress(), rl);
for (Cache c : caches) {
TestingUtil.wrapComponent(c, RpcManager.class, original -> new AbstractDelegatingRpcManager(original) {
@Override
protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker,
RpcOptions rpcOptions) {
if (command instanceof SingleRpcCommand) {
command = ((SingleRpcCommand) command).getCommand();
}
if (command instanceof InvalidateL1Command) {
InvalidateL1Command invalidateL1Command = (InvalidateL1Command) command;
log.tracef("Sending invalidation %s to %s", command, targets);
Collection<Address> realTargets = targets != null ? targets : cacheAddresses;
for (Address target : realTargets) {
expectedL1Invalidations.computeIfAbsent(
target, ignored -> Collections.synchronizedList(new ArrayList<>())).add(invalidateL1Command);
}
}
return super.performRequest(targets, command, collector, invoker, rpcOptions);
}
});
}
}
@AfterMethod
public void resetEagerCommands() {
for (ReplListener rl: r) {
rl.resetEager();
}
expectedL1Invalidations.clear();
}
@Override
protected void asyncWait(Object key, Predicate<VisitableCommand> command) {
if (key == null) {
// test all caches.
for (ReplListener rl : r) rl.expect(command);
for (ReplListener rl : r) rl.waitForRpc();
} else {
for (Cache<?, ?> c : getOwners(key)) {
listenerLookup.get(address(c)).expect(command);
listenerLookup.get(address(c)).waitForRpc();
}
}
waitForInvalidations();
}
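   // Bookkeeping note (added for exposition): expectedL1Invalidations is populated by the
   // AbstractDelegatingRpcManager wrapper installed in createCacheManagers(): every
   // InvalidateL1Command sent to a target address is recorded there, and waitForInvalidations()
   // drains each per-address list, expecting one InvalidateL1Command RPC per recorded entry.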
private void waitForInvalidations() {
for (Map.Entry<Address, List<InvalidateL1Command>> expected : expectedL1Invalidations.entrySet()) {
Address address = expected.getKey();
ReplListener replListener = listenerLookup.get(address);
List<InvalidateL1Command> list = expected.getValue();
if (!list.isEmpty()) {
log.tracef("Waiting for invalidations on %s: %s", address, list);
synchronized (list) {
for (InvalidateL1Command cmd : list) {
replListener.expect(InvalidateL1Command.class);
}
list.clear();
}
replListener.waitForRpc();
}
}
}
@Override
protected void asyncWaitOnPrimary(Object key, Class<? extends VisitableCommand> command) {
assert key != null;
Cache<?, ?> primary = getFirstOwner(key);
listenerLookup.get(address(primary)).expect(command);
listenerLookup.get(address(primary)).waitForRpc();
waitForInvalidations();
}
}
| 5,375
| 37.4
| 119
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/CacheStoppedDuringReadTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.CacheStoppedDuringReadTest")
public class CacheStoppedDuringReadTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
createClusteredCaches(3, TestDataSCI.INSTANCE, getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC));
}
public void test() throws Exception {
MagicKey key = new MagicKey(cache(0), cache(1));
cache(2).put(key, "value");
CyclicBarrier barrier0 = new CyclicBarrier(2);
cache(0).getAdvancedCache().getAsyncInterceptorChain().addInterceptorBefore(
new BlockingInterceptor<>(barrier0, GetCacheEntryCommand.class, false, false),
EntryWrappingInterceptor.class);
Future<Object> f = fork(() -> cache(2).get(key));
barrier0.await(10, TimeUnit.SECONDS);
cache(0).stop();
barrier0.await(10, TimeUnit.SECONDS);
assertEquals("value", f.get(10, TimeUnit.SECONDS));
}
}
| 1,498
| 35.560976
| 106
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/MulticastInvalidationFuncTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertNull;
import java.util.ArrayList;
import java.util.Collection;
import org.infinispan.Cache;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.test.ReplListener;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.MulticastInvalidationFuncTest")
public class MulticastInvalidationFuncTest extends BaseDistFunctionalTest<Object, String> {
public static final String KEY1 = "k1";
public MulticastInvalidationFuncTest() {
testRetVals = true;
l1Threshold = 0;
}
public void testPut() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner(KEY1);
Cache<Object, String> owner = getOwners(KEY1)[0];
Collection<ReplListener> listeners = new ArrayList<ReplListener>();
// Put an object in from a non-owner, this will cause an L1 record to be created there
nonOwner.put(KEY1, "foo");
assertNull(nonOwner.getAdvancedCache().getDataContainer().get(KEY1));
Assert.assertEquals(owner.getAdvancedCache().getDataContainer().get(KEY1).getValue(), "foo");
// Check that all nodes (except the one we put to) are notified
// but only if the transport is multicast-capable
if (owner.getAdvancedCache().getRpcManager().getTransport().isMulticastCapable()) {
for (Cache<Object, String> c : getNonOwners(KEY1)) {
ReplListener rl = new ReplListener(c);
rl.expect(InvalidateL1Command.class);
listeners.add(rl);
log.debugf("Added nonowner %s", c);
}
} else {
ReplListener rl = new ReplListener(nonOwner);
rl.expect(InvalidateL1Command.class);
listeners.add(rl);
}
// Put an object into an owner, this will cause the L1 records for this key to be invalidated
owner.put(KEY1, "bar");
for (ReplListener rl : listeners) {
rl.waitForRpc();
}
Assert.assertNull(nonOwner.getAdvancedCache().getDataContainer().get(KEY1));
}
}
| 2,127
| 33.322581
| 99
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncL1PassivationFuncTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.DistSyncL1PassivationFuncTest")
public class DistSyncL1PassivationFuncTest extends BaseDistFunctionalTest {
protected int MAX_ENTRIES = 4;
protected DummyInMemoryStore ownerCacheStore;
protected DummyInMemoryStore nonOwnerCacheStore;
public DistSyncL1PassivationFuncTest() {
testRetVals = true;
numOwners = 1;
INIT_CLUSTER_SIZE = 2;
}
@Override
protected void createCacheManagers() throws Throwable {
super.createCacheManagers();
ownerCacheStore = TestingUtil.extractComponent(cache(0, cacheName), PersistenceManager.class).getStores(DummyInMemoryStore.class).iterator().next();
nonOwnerCacheStore = TestingUtil.extractComponent(cache(1, cacheName), PersistenceManager.class).getStores(DummyInMemoryStore.class).iterator().next();
}
@Override
protected ConfigurationBuilder buildConfiguration() {
ConfigurationBuilder builder = super.buildConfiguration();
builder
.memory()
.size(MAX_ENTRIES)
.persistence()
.passivation(true)
.addStore(DummyInMemoryStoreConfigurationBuilder.class);
return builder;
}
@Test
public void testPassivatedL1Entries() {
final int minPassivated = 2;
final int insertCount = MAX_ENTRIES + minPassivated;
List<MagicKey> keys = new ArrayList<MagicKey>(insertCount);
Cache<MagicKey, Object> ownerCache = cache(0, cacheName);
Cache<MagicKey, Object> nonOwnerCache = cache(1, cacheName);
// Need to put 2+ magic keys to make sure we fill up the L1 on the local node
for (int i = 0; i < insertCount; ++i) {
// If the put worked then keep the key otherwise we need to generate a new one
MagicKey key = new MagicKey(ownerCache);
while (ownerCache.putIfAbsent(key, key) != null) {
key = new MagicKey(ownerCache);
}
keys.add(key);
}
// Passivation is async
eventually(() -> ownerCacheStore.size() >= minPassivated);
assertTrue(MAX_ENTRIES >= ownerCache.getAdvancedCache().getDataContainer().size());
assertEquals(0, nonOwnerCache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).size());
assertEquals(0, nonOwnerCacheStore.size());
// Now load those keys in our non owner cache which should store them in L1
for (MagicKey key : keys) {
nonOwnerCache.get(key);
}
// L1 entries should not be passivated
assertEquals("Some L1 values were passivated", 0, nonOwnerCacheStore.size());
}
}
| 3,206
| 36.290698
| 157
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/BaseDistFunctionalTest.java
|
package org.infinispan.distribution;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.distribution.groups.KXGrouper;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.util.concurrent.IsolationLevel;
public abstract class BaseDistFunctionalTest<K, V> extends MultipleCacheManagersTest {
protected String cacheName;
protected int INIT_CLUSTER_SIZE = 4;
protected Cache<K, V> c1 = null, c2 = null, c3 = null, c4 = null;
protected ConfigurationBuilder configuration;
protected List<Cache<K, V>> caches;
protected List<Address> cacheAddresses;
protected boolean testRetVals = true;
protected boolean l1CacheEnabled = true;
protected int l1Threshold = 5;
protected boolean performRehashing = false;
protected boolean batchingEnabled = false;
protected int numOwners = 2;
protected int lockTimeout = 45;
protected boolean groupers = false;
protected boolean onePhaseCommitOptimization = false;
{
cacheMode = CacheMode.DIST_SYNC;
transactional = false;
}
public BaseDistFunctionalTest<K, V> numOwners(int numOwners) {
this.numOwners = numOwners;
return this;
}
public BaseDistFunctionalTest<K, V> l1(boolean l1) {
this.l1CacheEnabled = l1;
return this;
}
public BaseDistFunctionalTest<K, V> groupers(boolean groupers) {
this.groupers = groupers;
return this;
}
@Override
protected String[] parameterNames() {
return concat(super.parameterNames(), "numOwners", "l1", "groupers");
}
@Override
protected Object[] parameterValues() {
return concat(super.parameterValues(), numOwners != 2 ? numOwners : null, l1CacheEnabled ? null : Boolean.FALSE, groupers ? Boolean.TRUE : null);
}
@Override
protected void createCacheManagers() throws Throwable {
cacheName = "dist";
configuration = buildConfiguration();
// Create clustered caches with failure detection protocols on
createClusteredCaches(INIT_CLUSTER_SIZE, cacheName, getSerializationContext(), configuration,
new TransportFlags().withFD(false));
caches = caches(cacheName);
if (INIT_CLUSTER_SIZE > 0) c1 = caches.get(0);
if (INIT_CLUSTER_SIZE > 1) c2 = caches.get(1);
if (INIT_CLUSTER_SIZE > 2) c3 = caches.get(2);
if (INIT_CLUSTER_SIZE > 3) c4 = caches.get(3);
cacheAddresses = new ArrayList<>(INIT_CLUSTER_SIZE);
for (Cache<K, V> cache : caches) {
EmbeddedCacheManager cacheManager = cache.getCacheManager();
cacheAddresses.add(cacheManager.getAddress());
}
}
protected SerializationContextInitializer getSerializationContext() {
return TestDataSCI.INSTANCE;
}
@Override
protected void killMember(int cacheIndex, String cacheName, boolean awaitRehash) {
super.killMember(cacheIndex, cacheName, awaitRehash);
caches.remove(cacheIndex);
}
protected ConfigurationBuilder buildConfiguration() {
ConfigurationBuilder configuration = getDefaultClusteredCacheConfig(cacheMode, transactional);
configuration.clustering().stateTransfer().fetchInMemoryState(performRehashing);
if (lockingMode != null) {
configuration.transaction().lockingMode(lockingMode);
}
configuration.clustering().hash().numOwners(numOwners);
if (!testRetVals) {
configuration.unsafe().unreliableReturnValues(true);
// we also need to use repeatable read for the tests to work when we don't have reliable
// return values, since the tests repeatedly query changes
configuration.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
} else {
configuration.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
}
if (transactional) {
configuration.invocationBatching().enable();
if (onePhaseCommitOptimization) {
configuration.transaction().use1PcForAutoCommitTransactions(true);
}
}
if (cacheMode.isSynchronous()) configuration.clustering().remoteTimeout(60, TimeUnit.SECONDS);
configuration.locking().lockAcquisitionTimeout(lockTimeout, TimeUnit.SECONDS);
configuration.clustering().l1().enabled(l1CacheEnabled);
if (groupers) {
configuration.clustering().hash().groups().enabled(true);
configuration.clustering().hash().groups().withGroupers(Collections.singletonList(new KXGrouper()));
}
if (l1CacheEnabled) configuration.clustering().l1().invalidationThreshold(l1Threshold);
return configuration;
}
// ----------------- HELPERS ----------------
protected boolean isTriangle() {
return TestingUtil.isTriangleAlgorithm(cacheMode, transactional);
}
protected void initAndTest() {
for (Cache<K, V> c : caches) assert c.isEmpty();
// TODO: A bit hacky, this should be moved somewhere else really...
Cache<Object, Object> firstCache = (Cache<Object, Object>) caches.get(0);
firstCache.put("k1", "value");
asyncWait("k1", PutKeyValueCommand.class);
assertOnAllCachesAndOwnership("k1", "value");
}
protected Address addressOf(Cache<?, ?> cache) {
return DistributionTestHelper.addressOf(cache);
}
protected Cache<K, V> getFirstNonOwner(Object key) {
return DistributionTestHelper.getFirstNonOwner(key, caches);
}
protected Cache<K, V> getFirstOwner(Object key) {
return DistributionTestHelper.getFirstOwner(key, caches);
}
protected Cache<K, V> getSecondNonOwner(String key) {
return getNonOwners(key)[1];
}
protected void assertOnAllCachesAndOwnership(Object key, String value) {
assertOwnershipAndNonOwnership(key, l1CacheEnabled);
// checking the values will bring the keys to L1, so we want to do it after checking ownership
assertOnAllCaches(key, value);
}
protected void assertRemovedOnAllCaches(Object key) {
assertOnAllCaches(key, null);
}
protected void assertOnAllCaches(Object key, String value) {
for (Cache<K, V> c : caches) {
Object realVal = c.get(key);
if (value == null) {
assert realVal == null : "Expecting [" + key + "] to equal [" + value + "] on cache ["
+ addressOf(c) + "] but was [" + realVal + "]. Owners are " + Arrays.toString(getOwners(key));
} else {
assert value.equals(realVal) : "Expecting [" + key + "] to equal [" + value + "] on cache ["
+ addressOf(c) + "] but was [" + realVal + "]";
}
}
// Allow some time for all ClusteredGetCommands to finish executing
TestingUtil.sleepThread(100);
}
protected void assertOwnershipAndNonOwnership(Object key, boolean allowL1) {
for (Cache<K, V> c : caches) {
DataContainer<K, V> dc = c.getAdvancedCache().getDataContainer();
InternalCacheEntry<K, V> ice = dc.get(key);
if (isOwner(c, key)) {
assert ice != null && ice.getValue() != null : "Fail on owner cache " + addressOf(c) + ": dc.get(" + key + ") returned " + ice;
assert ice instanceof ImmortalCacheEntry : "Fail on owner cache " + addressOf(c) + ": dc.get(" + key + ") returned " + safeType(ice);
} else {
if (allowL1) {
assert ice == null || ice.getValue() == null || ice.isL1Entry() : "Fail on non-owner cache " + addressOf(c) + ": dc.get(" + key + ") returned " + safeType(ice);
} else {
// Segments no longer owned are invalidated asynchronously
eventually("Fail on non-owner cache " + addressOf(c) + ": dc.get(" + key + ")", () -> {
InternalCacheEntry<K, V> ice2 = dc.get(key);
return ice2 == null || ice2.getValue() == null;
});
}
}
}
}
protected String safeType(Object o) {
return DistributionTestHelper.safeType(o);
}
protected boolean isInL1(Cache<?, ?> cache, Object key) {
DataContainer<?, ?> dc = cache.getAdvancedCache().getDataContainer();
InternalCacheEntry<?, ?> ice = dc.get(key);
return ice != null && ice.getValue() != null && !(ice instanceof ImmortalCacheEntry);
}
protected void assertIsInL1(Cache<?, ?> cache, Object key) {
DistributionTestHelper.assertIsInL1(cache, key);
}
protected void assertIsNotInL1(Cache<?, ?> cache, Object key) {
DistributionTestHelper.assertIsNotInL1(cache, key);
}
protected void assertIsInContainerImmortal(Cache<?, ?> cache, Object key) {
DistributionTestHelper.assertIsInContainerImmortal(cache, key);
}
protected void assertIsInL1OrNull(Cache<?, ?> cache, Object key) {
DistributionTestHelper.assertIsInL1OrNull(cache, key);
}
protected boolean isOwner(Cache<?, ?> c, Object key) {
return DistributionTestHelper.isOwner(c, key);
}
protected boolean isFirstOwner(Cache<?, ?> c, Object key) {
return DistributionTestHelper.isFirstOwner(c, key);
}
protected Cache<K, V>[] getOwners(Object key) {
Cache<K, V>[] arr = new Cache[numOwners];
DistributionTestHelper.getOwners(key, caches).toArray(arr);
return arr;
}
protected Cache<K, V>[] getOwners(Object key, int expectedNumberOwners) {
Cache<K, V>[] arr = new Cache[expectedNumberOwners];
DistributionTestHelper.getOwners(key, caches).toArray(arr);
return arr;
}
protected Cache<K, V>[] getNonOwnersExcludingSelf(Object key, Address self) {
Cache<K, V>[] nonOwners = getNonOwners(key);
boolean selfInArray = false;
for (Cache<?, ?> c : nonOwners) {
if (addressOf(c).equals(self)) {
selfInArray = true;
break;
}
}
if (selfInArray) {
Cache<K, V>[] nonOwnersExclSelf = new Cache[nonOwners.length - 1];
int i = 0;
for (Cache<K, V> c : nonOwners) {
if (!addressOf(c).equals(self)) nonOwnersExclSelf[i++] = c;
}
return nonOwnersExclSelf;
} else {
return nonOwners;
}
}
protected Cache<K, V>[] getNonOwners(Object key) {
return getNonOwners(key, 2);
}
protected Cache<K, V>[] getNonOwners(Object key, int expectedNumberNonOwners) {
Cache<K, V>[] nonOwners = new Cache[expectedNumberNonOwners];
DistributionTestHelper.getNonOwners(key, caches).toArray(nonOwners);
return nonOwners;
}
protected DistributionManager getDistributionManager(Cache<?, ?> c) {
return TestingUtil.extractComponent(c, DistributionManager.class);
}
protected LocalizedCacheTopology getCacheTopology(Cache<?, ?> c) {
return getDistributionManager(c).getCacheTopology();
}
/**
* Blocks and waits for a replication event on async caches
* @param key key that causes the replication. Used to determine which caches to listen on. If null, all caches
* are checked
* @param command command to listen for
*/
protected void asyncWait(Object key, Class<? extends VisitableCommand> command) {
asyncWait(key, command::isInstance);
}
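   // Typical usage on an async cache (sketch; on sync caches this is a no-op):
   //    cache.put(k, v);
   //    asyncWait(k, PutKeyValueCommand.class);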
protected void asyncWait(Object key, Predicate<VisitableCommand> test) {
      // No-op for synchronous caches; subclasses that test async replication override this to block until the command is applied.
}
/**
* Blocks and waits for a replication event on primary owners in async caches
* @param key key that causes the replication. Must be non-null.
* @param command command to listen for
*/
protected void asyncWaitOnPrimary(Object key, Class<? extends VisitableCommand> command) {
      // No-op for synchronous caches; async subclasses override this to wait for the command on the primary owner.
}
protected TransactionManager getTransactionManager(Cache<?, ?> cache) {
return TestingUtil.getTransactionManager(cache);
}
protected static void removeAllBlockingInterceptorsFromCache(Cache<?, ?> cache) {
AsyncInterceptorChain chain = TestingUtil.extractInterceptorChain(cache);
BlockingInterceptor<?> blockingInterceptor = chain.findInterceptorExtending(BlockingInterceptor.class);
while (blockingInterceptor != null) {
blockingInterceptor.suspend(true);
chain.removeInterceptor(blockingInterceptor.getClass());
blockingInterceptor = chain.findInterceptorExtending(BlockingInterceptor.class);
}
}
protected MagicKey getMagicKey() {
switch (numOwners) {
case 1:
return new MagicKey(c1);
case 2:
return new MagicKey(c1, c2);
default:
            throw new IllegalArgumentException("Unsupported numOwners: " + numOwners);
}
}
}
| 13,479
| 37.186969
| 175
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSkipRemoteLookupBatchingTest.java
|
package org.infinispan.distribution;
import static org.infinispan.context.Flag.SKIP_REMOTE_LOOKUP;
import org.infinispan.test.AbstractCacheTest;
import org.testng.annotations.Test;
/**
* @author Mircea Markus
* @since 5.1
*/
@Test (groups = "functional", testName = "distribution.DistSkipRemoteLookupBatchingTest")
public class DistSkipRemoteLookupBatchingTest extends BaseDistFunctionalTest<Object, String> {
public DistSkipRemoteLookupBatchingTest() {
cleanup = AbstractCacheTest.CleanupPhase.AFTER_METHOD;
batchingEnabled = true;
transactional = true;
}
public void testSkipLookupOnGetWhileBatching() {
MagicKey k1 = new MagicKey(c1, c2);
c1.put(k1, "batchingMagicValue-h1");
assertIsInContainerImmortal(c1, k1);
assertIsInContainerImmortal(c2, k1);
assertIsNotInL1(c3, k1);
assertIsNotInL1(c4, k1);
c4.startBatch();
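      // SKIP_REMOTE_LOOKUP keeps the get local: non-owner c4 returns null instead of fetching the value remotely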
assert c4.getAdvancedCache().withFlags(SKIP_REMOTE_LOOKUP).get(k1) == null;
c4.endBatch(true);
assertOwnershipAndNonOwnership(k1, false);
}
}
| 1,063
| 27.756757
| 94
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/InvalidationFailureTest.java
|
package org.infinispan.distribution;
import jakarta.transaction.Transaction;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
* @author Mircea.Markus@jboss.com
* @since 4.2
*/
@Test(groups = "functional", testName = "distribution.InvalidationFailureTest")
public class InvalidationFailureTest extends MultipleCacheManagersTest {
private Object k0;
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder config = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, true);
config.clustering().l1().enable().hash().numOwners(1);
config.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
createCluster(TestDataSCI.INSTANCE, config, 2);
final String cacheName = manager(0).getCacheManagerConfiguration().defaultCacheName().get();
manager(0).defineConfiguration("second", config.build());
manager(1).defineConfiguration("second", config.build());
manager(0).startCaches(cacheName, "second");
manager(1).startCaches(cacheName, "second");
waitForClusterToForm(cacheName, "second");
cache(0).put("k","v");
cache(0,"second").put("k","v");
assert cache(1).get("k").equals("v");
assert cache(1, "second").get("k").equals("v");
k0 = new MagicKey(cache(0));
}
public void testL1Invalidated() throws Exception {
tm(1).begin();
cache(1).put(k0,"v");
cache(1, "second").put(k0,"v");
assert !lockManager(1).isLocked(k0);
assert !lockManager(1,"second").isLocked(k0);
Transaction transaction = tm(1).suspend();
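      // tx1 stays suspended while node 1 still holds L1 entries for k0; committing tx0 on the owner must invalidate them without failing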
tm(0).begin();
log.info("Before the put");
cache(0, "second").put(k0, "v1");
cache(0).put(k0, "v2");
try {
tm(0).commit();
log.info("After the Commit");
} catch (Exception e) {
log.error("Error during commit", e);
assert false : "this should not fail even if the invalidation does";
} finally {
tm(1).resume(transaction);
tm(1).rollback();
assert !lockManager(0).isLocked(k0);
assert !lockManager(0, "second").isLocked(k0);
assert !lockManager(1).isLocked(k0);
assert !lockManager(1, "second").isLocked(k0);
}
}
}
| 2,492
| 35.661765
| 98
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/TestAddress.java
|
package org.infinispan.distribution;
import java.util.Objects;
import org.infinispan.remoting.transport.Address;
/**
* @author Mircea.Markus@jboss.com
* @since 4.2
*/
public class TestAddress implements Address {
private final int addressNum;
private String name;
public void setName(String name) {
this.name = name;
}
public TestAddress(int addressNum) {
this.addressNum = addressNum;
}
public TestAddress(int addressNum, String name) {
this.addressNum = addressNum;
this.name = name;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TestAddress that = (TestAddress) o;
return addressNum == that.addressNum && Objects.equals(name, that.name);
}
@Override
public int hashCode() {
return addressNum;
}
@Override
public String toString() {
if (name != null) {
return name + "#" + addressNum;
      } else {
         return "TestAddress#" + addressNum;
      }
}
@Override
public int compareTo(Address o) {
      // Integer.compare avoids the overflow that plain subtraction can produce for extreme values
      return Integer.compare(this.addressNum, ((TestAddress) o).addressNum);
}
}
| 1,185
| 19.807018
| 78
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncL1FuncTest.java
|
package org.infinispan.distribution;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.List;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.distribution.L1NonTxInterceptor;
import org.infinispan.interceptors.distribution.NonTxDistributionInterceptor;
import org.infinispan.interceptors.distribution.TriangleDistributionInterceptor;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.ControlledRpcManager;
import org.infinispan.util.concurrent.CommandAckCollector;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "distribution.DistSyncL1FuncTest")
public class DistSyncL1FuncTest extends BaseDistSyncL1Test {
public DistSyncL1FuncTest() {
testRetVals = true;
}
@Override
protected Class<? extends AsyncInterceptor> getDistributionInterceptorClass() {
return isTriangle() ? TriangleDistributionInterceptor.class : NonTxDistributionInterceptor.class;
}
@Override
protected Class<? extends AsyncInterceptor> getL1InterceptorClass() {
return L1NonTxInterceptor.class;
}
protected void assertL1PutWithConcurrentUpdate(final Cache<Object, String> nonOwnerCache, Cache<Object, String> ownerCache,
final boolean replace, final Object key, final String originalValue,
final String nonOwnerValue, String updateValue) throws Throwable {
CyclicBarrier barrier = new CyclicBarrier(2);
BlockingInterceptor blockingInterceptor = addBlockingInterceptorBeforeTx(nonOwnerCache, barrier,
replace ? ReplaceCommand.class : PutKeyValueCommand.class);
try {
Future<String> future = fork(() -> {
if (replace) {
               // The replace is expected to succeed; fall back to a get if it raced with a concurrent write
if (nonOwnerCache.replace(key, originalValue, nonOwnerValue)) {
return originalValue;
}
return nonOwnerCache.get(key);
}
else {
return nonOwnerCache.put(key, nonOwnerValue);
}
});
// Now wait for the put/replace to return and block it for now
barrier.await(5, TimeUnit.SECONDS);
// Stop blocking new commands as we check that a put returns the correct previous value
blockingInterceptor.suspend(true);
// Owner should have the new value
assertEquals(nonOwnerValue, ownerCache.put(key, updateValue));
// Now let owner key->updateValue go through
barrier.await(5, TimeUnit.SECONDS);
         // The forked put/replace should still return originalValue as the previous value
assertEquals(originalValue, future.get(5, TimeUnit.SECONDS));
// Remove the interceptor now since we don't want to block ourselves - if using phaser this isn't required
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
assertL1StateOnLocalWrite(nonOwnerCache, ownerCache, key, updateValue);
// The nonOwnerCache should retrieve new value as it isn't in L1
assertEquals(updateValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
}
finally {
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
}
}
public void testNoEntryInL1PutWithConcurrentInvalidation() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in the owner, so the L1 is empty
ownerCache.put(key, firstValue);
assertL1PutWithConcurrentUpdate(nonOwnerCache, ownerCache, false, key, firstValue, "intermediate-put", secondValue);
}
public void testEntryInL1PutWithConcurrentInvalidation() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1PutWithConcurrentUpdate(nonOwnerCache, ownerCache, false, key, firstValue, "intermediate-put", secondValue);
}
public void testNoEntryInL1PutWithConcurrentPut() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in the owner, so the L1 is empty
ownerCache.put(key, firstValue);
assertL1PutWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, false, key, firstValue, "intermediate-put", secondValue);
}
public void testEntryInL1PutWithConcurrentPut() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1PutWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, false, key, firstValue, "intermediate-put", secondValue);
}
public void testNoEntryInL1ReplaceWithConcurrentInvalidation() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in the owner, so the L1 is empty
ownerCache.put(key, firstValue);
assertL1PutWithConcurrentUpdate(nonOwnerCache, ownerCache, true, key, firstValue, "intermediate-put", secondValue);
}
public void testEntryInL1ReplaceWithConcurrentInvalidation() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1PutWithConcurrentUpdate(nonOwnerCache, ownerCache, true, key, firstValue, "intermediate-put", secondValue);
}
public void testNoEntryInL1ReplaceWithConcurrentPut() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in the owner, so the L1 is empty
ownerCache.put(key, firstValue);
assertL1PutWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, true, key, firstValue, "intermediate-put", secondValue);
}
public void testEntryInL1ReplaceWithConcurrentPut() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1PutWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, true, key, firstValue, "intermediate-put", secondValue);
}
public void testNoEntryInL1GetWithConcurrentReplace() throws Throwable {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
// Put the first value in a non owner, so the L1 has the key
ownerCache.put(key, firstValue);
nonOwnerCache.get(key);
assertIsInL1(nonOwnerCache, key);
assertL1PutWithConcurrentUpdate(nonOwnerCache, nonOwnerCache, true, key, firstValue, "intermediate-put", secondValue);
}
public void testNoEntryInL1PutReplacedNullValueConcurrently() throws InterruptedException, ExecutionException, TimeoutException {
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
final Cache<Object, String> ownerCache = getFirstOwner(key);
ControlledRpcManager crm = ControlledRpcManager.replaceRpcManager(nonOwnerCache);
crm.excludeCommands(ClusteredGetCommand.class);
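      // ControlledRpcManager intercepts outgoing RPCs; remote gets are excluded so that only the put is blocked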
try {
Future<String> future = fork(() -> nonOwnerCache.putIfAbsent(key, firstValue));
// Now wait for the get to return and block it for now
ControlledRpcManager.BlockedResponseMap blockedPutResponses =
crm.expectCommand(PutKeyValueCommand.class).send().expectAllResponses();
// Owner should have the new value
assertEquals(firstValue, ownerCache.remove(key));
// Now let owner key->updateValue go through
blockedPutResponses.receive();
         // putIfAbsent found no previous value when it executed on the owner, so it returns null
assertNull(future.get(5, TimeUnit.SECONDS));
         // Defensive cleanup: this test adds no blocking interceptors, so this call is a no-op
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
assertIsNotInL1(nonOwnerCache, key);
// The nonOwnerCache should retrieve new value as it isn't in L1
assertNull(nonOwnerCache.get(key));
assertIsNotInL1(nonOwnerCache, key);
} finally {
crm.revertRpcManager();
}
}
public void testNonOwnerRetrievesValueFromBackupOwnerWhileWrite() throws Exception {
final Cache<Object, String>[] owners = getOwners(key, 2);
final Cache<Object, String> ownerCache = owners[0];
final Cache<Object, String> backupOwnerCache = owners[1];
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
ownerCache.put(key, firstValue);
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
// Add a barrier to block the owner from receiving the get command from the non owner
CyclicBarrier ownerGetBarrier = new CyclicBarrier(2);
addBlockingInterceptor(ownerCache, ownerGetBarrier, GetCacheEntryCommand.class, L1NonTxInterceptor.class, false);
// Add a barrier to block the backup owner from committing the write to memory
CyclicBarrier backupOwnerWriteBarrier = new CyclicBarrier(2);
addBlockingInterceptor(backupOwnerCache, backupOwnerWriteBarrier, PutKeyValueCommand.class, L1NonTxInterceptor.class, true);
try {
Future<String> future = fork(() -> ownerCache.put(key, secondValue));
// Wait until the put is trying to replicate
backupOwnerWriteBarrier.await(5, TimeUnit.SECONDS);
// Wait until the L1 is cleared out from the owners L1 invalidation
eventually(() -> !isInL1(nonOwnerCache, key), 5000, TimeUnit.MILLISECONDS);
// This should come back from the backup owner, since the primary owner is blocked
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
// Now let the backup owner put complete and send response
backupOwnerWriteBarrier.await(5, TimeUnit.SECONDS);
// Wait for the put to complete
future.get(5, TimeUnit.SECONDS);
         // The L1 last-chance invalidation runs asynchronously, so wait until the entry is gone
eventually(() -> !isInL1(nonOwnerCache, key), 5000, TimeUnit.MILLISECONDS);
// The L1 value shouldn't be present
assertIsNotInL1(nonOwnerCache, key);
// Now finally let the get from the non owner to the primary owner go, which at this point will finally
// register the requestor
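         // The barrier trips twice: the first await lets the blocked get proceed, the second waits for it to finish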
ownerGetBarrier.await(5, TimeUnit.SECONDS);
ownerGetBarrier.await(5, TimeUnit.SECONDS);
// The L1 value shouldn't be present
assertIsNotInL1(nonOwnerCache, key);
} finally {
removeAllBlockingInterceptorsFromCache(ownerCache);
removeAllBlockingInterceptorsFromCache(backupOwnerCache);
}
}
/**
* See ISPN-3617
*/
public void testNonOwnerRemovesValueFromL1ProperlyOnWrite() throws InterruptedException, TimeoutException,
BrokenBarrierException, ExecutionException {
final Cache<Object, String>[] owners = getOwners(key, 2);
final Cache<Object, String> ownerCache = owners[0];
final Cache<Object, String> backupOwnerCache = owners[1];
final Cache<Object, String> nonOwnerCache = getFirstNonOwner(key);
ownerCache.put(key, firstValue);
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
// Add a barrier to block the owner from actually updating it's own local value
CyclicBarrier ownerPutBarrier = new CyclicBarrier(2);
addBlockingInterceptor(ownerCache, ownerPutBarrier, PutKeyValueCommand.class, L1NonTxInterceptor.class, true);
// Add a barrier to block the get from being retrieved on the backup owner
CyclicBarrier backupGetBarrier = new CyclicBarrier(2);
addBlockingInterceptor(backupOwnerCache, backupGetBarrier, GetCacheEntryCommand.class, L1NonTxInterceptor.class,
false);
try {
Future<String> future = fork(() -> nonOwnerCache.put(key, secondValue));
// Wait until owner has already replicated to backup owner, but hasn't updated local value
ownerPutBarrier.await(10, TimeUnit.SECONDS);
CommandAckCollector collector = TestingUtil.extractComponent(nonOwnerCache, CommandAckCollector.class);
List<Long> pendingIds = collector.getPendingCommands();
assertEquals(1, pendingIds.size());
eventually(() -> !collector.hasPendingBackupAcks(pendingIds.get(0)));
assertEquals(firstValue, ownerCache.getAdvancedCache().getDataContainer().peek(key).getValue());
assertEquals(secondValue, backupOwnerCache.getAdvancedCache().getDataContainer().peek(key).getValue());
assertEquals(firstValue, nonOwnerCache.get(key));
assertIsInL1(nonOwnerCache, key);
// Let the backup get return now
try {
backupGetBarrier.await(5, TimeUnit.SECONDS);
backupGetBarrier.await(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
// A timeout is expected if the backup never gets the request (because of the staggered get)
}
// Finally let the put complete
ownerPutBarrier.await(10, TimeUnit.SECONDS);
assertEquals(firstValue, future.get(10, TimeUnit.SECONDS));
assertIsNotInL1(nonOwnerCache, key);
assertEquals(secondValue, ownerCache.getAdvancedCache().getDataContainer().peek(key).getValue());
} finally {
removeAllBlockingInterceptorsFromCache(ownerCache);
removeAllBlockingInterceptorsFromCache(backupOwnerCache);
}
}
}
| 15,335
| 41.6
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/TriangleOrderManagerTest.java
|
package org.infinispan.distribution;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.Collections;
import java.util.List;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHash;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheTopology;
import org.testng.annotations.Test;
/**
* Unit test for {@link TriangleOrderManager}.
*
* @author Pedro Ruivo
* @since 9.0
*/
@Test(groups = "unit", testName = "distribution.TriangleOrderManagerTest")
public class TriangleOrderManagerTest extends AbstractInfinispanTest {
private static final Address LOCAL_ADDRESS = new TestAddress(0, "A");
private static LocalizedCacheTopology mockCacheTopology(int topologyId) {
List<Address> members = Collections.singletonList(LOCAL_ADDRESS);
ConsistentHash ch = new ReplicatedConsistentHash(members, new int[]{0});
CacheTopology cacheTopology = new CacheTopology(topologyId, 0, ch, null, CacheTopology.Phase.NO_REBALANCE, members, null);
return new LocalizedCacheTopology(CacheMode.DIST_SYNC, cacheTopology, key -> 0, LOCAL_ADDRESS, true);
}
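   // TriangleOrderManager hands out per-segment sequence numbers so backup owners apply writes in the primary's order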
public void testInvalidTopologyId() {
TriangleOrderManager triangleOrderManager = new TriangleOrderManager(4);
DistributionManager mockDistributionManager = mock(DistributionManager.class);
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(1));
TestingUtil.inject(triangleOrderManager, mockDistributionManager);
try {
triangleOrderManager.next(0, 0);
fail("Exception expected!");
} catch (OutdatedTopologyException e) {
Exceptions.assertException(OutdatedTopologyException.class, e);
}
try {
triangleOrderManager.next(1, 2);
fail("Exception expected!");
} catch (OutdatedTopologyException e) {
Exceptions.assertException(OutdatedTopologyException.class, e);
}
}
public void testSequence() {
TriangleOrderManager triangleOrderManager = new TriangleOrderManager(4);
DistributionManager mockDistributionManager = mock(DistributionManager.class);
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(0));
TestingUtil.inject(triangleOrderManager, mockDistributionManager);
assertEquals(1, triangleOrderManager.next(0, 0));
assertEquals(1, triangleOrderManager.next(1, 0));
assertEquals(2, triangleOrderManager.next(1, 0));
}
public void testSequenceWithTopologyChange() {
int topologyId = 1;
TriangleOrderManager triangleOrderManager = new TriangleOrderManager(5);
DistributionManager mockDistributionManager = mock(DistributionManager.class);
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(topologyId));
TestingUtil.inject(triangleOrderManager, mockDistributionManager);
assertEquals(1, triangleOrderManager.next(1, topologyId));
assertEquals(2, triangleOrderManager.next(1, topologyId));
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(++topologyId));
assertEquals(1, triangleOrderManager.next(1, topologyId));
assertEquals(2, triangleOrderManager.next(1, topologyId));
assertEquals(1, triangleOrderManager.next(4, topologyId));
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(++topologyId));
assertEquals(1, triangleOrderManager.next(1, topologyId));
assertEquals(1, triangleOrderManager.next(2, topologyId));
assertEquals(1, triangleOrderManager.next(3, topologyId));
assertEquals(1, triangleOrderManager.next(4, topologyId));
}
public void testDeliverOrder() {
TriangleOrderManager triangleOrderManager = new TriangleOrderManager(4);
DistributionManager mockDistributionManager = mock(DistributionManager.class);
when(mockDistributionManager.getCacheTopology()).then(i -> mockCacheTopology(0));
TestingUtil.inject(triangleOrderManager, mockDistributionManager);
assertTrue(triangleOrderManager.isNext(1, 1, 0));
assertFalse(triangleOrderManager.isNext(1, 2, 0));
assertFalse(triangleOrderManager.isNext(1, 3, 0));
triangleOrderManager.markDelivered(1, 1, 0);
assertTrue(triangleOrderManager.isNext(1, 2, 0));
assertFalse(triangleOrderManager.isNext(1, 3, 0));
triangleOrderManager.markDelivered(1, 2, 0);
assertTrue(triangleOrderManager.isNext(1, 3, 0));
triangleOrderManager.markDelivered(1, 3, 0);
triangleOrderManager.markDelivered(2, 1, 0);
triangleOrderManager.markDelivered(3, 1, 0);
triangleOrderManager.markDelivered(3, 2, 0);
}
public void testUnblockOldTopology() {
TriangleOrderManager triangleOrderManager = new TriangleOrderManager(4);
DistributionManager mockDistributionManager = mock(DistributionManager.class);
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(1));
TestingUtil.inject(triangleOrderManager, mockDistributionManager);
//same topology, but incorrect sequence number
assertFalse(triangleOrderManager.isNext(0, 2, 1));
//lower topology. should unlock everything
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(2));
assertTrue(triangleOrderManager.isNext(0, 2, 1));
//higher topology than current one, everything is blocked
assertFalse(triangleOrderManager.isNext(0, 1, 3));
//unlocks everything (correct sequence number)
when(mockDistributionManager.getCacheTopology()).thenReturn(mockCacheTopology(3));
assertTrue(triangleOrderManager.isNext(0, 1, 3));
}
}
| 6,236
| 42.3125
| 128
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/DistSyncFuncTest.java
|
package org.infinispan.distribution;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commons.util.ObjectDuplicator;
import org.infinispan.context.Flag;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.function.SerializableBiFunction;
import org.infinispan.util.function.SerializableFunction;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "distribution.DistSyncFuncTest")
public class DistSyncFuncTest extends BaseDistFunctionalTest<Object, String> {
public DistSyncFuncTest() {
testRetVals = true;
}
public void testLocationConsensus() {
String[] keys = new String[100];
Random r = new Random();
for (int i = 0; i < 100; i++) keys[i] = Integer.toHexString(r.nextInt());
for (String key : keys) {
List<Address> owners = new ArrayList<>();
for (Cache<Object, String> c : caches) {
boolean isOwner = isOwner(c, key);
if (isOwner) owners.add(addressOf(c));
boolean secondCheck = getCacheTopology(c).getWriteOwners(key).contains(addressOf(c));
assertTrue("Second check failed for key " + key + " on cache " + addressOf(c) + " isO = " + isOwner + " sC = " + secondCheck, isOwner == secondCheck);
}
// check consensus
assertOwnershipConsensus(key);
assertEquals("Expected " + numOwners + " owners for key " + key + " but was " + owners, numOwners, owners.size());
}
}
protected void assertOwnershipConsensus(String key) {
      List<Address> l1 = getCacheTopology(c1).getDistribution(key).writeOwners();
      List<Address> l2 = getCacheTopology(c2).getDistribution(key).writeOwners();
      List<Address> l3 = getCacheTopology(c3).getDistribution(key).writeOwners();
      List<Address> l4 = getCacheTopology(c4).getDistribution(key).writeOwners();
assertEquals("L1 "+l1+" and L2 "+l2+" don't agree.", l1, l2);
assertEquals("L2 "+l2+" and L3 "+l3+" don't agree.", l2, l3);
assertEquals("L3 "+l3+" and L4 "+l4+" don't agree.", l3, l4);
}
public void testBasicDistribution() throws Throwable {
for (Cache<Object, String> c : caches)
assertTrue(c.isEmpty());
final Object k1 = getKeyForCache(caches.get(0));
getOwners(k1)[0].put(k1, "value");
// No non-owners have requested the key, so no invalidations
asyncWait(k1, PutKeyValueCommand.class);
// should be available everywhere!
assertOnAllCachesAndOwnership(k1, "value");
// and should now be in L1
if (l1CacheEnabled) {
for (Cache<Object, String> c : caches) {
if (isOwner(c, k1)) {
assertIsInContainerImmortal(c, k1);
} else {
assertIsInL1(c, k1);
}
}
}
}
public void testPutFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
Object retval = nonOwner.put("k1", "value2");
asyncWait("k1", PutKeyValueCommand.class);
if (testRetVals) assertEquals("value", retval);
assertOnAllCachesAndOwnership("k1", "value2");
}
public void testPutIfAbsentFromNonOwner() {
initAndTest();
log.trace("Here it begins");
Object retval = getFirstNonOwner("k1").putIfAbsent("k1", "value2");
if (testRetVals) assertEquals("value", retval);
asyncWaitOnPrimary("k1", PutKeyValueCommand.class);
assertOnAllCachesAndOwnership("k1", "value");
c1.clear();
asyncWait(null, ClearCommand.class);
retval = getFirstNonOwner("k1").putIfAbsent("k1", "value2");
asyncWait("k1", PutKeyValueCommand.class);
assertOnAllCachesAndOwnership("k1", "value2");
if (testRetVals) assertNull(retval);
}
public void testRemoveFromNonOwner() {
initAndTest();
Object retval = getFirstNonOwner("k1").remove("k1");
asyncWait("k1", RemoveCommand.class);
if (testRetVals) assertEquals("value", retval);
assertRemovedOnAllCaches("k1");
}
public void testConditionalRemoveFromNonOwner() {
initAndTest();
log.trace("Here we start");
boolean retval = getFirstNonOwner("k1").remove("k1", "value2");
if (testRetVals) assertFalse("Should not have removed entry", retval);
asyncWaitOnPrimary("k1", RemoveCommand.class);
assertOnAllCachesAndOwnership("k1", "value");
assertEquals("value", caches.get(1).get("k1"));
retval = getFirstNonOwner("k1").remove("k1", "value");
asyncWait("k1", RemoveCommand.class);
if (testRetVals) assertTrue("Should have removed entry", retval);
assertNull("expected null but received " + caches.get(1).get("k1"), caches.get(1).get("k1"));
assertRemovedOnAllCaches("k1");
}
public void testReplaceFromNonOwner() {
initAndTest();
Object retval = getFirstNonOwner("k1").replace("k1", "value2");
if (testRetVals) assertEquals("value", retval);
// Replace going to backup owners becomes PKVC
asyncWait("k1", cmd -> Stream.of(ReplaceCommand.class, PutKeyValueCommand.class)
.anyMatch(clazz -> clazz.isInstance(cmd)));
assertOnAllCachesAndOwnership("k1", "value2");
c1.clear();
asyncWait(null, ClearCommand.class);
retval = getFirstNonOwner("k1").replace("k1", "value2");
if (testRetVals) assertNull(retval);
assertRemovedOnAllCaches("k1");
}
public void testConditionalReplaceFromNonOwner() {
initAndTest();
Cache<Object, String> nonOwner = getFirstNonOwner("k1");
boolean retval = nonOwner.replace("k1", "valueX", "value2");
if (testRetVals) assertFalse("Should not have replaced", retval);
asyncWaitOnPrimary("k1", ReplaceCommand.class);
assertOnAllCachesAndOwnership("k1", "value");
assertFalse(extractComponent(nonOwner, DistributionManager.class).getCacheTopology().isWriteOwner("k1"));
retval = nonOwner.replace("k1", "value", "value2");
asyncWait("k1", cmd -> Stream.of(ReplaceCommand.class, PutKeyValueCommand.class)
.anyMatch(clazz -> clazz.isInstance(cmd)));
if (testRetVals) assertTrue("Should have replaced", retval);
assertOnAllCachesAndOwnership("k1", "value2");
}
public void testClear() throws InterruptedException {
for (Cache<Object, String> c : caches)
assertTrue(c.isEmpty());
for (int i = 0; i < 10; i++) {
getOwners("k" + i)[0].put("k" + i, "value" + i);
// There will be no caches to invalidate as this is the first command of the test
asyncWait("k" + i, PutKeyValueCommand.class);
assertOnAllCachesAndOwnership("k" + i, "value" + i);
}
// this will fill up L1 as well
for (int i = 0; i < 10; i++) assertOnAllCachesAndOwnership("k" + i, "value" + i);
for (Cache<Object, String> c : caches)
assertFalse(c.isEmpty());
c1.clear();
asyncWait(null, ClearCommand.class);
for (Cache<Object, String> c : caches)
assertTrue(c.isEmpty());
}
public void testKeyValueEntryCollections() {
c1.put("1", "one");
asyncWait("1", PutKeyValueCommand.class);
if (c2 != null) {
c2.put("2", "two");
asyncWait("2", PutKeyValueCommand.class);
}
if (c3 != null) {
c3.put("3", "three");
asyncWait("3", PutKeyValueCommand.class);
}
if (c4 != null) {
c4.put("4", "four");
asyncWait("4", PutKeyValueCommand.class);
}
for (Cache c : caches) {
Set expKeys = TestingUtil.getInternalKeys(c);
Collection expValues = TestingUtil.getInternalValues(c);
Set expKeyEntries = ObjectDuplicator.duplicateSet(expKeys);
Collection expValueEntries = ObjectDuplicator.duplicateCollection(expValues);
         // CACHE_MODE_LOCAL prohibits RPCs and SKIP_OWNERSHIP_CHECK forces all entries in the data container to be read
AdvancedCache cacheWithIgnoredOwnership = c.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL, Flag.SKIP_OWNERSHIP_CHECK);
Set keys = cacheWithIgnoredOwnership.keySet();
for (Object key : keys)
assertTrue(expKeys.remove(key));
assertTrue("Did not see keys " + expKeys + " in iterator!", expKeys.isEmpty());
Collection values = cacheWithIgnoredOwnership.values();
for (Object value : values)
assertTrue(expValues.remove(value));
assertTrue("Did not see keys " + expValues + " in iterator!", expValues.isEmpty());
Set<Map.Entry> entries = cacheWithIgnoredOwnership.entrySet();
for (Map.Entry entry : entries) {
assertTrue(expKeyEntries.remove(entry.getKey()));
assertTrue(expValueEntries.remove(entry.getValue()));
}
assertTrue("Did not see keys " + expKeyEntries + " in iterator!", expKeyEntries.isEmpty());
assertTrue("Did not see keys " + expValueEntries + " in iterator!", expValueEntries.isEmpty());
}
}
public void testLockedStreamSetValue() {
int size = 5;
for (int i = 0; i < size; i++) {
getOwners("k" + i)[0].put("k" + i, "value" + i);
// There will be no caches to invalidate as this is the first command of the test
asyncWait("k" + i, PutKeyValueCommand.class);
assertOnAllCachesAndOwnership("k" + i, "value" + i);
}
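      // lockedStream() holds the key lock on the primary owner while the BiConsumer runs, so setValue is applied atomically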
c1.getAdvancedCache().lockedStream().forEach((c, e) -> e.setValue(e.getValue() + "-changed"));
for (int i = 0; i < size; i++) {
String key = "k" + i;
asyncWait(key, c -> commandIsPutForKey(key, c));
Cache<Object, String>[] caches = getOwners(key);
for (Cache<Object, String> cache : caches) {
assertEquals("value" + i + "-changed",
cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).get(key));
}
}
}
public void testLockedStreamPutValue() {
int size = 5;
for (int i = 0; i < size; i++) {
getOwners("k" + i)[0].put("k" + i, "value" + i);
// There will be no caches to invalidate as this is the first command of the test
asyncWait("k" + i, PutKeyValueCommand.class);
assertOnAllCachesAndOwnership("k" + i, "value" + i);
}
c1.getAdvancedCache().lockedStream().forEach((c, e) -> c.put(e.getKey(), e.getValue() + "-changed"));
for (int i = 0; i < size; i++) {
String key = "k" + i;
asyncWait(key, c -> commandIsPutForKey(key, c));
Cache<Object, String>[] caches = getOwners(key);
for (Cache<Object, String> cache : caches) {
assertEquals("value" + i + "-changed",
cache.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).get(key));
}
}
}
private boolean commandIsPutForKey(String key, VisitableCommand c) {
return c instanceof PutKeyValueCommand && key.equals(((PutKeyValueCommand) c).getKey());
}
public void testComputeFromNonOwner() throws InterruptedException {
// compute function applied
initAndTest();
Object retval = getFirstNonOwner("k1").compute("k1", (k, v) -> "computed_" + k + "_" + v);
asyncWait("k1", ComputeCommand.class);
if (testRetVals) assertEquals("computed_k1_value", retval);
assertOnAllCachesAndOwnership("k1", "computed_k1_value");
// remove if after compute value is null
retval = getFirstNonOwner("k1").compute("k1", (v1, v2) -> null);
asyncWait("k1", ComputeCommand.class);
if (testRetVals) assertNull(retval);
assertRemovedOnAllCaches("k1");
// put computed value if absent
retval = getFirstNonOwner("notThere").compute("notThere", (k, v) -> "add_" + k);
asyncWait("notThere", ComputeCommand.class);
if (testRetVals) assertEquals("add_notThere", retval);
assertOnAllCachesAndOwnership("notThere", "add_notThere");
RuntimeException computeRaisedException = new RuntimeException("hi there");
SerializableBiFunction<Object, Object, String> mappingToException = (k, v) -> {
throw computeRaisedException;
};
expectException(RemoteException.class, () -> getFirstNonOwner("k1").compute("k1", mappingToException));
}
public void testComputeIfPresentFromNonOwner() throws InterruptedException {
// compute function applied
initAndTest();
Object retval = getFirstNonOwner("k1").computeIfPresent("k1", (k, v) -> "computed_" + k + "_" + v);
if (testRetVals) assertEquals("computed_k1_value", retval);
asyncWait("k1", ComputeCommand.class);
assertOnAllCachesAndOwnership("k1", "computed_k1_value");
RuntimeException computeRaisedException = new RuntimeException("hi there");
SerializableBiFunction<Object, Object, String> mappingToException = (k, v) -> {
throw computeRaisedException;
};
expectException(RemoteException.class, () -> getFirstNonOwner("k1").computeIfPresent("k1", mappingToException));
// remove if after compute value is null
retval = getFirstNonOwner("k1").computeIfPresent("k1", (v1, v2) -> null);
asyncWait("k1", ComputeCommand.class);
if (testRetVals) assertNull(retval);
assertRemovedOnAllCaches("k1");
// do nothing if absent
retval = getFirstNonOwner("notThere").computeIfPresent("notThere", (k, v) -> "add_" + k);
asyncWaitOnPrimary("notThere", ComputeCommand.class);
if (testRetVals) assertNull(retval);
assertRemovedOnAllCaches("notThere");
}
public void testComputeIfAbsentFromNonOwner() throws InterruptedException {
// do nothing if value exists
initAndTest();
Object retval = getFirstNonOwner("k1").computeIfAbsent("k1", (k) -> "computed_" + k);
if (testRetVals) assertEquals("value", retval);
// Since the command fails on primary it won't be replicated to the other nodes
asyncWaitOnPrimary("k1", ComputeIfAbsentCommand.class);
assertOnAllCachesAndOwnership("k1", "value");
// Compute key and add result value if absent
retval = getFirstNonOwner("notExists").computeIfAbsent("notExists", (k) -> "computed_" + k);
if (testRetVals) assertEquals("computed_notExists", retval);
asyncWait("notExists", ComputeIfAbsentCommand.class);
assertOnAllCachesAndOwnership("notExists", "computed_notExists");
// do nothing if function result is null
retval = getFirstNonOwner("doNothing").computeIfAbsent("doNothing", k -> null);
asyncWaitOnPrimary("doNothing", ComputeIfAbsentCommand.class);
if (testRetVals) assertNull(retval);
assertRemovedOnAllCaches("doNothing");
RuntimeException computeRaisedException = new RuntimeException("hi there");
SerializableFunction<Object, String> mappingToException = k -> {
throw computeRaisedException;
};
expectException(RemoteException.class, () -> getFirstNonOwner("somethingWrong").computeIfAbsent("somethingWrong", mappingToException));
}
public void testMergeFromNonOwner() {
initAndTest();
// exception raised by the user
RuntimeException mergeException = new RuntimeException("hi there");
expectException(RemoteException.class, () -> getFirstNonOwner("k1").merge("k1", "ex", (k, v) -> {
throw mergeException;
}));
asyncWaitOnPrimary("k1", ReadWriteKeyCommand.class);
assertOnAllCachesAndOwnership("k1", "value");
// merge function applied
Object retval = getFirstNonOwner("k1").merge("k1", "value2", (v1, v2) -> "merged_" + v1 + "_" + v2);
asyncWait("k1", ReadWriteKeyCommand.class);
if (testRetVals) assertEquals("merged_value_value2", retval);
assertOnAllCachesAndOwnership("k1", "merged_value_value2");
// remove when null
retval = getFirstNonOwner("k1").merge("k1", "valueRem", (v1, v2) -> null);
asyncWait("k1", ReadWriteKeyCommand.class);
if (testRetVals) assertNull(retval);
assertRemovedOnAllCaches("k1");
// put if absent
retval = getFirstNonOwner("notThere").merge("notThere", "value2", (v1, v2) -> "merged_" + v1 + "_" + v2);
asyncWait("notThere", ReadWriteKeyCommand.class);
if (testRetVals) assertEquals("value2", retval);
assertOnAllCachesAndOwnership("notThere", "value2");
}
}
| 17,377
| 39.413953
| 162
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashLeaveTestBase.java
|
package org.infinispan.distribution.rehash;
import org.infinispan.test.TestingUtil;
public abstract class RehashLeaveTestBase extends RehashTestBase {
void waitForRehashCompletion() {
TestingUtil.blockUntilViewsReceived(60000, false, caches);
TestingUtil.waitForNoRebalance(caches);
}
}
| 307
| 27
| 66
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/TxNonL1StateTransferOverwriteTest.java
|
package org.infinispan.distribution.rehash;
import org.infinispan.test.op.TestWriteOperation;
import org.testng.annotations.Test;
/**
 * Test that ensures that state transfer values aren't overwritten when using a transactional cache without L1 enabled.
*
* @author William Burns
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.rehash.TxNonL1StateTransferOverwriteTest")
public class TxNonL1StateTransferOverwriteTest extends BaseTxStateTransferOverwriteTest {
public TxNonL1StateTransferOverwriteTest() {
l1CacheEnabled = false;
}
public void testNonCoordinatorOwnerLeavingDuringReplace() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.REPLACE);
}
public void testNonCoordinatorOwnerLeavingDuringReplaceExact() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.REPLACE_EXACT);
}
public void testNonCoordinatorOwnerLeavingDuringRemove() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.REMOVE);
}
public void testNonCoordinatorOwnerLeavingDuringRemoveExact() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.REMOVE_EXACT);
}
public void testNonCoordinatorOwnerLeavingDuringPutOverwrite() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.PUT_OVERWRITE);
}
}
| 1,510
| 38.763158
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/NonTxJoinerBecomingBackupOwnerTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInterceptor;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnOutboundRpc;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.distribution.MagicKey;
import org.infinispan.statetransfer.StateTransferInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.CommandMatcher;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.op.TestOperation;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
/**
* Test that a joiner that became a backup owner for a key does not check the previous value when doing a conditional
 * write. Also checks that a write command executed during state transfer doesn't perform a remote get to obtain
 * the previous value from one of the readCH owners.
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "distribution.rehash.NonTxJoinerBecomingBackupOwnerTest")
@CleanupAfterMethod
public class NonTxJoinerBecomingBackupOwnerTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() throws Throwable {
createCluster(TestDataSCI.INSTANCE, getConfigurationBuilder(), 2);
waitForClusterToForm();
}
private ConfigurationBuilder getConfigurationBuilder() {
ConfigurationBuilder c = new ConfigurationBuilder();
c.clustering().cacheMode(CacheMode.DIST_SYNC);
c.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
return c;
}
public void testBackupOwnerJoiningDuringPut() throws Exception {
doTest(TestWriteOperation.PUT_CREATE);
}
public void testBackupOwnerJoiningDuringPutIfAbsent() throws Exception {
doTest(TestWriteOperation.PUT_IF_ABSENT);
}
public void testBackupOwnerJoiningDuringReplace() throws Exception {
doTest(TestWriteOperation.REPLACE);
}
public void testBackupOwnerJoiningDuringReplaceWithPreviousValue() throws Exception {
doTest(TestWriteOperation.REPLACE_EXACT);
}
public void testBackupOwnerJoiningDuringRemove() throws Exception {
doTest(TestWriteOperation.REMOVE);
}
public void testBackupOwnerJoiningDuringRemoveWithPreviousValue() throws Exception {
doTest(TestWriteOperation.REMOVE_EXACT);
}
protected void doTest(TestOperation op) throws Exception {
final StateSequencer sequencer = new StateSequencer();
sequencer.logicalThread("st", "st:cache0_before_send_state");
sequencer.logicalThread("write", "write:before_start", "write:start", "write:cache1_before_return", "write:cache2_before_dist", "write:end", "write:after_end");
sequencer.logicalThread("remote_get_cache0", "remote_get_cache0");
sequencer.logicalThread("remote_get_cache1", "remote_get_cache1");
sequencer.order("write:end", "remote_get_cache0").order("write:end", "remote_get_cache1");
sequencer.action("st:cache0_before_send_state", () -> {
sequencer.advance("write:before_start");
// The whole write logical thread happens here
sequencer.advance("write:after_end");
return null;
});
final AdvancedCache<Object, Object> cache0 = advancedCache(0);
final AdvancedCache<Object, Object> cache1 = advancedCache(1);
// We only block the StateResponseCommand on cache0, because that's the node cache2 will ask for the magic key
advanceOnOutboundRpc(sequencer, cache0, matchCommand(StateResponseCommand.class).build()).before("st:cache0_before_send_state");
// Prohibit any remote get from cache2 to either cache0 or cache1
advanceOnInterceptor(sequencer, cache0, StateTransferInterceptor.class, matchCommand(GetKeyValueCommand.class).build()).before("remote_get_cache0");
advanceOnInterceptor(sequencer, cache1, StateTransferInterceptor.class, matchCommand(GetKeyValueCommand.class).build()).before("remote_get_cache1");
// Add a new member, but don't start the cache yet
ConfigurationBuilder c = getConfigurationBuilder();
c.clustering().stateTransfer().awaitInitialTransfer(false);
addClusterEnabledCacheManager(TestDataSCI.INSTANCE, c);
// Start the cache and wait until it's a member in the write CH
log.tracef("Starting the cache on the joiner");
final AdvancedCache<Object,Object> cache2 = advancedCache(2);
// Wait for the write CH to contain the joiner everywhere
eventually(() -> cache0.getRpcManager().getMembers().size() == 3 &&
cache1.getRpcManager().getMembers().size() == 3 &&
cache2.getRpcManager().getMembers().size() == 3);
CommandMatcher writeCommandMatcher = matchCommand(op.getCommandClass()).build();
// Allow the value to be written on cache1 before "write:cache1_before_return"
advanceOnInterceptor(sequencer, cache1, StateTransferInterceptor.class, writeCommandMatcher).before("write:cache1_before_return");
// The remote get (if any) will happen after "write:cache2_before_dist"
advanceOnInterceptor(sequencer, cache2, StateTransferInterceptor.class, writeCommandMatcher).before("write:cache2_before_dist");
// Wait for cache0 to send the StateResponseCommand to cache2, but keep it blocked
sequencer.advance("write:start");
final MagicKey key = getKeyForCache2();
// Prepare for replace: put a previous value in cache0 and cache1
if (op.getPreviousValue() != null) {
cache0.withFlags(Flag.CACHE_MODE_LOCAL).put(key, op.getPreviousValue());
cache1.withFlags(Flag.CACHE_MODE_LOCAL).put(key, op.getPreviousValue());
}
log.tracef("Initial value set, %s = %s", key, op.getPreviousValue());
// Put from cache0 with cache0 as primary owner, cache2 will become a backup owner for the retry
// The put command will be blocked on cache1 and cache2.
Future<Object> future = fork(() -> op.perform(cache0, key));
// Check that the put command didn't fail
Object result = future.get(10, TimeUnit.SECONDS);
assertEquals(op.getReturnValue(), result);
log.tracef("%s operation is done", op);
// Allow the state transfer to finish, and any remote gets
sequencer.advance("write:end");
// Wait for the topology to change everywhere
TestingUtil.waitForNoRebalance(cache0, cache1, cache2);
// Stop blocking get commands and check the value on all the nodes
sequencer.stop();
assertEquals(op.getValue(), cache0.get(key));
assertEquals(op.getValue(), cache1.get(key));
assertEquals(op.getValue(), cache2.get(key));
}
private MagicKey getKeyForCache2() {
return new MagicKey(cache(0), cache(1), cache(2));
}
}
| 7,473
| 45.42236
| 166
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashAfterJoinWithPreloadTest.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Test with a distributed cache (numOwners=1), a shared cache store and 'preload' enabled
* (ISPN-1964).
*
* @author Carsten Lohmann
*/
@Test(testName = "distribution.rehash.RehashAfterJoinWithPreloadTest", groups = "functional")
public class RehashAfterJoinWithPreloadTest extends MultipleCacheManagersTest {
private static final Log log = LogFactory.getLog(RehashAfterJoinWithPreloadTest.class);
public static final int NUM_KEYS = 20;
private final String testCacheName = "testCache" + getClass().getSimpleName();
@Override
protected void createCacheManagers() {
// cacheManagers started one after another in test()
}
private void addNewCacheManagerAndWaitForRehash() {
EmbeddedCacheManager cacheManager = addClusterEnabledCacheManager(getDefaultClusteredCacheConfig(
CacheMode.DIST_SYNC, false));
cacheManager.defineConfiguration(testCacheName, buildCfg(true));
log.debugf("\n\nstarted CacheManager #%d", getCacheManagers().size() - 1);
waitForClusterToForm(testCacheName);
}
private Configuration buildCfg(boolean clustered) {
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.persistence()
.addStore(DummyInMemoryStoreConfigurationBuilder.class)
.storeName(testCacheName)
.preload(true)
.shared(true)
.purgeOnStartup(false);
cb.persistence().passivation(false);
if (clustered) {
cb.clustering().l1().disable();
cb.clustering().cacheMode(CacheMode.DIST_SYNC);
cb.clustering().hash().numOwners(1); // one owner!
cb.clustering().stateTransfer().fetchInMemoryState(true);
cb.clustering().hash().groups().enabled();
}
return cb.build(true);
}
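   // With a shared store and preload enabled, every joiner loads all keys at startup, including keys it will not own after rehash (ISPN-1964)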
public void test() {
// start a cluster that uses this cache store
// add 1st member
addNewCacheManagerAndWaitForRehash();
// insert the data in the cache and check the contents
putTestDataInCacheStore();
printCacheContents();
// stop the 1st member
killMember(0);
// re-add the 1st member
addNewCacheManagerAndWaitForRehash();
printCacheContents();
assertEvenDistribution();
// add 2nd member
addNewCacheManagerAndWaitForRehash();
printCacheContents();
assertEvenDistribution();
// add 3rd member
addNewCacheManagerAndWaitForRehash();
printCacheContents();
assertEvenDistribution();
}
private void assertEvenDistribution() {
for (int i = 0; i < getCacheManagers().size(); i++) {
Cache<String, String> testCache = manager(i).getCache(testCacheName);
DistributionManager dm = testCache.getAdvancedCache().getDistributionManager();
DataContainer dataContainer = testCache.getAdvancedCache().getDataContainer();
// Note there is stale data in the cache store that this owner no longer owns
for (int j = 0; j < NUM_KEYS; j++) {
String key = "key" + j;
// each key must only occur once (numOwners is one)
if (dm.getCacheTopology().isReadOwner(key)) {
assertTrue("Key '" + key + "' is owned by node " + address(i) + " but it doesn't appear there",
dataContainer.containsKey(key));
} else {
assertTrue("Key '" + key + "' is not owned by node " + address(i) + " but it still appears there",
!dataContainer.containsKey(key));
}
}
}
}
private void putTestDataInCacheStore() {
Cache<String, String> cache = manager(0).getCache(testCacheName);
for (int i = 0; i < NUM_KEYS; i++) {
cache.put("key" + i, Integer.toString(i));
}
log.debugf("added %d entries to test cache", NUM_KEYS);
}
private void printCacheContents() {
log.debugf("%d cache manager(s)", getCacheManagers().size());
for (int i = 0; i < getCacheManagers().size(); i++) {
Cache<String, String> testCache = manager(i).getCache(testCacheName);
log.debugf(" Contents of Cache with CacheManager #%d (%s, all members: %s)", i, address(i),
testCache.getAdvancedCache().getRpcManager().getMembers());
Set<String> keySet = testCache.keySet();
log.debugf(" keySet = %s", keySet);
for (String key : keySet) {
log.debugf(" key: %s value: %s", key, testCache.get(key));
}
}
}
}
| 5,220
| 35.767606
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashCompletedOnJoinTest.java
|
package org.infinispan.distribution.rehash;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.MagicKey;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestDataSCI;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.RehashCompletedOnJoinTest")
public class RehashCompletedOnJoinTest extends BaseDistFunctionalTest<Object, String> {
public RehashCompletedOnJoinTest() {
INIT_CLUSTER_SIZE = 2;
performRehashing = true;
}
public void testJoinComplete() {
List<MagicKey> keys = new ArrayList<MagicKey>(Arrays.asList(
new MagicKey("k1", c1), new MagicKey("k2", c2),
new MagicKey("k3", c1), new MagicKey("k4", c2)
));
int i = 0;
for (Cache<Object, String> c : caches) c.put(keys.get(i++), "v" + i);
log.infof("Initialized with keys %s", keys);
EmbeddedCacheManager joinerManager = addClusterEnabledCacheManager(TestDataSCI.INSTANCE);
joinerManager.defineConfiguration(cacheName, configuration.build());
      Cache<Object, String> joiner = joinerManager.getCache(cacheName);
DistributionManager dmi = joiner.getAdvancedCache().getDistributionManager();
assert dmi.isJoinComplete();
}
}
| 1,456
| 34.536585
| 95
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/DataLossOnJoinOneOwnerTest.java
|
package org.infinispan.distribution.rehash;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
* Tests data loss during state transfer when a single data owner is configured.
* @author Sanne Grinovero <sanne@infinispan.org> (C) 2011 Red Hat Inc.
* @author Alex Heneveld
* @author Manik Surtani
*/
@Test(groups = "functional", testName = "distribution.rehash.DataLossOnJoinOneOwnerTest")
public class DataLossOnJoinOneOwnerTest extends AbstractInfinispanTest {
private static final String VALUE = DataLossOnJoinOneOwnerTest.class.getName() + "value";
private static final String KEY = DataLossOnJoinOneOwnerTest.class.getName() + "key";
EmbeddedCacheManager cm1;
EmbeddedCacheManager cm2;
/**
* It seems that sometimes when a new node joins, existing data is lost.
    * Cannot reproduce with numOwners=2.
*/
public void testDataLossOnJoin() {
try {
cm1 = newCM();
Cache<String, String> c1 = cm1.getCache();
c1.put(KEY, VALUE);
hasKey(c1);
cm2 = newCM();
Cache<String, String> c2 = cm2.getCache();
TestingUtil.blockUntilViewsReceived(45000, cm1, cm2);
hasKey(c1);
hasKey(c2);
}
finally {
TestingUtil.killCacheManagers(cm1, cm2);
}
}
private void hasKey(Cache<String, String> cache) {
Object object = cache.get(KEY);
assert VALUE.equals(object);
}
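   /**
    * Builds a DIST_SYNC cache manager with numOwners=1 and L1 disabled, so each key has exactly
    * one copy and any state-transfer bug shows up directly as data loss.
    */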
public EmbeddedCacheManager newCM() {
ConfigurationBuilder c = new ConfigurationBuilder();
c.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numOwners(1)
.clustering().l1().disable();
return TestCacheManagerFactory.createClusteredCacheManager(c);
}
}
| 2,066
| 32.885246
| 92
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/ConcurrentOverlappingLeaveTest.java
|
package org.infinispan.distribution.rehash;
import java.util.Arrays;
import org.infinispan.manager.CacheContainer;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.ConcurrentOverlappingLeaveTest")
public class ConcurrentOverlappingLeaveTest extends RehashLeaveTestBase {
Address l1, l2;
{
// since we have two leavers, for some keys we're going to lose 2 owners
// we set numOwners to 3 so that all keys will have at least 1 owner remaining
numOwners = 3;
}
void performRehashEvent(boolean offline) {
l1 = addressOf(c3);
l2 = addressOf(c4);
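      // Drop both nodes from the test's bookkeeping before killing their managers together,
      // producing two concurrent, overlapping leave events.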
CacheContainer cm3 = c3.getCacheManager();
CacheContainer cm4 = c4.getCacheManager();
cacheManagers.removeAll(Arrays.asList(cm3, cm4));
caches.removeAll(Arrays.asList(c3, c4));
TestingUtil.killCacheManagers(cm3, cm4);
}
}
| 984
| 28.848485
| 93
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/TxL1StateTransferOverwriteTest.java
|
package org.infinispan.distribution.rehash;
import org.testng.annotations.Test;
/**
* Test that ensures that state transfer values aren't overridden with a tx cache with L1 enabled.
*
* @author William Burns
* @since 6.0
*/
@Test(groups = "functional", testName = "distribution.rehash.TxL1StateTransferOverwriteTest")
public class TxL1StateTransferOverwriteTest extends BaseTxStateTransferOverwriteTest {
public TxL1StateTransferOverwriteTest() {
l1CacheEnabled = true;
}
}
| 493
| 28.058824
| 98
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/OptimisticPrimaryOwnerCrashDuringPrepareTest.java
|
package org.infinispan.distribution.rehash;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.concurrent.StateSequencerUtil.advanceOnInboundRpc;
import static org.infinispan.test.concurrent.StateSequencerUtil.matchCommand;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.Future;
import jakarta.transaction.RollbackException;
import jakarta.transaction.Status;
import javax.transaction.xa.XAException;
import org.infinispan.commands.tx.VersionedPrepareCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransaction;
import org.infinispan.util.ControlledConsistentHashFactory;
import org.infinispan.util.concurrent.TimeoutException;
import org.testng.annotations.Test;
/**
* Test that if the primary owner crashes while two transactions are in the prepare phase, only one of them will be able
* to commit the transaction.
*
* @author Dan Berindei
* @since 8.0
*/
@Test(groups = "functional", testName = "distribution.rehash.OptimisticPrimaryOwnerCrashDuringPrepareTest")
@CleanupAfterMethod
public class OptimisticPrimaryOwnerCrashDuringPrepareTest extends MultipleCacheManagersTest {
public void testPrimaryOwnerCrash() throws Exception {
// cache 0 is the originator and backup, cache 1 is the primary owner
StateSequencer ss = new StateSequencer();
ss.logicalThread("main", "block_prepare", "crash_primary", "resume_prepare");
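      // The sequencer serialises the scenario: tx2's prepare is blocked when it reaches the
      // primary, the primary is killed, and only then is the prepare allowed to resume.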
tm(0).begin();
cache(0).put("k", "v1");
EmbeddedTransaction tx1 = (EmbeddedTransaction) tm(0).suspend();
tx1.runPrepare();
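      // tx1 is now prepared and holds the lock for "k" on the primary owner, so tx2's prepare
      // below will contend for the same lock.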
advanceOnInboundRpc(ss, cache(1), matchCommand(VersionedPrepareCommand.class).build())
.before("block_prepare", "resume_prepare");
Future<EmbeddedTransaction> tx2Future = fork(() -> {
tm(0).begin();
cache(0).put("k", "v2");
EmbeddedTransaction tx2 = (EmbeddedTransaction) tm(0).suspend();
tx2.runPrepare();
return tx2;
});
ss.enter("crash_primary");
killMember(1);
ss.exit("crash_primary");
// tx2 prepare times out trying to acquire the lock, but does not throw an exception at this time
EmbeddedTransaction tx2 = tx2Future.get(30, SECONDS);
assertEquals(Status.STATUS_MARKED_ROLLBACK, tx2.getStatus());
Exceptions.expectException(RollbackException.class, XAException.class, TimeoutException.class, () -> tx2.runCommit(false));
// tx1 should commit successfully
tx1.runCommit(false);
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder config = new ConfigurationBuilder();
config.clustering().cacheMode(CacheMode.DIST_SYNC);
config.transaction().lockingMode(LockingMode.OPTIMISTIC);
config.clustering().locking().lockAcquisitionTimeout(2, SECONDS);
config.clustering().hash().numSegments(1)
.consistentHashFactory(new ControlledConsistentHashFactory.Default(1, 0));
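      // Single segment with node 1 as primary owner and node 0 as backup, matching the
      // "cache 0 is the originator and backup, cache 1 is the primary owner" setup above.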
config.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup())
.cacheStopTimeout(1, SECONDS);
createCluster(config, 2);
waitForClusterToForm();
}
}
| 3,619
| 40.609195
| 129
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/XAResourceAdapter.java
|
package org.infinispan.distribution.rehash;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.infinispan.transaction.xa.recovery.RecoveryManager;
/**
 * No-op {@link XAResource} adapter; subclasses override just the methods they need.
*
* @author Manik Surtani
* @since 4.0
*/
public class XAResourceAdapter implements XAResource {
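   // Typical use is to enlist an instance to force a transaction manager into a full 2PC, e.g.
   //    tx.enlistResource(new XAResourceAdapter());
   // as done in OngoingTransactionsAndJoinTest.TransactionalTask#startTx.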
public void commit(Xid xid, boolean b) throws XAException {
// no-op
}
public void end(Xid xid, int i) throws XAException {
// no-op
}
public void forget(Xid xid) throws XAException {
// no-op
}
public int getTransactionTimeout() throws XAException {
return 0;
}
public boolean isSameRM(XAResource xaResource) throws XAException {
return false;
}
public int prepare(Xid xid) throws XAException {
return XA_OK;
}
public Xid[] recover(int i) throws XAException {
return RecoveryManager.RecoveryIterator.NOTHING;
}
public void rollback(Xid xid) throws XAException {
// no-op
}
public boolean setTransactionTimeout(int i) throws XAException {
return false;
}
public void start(Xid xid, int i) throws XAException {
// no-op
}
}
| 1,228
| 20.561404
| 70
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/RehashAfterPartitionMergeTest.java
|
package org.infinispan.distribution.rehash;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.TransportFlags;
import org.jgroups.protocols.DISCARD;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.RehashAfterPartitionMergeTest")
@InCacheMode({ CacheMode.DIST_SYNC })
public class RehashAfterPartitionMergeTest extends MultipleCacheManagersTest {
Cache<Object, Object> c1, c2;
List<Cache<Object, Object>> caches;
DISCARD d1, d2;
@Override
protected void createCacheManagers() throws Throwable {
createClusteredCaches(2, "test",
getDefaultClusteredCacheConfig(cacheMode),
new TransportFlags().withFD(true).withMerge(true));
caches = caches("test");
c1 = caches.get(0);
c2 = caches.get(1);
d1 = TestingUtil.getDiscardForCache(c1.getCacheManager());
d2 = TestingUtil.getDiscardForCache(c2.getCacheManager());
}
public void testCachePartition() {
c1.put("1", "value");
c2.put("2", "value");
for (Cache<Object, Object> c: caches) {
assert "value".equals(c.get("1"));
assert "value".equals(c.get("2"));
assert manager(c).getMembers().size() == 2;
}
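      // Make each node discard all traffic from the other, simulating a network partition.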
d1.discardAll(true);
d2.discardAll(true);
// Wait until c1 and c2 have a view of 1 member each
TestingUtil.blockUntilViewsChanged(60000, 1, c1, c2);
// we should see a network partition
for (Cache<Object, Object> c: caches) assert manager(c).getMembers().size() == 1;
c1.put("3", "value");
c2.put("4", "value");
assert "value".equals(c1.get("3"));
assert null == c2.get("3");
assert "value".equals(c2.get("4"));
assert null == c1.get("4");
// lets "heal" the partition
d1.discardAll(false);
d2.discardAll(false);
// Wait until c1 and c2 have a view of 2 members each
TestingUtil.blockUntilViewsChanged(45000, 2, c1, c2);
TestingUtil.waitForNoRebalance(c1, c2);
c1.put("5", "value");
c2.put("6", "value");
for (Cache<Object, Object> c: caches) {
assert "value".equals(c.get("5"));
assert "value".equals(c.get("6"));
assert manager(c).getMembers().size() == 2;
}
}
}
| 2,499
| 29.487805
| 93
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/DistributionRehashSCI.java
|
package org.infinispan.distribution.rehash;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
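/**
 * Serialization context for the custom consistent hash factories used by the rehash tests; the
 * ProtoStream annotation processor generates the {@code DistributionRehashSCIImpl} referenced below.
 */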
@AutoProtoSchemaBuilder(
includeClasses = {
NonTxBackupOwnerBecomingPrimaryOwnerTest.CustomConsistentHashFactory.class,
NonTxPrimaryOwnerBecomingNonOwnerTest.CustomConsistentHashFactory.class,
},
schemaFileName = "test.core.distribution.rehash.proto",
schemaFilePath = "proto/generated",
schemaPackageName = "org.infinispan.test.core.distribution.rehash",
service = false
)
public interface DistributionRehashSCI extends SerializationContextInitializer {
DistributionRehashSCI INSTANCE = new DistributionRehashSCIImpl();
}
| 769
| 39.526316
| 87
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/WriteOperationDuringLeaverTest.java
|
package org.infinispan.distribution.rehash;
import static org.infinispan.util.BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.MagicKey;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.BlockingLocalTopologyManager;
import org.testng.annotations.Test;
/**
* Test case for ISPN-6599
* <p>
 * During a rehash, if the backup owner leaves, the new backup owner is not yet part of the read consistent hash.
 * However, the EntryWrappingInterceptor checks ownership against the read consistent hash, so the backup update
 * fails on that node, leaving the data inconsistent.
*
* @author Pedro Ruivo
* @since 9.0
*/
@Test(groups = "functional", testName = "distribution.rehash.WriteOperationDuringLeaverTest")
@CleanupAfterMethod
public class WriteOperationDuringLeaverTest extends MultipleCacheManagersTest {
private static final int NUMBER_NODES = 3;
public void testSingleKeyCommandWithExistingKey() throws Exception {
doTest(Operation.SINGLE_KEY, true);
}
public void testMultipleKeyCommandWithExistingKey() throws Exception {
doTest(Operation.MULTIPLE_KEYS, true);
}
   public void testSingleKeyCommandWithNewKey() throws Exception {
doTest(Operation.SINGLE_KEY, false);
}
public void testMultipleKeyCommandWithNewKey() throws Exception {
doTest(Operation.MULTIPLE_KEYS, false);
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
builder.clustering().hash().numOwners(2);
createClusteredCaches(NUMBER_NODES, TestDataSCI.INSTANCE, builder);
}
@Override
protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
NoOpGlobalConfigurationManager.amendCacheManager(cm);
}
private void doTest(Operation operation, boolean init) throws Exception {
final MagicKey key = new MagicKey(cache(1), cache(2));
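      // The MagicKey maps to node 1 as primary owner and node 2 as backup; node 2 is the leaver.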
if (init) {
cache(0).put(key, "v1");
assertInAllCache(key, "v1");
}
BlockingLocalTopologyManager bltm0 = replaceTopologyManagerDefaultCache(manager(0));
BlockingLocalTopologyManager bltm1 = replaceTopologyManagerDefaultCache(manager(1));
killMember(2, null, false);
//CH_UPDATE + REBALANCE_START + CH_UPDATE(blocked)
bltm0.expectRebalanceStartAfterLeave().unblock();
bltm1.expectRebalanceStartAfterLeave().unblock();
bltm0.expectPhaseConfirmation().unblock();
bltm1.expectPhaseConfirmation().unblock();
//check if we are in the correct state
LocalizedCacheTopology cacheTopology = TestingUtil.extractComponent(cache(0), DistributionManager.class).getCacheTopology();
DistributionInfo distributionInfo = cacheTopology.getDistribution(key);
assertFalse(distributionInfo.isReadOwner());
assertTrue(distributionInfo.isWriteOwner());
assertEquals(address(1), distributionInfo.primary());
operation.put(key, "v2", cache(1));
BlockingLocalTopologyManager.finishRebalance(CacheTopology.Phase.READ_ALL_WRITE_ALL, bltm0, bltm1);
waitForClusterToForm(); //let the cluster finish the state transfer
assertInAllCache(key, "v2");
}
private <K, V> void assertInAllCache(K key, V value) {
for (Cache<K, V> cache : this.<K, V>caches()) {
assertEquals("Wrong value in cache " + address(cache), value, cache.get(key));
}
}
//all the single key are handled in the same way. No need to test remove/replace.
private enum Operation {
SINGLE_KEY {
@Override
<K, V> void put(K key, V value, Cache<K, V> cache) {
cache.put(key, value);
}
},
MULTIPLE_KEYS {
@Override
<K, V> void put(K key, V value, Cache<K, V> cache) {
Map<K, V> map = new HashMap<>();
map.put(key, value);
cache.putAll(map);
}
};
abstract <K, V> void put(K key, V value, Cache<K, V> cache);
}
private static class CacheTopologyMatcher extends BaseMatcher<Object> {
private final int topologyId;
CacheTopologyMatcher(int topologyId) {
this.topologyId = topologyId;
}
@Override
public boolean matches(Object item) {
return (item instanceof CacheTopology) && ((CacheTopology) item).getTopologyId() == topologyId;
}
@Override
public void describeTo(Description description) {
description.appendText("CacheTopology(" + topologyId + ")");
}
}
}
| 5,460
| 35.406667
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/SharedStoreInvalidationDuringRehashTest.java
|
package org.infinispan.distribution.rehash;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.DataContainer;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.dummy.DummyInMemoryStoreConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Test that entries in a shared store are not touched in any way during state transfer.
*
* @author Dan Berindei
*/
@Test(testName = "distribution.rehash.SharedStoreInvalidationDuringRehashTest", groups = "functional")
@CleanupAfterMethod
@InCacheMode({CacheMode.DIST_SYNC })
public class SharedStoreInvalidationDuringRehashTest extends MultipleCacheManagersTest {
private static final Log log = LogFactory.getLog(SharedStoreInvalidationDuringRehashTest.class);
private static final int NUM_KEYS = 20;
private static final String TEST_CACHE_NAME = "testCache";
@Override
protected void createCacheManagers() {
      // cache managers are started one after another in doTest()
}
private void addNewCacheManagerAndWaitForRehash(int index, boolean preload) {
EmbeddedCacheManager cacheManager = addClusterEnabledCacheManager(getDefaultClusteredCacheConfig(
cacheMode, false));
Configuration config = buildCfg(index, true, preload);
cacheManager.defineConfiguration(TEST_CACHE_NAME, config);
log.debugf("\n\nstarted CacheManager #%d", getCacheManagers().size() - 1);
waitForClusterToForm(TEST_CACHE_NAME);
}
private Configuration buildCfg(final int index, boolean clustered, boolean preload) {
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.persistence().passivation(false);
DummyInMemoryStoreConfigurationBuilder dummyCB = cb.persistence().addStore(DummyInMemoryStoreConfigurationBuilder.class);
dummyCB.preload(preload).shared(true).purgeOnStartup(false);
dummyCB.storeName(SharedStoreInvalidationDuringRehashTest.class.getSimpleName());
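      // shared(true) plus the fixed store name make every cache manager use the same in-memory
      // store instance, so store contents can be asserted from any node.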
if (clustered) {
cb.clustering().l1().disable();
cb.clustering().cacheMode(cacheMode);
cb.clustering().hash().numOwners(1); // one owner!
cb.clustering().stateTransfer().fetchInMemoryState(true);
cb.clustering().hash().groups().enabled();
}
return cb.build(true);
}
private void incrementCounter(ConcurrentMap<Integer, ConcurrentMap<Object, AtomicInteger>> counterMap, int index, Object[] keys) {
ConcurrentMap<Object, AtomicInteger> counters = counterMap.computeIfAbsent(index, ignored -> new ConcurrentHashMap<>());
for (Object key : keys) {
counters.computeIfAbsent(key, k -> new AtomicInteger()).incrementAndGet();
}
}
private int getCounter(ConcurrentMap<Integer, ConcurrentMap<Object, AtomicInteger>> counterMap, int index) {
ConcurrentMap<Object, AtomicInteger> counters = counterMap.get(index);
return counters == null ? 0 : counters.values().stream().mapToInt(AtomicInteger::get).sum();
}
private int getSum(ConcurrentMap<Integer, ConcurrentMap<Object, AtomicInteger>> counterMap) {
return counterMap.values().stream().flatMapToInt(
m -> m.values().stream().mapToInt(AtomicInteger::get)
).sum();
}
public void testRehashWithPreload() {
doTest(true);
}
public void testRehashWithoutPreload() {
doTest(false);
}
private void doTest(boolean preload) {
// start a cluster that uses this cache store
// add 1st member
addNewCacheManagerAndWaitForRehash(0, preload);
// insert the data and test that it's in the store
insertTestData();
printCacheContents();
printStoreContents();
checkContentAndInvalidations(preload);
// stop 1st member
killMember(0);
// re-add 1st member
addNewCacheManagerAndWaitForRehash(0, preload);
printCacheContents();
printStoreContents();
checkContentAndInvalidations(preload);
// add 2nd member
addNewCacheManagerAndWaitForRehash(1, preload);
printCacheContents();
printStoreContents();
checkContentAndInvalidations(preload);
// add 3rd member
addNewCacheManagerAndWaitForRehash(2, preload);
printCacheContents();
printStoreContents();
checkContentAndInvalidations(preload);
}
private void insertTestData() {
Cache<String, String> cache = manager(0).getCache(TEST_CACHE_NAME);
for (int i = 0; i < NUM_KEYS; i++) {
cache.put("key" + i, Integer.toString(i));
}
log.debugf("Added %d entries to test cache", NUM_KEYS);
}
private void checkContentAndInvalidations(boolean preload) {
int clusterSize = getCacheManagers().size();
HashMap<Object, Integer> currentOwners = new HashMap<>();
for (int i = 0; i < clusterSize; i++) {
Cache<String, String> testCache = manager(i).getCache(TEST_CACHE_NAME);
DistributionManager dm = testCache.getAdvancedCache().getDistributionManager();
DataContainer dataContainer = testCache.getAdvancedCache().getDataContainer();
for (int j = 0; j < NUM_KEYS; j++) {
String key = "key" + j;
if (!dm.getCacheTopology().isReadOwner(key)) {
assertFalse("Key '" + key + "' is not owned by node " + address(i) + " but it still appears there",
dataContainer.containsKey(key));
} else {
currentOwners.put(key, i);
if (preload) {
assertTrue("Key '" + key + "' is owned by node " + address(i) + " but it does not appear there",
dataContainer.containsKey(key));
}
}
}
}
DummyInMemoryStore store = TestingUtil.getFirstStore(cache(0, TEST_CACHE_NAME));
for (int i = 0; i < NUM_KEYS; i++) {
String key = "key" + i;
assertTrue("Key " + key + " is missing from the shared store", store.keySet().contains(key));
}
// Reset stats for the next check
store.clearStats();
}
private int computeDiff(Map<Object, Integer> previous, Map<Object, Integer> current) {
assertEquals(previous.size(), current.size());
int diff = 0;
for (Map.Entry<Object, Integer> pair : previous.entrySet()) {
if (Integer.compare(pair.getValue(), current.get(pair.getKey())) != 0) ++diff;
}
return diff;
}
private void printCacheContents() {
log.debugf("%d cache managers: %s", getCacheManagers().size(), getCacheManagers());
for (int i = 0; i < getCacheManagers().size(); i++) {
Cache<String, String> testCache = manager(i).getCache(TEST_CACHE_NAME);
DataContainer<String, String> dataContainer = testCache.getAdvancedCache().getDataContainer();
log.debugf("DC on %s has %d keys: %s", address(i), dataContainer.size(),
StreamSupport.stream(dataContainer.spliterator(), false).map(Map.Entry::getKey).collect(Collectors.joining(",")));
Set<String> keySet = testCache.keySet();
log.debugf("Cache %s has %d keys: %s", address(i), keySet.size(), keySet);
}
}
private void printStoreContents() {
DummyInMemoryStore store = TestingUtil.getFirstStore(cache(0, TEST_CACHE_NAME));
Set<Object> keySet = store.keySet();
log.debugf("Shared store has %d keys: %s", keySet.size(), keySet);
}
}
| 8,369
| 38.668246
| 133
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/OngoingTransactionsAndJoinTest.java
|
package org.infinispan.distribution.rehash;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.infinispan.test.TestingUtil.replaceComponent;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import jakarta.transaction.Transaction;
import org.infinispan.Cache;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.topology.TopologyUpdateCommand;
import org.infinispan.commands.topology.RebalanceStartCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.impl.TxInterceptor;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
/**
* This tests the following scenario:
 * <p>
 * One node exists with transactions running: some complete, some in the prepare phase, some in the commit phase.
 * A new node then joins and a rehash occurs. The test verifies that the new node becomes an owner and receives
 * this transactional state.
*/
@Test(groups = "unstable", testName = "distribution.rehash.OngoingTransactionsAndJoinTest",
description = "See ISPN-4044 -- original group: functional")
@CleanupAfterMethod
public class OngoingTransactionsAndJoinTest extends MultipleCacheManagersTest {
ConfigurationBuilder configuration;
ScheduledExecutorService delayedExecutor = Executors.newScheduledThreadPool(1, getTestThreadFactory("Timer"));
@Override
protected void createCacheManagers() throws Throwable {
configuration = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
configuration.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
configuration.locking().lockAcquisitionTimeout(60000).useLockStriping(false);
configuration.clustering().stateTransfer().timeout(30, SECONDS);
addClusterEnabledCacheManager(configuration);
}
public void testRehashOnJoin() throws InterruptedException {
Cache<Object, Object> firstNode = cache(0);
final CountDownLatch txsStarted = new CountDownLatch(3), txsReady = new CountDownLatch(3), joinEnded = new CountDownLatch(1), rehashStarted = new CountDownLatch(1);
wrapInboundInvocationHandler(firstNode, original -> new ListeningHandler(original, txsReady, joinEnded, rehashStarted));
for (int i = 0; i < 10; i++) firstNode.put("OLD" + i, "value");
UnpreparedDuringRehashTask ut = new UnpreparedDuringRehashTask(firstNode, txsStarted, txsReady, joinEnded, rehashStarted);
PrepareDuringRehashTask pt = new PrepareDuringRehashTask(firstNode, txsStarted, txsReady, joinEnded, rehashStarted);
CommitDuringRehashTask ct = new CommitDuringRehashTask(firstNode, txsStarted, txsReady, joinEnded, rehashStarted);
AsyncInterceptorChain ic = firstNode.getAdvancedCache().getAsyncInterceptorChain();
ic.addInterceptorAfter(pt, TxInterceptor.class);
ic.addInterceptorAfter(ct, TxInterceptor.class);
Set<Thread> threads = new HashSet<>();
threads.add(new Thread(ut, "Worker-UnpreparedDuringRehashTask"));
threads.add(new Thread(pt, "Worker-PrepareDuringRehashTask"));
threads.add(new Thread(ct, "Worker-CommitDuringRehashTask"));
for (Thread t : threads) t.start();
txsStarted.await(10, SECONDS);
// we don't have a hook for the start of the rehash any more
delayedExecutor.schedule(new Callable<Object>() {
@Override
public Object call() throws Exception {
rehashStarted.countDown();
return null;
}
}, 10, TimeUnit.MILLISECONDS);
// start a new node!
addClusterEnabledCacheManager(configuration);
ListeningHandler listeningHandler2 = new ListeningHandler(extractComponent(firstNode, PerCacheInboundInvocationHandler.class), txsReady, joinEnded, rehashStarted);
replaceComponent(cache(1), PerCacheInboundInvocationHandler.class, listeningHandler2, true);
Cache<?, ?> joiner = cache(1);
for (Thread t : threads) t.join();
TestingUtil.waitForNoRebalance(cache(0), cache(1));
for (int i = 0; i < 10; i++) {
Object key = "OLD" + i;
Object value = joiner.get(key);
log.infof(" TEST: Key %s is %s", key, value);
assert "value".equals(value) : "Couldn't see key " + key + " on joiner!";
}
for (Object key: Arrays.asList(ut.key(), pt.key(), ct.key())) {
Object value = joiner.get(key);
log.infof(" TEST: Key %s is %s", key, value);
assert "value".equals(value) : "Couldn't see key " + key + " on joiner!";
}
}
abstract class TransactionalTask extends DDAsyncInterceptor implements Runnable {
Cache<Object, Object> cache;
CountDownLatch txsStarted, txsReady, joinEnded, rehashStarted;
volatile Transaction tx;
protected void startTx() throws Exception {
tm(cache).begin();
cache.put(key(), "value");
tx = tm(cache).getTransaction();
         tx.enlistResource(new XAResourceAdapter()); // force 2PC and prevent transaction managers from optimising the call into a 1PC
txsStarted.countDown();
}
abstract Object key();
}
class UnpreparedDuringRehashTask extends TransactionalTask {
UnpreparedDuringRehashTask(Cache<Object, Object> cache, CountDownLatch txsStarted, CountDownLatch txsReady, CountDownLatch joinEnded, CountDownLatch rehashStarted) {
this.cache = cache;
this.txsStarted = txsStarted;
this.txsReady = txsReady;
this.joinEnded = joinEnded;
this.rehashStarted = rehashStarted;
}
@Override
Object key() {
return "unprepared_during_rehash";
}
@Override
public void run() {
try {
// start a tx
startTx();
txsReady.countDown();
joinEnded.await(10, SECONDS);
tm(cache).commit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
class PrepareDuringRehashTask extends TransactionalTask {
PrepareDuringRehashTask(Cache<Object, Object> cache, CountDownLatch txsStarted, CountDownLatch txsReady, CountDownLatch joinEnded, CountDownLatch rehashStarted) {
this.cache = cache;
this.txsStarted = txsStarted;
this.txsReady = txsReady;
this.joinEnded = joinEnded;
this.rehashStarted = rehashStarted;
}
@Override
Object key() {
return "prepare_during_rehash";
}
@Override
public void run() {
try {
startTx();
tm(cache).commit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public Object visitPrepareCommand(TxInvocationContext tcx, PrepareCommand cc) throws Throwable {
if (tx.equals(tcx.getTransaction())) {
txsReady.countDown();
rehashStarted.await(10, SECONDS);
}
return super.visitPrepareCommand(tcx, cc);
}
@Override
public Object visitCommitCommand(TxInvocationContext tcx, CommitCommand cc) throws Throwable {
if (tx.equals(tcx.getTransaction())) {
joinEnded.await(10, SECONDS);
}
return super.visitCommitCommand(tcx, cc);
}
}
class CommitDuringRehashTask extends TransactionalTask {
CommitDuringRehashTask(Cache<Object, Object> cache, CountDownLatch txsStarted, CountDownLatch txsReady, CountDownLatch joinEnded, CountDownLatch rehashStarted) {
this.cache = cache;
this.txsStarted = txsStarted;
this.txsReady = txsReady;
this.joinEnded = joinEnded;
this.rehashStarted = rehashStarted;
}
@Override
Object key() {
return "commit_during_rehash";
}
@Override
public void run() {
try {
startTx();
tm(cache).commit();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
public Object visitPrepareCommand(TxInvocationContext tcx, PrepareCommand cc) throws Throwable {
return invokeNextThenAccept(tcx, cc, (rCtx, rCommand, rv) -> {
if (tx.equals(tcx.getTransaction())) {
txsReady.countDown();
}
});
}
@Override
public Object visitCommitCommand(TxInvocationContext tcx, CommitCommand cc) throws Throwable {
if (tx.equals(tcx.getTransaction())) {
rehashStarted.await(10, SECONDS);
}
return super.visitCommitCommand(tcx, cc);
}
}
class ListeningHandler extends AbstractDelegatingHandler {
final CountDownLatch txsReady, joinEnded, rehashStarted;
public ListeningHandler(PerCacheInboundInvocationHandler delegate, CountDownLatch txsReady, CountDownLatch joinEnded, CountDownLatch rehashStarted) {
super(delegate);
this.txsReady = txsReady;
this.joinEnded = joinEnded;
this.rehashStarted = rehashStarted;
}
@Override
public void handle(CacheRpcCommand cmd, Reply reply, DeliverOrder order) {
boolean notifyRehashStarted = false;
if (cmd instanceof RebalanceStartCommand) {
try {
txsReady.await(10, SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
reply.reply(new ExceptionResponse(e));
return;
}
notifyRehashStarted = true;
} else if (cmd instanceof TopologyUpdateCommand) {
joinEnded.countDown();
}
delegate.handle(cmd, reply, order);
if (notifyRehashStarted) rehashStarted.countDown();
}
}
}
| 10,882
| 36.657439
| 171
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/BaseTxStateTransferOverwriteTest.java
|
package org.infinispan.distribution.rehash;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyCollection;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.withSettings;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.commands.triangle.BackupWriteCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.BackupAckCommand;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.distribution.MagicKey;
import org.infinispan.globalstate.NoOpGlobalConfigurationManager;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateConsumer;
import org.infinispan.statetransfer.StateTransferInterceptor;
import org.infinispan.test.Mocks;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.test.op.TestWriteOperation;
import org.infinispan.topology.ClusterTopologyManager;
import org.infinispan.util.ControlledRpcManager;
import org.mockito.AdditionalAnswers;
import org.mockito.stubbing.Answer;
import org.testng.annotations.Test;
import jakarta.transaction.TransactionManager;
/**
* Base class used to test various write commands interleaving with state transfer with a tx cache
*
* @author William Burns
* @since 6.0
*/
@Test(groups = "functional")
public abstract class BaseTxStateTransferOverwriteTest extends BaseDistFunctionalTest<Object, Object> {
public BaseTxStateTransferOverwriteTest() {
INIT_CLUSTER_SIZE = 3;
numOwners = 2;
transactional = true;
performRehashing = true;
cleanup = CleanupPhase.AFTER_METHOD;
}
protected boolean l1Enabled() {
return cache(0, cacheName).getCacheConfiguration().clustering().l1().enabled();
}
@Override
protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
NoOpGlobalConfigurationManager.amendCacheManager(cm);
}
/**
    * Returns the {@link VisitableCommand} subclass matching the command that will cause data to
    * be placed into the data container. Since this test is transaction based, the default is
    * {@link PrepareCommand}; subclasses can change this behavior if desired.
*/
protected Class<? extends VisitableCommand> getVisitableCommand(TestWriteOperation op) {
return PrepareCommand.class;
}
protected Callable<?> runWithTx(final TransactionManager tm, final Callable<?> callable) {
return () -> TestingUtil.withTx(tm, callable);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithPut() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.PUT_OVERWRITE, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithPut() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.PUT_OVERWRITE, false);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithPutCreate() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.PUT_CREATE, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithPutCreate() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.PUT_CREATE, false);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithPutIfAbsent() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.PUT_IF_ABSENT, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithPutIfAbsent() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.PUT_IF_ABSENT, false);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithRemoveExact() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REMOVE_EXACT, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithRemoveExact() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REMOVE_EXACT, false);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithRemove() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REMOVE, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithRemove() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REMOVE, false);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithReplace() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REPLACE, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithReplace() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REPLACE, false);
}
@Test
public void testStateTransferInBetweenPrepareCommitWithReplaceExact() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REPLACE_EXACT, true);
}
@Test
public void testStateTransferInBetweenPrepareCommitMultipleEntryWithReplaceExact() throws Exception {
doStateTransferInBetweenPrepareCommit(TestWriteOperation.REPLACE_EXACT, false);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringPut() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.PUT_CREATE);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringPutIfAbsent() throws Exception {
doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(TestWriteOperation.PUT_IF_ABSENT);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringPut2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.PUT_CREATE);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringPutOverwrite2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.PUT_OVERWRITE);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringPutIfAbsent2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.PUT_IF_ABSENT);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringReplace2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.REPLACE);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringReplaceWithPreviousValue2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.REPLACE_EXACT);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringRemove2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.REMOVE);
}
@Test
public void testNonCoordinatorOwnerLeavingDuringRemoveWithPreviousValue2() throws Exception {
doL1InvalidationOldTopologyComesAfterRebalance(TestWriteOperation.REMOVE_EXACT);
}
protected void doStateTransferInBetweenPrepareCommit(final TestWriteOperation op,
final boolean additionalValueOnNonOwner) throws Exception {
// Test scenario:
// cache0,1,2 are in the cluster, an owner leaves
// Key k is in the cache, and is transferred to the non owner
// A user operation also modifies key k causing an invalidation
// on the non owner which is getting the state transfer
final AdvancedCache<Object, Object> primaryOwnerCache = advancedCache(0, cacheName);
final AdvancedCache<Object, Object> backupOwnerCache = advancedCache(1, cacheName);
final AdvancedCache<Object, Object> nonOwnerCache = advancedCache(2, cacheName);
final MagicKey key = new MagicKey(op + "-key", cache(0, cacheName), cache(1, cacheName));
// Prepare for replace/remove: put a previous value in cache0
final Object previousValue = op.getPreviousValue();
if (previousValue != null) {
primaryOwnerCache.put(key, previousValue);
assertEquals(previousValue, primaryOwnerCache.get(key));
log.tracef("Previous value inserted: %s = %s", key, previousValue);
assertEquals(previousValue, nonOwnerCache.get(key));
if (l1Enabled()) {
assertIsInL1(nonOwnerCache, key);
}
}
      // Need to block after the Prepare command has been sent, i.e. once it has cleared the StateTransferInterceptor
final CyclicBarrier cyclicBarrier = new CyclicBarrier(2);
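      // Two parties: the forked thread blocked inside BlockingInterceptor and this test thread.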
try {
TransactionManager tm = primaryOwnerCache.getTransactionManager();
Future<?> future = fork(runWithTx(tm, () -> {
if (additionalValueOnNonOwner) {
MagicKey mk = new MagicKey("placeholder", nonOwnerCache);
String value = "somevalue";
primaryOwnerCache.put(mk, value);
log.tracef("Adding additional value on nonOwner value inserted: %s = %s", mk, value);
}
extractInterceptorChain(primaryOwnerCache)
.addInterceptorBefore(new BlockingInterceptor<>(cyclicBarrier, getVisitableCommand(op), true, false),
StateTransferInterceptor.class);
return op.perform(primaryOwnerCache, key);
}));
cyclicBarrier.await(10, SECONDS);
         // After the barrier has been hit, remove the interceptor; the blocked command can still be released
         // through the barrier, and this way state transfer won't be blocked if the normal put occurs before it.
removeAllBlockingInterceptorsFromCache(primaryOwnerCache);
// Block the rebalance confirmation on nonOwnerCache
CheckPoint checkPoint = new CheckPoint();
log.trace("Adding proxy to state transfer");
waitUntilStateBeingTransferred(nonOwnerCache, checkPoint);
backupOwnerCache.getCacheManager().stop();
// Wait for non owner to just about get state
checkPoint.awaitStrict("pre_state_apply_invoked_for_" + nonOwnerCache, 10, SECONDS);
// let prepare complete and thus commit command invalidating on nonOwner
cyclicBarrier.await(10, SECONDS);
assertEquals(op.getReturnValue(), future.get(10, SECONDS));
// let state transfer go
checkPoint.trigger("pre_state_apply_release_for_" + nonOwnerCache);
TestingUtil.waitForNoRebalance(primaryOwnerCache, nonOwnerCache);
switch (op) {
case REMOVE:
case REMOVE_EXACT:
break;
default:
assertIsInContainerImmortal(primaryOwnerCache, key);
assertIsInContainerImmortal(nonOwnerCache, key);
break;
}
// Check the value to make sure data container contains correct value
assertEquals(op.getValue(), primaryOwnerCache.get(key));
assertEquals(op.getValue(), nonOwnerCache.get(key));
} finally {
removeAllBlockingInterceptorsFromCache(primaryOwnerCache);
}
}
/**
    * When L1 is enabled this test must not be run with a previous value present, as it will cause timeouts. Due
    * to how locking works with L1, this scenario cannot occur when the previous value exists.
*/
protected void doTestWhereCommitOccursAfterStateTransferBeginsBeforeCompletion(final TestWriteOperation op) throws Exception {
if (l1Enabled() && op.getPreviousValue() != null) {
fail("This test cannot be ran with L1 when a previous value is set");
}
// Test scenario:
// cache0,1,2 are in the cluster, an owner leaves
// Key k is in the cache, and is transferred to the non owner
// A user operation also modifies key k causing an invalidation
// on the non owner which is getting the state transfer
final AdvancedCache<Object, Object> primaryOwnerCache = cache(0, cacheName).getAdvancedCache();
final AdvancedCache<Object, Object> backupOwnerCache = cache(1, cacheName).getAdvancedCache();
final AdvancedCache<Object, Object> nonOwnerCache = cache(2, cacheName).getAdvancedCache();
final MagicKey key = new MagicKey(primaryOwnerCache, backupOwnerCache);
// Prepare for replace/remove: put a previous value in cache0
final Object previousValue = op.getPreviousValue();
if (previousValue != null) {
primaryOwnerCache.put(key, previousValue);
assertEquals(previousValue, primaryOwnerCache.get(key));
log.tracef("Previous value inserted: %s = %s", key, previousValue);
assertEquals(previousValue, nonOwnerCache.get(key));
if (l1Enabled()) {
assertIsInL1(nonOwnerCache, key);
}
}
int preJoinTopologyId = primaryOwnerCache.getDistributionManager().getCacheTopology().getTopologyId();
// Block any state response commands on cache0
CheckPoint checkPoint = new CheckPoint();
ControlledRpcManager blockingRpcManager0 = ControlledRpcManager.replaceRpcManager(primaryOwnerCache);
ControlledRpcManager blockingRpcManager2 = ControlledRpcManager.replaceRpcManager(nonOwnerCache);
// The execution of the write/prepare/commit commands is controlled with the BlockingInterceptor
blockingRpcManager0.excludeCommands(BackupWriteCommand.class, PrepareCommand.class, CommitCommand.class,
TxCompletionNotificationCommand.class
);
blockingRpcManager2.excludeCommands(BackupAckCommand.class);
// Block the rebalance confirmation on cache0
int rebalanceTopologyId = preJoinTopologyId + 2;
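      // The leave triggers a CH_UPDATE and then a REBALANCE_START, each incrementing the topology
      // id, hence preJoinTopologyId + 2.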
blockRebalanceConfirmation(primaryOwnerCache.getCacheManager(), checkPoint, rebalanceTopologyId);
assertEquals(primaryOwnerCache.getCacheManager().getCoordinator(),
primaryOwnerCache.getCacheManager().getAddress());
// Remove the leaver
log.trace("Stopping the cache");
backupOwnerCache.getCacheManager().stop();
// Wait for the write CH to contain the joiner everywhere
eventuallyEquals(2, () -> primaryOwnerCache.getRpcManager().getMembers().size());
eventuallyEquals(2, () -> nonOwnerCache.getRpcManager().getMembers().size());
assertEquals(primaryOwnerCache.getCacheManager().getCoordinator(),
primaryOwnerCache.getCacheManager().getAddress());
// Wait for both nodes to start state transfer
if (transactional) {
blockingRpcManager0.expectCommand(StateTransferGetTransactionsCommand.class).send().receiveAll();
blockingRpcManager2.expectCommand(StateTransferGetTransactionsCommand.class).send().receiveAll();
}
ControlledRpcManager.BlockedRequest<StateTransferStartCommand> blockedStateRequest0 =
blockingRpcManager0.expectCommand(StateTransferStartCommand.class);
ControlledRpcManager.BlockedRequest<StateTransferStartCommand> blockedStateRequest2 =
blockingRpcManager2.expectCommand(StateTransferStartCommand.class);
// Unblock the state request from node 2
// Don't wait for response, because node 2 might be sending the first state response on the request thread
blockedStateRequest2.send().receiveAllAsync();
// Wait for cache0 to collect the state to send to node 2 (including our previous value).
ControlledRpcManager.BlockedRequest<StateResponseCommand> blockedStateResponse0 =
blockingRpcManager0.expectCommand(StateResponseCommand.class);
// Every PutKeyValueCommand will be blocked before committing the entry on cache1
CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
BlockingInterceptor<?> blockingInterceptor1 =
new BlockingInterceptor<>(beforeCommitCache1Barrier, op.getCommandClass(), true, false);
extractInterceptorChain(nonOwnerCache)
.addInterceptorAfter(blockingInterceptor1, EntryWrappingInterceptor.class);
// Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 will become a backup owner for the retry
// The put command will be blocked on cache1 just before committing the entry.
Future<Object> future = fork(() -> op.perform(primaryOwnerCache, key));
// Wait for the entry to be wrapped on node 2
// The replicated command could be either a non-tx BackupWriteCommand or a PrepareCommand
beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);
// Remove the interceptor so we don't mess up any other state transfer puts
removeAllBlockingInterceptorsFromCache(nonOwnerCache);
// Allow the state to be applied on cache1 (writing the old value for our entry)
blockedStateResponse0.send().receiveAll();
// Wait for second in line to finish applying the state, but don't allow the rebalance confirmation to be processed.
// (It would change the topology and it would trigger a retry for the command.)
// Don't wait for response, because node 2 might be sending the first state response on the request thread
blockedStateRequest0.send().receiveAllAsync();
blockingRpcManager2.expectCommand(StateResponseCommand.class).send().receiveAll();
checkPoint.awaitStrict("pre_rebalance_confirmation_" + rebalanceTopologyId + "_from_" +
primaryOwnerCache.getCacheManager().getAddress(), 10, SECONDS);
// Now allow the command to commit on cache1
beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);
// Wait for the command to finish and check that it didn't fail
Object result = future.get(10, TimeUnit.SECONDS);
assertEquals(op.getReturnValue(), result);
log.tracef("%s operation is done", op);
// Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + primaryOwnerCache.getCacheManager().getAddress());
checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + nonOwnerCache.getCacheManager().getAddress());
TestingUtil.waitForNoRebalance(primaryOwnerCache, nonOwnerCache);
switch (op) {
case REMOVE:
case REMOVE_EXACT:
break;
default:
assertIsInContainerImmortal(primaryOwnerCache, key);
assertIsInContainerImmortal(nonOwnerCache, key);
break;
}
// Check the value to make sure data container contains correct value
assertEquals(op.getValue(), primaryOwnerCache.get(key));
assertEquals(op.getValue(), nonOwnerCache.get(key));
}
private void doL1InvalidationOldTopologyComesAfterRebalance(final TestWriteOperation op) throws Exception {
// Test scenario:
// cache0,1,2 are in the cluster, an owner leaves
// Key k is in the cache, and is transferred to the non owner
// A user operation also modifies key k causing an invalidation
// on the non owner which is getting the state transfer
final AdvancedCache<Object, Object> primaryOwnerCache = advancedCache(0, cacheName);
final AdvancedCache<Object, Object> backupOwnerCache = advancedCache(1, cacheName);
final AdvancedCache<Object, Object> nonOwnerCache = advancedCache(2, cacheName);
final MagicKey key = new MagicKey(op + "-key", cache(0, cacheName), cache(1, cacheName));
// Prepare for replace/remove: put a previous value in cache0
final Object previousValue = op.getPreviousValue();
if (previousValue != null) {
primaryOwnerCache.put(key, previousValue);
assertEquals(previousValue, primaryOwnerCache.get(key));
log.tracef("Previous value inserted: %s = %s", key, previousValue);
assertEquals(previousValue, nonOwnerCache.get(key));
if (l1Enabled()) {
assertIsInL1(nonOwnerCache, key);
}
}
// Block on the interceptor right after ST which should now have the soon to be old topology id
CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
BlockingInterceptor<?> blockingInterceptor1 =
new BlockingInterceptor<>(beforeCommitCache1Barrier, getVisitableCommand(op), false, false);
extractInterceptorChain(primaryOwnerCache)
.addInterceptorAfter(blockingInterceptor1, StateTransferInterceptor.class);
// Put/Replace/Remove from primary owner. This will block before it is committing on remote nodes
Future<Object> future = fork(() -> {
try {
return op.perform(primaryOwnerCache, key);
} finally {
log.tracef("%s operation is done", op);
}
});
beforeCommitCache1Barrier.await(10, SECONDS);
// Remove blocking interceptor now since we have blocked
removeAllBlockingInterceptorsFromCache(primaryOwnerCache);
// Remove the leaver
log.tracef("Stopping the cache");
backupOwnerCache.getCacheManager().stop();
// Wait for the write CH to contain the joiner everywhere
eventually(() -> primaryOwnerCache.getRpcManager().getMembers().size() == 2 &&
nonOwnerCache.getRpcManager().getMembers().size() == 2);
TestingUtil.waitForNoRebalance(primaryOwnerCache, nonOwnerCache);
// Now let the update go through
beforeCommitCache1Barrier.await(10, SECONDS);
// Run the update now that we are in the middle of a rebalance
assertEquals(op.getReturnValue(), future.get(10, SECONDS));
log.tracef("%s operation is done", op);
switch (op) {
case REMOVE:
case REMOVE_EXACT:
break;
default:
assertIsInContainerImmortal(primaryOwnerCache, key);
assertIsInContainerImmortal(nonOwnerCache, key);
break;
}
// Check the value to make sure data container contains correct value
assertEquals(op.getValue(), primaryOwnerCache.get(key));
assertEquals(op.getValue(), nonOwnerCache.get(key));
}
private void blockRebalanceConfirmation(final EmbeddedCacheManager manager, final CheckPoint checkPoint, int rebalanceTopologyId)
throws Exception {
ClusterTopologyManager ctm = TestingUtil.extractGlobalComponent(manager, ClusterTopologyManager.class);
final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(ctm);
ClusterTopologyManager mockManager = mock(ClusterTopologyManager.class,
withSettings().defaultAnswer(forwardedAnswer));
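      // For the watched topology id, park the rebalance phase confirmation on the CheckPoint until
      // the test releases it; all other invocations pass straight through to the real manager.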
doAnswer(invocation -> {
Object[] arguments = invocation.getArguments();
Address source = (Address) arguments[1];
int topologyId = (Integer) arguments[2];
if (topologyId == rebalanceTopologyId) {
checkPoint.trigger("pre_rebalance_confirmation_" + topologyId + "_from_" + source);
return checkPoint.future("resume_rebalance_confirmation_" + topologyId + "_from_" + source,
20, SECONDS, testExecutor())
.thenCompose(ignored -> Mocks.callAnotherAnswer(forwardedAnswer, invocation));
}
return forwardedAnswer.answer(invocation);
}).when(mockManager).handleRebalancePhaseConfirm(anyString(), any(), anyInt(), isNull(), anyInt());
TestingUtil.replaceComponent(manager, ClusterTopologyManager.class, mockManager, true);
}
protected void waitUntilStateBeingTransferred(final Cache<?, ?> cache, final CheckPoint checkPoint) {
StateConsumer sc = TestingUtil.extractComponent(cache, StateConsumer.class);
final Answer<Object> forwardedAnswer = AdditionalAnswers.delegatesTo(sc);
StateConsumer mockConsumer = mock(StateConsumer.class, withSettings().defaultAnswer(forwardedAnswer));
doAnswer(invocation -> {
// Wait for main thread to sync up
checkPoint.trigger("pre_state_apply_invoked_for_" + cache);
// Now wait until main thread lets us through
checkPoint.awaitStrict("pre_state_apply_release_for_" + cache, 20, TimeUnit.SECONDS);
return forwardedAnswer.answer(invocation);
}).when(mockConsumer).applyState(any(Address.class), anyInt(), anyCollection());
TestingUtil.replaceComponent(cache, StateConsumer.class, mockConsumer, true);
}
}
| 25,503
| 45.882353
| 143
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/ConsistencyStressTest.java
|
package org.infinispan.distribution.rehash;
import static java.lang.String.format;
import static org.infinispan.test.TestingUtil.sleepRandom;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import jakarta.transaction.HeuristicMixedException;
import jakarta.transaction.HeuristicRollbackException;
import jakarta.transaction.NotSupportedException;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.distribution.DistributionTestHelper;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
// As this is a SLOW stress test, leave it disabled by default. Only run it manually.
@Test(groups = "stress", testName = "distribution.rehash.ConsistencyStressTest", timeOut = 15*60*1000)
public class ConsistencyStressTest extends MultipleCacheManagersTest {
private static final int NUM_NODES = 10;
private static final int WORKERS_PER_NODE = 2;
private static final int NUM_ITERATIONS = 5000;
private static final boolean IGNORE_TX_FAILURES = true;
private static final Log log = LogFactory.getLog(ConsistencyStressTest.class);
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder c = new ConfigurationBuilder();
c
.locking()
.isolationLevel(IsolationLevel.READ_COMMITTED)
.lockAcquisitionTimeout(60000)
.useLockStriping(false)
.clustering()
.cacheMode(CacheMode.DIST_SYNC)
.remoteTimeout(30000)
.l1().disable()
.transaction()
.lockingMode(LockingMode.PESSIMISTIC)
.transactionManagerLookup(new EmbeddedTransactionManagerLookup());
GlobalConfigurationBuilder gc = GlobalConfigurationBuilder.defaultClusteredBuilder();
gc.transport().distributedSyncTimeout(60000);
List<EmbeddedCacheManager> cacheManagers = new LinkedList<>();
for (int i = 0; i < NUM_NODES; i++)
cacheManagers.add(createClusteredCacheManager(gc, c));
registerCacheManager(cacheManagers.toArray(new EmbeddedCacheManager[NUM_NODES]));
}
public void testConsistency() throws Throwable {
Set<Future<Void>> futures = new HashSet<>(NUM_NODES * WORKERS_PER_NODE);
Set<String> keysToIgnore = new HashSet<>();
for (int i = 0; i < NUM_NODES; i++) {
Cache<String, String> c = cache(i);
for (int j = 0; j < WORKERS_PER_NODE; j++) {
Future<Void> f = fork(new Stressor(c, i, j, keysToIgnore));
futures.add(f);
sleepRandom(500);
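// stagger worker start-up a little so the stressors do not all hit the cluster at once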
}
}
// stressors are now running, generating a lot of data.
// wait for all stressors to finish.
log.info("Waiting for stressors to finish");
for (Future<Void> f : futures) f.get();
// Now shut down a node:
TestingUtil.killCacheManagers(cacheManagers.get(0));
// ... and ensure no data is lost.
// Stressors encode keys in the format __nodeNumber_workerNumber_iterationNumber__ (see keyFor), and all map to the value "value".
Map<Address, Cache<Object, Object>> cacheMap = new HashMap<>();
for (int i = 1; i < NUM_NODES; i++) {
Cache<Object, Object> c = cache(i);
cacheMap.put(address(c), c);
}
// Let's enforce a quiet period to allow queued up transactions to complete.
Thread.sleep(25000);
// let's make sure any rehashing work has completed
TestingUtil.blockUntilViewsReceived(60000, false, cacheMap.values());
TestingUtil.waitForNoRebalance(cacheMap.values());
LocalizedCacheTopology cacheTopology = cache(1).getAdvancedCache().getDistributionManager().getCacheTopology();
for (int i = 0; i < NUM_NODES; i++) {
for (int j = 0; j < WORKERS_PER_NODE; j++) {
for (int k = 0; k < NUM_ITERATIONS; k++) {
String key = keyFor(i, j, k);
if (keysToIgnore.contains(key)) {
log.infof("Skipping test on failing key %s", key);
} else {
Collection<Address> owners = cacheTopology.getWriteOwners(key);
for (Map.Entry<Address, Cache<Object, Object>> e : cacheMap.entrySet()) {
try {
if (owners.contains(e.getKey())) {
DistributionTestHelper.assertIsInContainerImmortal(e.getValue(), key);
}
// Don't bother testing non-owners since invalidations caused by rehashing are async!
} catch (Throwable th) {
log.fatalf("Key %s (segment %s) should be on owners %s according to %s",
key, cacheTopology.getSegment(key), owners, cacheTopology);
throw th;
}
}
}
}
}
}
}
private static String keyFor(int nodeId, int workerId, int iterationId) {
return format("__%s_%s_%s__", nodeId, workerId, iterationId);
}
private static class Stressor implements Callable<Void> {
private final Cache<String, String> cache;
private final TransactionManager tm;
private final int cacheId, workerId;
private final Set<String> keysToIgnore;
private Stressor(Cache<String, String> cache, int cacheId, int workerId, Set<String> keysToIgnore) {
this.cache = cache;
tm = TestingUtil.getTransactionManager(cache);
this.cacheId = cacheId;
this.workerId = workerId;
this.keysToIgnore = keysToIgnore;
}
@Override
public Void call() throws TimeoutException {
for (int iterationId = 0; iterationId < NUM_ITERATIONS; iterationId++) {
if (iterationId % 500 == 0)
log.infof(" >> Stressor %s Worker %s Iteration %s", cacheId, workerId, iterationId);
boolean txError = false;
Exception exception = null;
String key = keyFor(cacheId, workerId, iterationId);
try {
tm.begin();
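// SKIP_REMOTE_LOOKUP keeps the put from fetching the previous value from
// remote owners, so each write is a pure store.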
cache.getAdvancedCache().withFlags(Flag.SKIP_REMOTE_LOOKUP).put(key, "value");
tm.commit();
} catch (HeuristicRollbackException | RollbackException | SystemException | HeuristicMixedException | NotSupportedException | TimeoutException e) {
txError = true;
exception = e;
}
if (txError) {
// first try to roll back the tx
try {
tm.rollback();
} catch (Exception exc) {
// rollback failed?
log.error(" >> Rollback failed");
}
if (IGNORE_TX_FAILURES) {
keysToIgnore.add(key);
log.errorf(" >> Saw a %s when trying to process key %s", exception.getClass().getSimpleName(), key);
} else {
throw new RuntimeException(exception);
}
}
}
return null;
}
}
}
| 8,193
| 39.766169
| 159
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/distribution/rehash/SingleJoinTest.java
|
package org.infinispan.distribution.rehash;
import java.util.ArrayList;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "distribution.rehash.SingleJoinTest")
public class SingleJoinTest extends RehashTestBase {
EmbeddedCacheManager joinerManager;
Cache<Object, String> joiner;
void performRehashEvent(boolean offline) {
joinerManager = addClusterEnabledCacheManager(TestDataSCI.INSTANCE);
joinerManager.defineConfiguration(cacheName, configuration.build());
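// starting the cache on the new manager is what actually triggers the join and the rebalance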
joiner = joinerManager.getCache(cacheName);
}
void waitForRehashCompletion() {
// need to block until this join has completed!
List<Cache> allCaches = new ArrayList<>(caches);
allCaches.add(joiner);
TestingUtil.blockUntilViewsReceived(60000, allCaches);
waitForClusterToForm(cacheName);
cacheManagers.add(joinerManager);
caches.add(joiner);
}
@Test(groups = "unstable", description = "ISPN-8276")
@Override
public void testNonTransactional() throws Throwable {
super.testNonTransactional();
}
}
| 1,274
| 30.875
| 77
|
java
|