infinispan-main/core/src/main/java/org/infinispan/container/impl/InternalDataContainer.java
package org.infinispan.container.impl;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.concurrent.CompletionStage;
import java.util.function.Consumer;
import java.util.function.ObjIntConsumer;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Interface describing methods of a data container where operations can be indexed by the segment of the key
 * stored in the map. This allows for much more efficient iteration when only a subset of segments is required for
 * a given operation (which is very often the case with distributed caches).
* <p>
 * This container has a notion of which segments are currently associated with it, and these can be controlled via
 * the {@link #removeSegments(IntSet)} and {@link #addSegments(IntSet)} methods. A segment can be added multiple times,
 * and the implementation must be able to handle this. If a write occurs on a segment that is not associated with this
 * container, the implementation may ignore the write or store it temporarily if needed (additional caching). When
 * segments are removed, an implementation is free to remove any entries that map to segments no longer associated
 * with this container.
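 * <p>
 * A minimal usage sketch (illustrative; {@code container} is a hypothetical {@code InternalDataContainer}
 * instance) that restricts iteration to a couple of owned segments instead of scanning the whole container:
 * <pre>{@code
 * IntSet owned = IntSets.mutableEmptySet(256);
 * owned.set(0);
 * owned.set(42);
 * for (Iterator<InternalCacheEntry<K, V>> it = container.iterator(owned); it.hasNext(); ) {
 *    InternalCacheEntry<K, V> entry = it.next();
 *    // only entries whose keys map to segments 0 or 42 are visited
 * }
 * }</pre>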
* @author wburns
* @since 9.3
*/
public interface InternalDataContainer<K, V> extends DataContainer<K, V> {
/**
* {@inheritDoc}
* <p>
 * Infinispan code should only ever use the non-blocking variant {@link #peek(int, Object)}.
* @deprecated since 10.1
*/
@Deprecated
@Override
InternalCacheEntry<K, V> get(Object k);
/**
 * Same as {@link DataContainer#get(Object)} except that the segment of the key can be provided to look up entries
 * without calculating the segment for the given key.
* @param segment segment for the key
* @param k key under which entry is stored
* @return entry, if it exists and has not expired, or null if not
* @deprecated since 10.1
*/
@Deprecated
InternalCacheEntry<K, V> get(int segment, Object k);
/**
 * Same as {@link DataContainer#peek(Object)} except that the segment of the key can be provided to look up entries
 * without calculating the segment for the given key.
* @param segment segment for the key
* @param k key under which entry is stored
* @return entry, if it exists, or null if not
*/
InternalCacheEntry<K, V> peek(int segment, Object k);
/**
 * Touches an entry in the data container. This will update the last access time of the entry as well as count it
 * as an access for eviction-based recency.
* @param segment segment for the key
* @param k key under which entry is stored
* @param currentTimeMillis the current time in milliseconds to touch the entry with
* @return true if the entry timestamp was touched
*/
boolean touch(int segment, Object k, long currentTimeMillis);
/**
 * Same as {@link DataContainer#put(Object, Object, Metadata)} except that the segment of the key can be provided to
 * write/look up entries without calculating the segment for the given key.
*
 * <p>Note: The timestamps are ignored if the entry already exists in the data container.</p>
*
* @param segment segment for the key
* @param k key under which to store entry
* @param v value to store
* @param metadata metadata of the entry
 * @param internalMetadata internal (private) metadata of the entry
* @param createdTimestamp creation timestamp, or {@code -1} to use the current time
* @param lastUseTimestamp last use timestamp, or {@code -1} to use the current time
*
* @since 10.0
*/
void put(int segment, K k, V v, Metadata metadata, PrivateMetadata internalMetadata, long createdTimestamp,
long lastUseTimestamp);
/**
 * Same as {@link DataContainer#containsKey(Object)} except that the segment of the key can be provided to
 * check whether the entry exists without calculating the segment for the given key.
* @param segment segment for the key
* @param k key under which entry is stored
* @return true if entry exists and has not expired; false otherwise
*/
boolean containsKey(int segment, Object k);
/**
 * Same as {@link DataContainer#remove(Object)} except that the segment of the key can be provided to
 * remove the entry without calculating the segment for the given key.
* @param segment segment for the key
* @param k key to remove
* @return entry removed, or null if it didn't exist or had expired
*/
InternalCacheEntry<K, V> remove(int segment, Object k);
/**
 * Same as {@link DataContainer#evict(Object)} except that the segment of the key can be provided to
 * remove the entry without calculating the segment for the given key.
* @param segment segment for the key
* @param key The key to evict.
*/
CompletionStage<Void> evict(int segment, K key);
/**
 * Same as {@link DataContainer#compute(Object, ComputeAction)} except that the segment of the key can be provided to
 * update entries without calculating the segment for the given key.
* @param segment segment for the key
* @param key The key.
* @param action The action that will compute the new value.
* @return The {@link org.infinispan.container.entries.InternalCacheEntry} associated to the key.
*/
InternalCacheEntry<K, V> compute(int segment, K key, ComputeAction<K, V> action);
/**
* Returns how many entries are present in the data container that map to the given segments without counting entries
* that are currently expired.
* @param segments segments of entries to count
* @return count of the number of entries in the container excluding expired entries
* @implSpec
* Default method invokes the {@link #iterator(IntSet)} method and just counts entries.
*/
default int size(IntSet segments) {
int size = 0;
// We have to loop through and count the entries
for (Iterator<InternalCacheEntry<K, V>> iter = iterator(segments); iter.hasNext(); ) {
iter.next();
if (++size == Integer.MAX_VALUE) return Integer.MAX_VALUE;
}
return size;
}
/**
 * Returns how many entries are present in the data container that map to the given segments, including any
 * entries that may be expired.
* @param segments segments of entries to count
* @return count of the number of entries in the container including expired entries
*/
default int sizeIncludingExpired(IntSet segments) {
int size = 0;
// We have to loop through and count the expired entries
for (Iterator<InternalCacheEntry<K, V>> iter = iteratorIncludingExpired(segments); iter.hasNext(); ) {
iter.next();
if (++size == Integer.MAX_VALUE) return Integer.MAX_VALUE;
}
return size;
}
/**
* Removes entries from the container whose key maps to one of the provided segments
* @param segments segments of entries to remove
*/
void clear(IntSet segments);
/**
* Same as {@link DataContainer#spliterator()} except that only entries that map to the provided segments are
* returned via this spliterator. The spliterator will not return expired entries.
* @param segments segments of entries to return
* @return spliterator containing entries mapping to those segments that aren't expired
*/
Spliterator<InternalCacheEntry<K, V>> spliterator(IntSet segments);
/**
* Same as {@link DataContainer#spliteratorIncludingExpired()} except that only entries that map to the provided
* segments are returned via this spliterator. The spliterator will return expired entries as well.
* @param segments segments of entries to use
* @return spliterator containing entries mapping to those segments that could be expired
*/
Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired(IntSet segments);
/**
* Same as {@link DataContainer#iterator()} except that only entries that map to the provided segments are
* returned via the iterator. The iterator will not return expired entries.
* @param segments segments of entries to use
* @return iterator that returns all entries mapped to the given segments
*/
Iterator<InternalCacheEntry<K, V>> iterator(IntSet segments);
/**
* Same as {@link DataContainer#iteratorIncludingExpired()} except that only entries that map to the provided
* segments are returned via the iterator. The iterator can return expired entries.
* @param segments segments of entries to use
* @return iterator that returns all entries mapped to the given segments that could be expired
*/
Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired(IntSet segments);
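/**
 * Returns a {@link Publisher} that emits the entries mapping to the given segment, excluding expired entries.
 * <p>
 * The default implementation wraps {@link #iterator(IntSet)} in a {@link Flowable}; implementations may
 * provide a more efficient variant.
 * @param segment segment of entries to publish
 * @return publisher of non-expired entries for the segment
 */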
default Publisher<InternalCacheEntry<K, V>> publisher(int segment) {
return Flowable.fromIterable(() -> iterator(IntSets.immutableSet(segment)));
}
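/**
 * Returns a {@link Publisher} that emits the entries mapping to any of the given segments, excluding
 * expired entries.
 * <p>
 * The default implementation merges the per-segment publishers from {@link #publisher(int)}.
 * @param segments segments of entries to publish
 * @return publisher of non-expired entries for the given segments
 */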
default Publisher<InternalCacheEntry<K, V>> publisher(IntSet segments) {
return Flowable.fromIterable(segments)
.flatMap(this::publisher);
}
/**
* Performs the given action for each element of the container that maps to the given set of segments
* until all elements have been processed or the action throws an exception. Unless otherwise specified by the
* implementing class, actions are performed in the order of iteration (if an iteration order is specified).
* Exceptions thrown by the action are relayed to the caller.
*
* @implSpec
* <p>The default implementation behaves as if:
* <pre>{@code
* for (Iterator<InternalCacheEntry<K, V>> iter = iterator(segments) ; iter.hasNext() ; ) {
* InternalCacheEntry<K, V> ice = iter.next();
* action.accept(ice);
* }
* }</pre>
*
* @param action The action to be performed for each element
* @throws NullPointerException if the specified action is null
*/
default void forEach(IntSet segments, Consumer<? super InternalCacheEntry<K, V>> action) {
for (Iterator<InternalCacheEntry<K, V>> iter = iterator(segments) ; iter.hasNext() ; ) {
InternalCacheEntry<K, V> ice = iter.next();
action.accept(ice);
}
}
/**
 * Performs the given consumer for each map inside this container, once for each segment, until all maps have been
 * processed or the action throws an exception. Exceptions thrown by the action are relayed to the caller. The
 * consumer is also provided with the segment that each map corresponds to.
*
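 * <p>
 * A minimal usage sketch (illustrative; {@code sizes} is a hypothetical map) counting entries per segment:
 * <pre>{@code
 * Map<Integer, Integer> sizes = new HashMap<>();
 * container.forEachSegment((map, segment) -> sizes.put(segment, map.size()));
 * }</pre>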
* @param segmentMapConsumer The action to be performed for each element map
* @throws NullPointerException if the specified action is null
*/
void forEachSegment(ObjIntConsumer<PeekableTouchableMap<K, V>> segmentMapConsumer);
/**
 * Sets which segments this data container should be using. Segments that are already associated are unaffected;
 * the result is the union of the existing and new segments.
* @param segments segments to associate with this container
*/
void addSegments(IntSet segments);
/**
 * Removes and un-associates the given segments. This will notify any listeners registered via
 * {@link #addRemovalListener(Consumer)} of entries that were removed due to no longer being associated with this
 * container. There is no guarantee whether the consumer is invoked once or multiple times for a given group of
 * segments, and the entries may be delivered in any order.
* <p>
* When this method is invoked an implementation is free to remove any entries that don't map to segments currently
* associated with this container. Note that entries that were removed due to their segments never being associated
* with this container do not notify listeners registered via {@link #addRemovalListener(Consumer)}.
* @param segments segments that should no longer be associated with this container
*/
void removeSegments(IntSet segments);
/**
* Adds a listener that is invoked whenever {@link #removeSegments(IntSet)} is invoked providing a way for
* the listener to see what actual entries were removed from the container.
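 * <p>
 * A minimal usage sketch (illustrative):
 * <pre>{@code
 * container.addRemovalListener(removed -> {
 *    for (InternalCacheEntry<K, V> entry : removed) {
 *       // react to each entry removed by segment removal, e.g. log it or move it elsewhere
 *    }
 * });
 * }</pre>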
 * @param listener listener that is invoked with the entries removed from the container
*/
void addRemovalListener(Consumer<Iterable<InternalCacheEntry<K, V>>> listener);
/**
* Removes a previously registered listener via {@link #addRemovalListener(Consumer)}.
* @param listener the listener to remove
*/
void removeRemovalListener(Object listener);
/**
 * Cleans up any pending data, such as evictions.
*/
default void cleanUp() {
// Default is to do nothing
}
/**
 * Checks whether the container holds entries that can expire. This is <b>not</b> the same as checking
 * for expired entries: this method can return true even if no entry is currently expired.
*
* @return true if any entry can expire, false otherwise.
*/
boolean hasExpirable();
}
infinispan-main/core/src/main/java/org/infinispan/container/impl/DefaultSegmentedDataContainer.java
package org.infinispan.container.impl;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.IntConsumer;
import java.util.function.ObjIntConsumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.util.ConcatIterator;
import org.infinispan.commons.util.FlattenSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* DataContainer implementation that internally stores entries in an array of maps. This array is indexed by
* the segment that the entries belong to. This provides for much better iteration of entries when a subset of
* segments are required.
* <p>
* This implementation doesn't support bounding or temporary entries (L1).
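 * <p>
 * A minimal construction sketch (illustrative; assumes {@code PeekableTouchableContainerMap} wrapping a JDK
 * {@code ConcurrentHashMap} as the per-segment map, as the unbounded default container does):
 * <pre>{@code
 * DefaultSegmentedDataContainer<K, V> container = new DefaultSegmentedDataContainer<>(
 *       () -> new PeekableTouchableContainerMap<>(new ConcurrentHashMap<>()), 256);
 * }</pre>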
* @author wburns
* @since 9.3
*/
public class DefaultSegmentedDataContainer<K, V> extends AbstractInternalDataContainer<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
protected final AtomicReferenceArray<PeekableTouchableMap<K, V>> maps;
protected final Supplier<PeekableTouchableMap<K, V>> mapSupplier;
protected boolean shouldStopSegments;
public DefaultSegmentedDataContainer(Supplier<PeekableTouchableMap<K, V>> mapSupplier, int numSegments) {
maps = new AtomicReferenceArray<>(numSegments);
this.mapSupplier = Objects.requireNonNull(mapSupplier);
}
@Start
public void start() {
// For local (invalidation) and replicated modes we just instantiate all the maps immediately.
// Distributed mode needs them all only at the beginning for preload of data - a rehash event will remove the others.
for (int i = 0; i < maps.length(); ++i) {
startNewMap(i);
}
// Distributed is the only mode that allows for dynamic addition/removal of maps as others own all segments
// in some fashion
shouldStopSegments = configuration.clustering().cacheMode().isDistributed();
}
@Stop
public void stop() {
clear();
for (int i = 0; i < maps.length(); ++i) {
stopMap(i, false);
}
}
@Override
public int getSegmentForKey(Object key) {
return keyPartitioner.getSegment(key);
}
@Override
public PeekableTouchableMap<K, V> getMapForSegment(int segment) {
return maps.get(segment);
}
@Override
public Publisher<InternalCacheEntry<K, V>> publisher(int segment) {
return Flowable.defer(() -> {
long accessTime = timeService.wallClockTime();
return innerPublisher(segment, accessTime);
});
}
private Publisher<InternalCacheEntry<K, V>> innerPublisher(int segment, long accessTime) {
ConcurrentMap<K, InternalCacheEntry<K, V>> mapForSegment = maps.get(segment);
if (mapForSegment == null) {
return Flowable.empty();
}
return Flowable.fromIterable(mapForSegment.values()).filter(e -> !e.isExpired(accessTime));
}
@Override
public Publisher<InternalCacheEntry<K, V>> publisher(IntSet segments) {
return Flowable.defer(() -> {
long accessTime = timeService.wallClockTime();
return Flowable.fromIterable(segments)
.flatMap(segment -> innerPublisher(segment, accessTime));
});
}
@Override
public Iterator<InternalCacheEntry<K, V>> iterator(IntSet segments) {
return new EntryIterator(iteratorIncludingExpired(segments));
}
@Override
public Iterator<InternalCacheEntry<K, V>> iterator() {
return new EntryIterator(iteratorIncludingExpired());
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliterator(IntSet segments) {
return filterExpiredEntries(spliteratorIncludingExpired(segments));
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliterator() {
return filterExpiredEntries(spliteratorIncludingExpired());
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired(IntSet segments) {
// TODO: explore creating streaming approach to not create this list?
List<Collection<InternalCacheEntry<K, V>>> valueIterables = new ArrayList<>(segments.size());
segments.forEach((int s) -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(s);
if (map != null) {
valueIterables.add(map.values());
}
});
return new ConcatIterator<>(valueIterables);
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired() {
List<Collection<InternalCacheEntry<K, V>>> valueIterables = new ArrayList<>(maps.length() + 1);
for (int i = 0; i < maps.length(); ++i) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(i);
if (map != null) {
valueIterables.add(map.values());
}
}
return new ConcatIterator<>(valueIterables);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired(IntSet segments) {
// Copy the ints into an array to parallelize them
int[] segmentArray = segments.toIntArray();
return new FlattenSpliterator<>(i -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(segmentArray[i]);
if (map == null) {
return Collections.emptyList();
}
return map.values();
}, segmentArray.length, Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired() {
return new FlattenSpliterator<>(i -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(i);
if (map == null) {
return Collections.emptyList();
}
return map.values();
}, maps.length(), Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
@Override
public int sizeIncludingExpired(IntSet segments) {
int size = 0;
for (PrimitiveIterator.OfInt iter = segments.iterator(); iter.hasNext(); ) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(iter.nextInt());
size += map != null ? map.size() : 0;
// Overflow
if (size < 0) {
return Integer.MAX_VALUE;
}
}
return size;
}
@Override
public int sizeIncludingExpired() {
int size = 0;
for (int i = 0; i < maps.length(); ++i) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(i);
if (map != null) {
size += map.size();
// Overflow
if (size < 0) {
return Integer.MAX_VALUE;
}
}
}
return size;
}
@Override
public void clear() {
for (int i = 0; i < maps.length(); ++i) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(i);
if (map != null) {
map.clear();
}
}
}
@Override
public void forEach(IntSet segments, Consumer<? super InternalCacheEntry<K, V>> action) {
Predicate<InternalCacheEntry<K, V>> expiredPredicate = expiredIterationPredicate(timeService.wallClockTime());
BiConsumer<? super K, ? super InternalCacheEntry<K, V>> biConsumer = (k, ice) -> {
if (expiredPredicate.test(ice)) {
action.accept(ice);
}
};
segments.forEach((int s) -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(s);
if (map != null) {
map.forEach(biConsumer);
}
});
}
@Override
public void addSegments(IntSet segments) {
if (shouldStopSegments) {
if (log.isTraceEnabled()) {
log.tracef("Ensuring segments %s are started", segments);
}
// Without this we will get a boxing and unboxing from int to Integer and back to int
segments.forEach((IntConsumer) this::startNewMap);
}
}
@Override
public void removeSegments(IntSet segments) {
if (shouldStopSegments) {
if (log.isTraceEnabled()) {
log.tracef("Removing segments: %s from container", segments);
}
for (PrimitiveIterator.OfInt segmentIterator = segments.iterator(); segmentIterator.hasNext(); ) {
int segment = segmentIterator.nextInt();
stopMap(segment, true);
}
}
}
@Override
public void forEachSegment(ObjIntConsumer<PeekableTouchableMap<K, V>> segmentMapConsumer) {
for (int i = 0; i < maps.length(); ++i) {
PeekableTouchableMap<K, V> map = maps.get(i);
if (map != null) {
segmentMapConsumer.accept(map, i);
}
}
}
private void startNewMap(int segment) {
if (maps.get(segment) == null) {
PeekableTouchableMap<K, V> newMap = mapSupplier.get();
// Just in case of concurrent starts - this shouldn't be possible
if (!maps.compareAndSet(segment, null, newMap) && newMap instanceof AutoCloseable) {
try {
((AutoCloseable) newMap).close();
} catch (Exception e) {
throw new CacheException(e);
}
}
}
}
private void stopMap(int segment, boolean notifyListener) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.getAndSet(segment, null);
if (map != null) {
if (notifyListener && !map.isEmpty()) {
listeners.forEach(c -> c.accept(map.values()));
}
segmentRemoved(map);
if (map instanceof AutoCloseable) {
try {
((AutoCloseable) map).close();
} catch (Exception e) {
throw new CacheException(e);
}
}
}
}
}
infinispan-main/core/src/main/java/org/infinispan/container/impl/DefaultDataContainer.java
package org.infinispan.container.impl;
import java.lang.invoke.MethodHandles;
import java.util.Iterator;
import java.util.Optional;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.ObjIntConsumer;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.util.EntrySizeCalculator;
import org.infinispan.commons.util.FilterIterator;
import org.infinispan.commons.util.FilterSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntrySizeCalculator;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.PrimitiveEntrySizeCalculator;
import org.infinispan.eviction.EvictionType;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.marshall.core.WrappedByteArraySizeCalculator;
import org.reactivestreams.Publisher;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.Policy;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
/**
 * DefaultDataContainer is a data container that supports both bounded (eviction-enabled) and unbounded storage.
*
*
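 * <p>
 * Instances are typically obtained via the static factory methods in this class, e.g. (illustrative):
 * <pre>{@code
 * // Unbounded container backed by a plain ConcurrentHashMap
 * DefaultDataContainer<String, String> unbounded = DefaultDataContainer.unBoundedDataContainer(64);
 * // Bounded container that evicts once more than 10,000 entries are stored
 * DefaultDataContainer<String, String> bounded =
 *       DefaultDataContainer.boundedDataContainer(64, 10_000, EvictionType.COUNT);
 * }</pre>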
* @author Manik Surtani
* @author Galder Zamarreño
* @author Vladimir Blagojevic
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
*
* @since 4.0
*/
@ThreadSafe
public class DefaultDataContainer<K, V> extends AbstractInternalDataContainer<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final PeekableTouchableMap<K, V> entries;
private final Cache<K, InternalCacheEntry<K, V>> evictionCache;
public DefaultDataContainer(int concurrencyLevel) {
// No eviction required, so we can back the container with a plain JDK ConcurrentHashMap
entries = new PeekableTouchableContainerMap<>(new ConcurrentHashMap<>(128));
evictionCache = null;
}
protected DefaultDataContainer(int concurrencyLevel, long thresholdSize, EvictionType thresholdPolicy) {
DefaultEvictionListener evictionListener = new DefaultEvictionListener();
Caffeine<K, InternalCacheEntry<K, V>> caffeine = caffeineBuilder();
switch (thresholdPolicy) {
case MEMORY:
CacheEntrySizeCalculator<K, V> calc = new CacheEntrySizeCalculator<>(new WrappedByteArraySizeCalculator<>(
new PrimitiveEntrySizeCalculator()));
caffeine.weigher((k, v) -> (int) calc.calculateSize(k, v)).maximumWeight(thresholdSize);
break;
case COUNT:
caffeine.maximumSize(thresholdSize);
break;
default:
throw new UnsupportedOperationException("Policy not supported: " + thresholdPolicy);
}
evictionCache = applyListener(caffeine, evictionListener).build();
entries = new PeekableTouchableCaffeineMap<>(evictionCache);
}
/**
 * Constructor used when a memory-based eviction policy is configured. The given calculator sizes only the
 * key and value.
 * @param concurrencyLevel ignored; retained for backwards compatibility
 * @param thresholdSize the maximum total weight of the container
 * @param sizeCalculator calculator used to size each key and value
*/
protected DefaultDataContainer(int concurrencyLevel, long thresholdSize,
EntrySizeCalculator<? super K, ? super V> sizeCalculator) {
this(thresholdSize, new CacheEntrySizeCalculator<>(sizeCalculator));
}
/**
 * Constructor that allows the user to provide a size calculator that also handles the cache entry and metadata.
 * @param thresholdSize the maximum total weight of the container
 * @param sizeCalculator calculator used to size each key and its whole cache entry, including metadata
*/
protected DefaultDataContainer(long thresholdSize,
EntrySizeCalculator<? super K, ? super InternalCacheEntry<K, V>> sizeCalculator) {
DefaultEvictionListener evictionListener = new DefaultEvictionListener();
evictionCache = applyListener(Caffeine.newBuilder()
.weigher((K k, InternalCacheEntry<K, V> v) -> (int) sizeCalculator.calculateSize(k, v))
.maximumWeight(thresholdSize), evictionListener)
.build();
entries = new PeekableTouchableCaffeineMap<>(evictionCache);
}
public static <K, V> DefaultDataContainer<K, V> boundedDataContainer(int concurrencyLevel, long maxEntries,
EvictionType thresholdPolicy) {
return new DefaultDataContainer<>(concurrencyLevel, maxEntries, thresholdPolicy);
}
public static <K, V> DefaultDataContainer<K, V> boundedDataContainer(int concurrencyLevel, long maxEntries,
EntrySizeCalculator<? super K, ? super V> sizeCalculator) {
return new DefaultDataContainer<>(concurrencyLevel, maxEntries, sizeCalculator);
}
public static <K, V> DefaultDataContainer<K, V> unBoundedDataContainer(int concurrencyLevel) {
return new DefaultDataContainer<>(concurrencyLevel);
}
@Override
protected PeekableTouchableMap<K, V> getMapForSegment(int segment) {
return entries;
}
@Override
protected int getSegmentForKey(Object key) {
// We always map to the same map, so there is no reason to waste time computing the segment
return -1;
}
private Policy.Eviction<K, InternalCacheEntry<K, V>> eviction() {
if (evictionCache != null) {
Optional<Policy.Eviction<K, InternalCacheEntry<K, V>>> eviction = evictionCache.policy().eviction();
if (eviction.isPresent()) {
return eviction.get();
}
}
throw new UnsupportedOperationException();
}
@Override
public long capacity() {
Policy.Eviction<K, InternalCacheEntry<K, V>> evict = eviction();
return evict.getMaximum();
}
@Override
public void resize(long newSize) {
Policy.Eviction<K, InternalCacheEntry<K, V>> evict = eviction();
evict.setMaximum(newSize);
}
@Override
public int sizeIncludingExpired() {
return entries.size();
}
@Override
public void clear(IntSet segments) {
Iterator<InternalCacheEntry<K, V>> iter = iteratorIncludingExpired(segments);
while (iter.hasNext()) {
iter.next();
iter.remove();
}
}
@Stop
@Override
public void clear() {
log.tracef("Clearing data container");
entries.clear();
}
@Override
public Publisher<InternalCacheEntry<K, V>> publisher(IntSet segments) {
return Flowable.fromIterable(() -> iterator(segments));
}
@Override
public Iterator<InternalCacheEntry<K, V>> iterator() {
return new EntryIterator(entries.values().iterator());
}
@Override
public Iterator<InternalCacheEntry<K, V>> iterator(IntSet segments) {
return new FilterIterator<>(iterator(), ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliterator() {
return filterExpiredEntries(spliteratorIncludingExpired());
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliterator(IntSet segments) {
return new FilterSpliterator<>(spliterator(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired() {
// Technically this spliterator is distinct, but the DISTINCT characteristic won't be set - we assume that is okay for now
return entries.values().spliterator();
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired(IntSet segments) {
return new FilterSpliterator<>(spliteratorIncludingExpired(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired() {
return entries.values().iterator();
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired(IntSet segments) {
return new FilterIterator<>(iteratorIncludingExpired(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public long evictionSize() {
Policy.Eviction<K, InternalCacheEntry<K, V>> evict = eviction();
return evict.weightedSize().orElse(entries.size());
}
@Override
public void addSegments(IntSet segments) {
throw new UnsupportedOperationException();
}
@Override
public void removeSegments(IntSet segments) {
throw new UnsupportedOperationException();
}
@Override
public void cleanUp() {
// Caffeine may not evict an entry right away if concurrent threads are writing, so this forces a cleanUp
if (evictionCache != null) {
evictionCache.cleanUp();
}
}
@Override
public void forEachSegment(ObjIntConsumer<PeekableTouchableMap<K, V>> segmentMapConsumer) {
segmentMapConsumer.accept(entries, 0);
}
}
infinispan-main/core/src/main/java/org/infinispan/container/impl/L1SegmentedDataContainer.java
package org.infinispan.container.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.ConcatIterator;
import org.infinispan.commons.util.FlattenSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.container.entries.InternalCacheEntry;
/**
 * Segmented data container that also allows entries for non-owned segments to be written to a temporary map (L1).
 * This temporary map is cleared whenever a segment is no longer owned.
 * <p>
 * If the segment is owned, only the owner segment's map is used. If the segment is not owned, the temporary
 * map is queried to see if the object is stored there.
* @author wburns
* @since 9.3
*/
public class L1SegmentedDataContainer<K, V> extends DefaultSegmentedDataContainer<K, V> {
private final PeekableTouchableMap<K, V> nonOwnedEntries;
public L1SegmentedDataContainer(Supplier<PeekableTouchableMap<K, V>> mapSupplier, int numSegments) {
super(mapSupplier, numSegments);
this.nonOwnedEntries = mapSupplier.get();
}
@Override
public void stop() {
super.stop();
if (nonOwnedEntries instanceof AutoCloseable) {
try {
((AutoCloseable) nonOwnedEntries).close();
} catch (Exception e) {
throw new CacheException(e);
}
}
}
@Override
public PeekableTouchableMap<K, V> getMapForSegment(int segment) {
PeekableTouchableMap<K, V> map = super.getMapForSegment(segment);
if (map == null) {
map = nonOwnedEntries;
}
return map;
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired(IntSet segments) {
// We could explore a streaming approach here to not have to allocate an additional ArrayList
List<Collection<InternalCacheEntry<K, V>>> valueIterables = new ArrayList<>(segments.size() + 1);
PrimitiveIterator.OfInt iter = segments.iterator();
boolean includeOthers = false;
while (iter.hasNext()) {
int segment = iter.nextInt();
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(segment);
if (map != null) {
valueIterables.add(map.values());
} else {
includeOthers = true;
}
}
if (includeOthers) {
valueIterables.add(nonOwnedEntries.values().stream()
.filter(e -> segments.contains(getSegmentForKey(e.getKey())))
.collect(Collectors.toSet()));
}
return new ConcatIterator<>(valueIterables);
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired() {
List<Collection<InternalCacheEntry<K, V>>> valueIterables = new ArrayList<>(maps.length() + 1);
for (int i = 0; i < maps.length(); ++i) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(i);
if (map != null) {
valueIterables.add(map.values());
}
}
valueIterables.add(nonOwnedEntries.values());
return new ConcatIterator<>(valueIterables);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired(IntSet segments) {
// Copy the ints into an array to parallelize them if needed
int[] segmentArray = segments.toIntArray();
// This variable is used when we query a segment we don't own. In this case we return all the temporary
// entries and set the variable, ensuring we don't return them on any subsequent segment "misses"
AtomicBoolean usedOthers = new AtomicBoolean(false);
return new FlattenSpliterator<>(i -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(segmentArray[i]);
if (map == null) {
if (!usedOthers.getAndSet(true)) {
return nonOwnedEntries.values().stream()
.filter(e -> segments.contains(getSegmentForKey(e.getKey())))
.collect(Collectors.toSet());
}
return Collections.emptyList();
}
return map.values();
}, segmentArray.length, Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired() {
// This variable is used when we query a segment we don't own. In this case we return all the temporary
// entries and set the variable, ensuring we don't return them on any subsequent segment "misses"
AtomicBoolean usedOthers = new AtomicBoolean(false);
return new FlattenSpliterator<>(i -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(i);
if (map == null) {
if (!usedOthers.getAndSet(true)) {
return nonOwnedEntries.values();
}
return Collections.emptyList();
}
return map.values();
}, maps.length(), Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
@Override
public void clear() {
nonOwnedEntries.clear();
super.clear();
}
/**
* Removes all entries that map to the given segments
* @param segments the segments to clear data for
*/
@Override
public void clear(IntSet segments) {
IntSet extraSegments = null;
PrimitiveIterator.OfInt iter = segments.iterator();
// First try to just clear the respective maps
while (iter.hasNext()) {
int segment = iter.nextInt();
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(segment);
if (map != null) {
map.clear();
} else {
// If we don't have a map for a segment we have to later go through the unowned segments and remove
// those entries separately
if (extraSegments == null) {
extraSegments = IntSets.mutableEmptySet(segments.size());
}
extraSegments.set(segment);
}
}
if (extraSegments != null) {
IntSet finalExtraSegments = extraSegments;
nonOwnedEntries.keySet().removeIf(k -> finalExtraSegments.contains(getSegmentForKey(k)));
}
}
@Override
public void removeSegments(IntSet segments) {
nonOwnedEntries.clear();
super.removeSegments(segments);
}
}
infinispan-main/core/src/main/java/org/infinispan/container/impl/AbstractDelegatingInternalDataContainer.java
package org.infinispan.container.impl;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.concurrent.CompletionStage;
import java.util.function.Consumer;
import java.util.function.ObjIntConsumer;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
 * Data container that delegates all calls to the container returned from {@link #delegate()}.
* @author wburns
* @since 9.0
*/
@Scope(Scopes.NAMED_CACHE)
public abstract class AbstractDelegatingInternalDataContainer<K, V> implements InternalDataContainer<K, V> {
@Inject
void inject(ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(delegate(), false);
}
protected abstract InternalDataContainer<K, V> delegate();
@Override
public InternalCacheEntry<K, V> get(Object k) {
return delegate().get(k);
}
@Override
public InternalCacheEntry<K, V> get(int segment, Object k) {
return delegate().get(segment, k);
}
@Override
public InternalCacheEntry<K, V> peek(Object k) {
return delegate().peek(k);
}
@Override
public InternalCacheEntry<K, V> peek(int segment, Object k) {
return delegate().peek(segment, k);
}
@Override
public boolean touch(int segment, Object k, long currentTimeMillis) {
return delegate().touch(segment, k, currentTimeMillis);
}
@Override
public void put(K k, V v, Metadata metadata) {
delegate().put(k, v, metadata);
}
@Override
public void put(int segment, K k, V v, Metadata metadata, PrivateMetadata internalMetadata, long createdTimestamp,
long lastUseTimestamp) {
delegate().put(segment, k, v, metadata, internalMetadata, createdTimestamp, lastUseTimestamp);
}
@Override
public boolean containsKey(Object k) {
return delegate().containsKey(k);
}
@Override
public boolean containsKey(int segment, Object k) {
return delegate().containsKey(segment, k);
}
@Override
public InternalCacheEntry<K, V> remove(Object k) {
return delegate().remove(k);
}
@Override
public InternalCacheEntry<K, V> remove(int segment, Object k) {
return delegate().remove(segment, k);
}
@Override
public void evict(K key) {
delegate().evict(key);
}
@Override
public CompletionStage<Void> evict(int segment, K key) {
return delegate().evict(segment, key);
}
@Override
public InternalCacheEntry<K, V> compute(K key, ComputeAction<K, V> action) {
return delegate().compute(key, action);
}
@Override
public InternalCacheEntry<K, V> compute(int segment, K key, ComputeAction<K, V> action) {
return delegate().compute(segment, key, action);
}
@Stop
@Override
public void clear() {
delegate().clear();
}
@Override
public void clear(IntSet segments) {
delegate().clear(segments);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliterator() {
return delegate().spliterator();
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliterator(IntSet segments) {
return delegate().spliterator(segments);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired() {
return delegate().spliteratorIncludingExpired();
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired(IntSet segments) {
return delegate().spliteratorIncludingExpired(segments);
}
@Override
public Iterator<InternalCacheEntry<K, V>> iterator() {
return delegate().iterator();
}
@Override
public Iterator<InternalCacheEntry<K, V>> iterator(IntSet segments) {
return delegate().iterator(segments);
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired() {
return delegate().iteratorIncludingExpired();
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired(IntSet segments) {
return delegate().iteratorIncludingExpired(segments);
}
@Override
public void forEach(Consumer<? super InternalCacheEntry<K, V>> action) {
delegate().forEach(action);
}
@Override
public void forEach(IntSet segments, Consumer<? super InternalCacheEntry<K, V>> action) {
delegate().forEach(segments, action);
}
@Override
public void forEachSegment(ObjIntConsumer<PeekableTouchableMap<K, V>> segmentMapConsumer) {
delegate().forEachSegment(segmentMapConsumer);
}
@Override
public int size() {
return delegate().size();
}
@Override
public int size(IntSet segments) {
return delegate().size(segments);
}
@Override
public int sizeIncludingExpired() {
return delegate().sizeIncludingExpired();
}
@Override
public int sizeIncludingExpired(IntSet segments) {
return delegate().sizeIncludingExpired(segments);
}
@Override
public void addSegments(IntSet segments) {
delegate().addSegments(segments);
}
@Override
public void removeSegments(IntSet segments) {
delegate().removeSegments(segments);
}
@Override
public void addRemovalListener(Consumer<Iterable<InternalCacheEntry<K, V>>> listener) {
delegate().addRemovalListener(listener);
}
@Override
public void removeRemovalListener(Object listener) {
delegate().removeRemovalListener(listener);
}
// Eviction related methods
@Override
public long capacity() {
return delegate().capacity();
}
@Override
public long evictionSize() {
return delegate().evictionSize();
}
@Override
public void resize(long newSize) {
delegate().resize(newSize);
}
@Override
public boolean hasExpirable() {
return delegate().hasExpirable();
}
}
infinispan-main/core/src/main/java/org/infinispan/container/impl/EntryFactoryImpl.java
package org.infinispan.container.impl;
import static org.infinispan.commons.util.Util.toStr;
import java.util.concurrent.CompletionStage;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.container.entries.ReadCommittedEntry;
import org.infinispan.container.entries.RepeatableReadEntry;
import org.infinispan.container.entries.VersionedRepeatableReadEntry;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.expiration.impl.InternalExpirationManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * {@link EntryFactory} implementation to be used for the optimistic locking scheme.
*
* @author Mircea Markus
* @since 5.1
*/
@Scope(Scopes.NAMED_CACHE)
public class EntryFactoryImpl implements EntryFactory {
private static final Log log = LogFactory.getLog(EntryFactoryImpl.class);
@Inject InternalDataContainer container;
@Inject Configuration configuration;
@Inject TimeService timeService;
@Inject VersionGenerator versionGenerator;
@Inject DistributionManager distributionManager;
@Inject InternalExpirationManager expirationManager;
private boolean isL1Enabled;
private boolean useRepeatableRead;
private boolean useVersioning;
private PrivateMetadata nonExistingPrivateMetadata;
@Start (priority = 8)
public void init() {
// Scattered mode needs repeatable-read entries to properly retry half-committed multi-key operations
// (see RetryingEntryWrappingInterceptor for details).
useRepeatableRead = configuration.transaction().transactionMode().isTransactional()
&& configuration.locking().isolationLevel() == IsolationLevel.REPEATABLE_READ;
isL1Enabled = configuration.clustering().l1().enabled();
// Write-skew check implies isolation level = REPEATABLE_READ && locking mode = OPTIMISTIC
useVersioning = Configurations.isTxVersioned(configuration);
nonExistingPrivateMetadata = new PrivateMetadata.Builder()
.entryVersion(versionGenerator.nonExistingVersion())
.build();
}
@Override
public final CompletionStage<Void> wrapEntryForReading(InvocationContext ctx, Object key, int segment, boolean isOwner,
boolean hasLock, CompletionStage<Void> previousStage) {
if (!isOwner && !isL1Enabled) {
return previousStage;
}
CacheEntry cacheEntry = getFromContext(ctx, key);
if (cacheEntry == null) {
InternalCacheEntry readEntry = getFromContainer(key, segment);
if (readEntry == null) {
if (isOwner) {
addReadEntryToContext(ctx, NullCacheEntry.getInstance(), key);
}
} else if (isOwner || readEntry.isL1Entry()) {
if (readEntry.canExpire()) {
CompletionStage<Boolean> expiredStage = expirationManager.handlePossibleExpiration(readEntry, segment, hasLock);
if (CompletionStages.isCompletedSuccessfully(expiredStage)) {
Boolean expired = CompletionStages.join(expiredStage);
handleExpiredEntryContextAddition(expired, ctx, readEntry, key, isOwner);
} else {
return expiredStage.thenAcceptBoth(previousStage, (expired, __) -> {
handleExpiredEntryContextAddition(expired, ctx, readEntry, key, isOwner);
});
}
} else {
addReadEntryToContext(ctx, readEntry, key);
}
}
}
return previousStage;
}
private void handleExpiredEntryContextAddition(Boolean expired, InvocationContext ctx, InternalCacheEntry readEntry,
Object key, boolean isOwner) {
// Multi-key commands perform the expiration check in parallel, so they need synchronization
if (expired == Boolean.FALSE) {
addReadEntryToContext(ctx, readEntry, key);
} else if (isOwner) {
addReadEntryToContext(ctx, NullCacheEntry.getInstance(), key);
}
}
private void addReadEntryToContext(InvocationContext ctx, CacheEntry cacheEntry, Object key) {
// With repeatable read, we need to create a RepeatableReadEntry as internal cache entries are mutable
// Otherwise we can store the InternalCacheEntry directly in the context
if (useRepeatableRead) {
MVCCEntry mvccEntry = createWrappedEntry(key, cacheEntry);
mvccEntry.setRead();
cacheEntry = mvccEntry;
}
if (log.isTraceEnabled()) {
log.tracef("Wrap %s for read. Entry=%s", toStr(key), cacheEntry);
}
ctx.putLookedUpEntry(key, cacheEntry);
}
private void addWriteEntryToContext(InvocationContext ctx, CacheEntry cacheEntry, Object key, boolean isRead) {
MVCCEntry mvccEntry = createWrappedEntry(key, cacheEntry);
if (cacheEntry.isNull()) {
mvccEntry.setCreated(true);
}
if (isRead) {
mvccEntry.setRead();
}
ctx.putLookedUpEntry(key, mvccEntry);
if (log.isTraceEnabled())
log.tracef("Added context entry %s", mvccEntry);
}
@Override
public CompletionStage<Void> wrapEntryForWriting(InvocationContext ctx, Object key, int segment, boolean isOwner,
boolean isRead, CompletionStage<Void> previousStage) {
CacheEntry contextEntry = getFromContext(ctx, key);
if (contextEntry instanceof MVCCEntry) {
// Nothing to do, already wrapped.
} else if (contextEntry != null) {
// Already in the context as an InternalCacheEntry
// Need to wrap it in a MVCCEntry.
MVCCEntry mvccEntry = createWrappedEntry(key, contextEntry);
ctx.putLookedUpEntry(key, mvccEntry);
if (log.isTraceEnabled())
log.tracef("Updated context entry %s -> %s", contextEntry, mvccEntry);
} else {
// Not in the context yet.
InternalCacheEntry ice = getFromContainer(key, segment);
if (isOwner) {
if (ice == null) {
addWriteEntryToContext(ctx, NullCacheEntry.getInstance(), key, isRead);
} else {
if (ice.canExpire()) {
CompletionStage<Boolean> expiredStage = expirationManager.handlePossibleExpiration(ice, segment, true);
if (CompletionStages.isCompletedSuccessfully(expiredStage)) {
Boolean expired = CompletionStages.join(expiredStage);
handleWriteExpiredEntryContextAddition(expired, ctx, ice, key, isRead);
} else {
// Serialize invocation context access
return expiredStage.thenAcceptBoth(previousStage, (expired, __) -> {
handleWriteExpiredEntryContextAddition(expired, ctx, ice, key, isRead);
});
}
} else {
addWriteEntryToContext(ctx, ice, key, isRead);
}
}
} else if (isL1Enabled && ice != null && !ice.isL1Entry()) {
addWriteEntryToContext(ctx, ice, key, isRead);
}
}
return previousStage;
}
private void handleWriteExpiredEntryContextAddition(Boolean expired, InvocationContext ctx, InternalCacheEntry ice,
Object key, boolean isRead) {
// Multi-key commands perform the expiration check in parallel, so they need synchronization
if (expired == Boolean.FALSE) {
addWriteEntryToContext(ctx, ice, key, isRead);
} else {
addWriteEntryToContext(ctx, NullCacheEntry.getInstance(), key, isRead);
}
}
@Override
public void wrapEntryForWritingSkipExpiration(InvocationContext ctx, Object key, int segment, boolean isOwner) {
CacheEntry contextEntry = getFromContext(ctx, key);
if (contextEntry instanceof MVCCEntry) {
// Nothing to do, already wrapped.
} else if (contextEntry != null) {
// Already in the context as an InternalCacheEntry
// Need to wrap it in a MVCCEntry.
MVCCEntry mvccEntry = createWrappedEntry(key, contextEntry);
ctx.putLookedUpEntry(key, mvccEntry);
if (log.isTraceEnabled())
log.tracef("Updated context entry %s -> %s", contextEntry, mvccEntry);
} else if (isOwner) {
// Not in the context yet.
CacheEntry cacheEntry = getFromContainer(key, segment);
if (cacheEntry == null) {
cacheEntry = NullCacheEntry.getInstance();
}
MVCCEntry mvccEntry = createWrappedEntry(key, cacheEntry);
// Make sure to set the created date so we can verify if the entry actually expired
mvccEntry.setCreated(cacheEntry.getCreated());
if (cacheEntry.isNull()) {
mvccEntry.setCreated(true);
}
mvccEntry.setRead();
ctx.putLookedUpEntry(key, mvccEntry);
if (log.isTraceEnabled())
log.tracef("Updated context entry null -> %s", mvccEntry);
}
}
@Override
public void wrapExternalEntry(InvocationContext ctx, Object key, CacheEntry externalEntry, boolean isRead,
boolean isWrite) {
// For a write operation, the entry is always already wrapped. For a read operation, the entry may be
// in the context as an InternalCacheEntry, as null, or missing altogether.
CacheEntry<?, ?> contextEntry = getFromContext(ctx, key);
if (contextEntry instanceof MVCCEntry) {
MVCCEntry mvccEntry = (MVCCEntry) contextEntry;
// Already wrapped for a write. Update the value and the metadata.
if (mvccEntry.skipLookup()) {
// This can happen during getGroup() invocations, which request the whole group from remote nodes
// even if some keys are already in the context.
if (log.isTraceEnabled())
log.tracef("Ignored update for context entry %s", contextEntry);
return;
}
// Without updating the initial value, a local write-skew check would fail when the entry is loaded
// from the cache store. This shouldn't be called more than once since afterwards we set skipLookup
mvccEntry.setValue(externalEntry.getValue());
mvccEntry.setCreated(externalEntry.getCreated());
mvccEntry.setLastUsed(externalEntry.getLastUsed());
mvccEntry.setMetadata(externalEntry.getMetadata());
mvccEntry.setInternalMetadata(externalEntry.getInternalMetadata());
mvccEntry.updatePreviousValue();
if (log.isTraceEnabled()) log.tracef("Updated context entry %s", contextEntry);
} else if (contextEntry == null || contextEntry.isNull()) {
if (isWrite || useRepeatableRead) {
MVCCEntry<?, ?> mvccEntry = createWrappedEntry(key, externalEntry);
if (isRead) {
mvccEntry.setRead();
}
ctx.putLookedUpEntry(key, mvccEntry);
if (log.isTraceEnabled())
log.tracef("Updated context entry %s -> %s", contextEntry, mvccEntry);
} else {
// This is a read operation, store the external entry in the context directly.
ctx.putLookedUpEntry(key, externalEntry);
if (log.isTraceEnabled())
log.tracef("Updated context entry %s -> %s", contextEntry, externalEntry);
}
} else {
if (useRepeatableRead) {
if (log.isTraceEnabled()) log.tracef("Ignored update %s -> %s as we do repeatable reads", contextEntry, externalEntry);
} else {
ctx.putLookedUpEntry(key, externalEntry);
if (log.isTraceEnabled()) log.tracef("Updated context entry %s -> %s", contextEntry, externalEntry);
}
}
}
private CacheEntry<?, ?> getFromContext(InvocationContext ctx, Object key) {
final CacheEntry<?, ?> cacheEntry = ctx.lookupEntry(key);
if (log.isTraceEnabled()) log.tracef("Exists in context? %s ", cacheEntry);
return cacheEntry;
}
private boolean isPrimaryOwner(int segment) {
return distributionManager == null ||
distributionManager.getCacheTopology().getSegmentDistribution(segment).isPrimary();
}
private InternalCacheEntry getFromContainer(Object key, int segment) {
InternalCacheEntry ice = container.peek(segment, key);
if (log.isTraceEnabled()) {
log.tracef("Retrieved from container %s", ice);
}
return ice;
}
protected MVCCEntry<?, ?> createWrappedEntry(Object key, CacheEntry<?, ?> cacheEntry) {
Object value = null;
Metadata metadata = null;
PrivateMetadata internalMetadata = null;
if (cacheEntry != null) {
synchronized (cacheEntry) {
value = cacheEntry.getValue();
metadata = cacheEntry.getMetadata();
internalMetadata = cacheEntry.getInternalMetadata();
}
}
if (log.isTraceEnabled()) log.tracef("Creating new entry for key %s", toStr(key));
MVCCEntry<?, ?> mvccEntry;
if (useRepeatableRead) {
if (useVersioning) {
if (internalMetadata == null) {
internalMetadata = nonExistingPrivateMetadata;
}
mvccEntry = new VersionedRepeatableReadEntry(key, value, metadata);
} else {
mvccEntry = new RepeatableReadEntry(key, value, metadata);
}
} else {
mvccEntry = new ReadCommittedEntry(key, value, metadata);
}
mvccEntry.setInternalMetadata(internalMetadata);
if (cacheEntry != null) {
mvccEntry.setCreated(cacheEntry.getCreated());
mvccEntry.setLastUsed(cacheEntry.getLastUsed());
}
return mvccEntry;
}
}
infinispan-main/core/src/main/java/org/infinispan/container/impl/TouchableMap.java
package org.infinispan.container.impl;
public interface TouchableMap {
/**
 * Touches the entry for the given key in this map. This method will update any recency timestamps for both
 * expiration and eviction as needed.
* @param key key to touch
* @param currentTimeMillis the recency timestamp to set
* @return whether the entry was touched or not
*/
boolean touchKey(Object key, long currentTimeMillis);
}
infinispan-main/core/src/main/java/org/infinispan/container/offheap/UnpooledOffHeapMemoryAllocator.java
package org.infinispan.container.offheap;
import java.util.concurrent.atomic.LongAdder;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import sun.misc.Unsafe;
/**
* Memory allocator that just allocates memory directly using {@link Unsafe}.
* @author wburns
* @since 9.0
*/
public class UnpooledOffHeapMemoryAllocator implements OffHeapMemoryAllocator {
private static final Log log = LogFactory.getLog(UnpooledOffHeapMemoryAllocator.class, Log.class);
private static final OffHeapMemory MEMORY = OffHeapMemory.INSTANCE;
private final LongAdder amountAllocated = new LongAdder();
@Override
public long allocate(long memoryLength) {
long estimatedMemoryLength = estimateSizeOverhead(memoryLength);
long memoryLocation = MEMORY.allocate(memoryLength);
amountAllocated.add(estimatedMemoryLength);
if (log.isTraceEnabled()) {
log.tracef("Allocated off-heap memory at 0x%016x with %d bytes. Total size: %d", memoryLocation,
estimatedMemoryLength, amountAllocated.sum());
}
return memoryLocation;
}
@Override
public void deallocate(long memoryAddress, long size) {
long estimatedMemoryLength = estimateSizeOverhead(size);
innerDeallocate(memoryAddress, estimatedMemoryLength);
}
private void innerDeallocate(long memoryAddress, long estimatedSize) {
amountAllocated.add(- estimatedSize);
if (log.isTraceEnabled()) {
log.tracef("Deallocating off-heap memory at 0x%016x with %d bytes. Total size: %d", memoryAddress,
estimatedSize, amountAllocated.sum());
}
MEMORY.free(memoryAddress);
}
@Override
public long getAllocatedAmount() {
return amountAllocated.sum();
}
/**
 * Tries to estimate the overhead of an allocation by first adding 8 bytes to account for underlying allocator
 * housekeeping and then rounding up to the nearest multiple of 16 to account for 16-byte alignment.
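 * <p>
 * For example, {@code estimateSizeOverhead(25)} evaluates {@code (25 + 8 + 15) & ~15}: the 33 bytes needed
 * (25 requested plus 8 of housekeeping) are rounded up to 48.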
*
* @param size the desired size of the allocation
* @return the resulting size taking into account various overheads
*/
public static long estimateSizeOverhead(long size) {
// Add the 8-byte housekeeping overhead to the requested size, then round up to the next multiple of 16
// (the parentheses ensure the sum is computed before the mask is applied)
return (size + 8 + 15) & ~15;
}
/**
* See {@link #offHeapEntrySize(boolean, boolean, int, int, int, int)}
*/
public static long offHeapEntrySize(boolean evictionEnabled, boolean writeMetadataSize, int keySize, int valueSize) {
return offHeapEntrySize(evictionEnabled, writeMetadataSize, keySize, valueSize, 0, 0);
}
/**
 * Returns the off-heap size of an entry without alignment.
 * <p>
 * If alignment is required, use {@code estimateSizeOverhead(offHeapEntrySize(...))}; see
 * {@link #estimateSizeOverhead(long)}.
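 * <p>
 * For example (illustrative), with eviction enabled, {@code writeMetadataSize == false}, a 16-byte key,
 * a 32-byte value, 24 bytes of metadata and no internal metadata, the result is
 * {@code 16 + 8 + HEADER_LENGTH + 16 + 32 + 24}.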
*
* @param evictionEnabled Set to {@code true} if eviction is enabled.
* @param writeMetadataSize Set to {@code true} if the {@link org.infinispan.metadata.Metadata} has versioning or
* it is a custom implementation.
* @param keySize The key size.
* @param valueSize The value size.
* @param metadataSize The {@link org.infinispan.metadata.Metadata} size. If {@code writeMetadataSize} is
* false, this parameter must include the size of mortal/transient entries (2 or 4
* longs).
* @param internalMetadataSize The {@link PrivateMetadata} size.
* @return The off-heap entry size without alignment!
*/
public static long offHeapEntrySize(boolean evictionEnabled, boolean writeMetadataSize, int keySize, int valueSize,
int metadataSize, int internalMetadataSize) {
long size = 0;
if (evictionEnabled) {
size += 16; // Eviction requires 2 additional pointers at the beginning (8 + 8 bytes)
}
if (writeMetadataSize) {
size += 4; // If has version or it is a custom metadata, we write the metadata size (4 bytes)
}
size += 8; // linked pointer to next address (8 bytes)
size += OffHeapEntryFactoryImpl.HEADER_LENGTH;
size += keySize;
size += valueSize;
size += metadataSize;
if (internalMetadataSize > 0) {
size += 4;
size += internalMetadataSize;
}
return size;
}
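// Worked example (illustrative values): eviction enabled, no metadata size written,
// a 10 byte key, a 100 byte value, 16 bytes of mortal metadata and no private metadata:
//   16 (eviction pointers) + 8 (next pointer) + 13 (HEADER_LENGTH) + 10 + 100 + 16 = 163 bytes
// before alignment; estimateSizeOverhead(163) would then account it as 176 bytes.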
}
| 4,476
| 39.333333
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapMemoryAllocator.java
|
package org.infinispan.container.offheap;
/**
* Allows for allocation of memory outside of the heap, as well as additional functionality surrounding it if
* necessary.
* @author wburns
* @since 9.0
*/
public interface OffHeapMemoryAllocator {
/**
* Allocates a new chunk of memory sized to the given length.
* @param memoryLength the size of memory to allocate
* @return the memory address where the memory resides
*/
long allocate(long memoryLength);
/**
* Deallocates the memory at the given address assuming a given size. This size is the size that was provided
* to allocate.
* @param memoryAddress the address to deallocate from
* @param size the total size
*/
void deallocate(long memoryAddress, long size);
long getAllocatedAmount();
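// Illustrative contract (a sketch, not part of the interface): the size passed to
// deallocate must match what was passed to allocate for that address, e.g.
//   long addr = allocator.allocate(64);
//   // ... use the 64 bytes ...
//   allocator.deallocate(addr, 64);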
}
| 798
| 28.592593
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/BoundedOffHeapDataContainer.java
|
package org.infinispan.container.offheap;
import java.util.Iterator;
import java.util.List;
import java.util.Spliterator;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Consumer;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.commons.util.FilterIterator;
import org.infinispan.commons.util.FilterSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.eviction.EvictionType;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.util.concurrent.CompletionStages;
/**
* @author wburns
* @since 9.4
*/
public class BoundedOffHeapDataContainer extends SegmentedBoundedOffHeapDataContainer {
@Inject KeyPartitioner keyPartitioner;
protected final List<Consumer<Iterable<InternalCacheEntry<WrappedBytes, WrappedBytes>>>> listeners =
new CopyOnWriteArrayList<>();
public BoundedOffHeapDataContainer(long maxSize, EvictionType type) {
super(1, maxSize, type);
}
@Override
protected OffHeapConcurrentMap getMapThatContainsKey(byte[] key) {
return (OffHeapConcurrentMap) dataContainer.getMapForSegment(0);
}
@Override
public boolean containsKey(Object k) {
return super.containsKey(0, k);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> peek(Object k) {
return super.peek(0, k);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> get(Object k) {
return super.get(0, k);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> compute(WrappedBytes key, ComputeAction<WrappedBytes, WrappedBytes> action) {
return super.compute(0, key, action);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> remove(Object k) {
return super.remove(0, k);
}
@Override
public void evict(WrappedBytes key) {
CompletionStages.join(super.evict(0, key));
}
@Override
public void put(WrappedBytes key, WrappedBytes value, Metadata metadata) {
super.put(0, key, value, metadata, null, -1, -1);
}
@Override
public boolean containsKey(int segment, Object k) {
return super.containsKey(0, k);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> peek(int segment, Object k) {
return super.peek(0, k);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> get(int segment, Object k) {
return super.get(0, k);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> compute(int segment, WrappedBytes key,
ComputeAction<WrappedBytes, WrappedBytes> action) {
return super.compute(0, key, action);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> remove(int segment, Object k) {
return super.remove(0, k);
}
@Override
public CompletionStage<Void> evict(int segment, WrappedBytes key) {
return super.evict(0, key);
}
@Override
public void put(int segment, WrappedBytes key, WrappedBytes value, Metadata metadata,
PrivateMetadata internalMetadata, long createdTimestamp,
long lastUseTimestamp) {
super.put(0, key, value, metadata, internalMetadata, createdTimestamp, lastUseTimestamp);
}
@Override
public Spliterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> spliterator(IntSet segments) {
return new FilterSpliterator<>(spliterator(), ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Spliterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> spliteratorIncludingExpired(IntSet segments) {
return new FilterSpliterator<>(spliteratorIncludingExpired(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iterator(IntSet segments) {
return new FilterIterator<>(iterator(), ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iteratorIncludingExpired(IntSet segments) {
return new FilterIterator<>(iteratorIncludingExpired(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public int sizeIncludingExpired(IntSet segments) {
int size = 0;
// We have to loop through and count all the entries
for (Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iter = iteratorIncludingExpired(segments); iter.hasNext(); ) {
iter.next();
if (++size == Integer.MAX_VALUE) return Integer.MAX_VALUE;
}
return size;
}
@Override
public int size(IntSet segments) {
int size = 0;
// We have to loop through and count the non expired entries
for (Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iter = iterator(segments); iter.hasNext(); ) {
iter.next();
if (++size == Integer.MAX_VALUE) return Integer.MAX_VALUE;
}
return size;
}
@Override
public void addRemovalListener(Consumer<Iterable<InternalCacheEntry<WrappedBytes, WrappedBytes>>> listener) {
listeners.add(listener);
}
@Override
public void removeRemovalListener(Object listener) {
listeners.remove(listener);
}
@Override
public void addSegments(IntSet segments) {
throw new UnsupportedOperationException();
}
@Override
public void removeSegments(IntSet segments) {
throw new UnsupportedOperationException();
}
}
| 5,914
| 32.230337
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/StripedLock.java
|
package org.infinispan.container.offheap;
import java.util.concurrent.locks.StampedLock;
import org.infinispan.commons.util.Util;
/**
* Holder for stamped locks that provides the ability to retrieve them by offset and hashCode.
* Note that the locks protect entries.
* @author wburns
* @since 9.0
*/
public class StripedLock {
private final StampedLock[] locks;
public StripedLock(int lockCount) {
locks = new StampedLock[Util.findNextHighestPowerOfTwo(lockCount)];
for (int i = 0; i < locks.length; ++i) {
locks[i] = new StampedLock();
}
}
/**
* Retrieves the given lock at a provided offset. Note this is not hashCode based. This method requires care
* and the knowledge of how many locks there are. This is useful when iterating over all locks
* @param offset the offset of the lock to find
* @return the lock at the given offset
*/
public StampedLock getLockWithOffset(int offset) {
if (offset >= locks.length) {
throw new ArrayIndexOutOfBoundsException();
}
return locks[offset];
}
/**
* Locks all write locks. Ensure that {@link StripedLock#unlockAll()} is called in a proper finally block
*/
public void lockAll() {
for (StampedLock rwLock : locks) {
rwLock.asWriteLock().lock();
}
}
/**
* Unlocks all write locks, useful after {@link StripedLock#lockAll()} was invoked.
*/
void unlockAll() {
for (StampedLock rwLock : locks) {
rwLock.asWriteLock().unlock();
}
}
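// Illustrative usage (hypothetical caller): bulk operations that must see every
// stripe quiesced take all the write locks and release them in a finally block:
//   stripedLock.lockAll();
//   try {
//      // safely mutate or inspect all protected entries
//   } finally {
//      stripedLock.unlockAll();
//   }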
}
| 1,545
| 27.62963
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapMemory.java
|
package org.infinispan.container.offheap;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import sun.misc.Unsafe;
import java.util.concurrent.ConcurrentHashMap;
/**
* Simple wrapper around Unsafe to provide for trace messages for method calls.
* @author wburns
* @since 9.0
*/
class OffHeapMemory {
private static final Log log = LogFactory.getLog(OffHeapMemory.class);
private final ConcurrentHashMap<Long, Long> allocatedBlocks = log.isTraceEnabled() ? new ConcurrentHashMap<>() : null;
private static final Unsafe UNSAFE = UnsafeHolder.UNSAFE;
static final OffHeapMemory INSTANCE = new OffHeapMemory();
private static final int BYTE_ARRAY_BASE_OFFSET = Unsafe.ARRAY_BYTE_BASE_OFFSET;
private OffHeapMemory() { }
byte getByte(long srcAddress, long offset) {
checkAddress(srcAddress, offset + 1);
byte value = UNSAFE.getByte(srcAddress + offset);
if (log.isTraceEnabled()) {
log.tracef("Read byte value 0x%02x from address 0x%016x+%d", value, srcAddress, offset);
}
return value;
}
void putByte(long destAddress, long offset, byte value) {
checkAddress(destAddress, offset + 1);
if (log.isTraceEnabled()) {
log.tracef("Wrote byte value 0x%02x to address 0x%016x+%d", value, destAddress, offset);
}
UNSAFE.putByte(destAddress + offset, value);
}
int getInt(long srcAddress, long offset) {
checkAddress(srcAddress, offset + 4);
int value = UNSAFE.getInt(srcAddress + offset);
if (log.isTraceEnabled()) {
log.tracef("Read int value 0x%08x from address 0x%016x+%d", value, srcAddress, offset);
}
return value;
}
void putInt(long destAddress, long offset, int value) {
checkAddress(destAddress, offset + 4);
if (log.isTraceEnabled()) {
log.tracef("Wrote int value 0x%08x to address 0x%016x+%d", value, destAddress, offset);
}
UNSAFE.putInt(destAddress + offset, value);
}
long getLong(long srcAddress, long offset) {
return getLong(srcAddress, offset, true);
}
long getAndSetLong(long destAddress, long offset, long value) {
checkAddress(destAddress, offset + 8);
if (log.isTraceEnabled()) {
log.tracef("Get and setting long value 0x%016x to address 0x%016x+%d", value, destAddress, offset);
}
return UNSAFE.getAndSetLong(null, destAddress + offset, value);
}
long getAndSetLongNoTraceIfAbsent(long destAddress, long offset, long value) {
checkAddress(destAddress, offset + 8);
long previous = UNSAFE.getAndSetLong(null, destAddress + offset, value);
if (previous != 0) {
if (log.isTraceEnabled()) {
log.tracef("Get and set long value 0x%016x to address 0x%016x+%d was 0x%016x", value, destAddress, offset, previous);
}
}
return previous;
}
long getLongNoTraceIfAbsent(long srcAddress, long offset) {
return getLong(srcAddress, offset, false);
}
private long getLong(long srcAddress, long offset, boolean alwaysTrace) {
checkAddress(srcAddress, offset + 8);
long value = UNSAFE.getLong(srcAddress + offset);
if (log.isTraceEnabled() && (alwaysTrace || value != 0)) {
log.tracef("Read long value 0x%016x from address 0x%016x+%d", value, srcAddress, offset);
}
return value;
}
void putLong(long destAddress, long offset, long value) {
checkAddress(destAddress, offset + 8);
if (log.isTraceEnabled()) {
log.tracef("Wrote long value 0x%016x to address 0x%016x+%d", value, destAddress, offset);
}
UNSAFE.putLong(destAddress + offset, value);
}
void getBytes(long srcAddress, long srcOffset, byte[] destArray, long destOffset, long length) {
checkAddress(srcAddress, srcOffset + length);
if (log.isTraceEnabled()) {
log.tracef("Read %d bytes from address 0x%016x+%d into array %s+%d", length, srcAddress, srcOffset, destArray, destOffset);
}
UNSAFE.copyMemory(null, srcAddress + srcOffset, destArray, BYTE_ARRAY_BASE_OFFSET + destOffset, length);
}
void putBytes(byte[] srcArray, long srcOffset, long destAddress, long destOffset, long length) {
checkAddress(destAddress, destOffset + length);
if (log.isTraceEnabled()) {
log.tracef("Wrote %d bytes from array %s+%d to address 0x%016x+%d", length, srcArray, srcOffset, destAddress, destOffset);
}
UNSAFE.copyMemory(srcArray, BYTE_ARRAY_BASE_OFFSET + srcOffset, null, destAddress + destOffset, length);
}
void copy(long srcAddress, long srcOffset, long destAddress, long destOffset, long length) {
checkAddress(srcAddress, srcOffset + length);
checkAddress(destAddress, destOffset + length);
if (log.isTraceEnabled()) {
log.tracef("Copying %d bytes from address 0x%016x+%d to address 0x%016x+%d", length, srcAddress, srcOffset, destAddress, destOffset);
}
UNSAFE.copyMemory(srcAddress + srcOffset, destAddress + destOffset, length);
}
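// Illustrative round trip (hypothetical values; the trace-level address checks pass
// as long as the block was allocated through this class):
//   long addr = memory.allocate(16);
//   memory.putLong(addr, 0, 42L);
//   long v = memory.getLong(addr, 0); // v == 42
//   memory.free(addr);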
/**
* @deprecated Only use for debugging
*/
private byte[] getBytes(long srcAddress, long srcOffset, int length) {
checkAddress(srcAddress, srcOffset + length);
byte[] bytes = new byte[length];
UNSAFE.copyMemory(null, srcAddress + srcOffset, bytes, BYTE_ARRAY_BASE_OFFSET, length);
return bytes;
}
private void checkAddress(long address, long offset) {
if (!log.isTraceEnabled())
return;
Long blockSize = allocatedBlocks.get(address);
if (blockSize == null || blockSize < offset) {
throw new IllegalArgumentException(String.format("Trying to access address 0x%016x+%d, but blockSize was %d",
address, offset, blockSize));
}
}
long allocate(long size) {
long address = UNSAFE.allocateMemory(size);
if (log.isTraceEnabled()) {
Long prev = allocatedBlocks.put(address, size);
if (prev != null) {
throw new IllegalArgumentException();
}
}
return address;
}
void free(long address) {
if (log.isTraceEnabled()) {
Long prev = allocatedBlocks.remove(address);
if (prev == null) {
throw new IllegalArgumentException();
}
}
UNSAFE.freeMemory(address);
}
}
| 6,347
| 35.906977
| 142
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapEntryFactoryImpl.java
|
package org.infinispan.container.offheap;
import static org.infinispan.container.offheap.UnpooledOffHeapMemoryAllocator.estimateSizeOverhead;
import static org.infinispan.container.offheap.UnpooledOffHeapMemoryAllocator.offHeapEntrySize;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.marshall.StreamingMarshaller;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* Factory that can create CacheEntry instances from off-heap memory.
*
* @author wburns
* @since 9.0
*/
@Scope(Scopes.NAMED_CACHE)
public class OffHeapEntryFactoryImpl implements OffHeapEntryFactory {
private static final OffHeapMemory MEMORY = OffHeapMemory.INSTANCE;
@Inject @ComponentName(KnownComponentNames.INTERNAL_MARSHALLER)
StreamingMarshaller marshaller;
@Inject OffHeapMemoryAllocator allocator;
@Inject TimeService timeService;
@Inject InternalEntryFactory internalEntryFactory;
@Inject Configuration configuration;
private boolean evictionEnabled;
// If custom then we just store the metadata as is (no other bits should be used)
private static final byte CUSTOM = 1;
// Version can be set with any combination of the following types
private static final byte HAS_VERSION = 2;
// Only one of the following 4 should ever be set
// It should be possible to reuse this bit for something else if needed as the absence of the other three
// can imply it is immortal
private static final byte IMMORTAL = 1 << 2;
private static final byte MORTAL = 1 << 3;
private static final byte TRANSIENT = 1 << 4;
private static final byte TRANSIENT_MORTAL = 1 << 5;
private static final byte EXPIRATION_TYPES = IMMORTAL | MORTAL | TRANSIENT | TRANSIENT_MORTAL;
// Whether this entry has private metadata or not
private static final byte HAS_PRIVATE_METADATA = 1 << 6;
/**
* HEADER is composed of type (byte), hashCode (int), keyLength (int), valueLength (int)
* Note that metadata is not included as this is now optional
*/
static final int HEADER_LENGTH = 1 + 4 + 4 + 4;
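// Illustrative layout (offsets derived from the write order in create(); assumes
// eviction disabled and an immortal entry without a version):
//   [0-7]   next bucket pointer
//   [8]     type byte
//   [9-12]  hashCode
//   [13-16] key length
//   [17-20] value length
//   [21-..] key bytes, then metadata bytes (empty here), then value bytes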
@Start
public void start() {
this.evictionEnabled = configuration.memory().isEvictionEnabled();
}
@Override
public long create(WrappedBytes key, int hashCode, InternalCacheEntry<WrappedBytes, WrappedBytes> ice) {
byte type;
boolean shouldWriteMetadataSize = false;
byte[] metadataBytes;
Metadata metadata = ice.getMetadata();
if (metadata instanceof EmbeddedMetadata) {
EntryVersion version = metadata.version();
byte[] versionBytes;
if (version != null) {
type = HAS_VERSION;
shouldWriteMetadataSize = true;
try {
versionBytes = marshaller.objectToByteBuffer(version);
} catch (IOException | InterruptedException e) {
throw new CacheException(e);
}
} else {
type = 0;
versionBytes = Util.EMPTY_BYTE_ARRAY;
}
long lifespan = metadata.lifespan();
long maxIdle = metadata.maxIdle();
if (lifespan < 0 && maxIdle < 0) {
type |= IMMORTAL;
metadataBytes = versionBytes;
} else if (lifespan > -1 && maxIdle < 0) {
type |= MORTAL;
metadataBytes = new byte[16 + versionBytes.length];
Bits.putLong(metadataBytes, 0, lifespan);
Bits.putLong(metadataBytes, 8, ice.getCreated());
System.arraycopy(versionBytes, 0, metadataBytes, 16, versionBytes.length);
} else if (lifespan < 0) {
type |= TRANSIENT;
metadataBytes = new byte[16 + versionBytes.length];
Bits.putLong(metadataBytes, 0, maxIdle);
Bits.putLong(metadataBytes, 8, ice.getLastUsed());
System.arraycopy(versionBytes, 0, metadataBytes, 16, versionBytes.length);
} else {
type |= TRANSIENT_MORTAL;
metadataBytes = new byte[32 + versionBytes.length];
Bits.putLong(metadataBytes, 0, lifespan);
Bits.putLong(metadataBytes, 8, maxIdle);
Bits.putLong(metadataBytes, 16, ice.getCreated());
Bits.putLong(metadataBytes, 24, ice.getLastUsed());
System.arraycopy(versionBytes, 0, metadataBytes, 32, versionBytes.length);
}
} else {
type = CUSTOM;
shouldWriteMetadataSize = true;
metadataBytes = marshall(metadata);
}
int keySize = key.getLength();
int metadataSize = metadataBytes.length;
WrappedBytes value = ice.getValue();
int valueSize = value != null ? value.getLength() : 0;
byte[] internalMetadataBytes;
int internalMetadataSize;
if (shouldWriteInternalMetadata(ice.getInternalMetadata())) {
internalMetadataBytes = marshall(ice.getInternalMetadata());
internalMetadataSize = internalMetadataBytes.length;
type |= HAS_PRIVATE_METADATA;
} else {
internalMetadataBytes = null;
internalMetadataSize = 0;
}
// Eviction requires 2 additional pointers at the beginning
int offset = evictionEnabled ? 16 : 0;
long totalSize = offHeapEntrySize(evictionEnabled, shouldWriteMetadataSize, keySize, valueSize, metadataSize, internalMetadataSize);
long memoryAddress = allocator.allocate(totalSize);
// Write the empty linked address pointer first
MEMORY.putLong(memoryAddress, offset, 0);
offset += 8;
MEMORY.putByte(memoryAddress, offset, type);
offset += 1;
MEMORY.putInt(memoryAddress, offset, hashCode);
offset += 4;
MEMORY.putInt(memoryAddress, offset, key.getLength());
offset += 4;
if (shouldWriteMetadataSize) {
MEMORY.putInt(memoryAddress, offset, metadataBytes.length);
offset += 4;
}
MEMORY.putInt(memoryAddress, offset, valueSize);
offset += 4;
if (internalMetadataSize > 0) {
MEMORY.putInt(memoryAddress, offset, internalMetadataSize);
offset += 4;
}
MEMORY.putBytes(key.getBytes(), key.backArrayOffset(), memoryAddress, offset, keySize);
offset += keySize;
MEMORY.putBytes(metadataBytes, 0, memoryAddress, offset, metadataSize);
offset += metadataSize;
if (valueSize > 0) {
MEMORY.putBytes(value.getBytes(), value.backArrayOffset(), memoryAddress, offset, valueSize);
offset += valueSize;
}
if (internalMetadataSize > 0) {
MEMORY.putBytes(internalMetadataBytes, 0, memoryAddress, offset, internalMetadataSize);
offset += internalMetadataSize;
}
assert offset == totalSize;
return memoryAddress;
}
@Override
public long getSize(long entryAddress, boolean includeAllocationOverhead) {
int headerOffset = evictionEnabled ? 24 : 8;
byte type = MEMORY.getByte(entryAddress, headerOffset);
headerOffset++;
// Skip the hashCode
headerOffset += 4;
int keyLength = MEMORY.getInt(entryAddress, headerOffset);
headerOffset += 4;
int metadataLength;
if ((type & (CUSTOM | HAS_VERSION)) != 0) {
metadataLength = MEMORY.getInt(entryAddress, headerOffset);
headerOffset += 4;
} else {
switch (type & EXPIRATION_TYPES) {
case IMMORTAL:
metadataLength = 0;
break;
case MORTAL:
case TRANSIENT:
metadataLength = 16;
break;
case TRANSIENT_MORTAL:
metadataLength = 32;
break;
default:
throw new IllegalArgumentException("Unsupported type: " + type);
}
}
int valueLength = MEMORY.getInt(entryAddress, headerOffset);
headerOffset += 4;
int internalMetadataLength;
if (requiresInternalMetadataSize(type)) {
internalMetadataLength = MEMORY.getInt(entryAddress, headerOffset);
headerOffset += 4;
} else {
internalMetadataLength = 0;
}
int size = headerOffset + keyLength + metadataLength + valueLength + internalMetadataLength;
return includeAllocationOverhead ? estimateSizeOverhead(size) : size;
}
@Override
public long getNext(long entryAddress) {
return MEMORY.getLong(entryAddress, evictionEnabled ? 16 : 0);
}
@Override
public void setNext(long entryAddress, long value) {
MEMORY.putLong(entryAddress, evictionEnabled ? 16 : 0, value);
}
@Override
public int getHashCode(long entryAddress) {
// 16 bytes for eviction if needed (optional)
// 8 bytes for linked pointer
// 1 for type
int headerOffset = evictionEnabled ? 25 : 9;
return MEMORY.getInt(entryAddress, headerOffset);
}
@Override
public byte[] getKey(long address) {
// 16 bytes for eviction if needed (optional)
// 8 bytes for linked pointer
int offset = evictionEnabled ? 24 : 8;
byte metadataType = MEMORY.getByte(address, offset);
offset += 1;
// Ignore hashCode bytes
offset += 4;
byte[] keyBytes = new byte[MEMORY.getInt(address, offset)];
offset += 4;
if ((metadataType & (CUSTOM | HAS_VERSION)) != 0) {
// These have additional 4 bytes for custom metadata or version
offset += 4;
}
// Ignore value bytes
offset += 4;
// Ignore internal metadata bytes
if (requiresInternalMetadataSize(metadataType)) {
offset += 4;
}
// Finally read the bytes and return
MEMORY.getBytes(address, offset, keyBytes, 0, keyBytes.length);
return keyBytes;
}
/**
* Assumes the address doesn't contain the linked pointer at the beginning
* @param address the address to read the entry from
* @return the entry at the memory location
*/
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> fromMemory(long address) {
// 16 bytes for eviction if needed (optional)
// 8 bytes for linked pointer
int offset = evictionEnabled ? 24 : 8;
byte metadataType = MEMORY.getByte(address, offset);
offset += 1;
int hashCode = MEMORY.getInt(address, offset);
offset += 4;
byte[] keyBytes = new byte[MEMORY.getInt(address, offset)];
offset += 4;
byte[] metadataBytes;
switch (metadataType & (~HAS_PRIVATE_METADATA)) {
case IMMORTAL:
metadataBytes = Util.EMPTY_BYTE_ARRAY;
break;
case MORTAL:
case TRANSIENT:
metadataBytes = new byte[16];
break;
case TRANSIENT_MORTAL:
metadataBytes = new byte[32];
break;
default:
// This means we had CUSTOM or HAS_VERSION so we have to read it all
metadataBytes = new byte[MEMORY.getInt(address, offset)];
offset += 4;
}
int valueSize = MEMORY.getInt(address, offset);
offset += 4;
int internalMetadataSize;
if (requiresInternalMetadataSize(metadataType)) {
internalMetadataSize = MEMORY.getInt(address, offset);
offset += 4;
} else {
internalMetadataSize = 0;
}
MEMORY.getBytes(address, offset, keyBytes, 0, keyBytes.length);
offset += keyBytes.length;
MEMORY.getBytes(address, offset, metadataBytes, 0, metadataBytes.length);
offset += metadataBytes.length;
WrappedBytes valueWrappedBytes;
if (valueSize > 0) {
byte[] valueBytes = new byte[valueSize];
MEMORY.getBytes(address, offset, valueBytes, 0, valueBytes.length);
offset += valueBytes.length;
valueWrappedBytes = new WrappedByteArray(valueBytes);
} else {
valueWrappedBytes = null;
}
PrivateMetadata internalMetadata = PrivateMetadata.empty();
if (internalMetadataSize > 0) {
byte[] internalMetadataBytes = new byte[internalMetadataSize];
MEMORY.getBytes(address, offset, internalMetadataBytes, 0, internalMetadataSize);
offset += internalMetadataSize;
internalMetadata = unmarshall(internalMetadataBytes);
}
Metadata metadata;
// This is a custom metadata
if ((metadataType & CUSTOM) == CUSTOM) {
metadata = unmarshall(metadataBytes);
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = internalEntryFactory.create(new WrappedByteArray(keyBytes, hashCode),
valueWrappedBytes, metadata);
ice.setInternalMetadata(internalMetadata);
return ice;
} else {
long lifespan;
long maxIdle;
long created;
long lastUsed;
offset = 0;
boolean hasVersion = (metadataType & HAS_VERSION) == HAS_VERSION;
// Ignore CUSTOM and VERSION to find type
switch (metadataType & EXPIRATION_TYPES) {
case IMMORTAL:
lifespan = -1;
maxIdle = -1;
created = -1;
lastUsed = -1;
break;
case MORTAL:
maxIdle = -1;
lifespan = Bits.getLong(metadataBytes, offset);
offset += 8;
created = Bits.getLong(metadataBytes, offset);
offset += 8;
lastUsed = -1;
break;
case TRANSIENT:
lifespan = -1;
maxIdle = Bits.getLong(metadataBytes, offset);
offset += 8;
created = -1;
lastUsed = Bits.getLong(metadataBytes, offset);
offset += 8;
break;
case TRANSIENT_MORTAL:
lifespan = Bits.getLong(metadataBytes, offset);
offset += 8;
maxIdle = Bits.getLong(metadataBytes, offset);
offset += 8;
created = Bits.getLong(metadataBytes, offset);
offset += 8;
lastUsed = Bits.getLong(metadataBytes, offset);
offset += 8;
break;
default:
throw new IllegalArgumentException("Unsupported type: " + metadataType);
}
if (hasVersion) {
try {
EntryVersion version = (EntryVersion) marshaller.objectFromByteBuffer(metadataBytes, offset,
metadataBytes.length - offset);
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = internalEntryFactory.create(new WrappedByteArray(keyBytes, hashCode),
valueWrappedBytes, version, created, lifespan, lastUsed, maxIdle);
ice.setInternalMetadata(internalMetadata);
return ice;
} catch (IOException | ClassNotFoundException e) {
throw new CacheException(e);
}
} else {
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = internalEntryFactory.create(new WrappedByteArray(keyBytes, hashCode),
valueWrappedBytes, (Metadata) null, created, lifespan, lastUsed, maxIdle);
ice.setInternalMetadata(internalMetadata);
return ice;
}
}
}
@Override
public boolean equalsKey(long address, WrappedBytes wrappedBytes, int hashCode) {
// 16 bytes for eviction if needed (optional)
// 8 bytes for linked pointer
int headerOffset = evictionEnabled ? 24 : 8;
byte type = MEMORY.getByte(address, headerOffset);
headerOffset++;
// First if hashCode doesn't match then the key can't be equal
if (hashCode != MEMORY.getInt(address, headerOffset)) {
return false;
}
headerOffset += 4;
// If the length of the key is not the same it can't match either!
int keyLength = MEMORY.getInt(address, headerOffset);
if (keyLength != wrappedBytes.getLength()) {
return false;
}
headerOffset += 4;
if (requiresMetadataSize(type)) {
headerOffset += 4;
}
// This is for the value size which we don't need to read
headerOffset += 4;
// This is for the internal metadata size which we don't need to read
if (requiresInternalMetadataSize(type)) {
headerOffset += 4;
}
// Finally read each byte individually so we don't have to copy them into a byte[]
for (int i = 0; i < keyLength; i++) {
byte b = MEMORY.getByte(address, headerOffset + i);
if (b != wrappedBytes.getByte(i))
return false;
}
return true;
}
/**
* Returns whether the entry is expired.
* @param address the address of the entry to check
* @return {@code true} if the entry is expired, {@code false} otherwise
*/
@Override
public boolean isExpired(long address) {
// 16 bytes for eviction if needed (optional)
// 8 bytes for linked pointer
int offset = evictionEnabled ? 24 : 8;
byte metadataType = MEMORY.getByte(address, offset);
if ((metadataType & IMMORTAL) != 0) {
return false;
}
// type
offset += 1;
// hashCode
offset += 4;
// key length
int keyLength = MEMORY.getInt(address, offset);
offset += 4;
long now = timeService.wallClockTime();
byte[] metadataBytes;
if ((metadataType & CUSTOM) == CUSTOM) {
// TODO: this needs to be fixed in ISPN-8539
return false;
// int metadataLength = MEMORY.getInt(address, offset);
// metadataBytes = new byte[metadataLength];
//
// // value and keyLength
// offset += 4 + keyLength;
//
// MEMORY.getBytes(address, offset, metadataBytes, 0, metadataBytes.length);
//
// Metadata metadata;
// try {
// metadata = (Metadata) marshaller.objectFromByteBuffer(metadataBytes);
// // TODO: custom metadata is not implemented properly for expiration
// return false;
// } catch (IOException | ClassNotFoundException e) {
// throw new CacheException(e);
// }
} else {
// value and keyLength
offset += 4 + keyLength;
// internal metadata length (if applicable)
if (requiresInternalMetadataSize(metadataType)) {
offset += 4;
}
// If it has version that means we wrote the size as well which goes after key length
if ((metadataType & HAS_VERSION) != 0) {
offset += 4;
}
switch (metadataType & EXPIRATION_TYPES) {
case MORTAL:
metadataBytes = new byte[16];
MEMORY.getBytes(address, offset, metadataBytes, 0, metadataBytes.length);
return ExpiryHelper.isExpiredMortal(Bits.getLong(metadataBytes, 0), Bits.getLong(metadataBytes, 8), now);
case TRANSIENT:
metadataBytes = new byte[16];
MEMORY.getBytes(address, offset, metadataBytes, 0, metadataBytes.length);
return ExpiryHelper.isExpiredTransient(Bits.getLong(metadataBytes, 0), Bits.getLong(metadataBytes, 8), now);
case TRANSIENT_MORTAL:
metadataBytes = new byte[32];
MEMORY.getBytes(address, offset, metadataBytes, 0, metadataBytes.length);
long lifespan = Bits.getLong(metadataBytes, 0);
long maxIdle = Bits.getLong(metadataBytes, 8);
long created = Bits.getLong(metadataBytes, 16);
long lastUsed = Bits.getLong(metadataBytes, 24);
return ExpiryHelper.isExpiredTransientMortal(maxIdle, lastUsed, lifespan, created, now);
default:
return false;
}
}
}
private static boolean requiresMetadataSize(byte type) {
return (type & (CUSTOM | HAS_VERSION)) != 0;
}
private static boolean requiresInternalMetadataSize(byte type) {
return (type & HAS_PRIVATE_METADATA) == HAS_PRIVATE_METADATA;
}
@Override
public long calculateSize(WrappedBytes key, WrappedBytes value, Metadata metadata, PrivateMetadata internalMetadata) {
long totalSize = evictionEnabled ? 24 : 8;
totalSize += HEADER_LENGTH;
totalSize += key.getLength() + value.getLength();
long metadataSize = 0;
if (metadata instanceof EmbeddedMetadata) {
EntryVersion version = metadata.version();
if (version != null) {
metadataSize = marshall(version).length;
// We have to write the size of the version
metadataSize += 4;
}
if (metadata.maxIdle() >= 0) {
metadataSize += 16;
}
if (metadata.lifespan() >= 0) {
metadataSize += 16;
}
} else {
// We have to write the size of the metadata object
metadataSize += 4;
metadataSize += marshall(metadata).length;
}
long internalMetadataSize = shouldWriteInternalMetadata(internalMetadata) ?
marshall(internalMetadata).length + 4 :
0;
return estimateSizeOverhead(totalSize + metadataSize + internalMetadataSize);
}
@Override
public long updateMaxIdle(long address, long currentTimeMillis) {
// 16 bytes for eviction if needed (optional)
// 8 bytes for linked pointer
long offset = evictionEnabled ? 24 : 8;
byte metadataType = MEMORY.getByte(address, offset);
if ((metadataType & (IMMORTAL | MORTAL)) != 0) {
return 0;
}
// skips over metadataType, hashCode
offset += 5;
int keySize = MEMORY.getInt(address, offset);
offset += 4;
boolean hasVersion = (metadataType & HAS_VERSION) != 0;
boolean hasInternalMetadata = requiresInternalMetadataSize(metadataType);
if ((metadataType & TRANSIENT) != 0) {
// Skip the metadataSize (if version present), valueSize and the keyBytes
offset += (hasVersion ? 4 : 0) + (hasInternalMetadata ? 4 : 0 ) + 4 + keySize;
// Skip the max idle value
storeLongBigEndian(address, offset + 8, currentTimeMillis);
return 0;
}
if ((metadataType & TRANSIENT_MORTAL) != 0) {
// Skip the metadataSize (if version present), valueSize and the keyBytes
offset += (hasVersion ? 4 : 0) + (hasInternalMetadata ? 4 : 0 ) + 4 + keySize;
// Skip the lifespan/max idle values and created
storeLongBigEndian(address, offset + 24, currentTimeMillis);
return 0;
}
// If we got here it means it is custom type, so we have to read the metadata and update it
byte[] metadataBytes = new byte[MEMORY.getInt(address, offset)];
int metadataSize = metadataBytes.length;
offset += 4;
int valueSize = MEMORY.getInt(address, offset);
offset += 4;
int internalMetadataSize;
if (hasInternalMetadata) {
internalMetadataSize = MEMORY.getInt(address, offset);
offset += 4;
} else {
internalMetadataSize = 0;
}
// skips over the actual key bytes
offset += keySize;
MEMORY.getBytes(address, offset, metadataBytes, 0, metadataSize);
Metadata metadata = unmarshall(metadataBytes);
Metadata newMetadata = metadata.builder()
.maxIdle(currentTimeMillis, TimeUnit.MILLISECONDS)
.build();
byte[] newMetadataBytes = marshall(newMetadata, metadataSize);
int newMetadataSize = newMetadataBytes.length;
if (newMetadataSize != metadataSize) {
// The new marshalled size is different than before, so we have to rewrite the object!
// Offset is still set to the end of the key bytes (before metadata)
long newPointer = MEMORY.allocate(newMetadataSize + offset + valueSize + internalMetadataSize);
// This writes the next pointer, eviction pointers (if applicable),
// type, hashCode, keyLength, metadataLength, valueLength and key bytes.
MEMORY.copy(address, 0, newPointer, 0, offset);
// This copies the new metadata bytes to the new metadata location
MEMORY.putBytes(newMetadataBytes, 0, newPointer, offset, newMetadataSize);
// This copies the value bytes from the old to the new location
MEMORY.copy(address, offset + metadataSize, newPointer, offset + newMetadataSize, valueSize);
if (internalMetadataSize > 0) {
// This copies the internal metadata bytes from the old to the new location
MEMORY.copy(address, offset + metadataSize + valueSize, newPointer, offset + newMetadataSize + valueSize, internalMetadataSize);
}
return newPointer;
}
// Replace the metadata bytes with the new ones in place
MEMORY.putBytes(metadataBytes, 0, address, offset, metadataSize);
return 0;
}
private void storeLongBigEndian(long destAddress, long offset, long value) {
// Writes the most significant byte first (big endian / network order) so the byte
// order matches Bits.putLong, which wrote the original timestamp
MEMORY.putByte(destAddress, offset, (byte) (value >> 56));
MEMORY.putByte(destAddress, offset + 1, (byte) (value >> 48));
MEMORY.putByte(destAddress, offset + 2, (byte) (value >> 40));
MEMORY.putByte(destAddress, offset + 3, (byte) (value >> 32));
MEMORY.putByte(destAddress, offset + 4, (byte) (value >> 24));
MEMORY.putByte(destAddress, offset + 5, (byte) (value >> 16));
MEMORY.putByte(destAddress, offset + 6, (byte) (value >> 8));
MEMORY.putByte(destAddress, offset + 7, (byte) value);
}
private <T> byte[] marshall(T obj) {
try {
return marshaller.objectToByteBuffer(obj);
} catch (IOException e) {
throw new CacheException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CacheException(e);
}
}
private <T> byte[] marshall(T obj, int estimatedSize) {
try {
return marshaller.objectToByteBuffer(obj, estimatedSize);
} catch (IOException e) {
throw new CacheException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CacheException(e);
}
}
private <T> T unmarshall(byte[] bytes) {
try {
//noinspection unchecked
return (T) marshaller.objectFromByteBuffer(bytes);
} catch (IOException | ClassNotFoundException e) {
throw new CacheException(e);
}
}
private static boolean shouldWriteInternalMetadata(PrivateMetadata metadata) {
return metadata != null && !metadata.isEmpty();
}
}
| 27,465
| 36.470668
| 140
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/SegmentedBoundedOffHeapDataContainer.java
|
package org.infinispan.container.offheap;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.StampedLock;
import java.util.function.Supplier;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.AbstractDelegatingInternalDataContainer;
import org.infinispan.container.impl.AbstractInternalDataContainer;
import org.infinispan.container.impl.DefaultSegmentedDataContainer;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.impl.PeekableTouchableMap;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.eviction.EvictionType;
import org.infinispan.eviction.impl.PassivationManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.util.concurrent.DataOperationOrderer;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* @author wburns
* @since 9.4
*/
@Scope(Scopes.NAMED_CACHE)
public class SegmentedBoundedOffHeapDataContainer extends AbstractDelegatingInternalDataContainer<WrappedBytes, WrappedBytes> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final OffHeapListener offHeapListener;
@Inject ComponentRegistry componentRegistry;
@Inject protected OffHeapMemoryAllocator allocator;
@Inject protected OffHeapEntryFactory offHeapEntryFactory;
@Inject protected EvictionManager evictionManager;
@Inject protected ComponentRef<PassivationManager> passivator;
@Inject protected DataOperationOrderer orderer;
protected final long maxSize;
protected final Lock lruLock;
protected final boolean useCount;
protected final int numSegments;
// Must be updated while holding lruLock - but can be read outside of the lock
protected volatile long currentSize;
protected long firstAddress;
protected long lastAddress;
protected DefaultSegmentedDataContainer<WrappedBytes, WrappedBytes> dataContainer;
public SegmentedBoundedOffHeapDataContainer(int numSegments, long maxSize, EvictionType type) {
this.numSegments = numSegments;
offHeapListener = new OffHeapListener();
this.maxSize = maxSize;
this.useCount = type == EvictionType.COUNT;
OffHeapMapSupplier offHeapMapSupplier = new OffHeapMapSupplier();
this.lruLock = new ReentrantLock();
firstAddress = 0;
dataContainer = new DefaultSegmentedDataContainer<>(offHeapMapSupplier, numSegments);
}
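// Illustrative construction (hypothetical values): a container with 256 segments
// bounded to 1 GiB of off-heap memory:
//   new SegmentedBoundedOffHeapDataContainer(256, 1L << 30, EvictionType.MEMORY);
// With EvictionType.COUNT the bound is interpreted as a number of entries instead.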
@Start
public void start() {
dataContainer.start();
}
@Stop(priority = 999)
public void stop() {
dataContainer.stop();
}
@Override
protected InternalDataContainer<WrappedBytes, WrappedBytes> delegate() {
return dataContainer;
}
@Override
public void put(WrappedBytes key, WrappedBytes value, Metadata metadata) {
super.put(key, value, metadata);
// The following is called outside of the write lock specifically - since we may not have to evict and even
// if we did it would quite possibly need a different lock
ensureSize();
}
@Override
public void put(int segment, WrappedBytes key, WrappedBytes value, Metadata metadata,
PrivateMetadata internalMetadata, long createdTimestamp,
long lastUseTimestamp) {
super.put(segment, key, value, metadata, internalMetadata, createdTimestamp, lastUseTimestamp);
// The following is called outside of the write lock specifically - since we may not have to evict and even
// if we did it would quite possibly need a different lock
ensureSize();
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> compute(WrappedBytes key,
ComputeAction<WrappedBytes, WrappedBytes> action) {
InternalCacheEntry<WrappedBytes, WrappedBytes> result = super.compute(key, action);
if (result != null) {
// Means we had a put or replace called so we have to confirm sizes
// The following is called outside of the write lock specifically - since we may not have to evict and even
// if we did it would quite possibly need a different lock
ensureSize();
}
return result;
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> compute(int segment, WrappedBytes key, ComputeAction<WrappedBytes, WrappedBytes> action) {
InternalCacheEntry<WrappedBytes, WrappedBytes> result = super.compute(segment, key, action);
if (result != null) {
// Means we had a put or replace called so we have to confirm sizes
// The following is called outside of the write lock specifically - since we may not have to evict and even
// if we did it would quite possibly need a different lock
ensureSize();
}
return result;
}
protected OffHeapConcurrentMap getMapThatContainsKey(byte[] key) {
int segment = dataContainer.getSegmentForKey(key);
// This can become null if we have a concurrent removal of segments
return (OffHeapConcurrentMap) dataContainer.getMapForSegment(segment);
}
/**
* This method repeatedly removes the head of the LRU list until the current size is less than or equal to
* {@code maxSize}.
* <p>
* We need to hold the LRU lock in order to check the current size and to read the head entry,
* and then we need to hold the head entry's write lock in order to remove it.
* The problem is that the correct acquisition order is entry write lock first, LRU lock second,
* and we need to hold the LRU lock so that we know which entry write lock to acquire.
* <p>
* To work around it, we first try to acquire the entry write lock without blocking.
* If that fails, we release the LRU lock and we acquire the locks in the correct order, hoping that
* the LRU head doesn't change while we wait. Because the entry write locks are striped, we actually
* tolerate an LRU head change as long as the new head entry is in the same lock stripe.
* If the LRU list head changes, we release both locks and try again.
*/
private void ensureSize() {
// Try reading outside of lock first to allow for less locking for insert that doesn't require eviction
if (currentSize <= maxSize) {
return;
}
while (true) {
long addressToRemove;
StampedLock stampedLock;
long writeStamp;
OffHeapConcurrentMap map;
lruLock.lock();
try {
if (currentSize <= maxSize) {
break;
}
// We shouldn't be able to get into this state
assert firstAddress > 0;
// We read the key before hashCode due to how off-heap bytes are written (key requires reading metadata
// which comes before hashCode, which should keep hashCode bytes in memory register in most cases)
byte[] key = offHeapEntryFactory.getKey(firstAddress);
map = getMapThatContainsKey(key);
if (map != null) {
int hashCode = offHeapEntryFactory.getHashCode(firstAddress);
// This is always non null
stampedLock = map.getStampedLock(hashCode);
if ((writeStamp = stampedLock.tryWriteLock()) != 0) {
addressToRemove = firstAddress;
} else {
addressToRemove = 0;
}
} else {
// We have to loop back around - most likely a concurrent removal of a segment freed this entry,
// which also reduced the current size, so there may be nothing left to evict
continue;
}
} finally {
lruLock.unlock();
}
// If addressToRemove is 0 it means we were unable to acquire the write lock above, so we have to do a
// blocking write lock and only then acquire the lruLock, since that is the required acquisition order
// (the tryWriteLock above being the only exception)
if (addressToRemove == 0) {
writeStamp = stampedLock.writeLock();
try {
lruLock.lock();
try {
if (currentSize <= maxSize) {
break;
}
// Now that we have locks we have to verify the first address is protected by the same lock still
byte[] key = offHeapEntryFactory.getKey(firstAddress);
OffHeapConcurrentMap protectedMap = getMapThatContainsKey(key);
if (protectedMap == map) {
int hashCode = offHeapEntryFactory.getHashCode(firstAddress);
StampedLock innerLock = map.getStampedLock(hashCode);
if (innerLock == stampedLock) {
addressToRemove = firstAddress;
}
}
} finally {
lruLock.unlock();
}
} finally {
if (addressToRemove == 0) {
stampedLock.unlockWrite(writeStamp);
}
}
}
if (addressToRemove != 0) {
if (log.isTraceEnabled()) {
log.tracef("Removing entry: 0x%016x due to eviction due to size %d being larger than maximum of %d",
addressToRemove, currentSize, maxSize);
}
try {
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = offHeapEntryFactory.fromMemory(addressToRemove);
map.remove(ice.getKey(), addressToRemove);
// Note this is non blocking now - this MUST be invoked after removing the entry from the
// underlying map
AbstractInternalDataContainer.handleEviction(ice, orderer, passivator.running(), evictionManager, this, null);
} finally {
stampedLock.unlockWrite(writeStamp);
}
}
}
}
private class OffHeapMapSupplier implements Supplier<PeekableTouchableMap<WrappedBytes,
WrappedBytes>> {
@Override
public PeekableTouchableMap<WrappedBytes, WrappedBytes> get() {
return new OffHeapConcurrentMap(allocator, offHeapEntryFactory, offHeapListener);
}
}
private class OffHeapListener implements OffHeapConcurrentMap.EntryListener {
@Override
public boolean resize(int pointerCount) {
if (useCount) {
return true;
}
lruLock.lock();
try {
boolean isNegative = pointerCount < 0;
long memoryUsed = ((long) Math.abs(pointerCount)) << 3;
long change = UnpooledOffHeapMemoryAllocator.estimateSizeOverhead(memoryUsed);
// We only attempt to deny resizes that are an increase in pointers
if (!isNegative) {
long changeSizeForAllSegments = change * numSegments;
// If the pointers for all segments alone would fill the entire memory cache region, don't let it resize
if (changeSizeForAllSegments < 0 || changeSizeForAllSegments >= maxSize) {
return false;
}
}
if (isNegative) {
currentSize -= change;
} else {
currentSize += change;
}
} finally {
lruLock.unlock();
}
return true;
}
@Override
public void entryCreated(long newAddress) {
long newSize = getSize(newAddress);
lruLock.lock();
try {
currentSize += newSize;
addEntryAddressToEnd(newAddress);
} finally {
lruLock.unlock();
}
}
@Override
public void entryRemoved(long removedAddress) {
long removedSize = getSize(removedAddress);
lruLock.lock();
try {
// Current size has to be updated in the lock
currentSize -= removedSize;
removeNode(removedAddress);
} finally {
lruLock.unlock();
}
}
@Override
public void entryReplaced(long newAddress, long oldAddress) {
long oldSize = getSize(oldAddress);
long newSize = getSize(newAddress);
lruLock.lock();
try {
removeNode(oldAddress);
addEntryAddressToEnd(newAddress);
currentSize += newSize;
currentSize -= oldSize;
} finally {
lruLock.unlock();
}
}
@Override
public void entryRetrieved(long entryAddress) {
lruLock.lock();
try {
if (log.isTraceEnabled()) {
log.tracef("Moving entry 0x%016x to the end of the LRU list", entryAddress);
}
moveToEnd(entryAddress);
} finally {
lruLock.unlock();
}
}
/**
* Method to be invoked when adding a new entry address to the end of the lru nodes. This occurs for newly created
* entries.
* This method should only be invoked after acquiring the lruLock
*
* @param entryAddress the new entry address pointer *NOT* the lru node
*/
private void addEntryAddressToEnd(long entryAddress) {
if (log.isTraceEnabled()) {
log.tracef("Adding entry 0x%016x to the end of the LRU list", entryAddress);
}
// This means it is the first entry
if (lastAddress == 0) {
firstAddress = entryAddress;
lastAddress = entryAddress;
// Have to make sure the memory is cleared so we don't use uninitialized values
OffHeapLruNode.setPrevious(entryAddress, 0);
} else {
// Writes back pointer to the old lastAddress
OffHeapLruNode.setPrevious(entryAddress, lastAddress);
// Write the forward pointer in old lastAddress to point to us
OffHeapLruNode.setNext(lastAddress, entryAddress);
// Finally make us the last address
lastAddress = entryAddress;
}
// Since we are last there is no pointer after us
OffHeapLruNode.setNext(entryAddress, 0);
}
/**
* Removes the address node and updates previous and next lru node pointers properly
* The {@link SegmentedBoundedOffHeapDataContainer#lruLock} <b>must</b> be held when invoking this
* @param address the address of the entry node to unlink from the LRU list
*/
private void removeNode(long address) {
boolean middleNode = true;
if (address == lastAddress) {
if (log.isTraceEnabled()) {
log.tracef("Removed entry 0x%016x from the end of the LRU list", address);
}
long previousLRUNode = OffHeapLruNode.getPrevious(address);
if (previousLRUNode != 0) {
OffHeapLruNode.setNext(previousLRUNode, 0);
}
lastAddress = previousLRUNode;
middleNode = false;
}
if (address == firstAddress) {
if (log.isTraceEnabled()) {
log.tracef("Removed entry 0x%016x from the beginning of the LRU list", address);
}
long nextLRUNode = OffHeapLruNode.getNext(address);
if (nextLRUNode != 0) {
OffHeapLruNode.setPrevious(nextLRUNode, 0);
}
firstAddress = nextLRUNode;
middleNode = false;
}
if (middleNode) {
if (log.isTraceEnabled()) {
log.tracef("Removed entry 0x%016x from the middle of the LRU list", address);
}
// We are a middle pointer so both of these have to be non zero
long previousLRUNode = OffHeapLruNode.getPrevious(address);
long nextLRUNode = OffHeapLruNode.getNext(address);
assert previousLRUNode != 0;
assert nextLRUNode != 0;
OffHeapLruNode.setNext(previousLRUNode, nextLRUNode);
OffHeapLruNode.setPrevious(nextLRUNode, previousLRUNode);
}
}
/**
* Method to be invoked when moving an existing lru node to the end. This occurs when the entry is accessed for this
* node.
* This method should only be invoked after acquiring the lruLock.
*
* @param lruNode the node to move to the end
*/
private void moveToEnd(long lruNode) {
if (lruNode != lastAddress) {
long nextLruNode = OffHeapLruNode.getNext(lruNode);
assert nextLruNode != 0;
if (lruNode == firstAddress) {
OffHeapLruNode.setPrevious(nextLruNode, 0);
firstAddress = nextLruNode;
} else {
long prevLruNode = OffHeapLruNode.getPrevious(lruNode);
assert prevLruNode != 0;
OffHeapLruNode.setNext(prevLruNode, nextLruNode);
OffHeapLruNode.setPrevious(nextLruNode, prevLruNode);
}
// Link the previous last node to our new last node
OffHeapLruNode.setNext(lastAddress, lruNode);
// Sets the previous node of our new tail node to the previous tail node
OffHeapLruNode.setPrevious(lruNode, lastAddress);
OffHeapLruNode.setNext(lruNode, 0);
lastAddress = lruNode;
}
}
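// Illustrative LRU transition for moveToEnd (addresses are hypothetical):
//   before: A <-> B <-> C   (firstAddress = A, lastAddress = C), B is accessed
//   after:  A <-> C <-> B   (firstAddress = A, lastAddress = B)
// Only the previous/next pointers of the affected nodes are rewritten; the
// entries themselves never move in memory.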
}
public long getSize(long address) {
if (useCount) {
return 1;
} else {
// The returned entry size already includes the 16 bytes for our LRU pointers, since eviction is enabled
return offHeapEntryFactory.getSize(address, true);
}
}
@Override
public long capacity() {
return maxSize;
}
@Override
public long evictionSize() {
return currentSize;
}
}
| 18,197
| 38.304536
| 147
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapEntryFactory.java
|
package org.infinispan.container.offheap;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.KeyValueMetadataSizeCalculator;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* Factory that can create {@link InternalCacheEntry} objects that use off-heap memory. These are referenced by
* a long representing the memory address.
* @author wburns
* @since 9.0
*/
public interface OffHeapEntryFactory extends KeyValueMetadataSizeCalculator<WrappedBytes, WrappedBytes> {
/**
* Creates an off-heap entry using the provided key value and metadata
* @param key the key to use
* @param hashCode the hashCode of the key
* @param ice the internal entry to use
* @return the address of where the entry was created
*/
long create(WrappedBytes key, int hashCode, InternalCacheEntry<WrappedBytes, WrappedBytes> ice);
/**
* Returns how many bytes in memory this address location uses assuming it is an {@link InternalCacheEntry}.
*
* @param address the address of the entry
* @param includeAllocationOverhead if true, add 8 bytes of allocator housekeeping and align to a 16 byte boundary
* @return how many bytes this address was estimated to be
*/
long getSize(long address, boolean includeAllocationOverhead);
/**
* Returns the address to the next linked pointer if there is one for this bucket or 0 if there isn't one
* @param address the address of the entry
* @return the next address entry for this bucket or 0
*/
long getNext(long address);
/**
* Called to update the next pointer index when a collision occurs requiring a linked list within the entries
* themselves
* @param address the address of the entry to update
* @param value the value of the linked node to set
*/
void setNext(long address, long value);
/**
* Returns the hashCode of the entry at the given address.
* @param address the address of the entry
* @return the hash code of the entry
*/
int getHashCode(long address);
/**
* Returns the key of the address.
* @param address the address of the entry
* @return the bytes for the key
*/
byte[] getKey(long address);
/**
* Create an entry from the off-heap pointer
* @param address the address of the entry to read
* @return the entry created on heap from off-heap
*/
InternalCacheEntry<WrappedBytes, WrappedBytes> fromMemory(long address);
/**
* Returns whether the given key as bytes is the same key as the key stored in the entry for the given address.
* @param address the address of the entry's key to check
* @param wrappedBytes the key to check equality with
* @return whether or not the keys are equal
*/
default boolean equalsKey(long address, WrappedBytes wrappedBytes) {
return equalsKey(address, wrappedBytes, wrappedBytes.hashCode());
}
/**
* Returns whether the given key as bytes is the same key as the key stored in the entry for the given address.
* @param address the address of the entry's key to check
* @param wrappedBytes the key to check equality with
* @param hashCode the hashCode of the key
* @return whether or not the keys are equal
*/
boolean equalsKey(long address, WrappedBytes wrappedBytes, int hashCode);
/**
* Returns whether the entry is expired or not.
* @param address the address of the entry's key to check
* @return {@code true} if the entry is expired, {@code false} otherwise
*/
boolean isExpired(long address);
/**
* Method used to calculate how much memory in size the key, value and metadata use.
* @param key The key for this entry to be used in size calculation
* @param value The value for this entry to be used in size calculation
* @param metadata The metadata for this entry to be used in size calculation
* @return The size approximately in memory the key, value and metadata use.
*/
default long calculateSize(WrappedBytes key, WrappedBytes value, Metadata metadata) {
return calculateSize(key, value, metadata, null);
}
/**
* Method used to calculate how much memory in size the key, value, metadata and internal metadata use.
*
* @param key The key for this entry to be used in size calculation
* @param value The value for this entry to be used in size calculation
* @param metadata The metadata for this entry to be used in size calculation
* @param internalMetadata The internal metadata for this entry to be used in size calculation
* @return The size approximately in memory the key, value and metadata use.
*/
long calculateSize(WrappedBytes key, WrappedBytes value, Metadata metadata, PrivateMetadata internalMetadata);
/**
* Update max idle time for an entry. This method will try to do an in place update of the access time, however if
* the new resulting value cannot fit it will allocate a new block of memory. The caller should free the old
* address in this case.
* @param address the address of the entry's to update
* @param accessTime the timestamp to set for max idle access time (must be in milliseconds)
* @return address of the new entry to use or 0 if the same one can be reused
*/
long updateMaxIdle(long address, long accessTime);
}
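// Illustrative sketch (not part of the interface above): a caller might walk a bucket's
// forward-only linked list with getNext/equalsKey to locate a key's address. `factory`,
// `bucketHeadAddress`, `key` and `hashCode` are assumed to be in scope; the address 0
// plays the role of the null pointer.
//
//    long address = bucketHeadAddress;
//    while (address != 0 && !factory.equalsKey(address, key, hashCode)) {
//       address = factory.getNext(address);
//    }
//    // address now points at the matching entry, or is 0 if the key is absent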
| 5,457
| 41.310078
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/UnsafeHolder.java
|
package org.infinispan.container.offheap;
import java.lang.reflect.Field;
import org.infinispan.commons.CacheException;
import sun.misc.Unsafe;
/**
* @author wburns
* @since 9.0
*/
class UnsafeHolder {
static final Unsafe UNSAFE = getUnsafe();
@SuppressWarnings("restriction")
private static Unsafe getUnsafe() {
// attempt to access field Unsafe#theUnsafe
try {
final Field unsafeField = Unsafe.class.getDeclaredField("theUnsafe");
unsafeField.setAccessible(true);
// the unsafe instance
return (Unsafe) unsafeField.get(null);
} catch (NoSuchFieldException | SecurityException | IllegalAccessException e) {
throw new CacheException(e);
}
}
}
| 739
| 24.517241
| 85
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapConcurrentMap.java
|
package org.infinispan.container.offheap;
import java.lang.invoke.MethodHandles;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.ArrayDeque;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.PrimitiveIterator;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.StampedLock;
import java.util.function.BiFunction;
import java.util.function.LongConsumer;
import java.util.stream.LongStream;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.commons.util.ProcessorInfo;
import org.infinispan.commons.util.Util;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.PeekableTouchableMap;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
/**
* A {@link ConcurrentMap} implementation that stores the keys and values off the JVM heap in native heap. This map
* does not permit null for key or values.
* <p>
* The key and value are limited to objects that implement the {@link WrappedBytes} interface. Currently this map only allows
* for implementations that always return a backing array via the {@link WrappedBytes#getBytes()} method.
* <p>
* For reference here is a list of commonly used terms:
* <ul>
* <li><code>bucket</code>: Can store multiple entries (normally via a forward only list)
* <li><code>memory lookup</code>: Stores an array of buckets - used primarily to lookup the location a key would be
* <li><code>lock region</code>: The number of lock regions is fixed, and each region has {@code bucket count / lock count} buckets.
* </ul>
* <p>
* This implementation provides constant-time performance for the basic
* operations ({@code get}, {@code put}, {@code remove} and {@code compute}), assuming the hash function
* disperses the elements properly among the buckets. Iteration over
* collection views requires time proportional to the number of buckets plus its size (the number
* of key-value mappings). This map always assumes a load factor of .75 that is not changeable.
* <p>
* A map must be started after creation to allocate the initial memory lookup, which is also stored in the native heap.
* When the size of the map reaches the resize threshold, that is .75 times the capacity, the map will attempt to resize
* by increasing its internal memory lookup to have an array of buckets twice as big. Normal operations can still
* proceed during this, allowing for minimal downtime during a resize.
* <p>
* This map is created assuming some knowledge of expiration in the Infinispan system. Thus operations that do not
* expose this information via its APIs are not supported. These methods are {@code keySet}, {@code containsKey} and
* {@code containsValue}.
* <p>
* This map guarantees consistency under concurrent reads and writes through a {@link StripedLock} where each
* {@link java.util.concurrent.locks.ReadWriteLock} instance protects an equivalent region of buckets in the underlying
* memory lookup. Read operations, that is ones that only acquire the read lock for their specific lock region, are
* ({@code get} and {@code peek}). Iteration on a returned entrySet or value collection will acquire only a single
* read lock at a time while inspecting a given lock region for a valid value. Write operations, ones that acquire the
* write lock for the lock region, are ({@code put}, {@code remove}, {@code replace} and {@code compute}). A clear
* will acquire all write locks when invoked. This allows the clear to also resize the map down to the initial size.
* <p>
* When this map is constructed it is also possible to provide an {@link EntryListener} that is invoked when various
* operations are performed in the map. Note that the various modification callbacks <b>MUST</b> free the old address,
* or else a memory leak will occur. Please see the various methods for clarification on these methods.
* <p>
* Since this map is based on holding references to memory that lives outside of the scope of the JVM garbage collector
* users need to ensure they properly invoke the {@link #close()} when the map is no longer in use to properly free
* all allocated native memory.
* @author wburns
* @since 9.4
*/
public class OffHeapConcurrentMap implements ConcurrentMap<WrappedBytes, InternalCacheEntry<WrappedBytes, WrappedBytes>>,
PeekableTouchableMap<WrappedBytes, WrappedBytes>, AutoCloseable {
/** Some implementation details
* <p>
* All methods that must hold a lock when invoked are annotated with a {@link GuardedBy} annotation. They can have a
* few different designations, which are described in this table.
*
* locks#readLock: The appropriate read or write lock for the given key must be held when invoking this method.
* locks#writeLock: The appropriate write lock for the given key must be held when invoking this method.
* locks#lockAll: All write locks must be held before invoking this method.
* locks: Any read or write lock must be held while reading these - however writes must acquire all write locks.
*/
/* ---------------- Constants -------------- */
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
// We always have to have more buckets than locks
public final static int INITIAL_SIZE = 256;
private final static int LOCK_COUNT = Math.min(Util.findNextHighestPowerOfTwo(ProcessorInfo.availableProcessors()) << 1,
INITIAL_SIZE);
// 1 << 31 overflows to Integer.MIN_VALUE - used only as a sentinel to detect when the bucket count can no
// longer be doubled
private final static int MAX_ADDRESS_COUNT = 1 << 31;
// Since lockCount is always a power of 2 - We can just shift by this many bits which is the same as dividing by
// the number of locks
private final static int LOCK_SHIFT = 31 - Integer.numberOfTrailingZeros(LOCK_COUNT);
// The number of bits to shift the total bucket count right by to yield the number of buckets per lock region
private final static int LOCK_REGION_SHIFT = Integer.numberOfTrailingZeros(LOCK_COUNT);
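// Example of the shift math above (illustrative): with 16 locks, LOCK_SHIFT is 31 - 4 = 27, so
// getLockOffset maps the 31-bit spread hash into [0, 16), and LOCK_REGION_SHIFT is 4, so 256
// buckets yield 256 >>> 4 = 16 buckets per lock region.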
private final AtomicLong size = new AtomicLong();
private final StripedLock locks;
private final OffHeapMemoryAllocator allocator;
private final OffHeapEntryFactory offHeapEntryFactory;
private final EntryListener listener;
// Once this threshold size is met, the underlying buckets will be re-sized if possible
// This variable can be read outside of locks - thus is volatile, however should only be modified while holding
// all write locks
@GuardedBy("locks#lockAll")
private volatile int sizeThreshold;
// Non null during a resize operation - initialized to contain every lock offset (0 to LOCK_COUNT - 1) that
// still has to be transferred - This and oldMemoryLookup should always be either both null or both non null.
@GuardedBy("locks")
private IntSet pendingBlocks;
// Always non null, unless map has been stopped
@GuardedBy("locks")
private MemoryAddressHash memoryLookup;
@GuardedBy("locks")
private int memoryShift;
// Non null during a resize operation - this will contain the previous lookup and may or may not contain valid
// elements depending upon whether a lock region is still pending transfer - This and pendingBlocks should
// always be either both null or both non null.
@GuardedBy("locks")
private MemoryAddressHash oldMemoryLookup;
@GuardedBy("locks")
private int oldMemoryShift;
public OffHeapConcurrentMap(OffHeapMemoryAllocator allocator,
OffHeapEntryFactory offHeapEntryFactory, EntryListener listener) {
this.allocator = Objects.requireNonNull(allocator);
this.offHeapEntryFactory = Objects.requireNonNull(offHeapEntryFactory);
this.listener = listener;
locks = new StripedLock(LOCK_COUNT);
locks.lockAll();
try {
if (!sizeMemoryBuckets(INITIAL_SIZE)) {
throw new IllegalArgumentException("Unable to initialize off-heap addresses as memory eviction is too low!");
}
} finally {
locks.unlockAll();
}
}
@Override
public boolean touchKey(Object k, long currentTimeMillis) {
if (!(k instanceof WrappedBytes)) {
return false;
}
int hashCode = k.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
// We need the write lock as we may have to replace the value entirely
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
MemoryAddressHash memoryLookup;
if (pendingBlocks != null && pendingBlocks.contains(lockOffset)) {
memoryLookup = this.oldMemoryLookup;
} else {
memoryLookup = this.memoryLookup;
}
return lockedTouch(memoryLookup, (WrappedBytes) k, hashCode, currentTimeMillis);
} finally {
stampedLock.unlockWrite(writeStamp);
}
}
@Override
public void touchAll(long currentTimeMillis) {
// TODO: eventually optimize this to not create object instances and just touch memory directly
// but requires additional rewrite as we need to ensure this is done with a write lock
Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iterator = entryIterator();
while (iterator.hasNext()) {
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = iterator.next();
touchKey(ice.getKey(), currentTimeMillis);
}
}
@GuardedBy("locks#writeLock")
private boolean lockedTouch(MemoryAddressHash memoryLookup, WrappedBytes k, int hashCode, long currentTimeMillis) {
int memoryOffset = getMemoryOffset(memoryLookup, hashCode);
long bucketAddress = memoryLookup.getMemoryAddressOffset(memoryOffset);
if (bucketAddress == 0) {
return false;
}
long actualAddress = performGet(bucketAddress, k, hashCode);
if (actualAddress != 0) {
long newAddress = offHeapEntryFactory.updateMaxIdle(actualAddress, currentTimeMillis);
if (newAddress != 0) {
// Replaces the old value with the newly created one
performPut(bucketAddress, actualAddress, newAddress, k, memoryOffset, false, false);
} else {
entryRetrieved(actualAddress);
}
return true;
}
return false;
}
/**
* Listener interface that is notified when certain operations occur for various memory addresses. Note that when
* this listener is used certain operations are not performed and require the listener to do these instead. Please
* note each method documentation to tell what those are.
*/
public interface EntryListener {
/**
* Invoked when a resize event occurs. This will be invoked up to two times: once for the new container with
* a positive count and possibly a second time for the now old container with a negative count. Note that
* the pointers are in a single contiguous block. It is possible to prevent the resize by returning false
* from the invocation.
* @param pointerCount the change in pointers
* @return whether the resize should continue
*/
boolean resize(int pointerCount);
/**
* Invoked when an entry is about to be created. The new address is fully addressable.
* The write lock will already be acquired for the given segment the key mapped to.
* @param newAddress the address just created that will be the new entry
*/
void entryCreated(long newAddress);
/**
* Invoked when an entry is about to be removed. You can read values from this but after this method is completed
* this memory address may be freed. The write lock will already be acquired for the given segment the key mapped to.
* @param removedAddress the address about to be removed
*/
void entryRemoved(long removedAddress);
/**
* Invoked when an entry is about to be replaced with a new one. The old and new address are both addressable,
* however oldAddress may be freed after this method returns. The write lock will already be acquired for the given
* segment the key mapped to.
* @param newAddress the address just created that will be the new entry
* @param oldAddress the old address for this entry that will be soon removed
*/
void entryReplaced(long newAddress, long oldAddress);
/**
* Invoked when an entry is successfully retrieved. The read lock will already
* be acquired for the given segment the key mapped to.
* @param entryAddress the address of the entry retrieved
*/
void entryRetrieved(long entryAddress);
}
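/**
 * A minimal sketch of an {@link EntryListener} (illustrative only, not the listener Infinispan
 * wires in for eviction): it merely keeps a running total of pointer block changes and never
 * vetoes a resize. The class name and accounting policy are assumptions for demonstration.
 */
static class CountingEntryListener implements EntryListener {
   private final AtomicLong pointerBlocks = new AtomicLong();
   @Override
   public boolean resize(int pointerCount) {
      // pointerCount is positive for the new container and negative for the old one
      pointerBlocks.addAndGet(pointerCount);
      return true; // never prevent a resize in this sketch
   }
   @Override
   public void entryCreated(long newAddress) { }
   @Override
   public void entryRemoved(long removedAddress) { }
   @Override
   public void entryReplaced(long newAddress, long oldAddress) { }
   @Override
   public void entryRetrieved(long entryAddress) { }
}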
@GuardedBy("locks#writeLock")
private void entryCreated(long newAddress) {
if (listener != null) {
listener.entryCreated(newAddress);
}
}
@GuardedBy("locks#writeLock")
private void entryRemoved(long removedAddress) {
if (listener != null) {
listener.entryRemoved(removedAddress);
}
allocator.deallocate(removedAddress, offHeapEntryFactory.getSize(removedAddress, false));
}
@GuardedBy("locks#writeLock")
private void entryReplaced(long newAddress, long oldAddress) {
if (listener != null) {
listener.entryReplaced(newAddress, oldAddress);
}
allocator.deallocate(oldAddress, offHeapEntryFactory.getSize(oldAddress, false));
}
@GuardedBy("locks#readLock")
private void entryRetrieved(long entryAddress) {
if (listener != null) {
listener.entryRetrieved(entryAddress);
}
}
private static int spread(int h) {
// Spread using fibonacci hash (using golden ratio)
// This number is ((2^31 -1) / 1.61803398875) - then rounded to nearest odd number
// We want something that will prevent hashCodes that are near each other being in the same bucket but still fast
// We then force the number to be positive by throwing out the first bit
return (h * 1327217885) & Integer.MAX_VALUE;
}
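// Hand-computed example of the spread/offset math (illustrative, assuming 256 buckets so
// memoryShift == 31 - 8 == 23):
//    spread(42) = (42 * 1327217885) & Integer.MAX_VALUE = 2056059970
//    bucket     = 2056059970 >>> 23 = 245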
/**
* Returns the bucket offset calculated from the provided hashCode for the current memory lookup.
* @param hashCode hashCode of the key to find the bucket offset for
* @return offset to use in the memory lookup
*/
@GuardedBy("locks#readLock")
private int getMemoryOffset(int hashCode) {
return getOffset(hashCode, memoryShift);
}
private int getOffset(int hashCode, int shift) {
return spread(hashCode) >>> shift;
}
/**
* Returns the bucket offset calculated from the provided hashCode for the provided memory lookup.
* @param memoryLookup the memory lookup (current or old) to compute the offset for
* @param hashCode hashCode of the key to find the bucket offset for
* @return offset to use in the memory lookup
*/
@GuardedBy("locks#readLock")
private int getMemoryOffset(MemoryAddressHash memoryLookup, int hashCode) {
return getOffset(hashCode, memoryLookup == this.memoryLookup ? memoryShift : oldMemoryShift);
}
StampedLock getStampedLock(int hashCode) {
return locks.getLockWithOffset(getLockOffset(hashCode));
}
private int getLockOffset(int hashCode) {
return getOffset(hashCode, LOCK_SHIFT);
}
/**
* Returns the size of a bucket region, that is how many buckets a single lock protects. The returned number
* will always be less than or equal to the provided <b>bucketTotal</b>.
* @param bucketTotal number of buckets
* @return how many buckets map to a lock region
*/
private int getBucketRegionSize(int bucketTotal) {
return bucketTotal >>> LOCK_REGION_SHIFT;
}
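// For example (illustrative): with LOCK_COUNT == 16 and 512 buckets total, each lock
// region covers 512 >>> 4 = 32 buckets.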
private void checkDeallocation() {
if (memoryLookup == null) {
throw new IllegalStateException("Map was already shut down!");
}
}
/**
* Expands the memory buckets if possible, returning whether it was successful.
* If it was unable to expand the bucket array, it will set the sizeThreshold to MAX_VALUE to prevent future
* attempts to resize the container
* @param bucketCount the expected new size
* @return true if the bucket was able to be resized
*/
@GuardedBy("locks#lockAll")
private boolean sizeMemoryBuckets(int bucketCount) {
if (listener != null) {
if (!listener.resize(bucketCount)) {
sizeThreshold = Integer.MAX_VALUE;
return false;
}
}
sizeThreshold = computeThreshold(bucketCount);
oldMemoryLookup = memoryLookup;
oldMemoryShift = memoryShift;
memoryLookup = new MemoryAddressHash(bucketCount, allocator);
// Max capacity is 2^31 (thus find the bit position that would be like dividing evenly into that)
memoryShift = 31 - Integer.numberOfTrailingZeros(bucketCount);
return true;
}
/**
* Computes the threshold for when a resize should occur. The returned value will be 75% of the provided number,
* assuming it is a power of two (provides a .75 load factor)
* @param bucketCount the current bucket size
* @return the resize threshold to use
*/
static int computeThreshold(int bucketCount) {
return bucketCount - (bucketCount >> 2);
}
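// For example: computeThreshold(256) == 256 - (256 >> 2) == 192, so the map resizes once it
// holds 75% as many entries as it has buckets.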
StripedLock getLocks() {
return locks;
}
/**
* This method checks if the map must be resized and if so starts the operation. The caller <b>MUST NOT</b>
* hold any locks when invoking this method.
*/
private void checkResize() {
// We don't do a resize if we aren't to the boundary or if we are in a pending resize
if (size.get() < sizeThreshold || oldMemoryLookup != null) {
return;
}
boolean onlyHelp = false;
IntSet localPendingBlocks;
locks.lockAll();
try {
// Don't replace blocks if it was already done - means we had concurrent requests
if (oldMemoryLookup != null) {
onlyHelp = true;
localPendingBlocks = this.pendingBlocks;
} else {
int newBucketCount = memoryLookup.getPointerCount() << 1;
if (newBucketCount == MAX_ADDRESS_COUNT) {
sizeThreshold = Integer.MAX_VALUE;
}
// We couldn't resize
if (!sizeMemoryBuckets(newBucketCount)) {
return;
}
localPendingBlocks = IntSets.concurrentSet(LOCK_COUNT);
for (int i = 0; i < LOCK_COUNT; ++i) {
localPendingBlocks.set(i);
}
this.pendingBlocks = localPendingBlocks;
}
} finally {
locks.unlockAll();
}
// Try to complete without waiting if possible for locks
helpCompleteTransfer(localPendingBlocks, true);
if (!onlyHelp) {
if (!localPendingBlocks.isEmpty()) {
// We attempted to transfer without waiting on locks - but we didn't finish them all yet - so now we have
// to wait to ensure they are all transferred
helpCompleteTransfer(localPendingBlocks, false);
// Now everything should be empty for sure
assert localPendingBlocks.isEmpty();
}
// Now that all blocks have been transferred we can replace references
locks.lockAll();
try {
// This means that someone else completed the transfer for us - only clear can do that currently
if (this.pendingBlocks == null) {
return;
}
transferComplete();
} finally {
locks.unlockAll();
}
}
}
/**
* Invoked when a transfer has completed to clean up the old memory lookup
*/
@GuardedBy("locks#lockAll")
private void transferComplete() {
MemoryAddressHash oldMemoryLookup = this.oldMemoryLookup;
this.pendingBlocks = null;
if (listener != null) {
boolean resized = listener.resize(-oldMemoryLookup.getPointerCount());
assert resized : "Resize of negative pointers should always work!";
}
this.oldMemoryLookup = null;
oldMemoryLookup.deallocate();
}
/**
* This <b>MUST NOT</b> be invoked while holding any lock
* @param tryLock whether lock acquisition only uses {@code tryWriteLock}, possibly returning early with some lock regions not yet transferred
*/
private void helpCompleteTransfer(IntSet pendingBlocks, boolean tryLock) {
if (pendingBlocks != null) {
PrimitiveIterator.OfInt iterator = pendingBlocks.iterator();
while (iterator.hasNext()) {
int offset = iterator.nextInt();
StampedLock lock = locks.getLockWithOffset(offset);
long stamp;
if (tryLock) {
// If we can't get it - just assume another person is working on it - so try next one
if ((stamp = lock.tryWriteLock()) == 0) {
continue;
}
} else {
stamp = lock.writeLock();
}
try {
// Only run it now that we have lock if someone else just didn't finish it
if (pendingBlocks.remove(offset)) {
transfer(offset);
}
} finally {
lock.unlockWrite(stamp);
}
}
}
}
/**
* Ensures that the block that maps to the given lock offset is transferred. This method <b>MUST</b> be invoked by
* any write operation before doing anything. This ensures that the write operation only needs to modify the
* current memory lookup.
* @param lockOffset the lock offset to confirm has been transferred
*/
@GuardedBy("locks#writeLock")
private void ensureTransferred(int lockOffset) {
if (pendingBlocks != null) {
if (pendingBlocks.remove(lockOffset)) {
transfer(lockOffset);
}
}
}
/**
* Transfers all the entries that map to the given lock offset position from the old lookup to the current one.
* @param lockOffset the offset in the lock array - this is the same between all memory lookups
*/
@GuardedBy("locks#writeLock")
private void transfer(int lockOffset) {
int pointerCount = oldMemoryLookup.getPointerCount();
int blockSize = getBucketRegionSize(pointerCount);
LongStream memoryLocations = oldMemoryLookup.removeAll(lockOffset * blockSize, blockSize);
memoryLocations.forEach(address -> {
while (address != 0) {
long nextAddress = offHeapEntryFactory.getNext(address);
offHeapEntryFactory.setNext(address, 0);
int hashCode = offHeapEntryFactory.getHashCode(address);
int memoryOffset = getMemoryOffset(hashCode);
long newBucketAddress = memoryLookup.getMemoryAddressOffset(memoryOffset);
// We should be only inserting a new value - thus we don't worry about key or return value
performPut(newBucketAddress, address, address, null, memoryOffset, false, true);
address = nextAddress;
}
});
}
@Override
public void close() {
locks.lockAll();
try {
actualClear();
memoryLookup.deallocate();
memoryLookup = null;
} finally {
locks.unlockAll();
}
}
@Override
public int size() {
return (int) Math.min(size.get(), Integer.MAX_VALUE);
}
@Override
public boolean isEmpty() {
return size.get() == 0;
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> compute(WrappedBytes key, BiFunction<? super WrappedBytes,
? super InternalCacheEntry<WrappedBytes, WrappedBytes>, ? extends InternalCacheEntry<WrappedBytes, WrappedBytes>> remappingFunction) {
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
InternalCacheEntry<WrappedBytes, WrappedBytes> result;
InternalCacheEntry<WrappedBytes, WrappedBytes> prev;
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long bucketAddress = memoryLookup.getMemoryAddressOffset(memoryOffset);
long actualAddress = bucketAddress == 0 ? 0 : performGet(bucketAddress, key, hashCode);
if (actualAddress != 0) {
prev = offHeapEntryFactory.fromMemory(actualAddress);
} else {
prev = null;
}
result = remappingFunction.apply(key, prev);
if (prev == result) {
// noop
} else if (result != null) {
long newAddress = offHeapEntryFactory.create(key, hashCode, result);
// TODO: Technically actualAddress could be a 0 and bucketAddress != 0, which means we will loop through
// entire bucket for no reason as it will never match (doing key equality checks)
performPut(bucketAddress, actualAddress, newAddress, key, memoryOffset, false, false);
} else {
// result is null here - so we remove the entry
performRemove(bucketAddress, actualAddress, key, null, memoryOffset, false);
}
} finally {
stampedLock.unlockWrite(writeStamp);
}
if (prev == null && result != null) {
checkResize();
}
return result;
}
@Override
public boolean containsKey(Object key) {
throw new UnsupportedOperationException();
}
@Override
public boolean containsValue(Object value) {
throw new UnsupportedOperationException();
}
private InternalCacheEntry<WrappedBytes, WrappedBytes> peekOrGet(WrappedBytes k, boolean peek) {
int hashCode = k.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long readStamp = stampedLock.readLock();
try {
checkDeallocation();
MemoryAddressHash memoryLookup;
if (pendingBlocks != null && pendingBlocks.contains(lockOffset)) {
memoryLookup = this.oldMemoryLookup;
} else {
memoryLookup = this.memoryLookup;
}
return lockedPeekOrGet(memoryLookup, k, hashCode, peek);
} finally {
stampedLock.unlockRead(readStamp);
}
}
@GuardedBy("locks#readLock")
private InternalCacheEntry<WrappedBytes, WrappedBytes> lockedPeekOrGet(MemoryAddressHash memoryLookup,
WrappedBytes k, int hashCode, boolean peek) {
long bucketAddress = memoryLookup.getMemoryAddressOffset(getMemoryOffset(memoryLookup, hashCode));
if (bucketAddress == 0) {
return null;
}
long actualAddress = performGet(bucketAddress, k, hashCode);
if (actualAddress != 0) {
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = offHeapEntryFactory.fromMemory(actualAddress);
if (!peek) {
entryRetrieved(actualAddress);
}
return ice;
}
return null;
}
/**
* Gets the actual address for the given key in the given bucket or 0 if it isn't present or expired
* @param bucketHeadAddress the starting address of the bucket
* @param k the key to retrieve the address for, if it matches
* @param hashCode the hashCode of the key
* @return the address matching the key or 0
*/
@GuardedBy("locks#readLock")
private long performGet(long bucketHeadAddress, WrappedBytes k, int hashCode) {
long address = bucketHeadAddress;
while (address != 0) {
long nextAddress = offHeapEntryFactory.getNext(address);
if (offHeapEntryFactory.equalsKey(address, k, hashCode)) {
break;
} else {
address = nextAddress;
}
}
return address;
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> get(Object key) {
if (!(key instanceof WrappedBytes)) {
return null;
}
return peekOrGet((WrappedBytes) key, false);
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> peek(Object key) {
if (!(key instanceof WrappedBytes)) {
return null;
}
return peekOrGet((WrappedBytes) key, true);
}
@Override
public void putNoReturn(WrappedBytes key, InternalCacheEntry<WrappedBytes, WrappedBytes> value) {
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long address = memoryLookup.getMemoryAddressOffset(memoryOffset);
long newAddress = offHeapEntryFactory.create(key, hashCode, value);
performPut(address, 0, newAddress, key, memoryOffset, false, false);
} finally {
stampedLock.unlockWrite(writeStamp);
}
checkResize();
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> put(WrappedBytes key,
InternalCacheEntry<WrappedBytes, WrappedBytes> value) {
InternalCacheEntry<WrappedBytes, WrappedBytes> returnedValue;
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long address = memoryLookup.getMemoryAddressOffset(memoryOffset);
long newAddress = offHeapEntryFactory.create(key, hashCode, value);
returnedValue = performPut(address, 0, newAddress, key, memoryOffset, true, false);
} finally {
stampedLock.unlockWrite(writeStamp);
}
// If we added a new entry, check the resize
if (returnedValue == null) {
checkResize();
}
return returnedValue;
}
/**
* Performs the actual put operation, adding the new address into the memoryOffset bucket
* and possibly removing the old entry with the same key.
* Always adds the new entry at the end of the bucket's linked list.
* @param bucketHeadAddress the entry address of the first element in the lookup
* @param actualAddress the actual address if it is known or 0. By passing this != 0 equality checks can be bypassed.
* If a value of 0 is provided this will use key equality.
* @param newAddress the address of the new entry
* @param key the key of the entry
* @param memoryOffset the offset in the memory lookup where this key maps to
* @param requireReturn whether the previous value must be returned
* @param transfer whether this put is part of a resize transfer (skips listener callbacks and size accounting)
* @return the previous entry if one was replaced and {@code requireReturn} is {@code true}, {@code null} otherwise
*/
@GuardedBy("locks#writeLock")
private InternalCacheEntry<WrappedBytes, WrappedBytes> performPut(long bucketHeadAddress, long actualAddress,
long newAddress, WrappedBytes key, int memoryOffset, boolean requireReturn, boolean transfer) {
// Have to start new linked node list
if (bucketHeadAddress == 0) {
memoryLookup.putMemoryAddressOffset(memoryOffset, newAddress);
if (!transfer) {
entryCreated(newAddress);
size.incrementAndGet();
}
return null;
} else {
boolean replaceHead = false;
boolean foundPrevious = false;
// Whether the key was found or not - short circuit equality checks
InternalCacheEntry<WrappedBytes, WrappedBytes> previousValue = null;
long address = bucketHeadAddress;
// Holds the previous linked list address
long prevAddress = 0;
// Keep looping until we get the tail end - we always append the put to the end
while (address != 0) {
long nextAddress = offHeapEntryFactory.getNext(address);
if (!foundPrevious) {
// If the actualAddress was not known check key equality otherwise just compare with the address
if (actualAddress == 0 ? offHeapEntryFactory.equalsKey(address, key) : actualAddress == address) {
assert !transfer : "We should never have a replace with put from a transfer!";
foundPrevious = true;
if (requireReturn) {
previousValue = offHeapEntryFactory.fromMemory(address);
}
entryReplaced(newAddress, address);
// If this is true it means this was the first node in the linked list
if (prevAddress == 0) {
if (nextAddress == 0) {
// This branch is the case where our key is the only one in the linked list
replaceHead = true;
} else {
// This branch is the case where our key is the first with another after
memoryLookup.putMemoryAddressOffset(memoryOffset, nextAddress);
}
} else {
// This branch means our node was not the first, so we have to update the address before ours
// to the one we previously referenced
offHeapEntryFactory.setNext(prevAddress, nextAddress);
// We purposely don't update prevAddress, because we have to keep it as the current pointer
// since we removed ours
address = nextAddress;
continue;
}
}
}
prevAddress = address;
address = nextAddress;
}
// If we didn't find the key previous, it means we are a new entry
if (!foundPrevious && !transfer) {
entryCreated(newAddress);
size.incrementAndGet();
}
if (replaceHead) {
memoryLookup.putMemoryAddressOffset(memoryOffset, newAddress);
} else {
// Now prevAddress should be the last link so we fix our link
offHeapEntryFactory.setNext(prevAddress, newAddress);
}
return previousValue;
}
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> remove(Object key) {
if (!(key instanceof WrappedBytes)) {
return null;
}
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long address = memoryLookup.getMemoryAddressOffset(memoryOffset);
if (address == 0) {
return null;
}
return performRemove(address, 0, (WrappedBytes) key, null, memoryOffset, true);
} finally {
stampedLock.unlockWrite(writeStamp);
}
}
/**
* This method is designed to be called by an outside class. The write lock for the given key must
* be acquired via the lock returned from {@link #getStampedLock(int)} using the key's hash code.
* This method avoids some additional lookups as the memory address is already known, and it does not return
* the old entry.
* @param key key to remove
* @param address the address for the key
*/
@GuardedBy("locks#writeLock")
void remove(WrappedBytes key, long address) {
int hashCode = key.hashCode();
ensureTransferred(getLockOffset(hashCode));
int memoryOffset = getMemoryOffset(hashCode);
long bucketAddress = memoryLookup.getMemoryAddressOffset(memoryOffset);
assert bucketAddress != 0;
performRemove(bucketAddress, address, key, null, memoryOffset, false);
}
/**
* Performs the actual remove operation, removing the matched entry from its appropriate bucket.
* @param bucketHeadAddress the starting address of the bucket
* @param actualAddress the actual address if it is known or 0. By passing this != 0 equality checks can be bypassed.
* If a value of 0 is provided this will use key equality; the key is not required when this is non zero
* @param key the key of the entry
* @param value the value to match if present
* @param memoryOffset the offset in the memory bucket where this key mapped to
* @param requireReturn whether this method is forced to return the entry removed (optimizations can be done if
* the entry is not needed)
* @return the removed entry if it was read (for a value match or because {@code requireReturn} was set),
* {@code null} otherwise
*/
@GuardedBy("locks#writeLock")
private InternalCacheEntry<WrappedBytes, WrappedBytes> performRemove(long bucketHeadAddress, long actualAddress,
WrappedBytes key, WrappedBytes value, int memoryOffset, boolean requireReturn) {
long prevAddress = 0;
long address = bucketHeadAddress;
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = null;
while (address != 0) {
long nextAddress = offHeapEntryFactory.getNext(address);
boolean removeThisAddress;
// If the actualAddress was not known, check key equality otherwise just compare with the address
removeThisAddress = actualAddress == 0 ? offHeapEntryFactory.equalsKey(address, key) : actualAddress == address;
if (removeThisAddress) {
if (value != null) {
ice = offHeapEntryFactory.fromMemory(address);
// If value doesn't match and was provided then don't remove it
if (!value.equalsWrappedBytes(ice.getValue())) {
ice = null;
break;
}
}
if (requireReturn && ice == null) {
ice = offHeapEntryFactory.fromMemory(address);
}
entryRemoved(address);
if (prevAddress != 0) {
offHeapEntryFactory.setNext(prevAddress, nextAddress);
} else {
memoryLookup.putMemoryAddressOffset(memoryOffset, nextAddress);
}
size.decrementAndGet();
break;
}
prevAddress = address;
address = nextAddress;
}
return ice;
}
@Override
public void putAll(Map<? extends WrappedBytes, ? extends InternalCacheEntry<WrappedBytes, WrappedBytes>> m) {
for (Entry<? extends WrappedBytes, ? extends InternalCacheEntry<WrappedBytes, WrappedBytes>> entry : m.entrySet()) {
put(entry.getKey(), entry.getValue());
}
}
@Override
public void clear() {
locks.lockAll();
try {
actualClear();
} finally {
locks.unlockAll();
}
}
@GuardedBy("locks#lockAll")
private void actualClear() {
checkDeallocation();
if (log.isTraceEnabled()) {
log.trace("Clearing off-heap data");
}
LongConsumer removeEntries = address -> {
while (address != 0) {
long nextAddress = offHeapEntryFactory.getNext(address);
entryRemoved(address);
address = nextAddress;
}
};
int pointerCount = memoryLookup.getPointerCount();
memoryLookup.removeAll().forEach(removeEntries);
memoryLookup.deallocate();
memoryLookup = null;
if (listener != null) {
boolean resized = listener.resize(-pointerCount);
assert resized : "Resize of negative pointers should always work!";
}
if (oldMemoryLookup != null) {
oldMemoryLookup.removeAll().forEach(removeEntries);
transferComplete();
}
// Initialize to beginning again
sizeMemoryBuckets(INITIAL_SIZE);
size.set(0);
if (log.isTraceEnabled()) {
log.trace("Cleared off-heap data");
}
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> putIfAbsent(WrappedBytes key,
InternalCacheEntry<WrappedBytes, WrappedBytes> value) {
return compute(key, (k, v) -> {
if (v == null) {
return value;
}
return v;
});
}
@Override
public boolean remove(Object key, Object value) {
if (!(key instanceof WrappedBytes) || !(value instanceof InternalCacheEntry)) {
return false;
}
Object innerValue = ((InternalCacheEntry) value).getValue();
if (!(innerValue instanceof WrappedBytes)) {
return false;
}
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long address = memoryLookup.getMemoryAddressOffset(memoryOffset);
return address != 0 && performRemove(address, 0, (WrappedBytes) key, (WrappedBytes) innerValue, memoryOffset, true) != null;
} finally {
stampedLock.unlockWrite(writeStamp);
}
}
@Override
public boolean replace(WrappedBytes key, InternalCacheEntry<WrappedBytes, WrappedBytes> oldValue,
InternalCacheEntry<WrappedBytes, WrappedBytes> newValue) {
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long address = memoryLookup.getMemoryAddressOffset(memoryOffset);
return address != 0 && performReplace(address, key, hashCode, memoryOffset, oldValue, newValue) != null;
} finally {
stampedLock.unlockWrite(writeStamp);
}
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> replace(WrappedBytes key,
InternalCacheEntry<WrappedBytes, WrappedBytes> value) {
int hashCode = key.hashCode();
int lockOffset = getLockOffset(hashCode);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long writeStamp = stampedLock.writeLock();
try {
checkDeallocation();
ensureTransferred(lockOffset);
int memoryOffset = getMemoryOffset(hashCode);
long address = memoryLookup.getMemoryAddressOffset(memoryOffset);
if (address == 0) {
return null;
}
return performReplace(address, key, hashCode, memoryOffset, null, value);
} finally {
stampedLock.unlockWrite(writeStamp);
}
}
/**
* Performs the actual replace operation removing the old entry and if removed writes the new entry into the same
* bucket.
* @param bucketHeadAddress the starting address of the bucket
* @param key the key of the entry
* @param hashCode the hashCode of the key
* @param memoryOffset the offset in the memory bucket where this key mapped to
* @param oldValue optional old value to match against - if null then any value will be replaced
* @param newValue new value to place into the map replacing the old if possible
* @return replaced value or null if the entry wasn't present
*/
@GuardedBy("locks#writeLock")
private InternalCacheEntry<WrappedBytes, WrappedBytes> performReplace(long bucketHeadAddress, WrappedBytes key,
int hashCode, int memoryOffset, InternalCacheEntry<WrappedBytes, WrappedBytes> oldValue,
InternalCacheEntry<WrappedBytes, WrappedBytes> newValue) {
long prevAddress = 0;
// We only use the head pointer for the first iteration
long address = bucketHeadAddress;
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = null;
while (address != 0) {
long nextAddress = offHeapEntryFactory.getNext(address);
// Replace never knows the actual address up front, so always check key equality
if (offHeapEntryFactory.equalsKey(address, key)) {
if (oldValue != null) {
ice = offHeapEntryFactory.fromMemory(address);
// If value doesn't match and was provided then don't replace it
if (!ice.getValue().equalsWrappedBytes(oldValue.getValue())) {
ice = null;
break;
}
}
// Need to always return the previous, so make sure we read it
if (ice == null) {
ice = offHeapEntryFactory.fromMemory(address);
}
long newAddress = offHeapEntryFactory.create(key, hashCode, newValue);
entryReplaced(newAddress, address);
if (prevAddress != 0) {
offHeapEntryFactory.setNext(prevAddress, newAddress);
} else {
memoryLookup.putMemoryAddressOffset(memoryOffset, newAddress);
}
// We always set the next address on the newly created address - this will be 0 if the previous value
// was the end of the linked list
offHeapEntryFactory.setNext(newAddress, nextAddress);
break;
}
prevAddress = address;
address = nextAddress;
}
return ice;
}
@Override
public Set<WrappedBytes> keySet() {
throw new UnsupportedOperationException("keySet is not supported as it doesn't contain expiration data");
}
@Override
public Collection<InternalCacheEntry<WrappedBytes, WrappedBytes>> values() {
return new AbstractCollection<InternalCacheEntry<WrappedBytes, WrappedBytes>>() {
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iterator() {
return entryIterator();
}
@Override
public int size() {
return OffHeapConcurrentMap.this.size();
}
@Override
public boolean remove(Object o) {
return o instanceof InternalCacheEntry && OffHeapConcurrentMap.this.remove(((InternalCacheEntry) o).getKey(),
((InternalCacheEntry) o).getValue());
}
};
}
/**
* Stateful iterator implementation that works by going through the underlying buckets one by one until it finds
* a non empty bucket. It will then store the values from that bucket to be returned via the {@code next} method.
* <p>
* When the iterator is used without a resize the operation is straightforward: it keeps an offset into the
* buckets, continually reads the next bucket and acquires the appropriate read lock for that bucket location.
* <p>
* During a resize, iteration is a bit more involved. If a resize occurs while an iteration is ongoing it can
* temporarily change the iteration's behavior. If the iteration is in the middle of a lock region when that
* region is resized, it must extrapolate, from the factor by which the bucket count grew, which buckets the
* remaining entries have moved to. Luckily this operation is still efficient, as resized buckets are stored
* contiguously.
*/
private class ValueIterator implements Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> {
int bucketPosition;
// -1 symbolizes it is the first time the iterator is used
int bucketCount = -1;
int bucketLockShift;
int bucketLockStop;
Queue<InternalCacheEntry<WrappedBytes, WrappedBytes>> values = new ArrayDeque<>();
@Override
public boolean hasNext() {
if (!values.isEmpty()) {
return true;
}
checkAndReadBucket();
return !values.isEmpty();
}
private void checkAndReadBucket() {
while (bucketPosition != bucketCount) {
if (readNextBucket()) {
break;
}
}
}
@Override
public InternalCacheEntry<WrappedBytes, WrappedBytes> next() {
InternalCacheEntry<WrappedBytes, WrappedBytes> ice = values.poll();
if (ice == null) {
// Caller invoked next without checking hasNext - try to see if anything is available
checkAndReadBucket();
ice = values.remove();
}
return ice;
}
/**
* Reads buckets until it finds one that is not empty or it finishes reading a lock region. This method is meant
* to be invoked multiple times changing iteration state on each call.
* @return whether a value has been read
*/
boolean readNextBucket() {
boolean foundValue = false;
int lockOffset = getLockOffset(bucketPosition);
StampedLock stampedLock = locks.getLockWithOffset(lockOffset);
long readStamp = stampedLock.readLock();
try {
checkDeallocation();
MemoryAddressHash memoryAddressHash;
if (pendingBlocks != null && pendingBlocks.contains(lockOffset)) {
memoryAddressHash = oldMemoryLookup;
} else {
memoryAddressHash = memoryLookup;
}
int pointerCount = memoryAddressHash.getPointerCount();
if (bucketCount == -1) {
bucketCount = pointerCount;
bucketLockStop = getBucketRegionSize(bucketCount);
bucketLockShift = Integer.numberOfTrailingZeros(bucketLockStop);
} else if (bucketCount > pointerCount) {
// If bucket count is greater than pointer count - it means we had a clear in the middle of iterating
// Just return without adding anymore values
bucketPosition = bucketCount;
return false;
} else if (bucketCount < pointerCount) {
resizeIteration(pointerCount);
}
boolean completedLockBucket;
// Normal iteration: keep scanning buckets until we either complete the lock region or find a
// non-empty bucket, whose entries we enqueue before returning
while (!(completedLockBucket = bucketLockStop == bucketPosition)) {
long address = memoryAddressHash.getMemoryAddressOffsetNoTraceIfAbsent(bucketPosition++);
if (address != 0) {
long nextAddress;
do {
nextAddress = offHeapEntryFactory.getNext(address);
values.add(offHeapEntryFactory.fromMemory(address));
foundValue = true;
} while ((address = nextAddress) != 0);
// We read a single bucket now return to get the value back
break;
}
}
// If we completed the lock region and we haven't yet gone through all the buckets, we have to
// prepare for the next lock region worth of buckets
if (completedLockBucket && bucketPosition != bucketCount) {
bucketLockStop += getBucketRegionSize(bucketCount);
}
} finally {
stampedLock.unlockRead(readStamp);
}
return foundValue;
}
/**
* Invoked when the iteration saw a bucket size less than the current bucket size of the memory lookup. This
* means we had a resize during iteration. We must update our bucket position, stop and counts properly based
* on how many resizes have occurred.
* @param newBucketSize how large the new bucket size is
*/
@GuardedBy("locks#readLock")
private void resizeIteration(int newBucketSize) {
int bucketIncreaseShift = 31 - Integer.numberOfTrailingZeros(bucketCount) - memoryShift;
bucketPosition = bucketPosition << bucketIncreaseShift;
bucketLockStop = bucketLockStop << bucketIncreaseShift;
bucketLockShift = Integer.numberOfTrailingZeros(bucketLockStop);
bucketCount = newBucketSize;
}
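// Example of the math above (illustrative): if the map grew from 256 to 512 buckets,
// memoryShift is now 22 and bucketIncreaseShift = (31 - 8) - 22 = 1, so a bucket
// previously at position p is now covered by the buckets starting at p << 1.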
private int getLockOffset(int bucketPosition) {
return bucketPosition >>> bucketLockShift;
}
}
private Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> entryIterator() {
if (size.get() == 0) {
return Collections.emptyIterator();
}
return new ValueIterator();
}
@Override
public Set<Entry<WrappedBytes, InternalCacheEntry<WrappedBytes, WrappedBytes>>> entrySet() {
return new AbstractSet<Entry<WrappedBytes, InternalCacheEntry<WrappedBytes, WrappedBytes>>>() {
@Override
public Iterator<Entry<WrappedBytes, InternalCacheEntry<WrappedBytes, WrappedBytes>>> iterator() {
Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> entryIterator = entryIterator();
return new IteratorMapper<>(entryIterator, ice -> new AbstractMap.SimpleImmutableEntry<>(ice.getKey(), ice));
}
@Override
public int size() {
return OffHeapConcurrentMap.this.size();
}
};
}
}
| 53,388
| 40.742768
| 143
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapLruNode.java
|
package org.infinispan.container.offheap;
/**
* Accessors for the fields of a native LRU list node.
*
* @since 9.1
*/
class OffHeapLruNode {
private static final OffHeapMemory MEMORY = OffHeapMemory.INSTANCE;
private static final int ADDRESS_SIZE = 8;
private static final int PREVIOUS_NODE_OFFSET = 0;
private static final int NEXT_NODE_OFFSET = PREVIOUS_NODE_OFFSET + ADDRESS_SIZE;
private OffHeapLruNode() {
}
static long getNext(long lruNodeAddress) {
return MEMORY.getLong(lruNodeAddress, NEXT_NODE_OFFSET);
}
static void setNext(long lruNodeAddress, long nextAddress) {
MEMORY.putLong(lruNodeAddress, NEXT_NODE_OFFSET, nextAddress);
}
static long getPrevious(long lruNodeAddress) {
return MEMORY.getLong(lruNodeAddress, PREVIOUS_NODE_OFFSET);
}
static void setPrevious(long lruNodeAddress, long previousAddress) {
MEMORY.putLong(lruNodeAddress, PREVIOUS_NODE_OFFSET, previousAddress);
}
static String debugString(long address) {
return String.format("0x%016x <-- entry 0x%016x --> 0x%016x", OffHeapLruNode.getPrevious(address), address,
OffHeapLruNode.getNext(address));
}
}
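// Illustrative sketch (assumed usage, not taken from this file): unlinking a node from the
// doubly-linked LRU list using the accessors above. `address` is the node being removed and
// 0 marks both ends of the list.
//
//    long prev = OffHeapLruNode.getPrevious(address);
//    long next = OffHeapLruNode.getNext(address);
//    if (prev != 0) OffHeapLruNode.setNext(prev, next);
//    if (next != 0) OffHeapLruNode.setPrevious(next, prev);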
| 1,184
| 28.625
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/Bits.java
|
package org.infinispan.container.offheap;
/**
* Utility methods for inserting and retrieving values to/from a byte[]
*
* @author wburns
* @since 9.0
*/
public class Bits {
static int getInt(byte[] b, int off) {
return ((b[off + 3] & 0xFF)) +
((b[off + 2] & 0xFF) << 8) +
((b[off + 1] & 0xFF) << 16) +
((b[off]) << 24);
}
static long getLong(byte[] b, int off) {
return ((b[off + 7] & 0xFFL)) +
((b[off + 6] & 0xFFL) << 8) +
((b[off + 5] & 0xFFL) << 16) +
((b[off + 4] & 0xFFL) << 24) +
((b[off + 3] & 0xFFL) << 32) +
((b[off + 2] & 0xFFL) << 40) +
((b[off + 1] & 0xFFL) << 48) +
(((long) b[off]) << 56);
}
static void putInt(byte[] b, int off, int val) {
b[off + 3] = (byte) (val);
b[off + 2] = (byte) (val >>> 8);
b[off + 1] = (byte) (val >>> 16);
b[off] = (byte) (val >>> 24);
}
static void putLong(byte[] b, int off, long val) {
b[off + 7] = (byte) (val);
b[off + 6] = (byte) (val >>> 8);
b[off + 5] = (byte) (val >>> 16);
b[off + 4] = (byte) (val >>> 24);
b[off + 3] = (byte) (val >>> 32);
b[off + 2] = (byte) (val >>> 40);
b[off + 1] = (byte) (val >>> 48);
b[off] = (byte) (val >>> 56);
}
}
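// Round-trip example (illustrative): the accessors use big-endian byte order, so a value
// written at an offset reads back unchanged.
//
//    byte[] buf = new byte[12];
//    Bits.putInt(buf, 0, 0xCAFEBABE);
//    Bits.putLong(buf, 4, 42L);
//    assert Bits.getInt(buf, 0) == 0xCAFEBABE;
//    assert Bits.getLong(buf, 4) == 42L;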
| 1,338
| 27.489362
| 75
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/OffHeapDataContainer.java
|
package org.infinispan.container.offheap;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.function.ObjIntConsumer;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.commons.util.FilterIterator;
import org.infinispan.commons.util.FilterSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.AbstractInternalDataContainer;
import org.infinispan.container.impl.PeekableTouchableMap;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
/**
* @author wburns
* @since 9.4
*/
public class OffHeapDataContainer extends AbstractInternalDataContainer<WrappedBytes, WrappedBytes> {
@Inject protected OffHeapMemoryAllocator allocator;
@Inject protected OffHeapEntryFactory offHeapEntryFactory;
private OffHeapConcurrentMap map;
@Start
public void start() {
map = new OffHeapConcurrentMap(allocator, offHeapEntryFactory, null);
}
@Stop
public void stop() {
clear();
map.close();
}
@Override
protected PeekableTouchableMap<WrappedBytes, WrappedBytes> getMapForSegment(int segment) {
return map;
}
@Override
protected int getSegmentForKey(Object key) {
// We always map to the same map, so no reason to waste time computing the segment
return -1;
}
@Override
public Spliterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> spliterator() {
return filterExpiredEntries(spliteratorIncludingExpired());
}
@Override
public Spliterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> spliterator(IntSet segments) {
return new FilterSpliterator<>(spliterator(), ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Spliterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> spliteratorIncludingExpired() {
return map.values().spliterator();
}
@Override
public Spliterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> spliteratorIncludingExpired(IntSet segments) {
return new FilterSpliterator<>(spliteratorIncludingExpired(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iterator() {
return new EntryIterator(iteratorIncludingExpired());
}
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iterator(IntSet segments) {
return new FilterIterator<>(iterator(), ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iteratorIncludingExpired() {
return map.values().iterator();
}
@Override
public Iterator<InternalCacheEntry<WrappedBytes, WrappedBytes>> iteratorIncludingExpired(IntSet segments) {
return new FilterIterator<>(iteratorIncludingExpired(),
ice -> segments.contains(keyPartitioner.getSegment(ice.getKey())));
}
@Override
public void addSegments(IntSet segments) {
throw new UnsupportedOperationException();
}
@Override
public void removeSegments(IntSet segments) {
throw new UnsupportedOperationException();
}
@Override
public int sizeIncludingExpired() {
return map.size();
}
@Override
public void clear() {
map.clear();
}
@Override
public void forEachSegment(ObjIntConsumer<PeekableTouchableMap<WrappedBytes, WrappedBytes>> segmentMapConsumer) {
segmentMapConsumer.accept(map, 0);
}
}
| 3,690
| 30.547009
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/offheap/MemoryAddressHash.java
|
package org.infinispan.container.offheap;
import java.util.stream.LongStream;
import org.infinispan.commons.util.Util;
import sun.misc.Unsafe;
/**
* @author wburns
* @since 9.0
*/
public class MemoryAddressHash {
private static final Unsafe UNSAFE = UnsafeHolder.UNSAFE;
private static final OffHeapMemory MEMORY = OffHeapMemory.INSTANCE;
private final long memory;
private final int pointerCount;
private final OffHeapMemoryAllocator allocator;
public MemoryAddressHash(int pointers, OffHeapMemoryAllocator allocator) {
this.pointerCount = Util.findNextHighestPowerOfTwo(pointers);
long bytes = ((long) pointerCount) << 3;
this.allocator = allocator;
memory = allocator.allocate(bytes);
// Have to zero the allocated memory to make sure no garbage values are read
UNSAFE.setMemory(memory, bytes, (byte) 0);
}
public void putMemoryAddressOffset(int offset, long address) {
MEMORY.putLong(memory, ((long) offset) << 3, address);
}
public long getMemoryAddressOffset(int offset) {
return MEMORY.getLong(memory,((long) offset) << 3);
}
public long getMemoryAddressOffsetNoTraceIfAbsent(int offset) {
return MEMORY.getLongNoTraceIfAbsent(memory,((long) offset) << 3);
}
public void deallocate() {
allocator.deallocate(memory, ((long) pointerCount) << 3);
}
/**
* Returns a stream of longs that are all of the various memory locations
* @return stream of the various memory locations
*/
public LongStream toStream() {
return LongStream.iterate(0, l -> l + 8)
.limit(pointerCount)
.map(l -> MEMORY.getLong(memory, l))
.filter(l -> l != 0);
}
/**
* Removes all the address lookups by setting them to 0. This method returns a LongStream that contains all of the
* valid (non zero) addresses that were present during this operation.
* @return stream with the valid memory pointers to stored values
*/
public LongStream removeAll() {
return LongStream.iterate(0, l -> l + 8)
.limit(pointerCount)
.map(l -> MEMORY.getAndSetLongNoTraceIfAbsent(memory, l, 0))
.filter(l -> l != 0);
}
/**
* Removes all the address lookups by setting them to 0 within the given offset, limiting the removal to only
* a specific count of addresses. This method returns a LongStream that contains all of the
* valid (non zero) addresses that were present during this operation.
* @param offset offset into the block
* @param count how many pointers to look at
* @return stream with the valid memory pointers to stored values
*/
public LongStream removeAll(int offset, int count) {
return LongStream.iterate(((long) offset) << 3, l -> l + 8)
.limit(count)
.map(l -> MEMORY.getAndSetLongNoTraceIfAbsent(memory, l, 0))
.filter(l -> l != 0);
}
public int getPointerCount() {
return pointerCount;
}
}
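// Illustrative usage (assumed names; mirrors how OffHeapConcurrentMap#transfer drains a lock
// region): remove one region's buckets and walk each returned chain head.
//
//    MemoryAddressHash lookup = new MemoryAddressHash(256, allocator);
//    int blockSize = lookup.getPointerCount() / lockCount; // buckets per lock region
//    lookup.removeAll(lockOffset * blockSize, blockSize)
//          .forEach(head -> { /* walk the bucket chain starting at head */ });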
| 2,968
| 32.738636
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/package-info.java
|
/**
* Entries which are stored in data containers. This package contains different implementations of
* entries based on the information needed to store an entry. Certain entries need more information - such as timestamps
* and lifespans, if they are used - than others, and the appropriate implementation is selected dynamically. This
* helps minimize Infinispan's memory requirements without storing unnecessary metadata.
*/
package org.infinispan.container.entries;
| 477
| 58.75
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/TransientCacheEntry.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A cache entry that is transient, i.e., it can be considered expired after a period of not being used.
*
* @author Manik Surtani
* @since 4.0
*/
public class TransientCacheEntry extends AbstractInternalCacheEntry {
protected long maxIdle;
protected long lastUsed;
public TransientCacheEntry(Object key, Object value, long maxIdle, long lastUsed) {
this(key, value, null, maxIdle, lastUsed);
}
protected TransientCacheEntry(Object key, Object value, PrivateMetadata internalMetadata, long maxIdle,
long lastUsed) {
super(key, value, internalMetadata);
this.maxIdle = maxIdle;
this.lastUsed = lastUsed;
}
@Override
public final void touch(long currentTimeMillis) {
this.lastUsed = currentTimeMillis;
}
@Override
public void reincarnate(long now) {
// no-op
}
@Override
public final boolean canExpire() {
return true;
}
@Override
public boolean canExpireMaxIdle() {
return true;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransient(maxIdle, lastUsed, now);
}
public void setMaxIdle(long maxIdle) {
this.maxIdle = maxIdle;
}
@Override
public long getCreated() {
return -1;
}
@Override
public final long getLastUsed() {
return lastUsed;
}
@Override
public long getLifespan() {
return -1;
}
@Override
public long getExpiryTime() {
return maxIdle > -1 ? lastUsed + maxIdle : -1;
}
@Override
public final long getMaxIdle() {
return maxIdle;
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new TransientCacheValue(value, internalMetadata, maxIdle, lastUsed);
}
@Override
public Metadata getMetadata() {
return new EmbeddedMetadata.Builder()
.maxIdle(maxIdle, TimeUnit.MILLISECONDS).build();
}
@Override
public void setMetadata(Metadata metadata) {
throw new IllegalStateException(
"Metadata cannot be set on transient entries. They need to be recreated via the entry factory.");
}
@Override
public TransientCacheEntry clone() {
return (TransientCacheEntry) super.clone();
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", lastUsed=").append(lastUsed);
builder.append(", maxIdle=").append(maxIdle);
}
public static class Externalizer extends AbstractExternalizer<TransientCacheEntry> {
@Override
public void writeObject(ObjectOutput output, TransientCacheEntry tce) throws IOException {
output.writeObject(tce.key);
output.writeObject(tce.value);
output.writeObject(tce.internalMetadata);
UnsignedNumeric.writeUnsignedLong(output, tce.lastUsed);
output.writeLong(tce.maxIdle); // could be negative so should not use unsigned longs
}
@Override
public TransientCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
long maxIdle = input.readLong();
return new TransientCacheEntry(key, value, internalMetadata, maxIdle, lastUsed);
}
@Override
public Integer getId() {
return Ids.TRANSIENT_ENTRY;
}
@Override
public Set<Class<? extends TransientCacheEntry>> getTypeClasses() {
return Collections.singleton(TransientCacheEntry.class);
}
}
}
| 4,258
| 26.655844
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/RemoteMetadata.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.commons.util.Util;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.container.versioning.SimpleClusteredVersion;
import org.infinispan.metadata.Metadata;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
/**
* This is a metadata type used by a scattered cache during state transfer. The address points to the node which has
* the last known version of the given entry: during key transfer a RemoteMetadata is created and overwritten if
* another response with a higher version arrives. During value transfer the address is already final and we request
* the value + metadata only from this node.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
@ProtoTypeId(ProtoStreamTypeIds.REMOTE_METADATA)
public class RemoteMetadata implements Metadata {
private final JGroupsAddress address;
private final SimpleClusteredVersion version;
public RemoteMetadata(Address address, EntryVersion version) {
this((JGroupsAddress) address, (SimpleClusteredVersion) version);
}
@ProtoFactory
RemoteMetadata(JGroupsAddress address, SimpleClusteredVersion version) {
this.address = address;
this.version = version;
}
@ProtoField(number = 1, javaType = JGroupsAddress.class)
public Address getAddress() {
return address;
}
@Override
public long lifespan() {
return -1;
}
@Override
public long maxIdle() {
return -1;
}
@Override
@ProtoField(number = 2, javaType = SimpleClusteredVersion.class)
public EntryVersion version() {
return version;
}
@Override
public Builder builder() {
throw new UnsupportedOperationException();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("RemoteMetadata{");
sb.append("address=").append(address);
sb.append(", version=").append(version);
sb.append('}');
return sb.toString();
}
public static class Externalizer implements AdvancedExternalizer<RemoteMetadata> {
@Override
public Set<Class<? extends RemoteMetadata>> getTypeClasses() {
return Util.asSet(RemoteMetadata.class);
}
@Override
public Integer getId() {
return Ids.METADATA_REMOTE;
}
@Override
public void writeObject(ObjectOutput output, RemoteMetadata entry) throws IOException {
output.writeObject(entry.getAddress());
if (entry.version != null) {
output.writeInt(entry.version.getTopologyId());
output.writeLong(entry.version.getVersion());
} else {
output.writeInt(-1);
}
}
@Override
public RemoteMetadata readObject(ObjectInput input) throws IOException, ClassNotFoundException {
JGroupsAddress address = (JGroupsAddress) input.readObject();
int topologyId = input.readInt();
SimpleClusteredVersion clusteredVersion;
if (topologyId == -1) {
clusteredVersion = null;
} else {
long version = input.readLong();
clusteredVersion = new SimpleClusteredVersion(topologyId, version);
}
return new RemoteMetadata(address, clusteredVersion);
}
}
}
| 3,783
| 31.62069
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/CacheEntrySizeCalculator.java
|
package org.infinispan.container.entries;
import org.infinispan.commons.util.AbstractEntrySizeCalculatorHelper;
import org.infinispan.commons.util.EntrySizeCalculator;
import org.infinispan.container.entries.metadata.MetadataImmortalCacheEntry;
import org.infinispan.container.entries.metadata.MetadataMortalCacheEntry;
import org.infinispan.container.entries.metadata.MetadataTransientCacheEntry;
import org.infinispan.container.entries.metadata.MetadataTransientMortalCacheEntry;
import org.infinispan.container.impl.InternalEntryFactoryImpl;
import org.infinispan.container.impl.KeyValueMetadataSizeCalculator;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* Implementation of a size calculator that calculates only the size of the value assuming it is an InternalCacheEntry.
* This delegates the calculation of the key and the value contained within the InternalCacheEntry to the provided
* SizeCalculator.
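* <p>
* A wiring sketch ({@code key} and {@code ice} stand for an assumed key and its InternalCacheEntry):
* <pre>{@code
* CacheEntrySizeCalculator<String, String> calc =
*       new CacheEntrySizeCalculator<>(new PrimitiveEntrySizeCalculator());
* long approxBytes = calc.calculateSize(key, ice); // key + value + entry overhead
* }</pre>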
* @param <K> The type of the key
* @param <V> The type of the value
* @author William Burns
* @since 8.0
*/
public class CacheEntrySizeCalculator<K, V> extends AbstractEntrySizeCalculatorHelper<K, InternalCacheEntry<K, V>>
implements KeyValueMetadataSizeCalculator<K, V> {
public CacheEntrySizeCalculator(EntrySizeCalculator<? super K, ? super V> calculator) {
this.calculator = calculator;
}
private final EntrySizeCalculator<? super K, ? super V> calculator;
@Override
public long calculateSize(K key, InternalCacheEntry<K, V> ice) {
// This will be non-zero when expiration is used but the metadata itself doesn't need to be stored
long noMetadataSize = 0;
boolean metadataAware;
// We want to put immortal entries first as they are very common. Also MetadataImmortalCacheEntry extends
// ImmortalCacheEntry so it has to come before
if (ice instanceof MetadataImmortalCacheEntry) {
metadataAware = true;
} else if (ice instanceof ImmortalCacheEntry) {
metadataAware = false;
} else if (ice instanceof MortalCacheEntry) {
noMetadataSize += 16;
metadataAware = false;
} else if (ice instanceof TransientCacheEntry) {
noMetadataSize += 16;
metadataAware = false;
} else if (ice instanceof TransientMortalCacheEntry) {
noMetadataSize += 32;
metadataAware = false;
} else if (ice instanceof MetadataMortalCacheEntry) {
metadataAware = true;
} else if (ice instanceof MetadataTransientCacheEntry) {
metadataAware = true;
} else {
metadataAware = ice instanceof MetadataTransientMortalCacheEntry;
}
Metadata metadata;
if (metadataAware) {
metadata = ice.getMetadata();
// We don't support other metadata types currently
if (!(metadata instanceof EmbeddedMetadata)) {
metadata = null;
}
} else {
metadata = null;
}
long keyValueMetadataSize = calculateSize(key, ice.getValue(), metadata, ice.getInternalMetadata());
return keyValueMetadataSize + noMetadataSize;
}
@Override
public long calculateSize(K key, V value, Metadata metadata, PrivateMetadata pvtMetadata) {
long objSize = calculator.calculateSize(key, value);
// This is for the surrounding ICE
long iceSize = 0;
// ICE itself is an object and has a reference to its class
iceSize += OBJECT_SIZE + POINTER_SIZE;
// Each ICE references key and value and private metadata
iceSize += 3 * POINTER_SIZE;
long metadataSize = 0;
if (metadata != null) {
// Mortal uses 2 longs to keep track of created and lifespan
if (metadata.lifespan() != -1) {
iceSize += 16;
}
// Transient uses 2 longs to keep track of last access and max idle
if (metadata.maxIdle() != -1) {
iceSize += 16;
}
if (InternalEntryFactoryImpl.isStoreMetadata(metadata, null)) {
// Assume it has a pointer for the metadata
iceSize += POINTER_SIZE;
// The metadata has itself and the class reference
metadataSize += OBJECT_SIZE + POINTER_SIZE;
// We only support embedded metadata that has a reference and NumericVersion instance
metadataSize += POINTER_SIZE;
metadataSize = roundUpToNearest8(metadataSize);
// This is for the NumericVersion and the long inside of it
metadataSize += numericVersionSize();
metadataSize = roundUpToNearest8(metadataSize);
}
}
long pvtMetadataSize = pvtMetadata == null || pvtMetadata.isEmpty() ? 0 : privateMetadataSize(pvtMetadata);
return objSize + roundUpToNearest8(iceSize) + metadataSize + pvtMetadataSize;
}
private static long privateMetadataSize(PrivateMetadata metadata) {
long size = HEADER_AND_CLASS_REFERENCE;
size += 2 * POINTER_SIZE; //two fields, IracMetadata & EntryVersion
size = roundUpToNearest8(size);
if (metadata.iracMetadata() != null) {
size += iracMetadataSize();
}
if (metadata.getNumericVersion() != null) {
size += numericVersionSize();
} else if (metadata.getClusteredVersion() != null) {
size += simpleClusteredVersionSize();
}
return size;
}
private static long iracMetadataSize() {
//estimated
long size = HEADER_AND_CLASS_REFERENCE;
size += 2 * POINTER_SIZE; //site: String, version: IracEntryVersion
//go recursive?
return roundUpToNearest8(size);
}
private static long numericVersionSize() {
//only a long stored
return roundUpToNearest8(HEADER_AND_CLASS_REFERENCE + 8);
}
private static long simpleClusteredVersionSize() {
//only a int and long
return roundUpToNearest8(HEADER_AND_CLASS_REFERENCE + 4 + 8);
}
}
| 5,971
| 40.186207
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/AbstractInternalCacheEntry.java
|
package org.infinispan.container.entries;
import java.util.Map;
import java.util.Objects;
import org.infinispan.commons.util.Util;
import org.infinispan.container.DataContainer;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* An abstract internal cache entry that is typically stored in the data container
*
* @author Manik Surtani
* @since 4.0
*/
public abstract class AbstractInternalCacheEntry implements InternalCacheEntry {
protected Object key;
protected Object value;
protected PrivateMetadata internalMetadata;
protected AbstractInternalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata) {
this.key = key;
this.value = value;
this.internalMetadata = internalMetadata;
}
@Override
public final void commit(DataContainer container) {
// no-op
}
@Override
public void setChanged(boolean changed) {
// no-op
}
@Override
public final void setCreated(boolean created) {
// no-op
}
@Override
public final void setRemoved(boolean removed) {
// no-op
}
@Override
public final void setEvicted(boolean evicted) {
// no-op
}
@Override
public void setSkipLookup(boolean skipLookup) {
//no-op
}
@Override
public final boolean isNull() {
return false;
}
@Override
public final boolean isChanged() {
return false;
}
@Override
public final boolean isCreated() {
return false;
}
@Override
public final boolean isRemoved() {
return false;
}
@Override
public final boolean isEvicted() {
return true;
}
@Override
public boolean skipLookup() {
return true;
}
@Override
public Metadata getMetadata() {
return null;
}
@Override
public void setMetadata(Metadata metadata) {
// no-op
}
@Override
public final Object getKey() {
return key;
}
@Override
public final Object getValue() {
return value;
}
@Override
public final Object setValue(Object value) {
Object old = this.value;
this.value = value;
return old;
}
@Override
public boolean isL1Entry() {
return false;
}
@Override
public final PrivateMetadata getInternalMetadata() {
return internalMetadata;
}
@Override
public final void setInternalMetadata(PrivateMetadata metadata) {
this.internalMetadata = metadata;
}
@Override
public final String toString() {
StringBuilder sb = new StringBuilder(getClass().getSimpleName());
sb.append('{');
appendFieldsToString(sb);
return sb.append('}').toString();
}
@Override
public AbstractInternalCacheEntry clone() {
try {
return (AbstractInternalCacheEntry) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException("Should never happen!", e);
}
}
@Override
public final boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Map.Entry)) return false;
Map.Entry that = (Map.Entry) o;
return Objects.equals(getKey(), that.getKey()) && Objects.equals(getValue(), that.getValue());
}
@Override
public final int hashCode() {
return 31 * Objects.hashCode(getKey()) + Objects.hashCode(getValue());
}
protected void appendFieldsToString(StringBuilder builder) {
builder.append("key=").append(Util.toStr(key));
builder.append(", value=").append(Util.toStr(value));
builder.append(", internalMetadata=").append(internalMetadata);
}
}
| 3,652
| 20.615385
| 101
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/ExpiryHelper.java
|
package org.infinispan.container.entries;
/**
* Provide utility methods for dealing with expiration of cache entries.
*
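* A minimal sketch of the mortal (lifespan-based) check, with timestamps in milliseconds:
* <pre>{@code
* long created = System.currentTimeMillis();
* long lifespan = 1000;
* ExpiryHelper.isExpiredMortal(lifespan, created, created + 500);  // false
* ExpiryHelper.isExpiredMortal(lifespan, created, created + 1500); // true
* }</pre>
*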
* @author Manik Surtani
* @author Sanne Grinovero
* @since 4.0
*/
public class ExpiryHelper {
public static boolean isExpiredMortal(long lifespan, long created, long now) {
return lifespan > -1 && created > -1 && now > created + lifespan;
}
public static boolean isExpiredTransient(long maxIdle, long lastUsed, long now) {
return maxIdle > -1 && lastUsed > -1 && now > maxIdle + lastUsed;
}
public static boolean isExpiredTransientMortal(long maxIdle, long lastUsed, long lifespan, long created, long now) {
return isExpiredTransient(maxIdle, lastUsed, now) || isExpiredMortal(lifespan, created, now);
}
/**
* Returns the most recent expiration time, i.e. the smaller of the two times that is not negative; if both are
* negative, a negative number is returned.
*
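* For example:
* <pre>{@code
* mostRecentExpirationTime(-1, 42); // 42
* mostRecentExpirationTime(10, 42); // 10, the sooner of the two
* mostRecentExpirationTime(-1, -1); // -1, neither expires
* }</pre>
*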
* @param firstTime one of the times
* @param secondTime one of the times
* @return the lowest time of each, that is not negative unless both are negative
*/
public static long mostRecentExpirationTime(long firstTime, long secondTime) {
if (firstTime < 0) {
return secondTime;
} else if (secondTime < 0) {
return firstTime;
}
return Math.min(firstTime, secondTime);
}
}
| 1,351
| 31.97561
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/ImmortalCacheValue.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* An immortal cache value, to correspond with {@link org.infinispan.container.entries.ImmortalCacheEntry}
*
* @author Manik Surtani
* @since 4.0
*/
public class ImmortalCacheValue implements InternalCacheValue, Cloneable {
public Object value;
protected PrivateMetadata internalMetadata;
public ImmortalCacheValue(Object value) {
this(value, null);
}
protected ImmortalCacheValue(Object value, PrivateMetadata internalMetadata) {
this.value = value;
this.internalMetadata = internalMetadata;
}
@Override
public InternalCacheEntry<?,?> toInternalCacheEntry(Object key) {
return new ImmortalCacheEntry(key, value, internalMetadata);
}
public final Object setValue(Object value) {
Object old = this.value;
this.value = value;
return old;
}
@Override
public Object getValue() {
return value;
}
@Override
public boolean isExpired(long now) {
return false;
}
@Override
public boolean canExpire() {
return false;
}
@Override
public long getCreated() {
return -1;
}
@Override
public long getLastUsed() {
return -1;
}
@Override
public long getLifespan() {
return -1;
}
@Override
public long getMaxIdle() {
return -1;
}
@Override
public long getExpiryTime() {
return -1;
}
@Override
public Metadata getMetadata() {
return new EmbeddedMetadata.Builder().lifespan(getLifespan()).maxIdle(getMaxIdle()).build();
}
@Override
public final PrivateMetadata getInternalMetadata() {
return internalMetadata;
}
@Override
public final void setInternalMetadata(PrivateMetadata internalMetadata) {
this.internalMetadata = internalMetadata;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ImmortalCacheValue)) return false;
ImmortalCacheValue that = (ImmortalCacheValue) o;
return Objects.equals(value, that.value) &&
Objects.equals(internalMetadata, that.internalMetadata);
}
@Override
public int hashCode() {
int result = Objects.hashCode(value);
result = 31 * result + Objects.hashCode(internalMetadata);
return result;
}
@Override
public final String toString() {
StringBuilder builder = new StringBuilder(getClass().getSimpleName());
builder.append('{');
appendFieldsToString(builder);
return builder.append('}').toString();
}
@Override
public ImmortalCacheValue clone() {
try {
return (ImmortalCacheValue) super.clone();
} catch (CloneNotSupportedException e) {
throw new RuntimeException("Should never happen", e);
}
}
protected void appendFieldsToString(StringBuilder builder) {
builder.append("value=").append(Util.toStr(value));
builder.append(", internalMetadata=").append(internalMetadata);
}
public static class Externalizer extends AbstractExternalizer<ImmortalCacheValue> {
@Override
public void writeObject(ObjectOutput output, ImmortalCacheValue icv) throws IOException {
output.writeObject(icv.value);
output.writeObject(icv.internalMetadata);
}
@Override
public ImmortalCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
return new ImmortalCacheValue(value, internalMetadata);
}
@Override
public Integer getId() {
return Ids.IMMORTAL_VALUE;
}
@Override
public Set<Class<? extends ImmortalCacheValue>> getTypeClasses() {
return Collections.singleton(ImmortalCacheValue.class);
}
}
}
| 4,344
| 24.863095
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/ClearCacheEntry.java
|
package org.infinispan.container.entries;
import org.infinispan.container.DataContainer;
import org.infinispan.metadata.Metadata;
/**
* Used in {@link org.infinispan.context.impl.ClearInvocationContext} to process the {@link
* org.infinispan.commands.write.ClearCommand}.
*
* @author Pedro Ruivo
* @since 7.2
*/
public class ClearCacheEntry<K, V> implements CacheEntry<K, V> {
//singleton, we have no state
private static final ClearCacheEntry INSTANCE = new ClearCacheEntry();
private ClearCacheEntry() {
}
public static <K, V> ClearCacheEntry<K, V> getInstance() {
//noinspection unchecked
return INSTANCE;
}
@Override
public boolean isNull() {
return true;
}
@Override
public boolean isChanged() {
return true;
}
@Override
public void setChanged(boolean changed) {
/*no-op*/
}
@Override
public boolean isCreated() {
return false;
}
@Override
public void setCreated(boolean created) {
/*no-op*/
}
@Override
public boolean isRemoved() {
return true;
}
@Override
public void setRemoved(boolean removed) {
/*no-op*/
}
@Override
public boolean isEvicted() {
return false;
}
@Override
public void setEvicted(boolean evicted) {
/*no-op*/
}
@Override
public K getKey() {
return null;
}
@Override
public V getValue() {
return null;
}
@Override
public long getLifespan() {
return -1;
}
@Override
public long getMaxIdle() {
return -1;
}
@Override
public boolean skipLookup() {
return true;
}
@Override
public V setValue(V value) {
/*no-op*/
return null;
}
@Override
public void commit(DataContainer<K, V> container) {
container.clear();
}
@Override
public void setSkipLookup(boolean skipLookup) {
/*no-op*/
}
@SuppressWarnings("CloneDoesntCallSuperClone")
@Override
public CacheEntry<K, V> clone() {
return getInstance(); //no clone. singleton
}
@Override
public Metadata getMetadata() {
return null;
}
@Override
public void setMetadata(Metadata metadata) {
/*no-op*/
}
@Override
public String toString() {
return "ClearCacheEntry{}";
}
}
| 2,321
| 16.458647
| 91
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/PrimitiveEntrySizeCalculator.java
|
package org.infinispan.container.entries;
import java.lang.reflect.Array;
import org.infinispan.commons.util.AbstractEntrySizeCalculatorHelper;
import sun.misc.Unsafe;
/**
* Entry size calculator that returns an approximation of how much memory various primitives, primitive wrappers,
* Strings and arrays consume.
*
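* Illustrative use (the returned sizes are JVM-dependent approximations):
* <pre>{@code
* PrimitiveEntrySizeCalculator calc = new PrimitiveEntrySizeCalculator();
* long approx = calc.calculateSize("key", 1024L); // 1024 falls outside the Long cache, so it is counted
* }</pre>
*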
* @author wburns
* @since 8.0
*/
public class PrimitiveEntrySizeCalculator extends AbstractEntrySizeCalculatorHelper<Object, Object> {
public long calculateSize(Object key, Object value) {
return handleObject(key) + handleObject(value);
}
protected long handleObject(Object object) {
   // a null argument still fails fast with a NullPointerException, exactly as before
   Class<?> objClass = object.getClass();
if (objClass == String.class) {
String realString = (String) object;
// The string is an object and has a reference to its class, int for the hash code and a pointer to the char[]
long objectSize = roundUpToNearest8(OBJECT_SIZE + POINTER_SIZE + 4 + POINTER_SIZE);
// We then include the char[] offset and size
return objectSize + roundUpToNearest8(Unsafe.ARRAY_CHAR_BASE_OFFSET + realString.length() *
Unsafe.ARRAY_CHAR_INDEX_SCALE);
} else if (objClass == Long.class) {
long longValue = ((Long) object).longValue();
if (longValue >= LongCacheConstraints.MIN_CACHE_VALUE &&
longValue <= LongCacheConstraints.MAX_CACHE_VALUE) {
return 0;
}
// We add in the size for a long, plus the object reference and the class ref
return roundUpToNearest8(Unsafe.ARRAY_LONG_INDEX_SCALE + OBJECT_SIZE + POINTER_SIZE);
} else if (objClass == Integer.class) {
int intValue = ((Integer) object).intValue();
if (intValue >= IntegerCacheConstraints.MIN_CACHE_VALUE &&
intValue <= IntegerCacheConstraints.MAX_CACHE_VALUE) {
return 0;
}
// We add in the size for an int, plus the object reference and the class ref
return roundUpToNearest8(Unsafe.ARRAY_INT_INDEX_SCALE + OBJECT_SIZE + POINTER_SIZE);
} else if (objClass == Short.class) {
short shortValue = ((Short) object).shortValue();
if (shortValue >= ShortCacheConstraints.MIN_CACHE_VALUE &&
shortValue <= ShortCacheConstraints.MAX_CACHE_VALUE) {
return 0;
}
return roundUpToNearest8(Unsafe.ARRAY_SHORT_INDEX_SCALE + OBJECT_SIZE + POINTER_SIZE);
} else if (objClass == Double.class) {
return roundUpToNearest8(Unsafe.ARRAY_DOUBLE_INDEX_SCALE + OBJECT_SIZE + POINTER_SIZE);
} else if (objClass == Float.class) {
return roundUpToNearest8(Unsafe.ARRAY_FLOAT_INDEX_SCALE + OBJECT_SIZE + POINTER_SIZE);
} else if (objClass == Boolean.class) {
// We assume all provided booleans are cached
return 0;
} else if (objClass == Character.class) {
char charValue = ((Character) object).charValue();
if (charValue >= CharacterCacheConstraints.MIN_CACHE_VALUE &&
charValue <= CharacterCacheConstraints.MAX_CACHE_VALUE) {
return 0;
}
return roundUpToNearest8(Unsafe.ARRAY_CHAR_INDEX_SCALE + OBJECT_SIZE + POINTER_SIZE);
} else if (objClass == Byte.class) {
// All byte values are cached
return 0;
} else if (objClass.isArray()) {
// We assume the array is of a type that supports shallow copy, such as the ones above
// We don't verify cached values if the array contains Booleans for example
Unsafe unsafe = getUnsafe();
Class<?> compClass = objClass.getComponentType();
int arrayLength = Array.getLength(object);
// Every array has a base offset which defines how large the array is and other overhead.
// Then each element in the array is indexed contiguously in memory thus we can simply multiply how
// many elements are in the array by how much of an offset each element requires. A normal object for example
// takes up the standard Object pointer worth of size but primitives take up space equivalent to how many bytes
// they occupy.
long arraySize = roundUpToNearest8(unsafe.arrayBaseOffset(objClass) + unsafe.arrayIndexScale(objClass) *
arrayLength);
// If the component type isn't primitive we have to add in each of the instances
if (!compClass.isPrimitive()) {
// TODO: we could assume some values for given primitive wrappers.
for (int i = 0; i < arrayLength; ++i) {
arraySize += handleObject(Array.get(object, i));
}
}
return arraySize;
} else {
throw new IllegalArgumentException("Size of Class " + objClass +
      " cannot be determined using the given entry size calculator: " + getClass());
}
}
static class CharacterCacheConstraints {
static final short MAX_CACHE_VALUE = 127;
static final short MIN_CACHE_VALUE = 0;
}
static class ShortCacheConstraints {
static final short MAX_CACHE_VALUE = 127;
static final short MIN_CACHE_VALUE = -128;
}
static class LongCacheConstraints {
static final long MAX_CACHE_VALUE = 127;
static final long MIN_CACHE_VALUE = -128;
}
static class IntegerCacheConstraints {
static final int MAX_CACHE_VALUE = calculateMaxIntCache();
static final int MIN_CACHE_VALUE = -128;
static int calculateMaxIntCache() {
//We start from 128 as caching numbers up to 127 is required by the JLS:
//see 5.1.7 Boxing Conversion
for (int i = 128; i < Integer.MAX_VALUE; i++) {
if (Integer.valueOf(i) == Integer.valueOf(i))
continue;
else
return i - 1;
}
return Integer.MAX_VALUE;
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. Replace with a simple call to
* Unsafe.getUnsafe when integrating into a jdk.
*
* @return a sun.misc.Unsafe
*/
static Unsafe getUnsafe() {
try {
return Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {
try {
Class<Unsafe> k = Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
} catch (Exception e) {
throw new RuntimeException("Could not initialize intrinsics", e.getCause());
}
}
}
}
| 6,794
| 41.735849
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/CacheEntry.java
|
package org.infinispan.container.entries;
import java.util.Map;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.metadata.MetadataAware;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* An entry that is stored in the data container
*
* @author Manik Surtani
* @author Galder Zamarreño
* @since 4.0
*/
public interface CacheEntry<K, V> extends Cloneable, Map.Entry<K, V>, MetadataAware {
/**
* Tests whether the entry represents a null value, typically used for repeatable read.
*
* @return true if this represents a null, false otherwise.
*/
boolean isNull();
/**
* @return true if this entry has changed since being read from the container, false otherwise.
*/
boolean isChanged();
/**
* @return true if this entry has been newly created, false otherwise.
*/
boolean isCreated();
/**
* @return true if this entry has been removed since being read from the container, false otherwise.
*/
boolean isRemoved();
/**
* @return true if this entry has been evicted since being read from the container, false otherwise.
*/
boolean isEvicted();
/**
* Retrieves the key to this entry
*
* @return a key
*/
@Override
K getKey();
/**
* Retrieves the value of this entry
*
* @return the value of the entry
*/
@Override
V getValue();
/**
* @return retrieves the lifespan of this entry. -1 means an unlimited lifespan.
*/
long getLifespan();
/**
* @return the maximum allowed time for which this entry can be idle, after which it is considered expired.
*/
long getMaxIdle();
/**
* @return {@code true} if the value must not be fetched from an external source
*/
boolean skipLookup();
/**
* @return timestamp when the entry was created
*/
default long getCreated() {
return -1;
}
/**
* @return timestamp when the entry was last used
*/
default long getLastUsed() {
return -1;
}
/**
* Sets the value of the entry, returning the previous value
*
* @param value value to set
* @return previous value
*/
@Override
V setValue(V value);
/**
* Commits changes
*
* @param container data container to commit to
*/
void commit(DataContainer<K, V> container);
void setChanged(boolean changed);
void setCreated(boolean created);
void setRemoved(boolean removed);
void setEvicted(boolean evicted);
/**
* See {@link #skipLookup()}.
* @param skipLookup whether looking up the value from an external source should be skipped
*/
void setSkipLookup(boolean skipLookup);
CacheEntry<K, V> clone();
default void setCreated(long created) {}
default void setLastUsed(long lastUsed) {}
default PrivateMetadata getInternalMetadata() {
return null;
}
default void setInternalMetadata(PrivateMetadata metadata) {
//no-op
}
}
| 2,923
| 20.5
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/L1InternalCacheEntry.java
|
package org.infinispan.container.entries;
/**
* A {@link org.infinispan.container.entries.InternalCacheEntry} implementation to store a L1 entry.
*
* @author Pedro Ruivo
* @since 7.1
*/
public class L1InternalCacheEntry extends MortalCacheEntry {
public L1InternalCacheEntry(Object key, Object value, long lifespan, long created) {
super(key, value, lifespan, created);
}
@Override
public boolean isL1Entry() {
return true;
}
}
| 463
| 22.2
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/TransientMortalCacheValue.java
|
package org.infinispan.container.entries;
import static java.lang.Math.min;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A transient, mortal cache value to correspond with {@link TransientMortalCacheEntry}
*
* @author Manik Surtani
* @since 4.0
*/
public class TransientMortalCacheValue extends MortalCacheValue {
protected long maxIdle;
protected long lastUsed;
public TransientMortalCacheValue(Object value, long created, long lifespan, long maxIdle, long lastUsed) {
this(value, null, created, lifespan, maxIdle, lastUsed);
}
protected TransientMortalCacheValue(Object value, PrivateMetadata internalMetadata, long created,
long lifespan, long maxIdle, long lastUsed) {
super(value, internalMetadata, created, lifespan);
this.maxIdle = maxIdle;
this.lastUsed = lastUsed;
}
@Override
public long getMaxIdle() {
return maxIdle;
}
public void setMaxIdle(long maxIdle) {
this.maxIdle = maxIdle;
}
@Override
public long getLastUsed() {
return lastUsed;
}
public void setLastUsed(long lastUsed) {
this.lastUsed = lastUsed;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransientMortal(maxIdle, lastUsed, lifespan, created, now);
}
@Override
public boolean isMaxIdleExpirable() {
return true;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new TransientMortalCacheEntry(key, value, internalMetadata, maxIdle, lifespan, lastUsed, created);
}
@Override
public long getExpiryTime() {
long lset = lifespan > -1 ? created + lifespan : -1;
long muet = maxIdle > -1 ? lastUsed + maxIdle : -1;
if (lset == -1) {
return muet;
}
if (muet == -1) {
return lset;
}
return min(lset, muet);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof TransientMortalCacheValue)) return false;
if (!super.equals(o)) return false;
TransientMortalCacheValue that = (TransientMortalCacheValue) o;
return lastUsed == that.lastUsed && maxIdle == that.maxIdle;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (maxIdle ^ (maxIdle >>> 32));
result = 31 * result + (int) (lastUsed ^ (lastUsed >>> 32));
return result;
}
@Override
public TransientMortalCacheValue clone() {
return (TransientMortalCacheValue) super.clone();
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", maxIdle=").append(maxIdle);
builder.append(", lastUsed=").append(lastUsed);
}
public static class Externalizer extends AbstractExternalizer<TransientMortalCacheValue> {
@Override
public void writeObject(ObjectOutput output, TransientMortalCacheValue value) throws IOException {
output.writeObject(value.value);
output.writeObject(value.internalMetadata);
UnsignedNumeric.writeUnsignedLong(output, value.created);
output.writeLong(value.lifespan); // could be negative so should not use unsigned longs
UnsignedNumeric.writeUnsignedLong(output, value.lastUsed);
output.writeLong(value.maxIdle); // could be negative so should not use unsigned longs
}
@Override
public TransientMortalCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
long lifespan = input.readLong();
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
long maxIdle = input.readLong();
return new TransientMortalCacheValue(value, internalMetadata, created, lifespan, maxIdle, lastUsed);
}
@Override
public Integer getId() {
return Ids.TRANSIENT_MORTAL_VALUE;
}
@Override
public Set<Class<? extends TransientMortalCacheValue>> getTypeClasses() {
return Collections.singleton(TransientMortalCacheValue.class);
}
}
}
| 4,635
| 30.537415
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/NullCacheEntry.java
|
package org.infinispan.container.entries;
import org.infinispan.container.DataContainer;
import org.infinispan.metadata.Metadata;
public class NullCacheEntry<K, V> implements CacheEntry<K, V> {
private static final NullCacheEntry INSTANCE = new NullCacheEntry();
private NullCacheEntry() {
}
public static <K, V> NullCacheEntry<K, V> getInstance() {
return INSTANCE;
}
@Override
public boolean isNull() {
return true;
}
@Override
public boolean isChanged() {
return false;
}
@Override
public boolean isCreated() {
return false;
}
@Override
public boolean isRemoved() {
return false;
}
@Override
public boolean isEvicted() {
return false;
}
@Override
public K getKey() {
return null;
}
@Override
public V getValue() {
return null;
}
@Override
public long getLifespan() {
return -1;
}
@Override
public long getMaxIdle() {
return -1;
}
@Override
public boolean skipLookup() {
return false;
}
@Override
public Object setValue(Object value) {
return null;
}
@Override
public void commit(DataContainer container) {
// No-op
}
@Override
public void setChanged(boolean changed) {
// No-op
}
@Override
public void setCreated(boolean created) {
// No-op
}
@Override
public void setRemoved(boolean removed) {
// No-op
}
@Override
public void setEvicted(boolean evicted) {
// No-op
}
@Override
public void setSkipLookup(boolean skipLookup) {
// No-op
}
@Override
public CacheEntry clone() {
return INSTANCE;
}
@Override
public Metadata getMetadata() {
return null;
}
@Override
public void setMetadata(Metadata metadata) {
// No-op
}
@Override
public String toString() {
return "NullCacheEntry{}";
}
}
| 1,951
| 15
| 71
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/InternalCacheEntry.java
|
package org.infinispan.container.entries;
import org.infinispan.commons.time.TimeService;
/**
* Interface for internal cache entries that expose whether an entry has expired.
*
* @author Manik Surtani
* @author Sanne Grinovero
* @since 4.0
*/
public interface InternalCacheEntry<K, V> extends CacheEntry<K, V>, Cloneable {
/**
* @param now the current time as defined by {@link System#currentTimeMillis()} or {@link
* TimeService#wallClockTime()}
* @return true if the entry has expired; false otherwise
* @since 5.1
*/
boolean isExpired(long now);
/**
* @return true if the entry can expire, false otherwise
*/
boolean canExpire();
/**
* @return true if this entry can expire via max idle, false otherwise
*/
default boolean canExpireMaxIdle() {
return false;
}
/**
* Only used with entries that have a lifespan, this determines when an entry is due to expire.
*
* @return timestamp when the entry is due to expire, or -1 if it doesn't have a lifespan
*/
long getExpiryTime();
/**
* Updates access timestamps on this instance to a specified time
* @param currentTimeMillis the current time as defined by {@link System#currentTimeMillis()} or {@link
* TimeService#wallClockTime()}
*/
void touch(long currentTimeMillis);
/**
* "Reincarnates" an entry. Essentially, resets the 'created' timestamp of the entry to the current time.
* @param now the current time as defined by {@link System#currentTimeMillis()} or {@link
* TimeService#wallClockTime()}
*/
void reincarnate(long now);
/**
* @return {@code true} if the entry is a L1 entry.
*/
boolean isL1Entry();
/**
* Creates a representation of this entry as an {@link org.infinispan.container.entries.InternalCacheValue}. The main
* purpose of this is to provide a representation that does <i>not</i> have a reference to the key. This is useful in
* situations where the key is already known or stored elsewhere, making serialization and deserialization more
* efficient.
* <p/>
* Note that this should not be used to optimize memory overhead, since the saving of an additional reference to a
* key (a single object reference) does not warrant the cost of constructing an InternalCacheValue. This <i>only</i>
* makes sense when marshalling is involved, since the cost of marshalling the key again can be sidestepped using an
* InternalCacheValue if the key is already known/marshalled.
* <p/>
*
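* A round-trip sketch (illustrative only):
* <pre>{@code
* InternalCacheValue<V> icv = entry.toInternalCacheValue();
* // marshal the value; the key is marshalled (or already known) separately
* InternalCacheEntry<?, ?> rebuilt = icv.toInternalCacheEntry(entry.getKey());
* }</pre>
*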
* @return a new InternalCacheValue encapsulating this InternalCacheEntry's value and expiration information.
*/
InternalCacheValue<V> toInternalCacheValue();
InternalCacheEntry<K, V> clone();
}
| 2,808
| 35.012821
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/ForwardingCacheEntry.java
|
package org.infinispan.container.entries;
import org.infinispan.container.DataContainer;
import org.infinispan.metadata.Metadata;
/**
* A class designed to forward all method invocations for a CacheEntry to the provided delegate. This
* class is useful to extend when you want to override only a subset of the methods while delegating the rest.
*
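* A sketch of a subclass that overrides a single method (class name assumed):
* <pre>{@code
* class ReadOnlyEntry<K, V> extends ForwardingCacheEntry<K, V> {
*    private final CacheEntry<K, V> delegate;
*    ReadOnlyEntry(CacheEntry<K, V> delegate) { this.delegate = delegate; }
*    protected CacheEntry<K, V> delegate() { return delegate; }
*    public V setValue(V value) { throw new UnsupportedOperationException(); }
* }
* }</pre>
*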
* @author wburns
* @since 7.0
*/
public abstract class ForwardingCacheEntry<K, V> implements CacheEntry<K, V> {
protected abstract CacheEntry<K, V> delegate();
@Override
public boolean isNull() {
return delegate().isNull();
}
@Override
public boolean isChanged() {
return delegate().isChanged();
}
@Override
public boolean isCreated() {
return delegate().isCreated();
}
@Override
public boolean isRemoved() {
return delegate().isRemoved();
}
@Override
public boolean isEvicted() {
return delegate().isEvicted();
}
@Override
public K getKey() {
return delegate().getKey();
}
@Override
public V getValue() {
return delegate().getValue();
}
@Override
public long getLifespan() {
return delegate().getLifespan();
}
@Override
public long getMaxIdle() {
return delegate().getMaxIdle();
}
@Override
public boolean skipLookup() {
return delegate().skipLookup();
}
@Override
public V setValue(V value) {
return delegate().setValue(value);
}
@Override
public void commit(DataContainer container) {
delegate().commit(container);
}
@Override
public void setChanged(boolean changed) {
delegate().setChanged(changed);
}
@Override
public void setCreated(boolean created) {
delegate().setCreated(created);
}
@Override
public void setRemoved(boolean removed) {
delegate().setRemoved(removed);
}
@Override
public void setEvicted(boolean evicted) {
delegate().setEvicted(evicted);
}
@Override
public void setSkipLookup(boolean skipLookup) {
delegate().setSkipLookup(skipLookup);
}
@Override
public CacheEntry<K, V> clone() {
return delegate().clone();
}
@Override
public Metadata getMetadata() {
return delegate().getMetadata();
}
@Override
public void setMetadata(Metadata metadata) {
delegate().setMetadata(metadata);
}
@Override
public long getCreated() {
return delegate().getCreated();
}
@Override
public long getLastUsed() {
return delegate().getLastUsed();
}
@Override
public String toString() {
return delegate().toString();
}
// We already break equals contract in several places when comparing all the various CacheEntry
// types as the same ones
@Override
public boolean equals(Object obj) {
return delegate().equals(obj);
}
// We already break hashcode contract in several places when comparing all the various CacheEntry
// types as the same ones
@Override
public int hashCode() {
return delegate().hashCode();
}
}
| 3,027
| 19.882759
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/MVCCEntry.java
|
package org.infinispan.container.entries;
import org.infinispan.container.DataContainer;
import org.infinispan.metadata.Metadata;
/**
* An entry that can be safely copied when updates are made, to provide MVCC semantics
*
* @author Manik Surtani
* @since 4.0
*/
public interface MVCCEntry<K, V> extends CacheEntry<K, V> {
void setChanged(boolean isChanged);
/**
* Marks this entry as being expired. This is a special form of removal.
* @param expired whether or not this entry should be expired
*/
void setExpired(boolean expired);
/**
* Returns whether this entry was marked as being expired or not
* @return whether expired has been set
*/
boolean isExpired();
/**
* Reset the current value of the entry to the value before the command was executed the first time.
* This is invoked before the command is retried.
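* <p>
* A sketch of the lifecycle around retries:
* <pre>{@code
* entry.setValue(newValue);    // command mutates the entry
* entry.resetCurrentValue();   // retry: the current value/metadata revert to the previous ones
* entry.updatePreviousValue(); // success: the previous value/metadata now match the current ones
* }</pre>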
*/
void resetCurrentValue();
/**
* Update the previous value of the entry - set it to the current value. This is invoked when the command
* has successfully finished (there won't be any more retries) or when the value was updated from an external
* source.
*/
void updatePreviousValue();
/**
* @return The previous value.
*/
V getOldValue();
/**
* @return The previous metadata.
*/
Metadata getOldMetadata();
/**
* Mark that this entry was loaded from the cache (as opposed to generated by the application
* using a write-only command), mostly for purposes of the write skew check.
*/
default void setRead() {}
/**
* Check whether this entry was loaded from the cache (as opposed to generated by the application
* using a write-only command), mostly for purposes of the write skew check.
*/
default boolean isRead() {
return false;
}
/**
* Mark this context-entry as already committed to the {@link DataContainer}.
*/
default void setCommitted() {}
/**
* @return True if this context entry has been committed to the {@link DataContainer}
*/
default boolean isCommitted() { return false; }
/**
* @return True if we've checked persistence for presence of this entry.
*/
default boolean isLoaded() {
return false;
}
default void setLoaded(boolean loaded) {
}
/**
* @return True if this entry should not be written to shared persistence
*/
boolean isSkipSharedStore();
void setSkipSharedStore();
@Override
MVCCEntry<K, V> clone();
}
| 2,483
| 25.147368
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/MortalCacheValue.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A mortal cache value, to correspond with {@link MortalCacheEntry}
*
* @author Manik Surtani
* @since 4.0
*/
public class MortalCacheValue extends ImmortalCacheValue {
protected long created;
protected long lifespan;
public MortalCacheValue(Object value, long created, long lifespan) {
this(value, null, created, lifespan);
}
protected MortalCacheValue(Object value, PrivateMetadata internalMetadata, long created, long lifespan) {
super(value, internalMetadata);
this.created = created;
this.lifespan = lifespan;
}
@Override
public final long getCreated() {
return created;
}
public final void setCreated(long created) {
this.created = created;
}
@Override
public final long getLifespan() {
return lifespan;
}
public final void setLifespan(long lifespan) {
this.lifespan = lifespan;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredMortal(lifespan, created, now);
}
@Override
public final boolean canExpire() {
return true;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new MortalCacheEntry(key, value, internalMetadata, lifespan, created);
}
@Override
public long getExpiryTime() {
return lifespan > -1 ? created + lifespan : -1;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof MortalCacheValue)) {
return false;
}
if (!super.equals(o)) return false;
MortalCacheValue that = (MortalCacheValue) o;
return created == that.created && lifespan == that.lifespan;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (created ^ (created >>> 32));
result = 31 * result + (int) (lifespan ^ (lifespan >>> 32));
return result;
}
@Override
public MortalCacheValue clone() {
return (MortalCacheValue) super.clone();
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", created=").append(created);
builder.append(", lifespan=").append(lifespan);
}
public static class Externalizer extends AbstractExternalizer<MortalCacheValue> {
@Override
public void writeObject(ObjectOutput output, MortalCacheValue mcv) throws IOException {
output.writeObject(mcv.value);
output.writeObject(mcv.internalMetadata);
UnsignedNumeric.writeUnsignedLong(output, mcv.created);
output.writeLong(mcv.lifespan); // could be negative so should not use unsigned longs
}
@Override
public MortalCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
long lifespan = input.readLong();
return new MortalCacheValue(value, internalMetadata, created, lifespan);
}
@Override
public Integer getId() {
return Ids.MORTAL_VALUE;
}
@Override
public Set<Class<? extends MortalCacheValue>> getTypeClasses() {
return Collections.singleton(MortalCacheValue.class);
}
}
}
| 3,844
| 27.065693
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/ReadCommittedEntry.java
|
package org.infinispan.container.entries;
import static org.infinispan.commons.util.Util.toStr;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.CHANGED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.COMMITTED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.CREATED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.EVICTED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.EXPIRED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.LOADED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.REMOVED;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.SKIP_SHARED_STORE;
import java.util.concurrent.CompletionStage;
import org.infinispan.commons.util.Util;
import org.infinispan.container.DataContainer;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A wrapper around a cached entry that encapsulates read committed semantics when writes are initiated, committed or
* rolled back.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @since 4.0
*/
public class ReadCommittedEntry<K, V> implements MVCCEntry<K, V> {
private static final Log log = LogFactory.getLog(ReadCommittedEntry.class);
protected K key;
protected V value;
protected long created = -1, lastUsed = -1;
protected short flags = 0;
protected Metadata metadata;
protected PrivateMetadata internalMetadata;
protected V oldValue;
protected Metadata oldMetadata;
public ReadCommittedEntry(K key, V value, Metadata metadata) {
this.key = key;
this.value = value;
this.metadata = metadata;
this.oldValue = value;
this.oldMetadata = metadata;
}
// if this or any MVCC entry implementation ever needs to store a boolean, always use a flag instead. This is far
// more space-efficient. Note that the flags are stored in a short, which means up to 16 flags can be stored in
// it. Always start shifting with 0; the last shift cannot be greater than 15.
protected enum Flags {
CHANGED(1),
CREATED(1 << 1),
REMOVED(1 << 2),
COMMITTED(1 << 3),
EVICTED(1 << 4),
EXPIRED(1 << 5),
SKIP_LOOKUP(1 << 6),
READ(1 << 7),
LOADED(1 << 8),
// Set if this write should not be persisted into any underlying shared stores
SKIP_SHARED_STORE(1 << 9),
;
final short mask;
Flags(int mask) {
this.mask = (short) mask;
}
}
/**
* Tests whether a flag is set.
*
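* A sketch of the underlying bit test (mask values from the {@code Flags} enum above):
* <pre>{@code
* short flags = (short) (CREATED.mask | REMOVED.mask); // 0b110
* boolean created = (flags & CREATED.mask) != 0; // true
* boolean changed = (flags & CHANGED.mask) != 0; // false
* }</pre>
*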
* @param flag flag to test
* @return true if set, false otherwise.
*/
final boolean isFlagSet(Flags flag) {
return (flags & flag.mask) != 0;
}
/**
* Utility method that sets the value of the given flag to true.
*
* @param flag flag to set
*/
protected final void setFlag(Flags flag) {
flags |= flag.mask;
}
/**
* Utility method that sets the value of the flag to false.
*
* @param flag flag to unset
*/
private void unsetFlag(Flags flag) {
flags &= ~flag.mask;
}
@Override
public final long getLifespan() {
return metadata == null ? -1 : metadata.lifespan();
}
@Override
public final long getMaxIdle() {
return metadata == null ? -1 : metadata.maxIdle();
}
@Override
public final K getKey() {
return key;
}
@Override
public final V getValue() {
return value;
}
@Override
public final boolean isNull() {
return value == null;
}
@Override
public void commit(DataContainer<K, V> container) {
if (shouldCommit()) {
if (isEvicted()) {
container.evict(key);
} else if (isRemoved()) {
container.remove(key);
} else if (value != null) {
// Can't just rely on the entry's metadata because it could have
// been modified by the interceptor chain (i.e. new version
// generated if none provided by the user)
container.put(key, value, metadata);
}
}
}
public CompletionStage<Void> commit(int segment, InternalDataContainer<K, V> container) {
if (segment < 0) {
throw new IllegalArgumentException("Segment must be 0 or greater");
}
// only do stuff if there are changes.
if (shouldCommit()) {
setCommitted();
if (isEvicted()) {
return container.evict(segment, key);
} else if (isRemoved()) {
container.remove(segment, key);
} else if (value != null) {
container.put(segment, key, value, metadata, internalMetadata, created, lastUsed);
}
}
return CompletableFutures.completedNull();
}
private boolean shouldCommit() {
if (isChanged()) {
if (log.isTraceEnabled())
log.tracef("Updating entry (key=%s removed=%s changed=%s created=%s committed=%s value=%s metadata=%s internalMetadata=%s)",
toStr(getKey()), isRemoved(), isChanged(), isCreated(), isCommitted(), toStr(value), getMetadata(), internalMetadata);
return true;
}
return false;
}
@Override
public final boolean isChanged() {
return isFlagSet(CHANGED);
}
@Override
public final void setChanged(boolean changed) {
setFlag(changed, CHANGED);
}
@Override
public void setSkipLookup(boolean skipLookup) {
//no-op
}
@Override
public boolean skipLookup() {
//in read committed, it can read from the data container / remote source multiple times.
return false;
}
@Override
public long getCreated() {
return created;
}
@Override
public long getLastUsed() {
return lastUsed;
}
@Override
public V setValue(V value) {
V prev = this.value;
this.value = value;
return prev;
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
public final boolean isCreated() {
return isFlagSet(CREATED);
}
@Override
public final void setCreated(boolean created) {
setFlag(created, CREATED);
}
@Override
public boolean isRemoved() {
return isFlagSet(REMOVED);
}
@Override
public boolean isEvicted() {
return isFlagSet(EVICTED);
}
@Override
public boolean isExpired() {
return isFlagSet(EXPIRED);
}
@Override
public void setCommitted() {
setFlag(COMMITTED);
}
@Override
public boolean isCommitted() {
return isFlagSet(COMMITTED);
}
@Override
public boolean isLoaded() {
return isFlagSet(LOADED);
}
@Override
public boolean isSkipSharedStore() {
return isFlagSet(SKIP_SHARED_STORE);
}
@Override
public void setLoaded(boolean loaded) {
setFlag(loaded, LOADED);
}
@Override
public void resetCurrentValue() {
value = oldValue;
metadata = oldMetadata;
}
@Override
public final void updatePreviousValue() {
oldValue = value;
oldMetadata = metadata;
}
@Override
public final V getOldValue() {
return oldValue;
}
@Override
public final Metadata getOldMetadata() {
return oldMetadata;
}
@Override
public final void setRemoved(boolean removed) {
setFlag(removed, REMOVED);
}
@Override
public void setEvicted(boolean evicted) {
setFlag(evicted, EVICTED);
}
@Override
public void setExpired(boolean expired) {
setFlag(expired, EXPIRED);
}
@Override
public void setSkipSharedStore() {
setFlag(true, SKIP_SHARED_STORE);
}
final void setFlag(boolean enable, Flags flag) {
if (enable)
setFlag(flag);
else
unsetFlag(flag);
}
@Override
public ReadCommittedEntry<K, V> clone() {
try {
//noinspection unchecked
return (ReadCommittedEntry<K, V>) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException(e);
}
}
@Override
public void setCreated(long created) {
this.created = created;
}
@Override
public void setLastUsed(long lastUsed) {
this.lastUsed = lastUsed;
}
@Override
public PrivateMetadata getInternalMetadata() {
return internalMetadata;
}
@Override
public void setInternalMetadata(PrivateMetadata metadata) {
this.internalMetadata = metadata;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(" + Util.hexIdHashCode(this) + "){" +
"key=" + toStr(key) +
", value=" + toStr(value) +
", oldValue=" + toStr(oldValue) +
", isCreated=" + isCreated() +
", isChanged=" + isChanged() +
", isRemoved=" + isRemoved() +
", isExpired=" + isExpired() +
", isCommitted=" + isCommitted() +
", skipLookup=" + skipLookup() +
", metadata=" + metadata +
", oldMetadata=" + oldMetadata +
", internalMetadata=" + internalMetadata +
'}';
}
}
| 9,568
| 25.002717
| 136
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/ImmortalCacheEntry.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A cache entry that is immortal/cannot expire
*
* @author Manik Surtani
* @since 4.0
*/
public class ImmortalCacheEntry extends AbstractInternalCacheEntry {
public ImmortalCacheEntry(Object key, Object value) {
this(key, value, null);
}
protected ImmortalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata) {
super(key, value, internalMetadata);
}
@Override
public final boolean isExpired(long now) {
return false;
}
@Override
public final boolean canExpire() {
return false;
}
@Override
public final long getCreated() {
return -1;
}
@Override
public final long getLastUsed() {
return -1;
}
@Override
public final long getLifespan() {
return -1;
}
@Override
public final long getMaxIdle() {
return -1;
}
@Override
public final long getExpiryTime() {
return -1;
}
@Override
public void touch(long currentTimeMillis) {
// no-op
}
@Override
public void reincarnate(long now) {
// no-op
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new ImmortalCacheValue(value, internalMetadata);
}
@Override
public Metadata getMetadata() {
return EmbeddedMetadata.EMPTY;
}
@Override
public void setMetadata(Metadata metadata) {
throw new IllegalStateException(
"Metadata cannot be set on immortal entries. They need to be recreated via the entry factory.");
}
@Override
public ImmortalCacheEntry clone() {
return (ImmortalCacheEntry) super.clone();
}
public static class Externalizer extends AbstractExternalizer<ImmortalCacheEntry> {
@Override
public void writeObject(ObjectOutput output, ImmortalCacheEntry ice) throws IOException {
output.writeObject(ice.key);
output.writeObject(ice.value);
output.writeObject(ice.internalMetadata);
}
@Override
public ImmortalCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
return new ImmortalCacheEntry(key, value, internalMetadata);
}
@Override
public Integer getId() {
return Ids.IMMORTAL_ENTRY;
}
@Override
public Set<Class<? extends ImmortalCacheEntry>> getTypeClasses() {
return Collections.singleton(ImmortalCacheEntry.class);
}
}
}
| 3,032
| 23.264
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/VersionedRepeatableReadEntry.java
|
package org.infinispan.container.entries;
import static org.infinispan.commons.util.Util.toStr;
import static org.infinispan.transaction.impl.WriteSkewHelper.versionFromEntry;
import java.util.concurrent.CompletionStage;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.container.versioning.InequalVersionComparisonResult;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.metadata.Metadata;
import org.infinispan.persistence.util.EntryLoader;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A version of RepeatableReadEntry that can perform write-skew checks during prepare.
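* <p>
* Conceptually, the check passes only when the version the transaction observed on read still
* matches the version currently associated with the entry. A minimal sketch (hypothetical
* {@code prevVersion}/{@code versionSeen} values):
* <pre>{@code
* // prevVersion: version currently associated with the entry
* // versionSeen: version the transaction observed when it read the entry
* boolean passes = prevVersion.compareTo(versionSeen) == InequalVersionComparisonResult.EQUAL;
* }</pre>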
*
* @author Manik Surtani
* @since 5.1
*/
public class VersionedRepeatableReadEntry<K, V> extends RepeatableReadEntry<K, V> {
private static final Log log = LogFactory.getLog(VersionedRepeatableReadEntry.class);
public VersionedRepeatableReadEntry(K key, V value, Metadata metadata) {
super(key, value, metadata);
}
/**
* Performs the write-skew check for this entry, comparing the version the transaction observed
* with the version currently associated with the entry.
*
* @param entryLoader loader used to fetch the current entry (and its version) when it is not
*                    already available
* @param segment the segment matching this entry
* @param ctx the invocation context
* @param versionSeen what version has been seen for this entry
* @param versionGenerator generator to generate a new version if needed
* @param rollingUpgrade whether a rolling upgrade is in progress, in which case entries without a
*                       version are treated as non-existing instead of failing
* @return whether the write skew check passes for this entry ({@code true} means no write skew
*         was detected)
*/
public CompletionStage<Boolean> performWriteSkewCheck(EntryLoader<K, V> entryLoader, int segment,
TxInvocationContext<?> ctx, EntryVersion versionSeen,
VersionGenerator versionGenerator, boolean rollingUpgrade) {
if (versionSeen == null) {
if (log.isTraceEnabled()) {
log.tracef("Perform write skew check for key %s but the key was not read. Skipping check!", toStr(key));
}
//version seen is null when the entry was not read. In this case, the write skew check is not needed.
return CompletableFutures.completedTrue();
}
CompletionStage<IncrementableEntryVersion> entryStage;
if (ctx.isOriginLocal()) {
entryStage = getCurrentEntryVersion(entryLoader, segment, ctx, versionGenerator, rollingUpgrade);
} else {
// If this node is an owner and not originator, the entry has been loaded and wrapped under lock,
// so the version in context should be up-to-date
IncrementableEntryVersion prevVersion = ctx.getCacheTransaction().getVersionsRead().get(key);
if (prevVersion == null) {
// If the command has IGNORE_RETURN_VALUE flags it's possible that the entry was not loaded
// from cache loader - we have to force load
entryStage = getCurrentEntryVersion(entryLoader, segment, ctx, versionGenerator, rollingUpgrade);
} else {
return CompletableFutures.booleanStage(skewed(prevVersion, versionSeen, versionGenerator));
}
}
return entryStage.thenApply(prevVersion -> skewed(prevVersion, versionSeen, versionGenerator));
}
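// Note: despite its name, this helper returns true when the write-skew check PASSES
// (i.e. no skew was detected) and false when a conflicting write is detected.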
private boolean skewed(IncrementableEntryVersion prevVersion, EntryVersion versionSeen, VersionGenerator versionGenerator) {
// If it is expired then it is possible the previous version doesn't exist (because the entry didn't exist)
if (isExpired() && prevVersion == versionGenerator.nonExistingVersion()) {
return true;
}
//in this case, the transaction read some value and the data container has a value stored.
//version seen and previous version are not null. Simple version comparison.
InequalVersionComparisonResult result = prevVersion.compareTo(versionSeen);
if (log.isTraceEnabled()) {
log.tracef("Comparing versions %s and %s for key %s: %s", prevVersion, versionSeen, key, result);
}
// TODO: there is a risk associated with versions that are not monotonous per entry - if an entry is removed
// and then written several times, it can reach the previous version.
return InequalVersionComparisonResult.EQUAL == result;
}
private CompletionStage<IncrementableEntryVersion> getCurrentEntryVersion(EntryLoader<K, V> entryLoader, int segment, TxInvocationContext ctx, VersionGenerator versionGenerator, boolean rollingUpgrade) {
// TODO: persistence should be more orthogonal to any entry type - this should be handled in interceptor
// on origin, the version seen is acquired without the lock, so we have to retrieve it again
CompletionStage<InternalCacheEntry<K, V>> entry = entryLoader.loadAndStoreInDataContainer(ctx, getKey(), segment, null);
return entry.thenApply(ice -> {
if (ice == null) {
if (log.isTraceEnabled()) {
log.tracef("No entry for key %s found in data container", toStr(key));
}
//in this case, the key does not exist, so the only possible result is the non-existing version
return versionGenerator.nonExistingVersion();
}
if (log.isTraceEnabled()) {
log.tracef("Entry found in data container: %s", toStr(ice));
}
IncrementableEntryVersion prevVersion = versionFromEntry(ice);
if (prevVersion == null) {
if (rollingUpgrade) {
return versionGenerator.nonExistingVersion();
}
throw new IllegalStateException("Entries cannot have null versions!");
}
return prevVersion;
});
}
@Override
public VersionedRepeatableReadEntry<K, V> clone() {
return (VersionedRepeatableReadEntry<K, V>) super.clone();
}
}
| 5,848
| 47.741667
| 206
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/InternalCacheValue.java
|
package org.infinispan.container.entries;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A representation of an InternalCacheEntry that does not have a reference to the key. This should be used if the key
* is either not needed or available elsewhere as it is more efficient to marshall and unmarshall. Probably most useful
* in cache stores.
* <p/>
* Note that this should not be used to optimize memory overhead, since the saving of an additional reference to a key
* (a single object reference) does not warrant the cost of constructing an InternalCacheValue, where an existing
* InternalCacheEntry is already referenced.
* <p/>
* Use of this interface <i>only</i> makes sense when marshalling is involved, since the cost of marshalling the key
* again can be sidestepped using an InternalCacheValue if the key is already known/marshalled.
* <p/>
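* A minimal round-trip sketch (hypothetical key/value, using an immortal entry):
* <pre>{@code
* InternalCacheValue<?> icv = new ImmortalCacheEntry("k", "v").toInternalCacheValue();
* InternalCacheEntry<?, ?> ice = icv.toInternalCacheEntry("k");
* assert "v".equals(ice.getValue());
* }</pre>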
*
* @author Manik Surtani
* @since 4.0
*/
public interface InternalCacheValue<V> {
/**
* @return the value represented by this internal wrapper
*/
V getValue();
<K> InternalCacheEntry<K, V> toInternalCacheEntry(K key);
/**
* @param now the current time as expressed by {@link System#currentTimeMillis()}
* @return true if the entry has expired; false otherwise
*/
boolean isExpired(long now);
/**
* @return true if the entry can expire, false otherwise
*/
boolean canExpire();
/**
* @return true if this entry can expire via max idle, false otherwise
*/
default boolean isMaxIdleExpirable() {
return false;
}
/**
* @return timestamp when the entry was created
*/
long getCreated();
/**
* @return timestamp when the entry was last used
*/
long getLastUsed();
/**
* @return lifespan of the value
*/
long getLifespan();
/**
* @return max idle time allowed
*/
long getMaxIdle();
long getExpiryTime();
Metadata getMetadata();
PrivateMetadata getInternalMetadata();
void setInternalMetadata(PrivateMetadata internalMetadata);
}
| 2,097
| 26.246753
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/RepeatableReadEntry.java
|
package org.infinispan.container.entries;
import static org.infinispan.container.entries.ReadCommittedEntry.Flags.SKIP_LOOKUP;
import org.infinispan.metadata.Metadata;
/**
* An extension of {@link ReadCommittedEntry} that provides Repeatable Read semantics
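* <p>
* Once a value is written to the wrapped entry, further lookups of the underlying container are
* skipped, providing the repeatable-read guarantee. A minimal sketch (hypothetical key/value,
* {@code null} metadata):
* <pre>{@code
* RepeatableReadEntry<String, String> entry = new RepeatableReadEntry<>("k", "v1", null);
* entry.setValue("v2");
* assert entry.skipLookup(); // subsequent reads reuse the wrapped value
* }</pre>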
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @since 4.0
*/
public class RepeatableReadEntry<K, V> extends ReadCommittedEntry<K, V> {
public RepeatableReadEntry(K key, V value, Metadata metadata) {
super(key, value, metadata);
}
@Override
public void setSkipLookup(boolean skipLookup) {
setFlag(skipLookup, SKIP_LOOKUP);
}
@Override
public boolean skipLookup() {
return isFlagSet(SKIP_LOOKUP);
}
@Override
public RepeatableReadEntry<K, V> clone() {
return (RepeatableReadEntry<K, V>) super.clone();
}
@Override
public final V setValue(V value) {
V prev = super.setValue(value);
setSkipLookup(true);
return prev;
}
@Override
public void setRead() {
setFlag(Flags.READ);
}
@Override
public boolean isRead() {
return isFlagSet(Flags.READ);
}
}
| 1,164
| 21.843137
| 85
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/TransientMortalCacheEntry.java
|
package org.infinispan.container.entries;
import static java.lang.Math.min;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A cache entry that is both transient and mortal.
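* <p>
* Expiry is the earlier of the lifespan deadline ({@code created + lifespan}) and the max-idle
* deadline ({@code lastUsed + maxIdle}). A minimal sketch (hypothetical timestamps, in ms):
* <pre>{@code
* // maxIdle=5_000, lifespan=10_000, lastUsed=2_000, created=0
* TransientMortalCacheEntry entry = new TransientMortalCacheEntry("k", "v", 5_000, 10_000, 2_000, 0);
* assert entry.getExpiryTime() == 7_000; // min(0 + 10_000, 2_000 + 5_000)
* }</pre>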
*
* @author Manik Surtani
* @since 4.0
*/
public class TransientMortalCacheEntry extends AbstractInternalCacheEntry {
protected long maxIdle;
protected long lastUsed;
protected long lifespan;
protected long created;
public TransientMortalCacheEntry(Object key, Object value, long maxIdle, long lifespan, long currentTimeMillis) {
this(key, value, maxIdle, lifespan, currentTimeMillis, currentTimeMillis);
}
public TransientMortalCacheEntry(Object key, Object value, long maxIdle, long lifespan, long lastUsed,
long created) {
this(key, value, null, maxIdle, lifespan, lastUsed, created);
}
protected TransientMortalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata,
long maxIdle, long lifespan, long lastUsed, long created) {
super(key, value, internalMetadata);
this.maxIdle = maxIdle;
this.lifespan = lifespan;
this.created = created;
this.lastUsed = lastUsed;
}
public void setLifespan(long lifespan) {
this.lifespan = lifespan;
}
public void setMaxIdle(long maxIdle) {
this.maxIdle = maxIdle;
}
@Override
public long getLifespan() {
return lifespan;
}
@Override
public final boolean canExpire() {
return true;
}
@Override
public boolean canExpireMaxIdle() {
return true;
}
@Override
public long getCreated() {
return created;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransientMortal(maxIdle, lastUsed, lifespan, created, now);
}
@Override
public final long getExpiryTime() {
long lset = lifespan > -1 ? created + lifespan : -1;
long muet = maxIdle > -1 ? lastUsed + maxIdle : -1;
if (lset == -1) {
return muet;
}
if (muet == -1) {
return lset;
}
return min(lset, muet);
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new TransientMortalCacheValue(value, internalMetadata, created, lifespan, maxIdle, lastUsed);
}
@Override
public long getLastUsed() {
return lastUsed;
}
@Override
public final void touch(long currentTimeMillis) {
this.lastUsed = currentTimeMillis;
}
@Override
public void reincarnate(long now) {
this.created = now;
}
@Override
public long getMaxIdle() {
return maxIdle;
}
@Override
public Metadata getMetadata() {
return new EmbeddedMetadata.Builder()
.lifespan(lifespan)
.maxIdle(maxIdle).build();
}
@Override
public void setMetadata(Metadata metadata) {
throw new IllegalStateException(
"Metadata cannot be set on mortal entries. They need to be recreated via the entry factory.");
}
@Override
public TransientMortalCacheEntry clone() {
return (TransientMortalCacheEntry) super.clone();
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", lastUsed=").append(lastUsed);
builder.append(", maxIdle=").append(maxIdle);
builder.append(", created=").append(created);
builder.append(", lifespan=").append(lifespan);
}
public static class Externalizer extends AbstractExternalizer<TransientMortalCacheEntry> {
@Override
public void writeObject(ObjectOutput output, TransientMortalCacheEntry entry) throws IOException {
output.writeObject(entry.key);
output.writeObject(entry.value);
output.writeObject(entry.internalMetadata);
UnsignedNumeric.writeUnsignedLong(output, entry.created);
output.writeLong(entry.lifespan); // could be negative so should not use unsigned longs
UnsignedNumeric.writeUnsignedLong(output, entry.lastUsed);
output.writeLong(entry.maxIdle); // could be negative so should not use unsigned longs
}
@Override
public TransientMortalCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
long lifespan = input.readLong();
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
long maxIdle = input.readLong();
return new TransientMortalCacheEntry(key, value, internalMetadata, maxIdle, lifespan, lastUsed, created);
}
@Override
public Integer getId() {
return Ids.TRANSIENT_MORTAL_ENTRY;
}
@Override
public Set<Class<? extends TransientMortalCacheEntry>> getTypeClasses() {
return Collections.singleton(TransientMortalCacheEntry.class);
}
}
}
| 5,449
| 28.945055
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/TransientCacheValue.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A transient cache value, to correspond with {@link TransientCacheEntry}
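* <p>
* Such values can expire only via max idle. A minimal sketch (hypothetical timestamps, in ms):
* <pre>{@code
* TransientCacheValue value = new TransientCacheValue("v", 4_000, 1_000); // maxIdle, lastUsed
* assert value.isMaxIdleExpirable();
* assert value.getExpiryTime() == 5_000; // lastUsed + maxIdle
* }</pre>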
*
* @author Manik Surtani
* @since 4.0
*/
public class TransientCacheValue extends ImmortalCacheValue {
protected long maxIdle;
protected long lastUsed;
public TransientCacheValue(Object value, long maxIdle, long lastUsed) {
this(value, null, maxIdle, lastUsed);
}
protected TransientCacheValue(Object value, PrivateMetadata internalMetadata, long maxIdle, long lastUsed) {
super(value, internalMetadata);
this.maxIdle = maxIdle;
this.lastUsed = lastUsed;
}
@Override
public long getMaxIdle() {
return maxIdle;
}
public void setMaxIdle(long maxIdle) {
this.maxIdle = maxIdle;
}
@Override
public long getLastUsed() {
return lastUsed;
}
public void setLastUsed(long lastUsed) {
this.lastUsed = lastUsed;
}
@Override
public final boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransient(maxIdle, lastUsed, now);
}
@Override
public boolean canExpire() {
return true;
}
@Override
public boolean isMaxIdleExpirable() {
return true;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new TransientCacheEntry(key, value, internalMetadata, maxIdle, lastUsed);
}
@Override
public long getExpiryTime() {
return maxIdle > -1 ? lastUsed + maxIdle : -1;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof TransientCacheValue)) {
return false;
}
if (!super.equals(o)) {
return false;
}
TransientCacheValue that = (TransientCacheValue) o;
return lastUsed == that.lastUsed && maxIdle == that.maxIdle;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (int) (maxIdle ^ (maxIdle >>> 32));
result = 31 * result + (int) (lastUsed ^ (lastUsed >>> 32));
return result;
}
@Override
public TransientCacheValue clone() {
return (TransientCacheValue) super.clone();
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", maxIdle=").append(maxIdle);
builder.append(", lastUsed=").append(lastUsed);
}
public static class Externalizer extends AbstractExternalizer<TransientCacheValue> {
@Override
public void writeObject(ObjectOutput output, TransientCacheValue tcv) throws IOException {
output.writeObject(tcv.value);
output.writeObject(tcv.internalMetadata);
UnsignedNumeric.writeUnsignedLong(output, tcv.lastUsed);
output.writeLong(tcv.maxIdle); // could be negative so should not use unsigned longs
}
@Override
public TransientCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
long maxIdle = input.readLong();
return new TransientCacheValue(value, internalMetadata, maxIdle, lastUsed);
}
@Override
public Integer getId() {
return Ids.TRANSIENT_VALUE;
}
@Override
public Set<Class<? extends TransientCacheValue>> getTypeClasses() {
return Collections.singleton(TransientCacheValue.class);
}
}
}
| 3,973
| 26.79021
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/MortalCacheEntry.java
|
package org.infinispan.container.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A cache entry that is mortal. I.e., has a lifespan.
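* <p>
* A minimal sketch (hypothetical key/value and timestamps, in ms):
* <pre>{@code
* MortalCacheEntry entry = new MortalCacheEntry("k", "v", 10_000, 0); // lifespan, created
* assert entry.getExpiryTime() == 10_000; // created + lifespan
* assert entry.isExpired(20_000);
* }</pre>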
*
* @author Manik Surtani
* @since 4.0
*/
public class MortalCacheEntry extends AbstractInternalCacheEntry {
protected long lifespan;
protected long created;
public MortalCacheEntry(Object key, Object value, long lifespan, long created) {
this(key, value, null, lifespan, created);
}
protected MortalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata, long lifespan,
long created) {
super(key, value, internalMetadata);
this.lifespan = lifespan;
this.created = created;
}
@Override
public final boolean isExpired(long now) {
return ExpiryHelper.isExpiredMortal(lifespan, created, now);
}
@Override
public final boolean canExpire() {
return true;
}
public void setLifespan(long lifespan) {
this.lifespan = lifespan;
}
@Override
public final long getCreated() {
return created;
}
@Override
public final long getLastUsed() {
return -1;
}
@Override
public final long getLifespan() {
return lifespan;
}
@Override
public final long getMaxIdle() {
return -1;
}
@Override
public final long getExpiryTime() {
return lifespan > -1 ? created + lifespan : -1;
}
@Override
public final void touch(long currentTimeMillis) {
// no-op
}
@Override
public void reincarnate(long now) {
this.created = now;
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new MortalCacheValue(value, internalMetadata, created, lifespan);
}
@Override
public Metadata getMetadata() {
return new EmbeddedMetadata.Builder().lifespan(lifespan).build();
}
@Override
public void setMetadata(Metadata metadata) {
throw new IllegalStateException(
"Metadata cannot be set on mortal entries. They need to be recreated via the entry factory.");
}
@Override
public MortalCacheEntry clone() {
return (MortalCacheEntry) super.clone();
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", created=").append(created);
builder.append(", lifespan=").append(lifespan);
}
public static class Externalizer extends AbstractExternalizer<MortalCacheEntry> {
@Override
public void writeObject(ObjectOutput output, MortalCacheEntry mce) throws IOException {
output.writeObject(mce.key);
output.writeObject(mce.value);
output.writeObject(mce.internalMetadata);
UnsignedNumeric.writeUnsignedLong(output, mce.created);
output.writeLong(mce.lifespan); // could be negative so should not use unsigned longs
}
@Override
public MortalCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
long lifespan = input.readLong();
return new MortalCacheEntry(key, value, internalMetadata, lifespan, created);
}
@Override
public Integer getId() {
return Ids.MORTAL_ENTRY;
}
@Override
public Set<Class<? extends MortalCacheEntry>> getTypeClasses() {
return Collections.singleton(MortalCacheEntry.class);
}
}
}
| 4,031
| 26.616438
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/versioned/Versioned.java
|
package org.infinispan.container.entries.versioned;
import org.infinispan.container.versioning.EntryVersion;
/**
* An interface that marks the ability to handle versions
*
* @author Manik Surtani
* @since 5.1
* @deprecated since 11.0 (no longer used).
*/
@Deprecated
public interface Versioned {
/**
* @return the version of the entry. May be null if versioning is not supported, and must never be null if
* versioning is supported.
*/
default EntryVersion getVersion() {
throw new UnsupportedOperationException();
}
/**
* Sets the version on this entry.
*
* @param version version to set
*/
default void setVersion(EntryVersion version) {
throw new UnsupportedOperationException();
}
}
| 764
| 22.90625
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataTransientMortalCacheEntry.java
|
package org.infinispan.container.entries.metadata;
import static java.lang.Math.min;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.AbstractInternalCacheEntry;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.entries.TransientMortalCacheEntry;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A form of {@link TransientMortalCacheEntry} that stores {@link Metadata}
*
* @author Manik Surtani
* @since 5.1
*/
public class MetadataTransientMortalCacheEntry extends AbstractInternalCacheEntry implements MetadataAware {
Metadata metadata;
long created;
long lastUsed;
public MetadataTransientMortalCacheEntry(Object key, Object value, Metadata metadata, long now) {
this(key, value, metadata, now, now);
}
public MetadataTransientMortalCacheEntry(Object key, Object value, Metadata metadata, long lastUsed, long created) {
this(key, value, null, metadata, lastUsed, created);
}
protected MetadataTransientMortalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata,
Metadata metadata, long lastUsed, long created) {
super(key, value, internalMetadata);
this.metadata = metadata;
this.lastUsed = lastUsed;
this.created = created;
}
@Override
public long getLifespan() {
return metadata.lifespan();
}
@Override
public final boolean canExpire() {
return true;
}
@Override
public long getCreated() {
return created;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransientMortal(
metadata.maxIdle(), lastUsed, metadata.lifespan(), created, now);
}
@Override
public boolean canExpireMaxIdle() {
return true;
}
@Override
public final long getExpiryTime() {
long lifespan = metadata.lifespan();
long lset = lifespan > -1 ? created + lifespan : -1;
long maxIdle = metadata.maxIdle();
long muet = maxIdle > -1 ? lastUsed + maxIdle : -1;
if (lset == -1) {
return muet;
}
if (muet == -1) {
return lset;
}
return min(lset, muet);
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new MetadataTransientMortalCacheValue(value, internalMetadata, metadata, created, lastUsed);
}
@Override
public long getLastUsed() {
return lastUsed;
}
@Override
public final void touch(long currentTimeMillis) {
lastUsed = currentTimeMillis;
}
@Override
public void reincarnate(long now) {
created = now;
}
@Override
public long getMaxIdle() {
return metadata.maxIdle();
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
builder.append(", created=").append(created);
builder.append(", lastUsed=").append(lastUsed);
}
public static class Externalizer extends AbstractExternalizer<MetadataTransientMortalCacheEntry> {
@Override
public void writeObject(ObjectOutput output, MetadataTransientMortalCacheEntry ice) throws IOException {
output.writeObject(ice.key);
output.writeObject(ice.value);
output.writeObject(ice.internalMetadata);
output.writeObject(ice.metadata);
UnsignedNumeric.writeUnsignedLong(output, ice.created);
UnsignedNumeric.writeUnsignedLong(output, ice.lastUsed);
}
@Override
public MetadataTransientMortalCacheEntry readObject(ObjectInput input)
throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
return new MetadataTransientMortalCacheEntry(key, value, internalMetadata, metadata, lastUsed, created);
}
@Override
public Integer getId() {
return Ids.METADATA_TRANSIENT_MORTAL_ENTRY;
}
@Override
public Set<Class<? extends MetadataTransientMortalCacheEntry>> getTypeClasses() {
return Collections.singleton(MetadataTransientMortalCacheEntry.class);
}
}
}
| 5,021
| 29.071856
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataTransientCacheEntry.java
|
package org.infinispan.container.entries.metadata;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.AbstractInternalCacheEntry;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A cache entry that is transient, i.e., it can be considered expired after a period of not being used, and {@link
* MetadataAware}
*
* @author Galder Zamarreño
* @since 5.3
*/
public class MetadataTransientCacheEntry extends AbstractInternalCacheEntry implements MetadataAware {
protected Metadata metadata;
protected long lastUsed;
public MetadataTransientCacheEntry(Object key, Object value, Metadata metadata, long lastUsed) {
this(key, value, null, metadata, lastUsed);
}
protected MetadataTransientCacheEntry(Object key, Object value, PrivateMetadata internalMetadata,
Metadata metadata, long lastUsed) {
super(key, value, internalMetadata);
this.metadata = metadata;
this.lastUsed = lastUsed;
}
@Override
public final void touch(long currentTimeMillis) {
lastUsed = currentTimeMillis;
}
@Override
public void reincarnate(long now) {
//no-op
}
@Override
public final boolean canExpire() {
return true;
}
@Override
public boolean canExpireMaxIdle() {
return true;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransient(metadata.maxIdle(), lastUsed, now);
}
@Override
public long getCreated() {
return -1;
}
@Override
public final long getLastUsed() {
return lastUsed;
}
@Override
public long getLifespan() {
return -1;
}
@Override
public long getExpiryTime() {
long maxIdle = metadata.maxIdle();
return maxIdle > -1 ? lastUsed + maxIdle : -1;
}
@Override
public final long getMaxIdle() {
return metadata.maxIdle();
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new MetadataTransientCacheValue(value, internalMetadata, metadata, lastUsed);
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
builder.append(", lastUsed=").append(lastUsed);
}
public static class Externalizer extends AbstractExternalizer<MetadataTransientCacheEntry> {
@Override
public void writeObject(ObjectOutput output, MetadataTransientCacheEntry ice) throws IOException {
output.writeObject(ice.key);
output.writeObject(ice.value);
output.writeObject(ice.internalMetadata);
output.writeObject(ice.metadata);
UnsignedNumeric.writeUnsignedLong(output, ice.lastUsed);
}
@Override
public MetadataTransientCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
return new MetadataTransientCacheEntry(key, value, internalMetadata, metadata, lastUsed);
}
@Override
public Integer getId() {
return Ids.METADATA_TRANSIENT_ENTRY;
}
@Override
public Set<Class<? extends MetadataTransientCacheEntry>> getTypeClasses() {
return Collections.singleton(MetadataTransientCacheEntry.class);
}
}
}
| 4,170
| 27.568493
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataTransientCacheValue.java
|
package org.infinispan.container.entries.metadata;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.ImmortalCacheValue;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.TransientCacheEntry;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A transient cache value, to correspond with {@link TransientCacheEntry} which is {@link MetadataAware}
*
* @author Galder Zamarreño
* @since 5.3
*/
public class MetadataTransientCacheValue extends ImmortalCacheValue implements MetadataAware {
Metadata metadata;
long lastUsed;
public MetadataTransientCacheValue(Object value, Metadata metadata, long lastUsed) {
this(value, null, metadata, lastUsed);
}
protected MetadataTransientCacheValue(Object value, PrivateMetadata internalMetadata, Metadata metadata,
long lastUsed) {
super(value, internalMetadata);
this.metadata = metadata;
this.lastUsed = lastUsed;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new MetadataTransientCacheEntry(key, value, internalMetadata, metadata, lastUsed);
}
@Override
public long getMaxIdle() {
return metadata.maxIdle();
}
@Override
public long getLastUsed() {
return lastUsed;
}
@Override
public final boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransient(metadata.maxIdle(), lastUsed, now);
}
@Override
public boolean canExpire() {
return true;
}
@Override
public boolean isMaxIdleExpirable() {
return true;
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
public long getExpiryTime() {
long maxIdle = metadata.maxIdle();
return maxIdle > -1 ? lastUsed + maxIdle : -1;
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
builder.append(", lastUsed=").append(lastUsed);
}
public static class Externalizer extends AbstractExternalizer<MetadataTransientCacheValue> {
@Override
public void writeObject(ObjectOutput output, MetadataTransientCacheValue tcv) throws IOException {
output.writeObject(tcv.value);
output.writeObject(tcv.internalMetadata);
output.writeObject(tcv.metadata);
UnsignedNumeric.writeUnsignedLong(output, tcv.lastUsed);
}
@Override
public MetadataTransientCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
return new MetadataTransientCacheValue(value, internalMetadata, metadata, lastUsed);
}
@Override
public Integer getId() {
return Ids.METADATA_TRANSIENT_VALUE;
}
@Override
public Set<Class<? extends MetadataTransientCacheValue>> getTypeClasses() {
return Collections.singleton(MetadataTransientCacheValue.class);
}
}
}
| 3,742
| 29.430894
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataImmortalCacheEntry.java
|
package org.infinispan.container.entries.metadata;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A form of {@link org.infinispan.container.entries.ImmortalCacheEntry} that is {@link
* org.infinispan.container.entries.metadata.MetadataAware}
*
* @author Galder Zamarreño
* @since 5.3
*/
public class MetadataImmortalCacheEntry extends ImmortalCacheEntry implements MetadataAware {
protected Metadata metadata;
public MetadataImmortalCacheEntry(Object key, Object value, Metadata metadata) {
this(key, value, null, metadata);
}
protected MetadataImmortalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata, Metadata metadata) {
super(key, value, internalMetadata);
this.metadata = metadata;
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new MetadataImmortalCacheValue(value, internalMetadata, metadata);
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
}
public static class Externalizer extends AbstractExternalizer<MetadataImmortalCacheEntry> {
@Override
public void writeObject(ObjectOutput output, MetadataImmortalCacheEntry ice) throws IOException {
output.writeObject(ice.key);
output.writeObject(ice.value);
output.writeObject(ice.internalMetadata);
output.writeObject(ice.metadata);
}
@Override
public MetadataImmortalCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
return new MetadataImmortalCacheEntry(key, value, internalMetadata, metadata);
}
@Override
public Integer getId() {
return Ids.METADATA_IMMORTAL_ENTRY;
}
@Override
public Set<Class<? extends MetadataImmortalCacheEntry>> getTypeClasses() {
return Collections.singleton(MetadataImmortalCacheEntry.class);
}
}
}
| 2,818
| 31.77907
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/L1MetadataInternalCacheEntry.java
|
package org.infinispan.container.entries.metadata;
import org.infinispan.metadata.Metadata;
/**
* A {@link org.infinispan.container.entries.InternalCacheEntry} implementation to store a L1 entry.
*
* @author Pedro Ruivo
* @since 7.1
*/
public class L1MetadataInternalCacheEntry extends MetadataMortalCacheEntry {
public L1MetadataInternalCacheEntry(Object key, Object value, Metadata metadata, long created) {
super(key, value, metadata, created);
}
@Override
public boolean isL1Entry() {
return true;
}
}
| 542
| 23.681818
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataAware.java
|
package org.infinispan.container.entries.metadata;
import org.infinispan.metadata.Metadata;
/**
* Marker interface for metadata aware cache entry.
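* <p>
* A minimal sketch (hypothetical {@code entry} reference) of attaching expiration metadata:
* <pre>{@code
* Metadata metadata = new EmbeddedMetadata.Builder()
*       .lifespan(10_000)
*       .maxIdle(5_000)
*       .build();
* entry.setMetadata(metadata);
* }</pre>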
*
* @author Galder Zamarreño
* @since 5.3
*/
public interface MetadataAware {
/**
* Get metadata of this cache entry.
*
* @return a Metadata instance
*/
Metadata getMetadata();
/**
* Set the metadata in the cache entry.
*
* @param metadata to apply to the cache entry
*/
void setMetadata(Metadata metadata);
}
| 511
| 17.285714
| 51
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataMortalCacheValue.java
|
package org.infinispan.container.entries.metadata;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.ImmortalCacheValue;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A mortal cache value, to correspond with {@link MetadataMortalCacheEntry}
*
* @author Galder Zamarreño
* @since 5.1
*/
public class MetadataMortalCacheValue extends ImmortalCacheValue implements MetadataAware {
Metadata metadata;
long created;
public MetadataMortalCacheValue(Object value, Metadata metadata, long created) {
this(value, null, metadata, created);
}
protected MetadataMortalCacheValue(Object value, PrivateMetadata internalMetadata, Metadata metadata, long created) {
super(value, internalMetadata);
this.metadata = metadata;
this.created = created;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new MetadataMortalCacheEntry(key, value, internalMetadata, metadata, created);
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
public final long getCreated() {
return created;
}
@Override
public final long getLifespan() {
return metadata.lifespan();
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredMortal(metadata.lifespan(), created, now);
}
@Override
public long getExpiryTime() {
long lifespan = metadata.lifespan();
return lifespan > -1 ? created + lifespan : -1;
}
@Override
public final boolean canExpire() {
return true;
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
builder.append(", created=").append(created);
}
public static class Externalizer extends AbstractExternalizer<MetadataMortalCacheValue> {
@Override
public void writeObject(ObjectOutput output, MetadataMortalCacheValue mcv) throws IOException {
output.writeObject(mcv.value);
output.writeObject(mcv.internalMetadata);
output.writeObject(mcv.metadata);
UnsignedNumeric.writeUnsignedLong(output, mcv.created);
}
@Override
public MetadataMortalCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
return new MetadataMortalCacheValue(value, internalMetadata, metadata, created);
}
@Override
public Integer getId() {
return Ids.METADATA_MORTAL_VALUE;
}
@Override
public Set<Class<? extends MetadataMortalCacheValue>> getTypeClasses() {
return Collections.singleton(MetadataMortalCacheValue.class);
}
}
}
| 3,532
| 29.196581
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataTransientMortalCacheValue.java
|
package org.infinispan.container.entries.metadata;
import static java.lang.Math.min;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.TransientMortalCacheValue;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A form of {@link TransientMortalCacheValue} that stores {@link Metadata}
*
* @author Manik Surtani
* @since 5.1
*/
public class MetadataTransientMortalCacheValue extends MetadataMortalCacheValue implements MetadataAware {
long lastUsed;
public MetadataTransientMortalCacheValue(Object value, Metadata metadata, long created, long lastUsed) {
this(value, null, metadata, created, lastUsed);
}
protected MetadataTransientMortalCacheValue(Object value, PrivateMetadata internalMetadata,
Metadata metadata, long created, long lastUsed) {
super(value, internalMetadata, metadata, created);
this.lastUsed = lastUsed;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new MetadataTransientMortalCacheEntry(key, value, internalMetadata, metadata, lastUsed, created);
}
@Override
public long getMaxIdle() {
return metadata.maxIdle();
}
@Override
public long getLastUsed() {
return lastUsed;
}
@Override
public boolean isExpired(long now) {
return ExpiryHelper.isExpiredTransientMortal(metadata.maxIdle(), lastUsed, metadata.lifespan(), created, now);
}
@Override
public boolean isMaxIdleExpirable() {
return true;
}
@Override
public long getExpiryTime() {
long lifespan = metadata.lifespan();
long lset = lifespan > -1 ? created + lifespan : -1;
long maxIdle = metadata.maxIdle();
long muet = maxIdle > -1 ? lastUsed + maxIdle : -1;
if (lset == -1) return muet;
if (muet == -1) return lset;
return min(lset, muet);
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", lastUsed=").append(lastUsed);
}
public static class Externalizer extends AbstractExternalizer<MetadataTransientMortalCacheValue> {
@Override
public void writeObject(ObjectOutput output, MetadataTransientMortalCacheValue value) throws IOException {
output.writeObject(value.value);
output.writeObject(value.internalMetadata);
output.writeObject(value.metadata);
UnsignedNumeric.writeUnsignedLong(output, value.created);
UnsignedNumeric.writeUnsignedLong(output, value.lastUsed);
}
@Override
public MetadataTransientMortalCacheValue readObject(ObjectInput input)
throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
long lastUsed = UnsignedNumeric.readUnsignedLong(input);
return new MetadataTransientMortalCacheValue(value, internalMetadata, metadata, created, lastUsed);
}
@Override
public Integer getId() {
return Ids.METADATA_TRANSIENT_MORTAL_VALUE;
}
@Override
public Set<Class<? extends MetadataTransientMortalCacheValue>> getTypeClasses() {
return Collections.singleton(MetadataTransientMortalCacheValue.class);
}
}
}
| 3,875
| 32.704348
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataMortalCacheEntry.java
|
package org.infinispan.container.entries.metadata;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.AbstractInternalCacheEntry;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A cache entry that is mortal and is {@link MetadataAware}
*
* @author Galder Zamarreño
* @since 5.3
*/
public class MetadataMortalCacheEntry extends AbstractInternalCacheEntry implements MetadataAware {
protected Metadata metadata;
protected long created;
public MetadataMortalCacheEntry(Object key, Object value, Metadata metadata, long created) {
this(key, value, null, metadata, created);
}
protected MetadataMortalCacheEntry(Object key, Object value, PrivateMetadata internalMetadata,
Metadata metadata, long created) {
super(key, value, internalMetadata);
this.metadata = metadata;
this.created = created;
}
@Override
public final boolean isExpired(long now) {
return ExpiryHelper.isExpiredMortal(metadata.lifespan(), created, now);
}
@Override
public final boolean canExpire() {
return true;
}
@Override
public final long getCreated() {
return created;
}
@Override
public final long getLastUsed() {
return -1;
}
@Override
public final long getLifespan() {
return metadata.lifespan();
}
@Override
public final long getMaxIdle() {
return -1;
}
@Override
public final long getExpiryTime() {
long lifespan = metadata.lifespan();
return lifespan > -1 ? created + lifespan : -1;
}
@Override
public final void touch(long currentTimeMillis) {
// no-op
}
@Override
public void reincarnate(long now) {
this.created = now;
}
@Override
public InternalCacheValue<?> toInternalCacheValue() {
return new MetadataMortalCacheValue(value, internalMetadata, metadata, created);
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
builder.append(", created=").append(created);
}
public static class Externalizer extends AbstractExternalizer<MetadataMortalCacheEntry> {
@Override
public void writeObject(ObjectOutput output, MetadataMortalCacheEntry ice) throws IOException {
output.writeObject(ice.key);
output.writeObject(ice.value);
output.writeObject(ice.internalMetadata);
output.writeObject(ice.metadata);
UnsignedNumeric.writeUnsignedLong(output, ice.created);
}
@Override
public MetadataMortalCacheEntry readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
long created = UnsignedNumeric.readUnsignedLong(input);
return new MetadataMortalCacheEntry(key, value, internalMetadata, metadata, created);
}
@Override
public Integer getId() {
return Ids.METADATA_MORTAL_ENTRY;
}
@Override
public Set<Class<? extends MetadataMortalCacheEntry>> getTypeClasses() {
return Collections.singleton(MetadataMortalCacheEntry.class);
}
}
}
| 3,989
| 27.705036
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/entries/metadata/MetadataImmortalCacheValue.java
|
package org.infinispan.container.entries.metadata;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.ImmortalCacheValue;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.marshall.core.Ids;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* A form of {@link ImmortalCacheValue} that is {@link MetadataAware}.
*
* @author Galder Zamarreño
* @since 5.3
*/
public class MetadataImmortalCacheValue extends ImmortalCacheValue implements MetadataAware {
Metadata metadata;
public MetadataImmortalCacheValue(Object value, Metadata metadata) {
this(value, null, metadata);
}
protected MetadataImmortalCacheValue(Object value, PrivateMetadata internalMetadata, Metadata metadata) {
super(value, internalMetadata);
this.metadata = metadata;
}
@Override
public InternalCacheEntry<?, ?> toInternalCacheEntry(Object key) {
return new MetadataImmortalCacheEntry(key, value, internalMetadata, metadata);
}
@Override
public Metadata getMetadata() {
return metadata;
}
@Override
public void setMetadata(Metadata metadata) {
this.metadata = metadata;
}
@Override
protected void appendFieldsToString(StringBuilder builder) {
super.appendFieldsToString(builder);
builder.append(", metadata=").append(metadata);
}
public static class Externalizer extends AbstractExternalizer<MetadataImmortalCacheValue> {
@Override
public void writeObject(ObjectOutput output, MetadataImmortalCacheValue icv) throws IOException {
output.writeObject(icv.value);
output.writeObject(icv.internalMetadata);
output.writeObject(icv.metadata);
}
@Override
public MetadataImmortalCacheValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
PrivateMetadata internalMetadata = (PrivateMetadata) input.readObject();
Metadata metadata = (Metadata) input.readObject();
return new MetadataImmortalCacheValue(value, internalMetadata, metadata);
}
@Override
public Integer getId() {
return Ids.METADATA_IMMORTAL_VALUE;
}
@Override
public Set<Class<? extends MetadataImmortalCacheValue>> getTypeClasses() {
return Collections.singleton(MetadataImmortalCacheValue.class);
}
}
}
| 2,631
| 30.333333
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/package-info.java
|
/**
* Cache manager API.
*
* @api.public
*/
package org.infinispan.manager;
| 80
| 10.571429
| 31
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/ClusterExecutionPolicy.java
|
package org.infinispan.manager;
import org.infinispan.remoting.transport.TopologyAwareAddress;
/**
* ClusterExecutionPolicy controls which nodes the commands submitted via {@link ClusterExecutor} are executed on. That
* is, the user can ensure that a command does or does not run at a specific physical location, such as on the same
* machine, rack or site.
* <p>
* ClusterExecutionPolicy effectively scopes execution of commands to a subset of nodes. For
* example, someone might want to execute commands exclusively on the local network site instead of a
* remote backup site. Others might, for example, use only a dedicated subset of nodes in a
* certain Infinispan rack for specific task execution.
*
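* A minimal sketch (assuming a started {@code EmbeddedCacheManager} named {@code cacheManager},
* and that {@code ClusterExecutor.filterTargets} accepts a policy):
* <pre>{@code
* cacheManager.executor()
*       .filterTargets(ClusterExecutionPolicy.SAME_RACK)
*       .submit(() -> System.out.println("executed only on same-rack nodes"));
* }</pre>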
*
* @author William Burns
* @since 9.0
*/
public enum ClusterExecutionPolicy {
/**
* The command can be executed on any node in the cluster
*/
ALL {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return true;
}
},
/**
* The command can be executed only on the same machine from where it was initiated. Note this implies
* same rack and same site.
*/
SAME_MACHINE {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return thisAddress.isSameMachine(otherAddress);
}
},
/**
* The command will be executed only on a different machine. Note this means it may or may not be on the same rack
* or site.
*/
DIFFERENT_MACHINE {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return !thisAddress.isSameMachine(otherAddress);
}
},
/**
* The command will be executed on a machine on the same rack. Note this means it may or may not be executed on the
* same machine.
*/
SAME_RACK {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return thisAddress.isSameRack(otherAddress);
}
},
/**
* The command will be executed on a machine on a different rack. Note this means it may or may not be on the same site.
*/
DIFFERENT_RACK {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return !thisAddress.isSameRack(otherAddress);
}
},
/**
* The command will be executed on a machine on the same site. Note this means it may or may not be executed on the
* same machine or even same rack.
*/
SAME_SITE {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return thisAddress.isSameSite(otherAddress);
}
},
/**
* The command will be executed on a different site.
*/
DIFFERENT_SITE {
@Override
public boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress) {
return !thisAddress.isSameSite(otherAddress);
}
},
;
public abstract boolean include(TopologyAwareAddress thisAddress, TopologyAwareAddress otherAddress);
}
| 3,199
| 34.164835
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/EmbeddedCacheManager.java
|
package org.infinispan.manager;
import java.io.Closeable;
import java.util.List;
import java.util.Set;
import javax.security.auth.Subject;
import org.infinispan.Cache;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.health.Health;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.notifications.Listenable;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.stats.CacheContainerStats;
/**
* EmbeddedCacheManager is a CacheManager that runs in the same JVM as the client.
* <p/>
* Constructing an <tt>EmbeddedCacheManager</tt> is done via one of its constructors, which optionally take in a {@link
* org.infinispan.configuration.cache.Configuration} or a path or URL to a configuration XML file: see {@link org.infinispan.manager.DefaultCacheManager}.
* <p/>
* Lifecycle - <tt>EmbeddedCacheManager</tt>s have a lifecycle (it implements {@link Lifecycle}) and
* the default constructors also call {@link #start()}. Overloaded versions of the constructors are available, that do
* not start the <tt>CacheManager</tt>, although it must be kept in mind that <tt>CacheManager</tt>s need to be started
* before they can be used to create <tt>Cache</tt> instances.
* <p/>
* Once constructed, <tt>EmbeddedCacheManager</tt>s should be made available to any component that requires a <tt>Cache</tt>,
* via <a href="http://en.wikipedia.org/wiki/Java_Naming_and_Directory_Interface">JNDI</a> or via some other mechanism
* such as an <a href="http://en.wikipedia.org/wiki/Dependency_injection">dependency injection</a> framework.
* <p/>
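* A minimal usage sketch (the cache name {@code "users"} and the configuration are illustrative only):
* <pre><code>
* EmbeddedCacheManager manager = new DefaultCacheManager();
* manager.defineConfiguration("users", new ConfigurationBuilder().build());
* Cache<String, String> cache = manager.getCache("users");
* cache.put("name", "value");
* manager.stop();
* </code></pre>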
*
* @see org.infinispan.manager.DefaultCacheManager
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreno
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
@Scope(Scopes.GLOBAL)
@SurvivesRestarts
public interface EmbeddedCacheManager extends CacheContainer, Listenable, Closeable {
/**
* Register a cache configuration in the cache manager.
* <p/>
* The configuration will be automatically used when creating a cache with the same name,
* unless it is a template.
* If it is a template and it contains wildcards (`*` or `?`), it will be automatically used
* when creating caches that match the template.
* <p/>
* In order to extend an existing configuration,
* use {@link ConfigurationBuilder#read(org.infinispan.configuration.cache.Configuration)}.
* <p/>
* The other way to define a cache configuration is declaratively, in the XML file passed in to the cache
* manager.
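* <p/>
* For example, a sketch that extends an existing configuration ({@code "base"} and {@code "derived"} are
* illustrative names):
* <pre><code>
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.read(manager.getCacheConfiguration("base"));
* manager.defineConfiguration("derived", builder.build());
* </code></pre>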
*
* @param cacheName name of the cache configuration
* @param configuration the cache configuration
* @return the cache configuration
* @throws org.infinispan.commons.CacheConfigurationException if a configuration with the same name already exists.
*/
Configuration defineConfiguration(String cacheName, Configuration configuration);
/**
* Defines a cache configuration by first reading the template configuration and then applying the override.
* <p/>
* The configuration will be automatically used when creating a cache with the same name,
* unless it is a template.
* If it is a template and it contains wildcards (`*` or `?`), it will be automatically used
* when creating caches that match the template.
* <p/>
* The other way to define a cache configuration is declaratively, in the XML file passed in to the cache
* manager.
* <p/>
* If templateName is null, this method works exactly like {@link #defineConfiguration(String, Configuration)}.
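* <p/>
* A minimal sketch (the cache and template names are illustrative):
* <pre><code>
* Configuration override = new ConfigurationBuilder().build();
* manager.defineConfiguration("session-cache", "session-template", override);
* </code></pre>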
*
* @param cacheName name of cache whose configuration is being defined
* @param templateCacheName configuration to use as a template
* @param configurationOverride configuration overrides on top of the template
* @return the configuration
* @throws org.infinispan.commons.CacheConfigurationException if a configuration with the same name already exists.
*/
Configuration defineConfiguration(String cacheName, String templateCacheName, Configuration configurationOverride);
/**
* Removes a configuration from the set of defined configurations.
* <p/>
* If the named configuration does not exist, nothing happens.
*
* @param configurationName the named configuration
* @throws IllegalStateException if the configuration is in use
*/
void undefineConfiguration(String configurationName);
/**
* @return the name of the cluster. Null if running in local mode.
*/
String getClusterName();
/**
* @return the addresses of all the members in the cluster, or {@code null} if not connected
*/
List<Address> getMembers();
/**
* Warning: the address may be {@code null} before the first clustered cache starts
* and after all the clustered caches have been stopped.
*
* @return the address of the local node, or {@code null} if not connected
*/
Address getAddress();
/**
* @return the address of the cluster's coordinator, or {@code null} if not connected
*/
Address getCoordinator();
/**
* @return {@code true} if the local node is the cluster's coordinator; {@code false} if it is not, or if not connected
*/
boolean isCoordinator();
/**
* @return the status of the cache manager
*/
ComponentStatus getStatus();
/**
* Returns global configuration for this CacheManager
*
* @return the global configuration object associated to this CacheManager
*/
GlobalConfiguration getCacheManagerConfiguration();
/**
* Returns the configuration for the given cache.
*
* @return the configuration for the given cache or null if no such cache is defined
*/
Configuration getCacheConfiguration(String name);
/**
* @return the default cache's configuration, or {@code null} if there is no default cache.
*/
org.infinispan.configuration.cache.Configuration getDefaultCacheConfiguration();
/**
* This method returns a collection of all cache configuration names.
* <p/>
* The configurations may have been defined via XML,
* programmatically via {@link org.infinispan.configuration.parsing.ConfigurationBuilderHolder},
* or at runtime via {@link #defineConfiguration(String, Configuration)}.
* <p/>
* Internal caches defined via {@link org.infinispan.registry.InternalCacheRegistry}
* are not included.
*
* @return an immutable set of configuration names registered in this cache manager.
*
* @since 8.2
*/
default Set<String> getCacheConfigurationNames() {
throw new UnsupportedOperationException();
}
/**
* Similar to {@link #getCacheNames()} but filters out caches that are not accessible by the current user
*/
Set<String> getAccessibleCacheNames();
/**
* Tests whether a cache is running.
* @param cacheName name of cache to test.
* @return true if the cache exists and is running; false otherwise.
*/
boolean isRunning(String cacheName);
/**
* Tests whether the default cache is running.
* @return true if the default cache is running; false otherwise.
*/
boolean isDefaultRunning();
/**
* A cache is considered to exist if it has been created and started via
* one of the {@link #getCache()} methods and has not yet been removed via
* {@link #removeCache(String)}.
* <p/>
* In environments where caches are continuously created and removed, this
* method offers the possibility to find out whether a cache has either
* not been started, or if it was started, whether it has been removed
* already or not.
*
* @param cacheName cache to check
* @return <tt>true</tt> if a cache with the given name has been started and
*         has not yet been removed; <tt>false</tt> otherwise.
*/
boolean cacheExists(String cacheName);
/**
* Retrieves the default cache associated with this cache container.
*
* @return the default cache.
* @throws org.infinispan.commons.CacheConfigurationException if a default cache does not exist.
*/
<K, V> Cache<K, V> getCache();
/**
* Retrieves a cache by name.
* <p/>
* If the cache has been previously created with the same name, the running
* cache instance is returned.
* Otherwise, this method attempts to create the cache first.
* <p/>
* When creating a new cache, this method requires a defined configuration that either has exactly the same name,
* or is a template with wildcards and matches the cache name.
*
* @param cacheName name of cache to retrieve
* @return a cache instance identified by cacheName
*/
<K, V> Cache<K, V> getCache(String cacheName);
/**
* Creates a cache on the local node using the supplied configuration.
* <p/>
* The cache may be clustered, but this method (or the equivalent combination of
* {@link #defineConfiguration(String, Configuration)} and
* {@link #getCache(String, boolean)}) needs to be invoked on all nodes.
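* <p/>
* A minimal sketch (the cache name and mode are illustrative):
* <pre><code>
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.clustering().cacheMode(CacheMode.DIST_SYNC);
* Cache<String, String> cache = manager.createCache("dist-cache", builder.build());
* </code></pre>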
*
* @param name the name of the cache
* @param configuration the configuration to use.
* @param <K> the generic type of the key
* @param <V> the generic type of the value
* @return the cache
*/
<K, V> Cache<K, V> createCache(String name, Configuration configuration);
/**
* Similar to {@link #getCache(String)}, except it has the option
* to not create the cache if it is not already running.
*
* @param cacheName name of cache to retrieve
* @param createIfAbsent If <tt>true</tt>, this methods works just like {@link #getCache(String)}.
* If <tt>false</tt>, return the already running cache or <tt>null</tt>.
* @return <tt>null</tt> if the cache does not exist and <tt>createIfAbsent == false</tt>,
* otherwise a cache instance identified by cacheName
*/
<K, V> Cache<K, V> getCache(String cacheName, boolean createIfAbsent);
/**
* Starts a set of caches in parallel. Infinispan supports both symmetric
* and asymmetric clusters; that is, multiple nodes having the same or
* different sets of caches running, respectively. Calling this method on
* application/application server startup with all your cache names
* will ensure that the cluster is symmetric.
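* <p/>
* For example, on application startup (the cache names are illustrative):
* <pre><code>
* manager.startCaches("users", "orders", "sessions");
* </code></pre>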
*
* @param cacheNames the names of the caches to start
* @since 5.0
*/
EmbeddedCacheManager startCaches(String... cacheNames);
/**
* Removes a cache with the given name from the system. This is a cluster
* wide operation that results not only in stopping the cache with the given
* name in all nodes in the cluster, but also deletes its contents both in
* memory and in any backing cache store.
*
* @param cacheName name of cache to remove
* @deprecated Since 9.2, obtain a {@link org.infinispan.commons.api.CacheContainerAdmin} instance using {@link #administration()} and invoke the {@link org.infinispan.commons.api.CacheContainerAdmin#removeCache(String)} method
*/
@Deprecated
void removeCache(String cacheName);
/**
* @deprecated Since 10.0, please use {@link #getAddress()}, {@link #getMembers()}, {@link #getCoordinator()}
*/
@Deprecated
Transport getTransport();
/**
* @deprecated Since 10.0, with no public API replacement
*/
@Deprecated
GlobalComponentRegistry getGlobalComponentRegistry();
/**
* Add a dependency between two caches. The cache manager will make sure that
* a cache is stopped before any of its dependencies
*
* @param from cache name
* @param to cache name
* @since 7.0
*/
void addCacheDependency(String from, String to);
/**
* Returns statistics for this cache manager
*
* @since 7.1
* @return statistics for this cache manager
* @deprecated Since 10.1.3. This mixes statistics across unrelated caches so the reported numbers don't have too much
* relevance.
*/
@Deprecated
CacheContainerStats getStats();
/**
* Provides the cache manager based executor. This can be used to execute a given operation upon the
* cluster or a single node if desired. If this manager is not clustered this will execute locally only.
* <p>
* Note that not all {@link EmbeddedCacheManager} implementations may implement this. Those that don't will throw
* an {@link UnsupportedOperationException} upon invocation.
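* <p>
* For example, a sketch that runs a task on every node (the task body is illustrative):
* <pre><code>
* manager.executor().submit(() -> System.out.println("task ran"));
* </code></pre>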
* @return an executor that can run operations across the cluster
*/
default ClusterExecutor executor() {
throw new UnsupportedOperationException();
}
/**
* Returns an entry point for a Health Check API.
*
* @since 9.0
* @return Health API for this {@link EmbeddedCacheManager}.
*/
Health getHealth();
/**
* @return an instance of {@link CacheManagerInfo} used to get basic info about the cache manager.
*/
CacheManagerInfo getCacheManagerInfo();
/**
* Provides an {@link EmbeddedCacheManagerAdmin} whose methods affect the entire cluster as opposed to a single node.
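* <p>
* For example, a sketch that creates a cache across the cluster (the template name is illustrative):
* <pre><code>
* manager.administration().getOrCreateCache("users", "org.infinispan.DIST_SYNC");
* </code></pre>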
*
* @since 9.2
* @return a cluster-aware {@link EmbeddedCacheManagerAdmin}
*/
default EmbeddedCacheManagerAdmin administration() {
throw new UnsupportedOperationException();
}
@Deprecated
ClassAllowList getClassWhiteList();
ClassAllowList getClassAllowList();
Subject getSubject();
EmbeddedCacheManager withSubject(Subject subject);
}
| 13,969 | 38.022346 | 230 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/ModuleRepository.java |
package org.infinispan.manager;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.util.ServiceFinder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.impl.ComponentAccessor;
import org.infinispan.factories.impl.DynamicModuleMetadataProvider;
import org.infinispan.factories.impl.MBeanMetadata;
import org.infinispan.factories.impl.ModuleMetadataBuilder;
import org.infinispan.lifecycle.ModuleLifecycle;
import org.infinispan.util.CyclicDependencyException;
import org.infinispan.util.DependencyGraph;
/**
* Store for component and module information.
* <p>
* <b>NOTE:</b> Not public API: It exists in package {@code org.infinispan.manager}
* so that only {@code DefaultCacheManager} can instantiate it.
*
* @api.private
* @author Dan Berindei
* @since 10.0
*/
public final class ModuleRepository {
private final List<ModuleLifecycle> moduleLifecycles;
private final Map<String, ComponentAccessor<?>> components;
private final Map<String, String> factoryNames;
private final Map<String, MBeanMetadata> mbeans;
private ModuleRepository(List<ModuleLifecycle> moduleLifecycles,
Map<String, ComponentAccessor<?>> components,
Map<String, String> factoryNames,
Map<String, MBeanMetadata> mbeans) {
this.moduleLifecycles = moduleLifecycles;
this.components = components;
this.factoryNames = factoryNames;
this.mbeans = mbeans;
}
public static ModuleRepository newModuleRepository(ClassLoader classLoader, GlobalConfiguration globalConfiguration) {
return new Builder(classLoader, globalConfiguration).build();
}
public ComponentAccessor<Object> getComponentAccessor(String componentClassName) {
return (ComponentAccessor<Object>) components.get(componentClassName);
}
public String getFactoryName(String componentName) {
return factoryNames.get(componentName);
}
public MBeanMetadata getMBeanMetadata(String componentName) {
return mbeans.get(componentName);
}
public Collection<ModuleLifecycle> getModuleLifecycles() {
return moduleLifecycles;
}
private static final class Builder implements ModuleMetadataBuilder.ModuleBuilder {
private final List<ModuleLifecycle> moduleLifecycles = new ArrayList<>();
private final Map<String, ComponentAccessor<?>> components = new HashMap<>();
private final Map<String, String> factoryNames = new HashMap<>();
private final Map<String, MBeanMetadata> mbeans = new HashMap<>();
private Builder(ClassLoader classLoader, GlobalConfiguration globalConfiguration) {
Collection<ModuleMetadataBuilder> serviceLoader =
ServiceFinder.load(ModuleMetadataBuilder.class, ModuleRepository.class.getClassLoader(), classLoader);
Map<String, ModuleMetadataBuilder> modulesMap = new HashMap<>();
for (ModuleMetadataBuilder module : serviceLoader) {
ModuleMetadataBuilder existing = modulesMap.put(module.getModuleName(), module);
if (existing != null) {
throw new IllegalStateException("Multiple modules registered with name " + module.getModuleName());
}
}
List<ModuleMetadataBuilder> modules = sortModuleDependencies(modulesMap);
for (ModuleMetadataBuilder module : modules) {
// register static metadata
module.registerMetadata(this);
ModuleLifecycle moduleLifecycle = module.newModuleLifecycle();
moduleLifecycles.add(moduleLifecycle);
// register dynamic metadata
if (moduleLifecycle instanceof DynamicModuleMetadataProvider) {
((DynamicModuleMetadataProvider) moduleLifecycle).registerDynamicMetadata(this, globalConfiguration);
}
}
}
private ModuleRepository build() {
return new ModuleRepository(moduleLifecycles, components, factoryNames, mbeans);
}
private static List<ModuleMetadataBuilder> sortModuleDependencies(Map<String, ModuleMetadataBuilder> modulesMap) {
DependencyGraph<ModuleMetadataBuilder> dependencyGraph = new DependencyGraph<>();
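// Record one edge per declared dependency below; a missing required dependency is a configuration
// error, while a missing optional dependency is simply skipped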
for (ModuleMetadataBuilder module : modulesMap.values()) {
for (String dependencyName : module.getRequiredDependencies()) {
ModuleMetadataBuilder dependency = modulesMap.get(dependencyName);
if (dependency == null) {
throw new CacheConfigurationException("Missing required dependency: Module '"
+ module.getModuleName() + "' requires '" + dependencyName + "'");
}
dependencyGraph.addDependency(dependency, module);
}
for (String dependencyName : module.getOptionalDependencies()) {
ModuleMetadataBuilder dependency = modulesMap.get(dependencyName);
if (dependency != null) {
dependencyGraph.addDependency(dependency, module);
}
}
}
try {
List<ModuleMetadataBuilder> sortedBuilders = dependencyGraph.topologicalSort();
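// Modules that take part in no dependency edge are absent from the sort result, so append them in any order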
for (ModuleMetadataBuilder module : modulesMap.values()) {
if (!sortedBuilders.contains(module)) {
sortedBuilders.add(module);
}
}
return sortedBuilders;
} catch (CyclicDependencyException e) {
throw new CacheConfigurationException(e);
}
}
@Override
public void registerComponentAccessor(String componentClassName, List<String> factoryComponentNames,
ComponentAccessor<?> accessor) {
components.put(componentClassName, accessor);
for (String factoryComponentName : factoryComponentNames) {
factoryNames.put(factoryComponentName, componentClassName);
}
}
@Override
public void registerMBeanMetadata(String componentClassName, MBeanMetadata mBeanMetadata) {
mbeans.put(componentClassName, mBeanMetadata);
}
@Override
public String getFactoryName(String componentName) {
return factoryNames.get(componentName);
}
}
}
| 6,436 | 41.348684 | 121 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/DefaultCacheManagerAdmin.java |
package org.infinispan.manager;
import static org.infinispan.util.concurrent.CompletionStages.join;
import java.util.EnumSet;
import javax.security.auth.Subject;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.globalstate.GlobalConfigurationManager;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.impl.Authorizer;
/**
* The default implementation of {@link EmbeddedCacheManagerAdmin}
*
* @author Tristan Tarrant
* @since 9.2
*/
public class DefaultCacheManagerAdmin implements EmbeddedCacheManagerAdmin {
private final EmbeddedCacheManager cacheManager;
private final GlobalConfigurationManager clusterConfigurationManager;
private final Authorizer authorizer;
private final EnumSet<AdminFlag> flags;
private final Subject subject;
DefaultCacheManagerAdmin(EmbeddedCacheManager cm, Authorizer authorizer, EnumSet<AdminFlag> flags,
Subject subject, GlobalConfigurationManager clusterConfigurationManager) {
this.cacheManager = cm;
this.authorizer = authorizer;
this.clusterConfigurationManager = clusterConfigurationManager;
this.flags = flags;
this.subject = subject;
}
@Override
public <K, V> Cache<K, V> createCache(String cacheName, Configuration configuration) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.createCache(cacheName, configuration, flags));
return cacheManager.getCache(cacheName);
}
@Override
public <K, V> Cache<K, V> getOrCreateCache(String cacheName, Configuration configuration) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.getOrCreateCache(cacheName, configuration, flags));
return cacheManager.getCache(cacheName);
}
@Override
public <K, V> Cache<K, V> createCache(String cacheName, String template) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.createCache(cacheName, template, flags));
return cacheManager.getCache(cacheName);
}
@Override
public <K, V> Cache<K, V> getOrCreateCache(String cacheName, String template) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.getOrCreateCache(cacheName, template, flags));
return cacheManager.getCache(cacheName);
}
@Override
public void createTemplate(String name, Configuration configuration) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.createTemplate(name, configuration, flags));
}
@Override
public Configuration getOrCreateTemplate(String name, Configuration configuration) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.getOrCreateTemplate(name, configuration, flags));
return cacheManager.getCacheConfiguration(name);
}
@Override
public void removeTemplate(String name) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.removeTemplate(name, flags));
}
@Override
public void removeCache(String cacheName) {
authorizer.checkPermission(subject, AuthorizationPermission.CREATE);
join(clusterConfigurationManager.removeCache(cacheName, flags));
}
@Override
public EmbeddedCacheManagerAdmin withFlags(AdminFlag... flags) {
EnumSet<AdminFlag> newFlags = EnumSet.copyOf(this.flags);
for (AdminFlag flag : flags) newFlags.add(flag);
return new DefaultCacheManagerAdmin(cacheManager, authorizer, newFlags, subject, clusterConfigurationManager);
}
@Override
public EmbeddedCacheManagerAdmin withFlags(EnumSet<AdminFlag> flags) {
EnumSet<AdminFlag> newFlags = EnumSet.copyOf(this.flags);
newFlags.addAll(flags);
return new DefaultCacheManagerAdmin(cacheManager, authorizer, newFlags, subject, clusterConfigurationManager);
}
@Override
public EmbeddedCacheManagerAdmin withSubject(Subject subject) {
return new DefaultCacheManagerAdmin(cacheManager, authorizer, flags, subject, clusterConfigurationManager);
}
}
| 4,342 | 38.481818 | 116 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/EmbeddedCacheManagerStartupException.java |
package org.infinispan.manager;
import org.infinispan.commons.CacheException;
/**
* An exception to encapsulate an error when starting up a cache manager
*
* @author Manik Surtani
* @since 4.2.2
*/
public class EmbeddedCacheManagerStartupException extends CacheException {
public EmbeddedCacheManagerStartupException() {
}
public EmbeddedCacheManagerStartupException(Throwable cause) {
super(cause);
}
public EmbeddedCacheManagerStartupException(String msg) {
super(msg);
}
public EmbeddedCacheManagerStartupException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 624 | 22.148148 | 77 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/CacheContainer.java |
package org.infinispan.manager;
import org.infinispan.Cache;
import org.infinispan.commons.api.BasicCacheContainer;
import org.infinispan.commons.api.CacheContainerAdmin;
public interface CacheContainer extends BasicCacheContainer {
@Override
<K, V> Cache<K, V> getCache();
@Override
<K, V> Cache<K, V> getCache(String cacheName);
/**
* Provides access to administrative methods which affect the underlying cache container, such as cache creation and
* removal. If the underlying container is clustered or remote, the operations will affect all nodes.
*/
default CacheContainerAdmin<?, ?> administration() {
throw new UnsupportedOperationException();
}
}
| 699 | 29.434783 | 119 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/ClusterExecutor.java |
package org.infinispan.manager;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.configuration.global.TransportConfiguration;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.function.SerializableFunction;
import org.infinispan.util.function.SerializableRunnable;
import org.infinispan.util.function.TriConsumer;
/**
* A cluster executor that can be used to invoke a given command across the cluster. Note this executor is not
* tied to any cache.
* <p>
* This executor also implements {@link Executor} so that it may be used with methods such as
* {@link CompletableFuture#runAsync(Runnable, Executor)} or {@link CompletableFuture#supplyAsync(Supplier, Executor)}.
* Unfortunately though these invocations do not have explicitly defined Serializable {@link Runnable} or
* {@link Supplier} arguments and manual casting is required when using a lambda.
* Something like the following:
* {@code CompletableFuture.runAsync((Runnable & Serializable) () -> doSomething(), clusterExecutor)}. Although note
* that the {@link ClusterExecutor#submit(SerializableRunnable)} does this automatically for you.
* <p>
* Any method that returns a value should make sure the returned value is properly serializable or else it will
* be replaced with a {@link org.infinispan.commons.marshall.NotSerializableException}
* @author wburns
* @since 8.2
*/
public interface ClusterExecutor extends Executor {
/**
* {@inheritDoc}
* <p>
* This command will be run on the desired nodes, but no result is returned to notify the user of completion or
* failure. This method returns immediately while the runnable is processed asynchronously.
* @param command the command to execute
*/
@Override
default void execute(Runnable command) {
submit(command);
}
/**
* The same as {@link Executor#execute(Runnable)}, except the Runnable must also implement Serializable.
* <p>
* This method will be used automatically by lambdas, which prevents users from having to manually cast to
* a Serializable lambda.
* @param command the command to execute
*/
default void execute(SerializableRunnable command) {
execute((Runnable) command);
}
/**
* Submits the runnable to the desired nodes and returns a CompletableFuture that will be completed when
* all desired nodes complete the given command
* <p>
* If a node encounters an exception, the first one to respond with such an exception will set the responding
* future to an exceptional state passing the given exception.
* @param command the command to execute.
* @return a completable future that will signify the command is finished on all desired nodes when completed
*/
CompletableFuture<Void> submit(Runnable command);
/**
* The same as {@link ClusterExecutor#submit(Runnable)}, except the Runnable must also implement Serializable.
* <p>
* This method will be used automatically by lambdas, which prevents users from having to manually cast to
* a Serializable lambda.
* @param command the command to execute
* @return a completable future that will signify the command is finished on all desired nodes when completed
*/
default CompletableFuture<Void> submit(SerializableRunnable command) {
return submit((Runnable) command);
}
/**
* Submits the given command to the desired nodes and allows for handling of results as they return. The user
* provides a {@link TriConsumer} which will be called back each time for each desired node. Note that these callbacks
* can be called from different threads at the same time. A completable future is returned to the caller used
* for the sole purpose of being completed when all nodes have sent responses back.
* <p>
* If this cluster executor is running in failover mode via {@link ClusterExecutor#singleNodeSubmission(int)} the
* triConsumer will be called back each time a failure occurs as well. To satisfy ordering a retry is not resubmitted
* until after the callback has completed.
* <p>
* Note the {@link TriConsumer} is only run on the node where the task was submitted and thus doesn't need to be
* serialized.
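* <p>
* A minimal sketch that prints each node's response (the handler logic is illustrative):
* <pre><code>
* executor.submitConsumer(EmbeddedCacheManager::getAddress, (address, value, t) -> {
*    if (t != null) {
*       t.printStackTrace();
*    } else {
*       System.out.println("response from " + value);
*    }
* });
* </code></pre>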
* @param callable the task to execute
* @param triConsumer the tri-consumer to be called back upon for each node's result
* @param <V> the type of the task's result
* @return a completable future that will be completed after all results have been processed
*/
<V> CompletableFuture<Void> submitConsumer(Function<? super EmbeddedCacheManager, ? extends V> callable,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer);
/**
* The same as {@link ClusterExecutor#submitConsumer(Function, TriConsumer)}, except the Callable must also implement
* Serializable.
* <p>
* This method will be used automatically by lambdas, which prevents users from having to manually cast to
* a Serializable lambda.
* @param callable the task to execute
* @param triConsumer the tri-consumer to be called back upon for each node's result
* @param <V> the type of the task's result
* @return a completable future that will be completed after all results have been processed
*/
default <V> CompletableFuture<Void> submitConsumer(SerializableFunction<? super EmbeddedCacheManager, ? extends V> callable,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer) {
return submitConsumer((Function<? super EmbeddedCacheManager, ? extends V>) callable, triConsumer);
}
/**
* Sets a duration after which a command will timeout. This will cause the command to return a
* {@link org.infinispan.util.concurrent.TimeoutException} as the throwable.
* <p>The timeout parameter is used for both local and remote nodes. There are no guarantees as to whether
* the timed out command is interrupted.
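* <p>
* For example (the duration is illustrative):
* <pre><code>
* executor.timeout(10, TimeUnit.SECONDS)
*       .submit(() -> System.out.println("completes or times out within 10 seconds"));
* </code></pre>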
* @param time the duration for the timeout
* @param unit what unit the duration is in
* @return a cluster executor with a timeout applied for remote commands
*/
ClusterExecutor timeout(long time, TimeUnit unit);
/**
* When a command is submitted it will only be submitted to one of the available nodes. There are no strict
* requirements as to which node is chosen; the choice is implementation specific. Fail over is not used with the
* returned executor; if you desire fail over you should invoke {@link ClusterExecutor#singleNodeSubmission(int)}
* instead.
* @return a cluster executor with commands submitted to a single node
*/
ClusterExecutor singleNodeSubmission();
/**
* When a command is submitted it will only be submitted to one of the available nodes. There are no strict
* requirements as to which node is chosen; the choice is implementation specific. However, if a command fails,
* either in the command itself or via network issues, it will fail over: it will be retried on an available node
* up to the provided number of times, until it completes without an exception or the fail over count is reached.
* A {@link org.infinispan.util.concurrent.TimeoutException} is not retried, as this is the same exception that is
* thrown when using {@link ClusterExecutor#timeout(long, TimeUnit)}. Each time a fail over occurs any available
* node may be chosen; which one is left up to the implementation to decide.
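* <p>
* For example (the retry count is illustrative):
* <pre><code>
* executor.singleNodeSubmission(3)
*       .submit(() -> System.out.println("runs on one node, retried up to 3 times on failure"));
* </code></pre>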
* @param failOverCount how many times this executor will attempt a failover
* @return a cluster executor with fail over retries applied
*/
ClusterExecutor singleNodeSubmission(int failOverCount);
/**
* When a command is submitted it will be submitted to all of the available nodes that pass any configured
* filter. Fail over is not supported with this configuration. This is the default submission method.
* @return a cluster executor with commands submitted to all nodes
*/
ClusterExecutor allNodeSubmission();
/**
* Allows for filtering of address nodes dynamically per invocation. The predicate is applied to each member in the
* cluster at invocation to determine which targets to contact. Note that this method overrides any previous
* filtering that was done (ie. calling {@link ClusterExecutor#filterTargets(Collection)}).
* @param predicate the dynamic predicate applied each time an invocation is done
* @return an executor with the predicate filter applied to determine which nodes are contacted
*/
ClusterExecutor filterTargets(Predicate<? super Address> predicate);
/**
* Allows for filtering of address nodes by only allowing addresses that match the given execution policy to be used.
* Note this method overrides any previous filtering that was done (ie. calling
* {@link ClusterExecutor#filterTargets(Collection)}).
* <p>
* The execution policy is only used if the addresses are configured to be topology aware. That is that the
* {@link TransportConfiguration#hasTopologyInfo()} method returns true. If this is false this method will throw
* an {@link IllegalStateException}.
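* <p>
* For example, a sketch that targets only nodes on the same rack (requires topology-aware addresses):
* <pre><code>
* executor.filterTargets(ClusterExecutionPolicy.SAME_RACK)
*       .submit(() -> System.out.println("runs on same-rack nodes only"));
* </code></pre>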
* @param policy the policy to determine which nodes can be used
* @return an executor with the execution policy applied to determine which nodes are contacted
* @throws IllegalStateException thrown if topology info isn't available
*/
ClusterExecutor filterTargets(ClusterExecutionPolicy policy) throws IllegalStateException;
/**
* Allows for filtering of address nodes dynamically per invocation. The predicate is applied to each member that
* is part of the execution policy. Note that this method overrides any previous
* filtering that was done (ie. calling {@link ClusterExecutor#filterTargets(Collection)}).
* <p>
* The execution policy is only used if the addresses are configured to be topology aware. That is that the
* {@link TransportConfiguration#hasTopologyInfo()} method returns true. If this is false this method will throw
* an {@link IllegalStateException}.
* @param policy the execution policy applied before predicate to allow only nodes in that group
* @param predicate the dynamic predicate applied each time an invocation is done
* @return an executor with the execution policy and predicate both applied to determine which nodes are contacted
* @throws IllegalStateException thrown if topology info isn't available
*/
ClusterExecutor filterTargets(ClusterExecutionPolicy policy, Predicate<? super Address> predicate)
throws IllegalStateException;
/**
* Allows for filtering of address nodes by only allowing addresses in this collection from being contacted.
* Note that this method overrides any previous filtering that was done (ie. calling
* {@link ClusterExecutor#filterTargets(Predicate)}.
* @param addresses which nodes the executor invocations should go to
* @return an executor which will only contact nodes whose address are in the given collection
*/
ClusterExecutor filterTargets(Collection<Address> addresses);
/**
* Applies no filtering and will send any invocations to any/all current nodes.
* @return an executor with no filtering applied to target nodes
*/
ClusterExecutor noFilter();
}
| 11,765 | 53.725581 | 127 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/CacheManagerInfo.java |
package org.infinispan.manager;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.infinispan.commons.dataconversion.internal.JsonSerialization;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.ConfigurationManager;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.topology.LocalTopologyManager;
/**
* @since 10.0
*/
public class CacheManagerInfo implements JsonSerialization {
public static final List<String> LOCAL_NODE = Collections.singletonList("local");
private final DefaultCacheManager cacheManager;
private final ConfigurationManager configurationManager;
private final InternalCacheRegistry internalCacheRegistry;
private final LocalTopologyManager localTopologyManager;
public CacheManagerInfo(DefaultCacheManager cacheManager,
ConfigurationManager configurationManager,
InternalCacheRegistry internalCacheRegistry,
LocalTopologyManager localTopologyManager) {
this.cacheManager = cacheManager;
this.configurationManager = configurationManager;
this.internalCacheRegistry = internalCacheRegistry;
this.localTopologyManager = localTopologyManager;
}
public String getCoordinatorAddress() {
Transport t = cacheManager.getTransport();
return t == null ? "N/A" : t.getCoordinator().toString();
}
public boolean isCoordinator() {
return cacheManager.getTransport() != null && cacheManager.getTransport().isCoordinator();
}
public String getCacheManagerStatus() {
return cacheManager.getStatus().toString();
}
public Set<BasicCacheInfo> getDefinedCaches() {
return cacheManager.getCacheNames().stream().map(c -> {
boolean started = cacheManager.getCaches().containsKey(c);
return new BasicCacheInfo(c, started);
}).collect(Collectors.toSet());
}
public Set<String> getCacheConfigurationNames() {
Set<String> names = new HashSet<>(configurationManager.getDefinedConfigurations());
internalCacheRegistry.filterPrivateCaches(names);
if (names.isEmpty())
return Collections.emptySet();
else
return Immutables.immutableSetWrap(names);
}
public long getCreatedCacheCount() {
return cacheManager.getCaches().keySet().stream().filter(c -> !internalCacheRegistry.isInternalCache(c)).count();
}
public long getRunningCacheCount() {
return cacheManager.getCaches().keySet().stream().filter(c -> cacheManager.isRunning(c) && !internalCacheRegistry.isInternalCache(c)).count();
}
public String getVersion() {
return Version.getVersion();
}
public String getName() {
return configurationManager.getGlobalConfiguration().cacheManagerName();
}
public String getNodeName() {
if (cacheManager.getTransport() == null) return getNodeAddress();
return cacheManager.getTransport().localNodeName();
}
public String getNodeAddress() {
return cacheManager.getLogicalAddressString();
}
public String getPhysicalAddresses() {
if (cacheManager.getTransport() == null) return "local";
List<Address> address = cacheManager.getTransport().getPhysicalAddresses();
return address == null ? "local" : address.toString();
}
public List<String> getPhysicalAddressesRaw() {
if (cacheManager.getTransport() == null) return LOCAL_NODE;
List<Address> address = cacheManager.getTransport().getPhysicalAddresses();
return address == null
? LOCAL_NODE
: address.stream().map(Object::toString).collect(Collectors.toList());
}
public List<String> getClusterMembers() {
if (cacheManager.getTransport() == null) return LOCAL_NODE;
return cacheManager.getTransport().getMembers().stream().map(Objects::toString).collect(Collectors.toList());
}
public List<String> getClusterMembersPhysicalAddresses() {
if (cacheManager.getTransport() == null) return LOCAL_NODE;
List<Address> addressList = cacheManager.getTransport().getMembersPhysicalAddresses();
return addressList.stream().map(Objects::toString).collect(Collectors.toList());
}
public int getClusterSize() {
if (cacheManager.getTransport() == null) return 1;
return cacheManager.getTransport().getMembers().size();
}
public String getClusterName() {
return configurationManager.getGlobalConfiguration().transport().clusterName();
}
public String getLocalSite() {
if (cacheManager.getTransport() == null) return "local";
return cacheManager.getTransport().localSiteName();
}
public Collection<String> getSites() {
return Optional.ofNullable(cacheManager.getTransport())
.map(Transport::getSitesView)
.orElseGet(Collections::emptySet);
}
public boolean isRelayNode() {
Transport transport = cacheManager.getTransport();
return transport != null && transport.isSiteCoordinator();
}
public Boolean isRebalancingEnabled() {
try {
return localTopologyManager.isRebalancingEnabled();
} catch (Exception e) {
// Ignore the error
return null;
}
}
public Collection<String> getRelayNodesAddress() {
Transport transport = cacheManager.getTransport();
if (transport == null) {
return LOCAL_NODE;
}
return transport.getRelayNodesAddress().stream().map(Objects::toString).collect(Collectors.toList());
}
@Override
public Json toJson() {
Json result = Json.object()
.set("version", getVersion())
.set("name", getName())
.set("coordinator", isCoordinator())
.set("cache_configuration_names", Json.make(getCacheConfigurationNames()))
.set("cluster_name", getClusterName())
.set("physical_addresses", getPhysicalAddresses())
.set("coordinator_address", getCoordinatorAddress())
.set("cache_manager_status", getCacheManagerStatus())
.set("created_cache_count", getCreatedCacheCount())
.set("running_cache_count", getRunningCacheCount())
.set("node_address", getNodeAddress())
.set("cluster_members", Json.make(getClusterMembers()))
.set("cluster_members_physical_addresses", Json.make(getClusterMembersPhysicalAddresses()))
.set("cluster_size", getClusterSize())
.set("defined_caches", Json.make(getDefinedCaches()))
.set("local_site", getLocalSite())
.set("relay_node", isRelayNode())
.set("relay_nodes_address", Json.make(getRelayNodesAddress()))
.set("sites_view", Json.make(getSites()));
Boolean rebalancingEnabled = isRebalancingEnabled();
if (rebalancingEnabled != null) {
result.set("rebalancing_enabled", rebalancingEnabled);
}
return result;
}
static class BasicCacheInfo implements JsonSerialization {
String name;
boolean started;
BasicCacheInfo(String name, boolean started) {
this.name = name;
this.started = started;
}
public String getName() {
return name;
}
public boolean isStarted() {
return started;
}
@Override
public Json toJson() {
return Json.object("name", name).set("started", started);
}
}
}
| 7,821 | 34.880734 | 148 | java |
null | infinispan-main/core/src/main/java/org/infinispan/manager/DefaultCacheManager.java |
package org.infinispan.manager;
import static org.infinispan.factories.KnownComponentNames.CACHE_DEPENDENCY_GRAPH;
import static org.infinispan.util.logging.Log.CONFIG;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import javax.security.auth.Subject;
import org.infinispan.Cache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.api.CacheContainerAdmin;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.io.ConfigurationResourceResolvers;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.internal.BlockHoundUtil;
import org.infinispan.commons.util.FileLookupFactory;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.ConfigurationManager;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.format.PropertyFormatter;
import org.infinispan.configuration.global.GlobalAuthorizationConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.global.TransportConfiguration;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ParserRegistry;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.InternalCacheFactory;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.globalstate.GlobalConfigurationManager;
import org.infinispan.health.Health;
import org.infinispan.health.impl.HealthImpl;
import org.infinispan.health.impl.jmx.HealthJMXExposerImpl;
import org.infinispan.health.jmx.HealthJMXExposer;
import org.infinispan.jmx.CacheManagerJmxRegistration;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.jmx.annotations.Parameter;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.impl.ClusterExecutors;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.security.AuditContext;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.GlobalSecurityManager;
import org.infinispan.security.Security;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.security.impl.AuthorizationManagerImpl;
import org.infinispan.security.impl.AuthorizationMapperContextImpl;
import org.infinispan.security.impl.Authorizer;
import org.infinispan.security.impl.SecureCacheImpl;
import org.infinispan.stats.CacheContainerStats;
import org.infinispan.stats.impl.CacheContainerStatsImpl;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.util.ByteString;
import org.infinispan.util.CyclicDependencyException;
import org.infinispan.util.DependencyGraph;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A <tt>CacheManager</tt> is the primary mechanism for retrieving a {@link Cache} instance, and is often used as a
* starting point to using the {@link Cache}.
* <p/>
* <tt>CacheManager</tt>s are heavyweight objects, and we foresee no more than one <tt>CacheManager</tt> being used per
* JVM (unless specific configuration requirements require more than one; but either way, this would be a minimal and
* finite number of instances).
* <p/>
* Constructing a <tt>CacheManager</tt> is done via one of its constructors, which optionally take in a
* {@link org.infinispan.configuration.cache.Configuration} or a path or URL to a configuration XML file.
* <p/>
* Lifecycle - <tt>CacheManager</tt>s have a lifecycle (it implements {@link Lifecycle}) and the default constructors
* also call {@link #start()}. Overloaded versions of the constructors are available, that do not start the
* <tt>CacheManager</tt>, although it must be kept in mind that <tt>CacheManager</tt>s need to be started before they
* can be used to create <tt>Cache</tt> instances.
* <p/>
* Once constructed, <tt>CacheManager</tt>s should be made available to any component that requires a <tt>Cache</tt>,
* via JNDI or via some other mechanism such as an IoC container.
* <p/>
* You obtain <tt>Cache</tt> instances from the <tt>CacheManager</tt> by using one of the overloaded
* <tt>getCache()</tt>, methods. Note that with <tt>getCache()</tt>, there is no guarantee that the instance you get is
* brand-new and empty, since caches are named and shared. Because of this, the <tt>CacheManager</tt> also acts as a
* repository of <tt>Cache</tt>s, and is an effective mechanism of looking up or creating <tt>Cache</tt>s on demand.
* <p/>
* When the system shuts down, it should call {@link #stop()} on the <tt>CacheManager</tt>. This will ensure all caches
* within its scope are properly stopped as well.
* <p/>
* Sample usage:
* <pre><code>
* EmbeddedCacheManager manager = new DefaultCacheManager("my-config-file.xml");
* Cache<String, Person> entityCache = manager.getCache("myEntityCache");
* entityCache.put("aPerson", new Person());
*
* ConfigurationBuilder confBuilder = new ConfigurationBuilder();
* confBuilder.clustering().cacheMode(CacheMode.REPL_SYNC);
* manager.createCache("myReplicatedCache", confBuilder.build());
* Cache<String, String> replicatedCache = manager.getCache("myReplicatedCache");
* </code></pre>
*
* @author Manik Surtani
* @author Galder Zamarreño
* @since 4.0
*/
@Scope(Scopes.GLOBAL)
@SurvivesRestarts
@MBean(objectName = DefaultCacheManager.OBJECT_NAME, description = "Component that acts as a manager, factory and container for caches in the system.")
public class DefaultCacheManager implements EmbeddedCacheManager {
public static final String OBJECT_NAME = "CacheManager";
private static final Log log = LogFactory.getLog(DefaultCacheManager.class);
private final ConcurrentMap<String, CompletableFuture<Cache<?, ?>>> caches = new ConcurrentHashMap<>();
private final GlobalComponentRegistry globalComponentRegistry;
private final Authorizer authorizer;
private final DependencyGraph<String> cacheDependencyGraph = new DependencyGraph<>();
private final CacheContainerStats stats;
private final Health health;
private final ConfigurationManager configurationManager;
private final String defaultCacheName;
private final Lock lifecycleLock = new ReentrantLock();
private final Condition lifecycleCondition = lifecycleLock.newCondition();
private volatile ComponentStatus status = ComponentStatus.INSTANTIATED;
private final DefaultCacheManagerAdmin cacheManagerAdmin;
private final ClassAllowList classAllowList;
private final CacheManagerInfo cacheManagerInfo;
// Keep the transport around so async view listeners can still see the address after stop
private volatile Transport transport;
// When enabled, isRunning(name) sets the thread-local value and getCache(name) verifies it
private static ThreadLocal<String> getCacheBlockingCheck;
/**
* Constructs and starts a default instance of the CacheManager, using configuration defaults. See
* {@link org.infinispan.configuration.cache.Configuration} and
* {@link org.infinispan.configuration.global.GlobalConfiguration} for details of these defaults.
*/
public DefaultCacheManager() {
this(null, null, true);
}
/**
* Constructs a default instance of the CacheManager, using configuration defaults. See
* {@link org.infinispan.configuration.cache.Configuration} and
* {@link org.infinispan.configuration.global.GlobalConfiguration} for details of these defaults.
*
* @param start if true, the cache manager is started
*/
public DefaultCacheManager(boolean start) {
this(null, null, start);
}
/**
* Constructs and starts a new instance of the CacheManager, using the default configuration passed in. See
* {@link org.infinispan.configuration.cache.Configuration} and
* {@link org.infinispan.configuration.global.GlobalConfiguration} for details of these defaults.
*
* @param defaultConfiguration configuration to use as a template for all caches created
* @deprecated Since 11.0, please use {@link #DefaultCacheManager(ConfigurationBuilderHolder, boolean)} instead.
*/
@Deprecated
public DefaultCacheManager(Configuration defaultConfiguration) {
this(null, defaultConfiguration, true);
}
/**
* Constructs a new instance of the CacheManager, using the default configuration passed in. See
* {@link org.infinispan.configuration.global.GlobalConfiguration} for details of these defaults.
*
* @param defaultConfiguration configuration file to use as a template for all caches created
* @param start if true, the cache manager is started
* @deprecated Since 11.0, please use {@link #DefaultCacheManager(ConfigurationBuilderHolder, boolean)} instead.
*/
@Deprecated
public DefaultCacheManager(Configuration defaultConfiguration, boolean start) {
this(null, defaultConfiguration, start);
}
/**
* Constructs and starts a new instance of the CacheManager, using the global configuration passed in, and system
* defaults for the default named cache configuration. See {@link org.infinispan.configuration.cache.Configuration}
* for details of these defaults.
*
* @param globalConfiguration GlobalConfiguration to use for all caches created
*/
public DefaultCacheManager(GlobalConfiguration globalConfiguration) {
this(globalConfiguration, null, true);
}
/**
* Constructs a new instance of the CacheManager, using the global configuration passed in, and system defaults for
* the default named cache configuration. See {@link org.infinispan.configuration.cache.Configuration} for details
* of these defaults.
*
* @param globalConfiguration GlobalConfiguration to use for all caches created
* @param start if true, the cache manager is started.
*/
public DefaultCacheManager(GlobalConfiguration globalConfiguration, boolean start) {
this(globalConfiguration, null, start);
}
/**
* Constructs and starts a new instance of the CacheManager, using the global and default configurations passed in.
* If either of these are null, system defaults are used.
*
* @param globalConfiguration global configuration to use. If null, a default instance is created.
* @param defaultConfiguration default configuration to use. If null, a default instance is created.
* @deprecated Since 11.0, please use {@link #DefaultCacheManager(ConfigurationBuilderHolder, boolean)} instead.
*/
@Deprecated
public DefaultCacheManager(GlobalConfiguration globalConfiguration, Configuration defaultConfiguration) {
this(globalConfiguration, defaultConfiguration, true);
}
/**
* Constructs a new instance of the CacheManager, using the global and default configurations passed in. If either of
* these are null, system defaults are used.
*
* @param globalConfiguration global configuration to use. If null, a default instance is created.
* @param defaultConfiguration default configuration to use. If null, a default instance is created.
* @param start if true, the cache manager is started
* @deprecated Since 11.0, please use {@link #DefaultCacheManager(ConfigurationBuilderHolder, boolean)} instead.
*/
@Deprecated
public DefaultCacheManager(GlobalConfiguration globalConfiguration, Configuration defaultConfiguration,
boolean start) {
globalConfiguration = globalConfiguration == null ? new GlobalConfigurationBuilder().build() : globalConfiguration;
this.configurationManager = new ConfigurationManager(globalConfiguration);
if (defaultConfiguration != null) {
if (globalConfiguration.defaultCacheName().isPresent()) {
defaultCacheName = globalConfiguration.defaultCacheName().get();
} else {
throw CONFIG.defaultCacheConfigurationWithoutName();
}
configurationManager.putConfiguration(defaultCacheName, defaultConfiguration);
} else {
if (globalConfiguration.defaultCacheName().isPresent()) {
throw CONFIG.missingDefaultCacheDeclaration(globalConfiguration.defaultCacheName().get());
} else {
defaultCacheName = null;
}
}
ModuleRepository moduleRepository = ModuleRepository.newModuleRepository(globalConfiguration.classLoader(), globalConfiguration);
this.classAllowList = globalConfiguration.serialization().allowList().create();
this.globalComponentRegistry = new GlobalComponentRegistry(globalConfiguration, this, caches.keySet(),
moduleRepository, configurationManager);
InternalCacheRegistry internalCacheRegistry = globalComponentRegistry.getComponent(InternalCacheRegistry.class);
this.globalComponentRegistry.registerComponent(cacheDependencyGraph, CACHE_DEPENDENCY_GRAPH, false);
this.authorizer = new Authorizer(globalConfiguration.security(), AuditContext.CACHEMANAGER, globalConfiguration.cacheManagerName(), null);
this.globalComponentRegistry.registerComponent(authorizer, Authorizer.class);
this.stats = new CacheContainerStatsImpl(this);
globalComponentRegistry.registerComponent(stats, CacheContainerStats.class);
health = new HealthImpl(this, globalComponentRegistry.getComponent(InternalCacheRegistry.class));
cacheManagerInfo = new CacheManagerInfo(this, configurationManager, internalCacheRegistry, globalComponentRegistry.getComponent(
LocalTopologyManager.class));
globalComponentRegistry.registerComponent(new HealthJMXExposerImpl(health), HealthJMXExposer.class);
this.cacheManagerAdmin = new DefaultCacheManagerAdmin(this, authorizer, EnumSet.noneOf(CacheContainerAdmin.AdminFlag.class), null,
globalComponentRegistry.getComponent(GlobalConfigurationManager.class));
if (start)
start();
}
/**
* Constructs and starts a new instance of the CacheManager, using the configuration file name passed in. This
* constructor first searches for the named file on the classpath, and failing that, treats the file name as an
* absolute path.
*
* @param configurationFile name of configuration file to use as a template for all caches created
* @throws java.io.IOException if there is a problem with the configuration file.
*/
public DefaultCacheManager(String configurationFile) throws IOException {
this(configurationFile, true);
}
/**
* Constructs a new instance of the CacheManager, using the configuration file name passed in. This constructor first
* searches for the named file on the classpath, and failing that, treats the file name as an absolute path.
*
* @param configurationFile name of configuration file to use as a template for all caches created
* @param start if true, the cache manager is started
* @throws java.io.IOException if there is a problem with the configuration file.
*/
public DefaultCacheManager(String configurationFile, boolean start) throws IOException {
this(FileLookupFactory.newInstance().lookupFileStrict(configurationFile, Thread.currentThread().getContextClassLoader()), start);
}
/**
* Constructs and starts a new instance of the CacheManager, using the input stream passed in to read configuration
* file contents.
*
* @param configurationStream stream containing configuration file contents, to use as a template for all caches
* created
* @throws java.io.IOException if there is a problem with the configuration stream.
*/
public DefaultCacheManager(InputStream configurationStream) throws IOException {
this(configurationStream, true);
}
/**
* Constructs a new instance of the CacheManager, using the input stream passed in to read configuration file
* contents.
*
* @param configurationStream stream containing configuration file contents, to use as a template for all caches
* created
* @param start if true, the cache manager is started
* @throws java.io.IOException if there is a problem reading the configuration stream
*/
public DefaultCacheManager(InputStream configurationStream, boolean start) throws IOException {
this(new ParserRegistry().parse(configurationStream, ConfigurationResourceResolvers.DEFAULT, MediaType.APPLICATION_XML), start);
}
/**
 * Constructs a new instance of the CacheManager, using the URL passed in to read configuration
 * file contents.
 *
 * @param configurationURL URL of the configuration file to use as a template for all caches
 *                         created
* @param start if true, the cache manager is started
* @throws java.io.IOException if there is a problem reading the configuration stream
*/
public DefaultCacheManager(URL configurationURL, boolean start) throws IOException {
this(new ParserRegistry().parse(configurationURL), start);
}
/**
* Constructs a new instance of the CacheManager, using the holder passed in to read configuration settings.
*
* @param holder holder containing configuration settings, to use as a template for all caches created
* @param start if true, the cache manager is started
*/
public DefaultCacheManager(ConfigurationBuilderHolder holder, boolean start) {
try {
configurationManager = new ConfigurationManager(holder);
GlobalConfiguration globalConfiguration = configurationManager.getGlobalConfiguration();
classAllowList = globalConfiguration.serialization().allowList().create();
defaultCacheName = globalConfiguration.defaultCacheName().orElse(null);
ModuleRepository moduleRepository = ModuleRepository.newModuleRepository(globalConfiguration.classLoader(), globalConfiguration);
globalComponentRegistry = new GlobalComponentRegistry(globalConfiguration, this, caches.keySet(),
moduleRepository, configurationManager);
InternalCacheRegistry internalCacheRegistry = globalComponentRegistry.getComponent(InternalCacheRegistry.class);
globalComponentRegistry.registerComponent(cacheDependencyGraph, CACHE_DEPENDENCY_GRAPH, false);
stats = new CacheContainerStatsImpl(this);
globalComponentRegistry.registerComponent(stats, CacheContainerStats.class);
health = new HealthImpl(this, internalCacheRegistry);
cacheManagerInfo = new CacheManagerInfo(this, getConfigurationManager(), internalCacheRegistry, globalComponentRegistry.getComponent(LocalTopologyManager.class));
globalComponentRegistry.registerComponent(new HealthJMXExposerImpl(health), HealthJMXExposer.class);
authorizer = new Authorizer(globalConfiguration.security(), AuditContext.CACHEMANAGER, globalConfiguration.cacheManagerName(), null);
globalComponentRegistry.registerComponent(authorizer, Authorizer.class);
cacheManagerAdmin = new DefaultCacheManagerAdmin(this, authorizer, EnumSet.noneOf(CacheContainerAdmin.AdminFlag.class),
null, globalComponentRegistry.getComponent(GlobalConfigurationManager.class));
} catch (CacheConfigurationException ce) {
throw ce;
} catch (RuntimeException re) {
throw new CacheConfigurationException(re);
}
if (start)
start();
}
private DefaultCacheManager(DefaultCacheManager original) {
this.authorizer = original.authorizer;
this.configurationManager = original.configurationManager;
this.health = original.health;
this.classAllowList = original.classAllowList;
this.cacheManagerInfo = original.cacheManagerInfo;
this.cacheManagerAdmin = original.cacheManagerAdmin;
this.defaultCacheName = original.defaultCacheName;
this.stats = original.stats;
this.globalComponentRegistry = original.globalComponentRegistry;
}
@Override
public Configuration defineConfiguration(String name, Configuration configuration) {
return doDefineConfiguration(name, configuration);
}
@Override
public Configuration defineConfiguration(String name, String template, Configuration configurationOverride) {
if (template != null) {
Configuration c = configurationManager.getConfiguration(template, true);
if (c == null) {
throw CONFIG.undeclaredConfiguration(template, name);
} else if (configurationOverride == null) {
return doDefineConfiguration(name, c);
} else {
return doDefineConfiguration(name, c, configurationOverride);
}
}
return doDefineConfiguration(name, configurationOverride);
}
private Configuration doDefineConfiguration(String name, Configuration... configurations) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
assertIsNotTerminated();
if (name == null || configurations == null)
throw new NullPointerException("Null arguments not allowed");
if (!ByteString.isValid(name))
throw CONFIG.invalidNameSize(name);
Configuration existing = configurationManager.getConfiguration(name, false);
if (existing != null) {
throw CONFIG.configAlreadyDefined(name);
}
ConfigurationBuilder builder = new ConfigurationBuilder();
boolean template = true;
for (Configuration configuration : configurations) {
if (configuration != null) {
builder.read(configuration, Combine.DEFAULT);
template = template && configuration.isTemplate();
} else {
throw new NullPointerException("Null arguments not allowed");
}
}
builder.template(template);
return configurationManager.putConfiguration(name, builder);
}
@Override
public void undefineConfiguration(String configurationName) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
Configuration existing = configurationManager.getConfiguration(configurationName, false);
if (existing != null) {
for (CompletableFuture<Cache<?, ?>> cacheFuture : caches.values()) {
Cache<?, ?> cache = cacheFuture.exceptionally(t -> null).join();
if (cache != null && cache.getCacheConfiguration() == existing && cache.getStatus() != ComponentStatus.TERMINATED) {
throw CONFIG.configurationInUse(configurationName);
}
}
configurationManager.removeConfiguration(configurationName);
globalComponentRegistry.removeCache(configurationName);
}
}
@Override
public <K, V> Cache<K, V> createCache(String name, Configuration configuration) {
defineConfiguration(name, configuration);
return getCache(name);
}
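   // Usage sketch (illustrative, not part of the original source; "users" is a hypothetical name):
   // createCache(name, configuration) is simply defineConfiguration followed by getCache.
   //
   //    Configuration cfg = new ConfigurationBuilder().build();
   //    Cache<String, String> users = manager.createCache("users", cfg);
   //
   // Calling it twice with the same name fails, because doDefineConfiguration rejects an
   // already-defined configuration (CONFIG.configAlreadyDefined).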
/**
* Retrieves the default cache associated with this cache manager. Note that the default cache does not need to be
 * explicitly created with {@link #createCache(String, Configuration)} since it is automatically created lazily when first
* used.
* <p/>
* As such, this method is always guaranteed to return the default cache.
*
* @return the default cache.
*/
@Override
public <K, V> Cache<K, V> getCache() {
if (defaultCacheName == null) {
throw CONFIG.noDefaultCache();
}
return internalGetCache(defaultCacheName);
}
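   // Illustrative sketch (not original source): getCache() only works when the global
   // configuration names a default cache, e.g.
   //
   //    GlobalConfigurationBuilder global = GlobalConfigurationBuilder.defaultClusteredBuilder();
   //    global.defaultCacheName("default");
   //
   // Without that, getCache() throws (see CONFIG.noDefaultCache() above).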
/**
* Retrieves a named cache from the system. If the cache has been previously created with the same name, the running
* cache instance is returned. Otherwise, this method attempts to create the cache first.
* <p/>
* When creating a new cache, this method will use the configuration passed in to the CacheManager on construction,
* as a template, and then optionally apply any overrides previously defined for the named cache using the
* {@link #defineConfiguration(String, Configuration)} or {@link #defineConfiguration(String, String, Configuration)}
* methods, or declared in the configuration file.
*
* @param cacheName name of cache to retrieve
* @return a cache instance identified by cacheName
*/
@Override
public <K, V> Cache<K, V> getCache(String cacheName) {
return internalGetCache(cacheName);
}
private <K, V> Cache<K, V> internalGetCache(String cacheName) {
if (cacheName == null)
throw new NullPointerException("Null arguments not allowed");
assertIsNotTerminated();
if (getCacheBlockingCheck != null) {
if (cacheName.equals(getCacheBlockingCheck.get())) {
// isRunning() was called before getCache(), all good
getCacheBlockingCheck.set(null);
} else {
// isRunning() was not called, let BlockHound know getCache() is potentially blocking
BlockHoundUtil.pretendBlock();
}
}
// No need to block if another thread (or even the current thread) is starting the global components
// Because each cache component will wait for the global components it depends on
      // And ComponentRegistry depends on GlobalComponentRegistry.ModuleInitializer
internalStart(false);
CompletableFuture<Cache<?, ?>> cacheFuture = caches.get(cacheName);
if (cacheFuture != null) {
try {
return (Cache<K, V>) cacheFuture.join();
} catch (CompletionException e) {
caches.computeIfPresent(cacheName, (k, v) -> {
if (v == cacheFuture) {
return null;
}
return v;
});
}
}
return createCache(cacheName);
}
@Override
public boolean cacheExists(String cacheName) {
return caches.containsKey(cacheName);
}
@Override
public <K, V> Cache<K, V> getCache(String cacheName, boolean createIfAbsent) {
boolean cacheExists = cacheExists(cacheName);
if (!cacheExists && !createIfAbsent)
return null;
else {
return internalGetCache(cacheName);
}
}
@Override
public EmbeddedCacheManager startCaches(final String... cacheNames) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.LIFECYCLE);
internalStart(false);
Map<String, Thread> threads = new HashMap<>(cacheNames.length);
final AtomicReference<RuntimeException> exception = new AtomicReference<>(null);
for (final String cacheName : cacheNames) {
if (!threads.containsKey(cacheName)) {
String threadName = "CacheStartThread," + identifierString() + "," + cacheName;
Thread thread = new Thread(threadName) {
@Override
public void run() {
try {
createCache(cacheName);
} catch (RuntimeException e) {
exception.set(e);
} catch (Throwable t) {
exception.set(new RuntimeException(t));
}
}
};
thread.start();
threads.put(cacheName, thread);
}
}
try {
for (Thread thread : threads.values()) {
thread.join();
}
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new CacheException("Interrupted while waiting for the caches to start");
      }
RuntimeException runtimeException = exception.get();
if (runtimeException != null) {
throw runtimeException;
}
return this;
}
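   // Usage sketch (illustrative; cache names are hypothetical): start several caches in
   // parallel rather than sequentially.
   //
   //    manager.startCaches("users", "orders", "sessions");
   //
   // Each cache starts on its own thread; after all threads are joined, the first recorded
   // failure (if any) is rethrown.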
@Override
public void removeCache(String cacheName) {
cacheManagerAdmin.removeCache(cacheName);
}
/**
* {@inheritDoc}
*/
@Override
public List<Address> getMembers() {
Transport t = getTransport();
return t == null ? null : t.getMembers();
}
/**
* {@inheritDoc}
*/
@Override
public Address getAddress() {
Transport t = getTransport();
return t == null ? null : t.getAddress();
}
/**
* {@inheritDoc}
*/
@Override
public Address getCoordinator() {
Transport t = getTransport();
return t == null ? null : t.getCoordinator();
}
@ManagedAttribute(description = "The logical address of the cluster's coordinator", displayName = "Coordinator address")
public String getCoordinatorAddress() {
return cacheManagerInfo.getCoordinatorAddress();
}
/**
* {@inheritDoc}
*/
@Override
@ManagedAttribute(description = "Indicates whether this node is coordinator", displayName = "Is coordinator?")
public boolean isCoordinator() {
return cacheManagerInfo.isCoordinator();
}
private <K, V> Cache<K, V> createCache(String cacheName) {
final boolean trace = log.isTraceEnabled();
LogFactory.pushNDC(cacheName, trace);
try {
return wireAndStartCache(cacheName);
} finally {
LogFactory.popNDC(trace);
}
}
   /**
    * Creates and starts the cache, returning the existing instance instead if another thread
    * already created a running cache with this name.
    */
private <K, V> Cache<K, V> wireAndStartCache(String cacheName) {
Configuration c = configurationManager.getConfiguration(cacheName);
if (c == null) {
throw CONFIG.noSuchCacheConfiguration(cacheName);
}
if (c.security().authorization().enabled()) {
// Don't even attempt to wire anything if we don't have LIFECYCLE privileges
authorizer.checkPermission(c.security().authorization(), getSubject(), AuthorizationPermission.LIFECYCLE, null);
}
if (c.isTemplate()) {
throw CONFIG.templateConfigurationStartAttempt(cacheName);
}
CompletableFuture<Cache<?, ?>> cacheFuture = new CompletableFuture<>();
CompletableFuture<Cache<?, ?>> oldFuture = caches.computeIfAbsent(cacheName, name -> {
assertIsNotTerminated();
return cacheFuture;
});
Cache<K, V> cache = null;
try {
if (oldFuture != cacheFuture) {
cache = (Cache<K, V>) oldFuture.join();
if (!cache.getStatus().isTerminated()) {
return cache;
}
}
} catch (CompletionException ce) {
throw ((CacheException) ce.getCause());
}
try {
log.debugf("Creating cache %s on %s", cacheName, identifierString());
if (cache == null) {
cache = new InternalCacheFactory<K, V>().createCache(c, globalComponentRegistry, cacheName);
if (cache.getAdvancedCache().getAuthorizationManager() != null) {
cache = new SecureCacheImpl<>(cache.getAdvancedCache());
}
}
ComponentRegistry cr = SecurityActions.getUnwrappedCache(cache).getAdvancedCache().getComponentRegistry();
boolean notStartedYet =
cr.getStatus() != ComponentStatus.RUNNING && cr.getStatus() != ComponentStatus.INITIALIZING;
// start the cache-level components
cache.start();
cacheFuture.complete(cache);
boolean needToNotifyCacheStarted = notStartedYet && cr.getStatus() == ComponentStatus.RUNNING;
if (needToNotifyCacheStarted) {
globalComponentRegistry.notifyCacheStarted(cacheName);
}
log.tracef("Cache %s is ready", cacheName);
return cache;
} catch (CacheException e) {
cacheFuture.completeExceptionally(e);
throw e;
} catch (Throwable t) {
cacheFuture.completeExceptionally(new CacheException(t));
throw t;
}
}
@Override
public void start() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.LIFECYCLE);
internalStart(true);
}
/**
* @param block {@code true} when we need all the global components to be running.
*/
private void internalStart(boolean block) {
if (status == ComponentStatus.RUNNING)
return;
final GlobalConfiguration globalConfiguration = configurationManager.getGlobalConfiguration();
lifecycleLock.lock();
try {
while (block && status == ComponentStatus.INITIALIZING) {
lifecycleCondition.await();
}
if (status != ComponentStatus.INSTANTIATED) {
return;
}
log.debugf("Starting cache manager %s", identifierString());
initializeSecurity(globalConfiguration);
updateStatus(ComponentStatus.INITIALIZING);
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new CacheException("Interrupted waiting for the cache manager to start");
} finally {
lifecycleLock.unlock();
}
try {
globalComponentRegistry.getComponent(CacheManagerJmxRegistration.class).start();
globalComponentRegistry.start();
log.debugf("Started cache manager %s", identifierString());
} catch (Exception e) {
throw new EmbeddedCacheManagerStartupException(e);
} finally {
updateStatus(globalComponentRegistry.getStatus());
}
}
private void initializeSecurity(GlobalConfiguration globalConfiguration) {
GlobalAuthorizationConfiguration authorizationConfig = globalConfiguration.security().authorization();
if (authorizationConfig.enabled()) {
AuthorizationMapperContextImpl context = new AuthorizationMapperContextImpl(this);
authorizationConfig.principalRoleMapper().setContext(context);
authorizationConfig.rolePermissionMapper().setContext(context);
}
}
private void updateStatus(ComponentStatus status) {
lifecycleLock.lock();
try {
this.status = status;
lifecycleCondition.signalAll();
} finally {
lifecycleLock.unlock();
}
}
private void terminate(String cacheName) {
CompletableFuture<Cache<?, ?>> cacheFuture = this.caches.get(cacheName);
if (cacheFuture != null) {
Cache<?, ?> cache = cacheFuture.join();
if (cache.getStatus().isTerminated()) {
log.tracef("Ignoring cache %s, it is already terminated.", cacheName);
return;
}
cache.stop();
}
}
   /**
    * Shuts down the cluster-wide resources of the CacheManager, calling {@link Cache#shutdown()} on both user and
    * internal caches to ensure that they are safely terminated.
    */
public void shutdownAllCaches() {
log.tracef("Attempting to shutdown cache manager: " + getAddress());
authorizer.checkPermission(getSubject(), AuthorizationPermission.LIFECYCLE);
Set<String> cachesToShutdown = new LinkedHashSet<>(this.caches.size());
// stop ordered caches first
try {
List<String> ordered = cacheDependencyGraph.topologicalSort();
cachesToShutdown.addAll(ordered);
} catch (CyclicDependencyException e) {
CONTAINER.stopOrderIgnored();
}
// The caches map includes the default cache
cachesToShutdown.addAll(caches.keySet());
log.tracef("Cache shutdown order: %s", cachesToShutdown);
for (String cacheName : cachesToShutdown) {
try {
CompletableFuture<Cache<?, ?>> cacheFuture = this.caches.get(cacheName);
if (cacheFuture != null) {
Cache<?, ?> cache = cacheFuture.join();
if (cache.getStatus().isTerminated()) {
log.tracef("Ignoring cache %s, it is already terminated.", cacheName);
continue;
}
cache.shutdown();
}
} catch (Throwable t) {
CONTAINER.componentFailedToStop(t);
}
}
}
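   // Lifecycle note with a sketch (added commentary, not original source): shutdownAllCaches()
   // performs a cluster-wide, persistence-safe shutdown via Cache#shutdown(), honouring the
   // dependency order when one exists, whereas stop() only stops this manager instance. One
   // plausible shutdown sequence:
   //
   //    manager.shutdownAllCaches(); // cluster-wide Cache#shutdown() on every cache
   //    manager.stop();              // then stop the local container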
@Override
public void stop() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.LIFECYCLE);
internalStop();
}
private void internalStop() {
lifecycleLock.lock();
String identifierString = identifierString();
try {
while (status == ComponentStatus.STOPPING) {
lifecycleCondition.await();
}
if (status != ComponentStatus.RUNNING && status != ComponentStatus.FAILED) {
log.trace("Ignore call to stop as the cache manager is not running");
return;
}
// We can stop the manager
log.debugf("Stopping cache manager %s", identifierString);
updateStatus(ComponentStatus.STOPPING);
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new CacheException("Interrupted waiting for the cache manager to stop");
} finally {
lifecycleLock.unlock();
}
try {
stopCaches();
globalComponentRegistry.getComponent(CacheManagerJmxRegistration.class).stop();
globalComponentRegistry.stop();
log.debugf("Stopped cache manager %s", identifierString);
} finally {
updateStatus(ComponentStatus.TERMINATED);
}
}
private void stopCaches() {
Set<String> cachesToStop = new LinkedHashSet<>(this.caches.size());
// stop ordered caches first
try {
List<String> ordered = cacheDependencyGraph.topologicalSort();
cachesToStop.addAll(ordered);
} catch (CyclicDependencyException e) {
CONTAINER.stopOrderIgnored();
}
// The caches map includes the default cache
cachesToStop.addAll(caches.keySet());
log.tracef("Cache stop order: %s", cachesToStop);
for (String cacheName : cachesToStop) {
try {
terminate(cacheName);
} catch (Throwable t) {
CONTAINER.componentFailedToStop(t);
}
}
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.LISTEN);
CacheManagerNotifier notifier = globalComponentRegistry.getComponent(CacheManagerNotifier.class);
return notifier.addListenerAsync(listener);
}
@Override
public CompletionStage<Void> removeListenerAsync(Object listener) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.LISTEN);
try {
CacheManagerNotifier notifier = globalComponentRegistry.getComponent(CacheManagerNotifier.class);
return notifier.removeListenerAsync(listener);
} catch (IllegalLifecycleStateException e) {
// Ignore the exception for backwards compatibility
return CompletableFutures.completedNull();
}
}
@Deprecated
@Override
public Set<Object> getListeners() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.LISTEN);
CacheManagerNotifier notifier = globalComponentRegistry.getComponent(CacheManagerNotifier.class);
return notifier.getListeners();
}
@Override
public ComponentStatus getStatus() {
return status;
}
@Override
public GlobalConfiguration getCacheManagerConfiguration() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
return configurationManager.getGlobalConfiguration();
}
@Override
public org.infinispan.configuration.cache.Configuration getDefaultCacheConfiguration() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
if (defaultCacheName != null) {
return configurationManager.getConfiguration(defaultCacheName, true);
} else {
return null;
}
}
@Override
public Configuration getCacheConfiguration(String name) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
Configuration configuration = configurationManager.getConfiguration(name, true);
if (configuration == null && cacheExists(name)) {
return getDefaultCacheConfiguration();
}
return configuration;
}
@Override
public Set<String> getCacheNames() {
// Get the XML/programmatically defined caches
Set<String> names = new HashSet<>(configurationManager.getDefinedCaches());
// Add the caches created dynamically without explicit config
names.addAll(caches.keySet());
InternalCacheRegistry internalCacheRegistry = globalComponentRegistry.getComponent(InternalCacheRegistry.class);
internalCacheRegistry.filterPrivateCaches(names);
if (names.isEmpty())
return Collections.emptySet();
else
return Immutables.immutableSetWrap(names);
}
@Override
public Set<String> getAccessibleCacheNames() {
if (configurationManager.getGlobalConfiguration().security().authorization().enabled()) {
Set<String> names = new HashSet<>();
GlobalSecurityManager gsm = globalComponentRegistry.getComponent(GlobalSecurityManager.class);
for (String name : configurationManager.getDefinedCaches()) {
Configuration cfg = configurationManager.getConfiguration(name);
AuthorizationManagerImpl am = new AuthorizationManagerImpl();
am.init(name, configurationManager.getGlobalConfiguration(), cfg, gsm);
if (!am.getPermissions(Security.getSubject()).isEmpty()) {
names.add(name);
}
}
InternalCacheRegistry internalCacheRegistry = globalComponentRegistry.getComponent(InternalCacheRegistry.class);
internalCacheRegistry.filterPrivateCaches(names);
return names;
} else {
return getCacheNames();
}
}
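   // Illustrative sketch (assumption: the PrivilegedAction overload of Security.doAs): with
   // authorization enabled, the returned names depend on the Subject of the caller, e.g.
   //
   //    Set<String> visible = Security.doAs(subject,
   //          (PrivilegedAction<Set<String>>) manager::getAccessibleCacheNames);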
@Override
public Set<String> getCacheConfigurationNames() {
return cacheManagerInfo.getCacheConfigurationNames();
}
@Override
public boolean isRunning(String cacheName) {
if (getCacheBlockingCheck != null) {
getCacheBlockingCheck.set(cacheName);
}
CompletableFuture<Cache<?, ?>> cacheFuture = caches.get(cacheName);
boolean started = cacheFuture != null && cacheFuture.isDone() && !cacheFuture.isCompletedExceptionally();
return started && cacheFuture.join().getStatus() == ComponentStatus.RUNNING;
}
@Override
public boolean isDefaultRunning() {
Optional<String> defaultCacheName = configurationManager.getGlobalConfiguration().defaultCacheName();
return defaultCacheName.isPresent() && isRunning(defaultCacheName.get());
}
@ManagedAttribute(description = "The status of the cache manager instance.", displayName = "Cache manager status", dataType = DataType.TRAIT)
public String getCacheManagerStatus() {
return cacheManagerInfo.getCacheManagerStatus();
}
@ManagedAttribute(description = "The defined cache names and their statuses. The default cache is not included in this representation.", displayName = "List of defined caches", dataType = DataType.TRAIT)
public String getDefinedCacheNames() {
StringJoiner stringJoiner = new StringJoiner("", "[", "]");
cacheManagerInfo.getDefinedCaches().forEach(c -> stringJoiner.add(c.name).add(c.isStarted() ? "(created)" : "(not created)"));
return stringJoiner.toString();
}
@ManagedAttribute(description = "The defined cache configuration names.", displayName = "List of defined cache configurations", dataType = DataType.TRAIT)
public String getDefinedCacheConfigurationNames() {
StringJoiner stringJoiner = new StringJoiner(",", "[", "]");
cacheManagerInfo.getCacheConfigurationNames().forEach(stringJoiner::add);
return stringJoiner.toString();
}
@ManagedAttribute(description = "The total number of defined cache configurations.", displayName = "Number of caches defined")
public String getDefinedCacheCount() {
return String.valueOf(getNumberOfCacheConfigurations());
}
@ManagedAttribute(description = "The total number of defined cache configurations.", displayName = "Number of caches defined")
public int getNumberOfCacheConfigurations() {
return getCacheConfigurationNames().size();
}
@ManagedAttribute(description = "The total number of created caches, including the default cache.", displayName = "Number of caches created")
public String getCreatedCacheCount() {
return String.valueOf(getNumberOfCreatedCaches());
}
@ManagedAttribute(description = "The total number of created caches, including the default cache.", displayName = "Number of caches created")
public long getNumberOfCreatedCaches() {
return cacheManagerInfo.getCreatedCacheCount();
}
@ManagedAttribute(description = "The total number of running caches, including the default cache.", displayName = "Number of running caches")
public String getRunningCacheCount() {
return String.valueOf(getNumberOfRunningCaches());
}
@ManagedAttribute(description = "The total number of running caches, including the default cache.", displayName = "Number of running caches")
public long getNumberOfRunningCaches() {
return cacheManagerInfo.getRunningCacheCount();
}
@ManagedAttribute(description = "Returns the version of Infinispan", displayName = "Infinispan version", dataType = DataType.TRAIT)
public String getVersion() {
return cacheManagerInfo.getVersion();
}
@ManagedAttribute(description = "The name of this cache manager", displayName = "Cache manager name", dataType = DataType.TRAIT)
public String getName() {
return cacheManagerInfo.getName();
}
@ManagedOperation(description = "Starts the default cache associated with this cache manager", displayName = "Starts the default cache")
public void startCache() {
if (defaultCacheName == null) {
throw CONFIG.noDefaultCache();
}
startCache(defaultCacheName);
}
@ManagedOperation(description = "Starts a named cache from this cache manager", name = "startCache", displayName = "Starts a cache with the given name")
public void startCache(@Parameter(name = "cacheName", description = "Name of cache to start") String cacheName) {
if (cacheName == null)
throw new NullPointerException("Null arguments not allowed");
assertIsNotTerminated();
// No need to block if another thread (or even the current thread) is starting the global components
// Because each cache component will wait for the global components it depends on
      // And ComponentRegistry depends on GlobalComponentRegistry.ModuleInitializer
internalStart(false);
CompletableFuture<Cache<?, ?>> cacheFuture = caches.get(cacheName);
if (cacheFuture != null) {
try {
Cache<?, ?> cache = cacheFuture.join();
if (!cache.getStatus().isTerminated()) {
return;
}
} catch (CompletionException e) {
throw ((CacheException) e.getCause());
}
}
createCache(cacheName);
}
@ManagedAttribute(description = "The network address associated with this instance", displayName = "Network address", dataType = DataType.TRAIT)
public String getNodeAddress() {
return cacheManagerInfo.getNodeAddress();
}
@ManagedAttribute(description = "The physical network addresses associated with this instance", displayName = "Physical network addresses", dataType = DataType.TRAIT)
public String getPhysicalAddresses() {
return cacheManagerInfo.getPhysicalAddresses();
}
@ManagedAttribute(description = "List of members in the cluster", displayName = "Cluster members", dataType = DataType.TRAIT)
public String getClusterMembers() {
List<String> clusterMembers = cacheManagerInfo.getClusterMembers();
return clusterMembers.size() == 1 ? clusterMembers.iterator().next() : clusterMembers.toString();
}
@ManagedAttribute(description = "List of members in the cluster", displayName = "Cluster members", dataType = DataType.TRAIT)
public String getClusterMembersPhysicalAddresses() {
return cacheManagerInfo.getClusterMembersPhysicalAddresses().toString();
}
@ManagedAttribute(description = "Size of the cluster in number of nodes", displayName = "Cluster size")
public int getClusterSize() {
return cacheManagerInfo.getClusterSize();
}
/**
* {@inheritDoc}
*/
@ManagedAttribute(description = "Cluster name", displayName = "Cluster name", dataType = DataType.TRAIT)
@Override
public String getClusterName() {
return cacheManagerInfo.getClusterName();
}
@ManagedAttribute(description = "Returns the local site name", displayName = "Local site name", dataType = DataType.TRAIT)
public String getSite() {
return cacheManagerInfo.getLocalSite();
}
@ManagedAttribute(description = "Lists all online sites", displayName = "Online Sites", dataType = DataType.TRAIT)
public String getSiteView() {
return String.valueOf(cacheManagerInfo.getSites());
}
@ManagedAttribute(description = "Indicates whether this node is a relay node", displayName = "Is relay node?", dataType = DataType.TRAIT)
public boolean isRelayNode() {
return cacheManagerInfo.isRelayNode();
}
@ManagedAttribute(description = "Lists relay nodes in the local site", displayName = "Relay nodes", dataType = DataType.TRAIT)
public String getRelayNodesAddress() {
return String.valueOf(cacheManagerInfo.getRelayNodesAddress());
}
String getLogicalAddressString() {
return getAddress() == null ? "local" : getAddress().toString();
}
private void assertIsNotTerminated() {
if (status == ComponentStatus.STOPPING ||
status == ComponentStatus.TERMINATED ||
status == ComponentStatus.FAILED)
throw new IllegalLifecycleStateException(
"Cache container has been stopped and cannot be reused. Recreate the cache container.");
}
@Override
public Transport getTransport() {
if (transport == null) {
lifecycleLock.lock();
try {
// Do not start the transport if the manager hasn't been started yet or we are already stopping
if (transport == null && (status == ComponentStatus.RUNNING || status == ComponentStatus.INITIALIZING)) {
transport = globalComponentRegistry.getComponent(Transport.class);
}
} finally {
lifecycleLock.unlock();
}
}
return transport;
}
@Override
public GlobalComponentRegistry getGlobalComponentRegistry() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
return globalComponentRegistry;
}
@Override
public void addCacheDependency(String from, String to) {
authorizer.checkPermission(getSubject(), AuthorizationPermission.ADMIN);
cacheDependencyGraph.addDependency(from, to);
}
@Override
public String toString() {
return getClass().getSimpleName() + " " + identifierString();
}
private String identifierString() {
if (getAddress() != null) {
return getAddress().toString();
} else if (configurationManager.getGlobalConfiguration().transport().nodeName() != null) {
return configurationManager.getGlobalConfiguration().transport().nodeName();
} else {
return configurationManager.getGlobalConfiguration().cacheManagerName();
}
}
/**
* {@inheritDoc}
*/
@ManagedAttribute(description = "Global configuration properties", displayName = "Global configuration properties", dataType = DataType.TRAIT)
public Properties getGlobalConfigurationAsProperties() {
return new PropertyFormatter().format(configurationManager.getGlobalConfiguration());
}
@Override
public CacheContainerStats getStats() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.MONITOR);
return stats;
}
@Override
public Health getHealth() {
return health;
}
@Override
public CacheManagerInfo getCacheManagerInfo() {
return cacheManagerInfo;
}
@Override
public ClusterExecutor executor() {
authorizer.checkPermission(getSubject(), AuthorizationPermission.EXEC);
// Allow INITIALIZING state so ClusterExecutor can be used by components in a @Start method.
if (globalComponentRegistry.getStatus() != ComponentStatus.RUNNING &&
globalComponentRegistry.getStatus() != ComponentStatus.INITIALIZING) {
throw new IllegalStateException("CacheManager must be started before retrieving a ClusterExecutor!");
}
// TODO: This is to be removed in https://issues.redhat.com/browse/ISPN-11482
Executor blockingExecutor = globalComponentRegistry.getComponent(ExecutorService.class, KnownComponentNames.BLOCKING_EXECUTOR);
// Have to make sure the transport is running before we retrieve it
Transport transport = globalComponentRegistry.getComponent(BasicComponentRegistry.class).getComponent(Transport.class).running();
if (transport != null) {
long time = configurationManager.getGlobalConfiguration().transport().distributedSyncTimeout();
return ClusterExecutors.allSubmissionExecutor(null, this, transport, time, TimeUnit.MILLISECONDS,
// This can run arbitrary code, including user - such commands can block
blockingExecutor,
globalComponentRegistry.getComponent(ScheduledExecutorService.class, KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR));
} else {
return ClusterExecutors.allSubmissionExecutor(null, this, null,
TransportConfiguration.DISTRIBUTED_SYNC_TIMEOUT.getDefaultValue(), TimeUnit.MILLISECONDS,
blockingExecutor,
globalComponentRegistry.getComponent(ScheduledExecutorService.class, KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR));
}
}
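   // Illustrative sketch (not original source; the logging call is hypothetical): run a function
   // on every node and receive each node's result through the TriConsumer.
   //
   //    manager.executor().submitConsumer(
   //          cm -> cm.getCacheManagerInfo().getNodeAddress(),        // executes on each node
   //          (address, value, throwable) -> log.infof("%s -> %s", address, value))
   //       .join();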
@Override
public void close() throws IOException {
stop();
}
@Override
public ClassAllowList getClassWhiteList() {
return getClassAllowList();
}
@Override
public ClassAllowList getClassAllowList() {
return classAllowList;
}
@Override
public EmbeddedCacheManagerAdmin administration() {
return cacheManagerAdmin;
}
ConcurrentMap<String, CompletableFuture<Cache<?, ?>>> getCaches() {
return caches;
}
ConfigurationManager getConfigurationManager() {
return configurationManager;
}
@Override
public Subject getSubject() {
return null;
}
@Override
public EmbeddedCacheManager withSubject(Subject subject) {
if (subject == null) {
return this;
} else {
return new DefaultCacheManager(this) {
@Override
public EmbeddedCacheManager withSubject(Subject subject) {
throw new IllegalArgumentException("Cannot set a Subject on an EmbeddedCacheManager more than once");
}
@Override
public Subject getSubject() {
return subject;
}
};
}
}
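   // Illustrative sketch (grounded in the anonymous subclass above): the wrapper's getSubject()
   // returns the supplied Subject, so later permission checks run as that Subject.
   //
   //    EmbeddedCacheManager asAdmin = manager.withSubject(adminSubject);
   //    asAdmin.getCacheManagerConfiguration(); // ADMIN check performed against adminSubject
   //
   // Calling withSubject(...) again on the wrapper throws IllegalArgumentException.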
static void enableGetCacheBlockingCheck() {
getCacheBlockingCheck = new ThreadLocal<>();
}
}
| 56,966
| 41.544436
| 207
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/EmbeddedCacheManagerAdmin.java
|
package org.infinispan.manager;
import javax.security.auth.Subject;
import org.infinispan.Cache;
import org.infinispan.commons.api.CacheContainerAdmin;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
/**
* Cache manager operations which affect the whole cluster. An instance of this can be retrieved from
* {@link EmbeddedCacheManager#administration()}
*
* @author Tristan Tarrant
* @since 9.2
*/
public interface EmbeddedCacheManagerAdmin extends CacheContainerAdmin<EmbeddedCacheManagerAdmin, Configuration> {
/**
* Creates a cache on the container using the specified template.
*
* @param name the name of the cache to create
* @param template the template to use for the cache. If null, the configuration marked as default on the container
* will be used
* @return the cache
*
* @throws org.infinispan.commons.CacheException if a cache with the same name already exists
*/
<K, V> Cache<K, V> createCache(String name, String template);
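   // Usage sketch (illustrative; assumes the built-in org.infinispan.DIST_SYNC template is
   // available, and "users" is a hypothetical cache name):
   //
   //    Cache<String, String> users = manager.administration()
   //          .createCache("users", "org.infinispan.DIST_SYNC");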
/**
* Retrieves an existing cache or creates one using the specified template if it doesn't exist
*
* @param name the name of the cache to create
* @param template the template to use for the cache. If null, the configuration marked as default on the container
* will be used
* @return the cache
*/
<K, V> Cache<K, V> getOrCreateCache(String name, String template);
/**
* Creates a cache across the cluster. The cache will survive topology changes, e.g. when a new node joins the cluster,
* it will automatically be created there. This method will wait for the cache to be created on all nodes before
* returning.
*
* @param name the name of the cache
* @param configuration the configuration to use. It must be a clustered configuration (e.g. distributed)
* @param <K> the generic type of the key
* @param <V> the generic type of the value
* @return the cache
*
* @throws org.infinispan.commons.CacheException if a cache with the same name already exists
*/
<K, V> Cache<K, V> createCache(String name, Configuration configuration);
/**
* Retrieves an existing cache or creates one across the cluster using the specified configuration.
* The cache will survive topology changes, e.g. when a new node joins the cluster,
* it will automatically be created there. This method will wait for the cache to be created on all nodes before
* returning.
*
* @param name the name of the cache
* @param configuration the configuration to use. It must be a clustered configuration (e.g. distributed)
* @param <K> the generic type of the key
* @param <V> the generic type of the value
* @return the cache
*/
<K, V> Cache<K, V> getOrCreateCache(String name, Configuration configuration);
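   // Usage sketch (illustrative; names and flags are examples, not requirements): idempotent
   // cluster-wide creation, safe to invoke from every node at startup.
   //
   //    Configuration cfg = new ConfigurationBuilder()
   //          .clustering().cacheMode(CacheMode.DIST_SYNC).build();
   //    Cache<String, String> users = manager.administration()
   //          .withFlags(CacheContainerAdmin.AdminFlag.VOLATILE) // optional: do not persist
   //          .getOrCreateCache("users", cfg);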
/**
* Creates a template that is replicated across the cluster using the specified configuration.
* The template will survive topology changes, e.g. when a new node joins the cluster,
* it will automatically be created there. This method will wait for the template to be created on all nodes before
* returning.
*
* @param name the name of the template
* @param configuration the configuration to use. It must be a clustered configuration (e.g. distributed)
* @throws org.infinispan.commons.CacheConfigurationException if a template with the same name already exists
*/
void createTemplate(String name, Configuration configuration);
/**
* Retrieves an existing template or creates one across the cluster using the specified configuration.
* The template will survive topology changes, e.g. when a new node joins the cluster,
* it will automatically be created there. This method will wait for the template to be created on all nodes before
* returning.
*
* @param name the name of the template
* @param configuration the configuration to use. It must be a clustered configuration (e.g. distributed)
* @return the template configuration
*/
Configuration getOrCreateTemplate(String name, Configuration configuration);
/**
* Removes a template from the cache container. Any persisted data will be cleared.
*
* @param name the name of the template to remove
*/
void removeTemplate(String name);
/**
* Performs any cache manager operations using the specified {@link Subject}. Only applies to cache managers with authorization
 * enabled (see {@link GlobalConfigurationBuilder#security()}).
*
 * @param subject the {@link Subject} to perform the operations with
* @return an {@link EmbeddedCacheManagerAdmin} instance on which a real operation is to be invoked, using the specified subject
*/
EmbeddedCacheManagerAdmin withSubject(Subject subject);
}
| 4,858
| 42.774775
| 131
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/UnwrappingEmbeddedCacheManager.java
|
package org.infinispan.manager.impl;
import org.infinispan.Cache;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.actions.SecurityActions;
/**
* EmbeddedCacheManager used for cluster executor invocation so that caches are not wrapped with security
* since invoking via ClusterExecutor already requires ADMIN privileges
* @author wburns
* @since 10.0
*/
class UnwrappingEmbeddedCacheManager extends AbstractDelegatingEmbeddedCacheManager {
public UnwrappingEmbeddedCacheManager(EmbeddedCacheManager cm) {
super(cm);
}
@Override
public <K, V> Cache<K, V> getCache() {
Cache<K, V> cache = super.getCache();
return SecurityActions.getUnwrappedCache(cache);
}
@Override
public <K, V> Cache<K, V> getCache(String cacheName) {
Cache<K, V> cache = super.getCache(cacheName);
return SecurityActions.getUnwrappedCache(cache);
}
@Override
public <K, V> Cache<K, V> getCache(String cacheName, boolean createIfAbsent) {
Cache<K, V> cache = super.getCache(cacheName, createIfAbsent);
return SecurityActions.getUnwrappedCache(cache);
}
}
| 1,144
| 29.945946
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/FailOverClusterExecutor.java
|
package org.infinispan.manager.impl;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.manager.ClusterExecutionPolicy;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.function.TriConsumer;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Cluster Executor that submits to a single node at a time, but allows for automatic failover up to a certain number
 * of times. The node used for each retry is chosen at random.
* <p>
* This executor currently only functions properly when using a single submission cluster executor such as
* {@link LocalClusterExecutor} and {@link SingleClusterExecutor}
* @author wburns
* @since 9.1
*/
class FailOverClusterExecutor implements ClusterExecutor {
private static final Log log = LogFactory.getLog(FailOverClusterExecutor.class);
private final ClusterExecutor executor;
private final int failOverCount;
FailOverClusterExecutor(ClusterExecutor executor, int failOverCount) {
this.executor = executor;
this.failOverCount = failOverCount;
}
@Override
public CompletableFuture<Void> submit(Runnable command) {
CompletableFuture<Void> future = new CompletableFuture<>();
submit(command, future, failOverCount);
return future;
}
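   // Usage sketch (assumption: this executor is obtained via singleNodeSubmission(int) on a
   // ClusterExecutor):
   //
   //    manager.executor()
   //          .singleNodeSubmission(3)   // fail over to a randomly chosen node, up to 3 times
   //          .submit(() -> doWork())    // doWork() is a hypothetical task
   //          .join();
   //
   // Note that a TimeoutException is deliberately never retried (see whenComplete above).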
private void submit(Runnable command, CompletableFuture<Void> future, int retriesLeft) {
if (log.isTraceEnabled()) {
log.tracef("Submitting runnable %s retries left %d", command, retriesLeft);
}
executor.submit(command).whenComplete((v, t) -> {
if (t != null) {
if (t instanceof TimeoutException) {
log.tracef("Command %s was met with timeout", command);
future.completeExceptionally(t);
} else if (retriesLeft > 0) {
log.tracef("Retrying command %s - retries left %d", command, retriesLeft);
submit(command, future, retriesLeft - 1);
} else {
log.tracef("No retries left for command %s, passing last exception to user", command);
future.completeExceptionally(t);
}
} else {
log.tracef("Command %s completed successfully", command);
future.complete(null);
}
});
}
@Override
public <V> CompletableFuture<Void> submitConsumer(Function<? super EmbeddedCacheManager, ? extends V> callable,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer) {
CompletableFuture<Void> future = new CompletableFuture<>();
submitConsumer(callable, triConsumer, future, failOverCount);
return future;
}
private <V> void submitConsumer(Function<? super EmbeddedCacheManager, ? extends V> function,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer, CompletableFuture<Void> future,
int retriesLeft) {
if (log.isTraceEnabled()) {
log.tracef("Submitting function %d retries left %d",
function, retriesLeft);
}
executor.submitConsumer(function, triConsumer).whenComplete((v, t) -> {
if (t != null) {
if (t instanceof TimeoutException) {
log.tracef("Function %s was met with timeout", function);
future.completeExceptionally(t);
} else if (retriesLeft > 0) {
log.tracef("Retrying function %s - retries left %d", function, retriesLeft);
submitConsumer(function, triConsumer, future, retriesLeft - 1);
} else {
log.tracef("No retries left for function %s, passing last exception to user", function);
future.completeExceptionally(t);
}
} else {
log.tracef("Function %s completed successfully", function);
future.complete(null);
}
});
}
@Override
public ClusterExecutor timeout(long time, TimeUnit unit) {
ClusterExecutor newExecutor = executor.timeout(time, unit);
if (newExecutor == executor) {
return this;
}
return new FailOverClusterExecutor(newExecutor, failOverCount);
}
@Override
public ClusterExecutor singleNodeSubmission() {
return executor;
}
@Override
public ClusterExecutor singleNodeSubmission(int failOverCount) {
if (failOverCount == this.failOverCount) {
return this;
}
return new FailOverClusterExecutor(executor, failOverCount);
}
@Override
public ClusterExecutor allNodeSubmission() {
return executor.allNodeSubmission();
}
@Override
public ClusterExecutor filterTargets(Predicate<? super Address> predicate) {
ClusterExecutor newExecutor = executor.filterTargets(predicate);
if (newExecutor == executor) {
return this;
}
return new FailOverClusterExecutor(newExecutor, failOverCount);
}
@Override
public ClusterExecutor filterTargets(ClusterExecutionPolicy policy) throws IllegalStateException {
ClusterExecutor newExecutor = executor.filterTargets(policy);
if (newExecutor == executor) {
return this;
}
return new FailOverClusterExecutor(newExecutor, failOverCount);
}
@Override
public ClusterExecutor filterTargets(ClusterExecutionPolicy policy, Predicate<? super Address> predicate) throws IllegalStateException {
ClusterExecutor newExecutor = executor.filterTargets(policy, predicate);
if (newExecutor == executor) {
return this;
}
return new FailOverClusterExecutor(newExecutor, failOverCount);
}
@Override
public ClusterExecutor filterTargets(Collection<Address> addresses) {
ClusterExecutor newExecutor = executor.filterTargets(addresses);
if (newExecutor == executor) {
return this;
}
return new FailOverClusterExecutor(newExecutor, failOverCount);
}
@Override
public ClusterExecutor noFilter() {
ClusterExecutor newExecutor = executor.noFilter();
if (newExecutor == executor) {
return this;
}
return new FailOverClusterExecutor(newExecutor, failOverCount);
}
}
| 6,474
| 36.427746
| 139
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/AbstractDelegatingEmbeddedCacheManager.java
|
package org.infinispan.manager.impl;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import javax.security.auth.Subject;
import org.infinispan.Cache;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.health.Health;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.CacheManagerInfo;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.EmbeddedCacheManagerAdmin;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.stats.CacheContainerStats;
/**
* This is a convenient base class for implementing a cache manager delegate.
*
* The only constructor takes a {@link org.infinispan.manager.EmbeddedCacheManager}
 * argument, to which each method call is delegated. One can extend this class and override only
 * the subset of methods it is interested in.
*
* @author Dan Berindei <dan@infinispan.org>
* @see org.infinispan.cache.impl.AbstractDelegatingCache
* @see org.infinispan.cache.impl.AbstractDelegatingAdvancedCache
*/
@SurvivesRestarts
public class AbstractDelegatingEmbeddedCacheManager implements EmbeddedCacheManager {
protected EmbeddedCacheManager cm;
public AbstractDelegatingEmbeddedCacheManager(EmbeddedCacheManager cm) {
this.cm = cm;
}
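   // Illustrative sketch (not original source): a subclass overrides only the calls it wants to
   // intercept; everything else falls through to the delegate.
   //
   //    class LoggingCacheManager extends AbstractDelegatingEmbeddedCacheManager {
   //       LoggingCacheManager(EmbeddedCacheManager cm) { super(cm); }
   //       @Override
   //       public <K, V> Cache<K, V> getCache(String name) {
   //          System.out.println("getCache(" + name + ")");
   //          return super.getCache(name);
   //       }
   //    }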
@Override
public org.infinispan.configuration.cache.Configuration defineConfiguration(String cacheName,
org.infinispan.configuration.cache.Configuration configuration) {
return cm.defineConfiguration(cacheName, configuration);
}
@Override
public Configuration defineConfiguration(String cacheName, String templateCacheName, Configuration configurationOverride) {
return cm.defineConfiguration(cacheName, templateCacheName, configurationOverride);
}
@Override
public void undefineConfiguration(String configurationName) {
cm.undefineConfiguration(configurationName);
}
@Override
public String getClusterName() {
return cm.getClusterName();
}
@Override
public List<Address> getMembers() {
return cm.getMembers();
}
@Override
public Address getAddress() {
return cm.getAddress();
}
@Override
public Address getCoordinator() {
return cm.getCoordinator();
}
@Override
public boolean isCoordinator() {
return cm.isCoordinator();
}
@Override
public ComponentStatus getStatus() {
return cm.getStatus();
}
@Override
public org.infinispan.configuration.cache.Configuration getDefaultCacheConfiguration() {
return cm.getDefaultCacheConfiguration();
}
@Override
public org.infinispan.configuration.global.GlobalConfiguration getCacheManagerConfiguration() {
return cm.getCacheManagerConfiguration();
}
@Override
public org.infinispan.configuration.cache.Configuration getCacheConfiguration(String name) {
return cm.getCacheConfiguration(name);
}
@Override
public Set<String> getCacheNames() {
return cm.getCacheNames();
}
@Override
public Set<String> getAccessibleCacheNames() {
return cm.getAccessibleCacheNames();
}
@Override
public Set<String> getCacheConfigurationNames() {
return cm.getCacheConfigurationNames();
}
@Override
public ClusterExecutor executor() {
return cm.executor();
}
@Override
public Health getHealth() {
return cm.getHealth();
}
@Override
public CacheManagerInfo getCacheManagerInfo() {
return cm.getCacheManagerInfo();
}
@Override
public boolean isRunning(String cacheName) {
return cm.isRunning(cacheName);
}
@Override
public boolean isDefaultRunning() {
return cm.isDefaultRunning();
}
@Override
public boolean cacheExists(String cacheName) {
return cm.cacheExists(cacheName);
}
@Override
public EmbeddedCacheManagerAdmin administration() {
return cm.administration();
}
@Override
public ClassAllowList getClassWhiteList() {
return cm.getClassAllowList();
}
@Override
public ClassAllowList getClassAllowList() {
return cm.getClassAllowList();
}
@Override
public <K, V> Cache<K, V> createCache(String name, Configuration configuration) {
return cm.createCache(name, configuration);
}
@Override
public <K, V> Cache<K, V> getCache(String cacheName, boolean createIfAbsent) {
return cm.getCache(cacheName, createIfAbsent);
}
@Override
public EmbeddedCacheManager startCaches(String... cacheNames) {
return cm.startCaches(cacheNames);
}
@Override
public void removeCache(String cacheName) {
cm.removeCache(cacheName);
}
@Override
public Transport getTransport() {
return cm.getTransport();
}
@Override
public <K, V> Cache<K, V> getCache() {
return cm.getCache();
}
@Override
public <K, V> Cache<K, V> getCache(String cacheName) {
return cm.getCache(cacheName);
}
@Override
public void start() {
cm.start();
}
@Override
public void stop() {
cm.stop();
}
@Override
public GlobalComponentRegistry getGlobalComponentRegistry() {
return cm.getGlobalComponentRegistry();
}
@Override
public void addCacheDependency(String from, String to) {
cm.addCacheDependency(from, to);
}
@Override
public void addListener(Object listener) {
cm.addListener(listener);
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
return cm.addListenerAsync(listener);
}
@Override
public void removeListener(Object listener) {
cm.removeListener(listener);
}
@Override
public CompletionStage<Void> removeListenerAsync(Object listener) {
return cm.removeListenerAsync(listener);
}
@Deprecated
@Override
public Set<Object> getListeners() {
return cm.getListeners();
}
@Override
public CacheContainerStats getStats() {
return cm.getStats();
}
@Override
public void close() throws IOException {
cm.close();
}
@Override
public EmbeddedCacheManager withSubject(Subject subject) {
return cm.withSubject(subject);
}
@Override
public Subject getSubject() {
return cm.getSubject();
}
}
| 6,702
| 24.104869
| 144
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/ReplicableRunnableCommand.java
|
package org.infinispan.manager.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.GlobalRpcCommand;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* Replicable Command that runs the given Runnable
*
* @author wburns
* @since 8.2
*/
public class ReplicableRunnableCommand implements GlobalRpcCommand {
public static final byte COMMAND_ID = 59;
private Runnable runnable;
public ReplicableRunnableCommand() {
}
public ReplicableRunnableCommand(Runnable runnable) {
this.runnable = runnable;
}
@Override
public CompletionStage<?> invokeAsync(GlobalComponentRegistry globalComponentRegistry) throws Throwable {
runnable.run();
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
runnable = (Runnable) input.readObject();
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeObject(runnable);
}
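   // Note (inferred from the readFrom/writeTo pair above): the Runnable is transferred via
   // ObjectOutput/ObjectInput, so it must be serializable by the configured marshaller. A plain
   // lambda needs an intersection cast, e.g.:
   //
   //    Runnable task = (Runnable & java.io.Serializable) () -> System.out.println("hi");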
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public boolean canBlock() {
// These commands can be arbitrary user commands - so be careful about them blocking
return true;
}
}
| 1,518
| 23.111111
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/AbstractClusterExecutor.java
|
package org.infinispan.manager.impl;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.infinispan.manager.ClusterExecutionPolicy;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.TopologyAwareAddress;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.util.logging.Log;
/**
* Abstract executor that contains code that should be shared by all
* @author wburns
* @since 9.0
*/
abstract class AbstractClusterExecutor<T extends ClusterExecutor> extends LocalClusterExecutor {
protected final Transport transport;
protected final Address me;
AbstractClusterExecutor(Predicate<? super Address> predicate, EmbeddedCacheManager manager,
Transport transport, long time, TimeUnit unit, Executor localExecutor,
ScheduledExecutorService timeoutExecutor) {
super(predicate, manager, localExecutor, time, unit, timeoutExecutor);
this.transport = transport;
this.me = Objects.requireNonNull(transport.getAddress(),
"Transport was not started before retrieving a ClusterExecutor!");
}
protected abstract T sameClusterExecutor(Predicate<? super Address> predicate,
long time, TimeUnit unit);
protected abstract Log getLog();
@Override
Address getMyAddress() {
return me;
}
void consumeResponse(Response resp, Address target, Consumer<? super Throwable> throwableEater) {
consumeResponse(resp, target, o -> {}, throwableEater);
}
void consumeResponse(Response resp, Address target, Consumer<Object> resultsEater,
Consumer<? super Throwable> throwableEater) {
if (resp != null) {
if (resp instanceof ExceptionResponse) {
Exception exception = ((ExceptionResponse) resp).getException();
// Non-exceptions are wrapped in CacheExceptions on the remote node,
// but we don't treat them specially here
throwableEater.accept(exception);
} else if (resp instanceof SuccessfulResponse) {
resultsEater.accept(((SuccessfulResponse) resp).getResponseValue());
} else if (resp instanceof CacheNotFoundResponse) {
throwableEater.accept(getLog().remoteNodeSuspected(target));
} else {
            throwableEater.accept(new IllegalStateException("Response was neither successful nor an exception!"));
}
} else {
resultsEater.accept(null);
}
}
/**
* @param includeMe whether or not the list returned should contain the address for the local node
    * @return the targets we should use for JGroups; the local node is excluded unless {@code includeMe} is true
*/
List<Address> getRealTargets(boolean includeMe) {
List<Address> list;
List<Address> ispnMembers = transport.getMembers();
int size = ispnMembers.size();
if (size == 0) {
list = Collections.emptyList();
} else {
if (predicate == null) {
if (size == 1) {
Address member = ispnMembers.get(0);
if (!includeMe && member.equals(me)) {
list = Collections.emptyList();
} else {
list = Collections.singletonList(member);
}
} else {
list = (includeMe ? ispnMembers.stream() : ispnMembers.stream().filter(a -> !a.equals(me)))
.collect(Collectors.toList());
}
} else {
list = (includeMe ? ispnMembers.stream() : ispnMembers.stream().filter(a -> !a.equals(me)))
.filter(predicate)
.collect(Collectors.toList());
}
}
return list;
}
@Override
public T filterTargets(Predicate<? super Address> predicate) {
return sameClusterExecutor(predicate, time, unit);
}
@Override
public T filterTargets(ClusterExecutionPolicy policy) throws IllegalStateException {
if (!manager.getCacheManagerConfiguration().transport().hasTopologyInfo()) {
throw new IllegalStateException("Topology information is not available!");
}
return sameClusterExecutor(a -> policy.include((TopologyAwareAddress) me,
(TopologyAwareAddress) a), time, unit);
}
@Override
public T filterTargets(ClusterExecutionPolicy policy, Predicate<? super Address> predicate) throws IllegalStateException {
if (!manager.getCacheManagerConfiguration().transport().hasTopologyInfo()) {
throw new IllegalStateException("Topology information is not available!");
}
return sameClusterExecutor(a -> policy.include((TopologyAwareAddress) me,
(TopologyAwareAddress) a) && predicate.test(a), time, unit);
}
@Override
public T filterTargets(Collection<Address> addresses) {
return filterTargets(addresses::contains);
}
@Override
public T noFilter() {
if (predicate == null) {
return (T) this;
}
return sameClusterExecutor(null, time, unit);
}
}
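// --- Usage sketch (hypothetical standalone example, not part of the original file) ---
// Shows how the target filtering implemented above composes from user code. Assumes a started
// clustered EmbeddedCacheManager named "manager"; the class and method names are illustrative.
import java.util.concurrent.TimeUnit;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
class FilteredExecutorExample {
static void run(EmbeddedCacheManager manager) {
ClusterExecutor executor = manager.executor()
.filterTargets(address -> !address.equals(manager.getAddress())) // skip the local node
.timeout(30, TimeUnit.SECONDS);
// Blocks until every targeted node has run the runnable or the timeout elapses
executor.submit(() -> System.out.println("Hello from a remote node")).join();
}
}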
| 5,624
| 37.265306
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/LocalClusterExecutor.java
|
package org.infinispan.manager.impl;
import java.util.Collection;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import javax.security.auth.Subject;
import org.infinispan.manager.ClusterExecutionPolicy;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.security.Security;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.function.TriConsumer;
/**
* Cluster executor implementation that runs all submissions on the local node only.
* @author wburns
* @since 9.0
*/
class LocalClusterExecutor implements ClusterExecutor {
protected final Predicate<? super Address> predicate;
protected final EmbeddedCacheManager manager;
protected final long time;
protected final TimeUnit unit;
protected final Executor localExecutor;
protected final ScheduledExecutorService timeoutExecutor;
LocalClusterExecutor(Predicate<? super Address> predicate, EmbeddedCacheManager manager, Executor localExecutor,
long time, TimeUnit unit, ScheduledExecutorService timeoutExecutor) {
this.predicate = predicate;
this.manager = new UnwrappingEmbeddedCacheManager(Objects.requireNonNull(manager));
this.localExecutor = Objects.requireNonNull(localExecutor);
if (time <= 0) {
throw new IllegalArgumentException("time must be greater than 0");
}
this.time = time;
this.unit = Objects.requireNonNull(unit);
this.timeoutExecutor = Objects.requireNonNull(timeoutExecutor);
}
Address getMyAddress() {
return null;
}
@Override
public void execute(Runnable command) {
// We ignore the timeout since the user can't respond to it anyway, so there is no reason to create extra machinery
localExecutor.execute(command);
}
@Override
public CompletableFuture<Void> submit(Runnable command) {
CompletableFuture<Void> future = new CompletableFuture<>();
localExecutor.execute(() -> {
try {
command.run();
future.complete(null);
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
ScheduledFuture<Boolean> scheduledFuture = timeoutExecutor.schedule(
() -> future.completeExceptionally(new TimeoutException()), time, unit);
future.whenComplete((v, t) -> scheduledFuture.cancel(true));
return future;
}
@Override
public <V> CompletableFuture<Void> submitConsumer(Function<? super EmbeddedCacheManager, ? extends V> callable,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer) {
CompletableFuture<Void> future = new CompletableFuture<>();
localInvocation(callable).whenComplete((r, t) -> {
try {
triConsumer.accept(getMyAddress(), r, t);
future.complete(null);
} catch (Throwable throwable) {
future.completeExceptionally(throwable);
}
});
ScheduledFuture<Boolean> scheduledFuture = timeoutExecutor.schedule(() ->
future.completeExceptionally(new TimeoutException()), time, unit);
future.whenComplete((v, t) -> scheduledFuture.cancel(true));
return future;
}
<T> CompletableFuture<T> localInvocation(Function<? super EmbeddedCacheManager, ? extends T> function) {
CompletableFuture<T> future = new CompletableFuture<>();
Subject subject = Security.getSubject();
localExecutor.execute(() -> {
try {
T result = Security.doAs(subject, function, manager);
future.complete(result);
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
return future;
}
protected ClusterExecutor sameClusterExecutor(Predicate<? super Address> predicate,
long time, TimeUnit unit) {
return new LocalClusterExecutor(predicate, manager, localExecutor, time, unit, timeoutExecutor);
}
@Override
public ClusterExecutor timeout(long time, TimeUnit unit) {
if (time <= 0) {
throw new IllegalArgumentException("Time must be greater than 0!");
}
Objects.requireNonNull(unit, "TimeUnit must be non null!");
if (this.time == time && this.unit == unit) {
return this;
}
return sameClusterExecutor(predicate, time, unit);
}
@Override
public ClusterExecutor filterTargets(Predicate<? super Address> predicate) {
return sameClusterExecutor(predicate, time, unit);
}
@Override
public ClusterExecutor filterTargets(ClusterExecutionPolicy policy) throws IllegalStateException {
throw new IllegalStateException("Topology information is not available!");
}
@Override
public ClusterExecutor filterTargets(ClusterExecutionPolicy policy, Predicate<? super Address> predicate) throws IllegalStateException {
throw new IllegalStateException("Topology information is not available!");
}
@Override
public ClusterExecutor filterTargets(Collection<Address> addresses) {
return filterTargets(addresses::contains);
}
@Override
public ClusterExecutor noFilter() {
if (predicate == null) {
return this;
}
return sameClusterExecutor(null, time, unit);
}
@Override
public ClusterExecutor singleNodeSubmission() {
return this;
}
@Override
public ClusterExecutor singleNodeSubmission(int failOverCount) {
return new FailOverClusterExecutor(this, failOverCount);
}
@Override
public ClusterExecutor allNodeSubmission() {
return this;
}
}
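// --- Illustrative sketch (hypothetical standalone example, not part of the original file) ---
// The timeout pattern used by submit()/submitConsumer() above, reduced to plain JDK types:
// a scheduled task races the real work to complete the future, and is cancelled once the
// future settles either way. The JDK TimeoutException stands in for Infinispan's here.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
class TimeoutRaceExample {
static CompletableFuture<Void> submitWithTimeout(Runnable command, Executor executor,
ScheduledExecutorService timeoutExecutor, long time, TimeUnit unit) {
CompletableFuture<Void> future = new CompletableFuture<>();
executor.execute(() -> {
try {
command.run();
future.complete(null);
} catch (Throwable t) {
future.completeExceptionally(t);
}
});
// Completes the future exceptionally if the command does not finish in time
ScheduledFuture<?> timeoutTask = timeoutExecutor.schedule(
() -> future.completeExceptionally(new TimeoutException()), time, unit);
// Once the future settles, the pending timeout task is no longer needed
future.whenComplete((v, t) -> timeoutTask.cancel(true));
return future;
}
}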
| 5,738
| 33.365269
| 139
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/AllClusterExecutor.java
|
package org.infinispan.manager.impl;
import static org.infinispan.commons.util.concurrent.CompletableFutures.asCompletionException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.impl.PassthroughMapResponseCollector;
import org.infinispan.remoting.transport.impl.PassthroughSingleResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.security.Security;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.function.TriConsumer;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Cluster executor implementation that sends a request to all available nodes
*
* @author wburns
* @since 8.2
*/
class AllClusterExecutor extends AbstractClusterExecutor<AllClusterExecutor> {
private static final Log log = LogFactory.getLog(AllClusterExecutor.class);
AllClusterExecutor(Predicate<? super Address> predicate, EmbeddedCacheManager manager,
Transport transport, long time, TimeUnit unit, Executor localExecutor,
ScheduledExecutorService timeoutExecutor) {
super(predicate, manager, transport, time, unit, localExecutor, timeoutExecutor);
}
@Override
public Log getLog() {
return log;
}
@Override
protected AllClusterExecutor sameClusterExecutor(Predicate<? super Address> predicate, long time, TimeUnit unit) {
return new AllClusterExecutor(predicate, manager, transport, time, unit, localExecutor, timeoutExecutor);
}
private <T> CompletableFuture<Void> startLocalInvocation(Function<? super EmbeddedCacheManager, ? extends T> callable,
TriConsumer<? super Address, ? super T, ? super Throwable> triConsumer) {
if (me == null || predicate == null || predicate.test(me)) {
if (log.isTraceEnabled()) {
log.trace("Submitting callable to local node on executor thread! - Usually remote command thread pool");
}
return super.submitConsumer(callable, triConsumer);
} else {
return null;
}
}
protected CompletableFuture<Void> startLocalInvocation(Runnable runnable) {
if (me == null || predicate == null || predicate.test(me)) {
if (log.isTraceEnabled()) {
log.trace("Submitting runnable to local node on executor thread! - Usually remote command thread pool");
}
return super.submit(runnable);
} else {
return null;
}
}
@Override
public void execute(Runnable runnable) {
executeRunnable(runnable);
}
private CompletableFuture<?> executeRunnable(Runnable runnable) {
CompletableFuture<?> localFuture = startLocalInvocation(runnable);
List<Address> targets = getRealTargets(false);
int size = targets.size();
CompletableFuture<?> remoteFuture;
if (size == 1) {
Address target = targets.get(0);
if (log.isTraceEnabled()) {
log.tracef("Submitting runnable to single remote node - JGroups Address %s", target);
}
remoteFuture = new CompletableFuture<>();
ReplicableCommand command = new ReplicableRunnableCommand(runnable);
CompletionStage<Response> request = transport.invokeCommand(target, command, PassthroughSingleResponseCollector.INSTANCE, DeliverOrder.NONE, time, unit);
request.handle((r, t) -> {
if (t != null) {
remoteFuture.completeExceptionally(t);
} else {
consumeResponse(r, target, remoteFuture::completeExceptionally);
// This won't override exception if there was one
remoteFuture.complete(null);
}
return null;
});
} else if (size > 1) {
remoteFuture = new CompletableFuture<>();
ReplicableCommand command = new ReplicableRunnableCommand(runnable);
ResponseCollector<Map<Address, Response>> collector = new PassthroughMapResponseCollector(targets.size());
CompletionStage<Map<Address, Response>> request = transport.invokeCommand(targets, command, collector, DeliverOrder.NONE, time, unit);
request.handle((r, t) -> {
if (t != null) {
remoteFuture.completeExceptionally(t);
} else {
r.forEach((key, value) -> consumeResponse(value, key, remoteFuture::completeExceptionally));
remoteFuture.complete(null);
}
return null;
});
} else if (localFuture != null) {
return localFuture;
} else {
return CompletableFuture.failedFuture(new SuspectException("No available nodes!"));
}
// remoteFuture is guaranteed to be non null at this point
if (localFuture != null) {
CompletableFuture<Void> future = new CompletableFuture<>();
CompletableFuture.allOf(localFuture, remoteFuture).whenComplete((v, t) -> {
if (t != null) {
if (t instanceof CompletionException) {
future.completeExceptionally(t.getCause());
} else {
future.completeExceptionally(t);
}
} else {
future.complete(null);
}
});
return future;
}
return remoteFuture;
}
@Override
public CompletableFuture<Void> submit(Runnable command) {
CompletableFuture<Void> future = new CompletableFuture<>();
executeRunnable(command).handle((r, t) -> {
if (t != null) {
future.completeExceptionally(t);
}
future.complete(null);
return null;
});
return future;
}
@Override
public <V> CompletableFuture<Void> submitConsumer(Function<? super EmbeddedCacheManager, ? extends V> function,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer) {
CompletableFuture<Void> localFuture = startLocalInvocation(function, triConsumer);
List<Address> targets = getRealTargets(false);
int size = targets.size();
if (size > 0) {
CompletableFuture<?>[] futures;
if (localFuture != null) {
futures = new CompletableFuture[size + 1];
futures[size] = localFuture;
} else {
futures = new CompletableFuture[size];
}
for (int i = 0; i < size; ++i) {
Address target = targets.get(i);
if (log.isTraceEnabled()) {
log.tracef("Submitting consumer to single remote node - address=%s, subject=%s", target, Security.getSubject());
}
ReplicableCommand command = new ReplicableManagerFunctionCommand(function, Security.getSubject());
CompletionStage<Response> request = transport.invokeCommand(target, command, PassthroughSingleResponseCollector.INSTANCE, DeliverOrder.NONE, time, unit);
futures[i] = request.toCompletableFuture().whenComplete((r, t) -> {
if (t != null) {
if (t instanceof TimeoutException) {
// Consumers for individual nodes should not be able to obscure the timeout
throw asCompletionException(t);
} else {
triConsumer.accept(target, null, t);
}
} else {
consumeResponse(r, target, v -> triConsumer.accept(target, (V) v, null),
throwable -> triConsumer.accept(target, null, throwable));
}
});
}
CompletableFuture<Void> resultFuture = new CompletableFuture<>();
CompletableFuture<Void> allFuture = CompletableFuture.allOf(futures);
allFuture.whenComplete((v, t) -> {
if (t != null) {
if (t instanceof CompletionException) {
resultFuture.completeExceptionally(t.getCause());
} else {
resultFuture.completeExceptionally(t);
}
} else {
resultFuture.complete(null);
}
});
return resultFuture;
} else if (localFuture != null) {
return localFuture;
} else {
return CompletableFuture.failedFuture(new SuspectException("No available nodes!"));
}
}
@Override
public ClusterExecutor singleNodeSubmission() {
return ClusterExecutors.singleNodeSubmissionExecutor(predicate, manager, transport, time, unit, localExecutor,
timeoutExecutor, 0);
}
@Override
public ClusterExecutor singleNodeSubmission(int failOverCount) {
return ClusterExecutors.singleNodeSubmissionExecutor(predicate, manager, transport, time, unit, localExecutor,
timeoutExecutor, failOverCount);
}
@Override
public ClusterExecutor allNodeSubmission() {
return this;
}
}
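// --- Usage sketch (hypothetical standalone example, not part of the original file) ---
// Collecting one value per node with submitConsumer(). Assumes a started clustered
// EmbeddedCacheManager named "manager"; in a real cluster the function must be marshallable
// so it can be shipped to remote nodes, and the address may be null on a non-clustered manager.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
class SubmitConsumerExample {
static Map<Address, String> statusPerNode(EmbeddedCacheManager manager) {
Map<Address, String> statuses = new ConcurrentHashMap<>();
manager.executor().submitConsumer(
cm -> cm.getStatus().toString(), // runs once on every node, including the local one
(address, status, throwable) -> { // runs on the originator for each response
if (throwable == null && address != null) {
statuses.put(address, status);
}
}).join();
return statuses;
}
}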
| 9,702
| 40.289362
| 165
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/ClusterExecutors.java
|
package org.infinispan.manager.impl;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
/**
* Static factory methods to construct a ClusterExecutor
* @author wburns
* @since 9.0
*/
public class ClusterExecutors {
private ClusterExecutors() { }
public static ClusterExecutor allSubmissionExecutor(Predicate<? super Address> predicate, EmbeddedCacheManager manager,
Transport transport, long time, TimeUnit unit, Executor localExecutor, ScheduledExecutorService timeoutExecutor) {
if (transport == null) {
return new LocalClusterExecutor(predicate, manager, localExecutor, time, unit, timeoutExecutor);
}
return new AllClusterExecutor(predicate, manager, transport, time, unit, localExecutor, timeoutExecutor);
}
public static ClusterExecutor singleNodeSubmissionExecutor(Predicate<? super Address> predicate, EmbeddedCacheManager manager,
Transport transport, long time, TimeUnit unit, Executor localExecutor, ScheduledExecutorService timeoutExecutor,
int failOverCount) {
if (failOverCount < 0) {
throw new IllegalArgumentException("Failover count must be 0 or greater");
}
ClusterExecutor executor;
if (transport == null) {
executor = new LocalClusterExecutor(predicate, manager, localExecutor, time, unit, timeoutExecutor);
} else {
executor = new SingleClusterExecutor(predicate, manager, transport, time, unit, localExecutor, timeoutExecutor);
}
if (failOverCount == 0) {
return executor;
}
return new FailOverClusterExecutor(executor, failOverCount);
}
}
| 1,945
| 40.404255
| 129
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/SingleClusterExecutor.java
|
package org.infinispan.manager.impl;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.impl.PassthroughSingleResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.security.Security;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.function.TriConsumer;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Cluster executor implementation that sends requests to a single node at a time
*
* @author wburns
* @since 9.1
*/
class SingleClusterExecutor extends AbstractClusterExecutor<SingleClusterExecutor> {
private static final Log log = LogFactory.getLog(SingleClusterExecutor.class);
SingleClusterExecutor(Predicate<? super Address> predicate, EmbeddedCacheManager manager,
Transport transport, long time, TimeUnit unit, Executor localExecutor,
ScheduledExecutorService timeoutExecutor) {
super(predicate, manager, transport, time, unit, localExecutor, timeoutExecutor);
}
@Override
public Log getLog() {
return log;
}
@Override
protected SingleClusterExecutor sameClusterExecutor(Predicate<? super Address> predicate, long time, TimeUnit unit) {
return new SingleClusterExecutor(predicate, manager, transport, time, unit, localExecutor, timeoutExecutor);
}
private Address findTarget() {
List<Address> possibleTargets = getRealTargets(true);
Address target;
int size = possibleTargets.size();
if (size == 0) {
target = null;
} else if (size == 1) {
target = possibleTargets.get(0);
} else {
target = possibleTargets.get(ThreadLocalRandom.current().nextInt(size));
}
return target;
}
@Override
public void execute(Runnable runnable) {
Address target = findTarget();
if (target != null) {
if (log.isTraceEnabled()) {
log.tracef("Submitting runnable to single remote node - JGroups Address %s", target);
}
if (target == me) {
// Interrupt does nothing
super.execute(runnable);
} else {
try {
ReplicableCommand command = new ReplicableRunnableCommand(runnable);
transport.sendTo(target, command, DeliverOrder.NONE);
} catch (Exception e) {
throw new CacheException(e);
}
}
}
}
@Override
public CompletableFuture<Void> submit(Runnable runnable) {
Address target = findTarget();
if (target == null) {
return CompletableFuture.failedFuture(new SuspectException("No available nodes!"));
}
if (log.isTraceEnabled()) {
log.tracef("Submitting runnable to single remote node - JGroups Address %s", target);
}
CompletableFuture<Void> future = new CompletableFuture<>();
if (target == me) {
return super.submit(runnable);
} else {
ReplicableCommand command = new ReplicableRunnableCommand(runnable);
CompletionStage<Response> request =
transport.invokeCommand(target, command, PassthroughSingleResponseCollector.INSTANCE, DeliverOrder.NONE,
time, unit);
request.whenComplete((r, t) -> {
if (t != null) {
future.completeExceptionally(t);
} else {
consumeResponse(r, target, future::completeExceptionally);
future.complete(null);
}
});
}
return future;
}
@Override
public <V> CompletableFuture<Void> submitConsumer(Function<? super EmbeddedCacheManager, ? extends V> function,
TriConsumer<? super Address, ? super V, ? super Throwable> triConsumer) {
Address target = findTarget();
if (target == null) {
return CompletableFuture.failedFuture(new SuspectException("No available nodes!"));
}
if (log.isTraceEnabled()) {
log.tracef("Submitting runnable to single remote node - JGroups Address %s", target);
}
if (target == me) {
return super.submitConsumer(function, triConsumer);
} else {
CompletableFuture<Void> future = new CompletableFuture<>();
ReplicableCommand command = new ReplicableManagerFunctionCommand(function, Security.getSubject());
CompletionStage<Response> request =
transport.invokeCommand(target, command, PassthroughSingleResponseCollector.INSTANCE, DeliverOrder.NONE,
time, unit);
request.whenComplete((r, t) -> {
try {
if (t != null) {
if (t instanceof TimeoutException) {
// Consumers for individual nodes should not be able to obscure the timeout
future.completeExceptionally(getLog().remoteNodeTimedOut(target, time, unit));
} else {
triConsumer.accept(target, null, t);
}
} else {
consumeResponse(r, target, v -> triConsumer.accept(target, (V) v, null),
throwable -> triConsumer.accept(target, null, throwable));
}
future.complete(null);
} catch (Throwable throwable) {
future.completeExceptionally(throwable);
}
});
return future;
}
}
@Override
public ClusterExecutor singleNodeSubmission() {
return this;
}
@Override
public ClusterExecutor singleNodeSubmission(int failOverCount) {
if (failOverCount == 0) {
return this;
}
return ClusterExecutors.singleNodeSubmissionExecutor(predicate, manager, transport, time, unit, localExecutor,
timeoutExecutor, failOverCount);
}
@Override
public ClusterExecutor allNodeSubmission() {
return ClusterExecutors.allSubmissionExecutor(predicate, manager, transport, time, unit, localExecutor,
timeoutExecutor);
}
}
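// --- Usage sketch (hypothetical standalone example, not part of the original file) ---
// Single-node submission with failover. Assumes a started clustered EmbeddedCacheManager
// named "manager". The runnable is sent to one random eligible node (see findTarget() above);
// with singleNodeSubmission(2) a timed-out or suspected node is retried on another node up to
// two more times via FailOverClusterExecutor.
import org.infinispan.manager.EmbeddedCacheManager;
class SingleNodeSubmissionExample {
static void run(EmbeddedCacheManager manager) {
manager.executor()
.singleNodeSubmission(2)
.submit(() -> System.out.println("Runs on exactly one node"))
.join();
}
}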
| 6,821
| 37.111732
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/manager/impl/ReplicableManagerFunctionCommand.java
|
package org.infinispan.manager.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import javax.security.auth.Subject;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.Security;
/**
* Replicable Command that runs the given Function passing the {@link EmbeddedCacheManager} as an argument
*
* @author wburns
* @since 8.2
*/
@Scope(Scopes.NONE)
public class ReplicableManagerFunctionCommand implements ReplicableCommand {
public static final byte COMMAND_ID = 60;
private Function<? super EmbeddedCacheManager, ?> function;
private Subject subject;
@Inject EmbeddedCacheManager manager;
public ReplicableManagerFunctionCommand() {
}
public ReplicableManagerFunctionCommand(Function<? super EmbeddedCacheManager, ?> function, Subject subject) {
this.function = function;
this.subject = subject;
}
@Override
public CompletableFuture<Object> invokeAsync() throws Throwable {
if (subject == null) {
return CompletableFuture.completedFuture(function.apply(new UnwrappingEmbeddedCacheManager(manager)));
} else {
return CompletableFuture.completedFuture(Security.doAs(subject, function, new UnwrappingEmbeddedCacheManager(manager)));
}
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
function = (Function<? super EmbeddedCacheManager, ?>) input.readObject();
subject = (Subject) input.readObject();
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeObject(function);
output.writeObject(subject);
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public boolean canBlock() {
// Note that it is highly possible that a user command could block, and some internal Infinispan ones already do
// This should be remedied with https://issues.redhat.com/browse/ISPN-11482
return false;
}
}
| 2,397
| 28.975
| 129
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/package-info.java
|
/**
* Cache stream processing.
*
* @api.public
*/
package org.infinispan.stream;
| 85
| 11.285714
| 30
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/CacheCollectors.java
|
package org.infinispan.stream;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collector;
import org.infinispan.commons.marshall.Externalizer;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.util.function.SerializableSupplier;
/**
* Helper class designed to be used to create a serializable Collector for use with
* {@link org.infinispan.CacheStream#collect(Collector)} from a supplier of a collector. The problem is that the
* standard {@link java.util.stream.Collectors} class provides neither serializable collectors nor a way to extend
* their functionality, so this class is used instead.
*/
public class CacheCollectors {
private CacheCollectors() { }
/**
* Creates a serializable collector that, upon usage, creates the actual collector from the serializable supplier
* provided by the user.
* @param supplier The serializable supplier used to create the collector
* @param <T> The input type of the collector
* @param <R> The resulting type of the collector
* @return the collector which is serializable
* @see SerializableSupplier
*/
public static <T, R> Collector<T, ?, R> serializableCollector(SerializableSupplier<Collector<T, ?, R>> supplier) {
return new CollectorSupplier<>(supplier);
}
/**
* Similar to {@link CacheCollectors#serializableCollector(SerializableSupplier)} except that the supplier provided
* must be marshallable through Infinispan marshalling techniques. Note that this is not verified until runtime.
* @param supplier The marshallable supplier of collectors
* @param <T> The input type of the collector
* @param <R> The resulting type of the collector
* @return the collector which is serializable
* @see Externalizer
* @see org.infinispan.commons.marshall.AdvancedExternalizer
*/
public static <T, R> Collector<T, ?, R> collector(Supplier<Collector<T, ?, R>> supplier) {
return new CollectorSupplier<>(supplier);
}
@SerializeWith(value = CollectorSupplier.CollectorSupplierExternalizer.class)
private static final class CollectorSupplier<T, R> implements Collector<T, Object, R> {
private final Supplier<Collector<T, ?, R>> supplier;
private transient Collector<T, Object, R> collector;
private Collector<T, Object, R> getCollector() {
if (collector == null) {
collector = (Collector<T, Object, R>) supplier.get();
}
return collector;
}
CollectorSupplier(Supplier<Collector<T, ?, R>> supplier) {
this.supplier = supplier;
}
@Override
public Supplier<Object> supplier() {
return getCollector().supplier();
}
@Override
public BiConsumer<Object, T> accumulator() {
return getCollector().accumulator();
}
@Override
public BinaryOperator<Object> combiner() {
return getCollector().combiner();
}
@Override
public Function<Object, R> finisher() {
return getCollector().finisher();
}
@Override
public Set<Characteristics> characteristics() {
return getCollector().characteristics();
}
public static final class CollectorSupplierExternalizer implements Externalizer<CollectorSupplier<?, ?>> {
@Override
public void writeObject(ObjectOutput output, CollectorSupplier object) throws IOException {
output.writeObject(object.supplier);
}
@Override
public CollectorSupplier readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new CollectorSupplier((Supplier<Collector>) input.readObject());
}
}
}
}
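// --- Usage sketch (hypothetical standalone example, not part of the original file) ---
// Collecting a distributed CacheStream with a serializable collector. The supplier lambda is
// typed as SerializableSupplier by the method above, so the collector can be recreated on
// remote nodes. The cache name and types are illustrative.
import java.util.List;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.stream.CacheCollectors;
class CacheCollectorsExample {
static List<Integer> collectValues(Cache<String, Integer> cache) {
return cache.values().stream()
.collect(CacheCollectors.serializableCollector(() -> Collectors.toList()));
}
}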
| 3,956
| 35.638889
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/StreamMarshalling.java
|
package org.infinispan.stream;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.marshall.core.Ids;
/**
* Static factory class containing methods that will provide marshallable instances for very common use cases.
* Every instance returned from the various static methods uses Infinispan marshalling to reduce payload sizes
* considerably and should be used whenever possible.
*/
public class StreamMarshalling {
private StreamMarshalling() { }
/**
* Provides a predicate that returns true when the tested object is equal to the given instance.
* @param object the instance to test equality on
* @return the predicate
*/
public static Predicate<Object> equalityPredicate(Object object) {
return new EqualityPredicate(object);
}
/**
* Predicate that returns true if the object passed to it is not null.
* @return the predicate
*/
public static Predicate<Object> nonNullPredicate() {
return NonNullPredicate.getInstance();
}
/**
* Predicate that always returns true, irrespective of the value provided.
* @return the predicate
*/
public static Predicate<Object> alwaysTruePredicate() {
return AlwaysTruePredicate.getInstance();
}
/**
* Provides a function that returns the key of the entry when invoked.
* @param <K> key type of the entry
* @param <V> value type of the entry
* @return a function that when applied to a given entry will return the key
*/
public static <K, V> Function<Map.Entry<K, V>, K> entryToKeyFunction() {
return EntryToKeyFunction.getInstance();
}
/**
* Provides a function that returns the value of the entry when invoked.
* @param <K> key type of the entry
* @param <V> value type of the entry
* @return a function that when applied to a given entry will return the value
*/
public static <K, V> Function<Map.Entry<K, V>, V> entryToValueFunction() {
return EntryToValueFunction.getInstance();
}
/**
* Same as {@link Function#identity()} except that this instance is also able to be marshalled by Infinispan.
* @param <T> any type
* @return function that just returns the provided value
*/
public static <T> Function<T, T> identity() {
return IdentityFunction.getInstance();
}
/**
* Provides a function that, given a key, returns the {@link CacheEntry} that maps to it.
* This function only works when used with a {@link org.infinispan.CacheStream} returned
* from the desired {@link Cache}. The entry will be read from the <b>Cache</b> from which the
* <b>CacheStream</b> was created.
* @param <K> the key type
* @param <V> the expected value type of the entry
* @return a function that when applied returns the entry for the given key
*/
public static <K, V> Function<K, CacheEntry<K, V>> keyToEntryFunction() {
return new KeyToEntryFunction<>();
}
private static final class EqualityPredicate implements Predicate<Object> {
private final Object object;
private EqualityPredicate(Object object) {
Objects.requireNonNull(object);
this.object = object;
}
@Override
public boolean test(Object t) {
return object.equals(t);
}
}
private static final class NonNullPredicate implements Predicate<Object> {
private static final NonNullPredicate INSTANCE = new NonNullPredicate();
public static NonNullPredicate getInstance() {
return INSTANCE;
}
@Override
public boolean test(Object t) {
return t != null;
}
}
private static final class AlwaysTruePredicate implements Predicate<Object> {
private static final AlwaysTruePredicate INSTANCE = new AlwaysTruePredicate();
public static AlwaysTruePredicate getInstance() {
return INSTANCE;
}
@Override
public boolean test(Object t) {
return true;
}
}
private static final class EntryToKeyFunction<K, V> implements Function<Map.Entry<K, V>, K> {
private static final EntryToKeyFunction<?, ?> FUNCTION = new EntryToKeyFunction<>();
public static <K, V> EntryToKeyFunction<K, V> getInstance() {
return (EntryToKeyFunction<K, V>) FUNCTION;
}
@Override
public K apply(Map.Entry<K, V> kvEntry) {
return kvEntry.getKey();
}
}
private static final class EntryToValueFunction<K, V> implements Function<Map.Entry<K, V>, V> {
private static final EntryToValueFunction<?, ?> FUNCTION = new EntryToValueFunction<>();
public static <K, V> EntryToValueFunction<K, V> getInstance() {
return (EntryToValueFunction<K, V>) FUNCTION;
}
@Override
public V apply(Map.Entry<K, V> kvEntry) {
return kvEntry.getValue();
}
}
@Scope(Scopes.NONE)
static final class KeyToEntryFunction<K, V> implements Function<K, CacheEntry<K, V>> {
@Inject AdvancedCache<K, V> advancedCache;
@Override
public CacheEntry<K, V> apply(K k) {
return advancedCache.getCacheEntry(k);
}
}
private static final class IdentityFunction<T> implements Function<T, T> {
private static final IdentityFunction<?> FUNCTION = new IdentityFunction<>();
public static <T> IdentityFunction<T> getInstance() {
return (IdentityFunction<T>) FUNCTION;
}
@Override
public T apply(T t) {
return t;
}
}
public static final class StreamMarshallingExternalizer implements AdvancedExternalizer<Object> {
enum ExternalizerId {
EQUALITY_PREDICATE(EqualityPredicate.class),
ENTRY_KEY_FUNCTION(EntryToKeyFunction.class),
ENTRY_VALUE_FUNCTION(EntryToValueFunction.class),
NON_NULL_PREDICATE(NonNullPredicate.class),
ALWAYS_TRUE_PREDICATE(AlwaysTruePredicate.class),
KEY_ENTRY_FUNCTION(KeyToEntryFunction.class),
IDENTITY_FUNCTION(IdentityFunction.class),
;
private final Class<? extends Object> marshalledClass;
ExternalizerId(Class<? extends Object> marshalledClass) {
this.marshalledClass = marshalledClass;
}
}
private final Map<Class<? extends Object>, ExternalizerId> objects = new HashMap<>();
public StreamMarshallingExternalizer() {
for (ExternalizerId id : ExternalizerId.values()) {
objects.put(id.marshalledClass, id);
}
}
@Override
public Set<Class<?>> getTypeClasses() {
return Util.<Class<? extends Object>>asSet(EqualityPredicate.class, EntryToKeyFunction.class,
EntryToValueFunction.class, NonNullPredicate.class, AlwaysTruePredicate.class,
KeyToEntryFunction.class, IdentityFunction.class);
}
@Override
public Integer getId() {
return Ids.STREAM_MARSHALLING;
}
@Override
public void writeObject(ObjectOutput output, Object object) throws IOException {
ExternalizerId id = objects.get(object.getClass());
if (id == null) {
throw new IllegalArgumentException("Unsupported class " + object.getClass() + " was provided!");
}
output.writeByte(id.ordinal());
switch (id) {
case EQUALITY_PREDICATE:
output.writeObject(((EqualityPredicate) object).object);
break;
}
}
@Override
public Object readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int number = input.readUnsignedByte();
ExternalizerId[] ids = ExternalizerId.values();
if (number < 0 || number >= ids.length) {
throw new IllegalArgumentException("Found invalid number " + number);
}
ExternalizerId id = ids[number];
switch (id) {
case EQUALITY_PREDICATE:
return new EqualityPredicate(input.readObject());
case ENTRY_KEY_FUNCTION:
return EntryToKeyFunction.getInstance();
case ENTRY_VALUE_FUNCTION:
return EntryToValueFunction.getInstance();
case NON_NULL_PREDICATE:
return NonNullPredicate.getInstance();
case ALWAYS_TRUE_PREDICATE:
return AlwaysTruePredicate.getInstance();
case KEY_ENTRY_FUNCTION:
return new KeyToEntryFunction<>();
case IDENTITY_FUNCTION:
return IdentityFunction.getInstance();
default:
throw new IllegalArgumentException("ExternalizerId not supported: " + id);
}
}
}
}
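// --- Usage sketch (hypothetical standalone example, not part of the original file) ---
// Using the marshallable helpers above in a stream pipeline so the filter/map stages serialize
// compactly via the externalizer. The cache name and types are illustrative.
import java.util.List;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.stream.CacheCollectors;
import org.infinispan.stream.StreamMarshalling;
class StreamMarshallingExample {
static List<String> nonNullKeys(Cache<String, String> cache) {
return cache.entrySet().stream()
.filter(StreamMarshalling.nonNullPredicate())
.map(StreamMarshalling.<String, String>entryToKeyFunction())
.collect(CacheCollectors.serializableCollector(() -> Collectors.toList()));
}
}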
| 9,218
| 33.657895
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/CacheAware.java
|
package org.infinispan.stream;
import org.infinispan.Cache;
/**
* Interface that describes how a cache can be injected into another object. This is useful for cases where
* an object has been deserialized and a Cache must be injected into it.
* @since 8.1
*/
public interface CacheAware<K, V> {
/**
* Method that is invoked when a cache is to be injected.
* @param cache the cache instance tied to the object
*/
void injectCache(Cache<K, V> cache);
}
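// --- Illustrative sketch (hypothetical standalone example, not part of the original file) ---
// A function that is deserialized on a remote node and then receives the local Cache through
// injectCache() before it is applied. Real usage would also need the class to be marshallable;
// the class name is illustrative.
import java.io.Serializable;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.stream.CacheAware;
class ContainsKeyFunction implements CacheAware<String, String>, Function<String, Boolean>, Serializable {
private transient Cache<String, String> cache;
@Override
public void injectCache(Cache<String, String> cache) {
this.cache = cache; // invoked after deserialization, before apply()
}
@Override
public Boolean apply(String key) {
return cache.containsKey(key);
}
}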
| 477
| 27.117647
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/SegmentAwareOperation.java
|
package org.infinispan.stream.impl;
/**
* Terminal stream operation that is aware of segments being lost. This interface describes a single callback method
* to be invoked when a segment is lost while the operation is concurrently running.
* @since 8.0
*/
public interface SegmentAwareOperation {
/**
* This method will be invoked when the operation is known to be running on a given set of segments
* and this node no longer owns one or more of them.
* @param allSegmentsLost when {@code true}, all segments covered by the operation have been lost, so the
* operation should stop performing further work if possible
* @return whether or not this operation was affected by the loss of segments
*/
boolean lostSegment(boolean allSegmentsLost);
}
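// --- Illustrative sketch (hypothetical standalone example, not part of the original file) ---
// A minimal implementation of the callback contract described above: the flag records that
// results may be incomplete, and when every segment is lost the operation stops further work.
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.stream.impl.SegmentAwareOperation;
class TrackingOperation implements SegmentAwareOperation {
private final AtomicBoolean segmentLost = new AtomicBoolean();
private volatile boolean stopRequested;
@Override
public boolean lostSegment(boolean allSegmentsLost) {
if (allSegmentsLost) {
stopRequested = true; // no point doing more work, every segment is gone
}
segmentLost.set(true);
return true; // this sketch treats any lost segment as affecting the results
}
boolean stopRequested() {
return stopRequested;
}
}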
| 989
| 51.105263
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/IntermediateCacheStream.java
|
package org.infinispan.stream.impl;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Optional;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Collector;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.BaseCacheStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.stream.impl.local.LocalCacheStream;
import org.infinispan.util.function.SerializableSupplier;
/**
* An intermediate cache stream used when an intermediate operation requires both a remote and a local portion.
*/
public class IntermediateCacheStream<Original, R> implements CacheStream<R> {
private BaseCacheStream remoteStream;
private final IntermediateType type;
private LocalCacheStream<R> localStream;
private final IntermediateCacheStreamSupplier supplier;
public IntermediateCacheStream(DistributedCacheStream<Original, R> remoteStream) {
this.remoteStream = remoteStream;
this.type = IntermediateType.REF;
this.supplier = new IntermediateCacheStreamSupplier(type, remoteStream);
this.localStream = new LocalCacheStream<>(supplier, remoteStream.parallel,
remoteStream.registry);
}
public IntermediateCacheStream(BaseCacheStream remoteStream, IntermediateType type,
LocalCacheStream<R> localStream, IntermediateCacheStreamSupplier supplier) {
this.remoteStream = remoteStream;
this.type = type;
this.localStream = localStream;
this.supplier = supplier;
}
@Override
public CacheStream<R> sequentialDistribution() {
remoteStream = remoteStream.sequentialDistribution();
return this;
}
@Override
public CacheStream<R> parallelDistribution() {
remoteStream = remoteStream.parallelDistribution();
return this;
}
@Override
public CacheStream<R> filterKeySegments(Set<Integer> segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public CacheStream<R> filterKeySegments(IntSet segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public CacheStream<R> filterKeys(Set<?> keys) {
remoteStream = remoteStream.filterKeys(keys);
return this;
}
@Override
public CacheStream<R> distributedBatchSize(int batchSize) {
remoteStream = remoteStream.distributedBatchSize(batchSize);
return this;
}
@Override
public CacheStream<R> segmentCompletionListener(SegmentCompletionListener listener) {
throw new UnsupportedOperationException("Segment completion listener is only supported when no intermediate " +
"operation is provided (sorted, distinct, limit, skip)");
}
@Override
public CacheStream<R> disableRehashAware() {
remoteStream = remoteStream.disableRehashAware();
return this;
}
@Override
public CacheStream<R> timeout(long timeout, TimeUnit unit) {
remoteStream = remoteStream.timeout(timeout, unit);
return this;
}
@Override
public boolean isParallel() {
return localStream.isParallel();
}
@Override
public CacheStream<R> sorted() {
localStream = localStream.sorted();
return this;
}
@Override
public CacheStream<R> sorted(Comparator<? super R> comparator) {
localStream = localStream.sorted(comparator);
return this;
}
@Override
public CacheStream<R> limit(long maxSize) {
localStream = localStream.limit(maxSize);
return this;
}
@Override
public CacheStream<R> skip(long n) {
localStream = localStream.skip(n);
return this;
}
@Override
public CacheStream<R> peek(Consumer<? super R> action) {
localStream = localStream.peek(action);
return this;
}
@Override
public CacheStream<R> distinct() {
localStream = localStream.distinct();
return this;
}
@Override
public CacheStream<R> filter(Predicate<? super R> predicate) {
localStream = localStream.filter(predicate);
return this;
}
@Override
public <R1> CacheStream<R1> map(Function<? super R, ? extends R1> mapper) {
localStream = (LocalCacheStream<R>) localStream.map(mapper);
return (CacheStream<R1>) this;
}
@Override
public DoubleCacheStream mapToDouble(ToDoubleFunction<? super R> mapper) {
return new IntermediateDoubleCacheStream(remoteStream, type, localStream.mapToDouble(mapper), supplier);
}
@Override
public IntCacheStream mapToInt(ToIntFunction<? super R> mapper) {
return new IntermediateIntCacheStream(remoteStream, type, localStream.mapToInt(mapper), supplier);
}
@Override
public LongCacheStream mapToLong(ToLongFunction<? super R> mapper) {
return new IntermediateLongCacheStream(remoteStream, type, localStream.mapToLong(mapper), supplier);
}
@Override
public <R1> CacheStream<R1> flatMap(Function<? super R, ? extends Stream<? extends R1>> mapper) {
localStream = (LocalCacheStream<R>) localStream.flatMap(mapper);
return (CacheStream<R1>) this;
}
@Override
public DoubleCacheStream flatMapToDouble(Function<? super R, ? extends DoubleStream> mapper) {
return new IntermediateDoubleCacheStream(remoteStream, type, localStream.flatMapToDouble(mapper), supplier);
}
@Override
public IntCacheStream flatMapToInt(Function<? super R, ? extends IntStream> mapper) {
return new IntermediateIntCacheStream(remoteStream, type, localStream.flatMapToInt(mapper), supplier);
}
@Override
public LongCacheStream flatMapToLong(Function<? super R, ? extends LongStream> mapper) {
return new IntermediateLongCacheStream(remoteStream, type, localStream.flatMapToLong(mapper), supplier);
}
@Override
public CacheStream<R> parallel() {
remoteStream = (BaseCacheStream) remoteStream.parallel();
localStream = (LocalCacheStream) localStream.parallel();
return this;
}
@Override
public CacheStream<R> sequential() {
remoteStream = (BaseCacheStream) remoteStream.sequential();
localStream = (LocalCacheStream) localStream.sequential();
return this;
}
@Override
public CacheStream<R> unordered() {
localStream = (LocalCacheStream<R>) localStream.unordered();
return this;
}
@Override
public void forEach(Consumer<? super R> action) {
localStream.forEach(action);
}
@Override
public void forEachOrdered(Consumer<? super R> action) {
localStream.forEachOrdered(action);
}
@Override
public <K, V> void forEach(BiConsumer<Cache<K, V>, ? super R> action) {
localStream.forEach(action);
}
@Override
public R reduce(R identity, BinaryOperator<R> accumulator) {
return localStream.reduce(identity, accumulator);
}
@Override
public Optional<R> reduce(BinaryOperator<R> accumulator) {
return localStream.reduce(accumulator);
}
@Override
public <U> U reduce(U identity, BiFunction<U, ? super R, U> accumulator, BinaryOperator<U> combiner) {
return localStream.reduce(identity, accumulator, combiner);
}
@Override
public <R1, A> R1 collect(Collector<? super R, A, R1> collector) {
return localStream.collect(collector);
}
@Override
public <R1> R1 collect(SerializableSupplier<Collector<? super R, ?, R1>> supplier) {
return localStream.collect(supplier);
}
@Override
public <R1> R1 collect(Supplier<Collector<? super R, ?, R1>> supplier) {
return localStream.collect(supplier);
}
@Override
public <R1> R1 collect(Supplier<R1> supplier, BiConsumer<R1, ? super R> accumulator, BiConsumer<R1, R1> combiner) {
return localStream.collect(supplier, accumulator, combiner);
}
@Override
public Optional<R> max(Comparator<? super R> comparator) {
return localStream.max(comparator);
}
@Override
public Optional<R> min(Comparator<? super R> comparator) {
return localStream.min(comparator);
}
@Override
public long count() {
return localStream.count();
}
@Override
public boolean anyMatch(Predicate<? super R> predicate) {
return localStream.anyMatch(predicate);
}
@Override
public boolean allMatch(Predicate<? super R> predicate) {
return localStream.allMatch(predicate);
}
@Override
public boolean noneMatch(Predicate<? super R> predicate) {
return localStream.noneMatch(predicate);
}
@Override
public Optional<R> findFirst() {
return localStream.findFirst();
}
@Override
public Optional<R> findAny() {
return localStream.findAny();
}
@Override
public Iterator<R> iterator() {
return localStream.iterator();
}
@Override
public Spliterator<R> spliterator() {
return localStream.spliterator();
}
@Override
public Object[] toArray() {
return localStream.toArray();
}
@Override
public <A> A[] toArray(IntFunction<A[]> generator) {
return localStream.toArray(generator);
}
@Override
public CacheStream<R> onClose(Runnable closeHandler) {
remoteStream = (BaseCacheStream) remoteStream.onClose(closeHandler);
return this;
}
@Override
public void close() {
localStream.close();
remoteStream.close();
}
}
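// --- Usage sketch (hypothetical standalone example, not part of the original file) ---
// The kind of pipeline that ends up on this class: sorted()/limit()/skip()/distinct() cannot be
// fully distributed, so the remote portion feeds a LocalCacheStream that applies them (see the
// constructor above). The cache name and types are illustrative; the comparator is applied
// locally on the originator in this sketch, so its marshalling is not a concern here.
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.stream.CacheCollectors;
class IntermediateStreamExample {
static List<Map.Entry<String, Integer>> topTen(Cache<String, Integer> cache) {
return cache.entrySet().stream()
.sorted(Map.Entry.<String, Integer>comparingByValue().reversed()) // forces the local portion
.limit(10) // applied locally as well
.collect(CacheCollectors.serializableCollector(() -> Collectors.toList()));
}
}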
| 10,086
| 28.069164
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/LockedStreamImpl.java
|
package org.infinispan.stream.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.LockedStream;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.EntryWrapper;
import org.infinispan.util.KeyValuePair;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.concurrent.locks.KeyAwareLockPromise;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.function.SerializablePredicate;
/**
* LockedStream implementation that locks a key using the {@link LockManager} before executing the user-provided
* code and unlocks it afterwards.
* <p>
* This implementation doesn't work properly when using an optimistic transactional cache. Care should be taken to
* avoid that usage if possible.
* @author wburns
* @since 9.0
*/
public class LockedStreamImpl<K, V> implements LockedStream<K, V> {
final CacheStream<CacheEntry<K, V>> realStream;
final Predicate<? super CacheEntry<K, V>> predicate;
final long time;
final TimeUnit unit;
public LockedStreamImpl(CacheStream<CacheEntry<K, V>> realStream, long time, TimeUnit unit) {
this.realStream = Objects.requireNonNull(realStream);
this.predicate = null;
if (time <= 0) {
throw new IllegalArgumentException("time must be greater than 0");
}
this.time = time;
this.unit = Objects.requireNonNull(unit);
}
LockedStreamImpl(CacheStream<CacheEntry<K, V>> realStream, Predicate<? super CacheEntry<K, V>> predicate,
long time, TimeUnit unit) {
this.realStream = realStream;
this.predicate = predicate;
this.time = time;
this.unit = unit;
}
private LockedStream<K, V> newOrReuse(CacheStream<CacheEntry<K, V>> resultingStream) {
if (resultingStream == realStream) {
return this;
} else {
return newStream(resultingStream, predicate, time, unit);
}
}
/**
* Method to be overridden by a subclass so that chained methods return the correct implementation
* @param realStream the underlying stream
* @param predicate the predicate to use
* @param time the time value to use
* @param unit the new unit to use
* @return the lock stream to return
*/
LockedStreamImpl<K, V> newStream(CacheStream<CacheEntry<K, V>> realStream, Predicate<? super CacheEntry<K, V>> predicate,
long time, TimeUnit unit) {
return new LockedStreamImpl<>(realStream, predicate, time, unit);
}
@Override
public LockedStream<K, V> filter(final Predicate<? super CacheEntry<K, V>> predicate) {
Objects.requireNonNull(predicate);
Predicate<? super CacheEntry<K, V>> usedPredicate;
if (this.predicate != null) {
usedPredicate = (SerializablePredicate<? super CacheEntry<K, V>>) e -> this.predicate.test(e) && predicate.test(e);
} else {
usedPredicate = predicate;
}
return newStream(realStream, usedPredicate, time, unit);
}
@Override
public void forEach(BiConsumer<Cache<K, V>, ? super CacheEntry<K, V>> biConsumer) {
realStream.forEach(new CacheEntryConsumer<>(biConsumer, predicate));
}
@Override
public <R> Map<K, R> invokeAll(BiFunction<Cache<K, V>, ? super CacheEntry<K, V>, R> biFunction) {
Map<K, R> map = new HashMap<>();
Iterator<KeyValuePair<K, R>> iterator =
realStream.flatMap(new CacheEntryFunction<>(biFunction, predicate))
.iterator();
iterator.forEachRemaining(e -> map.put(e.getKey(), e.getValue()));
return map;
}
@Override
public LockedStream<K, V> sequentialDistribution() {
return newOrReuse(realStream.sequentialDistribution());
}
@Override
public LockedStream<K, V> parallelDistribution() {
return newOrReuse(realStream.parallelDistribution());
}
@Override
public LockedStream<K, V> filterKeySegments(Set<Integer> segments) {
return newOrReuse(realStream.filterKeySegments(segments));
}
@Override
public LockedStream<K, V> filterKeySegments(IntSet segments) {
return newOrReuse(realStream.filterKeySegments(segments));
}
@Override
public LockedStream<K, V> filterKeys(Set<?> keys) {
return newOrReuse(realStream.filterKeys(keys));
}
@Override
public LockedStream<K, V> distributedBatchSize(int batchSize) {
return newOrReuse(realStream.distributedBatchSize(batchSize));
}
@Override
public LockedStream segmentCompletionListener(SegmentCompletionListener listener) {
throw new UnsupportedOperationException("LockedStream doesn't support completion listener");
}
@Override
public LockedStream<K, V> disableRehashAware() {
return newOrReuse(realStream.disableRehashAware());
}
@Override
public LockedStream timeout(long timeout, TimeUnit unit) {
return newOrReuse(realStream.timeout(timeout, unit));
}
@Override
public Iterator<CacheEntry<K, V>> iterator() {
throw new UnsupportedOperationException("LockedStream doesn't support iterator");
}
@Override
public Spliterator<CacheEntry<K, V>> spliterator() {
throw new UnsupportedOperationException("LockedStream doesn't support spliterator");
}
@Override
public boolean isParallel() {
return realStream.isParallel();
}
@Override
public LockedStream<K, V> sequential() {
return newOrReuse(realStream.sequential());
}
@Override
public LockedStream<K, V> parallel() {
return newOrReuse(realStream.parallel());
}
@Override
public LockedStream<K, V> unordered() {
// This stream is always unordered
return this;
}
@Override
public LockedStream<K, V> onClose(Runnable closeHandler) {
return newOrReuse(realStream.onClose(closeHandler));
}
@Override
public void close() {
realStream.close();
}
@Scope(Scopes.NONE)
static abstract class LockHelper<K, V, R> {
protected final Predicate<? super CacheEntry<K, V>> predicate;
@Inject protected transient LockManager lockManager;
protected LockHelper(Predicate<? super CacheEntry<K, V>> predicate) {
this.predicate = predicate;
}
R perform(Cache<K, V> cache, CacheEntry<K, V> entry) {
K key = entry.getKey();
lock(key);
try {
CacheEntry<K, V> rereadEntry = cache.getAdvancedCache().getCacheEntry(key);
if (rereadEntry != null && (predicate == null || predicate.test(rereadEntry))) {
Cache<K, V> cacheToUse = cache.getAdvancedCache().lockAs(key);
return actualPerform(cacheToUse, rereadEntry);
}
return null;
} finally {
lockManager.unlock(key, key);
}
}
protected abstract R actualPerform(Cache<K, V> cache, CacheEntry<K, V> entry);
private void lock(K key) {
KeyAwareLockPromise kalp = lockManager.lock(key, key, 10, TimeUnit.SECONDS);
if (!kalp.isAvailable()) {
try {
ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker() {
@Override
public boolean block() throws InterruptedException {
kalp.lock();
return true;
}
@Override
public boolean isReleasable() {
return kalp.isAvailable();
}
});
} catch (InterruptedException e) {
throw new CacheException(e);
} catch (TimeoutException e) {
throw new CacheException("Could not acquire lock for key: " + key + " in 10 seconds");
}
}
}
}
@Scope(Scopes.NONE)
@SerializeWith(value = CacheEntryFunction.Externalizer.class)
static class CacheEntryFunction<K, V, R> extends LockHelper<K, V, KeyValuePair<K, R>> implements Function<CacheEntry<K, V>, Stream<KeyValuePair<K, R>>> {
private final BiFunction<Cache<K, V>, ? super CacheEntry<K, V>, R> biFunction;
@Inject protected transient Cache<K, V> cache;
protected CacheEntryFunction(BiFunction<Cache<K, V>, ? super CacheEntry<K, V>, R> biFunction,
Predicate<? super CacheEntry<K, V>> predicate) {
super(predicate);
this.biFunction = biFunction;
}
@Override
public Stream<KeyValuePair<K, R>> apply(CacheEntry<K, V> kvCacheEntry) {
KeyValuePair<K, R> pair = perform(cache, kvCacheEntry);
return pair != null ? Stream.of(pair) : Stream.empty();
}
@Override
protected KeyValuePair<K, R> actualPerform(Cache<K, V> cache, CacheEntry<K, V> entry) {
return new KeyValuePair<>(entry.getKey(), biFunction.apply(cache, entry));
}
public static final class Externalizer implements org.infinispan.commons.marshall.Externalizer<CacheEntryFunction> {
@Override
public void writeObject(ObjectOutput output, CacheEntryFunction object) throws IOException {
output.writeObject(object.biFunction);
output.writeObject(object.predicate);
}
@Override
public CacheEntryFunction readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new CacheEntryFunction((BiFunction) input.readObject(), (Predicate) input.readObject());
}
}
}
@SerializeWith(value = CacheEntryConsumer.Externalizer.class)
static class CacheEntryConsumer<K, V> extends LockHelper<K, V, Void> implements BiConsumer<Cache<K, V>, CacheEntry<K, V>> {
private final BiConsumer<Cache<K, V>, ? super CacheEntry<K, V>> realConsumer;
private CacheEntryConsumer(BiConsumer<Cache<K, V>, ? super CacheEntry<K, V>> realConsumer,
Predicate<? super CacheEntry<K, V>> predicate) {
super(predicate);
this.realConsumer = realConsumer;
}
@Override
public void accept(Cache<K, V> kvCache, CacheEntry<K, V> kvCacheEntry) {
perform(kvCache, kvCacheEntry);
}
@Override
protected Void actualPerform(Cache<K, V> cache, CacheEntry<K, V> entry) {
realConsumer.accept(cache, new EntryWrapper<>(cache, entry));
return null;
}
public static final class Externalizer implements org.infinispan.commons.marshall.Externalizer<CacheEntryConsumer> {
@Override
public void writeObject(ObjectOutput output, CacheEntryConsumer object) throws IOException {
output.writeObject(object.realConsumer);
output.writeObject(object.predicate);
}
@Override
public CacheEntryConsumer readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new CacheEntryConsumer((BiConsumer) input.readObject(), (Predicate) input.readObject());
}
}
}
}
| 11,683
| 34.730887
| 156
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/TxLockedStreamImpl.java
|
package org.infinispan.stream.impl;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.commons.CacheException;
import org.infinispan.container.entries.CacheEntry;
/**
* Locked stream designed for transactional caches. It suspends any transaction that is ongoing on the invoking
* thread before a terminal operation runs and resumes it afterwards, so the per-entry work executes outside the
* caller's transaction.
* @author wburns
* @since 9.0
*/
public class TxLockedStreamImpl<K, V> extends LockedStreamImpl<K, V> {
final TransactionManager tm;
public TxLockedStreamImpl(TransactionManager tm, CacheStream<CacheEntry<K, V>> realStream, long time, TimeUnit unit) {
super(realStream, time, unit);
this.tm = Objects.requireNonNull(tm);
}
TxLockedStreamImpl(TransactionManager tm, CacheStream<CacheEntry<K, V>> realStream,
Predicate<? super CacheEntry<K, V>> predicate, long time, TimeUnit unit) {
super(realStream, predicate, time, unit);
this.tm = tm;
}
@Override
public void forEach(BiConsumer<Cache<K, V>, ? super CacheEntry<K, V>> biConsumer) {
Transaction ongoingTransaction = null;
try {
ongoingTransaction = suspendOngoingTransactionIfExists();
super.forEach(biConsumer);
} finally {
resumePreviousOngoingTransaction(ongoingTransaction);
}
}
@Override
public <R> Map<K, R> invokeAll(BiFunction<Cache<K, V>, ? super CacheEntry<K, V>, R> biFunction) {
Transaction ongoingTransaction = null;
try {
ongoingTransaction = suspendOngoingTransactionIfExists();
return super.invokeAll(biFunction);
} finally {
resumePreviousOngoingTransaction(ongoingTransaction);
}
}
private Transaction suspendOngoingTransactionIfExists() {
final Transaction tx = getOngoingTransaction();
if (tx != null) {
try {
tm.suspend();
} catch (SystemException e) {
throw new CacheException("Unable to suspend transaction.", e);
}
}
return tx;
}
private Transaction getOngoingTransaction() {
try {
return tm.getTransaction();
} catch (SystemException e) {
throw new CacheException("Unable to get transaction", e);
}
}
private void resumePreviousOngoingTransaction(Transaction transaction) {
if (transaction != null) {
try {
tm.resume(transaction);
} catch (Exception e) {
throw new CacheException("Had problems trying to resume a transaction after locked stream forEach()", e);
}
}
}
@Override
LockedStreamImpl<K, V> newStream(CacheStream<CacheEntry<K, V>> realStream,
Predicate<? super CacheEntry<K, V>> predicate, long time, TimeUnit unit) {
return new TxLockedStreamImpl<>(tm, realStream, predicate, time, unit);
}
}
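// Usage sketch under stated assumptions: a cache configured with pessimistic transactions (locked
// streams reject optimistic ones) and a caller that already has an active transaction. The class
// above suspends that transaction around forEach/invokeAll, so the per-entry work runs outside it.
class TxLockedStreamUsageSketch {
static void demo(org.infinispan.AdvancedCache<String, String> cache) throws Exception {
TransactionManager tm = cache.getTransactionManager();
tm.begin();
cache.put("k", "v"); // enlisted in the surrounding transaction
cache.lockedStream()
.filter(e -> e.getValue() != null)
.forEach((c, e) -> c.put(e.getKey(), e.getValue() + "-visited")); // runs outside the tx
tm.commit();
}
}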
| 3,130
| 31.614583
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/DistributedIntCacheStream.java
|
package org.infinispan.stream.impl;
import java.lang.invoke.MethodHandles;
import java.util.IntSummaryStatistics;
import java.util.Iterator;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.IntBinaryOperator;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
import java.util.function.ObjIntConsumer;
import java.util.function.Supplier;
import java.util.function.ToIntFunction;
import java.util.stream.IntStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.reactive.publisher.PublisherReducers;
import org.infinispan.stream.impl.intops.primitive.i.AsDoubleIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.AsLongIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.BoxedIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.DistinctIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.FilterIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.FlatMapIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.LimitIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToDoubleIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToLongIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToObjIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.PeekIntOperation;
import org.infinispan.util.function.SerializableBiConsumer;
import org.infinispan.util.function.SerializableBiFunction;
import org.infinispan.util.function.SerializableBinaryOperator;
import org.infinispan.util.function.SerializableCallable;
import org.infinispan.util.function.SerializableComparator;
import org.infinispan.util.function.SerializableIntConsumer;
import org.infinispan.util.function.SerializableIntFunction;
import org.infinispan.util.function.SerializableIntPredicate;
import org.infinispan.util.function.SerializableIntToDoubleFunction;
import org.infinispan.util.function.SerializableIntToLongFunction;
import org.infinispan.util.function.SerializableIntUnaryOperator;
import org.infinispan.util.function.SerializableObjIntConsumer;
import org.infinispan.util.function.SerializablePredicate;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
/**
* Implementation of {@link IntStream} that utilizes a lazily evaluated distributed back end execution. Note this
* class is only able to be created using {@link org.infinispan.CacheStream#mapToInt(ToIntFunction)} or similar
* methods from the {@link org.infinispan.CacheStream} interface.
* @param <Original> original stream type
*/
public class DistributedIntCacheStream<Original> extends AbstractCacheStream<Original, Integer, IntStream, IntCacheStream>
implements IntCacheStream {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
/**
* This constructor is to be used only when a user calls a map or flat map method changing to an IntStream
* from a CacheStream, Stream, DoubleStream, LongStream etc.
* @param other other instance of {@link AbstractCacheStream} to copy details from
*/
protected DistributedIntCacheStream(AbstractCacheStream other) {
super(other);
}
@Override
protected Log getLog() {
return log;
}
@Override
protected DistributedIntCacheStream unwrap() {
return this;
}
@Override
public IntCacheStream filter(IntPredicate predicate) {
return addIntermediateOperation(new FilterIntOperation<>(predicate));
}
@Override
public IntCacheStream filter(SerializableIntPredicate predicate) {
return filter((IntPredicate) predicate);
}
@Override
public IntCacheStream map(IntUnaryOperator mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
return addIntermediateOperation(new MapIntOperation(mapper));
}
@Override
public IntCacheStream map(SerializableIntUnaryOperator mapper) {
return map((IntUnaryOperator) mapper);
}
@Override
public <U> CacheStream<U> mapToObj(IntFunction<? extends U> mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToObjIntOperation<>(mapper));
return cacheStream();
}
@Override
public <U> CacheStream<U> mapToObj(SerializableIntFunction<? extends U> mapper) {
return mapToObj((IntFunction<? extends U>) mapper);
}
@Override
public LongCacheStream mapToLong(IntToLongFunction mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToLongIntOperation(mapper));
return longCacheStream();
}
@Override
public LongCacheStream mapToLong(SerializableIntToLongFunction mapper) {
return mapToLong((IntToLongFunction) mapper);
}
@Override
public DoubleCacheStream mapToDouble(IntToDoubleFunction mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToDoubleIntOperation(mapper));
return doubleCacheStream();
}
@Override
public DoubleCacheStream mapToDouble(SerializableIntToDoubleFunction mapper) {
return mapToDouble((IntToDoubleFunction) mapper);
}
@Override
public IntCacheStream flatMap(IntFunction<? extends IntStream> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
return addIntermediateOperation(new FlatMapIntOperation(mapper));
}
@Override
public IntCacheStream flatMap(SerializableIntFunction<? extends IntStream> mapper) {
return flatMap((IntFunction<? extends IntStream>) mapper);
}
@Override
public IntCacheStream distinct() {
// Distinct is applied remotely as well
addIntermediateOperation(DistinctIntOperation.getInstance());
return new IntermediateIntCacheStream(this).distinct();
}
@Override
public IntCacheStream sorted() {
return new IntermediateIntCacheStream(this).sorted();
}
@Override
public IntCacheStream peek(IntConsumer action) {
return addIntermediateOperation(new PeekIntOperation(action));
}
@Override
public IntCacheStream peek(SerializableIntConsumer action) {
return peek((IntConsumer) action);
}
@Override
public IntCacheStream limit(long maxSize) {
// Limit is applied remotely as well
addIntermediateOperation(new LimitIntOperation(maxSize));
return new IntermediateIntCacheStream(this).limit(maxSize);
}
@Override
public IntCacheStream skip(long n) {
return new IntermediateIntCacheStream(this).skip(n);
}
@Override
public LongCacheStream asLongStream() {
addIntermediateOperationMap(AsLongIntOperation.getInstance());
return longCacheStream();
}
@Override
public DoubleCacheStream asDoubleStream() {
addIntermediateOperationMap(AsDoubleIntOperation.getInstance());
return doubleCacheStream();
}
@Override
public CacheStream<Integer> boxed() {
addIntermediateOperationMap(BoxedIntOperation.getInstance());
return cacheStream();
}
// Rest are terminal operators
@Override
public void forEach(IntConsumer action) {
peek(action)
.iterator()
.forEachRemaining((int ignore) -> { });
}
@Override
public void forEach(SerializableIntConsumer action) {
forEach((IntConsumer) action);
}
@Override
public <K, V> void forEach(ObjIntConsumer<Cache<K, V>> action) {
peek(CacheBiConsumers.intConsumer(action))
.iterator()
.forEachRemaining((int ignore) -> { });
}
@Override
public <K, V> void forEach(SerializableObjIntConsumer<Cache<K, V>> action) {
forEach((ObjIntConsumer<Cache<K, V>>) action);
}
@Override
public void forEachOrdered(IntConsumer action) {
// We aren't sorted, so just do forEach
forEach(action);
}
@Override
public int[] toArray() {
Object[] values = performPublisherOperation(PublisherReducers.toArrayReducer(), PublisherReducers.toArrayFinalizer());
int[] results = new int[values.length];
int i = 0;
for (Object obj : values) {
results[i++] = (Integer) obj;
}
return results;
}
@Override
public int reduce(int identity, IntBinaryOperator op) {
Function<Publisher<Integer>, CompletionStage<Integer>> reduce = PublisherReducers.reduce(identity,
(SerializableBiFunction<Integer, Integer, Integer>) op::applyAsInt);
return performPublisherOperation(reduce, reduce);
}
@Override
public OptionalInt reduce(IntBinaryOperator op) {
Function<Publisher<Integer>, CompletionStage<Integer>> reduce = PublisherReducers.reduce(
(SerializableBinaryOperator<Integer>) op::applyAsInt);
Integer result = performPublisherOperation(reduce, reduce);
if (result == null) {
return OptionalInt.empty();
}
return OptionalInt.of(result);
}
@Override
public <R> R collect(Supplier<R> supplier, ObjIntConsumer<R> accumulator, BiConsumer<R, R> combiner) {
return performPublisherOperation(PublisherReducers.collect(supplier,
(SerializableBiConsumer<R, Integer>) accumulator::accept),
PublisherReducers.accumulate(combiner));
}
@Override
public int sum() {
// Sum as longs to avoid intermediate overflow, then saturate back into the int range.
// Note: a plain widening conversion is required here; Integer::toUnsignedLong would corrupt negative values.
long result = asLongStream().sum();
if (result > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
} else if (result < Integer.MIN_VALUE) {
return Integer.MIN_VALUE;
}
return (int) result;
}
@Override
public OptionalInt min() {
SerializableComparator<Integer> serializableComparator = Integer::compareTo;
Function<Publisher<Integer>, CompletionStage<Integer>> minFunction = PublisherReducers.min(serializableComparator);
Integer min = performPublisherOperation(minFunction, minFunction);
if (min == null) {
return OptionalInt.empty();
}
return OptionalInt.of(min);
}
@Override
public OptionalInt max() {
SerializableComparator<Integer> serializableComparator = Integer::compareTo;
Function<Publisher<Integer>, CompletionStage<Integer>> maxFunction = PublisherReducers.max(serializableComparator);
Integer max = performPublisherOperation(maxFunction, maxFunction);
if (max == null) {
return OptionalInt.empty();
}
return OptionalInt.of(max);
}
@Override
public OptionalDouble average() {
IntSummaryStatistics iss = summaryStatistics();
if (iss.getCount() == 0) {
return OptionalDouble.empty();
}
return OptionalDouble.of(iss.getAverage());
}
@Override
public IntSummaryStatistics summaryStatistics() {
return performPublisherOperation(PublisherReducers.reduceWith(
(SerializableCallable<IntSummaryStatistics>) IntSummaryStatistics::new,
(SerializableBiFunction<IntSummaryStatistics, Integer, IntSummaryStatistics>) (lss, intValue) -> {
lss.accept(intValue);
return lss;
}), PublisherReducers.reduce(
(SerializableBinaryOperator<IntSummaryStatistics>) (first, second) -> {
first.combine(second);
return first;
}));
}
@Override
public boolean anyMatch(IntPredicate predicate) {
return performPublisherOperation(PublisherReducers.anyMatch((SerializablePredicate<Integer>) predicate::test),
PublisherReducers.or());
}
@Override
public boolean allMatch(IntPredicate predicate) {
return performPublisherOperation(PublisherReducers.allMatch((SerializablePredicate<Integer>) predicate::test),
PublisherReducers.and());
}
@Override
public boolean noneMatch(IntPredicate predicate) {
return performPublisherOperation(PublisherReducers.noneMatch((SerializablePredicate<Integer>) predicate::test),
PublisherReducers.and());
}
@Override
public OptionalInt findFirst() {
// We aren't sorted, so just do findAny
return findAny();
}
@Override
public OptionalInt findAny() {
Function<Publisher<Integer>, CompletionStage<Integer>> function = PublisherReducers.findFirst();
Integer value = performPublisherOperation(function, function);
if (value == null) {
return OptionalInt.empty();
}
return OptionalInt.of(value);
}
@Override
public PrimitiveIterator.OfInt iterator() {
return remoteIterator();
}
PrimitiveIterator.OfInt remoteIterator() {
// TODO: need to add in way to not box these later
// Since this is a remote iterator we have to add it to the remote intermediate operations queue
intermediateOperations.add(BoxedIntOperation.getInstance());
DistributedCacheStream<Original, Integer> stream = new DistributedCacheStream<>(this);
Iterator<Integer> iterator = stream.iterator();
return new IntegerIteratorToPrimitiveInteger(iterator);
}
static class IntegerIteratorToPrimitiveInteger implements PrimitiveIterator.OfInt {
private final Iterator<Integer> iterator;
IntegerIteratorToPrimitiveInteger(Iterator<Integer> iterator) {
this.iterator = iterator;
}
@Override
public int nextInt() {
return iterator.next();
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
}
@Override
public Spliterator.OfInt spliterator() {
return Spliterators.spliteratorUnknownSize(iterator(), Spliterator.CONCURRENT);
}
@Override
public long count() {
return performPublisherOperation(PublisherReducers.count(), PublisherReducers.add());
}
// These are the custom added methods for cache streams
@Override
public IntCacheStream sequentialDistribution() {
parallelDistribution = false;
return this;
}
@Override
public IntCacheStream parallelDistribution() {
parallelDistribution = true;
return this;
}
@Override
public IntCacheStream filterKeySegments(Set<Integer> segments) {
return filterKeySegments(IntSets.from(segments));
}
@Override
public IntCacheStream filterKeySegments(IntSet segments) {
segmentsToFilter = segments;
return this;
}
@Override
public IntCacheStream filterKeys(Set<?> keys) {
keysToFilter = keys;
return this;
}
@Override
public IntCacheStream distributedBatchSize(int batchSize) {
distributedBatchSize = batchSize;
return this;
}
@Override
public IntCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
if (segmentCompletionListener == null) {
segmentCompletionListener = listener;
} else {
segmentCompletionListener = composeWithExceptions(segmentCompletionListener, listener);
}
return this;
}
@Override
public IntCacheStream disableRehashAware() {
rehashAware = false;
return this;
}
@Override
public IntCacheStream timeout(long timeout, TimeUnit unit) {
if (timeout <= 0) {
throw new IllegalArgumentException("Timeout must be greater than 0");
}
this.timeout = timeout;
this.timeoutUnit = unit;
return this;
}
protected <R> DistributedCacheStream<Original, R> cacheStream() {
return new DistributedCacheStream<>(this);
}
protected DistributedDoubleCacheStream<Original> doubleCacheStream() {
return new DistributedDoubleCacheStream<>(this);
}
protected DistributedLongCacheStream<Original> longCacheStream() {
return new DistributedLongCacheStream<>(this);
}
}
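// Usage sketch (hypothetical cache contents): an IntCacheStream is only reachable through
// CacheStream#mapToInt or similar, as the class javadoc notes. The lambdas and method references
// below resolve to the Serializable overloads so they can be shipped to remote nodes.
class DistributedIntCacheStreamUsageSketch {
static void demo(Cache<String, String> cache) {
int totalLength = cache.values().stream() // CacheStream<String>
.mapToInt(String::length) // distributed int stream
.sum(); // saturates at Integer.MAX_VALUE/MIN_VALUE (see sum() above)
IntSummaryStatistics stats = cache.values().stream()
.mapToInt(String::length)
.summaryStatistics(); // single distributed pass using IntSummaryStatistics
System.out.println(totalLength + " max=" + stats.getMax());
}
}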
| 16,583
| 32.844898
| 124
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/IntermediateType.java
|
package org.infinispan.stream.impl;
import java.util.Spliterator;
import java.util.stream.BaseStream;
import java.util.stream.StreamSupport;
import org.infinispan.BaseCacheStream;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
/**
* The various intermediate types. Note that the local intermediate operation can be a type other than the
* resulting stream, so we have to keep track of what it was to avoid {@link ClassCastException} issues.
*/
public enum IntermediateType {
REF {
@Override
public <T, S extends BaseStream<T, S>> S handleStream(BaseCacheStream streamable) {
CacheStream<?> stream = (CacheStream<?>) streamable;
Spliterator<?> spliterator = stream.spliterator();
return (S) StreamSupport.stream(spliterator, streamable.isParallel());
}
},
INT {
@Override
public <T, S extends BaseStream<T, S>> S handleStream(BaseCacheStream streamable) {
IntCacheStream stream = (IntCacheStream) streamable;
Spliterator.OfInt spliterator = stream.spliterator();
return (S) StreamSupport.intStream(spliterator, streamable.isParallel());
}
},
LONG {
@Override
public <T, S extends BaseStream<T, S>> S handleStream(BaseCacheStream streamable) {
LongCacheStream stream = (LongCacheStream) streamable;
Spliterator.OfLong spliterator = stream.spliterator();
return (S) StreamSupport.longStream(spliterator, streamable.isParallel());
}
},
DOUBLE {
@Override
public <T, S extends BaseStream<T, S>> S handleStream(BaseCacheStream streamable) {
DoubleCacheStream stream = (DoubleCacheStream) streamable;
Spliterator.OfDouble spliterator = stream.spliterator();
return (S) StreamSupport.doubleStream(spliterator, streamable.isParallel());
}
};
public abstract <T, S extends BaseStream<T, S>> S handleStream(BaseCacheStream streamable);
}
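// A minimal local sketch of the same StreamSupport pattern each enum constant uses above:
// capture a spliterator from the source, then rebuild a stream of the matching type around it.
// Plain java.util collections stand in for the cache-backed streams here.
class IntermediateTypeSketch {
static void demo() {
java.util.List<Integer> data = java.util.List.of(1, 2, 3);
Spliterator<Integer> spliterator = data.spliterator();
java.util.stream.Stream<Integer> rebuilt = StreamSupport.stream(spliterator, /* parallel */ false);
System.out.println(rebuilt.count()); // prints 3
}
}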
| 2,048
| 37.660377
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/DistributedDoubleCacheStream.java
|
package org.infinispan.stream.impl;
import java.lang.invoke.MethodHandles;
import java.util.DoubleSummaryStatistics;
import java.util.Iterator;
import java.util.OptionalDouble;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.DoubleConsumer;
import java.util.function.DoubleFunction;
import java.util.function.DoublePredicate;
import java.util.function.DoubleToIntFunction;
import java.util.function.DoubleToLongFunction;
import java.util.function.DoubleUnaryOperator;
import java.util.function.Function;
import java.util.function.ObjDoubleConsumer;
import java.util.function.Supplier;
import java.util.function.ToDoubleFunction;
import java.util.stream.DoubleStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.reactive.publisher.PublisherReducers;
import org.infinispan.stream.impl.intops.primitive.d.BoxedDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.DistinctDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.FilterDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.FlatMapDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.LimitDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToIntDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToLongDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToObjDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.PeekDoubleOperation;
import org.infinispan.util.function.SerializableBiConsumer;
import org.infinispan.util.function.SerializableBiFunction;
import org.infinispan.util.function.SerializableBinaryOperator;
import org.infinispan.util.function.SerializableCallable;
import org.infinispan.util.function.SerializableComparator;
import org.infinispan.util.function.SerializableDoubleConsumer;
import org.infinispan.util.function.SerializableDoubleFunction;
import org.infinispan.util.function.SerializableDoublePredicate;
import org.infinispan.util.function.SerializableDoubleToIntFunction;
import org.infinispan.util.function.SerializableDoubleToLongFunction;
import org.infinispan.util.function.SerializableDoubleUnaryOperator;
import org.infinispan.util.function.SerializableObjDoubleConsumer;
import org.infinispan.util.function.SerializablePredicate;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
/**
* Implementation of {@link DoubleStream} that utilizes a lazily evaluated distributed back end execution. Note this
* class is only able to be created using {@link org.infinispan.CacheStream#mapToDouble(ToDoubleFunction)} or similar
* methods from the {@link org.infinispan.CacheStream} interface.
* @param <Original> original stream type
*/
public class DistributedDoubleCacheStream<Original> extends AbstractCacheStream<Original, Double, DoubleStream, DoubleCacheStream>
implements DoubleCacheStream {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
/**
* This constructor is to be used only when a user calls a map or flat map method changing to a DoubleStream
* from a CacheStream, Stream, IntStream, LongStream etc.
* @param other other instance of {@link AbstractCacheStream} to copy details from
*/
protected DistributedDoubleCacheStream(AbstractCacheStream other) {
super(other);
}
@Override
protected Log getLog() {
return log;
}
@Override
protected DoubleCacheStream unwrap() {
return this;
}
@Override
public DoubleCacheStream filter(DoublePredicate predicate) {
return addIntermediateOperation(new FilterDoubleOperation(predicate));
}
@Override
public DoubleCacheStream filter(SerializableDoublePredicate predicate) {
return filter((DoublePredicate) predicate);
}
@Override
public DoubleCacheStream map(DoubleUnaryOperator mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
return addIntermediateOperation(new MapDoubleOperation(mapper));
}
@Override
public DoubleCacheStream map(SerializableDoubleUnaryOperator mapper) {
return map((DoubleUnaryOperator) mapper);
}
@Override
public <U> CacheStream<U> mapToObj(DoubleFunction<? extends U> mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToObjDoubleOperation<>(mapper));
return cacheStream();
}
@Override
public <U> CacheStream<U> mapToObj(SerializableDoubleFunction<? extends U> mapper) {
return mapToObj((DoubleFunction<? extends U>) mapper);
}
@Override
public IntCacheStream mapToInt(DoubleToIntFunction mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToIntDoubleOperation(mapper));
return intCacheStream();
}
@Override
public IntCacheStream mapToInt(SerializableDoubleToIntFunction mapper) {
return mapToInt((DoubleToIntFunction) mapper);
}
@Override
public LongCacheStream mapToLong(DoubleToLongFunction mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToLongDoubleOperation(mapper));
return longCacheStream();
}
@Override
public LongCacheStream mapToLong(SerializableDoubleToLongFunction mapper) {
return mapToLong((DoubleToLongFunction) mapper);
}
@Override
public DoubleCacheStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
return addIntermediateOperation(new FlatMapDoubleOperation(mapper));
}
@Override
public DoubleCacheStream flatMap(SerializableDoubleFunction<? extends DoubleStream> mapper) {
return flatMap((DoubleFunction<? extends DoubleStream>) mapper);
}
@Override
public DoubleCacheStream distinct() {
// Distinct is applied remotely as well
addIntermediateOperation(DistinctDoubleOperation.getInstance());
return new IntermediateDoubleCacheStream(this).distinct();
}
@Override
public DoubleCacheStream sorted() {
return new IntermediateDoubleCacheStream(this).sorted();
}
@Override
public DoubleCacheStream peek(DoubleConsumer action) {
return addIntermediateOperation(new PeekDoubleOperation(action));
}
@Override
public DoubleCacheStream peek(SerializableDoubleConsumer action) {
return peek((DoubleConsumer) action);
}
@Override
public DoubleCacheStream limit(long maxSize) {
// Limit is applied remotely as well
addIntermediateOperation(new LimitDoubleOperation(maxSize));
return new IntermediateDoubleCacheStream(this).limit(maxSize);
}
@Override
public DoubleCacheStream skip(long n) {
return new IntermediateDoubleCacheStream(this).skip(n);
}
@Override
public CacheStream<Double> boxed() {
addIntermediateOperationMap(BoxedDoubleOperation.getInstance());
return cacheStream();
}
// Rest are terminal operators
@Override
public void forEach(DoubleConsumer action) {
peek(action)
.iterator()
.forEachRemaining((double ignore) -> { });
}
@Override
public void forEach(SerializableDoubleConsumer action) {
forEach((DoubleConsumer) action);
}
@Override
public <K, V> void forEach(ObjDoubleConsumer<Cache<K, V>> action) {
peek(CacheBiConsumers.doubleConsumer(action))
.iterator()
.forEachRemaining((double ignore) -> { });
}
@Override
public <K, V> void forEach(SerializableObjDoubleConsumer<Cache<K, V>> action) {
forEach((ObjDoubleConsumer<Cache<K, V>>) action);
}
@Override
public void forEachOrdered(DoubleConsumer action) {
// We aren't sorted, so just do forEach
forEach(action);
}
@Override
public double[] toArray() {
Object[] values = performPublisherOperation(PublisherReducers.toArrayReducer(), PublisherReducers.toArrayFinalizer());
double[] results = new double[values.length];
int i = 0;
for (Object obj : values) {
results[i++] = (Double) obj;
}
return results;
}
@Override
public double reduce(double identity, DoubleBinaryOperator op) {
Function<Publisher<Double>, CompletionStage<Double>> reduce = PublisherReducers.reduce(identity,
(SerializableBiFunction<Double, Double, Double>) op::applyAsDouble);
return performPublisherOperation(reduce, reduce);
}
@Override
public OptionalDouble reduce(DoubleBinaryOperator op) {
Function<Publisher<Double>, CompletionStage<Double>> reduce = PublisherReducers.reduce(
(SerializableBinaryOperator<Double>) op::applyAsDouble);
Double result = performPublisherOperation(reduce, reduce);
if (result == null) {
return OptionalDouble.empty();
}
return OptionalDouble.of(result);
}
@Override
public <R> R collect(Supplier<R> supplier, ObjDoubleConsumer<R> accumulator, BiConsumer<R, R> combiner) {
return performPublisherOperation(PublisherReducers.collect(supplier,
(SerializableBiConsumer<R, Double>) accumulator::accept),
PublisherReducers.accumulate(combiner));
}
@Override
public double sum() {
DoubleSummaryStatistics dss = summaryStatistics();
return dss.getSum();
}
@Override
public OptionalDouble min() {
SerializableComparator<Double> serializableComparator = Double::compareTo;
Function<Publisher<Double>, CompletionStage<Double>> minFunction = PublisherReducers.min(serializableComparator);
Double min = performPublisherOperation(minFunction, minFunction);
if (min == null) {
return OptionalDouble.empty();
}
return OptionalDouble.of(min);
}
@Override
public OptionalDouble max() {
SerializableComparator<Double> serializableComparator = Double::compareTo;
Function<Publisher<Double>, CompletionStage<Double>> maxFunction = PublisherReducers.max(serializableComparator);
Double max = performPublisherOperation(maxFunction, maxFunction);
if (max == null) {
return OptionalDouble.empty();
}
return OptionalDouble.of(max);
}
@Override
public OptionalDouble average() {
DoubleSummaryStatistics dss = summaryStatistics();
if (dss.getCount() == 0) {
return OptionalDouble.empty();
}
return OptionalDouble.of(dss.getAverage());
}
@Override
public DoubleSummaryStatistics summaryStatistics() {
return performPublisherOperation(PublisherReducers.reduceWith(
(SerializableCallable<DoubleSummaryStatistics>) DoubleSummaryStatistics::new,
(SerializableBiFunction<DoubleSummaryStatistics, Double, DoubleSummaryStatistics>) (dss, doubleValue) -> {
dss.accept(doubleValue);
return dss;
}), PublisherReducers.reduce(
(SerializableBinaryOperator<DoubleSummaryStatistics>) (first, second) -> {
first.combine(second);
return first;
}));
}
@Override
public boolean anyMatch(DoublePredicate predicate) {
return performPublisherOperation(PublisherReducers.anyMatch((SerializablePredicate<Double>) predicate::test),
PublisherReducers.or());
}
@Override
public boolean allMatch(DoublePredicate predicate) {
return performPublisherOperation(PublisherReducers.allMatch((SerializablePredicate<Double>) predicate::test),
PublisherReducers.and());
}
@Override
public boolean noneMatch(DoublePredicate predicate) {
return performPublisherOperation(PublisherReducers.noneMatch((SerializablePredicate<Double>) predicate::test),
PublisherReducers.and());
}
@Override
public OptionalDouble findFirst() {
// We aren't sorted, so just do findAny
return findAny();
}
@Override
public OptionalDouble findAny() {
Function<Publisher<Double>, CompletionStage<Double>> function = PublisherReducers.findFirst();
Double value = performPublisherOperation(function, function);
if (value == null) {
return OptionalDouble.empty();
}
return OptionalDouble.of(value);
}
@Override
public PrimitiveIterator.OfDouble iterator() {
return remoteIterator();
}
PrimitiveIterator.OfDouble remoteIterator() {
// TODO: need to add in way to not box these later
// Since this is a remote iterator we have to add it to the remote intermediate operations queue
intermediateOperations.add(BoxedDoubleOperation.getInstance());
DistributedCacheStream<Original, Double> stream = new DistributedCacheStream<>(this);
Iterator<Double> iterator = stream.iterator();
return new DoubleIteratorToPrimitiveDouble(iterator);
}
static class DoubleIteratorToPrimitiveDouble implements PrimitiveIterator.OfDouble {
private final Iterator<Double> iterator;
DoubleIteratorToPrimitiveDouble(Iterator<Double> iterator) {
this.iterator = iterator;
}
@Override
public double nextDouble() {
return iterator.next();
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
}
@Override
public Spliterator.OfDouble spliterator() {
return Spliterators.spliteratorUnknownSize(iterator(), Spliterator.CONCURRENT);
}
@Override
public long count() {
return performPublisherOperation(PublisherReducers.count(), PublisherReducers.add());
}
// These are the custom added methods for cache streams
@Override
public DoubleCacheStream sequentialDistribution() {
parallelDistribution = false;
return this;
}
@Override
public DoubleCacheStream parallelDistribution() {
parallelDistribution = true;
return this;
}
@Override
public DoubleCacheStream filterKeySegments(Set<Integer> segments) {
return filterKeySegments(IntSets.from(segments));
}
@Override
public DoubleCacheStream filterKeySegments(IntSet segments) {
segmentsToFilter = segments;
return this;
}
@Override
public DoubleCacheStream filterKeys(Set<?> keys) {
keysToFilter = keys;
return this;
}
@Override
public DoubleCacheStream distributedBatchSize(int batchSize) {
distributedBatchSize = batchSize;
return this;
}
@Override
public DoubleCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
if (segmentCompletionListener == null) {
segmentCompletionListener = listener;
} else {
segmentCompletionListener = composeWithExceptions(segmentCompletionListener, listener);
}
return this;
}
@Override
public DoubleCacheStream disableRehashAware() {
rehashAware = false;
return this;
}
@Override
public DoubleCacheStream timeout(long timeout, TimeUnit unit) {
if (timeout <= 0) {
throw new IllegalArgumentException("Timeout must be greater than 0");
}
this.timeout = timeout;
this.timeoutUnit = unit;
return this;
}
protected <R> DistributedCacheStream<Original, R> cacheStream() {
return new DistributedCacheStream<>(this);
}
protected DistributedIntCacheStream<Original> intCacheStream() {
return new DistributedIntCacheStream<>(this);
}
protected DistributedLongCacheStream<Original> longCacheStream() {
return new DistributedLongCacheStream<>(this);
}
}
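// Usage sketch (hypothetical cache contents): mirrors the int variant above; note that average()
// is implemented via a single summaryStatistics() pass rather than separate sum and count calls.
class DistributedDoubleCacheStreamUsageSketch {
static void demo(Cache<String, Integer> cache) {
OptionalDouble avg = cache.values().stream()
.mapToDouble(Integer::doubleValue) // distributed double stream
.average();
avg.ifPresent(System.out::println);
}
}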
| 16,300
| 33.609342
| 130
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/RemovableCloseableIterator.java
|
package org.infinispan.stream.impl;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commons.util.CloseableIterator;
/**
* A CloseableIterator implementation that allows for a CloseableIterator that doesn't allow remove operations to
* implement remove by delegating the call to the provided cache to remove the previously read value. The key used
* to remove from the cache is determined by first applying the removeFunction to the value retrieved from the
* iterator.
*
* @author wburns
* @since 8.0
* @deprecated Users should use {@link org.infinispan.commons.util.RemovableCloseableIterator} instead
*/
public class RemovableCloseableIterator<K, C> extends org.infinispan.commons.util.RemovableCloseableIterator<C> {
public RemovableCloseableIterator(CloseableIterator<C> realIterator, Cache<K, ?> cache,
Function<? super C, K> removeFunction) {
super(realIterator, c -> cache.remove(removeFunction.apply(c)));
}
}
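// Usage sketch with an illustrative predicate: remove() on the wrapper deletes the backing cache
// entry by applying the key-extracting function to the last value returned by next(). Prefer
// org.infinispan.commons.util.RemovableCloseableIterator in new code, per the deprecation note above.
class RemovableCloseableIteratorUsageSketch {
static void demo(Cache<String, String> cache, CloseableIterator<java.util.Map.Entry<String, String>> it) {
RemovableCloseableIterator<String, java.util.Map.Entry<String, String>> removable =
new RemovableCloseableIterator<>(it, cache, java.util.Map.Entry::getKey);
while (removable.hasNext()) {
if (removable.next().getValue().isEmpty()) {
removable.remove(); // delegates to cache.remove(entry.getKey())
}
}
}
}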
| 993
| 40.416667
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/AbstractCacheStream.java
|
package org.infinispan.stream.impl;
import java.util.ArrayDeque;
import java.util.PrimitiveIterator;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.BaseStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.reactive.publisher.impl.ClusterPublisherManager;
import org.infinispan.reactive.publisher.impl.DeliveryGuarantee;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.util.KeyValuePair;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.reactivestreams.Publisher;
/**
* Abstract stream that provides all of the common functionality required for all types of Streams including the various
* primitive types.
* @param <Original> the original type of the underlying stream - normally CacheEntry or Object
* @param <T> The type returned by the stream
* @param <S> The stream interface
*/
public abstract class AbstractCacheStream<Original, T, S extends BaseStream<T, S>, S2 extends S> implements BaseStream<T, S> {
protected final Queue<IntermediateOperation> intermediateOperations;
protected final Address localAddress;
protected final ClusterPublisherManager cpm;
protected final Executor executor;
protected final ComponentRegistry registry;
protected final PartitionHandlingManager partition;
protected final KeyPartitioner keyPartitioner;
protected final StateTransferLock stateTransferLock;
protected final long explicitFlags;
protected final Function<? super Original, ?> toKeyFunction;
protected final InvocationContext invocationContext;
protected Runnable closeRunnable = null;
protected Boolean parallelDistribution;
protected boolean parallel;
protected boolean rehashAware = true;
protected Set<?> keysToFilter;
protected IntSet segmentsToFilter;
protected int distributedBatchSize;
protected Consumer<Supplier<PrimitiveIterator.OfInt>> segmentCompletionListener;
protected IteratorOperation iteratorOperation = IteratorOperation.NO_MAP;
protected long timeout = 30;
protected TimeUnit timeoutUnit = TimeUnit.SECONDS;
protected AbstractCacheStream(Address localAddress, boolean parallel, InvocationContext ctx,
long explicitFlags, int distributedBatchSize, Executor executor,
ComponentRegistry registry, Function<? super Original, ?> toKeyFunction,
ClusterPublisherManager<?, ?> clusterPublisherManager) {
this.localAddress = localAddress;
this.parallel = parallel;
this.invocationContext = ctx;
this.explicitFlags = explicitFlags;
this.distributedBatchSize = distributedBatchSize;
this.executor = executor;
this.registry = registry;
this.toKeyFunction = toKeyFunction;
this.partition = registry.getComponent(PartitionHandlingManager.class);
this.keyPartitioner = registry.getComponent(KeyPartitioner.class);
this.stateTransferLock = registry.getComponent(StateTransferLock.class);
this.cpm = clusterPublisherManager;
intermediateOperations = new ArrayDeque<>();
}
protected AbstractCacheStream(AbstractCacheStream<Original, T, S, S2> other) {
this.intermediateOperations = other.intermediateOperations;
this.localAddress = other.localAddress;
this.invocationContext = other.invocationContext;
this.explicitFlags = other.explicitFlags;
this.executor = other.executor;
this.registry = other.registry;
this.toKeyFunction = other.toKeyFunction;
this.partition = other.partition;
this.keyPartitioner = other.keyPartitioner;
this.stateTransferLock = other.stateTransferLock;
this.cpm = other.cpm;
this.closeRunnable = other.closeRunnable;
this.parallel = other.parallel;
this.parallelDistribution = other.parallelDistribution;
this.rehashAware = other.rehashAware;
this.keysToFilter = other.keysToFilter;
this.segmentsToFilter = other.segmentsToFilter;
this.distributedBatchSize = other.distributedBatchSize;
this.segmentCompletionListener = other.segmentCompletionListener;
this.iteratorOperation = other.iteratorOperation;
this.timeout = other.timeout;
this.timeoutUnit = other.timeoutUnit;
}
protected abstract Log getLog();
protected S2 addIntermediateOperation(IntermediateOperation<T, S, T, S> intermediateOperation) {
intermediateOperation.handleInjection(registry);
addIntermediateOperation(intermediateOperations, intermediateOperation);
return unwrap();
}
protected void addIntermediateOperationMap(IntermediateOperation<T, S, ?, ?> intermediateOperation) {
intermediateOperation.handleInjection(registry);
addIntermediateOperation(intermediateOperations, intermediateOperation);
}
protected void addIntermediateOperation(Queue<IntermediateOperation> intermediateOperations,
IntermediateOperation<T, S, ?, ?> intermediateOperation) {
intermediateOperations.add(intermediateOperation);
}
protected abstract S2 unwrap();
@Override
public boolean isParallel() {
return parallel;
}
@Override
public S2 sequential() {
parallel = false;
return unwrap();
}
@Override
public S2 parallel() {
parallel = true;
return unwrap();
}
@Override
public S2 unordered() {
// This by default is always unordered
return unwrap();
}
@Override
public S2 onClose(Runnable closeHandler) {
if (this.closeRunnable == null) {
this.closeRunnable = closeHandler;
} else {
this.closeRunnable = Util.composeWithExceptions(this.closeRunnable, closeHandler);
}
return unwrap();
}
@Override
public void close() {
if (closeRunnable != null) {
closeRunnable.run();
}
}
<R> R performPublisherOperation(Function<? super Publisher<T>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
Function usedTransformer;
if (intermediateOperations.isEmpty()) {
usedTransformer = transformer;
} else {
usedTransformer = new CacheStreamIntermediateReducer(intermediateOperations, transformer);
}
DeliveryGuarantee guarantee = rehashAware ? DeliveryGuarantee.EXACTLY_ONCE : DeliveryGuarantee.AT_MOST_ONCE;
CompletionStage<R> stage;
if (toKeyFunction == null) {
stage = cpm.keyReduction(parallel, segmentsToFilter, keysToFilter, invocationContext, explicitFlags, guarantee,
usedTransformer, finalizer);
} else {
stage = cpm.entryReduction(parallel, segmentsToFilter, keysToFilter, invocationContext, explicitFlags, guarantee,
usedTransformer, finalizer);
}
return CompletionStages.join(stage);
}
protected boolean isPrimaryOwner(ConsistentHash ch, Object key) {
return localAddress.equals(ch.locatePrimaryOwnerForSegment(keyPartitioner.getSegment(key)));
}
enum IteratorOperation {
NO_MAP,
MAP {
/**
* Function used to unwrap an entry. A null function means no unwrapping is required.
* @return a function to apply
*/
@Override
public <In, Out> Function<In, Out> getFunction() {
// Map operations wrap each entry in a KeyValuePair<Key, Result(s)>, so we have to unwrap the result(s)
return e -> ((KeyValuePair<?, Out>) e).getValue();
}
},
FLAT_MAP
;
public <In, Out> Function<In, Out> getFunction() {
// There is no unwrapping required as we just have the CacheEntry directly
return null;
}
}
/**
* Given two SegmentCompletionListener, return a SegmentCompletionListener that
* executes both in sequence, even if the first throws an exception, and if both
* throw exceptions, add any exceptions thrown by the second as suppressed
* exceptions of the first.
*/
protected static Consumer<Supplier<PrimitiveIterator.OfInt>> composeWithExceptions(Consumer<Supplier<PrimitiveIterator.OfInt>> a,
Consumer<Supplier<PrimitiveIterator.OfInt>> b) {
return (segments) -> {
try {
a.accept(segments);
}
catch (Throwable e1) {
try {
b.accept(segments);
}
catch (Throwable e2) {
try {
e1.addSuppressed(e2);
} catch (Throwable ignore) {}
}
throw e1;
}
b.accept(segments);
};
}
}
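// Behavioral sketch for composeWithExceptions above: the second listener runs even when the first
// throws, and any failure from the second is attached as a suppressed exception of the first. The
// listener bodies here are illustrative only.
class ComposeWithExceptionsSketch {
static void demo() {
Consumer<Supplier<PrimitiveIterator.OfInt>> first = segments -> {
throw new IllegalStateException("first failed");
};
Consumer<Supplier<PrimitiveIterator.OfInt>> second = segments -> System.out.println("second still runs");
try {
AbstractCacheStream.composeWithExceptions(first, second).accept(() -> null);
} catch (IllegalStateException e) {
System.out.println(e.getMessage()); // "first failed"
}
}
}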
| 9,368
| 35.597656
| 132
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/CacheBiConsumers.java
|
package org.infinispan.stream.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.IntConsumer;
import java.util.function.LongConsumer;
import java.util.function.ObjDoubleConsumer;
import java.util.function.ObjIntConsumer;
import java.util.function.ObjLongConsumer;
import org.infinispan.Cache;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.marshall.core.Ids;
public class CacheBiConsumers {
private CacheBiConsumers() { }
static <K, V, R> Consumer<R> objectConsumer(BiConsumer<Cache<K, V>, ? super R> biConsumer) {
return new CacheObjBiConsumer<>(biConsumer);
}
static <K, V> DoubleConsumer doubleConsumer(ObjDoubleConsumer<Cache<K, V>> objDoubleConsumer) {
return new CacheDoubleConsumer<>(objDoubleConsumer);
}
static <K, V> LongConsumer longConsumer(ObjLongConsumer<Cache<K, V>> objLongConsumer) {
return new CacheLongConsumer<>(objLongConsumer);
}
static <K, V> IntConsumer intConsumer(ObjIntConsumer<Cache<K, V>> objIntConsumer) {
return new CacheIntConsumer<>(objIntConsumer);
}
@Scope(Scopes.NONE)
static class CacheObjBiConsumer<K, V, R> implements Consumer<R> {
private final BiConsumer<Cache<K, V>, ? super R> biConsumer;
protected transient Cache<K, V> cache;
@Inject
void inject(Cache<K, V> cache, ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(biConsumer);
this.cache = cache;
}
CacheObjBiConsumer(BiConsumer<Cache<K, V>, ? super R> biConsumer) {
this.biConsumer = biConsumer;
}
@Override
public void accept(R r) {
biConsumer.accept(cache, r);
}
}
@Scope(Scopes.NONE)
static class CacheDoubleConsumer<K, V> implements DoubleConsumer {
private final ObjDoubleConsumer<Cache<K, V>> objDoubleConsumer;
protected transient Cache<K, V> cache;
@Inject
void inject(Cache<K, V> cache, ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(objDoubleConsumer);
this.cache = cache;
}
CacheDoubleConsumer(ObjDoubleConsumer<Cache<K, V>> objDoubleConsumer) {
this.objDoubleConsumer = objDoubleConsumer;
}
@Override
public void accept(double r) {
objDoubleConsumer.accept(cache, r);
}
}
@Scope(Scopes.NONE)
static class CacheLongConsumer<K, V> implements LongConsumer {
private final ObjLongConsumer<Cache<K, V>> objLongConsumer;
protected transient Cache<K, V> cache;
@Inject
void inject(Cache<K, V> cache, ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(objLongConsumer);
this.cache = cache;
}
CacheLongConsumer(ObjLongConsumer<Cache<K, V>> objLongConsumer) {
this.objLongConsumer = objLongConsumer;
}
@Override
public void accept(long r) {
objLongConsumer.accept(cache, r);
}
}
@Scope(Scopes.NONE)
static class CacheIntConsumer<K, V> implements IntConsumer {
private final ObjIntConsumer<Cache<K, V>> objIntConsumer;
protected transient Cache<K, V> cache;
@Inject
void inject(Cache<K, V> cache, ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(objIntConsumer);
this.cache = cache;
}
CacheIntConsumer(ObjIntConsumer<Cache<K, V>> objIntConsumer) {
this.objIntConsumer = objIntConsumer;
}
@Override
public void accept(int r) {
objIntConsumer.accept(cache, r);
}
}
public static class Externalizer implements AdvancedExternalizer<Object> {
enum ExternalizerId {
OBJECT(CacheObjBiConsumer.class),
DOUBLE(CacheDoubleConsumer.class),
LONG(CacheLongConsumer.class),
INT(CacheIntConsumer.class)
;
private final Class<?> marshalledClass;
ExternalizerId(Class<?> marshalledClass) {
this.marshalledClass = marshalledClass;
}
}
private static final ExternalizerId[] VALUES = ExternalizerId.values();
private final Map<Class<?>, ExternalizerId> objects = new HashMap<>();
public Externalizer() {
for (ExternalizerId id : VALUES) {
objects.put(id.marshalledClass, id);
}
}
@Override
public Set<Class<?>> getTypeClasses() {
return Util.asSet(CacheObjBiConsumer.class, CacheDoubleConsumer.class, CacheLongConsumer.class,
CacheIntConsumer.class);
}
@Override
public Integer getId() {
return Ids.CACHE_BI_CONSUMERS;
}
@Override
public void writeObject(ObjectOutput output, Object object) throws IOException {
ExternalizerId id = objects.get(object.getClass());
if (id == null) {
throw new IllegalArgumentException("Unsupported class " + object.getClass() + " was provided!");
}
output.writeByte(id.ordinal());
switch (id) {
case OBJECT:
output.writeObject(((CacheObjBiConsumer) object).biConsumer);
break;
case DOUBLE:
output.writeObject(((CacheDoubleConsumer) object).objDoubleConsumer);
break;
case LONG:
output.writeObject(((CacheLongConsumer) object).objLongConsumer);
break;
case INT:
output.writeObject(((CacheIntConsumer) object).objIntConsumer);
break;
}
}
@Override
public Object readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int number = input.readUnsignedByte();
ExternalizerId[] ids = VALUES;
if (number >= ids.length) {
throw new IllegalArgumentException("Found invalid number " + number);
}
ExternalizerId id = ids[number];
switch (id) {
case OBJECT:
return new CacheObjBiConsumer<>((BiConsumer) input.readObject());
case DOUBLE:
return new CacheDoubleConsumer<>((ObjDoubleConsumer) input.readObject());
case LONG:
return new CacheLongConsumer<>((ObjLongConsumer) input.readObject());
case INT:
return new CacheIntConsumer<>((ObjIntConsumer) input.readObject());
default:
throw new IllegalArgumentException("ExternalizerId not supported: " + id);
}
}
}
}
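// Generic sketch of the ordinal-dispatch wire format used by the Externalizer above: one byte
// selects the concrete consumer type, followed by the wrapped function. Plain JDK object streams
// stand in for Infinispan's marshalling here; names are illustrative only.
class OrdinalDispatchSketch {
enum Kind { OBJECT, DOUBLE, LONG, INT }
static byte[] write(Kind kind, java.io.Serializable payload) throws IOException {
java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
try (java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bytes)) {
out.writeByte(kind.ordinal()); // type selector, mirrors ExternalizerId.ordinal()
out.writeObject(payload); // type-specific payload
}
return bytes.toByteArray();
}
static Kind readKind(byte[] data) throws IOException {
try (java.io.ObjectInputStream in = new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(data))) {
int ordinal = in.readUnsignedByte();
Kind[] kinds = Kind.values();
if (ordinal >= kinds.length) {
throw new IllegalArgumentException("Found invalid number " + ordinal);
}
return kinds[ordinal];
}
}
}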
| 7,017
| 31.341014
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/IntermediateDoubleCacheStream.java
|
package org.infinispan.stream.impl;
import java.util.DoubleSummaryStatistics;
import java.util.OptionalDouble;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.DoubleConsumer;
import java.util.function.DoubleFunction;
import java.util.function.DoublePredicate;
import java.util.function.DoubleToIntFunction;
import java.util.function.DoubleToLongFunction;
import java.util.function.DoubleUnaryOperator;
import java.util.function.ObjDoubleConsumer;
import java.util.function.Supplier;
import java.util.stream.DoubleStream;
import org.infinispan.BaseCacheStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.stream.impl.local.LocalDoubleCacheStream;
/**
* An intermediate double cache stream used when an intermediate operation is invoked that requires both a remote
* and a local portion.
*/
public class IntermediateDoubleCacheStream implements DoubleCacheStream {
private BaseCacheStream remoteStream;
private final IntermediateType type;
private LocalDoubleCacheStream localDoubleStream;
private IntermediateCacheStreamSupplier supplier;
public IntermediateDoubleCacheStream(DistributedDoubleCacheStream remoteStream) {
this.remoteStream = remoteStream;
this.type = IntermediateType.DOUBLE;
this.supplier = new IntermediateCacheStreamSupplier(type, remoteStream);
this.localDoubleStream = new LocalDoubleCacheStream(supplier, remoteStream.parallel,
remoteStream.registry);
}
public IntermediateDoubleCacheStream(BaseCacheStream remoteStream, IntermediateType type,
LocalDoubleCacheStream localDoubleStream, IntermediateCacheStreamSupplier supplier) {
this.remoteStream = remoteStream;
this.type = type;
this.localDoubleStream = localDoubleStream;
this.supplier = supplier;
}
@Override
public DoubleCacheStream sequentialDistribution() {
remoteStream = remoteStream.sequentialDistribution();
return this;
}
@Override
public DoubleCacheStream parallelDistribution() {
remoteStream = remoteStream.parallelDistribution();
return this;
}
@Override
public DoubleCacheStream filterKeySegments(Set<Integer> segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public DoubleCacheStream filterKeySegments(IntSet segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public DoubleCacheStream filterKeys(Set<?> keys) {
remoteStream = remoteStream.filterKeys(keys);
return this;
}
@Override
public DoubleCacheStream distributedBatchSize(int batchSize) {
remoteStream = remoteStream.distributedBatchSize(batchSize);
return this;
}
@Override
public DoubleCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
throw new UnsupportedOperationException("Segment completion listener is only supported when no intermediate " +
"operation is provided (sorted, distinct, limit, skip)");
}
@Override
public DoubleCacheStream disableRehashAware() {
remoteStream = remoteStream.disableRehashAware();
return this;
}
@Override
public DoubleCacheStream timeout(long timeout, TimeUnit unit) {
remoteStream = remoteStream.timeout(timeout, unit);
return this;
}
@Override
public boolean isParallel() {
return localDoubleStream.isParallel();
}
@Override
public DoubleCacheStream sorted() {
localDoubleStream = localDoubleStream.sorted();
return this;
}
@Override
public DoubleCacheStream limit(long maxSize) {
localDoubleStream = localDoubleStream.limit(maxSize);
return this;
}
@Override
public DoubleCacheStream skip(long n) {
localDoubleStream = localDoubleStream.skip(n);
return this;
}
@Override
public DoubleCacheStream peek(DoubleConsumer action) {
localDoubleStream = localDoubleStream.peek(action);
return this;
}
@Override
public DoubleCacheStream distinct() {
localDoubleStream = localDoubleStream.distinct();
return this;
}
@Override
public DoubleCacheStream filter(DoublePredicate predicate) {
localDoubleStream = localDoubleStream.filter(predicate);
return this;
}
@Override
public DoubleCacheStream map(DoubleUnaryOperator mapper) {
localDoubleStream.map(mapper);
return this;
}
@Override
public <U> CacheStream<U> mapToObj(DoubleFunction<? extends U> mapper) {
return new IntermediateCacheStream<>(remoteStream, type, localDoubleStream.mapToObj(mapper), supplier);
}
@Override
public IntCacheStream mapToInt(DoubleToIntFunction mapper) {
return new IntermediateIntCacheStream(remoteStream, type, localDoubleStream.mapToInt(mapper), supplier);
}
@Override
public LongCacheStream mapToLong(DoubleToLongFunction mapper) {
return new IntermediateLongCacheStream(remoteStream, type, localDoubleStream.mapToLong(mapper), supplier);
}
@Override
public DoubleCacheStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
localDoubleStream.flatMap(mapper);
return this;
}
@Override
public DoubleCacheStream parallel() {
remoteStream = (BaseCacheStream) remoteStream.parallel();
localDoubleStream = (LocalDoubleCacheStream) localDoubleStream.parallel();
return this;
}
@Override
public PrimitiveIterator.OfDouble iterator() {
return localDoubleStream.iterator();
}
@Override
public Spliterator.OfDouble spliterator() {
return localDoubleStream.spliterator();
}
@Override
public DoubleCacheStream sequential() {
remoteStream = (BaseCacheStream) remoteStream.sequential();
localDoubleStream = (LocalDoubleCacheStream) localDoubleStream.sequential();
return this;
}
@Override
public DoubleCacheStream unordered() {
localDoubleStream = (LocalDoubleCacheStream) localDoubleStream.unordered();
return this;
}
@Override
public void forEach(DoubleConsumer action) {
localDoubleStream.forEach(action);
}
@Override
public <K, V> void forEach(ObjDoubleConsumer<Cache<K, V>> action) {
localDoubleStream.forEach(action);
}
@Override
public void forEachOrdered(DoubleConsumer action) {
localDoubleStream.forEachOrdered(action);
}
@Override
public double[] toArray() {
return localDoubleStream.toArray();
}
@Override
public double reduce(double identity, DoubleBinaryOperator op) {
return localDoubleStream.reduce(identity, op);
}
@Override
public OptionalDouble reduce(DoubleBinaryOperator op) {
return localDoubleStream.reduce(op);
}
@Override
public <R> R collect(Supplier<R> supplier, ObjDoubleConsumer<R> accumulator, BiConsumer<R, R> combiner) {
return localDoubleStream.collect(supplier, accumulator, combiner);
}
@Override
public double sum() {
return localDoubleStream.sum();
}
@Override
public OptionalDouble min() {
return localDoubleStream.min();
}
@Override
public OptionalDouble max() {
return localDoubleStream.max();
}
@Override
public long count() {
return localDoubleStream.count();
}
@Override
public OptionalDouble average() {
return localDoubleStream.average();
}
@Override
public DoubleSummaryStatistics summaryStatistics() {
return localDoubleStream.summaryStatistics();
}
@Override
public boolean anyMatch(DoublePredicate predicate) {
return localDoubleStream.anyMatch(predicate);
}
@Override
public boolean allMatch(DoublePredicate predicate) {
return localDoubleStream.allMatch(predicate);
}
@Override
public boolean noneMatch(DoublePredicate predicate) {
return localDoubleStream.noneMatch(predicate);
}
@Override
public OptionalDouble findFirst() {
return localDoubleStream.findFirst();
}
@Override
public OptionalDouble findAny() {
return localDoubleStream.findAny();
}
@Override
public CacheStream<Double> boxed() {
return mapToObj(Double::valueOf);
}
@Override
public DoubleCacheStream onClose(Runnable closeHandler) {
remoteStream = (BaseCacheStream) remoteStream.onClose(closeHandler);
return this;
}
@Override
public void close() {
localDoubleStream.close();
remoteStream.close();
}
}
| 8,872
| 27.257962
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/CacheStreamIntermediateReducer.java
|
package org.infinispan.stream.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.marshall.core.Ids;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* Reducer implementation for the Distributed Publisher that converts the queued CacheStream intermediate
* operations into an appropriate Reducer
* @param <R> the result type produced by the final transformer
*/
public final class CacheStreamIntermediateReducer<R> implements Function<Publisher<Object>, CompletionStage<R>>, InjectableComponent {
private final Queue<IntermediateOperation> intOps;
private final Function<? super Publisher<Object>, ? extends CompletionStage<R>> transformer;
CacheStreamIntermediateReducer(Queue<IntermediateOperation> intOps, Function<? super Publisher<Object>, ? extends CompletionStage<R>> transformer) {
this.intOps = intOps;
this.transformer = transformer;
}
@Override
public CompletionStage<R> apply(Publisher<Object> objectPublisher) {
Flowable<Object> innerPublisher = Flowable.fromPublisher(objectPublisher);
for (IntermediateOperation intOp : intOps) {
innerPublisher = intOp.mapFlowable(innerPublisher);
}
return transformer.apply(innerPublisher);
}
@Override
public void inject(ComponentRegistry registry) {
for (IntermediateOperation intOp : intOps) {
intOp.handleInjection(registry);
}
}
public static final class ReducerExternalizer implements AdvancedExternalizer<CacheStreamIntermediateReducer> {
@Override
public void writeObject(ObjectOutput output, CacheStreamIntermediateReducer object) throws IOException {
output.writeObject(object.intOps);
output.writeObject(object.transformer);
}
@Override
public CacheStreamIntermediateReducer readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new CacheStreamIntermediateReducer((Queue) input.readObject(), (Function) input.readObject());
}
@Override
public Set<Class<? extends CacheStreamIntermediateReducer>> getTypeClasses() {
return Collections.singleton(CacheStreamIntermediateReducer.class);
}
@Override
public Integer getId() {
return Ids.CACHE_STREAM_INTERMEDIATE_REDUCER;
}
}
}
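To illustrate how this reducer threads the queued operations through a Flowable before applying the final transformer, here is a minimal usage sketch. It assumes the same package (the constructor is package-private); the MapOperation and the count-based transformer are illustrative stand-ins for whatever a real pipeline would queue, and RxJava 3's Single.toCompletionStage() bridges back to a CompletionStage.

package org.infinispan.stream.impl;

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;

import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.stream.impl.intops.object.MapOperation;
import org.reactivestreams.Publisher;

import io.reactivex.rxjava3.core.Flowable;

public class ReducerUsageSketch {
   // Queue one intermediate op (upper-case each element), then count whatever
   // the composed pipeline emits.
   static CompletionStage<Long> countUpperCased(Publisher<Object> source) {
      Queue<IntermediateOperation> intOps = new ArrayDeque<>();
      MapOperation<Object, Object> upperCase = new MapOperation<>(o -> o.toString().toUpperCase());
      intOps.add(upperCase);
      // Final transformer: count the elements emitted after all intermediate ops
      Function<Publisher<Object>, CompletionStage<Long>> transformer =
            pub -> Flowable.fromPublisher(pub).count().toCompletionStage();
      CacheStreamIntermediateReducer<Long> reducer =
            new CacheStreamIntermediateReducer<>(intOps, transformer);
      return reducer.apply(source);
   }
}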
| 2,726
| 35.851351
| 151
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/IntermediateLongCacheStream.java
|
package org.infinispan.stream.impl;
import java.util.LongSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.LongBinaryOperator;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;
import java.util.function.LongPredicate;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
import java.util.function.LongUnaryOperator;
import java.util.function.ObjLongConsumer;
import java.util.function.Supplier;
import java.util.stream.LongStream;
import org.infinispan.BaseCacheStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.stream.impl.local.LocalLongCacheStream;
/**
* An intermediate long cache stream used when an intermediate operation requires both a remote and a local portion.
*/
public class IntermediateLongCacheStream implements LongCacheStream {
private BaseCacheStream remoteStream;
private final IntermediateType type;
private LocalLongCacheStream localLongStream;
private IntermediateCacheStreamSupplier supplier;
public IntermediateLongCacheStream(DistributedLongCacheStream remoteStream) {
this.remoteStream = remoteStream;
this.type = IntermediateType.LONG;
this.supplier = new IntermediateCacheStreamSupplier(type, remoteStream);
this.localLongStream = new LocalLongCacheStream(supplier, remoteStream.parallel,
remoteStream.registry);
}
public IntermediateLongCacheStream(BaseCacheStream remoteStream, IntermediateType type,
LocalLongCacheStream localLongStream, IntermediateCacheStreamSupplier supplier) {
this.remoteStream = remoteStream;
this.type = type;
this.localLongStream = localLongStream;
this.supplier = supplier;
}
@Override
public LongCacheStream sequentialDistribution() {
remoteStream = remoteStream.sequentialDistribution();
return this;
}
@Override
public LongCacheStream parallelDistribution() {
remoteStream = remoteStream.parallelDistribution();
return this;
}
@Override
public LongCacheStream filterKeySegments(Set<Integer> segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public LongCacheStream filterKeySegments(IntSet segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public LongCacheStream filterKeys(Set<?> keys) {
remoteStream = remoteStream.filterKeys(keys);
return this;
}
@Override
public LongCacheStream distributedBatchSize(int batchSize) {
remoteStream = remoteStream.distributedBatchSize(batchSize);
return this;
}
@Override
public LongCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
throw new UnsupportedOperationException("Segment completion listener is only supported when no intermediate " +
"operation is provided (sorted, distinct, limit, skip)");
}
@Override
public LongCacheStream disableRehashAware() {
remoteStream = remoteStream.disableRehashAware();
return this;
}
@Override
public LongCacheStream timeout(long timeout, TimeUnit unit) {
remoteStream = remoteStream.timeout(timeout, unit);
return this;
}
@Override
public boolean isParallel() {
return localLongStream.isParallel();
}
@Override
public LongCacheStream sorted() {
localLongStream = localLongStream.sorted();
return this;
}
@Override
public LongCacheStream limit(long maxSize) {
localLongStream = localLongStream.limit(maxSize);
return this;
}
@Override
public LongCacheStream skip(long n) {
localLongStream = localLongStream.skip(n);
return this;
}
@Override
public LongCacheStream peek(LongConsumer action) {
localLongStream = localLongStream.peek(action);
return this;
}
@Override
public LongCacheStream distinct() {
localLongStream = localLongStream.distinct();
return this;
}
@Override
public LongCacheStream filter(LongPredicate predicate) {
localLongStream = localLongStream.filter(predicate);
return this;
}
@Override
public LongCacheStream map(LongUnaryOperator mapper) {
localLongStream = localLongStream.map(mapper);
return this;
}
@Override
public <U> CacheStream<U> mapToObj(LongFunction<? extends U> mapper) {
return new IntermediateCacheStream<>(remoteStream, type, localLongStream.mapToObj(mapper), supplier);
}
@Override
public IntCacheStream mapToInt(LongToIntFunction mapper) {
return new IntermediateIntCacheStream(remoteStream, type, localLongStream.mapToInt(mapper), supplier);
}
@Override
public DoubleCacheStream mapToDouble(LongToDoubleFunction mapper) {
return new IntermediateDoubleCacheStream(remoteStream, type, localLongStream.mapToDouble(mapper), supplier);
}
@Override
public LongCacheStream flatMap(LongFunction<? extends LongStream> mapper) {
localLongStream = localLongStream.flatMap(mapper);
return this;
}
@Override
public LongCacheStream parallel() {
remoteStream = (BaseCacheStream) remoteStream.parallel();
localLongStream = (LocalLongCacheStream) localLongStream.parallel();
return this;
}
@Override
public PrimitiveIterator.OfLong iterator() {
return localLongStream.iterator();
}
@Override
public Spliterator.OfLong spliterator() {
return localLongStream.spliterator();
}
@Override
public LongCacheStream sequential() {
remoteStream = (BaseCacheStream) remoteStream.sequential();
localLongStream = (LocalLongCacheStream) localLongStream.sequential();
return this;
}
@Override
public LongCacheStream unordered() {
localLongStream = (LocalLongCacheStream) localLongStream.unordered();
return this;
}
@Override
public void forEach(LongConsumer action) {
localLongStream.forEach(action);
}
@Override
public <K, V> void forEach(ObjLongConsumer<Cache<K, V>> action) {
localLongStream.forEach(action);
}
@Override
public void forEachOrdered(LongConsumer action) {
localLongStream.forEachOrdered(action);
}
@Override
public long[] toArray() {
return localLongStream.toArray();
}
@Override
public long reduce(long identity, LongBinaryOperator op) {
return localLongStream.reduce(identity, op);
}
@Override
public OptionalLong reduce(LongBinaryOperator op) {
return localLongStream.reduce(op);
}
@Override
public <R> R collect(Supplier<R> supplier, ObjLongConsumer<R> accumulator, BiConsumer<R, R> combiner) {
return localLongStream.collect(supplier, accumulator, combiner);
}
@Override
public long sum() {
return localLongStream.sum();
}
@Override
public OptionalLong min() {
return localLongStream.min();
}
@Override
public OptionalLong max() {
return localLongStream.max();
}
@Override
public long count() {
return localLongStream.count();
}
@Override
public OptionalDouble average() {
return localLongStream.average();
}
@Override
public LongSummaryStatistics summaryStatistics() {
return localLongStream.summaryStatistics();
}
@Override
public boolean anyMatch(LongPredicate predicate) {
return localLongStream.anyMatch(predicate);
}
@Override
public boolean allMatch(LongPredicate predicate) {
return localLongStream.allMatch(predicate);
}
@Override
public boolean noneMatch(LongPredicate predicate) {
return localLongStream.noneMatch(predicate);
}
@Override
public OptionalLong findFirst() {
return localLongStream.findFirst();
}
@Override
public OptionalLong findAny() {
return localLongStream.findAny();
}
@Override
public CacheStream<Long> boxed() {
return mapToObj(Long::valueOf);
}
@Override
public DoubleCacheStream asDoubleStream() {
return mapToDouble(l -> (double) l);
}
@Override
public LongCacheStream onClose(Runnable closeHandler) {
remoteStream = (BaseCacheStream) remoteStream.onClose(closeHandler);
return this;
}
@Override
public void close() {
localLongStream.close();
remoteStream.close();
}
}
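A usage sketch of the hand-off this class implements, assuming an existing Cache<String, Long> and marshallable functions: the mapToLong step is queued for remote execution, while sorted() and everything after it run on the intermediate (local) stream fed by the remote results.

import org.infinispan.Cache;

public class IntermediateLongStreamSketch {
   static long[] smallestThree(Cache<String, Long> cache) {
      return cache.values().stream()
            .mapToLong(Long::longValue)   // distributed portion of the pipeline
            .sorted()                     // switches to an IntermediateLongCacheStream
            .limit(3)                     // applied locally after the sort
            .toArray();                   // drains the locally sorted results
   }
}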
| 8,772
| 26.415625
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/DistributedLongCacheStream.java
|
package org.infinispan.stream.impl;
import java.lang.invoke.MethodHandles;
import java.util.Iterator;
import java.util.LongSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.LongBinaryOperator;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;
import java.util.function.LongPredicate;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
import java.util.function.LongUnaryOperator;
import java.util.function.ObjLongConsumer;
import java.util.function.Supplier;
import java.util.function.ToLongFunction;
import java.util.stream.LongStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.reactive.publisher.PublisherReducers;
import org.infinispan.stream.impl.intops.primitive.l.AsDoubleLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.BoxedLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.DistinctLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.FilterLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.FlatMapLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.LimitLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToDoubleLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToIntLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToObjLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.PeekLongOperation;
import org.infinispan.util.function.SerializableBiConsumer;
import org.infinispan.util.function.SerializableBiFunction;
import org.infinispan.util.function.SerializableBinaryOperator;
import org.infinispan.util.function.SerializableCallable;
import org.infinispan.util.function.SerializableComparator;
import org.infinispan.util.function.SerializableLongConsumer;
import org.infinispan.util.function.SerializableLongFunction;
import org.infinispan.util.function.SerializableLongPredicate;
import org.infinispan.util.function.SerializableLongToDoubleFunction;
import org.infinispan.util.function.SerializableLongToIntFunction;
import org.infinispan.util.function.SerializableLongUnaryOperator;
import org.infinispan.util.function.SerializableObjLongConsumer;
import org.infinispan.util.function.SerializablePredicate;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
/**
* Implementation of {@link LongStream} that utilizes a lazily evaluated distributed back end execution. Note this
* class is only able to be created using {@link org.infinispan.CacheStream#mapToLong(ToLongFunction)} or similar
* methods from the {@link org.infinispan.CacheStream} interface.
* @param <Original> original stream type
*/
public class DistributedLongCacheStream<Original> extends AbstractCacheStream<Original, Long, LongStream, LongCacheStream>
implements LongCacheStream {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
/**
* This constructor is to be used only when a user calls a map or flat map method changing to a LongStream
* from a CacheStream, Stream, DoubleStream, IntStream etc.
* @param other other instance of {@link AbstractCacheStream} to copy details from
*/
protected DistributedLongCacheStream(AbstractCacheStream other) {
super(other);
}
@Override
protected Log getLog() {
return log;
}
@Override
protected LongCacheStream unwrap() {
return this;
}
@Override
public LongCacheStream filter(LongPredicate predicate) {
return addIntermediateOperation(new FilterLongOperation<>(predicate));
}
@Override
public LongCacheStream filter(SerializableLongPredicate predicate) {
return filter((LongPredicate) predicate);
}
@Override
public LongCacheStream map(LongUnaryOperator mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
return addIntermediateOperation(new MapLongOperation(mapper));
}
@Override
public LongCacheStream map(SerializableLongUnaryOperator mapper) {
return map((LongUnaryOperator) mapper);
}
@Override
public <U> CacheStream<U> mapToObj(LongFunction<? extends U> mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToObjLongOperation<>(mapper));
return cacheStream();
}
@Override
public <U> CacheStream<U> mapToObj(SerializableLongFunction<? extends U> mapper) {
return mapToObj((LongFunction<? extends U>) mapper);
}
@Override
public IntCacheStream mapToInt(LongToIntFunction mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToIntLongOperation(mapper));
return intCacheStream();
}
@Override
public IntCacheStream mapToInt(SerializableLongToIntFunction mapper) {
return mapToInt((LongToIntFunction) mapper);
}
@Override
public DoubleCacheStream mapToDouble(LongToDoubleFunction mapper) {
// Don't need to update iterator operation as we already are guaranteed to be at least MAP
addIntermediateOperationMap(new MapToDoubleLongOperation(mapper));
return doubleCacheStream();
}
@Override
public DoubleCacheStream mapToDouble(SerializableLongToDoubleFunction mapper) {
return mapToDouble((LongToDoubleFunction) mapper);
}
@Override
public LongCacheStream flatMap(LongFunction<? extends LongStream> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
return addIntermediateOperation(new FlatMapLongOperation(mapper));
}
@Override
public LongCacheStream flatMap(SerializableLongFunction<? extends LongStream> mapper) {
return flatMap((LongFunction<? extends LongStream>) mapper);
}
@Override
public LongCacheStream distinct() {
// Distinct is applied remotely as well
addIntermediateOperation(DistinctLongOperation.getInstance());
return new IntermediateLongCacheStream(this).distinct();
}
@Override
public LongCacheStream sorted() {
return new IntermediateLongCacheStream(this).sorted();
}
@Override
public LongCacheStream peek(LongConsumer action) {
return addIntermediateOperation(new PeekLongOperation(action));
}
@Override
public LongCacheStream peek(SerializableLongConsumer action) {
return peek((LongConsumer) action);
}
@Override
public DoubleCacheStream asDoubleStream() {
addIntermediateOperationMap(AsDoubleLongOperation.getInstance());
return doubleCacheStream();
}
@Override
public CacheStream<Long> boxed() {
addIntermediateOperationMap(BoxedLongOperation.getInstance());
return cacheStream();
}
@Override
public LongCacheStream limit(long maxSize) {
// Limit is applied remotely as well
addIntermediateOperation(new LimitLongOperation(maxSize));
return new IntermediateLongCacheStream(this).limit(maxSize);
}
@Override
public LongCacheStream skip(long n) {
return new IntermediateLongCacheStream(this).skip(n);
}
// Rest are terminal operators
@Override
public void forEach(LongConsumer action) {
peek(action)
.iterator()
.forEachRemaining((long ignore) -> { });
}
@Override
public void forEach(SerializableLongConsumer action) {
forEach((LongConsumer) action);
}
@Override
public <K, V> void forEach(ObjLongConsumer<Cache<K, V>> action) {
peek(CacheBiConsumers.longConsumer(action))
.iterator()
.forEachRemaining((long ignore) -> { });
}
@Override
public <K, V> void forEach(SerializableObjLongConsumer<Cache<K, V>> action) {
forEach((ObjLongConsumer<Cache<K, V>>) action);
}
@Override
public void forEachOrdered(LongConsumer action) {
// Our stream is not sorted so just call forEach
forEach(action);
}
@Override
public long[] toArray() {
Object[] values = performPublisherOperation(PublisherReducers.toArrayReducer(), PublisherReducers.toArrayFinalizer());
long[] results = new long[values.length];
int i = 0;
for (Object obj : values) {
results[i++] = (Long) obj;
}
return results;
}
@Override
public long reduce(long identity, LongBinaryOperator op) {
Function<Publisher<Long>, CompletionStage<Long>> reduce = PublisherReducers.reduce(identity,
(SerializableBiFunction<Long, Long, Long>) op::applyAsLong);
return performPublisherOperation(reduce, reduce);
}
@Override
public OptionalLong reduce(LongBinaryOperator op) {
Function<Publisher<Long>, CompletionStage<Long>> reduce = PublisherReducers.reduce(
(SerializableBinaryOperator<Long>) op::applyAsLong);
Long result = performPublisherOperation(reduce, reduce);
if (result == null) {
return OptionalLong.empty();
}
return OptionalLong.of(result);
}
@Override
public <R> R collect(Supplier<R> supplier, ObjLongConsumer<R> accumulator, BiConsumer<R, R> combiner) {
return performPublisherOperation(PublisherReducers.collect(supplier,
(SerializableBiConsumer<R, Long>) accumulator::accept),
PublisherReducers.accumulate(combiner));
}
@Override
public long sum() {
Function<Publisher<Long>, CompletionStage<Long>> addFunction = PublisherReducers.add();
return performPublisherOperation(addFunction, addFunction);
}
@Override
public OptionalLong min() {
SerializableComparator<Long> serializableComparator = Long::compareTo;
Function<Publisher<Long>, CompletionStage<Long>> minFunction = PublisherReducers.min(serializableComparator);
Long min = performPublisherOperation(minFunction, minFunction);
if (min == null) {
return OptionalLong.empty();
}
return OptionalLong.of(min);
}
@Override
public OptionalLong max() {
SerializableComparator<Long> serializableComparator = Long::compareTo;
Function<Publisher<Long>, CompletionStage<Long>> maxFunction = PublisherReducers.max(serializableComparator);
Long max = performPublisherOperation(maxFunction, maxFunction);
if (max == null) {
return OptionalLong.empty();
}
return OptionalLong.of(max);
}
@Override
public OptionalDouble average() {
LongSummaryStatistics lss = summaryStatistics();
if (lss.getCount() == 0) {
return OptionalDouble.empty();
}
return OptionalDouble.of(lss.getAverage());
}
@Override
public LongSummaryStatistics summaryStatistics() {
return performPublisherOperation(PublisherReducers.reduceWith(
(SerializableCallable<LongSummaryStatistics>) LongSummaryStatistics::new,
(SerializableBiFunction<LongSummaryStatistics, Long, LongSummaryStatistics>) (lss, longValue) -> {
lss.accept(longValue);
return lss;
}), PublisherReducers.reduce(
(SerializableBinaryOperator<LongSummaryStatistics>) (first, second) -> {
first.combine(second);
return first;
}));
}
@Override
public boolean anyMatch(LongPredicate predicate) {
return performPublisherOperation(PublisherReducers.anyMatch((SerializablePredicate<Long>) predicate::test),
PublisherReducers.or());
}
@Override
public boolean allMatch(LongPredicate predicate) {
return performPublisherOperation(PublisherReducers.allMatch((SerializablePredicate<Long>) predicate::test),
PublisherReducers.and());
}
@Override
public boolean noneMatch(LongPredicate predicate) {
return performPublisherOperation(PublisherReducers.noneMatch((SerializablePredicate<Long>) predicate::test),
PublisherReducers.and());
}
@Override
public OptionalLong findFirst() {
// Our stream is not sorted so just call findAny
return findAny();
}
@Override
public OptionalLong findAny() {
Function<Publisher<Long>, CompletionStage<Long>> function = PublisherReducers.findFirst();
Long value = performPublisherOperation(function, function);
if (value == null) {
return OptionalLong.empty();
}
return OptionalLong.of(value);
}
@Override
public PrimitiveIterator.OfLong iterator() {
return remoteIterator();
}
PrimitiveIterator.OfLong remoteIterator() {
// TODO: need to add in way to not box these later
// Since this is a remote iterator we have to add it to the remote intermediate operations queue
intermediateOperations.add(BoxedLongOperation.getInstance());
DistributedCacheStream<Original, Long> stream = new DistributedCacheStream<>(this);
Iterator<Long> iterator = stream.iterator();
return new LongIteratorToPrimitiveLong(iterator);
}
static class LongIteratorToPrimitiveLong implements PrimitiveIterator.OfLong {
private final Iterator<Long> iterator;
LongIteratorToPrimitiveLong(Iterator<Long> iterator) {
this.iterator = iterator;
}
@Override
public long nextLong() {
return iterator.next();
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
}
@Override
public Spliterator.OfLong spliterator() {
return Spliterators.spliteratorUnknownSize(iterator(), 0);
}
@Override
public long count() {
return performPublisherOperation(PublisherReducers.count(), PublisherReducers.add());
}
// These are the custom added methods for cache streams
@Override
public LongCacheStream sequentialDistribution() {
parallelDistribution = false;
return this;
}
@Override
public LongCacheStream parallelDistribution() {
parallelDistribution = true;
return this;
}
@Override
public LongCacheStream filterKeySegments(Set<Integer> segments) {
return filterKeySegments(IntSets.from(segments));
}
@Override
public LongCacheStream filterKeySegments(IntSet segments) {
segmentsToFilter = segments;
return this;
}
@Override
public LongCacheStream filterKeys(Set<?> keys) {
keysToFilter = keys;
return this;
}
@Override
public LongCacheStream distributedBatchSize(int batchSize) {
distributedBatchSize = batchSize;
return this;
}
@Override
public LongCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
if (segmentCompletionListener == null) {
segmentCompletionListener = listener;
} else {
segmentCompletionListener = composeWithExceptions(segmentCompletionListener, listener);
}
return this;
}
@Override
public LongCacheStream disableRehashAware() {
rehashAware = false;
return this;
}
@Override
public LongCacheStream timeout(long timeout, TimeUnit unit) {
if (timeout <= 0) {
throw new IllegalArgumentException("Timeout must be greater than 0");
}
this.timeout = timeout;
this.timeoutUnit = unit;
return this;
}
protected <R> DistributedCacheStream<Original, R> cacheStream() {
return new DistributedCacheStream<>(this);
}
protected DistributedDoubleCacheStream<Original> doubleCacheStream() {
return new DistributedDoubleCacheStream<>(this);
}
protected DistributedIntCacheStream<Original> intCacheStream() {
return new DistributedIntCacheStream<>(this);
}
}
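A terminal-operation sketch for this class, assuming an existing Cache<String, Long>: each terminal call below becomes a distributed publisher reduction, with per-node partial results combined on the originating node (summaryStatistics merges per-node LongSummaryStatistics via combine, as shown above).

import java.util.LongSummaryStatistics;

import org.infinispan.Cache;

public class DistributedLongStreamSketch {
   static void report(Cache<String, Long> cache) {
      // sum() reduces per-node partial sums with PublisherReducers.add()
      long total = cache.values().stream()
            .mapToLong(Long::longValue)
            .sum();
      // summaryStatistics() accumulates per node, then merges the partials
      LongSummaryStatistics stats = cache.values().stream()
            .mapToLong(Long::longValue)
            .summaryStatistics();
      System.out.printf("sum=%d count=%d avg=%.2f%n", total, stats.getCount(), stats.getAverage());
   }
}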
| 16,332
| 33.098121
| 124
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/TerminalOperation.java
|
package org.infinispan.stream.impl;
import java.util.function.Supplier;
import java.util.stream.Stream;
import org.infinispan.factories.ComponentRegistry;
/**
* Interface describing a terminal stream operation that doesn't track keys.
* @param <Original> the type of elements in the underlying stream
* @param <R> the type of the returned result
*/
public interface TerminalOperation<Original, R> extends SegmentAwareOperation {
/**
* Actually runs the terminal operation returning the result from the operation
* @return the value retrieved for the operation
*/
R performOperation();
/**
* Sets the local supplier for the stream. This is to be invoked on a remote node after the object is unmarshalled
* to set the supplier to use
* @param supplier the supplier that will return the stream that the operations are performed on
*/
void setSupplier(Supplier<Stream<Original>> supplier);
/**
* Handles injection of components for various intermediate and this operation.
* @param registry component registry to use
*/
void handleInjection(ComponentRegistry registry);
}
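A minimal sketch of an implementation of this interface that counts the locally supplied elements. It assumes SegmentAwareOperation declares a single boolean lostSegment(boolean) method (stubbed below); if the actual contract differs, adjust the stub accordingly.

import java.util.function.Supplier;
import java.util.stream.Stream;

import org.infinispan.factories.ComponentRegistry;

class CountTerminalOperation<Original> implements TerminalOperation<Original, Long> {
   private transient Supplier<Stream<Original>> supplier;

   @Override
   public Long performOperation() {
      // Consume the locally supplied stream and return how many elements it produced
      return supplier.get().count();
   }

   @Override
   public void setSupplier(Supplier<Stream<Original>> supplier) {
      // Invoked on the remote node after unmarshalling, before performOperation()
      this.supplier = supplier;
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      // Nothing to inject for a plain count
   }

   // Assumed SegmentAwareOperation method; counting cannot compensate for a lost segment
   public boolean lostSegment(boolean stopIfLost) {
      return true;
   }
}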
| 1,065
| 32.3125
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/IntermediateIntCacheStream.java
|
package org.infinispan.stream.impl;
import java.util.IntSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.IntBinaryOperator;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
import java.util.function.ObjIntConsumer;
import java.util.function.Supplier;
import java.util.stream.IntStream;
import org.infinispan.BaseCacheStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.stream.impl.local.LocalIntCacheStream;
/**
* An intermediate int cache stream used when an intermediate operation requires both a remote and a local portion.
*/
public class IntermediateIntCacheStream implements IntCacheStream {
private BaseCacheStream remoteStream;
private final IntermediateType type;
private LocalIntCacheStream localIntStream;
private IntermediateCacheStreamSupplier supplier;
public IntermediateIntCacheStream(DistributedIntCacheStream remoteStream) {
this.remoteStream = remoteStream;
this.type = IntermediateType.INT;
this.supplier = new IntermediateCacheStreamSupplier(type, remoteStream);
this.localIntStream = new LocalIntCacheStream(supplier, remoteStream.parallel,
remoteStream.registry);
}
public IntermediateIntCacheStream(BaseCacheStream remoteStream, IntermediateType type,
LocalIntCacheStream localIntStream, IntermediateCacheStreamSupplier supplier) {
this.remoteStream = remoteStream;
this.type = type;
this.localIntStream = localIntStream;
this.supplier = supplier;
}
@Override
public IntCacheStream sequentialDistribution() {
remoteStream = remoteStream.sequentialDistribution();
return this;
}
@Override
public IntCacheStream parallelDistribution() {
remoteStream = remoteStream.parallelDistribution();
return this;
}
@Override
public IntCacheStream filterKeySegments(Set<Integer> segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public IntCacheStream filterKeySegments(IntSet segments) {
remoteStream = remoteStream.filterKeySegments(segments);
return this;
}
@Override
public IntCacheStream filterKeys(Set<?> keys) {
remoteStream = remoteStream.filterKeys(keys);
return this;
}
@Override
public IntCacheStream distributedBatchSize(int batchSize) {
remoteStream = remoteStream.distributedBatchSize(batchSize);
return this;
}
@Override
public IntCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
throw new UnsupportedOperationException("Segment completion listener is only supported when no intermediate " +
"operation is provided (sorted, distinct, limit, skip)");
}
@Override
public IntCacheStream disableRehashAware() {
remoteStream = remoteStream.disableRehashAware();
return this;
}
@Override
public IntCacheStream timeout(long timeout, TimeUnit unit) {
remoteStream = remoteStream.timeout(timeout, unit);
return this;
}
@Override
public boolean isParallel() {
return localIntStream.isParallel();
}
@Override
public IntCacheStream sorted() {
localIntStream = localIntStream.sorted();
return this;
}
@Override
public IntCacheStream limit(long maxSize) {
localIntStream = localIntStream.limit(maxSize);
return this;
}
@Override
public IntCacheStream skip(long n) {
localIntStream = localIntStream.skip(n);
return this;
}
@Override
public IntCacheStream peek(IntConsumer action) {
localIntStream = localIntStream.peek(action);
return this;
}
@Override
public IntCacheStream distinct() {
localIntStream = localIntStream.distinct();
return this;
}
@Override
public IntCacheStream filter(IntPredicate predicate) {
localIntStream = localIntStream.filter(predicate);
return this;
}
@Override
public IntCacheStream map(IntUnaryOperator mapper) {
localIntStream = localIntStream.map(mapper);
return this;
}
@Override
public <U> CacheStream<U> mapToObj(IntFunction<? extends U> mapper) {
return new IntermediateCacheStream<>(remoteStream, type, localIntStream.mapToObj(mapper), supplier);
}
@Override
public LongCacheStream mapToLong(IntToLongFunction mapper) {
return new IntermediateLongCacheStream(remoteStream, type, localIntStream.mapToLong(mapper), supplier);
}
@Override
public DoubleCacheStream mapToDouble(IntToDoubleFunction mapper) {
return new IntermediateDoubleCacheStream(remoteStream, type, localIntStream.mapToDouble(mapper), supplier);
}
@Override
public IntCacheStream flatMap(IntFunction<? extends IntStream> mapper) {
localIntStream = localIntStream.flatMap(mapper);
return this;
}
@Override
public IntCacheStream parallel() {
remoteStream = (BaseCacheStream) remoteStream.parallel();
localIntStream = (LocalIntCacheStream) localIntStream.parallel();
return this;
}
@Override
public PrimitiveIterator.OfInt iterator() {
return localIntStream.iterator();
}
@Override
public Spliterator.OfInt spliterator() {
return localIntStream.spliterator();
}
@Override
public IntCacheStream sequential() {
remoteStream = (BaseCacheStream) remoteStream.sequential();
localIntStream = (LocalIntCacheStream) localIntStream.sequential();
return this;
}
@Override
public IntCacheStream unordered() {
localIntStream = (LocalIntCacheStream) localIntStream.unordered();
return this;
}
@Override
public void forEach(IntConsumer action) {
localIntStream.forEach(action);
}
@Override
public <K, V> void forEach(ObjIntConsumer<Cache<K, V>> action) {
localIntStream.forEach(action);
}
@Override
public void forEachOrdered(IntConsumer action) {
localIntStream.forEachOrdered(action);
}
@Override
public int[] toArray() {
return localIntStream.toArray();
}
@Override
public int reduce(int identity, IntBinaryOperator op) {
return localIntStream.reduce(identity, op);
}
@Override
public OptionalInt reduce(IntBinaryOperator op) {
return localIntStream.reduce(op);
}
@Override
public <R> R collect(Supplier<R> supplier, ObjIntConsumer<R> accumulator, BiConsumer<R, R> combiner) {
return localIntStream.collect(supplier, accumulator, combiner);
}
@Override
public int sum() {
return localIntStream.sum();
}
@Override
public OptionalInt min() {
return localIntStream.min();
}
@Override
public OptionalInt max() {
return localIntStream.max();
}
@Override
public long count() {
return localIntStream.count();
}
@Override
public OptionalDouble average() {
return localIntStream.average();
}
@Override
public IntSummaryStatistics summaryStatistics() {
return localIntStream.summaryStatistics();
}
@Override
public boolean anyMatch(IntPredicate predicate) {
return localIntStream.anyMatch(predicate);
}
@Override
public boolean allMatch(IntPredicate predicate) {
return localIntStream.allMatch(predicate);
}
@Override
public boolean noneMatch(IntPredicate predicate) {
return localIntStream.noneMatch(predicate);
}
@Override
public OptionalInt findFirst() {
return localIntStream.findFirst();
}
@Override
public OptionalInt findAny() {
return localIntStream.findAny();
}
@Override
public CacheStream<Integer> boxed() {
return mapToObj(Integer::valueOf);
}
@Override
public DoubleCacheStream asDoubleStream() {
return mapToDouble(l -> (double) l);
}
@Override
public LongCacheStream asLongStream() {
return mapToLong(l -> (long) l);
}
@Override
public IntCacheStream onClose(Runnable closeHandler) {
remoteStream = (BaseCacheStream) remoteStream.onClose(closeHandler);
return this;
}
@Override
public void close() {
localIntStream.close();
remoteStream.close();
}
}
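A sketch mirroring the long variant above, assuming an existing Cache<String, Integer> and marshallable functions: once sorted() switches execution to this intermediate stream, the widening asLongStream() conversion (mapToLong underneath, as shown above) and the final sum stay on the local side.

import org.infinispan.Cache;

public class IntermediateIntStreamSketch {
   static long sortedWidenedSum(Cache<String, Integer> cache) {
      return cache.values().stream()
            .mapToInt(Integer::intValue)  // distributed portion of the pipeline
            .sorted()                     // switches to an IntermediateIntCacheStream
            .asLongStream()               // local widening to avoid int overflow
            .sum();
   }
}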
| 8,761
| 25.877301
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/IntermediateCacheStreamSupplier.java
|
package org.infinispan.stream.impl;
import java.util.Set;
import java.util.stream.BaseStream;
import org.infinispan.BaseCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.stream.impl.local.AbstractLocalCacheStream;
/**
* Stream supplier that is used when a local intermediate operation is invoked, requiring a combined remote and local
* operation stream.
*/
class IntermediateCacheStreamSupplier<T, S extends BaseStream<T, S>> implements AbstractLocalCacheStream.StreamSupplier<T, S> {
final IntermediateType type;
final BaseCacheStream streamable;
IntermediateCacheStreamSupplier(IntermediateType type, BaseCacheStream streamable) {
this.type = type;
this.streamable = streamable;
}
@Override
public S buildStream(IntSet segmentsToFilter, Set<?> keysToFilter, boolean parallel) {
return (S) type.handleStream(streamable);
}
}
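A same-package sketch of the wiring this supplier enables, mirroring the IntermediateLongCacheStream constructor shown earlier: the local stream obtains its source elements by asking the supplier to re-run the remote portion of the pipeline (parallel and registry are fields inherited from AbstractCacheStream).

package org.infinispan.stream.impl;

import org.infinispan.stream.impl.local.LocalLongCacheStream;

class SupplierWiringSketch {
   static LocalLongCacheStream localViewOf(DistributedLongCacheStream<?> remote) {
      // The supplier captures the remote stream and the intermediate type so the
      // local stream can rebuild its source on demand via type.handleStream(...)
      IntermediateCacheStreamSupplier supplier =
            new IntermediateCacheStreamSupplier(IntermediateType.LONG, remote);
      return new LocalLongCacheStream(supplier, remote.parallel, remote.registry);
   }
}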
| 903
| 30.172414
| 127
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/DistributedCacheStream.java
|
package org.infinispan.stream.impl;
import java.lang.invoke.MethodHandles;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Collector;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.util.AbstractIterator;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.Closeables;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.marshall.core.MarshallableFunctions;
import org.infinispan.reactive.publisher.PublisherReducers;
import org.infinispan.reactive.publisher.impl.ClusterPublisherManager;
import org.infinispan.reactive.publisher.impl.DeliveryGuarantee;
import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier;
import org.infinispan.remoting.transport.Address;
import org.infinispan.stream.impl.intops.object.DistinctOperation;
import org.infinispan.stream.impl.intops.object.FilterOperation;
import org.infinispan.stream.impl.intops.object.FlatMapOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToDoubleOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToIntOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToLongOperation;
import org.infinispan.stream.impl.intops.object.LimitOperation;
import org.infinispan.stream.impl.intops.object.MapOperation;
import org.infinispan.stream.impl.intops.object.MapToDoubleOperation;
import org.infinispan.stream.impl.intops.object.MapToIntOperation;
import org.infinispan.stream.impl.intops.object.MapToLongOperation;
import org.infinispan.stream.impl.intops.object.PeekOperation;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* Implementation of {@link CacheStream} that provides support for lazily distributing stream methods to appropriate
* nodes
* @param <Original> the original type of the underlying stream - normally CacheEntry or Object
* @param <R> The type of the stream
*/
public class DistributedCacheStream<Original, R> extends AbstractCacheStream<Original, R, Stream<R>, CacheStream<R>>
implements CacheStream<R> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final int maxSegment;
/**
* Standard constructor requiring all pertinent information to properly utilize a distributed cache stream
* @param localAddress the local address for this node
* @param parallel whether or not this stream is parallel
* @param ctx the invocation context when this stream is created
* @param explicitFlags the explicit flags to apply to these operations (for example, whether a cache loader
*        should be utilized)
* @param distributedBatchSize default size of distributed batches
* @param executor executor to be used for certain operations that require async processing (i.e. iterator)
* @param registry component registry to wire objects with
* @param toKeyFunction function that can be applied to an object in the stream to convert it to a key, or null if
*        the object is a key already. A non-null value also signals that the underlying stream contains
*        entries rather than plain keys
* @param clusterPublisherManager publisher manager
*/
public DistributedCacheStream(Address localAddress, boolean parallel, InvocationContext ctx,
long explicitFlags, int distributedBatchSize, Executor executor,
ComponentRegistry registry, Function<? super Original, ?> toKeyFunction,
ClusterPublisherManager<?, ?> clusterPublisherManager) {
super(localAddress, parallel, ctx, explicitFlags, distributedBatchSize, executor, registry, toKeyFunction,
clusterPublisherManager);
Configuration configuration = registry.getComponent(Configuration.class);
maxSegment = configuration.clustering().hash().numSegments();
}
/**
* This constructor is to be used only when a user calls a map or flat map method changing back to a regular
* Stream from an IntStream, DoubleStream etc.
* @param other other instance of {@link AbstractCacheStream} to copy details from
*/
protected DistributedCacheStream(AbstractCacheStream other) {
super(other);
Configuration configuration = registry.getComponent(Configuration.class);
maxSegment = configuration.clustering().hash().numSegments();
}
@Override
protected Log getLog() {
return log;
}
@Override
protected CacheStream<R> unwrap() {
return this;
}
// Intermediate operations that are stored for lazy evaluation
@Override
public CacheStream<R> filter(Predicate<? super R> predicate) {
return addIntermediateOperation(new FilterOperation<>(predicate));
}
@Override
public <R1> CacheStream<R1> map(Function<? super R, ? extends R1> mapper) {
if (iteratorOperation != IteratorOperation.FLAT_MAP) {
iteratorOperation = IteratorOperation.MAP;
}
addIntermediateOperationMap(new MapOperation<>(mapper));
return (CacheStream<R1>) this;
}
@Override
public IntCacheStream mapToInt(ToIntFunction<? super R> mapper) {
if (iteratorOperation != IteratorOperation.FLAT_MAP) {
iteratorOperation = IteratorOperation.MAP;
}
addIntermediateOperationMap(new MapToIntOperation<>(mapper));
return intCacheStream();
}
@Override
public LongCacheStream mapToLong(ToLongFunction<? super R> mapper) {
if (iteratorOperation != IteratorOperation.FLAT_MAP) {
iteratorOperation = IteratorOperation.MAP;
}
addIntermediateOperationMap(new MapToLongOperation<>(mapper));
return longCacheStream();
}
@Override
public DoubleCacheStream mapToDouble(ToDoubleFunction<? super R> mapper) {
if (iteratorOperation != IteratorOperation.FLAT_MAP) {
iteratorOperation = IteratorOperation.MAP;
}
addIntermediateOperationMap(new MapToDoubleOperation<>(mapper));
return doubleCacheStream();
}
@Override
public <R1> CacheStream<R1> flatMap(Function<? super R, ? extends Stream<? extends R1>> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
addIntermediateOperationMap(new FlatMapOperation<R, R1>(mapper));
return (CacheStream<R1>) this;
}
@Override
public IntCacheStream flatMapToInt(Function<? super R, ? extends IntStream> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
addIntermediateOperationMap(new FlatMapToIntOperation<>(mapper));
return intCacheStream();
}
@Override
public LongCacheStream flatMapToLong(Function<? super R, ? extends LongStream> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
addIntermediateOperationMap(new FlatMapToLongOperation<>(mapper));
return longCacheStream();
}
@Override
public DoubleCacheStream flatMapToDouble(Function<? super R, ? extends DoubleStream> mapper) {
iteratorOperation = IteratorOperation.FLAT_MAP;
addIntermediateOperationMap(new FlatMapToDoubleOperation<>(mapper));
return doubleCacheStream();
}
@Override
public CacheStream<R> distinct() {
// Distinct is applied remotely as well
addIntermediateOperation(DistinctOperation.getInstance());
return new IntermediateCacheStream<>(this).distinct();
}
@Override
public CacheStream<R> sorted() {
return new IntermediateCacheStream<>(this).sorted();
}
@Override
public CacheStream<R> sorted(Comparator<? super R> comparator) {
return new IntermediateCacheStream<>(this).sorted(comparator);
}
@Override
public CacheStream<R> peek(Consumer<? super R> action) {
return addIntermediateOperation(new PeekOperation<>(action));
}
@Override
public CacheStream<R> limit(long maxSize) {
// Limit is applied remotely as well
addIntermediateOperation(new LimitOperation<>(maxSize));
return new IntermediateCacheStream<>(this).limit(maxSize);
}
@Override
public CacheStream<R> skip(long n) {
return new IntermediateCacheStream<>(this).skip(n);
}
// Now we have terminal operators
@Override
public R reduce(R identity, BinaryOperator<R> accumulator) {
return performPublisherOperation(PublisherReducers.reduce(identity, accumulator),
PublisherReducers.reduce(accumulator));
}
@Override
public Optional<R> reduce(BinaryOperator<R> accumulator) {
Function<Publisher<R>, CompletionStage<R>> function = PublisherReducers.reduce(accumulator);
R value = performPublisherOperation(function, function);
return Optional.ofNullable(value);
}
@Override
public <U> U reduce(U identity, BiFunction<U, ? super R, U> accumulator, BinaryOperator<U> combiner) {
return performPublisherOperation(PublisherReducers.reduce(identity, accumulator),
PublisherReducers.reduce(combiner));
}
/**
* {@inheritDoc}
* Note: this method doesn't pay attention to ordering constraints and any sorting performed on the stream will
* be ignored by this terminal operator. If you wish to have an ordered collector use the
* {@link DistributedCacheStream#collect(Collector)} method making sure the
* {@link java.util.stream.Collector.Characteristics#UNORDERED} property is not set.
* @param supplier supplier of the mutable result container
* @param accumulator function that folds a stream element into a result container
* @param combiner function that merges two result containers
* @param <R1> the type of the mutable result container
* @return the collected result
*/
@Override
public <R1> R1 collect(Supplier<R1> supplier, BiConsumer<R1, ? super R> accumulator, BiConsumer<R1, R1> combiner) {
return performPublisherOperation(PublisherReducers.collect(supplier, accumulator),
PublisherReducers.accumulate(combiner));
}
@Override
public <R1, A> R1 collect(Collector<? super R, A, R1> collector) {
A intermediateResult = performPublisherOperation(PublisherReducers.collectorReducer(collector),
PublisherReducers.collectorFinalizer(collector));
// Identity finish means we can just ignore the finisher method
if (collector.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)) {
return (R1) intermediateResult;
} else {
return collector.finisher().apply(intermediateResult);
}
}
@Override
public Optional<R> min(Comparator<? super R> comparator) {
Function<Publisher<R>, CompletionStage<R>> function = PublisherReducers.min(comparator);
R value = performPublisherOperation(function, function);
return Optional.ofNullable(value);
}
@Override
public Optional<R> max(Comparator<? super R> comparator) {
Function<Publisher<R>, CompletionStage<R>> function = PublisherReducers.max(comparator);
R value = performPublisherOperation(function, function);
return Optional.ofNullable(value);
}
@Override
public boolean anyMatch(Predicate<? super R> predicate) {
return performPublisherOperation(PublisherReducers.anyMatch(predicate), PublisherReducers.or());
}
@Override
public boolean allMatch(Predicate<? super R> predicate) {
return performPublisherOperation(PublisherReducers.allMatch(predicate), PublisherReducers.and());
}
@Override
public boolean noneMatch(Predicate<? super R> predicate) {
return performPublisherOperation(PublisherReducers.noneMatch(predicate), PublisherReducers.and());
}
@Override
public Optional<R> findFirst() {
// We aren't sorted, so just do findAny
return findAny();
}
@Override
public Optional<R> findAny() {
Function<Publisher<R>, CompletionStage<R>> function = PublisherReducers.findFirst();
R value = performPublisherOperation(function, function);
return Optional.ofNullable(value);
}
@Override
public long count() {
return performPublisherOperation(PublisherReducers.count(), PublisherReducers.add());
}
// The next ones are key tracking terminal operators
@Override
public Iterator<R> iterator() {
log.tracef("Distributed iterator invoked with rehash: %s", rehashAware);
Function usedTransformer;
if (intermediateOperations.isEmpty()) {
usedTransformer = MarshallableFunctions.identity();
} else {
usedTransformer = new CacheIntermediatePublisher(intermediateOperations);
}
DeliveryGuarantee deliveryGuarantee = rehashAware ? DeliveryGuarantee.EXACTLY_ONCE : DeliveryGuarantee.AT_MOST_ONCE;
Publisher<R> publisherToSubscribeTo;
SegmentPublisherSupplier<R> publisher;
if (toKeyFunction == null) {
publisher = cpm.keyPublisher(segmentsToFilter, keysToFilter, invocationContext, explicitFlags,
deliveryGuarantee, distributedBatchSize, usedTransformer);
} else {
publisher = cpm.entryPublisher(segmentsToFilter, keysToFilter, invocationContext, explicitFlags,
deliveryGuarantee, distributedBatchSize, usedTransformer);
}
CompletionSegmentTracker<R> segmentTracker;
if (segmentCompletionListener != null) {
// Tracker relies on the order in which segment completions arrive relative to emitted values
segmentTracker = new CompletionSegmentTracker<>(segmentCompletionListener);
publisherToSubscribeTo = Flowable.fromPublisher(publisher.publisherWithSegments())
.mapOptional(segmentTracker);
} else {
segmentTracker = null;
publisherToSubscribeTo = publisher.publisherWithoutSegments();
}
CloseableIterator<R> realIterator = Closeables.iterator(Flowable.fromPublisher(publisherToSubscribeTo)
// Make sure any runtime errors are wrapped in CacheException
.onErrorResumeNext(RxJavaInterop.cacheExceptionWrapper()), distributedBatchSize);
onClose(realIterator::close);
if (segmentTracker != null) {
return new AbstractIterator<R>() {
@Override
protected R getNext() {
if (realIterator.hasNext()) {
R value = realIterator.next();
segmentTracker.returningObject(value);
return value;
} else {
segmentTracker.onComplete();
}
return null;
}
};
}
return realIterator;
}
/**
* Tracking class that keeps track of segment completions and maps them to a given value. This value is not actually
* part of these segments, but is instead the object returned immediately after the segments complete. This way
* we can guarantee to notify the user which segments completed only after all of their elements have been processed.
* All methods except for accept(int) are guaranteed to be called sequentially and in a safe manner.
*/
private class CompletionSegmentTracker<R> implements io.reactivex.rxjava3.functions.Function<SegmentPublisherSupplier.Notification<R>, Optional<? extends R>> {
private final Consumer<Supplier<PrimitiveIterator.OfInt>> listener;
private final Map<R, IntSet> awaitingNotification;
volatile IntSet completedSegments;
private CompletionSegmentTracker(Consumer<Supplier<PrimitiveIterator.OfInt>> listener) {
this.listener = Objects.requireNonNull(listener);
this.awaitingNotification = new HashMap<>();
this.completedSegments = IntSets.mutableEmptySet(maxSegment);
}
@Override
public Optional<R> apply(SegmentPublisherSupplier.Notification<R> r) throws Throwable {
if (r.isSegmentComplete()) {
completedSegments.set(r.completedSegment());
return Optional.empty();
}
R value = r.value();
if (!completedSegments.isEmpty()) {
log.tracef("Going to complete segments %s when %s is iterated upon", completedSegments, Util.toStr(value));
awaitingNotification.put(value, completedSegments);
completedSegments = IntSets.mutableEmptySet(maxSegment);
}
return Optional.of(value);
}
public void returningObject(Object value) {
IntSet segments = awaitingNotification.remove(value);
if (segments != null) {
log.tracef("Notifying listeners of segments %s complete now that %s is returning", segments, Util.toStr(value));
listener.accept(segments::iterator);
}
}
public void onComplete() {
log.tracef("Completing last segments of: %s", completedSegments);
listener.accept(completedSegments::iterator);
completedSegments.clear();
}
}
@Override
public Spliterator<R> spliterator() {
return Spliterators.spliterator(iterator(), Long.MAX_VALUE, Spliterator.CONCURRENT);
}
@Override
public void forEach(Consumer<? super R> action) {
peek(action)
.iterator()
.forEachRemaining(ignore -> { });
}
@Override
public <K, V> void forEach(BiConsumer<Cache<K, V>, ? super R> action) {
peek(CacheBiConsumers.objectConsumer(action))
.iterator()
.forEachRemaining(ignore -> { });
}
@Override
public void forEachOrdered(Consumer<? super R> action) {
// We aren't sorted, so just do forEach
forEach(action);
}
@Override
public Object[] toArray() {
return performPublisherOperation(PublisherReducers.toArrayReducer(), PublisherReducers.toArrayFinalizer());
}
@Override
public <A> A[] toArray(IntFunction<A[]> generator) {
// The types are really Function<Publisher<R>, CompletionStage<A[]>>, but to give users proper compile-time
// checks when calling toArrayReducer, a type restriction is enforced: the generated array's component type
// must be a supertype of the stream's element type. Unfortunately the Stream API does not have that
// restriction and thus only throws a RuntimeException at runtime instead.
Function function = PublisherReducers.toArrayReducer(generator);
return (A[]) performPublisherOperation(function, PublisherReducers.toArrayFinalizer(generator));
}
// These are the custom added methods for cache streams
@Override
public CacheStream<R> sequentialDistribution() {
parallelDistribution = false;
return this;
}
@Override
public CacheStream<R> parallelDistribution() {
parallelDistribution = true;
return this;
}
@Override
public CacheStream<R> filterKeySegments(Set<Integer> segments) {
segmentsToFilter = IntSets.from(segments);
return this;
}
@Override
public CacheStream<R> filterKeySegments(IntSet segments) {
segmentsToFilter = segments;
return this;
}
@Override
public CacheStream<R> filterKeys(Set<?> keys) {
keysToFilter = keys;
return this;
}
@Override
public CacheStream<R> distributedBatchSize(int batchSize) {
distributedBatchSize = batchSize;
return this;
}
@Override
public CacheStream<R> segmentCompletionListener(SegmentCompletionListener listener) {
if (segmentCompletionListener == null) {
segmentCompletionListener = listener;
} else {
segmentCompletionListener = composeWithExceptions(segmentCompletionListener, listener);
}
return this;
}
@Override
public CacheStream<R> disableRehashAware() {
rehashAware = false;
return this;
}
@Override
public CacheStream<R> timeout(long timeout, TimeUnit unit) {
if (timeout <= 0) {
throw new IllegalArgumentException("Timeout must be greater than 0");
}
this.timeout = timeout;
this.timeoutUnit = unit;
return this;
}
protected DistributedIntCacheStream intCacheStream() {
return new DistributedIntCacheStream(this);
}
protected DistributedDoubleCacheStream doubleCacheStream() {
return new DistributedDoubleCacheStream(this);
}
protected DistributedLongCacheStream longCacheStream() {
return new DistributedLongCacheStream(this);
}
}
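A usage sketch of the builder-style options defined in this class, assuming an existing Cache<String, String> and an illustrative segment count: only entries owned by segments 0 and 1 are counted, intermediate results stream back in batches of 64, and the operation fails if it does not complete within 30 seconds. The count() terminal is a distributed reduction (per-node counts are added on the originator, as shown above).

import java.util.concurrent.TimeUnit;

import org.infinispan.Cache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;

public class DistributedStreamOptionsSketch {
   static long countFirstTwoSegments(Cache<String, String> cache, int numSegments) {
      IntSet segments = IntSets.mutableEmptySet(numSegments);
      segments.set(0);
      segments.set(1);
      return cache.values().stream()
            .filterKeySegments(segments)       // restrict to segments 0 and 1
            .distributedBatchSize(64)          // batch size for streamed responses
            .timeout(30, TimeUnit.SECONDS)     // fail if the cluster takes too long
            .count();
   }
}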
| 21,439
| 37.491921
| 162
|
java
|