repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/CacheIntermediatePublisher.java
|
package org.infinispan.stream.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.cache.impl.EncodingFunction;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.marshall.core.Ids;
import org.infinispan.reactive.publisher.impl.ModifiedValueFunction;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.stream.impl.intops.MappingOperation;
import org.infinispan.stream.impl.intops.object.MapOperation;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* Function that is used to encapsulate multiple intermediate operations and perform them lazily when the function
* is applied.
* @param <R>
*/
public final class CacheIntermediatePublisher<R> implements ModifiedValueFunction<Publisher<Object>, Publisher<R>>, InjectableComponent {
private final Iterable<IntermediateOperation<?, ?, ?, ?>> intOps;
public CacheIntermediatePublisher(Iterable<IntermediateOperation<?, ?, ?, ?>> intOps) {
this.intOps = intOps;
}
@Override
public Publisher<R> apply(Publisher<Object> objectPublisher) {
Flowable<Object> innerPublisher = Flowable.fromPublisher(objectPublisher);
for (IntermediateOperation<?, ?, ?, ?> intOp : intOps) {
innerPublisher = intOp.mapFlowable((Flowable) innerPublisher);
}
return (Publisher<R>) innerPublisher;
}
@Override
public boolean isModified() {
for (IntermediateOperation intOp : intOps) {
if (intOp instanceof MappingOperation) {
// Encoding functions retain the original value - so we ignore those
if (intOp instanceof MapOperation && ((MapOperation) intOp).getFunction() instanceof EncodingFunction) {
continue;
}
return true;
}
}
return false;
}
@Override
public void inject(ComponentRegistry registry) {
for (IntermediateOperation intOp : intOps) {
intOp.handleInjection(registry);
}
}
public static final class ReducerExternalizer implements AdvancedExternalizer<CacheIntermediatePublisher> {
@Override
public void writeObject(ObjectOutput output, CacheIntermediatePublisher object) throws IOException {
output.writeObject(object.intOps);
}
@Override
public CacheIntermediatePublisher readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new CacheIntermediatePublisher((Iterable) input.readObject());
}
@Override
public Set<Class<? extends CacheIntermediatePublisher>> getTypeClasses() {
return Collections.singleton(CacheIntermediatePublisher.class);
}
@Override
public Integer getId() {
return Ids.CACHE_STREAM_INTERMEDIATE_PUBLISHER;
}
}
}
| 3,057
| 34.55814
| 137
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/RemovableIterator.java
|
package org.infinispan.stream.impl;
import java.util.Iterator;
import java.util.function.Function;
import org.infinispan.Cache;
/**
* An Iterator implementation that allows for a Iterator that doesn't allow remove operations to
* implement remove by delegating the call to the provided cache to remove the previously read value. The key used
* to remove from the cache is determined by first applying the removeFunction to the value retrieved from the
* iterator.
*
* @author wburns
* @since 8.0
* @deprecated Users should use {@link org.infinispan.commons.util.RemovableIterator} instead
*/
public class RemovableIterator<K, C> extends org.infinispan.commons.util.RemovableIterator<C> {
   /**
    * @param realIterator the underlying iterator that may not support {@code remove()}
    * @param cache the cache to remove entries from when {@code remove()} is invoked
    * @param removeFunction maps an iterated value to the cache key used for removal
    */
   public RemovableIterator(Iterator<C> realIterator, Cache<K, ?> cache,
         Function<? super C, K> removeFunction) {
      // Implement remove() by mapping the last returned value to its key and
      // deleting that key from the cache, delegating to the commons superclass.
      super(realIterator, c -> cache.remove(removeFunction.apply(c)));
   }
}
| 904
| 36.708333
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/PersistenceKeyStreamSupplier.java
|
package org.infinispan.stream.impl.local;
import java.lang.invoke.MethodHandles;
import java.util.HashSet;
import java.util.Set;
import java.util.function.ToIntFunction;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.Closeables;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.context.Flag;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.util.LazyConcatIterator;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* StreamSupplier that allows for creating streams where they utilize the {@link PersistenceManager} to publish keys
* using segments if possible.
* @author wburns
* @since 9.4
*/
public class PersistenceKeyStreamSupplier<K> implements AbstractLocalCacheStream.StreamSupplier<K, Stream<K>> {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());

   // Cache the keys come from; used for containsKey checks when an explicit key filter is given
   private final Cache<K, ?> cache;
   // Maps a key to its segment; may be null, in which case segment filtering is skipped
   private final ToIntFunction<Object> toIntFunction;
   // Stream over the in-memory data container; consumed before the store-backed publisher
   private final CacheStream<K> inMemoryStream;
   private final PersistenceManager persistenceManager;

   public PersistenceKeyStreamSupplier(Cache<K, ?> cache, ToIntFunction<Object> toIntFunction,
         CacheStream<K> inMemoryStream, PersistenceManager persistenceManager) {
      this.cache = cache;
      this.toIntFunction = toIntFunction;
      this.inMemoryStream = inMemoryStream;
      this.persistenceManager = persistenceManager;
   }

   @Override
   public Stream<K> buildStream(IntSet segmentsToFilter, Set<?> keysToFilter, boolean parallel) {
      Stream<K> stream;
      if (keysToFilter != null) {
         // Explicit key filter: iterate the requested keys and keep only those present
         // locally, rather than scanning memory + store.
         if (log.isTraceEnabled()) {
            log.tracef("Applying key filtering %s", keysToFilter);
         }
         // Make sure we aren't going remote to retrieve these
         AdvancedCache<K, ?> advancedCache = AbstractDelegatingCache.unwrapCache(cache).getAdvancedCache()
               .withFlags(Flag.CACHE_MODE_LOCAL);
         stream = (Stream<K>) (parallel ? keysToFilter.parallelStream() : keysToFilter.stream())
               .filter(advancedCache::containsKey);
         if (segmentsToFilter != null && toIntFunction != null) {
            if (log.isTraceEnabled()) {
               log.tracef("Applying segment filter %s", segmentsToFilter);
            }
            stream = stream.filter(k -> {
               int segment = toIntFunction.applyAsInt(k);
               boolean isPresent = segmentsToFilter.contains(segment);
               if (log.isTraceEnabled())
                  log.tracef("Is key %s present in segment %d? %b", k, segment, isPresent);
               return isPresent;
            });
         }
      } else {
         // No key filter: concatenate in-memory keys with keys published from the store,
         // de-duplicating by recording every in-memory key in seenKeys and excluding
         // those from the store publisher. Relies on the in-memory iterator being fully
         // consumed before the store publisher is subscribed (LazyConcatIterator is lazy).
         Publisher<K> publisher;
         CacheStream<K> inMemoryStream = this.inMemoryStream;
         Set<K> seenKeys = new HashSet<>(2048);
         if (segmentsToFilter != null) {
            inMemoryStream = inMemoryStream.filterKeySegments(segmentsToFilter);
            publisher = persistenceManager.publishKeys(segmentsToFilter, k -> !seenKeys.contains(k),
                  PersistenceManager.AccessMode.BOTH);
         } else {
            publisher = persistenceManager.publishKeys(k -> !seenKeys.contains(k), PersistenceManager.AccessMode.BOTH);
         }
         // Record each in-memory key as it is iterated so the store publisher skips it
         CloseableIterator<K> localIterator = new IteratorMapper<>(Closeables.iterator(inMemoryStream), k -> {
            seenKeys.add(k);
            return k;
         });
         Flowable<K> flowable = Flowable.fromPublisher(publisher);
         // Store iterator is created lazily, only after the local iterator is exhausted
         CloseableIterator<K> iterator = new LazyConcatIterator<>(localIterator,
               () -> org.infinispan.util.Closeables.iterator(flowable, 128));
         Iterable<K> iterable = () -> iterator;
         // Make sure we close the iterator when the resulting stream is closed
         stream = StreamSupport.stream(iterable.spliterator(), parallel).onClose(iterator::close);
      }
      return stream;
   }
}
| 4,308
| 42.09
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/AbstractLocalCacheStream.java
|
package org.infinispan.stream.impl.local;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Queue;
import java.util.Set;
import java.util.stream.BaseStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.stream.impl.intops.UnorderedOperation;
/**
* Implements the base operations required for a local stream.
* stream is populated
*/
public abstract class AbstractLocalCacheStream<T, S extends BaseStream<T, S>, S2 extends S> implements BaseStream<T, S> {
   protected final StreamSupplier<T, S> streamSupplier;
   protected final ComponentRegistry registry;
   // Runnables invoked when close() is called; populated via onClose and iterator/spliterator
   protected final Collection<Runnable> onCloseRunnables;
   // Intermediate operations queued up and replayed, in order, when the stream is created
   protected final Queue<IntermediateOperation> intermediateOperations;
   protected IntSet segmentsToFilter;
   protected Set<?> keysToFilter;
   protected boolean parallel;

   /**
    * Supplier responsible for building the underlying stream, applying any
    * segment/key filtering before intermediate operations are performed.
    */
   public interface StreamSupplier<T, S extends BaseStream<T, S>> {
      S buildStream(IntSet segmentsToFilter, Set<?> keysToFilter, boolean parallel);
   }

   /**
    * @param streamSupplier supplier that builds the backing stream on demand
    * @param parallel whether the stream should be parallel by default
    * @param registry component registry used to wire dependencies into operations
    */
   public AbstractLocalCacheStream(StreamSupplier<T, S> streamSupplier, boolean parallel, ComponentRegistry registry) {
      this.streamSupplier = streamSupplier;
      this.registry = registry;
      this.onCloseRunnables = new ArrayList<>(4);
      this.intermediateOperations = new ArrayDeque<>();
      this.parallel = parallel;
   }

   // Copy constructor used when switching stream types (e.g. mapToObj). Note it
   // deliberately SHARES the mutable onCloseRunnables and intermediateOperations
   // collections with the original so both views stay in sync - do not copy them.
   AbstractLocalCacheStream(AbstractLocalCacheStream<?, ?, ?> original) {
      this.streamSupplier = (StreamSupplier<T, S>) original.streamSupplier;
      this.registry = original.registry;
      this.onCloseRunnables = original.onCloseRunnables;
      this.intermediateOperations = original.intermediateOperations;
      this.segmentsToFilter = original.segmentsToFilter;
      this.keysToFilter = original.keysToFilter;
      this.parallel = original.parallel;
   }

   // Builds the backing stream and applies the queued intermediate operations in order,
   // injecting components into each operation first.
   protected final S createStream() {
      BaseStream<?, ?> stream = streamSupplier.buildStream(segmentsToFilter, keysToFilter, parallel);
      for (IntermediateOperation intOp : intermediateOperations) {
         intOp.handleInjection(registry);
         stream = intOp.perform(stream);
      }
      return (S) stream;
   }

   @Override
   public boolean isParallel() {
      return parallel;
   }

   @Override
   public S2 sequential() {
      this.parallel = false;
      return (S2) this;
   }

   @Override
   public S2 parallel() {
      this.parallel = true;
      return (S2) this;
   }

   @Override
   public S2 unordered() {
      intermediateOperations.add(new UnorderedOperation<>());
      return (S2) this;
   }

   @Override
   public S2 onClose(Runnable closeHandler) {
      onCloseRunnables.add(closeHandler);
      return (S2) this;
   }

   @Override
   public void close() {
      onCloseRunnables.forEach(Runnable::run);
   }
}
| 3,003
| 27.609524
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/LocalIntCacheStream.java
|
package org.infinispan.stream.impl.local;
import java.util.IntSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.IntBinaryOperator;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
import java.util.function.ObjIntConsumer;
import java.util.function.Supplier;
import java.util.stream.IntStream;
import org.infinispan.Cache;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.primitive.i.BoxedIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.DistinctIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.FilterIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.FlatMapIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.LimitIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToDoubleIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToLongIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToObjIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.PeekIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.SkipIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.SortedIntOperation;
/**
* IntStream that wraps a given stream to allow for additional functionality such as injection of values into
* various operations
*/
public class LocalIntCacheStream extends AbstractLocalCacheStream<Integer, IntStream, IntCacheStream> implements IntCacheStream {
   public LocalIntCacheStream(StreamSupplier<Integer, IntStream> streamSupplier, boolean parallel, ComponentRegistry registry) {
      super(streamSupplier, parallel, registry);
   }

   // Copy constructor used when converting from another local stream type (e.g. mapToInt)
   LocalIntCacheStream(AbstractLocalCacheStream<?, ?, ?> original) {
      super(original);
   }

   // Intermediate operations below only queue the operation; nothing executes until a
   // terminal operation calls createStream().

   @Override
   public LocalIntCacheStream filter(IntPredicate predicate) {
      registry.wireDependencies(predicate);
      intermediateOperations.add(new FilterIntOperation<>(predicate));
      return this;
   }

   @Override
   public LocalIntCacheStream map(IntUnaryOperator mapper) {
      registry.wireDependencies(mapper);
      intermediateOperations.add(new MapIntOperation(mapper));
      return this;
   }

   @Override
   public <U> LocalCacheStream<U> mapToObj(IntFunction<? extends U> mapper) {
      registry.wireDependencies(mapper);
      intermediateOperations.add(new MapToObjIntOperation<>(mapper));
      return new LocalCacheStream<U>(this);
   }

   // NOTE(review): unlike filter/map/mapToObj above, the operations below do not call
   // registry.wireDependencies on their argument - confirm whether that is intentional.

   @Override
   public LocalLongCacheStream mapToLong(IntToLongFunction mapper) {
      intermediateOperations.add(new MapToLongIntOperation(mapper));
      return new LocalLongCacheStream(this);
   }

   @Override
   public LocalDoubleCacheStream mapToDouble(IntToDoubleFunction mapper) {
      intermediateOperations.add(new MapToDoubleIntOperation(mapper));
      return new LocalDoubleCacheStream(this);
   }

   @Override
   public LocalIntCacheStream flatMap(IntFunction<? extends IntStream> mapper) {
      intermediateOperations.add(new FlatMapIntOperation(mapper));
      return this;
   }

   @Override
   public LocalIntCacheStream distinct() {
      intermediateOperations.add(DistinctIntOperation.getInstance());
      return this;
   }

   @Override
   public LocalIntCacheStream sorted() {
      intermediateOperations.add(SortedIntOperation.getInstance());
      return this;
   }

   @Override
   public LocalIntCacheStream peek(IntConsumer action) {
      intermediateOperations.add(new PeekIntOperation(action));
      return this;
   }

   @Override
   public LocalIntCacheStream limit(long maxSize) {
      intermediateOperations.add(new LimitIntOperation(maxSize));
      return this;
   }

   @Override
   public LocalIntCacheStream skip(long n) {
      intermediateOperations.add(new SkipIntOperation(n));
      return this;
   }

   // Terminal operations below build the real stream and close it when done.

   @Override
   public void forEach(IntConsumer action) {
      injectCache(action);
      try (IntStream stream = createStream()) {
         stream.forEach(action);
      }
   }

   @Override
   public <K, V> void forEach(ObjIntConsumer<Cache<K, V>> action) {
      // Variant that hands the consumer a reference to the cache for each element
      Cache<K, V> cache = registry.getComponent(Cache.class);
      try (IntStream stream = createStream()) {
         stream.forEach(i -> action.accept(cache, i));
      }
   }

   @Override
   public void forEachOrdered(IntConsumer action) {
      injectCache(action);
      try (IntStream stream = createStream()) {
         stream.forEachOrdered(action);
      }
   }

   /**
    * Method to inject a cache into a consumer. Note we only support this for the consumer at this
    * time.
    * @param cacheAware the instance that may be a {@link CacheAware}
    */
   private void injectCache(IntConsumer cacheAware) {
      if (cacheAware instanceof CacheAware) {
         ((CacheAware) cacheAware).injectCache(registry.getComponent(Cache.class));
      }
   }

   @Override
   public int[] toArray() {
      try (IntStream stream = createStream()) {
         return stream.toArray();
      }
   }

   @Override
   public int reduce(int identity, IntBinaryOperator op) {
      try (IntStream stream = createStream()) {
         return stream.reduce(identity, op);
      }
   }

   @Override
   public OptionalInt reduce(IntBinaryOperator op) {
      try (IntStream stream = createStream()) {
         return stream.reduce(op);
      }
   }

   @Override
   public <R> R collect(Supplier<R> supplier, ObjIntConsumer<R> accumulator, BiConsumer<R, R> combiner) {
      try (IntStream stream = createStream()) {
         return stream.collect(supplier, accumulator, combiner);
      }
   }

   @Override
   public int sum() {
      try (IntStream stream = createStream()) {
         return stream.sum();
      }
   }

   @Override
   public OptionalInt min() {
      try (IntStream stream = createStream()) {
         return stream.min();
      }
   }

   @Override
   public OptionalInt max() {
      try (IntStream stream = createStream()) {
         return stream.max();
      }
   }

   @Override
   public long count() {
      try (IntStream stream = createStream()) {
         return stream.count();
      }
   }

   @Override
   public OptionalDouble average() {
      try (IntStream stream = createStream()) {
         return stream.average();
      }
   }

   @Override
   public IntSummaryStatistics summaryStatistics() {
      try (IntStream stream = createStream()) {
         return stream.summaryStatistics();
      }
   }

   @Override
   public boolean anyMatch(IntPredicate predicate) {
      try (IntStream stream = createStream()) {
         return stream.anyMatch(predicate);
      }
   }

   @Override
   public boolean allMatch(IntPredicate predicate) {
      try (IntStream stream = createStream()) {
         return stream.allMatch(predicate);
      }
   }

   @Override
   public boolean noneMatch(IntPredicate predicate) {
      try (IntStream stream = createStream()) {
         return stream.noneMatch(predicate);
      }
   }

   @Override
   public OptionalInt findFirst() {
      try (IntStream stream = createStream()) {
         return stream.findFirst();
      }
   }

   @Override
   public OptionalInt findAny() {
      try (IntStream stream = createStream()) {
         return stream.findAny();
      }
   }

   @Override
   public LongCacheStream asLongStream() {
      return mapToLong(i -> (long) i);
   }

   @Override
   public DoubleCacheStream asDoubleStream() {
      return mapToDouble(i -> (double) i);
   }

   @Override
   public LocalCacheStream<Integer> boxed() {
      intermediateOperations.add(BoxedIntOperation.getInstance());
      return new LocalCacheStream<>(this);
   }

   @Override
   public PrimitiveIterator.OfInt iterator() {
      // Stream cannot be closed here since the iterator outlives this call;
      // defer closing until this cache stream itself is closed.
      IntStream stream = createStream();
      onCloseRunnables.add(stream::close);
      return stream.iterator();
   }

   @Override
   public Spliterator.OfInt spliterator() {
      // Same deferred-close handling as iterator()
      IntStream stream = createStream();
      onCloseRunnables.add(stream::close);
      return stream.spliterator();
   }

   // Distribution-related methods below are no-ops: this stream is purely local.

   @Override
   public LocalIntCacheStream sequentialDistribution() {
      return this;
   }

   @Override
   public LocalIntCacheStream parallelDistribution() {
      return this;
   }

   @Override
   public LocalIntCacheStream filterKeySegments(Set<Integer> segments) {
      return filterKeySegments(IntSets.from(segments));
   }

   @Override
   public LocalIntCacheStream filterKeySegments(IntSet segments) {
      segmentsToFilter = segments;
      return this;
   }

   @Override
   public LocalIntCacheStream filterKeys(Set<?> keys) {
      keysToFilter = keys;
      return this;
   }

   @Override
   public LocalIntCacheStream distributedBatchSize(int batchSize) {
      // TODO: Does this change cache loader?
      return this;
   }

   @Override
   public LocalIntCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
      // All segments are completed when the getStream() is completed so we don't track them
      return this;
   }

   @Override
   public LocalIntCacheStream disableRehashAware() {
      // Local int stream doesn't matter for rehash
      return this;
   }

   @Override
   public LocalIntCacheStream timeout(long timeout, TimeUnit unit) {
      // Timeout does nothing for a local int cache stream
      return this;
   }
}
| 9,950
| 27.760116
| 129
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/ValueCacheCollection.java
|
package org.infinispan.stream.impl.local;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.CacheStream;
import org.infinispan.commands.read.AbstractCloseableIteratorCollection;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableSpliterator;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.commons.util.SpliteratorMapper;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.stream.StreamMarshalling;
/**
* CacheCollection that can be used for the values method of a cache. Backs all the calls to the cacheSet version
* allowing for key filtering still to be applied.
* @param <K> key type of the cache
* @param <V> value type of the cache
*/
public class ValueCacheCollection<K, V> extends AbstractCloseableIteratorCollection<V, K, V>
      implements CacheCollection<V> {
   // Entry set this values view delegates to; key filtering applied there carries over here
   private final CacheSet<CacheEntry<K, V>> cacheSet;

   /**
    * @param cache the backing cache
    * @param cacheSet the entry set whose values this collection exposes
    */
   public ValueCacheCollection(Cache<K, V> cache, CacheSet<CacheEntry<K, V>> cacheSet) {
      super(cache);
      this.cacheSet = cacheSet;
   }

   @Override
   public CloseableIterator<V> iterator() {
      return new IteratorMapper<>(cacheSet.iterator(), CacheEntry::getValue);
   }

   @Override
   public CloseableSpliterator<V> spliterator() {
      return new SpliteratorMapper<>(cacheSet.spliterator(), CacheEntry::getValue);
   }

   @Override
   public boolean contains(Object o) {
      // We don't support null values
      if (o == null) {
         throw new NullPointerException();
      }
      try (CloseableIterator<V> it = iterator()) {
         while (it.hasNext()) {
            if (o.equals(it.next())) {
               return true;
            }
         }
         return false;
      }
   }

   @Override
   public boolean containsAll(Collection<?> c) {
      // The AbstractCollection implementation calls contains for each element. Instead we want to
      // call the iterator only once so we have a special implementation.
      if (c.isEmpty()) {
         return true;
      }
      Set<?> set = new HashSet<>(c);
      try (CloseableIterator<V> it = iterator()) {
         // Stop as soon as every requested element has been seen
         while (!set.isEmpty() && it.hasNext()) {
            set.remove(it.next());
         }
      }
      return set.isEmpty();
   }

   @Override
   public boolean remove(Object o) {
      // We don't support null values - consistent with contains(Object)
      if (o == null) {
         throw new NullPointerException();
      }
      try (CloseableIterator<V> it = iterator()) {
         // Removes only the first matching entry, per Collection.remove contract
         while (it.hasNext()) {
            V next = it.next();
            if (o.equals(next)) {
               it.remove();
               return true;
            }
         }
         return false;
      }
   }

   @Override
   public CacheStream<V> stream() {
      Stream<CacheEntry<K, V>> stream = cacheSet.stream();
      return (CacheStream<V>) stream.map(StreamMarshalling.entryToValueFunction());
   }

   @Override
   public CacheStream<V> parallelStream() {
      Stream<CacheEntry<K, V>> stream = cacheSet.parallelStream();
      return (CacheStream<V>) stream.map(StreamMarshalling.entryToValueFunction());
   }
}
| 3,168
| 30.376238
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/PersistenceEntryStreamSupplier.java
|
package org.infinispan.stream.impl.local;
import java.lang.invoke.MethodHandles;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.function.ToIntFunction;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.Closeables;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.Flag;
import org.infinispan.persistence.PersistenceUtil;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.util.LazyConcatIterator;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* StreamSupplier that allows for creating streams where they utilize the {@link PersistenceManager} to publish entries
* using segments if possible.
* @author wburns
* @since 9.4
*/
public class PersistenceEntryStreamSupplier<K, V> implements AbstractLocalCacheStream.StreamSupplier<CacheEntry<K, V>, Stream<CacheEntry<K, V>>> {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());

   private final Cache<K, V> cache;
   // Factory used to convert MarshallableEntry instances loaded from the store into CacheEntry
   private final InternalEntryFactory iceFactory;
   // Maps a key to its segment; may be null, in which case segment filtering is skipped
   private final ToIntFunction<Object> toIntFunction;
   // Stream over the in-memory data container; consumed before the store-backed publisher
   private final CacheStream<CacheEntry<K, V>> inMemoryStream;
   private final PersistenceManager persistenceManager;

   public PersistenceEntryStreamSupplier(Cache<K, V> cache, InternalEntryFactory iceFactory,
         ToIntFunction<Object> toIntFunction, CacheStream<CacheEntry<K, V>> inMemoryStream,
         PersistenceManager persistenceManager) {
      this.cache = cache;
      this.iceFactory = iceFactory;
      this.toIntFunction = toIntFunction;
      this.inMemoryStream = inMemoryStream;
      this.persistenceManager = persistenceManager;
   }

   @Override
   public Stream<CacheEntry<K, V>> buildStream(IntSet segmentsToFilter, Set<?> keysToFilter, boolean parallel) {
      Stream<CacheEntry<K, V>> stream;
      if (keysToFilter != null) {
         // Explicit key filter: look up each requested key locally instead of
         // scanning memory + store. Note the requested parallelism is not applied
         // on this branch; the stream is always sequential here.
         if (log.isTraceEnabled()) {
            log.tracef("Applying key filtering %s", keysToFilter);
         }
         // Make sure we aren't going remote to retrieve these
         AdvancedCache<K, V> advancedCache = AbstractDelegatingCache.unwrapCache(cache).getAdvancedCache()
               .withFlags(Flag.CACHE_MODE_LOCAL);
         stream = keysToFilter.stream()
               .map(advancedCache::getCacheEntry)
               .filter(Objects::nonNull);
         if (segmentsToFilter != null && toIntFunction != null) {
            if (log.isTraceEnabled()) {
               log.tracef("Applying segment filter %s", segmentsToFilter);
            }
            stream = stream.filter(k -> {
               K key = k.getKey();
               int segment = toIntFunction.applyAsInt(key);
               boolean isPresent = segmentsToFilter.contains(segment);
               if (log.isTraceEnabled())
                  log.tracef("Is key %s present in segment %d? %b", key, segment, isPresent);
               return isPresent;
            });
         }
      } else {
         // No key filter: concatenate in-memory entries with entries published from the
         // store, de-duplicating by recording every in-memory key in seenKeys and
         // excluding those keys from the store publisher. Relies on the in-memory
         // iterator being fully consumed before the store publisher is subscribed
         // (LazyConcatIterator is lazy).
         Publisher<MarshallableEntry<K, V>> publisher;
         CacheStream<CacheEntry<K, V>> inMemoryStream = this.inMemoryStream;
         Set<K> seenKeys = new HashSet<>(2048);
         if (segmentsToFilter != null) {
            inMemoryStream = inMemoryStream.filterKeySegments(segmentsToFilter);
            publisher = persistenceManager.publishEntries(segmentsToFilter, k -> !seenKeys.contains(k), true, true,
                  PersistenceManager.AccessMode.BOTH);
         } else {
            publisher = persistenceManager.publishEntries(k -> !seenKeys.contains(k), true, true,
                  PersistenceManager.AccessMode.BOTH);
         }
         // Record each in-memory key as its entry is iterated so the store publisher skips it
         CloseableIterator<CacheEntry<K, V>> localIterator = new IteratorMapper<>(Closeables.iterator(inMemoryStream), e -> {
            seenKeys.add(e.getKey());
            return e;
         });
         // Convert store-format entries to CacheEntry instances as they are published
         Flowable<CacheEntry<K, V>> flowable = Flowable.fromPublisher(publisher)
               .map(me -> PersistenceUtil.convert(me, iceFactory));
         // Store iterator is created lazily, only after the local iterator is exhausted
         CloseableIterator<CacheEntry<K, V>> iterator = new LazyConcatIterator<>(localIterator,
               () -> org.infinispan.util.Closeables.iterator(flowable, 128));
         Iterable<CacheEntry<K, V>> iterable = () -> iterator;
         // Make sure we close the iterator when the resulting stream is closed
         stream = StreamSupport.stream(iterable.spliterator(), parallel).onClose(iterator::close);
      }
      return stream;
   }
}
| 5,024
| 43.866071
| 146
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/LocalDoubleCacheStream.java
|
package org.infinispan.stream.impl.local;
import java.util.DoubleSummaryStatistics;
import java.util.OptionalDouble;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.DoubleConsumer;
import java.util.function.DoubleFunction;
import java.util.function.DoublePredicate;
import java.util.function.DoubleToIntFunction;
import java.util.function.DoubleToLongFunction;
import java.util.function.DoubleUnaryOperator;
import java.util.function.ObjDoubleConsumer;
import java.util.function.Supplier;
import java.util.stream.DoubleStream;
import org.infinispan.Cache;
import org.infinispan.DoubleCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.primitive.d.BoxedDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.DistinctDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.FilterDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.FlatMapDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.LimitDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToIntDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToLongDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToObjDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.PeekDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.SkipDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.SortedDoubleOperation;
/**
* DoubleStream that wraps a given stream to allow for additional functionality such as injection of values into
* various operations
*/
public class LocalDoubleCacheStream extends AbstractLocalCacheStream<Double, DoubleStream, DoubleCacheStream> implements DoubleCacheStream {
public LocalDoubleCacheStream(StreamSupplier<Double, DoubleStream> streamSupplier, boolean parallel, ComponentRegistry registry) {
super(streamSupplier, parallel, registry);
}
LocalDoubleCacheStream(AbstractLocalCacheStream<?, ?, ?> original) {
super(original);
}
@Override
public LocalDoubleCacheStream filter(DoublePredicate predicate) {
registry.wireDependencies(predicate);
intermediateOperations.add(new FilterDoubleOperation(predicate));
return this;
}
@Override
public LocalDoubleCacheStream map(DoubleUnaryOperator mapper) {
registry.wireDependencies(mapper);
intermediateOperations.add(new MapDoubleOperation(mapper));
return this;
}
@Override
public <U> LocalCacheStream<U> mapToObj(DoubleFunction<? extends U> mapper) {
registry.wireDependencies(mapper);
intermediateOperations.add(new MapToObjDoubleOperation<>(mapper));
return new LocalCacheStream<>(this);
}
@Override
public LocalIntCacheStream mapToInt(DoubleToIntFunction mapper) {
intermediateOperations.add(new MapToIntDoubleOperation(mapper));
return new LocalIntCacheStream(this);
}
@Override
public LocalLongCacheStream mapToLong(DoubleToLongFunction mapper) {
intermediateOperations.add(new MapToLongDoubleOperation(mapper));
return new LocalLongCacheStream(this);
}
@Override
public LocalDoubleCacheStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
intermediateOperations.add(new FlatMapDoubleOperation(mapper));
return this;
}
@Override
public LocalDoubleCacheStream distinct() {
intermediateOperations.add(DistinctDoubleOperation.getInstance());
return this;
}
@Override
public LocalDoubleCacheStream sorted() {
intermediateOperations.add(SortedDoubleOperation.getInstance());
return this;
}
@Override
public LocalDoubleCacheStream peek(DoubleConsumer action) {
intermediateOperations.add(new PeekDoubleOperation(action));
return this;
}
@Override
public LocalDoubleCacheStream limit(long maxSize) {
intermediateOperations.add(new LimitDoubleOperation(maxSize));
return this;
}
@Override
public LocalDoubleCacheStream skip(long n) {
intermediateOperations.add(new SkipDoubleOperation(n));
return this;
}
   @Override
   public void forEach(DoubleConsumer action) {
      // Give the consumer access to the cache if it implements CacheAware
      injectCache(action);
      try (DoubleStream stream = createStream()) {
         stream.forEach(action);
      }
   }
   @Override
   public <K, V> void forEach(ObjDoubleConsumer<Cache<K, V>> action) {
      // Variant that hands the cache to the consumer explicitly on every element
      Cache<K, V> cache = registry.getComponent(Cache.class);
      try (DoubleStream stream = createStream()) {
         stream.forEach(d -> action.accept(cache, d));
      }
   }
   @Override
   public void forEachOrdered(DoubleConsumer action) {
      injectCache(action);
      try (DoubleStream stream = createStream()) {
         stream.forEachOrdered(action);
      }
   }
   /**
    * Method to inject a cache into a consumer. Note we only support this for the consumer at this
    * time.
    * @param cacheAware the instance that may be a {@link CacheAware}
    */
   private void injectCache(DoubleConsumer cacheAware) {
      if (cacheAware instanceof CacheAware) {
         ((CacheAware) cacheAware).injectCache(registry.getComponent(Cache.class));
      }
   }
   // Terminal operations: each one creates the underlying stream, delegates the
   // call, and closes the stream via try-with-resources so any resources the
   // stream supplier acquired are released.
   @Override
   public double[] toArray() {
      try (DoubleStream stream = createStream()) {
         return stream.toArray();
      }
   }
   @Override
   public double reduce(double identity, DoubleBinaryOperator op) {
      try (DoubleStream stream = createStream()) {
         return stream.reduce(identity, op);
      }
   }
   @Override
   public OptionalDouble reduce(DoubleBinaryOperator op) {
      try (DoubleStream stream = createStream()) {
         return stream.reduce(op);
      }
   }
   @Override
   public <R> R collect(Supplier<R> supplier, ObjDoubleConsumer<R> accumulator, BiConsumer<R, R> combiner) {
      try (DoubleStream stream = createStream()) {
         return stream.collect(supplier, accumulator, combiner);
      }
   }
   @Override
   public double sum() {
      try (DoubleStream stream = createStream()) {
         return stream.sum();
      }
   }
   @Override
   public OptionalDouble min() {
      try (DoubleStream stream = createStream()) {
         return stream.min();
      }
   }
   @Override
   public OptionalDouble max() {
      try (DoubleStream stream = createStream()) {
         return stream.max();
      }
   }
   @Override
   public long count() {
      try (DoubleStream stream = createStream()) {
         return stream.count();
      }
   }
   @Override
   public OptionalDouble average() {
      try (DoubleStream stream = createStream()) {
         return stream.average();
      }
   }
   @Override
   public DoubleSummaryStatistics summaryStatistics() {
      try (DoubleStream stream = createStream()) {
         return stream.summaryStatistics();
      }
   }
   @Override
   public boolean anyMatch(DoublePredicate predicate) {
      try (DoubleStream stream = createStream()) {
         return stream.anyMatch(predicate);
      }
   }
   @Override
   public boolean allMatch(DoublePredicate predicate) {
      try (DoubleStream stream = createStream()) {
         return stream.allMatch(predicate);
      }
   }
   @Override
   public boolean noneMatch(DoublePredicate predicate) {
      try (DoubleStream stream = createStream()) {
         return stream.noneMatch(predicate);
      }
   }
   @Override
   public OptionalDouble findFirst() {
      try (DoubleStream stream = createStream()) {
         return stream.findFirst();
      }
   }
   @Override
   public OptionalDouble findAny() {
      try (DoubleStream stream = createStream()) {
         return stream.findAny();
      }
   }
   @Override
   public LocalCacheStream<Double> boxed() {
      intermediateOperations.add(BoxedDoubleOperation.getInstance());
      return new LocalCacheStream<>(this);
   }
   @Override
   public PrimitiveIterator.OfDouble iterator() {
      DoubleStream stream = createStream();
      // The iterator escapes this method, so try-with-resources cannot be used;
      // defer closing the stream until this cache stream itself is closed.
      onCloseRunnables.add(stream::close);
      return stream.iterator();
   }
   @Override
   public Spliterator.OfDouble spliterator() {
      DoubleStream stream = createStream();
      // Same deferred-close pattern as iterator()
      onCloseRunnables.add(stream::close);
      return stream.spliterator();
   }
   // The distribution/batching knobs below only make sense for distributed
   // streams, so for a local stream they are deliberate no-ops.
   @Override
   public LocalDoubleCacheStream sequentialDistribution() {
      return this;
   }
   @Override
   public LocalDoubleCacheStream parallelDistribution() {
      return this;
   }
   @Override
   public LocalDoubleCacheStream filterKeySegments(Set<Integer> segments) {
      return filterKeySegments(IntSets.from(segments));
   }
   @Override
   public LocalDoubleCacheStream filterKeySegments(IntSet segments) {
      segmentsToFilter = segments;
      return this;
   }
   @Override
   public LocalDoubleCacheStream filterKeys(Set<?> keys) {
      keysToFilter = keys;
      return this;
   }
   @Override
   public LocalDoubleCacheStream distributedBatchSize(int batchSize) {
      // TODO: Does this change cache loader?
      return this;
   }
   @Override
   public LocalDoubleCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
      // All segments are completed when the getStream() is completed so we don't track them
      return this;
   }
   @Override
   public LocalDoubleCacheStream disableRehashAware() {
      // Rehash awareness doesn't matter for a local double stream
      return this;
   }
   @Override
   public LocalDoubleCacheStream timeout(long timeout, TimeUnit unit) {
      // Timeout does nothing for a local double cache stream
      return this;
   }
}
| 9,933
| 28.831832
| 140
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/LocalCacheStream.java
|
package org.infinispan.stream.impl.local;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Optional;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Collector;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.object.DistinctOperation;
import org.infinispan.stream.impl.intops.object.FilterOperation;
import org.infinispan.stream.impl.intops.object.FlatMapOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToDoubleOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToIntOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToLongOperation;
import org.infinispan.stream.impl.intops.object.LimitOperation;
import org.infinispan.stream.impl.intops.object.MapOperation;
import org.infinispan.stream.impl.intops.object.MapToDoubleOperation;
import org.infinispan.stream.impl.intops.object.MapToIntOperation;
import org.infinispan.stream.impl.intops.object.MapToLongOperation;
import org.infinispan.stream.impl.intops.object.PeekOperation;
import org.infinispan.stream.impl.intops.object.SkipOperation;
import org.infinispan.stream.impl.intops.object.SortedComparatorOperation;
import org.infinispan.stream.impl.intops.object.SortedOperation;
import org.infinispan.util.function.SerializableSupplier;
/**
* CacheStream that is to be used locally. This allows for full functionality of a regular stream but also has options
* to filter by keys and other functionality.
* @param <R> type of the stream
*/
public class LocalCacheStream<R> extends AbstractLocalCacheStream<R, Stream<R>, CacheStream<R>> implements CacheStream<R> {
   /**
    * Creates a local cache stream.
    * @param streamSupplier provides the actual {@link Stream} when a terminal operation runs
    * @param parallel whether the created stream should be parallel
    * @param registry component registry used to wire dependencies into user-supplied functions
    */
   public LocalCacheStream(StreamSupplier<R, Stream<R>> streamSupplier, boolean parallel, ComponentRegistry registry) {
      super(streamSupplier, parallel, registry);
   }
   /**
    * Conversion constructor: carries over the supplier, flags and queued
    * operations from another local cache stream (e.g. after a boxed()/mapToObj()).
    * @param other the stream to copy state from
    */
   public LocalCacheStream(AbstractLocalCacheStream<?, ?, ?> other) {
      super(other);
   }
   // Distribution hints are meaningless for a purely local stream: no-ops.
   @Override
   public LocalCacheStream<R> sequentialDistribution() {
      return this;
   }
   @Override
   public LocalCacheStream<R> parallelDistribution() {
      return this;
   }
   @Override
   public LocalCacheStream<R> filterKeySegments(Set<Integer> segments) {
      return filterKeySegments(IntSets.from(segments));
   }
   @Override
   public LocalCacheStream<R> filterKeySegments(IntSet segments) {
      segmentsToFilter = segments;
      return this;
   }
   @Override
   public LocalCacheStream<R> filterKeys(Set<?> keys) {
      keysToFilter = keys;
      return this;
   }
   @Override
   public LocalCacheStream<R> distributedBatchSize(int batchSize) {
      // TODO: Does this change cache loader?
      return this;
   }
   @Override
   public LocalCacheStream<R> segmentCompletionListener(SegmentCompletionListener listener) {
      // All segments are completed when the getStream() is completed so we don't track them
      return this;
   }
   @Override
   public LocalCacheStream<R> disableRehashAware() {
      // Local stream doesn't matter for rehash
      return this;
   }
   // Intermediate operations are queued and applied lazily when a terminal
   // operation materializes the stream.
   @Override
   public LocalCacheStream<R> filter(Predicate<? super R> predicate) {
      // Inject any cache components the user-supplied predicate declares
      registry.wireDependencies(predicate);
      intermediateOperations.add(new FilterOperation<>(predicate));
      return this;
   }
   @Override
   public <R1> LocalCacheStream<R1> map(Function<? super R, ? extends R1> mapper) {
      registry.wireDependencies(mapper);
      intermediateOperations.add(new MapOperation<>(mapper));
      return (LocalCacheStream<R1>) this;
   }
   @Override
   public LocalIntCacheStream mapToInt(ToIntFunction<? super R> mapper) {
      intermediateOperations.add(new MapToIntOperation<>(mapper));
      return new LocalIntCacheStream(this);
   }
   @Override
   public LocalLongCacheStream mapToLong(ToLongFunction<? super R> mapper) {
      intermediateOperations.add(new MapToLongOperation<>(mapper));
      return new LocalLongCacheStream(this);
   }
   @Override
   public LocalDoubleCacheStream mapToDouble(ToDoubleFunction<? super R> mapper) {
      intermediateOperations.add(new MapToDoubleOperation<>(mapper));
      return new LocalDoubleCacheStream(this);
   }
   @Override
   public <R1> LocalCacheStream<R1> flatMap(Function<? super R, ? extends Stream<? extends R1>> mapper) {
      intermediateOperations.add(new FlatMapOperation<>(mapper));
      return (LocalCacheStream<R1>) this;
   }
   @Override
   public LocalIntCacheStream flatMapToInt(Function<? super R, ? extends IntStream> mapper) {
      intermediateOperations.add(new FlatMapToIntOperation<>(mapper));
      return new LocalIntCacheStream(this);
   }
   @Override
   public LocalLongCacheStream flatMapToLong(Function<? super R, ? extends LongStream> mapper) {
      intermediateOperations.add(new FlatMapToLongOperation<>(mapper));
      return new LocalLongCacheStream(this);
   }
   @Override
   public LocalDoubleCacheStream flatMapToDouble(Function<? super R, ? extends DoubleStream> mapper) {
      intermediateOperations.add(new FlatMapToDoubleOperation<>(mapper));
      return new LocalDoubleCacheStream(this);
   }
   @Override
   public LocalCacheStream<R> distinct() {
      intermediateOperations.add(DistinctOperation.getInstance());
      return this;
   }
   @Override
   public LocalCacheStream<R> sorted() {
      intermediateOperations.add(SortedOperation.getInstance());
      return this;
   }
   @Override
   public LocalCacheStream<R> sorted(Comparator<? super R> comparator) {
      intermediateOperations.add(new SortedComparatorOperation<>(comparator));
      return this;
   }
   @Override
   public LocalCacheStream<R> peek(Consumer<? super R> action) {
      intermediateOperations.add(new PeekOperation<>(action));
      return this;
   }
   @Override
   public LocalCacheStream<R> limit(long maxSize) {
      intermediateOperations.add(new LimitOperation<>(maxSize));
      return this;
   }
   @Override
   public LocalCacheStream<R> skip(long n) {
      intermediateOperations.add(new SkipOperation<>(n));
      return this;
   }
   // Terminal operations: create the stream, delegate, and close it via
   // try-with-resources so any acquired resources are released.
   @Override
   public void forEach(Consumer<? super R> action) {
      // Give the consumer access to the cache if it implements CacheAware
      injectCache(action);
      try (Stream<R> stream = createStream()) {
         stream.forEach(action);
      }
   }
   @Override
   public <K, V> void forEach(BiConsumer<Cache<K, V>, ? super R> action) {
      Cache<K, V> cache = registry.getComponent(Cache.class);
      registry.wireDependencies(action);
      try (Stream<R> stream = createStream()) {
         stream.forEach(e -> action.accept(cache, e));
      }
   }
   @Override
   public void forEachOrdered(Consumer<? super R> action) {
      injectCache(action);
      try (Stream<R> stream = createStream()) {
         stream.forEachOrdered(action);
      }
   }
   /**
    * Method to inject a cache into a consumer. Note we only support this for the consumer at this
    * time.
    * @param cacheAware the instance that may be a {@link CacheAware}
    */
   private void injectCache(Consumer<? super R> cacheAware) {
      if (cacheAware instanceof CacheAware) {
         ((CacheAware) cacheAware).injectCache(registry.getComponent(Cache.class));
      }
   }
   @Override
   public Object[] toArray() {
      try (Stream<R> stream = createStream()) {
         return stream.toArray();
      }
   }
   @Override
   public <A> A[] toArray(IntFunction<A[]> generator) {
      try (Stream<R> stream = createStream()) {
         return stream.toArray(generator);
      }
   }
   @Override
   public R reduce(R identity, BinaryOperator<R> accumulator) {
      try (Stream<R> stream = createStream()) {
         return stream.reduce(identity, accumulator);
      }
   }
   @Override
   public Optional<R> reduce(BinaryOperator<R> accumulator) {
      try (Stream<R> stream = createStream()) {
         return stream.reduce(accumulator);
      }
   }
   @Override
   public <U> U reduce(U identity, BiFunction<U, ? super R, U> accumulator, BinaryOperator<U> combiner) {
      try (Stream<R> stream = createStream()) {
         return stream.reduce(identity, accumulator, combiner);
      }
   }
   @Override
   public <R1> R1 collect(Supplier<R1> supplier, BiConsumer<R1, ? super R> accumulator, BiConsumer<R1, R1> combiner) {
      try (Stream<R> stream = createStream()) {
         return stream.collect(supplier, accumulator, combiner);
      }
   }
   @Override
   public <R1, A> R1 collect(Collector<? super R, A, R1> collector) {
      try (Stream<R> stream = createStream()) {
         return stream.collect(collector);
      }
   }
   @Override
   public <R1> R1 collect(SerializableSupplier<Collector<? super R, ?, R1>> supplier) {
      try (Stream<R> stream = createStream()) {
         return stream.collect(supplier.get());
      }
   }
   @Override
   public <R1> R1 collect(Supplier<Collector<? super R, ?, R1>> supplier) {
      try (Stream<R> stream = createStream()) {
         return stream.collect(supplier.get());
      }
   }
   @Override
   public Optional<R> min(Comparator<? super R> comparator) {
      try (Stream<R> stream = createStream()) {
         return stream.min(comparator);
      }
   }
   @Override
   public Optional<R> max(Comparator<? super R> comparator) {
      try (Stream<R> stream = createStream()) {
         return stream.max(comparator);
      }
   }
   @Override
   public long count() {
      try (Stream<R> stream = createStream()) {
         return stream.count();
      }
   }
   @Override
   public boolean anyMatch(Predicate<? super R> predicate) {
      try (Stream<R> stream = createStream()) {
         return stream.anyMatch(predicate);
      }
   }
   @Override
   public boolean allMatch(Predicate<? super R> predicate) {
      try (Stream<R> stream = createStream()) {
         return stream.allMatch(predicate);
      }
   }
   @Override
   public boolean noneMatch(Predicate<? super R> predicate) {
      try (Stream<R> stream = createStream()) {
         return stream.noneMatch(predicate);
      }
   }
   @Override
   public Optional<R> findFirst() {
      try (Stream<R> stream = createStream()) {
         return stream.findFirst();
      }
   }
   @Override
   public Optional<R> findAny() {
      try (Stream<R> stream = createStream()) {
         return stream.findAny();
      }
   }
   @Override
   public Iterator<R> iterator() {
      Stream<R> stream = createStream();
      // The iterator escapes, so defer closing the stream until this cache
      // stream itself is closed.
      onCloseRunnables.add(stream::close);
      return stream.iterator();
   }
   @Override
   public Spliterator<R> spliterator() {
      Stream<R> stream = createStream();
      onCloseRunnables.add(stream::close);
      return stream.spliterator();
   }
   @Override
   public LocalCacheStream<R> timeout(long timeout, TimeUnit unit) {
      // Timeout does nothing for a local cache stream
      return this;
   }
}
| 11,555
| 29.65252
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/EntryStreamSupplier.java
|
package org.infinispan.stream.impl.local;
import java.util.Objects;
import java.util.Set;
import java.util.function.Supplier;
import java.util.function.ToIntFunction;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Stream supplier that is to be used when the underlying stream is composed by {@link CacheEntry} instances. This
* supplier will do the proper filtering by key based on the CacheEntry key.
*/
public class EntryStreamSupplier<K, V> implements AbstractLocalCacheStream.StreamSupplier<CacheEntry<K, V>, Stream<CacheEntry<K, V>>> {
   private static final Log log = LogFactory.getLog(EntryStreamSupplier.class);
   // Cache the entries are read from
   private final Cache<K, V> cache;
   // Maps a key to its segment; may be null, in which case segment filtering is skipped
   private final ToIntFunction<Object> toIntFunction;
   // Produces the full entry stream when no explicit key filter is present
   private final Supplier<Stream<CacheEntry<K, V>>> supplier;
   public EntryStreamSupplier(Cache<K, V> cache, ToIntFunction<Object> toIntFunction,
         Supplier<Stream<CacheEntry<K, V>>> supplier) {
      this.cache = cache;
      this.toIntFunction = toIntFunction;
      this.supplier = supplier;
   }
   /**
    * Builds the entry stream, optionally restricted to explicit keys and/or segments.
    * Key filtering takes precedence over the raw supplier; segment filtering is
    * applied afterwards on top of whichever stream was chosen.
    */
   @Override
   public Stream<CacheEntry<K, V>> buildStream(IntSet segmentsToFilter, Set<?> keysToFilter, boolean parallel) {
      Stream<CacheEntry<K, V>> stream;
      if (keysToFilter != null) {
         if (log.isTraceEnabled()) {
            log.tracef("Applying key filtering %s", keysToFilter);
         }
         // Make sure we aren't going remote to retrieve these
         AdvancedCache<K, V> advancedCache = AbstractDelegatingCache.unwrapCache(cache).getAdvancedCache()
               .withFlags(Flag.CACHE_MODE_LOCAL);
         Stream<?> keyStream = parallel ? keysToFilter.parallelStream() : keysToFilter.stream();
         // Look up each requested key locally; absent keys resolve to null and are dropped
         stream = keyStream
               .map(advancedCache::getCacheEntry)
               .filter(Objects::nonNull);
      } else {
         stream = supplier.get();
         if (parallel) {
            stream = stream.parallel();
         }
      }
      if (segmentsToFilter != null && toIntFunction != null) {
         if (log.isTraceEnabled()) {
            log.tracef("Applying segment filter %s", segmentsToFilter);
         }
         // Keep only entries whose key hashes into one of the requested segments
         stream = stream.filter(k -> {
            K key = k.getKey();
            int segment = toIntFunction.applyAsInt(key);
            boolean isPresent = segmentsToFilter.contains(segment);
            if (log.isTraceEnabled())
               log.tracef("Is key %s present in segment %d? %b", key, segment, isPresent);
            return isPresent;
         });
      }
      return stream;
   }
}
| 2,834
| 38.375
| 135
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/LocalLongCacheStream.java
|
package org.infinispan.stream.impl.local;
import java.util.LongSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.LongBinaryOperator;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;
import java.util.function.LongPredicate;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
import java.util.function.LongUnaryOperator;
import java.util.function.ObjLongConsumer;
import java.util.function.Supplier;
import java.util.stream.LongStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.primitive.l.BoxedLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.DistinctLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.FilterLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.FlatMapLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.LimitLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToDoubleLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToIntLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToObjLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.PeekLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.SkipLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.SortedLongOperation;
/**
* LongStream that wraps a given stream to allow for additional functionality such as injection of values into
* various operations
*/
public class LocalLongCacheStream extends AbstractLocalCacheStream<Long, LongStream, LongCacheStream> implements LongCacheStream {
   /**
    * Creates a local long cache stream.
    * @param streamSupplier provides the actual {@link LongStream} when a terminal operation runs
    * @param parallel whether the created stream should be parallel
    * @param registry component registry used to wire dependencies into user-supplied functions
    */
   public LocalLongCacheStream(StreamSupplier<Long, LongStream> streamSupplier, boolean parallel, ComponentRegistry registry) {
      super(streamSupplier, parallel, registry);
   }
   // Conversion constructor used when another local stream converts to a long stream
   LocalLongCacheStream(AbstractLocalCacheStream<?, ?, ?> original) {
      super(original);
   }
   // Intermediate operations are queued and applied lazily when a terminal
   // operation materializes the stream.
   @Override
   public LocalLongCacheStream filter(LongPredicate predicate) {
      // Inject any cache components the user-supplied predicate declares
      registry.wireDependencies(predicate);
      intermediateOperations.add(new FilterLongOperation<>(predicate));
      return this;
   }
   @Override
   public LocalLongCacheStream map(LongUnaryOperator mapper) {
      registry.wireDependencies(mapper);
      intermediateOperations.add(new MapLongOperation(mapper));
      return this;
   }
   @Override
   public <U> LocalCacheStream<U> mapToObj(LongFunction<? extends U> mapper) {
      registry.wireDependencies(mapper);
      intermediateOperations.add(new MapToObjLongOperation<>(mapper));
      return new LocalCacheStream<U>(this);
   }
   @Override
   public LocalIntCacheStream mapToInt(LongToIntFunction mapper) {
      intermediateOperations.add(new MapToIntLongOperation(mapper));
      return new LocalIntCacheStream(this);
   }
   @Override
   public LocalDoubleCacheStream mapToDouble(LongToDoubleFunction mapper) {
      intermediateOperations.add(new MapToDoubleLongOperation(mapper));
      return new LocalDoubleCacheStream(this);
   }
   @Override
   public LocalLongCacheStream flatMap(LongFunction<? extends LongStream> mapper) {
      intermediateOperations.add(new FlatMapLongOperation(mapper));
      return this;
   }
   @Override
   public LocalLongCacheStream distinct() {
      intermediateOperations.add(DistinctLongOperation.getInstance());
      return this;
   }
   @Override
   public LocalLongCacheStream sorted() {
      intermediateOperations.add(SortedLongOperation.getInstance());
      return this;
   }
   @Override
   public LocalLongCacheStream peek(LongConsumer action) {
      intermediateOperations.add(new PeekLongOperation(action));
      return this;
   }
   @Override
   public LocalLongCacheStream limit(long maxSize) {
      intermediateOperations.add(new LimitLongOperation(maxSize));
      return this;
   }
   @Override
   public LocalLongCacheStream skip(long n) {
      intermediateOperations.add(new SkipLongOperation(n));
      return this;
   }
   // Terminal operations: create the stream, delegate, and close it via
   // try-with-resources so any acquired resources are released.
   @Override
   public void forEach(LongConsumer action) {
      // Give the consumer access to the cache if it implements CacheAware
      injectCache(action);
      try (LongStream stream = createStream()) {
         stream.forEach(action);
      }
   }
   @Override
   public <K, V> void forEach(ObjLongConsumer<Cache<K, V>> action) {
      Cache<K, V> cache = registry.getComponent(Cache.class);
      try (LongStream stream = createStream()) {
         stream.forEach(l -> action.accept(cache, l));
      }
   }
   @Override
   public void forEachOrdered(LongConsumer action) {
      injectCache(action);
      try (LongStream stream = createStream()) {
         stream.forEachOrdered(action);
      }
   }
   /**
    * Method to inject a cache into a consumer. Note we only support this for the consumer at this
    * time.
    * @param cacheAware the instance that may be a {@link CacheAware}
    */
   private void injectCache(LongConsumer cacheAware) {
      if (cacheAware instanceof CacheAware) {
         ((CacheAware) cacheAware).injectCache(registry.getComponent(Cache.class));
      }
   }
   @Override
   public long[] toArray() {
      try (LongStream stream = createStream()) {
         return stream.toArray();
      }
   }
   @Override
   public long reduce(long identity, LongBinaryOperator op) {
      try (LongStream stream = createStream()) {
         return stream.reduce(identity, op);
      }
   }
   @Override
   public OptionalLong reduce(LongBinaryOperator op) {
      try (LongStream stream = createStream()) {
         return stream.reduce(op);
      }
   }
   @Override
   public <R> R collect(Supplier<R> supplier, ObjLongConsumer<R> accumulator, BiConsumer<R, R> combiner) {
      try (LongStream stream = createStream()) {
         return stream.collect(supplier, accumulator, combiner);
      }
   }
   @Override
   public long sum() {
      try (LongStream stream = createStream()) {
         return stream.sum();
      }
   }
   @Override
   public OptionalLong min() {
      try (LongStream stream = createStream()) {
         return stream.min();
      }
   }
   @Override
   public OptionalLong max() {
      try (LongStream stream = createStream()) {
         return stream.max();
      }
   }
   @Override
   public long count() {
      try (LongStream stream = createStream()) {
         return stream.count();
      }
   }
   @Override
   public OptionalDouble average() {
      try (LongStream stream = createStream()) {
         return stream.average();
      }
   }
   @Override
   public LongSummaryStatistics summaryStatistics() {
      try (LongStream stream = createStream()) {
         return stream.summaryStatistics();
      }
   }
   @Override
   public boolean anyMatch(LongPredicate predicate) {
      try (LongStream stream = createStream()) {
         return stream.anyMatch(predicate);
      }
   }
   @Override
   public boolean allMatch(LongPredicate predicate) {
      try (LongStream stream = createStream()) {
         return stream.allMatch(predicate);
      }
   }
   @Override
   public boolean noneMatch(LongPredicate predicate) {
      try (LongStream stream = createStream()) {
         return stream.noneMatch(predicate);
      }
   }
   @Override
   public OptionalLong findFirst() {
      try (LongStream stream = createStream()) {
         return stream.findFirst();
      }
   }
   @Override
   public OptionalLong findAny() {
      try (LongStream stream = createStream()) {
         return stream.findAny();
      }
   }
   @Override
   public DoubleCacheStream asDoubleStream() {
      // Implemented as a widening map so it reuses the queued-operation machinery
      return mapToDouble(l -> (double) l);
   }
   @Override
   public CacheStream<Long> boxed() {
      intermediateOperations.add(BoxedLongOperation.getInstance());
      return new LocalCacheStream<>(this);
   }
   @Override
   public PrimitiveIterator.OfLong iterator() {
      LongStream stream = createStream();
      // The iterator escapes, so defer closing the stream until this cache
      // stream itself is closed.
      onCloseRunnables.add(stream::close);
      return stream.iterator();
   }
   @Override
   public Spliterator.OfLong spliterator() {
      LongStream stream = createStream();
      onCloseRunnables.add(stream::close);
      return stream.spliterator();
   }
   // Distribution hints are meaningless for a purely local stream: no-ops.
   @Override
   public LocalLongCacheStream sequentialDistribution() {
      return this;
   }
   @Override
   public LocalLongCacheStream parallelDistribution() {
      return this;
   }
   @Override
   public LocalLongCacheStream filterKeySegments(Set<Integer> segments) {
      return filterKeySegments(IntSets.from(segments));
   }
   @Override
   public LocalLongCacheStream filterKeySegments(IntSet segments) {
      segmentsToFilter = segments;
      return this;
   }
   @Override
   public LocalLongCacheStream filterKeys(Set<?> keys) {
      keysToFilter = keys;
      return this;
   }
   @Override
   public LocalLongCacheStream distributedBatchSize(int batchSize) {
      // TODO: Does this change cache loader?
      return this;
   }
   @Override
   public LocalLongCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
      // All segments are completed when the getStream() is completed so we don't track them
      return this;
   }
   @Override
   public LocalLongCacheStream disableRehashAware() {
      // Local long stream doesn't matter for rehash
      return this;
   }
   @Override
   public LocalLongCacheStream timeout(long timeout, TimeUnit unit) {
      // Timeout does nothing for a local long cache stream
      return this;
   }
}
| 9,936
| 28.055556
| 130
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/local/KeyStreamSupplier.java
|
package org.infinispan.stream.impl.local;
import java.util.Set;
import java.util.function.Supplier;
import java.util.function.ToIntFunction;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.context.Flag;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Stream supplier that is to be used when the underlying stream is composed by key instances. This supplier will do
* the proper filtering by assuming each element is the key itself.
*/
public class KeyStreamSupplier<K, V> implements AbstractLocalCacheStream.StreamSupplier<K, Stream<K>> {
   private static final Log log = LogFactory.getLog(KeyStreamSupplier.class);
   private final Cache<K, V> cache;
   private final ToIntFunction<Object> segmentFunction;
   private final Supplier<Stream<K>> supplier;

   public KeyStreamSupplier(Cache<K, V> cache, ToIntFunction<Object> segmentFunction, Supplier<Stream<K>> supplier) {
      this.cache = cache;
      this.segmentFunction = segmentFunction;
      this.supplier = supplier;
   }

   /**
    * Builds the key stream, optionally restricted to the given explicit keys
    * and/or segments. Key filtering replaces the raw supplier entirely; the
    * segment filter is then layered on top of whichever stream was chosen.
    */
   @Override
   public Stream<K> buildStream(IntSet segmentsToFilter, Set<?> keysToFilter, boolean parallel) {
      // Make sure we aren't going remote to retrieve these
      AdvancedCache<K, V> localCache = AbstractDelegatingCache.unwrapCache(cache).getAdvancedCache()
            .withFlags(Flag.CACHE_MODE_LOCAL);
      Stream<K> result;
      if (keysToFilter == null) {
         result = supplier.get();
         if (parallel) {
            result = result.parallel();
         }
      } else {
         if (log.isTraceEnabled()) {
            log.tracef("Applying key filtering %s", keysToFilter);
         }
         // ignore non existent keys
         Stream<?> requested = parallel ? keysToFilter.parallelStream() : keysToFilter.stream();
         result = (Stream<K>) requested.filter(k -> localCache.get(k) != null);
      }
      if (segmentsToFilter != null && segmentFunction != null) {
         if (log.isTraceEnabled()) {
            log.tracef("Applying segment filter %s", segmentsToFilter);
         }
         result = result.filter(k -> segmentsToFilter.contains(segmentFunction.applyAsInt(k)));
      }
      return result;
   }
}
| 2,349
| 37.52459
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/spliterators/IteratorAsSpliterator.java
|
package org.infinispan.stream.impl.spliterators;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Objects;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;
import java.util.function.Supplier;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableSpliterator;
import org.infinispan.commons.util.Closeables;
/**
* A Spliterator using the provided iterator for supplying values.
* Splits occur start at the batch size. Each split gets subsequently bigger by increasing by the original
* split size. The batch size will never become higher than the configured max batch size
*/
public class IteratorAsSpliterator<T> implements CloseableSpliterator<T> {
private CloseableIterator<? extends T> iterator;
private final int characteristics;
private final int batchIncrease;
private final int maxBatchSize;
private long estimateRemaining;
private int currentBatchSize;
public static class Builder<T> implements Supplier<IteratorAsSpliterator<T>> {
private final CloseableIterator<? extends T> iterator;
private int characteristics;
private int batchIncrease = 1024;
private int maxBatchSize = 51200;
private long estimateRemaining = Long.MAX_VALUE;
public Builder(Iterator<? extends T> iterator) {
Objects.nonNull(iterator);
this.iterator = Closeables.iterator(iterator);
}
public Builder(CloseableIterator<? extends T> closeableIterator) {
Objects.nonNull(closeableIterator);
this.iterator = closeableIterator;
}
/**
* Sets the characteristics the subsequent spliterator will have.
* @param characteristics
* @return
*/
public Builder<T> setCharacteristics(int characteristics) {
this.characteristics = characteristics;
return this;
}
/**
* Sets the batch increase size. This controls how much larger subsequent splits are.
* The default value is 1024;
* @param batchIncrease
* @return this
*/
public Builder<T> setBatchIncrease(int batchIncrease) {
if (batchIncrease <= 0) {
throw new IllegalArgumentException("The batchIncrease " + batchIncrease + " must be greater than 0");
}
this.batchIncrease = batchIncrease;
return this;
}
/**
* Sets the max batch size for a thread to use - This defaults to 51200
* @param maxBatchSize
* @return this
*/
public Builder setMaxBatchSize(int maxBatchSize) {
if (maxBatchSize <= 0) {
throw new IllegalArgumentException("The maxBatchSize " + maxBatchSize + " must be greater than 0");
}
this.maxBatchSize = maxBatchSize;
return this;
}
/**
* Sets how many estimated elements are remaining for this iterator
* This defaults to Long.MAX_VALUE. It is heavily recommended to provide an exact or estimate value
* to help with controlling parallelism
* @param estimateRemaining
* @return this
*/
public Builder<T> setEstimateRemaining(long estimateRemaining) {
this.estimateRemaining = estimateRemaining;
return this;
}
@Override
public IteratorAsSpliterator<T> get() {
if (iterator == null) {
throw new IllegalArgumentException("Iterator cannot be null");
}
if (batchIncrease > maxBatchSize) {
throw new IllegalArgumentException("Max batch size " + maxBatchSize +
" cannot be larger than batchIncrease" + batchIncrease);
}
return new IteratorAsSpliterator<>(this);
}
}
/**
*
* @param builder
*/
private IteratorAsSpliterator(Builder<T> builder) {
this.iterator = builder.iterator;
this.characteristics = builder.characteristics;
this.batchIncrease = builder.batchIncrease;
this.maxBatchSize = builder.maxBatchSize;
this.estimateRemaining = builder.estimateRemaining;
}
@Override
public Spliterator<T> trySplit() {
if (estimateRemaining > 1 && iterator.hasNext()) {
int batch = currentBatchSize + batchIncrease;
if (batch > estimateRemaining) {
batch = (int) estimateRemaining;
}
if (batch > maxBatchSize) {
batch = maxBatchSize;
}
Object[] array = new Object[batch];
int i = 0;
while (iterator.hasNext() && i < batch) {
array[i] = iterator.next();
i++;
}
currentBatchSize = batch;
estimateRemaining -= i;
return Spliterators.spliterator(array, 0, i, characteristics);
}
return null;
}
   @Override
   public void forEachRemaining(Consumer<? super T> action) {
      // Null check mandated by the Spliterator contract before touching the iterator.
      if (action == null) {
         throw new NullPointerException();
      }
      // Delegate bulk traversal straight to the backing iterator.
      iterator.forEachRemaining(action);
   }
@Override
public boolean tryAdvance(Consumer<? super T> action) {
if (action == null) {
throw new NullPointerException();
}
if (iterator.hasNext()) {
action.accept(iterator.next());
return true;
}
return false;
}
   @Override
   public long estimateSize() {
      // Reports whatever remains of the builder-supplied estimate; decremented by trySplit.
      return estimateRemaining;
   }
   // Characteristics are fixed at construction time from the builder.
   @Override
   public int characteristics() { return characteristics; }
   @Override
   public Comparator<? super T> getComparator() {
      if (hasCharacteristics(Spliterator.SORTED)) {
         // null signals natural ordering for a SORTED spliterator, per the Spliterator contract
         return null;
      }
      // Not sorted: the contract requires IllegalStateException
      throw new IllegalStateException();
   }
   @Override
   public void close() {
      // Releases resources held by the backing (closeable) iterator.
      iterator.close();
   }
}
| 5,741
| 29.870968
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/UnorderedOperation.java
|
package org.infinispan.stream.impl.intops;
import java.util.stream.BaseStream;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs unordered operation on a {@link BaseStream}
* @param <Type> the type of the stream
* @param <Stream> the stream type
*/
public class UnorderedOperation<Type, Stream extends BaseStream<Type, Stream>>
      implements IntermediateOperation<Type, Stream, Type, Stream> {

   /**
    * Relaxes the ordering constraint of the stream.
    * (Previously declared with raw {@code BaseStream} types; now uses the class's
    * own type variables, eliminating the unchecked override.)
    * @param stream the stream to make unordered
    * @return the unordered stream
    */
   @Override
   public Stream perform(Stream stream) {
      return stream.unordered();
   }

   @Override
   public Flowable<Type> mapFlowable(Flowable<Type> input) {
      // No ordering state to relax on the Flowable side, so this is a pass-through.
      return input;
   }
}
| 617
| 24.75
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/MappingOperation.java
|
package org.infinispan.stream.impl.intops;
import java.util.stream.BaseStream;
/**
* Marker interface to signify that an {@link IntermediateOperation} is a map operation.
* @author wburns
* @since 9.0
*/
public interface MappingOperation<InputType, InputStream extends BaseStream<InputType, InputStream>,
      OutputType, OutputStream extends BaseStream<OutputType, OutputStream>>
      extends IntermediateOperation<InputType, InputStream, OutputType, OutputStream> {
   // Marker only — no methods. Implementations transform stream elements, possibly changing
   // their type, which is detected via instanceof checks (e.g. to flag value-modifying pipelines).
}
| 478
| 33.214286
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/IntermediateOperation.java
|
package org.infinispan.stream.impl.intops;
import java.util.stream.BaseStream;
import org.infinispan.factories.ComponentRegistry;
import io.reactivex.rxjava3.core.Flowable;
/**
* Intermediate operation that can be applied to a stream to change its processing.
* @param <InputType> the type of the input stream
* @param <InputStream> the input stream type
* @param <OutputType> the type of the output stream
* @param <OutputStream> the output stream type
*/
public interface IntermediateOperation<InputType, InputStream extends BaseStream<InputType, InputStream>,
      OutputType, OutputStream extends BaseStream<OutputType, OutputStream>> {
   /**
    * Performs the actual intermediate operation returning the resulting stream
    * @param stream the stream to have the operation performed on
    * @return the resulting stream after the operation was applied
    */
   OutputStream perform(InputStream stream);
   /**
    * Performs the intermediate operation on a Flowable. This is an interop method to allow Distributed
    * Streams to actually use Distributed Publisher
    * @param input the input flowable
    * @return the flowable with this operation applied to its elements
    */
   Flowable<OutputType> mapFlowable(Flowable<InputType> input);
   /**
    * Handles injection of components for various dependencies that the intermediate operation has
    * @param registry the registry to use
    */
   default void handleInjection(ComponentRegistry registry) {
      // Default is nothing is done
   }
}
| 1,475
| 35
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/FlatMappingOperation.java
|
package org.infinispan.stream.impl.intops;
import java.util.stream.BaseStream;
import java.util.stream.Stream;
/**
* Interface to signify that an {@link IntermediateOperation} is a flat map operation. This also provides proper
* generics for converting a flat map as a map operation resulting in a Stream containing the proper stream
* @author wburns
* @since 9.0
*/
public interface FlatMappingOperation<InputType, InputStream extends BaseStream<InputType, InputStream>,
      OutputType, OutputStream extends BaseStream<OutputType, OutputStream>>
      extends MappingOperation<InputType, InputStream, OutputType, OutputStream> {
   /**
    * Instead of flat mapping, this returns a stream of {@code OutputStream} instances —
    * i.e. the per-element sub-streams before they are flattened.
    * @param inputStream the stream to convert
    * @return the stream of streams
    */
   Stream<OutputStream> map(InputStream inputStream);
}
| 870
| 36.869565
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/IntermediateOperationExternalizer.java
|
package org.infinispan.stream.impl.intops;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.DoubleFunction;
import java.util.function.DoublePredicate;
import java.util.function.DoubleToIntFunction;
import java.util.function.DoubleToLongFunction;
import java.util.function.DoubleUnaryOperator;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;
import java.util.function.LongPredicate;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
import java.util.function.LongUnaryOperator;
import java.util.function.Predicate;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.marshall.core.Ids;
import org.infinispan.stream.impl.intops.object.DistinctOperation;
import org.infinispan.stream.impl.intops.object.FilterOperation;
import org.infinispan.stream.impl.intops.object.FlatMapOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToDoubleOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToIntOperation;
import org.infinispan.stream.impl.intops.object.FlatMapToLongOperation;
import org.infinispan.stream.impl.intops.object.LimitOperation;
import org.infinispan.stream.impl.intops.object.MapOperation;
import org.infinispan.stream.impl.intops.object.MapToDoubleOperation;
import org.infinispan.stream.impl.intops.object.MapToIntOperation;
import org.infinispan.stream.impl.intops.object.MapToLongOperation;
import org.infinispan.stream.impl.intops.object.PeekOperation;
import org.infinispan.stream.impl.intops.object.SortedComparatorOperation;
import org.infinispan.stream.impl.intops.object.SortedOperation;
import org.infinispan.stream.impl.intops.primitive.d.BoxedDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.DistinctDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.FilterDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.FlatMapDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.LimitDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToIntDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToLongDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.MapToObjDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.PeekDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.d.SortedDoubleOperation;
import org.infinispan.stream.impl.intops.primitive.i.AsDoubleIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.AsLongIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.BoxedIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.DistinctIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.FilterIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.FlatMapIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.LimitIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToDoubleIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToLongIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.MapToObjIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.PeekIntOperation;
import org.infinispan.stream.impl.intops.primitive.i.SortedIntOperation;
import org.infinispan.stream.impl.intops.primitive.l.AsDoubleLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.BoxedLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.DistinctLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.FilterLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.FlatMapLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.LimitLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToDoubleLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToIntLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.MapToObjLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.PeekLongOperation;
import org.infinispan.stream.impl.intops.primitive.l.SortedLongOperation;
/**
* Externalizer to be used for serializing the various intermediate operations
*/
public class IntermediateOperationExternalizer implements AdvancedExternalizer<IntermediateOperation> {
   // Object stream intermediate operations
   private static final int DISTINCT = 0;
   private static final int FILTER = 1;
   private static final int FLATMAP = 2;
   private static final int FLATMAP_DOUBLE = 3;
   private static final int FLATMAP_INT = 4;
   private static final int FLATMAP_LONG = 5;
   private static final int LIMIT = 6;
   private static final int MAP = 7;
   private static final int MAP_DOUBLE = 8;
   private static final int MAP_INT = 9;
   private static final int MAP_LONG = 10;
   private static final int PEEK = 11;
   private static final int SORTED_COMPARATOR = 12;
   private static final int SORTED = 13;
   // Double stream intermediate operations
   private static final int DOUBLE_BOXED = 20;
   private static final int DOUBLE_DISTINCT = 21;
   private static final int DOUBLE_FILTER = 22;
   private static final int DOUBLE_FLATMAP = 23;
   private static final int DOUBLE_LIMIT = 24;
   private static final int DOUBLE_MAP = 25;
   private static final int DOUBLE_MAP_INT = 26;
   private static final int DOUBLE_MAP_LONG = 27;
   private static final int DOUBLE_MAP_OBJ = 28;
   private static final int DOUBLE_PEEK = 29;
   private static final int DOUBLE_SORTED = 30;
   // Int stream intermediate operations
   private static final int INT_AS_DOUBLE = 40;
   private static final int INT_AS_LONG = 41;
   private static final int INT_BOXED = 42;
   private static final int INT_DISTINCT = 43;
   private static final int INT_FILTER = 44;
   private static final int INT_FLATMAP = 45;
   private static final int INT_LIMIT = 46;
   private static final int INT_MAP = 47;
   private static final int INT_MAP_DOUBLE = 48;
   private static final int INT_MAP_LONG = 49;
   private static final int INT_MAP_OBJ = 50;
   private static final int INT_PEEK = 51;
   private static final int INT_SORTED = 52;
   // Long stream intermediate operations
   private static final int LONG_AS_DOUBLE = 60;
   private static final int LONG_BOXED = 61;
   private static final int LONG_DISTINCT = 62;
   private static final int LONG_FILTER = 63;
   private static final int LONG_FLATMAP = 64;
   private static final int LONG_LIMIT = 65;
   private static final int LONG_MAP = 66;
   private static final int LONG_MAP_DOUBLE = 67;
   private static final int LONG_MAP_INT = 68;
   private static final int LONG_MAP_OBJ = 69;
   private static final int LONG_PEEK = 70;
   private static final int LONG_SORTED = 71;
   // Maps each concrete operation class to its wire id; must stay in sync with getTypeClasses()
   private final Map<Class<? extends IntermediateOperation>, Integer> operations = new HashMap<>(72);
   public IntermediateOperationExternalizer() {
      operations.put(DistinctOperation.class, DISTINCT);
      operations.put(FilterOperation.class, FILTER);
      operations.put(FlatMapOperation.class, FLATMAP);
      operations.put(FlatMapToDoubleOperation.class, FLATMAP_DOUBLE);
      operations.put(FlatMapToIntOperation.class, FLATMAP_INT);
      operations.put(FlatMapToLongOperation.class, FLATMAP_LONG);
      operations.put(LimitOperation.class, LIMIT);
      operations.put(MapOperation.class, MAP);
      operations.put(MapToDoubleOperation.class, MAP_DOUBLE);
      operations.put(MapToIntOperation.class, MAP_INT);
      operations.put(MapToLongOperation.class, MAP_LONG);
      operations.put(PeekOperation.class, PEEK);
      operations.put(SortedComparatorOperation.class, SORTED_COMPARATOR);
      operations.put(SortedOperation.class, SORTED);
      operations.put(BoxedDoubleOperation.class, DOUBLE_BOXED);
      operations.put(DistinctDoubleOperation.class, DOUBLE_DISTINCT);
      operations.put(FilterDoubleOperation.class, DOUBLE_FILTER);
      operations.put(FlatMapDoubleOperation.class, DOUBLE_FLATMAP);
      operations.put(LimitDoubleOperation.class, DOUBLE_LIMIT);
      operations.put(MapDoubleOperation.class, DOUBLE_MAP);
      operations.put(MapToIntDoubleOperation.class, DOUBLE_MAP_INT);
      operations.put(MapToLongDoubleOperation.class, DOUBLE_MAP_LONG);
      operations.put(MapToObjDoubleOperation.class, DOUBLE_MAP_OBJ);
      operations.put(PeekDoubleOperation.class, DOUBLE_PEEK);
      operations.put(SortedDoubleOperation.class, DOUBLE_SORTED);
      operations.put(AsDoubleIntOperation.class, INT_AS_DOUBLE);
      operations.put(AsLongIntOperation.class, INT_AS_LONG);
      operations.put(BoxedIntOperation.class, INT_BOXED);
      operations.put(DistinctIntOperation.class, INT_DISTINCT);
      operations.put(FilterIntOperation.class, INT_FILTER);
      operations.put(FlatMapIntOperation.class, INT_FLATMAP);
      operations.put(LimitIntOperation.class, INT_LIMIT);
      operations.put(MapIntOperation.class, INT_MAP);
      operations.put(MapToDoubleIntOperation.class, INT_MAP_DOUBLE);
      operations.put(MapToLongIntOperation.class, INT_MAP_LONG);
      operations.put(MapToObjIntOperation.class, INT_MAP_OBJ);
      operations.put(PeekIntOperation.class, INT_PEEK);
      operations.put(SortedIntOperation.class, INT_SORTED);
      operations.put(AsDoubleLongOperation.class, LONG_AS_DOUBLE);
      operations.put(BoxedLongOperation.class, LONG_BOXED);
      operations.put(DistinctLongOperation.class, LONG_DISTINCT);
      operations.put(FilterLongOperation.class, LONG_FILTER);
      operations.put(FlatMapLongOperation.class, LONG_FLATMAP);
      operations.put(LimitLongOperation.class, LONG_LIMIT);
      operations.put(MapLongOperation.class, LONG_MAP);
      operations.put(MapToDoubleLongOperation.class, LONG_MAP_DOUBLE);
      operations.put(MapToIntLongOperation.class, LONG_MAP_INT);
      operations.put(MapToObjLongOperation.class, LONG_MAP_OBJ);
      operations.put(PeekLongOperation.class, LONG_PEEK);
      operations.put(SortedLongOperation.class, LONG_SORTED);
   }
   /**
    * Returns every class this externalizer handles. Fixed to match the id map exactly:
    * the previous version listed DistinctOperation, LimitOperation and MapToDoubleOperation
    * multiple times while omitting MapToObjDoubleOperation, DistinctIntOperation,
    * DistinctLongOperation and LimitLongOperation.
    */
   @Override
   public Set<Class<? extends IntermediateOperation>> getTypeClasses() {
      return Util.<Class<? extends IntermediateOperation>>asSet(DistinctOperation.class, FilterOperation.class,
            FlatMapOperation.class, FlatMapToDoubleOperation.class, FlatMapToIntOperation.class,
            FlatMapToLongOperation.class, LimitOperation.class, MapOperation.class, MapToDoubleOperation.class,
            MapToIntOperation.class, MapToLongOperation.class, PeekOperation.class,
            SortedComparatorOperation.class, SortedOperation.class,
            BoxedDoubleOperation.class, DistinctDoubleOperation.class, FilterDoubleOperation.class,
            FlatMapDoubleOperation.class, LimitDoubleOperation.class, MapDoubleOperation.class,
            MapToIntDoubleOperation.class, MapToLongDoubleOperation.class, MapToObjDoubleOperation.class,
            PeekDoubleOperation.class, SortedDoubleOperation.class,
            AsDoubleIntOperation.class, AsLongIntOperation.class, BoxedIntOperation.class, DistinctIntOperation.class,
            FilterIntOperation.class, FlatMapIntOperation.class, LimitIntOperation.class,
            MapIntOperation.class, MapToDoubleIntOperation.class, MapToLongIntOperation.class,
            MapToObjIntOperation.class, PeekIntOperation.class, SortedIntOperation.class,
            AsDoubleLongOperation.class, BoxedLongOperation.class, DistinctLongOperation.class, FilterLongOperation.class,
            FlatMapLongOperation.class, LimitLongOperation.class, MapLongOperation.class, MapToDoubleLongOperation.class,
            MapToIntLongOperation.class, MapToObjLongOperation.class, PeekLongOperation.class,
            SortedLongOperation.class
      );
   }
   @Override
   public Integer getId() {
      return Ids.INTERMEDIATE_OPERATIONS;
   }
   @Override
   public void writeObject(ObjectOutput output, IntermediateOperation object) throws IOException {
      Integer id = operations.get(object.getClass());
      if (id == null) {
         // Fail fast on the writing side; previously -1 was written and the failure only
         // surfaced as "Found invalid number 255" when reading on the remote node.
         throw new IllegalArgumentException("No id registered for intermediate operation " + object.getClass());
      }
      int number = id;
      output.writeByte(number);
      // Stateless operations (distinct, sorted, boxed, ...) need only the id byte;
      // the rest serialize their captured function/limit after it.
      switch (number) {
         case FILTER:
            output.writeObject(((FilterOperation) object).getPredicate());
            break;
         case FLATMAP:
            output.writeObject(((FlatMapOperation) object).getFunction());
            break;
         case FLATMAP_DOUBLE:
            output.writeObject(((FlatMapToDoubleOperation) object).getFunction());
            break;
         case FLATMAP_INT:
            output.writeObject(((FlatMapToIntOperation) object).getFunction());
            break;
         case FLATMAP_LONG:
            output.writeObject(((FlatMapToLongOperation) object).getFunction());
            break;
         case LIMIT:
            UnsignedNumeric.writeUnsignedLong(output, ((LimitOperation) object).getLimit());
            break;
         case MAP:
            output.writeObject(((MapOperation) object).getFunction());
            break;
         case MAP_DOUBLE:
            output.writeObject(((MapToDoubleOperation) object).getFunction());
            break;
         case MAP_INT:
            output.writeObject(((MapToIntOperation) object).getFunction());
            break;
         case MAP_LONG:
            output.writeObject(((MapToLongOperation) object).getFunction());
            break;
         case PEEK:
            output.writeObject(((PeekOperation) object).getConsumer());
            break;
         case SORTED_COMPARATOR:
            output.writeObject(((SortedComparatorOperation) object).getComparator());
            break;
         case DOUBLE_FILTER:
            output.writeObject(((FilterDoubleOperation) object).getPredicate());
            break;
         case DOUBLE_FLATMAP:
            output.writeObject(((FlatMapDoubleOperation) object).getFunction());
            break;
         case DOUBLE_LIMIT:
            UnsignedNumeric.writeUnsignedLong(output, ((LimitDoubleOperation) object).getLimit());
            break;
         case DOUBLE_MAP:
            output.writeObject(((MapDoubleOperation) object).getOperator());
            break;
         case DOUBLE_MAP_INT:
            output.writeObject(((MapToIntDoubleOperation) object).getFunction());
            break;
         case DOUBLE_MAP_LONG:
            output.writeObject(((MapToLongDoubleOperation) object).getFunction());
            break;
         case DOUBLE_MAP_OBJ:
            output.writeObject(((MapToObjDoubleOperation) object).getFunction());
            break;
         case DOUBLE_PEEK:
            output.writeObject(((PeekDoubleOperation) object).getConsumer());
            break;
         case INT_FILTER:
            output.writeObject(((FilterIntOperation) object).getPredicate());
            break;
         case INT_FLATMAP:
            output.writeObject(((FlatMapIntOperation) object).getFunction());
            break;
         case INT_LIMIT:
            UnsignedNumeric.writeUnsignedLong(output, ((LimitIntOperation) object).getLimit());
            break;
         case INT_MAP:
            output.writeObject(((MapIntOperation) object).getOperator());
            break;
         case INT_MAP_DOUBLE:
            output.writeObject(((MapToDoubleIntOperation) object).getFunction());
            break;
         case INT_MAP_LONG:
            output.writeObject(((MapToLongIntOperation) object).getFunction());
            break;
         case INT_MAP_OBJ:
            output.writeObject(((MapToObjIntOperation) object).getFunction());
            break;
         case INT_PEEK:
            output.writeObject(((PeekIntOperation) object).getConsumer());
            break;
         case LONG_FILTER:
            output.writeObject(((FilterLongOperation) object).getPredicate());
            break;
         case LONG_FLATMAP:
            output.writeObject(((FlatMapLongOperation) object).getFunction());
            break;
         case LONG_LIMIT:
            UnsignedNumeric.writeUnsignedLong(output, ((LimitLongOperation) object).getLimit());
            break;
         case LONG_MAP:
            output.writeObject(((MapLongOperation) object).getOperator());
            break;
         case LONG_MAP_DOUBLE:
            output.writeObject(((MapToDoubleLongOperation) object).getFunction());
            break;
         case LONG_MAP_INT:
            output.writeObject(((MapToIntLongOperation) object).getFunction());
            break;
         case LONG_MAP_OBJ:
            output.writeObject(((MapToObjLongOperation) object).getFunction());
            break;
         case LONG_PEEK:
            output.writeObject(((PeekLongOperation) object).getConsumer());
            break;
      }
   }
   @Override
   public IntermediateOperation readObject(ObjectInput input) throws IOException, ClassNotFoundException {
      int number = input.readUnsignedByte();
      switch (number) {
         case DISTINCT:
            return DistinctOperation.getInstance();
         case FILTER:
            return new FilterOperation<>((Predicate) input.readObject());
         case FLATMAP:
            return new FlatMapOperation<>((Function) input.readObject());
         case FLATMAP_DOUBLE:
            return new FlatMapToDoubleOperation((Function) input.readObject());
         case FLATMAP_INT:
            return new FlatMapToIntOperation((Function) input.readObject());
         case FLATMAP_LONG:
            return new FlatMapToLongOperation<>((Function) input.readObject());
         case LIMIT:
            return new LimitOperation<>(UnsignedNumeric.readUnsignedLong(input));
         case MAP:
            return new MapOperation<>((Function) input.readObject());
         case MAP_DOUBLE:
            return new MapToDoubleOperation<>((ToDoubleFunction) input.readObject());
         case MAP_INT:
            return new MapToIntOperation<>((ToIntFunction) input.readObject());
         case MAP_LONG:
            return new MapToLongOperation<>((ToLongFunction) input.readObject());
         case PEEK:
            return new PeekOperation<>((Consumer) input.readObject());
         case SORTED_COMPARATOR:
            return new SortedComparatorOperation<>((Comparator) input.readObject());
         case SORTED:
            return SortedOperation.getInstance();
         case DOUBLE_BOXED:
            return BoxedDoubleOperation.getInstance();
         case DOUBLE_DISTINCT:
            return DistinctDoubleOperation.getInstance();
         case DOUBLE_FILTER:
            return new FilterDoubleOperation((DoublePredicate) input.readObject());
         case DOUBLE_FLATMAP:
            return new FlatMapDoubleOperation((DoubleFunction) input.readObject());
         case DOUBLE_LIMIT:
            return new LimitDoubleOperation(UnsignedNumeric.readUnsignedLong(input));
         case DOUBLE_MAP:
            return new MapDoubleOperation((DoubleUnaryOperator) input.readObject());
         case DOUBLE_MAP_INT:
            return new MapToIntDoubleOperation((DoubleToIntFunction) input.readObject());
         case DOUBLE_MAP_LONG:
            return new MapToLongDoubleOperation((DoubleToLongFunction) input.readObject());
         case DOUBLE_MAP_OBJ:
            return new MapToObjDoubleOperation<>((DoubleFunction) input.readObject());
         case DOUBLE_PEEK:
            return new PeekDoubleOperation((DoubleConsumer) input.readObject());
         case DOUBLE_SORTED:
            return SortedDoubleOperation.getInstance();
         case INT_AS_DOUBLE:
            return AsDoubleIntOperation.getInstance();
         case INT_AS_LONG:
            return AsLongIntOperation.getInstance();
         case INT_BOXED:
            return BoxedIntOperation.getInstance();
         case INT_DISTINCT:
            return DistinctIntOperation.getInstance();
         case INT_FILTER:
            return new FilterIntOperation((IntPredicate) input.readObject());
         case INT_FLATMAP:
            return new FlatMapIntOperation((IntFunction) input.readObject());
         case INT_LIMIT:
            return new LimitIntOperation(UnsignedNumeric.readUnsignedLong(input));
         case INT_MAP:
            return new MapIntOperation((IntUnaryOperator) input.readObject());
         case INT_MAP_DOUBLE:
            return new MapToDoubleIntOperation((IntToDoubleFunction) input.readObject());
         case INT_MAP_LONG:
            return new MapToLongIntOperation((IntToLongFunction) input.readObject());
         case INT_MAP_OBJ:
            return new MapToObjIntOperation<>((IntFunction) input.readObject());
         case INT_PEEK:
            return new PeekIntOperation((IntConsumer) input.readObject());
         case INT_SORTED:
            return SortedIntOperation.getInstance();
         case LONG_AS_DOUBLE:
            return AsDoubleLongOperation.getInstance();
         case LONG_BOXED:
            return BoxedLongOperation.getInstance();
         case LONG_DISTINCT:
            return DistinctLongOperation.getInstance();
         case LONG_FILTER:
            return new FilterLongOperation((LongPredicate) input.readObject());
         case LONG_FLATMAP:
            return new FlatMapLongOperation((LongFunction) input.readObject());
         case LONG_LIMIT:
            return new LimitLongOperation(UnsignedNumeric.readUnsignedLong(input));
         case LONG_MAP:
            return new MapLongOperation((LongUnaryOperator) input.readObject());
         case LONG_MAP_DOUBLE:
            return new MapToDoubleLongOperation((LongToDoubleFunction) input.readObject());
         case LONG_MAP_INT:
            return new MapToIntLongOperation((LongToIntFunction) input.readObject());
         case LONG_MAP_OBJ:
            return new MapToObjLongOperation<>((LongFunction) input.readObject());
         case LONG_PEEK:
            return new PeekLongOperation((LongConsumer) input.readObject());
         case LONG_SORTED:
            return SortedLongOperation.getInstance();
         default:
            throw new IllegalArgumentException("Found invalid number " + number);
      }
   }
}
| 22,939
| 47.601695
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/FilterOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Predicate;
import java.util.stream.Stream;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs filter operation on a regular {@link Stream}
* @param <S> the type in the stream
*/
public class FilterOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   private final Predicate<? super S> predicate;

   /**
    * Creates a filtering operation backed by the supplied predicate.
    * @param predicate decides which elements pass through the stream
    */
   public FilterOperation(Predicate<? super S> predicate) {
      this.predicate = predicate;
   }

   public Predicate<? super S> getPredicate() {
      return predicate;
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.filter(predicate);
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      return input.filter(element -> predicate.test(element));
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      // Only predicates that opt into injection receive the registry
      if (predicate instanceof InjectableComponent) {
         ((InjectableComponent) predicate).inject(registry);
      }
   }

   @Override
   public String toString() {
      return "FilterOperation{" + "predicate=" + predicate + '}';
   }
}
| 1,346
| 25.411765
| 94
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/MapToDoubleOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.ToDoubleFunction;
import java.util.stream.DoubleStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs map to double operation on a regular {@link Stream}
* @param <I> the type of the input stream
*/
public class MapToDoubleOperation<I> implements MappingOperation<I, Stream<I>, Double, DoubleStream> {
   private final ToDoubleFunction<? super I> function;

   /**
    * Creates an operation that converts each element to a primitive double.
    * @param function extracts the double value from an element
    */
   public MapToDoubleOperation(ToDoubleFunction<? super I> function) {
      this.function = function;
   }

   public ToDoubleFunction<? super I> getFunction() {
      return function;
   }

   @Override
   public DoubleStream perform(Stream<I> stream) {
      return stream.mapToDouble(function);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<I> input) {
      // Flowable has no primitive specialization, so values are boxed here
      return input.map(value -> function.applyAsDouble(value));
   }
}
| 978
| 26.194444
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/FlatMapToIntOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Function;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs flat map to int operation on a regular {@link Stream}
* @param <I> the type of the input stream
*/
public class FlatMapToIntOperation<I> implements FlatMappingOperation<I, Stream<I>, Integer, IntStream> {
   private final Function<? super I, ? extends IntStream> function;

   /**
    * Creates an operation that expands each element into an {@link IntStream}.
    * @param function produces the sub-stream for a given element
    */
   public FlatMapToIntOperation(Function<? super I, ? extends IntStream> function) {
      this.function = function;
   }

   public Function<? super I, ? extends IntStream> getFunction() {
      return function;
   }

   @Override
   public IntStream perform(Stream<I> stream) {
      return stream.flatMapToInt(function);
   }

   @Override
   public Stream<IntStream> map(Stream<I> inputStream) {
      // Expose the per-element sub-streams without flattening them
      return inputStream.map(function);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<I> input) {
      return input.concatMapStream(element -> function.apply(element).boxed());
   }
}
| 1,142
| 26.878049
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/SkipOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs skip operation on a regular {@link Stream}
*/
public class SkipOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   // Number of leading elements to drop
   private final long n;

   /**
    * Creates an operation that discards the first {@code n} elements.
    * @param n how many leading elements to skip
    */
   public SkipOperation(long n) {
      this.n = n;
   }

   /**
    * Exposes the skip count, mirroring {@link LimitOperation#getLimit()} so the
    * operation's state is reachable for serialization and diagnostics.
    * @return the number of leading elements this operation drops
    */
   public long getSkip() {
      return n;
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.skip(n);
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      return input.skip(n);
   }
}
| 632
| 20.827586
| 92
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/MapOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Function;
import java.util.stream.Stream;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.impl.intops.MappingOperation;
import org.infinispan.util.function.SerializableFunction;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs map to operation on a regular {@link Stream}
* @param <I> the type of the input stream
* @param <O> the type of the output stream
*/
public class MapOperation<I, O> implements MappingOperation<I, Stream<I>, O, Stream<O>> {
   private final Function<? super I, ? extends O> function;

   /**
    * Creates a mapping operation backed by the supplied function.
    * @param function converts an input element into an output element
    */
   public MapOperation(Function<? super I, ? extends O> function) {
      this.function = function;
   }

   /**
    * Serializable-function convenience overload; delegates to the plain constructor.
    */
   public MapOperation(SerializableFunction<? super I, ? extends O> function) {
      this((Function<? super I, ? extends O>) function);
   }

   public Function<? super I, ? extends O> getFunction() {
      return function;
   }

   @Override
   public Stream<O> perform(Stream<I> stream) {
      return stream.map(function);
   }

   @Override
   public Flowable<O> mapFlowable(Flowable<I> input) {
      return input.map(element -> function.apply(element));
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      // Unconditionally wires dependencies into the mapping function
      registry.wireDependencies(function);
   }
}
| 1,314
| 26.978723
| 89
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/DistinctOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs distinct operation on a regular {@link Stream}
* @param <S> the type in the stream
*/
public class DistinctOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   // Stateless, so a single shared instance suffices for every element type
   private static final DistinctOperation<?> INSTANCE = new DistinctOperation<>();

   private DistinctOperation() {
      // Singleton: obtain via getInstance()
   }

   /**
    * @return the shared instance, cast to the requested element type
    */
   @SuppressWarnings("unchecked")
   public static <S> DistinctOperation<S> getInstance() {
      return (DistinctOperation<S>) INSTANCE;
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.distinct();
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      return input.distinct();
   }
}
| 830
| 25.806452
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/SortedComparatorOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.Comparator;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
* Performs sorted operation with a comparator on a regular {@link Stream}
*/
public class SortedComparatorOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   private final Comparator<? super S> comparator;

   /**
    * Creates a sorting operation using the supplied ordering.
    * @param comparator ordering applied to the stream's elements
    */
   public SortedComparatorOperation(Comparator<? super S> comparator) {
      this.comparator = comparator;
   }

   public Comparator<? super S> getComparator() {
      return comparator;
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.sorted(comparator);
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      // Note: sorting a Flowable buffers all elements before emitting
      return input.sorted(comparator);
   }
}
| 878
| 24.852941
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/MapToLongOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.ToLongFunction;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to long operation on a regular {@link Stream}.
 * @param <I> the type of the input stream
 */
public class MapToLongOperation<I> implements MappingOperation<I, Stream<I>, Long, LongStream> {
   private final ToLongFunction<? super I> function;

   /**
    * @param function function converting each element to a primitive long
    */
   public MapToLongOperation(ToLongFunction<? super I> function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public ToLongFunction<? super I> getFunction() {
      return function;
   }

   @Override
   public LongStream perform(Stream<I> stream) {
      return stream.mapToLong(function);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<I> input) {
      // Flowable has no primitive specialization, so values are boxed as Long.
      return input.map(value -> function.applyAsLong(value));
   }
}
| 950
| 25.416667
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/LimitOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs limit operation on a regular {@link Stream}.
 * @param <S> the type in the stream
 */
public class LimitOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   private final long limit;

   /**
    * @param limit maximum number of elements to let through; must be positive
    * @throws IllegalArgumentException if {@code limit} is zero or negative
    */
   public LimitOperation(long limit) {
      if (limit <= 0) {
         throw new IllegalArgumentException("Limit must be greater than 0");
      }
      this.limit = limit;
   }

   /**
    * @return the configured element cap
    */
   public long getLimit() {
      return limit;
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.limit(limit);
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      // take() is the reactive equivalent of Stream.limit().
      return input.take(limit);
   }
}
| 823
| 21.888889
| 93
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/SortedOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs sorted operation on a regular {@link Stream} using natural ordering.
 * Stateless, so a single shared instance is reused.
 * @param <S> the type in the stream
 */
public class SortedOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   private static final SortedOperation<?> OPERATION = new SortedOperation<>();

   private SortedOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance, cast to the requested element type
    */
   public static <S> SortedOperation<S> getInstance() {
      return (SortedOperation<S>) OPERATION;
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      return input.sorted();
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.sorted();
   }
}
| 775
| 24.866667
| 94
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/FlatMapOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Function;
import java.util.stream.Stream;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs flat map operation on a regular {@link Stream}.
 * @param <I> the type of the input stream
 * @param <O> the type of the output stream
 */
public class FlatMapOperation<I, O> implements FlatMappingOperation<I, Stream<I>, O, Stream<O>> {
   private final Function<? super I, ? extends Stream<? extends O>> function;

   /**
    * @param function function producing a (possibly empty) stream per input element
    */
   public FlatMapOperation(Function<? super I, ? extends Stream<? extends O>> function) {
      this.function = function;
   }

   /**
    * @return the flat-mapping function this operation applies
    */
   public Function<? super I, ? extends Stream<? extends O>> getFunction() {
      return function;
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      // Satisfy any @Inject dependencies the user function declares.
      registry.wireDependencies(function);
   }

   @Override
   public Stream<O> perform(Stream<I> stream) {
      return stream.flatMap(function);
   }

   @Override
   public Stream<Stream<O>> map(Stream<I> iStream) {
      // Cast required: generics cannot express the covariant stream-of-streams here.
      return iStream.map((Function<I, Stream<O>>) function);
   }

   @Override
   public Flowable<O> mapFlowable(Flowable<I> input) {
      return input.concatMapStream(value -> function.apply(value));
   }
}
| 1,379
| 27.75
| 97
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/FlatMapToLongOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Function;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs flat map to long operation on a regular {@link Stream}.
 * @param <I> the type of the input stream
 */
public class FlatMapToLongOperation<I> implements FlatMappingOperation<I, Stream<I>, Long, LongStream> {
   private final Function<? super I, ? extends LongStream> function;

   /**
    * @param function function producing a {@link LongStream} per input element
    */
   public FlatMapToLongOperation(Function<? super I, ? extends LongStream> function) {
      this.function = function;
   }

   /**
    * @return the flat-mapping function this operation applies
    */
   public Function<? super I, ? extends LongStream> getFunction() {
      return function;
   }

   @Override
   public LongStream perform(Stream<I> stream) {
      return stream.flatMapToLong(function);
   }

   @Override
   public Stream<LongStream> map(Stream<I> iStream) {
      return iStream.map(function);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<I> input) {
      // Values must be boxed since Flowable has no primitive specialization.
      return input.concatMapStream(value -> function.apply(value).boxed());
   }
}
| 1,147
| 27
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/FlatMapToDoubleOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Function;
import java.util.stream.DoubleStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs flat map to double operation on a regular {@link Stream}.
 * @param <I> the type of the input stream
 */
public class FlatMapToDoubleOperation<I> implements FlatMappingOperation<I, Stream<I>, Double, DoubleStream> {
   private final Function<? super I, ? extends DoubleStream> function;

   /**
    * @param function function producing a {@link DoubleStream} per input element
    */
   public FlatMapToDoubleOperation(Function<? super I, ? extends DoubleStream> function) {
      this.function = function;
   }

   /**
    * @return the flat-mapping function this operation applies
    */
   public Function<? super I, ? extends DoubleStream> getFunction() {
      return function;
   }

   @Override
   public DoubleStream perform(Stream<I> stream) {
      return stream.flatMapToDouble(function);
   }

   @Override
   public Stream<DoubleStream> map(Stream<I> iStream) {
      return iStream.map(function);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<I> input) {
      // Values must be boxed since Flowable has no primitive specialization.
      return input.concatMapStream(value -> function.apply(value).boxed());
   }
}
| 1,173
| 27.634146
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/MapToIntOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.ToIntFunction;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to int operation on a regular {@link Stream}.
 * @param <I> the type of the input stream
 */
public class MapToIntOperation<I> implements MappingOperation<I, Stream<I>, Integer, IntStream> {
   private final ToIntFunction<? super I> function;

   /**
    * @param function function converting each element to a primitive int
    */
   public MapToIntOperation(ToIntFunction<? super I> function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public ToIntFunction<? super I> getFunction() {
      return function;
   }

   @Override
   public IntStream perform(Stream<I> stream) {
      return stream.mapToInt(function);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<I> input) {
      // Flowable has no primitive specialization, so values are boxed as Integer.
      return input.map(value -> function.applyAsInt(value));
   }
}
| 944
| 25.25
| 97
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/object/PeekOperation.java
|
package org.infinispan.stream.impl.intops.object;
import java.util.function.Consumer;
import java.util.stream.Stream;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.util.concurrent.BlockingManager;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
/**
 * Performs peek operation on a regular {@link Stream}.
 * In the reactive path the consumer may block, so it is dispatched through the
 * {@link BlockingManager} rather than invoked on the event loop.
 * @param <S> the type in the stream
 */
public class PeekOperation<S> implements IntermediateOperation<S, Stream<S>, S, Stream<S>> {
   private final Consumer<? super S> consumer;
   private BlockingManager blockingManager;

   /**
    * @param consumer side-effecting consumer invoked once per element
    */
   public PeekOperation(Consumer<? super S> consumer) {
      this.consumer = consumer;
   }

   /**
    * @return the consumer this operation invokes
    */
   public Consumer<? super S> getConsumer() {
      return consumer;
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      blockingManager = registry.getBlockingManager().running();
      // CacheAware consumers get the cache directly; everything else is wired generically.
      if (consumer instanceof CacheAware) {
         ((CacheAware) consumer).injectCache(registry.getCache().running());
      } else {
         registry.wireDependencies(consumer);
      }
   }

   @Override
   public Stream<S> perform(Stream<S> stream) {
      return stream.peek(consumer);
   }

   @Override
   public Flowable<S> mapFlowable(Flowable<S> input) {
      // Run the (potentially blocking) consumer off the event loop, then re-emit the element.
      return input.concatMapSingle(item -> Single.fromCompletionStage(
            blockingManager.supplyBlocking(() -> {
               consumer.accept(item);
               return item;
            }, "publisher-peek")));
   }
}
| 1,562
| 28.490566
| 92
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/LimitIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs limit operation on a {@link IntStream}.
 */
public class LimitIntOperation implements IntermediateOperation<Integer, IntStream, Integer, IntStream> {
   private final long limit;

   /**
    * @param limit maximum number of elements to let through; must be positive
    * @throws IllegalArgumentException if {@code limit} is zero or negative
    */
   public LimitIntOperation(long limit) {
      if (limit <= 0) {
         throw new IllegalArgumentException("Limit must be greater than 0");
      }
      this.limit = limit;
   }

   /**
    * @return the configured element cap
    */
   public long getLimit() {
      return limit;
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.limit(limit);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      // take() is the reactive equivalent of IntStream.limit().
      return input.take(limit);
   }
}
| 853
| 22.722222
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/MapToObjIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntFunction;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to object operation on a {@link IntStream}.
 * @param <R> the type of the resulting object stream
 */
public class MapToObjIntOperation<R> implements MappingOperation<Integer, IntStream, R, Stream<R>> {
   private final IntFunction<? extends R> function;

   /**
    * @param function function converting each primitive int to an object
    */
   public MapToObjIntOperation(IntFunction<? extends R> function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public IntFunction<? extends R> getFunction() {
      return function;
   }

   @Override
   public Stream<R> perform(IntStream stream) {
      return stream.mapToObj(function);
   }

   @Override
   public Flowable<R> mapFlowable(Flowable<Integer> input) {
      return input.map(value -> function.apply(value));
   }
}
| 903
| 24.828571
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/BoxedIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs boxed operation on a {@link IntStream}.
 * Stateless, so a single shared instance is reused.
 */
public class BoxedIntOperation implements MappingOperation<Integer, IntStream, Integer, Stream<Integer>> {
   private static final BoxedIntOperation OPERATION = new BoxedIntOperation();

   private BoxedIntOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static BoxedIntOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      // Flowable elements are already boxed Integers, so this is a no-op.
      return input;
   }

   @Override
   public Stream<Integer> perform(IntStream stream) {
      return stream.boxed();
   }
}
| 799
| 24.806452
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/FilterIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntPredicate;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs filter operation on a {@link IntStream}.
 * @param <S> unused; retained for binary/source compatibility with existing callers
 */
public class FilterIntOperation<S> implements IntermediateOperation<Integer, IntStream, Integer, IntStream> {
   private final IntPredicate predicate;

   /**
    * @param predicate predicate an element must satisfy to pass through
    */
   public FilterIntOperation(IntPredicate predicate) {
      this.predicate = predicate;
   }

   /**
    * @return the predicate this operation filters with
    */
   public IntPredicate getPredicate() {
      return predicate;
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.filter(predicate);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      return input.filter(value -> predicate.test(value));
   }
}
| 855
| 24.176471
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/MapToDoubleIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntToDoubleFunction;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to double operation on a {@link IntStream}.
 */
public class MapToDoubleIntOperation implements MappingOperation<Integer, IntStream, Double, DoubleStream> {
   private final IntToDoubleFunction function;

   /**
    * @param function function converting each int to a primitive double
    */
   public MapToDoubleIntOperation(IntToDoubleFunction function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public IntToDoubleFunction getFunction() {
      return function;
   }

   @Override
   public DoubleStream perform(IntStream stream) {
      return stream.mapToDouble(function);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Integer> input) {
      // Flowable has no primitive specialization, so values are boxed as Double.
      return input.map(value -> function.applyAsDouble(value));
   }
}
| 932
| 25.657143
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/SkipIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs skip operation on a {@link IntStream}.
 */
public class SkipIntOperation implements IntermediateOperation<Integer, IntStream, Integer, IntStream> {
   private final long n;

   /**
    * @param n number of leading elements to discard
    */
   public SkipIntOperation(long n) {
      this.n = n;
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      return input.skip(n);
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.skip(n);
   }
}
| 662
| 21.862069
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/AsLongIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs as long operation on a {@link IntStream}.
 * Stateless, so a single shared instance is reused.
 */
public class AsLongIntOperation implements MappingOperation<Integer, IntStream, Long, LongStream> {
   private static final AsLongIntOperation OPERATION = new AsLongIntOperation();

   private AsLongIntOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static AsLongIntOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Integer> input) {
      // Widen each boxed Integer to a boxed Long.
      return input.map(Integer::longValue);
   }

   @Override
   public LongStream perform(IntStream stream) {
      return stream.asLongStream();
   }
}
| 825
| 25.645161
| 99
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/DistinctIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs distinct operation on a {@link IntStream}.
 * Stateless, so a single shared instance is reused.
 */
public class DistinctIntOperation implements IntermediateOperation<Integer, IntStream, Integer, IntStream> {
   private static final DistinctIntOperation OPERATION = new DistinctIntOperation();

   private DistinctIntOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static DistinctIntOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      return input.distinct();
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.distinct();
   }
}
| 797
| 25.6
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/MapIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntUnaryOperator;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map operation on a {@link IntStream}.
 */
public class MapIntOperation implements MappingOperation<Integer, IntStream, Integer, IntStream> {
   private final IntUnaryOperator operator;

   /**
    * @param operator operator applied to each element
    */
   public MapIntOperation(IntUnaryOperator operator) {
      this.operator = operator;
   }

   /**
    * @return the operator this operation applies
    */
   public IntUnaryOperator getOperator() {
      return operator;
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.map(operator);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      return input.map(value -> operator.applyAsInt(value));
   }
}
| 841
| 23.764706
| 98
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/FlatMapIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntFunction;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs flat map operation on a {@link IntStream}.
 */
public class FlatMapIntOperation implements FlatMappingOperation<Integer, IntStream, Integer, IntStream> {
   private final IntFunction<? extends IntStream> function;

   /**
    * @param function function producing an {@link IntStream} per input element
    */
   public FlatMapIntOperation(IntFunction<? extends IntStream> function) {
      this.function = function;
   }

   /**
    * @return the flat-mapping function this operation applies
    */
   public IntFunction<? extends IntStream> getFunction() {
      return function;
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.flatMap(function);
   }

   @Override
   public Stream<IntStream> map(IntStream intStream) {
      return intStream.mapToObj(function);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      // Values must be boxed since Flowable has no primitive specialization.
      return input.concatMapStream(value -> function.apply(value).boxed());
   }
}
| 1,080
| 26.025
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/AsDoubleIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs as double operation on a {@link IntStream}.
 * Stateless, so a single shared instance is reused.
 */
public class AsDoubleIntOperation implements MappingOperation<Integer, IntStream, Double, DoubleStream> {
   private static final AsDoubleIntOperation OPERATION = new AsDoubleIntOperation();

   private AsDoubleIntOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static AsDoubleIntOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Integer> input) {
      // Widen each boxed Integer to a boxed Double.
      return input.map(Integer::doubleValue);
   }

   @Override
   public DoubleStream perform(IntStream stream) {
      return stream.asDoubleStream();
   }
}
| 851
| 26.483871
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/PeekIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntConsumer;
import java.util.stream.IntStream;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.util.concurrent.BlockingManager;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
/**
 * Performs peek operation on a {@link IntStream}.
 * In the reactive path the consumer may block, so it is dispatched through the
 * {@link BlockingManager} rather than invoked on the event loop.
 */
public class PeekIntOperation implements IntermediateOperation<Integer, IntStream, Integer, IntStream> {
   private final IntConsumer consumer;
   private BlockingManager blockingManager;

   /**
    * @param consumer side-effecting consumer invoked once per element
    */
   public PeekIntOperation(IntConsumer consumer) {
      this.consumer = consumer;
   }

   /**
    * @return the consumer this operation invokes
    */
   public IntConsumer getConsumer() {
      return consumer;
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      blockingManager = registry.getBlockingManager().running();
      // CacheAware consumers get the cache directly; everything else is wired generically.
      if (consumer instanceof CacheAware) {
         ((CacheAware) consumer).injectCache(registry.getCache().running());
      } else {
         registry.wireDependencies(consumer);
      }
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.peek(consumer);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      // Run the (potentially blocking) consumer off the event loop, then re-emit the element.
      return input.concatMapSingle(item -> Single.fromCompletionStage(
            blockingManager.supplyBlocking(() -> {
               consumer.accept(item);
               return item;
            }, "publisher-peek")));
   }
}
| 1,571
| 28.660377
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/MapToLongIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.function.IntToLongFunction;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to long operation on a {@link IntStream}.
 */
public class MapToLongIntOperation implements MappingOperation<Integer, IntStream, Long, LongStream> {
   private final IntToLongFunction function;

   /**
    * @param function function converting each int to a primitive long
    */
   public MapToLongIntOperation(IntToLongFunction function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public IntToLongFunction getFunction() {
      return function;
   }

   @Override
   public LongStream perform(IntStream stream) {
      return stream.mapToLong(function);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Integer> input) {
      // Flowable has no primitive specialization, so values are boxed as Long.
      return input.map(value -> function.applyAsLong(value));
   }
}
| 904
| 24.857143
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/i/SortedIntOperation.java
|
package org.infinispan.stream.impl.intops.primitive.i;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs sorted operation on a {@link IntStream}.
 * Stateless, so a single shared instance is reused.
 */
public class SortedIntOperation implements IntermediateOperation<Integer, IntStream, Integer, IntStream> {
   private static final SortedIntOperation OPERATION = new SortedIntOperation();

   private SortedIntOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static SortedIntOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Integer> input) {
      // Note: sorting requires buffering the entire sequence before emitting.
      return input.sorted();
   }

   @Override
   public IntStream perform(IntStream stream) {
      return stream.sorted();
   }
}
| 781
| 25.066667
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/FilterLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongPredicate;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs filter operation on a {@link LongStream}.
 * @param <S> unused; retained for binary/source compatibility with existing callers
 */
public class FilterLongOperation<S> implements IntermediateOperation<Long, LongStream, Long, LongStream> {
   private final LongPredicate predicate;

   /**
    * @param predicate predicate an element must satisfy to pass through
    */
   public FilterLongOperation(LongPredicate predicate) {
      this.predicate = predicate;
   }

   /**
    * @return the predicate this operation filters with
    */
   public LongPredicate getPredicate() {
      return predicate;
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.filter(predicate);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      return input.filter(value -> predicate.test(value));
   }
}
| 855
| 24.176471
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/DistinctLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs distinct operation on a {@link LongStream}.
 * Stateless, so a single shared instance is reused.
 */
public class DistinctLongOperation implements IntermediateOperation<Long, LongStream, Long, LongStream> {
   private static final DistinctLongOperation OPERATION = new DistinctLongOperation();

   private DistinctLongOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static DistinctLongOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      return input.distinct();
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.distinct();
   }
}
| 796
| 25.566667
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/AsDoubleLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.stream.DoubleStream;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs as double operation on a {@link LongStream}.
 * Stateless, so a single shared instance is reused.
 */
public class AsDoubleLongOperation implements MappingOperation<Long, LongStream, Double, DoubleStream> {
   private static final AsDoubleLongOperation OPERATION = new AsDoubleLongOperation();

   private AsDoubleLongOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static AsDoubleLongOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Long> input) {
      // Widen each boxed Long to a boxed Double.
      return input.map(Long::doubleValue);
   }

   @Override
   public DoubleStream perform(LongStream stream) {
      return stream.asDoubleStream();
   }
}
| 851
| 26.483871
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/MapToDoubleLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongToDoubleFunction;
import java.util.stream.DoubleStream;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to double operation on a {@link LongStream}.
 */
public class MapToDoubleLongOperation implements MappingOperation<Long, LongStream, Double, DoubleStream> {
   private final LongToDoubleFunction function;

   /**
    * @param function function converting each long to a primitive double
    */
   public MapToDoubleLongOperation(LongToDoubleFunction function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public LongToDoubleFunction getFunction() {
      return function;
   }

   @Override
   public DoubleStream perform(LongStream stream) {
      return stream.mapToDouble(function);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Long> input) {
      // Flowable has no primitive specialization, so values are boxed as Double.
      return input.map(value -> function.applyAsDouble(value));
   }
}
| 936
| 25.771429
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/PeekLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongConsumer;
import java.util.stream.LongStream;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.util.concurrent.BlockingManager;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
/**
 * Performs peek operation on a {@link LongStream}.
 * In the reactive path the consumer may block, so it is dispatched through the
 * {@link BlockingManager} rather than invoked on the event loop.
 */
public class PeekLongOperation implements IntermediateOperation<Long, LongStream, Long, LongStream> {
   private final LongConsumer consumer;
   private BlockingManager blockingManager;

   /**
    * @param consumer side-effecting consumer invoked once per element
    */
   public PeekLongOperation(LongConsumer consumer) {
      this.consumer = consumer;
   }

   /**
    * @return the consumer this operation invokes
    */
   public LongConsumer getConsumer() {
      return consumer;
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      blockingManager = registry.getBlockingManager().running();
      // CacheAware consumers get the cache directly; everything else is wired generically.
      if (consumer instanceof CacheAware) {
         ((CacheAware) consumer).injectCache(registry.getCache().running());
      } else {
         registry.wireDependencies(consumer);
      }
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.peek(consumer);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      // Run the (potentially blocking) consumer off the event loop, then re-emit the element.
      return input.concatMapSingle(item -> Single.fromCompletionStage(
            blockingManager.supplyBlocking(() -> {
               consumer.accept(item);
               return item;
            }, "publisher-peek")));
   }
}
| 1,571
| 28.660377
| 101
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/MapLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongUnaryOperator;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map operation on a {@link LongStream}.
 */
public class MapLongOperation implements MappingOperation<Long, LongStream, Long, LongStream> {
   private final LongUnaryOperator operator;

   /**
    * @param operator operator applied to each element
    */
   public MapLongOperation(LongUnaryOperator operator) {
      this.operator = operator;
   }

   /**
    * @return the operator this operation applies
    */
   public LongUnaryOperator getOperator() {
      return operator;
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.map(operator);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      return input.map(value -> operator.applyAsLong(value));
   }
}
| 842
| 23.794118
| 95
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/SkipLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs skip operation on a {@link LongStream}.
 */
public class SkipLongOperation implements IntermediateOperation<Long, LongStream, Long, LongStream> {
   private final long n;

   /**
    * @param n number of leading elements to discard
    */
   public SkipLongOperation(long n) {
      this.n = n;
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      return input.skip(n);
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.skip(n);
   }
}
| 658
| 21.724138
| 101
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/BoxedLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs boxed operation on a {@link LongStream}.
 * Stateless, so a single shared instance is reused.
 */
public class BoxedLongOperation implements MappingOperation<Long, LongStream, Long, Stream<Long>> {
   private static final BoxedLongOperation OPERATION = new BoxedLongOperation();

   private BoxedLongOperation() {
      // singleton - use getInstance()
   }

   /**
    * @return the shared stateless instance
    */
   public static BoxedLongOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      // Flowable elements are already boxed Longs, so this is a no-op.
      return input;
   }

   @Override
   public Stream<Long> perform(LongStream stream) {
      return stream.boxed();
   }
}
| 790
| 24.516129
| 99
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/MapToObjLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongFunction;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Performs map to object operation on a {@link LongStream}.
 * @param <R> the type of the resulting object stream
 */
public class MapToObjLongOperation<R> implements MappingOperation<Long, LongStream, R, Stream<R>> {
   private final LongFunction<? extends R> function;

   /**
    * @param function function converting each primitive long to an object
    */
   public MapToObjLongOperation(LongFunction<? extends R> function) {
      this.function = function;
   }

   /**
    * @return the conversion function this operation applies
    */
   public LongFunction<? extends R> getFunction() {
      return function;
   }

   @Override
   public Stream<R> perform(LongStream stream) {
      return stream.mapToObj(function);
   }

   @Override
   public Flowable<R> mapFlowable(Flowable<Long> input) {
      return input.map(value -> function.apply(value));
   }
}
| 907
| 24.942857
| 99
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/LimitLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Limit operation for a {@link LongStream}: truncates the stream so that at most
 * {@code limit} elements are emitted.
 */
public class LimitLongOperation implements IntermediateOperation<Long, LongStream, Long, LongStream> {
   private final long limit;

   /**
    * @param limit maximum number of elements to emit; must be positive
    * @throws IllegalArgumentException if {@code limit} is zero or negative
    */
   public LimitLongOperation(long limit) {
      if (limit <= 0) {
         throw new IllegalArgumentException("Limit must be greater than 0");
      }
      this.limit = limit;
   }

   /**
    * @return the configured maximum number of elements
    */
   public long getLimit() {
      return limit;
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.limit(limit);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      // take() is the reactive equivalent of Stream.limit()
      return input.take(limit);
   }
}
| 849
| 22.611111
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/SortedLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Sorted operation for a {@link LongStream}: emits the elements in natural
 * (ascending) order.
 * <p>
 * Stateless, so a single shared instance is exposed through {@link #getInstance()}.
 */
public class SortedLongOperation implements IntermediateOperation<Long, LongStream, Long, LongStream> {
   private static final SortedLongOperation OPERATION = new SortedLongOperation();

   private SortedLongOperation() { }

   /**
    * @return the shared singleton instance of this operation
    */
   public static SortedLongOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      // NOTE(review): Flowable.sorted() presumably buffers all upstream elements
      // before emitting — confirm this is acceptable for the expected stream sizes.
      return input.sorted();
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.sorted();
   }
}
| 780
| 25.033333
| 103
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/FlatMapLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongFunction;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Flat-map operation for a {@link LongStream}: replaces each element with the
 * contents of the {@link LongStream} produced by the configured {@link LongFunction}.
 */
public class FlatMapLongOperation implements FlatMappingOperation<Long, LongStream, Long, LongStream> {
   private final LongFunction<? extends LongStream> function;

   public FlatMapLongOperation(LongFunction<? extends LongStream> function) {
      this.function = function;
   }

   /**
    * @return the function producing the sub-stream for each element
    */
   public LongFunction<? extends LongStream> getFunction() {
      return function;
   }

   @Override
   public LongStream perform(LongStream stream) {
      return stream.flatMap(function);
   }

   @Override
   public Stream<LongStream> map(LongStream longStream) {
      // Expose each element's sub-stream without flattening it.
      return longStream.mapToObj(function);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Long> input) {
      // concatMapStream keeps the sub-streams in order, boxing each produced long.
      return input.concatMapStream(value -> function.apply(value).boxed());
   }
}
| 1,087
| 26.2
| 103
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/l/MapToIntLongOperation.java
|
package org.infinispan.stream.impl.intops.primitive.l;
import java.util.function.LongToIntFunction;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Map-to-int operation for a {@link LongStream}: narrows each {@code long} element
 * to an {@code int} via the configured {@link LongToIntFunction}.
 */
public class MapToIntLongOperation implements MappingOperation<Long, LongStream, Integer, IntStream> {
   private final LongToIntFunction function;

   public MapToIntLongOperation(LongToIntFunction function) {
      this.function = function;
   }

   /**
    * @return the narrowing function applied to every element
    */
   public LongToIntFunction getFunction() {
      return function;
   }

   @Override
   public IntStream perform(LongStream stream) {
      return stream.mapToInt(function);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Long> input) {
      return input.map(value -> function.applyAsInt(value));
   }
}
| 902
| 24.8
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/DistinctDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.stream.DoubleStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Distinct operation for a {@link DoubleStream}: drops duplicate elements so each
 * value is emitted at most once.
 * <p>
 * Stateless, so a single shared instance is exposed through {@link #getInstance()}.
 */
public class DistinctDoubleOperation implements IntermediateOperation<Double, DoubleStream, Double, DoubleStream> {
   private static final DistinctDoubleOperation OPERATION = new DistinctDoubleOperation();

   private DistinctDoubleOperation() { }

   /**
    * @return the shared singleton instance of this operation
    */
   public static DistinctDoubleOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      return input.distinct();
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.distinct();
   }
}
| 826
| 26.566667
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/FlatMapDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoubleFunction;
import java.util.stream.DoubleStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.FlatMappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Flat-map operation for a {@link DoubleStream}: replaces each element with the
 * contents of the {@link DoubleStream} produced by the configured {@link DoubleFunction}.
 */
public class FlatMapDoubleOperation implements FlatMappingOperation<Double, DoubleStream, Double, DoubleStream> {
   private final DoubleFunction<? extends DoubleStream> function;

   public FlatMapDoubleOperation(DoubleFunction<? extends DoubleStream> function) {
      this.function = function;
   }

   /**
    * @return the function producing the sub-stream for each element
    */
   public DoubleFunction<? extends DoubleStream> getFunction() {
      return function;
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.flatMap(function);
   }

   @Override
   public Stream<DoubleStream> map(DoubleStream doubleStream) {
      // Expose each element's sub-stream without flattening it.
      return doubleStream.mapToObj(function);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      // concatMapStream keeps the sub-streams in order, boxing each produced double.
      return input.concatMapStream(value -> function.apply(value).boxed());
   }
}
| 1,133
| 27.35
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/LimitDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.stream.DoubleStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Limit operation for a {@link DoubleStream}: truncates the stream so that at most
 * {@code limit} elements are emitted.
 */
public class LimitDoubleOperation implements IntermediateOperation<Double, DoubleStream, Double, DoubleStream> {
   private final long limit;

   /**
    * @param limit maximum number of elements to emit; must be positive
    * @throws IllegalArgumentException if {@code limit} is zero or negative
    */
   public LimitDoubleOperation(long limit) {
      if (limit <= 0) {
         throw new IllegalArgumentException("Limit must be greater than 0");
      }
      this.limit = limit;
   }

   /**
    * @return the configured maximum number of elements
    */
   public long getLimit() {
      return limit;
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.limit(limit);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      // take() is the reactive equivalent of Stream.limit()
      return input.take(limit);
   }
}
| 873
| 23.277778
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/MapDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoubleUnaryOperator;
import java.util.stream.DoubleStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Map operation for a {@link DoubleStream}: transforms each element with the
 * configured {@link DoubleUnaryOperator}.
 */
public class MapDoubleOperation implements MappingOperation<Double, DoubleStream, Double, DoubleStream> {
   private final DoubleUnaryOperator operator;

   public MapDoubleOperation(DoubleUnaryOperator operator) {
      this.operator = operator;
   }

   /**
    * @return the operator applied to every element
    */
   public DoubleUnaryOperator getOperator() {
      return operator;
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.map(operator);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      return input.map(value -> operator.applyAsDouble(value));
   }
}
| 876
| 24.794118
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/BoxedDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.stream.DoubleStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Boxing operation for a {@link DoubleStream}: converts every primitive {@code double}
 * element into its {@link Double} wrapper, yielding a {@code Stream<Double>}.
 * <p>
 * Stateless, so a single shared instance is exposed through {@link #getInstance()}.
 */
public class BoxedDoubleOperation implements MappingOperation<Double, DoubleStream, Double, Stream<Double>> {
   private static final BoxedDoubleOperation OPERATION = new BoxedDoubleOperation();

   private BoxedDoubleOperation() { }

   /**
    * @return the shared singleton instance of this operation
    */
   public static BoxedDoubleOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      // Flowable elements are already boxed Doubles, so boxing is the identity here.
      return input;
   }

   @Override
   public Stream<Double> perform(DoubleStream stream) {
      return stream.boxed();
   }
}
| 820
| 25.483871
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/MapToObjDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoubleFunction;
import java.util.stream.DoubleStream;
import java.util.stream.Stream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Map-to-object operation for a {@link DoubleStream}: transforms each primitive
 * {@code double} element into an object of type {@code R} via the configured
 * {@link DoubleFunction}.
 *
 * @param <R> element type of the resulting {@link Stream}
 */
public class MapToObjDoubleOperation<R> implements MappingOperation<Double, DoubleStream, R, Stream<R>> {
   private final DoubleFunction<? extends R> function;

   public MapToObjDoubleOperation(DoubleFunction<? extends R> function) {
      this.function = function;
   }

   /**
    * @return the mapping function applied to every element
    */
   public DoubleFunction<? extends R> getFunction() {
      return function;
   }

   @Override
   public Stream<R> perform(DoubleStream stream) {
      return stream.mapToObj(function);
   }

   @Override
   public Flowable<R> mapFlowable(Flowable<Double> input) {
      return input.map(value -> function.apply(value));
   }
}
| 967
| 25.888889
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/FilterDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoublePredicate;
import java.util.stream.DoubleStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Filter operation for a {@link DoubleStream}: keeps only the elements accepted by
 * the configured {@link DoublePredicate}.
 */
public class FilterDoubleOperation implements IntermediateOperation<Double, DoubleStream, Double, DoubleStream> {
   private final DoublePredicate predicate;

   public FilterDoubleOperation(DoublePredicate predicate) {
      this.predicate = predicate;
   }

   /**
    * @return the predicate elements must satisfy to pass through
    */
   public DoublePredicate getPredicate() {
      return predicate;
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.filter(predicate);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      return input.filter(value -> predicate.test(value));
   }
}
| 884
| 25.029412
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/SortedDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.stream.DoubleStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Sorted operation for a {@link DoubleStream}: emits the elements in natural
 * (ascending) order.
 * <p>
 * Stateless, so a single shared instance is exposed through {@link #getInstance()}.
 */
public class SortedDoubleOperation implements IntermediateOperation<Double, DoubleStream, Double, DoubleStream> {
   private static final SortedDoubleOperation OPERATION = new SortedDoubleOperation();

   private SortedDoubleOperation() { }

   /**
    * @return the shared singleton instance of this operation
    */
   public static SortedDoubleOperation getInstance() {
      return OPERATION;
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      // NOTE(review): Flowable.sorted() presumably buffers all upstream elements
      // before emitting — confirm this is acceptable for the expected stream sizes.
      return input.sorted();
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.sorted();
   }
}
| 810
| 26.033333
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/SkipDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.stream.DoubleStream;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Skip operation for a {@link DoubleStream}: discards the first {@code n} elements
 * and emits the rest.
 */
public class SkipDoubleOperation implements IntermediateOperation<Double, DoubleStream, Double, DoubleStream> {
   private final long n;

   /**
    * @param n number of leading elements to discard
    */
   public SkipDoubleOperation(long n) {
      this.n = n;
   }

   /**
    * Accessor added for consistency with the sibling operations
    * (e.g. {@code LimitDoubleOperation#getLimit()}, {@code FilterDoubleOperation#getPredicate()}).
    *
    * @return the number of leading elements that are discarded
    */
   public long getN() {
      return n;
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.skip(n);
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      return input.skip(n);
   }
}
| 682
| 22.551724
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/MapToIntDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoubleToIntFunction;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Map-to-int operation for a {@link DoubleStream}: converts each {@code double}
 * element to an {@code int} via the configured {@link DoubleToIntFunction}.
 */
public class MapToIntDoubleOperation implements MappingOperation<Double, DoubleStream, Integer, IntStream> {
   private final DoubleToIntFunction function;

   public MapToIntDoubleOperation(DoubleToIntFunction function) {
      this.function = function;
   }

   /**
    * @return the conversion function applied to every element
    */
   public DoubleToIntFunction getFunction() {
      return function;
   }

   @Override
   public IntStream perform(DoubleStream stream) {
      return stream.mapToInt(function);
   }

   @Override
   public Flowable<Integer> mapFlowable(Flowable<Double> input) {
      return input.map(value -> function.applyAsInt(value));
   }
}
| 926
| 25.485714
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/PeekDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoubleConsumer;
import java.util.stream.DoubleStream;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.stream.CacheAware;
import org.infinispan.stream.impl.intops.IntermediateOperation;
import org.infinispan.util.concurrent.BlockingManager;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
/**
 * Peek operation for a {@link DoubleStream}: invokes the configured
 * {@link DoubleConsumer} for every element while passing the elements through
 * unchanged.
 */
public class PeekDoubleOperation implements IntermediateOperation<Double, DoubleStream, Double, DoubleStream> {
   private final DoubleConsumer consumer;
   // Injected via handleInjection(); used to run the (possibly blocking) consumer
   // off the reactive thread in mapFlowable().
   private BlockingManager blockingManager;

   public PeekDoubleOperation(DoubleConsumer consumer) {
      this.consumer = consumer;
   }

   /**
    * @return the consumer invoked for every element
    */
   public DoubleConsumer getConsumer() {
      return consumer;
   }

   @Override
   public DoubleStream perform(DoubleStream stream) {
      return stream.peek(consumer);
   }

   @Override
   public void handleInjection(ComponentRegistry registry) {
      blockingManager = registry.getBlockingManager().running();
      // CacheAware consumers get the cache injected directly; anything else goes
      // through regular component wiring.
      if (consumer instanceof CacheAware) {
         ((CacheAware) consumer).injectCache(registry.getCache().running());
      } else {
         registry.wireDependencies(consumer);
      }
   }

   @Override
   public Flowable<Double> mapFlowable(Flowable<Double> input) {
      // Run the consumer on the blocking executor, re-emitting each element once
      // the consumer has been applied; concatMapSingle preserves ordering.
      return input.concatMapSingle(value -> Single.fromCompletionStage(
            blockingManager.supplyBlocking(() -> {
               consumer.accept(value);
               return value;
            }, "publisher-peek")));
   }
}
| 1,603
| 29.264151
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stream/impl/intops/primitive/d/MapToLongDoubleOperation.java
|
package org.infinispan.stream.impl.intops.primitive.d;
import java.util.function.DoubleToLongFunction;
import java.util.stream.DoubleStream;
import java.util.stream.LongStream;
import org.infinispan.stream.impl.intops.MappingOperation;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Map-to-long operation for a {@link DoubleStream}: converts each {@code double}
 * element to a {@code long} via the configured {@link DoubleToLongFunction}.
 */
public class MapToLongDoubleOperation implements MappingOperation<Double, DoubleStream, Long, LongStream> {
   private final DoubleToLongFunction function;

   public MapToLongDoubleOperation(DoubleToLongFunction function) {
      this.function = function;
   }

   /**
    * @return the conversion function applied to every element
    */
   public DoubleToLongFunction getFunction() {
      return function;
   }

   @Override
   public LongStream perform(DoubleStream stream) {
      return stream.mapToLong(function);
   }

   @Override
   public Flowable<Long> mapFlowable(Flowable<Double> input) {
      return input.map(value -> function.applyAsLong(value));
   }
}
| 932
| 25.657143
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/LocalizedCacheTopology.java
|
package org.infinispan.distribution;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.util.ImmutableHopscotchHashSet;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.SingleSegmentKeyPartitioner;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
/**
 * Extends {@link CacheTopology} with information about keys owned by the local node.
 *
 * @author Dan Berindei
 * @since 9.0
 */
public class LocalizedCacheTopology extends CacheTopology {
   // Address of this node; used to compute per-segment ownership flags.
   private final Address localAddress;
   // Whether this topology was received from the coordinator (false e.g. during preload).
   private boolean connected;
   // Immutable view of the topology members.
   private final Set<Address> membersSet;
   // Maps a key to its consistent-hash segment.
   private final KeyPartitioner keyPartitioner;
   // True when the cache mode is distributed (per-segment ownership varies).
   private final boolean isDistributed;
   // True when every segment is readable locally (local mode, or repl/inv where this node is an owner).
   private final boolean allLocal;
   private final int numSegments;
   // Largest number of write owners across all segments (dist) or cluster size (repl/inv).
   private final int maxOwners;
   // Per-segment ownership information, indexed by segment id.
   private final DistributionInfo[] distributionInfos;
   /**
    * @param cacheMode Ignored, the result topology is always LOCAL
    * @param localAddress Address of the local node
    */
   public static LocalizedCacheTopology makeSingletonTopology(CacheMode cacheMode, Address localAddress) {
      List<Address> members = Collections.singletonList(localAddress);
      CacheTopology cacheTopology = new CacheTopology(-1, -1, null, null, Phase.NO_REBALANCE, members, null);
      return new LocalizedCacheTopology(CacheMode.LOCAL, cacheTopology, SingleSegmentKeyPartitioner.getInstance(),
            localAddress, false);
   }
   /**
    * Creates a new local topology that has a single address but multiple segments. This is useful when the data
    * storage is segmented in some way (ie. segmented store)
    * @param keyPartitioner partitioner to decide which segment a given key maps to
    * @param numSegments how many segments there are
    * @param localAddress the address of this node
    * @return segmented topology
    */
   public static LocalizedCacheTopology makeSegmentedSingletonTopology(KeyPartitioner keyPartitioner, int numSegments,
         Address localAddress) {
      return new LocalizedCacheTopology(keyPartitioner, numSegments, localAddress);
   }
   /**
    * Builds the localized view of {@code cacheTopology}, precomputing a
    * {@link DistributionInfo} per segment according to the cache mode.
    *
    * @param cacheMode      cache mode deciding how ownership is computed
    * @param cacheTopology  the cluster-wide topology to localize
    * @param keyPartitioner maps keys to segments
    * @param localAddress   address of this node
    * @param connected      whether this topology came from the coordinator
    */
   public LocalizedCacheTopology(CacheMode cacheMode, CacheTopology cacheTopology, KeyPartitioner keyPartitioner,
         Address localAddress, boolean connected) {
      super(cacheTopology.getTopologyId(), cacheTopology.getRebalanceId(), cacheTopology.getCurrentCH(),
            cacheTopology.getPendingCH(), cacheTopology.getUnionCH(), cacheTopology.getPhase(), cacheTopology.getActualMembers(),
            cacheTopology.getMembersPersistentUUIDs());
      ConsistentHash readCH = getReadConsistentHash();
      ConsistentHash writeCH = getWriteConsistentHash();
      this.localAddress = localAddress;
      this.connected = connected;
      this.membersSet = new ImmutableHopscotchHashSet<>(cacheTopology.getMembers());
      this.keyPartitioner = keyPartitioner;
      this.isDistributed = cacheMode.isDistributed();
      boolean isReplicated = cacheMode.isReplicated();
      boolean isInvalidation = cacheMode.isInvalidation();
      if (isDistributed) {
         // Distributed: each segment has its own owner lists taken from the read/write CH.
         this.numSegments = readCH.getNumSegments();
         this.distributionInfos = new DistributionInfo[numSegments];
         int maxOwners = 1;
         for (int segmentId = 0; segmentId < numSegments; segmentId++) {
            Address primary = readCH.locatePrimaryOwnerForSegment(segmentId);
            List<Address> readOwners = readCH.locateOwnersForSegment(segmentId);
            List<Address> writeOwners = writeCH.locateOwnersForSegment(segmentId);
            // Backups are all write owners except the first (primary).
            Collection<Address> writeBackups = writeOwners.subList(1, writeOwners.size());
            this.distributionInfos[segmentId] =
                  new DistributionInfo(segmentId, primary, readOwners, writeOwners, writeBackups, localAddress);
            maxOwners = Math.max(maxOwners, writeOwners.size());
         }
         this.maxOwners = maxOwners;
         this.allLocal = false;
      } else if (isReplicated || isInvalidation) {
         this.numSegments = readCH.getNumSegments();
         // Writes/invalidations must be broadcast to the entire cluster
         // Owner lists are shared per primary owner to avoid copying them for every segment.
         Map<Address, List<Address>> readOwnersMap = new HashMap<>();
         Map<Address, List<Address>> writeOwnersMap = new HashMap<>();
         this.distributionInfos = new DistributionInfo[numSegments];
         for (int segmentId = 0; segmentId < numSegments; segmentId++) {
            int segmentCopy = segmentId;
            Address primary = readCH.locatePrimaryOwnerForSegment(segmentId);
            List<Address> readOwners = readOwnersMap.computeIfAbsent(primary, p ->
                  Immutables.immutableListCopy(readCH.locateOwnersForSegment(segmentCopy)));
            List<Address> writeOwners = writeOwnersMap.computeIfAbsent(primary, p ->
                  Immutables.immutableListCopy(writeCH.locateOwnersForSegment(segmentCopy)));
            List<Address> writeBackups = writeOwners.subList(1, writeOwners.size());
            this.distributionInfos[segmentId] =
                  new DistributionInfo(segmentId, primary, readOwners, writeOwners, writeBackups, localAddress);
         }
         this.maxOwners = cacheTopology.getMembers().size();
         // All reads are local only if this node appears as an owner.
         this.allLocal = readOwnersMap.containsKey(localAddress);
      } else {
         assert cacheMode == CacheMode.LOCAL;
         // Local mode: single segment, this node owns everything.
         this.numSegments = 1;
         List<Address> owners = Collections.singletonList(localAddress);
         List<Address> writeBackups = Collections.emptyList();
         this.distributionInfos = new DistributionInfo[]{
               new DistributionInfo(0, localAddress, owners, owners, writeBackups, localAddress)
         };
         this.maxOwners = 1;
         this.allLocal = true;
      }
   }
   // Single-node, multi-segment topology (see makeSegmentedSingletonTopology).
   private LocalizedCacheTopology(KeyPartitioner keyPartitioner, int numSegments, Address localAddress) {
      super(-1, -1, null, null, null, Collections.singletonList(localAddress), null);
      this.localAddress = localAddress;
      this.numSegments = numSegments;
      this.keyPartitioner = keyPartitioner;
      this.membersSet = Collections.singleton(localAddress);
      this.isDistributed = false;
      // Reads and writes are local, only the invalidation is replicated
      List<Address> owners = Collections.singletonList(localAddress);
      this.distributionInfos = new DistributionInfo[numSegments];
      for (int i = 0; i < distributionInfos.length; ++i) {
         distributionInfos[i] = new DistributionInfo(i, localAddress, owners, owners, Collections.emptyList(), localAddress);
      }
      this.maxOwners = 1;
      this.allLocal = true;
   }
   /**
    * @return {@code true} iff key {@code key} can be read without going remote.
    */
   public boolean isReadOwner(Object key) {
      if (allLocal)
         return true;
      int segmentId = keyPartitioner.getSegment(key);
      return distributionInfos[segmentId].isReadOwner();
   }
   /**
    * @return {@code true} iff segment {@code segment} can be read without going remote.
    */
   public boolean isSegmentReadOwner(int segment) {
      return allLocal || distributionInfos[segment].isReadOwner();
   }
   /**
    * @return {@code true} iff writing a value for key {@code key} will update it on the local node.
    */
   public boolean isWriteOwner(Object key) {
      if (allLocal)
         return true;
      int segmentId = keyPartitioner.getSegment(key);
      return distributionInfos[segmentId].isWriteOwner();
   }
   /**
    * @return {@code true} iff the local node is a write owner of segment {@code segment}.
    */
   public boolean isSegmentWriteOwner(int segment) {
      return allLocal || distributionInfos[segment].isWriteOwner();
   }
   /**
    * @return The consistent hash segment of key {@code key}
    */
   public int getSegment(Object key) {
      return keyPartitioner.getSegment(key);
   }
   /**
    * @return Information about the ownership of segment {@code segment}, including the primary owner.
    * @deprecated since 9.3 please use {@link #getSegmentDistribution(int)} instead.
    */
   @Deprecated
   public DistributionInfo getDistributionForSegment(int segmentId) {
      return getSegmentDistribution(segmentId);
   }
   /**
    * @return Information about the ownership of segment {@code segmentId}, including the primary owner.
    */
   public DistributionInfo getSegmentDistribution(int segmentId) {
      return distributionInfos[segmentId];
   }
   /**
    * @return Information about the ownership of key {@code key}, including the primary owner.
    */
   public DistributionInfo getDistribution(Object key) {
      int segmentId = keyPartitioner.getSegment(key);
      return distributionInfos[segmentId];
   }
   /**
    * @return An unordered collection with the write owners of {@code key}.
    */
   public Collection<Address> getWriteOwners(Object key) {
      // Non-distributed modes share the same owners for every segment, so segment 0 suffices.
      int segmentId = isDistributed ? keyPartitioner.getSegment(key) : 0;
      return distributionInfos[segmentId].writeOwners();
   }
   /**
    * @return An unordered collection with the write owners of {@code keys}.
    */
   public Collection<Address> getWriteOwners(Collection<?> keys) {
      if (keys.isEmpty()) {
         return Collections.emptySet();
      }
      if (isDistributed) {
         if (keys.size() == 1) {
            Object singleKey = keys.iterator().next();
            return getDistribution(singleKey).writeOwners();
         } else {
            IntSet segments = IntSets.mutableEmptySet(numSegments);
            // Expecting some overlap between keys
            Set<Address> owners = new HashSet<>(2 * maxOwners);
            for (Object key : keys) {
               int segment = keyPartitioner.getSegment(key);
               // Only collect owners once per distinct segment.
               if (segments.add(segment)) {
                  owners.addAll(getSegmentDistribution(segment).writeOwners());
               }
            }
            return owners;
         }
      } else {
         return getSegmentDistribution(0).writeOwners();
      }
   }
   /**
    * @return The segments owned by the local node for reading.
    */
   public IntSet getLocalReadSegments() {
      if (isDistributed) {
         IntSet localSegments = IntSets.mutableEmptySet(numSegments);
         for (int segment = 0; segment < numSegments; segment++) {
            if (distributionInfos[segment].isReadOwner()) {
               localSegments.set(segment);
            }
         }
         return localSegments;
      } else if (allLocal) {
         return IntSets.immutableRangeSet(numSegments);
      } else {
         return IntSets.immutableEmptySet();
      }
   }
   /**
    * @return The segments owned by the local node for writing.
    */
   public IntSet getLocalWriteSegments() {
      if (isDistributed) {
         IntSet localSegments = IntSets.mutableEmptySet(numSegments);
         for (int segmentId = 0; segmentId < numSegments; segmentId++) {
            if (distributionInfos[segmentId].isWriteOwner()) {
               localSegments.set(segmentId);
            }
         }
         return localSegments;
      } else if (allLocal) {
         return IntSets.immutableRangeSet(numSegments);
      } else {
         return IntSets.immutableEmptySet();
      }
   }
   /**
    * @return The segments owned by the local node as primary owner.
    */
   public IntSet getLocalPrimarySegments() {
      if (membersSet.size() > 1) {
         IntSet localSegments = IntSets.mutableEmptySet(numSegments);
         for (int segment = 0; segment < numSegments; segment++) {
            if (distributionInfos[segment].isPrimary()) {
               localSegments.set(segment);
            }
         }
         return localSegments;
      } else {
         // Single member: this node is primary for every segment.
         return IntSets.immutableRangeSet(numSegments);
      }
   }
   /**
    * @return The number of segments owned by the local node for writing.
    */
   public int getLocalWriteSegmentsCount() {
      if (isDistributed) {
         int count = 0;
         for (int segment = 0; segment < numSegments; segment++) {
            if (distributionInfos[segment].isWriteOwner()) {
               count++;
            }
         }
         return count;
      } else if (allLocal) {
         return numSegments;
      } else {
         return 0;
      }
   }
   /**
    * @return The address of the local node.
    */
   public Address getLocalAddress() {
      return localAddress;
   }
   /**
    * @return An immutable set with the topology members.
    */
   public Set<Address> getMembersSet() {
      return membersSet;
   }
   /**
    * @return {@code true} if the local node received this topology from the coordinator,
    * {@code false} otherwise (e.g. during preload).
    */
   public boolean isConnected() {
      return connected;
   }
   /**
    * @return The number of consistent hash segments.
    */
   public int getNumSegments() {
      return numSegments;
   }
}
| 12,812
| 37.133929
| 129
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/package-info.java
|
/**
* Classes relating to the distributed cache mode.
*
* @api.public
*/
package org.infinispan.distribution;
| 114
| 15.428571
| 50
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/TriangleOrderManager.java
|
package org.infinispan.distribution;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
/**
 * It manages the order of updates from the primary owner to backup owner.
 * <p>
 * It depends on the cache topology id. The primary owner assigns the sequence number to the backup command and then
 * sends it to the backup owner. In the backup owner, the command awaits until it is its turn to be executed.
 * <p>
 * If the command topology id does not match, it throws an {@link OutdatedTopologyException}.
 * <p>
 * The sequence order starts with 1 and it is per segment based. This allows segments to be updated concurrently.
 *
 * @author Pedro Ruivo
 * @since 9.0
 */
@Scope(Scopes.NAMED_CACHE)
public class TriangleOrderManager {
   private static final Log log = LogFactory.getLog(TriangleOrderManager.class);
   // One sequencer per segment so that different segments never serialize on each other.
   private final TriangleSequencer[] sequencers;
   @Inject DistributionManager distributionManager;

   public TriangleOrderManager(int segments) {
      TriangleSequencer[] triangleSequencers = new TriangleSequencer[segments];
      for (int segment = 0; segment < segments; ++segment) {
         triangleSequencers[segment] = new TriangleSequencer(segment);
      }
      sequencers = triangleSequencers;
   }

   /**
    * Assigns the next sequence number for {@code segmentId} in topology {@code commandTopologyId}.
    *
    * @throws OutdatedTopologyException if the installed topology differs from {@code commandTopologyId}
    *                                   before or after the sequence number is assigned
    */
   public long next(int segmentId, final int commandTopologyId) {
      checkTopologyId(commandTopologyId);
      try {
         return getNext(segmentId, commandTopologyId);
      } finally {
         //check if topology didn't change in the meanwhile
         checkTopologyId(commandTopologyId);
      }
   }

   /**
    * @return {@code true} if {@code sequenceNumber} is the next one expected for the segment
    *         (or the command topology is already outdated and will fail elsewhere).
    */
   public boolean isNext(int segmentId, long sequenceNumber, int commandTopologyId) {
      final int topologyId = distributionManager.getCacheTopology().getTopologyId();
      return commandTopologyId < topologyId ||
            (commandTopologyId == topologyId && checkIfNext(segmentId, commandTopologyId, sequenceNumber));
   }

   /**
    * Marks {@code sequenceNumber} as delivered so the next command for the segment can proceed.
    */
   public void markDelivered(int segmentId, long sequenceNumber, int commandTopologyId) {
      sequencers[segmentId].deliver(commandTopologyId, sequenceNumber);
   }

   /**
    * Meant for testing only.
    *
    * @return The latest sequence number sent for segment {@code segmentId} in topology {@code topologyId}.
    */
   public long latestSent(int segmentId, int topologyId) {
      return sequencers[segmentId].latestSent(topologyId);
   }

   private long getNext(int segmentId, int topologyId) {
      return sequencers[segmentId].next(topologyId);
   }

   private boolean checkIfNext(int segmentId, int topologyId, long sequenceNumber) {
      return sequencers[segmentId].isNext(topologyId, sequenceNumber);
   }

   private void checkTopologyId(int topologyId) {
      if (topologyId != distributionManager.getCacheTopology().getTopologyId()) {
         throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
      }
   }

   /**
    * Per-segment sequencer tracking the sender and receiver side sequence counters.
    * All state is guarded by the instance monitor.
    */
   private static class TriangleSequencer {
      private final int segment;
      @GuardedBy("this")
      private int senderTopologyId = -1;
      @GuardedBy("this")
      private int receiverTopologyId = -1;
      @GuardedBy("this")
      private long senderSequenceNumber = 1;
      @GuardedBy("this")
      private long receiverSequenceNumber = 1;

      private TriangleSequencer(int segment) {
         this.segment = segment;
      }

      private synchronized long next(int commandTopologyId) {
         if (senderTopologyId == commandTopologyId) {
            if (log.isTraceEnabled()) {
               log.tracef("Sender %d new sequence %d:%d", segment, senderTopologyId, senderSequenceNumber);
            }
            return senderSequenceNumber++;
         } else if (senderTopologyId < commandTopologyId) {
            if (log.isTraceEnabled()) {
               // Fixed argument order: the new topology is commandTopologyId and the
               // previous one is senderTopologyId (still unmodified at this point).
               log.tracef("Sender %d new sequence %d:1 (changed topology from %d)",
                     segment, commandTopologyId, senderTopologyId);
            }
            //update topology. this command will be the first
            senderTopologyId = commandTopologyId;
            senderSequenceNumber = 2;
            return 1;
         } else {
            if (log.isTraceEnabled()) {
               log.tracef("Sender %d retrying because of outdated topology: %d < %d",
                     segment, commandTopologyId, senderTopologyId);
            }
            //this topology is higher than the command topology id.
            //another topology was installed. this command will fail with OutdatedTopologyException.
            throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
         }
      }

      private synchronized long latestSent(int topologyId) {
         if (topologyId < senderTopologyId)
            throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
         if (senderTopologyId < topologyId)
            return 0;
         return senderSequenceNumber - 1;
      }

      private synchronized void deliver(int commandTopologyId, long sequenceNumber) {
         // Only advance when the delivered command is exactly the one we expect.
         if (receiverTopologyId == commandTopologyId && receiverSequenceNumber == sequenceNumber) {
            receiverSequenceNumber++;
            if (log.isTraceEnabled()) {
               log.tracef("Receiver %d delivered sequence %d:%d", segment, commandTopologyId, sequenceNumber);
            }
         }
      }

      private synchronized boolean isNext(int commandTopologyId, long sequenceNumber) {
         if (log.isTraceEnabled()) {
            log.tracef("Receiver %d checking sequence %d:%d, current sequence is %d:%d",
                  segment, commandTopologyId, sequenceNumber, receiverTopologyId, receiverSequenceNumber);
         }
         if (receiverTopologyId == commandTopologyId) {
            return receiverSequenceNumber == sequenceNumber;
         } else if (receiverTopologyId < commandTopologyId) {
            // First command seen in the newer topology: reset the expected sequence.
            receiverTopologyId = commandTopologyId;
            receiverSequenceNumber = 1;
            return 1 == sequenceNumber;
         } else {
            //this topology is higher than the command topology id.
            //another topology was installed. this command will fail with OutdatedTopologyException.
            return true;
         }
      }
   }
}
| 6,369
| 38.320988
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/L1Manager.java
|
package org.infinispan.distribution;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.distribution.L1WriteSynchronizer;
import org.infinispan.remoting.transport.Address;
/**
* Manages the L1 cache, in particular recording anyone who is going to cache
* a command's result that a node responds to, so that a unicast invalidation can be sent
* later if needed.
*
* @author Pete Muir
*
*/
@Scope(Scopes.NAMED_CACHE)
public interface L1Manager {
/**
 * Records a request that will be cached in another node's L1.
 *
 * @param key the key that the remote node requested
 * @param requestor the address of the node that may cache the key in its L1
 */
void addRequestor(Object key, Address requestor);
/**
 * Sends invalidations for the given keys to the nodes previously recorded as requestors.
 *
 * @param key the keys to invalidate (note: a collection despite the singular name)
 * @param origin the node that originated the write and should be skipped or treated specially
 * @param assumeOriginKeptEntryInL1 whether the origin is assumed to still hold the entry in its L1
 * @return a future completed when the invalidations have been processed
 */
CompletableFuture<?> flushCache(Collection<Object> key, Address origin, boolean assumeOriginKeptEntryInL1);
/**
 * Registers the given write synchronizer to be notified whenever a remote value is looked up for the given key.
 * If the synchronizer is no longer needed to be signaled, the user should unregister it using
 * {@link L1Manager#unregisterL1WriteSynchronizer(Object, org.infinispan.interceptors.distribution.L1WriteSynchronizer)}
 * @param key The key that when looked up will trigger the synchronizer
 * @param sync The synchronizer to run the update when the key is looked up
 */
void registerL1WriteSynchronizer(Object key, L1WriteSynchronizer sync);
/**
 * Unregister the given write synchronizer if present. Note the synchronizer is only unregistered if it matches
 * using instance equality (==) due to possibly concurrent usage of write synchronizers
 * @param key The key to unregister the given synchronizer for.
 * @param sync The synchronizer to be removed if it is still present.
 */
void unregisterL1WriteSynchronizer(Object key, L1WriteSynchronizer sync);
}
| 1,872
| 39.717391
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/Ownership.java
|
package org.infinispan.distribution;
public enum Ownership {
/**
 * This node is not an owner.
 */
NON_OWNER,
/**
 * This node is the primary owner.
 */
PRIMARY,
/**
 * This node is a backup owner (an owner that is not the primary).
 */
BACKUP
}
| 250
| 13.764706
| 37
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/DistributionInfo.java
|
package org.infinispan.distribution;
import java.util.Collection;
import java.util.List;
import org.infinispan.remoting.transport.Address;
/**
* @author Radim Vansa
* @author Dan Berindei
* @since 9.0
*/
public class DistributionInfo {
   private final int segmentId;
   // The write CH always includes the read CH, and the primary owner is always in the read CH
   private final Address primary;
   private final List<Address> readOwners;
   private final List<Address> writeOwners;
   private final Collection<Address> writeBackups;
   private final boolean isPrimary;
   private final boolean isReadOwner;
   private final boolean isWriteOwner;
   private final boolean isWriteBackup;

   /**
    * Captures the ownership of one segment, pre-computing this node's roles
    * (primary / read owner / write owner / write backup) against {@code localAddress}.
    */
   public DistributionInfo(int segmentId, Address primary, List<Address> readOwners, List<Address> writeOwners,
                           Collection<Address> writeBackups, Address localAddress) {
      this.segmentId = segmentId;
      this.primary = primary;
      this.readOwners = readOwners;
      this.writeOwners = writeOwners;
      this.writeBackups = writeBackups;
      boolean localIsPrimary = primary != null && primary.equals(localAddress);
      boolean localWrites = writeOwners.contains(localAddress);
      this.isPrimary = localIsPrimary;
      this.isReadOwner = readOwners.contains(localAddress);
      this.isWriteOwner = localWrites;
      this.isWriteBackup = localWrites && !localIsPrimary;
   }

   public int segmentId() {
      return segmentId;
   }

   public Address primary() {
      return primary;
   }

   public List<Address> readOwners() {
      return readOwners;
   }

   public List<Address> writeOwners() {
      return writeOwners;
   }

   public Collection<Address> writeBackups() {
      return writeBackups;
   }

   public boolean isPrimary() {
      return isPrimary;
   }

   public boolean isReadOwner() {
      return isReadOwner;
   }

   public boolean isWriteOwner() {
      return isWriteOwner;
   }

   public boolean isWriteBackup() {
      return isWriteBackup;
   }

   /** This node's role for reads on this segment. */
   public Ownership readOwnership() {
      if (isPrimary) {
         return Ownership.PRIMARY;
      }
      return isReadOwner ? Ownership.BACKUP : Ownership.NON_OWNER;
   }

   /** This node's role for writes on this segment. */
   public Ownership writeOwnership() {
      if (isPrimary) {
         return Ownership.PRIMARY;
      }
      return isWriteOwner ? Ownership.BACKUP : Ownership.NON_OWNER;
   }

   @Override
   public String toString() {
      final String localRole;
      if (isPrimary) {
         localRole = "primary, ";
      } else if (isReadOwner) {
         localRole = "read+write, ";
      } else if (isWriteBackup) {
         localRole = "write-only, ";
      } else {
         localRole = "";
      }
      return "DistributionInfo{" + localRole +
            "segmentId=" + segmentId +
            ", readOwners=" + readOwners +
            ", writeOwners=" + writeOwners +
            '}';
   }
}
| 2,582
| 26.189474
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/DistributionManager.java
|
package org.infinispan.distribution;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.topology.CacheTopology;
/**
* A component that manages the distribution of elements across a cache cluster
*
* @author Manik Surtani
* @author Mircea.Markus@jboss.com
* @author Vladimir Blagojevic
* @author anistor@redhat.com
* @since 4.0
*/
@Scope(Scopes.NAMED_CACHE)
public interface DistributionManager {
/**
 * @return the consistent hash used for reading.
 * @deprecated Since 11.0, to be removed in 14.0. Please use {@link #getCacheTopology()} instead.
 */
@Deprecated
ConsistentHash getReadConsistentHash();
/**
 * @return the consistent hash used for writing.
 * @deprecated Since 11.0, to be removed in 14.0. Please use {@link #getCacheTopology()} instead.
 */
@Deprecated
ConsistentHash getWriteConsistentHash();
/**
 * Tests whether a given key is affected by a rehash that may be in progress. If no rehash is in progress, this method
 * returns false. Helps determine whether additional steps are necessary in handling an operation with a given key.
 *
 * @param key key to test
 * @return whether a key is affected by a rehash
 */
boolean isAffectedByRehash(Object key);
/**
 * Tests whether a rehash is in progress
 * @return true if a rehash is in progress, false otherwise
 */
boolean isRehashInProgress();
/**
 * Tests whether the current instance has completed joining the cluster
 * @return true if join is in progress, false otherwise
 */
boolean isJoinComplete();
/**
 * @return the current cache topology, which includes the read and write consistent hashes.
 */
LocalizedCacheTopology getCacheTopology();
/**
 * Installs a new cache topology on this node.
 *
 * @deprecated Internal only.
 */
@Deprecated
void setCacheTopology(CacheTopology cacheTopology);
/**
 * Wraps the given topology with information localized to this node (segments, ownership).
 */
LocalizedCacheTopology createLocalizedCacheTopology(CacheTopology cacheTopology);
}
| 2,060
| 29.308824
| 122
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/RemoteValueRetrievedListener.java
|
package org.infinispan.distribution;
import org.infinispan.container.entries.InternalCacheEntry;
/**
* Listener that is notified when a remote value is looked up
*
* @author William Burns
* @since 6.0
*/
public interface RemoteValueRetrievedListener {
/**
 * Invoked when a remote value is found from a remote source
 * @param ice The cache entry that was found
 */
void remoteValueFound(InternalCacheEntry ice);
/**
 * Invoked when a remote value is not found from the remote source for the given key
 * @param key The key for which there was no value found
 */
void remoteValueNotFound(Object key);
}
| 641
| 25.75
| 87
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/DataLocality.java
|
package org.infinispan.distribution;
/**
* Used to determine whether a key is mapped to a local node. Uncertainty indicates a rehash is in progress and the
* locality of key in question may be in flux.
*
* @author Manik Surtani
* @author Mircea Markus
* @since 4.2.1
* @deprecated Since 11.0. Will be removed in 14.0, no longer used.
*/
public enum DataLocality {
   LOCAL(true, false),
   NOT_LOCAL(false, false),
   LOCAL_UNCERTAIN(true, true),
   NOT_LOCAL_UNCERTAIN(false, true);

   // Whether the key maps to this node.
   private final boolean local;
   // Whether a rehash is in progress, so the locality answer may change.
   private final boolean uncertain;

   DataLocality(boolean localFlag, boolean uncertainFlag) {
      this.local = localFlag;
      this.uncertain = uncertainFlag;
   }

   /** @return {@code true} if the key is (believed to be) local to this node. */
   public boolean isLocal() {
      return local;
   }

   /** @return {@code true} if the locality may change due to an in-progress rehash. */
   public boolean isUncertain() {
      return uncertain;
   }
}
| 777
| 20.611111
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/package-info.java
|
/**
* <p>
* Groups allow keys with differing hash codes to be co-located on the same node.
* </p>
*
* <p>
* Infinispan offers support for both instrinsic grouping ( see{@link org.infinispan.distribution.group.Group})
* and extrinsic grouping (see {@link org.infinispan.distribution.group.Grouper}).
* </p>
*
* @api.public
*/
package org.infinispan.distribution.group;
| 380
| 26.214286
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/Group.java
|
package org.infinispan.distribution.group;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
/**
* <p>
* Identifies the key for a group.
* </p>
*
* <p>
* <code>@Group</code> should be used when you have control over the key class. For example:
* </p>
*
* <pre>
* class User {
*
* ...
* String office;
* ...
*
* int hashCode() {
* // Defines the hash for the key, normally used to determine location
* ...
* }
*
* // Override the location by specifying a group, all keys in the same
* // group end up with the same owner
* @Group
* String getOffice() {
* return office;
* }
*
* }
* </pre>
*
* <p>
* If you don't have control over the key class, you can specify a {@link Grouper} (in your configuration) which can be used to
* specify the group externally.
* </p>
*
* <p>
* You must set the <code>groupsEnabled</code> property to true in your configuration in order to use groups.
* </p>
*
* @see Grouper
*
* @author Pete Muir
*
*/
// Marker annotation with no members: the annotated zero-argument method's return value
// supplies the group key for the object (see GroupManagerImpl, which rejects @Group
// methods that take parameters).
@Target(METHOD)
@Retention(RUNTIME)
public @interface Group {
}
| 1,229
| 19.847458
| 127
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/Grouper.java
|
package org.infinispan.distribution.group;
/**
* <p>
* User applications may implement this interface in order to customize the computation of groups in cases when modifying the
* key is not possible, or when the value determined by the {@link Group} annotation needs customizing.
* </p>
*
* <p>
* <code>Grouper</code> acts as an interceptor, passing the previously computed value in. The group passed to the first
* <code>Grouper</code> will be that determined by <code>@Group</code> (if <code>@Group</code> is defined).
* </p>
*
* <p>
* For example:
* </p>
*
* <pre>
* public class KXGrouper implements Grouper<String> {
*
* // A pattern that can extract from a "kX" (e.g. k1, k2) style key
* private static Pattern kPattern = Pattern.compile("(ˆk)(\\d)$");
*
* public String computeGroup(String key, String group) {
* Matcher matcher = kPattern.matcher(key);
* if (matcher.matches()) {
* String g = Integer.parseInt(matcher.group(2)) % 2 + "";
* return g;
* } else
* return null;
* }
*
* public Class<String> getKeyType() {
* return String.class;
* }
*
* }
* </pre>
*
* <p>
* You must set the
* <code>groupsEnabled<code> property to true in your configuration in order to use groups. You can specify an order list of groupers there.
* </p>
*
* @see Group
*
* @author Pete Muir
*
* @param <T>
*/
public interface Grouper<T> {
/**
 * Compute the group for a given key
 *
 * @param key the key to compute the group for
 * @param group the group as currently computed, or null if no group has been determined yet
 * @return the group, or null if no group is defined
 */
default Object computeGroup(T key, Object group) {
   // NOTE(review): String.valueOf(null) produces the literal string "null", so an
   // undetermined group reaches the deprecated String overload as "null" rather than
   // null — confirm implementations of the deprecated method expect this.
   return computeGroup(key, String.valueOf(group));
}
/**
 * Legacy String-typed variant; implementations should override the Object-typed
 * {@link #computeGroup(Object, Object)} instead.
 *
 * @deprecated Since 9.1
 */
@Deprecated
default String computeGroup(T key, String group) {
   throw new UnsupportedOperationException("This operation should not be invoked.");
}
/** @return the key type this grouper applies to (checked via isAssignableFrom). */
Class<T> getKeyType();
}
| 2,149
| 27.666667
| 140
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/impl/GroupManagerFactory.java
|
package org.infinispan.distribution.group.impl;
import org.infinispan.factories.AbstractNamedCacheComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
@Scope(Scopes.NAMED_CACHE)
@DefaultFactoryFor(classes = GroupManager.class)
public class GroupManagerFactory extends AbstractNamedCacheComponentFactory implements AutoInstantiableFactory {
   /**
    * Builds the {@link GroupManager} component for this cache, or returns {@code null}
    * when grouping is disabled in the cache configuration.
    */
   @Override
   public Object construct(String componentName) {
      boolean groupingEnabled = configuration.clustering().hash().groups().enabled();
      if (!groupingEnabled) {
         return null;
      }
      return new GroupManagerImpl(configuration);
   }
}
| 729
| 33.761905
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/impl/GroupManager.java
|
package org.infinispan.distribution.group.impl;
import java.util.Map;
import org.infinispan.CacheStream;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
/**
* Control's key grouping.
*
* @author Pete Muir
*/
public interface GroupManager {
/**
 * Get the group for a given key
 *
 * @param key the key for which to get the group
 * @return the group, or null if no group is defined for the key
 */
Object getGroup(Object key);
/**
 * Collects all entries belonging to a single group.
 * <p>
 * This method receives a {@link CacheStream} and it must filter the {@link CacheEntry} that belongs to the group.
 * <p>
 * If the cache is transactional, the entries must be stored in the {@link InvocationContext} (with proper read
 * version if applicable).
 *
 * @param stream The {@link CacheStream} of {@link CacheEntry} to filter.
 * @param ctx The {@link InvocationContext} to use during its invocation.
 * @param groupName The group name to collect.
 * @param <K> The key type.
 * @param <V> The value type.
 * @return A {@link Map} with keys and value.
 */
<K, V> Map<K, V> collect(CacheStream<? extends CacheEntry<K, V>> stream, InvocationContext ctx, String groupName);
}
| 1,327
| 31.390244
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/impl/GroupFilter.java
|
package org.infinispan.distribution.group.impl;
import java.util.function.Predicate;
/**
* A key filter that accepts keys which belongs to the group.
*
* @author Pedro Ruivo
* @since 7.0
*/
public class GroupFilter<K> implements Predicate<K> {
   private final Object groupName;
   private final GroupManager groupManager;

   public GroupFilter(Object groupName, GroupManager groupManager) {
      this.groupName = groupName;
      this.groupManager = groupManager;
   }

   /**
    * Accepts the key only when its computed group is non-null and equals the
    * configured group name.
    */
   @Override
   public boolean test(K key) {
      Object computedGroup = groupManager.getGroup(key);
      if (computedGroup == null) {
         return false;
      }
      return computedGroup.equals(groupName);
   }
}
| 646
| 22.962963
| 68
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/impl/CacheEntryGroupPredicate.java
|
package org.infinispan.distribution.group.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import java.util.function.Predicate;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Tests if a {@link CacheEntry} belongs to a group.
*
* @since 14.0
*/
public class CacheEntryGroupPredicate<K> implements Predicate<CacheEntry<K, ?>>, InjectableComponent {
private static final Log log = LogFactory.getLog(CacheEntryGroupPredicate.class);
@SuppressWarnings("rawtypes")
public static final AbstractExternalizer<CacheEntryGroupPredicate> EXTERNALIZER = new Externalizer();
// Injected after deserialization via inject(); not part of the serialized state.
private GroupManager groupManager;
private final String groupName;
public CacheEntryGroupPredicate(String groupName) {
   this.groupName = groupName;
}
// Returns true when the entry's key belongs to this predicate's group.
// NOTE(review): a null group from GroupManager becomes the string "null" here
// (String.valueOf), so a groupName of "null" would match ungrouped keys — confirm intended.
@Override
public boolean test(CacheEntry<K, ?> entry) {
   String keyGroup = String.valueOf(groupManager.getGroup(entry.getKey()));
   boolean sameGroup = Objects.equals(groupName, keyGroup);
   if (log.isTraceEnabled()) {
      log.tracef("Testing key %s for group %s. Same group? %s", entry.getKey(), groupName, sameGroup);
   }
   return sameGroup;
}
@Override
public void inject(ComponentRegistry registry) {
   groupManager = registry.getComponent(GroupManager.class);
}
@Override
public String toString() {
   return "CacheEntryGroupPredicate{" +
         "groupName='" + groupName + '\'' +
         '}';
}
// Externalizer: only groupName is marshalled; groupManager is re-injected on the receiver.
@SuppressWarnings("rawtypes")
private static final class Externalizer extends AbstractExternalizer<CacheEntryGroupPredicate> {
   @Override
   public Set<Class<? extends CacheEntryGroupPredicate>> getTypeClasses() {
      return Collections.singleton(CacheEntryGroupPredicate.class);
   }
   @Override
   public Integer getId() {
      return Ids.CACHE_ENTRY_GROUP_PREDICATE;
   }
   @Override
   public void writeObject(ObjectOutput output, CacheEntryGroupPredicate object) throws IOException {
      output.writeUTF(object.groupName);
   }
   @Override
   public CacheEntryGroupPredicate readObject(ObjectInput input) throws IOException {
      return new CacheEntryGroupPredicate(input.readUTF());
   }
}
}
| 2,641
| 30.082353
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/impl/GroupManagerImpl.java
|
package org.infinispan.distribution.group.impl;
import static org.infinispan.commons.util.ReflectionUtil.invokeMethod;
import static org.infinispan.transaction.impl.WriteSkewHelper.addVersionRead;
import java.lang.reflect.Method;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.ReflectionUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.impl.EntryFactory;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.group.Group;
import org.infinispan.distribution.group.Grouper;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
@Scope(Scopes.NAMED_CACHE)
public class GroupManagerImpl implements GroupManager {
private static final Log log = LogFactory.getLog(GroupManagerImpl.class);
@Inject
ComponentRegistry componentRegistry;
@Inject
ComponentRef<EntryFactory> entryFactory;
@Inject
ComponentRef<VersionGenerator> versionGenerator;
// Cache of per-key-class @Group reflection lookups (see getMetadata).
private final ConcurrentMap<Class<?>, GroupMetadata> groupMetadataCache;
// Extrinsic groupers from the configuration; applied after the intrinsic @Group value.
private final List<Grouper<?>> groupers;
public GroupManagerImpl(Configuration configuration) {
   groupMetadataCache = new ConcurrentHashMap<>();
   if (configuration.clustering().hash().groups().groupers() != null) {
      groupers = configuration.clustering().hash().groups().groupers();
   } else {
      groupers = Collections.emptyList();
   }
}
// Resolves the group: intrinsic @Group method first (if any), then each configured Grouper.
@Override
public Object getGroup(Object key) {
   GroupMetadata metadata = getMetadata(key);
   if (metadata != null) {
      return applyGroupers(metadata.getGroup(key), key);
   } else
      return applyGroupers(null, key);
}
// Restricts the stream to the group's single segment, then filters entries by group.
// Transactional contexts additionally get the collected entries wrapped into the context.
@Override
public <K, V> Map<K, V> collect(CacheStream<? extends CacheEntry<K, V>> stream, InvocationContext ctx, String groupName) {
   CacheEntryGroupPredicate<K> predicate = new CacheEntryGroupPredicate<>(groupName);
   predicate.inject(componentRegistry);
   List<CacheEntry<K, V>> list = stream.filterKeySegments(IntSets.immutableSet(groupSegment(groupName)))
         .filter(predicate)
         .collect(Collectors::toList);
   return ctx.isInTxScope() ? handleTxGetGroup((TxInvocationContext<?>) ctx, list, groupName) : handleNoTxGetGroup(list, groupName);
}
// Non-transactional path: just copy the entries into a result map.
private <V, K> Map<K, V> handleNoTxGetGroup(List<? extends CacheEntry<K, V>> entries, String groupName) {
   boolean trace = log.isTraceEnabled();
   Map<K, V> group = new HashMap<>();
   entries.forEach(e -> {
      if (trace) {
         log.tracef("Found entry belonging to group %s: %s", groupName, e);
      }
      group.put(e.getKey(), e.getValue());
   });
   return group;
}
// Transactional path: wrap each not-yet-seen entry into the invocation context and record
// its read version before returning it, so write-skew checks see the reads.
// NOTE(review): synchronizes on ctx — presumably to guard concurrent context mutation; confirm.
private <V, K> Map<K, V> handleTxGetGroup(TxInvocationContext<?> ctx, List<? extends CacheEntry<K, V>> entries, String groupName) {
   boolean trace = log.isTraceEnabled();
   synchronized (ctx) {
      Map<K, V> group = new HashMap<>();
      entries.forEach(e -> {
         if (ctx.lookupEntry(e.getKey()) == null) {
            entryFactory.running().wrapExternalEntry(ctx, e.getKey(), e, true, false);
            addVersionRead(ctx, e, e.getKey(), versionGenerator.running(), log);
         }
         if (trace) {
            log.tracef("Found entry belonging to group %s: %s", groupName, e);
         }
         group.put(e.getKey(), e.getValue());
      });
      return group;
   }
}
// Maps the group name itself to a segment. Unwraps GroupingPartitioner so the raw
// partitioner is used on the group name directly (avoiding re-running group resolution).
private int groupSegment(String groupName) {
   KeyPartitioner keyPartitioner = componentRegistry.getComponent(KeyPartitioner.class);
   if (keyPartitioner instanceof GroupingPartitioner) {
      return ((GroupingPartitioner) keyPartitioner).unwrap().getSegment(groupName);
   } else {
      return keyPartitioner.getSegment(groupName);
   }
}
@FunctionalInterface
private interface GroupMetadata {
   // Sentinel for "class has no @Group method": always yields null.
   GroupMetadata NONE = instance -> null;
   Object getGroup(Object instance);
}
private static class GroupMetadataImpl implements GroupMetadata {
   private final Method method;
   GroupMetadataImpl(Method method) {
      if (method.getParameterCount() > 0)
         throw new IllegalArgumentException(Util.formatString("@Group method %s must have zero arguments", method));
      this.method = method;
   }
   @Override
   public Object getGroup(Object instance) {
      // NOTE(review): setAccessible(true) is invoked on every call; it could be done once
      // in the constructor instead — left unchanged here.
      method.setAccessible(true);
      return invokeMethod(instance, method, Util.EMPTY_OBJECT_ARRAY);
   }
}
// Scans the class hierarchy for exactly one @Group-annotated method.
private static GroupMetadata createGroupMetadata(Class<?> clazz) {
   Collection<Method> possibleMethods = ReflectionUtil.getAllMethods(clazz, Group.class);
   if (possibleMethods.isEmpty())
      return GroupMetadata.NONE;
   else if (possibleMethods.size() == 1)
      return new GroupMetadataImpl(possibleMethods.iterator().next());
   else
      throw new IllegalStateException(Util.formatString("Cannot define more that one @Group method for class hierarchy rooted at %s", clazz.getName()));
}
// Chains the configured groupers; each may override the previously computed group.
private Object applyGroupers(Object group, Object key) {
   for (Grouper<?> grouper : groupers) {
      if (grouper.getKeyType().isAssignableFrom(key.getClass()))
         //noinspection unchecked
         group = ((Grouper<Object>) grouper).computeGroup(key, group);
   }
   return group;
}
private GroupMetadata getMetadata(Object key) {
   Class<?> keyClass = key.getClass();
   GroupMetadata groupMetadata = groupMetadataCache.get(keyClass);
   if (groupMetadata == null) {
      //this is not ideal as it is possible for the group metadata to be redundantly calculated several times.
      //however profiling showed that using the Map<Class,Future> cache-approach is significantly slower on
      // the long run
      groupMetadata = createGroupMetadata(keyClass);
      GroupMetadata previous = groupMetadataCache.putIfAbsent(keyClass, groupMetadata);
      if (previous != null) {
         // in case another thread added a metadata already, discard what we created and reuse the existing.
         return previous;
      }
   }
   return groupMetadata;
}
}
| 7,000
| 38.111732
| 155
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/group/impl/GroupingPartitioner.java
|
package org.infinispan.distribution.group.impl;
import org.infinispan.distribution.ch.KeyPartitioner;
/**
* Key partitioner that uses {@link org.infinispan.distribution.group.Group} annotations to map
* grouped keys to the same segment.
*
* @author Dan Berindei
* @since 8.2
*/
public class GroupingPartitioner implements KeyPartitioner {
   private final KeyPartitioner partitioner;
   private final GroupManager groupManager;

   public GroupingPartitioner(KeyPartitioner partitioner, GroupManager groupManager) {
      this.partitioner = partitioner;
      this.groupManager = groupManager;
   }

   /**
    * Maps a key to a segment; keys that resolve to a group are hashed by their
    * group instead, so all members of a group share a segment.
    */
   @Override
   public int getSegment(Object key) {
      Object group = groupManager.getGroup(key);
      if (group != null) {
         return partitioner.getSegment(group);
      }
      return partitioner.getSegment(key);
   }

   /** @return the undecorated partitioner this one delegates to. */
   public KeyPartitioner unwrap() {
      return partitioner;
   }
}
| 859
| 26.741935
| 95
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/util/ReadOnlySegmentAwareIterator.java
|
package org.infinispan.distribution.util;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
/**
* Iterator implementation that shows a read only view of the provided iterator by only
* allowing values that map to a given segment using the provided consistent hash.
* <p>
* This iterator is used with the other various SegmentAware Collections such as
* {@link ReadOnlySegmentAwareCollection}
*
* @author wburns
* @since 7.2
*/
public class ReadOnlySegmentAwareIterator<E> implements Iterator<E> {
   protected final Iterator<E> iter;
   protected final LocalizedCacheTopology topology;
   protected final IntSet allowedSegments;
   // Look-ahead: the next allowed element, or null when the underlying iterator is exhausted.
   protected E next;

   public ReadOnlySegmentAwareIterator(Iterator<E> iter, LocalizedCacheTopology topology, IntSet allowedSegments) {
      this.iter = iter;
      this.topology = topology;
      this.allowedSegments = allowedSegments;
      this.next = findNext();
   }

   /** @return whether the value maps to one of the allowed segments. */
   protected boolean valueAllowed(Object obj) {
      return allowedSegments.contains(topology.getSegment(obj));
   }

   /** Advances the underlying iterator to the next allowed element, or null if none remain. */
   protected E findNext() {
      while (iter.hasNext()) {
         E candidate = iter.next();
         if (valueAllowed(candidate)) {
            return candidate;
         }
      }
      return null;
   }

   @Override
   public boolean hasNext() {
      return next != null;
   }

   @Override
   public E next() {
      E current = next;
      if (current == null) {
         throw new NoSuchElementException();
      }
      next = findNext();
      return current;
   }

   /** This is a read-only view: removal is never supported. */
   @Override
   public void remove() {
      throw new UnsupportedOperationException("remove");
   }
}
| 1,752
| 24.405797
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/util/ReadOnlySegmentAwareEntryIterator.java
|
package org.infinispan.distribution.util;
import java.util.Iterator;
import java.util.Map.Entry;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
/**
* Iterator implementation that shows a read only view of the provided iterator by only
* allowing values that map to a given segment using the provided consistent hash.
* <p>
* This iterator is specifically used with the {@link ReadOnlySegmentAwareEntryCollection} so
* that it will properly filter out entries by their key instead of by the entry instance
*
* @author wburns
* @since 7.2
*/
public class ReadOnlySegmentAwareEntryIterator<K, V> extends ReadOnlySegmentAwareIterator<Entry<K, V>> {
   public ReadOnlySegmentAwareEntryIterator(Iterator<Entry<K, V>> iter, LocalizedCacheTopology topology, IntSet allowedSegments) {
      super(iter, topology, allowedSegments);
   }

   /**
    * Filters by the entry's key (not the entry itself); non-entries are rejected outright.
    */
   @Override
   protected boolean valueAllowed(Object obj) {
      return obj instanceof Entry && super.valueAllowed(((Entry<?, ?>) obj).getKey());
   }
}
| 1,091
| 32.090909
| 130
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/util/ReadOnlySegmentAwareMap.java
|
package org.infinispan.distribution.util;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.util.AbstractDelegatingCollection;
import org.infinispan.commons.util.AbstractDelegatingMap;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
/**
* Map implementation that shows a read only view of the provided entry by only allowing
* entries whose key maps to a given segment using the provided consistent hash.
* <p>
* Any operation that would modify this map will throw an {@link UnsupportedOperationException}
* <p>
* This map is useful when you don't want to copy an entire map but only need to see
* entries from the given segments.
* <p>
* Note many operations are not constant time when using this map. The
* {@link ReadOnlySegmentAwareMap#values} method is not supported as well. Please check
* the method you are using to see if it will perform differently than normally expected.
* @author wburns
* @since 7.2
*/
public class ReadOnlySegmentAwareMap<K, V> extends AbstractDelegatingMap<K, V> {
protected final Map<K, V> map;
protected final LocalizedCacheTopology topology;
protected final IntSet allowedSegments;
protected Set<K> segmentAwareKeySet;
protected Set<Map.Entry<K, V>> segmentAwareEntrySet;
public ReadOnlySegmentAwareMap(Map<K, V> map, LocalizedCacheTopology topology, IntSet allowedSegments) {
super();
this.map = Collections.unmodifiableMap(map);
this.topology = topology;
this.allowedSegments = allowedSegments;
}
@Override
protected Map<K, V> delegate() {
return map;
}
protected boolean keyAllowed(Object key) {
int segment = topology.getSegment(key);
return allowedSegments.contains(segment);
}
@Override
public boolean containsKey(Object key) {
if (keyAllowed(key)) {
return super.containsKey(key);
}
return false;
}
@Override
public boolean containsValue(Object value) {
for (Entry<K, V> entry : entrySet()) {
if (value.equals(entry.getValue())) {
return true;
}
}
return false;
}
@Override
public V get(Object key) {
if (keyAllowed(key)) {
return super.get(key);
}
return null;
}
@Override
public Set<java.util.Map.Entry<K, V>> entrySet() {
if (segmentAwareEntrySet == null) {
segmentAwareEntrySet = new CollectionAsSet<>(
new ReadOnlySegmentAwareEntryCollection<>(delegate().entrySet(), topology, allowedSegments));
}
return segmentAwareEntrySet;
}
/**
* Checks if the provided map is empty. This is done by iterating over all of the keys
* until it can find a key that maps to a given segment.
* <p>
* This method should always be preferred over checking the size to see if it is empty.
* <p>
* This time complexity for this method between O(1) to O(N).
*/
@Override
public boolean isEmpty() {
Set<K> keySet = keySet();
Iterator<K> iter = keySet.iterator();
return !iter.hasNext();
}
@Override
public Set<K> keySet() {
if (segmentAwareKeySet == null) {
segmentAwareKeySet = new CollectionAsSet<K>(new ReadOnlySegmentAwareCollection<>(super.keySet(), topology, allowedSegments));
}
return segmentAwareKeySet;
}
/**
* Returns the size of the read only map. This is done by iterating over all of the
* keys counting all that are in the segments.
* <p>
* If you are using this method to verify if the map is empty, you should instead use
* the {@link ReadOnlySegmentAwareEntryMap#isEmpty()} as it will perform better if the
* size is only used for this purpose.
* <p>
* This time complexity for this method is always O(N).
*/
@Override
public int size() {
Set<K> keySet = keySet();
Iterator<K> iter = keySet.iterator();
int count = 0;
while (iter.hasNext()) {
iter.next();
count++;
}
return count;
}
/**
 * NOTE: this method is not supported. Due to the nature of this map, we don't want
 * to copy the underlying value collection. Thus almost any operation will require
 * O(N) and therefore this method is not provided.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Collection<V> values() {
   throw new UnsupportedOperationException();
}
/**
 * Renders the backing map, topology and allowed segments for debugging.
 */
@Override
public String toString() {
   StringBuilder sb = new StringBuilder("ReadOnlySegmentAwareMap [map=");
   sb.append(map).append(", ch=").append(topology);
   sb.append(", allowedSegments=").append(allowedSegments).append("]");
   return sb.toString();
}
/**
 * Adapter that re-types a delegating collection as a {@link Set}.
 * <p>
 * No de-duplication is performed; this is only safe because the wrapped
 * collections are themselves views over sets.
 */
private static class CollectionAsSet<T> extends AbstractDelegatingCollection<T> implements Set<T> {
   private final AbstractDelegatingCollection<T> delegate;

   public CollectionAsSet(AbstractDelegatingCollection<T> delegate) {
      this.delegate = delegate;
   }

   @Override
   protected Collection<T> delegate() {
      return delegate;
   }
}
}
| 5,083
| 30
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/util/ReadOnlySegmentAwareCollection.java
|
package org.infinispan.distribution.util;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import org.infinispan.commons.util.AbstractDelegatingCollection;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
/**
 * Collection implementation that shows a read only view of the provided collection
 * by only allowing entries that map to a given segment using the provided
 * consistent hash.
 * <p>
 * This collection is useful when you don't want to copy an entire collection but
 * only need to see values from the given segments.
 * <p>
 * Note many operations are not constant time when using this collection. Please
 * check the method you are using to see if it will perform differently than
 * normally expected.
 *
 * @author wburns
 * @since 7.2
 */
public class ReadOnlySegmentAwareCollection<E> extends AbstractDelegatingCollection<E> {
   protected final Collection<E> set;
   protected final LocalizedCacheTopology topology;
   protected final IntSet allowedSegments;

   public ReadOnlySegmentAwareCollection(Collection<E> set, LocalizedCacheTopology topology, IntSet allowedSegments) {
      super();
      // Wrap so mutating operations fail even when invoked through the delegate
      this.set = Collections.unmodifiableCollection(set);
      this.topology = topology;
      this.allowedSegments = allowedSegments;
   }

   @Override
   protected Collection<E> delegate() {
      return set;
   }

   /**
    * Returns whether the given object maps to one of the allowed segments.
    */
   protected boolean valueAllowed(Object obj) {
      int segment = topology.getSegment(obj);
      return allowedSegments.contains(segment);
   }

   @Override
   public boolean contains(Object o) {
      if (valueAllowed(o)) {
         return super.contains(o);
      }
      return false;
   }

   @Override
   public boolean containsAll(Collection<?> c) {
      for (Object obj : c) {
         // Fix: an element outside the allowed segments is by definition not
         // contained in this view, so it must make containsAll return false.
         // Previously such elements were skipped, which could report true for
         // collections holding elements this view can never contain,
         // inconsistent with contains().
         if (!valueAllowed(obj) || !super.contains(obj)) {
            return false;
         }
      }
      return true;
   }

   /**
    * Checks if the provided set is empty. This is done by iterating over all of the values
    * until it can find a key that maps to a given segment.
    * <p>
    * This method should always be preferred over checking the size to see if it is empty.
    * <p>
    * This time complexity for this method between O(1) to O(N).
    */
   @Override
   public boolean isEmpty() {
      Iterator<E> iter = iterator();
      return !iter.hasNext();
   }

   /**
    * Returns the size of the read only set. This is done by iterating over all of the
    * values counting all that are in the segments.
    * <p>
    * If you are using this method to verify if the set is empty, you should instead use
    * the {@link ReadOnlySegmentAwareEntryCollection#isEmpty()} as it will perform better if the
    * size is only used for this purpose.
    * <p>
    * This time complexity for this method is always O(N).
    */
   @Override
   public int size() {
      Iterator<E> iter = iterator();
      int count = 0;
      while (iter.hasNext()) {
         iter.next();
         count++;
      }
      return count;
   }

   @Override
   public Iterator<E> iterator() {
      // Filtering happens in the iterator, so all traversal-based methods above
      // automatically see only allowed elements.
      return new ReadOnlySegmentAwareIterator<>(super.iterator(), topology, allowedSegments);
   }
}
| 3,167
| 29.461538
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/util/ReadOnlySegmentAwareEntryCollection.java
|
package org.infinispan.distribution.util;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.Set;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
/**
 * Read-only, segment-filtered view over a set of map entries.
 * <p>
 * Filtering is performed on the entry <i>key</i> rather than on the entry object
 * itself, which is what distinguishes this class from its parent; it is used
 * together with {@link ReadOnlySegmentAwareEntryIterator}.
 *
 * @author wburns
 * @since 7.2
 */
public class ReadOnlySegmentAwareEntryCollection<K, V> extends ReadOnlySegmentAwareCollection<Entry<K, V>> {
   public ReadOnlySegmentAwareEntryCollection(Set<Entry<K, V>> set, LocalizedCacheTopology topology, IntSet allowedSegments) {
      super(set, topology, allowedSegments);
   }

   @Override
   protected boolean valueAllowed(Object obj) {
      // Only entries qualify; they are filtered by the segment of their key.
      return obj instanceof Entry && super.valueAllowed(((Entry<?, ?>) obj).getKey());
   }

   @Override
   public Iterator<Entry<K, V>> iterator() {
      return new ReadOnlySegmentAwareEntryIterator<>(delegate().iterator(), topology,
            allowedSegments);
   }
}
| 1,285
| 31.15
| 126
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/topologyaware/TopologyInfo.java
|
package org.infinispan.distribution.topologyaware;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.TopologyAwareAddress;
/**
* This class holds the topology hierarchy of a cache's members and estimates for owned segments.
*
* @author Dan Berindei
* @since 5.2
*/
public class TopologyInfo {
private final int numSegments;
private final int numOwners;
private final Cluster cluster = new Cluster();
private final List<Rack> allRacks = new ArrayList<>();
private final List<Machine> allMachines = new ArrayList<>();
private final List<Node> allNodes = new ArrayList<>();
private final Map<Address, Node> addressMap = new HashMap<>();
/**
 * Builds the site/rack/machine/node tree from the member list and computes the
 * expected number of primary/owned segments for every location.
 *
 * @param numSegments     total number of hash-space segments
 * @param numOwners       requested owners per segment (capped at the member count)
 * @param members         cluster members; cast to TopologyAwareAddress in addNode
 * @param capacityFactors per-node capacity factors, or null for uniform capacity
 */
public TopologyInfo(int numSegments, int numOwners, Collection<Address> members,
                    Map<Address, Float> capacityFactors) {
   this.numOwners = Math.min(numOwners, members.size());
   this.numSegments = numSegments;
   // This way, all the nodes collections at the id/rack/machine levels will be sorted
   for (Address node : members) {
      // NOTE(review): assumes capacityFactors, when non-null, contains an entry for
      // every member — a missing entry would NPE on unboxing. TODO confirm callers.
      float capacityFactor = capacityFactors != null ? capacityFactors.get(node) : 1f;
      // Zero-capacity nodes hold no data, so they are excluded from the tree
      if (capacityFactor != 0f) {
         addNode(node, capacityFactor);
      }
   }
   if (cluster.totalCapacity == 0)
      throw new IllegalArgumentException("At least one node should have non-zero capacity");
   // Sort all location lists descending by capacity factor and add them to the global collections
   // splitExpectedOwnedSegments needs location lists to be sorted
   Collections.sort(cluster.sites);
   for (Site site : cluster.sites) {
      Collections.sort(site.racks);
      for (Rack rack : site.racks) {
         allRacks.add(rack);
         Collections.sort(rack.machines);
         for (Machine machine : rack.machines) {
            allMachines.add(machine);
            Collections.sort(machine.nodes);
            for (Node node : machine.nodes) {
               allNodes.add(node);
               addressMap.put(node.address, node);
            }
         }
      }
   }
   computeExpectedSegments();
}
/**
 * Returns the number of distinct locations known at the given topology level.
 *
 * @throws IllegalArgumentException if the level is not recognized
 */
public int getDistinctLocationsCount(TopologyLevel level) {
   if (level == TopologyLevel.NODE)
      return allNodes.size();
   if (level == TopologyLevel.MACHINE)
      return allMachines.size();
   if (level == TopologyLevel.RACK)
      return allRacks.size();
   if (level == TopologyLevel.SITE)
      return cluster.sites.size();
   throw new IllegalArgumentException("Unknown level: " + level);
}
/**
 * Returns the number of distinct locations, at the given level, spanned by the
 * given addresses.
 */
public int getDistinctLocationsCount(TopologyLevel level, Collection<Address> addresses) {
   Set<Object> distinct = new HashSet<>();
   for (Address address : addresses) {
      Object locationId = getLocationId(level, address);
      distinct.add(locationId);
   }
   return distinct.size();
}
/**
 * Returns whether the candidate's location (at the given level) already appears
 * among the given addresses.
 *
 * @param excludeCandidate when true, an address equal to the candidate itself
 *                         does not count as a duplicate
 */
public boolean duplicateLocation(TopologyLevel level, Collection<Address> addresses, Address candidate, boolean excludeCandidate) {
   Object candidateLocation = getLocationId(level, candidate);
   for (Address address : addresses) {
      boolean skip = excludeCandidate && address.equals(candidate);
      if (!skip && candidateLocation.equals(getLocationId(level, address))) {
         return true;
      }
   }
   return false;
}
/**
 * Returns the object identifying the location of the given address at the given
 * level (identity-compared Site/Rack/Machine/Node tree nodes).
 *
 * @throws IllegalStateException if the level is not recognized
 */
public Object getLocationId(TopologyLevel level, Address address) {
   Node node = addressMap.get(address);
   switch (level) {
      case SITE:
         return node.machine.rack.site;
      case RACK:
         return node.machine.rack;
      case MACHINE:
         return node.machine;
      case NODE:
         return node;
      default:
         throw new IllegalStateException("Unexpected value: " + level);
   }
}
/**
 * Inserts a member into the location tree under its site/rack/machine.
 * <p>
 * The address must implement {@link TopologyAwareAddress}; a ClassCastException
 * is raised otherwise.
 */
private void addNode(Address address, float capacityFactor) {
   TopologyAwareAddress taa = (TopologyAwareAddress) address;
   String siteId = taa.getSiteId();
   String rackId = taa.getRackId();
   String machineId = taa.getMachineId();
   cluster.addNode(siteId, rackId, machineId, address, capacityFactor);
}
/** Returns all member addresses located in the given site. */
public Collection<Address> getSiteNodes(String site) {
   Collection<Address> addresses = new ArrayList<>();
   cluster.getSite(site).collectNodes(addresses);
   return addresses;
}

/** Returns all member addresses located in the given rack. */
public Collection<Address> getRackNodes(String site, String rack) {
   Collection<Address> addresses = new ArrayList<>();
   cluster.getSite(site).getRack(rack).collectNodes(addresses);
   return addresses;
}

/** Returns all member addresses located on the given machine. */
public Collection<Address> getMachineNodes(String site, String rack, String machine) {
   Collection<Address> addresses = new ArrayList<>();
   cluster.getSite(site).getRack(rack).getMachine(machine).collectNodes(addresses);
   return addresses;
}

/** Returns the ids of all sites in the cluster. */
public Collection<String> getAllSites() {
   return cluster.getChildNames();
}

/** Returns the ids of all racks in the given site. */
public Collection<String> getSiteRacks(String site) {
   return cluster.getSite(site).getChildNames();
}

/** Returns the ids of all machines in the given rack. */
public Collection<String> getRackMachines(String site, String rack) {
   return cluster.getSite(site).getRack(rack).getChildNames();
}
/**
 * Renders the whole location tree, one line per location, indented by depth.
 */
@Override
public String toString() {
   // Locale-independent formatting with at most 2 fractional digits
   DecimalFormat df = new DecimalFormat("0", DecimalFormatSymbols.getInstance(Locale.ENGLISH));
   df.setMaximumFractionDigits(2);
   StringBuilder sb = new StringBuilder("TopologyInfo{\n");
   sb.append(formatLocation(df, cluster, ""));
   for (Site site : cluster.sites) {
      sb.append(formatLocation(df, site, " "));
      for (Rack rack : site.racks) {
         sb.append(formatLocation(df, rack, " "));
         for (Machine machine : rack.machines) {
            sb.append(formatLocation(df, machine, " "));
            for (Node node : machine.nodes) {
               sb.append(formatLocation(df, node, " "));
            }
         }
      }
   }
   sb.append("}");
   return sb.toString();
}

/**
 * Formats one location as "name * capacity: primary+backup" with the given
 * line prefix.
 */
public String formatLocation(DecimalFormat df, Location location, String prefix) {
   return String.format("%s%s * %s: %s+%s %n", prefix, location.getName(), df.format(location.totalCapacity),
                        df.format(location.expectedPrimarySegments),
                        df.format(location.getExpectedBackupSegments()));
}
/**
 * Computes the expected primary and total owned segments for every location.
 */
private void computeExpectedSegments() {
   // Primary owners are allocated strictly based on capacity factors
   // But for backup owners we first try to put an owner on each site, on every rack, and on every machine
   // If there are too few nodes, each node will hold all the segments
   // If there are too few machines, each machine will hold all the segments, and some will have duplicates
   // The same if there are too few racks or sites
   splitPrimarySegments();
   splitExpectedOwnedSegments(cluster.getChildren(), numSegments * numOwners,
                              cluster.totalCapacity);
}

/**
 * Distributes the expected primary segments to each node proportionally to its
 * share of the total capacity.
 */
private void splitPrimarySegments() {
   // Round down, the actual segment allocation is allowed to add extra segments
   for (Node node : allNodes) {
      float fraction = node.totalCapacity / cluster.totalCapacity;
      node.addPrimarySegments(numSegments * fraction);
   }
}
/**
 * Split totalOwnedSegments segments into the given locations recursively.
 * <p>
 * Works in three passes: locations whose capacity-proportional share is below the
 * per-location minimum get that minimum; locations above the maximum get the
 * maximum; the remainder is split proportionally (or recursed into children when
 * more than numSegments per location remain).
 *
 * @param locations List of locations of the same level, sorted descending by capacity factor
 */
private void splitExpectedOwnedSegments(Collection<? extends Location> locations, float totalOwnedSegments,
                                        float totalCapacity) {
   float remainingCapacity = totalCapacity;
   float remainingOwned = totalOwnedSegments;
   // First pass, assign expected owned segments for locations with too little capacity
   // We know we can do it without a loop because locations are ordered descending by capacity
   List<Location> remainingLocations = new ArrayList<>(locations);
   // Iterate from the smallest capacity upwards
   for (ListIterator<Location> it = remainingLocations.listIterator(locations.size()); it.hasPrevious(); ) {
      Location location = it.previous();
      if (remainingOwned < numSegments * remainingLocations.size())
         break;
      // We don't have enough locations, so each location must own at least numSegments segments
      int minOwned = numSegments;
      float locationOwned = remainingOwned * location.totalCapacity / remainingCapacity;
      if (locationOwned > minOwned)
         break;
      splitExpectedOwnedSegments2(location.getChildren(), minOwned, location.totalCapacity);
      remainingCapacity -= location.totalCapacity;
      remainingOwned -= location.expectedOwnedSegments;
      it.remove();
   }
   // Second pass, assign expected owned segments for locations with too much capacity
   // We know we can do it without a loop because locations are ordered descending by capacity
   for (Iterator<? extends Location> it = remainingLocations.iterator(); it.hasNext(); ) {
      Location location = it.next();
      float maxOwned = computeMaxOwned(remainingOwned, remainingLocations.size());
      float locationOwned = remainingOwned * location.totalCapacity / remainingCapacity;
      if (locationOwned < maxOwned)
         break;
      splitExpectedOwnedSegments2(location.getChildren(), maxOwned, location.totalCapacity);
      remainingCapacity -= location.totalCapacity;
      remainingOwned -= maxOwned;
      it.remove();
   }
   // If there were exactly numSegments segments per location, we're finished here
   if (remainingLocations.isEmpty())
      return;
   // Third pass: If more than numSegments segments per location, split segments between their children
   // Else spread remaining segments based only on the capacity, rounding down
   if (remainingLocations.size() * numSegments < remainingOwned) {
      List<Location> childrenLocations = new ArrayList<>(remainingLocations.size() * 2);
      for (Location location : remainingLocations) {
         childrenLocations.addAll(location.getChildren());
      }
      // Children must again be sorted descending by capacity before recursing
      Collections.sort(childrenLocations);
      splitExpectedOwnedSegments2(childrenLocations, remainingOwned, remainingCapacity);
   } else {
      // The allocation algorithm can assign more segments to nodes, so it's ok to miss some segments here
      float fraction = remainingOwned / remainingCapacity;
      for (Location location : remainingLocations) {
         float locationOwned = location.totalCapacity * fraction;
         splitExpectedOwnedSegments2(location.getChildren(), locationOwned, location.totalCapacity);
      }
   }
}
/**
 * Returns the maximum number of segments a single location may own, given how
 * many segments and sibling locations remain.
 */
private float computeMaxOwned(float remainingOwned, int locationsCount) {
   float maxOwned;
   if (remainingOwned < numSegments) {
      // We already have enough owners on siblings of the parent location
      maxOwned = remainingOwned;
   } else if (remainingOwned < numSegments * locationsCount) {
      // We have enough locations to so we don't put more than numSegments segments in any of them
      maxOwned = numSegments;
   } else {
      // We don't have enough locations, so each location gets at least numSegments
      maxOwned = remainingOwned - numSegments * (locationsCount - 1);
   }
   return maxOwned;
}

/**
 * Recursion helper: assigns the segments directly when the single remaining
 * location is a leaf node, otherwise recurses one level down.
 */
private void splitExpectedOwnedSegments2(Collection<? extends Location> locations,
                                         float totalOwnedSegments, float totalCapacity) {
   Location first = locations.iterator().next();
   if (locations.size() == 1 && first instanceof Node) {
      ((Node) first).addOwnedSegments(totalOwnedSegments);
   } else {
      splitExpectedOwnedSegments(locations, totalOwnedSegments, totalCapacity);
   }
}
/**
 * Sums the capacity factors of the given nodes; with a null factors map every
 * node counts as capacity 1.
 */
public float computeTotalCapacity(Collection<Address> nodes, Map<Address, Float> capacityFactors) {
   if (capacityFactors == null) {
      return nodes.size();
   }
   float sum = 0f;
   for (Address node : nodes) {
      sum += capacityFactors.get(node);
   }
   return sum;
}
/** Expected primary-owned segments for the node, or 0 for an unknown address. */
public float getExpectedPrimarySegments(Address address) {
   Node node = addressMap.get(address);
   return node != null ? node.expectedPrimarySegments : 0;
}

/** Expected total owned segments for the node, or 0 for an unknown address. */
public float getExpectedOwnedSegments(Address address) {
   Node node = addressMap.get(address);
   return node != null ? node.expectedOwnedSegments : 0;
}

// The index getters below throw NPE for addresses not in the topology.

/** Index of the address's site in the sorted site list. */
public int getSiteIndex(Address address) {
   Site site = addressMap.get(address).getSite();
   return cluster.sites.indexOf(site);
}

/** Index of the address's rack in the global sorted rack list. */
public int getRackIndex(Address address) {
   Rack rack = addressMap.get(address).getRack();
   return allRacks.indexOf(rack);
}

/** Index of the address's machine in the global sorted machine list. */
public int getMachineIndex(Address address) {
   Machine machine = addressMap.get(address).getMachine();
   return allMachines.indexOf(machine);
}
/**
 * Base class for locations.
 *
 * <p>Implements Comparable, but locations with equal capacity are equal, so they can't be used as map keys.</p>
 */
public static abstract class Location implements Comparable<Location> {
   // Aggregate capacity of all nodes under this location
   float totalCapacity;
   // Number of nodes under this location
   int nodeCount;
   // Expected primary-owned segments, accumulated by Node.addPrimarySegments
   float expectedPrimarySegments;
   // Expected total owned segments, accumulated by Node.addOwnedSegments
   float expectedOwnedSegments;

   abstract Collection<? extends Location> getChildren();

   abstract String getName();

   abstract String getFullName();

   float getCapacityPerNode() {
      return totalCapacity / nodeCount;
   }

   float getExpectedBackupSegments() {
      return expectedOwnedSegments - expectedPrimarySegments;
   }

   /** Recursively adds all node addresses under this location to the collection. */
   void collectNodes(Collection<Address> addressCollection) {
      for (Location location : getChildren()) {
         location.collectNodes(addressCollection);
      }
   }

   public Collection<String> getChildNames() {
      Collection<String> names = new ArrayList<>();
      for (Location location : getChildren()) {
         names.add(location.getName());
      }
      return names;
   }

   @Override
   public int compareTo(Location o) {
      // Sort descending by total capacity, ignore everything else
      // (intentionally inconsistent with equals, as noted in the class javadoc)
      return Float.compare(o.totalCapacity, totalCapacity);
   }

   @Override
   public String toString() {
      String name = getFullName();
      return String.format("%s * %f: %.2f+%.2f", name != null ? name : "/", totalCapacity, expectedPrimarySegments,
                           getExpectedBackupSegments());
   }
}
/** Root of the location tree; aggregates all sites. */
public static class Cluster extends Location {
   final List<Site> sites = new ArrayList<>();
   final Map<String, Site> siteMap = new HashMap<>();

   // Inserts a node, creating its site lazily, and updates aggregate capacity/count.
   void addNode(String siteId, String rackId, String machineId, Address address, float capacityFactor) {
      Site site = siteMap.get(siteId);
      if (site == null) {
         site = new Site(this, siteId);
         sites.add(site);
         siteMap.put(siteId, site);
      }
      site.addNode(rackId, machineId, address, capacityFactor);
      totalCapacity += capacityFactor;
      nodeCount++;
   }

   Site getSite(String siteId) {
      return siteMap.get(siteId);
   }

   @Override
   Collection<Site> getChildren() {
      return sites;
   }

   @Override
   String getName() {
      return "cluster";
   }

   @Override
   String getFullName() {
      return "";
   }
}
/** A site in the topology tree; aggregates racks. */
public static class Site extends Location {
   final Cluster cluster;
   final String siteId;
   final List<Rack> racks = new ArrayList<>();
   final Map<String, Rack> rackMap = new HashMap<>();

   Site(Cluster cluster, String siteId) {
      this.cluster = cluster;
      this.siteId = siteId;
   }

   // Inserts a node, creating its rack lazily, and updates aggregate capacity/count.
   void addNode(String rackId, String machineId, Address address, float capacityFactor) {
      Rack rack = rackMap.get(rackId);
      if (rack == null) {
         rack = new Rack(this, rackId);
         racks.add(rack);
         rackMap.put(rackId, rack);
      }
      rack.addNode(machineId, address, capacityFactor);
      totalCapacity += capacityFactor;
      nodeCount++;
   }

   Rack getRack(String rackId) {
      return rackMap.get(rackId);
   }

   @Override
   Collection<Rack> getChildren() {
      return racks;
   }

   @Override
   String getName() {
      return siteId;
   }

   @Override
   String getFullName() {
      return siteId;
   }
}
/** A rack within a site; aggregates machines. */
public static class Rack extends Location {
   final Site site;
   final String rackId;
   final List<Machine> machines = new ArrayList<>();
   final Map<String, Machine> machineMap = new HashMap<>();

   Rack(Site site, String rackId) {
      this.site = site;
      this.rackId = rackId;
   }

   // Inserts a node, creating its machine lazily, and updates aggregate capacity/count.
   void addNode(String machineId, Address address, float capacityFactor) {
      Machine machine = machineMap.get(machineId);
      if (machine == null) {
         machine = new Machine(this, machineId);
         machines.add(machine);
         machineMap.put(machineId, machine);
      }
      machine.addNode(address, capacityFactor);
      totalCapacity += capacityFactor;
      nodeCount++;
   }

   Machine getMachine(String machineId) {
      return machineMap.get(machineId);
   }

   @Override
   Collection<Machine> getChildren() {
      return machines;
   }

   @Override
   String getName() {
      return rackId;
   }

   @Override
   String getFullName() {
      // Full names use '|' separators from most to least specific
      return rackId + '|' + site.siteId;
   }
}
/** A machine within a rack; aggregates nodes. */
public static class Machine extends Location {
   final Rack rack;
   final String machineId;
   final List<Node> nodes = new ArrayList<>();

   Machine(Rack rack, String machineId) {
      this.rack = rack;
      this.machineId = machineId;
   }

   void addNode(Address address, float capacityFactor) {
      nodes.add(new Node(this, address, capacityFactor));
      totalCapacity += capacityFactor;
      nodeCount++;
   }

   @Override
   Collection<Node> getChildren() {
      return nodes;
   }

   @Override
   String getName() {
      return machineId;
   }

   @Override
   String getFullName() {
      return machineId + '|' + rack.rackId + '|' + rack.site.siteId;
   }
}
/** A leaf of the location tree: one cluster member. */
public static class Node extends Location {
   final Machine machine;
   final Address address;

   Node(Machine machine, Address address, float capacityFactor) {
      this.machine = machine;
      this.address = address;
      this.totalCapacity = capacityFactor;
   }

   public Machine getMachine() {
      return machine;
   }

   public Rack getRack() {
      return machine.rack;
   }

   public Site getSite() {
      return machine.rack.site;
   }

   @Override
   Collection<Node> getChildren() {
      // A node is its own only child, which simplifies the recursive splitting
      return Collections.singletonList(this);
   }

   @Override
   void collectNodes(Collection<Address> addressCollection) {
      addressCollection.add(address);
   }

   @Override
   String getName() {
      return address.toString();
   }

   @Override
   String getFullName() {
      return address.toString() + '|' + machine.machineId + '|' + machine.rack.rackId + '|' +
             machine.rack.site.siteId;
   }

   // Accumulate expected primary segments on this node and every ancestor.
   void addPrimarySegments(float segments) {
      expectedPrimarySegments += segments;
      machine.expectedPrimarySegments += segments;
      machine.rack.expectedPrimarySegments += segments;
      machine.rack.site.expectedPrimarySegments += segments;
      machine.rack.site.cluster.expectedPrimarySegments += segments;
   }

   // Accumulate expected owned segments on this node and every ancestor.
   void addOwnedSegments(float segments) {
      expectedOwnedSegments += segments;
      machine.expectedOwnedSegments += segments;
      machine.rack.expectedOwnedSegments += segments;
      machine.rack.site.expectedOwnedSegments += segments;
      machine.rack.site.cluster.expectedOwnedSegments += segments;
   }

   @Override
   public String toString() {
      return address.toString();
   }
}
}
| 20,864
| 32.384
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/topologyaware/TopologyLevel.java
|
package org.infinispan.distribution.topologyaware;
/**
 * The levels of the cluster topology tree at which ownership can be spread:
 * individual nodes, machines, racks, and sites. (The cluster as a whole is the
 * implicit root of the tree and is not itself a level.)
 *
 * @author Dan Berindei
 * @since 5.2
 */
public enum TopologyLevel {
   NODE,
   MACHINE,
   RACK,
   SITE,
}
| 291
| 17.25
| 99
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/impl/DistributionManagerImpl.java
|
package org.infinispan.distribution.impl;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHash;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.jmx.annotations.Parameter;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.LocalModeAddress;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* The default distribution manager implementation
*
* @author Manik Surtani
* @author Vladimir Blagojevic
* @author Mircea.Markus@jboss.com
* @author Bela Ban
* @author Dan Berindei <dan@infinispan.org>
* @author anistor@redhat.com
* @since 4.0
*/
@MBean(objectName = "DistributionManager", description = "Component that handles distribution of content across a cluster")
@Scope(Scopes.NAMED_CACHE)
public class DistributionManagerImpl implements DistributionManager {
private static final Log log = LogFactory.getLog(DistributionManagerImpl.class);
@Inject Transport transport;
@Inject KeyPartitioner keyPartitioner;
@Inject Configuration configuration;
private CacheMode cacheMode;
private volatile LocalizedCacheTopology extendedTopology;
// Start before RpcManagerImpl
@Start(priority = 8)
@SuppressWarnings("unused")
void start() throws Exception {
   if (log.isTraceEnabled()) log.tracef("starting distribution manager on %s", getAddress());
   cacheMode = configuration.clustering().cacheMode();
   // We need an extended topology for preload, before the start of StateTransferManagerImpl
   // Without a transport (local mode) fall back to the local-mode address
   Address localAddress = transport == null ? LocalModeAddress.INSTANCE : transport.getAddress();
   extendedTopology = makeSingletonTopology(cacheMode, keyPartitioner, configuration.clustering().hash().numSegments(),
                                            localAddress);
}
// NOTE(review): unlike start(), this does not null-check transport; callers
// appear to only use it when a transport exists — TODO confirm.
private Address getAddress() {
   return transport.getAddress();
}

/** The consistent hash currently used for reads. */
@Override
public ConsistentHash getReadConsistentHash() {
   return extendedTopology.getReadConsistentHash();
}

/** The consistent hash currently used for writes. */
@Override
public ConsistentHash getWriteConsistentHash() {
   return extendedTopology.getWriteConsistentHash();
}
@Override
@ManagedOperation(
      description = "Determines whether a given key is affected by an ongoing rehash, if any.",
      displayName = "Could key be affected by rehash?"
)
public boolean isAffectedByRehash(@Parameter(name = "key", description = "Key to check") Object key) {
   if (!isRehashInProgress())
      return false;
   int segment = keyPartitioner.getSegment(key);
   DistributionInfo distributionInfo = this.extendedTopology.getDistribution(segment);
   // A key is "affected" when this node is gaining ownership: it already writes
   // the key but does not yet read it
   return distributionInfo.isWriteOwner() && !distributionInfo.isReadOwner();
}
/**
 * Tests whether a rehash is in progress
 *
 * @return true if a rehash is in progress, false otherwise
 */
@Override
public boolean isRehashInProgress() {
   // A non-null pending CH means a rebalance/topology change is underway
   return extendedTopology.getPendingCH() != null;
}

/** Whether this node has joined the cluster-wide cache topology. */
@Override
public boolean isJoinComplete() {
   return extendedTopology.isConnected();
}
@ManagedOperation(
      description = "Tells you whether a given key would be written to this instance of the cache according to the consistent hashing algorithm. " +
            "Only works with String keys.",
      displayName = "Is key local?"
)
public boolean isLocatedLocally(@Parameter(name = "key", description = "Key to query") String key) {
   return getCacheTopology().isWriteOwner(key);
}
@ManagedOperation(
      description = "Shows the addresses of the nodes where a write operation would store the entry associated with the specified key. Only " +
            "works with String keys.",
      displayName = "Locate key"
)
public List<String> locateKey(@Parameter(name = "key", description = "Key to locate") String key) {
   // Render each write owner's address as a string
   return getCacheTopology().getDistribution(key).writeOwners().stream()
         .map(Address::toString)
         .collect(Collectors.toList());
}
/** The current localized view of the cache topology. */
@Override
public LocalizedCacheTopology getCacheTopology() {
   return this.extendedTopology;
}

/** Installs a new topology, localizing it for this node. */
@Override
public void setCacheTopology(CacheTopology cacheTopology) {
   if (log.isTraceEnabled()) log.tracef("Topology updated to %s", cacheTopology);
   this.extendedTopology = createLocalizedCacheTopology(cacheTopology);
}

@Override
public LocalizedCacheTopology createLocalizedCacheTopology(CacheTopology cacheTopology) {
   return new LocalizedCacheTopology(cacheMode, cacheTopology, keyPartitioner, transport.getAddress(), true);
}
/**
 * Creates a topology with the local node as the only member and primary owner of
 * all segments, used before the real cluster topology is received (e.g. for preload).
 */
public static LocalizedCacheTopology makeSingletonTopology(CacheMode cacheMode, KeyPartitioner keyPartitioner,
                                                           int numSegments, Address localAddress) {
   List<Address> members = Collections.singletonList(localAddress);
   // Every segment is owned by member index 0 (the local node); Java array
   // elements are zero-initialized, so the previous Arrays.fill(owners, 0)
   // was redundant and has been removed.
   int[] owners = new int[numSegments];
   ConsistentHash ch = new ReplicatedConsistentHash(members, null, Collections.emptyList(), owners);
   CacheTopology cacheTopology = new CacheTopology(-1, -1, ch, null, CacheTopology.Phase.NO_REBALANCE, members, null);
   return new LocalizedCacheTopology(cacheMode, cacheTopology, keyPartitioner, localAddress, false);
}
}
| 6,157
| 38.22293
| 151
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/impl/L1ManagerImpl.java
|
package org.infinispan.distribution.impl;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.distribution.L1Manager;
import org.infinispan.distribution.RemoteValueRetrievedListener;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.distribution.L1WriteSynchronizer;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
@Scope(Scopes.NAMED_CACHE)
public class L1ManagerImpl implements L1Manager, RemoteValueRetrievedListener {
private static final Log log = LogFactory.getLog(L1ManagerImpl.class);
@Inject Configuration configuration;
@Inject RpcManager rpcManager;
@Inject CommandsFactory commandsFactory;
@Inject TimeService timeService;
@Inject @ComponentName(KnownComponentNames.EXPIRATION_SCHEDULED_EXECUTOR)
ScheduledExecutorService scheduledExecutor;
private int threshold;
private long l1Lifespan;
// TODO replace this with a custom, expirable collection
private final ConcurrentMap<Object, ConcurrentMap<Address, Long>> requestors;
private final ConcurrentMap<Object, L1WriteSynchronizer> synchronizers;
private ScheduledFuture<?> scheduledRequestorsCleanupTask;
public L1ManagerImpl() {
   // Tracks, per key, which nodes have requested it (and when) for L1 invalidation
   requestors = new ConcurrentHashMap<>();
   synchronizers = new ConcurrentHashMap<>();
}
@Start (priority = 3)
public void start() {
   this.threshold = configuration.clustering().l1().invalidationThreshold();
   this.l1Lifespan = configuration.clustering().l1().lifespan();
   if (configuration.clustering().l1().cleanupTaskFrequency() > 0) {
      // Periodically evict expired requestor registrations
      scheduledRequestorsCleanupTask =
            scheduledExecutor.scheduleAtFixedRate(this::cleanUpRequestors,
                                                  configuration.clustering().l1().cleanupTaskFrequency(),
                                                  configuration.clustering().l1().cleanupTaskFrequency(),
                                                  TimeUnit.MILLISECONDS);
   } else {
      // Without a reaper the requestors map can grow unbounded
      log.warnL1NotHavingReaperThread();
   }
}
@Stop (priority = 3)
public void stop() {
   // Interrupt the cleanup task if it was scheduled
   if (scheduledRequestorsCleanupTask != null) scheduledRequestorsCleanupTask.cancel(true);
}
// Removes requestor registrations older than the L1 lifespan.
// NOTE(review): there is a race with addRequestor — a key's map can be removed
// here just after a concurrent registration was added to it, losing that
// registration; the worst effect looks like a missed L1 invalidation target.
// TODO confirm this is an accepted trade-off.
private void cleanUpRequestors() {
   long expiryTime = timeService.wallClockTime() - l1Lifespan;
   for (Map.Entry<Object, ConcurrentMap<Address, Long>> entry: requestors.entrySet()) {
      Object key = entry.getKey();
      ConcurrentMap<Address, Long> reqs = entry.getValue();
      prune(reqs, expiryTime);
      if (reqs.isEmpty()) requestors.remove(key);
   }
}
/**
 * Drops every requestor whose registration timestamp precedes the expiry time.
 */
private void prune(ConcurrentMap<Address, Long> reqs, long expiryTime) {
   reqs.entrySet().removeIf(req -> req.getValue() < expiryTime);
}
@Override
public void addRequestor(Object key, Address origin) {
//we do a plain get first as that's likely to be enough
ConcurrentMap<Address, Long> as = requestors.get(key);
log.tracef("Registering requestor %s for key '%s'", origin, key);
long now = timeService.wallClockTime();
if (as == null) {
// only if needed we create a new HashSet, but make sure we don't replace another one being created
as = new ConcurrentHashMap<>();
as.put(origin, now);
ConcurrentMap<Address, Long> previousAs = requestors.putIfAbsent(key, as);
if (previousAs != null) {
//another thread added it already, so use his copy and discard our proposed instance
previousAs.put(origin, now);
}
} else {
as.put(origin, now);
}
}
@Override
public CompletableFuture<?> flushCache(Collection<Object> keys, Address origin, boolean assumeOriginKeptEntryInL1) {
final Collection<Address> invalidationAddresses = buildInvalidationAddressList(keys, origin, assumeOriginKeptEntryInL1);
int nodes = invalidationAddresses.size();
if (nodes > 0) {
InvalidateCommand ic = commandsFactory.buildInvalidateFromL1Command(origin, EnumUtil.EMPTY_BIT_SET, keys);
final SingleRpcCommand rpcCommand = commandsFactory.buildSingleRpcCommand(ic);
// No need to invalidate at all if there is no one to invalidate!
boolean multicast = isUseMulticast(nodes);
if (log.isTraceEnabled()) log.tracef("Invalidating keys %s on nodes %s. Use multicast? %s", keys, invalidationAddresses, multicast);
// L1 invalidations can ignore a member leaving while sending invalidation
MapResponseCollector collector = MapResponseCollector.ignoreLeavers();
CompletionStage<Map<Address, Response>> request;
if (multicast) {
request = rpcManager.invokeCommandOnAll(rpcCommand, collector, rpcManager.getSyncRpcOptions());
} else {
request = rpcManager.invokeCommand(invalidationAddresses, rpcCommand, collector,
rpcManager.getSyncRpcOptions());
}
return request.toCompletableFuture();
} else {
if (log.isTraceEnabled()) log.tracef("No L1 caches to invalidate for keys %s", keys);
return null;
}
}
private Collection<Address> buildInvalidationAddressList(Collection<Object> keys, Address origin, boolean assumeOriginKeptEntryInL1) {
Collection<Address> addresses = new HashSet<>(2);
boolean originIsInRequestorsList = false;
for (Object key : keys) {
ConcurrentMap<Address, Long> as = requestors.remove(key);
if (as != null) {
Set<Address> requestorAddresses = as.keySet();
addresses.addAll(requestorAddresses);
if (assumeOriginKeptEntryInL1 && origin != null && requestorAddresses.contains(origin)) {
originIsInRequestorsList = true;
// re-add the origin as a requestor since the key will still be in the origin's L1 cache
addRequestor(key, origin);
}
}
}
// Prevent a loop by not sending the invalidation message to the origin
if (originIsInRequestorsList) addresses.remove(origin);
return addresses;
}
private boolean isUseMulticast(int nodes) {
// User has requested unicast only
if (threshold == -1) return false;
// Underlying transport is not multicast capable
if (!rpcManager.getTransport().isMulticastCapable()) return false;
// User has requested multicast only
if (threshold == 0) return true;
// we decide:
return nodes > threshold;
}
@Override
public void registerL1WriteSynchronizer(Object key, L1WriteSynchronizer sync) {
if (synchronizers.put(key, sync) != null) {
if (log.isTraceEnabled()) {
log.tracef("Replaced existing L1 write synchronizer for key %s as there was a concurrent L1 attempt to " +
"update", key);
}
}
}
@Override
public void unregisterL1WriteSynchronizer(Object key, L1WriteSynchronizer sync) {
synchronizers.remove(key, sync);
}
@Override
public void remoteValueFound(InternalCacheEntry ice) {
L1WriteSynchronizer synchronizer = synchronizers.get(ice.getKey());
if (synchronizer != null) {
synchronizer.runL1UpdateIfPossible(ice);
}
}
@Override
public void remoteValueNotFound(Object key) {
L1WriteSynchronizer synchronizer = synchronizers.get(key);
if (synchronizer != null) {
// we assume synchronizer supports null value properly
synchronizer.runL1UpdateIfPossible(null);
}
}
}
| 8,932
| 41.336493
| 141
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/package-info.java
|
/**
* Consistent Hash interfaces
* @api.public
*/
package org.infinispan.distribution.ch;
| 93
| 14.666667
| 39
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.