repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
infinispan-main/core/src/main/java/org/infinispan/filter/KeyValueFilterConverter.java
|
package org.infinispan.filter;
import org.infinispan.metadata.Metadata;
/**
 * This interface is an optimization that can be used when a filter and converter are most efficiently used as the same
 * object composing the filtering and conversion in the same method invocation.
 *
 * @author wburns
 * @since 7.0
 */
public interface KeyValueFilterConverter<K, V, C> extends KeyValueFilter<K, V>, Converter<K, V, C> {
   /**
    * Will both filter the entry and if passed subsequently convert the value to a new value. A returned value of null
    * will symbolize the value not passing the filter, so ensure your conversion will not return null if you want this
    * entry to be returned.
    *
    * @param key The key of the entry to filter
    * @param value The value of the entry to filter and then convert
    * @param metadata The metadata attached to the entry
    * @return The converted value or null if the filter didn't pass
    */
   C filterAndConvert(K key, V value, Metadata metadata);
}
| 1,005
| 42.73913
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/filter/ParamKeyValueFilterConverterFactory.java
|
package org.infinispan.filter;
/**
 * Factory for {@link org.infinispan.filter.KeyValueFilterConverter} instances supporting
 * parameters.
 *
 * @author gustavonalle
 * @since 8.1
 */
public interface ParamKeyValueFilterConverterFactory<K, V, C> extends KeyValueFilterConverterFactory<K, V, C> {
   /**
    * Create an instance of {@link KeyValueFilterConverter} configured with the supplied parameters.
    *
    * @param params Supplied params; may be {@code null} (the parameterless overload delegates with {@code null})
    * @return a KeyValueFilterConverter instance
    */
   KeyValueFilterConverter<K, V, C> getFilterConverter(Object[] params);

   /**
    * @return true if parameters should be passed in binary format to the filter.
    */
   default boolean binaryParam() {
      return false;
   }

   /**
    * Parameterless variant inherited from the base factory; delegates to
    * {@link #getFilterConverter(Object[])} with a {@code null} parameter array.
    */
   @Override
   default KeyValueFilterConverter<K, V, C> getFilterConverter() {
      return getFilterConverter(null);
   }
}
| 804
| 24.967742
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/EntryMergePolicyFactoryRegistry.java
|
package org.infinispan.conflict;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.infinispan.configuration.cache.PartitionHandlingConfiguration;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * A registry for {@link EntryMergePolicyFactory} implementations, which allows {@link EntryMergePolicy} implementations
 * to be eagerly/lazily loaded across multiple contexts. The order in which {@link EntryMergePolicyFactory}
 * implementations are added to the registry determines their priority, with {@link EntryMergePolicyFactoryRegistry#createInstance(PartitionHandlingConfiguration)}
 * returning as soon as the first non-null implementation is encountered.
 *
 * In embedded mode we only expect a single factory implementation to be present as custom policy implementations are
 * provided during runtime by the user or loaded via the {@link org.infinispan.configuration.parsing.Parser}'s
 * {@link org.infinispan.configuration.cache.ConfigurationBuilder}'s classloader. However, in server mode it's possible
 * for users to deploy their custom policies to the server or use one of the default policies, therefore it's necessary
 * for both the embedded factory and a server factory to be utilised.
 */
@Scope(Scopes.GLOBAL)
public class EntryMergePolicyFactoryRegistry {
   // Consulted in list order; addMergePolicyFactory prepends, so later registrations take priority.
   private final List<EntryMergePolicyFactory> factories = Collections.synchronizedList(new ArrayList<>());

   public EntryMergePolicyFactoryRegistry() {
      // Create the factory for local embedded classes
      factories.add(new EntryMergePolicyFactory() {
         @SuppressWarnings("unchecked")
         @Override
         public <T> T createInstance(PartitionHandlingConfiguration config) {
            return (T) config.mergePolicy();
         }
      });
   }

   /**
    * Iterates the registered factories in priority order and returns the first non-null policy produced.
    *
    * @param config the partition handling configuration supplying the merge policy
    * @return the resolved {@link EntryMergePolicy}, or null if no factory could provide one
    */
   public EntryMergePolicy createInstance(PartitionHandlingConfiguration config) {
      for (EntryMergePolicyFactory factory : factories) {
         Object instance = factory.createInstance(config);
         if (instance != null)
            return (EntryMergePolicy) instance;
      }
      return null;
   }

   /**
    * Registers a factory at the head of the list so it is consulted before all previously added factories.
    *
    * @param factory the factory to register; must not be null
    */
   public void addMergePolicyFactory(EntryMergePolicyFactory factory) {
      if (factory == null)
         throw CONTAINER.unableToAddNullEntryMergePolicyFactory();
      factories.add(0, factory);
   }
}
| 2,580
| 42.016667
| 163
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/package-info.java
|
/**
* This package contains the APIs that enable users to search for, and amend, data inconsistencies within their cache.
*
* @api.public
*/
package org.infinispan.conflict;
| 178
| 24.571429
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/MergePolicy.java
|
package org.infinispan.conflict;
import java.util.List;
import org.infinispan.container.entries.CacheEntry;
/**
 * The built-in merge policies. {@code CUSTOM} and {@code NONE} are placeholders whose
 * {@link #merge(CacheEntry, List)} throws {@link UnsupportedOperationException}; the remaining
 * constants delegate to a concrete strategy supplied at construction time.
 */
public enum MergePolicy implements EntryMergePolicy {
   CUSTOM(),
   NONE(),
   PREFERRED_ALWAYS((preferredEntry, otherEntries) -> preferredEntry),
   PREFERRED_NON_NULL((preferredEntry, otherEntries) -> {
      // Fall back to the first other entry only when the preferred one is null and alternatives exist
      boolean keepPreferred = preferredEntry != null || otherEntries.isEmpty();
      return keepPreferred ? preferredEntry : (CacheEntry) otherEntries.get(0);
   }),
   REMOVE_ALL((preferredEntry, otherEntries) -> null);

   // The strategy this constant delegates merge calls to
   private final EntryMergePolicy delegate;

   MergePolicy() {
      this(new UnsupportedMergePolicy());
   }

   MergePolicy(EntryMergePolicy policy) {
      this.delegate = policy;
   }

   @Override
   public CacheEntry merge(CacheEntry preferredEntry, List otherEntries) {
      return delegate.merge(preferredEntry, otherEntries);
   }

   /**
    * Case-insensitive lookup by constant name; unknown names map to {@link #CUSTOM}.
    */
   public static MergePolicy fromString(String str) {
      for (MergePolicy candidate : values()) {
         if (candidate.name().equalsIgnoreCase(str)) {
            return candidate;
         }
      }
      return CUSTOM;
   }

   /**
    * Maps a configured policy instance back to a constant: null means {@link #NONE}, a non-enum
    * implementation means {@link #CUSTOM}.
    */
   public static MergePolicy fromConfiguration(EntryMergePolicy policy) {
      if (policy == null) {
         return NONE;
      }
      for (MergePolicy candidate : values()) {
         if (candidate == policy) {
            return candidate;
         }
      }
      return CUSTOM;
   }

   /** Placeholder policy used by constants that have no built-in merge behaviour. */
   public static class UnsupportedMergePolicy implements EntryMergePolicy {
      @Override
      public CacheEntry merge(CacheEntry preferredEntry, List otherEntries) {
         throw new UnsupportedOperationException();
      }
   }
}
| 1,540
| 26.035088
| 88
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/ConflictManagerFactory.java
|
package org.infinispan.conflict;
import org.infinispan.AdvancedCache;
import org.infinispan.conflict.impl.InternalConflictManager;
import org.infinispan.security.AuthorizationManager;
import org.infinispan.security.AuthorizationPermission;
/**
 * A {@link ConflictManager} factory for cache instances.
 *
 * @author Ryan Emerson
 * @since 9.1
 */
public final class ConflictManagerFactory {

   private ConflictManagerFactory() {
      // Utility class; no instances
   }

   /**
    * Returns the {@link ConflictManager} component associated with the given cache. When authorization
    * is enabled, the caller must hold both {@code ALL_READ} and {@code ALL_WRITE} permissions.
    *
    * @param cache the cache whose conflict manager is required
    * @return the cache's {@link ConflictManager}
    */
   @SuppressWarnings("unchecked")
   public static <K,V> ConflictManager<K,V> get(AdvancedCache<K, V> cache) {
      AuthorizationManager authzManager = cache.getAuthorizationManager();
      if (authzManager != null) {
         authzManager.checkPermission(AuthorizationPermission.ALL_READ);
         authzManager.checkPermission(AuthorizationPermission.ALL_WRITE);
      }
      return cache
            .getComponentRegistry()
            .getComponent(InternalConflictManager.class);
   }
}
| 889
| 30.785714
| 76
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/EntryMergePolicy.java
|
package org.infinispan.conflict;
import java.util.List;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.partitionhandling.PartitionHandling;
/**
 * @author Ryan Emerson
 * @since 9.1
 */
public interface EntryMergePolicy<K, V> {
   /**
    * This method is called by {@link ConflictManager#resolveConflicts()} for each conflict discovered to determine
    * which {@link CacheEntry} should be utilised. This merge policy is used when a user explicitly calls {@link ConflictManager#resolveConflicts()}
    * as well as when a partition merge occurs with {@link PartitionHandling#ALLOW_READ_WRITES} set.
    *
    * In the event of a partition merge, we define the preferred partition as the partition whose coordinator is coordinating
    * the current merge.
    *
    * @param preferredEntry During a partition merge, the preferredEntry is the primary replica of a CacheEntry stored
    *                       in the partition that contains the most nodes or if partitions are equal the one with the
    *                       largest topologyId. In the event of overlapping partitions, i.e. a node A is present in the
    *                       topology of both partitions {A}, {A,B,C}, we pick {A} as the preferred partition as it will
    *                       have the higher topologyId because the other partition's topology is behind.
    *
    *                       During a non-merge call to {@link ConflictManager#resolveConflicts()}, the preferredEntry is
    *                       simply the primary owner of an entry
    *
    * @param otherEntries a {@link List} of all other {@link CacheEntry} associated with a given Key.
    * @return the winning {@link CacheEntry} to be utilised across the cluster, or null if all entries for a key should be
    *         removed.
    */
   CacheEntry<K, V> merge(CacheEntry<K, V> preferredEntry, List<CacheEntry<K, V>> otherEntries);
}
| 1,921
| 50.945946
| 148
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/ConflictManager.java
|
package org.infinispan.conflict;
import java.util.Map;
import java.util.stream.Stream;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.transport.Address;
/**
 * @author Ryan Emerson
 * @since 9.1
 */
@Scope(Scopes.NAMED_CACHE)
public interface ConflictManager<K, V> {
   /**
    * Get all CacheEntries that exist for a given key. Note, concurrent calls to this method for the same key will utilise
    * the same CompletableFuture inside this method and consequently return the same results as all other invocations.
    * If this method is invoked during state transfer it will block until rehashing has completed. Similarly, if
    * state transfer is initiated during an invocation of this method and rehashing affects the segments of the provided
    * key, the initial requests for the entries versions are cancelled and re-attempted once state transfer has completed.
    *
    * This method utilises the addresses of the local {@link DistributionInfo#writeOwners()} to request values for a given key.
    * If a value does not exist for a key at one of the addresses, then a null value is mapped to said address.
    *
    * @param key the key for which associated entries are to be returned
    * @return a map of an address and its associated CacheEntry
    * @throws org.infinispan.commons.CacheException if one or more versions of a key cannot be retrieved.
    */
   Map<Address, InternalCacheValue<V>> getAllVersions(K key);

   /**
    * Returns a stream of conflicts detected in the cluster. This is a lazily-loaded stream which searches for conflicts
    * by sequentially fetching cache segments from their respective owner nodes. If a rebalance is initiated whilst the
    * stream is fetching a cache segment, then a CacheException is thrown when executing the stream.
    *
    * @return a stream of Map&lt;Address, CacheEntry&gt; for all conflicts detected throughout this cache.
    * @throws IllegalStateException if called whilst a previous conflicts stream is still executing or state transfer is in progress.
    */
   Stream<Map<Address, CacheEntry<K, V>>> getConflicts();

   /**
    * Utilises {@link ConflictManager#getConflicts()} to discover conflicts between Key replicas and utilises the configured
    * {@link EntryMergePolicy} to determine which entry should take precedence. The
    * resulting {@link org.infinispan.container.entries.CacheEntry} is then applied on all replicas in the cluster.
    */
   void resolveConflicts();

   /**
    * Utilises {@link ConflictManager#getConflicts()} to discover conflicts between Key replicas and utilises the provided
    * {@link EntryMergePolicy} to determine which entry should take precedence. The
    * resulting {@link org.infinispan.container.entries.CacheEntry} is then applied on all replicas in the cluster.
    *
    * @param mergePolicy the policy to be applied to all detected conflicts
    */
   void resolveConflicts(EntryMergePolicy<K, V> mergePolicy);

   /**
    * @return true if a state transfer is currently in progress.
    */
   boolean isStateTransferInProgress();

   /**
    * @return true if conflict resolution is in progress. This can happen if the user has multiple threads interacting
    * with the ConflictManager or if a Split-brain merge is in progress.
    */
   boolean isConflictResolutionInProgress();
}
| 3,568
| 48.569444
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/EntryMergePolicyFactory.java
|
package org.infinispan.conflict;
import org.infinispan.configuration.cache.PartitionHandlingConfiguration;
/**
 * Factory for producing {@link EntryMergePolicy} instances from a cache's partition handling configuration.
 * Implementations are registered with {@link EntryMergePolicyFactoryRegistry}, which consults them in priority
 * order and uses the first non-null result.
 */
public interface EntryMergePolicyFactory {
   /**
    * @param config the partition handling configuration of the cache
    * @param <T> the expected policy type
    * @return a policy instance, or null if this factory cannot provide one for the given configuration
    */
   <T> T createInstance(PartitionHandlingConfiguration config);
}
| 218
| 26.375
| 73
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/impl/package-info.java
|
/**
* This package contains implementations of the conflict resolution API.
*
* @api.private
*/
package org.infinispan.conflict.impl;
| 138
| 18.857143
| 72
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/impl/DefaultConflictManager.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.util.logging.Log.CLUSTER;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;
import java.util.Spliterators;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import jakarta.transaction.TransactionManager;
import org.infinispan.cache.impl.InvocationHelper;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.PartitionHandlingConfiguration;
import org.infinispan.conflict.EntryMergePolicy;
import org.infinispan.conflict.EntryMergePolicyFactoryRegistry;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.statetransfer.StateConsumer;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Default implementation of {@link InternalConflictManager}. Conflicts are discovered by streaming all replicas of
 * each cache segment from their write owners ({@link ReplicaSpliterator}) and keeping only keys whose replicas
 * disagree; detected conflicts are resolved by applying an {@link EntryMergePolicy} and writing the winning entry
 * (or a removal) across the cluster.
 *
 * @author Ryan Emerson
 */
@Scope(Scopes.NAMED_CACHE)
public class DefaultConflictManager<K, V> implements InternalConflictManager<K, V> {
   private static final Log log = LogFactory.getLog(DefaultConflictManager.class);

   // Flags for the local read performed when this node is itself a write owner of the requested key
   private static final long localFlags = FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_OWNERSHIP_CHECK | FlagBitSets.SKIP_LOCKING;
   // Flags for writes triggered by an explicit user call to resolveConflicts()
   private static final long userMergeFlags = FlagBitSets.IGNORE_RETURN_VALUES;
   // Flags for writes triggered automatically during a partition merge
   private static final long autoMergeFlags = FlagBitSets.IGNORE_RETURN_VALUES | FlagBitSets.PUT_FOR_STATE_TRANSFER | FlagBitSets.SKIP_REMOTE_LOOKUP;

   @ComponentName(KnownComponentNames.CACHE_NAME)
   @Inject String cacheName;
   @Inject ComponentRef<AsyncInterceptorChain> interceptorChain;
   @Inject InvocationHelper invocationHelper;
   @Inject Configuration cacheConfiguration;
   @Inject CommandsFactory commandsFactory;
   @Inject DistributionManager distributionManager;
   @Inject InvocationContextFactory invocationContextFactory;
   @Inject RpcManager rpcManager;
   @Inject ComponentRef<StateConsumer> stateConsumer;
   @Inject StateReceiver<K, V> stateReceiver;
   @Inject EntryMergePolicyFactoryRegistry mergePolicyRegistry;
   @Inject TimeService timeService;
   @Inject BlockingManager blockingManager;
   @Inject InternalEntryFactory internalEntryFactory;
   @Inject TransactionManager transactionManager;
   @Inject KeyPartitioner keyPartitioner;

   private Address localAddress;
   private long conflictTimeout;
   private EntryMergePolicy<K, V> entryMergePolicy;
   private BlockingManager.BlockingExecutor resolutionExecutor;
   // True while a conflicts stream is executing; guards against concurrent getConflicts() calls
   private final AtomicBoolean streamInProgress = new AtomicBoolean();
   // In-flight getAllVersions requests keyed by cache key; guarded by synchronizing on the map itself
   private final Map<K, VersionRequest> versionRequestMap = new HashMap<>();
   private final Queue<VersionRequest> retryQueue = new ConcurrentLinkedQueue<>();
   private volatile boolean running = false;
   private volatile ReplicaSpliterator conflictSpliterator;
   private volatile CompletableFuture<Void> conflictFuture;

   @Start
   public void start() {
      this.localAddress = rpcManager.getAddress();

      PartitionHandlingConfiguration config = cacheConfiguration.clustering().partitionHandling();
      this.entryMergePolicy = mergePolicyRegistry.createInstance(config);
      // TODO make this an explicit configuration param in PartitionHandlingConfiguration
      this.conflictTimeout = cacheConfiguration.clustering().stateTransfer().timeout();

      // Limit the number of concurrent tasks to ensure that internal CR operations can never overlap
      this.resolutionExecutor = blockingManager.limitedBlockingExecutor("ConflictManager-" + cacheName, 1);
      this.running = true;
      // Log the actual running state (the previous code logged !running, always printing false here)
      if (log.isTraceEnabled()) log.tracef("Cache %s starting %s. isRunning=%s", cacheName, getClass().getSimpleName(), running);
   }

   @Stop(priority = 0)
   public void stop() {
      this.running = false;
      synchronized (versionRequestMap) {
         // Arguments ordered to match the format string: cache name first, then the class name
         if (log.isTraceEnabled()) log.tracef("Cache %s stopping %s. isRunning=%s", cacheName, getClass().getSimpleName(), running);
         cancelVersionRequests();
         versionRequestMap.clear();
      }

      if (isConflictResolutionInProgress() && conflictSpliterator != null)
         conflictSpliterator.stop();
   }

   @Override
   public StateReceiver getStateReceiver() {
      return stateReceiver;
   }

   @Override
   public void cancelVersionRequests() {
      if (!running)
         return;

      synchronized (versionRequestMap) {
         versionRequestMap.values().forEach(VersionRequest::cancelRequestIfOutdated);
      }
   }

   @Override
   public void restartVersionRequests() {
      if (!running)
         return;

      VersionRequest request;
      while ((request = retryQueue.poll()) != null) {
         if (log.isTraceEnabled()) log.tracef("Retrying %s", request);
         request.start();
      }
   }

   @Override
   public Map<Address, InternalCacheValue<V>> getAllVersions(final K key) {
      checkIsRunning();

      final VersionRequest request;
      synchronized (versionRequestMap) {
         // Concurrent callers for the same key share one VersionRequest (and hence one future)
         request = versionRequestMap.computeIfAbsent(key, k -> new VersionRequest(k, stateConsumer.running().isStateTransferInProgress()));
      }

      try {
         return request.completableFuture.get();
      } catch (InterruptedException e) {
         Thread.currentThread().interrupt();
         throw new CacheException(e);
      } catch (ExecutionException e) {
         if (e.getCause() instanceof CacheException)
            throw (CacheException) e.getCause();

         throw new CacheException(e.getCause());
      } finally {
         synchronized (versionRequestMap) {
            versionRequestMap.remove(key);
         }
      }
   }

   @Override
   public Stream<Map<Address, CacheEntry<K, V>>> getConflicts() {
      checkIsRunning();
      return getConflicts(distributionManager.getCacheTopology());
   }

   private Stream<Map<Address, CacheEntry<K, V>>> getConflicts(LocalizedCacheTopology topology) {
      if (log.isTraceEnabled()) log.tracef("getConflicts isStateTransferInProgress=%s, topology=%s", stateConsumer.running().isStateTransferInProgress(), topology);
      if (topology.getPhase() != CacheTopology.Phase.CONFLICT_RESOLUTION && stateConsumer.running().isStateTransferInProgress()) {
         throw CLUSTER.getConflictsStateTransferInProgress(cacheName);
      }

      if (!streamInProgress.compareAndSet(false, true))
         throw CLUSTER.getConflictsAlreadyInProgress();

      conflictSpliterator = new ReplicaSpliterator(topology);
      if (!running) {
         conflictSpliterator.stop();
         return Stream.empty();
      }
      // Stream the spliterator stored in the field (the previous code streamed a second, unrelated
      // ReplicaSpliterator instance, so stop()/cancelConflictResolution() could not abort the stream)
      return StreamSupport
            .stream(conflictSpliterator, false)
            .filter(filterConsistentEntries());
   }

   @Override
   public boolean isConflictResolutionInProgress() {
      return streamInProgress.get();
   }

   @Override
   public void resolveConflicts() {
      if (entryMergePolicy == null)
         throw new CacheException("Cannot resolve conflicts as no EntryMergePolicy has been configured");

      resolveConflicts(entryMergePolicy);
   }

   @Override
   public void resolveConflicts(EntryMergePolicy<K, V> mergePolicy) {
      checkIsRunning();
      doResolveConflicts(distributionManager.getCacheTopology(), mergePolicy, null);
   }

   @Override
   public CompletionStage<Void> resolveConflicts(CacheTopology topology, Set<Address> preferredNodes) {
      if (!running)
         return CompletableFuture.completedFuture(null);

      LocalizedCacheTopology localizedTopology;
      if (topology instanceof LocalizedCacheTopology) {
         localizedTopology = (LocalizedCacheTopology) topology;
      } else {
         localizedTopology = distributionManager.createLocalizedCacheTopology(topology);
      }
      conflictFuture = resolutionExecutor.execute(() -> doResolveConflicts(localizedTopology, entryMergePolicy, preferredNodes),
            localizedTopology.getTopologyId())
            .toCompletableFuture();
      return conflictFuture.whenComplete((ignore, t) -> {
         // On failure, make sure the spliterator is stopped so subsequent resolutions can start
         if (t != null) {
            if (conflictSpliterator != null) {
               conflictSpliterator.stop();
               conflictSpliterator = null;
            }
         }
      });
   }

   @Override
   public void cancelConflictResolution() {
      if (conflictFuture != null && !conflictFuture.isDone()) {
         if (log.isTraceEnabled()) log.tracef("Cache %s cancelling conflict resolution future", cacheName);
         conflictFuture.cancel(true);
      }
   }

   private void doResolveConflicts(final LocalizedCacheTopology topology, final EntryMergePolicy<K, V> mergePolicy,
                                   final Set<Address> preferredNodes) {
      // preferredNodes == null signals an explicit user call rather than an automatic post-merge resolution
      boolean userCall = preferredNodes == null;
      final Set<Address> preferredPartition = userCall ? new HashSet<>(topology.getCurrentCH().getMembers()) : preferredNodes;

      if (log.isTraceEnabled())
         log.tracef("Cache %s attempting to resolve conflicts.  All Members %s, Installed topology %s, Preferred Partition %s",
               cacheName, topology.getMembers(), topology, preferredPartition);

      // Phaser tracks one party per in-flight merge write plus this coordinating thread
      final Phaser phaser = new Phaser(1);
      getConflicts(topology).forEach(conflictMap -> {
         phaser.register();
         if (log.isTraceEnabled()) log.tracef("Cache %s conflict detected %s", cacheName, conflictMap);

         Collection<CacheEntry<K, V>> entries = conflictMap.values();
         Optional<K> optionalEntry = entries.stream()
               .filter(entry -> !(entry instanceof NullCacheEntry))
               .map(CacheEntry::getKey)
               .findAny();

         final K key = optionalEntry.orElseThrow(() -> new CacheException("All returned conflicts are NullCacheEntries. This should not happen!"));
         Address primaryReplica = topology.getDistribution(key).primary();

         List<Address> preferredEntries = conflictMap.entrySet().stream()
               .map(Map.Entry::getKey)
               .filter(preferredPartition::contains)
               .collect(Collectors.toList());

         // If only one entry exists in the preferred partition, then use that entry
         CacheEntry<K, V> preferredEntry;
         if (preferredEntries.size() == 1) {
            preferredEntry = conflictMap.remove(preferredEntries.get(0));
         } else {
            // If multiple conflicts exist in the preferred partition, then use primary replica from the preferred partition
            // If not a merge, then also use primary as preferred entry
            // Preferred is null if no entry exists in preferred partition
            preferredEntry = conflictMap.remove(primaryReplica);
         }

         if (log.isTraceEnabled()) log.tracef("Cache %s applying EntryMergePolicy %s to PreferredEntry %s, otherEntries %s",
               cacheName, mergePolicy.getClass().getName(), preferredEntry, entries);

         CacheEntry<K, V> entry = preferredEntry instanceof NullCacheEntry ? null : preferredEntry;
         List<CacheEntry<K, V>> otherEntries = entries.stream().filter(e -> !(e instanceof NullCacheEntry)).collect(Collectors.toList());
         CacheEntry<K, V> mergedEntry = mergePolicy.merge(entry, otherEntries);

         CompletableFuture<V> future = applyMergeResult(userCall, key, mergedEntry);
         future.whenComplete((responseMap, exception) -> {
            if (log.isTraceEnabled()) log.tracef("Cache %s resolveConflicts future complete for key %s: ResponseMap=%s",
                  cacheName, key, responseMap);

            phaser.arriveAndDeregister();
            if (exception != null)
               log.exceptionDuringConflictResolution(key, exception);
         });
      });
      phaser.arriveAndAwaitAdvance();
      if (log.isTraceEnabled()) log.tracef("Cache %s finished resolving conflicts for topologyId=%s", cacheName, topology.getTopologyId());
   }

   // Applies the merge result cluster-wide: a null mergedEntry removes the key, otherwise the merged value is written
   private CompletableFuture<V> applyMergeResult(boolean userCall, K key, CacheEntry<K, V> mergedEntry) {
      long flags = userCall ? userMergeFlags : autoMergeFlags;
      VisitableCommand command;
      if (mergedEntry == null) {
         if (log.isTraceEnabled()) log.tracef("Cache %s executing remove on conflict: key %s", cacheName, key);
         command = commandsFactory.buildRemoveCommand(key, null, keyPartitioner.getSegment(key), flags);
      } else {
         if (log.isTraceEnabled()) log.tracef("Cache %s executing update on conflict: key %s with value %s", cacheName, key, mergedEntry
               .getValue());
         command = commandsFactory.buildPutKeyValueCommand(key, mergedEntry.getValue(), keyPartitioner.getSegment(key),
               mergedEntry.getMetadata(), flags);
      }
      try {
         assert transactionManager == null || transactionManager.getTransaction() == null : "Transaction active on conflict resolution thread";
         InvocationContext ctx = invocationHelper.createInvocationContextWithImplicitTransaction(1, true);
         return invocationHelper.invokeAsync(ctx, command);
      } catch (Exception e) {
         return CompletableFuture.failedFuture(e);
      }
   }

   @Override
   public boolean isStateTransferInProgress() {
      return stateConsumer.running().isStateTransferInProgress();
   }

   private void checkIsRunning() {
      if (!running)
         throw new CacheException(String.format("Cache %s unable to process request as the ConflictManager has been stopped", cacheName));
   }

   /**
    * Collects the value of a key from every write owner. The request is postponed to the retry queue when created
    * during state transfer, and restarted whenever an owner answers with {@link UnsureResponse}.
    */
   private class VersionRequest {
      final K key;
      final boolean postpone;
      final CompletableFuture<Map<Address, InternalCacheValue<V>>> completableFuture = new CompletableFuture<>();
      volatile CompletableFuture<Map<Address, Response>> rpcFuture;
      volatile Collection<Address> keyOwners;

      VersionRequest(K key, boolean postpone) {
         this.key = key;
         this.postpone = postpone;

         if (log.isTraceEnabled()) log.tracef("Cache %s creating %s", cacheName, this);

         if (postpone) {
            retryQueue.add(this);
         } else {
            start();
         }
      }

      void cancelRequestIfOutdated() {
         Collection<Address> latestOwners = distributionManager.getCacheTopology().getWriteOwners(key);
         if (rpcFuture != null && !completableFuture.isDone() && !keyOwners.equals(latestOwners)) {
            // Capture the future before discarding the reference; the previous ordering null-ed
            // rpcFuture first and then invoked cancel() on it, throwing a NullPointerException.
            // Also log keyOwners before clearing them so the trace message is meaningful.
            CompletableFuture<Map<Address, Response>> outdatedFuture = rpcFuture;
            rpcFuture = null;
            if (outdatedFuture.cancel(false)) {
               if (log.isTraceEnabled()) log.tracef("Cancelling %s for nodes %s. New write owners %s", this, keyOwners, latestOwners);
               keyOwners.clear();
               retryQueue.add(this);
            }
         }
      }

      void start() {
         LocalizedCacheTopology topology = distributionManager.getCacheTopology();
         DistributionInfo info = topology.getDistribution(key);
         keyOwners = info.writeOwners();

         if (log.isTraceEnabled()) log.tracef("Attempting %s from owners %s", this, keyOwners);

         final Map<Address, InternalCacheValue<V>> versionsMap = new HashMap<>();
         if (keyOwners.contains(localAddress)) {
            // Read the local copy directly through the interceptor chain instead of an rpc to self
            GetCacheEntryCommand cmd = commandsFactory.buildGetCacheEntryCommand(key, info.segmentId(), localFlags);
            InvocationContext ctx = invocationContextFactory.createNonTxInvocationContext();
            CacheEntry<K, V> entry = (CacheEntry<K, V>) interceptorChain.running().invoke(ctx, cmd);
            InternalCacheValue<V> icv = entry != null ? internalEntryFactory.createValue(entry) : null;
            synchronized (versionsMap) {
               versionsMap.put(localAddress, icv);
            }
         }

         ClusteredGetCommand cmd = commandsFactory.buildClusteredGetCommand(key, info.segmentId(), FlagBitSets.SKIP_OWNERSHIP_CHECK);
         cmd.setTopologyId(topology.getTopologyId());
         MapResponseCollector collector = MapResponseCollector.ignoreLeavers(keyOwners.size());
         rpcFuture = rpcManager.invokeCommand(keyOwners, cmd, collector, rpcManager.getSyncRpcOptions()).toCompletableFuture();
         rpcFuture.whenComplete((responseMap, exception) -> {
            if (log.isTraceEnabled()) log.tracef("%s received responseMap %s, exception %s", this, responseMap, exception);

            if (exception != null) {
               String msg = String.format("%s encountered when attempting '%s' on cache '%s'", exception.getCause(), this, cacheName);
               completableFuture.completeExceptionally(new CacheException(msg, exception.getCause()));
               return;
            }

            for (Map.Entry<Address, Response> entry : responseMap.entrySet()) {
               if (log.isTraceEnabled()) log.tracef("%s received response %s from %s", this, entry.getValue(), entry.getKey());
               Response rsp = entry.getValue();
               if (rsp instanceof SuccessfulResponse) {
                  SuccessfulResponse response = (SuccessfulResponse) rsp;
                  Object rspVal = response.getResponseValue();
                  synchronized (versionsMap) {
                     versionsMap.put(entry.getKey(), (InternalCacheValue<V>) rspVal);
                  }
               } else if (rsp instanceof UnsureResponse) {
                  // Topology changed at the responder; restart against the latest owners
                  log.debugf("Received UnsureResponse, restarting request %s", this);
                  this.start();
                  return;
               } else if (rsp instanceof CacheNotFoundResponse) {
                  if (log.isTraceEnabled()) log.tracef("Ignoring CacheNotFoundResponse: %s", rsp);
               } else {
                  completableFuture.completeExceptionally(new CacheException(String.format("Unable to retrieve key %s from %s: %s", key, entry.getKey(), entry.getValue())));
                  return;
               }
            }
            completableFuture.complete(versionsMap);
         });
      }

      @Override
      public String toString() {
         return "VersionRequest{" +
               "key=" + key +
               ", postpone=" + postpone +
               '}';
      }
   }

   // A conflict exists when the replicas of a key hold more than one distinct value, or no values at all
   private Predicate<? super Map<Address, CacheEntry<K, V>>> filterConsistentEntries() {
      return map -> map.values().stream().distinct().limit(2).count() > 1 || map.values().isEmpty();
   }

   /**
    * Lazily fetches all replicas of each cache segment in turn, emitting one Map of Address to CacheEntry per key.
    * The overall operation is bounded by {@code conflictTimeout}, measured from construction.
    */
   private class ReplicaSpliterator extends Spliterators.AbstractSpliterator<Map<Address, CacheEntry<K, V>>> {
      private final LocalizedCacheTopology topology;
      private final int totalSegments;
      private final long endTime;
      private int nextSegment = 0;
      private Iterator<Map<Address, CacheEntry<K, V>>> iterator = Collections.emptyIterator();
      private volatile CompletableFuture<List<Map<Address, CacheEntry<K, V>>>> segmentRequestFuture;

      ReplicaSpliterator(LocalizedCacheTopology topology) {
         super(Long.MAX_VALUE, DISTINCT | NONNULL);
         this.topology = topology;
         this.totalSegments = topology.getWriteConsistentHash().getNumSegments();
         this.endTime = timeService.expectedEndTime(conflictTimeout, TimeUnit.MILLISECONDS);
      }

      @Override
      public boolean tryAdvance(Consumer<? super Map<Address, CacheEntry<K, V>>> action) {
         while (!iterator.hasNext()) {
            if (nextSegment < totalSegments) {
               try {
                  if (log.isTraceEnabled())
                     log.tracef("Cache %s attempting to receive all replicas for segment %s with topology %s", cacheName, nextSegment, topology);
                  long remainingTime = timeService.remainingTime(endTime, TimeUnit.MILLISECONDS);
                  segmentRequestFuture = stateReceiver.getAllReplicasForSegment(nextSegment, topology, remainingTime);
                  List<Map<Address, CacheEntry<K, V>>> segmentEntries = segmentRequestFuture.get(remainingTime, TimeUnit.MILLISECONDS);
                  if (log.isTraceEnabled())
                     log.tracef("Cache %s segment %s entries received: %s", cacheName, nextSegment, segmentEntries);
                  nextSegment++;
                  iterator = segmentEntries.iterator();
               } catch (Exception e) {
                  if (log.isTraceEnabled()) log.tracef("Cache %s replicaSpliterator caught %s", cacheName, e);
                  stopStream();
                  return handleException(e);
               }
            } else {
               streamInProgress.compareAndSet(true, false);
               return false;
            }
         }
         action.accept(iterator.next());
         return true;
      }

      void stop() {
         if (log.isTraceEnabled()) log.tracef("Cache %s stop() called on ReplicaSpliterator. Current segment %s", cacheName, nextSegment);
         if (segmentRequestFuture != null && !segmentRequestFuture.isDone())
            segmentRequestFuture.cancel(true);
         streamInProgress.set(false);
      }

      void stopStream() {
         stateReceiver.cancelRequests();
         streamInProgress.set(false);
      }

      // Cancellation ends the stream quietly; interruption re-asserts the flag; anything else is rethrown
      private boolean handleException(Throwable t) {
         Throwable cause = t.getCause();
         if (t instanceof CancellationException || cause instanceof CancellationException) {
            return false;
         }
         if (t instanceof InterruptedException) {
            Thread.currentThread().interrupt();
            throw new CacheException(t);
         }
         throw new CacheException(t.getMessage(), cause != null ? cause : t);
      }
   }
}
| 23,821
| 43.033272
| 173
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/impl/StateReceiverImpl.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.factories.KnownComponentNames.CACHE_NAME;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.util.IntSets;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.executors.LimitedExecutor;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.annotation.DataRehashed;
import org.infinispan.notifications.cachelistener.event.DataRehashedEvent;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.InboundTransferTask;
import org.infinispan.statetransfer.StateChunk;
import org.infinispan.topology.CacheTopology;
/**
 * {@link StateReceiver} implementation that, for one segment at a time, requests every write owner's
 * replica of the segment's entries and collates them per key so conflicts between owners can be detected.
 *
 * @author Ryan Emerson
 * @since 9.1
 */
@Listener
@Scope(Scopes.NAMED_CACHE)
public class StateReceiverImpl<K, V> implements StateReceiver<K, V> {

   private static final Log log = LogFactory.getLog(StateReceiverImpl.class);

   @ComponentName(CACHE_NAME)
   @Inject String cacheName;
   @Inject CacheNotifier<K, V> cacheNotifier;
   @Inject CommandsFactory commandsFactory;
   @Inject InternalDataContainer<K, V> dataContainer;
   @Inject RpcManager rpcManager;
   @Inject @ComponentName(NON_BLOCKING_EXECUTOR)
   ExecutorService nonBlockingExecutor;

   // Serialises all inbound transfer task submissions: at most one runs at a time
   private LimitedExecutor stateReceiverExecutor;

   // At most one in-flight request per segment id; a request removes itself on completion/cancellation
   private final ConcurrentHashMap<Integer, SegmentRequest> requestMap = new ConcurrentHashMap<>();

   @Start
   public void start() {
      cacheNotifier.addListener(this);
      stateReceiverExecutor = new LimitedExecutor("StateReceiver-" + cacheName, nonBlockingExecutor, 1);
   }

   @Stop
   public void stop() {
      cancelRequests();
      stateReceiverExecutor.shutdownNow();
   }

   /**
    * Cancels all in-flight segment requests; their futures complete with a CancellationException.
    */
   @Override
   public void cancelRequests() {
      if (log.isTraceEnabled()) log.tracef("Cache %s stop() called on StateReceiverImpl", cacheName);
      for (SegmentRequest request : requestMap.values()) {
         request.cancel(null);
      }
   }

   /**
    * Aborts all in-flight requests just before a rehash, as the write owners of the requested
    * segments may change under the new topology.
    */
   @DataRehashed
   @SuppressWarnings("WeakerAccess")
   public void onDataRehash(DataRehashedEvent<K, V> dataRehashedEvent) {
      if (dataRehashedEvent.isPre()) {
         if (log.isTraceEnabled()) log.tracef("Cache %s received event: %s", cacheName, dataRehashedEvent);
         for (SegmentRequest request : requestMap.values())
            request.cancel(new CacheException("Cancelling replica request as the owners of the requested " +
                  "segment have changed."));
      }
   }

   @Override
   public CompletableFuture<List<Map<Address, CacheEntry<K, V>>>> getAllReplicasForSegment(int segmentId, LocalizedCacheTopology topology, long timeout) {
      return requestMap.computeIfAbsent(segmentId, id -> new SegmentRequest(id, topology, timeout)).requestState();
   }

   @Override
   public void receiveState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
      if (stateChunks.isEmpty()) {
         if (log.isTraceEnabled())
            log.tracef("Cache %s ignoring received state from %s because stateChunks are empty", cacheName, sender);
         return;
      }

      // All chunks in a single response belong to the same segment
      int segmentId = stateChunks.iterator().next().getSegmentId();
      SegmentRequest request = requestMap.get(segmentId);
      if (request == null) {
         if (log.isTraceEnabled()) log.tracef("Cache %s ignoring received state because the associated request was completed or cancelled", cacheName);
         return;
      }
      if (log.isTraceEnabled()) log.tracef("Cache %s received state for %s", cacheName, request);
      request.receiveState(sender, topologyId, stateChunks);
   }

   // Exposed for tests
   Map<K, Map<Address, CacheEntry<K, V>>> getKeyReplicaMap(int segmentId) {
      return requestMap.get(segmentId).keyReplicaMap;
   }

   // Exposed for tests
   Map<Address, InboundTransferTask> getTransferTaskMap(int segmentId) {
      return requestMap.get(segmentId).transferTaskMap;
   }

   InboundTransferTask createTransferTask(int segmentId, Address source, CacheTopology topology, long transferTimeout) {
      return new InboundTransferTask(IntSets.immutableSet(segmentId), source, topology.getTopologyId(),
            rpcManager, commandsFactory, transferTimeout, cacheName, false);
   }

   /**
    * Tracks a single "all replicas of segment X" operation: one InboundTransferTask per remote write
    * owner plus a local data-container scan, collated into {@link #keyReplicaMap}.
    */
   class SegmentRequest {
      final int segmentId;
      final LocalizedCacheTopology topology;
      final long timeout;
      final List<Address> replicaHosts;
      // key -> (owner -> that owner's version of the entry); missing entries are NullCacheEntry placeholders
      final Map<K, Map<Address, CacheEntry<K, V>>> keyReplicaMap = new HashMap<>();
      final Map<Address, InboundTransferTask> transferTaskMap = new ConcurrentHashMap<>();
      CompletableFuture<List<Map<Address, CacheEntry<K, V>>>> future;

      SegmentRequest(int segmentId, LocalizedCacheTopology topology, long timeout) {
         this.segmentId = segmentId;
         this.topology = topology;
         this.timeout = timeout;
         this.replicaHosts = topology.getSegmentDistribution(segmentId).writeOwners();
      }

      synchronized CompletableFuture<List<Map<Address, CacheEntry<K, V>>>> requestState() {
         if (future != null) {
            // A non-null future here means cancel() ran before requestState(); return the failed future
            assert future.isCompletedExceptionally();
            if (log.isTraceEnabled()) log.tracef("Cache %s already cancelled replicas request for segment %s from %s with topology %s",
                  cacheName, segmentId, replicaHosts, topology);
            return future;
         }

         if (log.isTraceEnabled()) log.tracef("Cache %s attempting to receive replicas for segment %s from %s with topologyId=%s, timeout=%d",
               cacheName, segmentId, replicaHosts, topology.getTopologyId(), timeout);
         future = new CompletableFuture<>();
         future.whenComplete((v, t) -> {
            if (t != null) {
               if (log.isTraceEnabled()) log.tracef("Cache %s segment request(s) cancelled due to exception=%s", cacheName, t);
               // If an exception has occurred, possibly a CancellationException, we must cancel all ongoing transfers
               cancel(t);
            }
         });
         for (final Address replica : replicaHosts) {
            if (replica.equals(rpcManager.getAddress())) {
               // Local owner: read matching entries straight out of the data container
               dataContainer.forEach(entry -> {
                  int keySegment = topology.getDistribution(entry.getKey()).segmentId();
                  if (keySegment == segmentId) {
                     addKeyToReplicaMap(replica, entry);
                  }
               });
               // numOwner == 1, then we cannot rely on receiveState to complete the future
               if (replicaHosts.size() == 1) {
                  completeRequest();
               }
            } else {
               final InboundTransferTask transferTask = createTransferTask(segmentId, replica, topology, timeout);
               transferTaskMap.put(replica, transferTask);

               stateReceiverExecutor.execute(() -> {
                  // If the transferTaskMap does not contain an entry for this replica, then it must have been cancelled
                  // before this request was executed.
                  if (!transferTaskMap.containsKey(replica))
                     return;

                  transferTask.requestSegments().exceptionally(throwable -> {
                     if (log.isTraceEnabled()) log.tracef(throwable, "Cache %s exception when processing InboundTransferTask", cacheName);
                     cancel(throwable);
                     return null;
                  });
               });
            }
         }
         return future;
      }

      synchronized void clear() {
         keyReplicaMap.clear();
         transferTaskMap.clear();
         requestMap.remove(segmentId);
      }

      synchronized void receiveState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
         if (topologyId < topology.getTopologyId()) {
            if (log.isTraceEnabled())
               // Fix: cacheName must come first to match the leading %s in the format string
               log.tracef("Cache %s discarding state response with old topology id %d, the smallest allowed topology id is %d",
                     cacheName, topologyId, topology.getTopologyId());
            return;
         }

         InboundTransferTask transferTask = transferTaskMap.get(sender);
         if (transferTask == null) {
            if (log.isTraceEnabled())
               log.tracef("Cache %s state received for an unknown request. No record of a state request exists for node %s", cacheName, sender);
            return;
         }

         if (log.isTraceEnabled()) log.tracef("Cache %s state chunks received from %s, with topologyId %s, statechunks %s", cacheName, sender, topologyId, stateChunks);
         for (StateChunk chunk : stateChunks) {
            boolean isLastChunk = chunk.isLastChunk();
            chunk.getCacheEntries().forEach(ice -> addKeyToReplicaMap(sender, (CacheEntry<K, V>) ice));
            transferTask.onStateReceived(chunk.getSegmentId(), isLastChunk);

            if (isLastChunk) {
               transferTaskMap.remove(sender);
               // Every remote owner has sent its final chunk, so the request is complete
               if (transferTaskMap.isEmpty()) {
                  completeRequest();
               }
            }
         }
      }

      synchronized void cancel(Throwable throwable) {
         // Fix: check for null BEFORE dereferencing future; the original called future.isDone() first,
         // which NPE'd when cancel() ran before requestState() and made the null branch unreachable
         if (future == null) {
            // requestState() has not run yet, so we create the future first; requestState() will later
            // observe the already-failed future and simply return it
            future = new CompletableFuture<>();
         } else if (future.isDone()) {
            return;
         }

         log.debugf(throwable, "Cache %s cancelling request for segment %s", cacheName, segmentId);
         if (throwable != null) {
            future.completeExceptionally(throwable);
         } else {
            future.cancel(true);
         }
         transferTaskMap.forEach((address, inboundTransferTask) -> inboundTransferTask.cancel());
         clear();
      }

      synchronized void completeRequest() {
         List<Map<Address, CacheEntry<K, V>>> retVal = new ArrayList<>(keyReplicaMap.values());
         clear();
         future.complete(Collections.unmodifiableList(retVal));
      }

      void addKeyToReplicaMap(Address address, CacheEntry<K, V> ice) {
         // If a map doesn't already exist for a given key, then init a map that contains all hosts with a NullCacheEntry.
         // This is necessary to determine if a key is missing on a given host as it artificially introduces a conflict
         keyReplicaMap.computeIfAbsent(ice.getKey(), k -> {
            Map<Address, CacheEntry<K, V>> map = new HashMap<>();
            replicaHosts.forEach(a -> map.put(a, NullCacheEntry.getInstance()));
            return map;
         }).put(address, ice);
      }

      @Override
      public String toString() {
         return "SegmentRequest{" +
               "segmentId=" + segmentId +
               ", topology=" + topology.getTopologyId() +
               ", replicaHosts=" + replicaHosts +
               ", keyReplicaMap=" + keyReplicaMap +
               ", transferTaskMap=" + transferTaskMap +
               ", future=" + future +
               '}';
      }
   }
}
| 11,982
| 40.898601
| 168
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/impl/StateReceiver.java
|
package org.infinispan.conflict.impl;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateChunk;
/**
 * Collects every write owner's replica of the entries in a given segment, so that conflicts
 * between replicas can be detected.
 *
 * @author Ryan Emerson
 * @since 9.1
 */
@Scope(Scopes.NAMED_CACHE)
public interface StateReceiver<K, V> {

   /**
    * Cancels all ongoing replica requests.
    */
   void cancelRequests();

   /**
    * Return all replicas of a cache entry for a given segment. We require the ConsistentHash to be passed here, as it is
    * necessary for the hash of the last stable topology to be utilised during an automatic merge, before a
    * new merged topology is installed.
    *
    * @param segmentId the segment whose entries are requested
    * @param topology  the topology whose write owners for the segment are queried
    * @param timeout   maximum time, in milliseconds, to wait for all replicas
    * @return a future completing with one map per key, each mapping an owner address to that owner's copy of the entry
    * @throws IllegalStateException if this method is invoked whilst a previous request for Replicas is still executing
    */
   CompletableFuture<List<Map<Address, CacheEntry<K, V>>>> getAllReplicasForSegment(int segmentId, LocalizedCacheTopology topology, long timeout);

   /**
    * Delivers a chunk of state from {@code sender} to the in-flight request for the chunk's segment, if any.
    */
   void receiveState(Address sender, int topologyId, Collection<StateChunk> stateChunks);
}
| 1,311
| 33.526316
| 146
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/conflict/impl/InternalConflictManager.java
|
package org.infinispan.conflict.impl;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import org.infinispan.conflict.ConflictManager;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
/**
 * Internal extension of {@link ConflictManager} adding lifecycle operations used during topology
 * changes and merges.
 *
 * @author Ryan Emerson
 * @since 9.1
 */
@Scope(Scopes.NAMED_CACHE)
public interface InternalConflictManager<K, V> extends ConflictManager<K, V> {

   // Cancels any in-flight version requests
   void cancelVersionRequests();

   // Restarts version requests previously cancelled via cancelVersionRequests()
   void restartVersionRequests();

   // Aborts an in-progress conflict resolution run
   void cancelConflictResolution();

   /**
    * Resolves conflicts across the owners described by {@code cacheTopology}.
    *
    * @param cacheTopology  the topology to resolve conflicts for
    * @param preferredNodes nodes whose values are preferred when resolving — presumably used by the
    *                       merge policy; confirm against the implementation
    * @return a stage completing when conflict resolution finishes
    */
   CompletionStage<Void> resolveConflicts(CacheTopology cacheTopology, Set<Address> preferredNodes);

   // Exposes the StateReceiver used to gather replicas
   StateReceiver getStateReceiver();
}
| 740
| 29.875
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/package-info.java
|
/**
* General utilities that are not specific to Infinispan, including string parsing helpers, reflection tools and
* collections and containers designed to supplement the JDK-provided containers.
*/
package org.infinispan.util;
| 232
| 37.833333
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/DataContainerRemoveIterator.java
|
package org.infinispan.util;
import java.util.Iterator;
import org.infinispan.Cache;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
/**
 * This is an iterator that will iterate upon the data container. A cache is also provided to be used
 * when the remove method on the iterator is invoked. Note that this means it will take part of any
 * ongoing transaction if there is one.
 *
 * @param <K> the key type of the cache
 * @param <V> the value type of the cache
 */
public class DataContainerRemoveIterator<K, V> implements Iterator<CacheEntry<K, V>> {
   private final Cache<K, V> cache;
   private final Iterator<InternalCacheEntry<K, V>> dataContainerIterator;
   // Key of the entry most recently returned by next(); null until next() is called or after remove()
   private K previousKey;

   /**
    * Creates an iterator over the cache's own data container.
    *
    * @param cache the cache to iterate and to remove entries from
    */
   public DataContainerRemoveIterator(Cache<K, V> cache) {
      this(cache, cache.getAdvancedCache().getDataContainer());
   }

   /**
    * Creates an iterator over the given data container, removing through the given cache.
    *
    * @param cache         the cache used when {@link #remove()} is invoked
    * @param dataContainer the container whose entries are iterated
    * @throws NullPointerException if either argument is null
    */
   public DataContainerRemoveIterator(Cache<K, V> cache, DataContainer<K, V> dataContainer) {
      // Same exception type as before, but with messages identifying the offending argument
      if (cache == null) {
         throw new NullPointerException("cache must not be null");
      }
      if (dataContainer == null) {
         throw new NullPointerException("dataContainer must not be null");
      }
      this.cache = cache;
      this.dataContainerIterator = dataContainer.iterator();
   }

   @Override
   public boolean hasNext() {
      return dataContainerIterator.hasNext();
   }

   @Override
   public CacheEntry<K, V> next() {
      CacheEntry<K, V> entry = dataContainerIterator.next();
      previousKey = entry.getKey();
      return entry;
   }

   /**
    * Removes the last returned entry via the cache (so it participates in any ongoing transaction).
    *
    * @throws IllegalStateException if next() has not been called since the last remove()
    */
   @Override
   public void remove() {
      if (previousKey == null) {
         throw new IllegalStateException();
      }
      cache.remove(previousKey);
      previousKey = null;
   }
}
| 1,601
| 28.666667
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/AbstractDelegatingLongCacheStream.java
|
package org.infinispan.util;
import java.util.LongSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.LongBinaryOperator;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;
import java.util.function.LongPredicate;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
import java.util.function.LongUnaryOperator;
import java.util.function.ObjLongConsumer;
import java.util.function.Supplier;
import java.util.stream.LongStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
/**
 * Abstract Delegating handler that passes LongStream operations off to the underlying LongCacheStream but delegates
 * {@link org.infinispan.BaseCacheStream} operations to the provided {@link CacheStream}. This allows for intercepting
 * methods defined on <b>BaseCacheStream</b>.
 * <p>
 * This class is package private as it should only be created by using a map operator from another
 * AbstractDelegating*CacheStream instance. Note that {@link AbstractDelegatingCacheStream} is public as this is
 * the defined approach to create such a delegated stream.
 * @author wburns
 * @since 9.2
 */
class AbstractDelegatingLongCacheStream implements LongCacheStream {
   // Object-stream delegate through which BaseCacheStream configuration calls are routed
   protected AbstractDelegatingCacheStream<?> delegateCacheStream;
   // The real stream that all LongStream operations are forwarded to
   protected LongCacheStream underlyingStream;

   AbstractDelegatingLongCacheStream(AbstractDelegatingCacheStream<?> delegateCacheStream,
         LongCacheStream underlyingStream) {
      this.delegateCacheStream = delegateCacheStream;
      this.underlyingStream = underlyingStream;
   }

   // These are methods that convert to a different AbstractDelegating*CacheStream

   // NOTE(review): unlike mapToDouble/mapToObj below, this returns the raw underlying stream without
   // re-wrapping it in a delegating stream, so later BaseCacheStream calls on the result bypass the
   // delegate — confirm whether this is intentional
   @Override
   public IntCacheStream mapToInt(LongToIntFunction mapper) {
      return underlyingStream.mapToInt(mapper);
   }

   @Override
   public DoubleCacheStream mapToDouble(LongToDoubleFunction mapper) {
      return new AbstractDelegatingDoubleCacheStream(delegateCacheStream, underlyingStream.mapToDouble(mapper));
   }

   @Override
   public <U> CacheStream<U> mapToObj(LongFunction<? extends U> mapper) {
      // Re-points the object-stream delegate at the mapped stream and reuses the same wrapper
      delegateCacheStream.underlyingStream = underlyingStream.mapToObj(mapper);
      return (CacheStream<U>) delegateCacheStream;
   }

   @Override
   public CacheStream<Long> boxed() {
      delegateCacheStream.underlyingStream = underlyingStream.boxed();
      return (CacheStream<Long>) delegateCacheStream;
   }

   @Override
   public DoubleCacheStream asDoubleStream() {
      return new AbstractDelegatingDoubleCacheStream(delegateCacheStream, underlyingStream.asDoubleStream());
   }

   // These are methods that should delegate to the original cache stream

   @Override
   public LongCacheStream sequentialDistribution() {
      delegateCacheStream = delegateCacheStream.sequentialDistribution();
      return this;
   }

   @Override
   public LongCacheStream parallelDistribution() {
      delegateCacheStream = delegateCacheStream.parallelDistribution();
      return this;
   }

   @Override
   public LongCacheStream filterKeySegments(Set<Integer> segments) {
      delegateCacheStream = delegateCacheStream.filterKeySegments(segments);
      return this;
   }

   @Override
   public LongCacheStream filterKeySegments(IntSet segments) {
      delegateCacheStream = delegateCacheStream.filterKeySegments(segments);
      return this;
   }

   @Override
   public LongCacheStream filterKeys(Set<?> keys) {
      delegateCacheStream = delegateCacheStream.filterKeys(keys);
      return this;
   }

   @Override
   public LongCacheStream distributedBatchSize(int batchSize) {
      delegateCacheStream = delegateCacheStream.distributedBatchSize(batchSize);
      return this;
   }

   @Override
   public LongCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
      delegateCacheStream = delegateCacheStream.segmentCompletionListener(listener);
      return this;
   }

   @Override
   public LongCacheStream disableRehashAware() {
      delegateCacheStream = delegateCacheStream.disableRehashAware();
      return this;
   }

   @Override
   public LongCacheStream timeout(long timeout, TimeUnit unit) {
      delegateCacheStream = delegateCacheStream.timeout(timeout, unit);
      return this;
   }

   // Actual LongStream operations

   @Override
   public LongCacheStream filter(LongPredicate predicate) {
      underlyingStream = underlyingStream.filter(predicate);
      return this;
   }

   @Override
   public LongCacheStream map(LongUnaryOperator mapper) {
      underlyingStream = underlyingStream.map(mapper);
      return this;
   }

   @Override
   public LongCacheStream flatMap(LongFunction<? extends LongStream> mapper) {
      underlyingStream = underlyingStream.flatMap(mapper);
      return this;
   }

   @Override
   public LongCacheStream distinct() {
      underlyingStream = underlyingStream.distinct();
      return this;
   }

   @Override
   public LongCacheStream sorted() {
      underlyingStream = underlyingStream.sorted();
      return this;
   }

   @Override
   public LongCacheStream peek(LongConsumer action) {
      underlyingStream = underlyingStream.peek(action);
      return this;
   }

   @Override
   public LongCacheStream limit(long maxSize) {
      underlyingStream = underlyingStream.limit(maxSize);
      return this;
   }

   @Override
   public LongCacheStream skip(long n) {
      underlyingStream = underlyingStream.skip(n);
      return this;
   }

   @Override
   public void forEach(LongConsumer action) {
      underlyingStream.forEach(action);
   }

   @Override
   public void forEachOrdered(LongConsumer action) {
      underlyingStream.forEachOrdered(action);
   }

   @Override
   public long[] toArray() {
      return underlyingStream.toArray();
   }

   @Override
   public long reduce(long identity, LongBinaryOperator op) {
      return underlyingStream.reduce(identity, op);
   }

   @Override
   public OptionalLong reduce(LongBinaryOperator op) {
      return underlyingStream.reduce(op);
   }

   @Override
   public <R> R collect(Supplier<R> supplier, ObjLongConsumer<R> accumulator, BiConsumer<R, R> combiner) {
      return underlyingStream.collect(supplier, accumulator, combiner);
   }

   @Override
   public long sum() {
      return underlyingStream.sum();
   }

   @Override
   public OptionalLong min() {
      return underlyingStream.min();
   }

   @Override
   public OptionalLong max() {
      return underlyingStream.max();
   }

   @Override
   public long count() {
      return underlyingStream.count();
   }

   @Override
   public OptionalDouble average() {
      return underlyingStream.average();
   }

   @Override
   public LongSummaryStatistics summaryStatistics() {
      return underlyingStream.summaryStatistics();
   }

   @Override
   public boolean anyMatch(LongPredicate predicate) {
      return underlyingStream.anyMatch(predicate);
   }

   @Override
   public boolean allMatch(LongPredicate predicate) {
      return underlyingStream.allMatch(predicate);
   }

   @Override
   public boolean noneMatch(LongPredicate predicate) {
      return underlyingStream.noneMatch(predicate);
   }

   @Override
   public OptionalLong findFirst() {
      return underlyingStream.findFirst();
   }

   @Override
   public OptionalLong findAny() {
      return underlyingStream.findAny();
   }

   @Override
   public <K, V> void forEach(ObjLongConsumer<Cache<K, V>> action) {
      underlyingStream.forEach(action);
   }

   @Override
   public LongCacheStream sequential() {
      underlyingStream = underlyingStream.sequential();
      return this;
   }

   @Override
   public LongCacheStream parallel() {
      underlyingStream = underlyingStream.parallel();
      return this;
   }

   @Override
   public PrimitiveIterator.OfLong iterator() {
      return underlyingStream.iterator();
   }

   @Override
   public Spliterator.OfLong spliterator() {
      return underlyingStream.spliterator();
   }

   @Override
   public boolean isParallel() {
      return underlyingStream.isParallel();
   }

   @Override
   public LongCacheStream unordered() {
      underlyingStream = underlyingStream.unordered();
      return this;
   }

   @Override
   public LongCacheStream onClose(Runnable closeHandler) {
      underlyingStream = underlyingStream.onClose(closeHandler);
      return this;
   }

   @Override
   public void close() {
      underlyingStream.close();
   }
}
| 8,785
| 26.54232
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CoreBlockHoundIntegration.java
|
package org.infinispan.util;
import org.infinispan.affinity.impl.KeyAffinityServiceImpl;
import org.infinispan.cache.impl.CacheImpl;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.internal.CommonsBlockHoundIntegration;
import org.infinispan.container.offheap.OffHeapConcurrentMap;
import org.infinispan.container.offheap.SegmentedBoundedOffHeapDataContainer;
import org.infinispan.executors.LimitedExecutor;
import org.infinispan.expiration.impl.ClusterExpirationManager;
import org.infinispan.factories.impl.BasicComponentRegistryImpl;
import org.infinispan.factories.threads.EnhancedQueueExecutorFactory;
import org.infinispan.interceptors.impl.CacheMgmtInterceptor;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.marshall.protostream.impl.SerializationContextRegistryImpl;
import org.infinispan.persistence.manager.PersistenceManagerImpl;
import org.infinispan.persistence.sifs.TemporaryTable;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.statetransfer.StateTransferLockImpl;
import org.infinispan.topology.ClusterTopologyManagerImpl;
import org.infinispan.topology.LocalTopologyManagerImpl;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.xa.recovery.RecoveryManagerImpl;
import org.jgroups.JChannel;
import org.jgroups.blocks.cs.TcpConnection;
import org.jgroups.fork.ForkChannel;
import org.jgroups.protocols.UNICAST3;
import org.jgroups.protocols.pbcast.GMS;
import org.jgroups.util.TimeScheduler3;
import org.kohsuke.MetaInfServices;
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;
/**
 * BlockHound integration for infinispan-core: marks Infinispan's known-blocking entry points and
 * whitelists calls that either block only very briefly or are tracked by a JIRA for removal.
 */
@SuppressWarnings("unused")
@MetaInfServices
public class CoreBlockHoundIntegration implements BlockHoundIntegration {
   @Override
   public void applyTo(BlockHound.Builder builder) {
      registerBlockingMethods(builder);

      // Block designates methods that should only hold a lock very briefly
      {
         CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, OffHeapConcurrentMap.class);
         // This acquires the lruLock and also OffHeapConcurrentMap stampedLocks when processing eviction
         builder.allowBlockingCallsInside(SegmentedBoundedOffHeapDataContainer.class.getName(), "ensureSize");

         CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, StateTransferLockImpl.class);

         // LimitedExecutor just submits a task to another thread pool
         builder.allowBlockingCallsInside(LimitedExecutor.class.getName(), "acquireLock");
         // This invokes the actual runnable - we have to make sure it doesn't block as normal
         builder.disallowBlockingCallsInside(LimitedExecutor.class.getName(), "actualRun");

         // This method by design will never block; It may block very shortly if another thread is removing or adding
         // to the queue, but it will never block for an extended period by design as there will always be room
         builder.allowBlockingCallsInside(ClusterExpirationManager.class.getName(), "addStageToPermits");

         // This shouldn't block long when held - but it is a write lock which can be delayed
         builder.allowBlockingCallsInside(KeyAffinityServiceImpl.class.getName(), "handleViewChange");

         builder.allowBlockingCallsInside(TransactionTable.class.getName(), "calculateMinTopologyId");

         builder.allowBlockingCallsInside(ClusterTopologyManagerImpl.class.getName(), "acquireUpdateLock");

         builder.allowBlockingCallsInside(PersistenceManagerImpl.class.getName(), "acquireReadLock");

         builder.allowBlockingCallsInside(JGroupsTransport.class.getName(), "withView");
      }

      // (A duplicate disallowBlockingCallsInside(LimitedExecutor, "actualRun") registration that used to
      // follow here was removed - it is already registered above.)

      // If shutting down a cache manager - don't worry if blocking
      builder.allowBlockingCallsInside(DefaultCacheManager.class.getName(), "stop");

      // The blocking iterator locks to signal at the end - ignore (we can't reference class object as it is internal)
      builder.allowBlockingCallsInside("io.reactivex.rxjava3.internal.operators.flowable.BlockingFlowableIterable" + "$BlockingFlowableIterator", "signalConsumer");

      // Loading up the EnhancedQueueExecutor class loads org.jboss.threads.Version that reads a file to determine version
      builder.allowBlockingCallsInside(EnhancedQueueExecutorFactory.class.getName(), "createExecutor");

      // Reads from a file during initialization which is during store startup
      builder.allowBlockingCallsInside("org.infinispan.persistence.sifs.Index", "checkForExistingIndexSizeFile");

      methodsToBeRemoved(builder);

      questionableMethodsAllowedToBlock(builder);

      jgroups(builder);
   }

   // Whitelists JGroups internals wholesale, treating its messaging path as non-blocking
   private void jgroups(BlockHound.Builder builder) {
      // Just ignore jgroups for now and assume it is non blocking
      builder.allowBlockingCallsInside(JChannel.class.getName(), "send");
      builder.allowBlockingCallsInside(ForkChannel.class.getName(), "send");

      // Sometimes JGroups sends messages or does other blocking stuff without going through the channel
      builder.allowBlockingCallsInside(TcpConnection.class.getName(), "connect");
      builder.allowBlockingCallsInside(TcpConnection.class.getName(), "send");
      builder.allowBlockingCallsInside(TcpConnection.class.getName() + "$Receiver", "run");

      // Blocking internals
      builder.allowBlockingCallsInside(TimeScheduler3.class.getName(), "add");
      builder.allowBlockingCallsInside(GMS.class.getName(), "process");
      builder.allowBlockingCallsInside(UNICAST3.class.getName(), "triggerXmit");
   }

   /**
    * Various methods that need to be removed as they are essentially bugs. Please ensure that a JIRA is created and
    * referenced here for any such method
    * @param builder the block hound builder to register methods
    */
   private static void methodsToBeRemoved(BlockHound.Builder builder) {
      // The internal map only supports local mode - we need to replace with Caffeine
      // https://issues.redhat.com/browse/ISPN-11272
      builder.allowBlockingCallsInside(RecoveryManagerImpl.class.getName(), "registerInDoubtTransaction");

      // SoftIndexFileStore locks and awaits on write if there is a concurrent compaction
      // https://issues.redhat.com/browse/ISPN-13799
      builder.allowBlockingCallsInside(TemporaryTable.class.getName(), "set");
   }

   // Marks cache operations that genuinely block so BlockHound flags them on non-blocking threads
   private static void registerBlockingMethods(BlockHound.Builder builder) {
      builder.markAsBlocking(CacheImpl.class, "size", "()I");
      builder.markAsBlocking(CacheImpl.class, "size", "(J)I");
      builder.markAsBlocking(CacheImpl.class, "containsKey", "(Ljava/lang/Object;)Z");
      builder.markAsBlocking(CacheImpl.class, "get", "(Ljava/lang/Object;)Ljava/lang/Object;");
      builder.markAsBlocking(CacheImpl.class, "put", "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;");

      // Distributed streams are blocking!
      builder.markAsBlocking("org.infinispan.interceptors.distribution.DistributionBulkInterceptor$BackingEntrySet", "stream", "()Lorg/infinispan/CacheStream;");
      builder.markAsBlocking("org.infinispan.interceptors.distribution.DistributionBulkInterceptor$BackingEntrySet", "parallelStream", "()Lorg/infinispan/CacheStream;");
   }

   private static void questionableMethodsAllowedToBlock(BlockHound.Builder builder) {
      // This happens when a cache is requested while it is still starting
      // Due to this happening at startup or extremely rarely at runtime, we can ignore it
      // This should be fixed in https://issues.redhat.com/browse/ISPN-11396
      CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, BasicComponentRegistryImpl.class);
      try {
         CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, Class.forName(BasicComponentRegistryImpl.class.getName() + "$ComponentWrapper"));
      } catch (ClassNotFoundException e) {
         throw new CacheException(e);
      }
      // Can wait on lock
      builder.allowBlockingCallsInside(ClusterTopologyManagerImpl.class.getName(), "updateState");

      // These methods to I/O via GlobalStateManager
      builder.allowBlockingCallsInside(ClusterTopologyManagerImpl.class.getName(), "initCacheStatusIfAbsent");
      builder.allowBlockingCallsInside(LocalTopologyManagerImpl.class.getName(), "writeCHState");
      builder.allowBlockingCallsInside(LocalTopologyManagerImpl.class.getName(), "deleteCHState");
      builder.allowBlockingCallsInside(LocalTopologyManagerImpl.class.getName(), "getNumberMembersFromState");

      // This can block if there is a store otherwise it won't block
      builder.allowBlockingCallsInside(CacheMgmtInterceptor.class.getName(), "getNumberOfEntries");

      // Unfortunately retrieving the protobuf schema reads from a separately generated file - We hope this can be changed
      // so instead the generated context initializer can just store the schema as a String.
      builder.allowBlockingCallsInside(SerializationContextRegistryImpl.class.getName(), "register");
   }
}
| 9,225
| 56.6625
| 169
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/IntSetExternalizer.java
|
package org.infinispan.util;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSetsExternalization;
/**
 * Externalizer for all {@link IntSet} implementations; the wire format is fully
 * delegated to {@link IntSetsExternalization}.
 *
 * @author wburns
 * @since 9.0
 */
// "deprecation" is the warning key recognized by javac; the previous value
// "deprecated" is not a standard key and suppressed nothing.
@SuppressWarnings("deprecation")
public class IntSetExternalizer extends AbstractExternalizer<IntSet> {

   /** @return the fixed externalizer id for IntSet instances. */
   @Override
   public Integer getId() {
      return Ids.INT_SET;
   }

   /** @return all concrete IntSet classes this externalizer can handle. */
   @Override
   public Set<Class<? extends IntSet>> getTypeClasses() {
      return IntSetsExternalization.getTypeClasses();
   }

   @Override
   public void writeObject(ObjectOutput output, IntSet intSet) throws IOException {
      IntSetsExternalization.writeTo(output, intSet);
   }

   @Override
   public IntSet readObject(ObjectInput input) throws IOException, ClassNotFoundException {
      return IntSetsExternalization.readFrom(input);
   }
}
| 1,070
| 25.121951
| 91
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/SetMapper.java
|
package org.infinispan.util;
import java.util.Collection;
import java.util.Set;
import java.util.function.Function;
import org.infinispan.commons.util.InjectiveFunction;
/**
* A set that maps another one to a new one of a possibly different type. Note this set is read only
* and doesn't accept write operations.
* <p>
* This class currently only accepts a {@link Function} that also implements {@link InjectiveFunction} so that it can
 * guarantee the resulting mapped values are distinct from each other. This is important as many operations become
 * very costly if this is not true.
* <p>
* Some operations such as {@link Collection#contains(Object)} and {@link Collection#containsAll(Collection)} may be
* more expensive then normal since they cannot utilize lookups into the original collection.
* @author wburns
* @since 9.0
*/
public class SetMapper<E, R> extends CollectionMapper<E, R> implements Set<R> {

   /**
    * @param realCollection the set being mapped
    * @param mapper the mapping function; must also implement {@link InjectiveFunction}
    *               so that mapped values are guaranteed to stay distinct
    * @throws IllegalArgumentException if {@code mapper} does not implement {@link InjectiveFunction}
    */
   public SetMapper(Set<E> realCollection, Function<? super E, ? extends R> mapper) {
      super(realCollection, mapper);
      if (!(mapper instanceof InjectiveFunction)) {
         // Message fixed: it previously read "must also provided ... evidented by implementingthe marker ..."
         throw new IllegalArgumentException("Function must also provide distinct values as evidenced by " +
               "implementing the marker interface InjectiveFunction");
      }
   }
}
| 1,302
| 41.032258
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/StripedHashFunction.java
|
package org.infinispan.util;
/**
 * A hash function for lock striping.
 * <p>
 * The number of segments is the smallest power of two that is at least the requested
 * concurrency level; objects are spread over those segments with a bit-mixing hash.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public class StripedHashFunction<T> {
   private final int lockSegmentMask;
   private final int lockSegmentShift;
   private final int numSegments;

   public StripedHashFunction(int concurrencyLevel) {
      int shift = 0;
      int segments = 1;
      // round the concurrency level up to the next power of two
      while (segments < concurrencyLevel) {
         shift++;
         segments <<= 1;
      }
      this.lockSegmentShift = 32 - shift;
      this.lockSegmentMask = segments - 1;
      this.numSegments = segments;
   }

   /**
    * Mixes the bits of a hash code so high-order bits influence the segment choice.
    *
    * @param hashCode the object's hash code serving as a key.
    * @return the mixed hash code
    */
   private static int hash(int hashCode) {
      int h = hashCode + ~(hashCode << 9);
      h ^= h >>> 14;
      h += h << 4;
      h ^= h >>> 10;
      return h;
   }

   /**
    * @return the number of segments.
    */
   public final int getNumSegments() {
      return numSegments;
   }

   /**
    * Computes the segment the given object belongs to.
    *
    * @param object the object to hash.
    * @return the segment index, between 0 and {@link #getNumSegments()}-1.
    */
   public final int hashToSegment(T object) {
      return (hash(object.hashCode()) >>> lockSegmentShift) & lockSegmentMask;
   }
}
| 1,520
| 24.35
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/Closeables.java
|
package org.infinispan.util;
import org.infinispan.commons.util.CloseableIterator;
import org.reactivestreams.Publisher;
/**
 * Bridges core-only types to their closeable counterparts.
 *
 * @author wburns
 * @since 9.3
 */
public class Closeables {

   private Closeables() {
      // static utility holder - no instances
   }

   /**
    * Exposes a {@link Publisher} as a {@link CloseableIterator}: up to {@code fetchSize}
    * items are buffered ahead of the consumer and refetched as the iterator drains them.
    * Closing the iterator also cancels the underlying
    * {@link org.reactivestreams.Subscription}.
    *
    * @param publisher the publisher to convert
    * @param fetchSize how many entries to hold in memory at once in preparation for the iterators consumption
    * @param <E> value type
    * @return an iterator that when closed will cancel the subscription
    * @deprecated since 11.0 Please use {@link org.infinispan.commons.util.Closeables#iterator(Publisher, int)} instead.
    */
   @Deprecated
   public static <E> CloseableIterator<E> iterator(Publisher<E> publisher, int fetchSize) {
      return org.infinispan.commons.util.Closeables.iterator(publisher, fetchSize);
   }
}
| 1,230
| 41.448276
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/IracUtils.java
|
package org.infinispan.util;
import static org.infinispan.metadata.impl.PrivateMetadata.getBuilder;
import java.util.Optional;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.container.versioning.irac.IracTombstoneManager;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.util.logging.LogSupplier;
/**
 * Utility methods for IRAC (asynchronous cross-site replication).
 *
 * @author Pedro Ruivo
 * @since 12.0
 */
public final class IracUtils {

   private IracUtils() {
      // utility class - no instances
   }

   /**
    * Extracts the {@link IracMetadata} stored in the entry's private metadata, if present.
    */
   public static Optional<IracMetadata> findIracMetadataFromCacheEntry(CacheEntry<?, ?> entry) {
      return Optional.ofNullable(entry.getInternalMetadata())
            .map(PrivateMetadata::iracMetadata);
   }

   /**
    * @return the IRAC version stored in the entry, or {@code null} when absent.
    */
   public static IracEntryVersion getIracVersionFromCacheEntry(CacheEntry<?, ?> entry) {
      return findIracMetadataFromCacheEntry(entry)
            .map(IracMetadata::getVersion)
            .orElse(null);
   }

   /**
    * Stores the {@link IracMetadata} into the {@link CacheEntry}.
    * <p>
    * For removals a tombstone is recorded via
    * {@link IracTombstoneManager#storeTombstone(int, Object, IracMetadata)}; otherwise the
    * entry's metadata is updated and any stale tombstone is dropped.
    *
    * @param entry            The {@link CacheEntry} to update.
    * @param metadata         The {@link IracMetadata} to store.
    * @param versionGenerator The {@link IracTombstoneManager} to update.
    * @param logSupplier      The {@link LogSupplier} to log the {@link IracMetadata} and the key.
    */
   public static void setIracMetadata(CacheEntry<?, ?> entry, int segment, IracMetadata metadata,
         IracTombstoneManager versionGenerator, LogSupplier logSupplier) {
      assert metadata != null : "[IRAC] Metadata must not be null!";
      Object key = entry.getKey();
      if (entry.isRemoved()) {
         // removal: keep a tombstone so later conflicting updates can still be ordered
         logTombstoneAssociated(key, metadata, logSupplier);
         versionGenerator.storeTombstone(segment, key, metadata);
         return;
      }
      logIracMetadataAssociated(key, metadata, logSupplier);
      updateCacheEntryMetadata(entry, metadata);
      versionGenerator.removeTombstone(key);
   }

   /**
    * Same as {@link #setIracMetadata(CacheEntry, int, IracMetadata, IracTombstoneManager, LogSupplier)}
    * but stores a "full" {@link PrivateMetadata} instead of just the {@link IracMetadata}.
    * Used when applying updates from a remote site.
    *
    * @param entry            The {@link CacheEntry} to update.
    * @param metadata         The {@link PrivateMetadata} to store.
    * @param versionGenerator The {@link IracTombstoneManager} to update.
    * @param logSupplier      The {@link LogSupplier} to log the {@link PrivateMetadata} and the key.
    */
   public static void setPrivateMetadata(CacheEntry<?, ?> entry, int segment, PrivateMetadata metadata,
         IracTombstoneManager versionGenerator, LogSupplier logSupplier) {
      IracMetadata iracMetadata = metadata.iracMetadata();
      assert iracMetadata != null : "[IRAC] Metadata must not be null!";
      Object key = entry.getKey();
      if (entry.isRemoved()) {
         logTombstoneAssociated(key, iracMetadata, logSupplier);
         versionGenerator.storeTombstone(segment, key, iracMetadata);
         return;
      }
      logIracMetadataAssociated(key, iracMetadata, logSupplier);
      entry.setInternalMetadata(metadata);
      versionGenerator.removeTombstone(key);
   }

   public static void logUpdateDiscarded(Object key, IracMetadata metadata, LogSupplier logSupplier) {
      if (!logSupplier.isTraceEnabled()) {
         return;
      }
      logSupplier.getLog().tracef("[IRAC] Update from remote site discarded. Metadata=%s, key=%s", metadata, key);
   }

   private static void logIracMetadataAssociated(Object key, IracMetadata metadata, LogSupplier logSupplier) {
      if (!logSupplier.isTraceEnabled()) {
         return;
      }
      logSupplier.getLog().tracef("[IRAC] IracMetadata %s associated with key '%s'", metadata, key);
   }

   private static void logTombstoneAssociated(Object key, IracMetadata metadata, LogSupplier logSupplier) {
      if (!logSupplier.isTraceEnabled()) {
         return;
      }
      logSupplier.getLog().tracef("[IRAC] Store tombstone %s for key '%s'", metadata, key);
   }

   // Merges the IRAC metadata into the entry's existing private metadata.
   private static void updateCacheEntryMetadata(CacheEntry<?, ?> entry, IracMetadata iracMetadata) {
      entry.setInternalMetadata(getBuilder(entry.getInternalMetadata())
            .iracMetadata(iracMetadata)
            .build());
   }
}
| 4,875
| 41.77193
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/WriteableCacheSetMapper.java
|
package org.infinispan.util;
import java.util.Collection;
import org.infinispan.CacheSet;
import org.infinispan.commons.util.InjectiveFunction;
/**
* A writeable cache set mapper that also has constant time operations for things such as
* {@link Collection#contains(Object)} if the underlying Set does. Also implements the Set interface.
* <p>
* This set should be used for cases when a simple transformation of an element to another is all that is
* needed by the underlying set.
* <p>
* This implementation is basically identical to {@link WriteableCacheCollectionMapper} except that this class
* also implements {@link java.util.Set} and all of its optional operations.
* @author wburns
* @since 9.2
* @param <E> the original collection type - referred to as old in some methods
* @param <R> the resulting collection type - referred to as new in some methods
*/
public class WriteableCacheSetMapper<E, R> extends WriteableCacheCollectionMapper<E, R> implements CacheSet<R> {
   /**
    * @param realSet the backing writeable cache set
    * @param toNewTypeFunction converts an old-type element to the new type (E -> R)
    * @param fromNewTypeFunction converts a new-type element back to the old type (R -> E),
    *        allowing write operations to be delegated to the backing set
    * @param keyFilterFunction applied to arbitrary lookup arguments — presumably before
    *        delegating containment checks; confirm in WriteableCacheCollectionMapper
    */
   public WriteableCacheSetMapper(CacheSet<E> realSet,
                                  InjectiveFunction<? super E, ? extends R> toNewTypeFunction,
                                  InjectiveFunction<? super R, ? extends E> fromNewTypeFunction,
                                  InjectiveFunction<Object, ?> keyFilterFunction) {
      super(realSet, toNewTypeFunction, fromNewTypeFunction, keyFilterFunction);
   }

   /**
    * Same as the other constructor, but with a separate mapping function used for
    * iteration (E -> R), distinct from the general element mapping.
    */
   public WriteableCacheSetMapper(CacheSet<E> realSet,
                                  InjectiveFunction<? super E, ? extends R> toNewTypeFunction,
                                  InjectiveFunction<? super E, ? extends R> toNewTypeIteratorFunction,
                                  InjectiveFunction<? super R, ? extends E> fromNewTypeFunction,
                                  InjectiveFunction<Object, ?> keyFilterFunction) {
      super(realSet, toNewTypeFunction, toNewTypeIteratorFunction, fromNewTypeFunction, keyFilterFunction);
   }
}
| 1,786
| 46.026316
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CyclicDependencyException.java
|
package org.infinispan.util;
/**
 * Signals that a cyclic dependency was detected.
 *
 * @author gustavonalle
 * @since 7.0
 */
public class CyclicDependencyException extends Exception {

   /** @param message description of the detected cycle */
   public CyclicDependencyException(String message) {
      super(message);
   }

   /** @param cause the underlying failure that revealed the cycle */
   protected CyclicDependencyException(String message, Throwable cause) {
      super(message, cause);
   }
}
| 368
| 20.705882
| 73
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/TriangleFunctionsUtil.java
|
package org.infinispan.util;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.triangle.BackupWriteCommand;
import org.infinispan.commands.triangle.MultiEntriesFunctionalBackupWriteCommand;
import org.infinispan.commands.triangle.MultiKeyFunctionalBackupWriteCommand;
import org.infinispan.commands.triangle.PutMapBackupWriteCommand;
import org.infinispan.commands.triangle.SingleKeyBackupWriteCommand;
import org.infinispan.commands.triangle.SingleKeyFunctionalBackupWriteCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commons.util.InfinispanCollections;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.remoting.responses.ValidResponse;
/**
* Some utility functions for {@link org.infinispan.interceptors.distribution.TriangleDistributionInterceptor}.
*
* @author Pedro Ruivo
* @since 9.2
*/
public final class TriangleFunctionsUtil {

   private TriangleFunctionsUtil() {
      // static utility class - no instances
   }

   /**
    * Copies the command, keeping only the map entries whose key is in {@code keys}.
    */
   public static PutMapCommand copy(PutMapCommand command, Collection<Object> keys) {
      PutMapCommand copy = new PutMapCommand(command);
      copy.setMap(filterEntries(command.getMap(), keys));
      return copy;
   }

   /**
    * Copies the command, keeping only the arguments whose key is in {@code keys}.
    */
   public static <K, V, T> WriteOnlyManyEntriesCommand<K, V, T> copy(WriteOnlyManyEntriesCommand<K, V, T> command,
         Collection<Object> keys) {
      return new WriteOnlyManyEntriesCommand<>(command).withArguments(filterEntries(command.getArguments(), keys));
   }

   /**
    * Copies the command, restricted to the given key set.
    */
   public static <K, V> WriteOnlyManyCommand<K, V> copy(WriteOnlyManyCommand<K, V> command, Collection<Object> keys) {
      WriteOnlyManyCommand<K, V> copy = new WriteOnlyManyCommand<>(command);
      copy.setKeys(keys);
      return copy;
   }

   /**
    * Copies the command, restricted to the given key set.
    */
   public static <K, V, R> ReadWriteManyCommand<K, V, R> copy(ReadWriteManyCommand<K, V, R> command,
         Collection<Object> keys) {
      ReadWriteManyCommand<K, V, R> copy = new ReadWriteManyCommand<>(command);
      copy.setKeys(keys);
      return copy;
   }

   /**
    * Copies the command, keeping only the arguments whose key is in {@code keys}.
    */
   public static <K, V, T, R> ReadWriteManyEntriesCommand<K, V, T, R> copy(ReadWriteManyEntriesCommand<K, V, T, R> command,
         Collection<Object> keys) {
      return new ReadWriteManyEntriesCommand<K, V, T, R>(command).withArguments(filterEntries(command.getArguments(), keys));
   }

   /**
    * Merges the map carried in a remote response into {@code resultMap}.
    */
   public static Map<Object, Object> mergeHashMap(ValidResponse response, Map<Object, Object> resultMap) {
      //noinspection unchecked
      Map<Object, Object> remoteMap = (Map<Object, Object>) response.getResponseValue();
      return InfinispanCollections.mergeMaps(resultMap, remoteMap);
   }

   /** No-op merge function for commands with {@code Void} results. */
   @SuppressWarnings("unused")
   public static Void voidMerge(ValidResponse ignored1, Void ignored2) {
      return null;
   }

   /**
    * Merges the list carried in a remote response into {@code resultList}.
    */
   public static List<Object> mergeList(ValidResponse response, List<Object> resultList) {
      //noinspection unchecked
      List<Object> list = (List<Object>) response.getResponseValue();
      return InfinispanCollections.mergeLists(list, resultList);
   }

   /**
    * Groups {@code keys} by the segment each key maps to in the given topology.
    */
   public static Map<Integer, Collection<Object>> filterBySegment(LocalizedCacheTopology cacheTopology,
         Collection<Object> keys) {
      Map<Integer, Collection<Object>> filteredKeys = new HashMap<>(
            cacheTopology.getReadConsistentHash().getNumSegments());
      for (Object key : keys) {
         filteredKeys.computeIfAbsent(cacheTopology.getSegment(key), integer -> new ArrayList<>()).add(key);
      }
      return filteredKeys;
   }

   /**
    * Returns a new map containing only the entries of {@code map} whose key is in {@code keys}.
    * Note: contains() per entry makes this O(n*m) unless {@code keys} is a hash-based set.
    */
   public static <K, V> Map<K, V> filterEntries(Map<K, V> map, Collection<Object> keys) {
      //note: can't use Collector.toMap() since the implementation doesn't support null values.
      return map.entrySet().stream()
            .filter(entry -> keys.contains(entry.getKey()))
            .collect(HashMap::new, (rMap, entry) -> rMap.put(entry.getKey(), entry.getValue()), HashMap::putAll);
   }

   // The backupFrom overloads below wrap each primary-owner write command in the matching
   // BackupWriteCommand so it can be replicated to backup owners.

   public static BackupWriteCommand backupFrom(CommandsFactory factory, PutKeyValueCommand command) {
      SingleKeyBackupWriteCommand cmd = factory.buildSingleKeyBackupWriteCommand();
      cmd.setPutKeyValueCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, IracPutKeyValueCommand command) {
      SingleKeyBackupWriteCommand cmd = factory.buildSingleKeyBackupWriteCommand();
      cmd.setIracPutKeyValueCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, RemoveCommand command) {
      SingleKeyBackupWriteCommand cmd = factory.buildSingleKeyBackupWriteCommand();
      // second argument flags whether this removal came from an expiration
      cmd.setRemoveCommand(command, command.getCommandId() == RemoveExpiredCommand.COMMAND_ID);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, ReplaceCommand command) {
      SingleKeyBackupWriteCommand cmd = factory.buildSingleKeyBackupWriteCommand();
      cmd.setReplaceCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, ComputeIfAbsentCommand command) {
      SingleKeyBackupWriteCommand cmd = factory.buildSingleKeyBackupWriteCommand();
      cmd.setComputeIfAbsentCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, ComputeCommand command) {
      SingleKeyBackupWriteCommand cmd = factory.buildSingleKeyBackupWriteCommand();
      cmd.setComputeCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, ReadWriteKeyValueCommand command) {
      SingleKeyFunctionalBackupWriteCommand cmd = factory.buildSingleKeyFunctionalBackupWriteCommand();
      cmd.setReadWriteKeyValueCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, ReadWriteKeyCommand command) {
      SingleKeyFunctionalBackupWriteCommand cmd = factory.buildSingleKeyFunctionalBackupWriteCommand();
      cmd.setReadWriteKeyCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, WriteOnlyKeyValueCommand command) {
      SingleKeyFunctionalBackupWriteCommand cmd = factory.buildSingleKeyFunctionalBackupWriteCommand();
      cmd.setWriteOnlyKeyValueCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, WriteOnlyKeyCommand command) {
      SingleKeyFunctionalBackupWriteCommand cmd = factory.buildSingleKeyFunctionalBackupWriteCommand();
      cmd.setWriteOnlyKeyCommand(command);
      return cmd;
   }

   public static BackupWriteCommand backupFrom(CommandsFactory factory, PutMapCommand command,
         Collection<Object> keys) {
      PutMapBackupWriteCommand cmd = factory.buildPutMapBackupWriteCommand();
      cmd.setPutMapCommand(command, keys);
      return cmd;
   }

   public static <K, V, T> BackupWriteCommand backupFrom(CommandsFactory factory,
         WriteOnlyManyEntriesCommand<K, V, T> command, Collection<Object> keys) {
      MultiEntriesFunctionalBackupWriteCommand cmd = factory.buildMultiEntriesFunctionalBackupWriteCommand();
      cmd.setWriteOnly(command, keys);
      return cmd;
   }

   public static <K, V, T, R> BackupWriteCommand backupFrom(CommandsFactory factory,
         ReadWriteManyEntriesCommand<K, V, T, R> command, Collection<Object> keys) {
      MultiEntriesFunctionalBackupWriteCommand cmd = factory.buildMultiEntriesFunctionalBackupWriteCommand();
      cmd.setReadWrite(command, keys);
      return cmd;
   }

   public static <K, V> BackupWriteCommand backupFrom(CommandsFactory factory,
         WriteOnlyManyCommand<K, V> command, Collection<Object> keys) {
      MultiKeyFunctionalBackupWriteCommand cmd = factory.buildMultiKeyFunctionalBackupWriteCommand();
      cmd.setWriteOnly(command, keys);
      return cmd;
   }

   public static <K, V, R> BackupWriteCommand backupFrom(CommandsFactory factory,
         ReadWriteManyCommand<K, V, R> command, Collection<Object> keys) {
      MultiKeyFunctionalBackupWriteCommand cmd = factory.buildMultiKeyFunctionalBackupWriteCommand();
      cmd.setReadWrite(command, keys);
      return cmd;
   }
}
| 9,175
| 43.543689
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/UserRaisedFunctionalException.java
|
package org.infinispan.util;
/**
 * Wraps an exception raised by user code passed as a lambda expression to commands such as
 * {@link org.infinispan.commands.write.ComputeIfAbsentCommand}. Such failures are not
 * converted into a {@link org.infinispan.commons.CacheException} but are propagated to the
 * user as-is.
 *
 * @author karesti@redhat.com
 * @since 9.1
 */
public class UserRaisedFunctionalException extends RuntimeException {

   /** @param cause the exception thrown by the user-supplied function */
   public UserRaisedFunctionalException(Throwable cause) {
      super(cause);
   }
}
| 551
| 31.470588
| 138
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CacheSetMapper.java
|
package org.infinispan.util;
import java.util.function.Function;
import org.infinispan.CacheSet;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableSpliterator;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.commons.util.SpliteratorMapper;
/**
 * A {@link CacheSet} view that lazily maps each element of an underlying set to a new value
 * on access. Because the mapping is applied lazily, the backing set is never materialized in
 * memory, which keeps single-value checks fast and avoids out-of-memory issues.
 *
 * @author wburns
 * @since 9.0
 */
public class CacheSetMapper<E, R> extends SetMapper<E, R> implements CacheSet<R> {
   protected final CacheSet<E> realSet;

   public CacheSetMapper(CacheSet<E> realSet, Function<? super E, ? extends R> mapper) {
      super(realSet, mapper);
      this.realSet = realSet;
   }

   @Override
   public CloseableIterator<R> iterator() {
      return new IteratorMapper<>(realSet.iterator(), mapper);
   }

   @Override
   public CloseableSpliterator<R> spliterator() {
      return new SpliteratorMapper<>(realSet.spliterator(), mapper);
   }

   @Override
   public CacheStream<R> stream() {
      return realSet.stream().map(mapper);
   }

   @Override
   public CacheStream<R> parallelStream() {
      return realSet.parallelStream().map(mapper);
   }
}
| 1,483
| 30.574468
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/ModuleProperties.java
|
package org.infinispan.util;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.module.ModuleCommandExtensions;
import org.infinispan.commands.module.ModuleCommandFactory;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commons.util.ServiceFinder;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* The <code>ModuleProperties</code> class represents Infinispan's module service extensions.
*
* @author Vladimir Blagojevic
* @author Sanne Grinovero
* @author Galder Zamarreño
* @since 4.0
* @deprecated Since 10.0, without replacement. To be removed very soon.
*/
@Deprecated
public final class ModuleProperties {

   private static final Log log = LogFactory.getLog(ModuleProperties.class);

   // Both fields are populated by loadModuleCommandHandlers(). NOTE(review): they stay
   // null until that method runs, and moduleCommands also remains null when no extensions
   // are found (only commandFactories is set to an empty map in that branch); callers
   // below guard against the null.
   private Map<Byte, ModuleCommandFactory> commandFactories;
   private Collection<Class<? extends ReplicableCommand>> moduleCommands;

   /**
    * Discovers {@link ModuleCommandExtensions} implementations via the service loader and
    * registers each module's command factory, rejecting duplicate command ids.
    *
    * @param cl class loader used for service discovery
    * @throws IllegalArgumentException if two factories claim the same command id
    */
   public void loadModuleCommandHandlers(ClassLoader cl) {
      Collection<ModuleCommandExtensions> moduleCmdExtLoader = ServiceFinder.load(ModuleCommandExtensions.class, cl);
      if (moduleCmdExtLoader.iterator().hasNext()) {
         commandFactories = new HashMap<>(1);
         moduleCommands = new HashSet<>(1);
         for (ModuleCommandExtensions extension : moduleCmdExtLoader) {
            log.debugf("Loading module command extension SPI class: %s", extension);
            ModuleCommandFactory cmdFactory = extension.getModuleCommandFactory();
            Objects.requireNonNull(cmdFactory);
            for (Map.Entry<Byte, Class<? extends ReplicableCommand>> command : cmdFactory.getModuleCommands().entrySet()) {
               byte id = command.getKey();
               if (commandFactories.containsKey(id))
                  throw new IllegalArgumentException(String.format(
                        "Cannot use id %d for commands, as it is already in use by %s",
                        id, commandFactories.get(id).getClass().getName()));
               commandFactories.put(id, cmdFactory);
               moduleCommands.add(command.getValue());
            }
         }
      } else {
         log.debug("No module command extensions to load");
         commandFactories = Collections.emptyMap();
      }
   }

   /** @return the discovered module command classes; may be null if nothing was loaded. */
   public Collection<Class<? extends ReplicableCommand>> moduleCommands() {
      return moduleCommands;
   }

   /** @return command id to factory mapping built by {@link #loadModuleCommandHandlers(ClassLoader)}. */
   public Map<Byte, ModuleCommandFactory> moduleCommandFactories() {
      return commandFactories;
   }

   /**
    * @return the subset of module commands that are {@link CacheRpcCommand}s.
    */
   @SuppressWarnings("unchecked")
   public Collection<Class<? extends CacheRpcCommand>> moduleCacheRpcCommands() {
      Collection<Class<? extends ReplicableCommand>> cmds = moduleCommands();
      if (cmds == null || cmds.isEmpty())
         return Collections.emptySet();
      Collection<Class<? extends CacheRpcCommand>> cacheRpcCmds = new HashSet<>(2);
      for (Class<? extends ReplicableCommand> moduleCmdClass : cmds) {
         if (CacheRpcCommand.class.isAssignableFrom(moduleCmdClass))
            cacheRpcCmds.add((Class<? extends CacheRpcCommand>) moduleCmdClass);
      }
      return cacheRpcCmds;
   }

   /**
    * @return the subset of module commands that are plain {@link ReplicableCommand}s
    *         (i.e. not {@link CacheRpcCommand}s).
    */
   public Collection<Class<? extends ReplicableCommand>> moduleOnlyReplicableCommands() {
      Collection<Class<? extends ReplicableCommand>> cmds = moduleCommands();
      if (cmds == null || cmds.isEmpty())
         return Collections.emptySet();
      Collection<Class<? extends ReplicableCommand>> replicableOnlyCmds = new HashSet<>(2);
      for (Class<? extends ReplicableCommand> moduleCmdClass : cmds) {
         if (!CacheRpcCommand.class.isAssignableFrom(moduleCmdClass)) {
            replicableOnlyCmds.add(moduleCmdClass);
         }
      }
      return replicableOnlyCmds;
   }
}
| 3,874
| 38.141414
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/ByteString.java
|
package org.infinispan.util;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.commons.util.Util;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
/**
* A simple class which encapsulates a byte[] representation of a String using a predefined encoding (currently UTF-8).
* This avoids repeated invocation of the expensive {@link ObjectOutput#writeUTF(String)} on marshalling
*
* @author Tristan Tarrant
* @since 9.0
*/
@ProtoTypeId(ProtoStreamTypeIds.BYTE_STRING)
public final class ByteString implements Comparable<ByteString> {
   private static final Charset CHARSET = StandardCharsets.UTF_8;
   private static final ByteString EMPTY = new ByteString(Util.EMPTY_BYTE_ARRAY);
   // The length is marshalled below as a single unsigned byte, hence the 255-byte cap.
   private static final int MAX_LENGTH = 255;

   // Lazily-built String view of 'bytes'; never written by writeObject() below.
   // NOTE(review): the lazy init in toString() is a benign data race — concurrent callers
   // may each build the String, but the result is identical.
   private transient String s;
   // Precomputed once from the bytes; also excluded from the hand-rolled marshalling.
   private transient final int hash;

   @ProtoField(number = 1)
   final byte[] bytes;

   /**
    * @throws IllegalArgumentException if the byte array exceeds {@value #MAX_LENGTH} bytes
    */
   @ProtoFactory
   ByteString(byte[] bytes) {
      if (bytes.length > MAX_LENGTH) {
         throw new IllegalArgumentException("ByteString must be less than 256 bytes");
      }
      this.bytes = bytes;
      this.hash = Arrays.hashCode(bytes);
   }

   /**
    * Creates a ByteString from the UTF-8 encoding of {@code s}; the empty string maps to
    * the shared {@link #EMPTY} instance.
    */
   public static ByteString fromString(String s) {
      if (s.length() == 0)
         return EMPTY;
      else
         return new ByteString(s.getBytes(CHARSET));
   }

   /** @return true if the UTF-8 encoding of {@code s} fits within the 255-byte limit. */
   public static boolean isValid(String s) {
      return s.getBytes(CHARSET).length <= MAX_LENGTH;
   }

   public static ByteString emptyString() {
      return EMPTY;
   }

   @Override
   public int hashCode() {
      return hash;
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || getClass() != o.getClass()) return false;
      ByteString that = (ByteString) o;
      return Arrays.equals(bytes, that.bytes);
   }

   @Override
   public String toString() {
      if (s == null) {
         s = new String(bytes, CHARSET);
      }
      return s;
   }

   /**
    * Wire format: one unsigned length byte followed by the raw UTF-8 bytes.
    */
   public static void writeObject(ObjectOutput output, ByteString object) throws IOException {
      output.writeByte(object.bytes.length);
      if (object.bytes.length > 0) {
         output.write(object.bytes);
      }
   }

   /**
    * Reads the format written by {@link #writeObject(ObjectOutput, ByteString)};
    * a zero length yields the shared {@link #EMPTY} instance.
    */
   public static ByteString readObject(ObjectInput input) throws IOException {
      int len = input.readUnsignedByte();
      if (len == 0)
         return EMPTY;
      byte[] b = new byte[len];
      input.readFully(b);
      return new ByteString(b);
   }

   /**
    * Orders first by byte length and only then by signed byte comparison — this is NOT a
    * plain lexicographic order.
    */
   @Override
   public int compareTo(ByteString o) {
      int ourLength = bytes.length;
      int otherLength = o.bytes.length;
      int compare;
      if ((compare = Integer.compare(ourLength, otherLength)) != 0) {
         return compare;
      }
      for (int i = 0; i < ourLength; ++i) {
         if ((compare = Byte.compare(bytes[i], o.bytes[i])) != 0) {
            return compare;
         }
      }
      return 0;
   }
}
| 3,164
| 27.258929
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/KeyValuePair.java
|
package org.infinispan.util;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;

import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
/**
*
* Holds logically related key-value pairs or binary tuples.
*
* @author Mircea Markus
* @since 6.0
*/
public class KeyValuePair<K,V> {
   private final K key;
   private final V value;

   /** Static factory equivalent to {@link #KeyValuePair(Object, Object)}. */
   public static <K, V> KeyValuePair<K, V> of(K key, V value) {
      return new KeyValuePair<>(key, value);
   }

   public KeyValuePair(K key, V value) {
      this.key = key;
      this.value = value;
   }

   public K getKey() {
      return key;
   }

   public V getValue() {
      return value;
   }

   /** Externalizer for marshalling; writes the key followed by the value. */
   public static class Externalizer extends AbstractExternalizer<KeyValuePair> {
      private static final long serialVersionUID = -5291318076267612501L;

      @Override
      public void writeObject(ObjectOutput output, KeyValuePair kvPair) throws IOException {
         output.writeObject(kvPair.getKey());
         output.writeObject(kvPair.getValue());
      }

      @Override
      public KeyValuePair readObject(ObjectInput input) throws IOException, ClassNotFoundException {
         return new KeyValuePair(input.readObject(), input.readObject());
      }

      @Override
      public Integer getId() {
         return Ids.KEY_VALUE_PAIR_ID;
      }

      @Override
      public Set<Class<? extends KeyValuePair>> getTypeClasses() {
         return Collections.singleton(KeyValuePair.class);
      }
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (!(o instanceof KeyValuePair)) return false;
      KeyValuePair that = (KeyValuePair) o;
      // null-safe comparison of both components (replaces the manual null checks)
      return Objects.equals(key, that.key) && Objects.equals(value, that.value);
   }

   @Override
   public int hashCode() {
      // same formula as before (31 * keyHash + valueHash) so hash values stay stable
      return 31 * Objects.hashCode(key) + Objects.hashCode(value);
   }

   @Override
   public String toString() {
      return "KeyValuePair{key=" + key + ", value=" + value + '}';
   }
}
| 2,282
| 23.815217
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/EmbeddedTimeService.java
|
package org.infinispan.util;
import org.infinispan.commons.time.DefaultTimeService;
import org.infinispan.commons.time.TimeService;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* The default implementation of {@link TimeService}. It does not perform any optimization and relies on {@link
* System#currentTimeMillis()} and {@link System#nanoTime()}.
*
* @author Pedro Ruivo
* @since 5.3
*/
@Scope(Scopes.GLOBAL)
public class EmbeddedTimeService extends DefaultTimeService {
   // Intentionally empty: all behavior is inherited from DefaultTimeService. This subclass
   // appears to exist only to carry the @Scope(Scopes.GLOBAL) component annotation — TODO confirm.
}
| 535
| 28.777778
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CollectionMapper.java
|
package org.infinispan.util;
import java.util.AbstractCollection;
import java.util.Collection;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.commons.util.SpliteratorMapper;
/**
* A collection that maps another one to a new one of a possibly different type. Note this collection is read only
* and doesn't accept write operations.
* <p>
* Some operations such as {@link Collection#contains(Object)} and {@link Collection#containsAll(Collection)} may be
* more expensive then normal since they cannot utilize lookups into the original collection.
* @author wburns
* @since 9.0
*/
public class CollectionMapper<E, R> extends AbstractCollection<R> {
   // Backing collection whose elements are exposed through {@code mapper}.
   protected final Collection<E> realCollection;
   // Transformation applied lazily to each element as it is read.
   protected final Function<? super E, ? extends R> mapper;
   /**
    * @param realCollection the collection to expose in mapped form
    * @param mapper function applied to each element on read
    */
   public CollectionMapper(Collection<E> realCollection, Function<? super E, ? extends R> mapper) {
      this.realCollection = realCollection;
      this.mapper = mapper;
   }
   @Override
   public int size() {
      return realCollection.size();
   }
   @Override
   public boolean isEmpty() {
      return realCollection.isEmpty();
   }
   @Override
   public Iterator<R> iterator() {
      // remove() is overridden to throw so this read-only view cannot mutate the backing
      // collection through its iterator.
      return new IteratorMapper<E, R>(realCollection.iterator(), mapper) {
         @Override
         public void remove() {
            throw new UnsupportedOperationException("remove");
         }
      };
   }
   @Override
   public Spliterator<R> spliterator() {
      return new SpliteratorMapper<>(realCollection.spliterator(), mapper);
   }
   @Override
   public Stream<R> stream() {
      return realCollection.stream().map(mapper);
   }
   @Override
   public Stream<R> parallelStream() {
      return realCollection.parallelStream().map(mapper);
   }
   @Override
   public void forEach(Consumer<? super R> action) {
      realCollection.forEach(c -> action.accept(mapper.apply(c)));
   }
   // Write operations are not supported!
   @Override
   public boolean add(R e) {
      throw new UnsupportedOperationException();
   }
   @Override
   public boolean remove(Object o) {
      throw new UnsupportedOperationException();
   }
   @Override
   public boolean addAll(Collection<? extends R> c) {
      throw new UnsupportedOperationException();
   }
   @Override
   public boolean removeAll(Collection<?> c) {
      throw new UnsupportedOperationException();
   }
   @Override
   public boolean retainAll(Collection<?> c) {
      throw new UnsupportedOperationException();
   }
   @Override
   public void clear() {
      throw new UnsupportedOperationException();
   }
}
| 2,744
| 25.650485
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/EntryWrapper.java
|
package org.infinispan.util;
import org.infinispan.Cache;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ForwardingCacheEntry;
/**
* Wrapper for CacheEntry(s) that can be used to update the cache when it's value is set.
* @param <K> The key type
* @param <V> The value type
*/
public class EntryWrapper<K, V> extends ForwardingCacheEntry<K, V> {
   private final Cache<K, V> cache;
   private final CacheEntry<K, V> entry;

   /**
    * Creates a new entry wrapper around the given cache and entry. A nested EntryWrapper is
    * unwrapped so a single setValue never triggers two cache.put operations; the cache passed
    * here always wins, since it may carry different flags.
    * @param cache the cache to write to on setValue
    * @param entry the actual entry
    */
   public EntryWrapper(Cache<K, V> cache, CacheEntry<K, V> entry) {
      this.cache = cache;
      // Unwrap to avoid double wrapping, keeping the most recent cache reference.
      this.entry = entry instanceof EntryWrapper ? ((EntryWrapper<K, V>) entry).entry : entry;
   }

   @Override
   protected CacheEntry<K, V> delegate() {
      return entry;
   }

   @Override
   public V setValue(V value) {
      // Write through to the cache first, then update the wrapped entry.
      cache.put(entry.getKey(), value);
      return super.setValue(value);
   }
}
| 1,361
| 29.954545
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CoreImmutables.java
|
package org.infinispan.util;
import static org.infinispan.commons.util.Util.toStr;
import org.infinispan.commons.util.Immutables;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* Factory for generating immutable type wrappers for core types.
*
* @author Jason T. Greene
* @author Galder Zamarreño
* @author Tristan Tarrant
* @since 4.0
*/
public class CoreImmutables extends Immutables {
   /**
    * Wraps a {@link InternalCacheEntry} with an immutable {@link InternalCacheEntry}. There is no copying involved.
    *
    * @param entry the internal cache entry to wrap.
    * @return an immutable {@link InternalCacheEntry} wrapper that delegates to the original entry.
    */
   public static <K, V> InternalCacheEntry<K, V> immutableInternalCacheEntry(InternalCacheEntry<K, V> entry) {
      return new ImmutableInternalCacheEntry<>(entry);
   }
   /**
    * Immutable version of InternalCacheEntry for traversing data containers.
    * Read operations delegate to the wrapped entry; mutators throw
    * {@link UnsupportedOperationException}, with the exception of {@link #setEvicted(boolean)}.
    */
   private static class ImmutableInternalCacheEntry<K, V> implements InternalCacheEntry<K, V>, Immutable {
      private final InternalCacheEntry<K, V> entry;
      // Hash code captured at wrap time; the wrapped entry is assumed not to change it afterwards.
      private final int hash;
      ImmutableInternalCacheEntry(InternalCacheEntry<K, V> entry) {
         this.entry = entry;
         this.hash = entry.hashCode();
      }
      @Override
      public K getKey() {
         return entry.getKey();
      }
      @Override
      public V getValue() {
         return entry.getValue();
      }
      @Override
      public V setValue(V value) {
         throw new UnsupportedOperationException();
      }
      @Override
      public void commit(DataContainer container) {
         throw new UnsupportedOperationException();
      }
      @Override
      public boolean equals(Object o) {
         // Delegates equality to the wrapped entry, so a wrapper equals its wrapped entry's peers.
         if (!(o instanceof InternalCacheEntry))
            return false;
         InternalCacheEntry entry = (InternalCacheEntry) o;
         return entry.equals(this.entry);
      }
      @Override
      public int hashCode() {
         return hash;
      }
      @Override
      public String toString() {
         return toStr(getKey()) + "=" + toStr(getValue());
      }
      @Override
      public boolean canExpire() {
         return entry.canExpire();
      }
      @Override
      public long getCreated() {
         return entry.getCreated();
      }
      @Override
      public long getExpiryTime() {
         return entry.getExpiryTime();
      }
      @Override
      public long getLastUsed() {
         return entry.getLastUsed();
      }
      @Override
      public boolean isExpired(long now) {
         return entry.isExpired(now);
      }
      @Override
      public InternalCacheValue<V> toInternalCacheValue() {
         return new CoreImmutables.ImmutableInternalCacheValue<>(this);
      }
      @Override
      public void touch(long currentTimeMillis) {
         throw new UnsupportedOperationException();
      }
      @Override
      public void reincarnate(long now) {
         throw new UnsupportedOperationException();
      }
      @Override
      public boolean isL1Entry() {
         return entry.isL1Entry();
      }
      @Override
      public void setMetadata(Metadata metadata) {
         throw new UnsupportedOperationException();
      }
      @Override
      public long getLifespan() {
         return entry.getLifespan();
      }
      @Override
      public long getMaxIdle() {
         return entry.getMaxIdle();
      }
      @Override
      public boolean skipLookup() {
         // Note: fixed false rather than delegating to the wrapped entry.
         return false;
      }
      @Override
      public boolean isChanged() {
         return entry.isChanged();
      }
      @Override
      public boolean isCreated() {
         return entry.isCreated();
      }
      @Override
      public boolean isNull() {
         return entry.isNull();
      }
      @Override
      public boolean isRemoved() {
         return entry.isRemoved();
      }
      @Override
      public boolean isEvicted() {
         return entry.isEvicted();
      }
      @Override
      public void setCreated(boolean created) {
         throw new UnsupportedOperationException();
      }
      @Override
      public void setRemoved(boolean removed) {
         throw new UnsupportedOperationException();
      }
      @Override
      public void setChanged(boolean changed) {
         throw new UnsupportedOperationException();
      }
      @Override
      public void setEvicted(boolean evicted) {
         // NOTE(review): unlike every other mutator this delegates instead of throwing,
         // presumably so eviction bookkeeping still works on the wrapped entry — confirm.
         entry.setEvicted(evicted);
      }
      @Override
      public void setSkipLookup(boolean skipLookup) {
         throw new UnsupportedOperationException();
      }
      @Override
      public Metadata getMetadata() {
         return entry.getMetadata();
      }
      @Override
      public InternalCacheEntry<K, V> clone() {
         // Clones the wrapped entry and re-wraps it, preserving immutability of the copy.
         return new ImmutableInternalCacheEntry<>(entry.clone());
      }
      @Override
      public final PrivateMetadata getInternalMetadata() {
         return entry.getInternalMetadata();
      }
      @Override
      public void setInternalMetadata(PrivateMetadata metadata) {
         throw new UnsupportedOperationException();
      }
   }
   /**
    * Immutable InternalCacheValue view over an {@link ImmutableInternalCacheEntry}.
    */
   private static class ImmutableInternalCacheValue<V> implements InternalCacheValue<V>, Immutable {
      private final ImmutableInternalCacheEntry<?, V> entry;
      ImmutableInternalCacheValue(ImmutableInternalCacheEntry<?, V> entry) {
         this.entry = entry;
      }
      @Override
      public boolean canExpire() {
         return entry.canExpire();
      }
      @Override
      public long getCreated() {
         return entry.getCreated();
      }
      @Override
      public long getLastUsed() {
         return entry.getLastUsed();
      }
      @Override
      public long getLifespan() {
         return entry.getLifespan();
      }
      @Override
      public long getMaxIdle() {
         return entry.getMaxIdle();
      }
      @Override
      public V getValue() {
         return entry.getValue();
      }
      @Override
      public boolean isExpired(long now) {
         return entry.isExpired(now);
      }
      @Override
      public <K> InternalCacheEntry<K, V> toInternalCacheEntry(K key) {
         // The supplied key is ignored; the backing entry already carries its own key.
         //noinspection unchecked
         return (InternalCacheEntry<K, V>)entry;
      }
      @Override
      public long getExpiryTime() {
         return entry.toInternalCacheValue().getExpiryTime();
      }
      @Override
      public Metadata getMetadata() {
         return entry.getMetadata();
      }
      @Override
      public PrivateMetadata getInternalMetadata() {
         return entry.getInternalMetadata();
      }
      @Override
      public void setInternalMetadata(PrivateMetadata internalMetadata) {
         throw new UnsupportedOperationException();
      }
   }
}
| 6,977
| 23.229167
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/ExponentialBackOff.java
|
package org.infinispan.util;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.commons.util.Experimental;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.irac.IracXSiteBackup;
/**
* Interface to implement an exponential back-off algorithm that retries the request based on the result of the remote
* operation.
* <p>
* This interface contains 2 methods: {@link #asyncBackOff()} ()} which should be invoked if the request needs to be
* retried and {@link #reset()}, invoked when a request is "successful", which resets the state.
*
* @author Pedro Ruivo
* @since 12.0
*/
@Experimental
public interface ExponentialBackOff {
   /**
    * Disabled exponential back-off algorithm. It does nothing.
    */
   ExponentialBackOff NO_OP = new ExponentialBackOff() {
      @Override
      public void reset() {
         //no-op
      }
      @Override
      public CompletionStage<Void> asyncBackOff() {
         // Already-completed stage: callers proceed immediately with no delay.
         return CompletableFutures.completedNull();
      }
   };
   /** Builder that ignores the backup argument and always yields the disabled {@link #NO_OP} instance. */
   Function<IracXSiteBackup, ExponentialBackOff> NO_OP_BUILDER = backup -> NO_OP;
   /**
    * Resets its state.
    * <p>
    * The blocking time in {@link #asyncBackOff()} increases with the number of consecutive retries. This methods resets
    * its state back to the initial sleep time.
    */
   void reset();
   /**
    * It returns a {@link CompletionStage} which is completed a certain amount of time before retries the request.
    * <p>
    * After the completion, the request is allows to proceed.
    *
    * @return A {@link CompletionStage}.
    */
   CompletionStage<Void> asyncBackOff();
}
| 1,685
| 27.576271
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/AbstractDelegatingIntCacheStream.java
|
package org.infinispan.util;
import java.util.IntSummaryStatistics;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.IntBinaryOperator;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
import java.util.function.ObjIntConsumer;
import java.util.function.Supplier;
import java.util.stream.IntStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
/**
* Abstract Delegating handler that passes IntStream operations off to the underlying IntCacheStream but delegates
* {@link org.infinispan.BaseCacheStream} operations to the provided {@link CacheStream}. This allows for intercepting
* methods defined on <b>BaseCacheStream</b>.
* <p>
* This class is package private as it should only be created by using a map operator from another
* AbstractDelegating*CacheStream instance. Note that {@link AbstractDelegatingCacheStream} is public as this is
* the defined approach to create such a delegated stream.
* @author wburns
* @since 9.2
*/
class AbstractDelegatingIntCacheStream implements IntCacheStream {
   // Receives all BaseCacheStream (distribution/segment/timeout) configuration calls.
   protected AbstractDelegatingCacheStream<?> delegateCacheStream;
   // Receives all actual IntStream operations.
   protected IntCacheStream underlyingStream;
   AbstractDelegatingIntCacheStream(AbstractDelegatingCacheStream<?> delegateCacheStream,
         IntCacheStream underlyingStream) {
      this.delegateCacheStream = delegateCacheStream;
      this.underlyingStream = underlyingStream;
   }
   // These are methods that convert to a different AbstractDelegating*CacheStream
   @Override
   public DoubleCacheStream mapToDouble(IntToDoubleFunction mapper) {
      return new AbstractDelegatingDoubleCacheStream(delegateCacheStream, underlyingStream.mapToDouble(mapper));
   }
   @Override
   public LongCacheStream mapToLong(IntToLongFunction mapper) {
      return new AbstractDelegatingLongCacheStream(delegateCacheStream, underlyingStream.mapToLong(mapper));
   }
   @Override
   public <U> CacheStream<U> mapToObj(IntFunction<? extends U> mapper) {
      // Re-points the shared delegating object stream at the mapped stream and returns it;
      // the cast is safe only because the delegate's element type is being switched to U here.
      delegateCacheStream.underlyingStream = underlyingStream.mapToObj(mapper);
      return (CacheStream<U>) delegateCacheStream;
   }
   @Override
   public CacheStream<Integer> boxed() {
      // Same pattern as mapToObj: reuse the delegating object stream for the boxed view.
      delegateCacheStream.underlyingStream = underlyingStream.boxed();
      return (CacheStream<Integer>) delegateCacheStream;
   }
   @Override
   public DoubleCacheStream asDoubleStream() {
      return new AbstractDelegatingDoubleCacheStream(delegateCacheStream, underlyingStream.asDoubleStream());
   }
   @Override
   public LongCacheStream asLongStream() {
      return new AbstractDelegatingLongCacheStream(delegateCacheStream, underlyingStream.asLongStream());
   }
   // These are methods that should delegate to the original cache stream
   @Override
   public IntCacheStream sequentialDistribution() {
      delegateCacheStream = delegateCacheStream.sequentialDistribution();
      return this;
   }
   @Override
   public IntCacheStream parallelDistribution() {
      delegateCacheStream = delegateCacheStream.parallelDistribution();
      return this;
   }
   @Override
   public IntCacheStream filterKeySegments(Set<Integer> segments) {
      delegateCacheStream = delegateCacheStream.filterKeySegments(segments);
      return this;
   }
   @Override
   public IntCacheStream filterKeySegments(IntSet segments) {
      delegateCacheStream = delegateCacheStream.filterKeySegments(segments);
      return this;
   }
   @Override
   public IntCacheStream filterKeys(Set<?> keys) {
      delegateCacheStream = delegateCacheStream.filterKeys(keys);
      return this;
   }
   @Override
   public IntCacheStream distributedBatchSize(int batchSize) {
      delegateCacheStream = delegateCacheStream.distributedBatchSize(batchSize);
      return this;
   }
   @Override
   public IntCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
      delegateCacheStream = delegateCacheStream.segmentCompletionListener(listener);
      return this;
   }
   @Override
   public IntCacheStream disableRehashAware() {
      delegateCacheStream = delegateCacheStream.disableRehashAware();
      return this;
   }
   @Override
   public IntCacheStream timeout(long timeout, TimeUnit unit) {
      delegateCacheStream = delegateCacheStream.timeout(timeout, unit);
      return this;
   }
   // Actual IntStream operations
   @Override
   public IntCacheStream filter(IntPredicate predicate) {
      underlyingStream = underlyingStream.filter(predicate);
      return this;
   }
   @Override
   public IntCacheStream map(IntUnaryOperator mapper) {
      underlyingStream = underlyingStream.map(mapper);
      return this;
   }
   @Override
   public IntCacheStream flatMap(IntFunction<? extends IntStream> mapper) {
      underlyingStream = underlyingStream.flatMap(mapper);
      return this;
   }
   @Override
   public IntCacheStream distinct() {
      underlyingStream = underlyingStream.distinct();
      return this;
   }
   @Override
   public IntCacheStream sorted() {
      underlyingStream = underlyingStream.sorted();
      return this;
   }
   @Override
   public IntCacheStream peek(IntConsumer action) {
      underlyingStream = underlyingStream.peek(action);
      return this;
   }
   @Override
   public IntCacheStream limit(long maxSize) {
      underlyingStream = underlyingStream.limit(maxSize);
      return this;
   }
   @Override
   public IntCacheStream skip(long n) {
      underlyingStream = underlyingStream.skip(n);
      return this;
   }
   @Override
   public void forEach(IntConsumer action) {
      underlyingStream.forEach(action);
   }
   @Override
   public void forEachOrdered(IntConsumer action) {
      underlyingStream.forEachOrdered(action);
   }
   @Override
   public int[] toArray() {
      return underlyingStream.toArray();
   }
   @Override
   public int reduce(int identity, IntBinaryOperator op) {
      return underlyingStream.reduce(identity, op);
   }
   @Override
   public OptionalInt reduce(IntBinaryOperator op) {
      return underlyingStream.reduce(op);
   }
   @Override
   public <R> R collect(Supplier<R> supplier, ObjIntConsumer<R> accumulator, BiConsumer<R, R> combiner) {
      return underlyingStream.collect(supplier, accumulator, combiner);
   }
   @Override
   public int sum() {
      return underlyingStream.sum();
   }
   @Override
   public OptionalInt min() {
      return underlyingStream.min();
   }
   @Override
   public OptionalInt max() {
      return underlyingStream.max();
   }
   @Override
   public long count() {
      return underlyingStream.count();
   }
   @Override
   public OptionalDouble average() {
      return underlyingStream.average();
   }
   @Override
   public IntSummaryStatistics summaryStatistics() {
      return underlyingStream.summaryStatistics();
   }
   @Override
   public boolean anyMatch(IntPredicate predicate) {
      return underlyingStream.anyMatch(predicate);
   }
   @Override
   public boolean allMatch(IntPredicate predicate) {
      return underlyingStream.allMatch(predicate);
   }
   @Override
   public boolean noneMatch(IntPredicate predicate) {
      return underlyingStream.noneMatch(predicate);
   }
   @Override
   public OptionalInt findFirst() {
      return underlyingStream.findFirst();
   }
   @Override
   public OptionalInt findAny() {
      return underlyingStream.findAny();
   }
   @Override
   public <K, V> void forEach(ObjIntConsumer<Cache<K, V>> action) {
      underlyingStream.forEach(action);
   }
   @Override
   public IntCacheStream sequential() {
      underlyingStream = underlyingStream.sequential();
      return this;
   }
   @Override
   public IntCacheStream parallel() {
      underlyingStream = underlyingStream.parallel();
      return this;
   }
   @Override
   public PrimitiveIterator.OfInt iterator() {
      return underlyingStream.iterator();
   }
   @Override
   public Spliterator.OfInt spliterator() {
      return underlyingStream.spliterator();
   }
   @Override
   public boolean isParallel() {
      return underlyingStream.isParallel();
   }
   @Override
   public IntCacheStream unordered() {
      underlyingStream = underlyingStream.unordered();
      return this;
   }
   @Override
   public IntCacheStream onClose(Runnable closeHandler) {
      underlyingStream = underlyingStream.onClose(closeHandler);
      return this;
   }
   @Override
   public void close() {
      underlyingStream.close();
   }
}
| 8,955
| 26.641975
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/LazyConcatIterator.java
|
package org.infinispan.util;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.Supplier;
import org.infinispan.commons.util.CloseableIterator;
/**
* Iterator implementation that will return all entries from the first iterator. Upon completion of the first iterator
* the supplier will generate an additional iterator that will then be used as a source for this iterator. After the
* second iterator is consumed this iterator will also be completed.
* @param <E> element type
*/
public class LazyConcatIterator<E> implements CloseableIterator<E> {
   private final CloseableIterator<E> iterator1;
   private final Supplier<? extends CloseableIterator<E>> supplier;

   // Created lazily from the supplier once the first iterator is exhausted; null until then.
   private CloseableIterator<E> iterator2;

   public LazyConcatIterator(CloseableIterator<E> first, Supplier<? extends CloseableIterator<E>> supplier) {
      this.iterator1 = Objects.requireNonNull(first);
      this.supplier = Objects.requireNonNull(supplier);
   }

   @Override
   public void close() {
      // try-with-resources guarantees iterator1 is closed even if closing iterator2 throws.
      try (CloseableIterator<E> closeme = iterator1) {
         if (iterator2 != null) {
            iterator2.close();
         }
      }
   }

   @Override
   public boolean hasNext() {
      if (iterator2 == null) {
         if (iterator1.hasNext()) {
            return true;
         }
         // First iterator drained: switch over to the supplied second iterator.
         iterator2 = supplier.get();
      }
      return iterator2.hasNext();
   }

   @Override
   public E next() {
      if (iterator2 == null) {
         // Re-check in case the caller invokes next() without a preceding hasNext().
         if (iterator1.hasNext()) {
            return iterator1.next();
         }
         iterator2 = supplier.get();
      }
      if (!iterator2.hasNext()) {
         throw new NoSuchElementException();
      }
      return iterator2.next();
   }
}
| 1,906
| 27.462687
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/Casting.java
|
package org.infinispan.util;
import org.infinispan.util.function.SerializableSupplier;
import java.util.function.Supplier;
import java.util.stream.Collector;
/**
 * Static helpers that narrow raw supplier types to their generic collector-supplier forms.
 * These unchecked casts work around Java's weak inference with nested generics.
 */
public class Casting {

   private Casting() {
      // Static utility holder; not meant to be instantiated.
   }

   /**
    * Narrows a raw {@code SerializableSupplier} to a serializable supplier of collectors.
    *
    * @param supplier the raw supplier to narrow; must actually supply {@code Collector<T, ?, R>}
    * @return the same supplier, typed as a serializable supplier of collectors
    */
   @SuppressWarnings("unchecked")
   public static <T, R> SerializableSupplier<Collector<T, ?, R>> toSerialSupplierCollect(
         SerializableSupplier supplier) {
      return supplier;
   }

   /**
    * Narrows a raw {@link Supplier} to a supplier of collectors.
    *
    * @param supplier the raw supplier to narrow; must actually supply {@code Collector<T, ?, R>}
    * @return the same supplier, typed as a supplier of collectors
    */
   @SuppressWarnings("unchecked")
   public static <T, R> Supplier<Collector<T, ?, R>> toSupplierCollect(Supplier supplier) {
      return supplier;
   }
}
| 755
| 28.076923
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/MappedCacheStream.java
|
package org.infinispan.util;
import java.util.Set;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.InjectiveFunction;
/**
* CacheStream that allows for mapping the filtered keys to different keys. Note that the function provided
* must be a {@link InjectiveFunction} guaranteeing that keys are distinct when mapped.
* @author wburns
* @since 9.2
*/
class MappedCacheStream<R> extends AbstractDelegatingCacheStream<R> {
   private final InjectiveFunction<Object, ?> keyMapper;

   MappedCacheStream(CacheStream<R> stream, InjectiveFunction<Object, ?> keyMapper) {
      super(stream);
      this.keyMapper = keyMapper;
   }

   @Override
   public AbstractDelegatingCacheStream<R> filterKeys(Set<?> keys) {
      if (keys == null) {
         return super.filterKeys(null);
      }
      // Present the requested keys through the injective mapper so they match
      // the underlying stream's key space.
      return super.filterKeys(new SetMapper<>(keys, keyMapper));
   }
}
| 829
| 30.923077
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/WriteableCacheCollectionMapper.java
|
package org.infinispan.util;
import java.util.Collection;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.CacheCollection;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableSpliterator;
import org.infinispan.commons.util.InjectiveFunction;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.commons.util.SpliteratorMapper;
/**
* A writeable cache collection mapper that also has constant time operations for things such as
* {@link Collection#contains(Object)} if the underlying Collection does.
* <p>
* This collection should be used for cases when a simple transformation of a element to another is all that is
* needed by the underlying collection.
* <p>
* Note this class allows for a different function specifically for values returned from an iterator. This
* can be useful to intercept calls such as {@link java.util.Map.Entry#setValue(Object)} and update appropriately.
* @author wburns
* @since 9.2
* @param <E> the original collection type - referred to as old in some methods
* @param <R> the resulting collection type - referred to as new in some methods
*/
public class WriteableCacheCollectionMapper<E, R> extends CollectionMapper<E, R> implements CacheCollection<R> {
   // Same object as CollectionMapper.realCollection, retained with its CacheCollection type.
   protected final CacheCollection<E> realCacheCollection;
   // Mapping applied only to elements produced by iterator(); may differ from the stream mapper.
   protected final Function<? super E, ? extends R> toNewTypeIteratorFunction;
   // Reverse mapping used to translate arguments of write/containment operations back to E.
   protected final Function<? super R, ? extends E> fromNewTypeFunction;
   // Injective key mapper handed to streams so filterKeys operates on the original key space.
   protected final InjectiveFunction<Object, ?> keyFilterMapper;
   /**
    * @param realCollection the writeable backing collection
    * @param toNewTypeFunction maps stored elements to the exposed type (also used for iteration)
    * @param fromNewTypeFunction maps exposed-type arguments back to the stored type
    * @param keyFilterFunction injective mapper applied to keys for stream key filtering
    */
   public WriteableCacheCollectionMapper(CacheCollection<E> realCollection,
         Function<? super E, ? extends R> toNewTypeFunction,
         Function<? super R, ? extends E> fromNewTypeFunction,
         InjectiveFunction<Object, ?> keyFilterFunction) {
      super(realCollection, toNewTypeFunction);
      this.realCacheCollection = realCollection;
      this.toNewTypeIteratorFunction = toNewTypeFunction;
      this.fromNewTypeFunction = fromNewTypeFunction;
      this.keyFilterMapper = keyFilterFunction;
   }
   /**
    * @param realCollection the writeable backing collection
    * @param toNewTypeFunction maps stored elements to the exposed type
    * @param toNewTypeIteratorFunction separate mapping used for iterator results (e.g. to
    *        intercept {@code Map.Entry#setValue})
    * @param fromNewTypeFunction maps exposed-type arguments back to the stored type
    * @param keyFilterFunction injective mapper applied to keys for stream key filtering
    */
   public WriteableCacheCollectionMapper(CacheCollection<E> realCollection,
         Function<? super E, ? extends R> toNewTypeFunction,
         Function<? super E, ? extends R> toNewTypeIteratorFunction,
         Function<? super R, ? extends E> fromNewTypeFunction,
         InjectiveFunction<Object, ?> keyFilterFunction) {
      super(realCollection, toNewTypeFunction);
      this.realCacheCollection = realCollection;
      this.toNewTypeIteratorFunction = toNewTypeIteratorFunction;
      this.fromNewTypeFunction = fromNewTypeFunction;
      this.keyFilterMapper = keyFilterFunction;
   }
   @Override
   public CloseableIterator<R> iterator() {
      return new IteratorMapper<>(realCollection.iterator(), toNewTypeIteratorFunction);
   }
   @Override
   public boolean contains(Object o) {
      // Unchecked cast: assumes callers only pass instances of R — TODO confirm at call sites.
      return realCollection.contains(fromNewTypeFunction.apply((R) o));
   }
   @Override
   public boolean containsAll(Collection<?> c) {
      // The argument is viewed through the reverse mapping so the backing collection can do the check.
      return realCollection.containsAll(new CollectionMapper<>((Collection<R>) c, fromNewTypeFunction));
   }
   @Override
   public boolean add(R e) {
      return realCollection.add(fromNewTypeFunction.apply(e));
   }
   @Override
   public boolean addAll(Collection<? extends R> c) {
      return realCollection.addAll(new CollectionMapper<>((Collection<R>) c, fromNewTypeFunction));
   }
   @Override
   public boolean remove(Object o) {
      // Unchecked cast: assumes callers only pass instances of R — TODO confirm at call sites.
      return realCollection.remove(fromNewTypeFunction.apply((R) o));
   }
   @Override
   public boolean removeAll(Collection<?> c) {
      return realCollection.removeAll(new CollectionMapper<>((Collection<R>) c, fromNewTypeFunction));
   }
   @Override
   public boolean retainAll(Collection<?> c) {
      return realCollection.retainAll(new CollectionMapper<>((Collection<R>) c, fromNewTypeFunction));
   }
   @Override
   public boolean removeIf(Predicate<? super R> filter) {
      // Evaluate the predicate against the mapped view while removing from the backing collection.
      return realCollection.removeIf(e -> filter.test(mapper.apply(e)));
   }
   @Override
   public void clear() {
      realCollection.clear();
   }
   @Override
   public CloseableSpliterator<R> spliterator() {
      return new SpliteratorMapper<>(realCacheCollection.spliterator(), mapper);
   }
   // Wraps a backing stream so elements are mapped and key filters go through keyFilterMapper.
   private CacheStream<R> getStream(CacheStream<E> parentStream) {
      return new MappedCacheStream<>(parentStream.map(mapper), keyFilterMapper);
   }
   @Override
   public CacheStream<R> stream() {
      return getStream(realCacheCollection.stream());
   }
   @Override
   public CacheStream<R> parallelStream() {
      return getStream(realCacheCollection.parallelStream());
   }
}
| 4,715
| 36.133858
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/AbstractDelegatingDoubleCacheStream.java
|
package org.infinispan.util;
import java.util.DoubleSummaryStatistics;
import java.util.OptionalDouble;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.DoubleConsumer;
import java.util.function.DoubleFunction;
import java.util.function.DoublePredicate;
import java.util.function.DoubleToIntFunction;
import java.util.function.DoubleToLongFunction;
import java.util.function.DoubleUnaryOperator;
import java.util.function.ObjDoubleConsumer;
import java.util.function.Supplier;
import java.util.stream.DoubleStream;
import org.infinispan.BaseCacheStream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
/**
* Abstract Delegating handler that passes DoubleStream operations off to the underlying DoubleCacheStream but delegates
* {@link org.infinispan.BaseCacheStream} operations to the provided {@link CacheStream}. This allows for intercepting
* methods defined on <b>BaseCacheStream</b>.
* <p>
* This class is package private as it should only be created by using a map operator from another
* AbstractDelegating*CacheStream instance. Note that {@link AbstractDelegatingCacheStream} is public as this is
* the defined approach to create such a delegated stream.
* @author wburns
* @since 9.2
*/
class AbstractDelegatingDoubleCacheStream implements DoubleCacheStream {
protected AbstractDelegatingCacheStream<?> delegateCacheStream;
protected DoubleCacheStream underlyingStream;
AbstractDelegatingDoubleCacheStream(AbstractDelegatingCacheStream<?> delegateCacheStream,
DoubleCacheStream underlyingStream) {
this.delegateCacheStream = delegateCacheStream;
this.underlyingStream = underlyingStream;
}
// These are methods that convert to a different AbstractDelegating*CacheStream
@Override
public IntCacheStream mapToInt(DoubleToIntFunction mapper) {
return underlyingStream.mapToInt(mapper);
}
@Override
public LongCacheStream mapToLong(DoubleToLongFunction mapper) {
return new AbstractDelegatingLongCacheStream(delegateCacheStream, underlyingStream.mapToLong(mapper));
}
@Override
public <U> CacheStream<U> mapToObj(DoubleFunction<? extends U> mapper) {
delegateCacheStream.underlyingStream = underlyingStream.mapToObj(mapper);
return (CacheStream<U>) delegateCacheStream;
}
@Override
public CacheStream<Double> boxed() {
delegateCacheStream.underlyingStream = underlyingStream.boxed();
return (CacheStream<Double>) delegateCacheStream;
}
   // These are methods that should delegate to the original cache stream: BaseCacheStream
   // configuration (distribution, segment/key filtering, batching, timeouts) is applied to the
   // object-stream delegate, which owns the distributed-execution settings.
   @Override
   public DoubleCacheStream sequentialDistribution() {
      delegateCacheStream = delegateCacheStream.sequentialDistribution();
      return this;
   }
   @Override
   public DoubleCacheStream parallelDistribution() {
      delegateCacheStream = delegateCacheStream.parallelDistribution();
      return this;
   }
   @Override
   public DoubleCacheStream filterKeySegments(Set<Integer> segments) {
      delegateCacheStream = delegateCacheStream.filterKeySegments(segments);
      return this;
   }
   @Override
   public BaseCacheStream filterKeySegments(IntSet segments) {
      delegateCacheStream = delegateCacheStream.filterKeySegments(segments);
      return this;
   }
   @Override
   public DoubleCacheStream filterKeys(Set<?> keys) {
      delegateCacheStream = delegateCacheStream.filterKeys(keys);
      return this;
   }
   @Override
   public DoubleCacheStream distributedBatchSize(int batchSize) {
      delegateCacheStream = delegateCacheStream.distributedBatchSize(batchSize);
      return this;
   }
   @Override
   public DoubleCacheStream segmentCompletionListener(SegmentCompletionListener listener) {
      delegateCacheStream = delegateCacheStream.segmentCompletionListener(listener);
      return this;
   }
   @Override
   public DoubleCacheStream disableRehashAware() {
      delegateCacheStream = delegateCacheStream.disableRehashAware();
      return this;
   }
   @Override
   public DoubleCacheStream timeout(long timeout, TimeUnit unit) {
      delegateCacheStream = delegateCacheStream.timeout(timeout, unit);
      return this;
   }
   // Actual DoubleStream operations: intermediate ops replace the underlying stream in place
   // and return this wrapper so the fluent chain keeps going through the delegate.
   @Override
   public DoubleCacheStream filter(DoublePredicate predicate) {
      underlyingStream = underlyingStream.filter(predicate);
      return this;
   }
   @Override
   public DoubleCacheStream map(DoubleUnaryOperator mapper) {
      underlyingStream = underlyingStream.map(mapper);
      return this;
   }
   @Override
   public DoubleCacheStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
      underlyingStream = underlyingStream.flatMap(mapper);
      return this;
   }
   @Override
   public DoubleCacheStream distinct() {
      underlyingStream = underlyingStream.distinct();
      return this;
   }
   @Override
   public DoubleCacheStream sorted() {
      underlyingStream = underlyingStream.sorted();
      return this;
   }
   @Override
   public DoubleCacheStream peek(DoubleConsumer action) {
      underlyingStream = underlyingStream.peek(action);
      return this;
   }
   @Override
   public DoubleCacheStream limit(long maxSize) {
      underlyingStream = underlyingStream.limit(maxSize);
      return this;
   }
   @Override
   public DoubleCacheStream skip(long n) {
      underlyingStream = underlyingStream.skip(n);
      return this;
   }
   // Terminal operations are forwarded straight to the underlying stream and end the pipeline.
   @Override
   public void forEach(DoubleConsumer action) {
      underlyingStream.forEach(action);
   }
   @Override
   public void forEachOrdered(DoubleConsumer action) {
      underlyingStream.forEachOrdered(action);
   }
   @Override
   public double[] toArray() {
      return underlyingStream.toArray();
   }
   @Override
   public double reduce(double identity, DoubleBinaryOperator op) {
      return underlyingStream.reduce(identity, op);
   }
   @Override
   public OptionalDouble reduce(DoubleBinaryOperator op) {
      return underlyingStream.reduce(op);
   }
   @Override
   public <R> R collect(Supplier<R> supplier, ObjDoubleConsumer<R> accumulator, BiConsumer<R, R> combiner) {
      return underlyingStream.collect(supplier, accumulator, combiner);
   }
   @Override
   public double sum() {
      return underlyingStream.sum();
   }
   @Override
   public OptionalDouble min() {
      return underlyingStream.min();
   }
   @Override
   public OptionalDouble max() {
      return underlyingStream.max();
   }
   @Override
   public long count() {
      return underlyingStream.count();
   }
   @Override
   public OptionalDouble average() {
      return underlyingStream.average();
   }
   @Override
   public DoubleSummaryStatistics summaryStatistics() {
      return underlyingStream.summaryStatistics();
   }
   @Override
   public boolean anyMatch(DoublePredicate predicate) {
      return underlyingStream.anyMatch(predicate);
   }
   @Override
   public boolean allMatch(DoublePredicate predicate) {
      return underlyingStream.allMatch(predicate);
   }
   @Override
   public boolean noneMatch(DoublePredicate predicate) {
      return underlyingStream.noneMatch(predicate);
   }
   @Override
   public OptionalDouble findFirst() {
      return underlyingStream.findFirst();
   }
   @Override
   public OptionalDouble findAny() {
      return underlyingStream.findAny();
   }
   // Cache-aware variant: the consumer also receives the Cache the entry came from.
   @Override
   public <K, V> void forEach(ObjDoubleConsumer<Cache<K, V>> action) {
      underlyingStream.forEach(action);
   }
   // java.util.stream.BaseStream operations: forwarded to the underlying stream; mode toggles
   // replace the reference in place and return this wrapper.
   @Override
   public DoubleCacheStream sequential() {
      underlyingStream = underlyingStream.sequential();
      return this;
   }
   @Override
   public DoubleCacheStream parallel() {
      underlyingStream = underlyingStream.parallel();
      return this;
   }
   @Override
   public PrimitiveIterator.OfDouble iterator() {
      return underlyingStream.iterator();
   }
   @Override
   public Spliterator.OfDouble spliterator() {
      return underlyingStream.spliterator();
   }
   @Override
   public boolean isParallel() {
      return underlyingStream.isParallel();
   }
   @Override
   public DoubleCacheStream unordered() {
      underlyingStream = underlyingStream.unordered();
      return this;
   }
   @Override
   public DoubleCacheStream onClose(Runnable closeHandler) {
      underlyingStream = underlyingStream.onClose(closeHandler);
      return this;
   }
   @Override
   public void close() {
      underlyingStream.close();
   }
}
| 8,743
| 26.847134
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/AbstractDelegatingCacheStream.java
|
package org.infinispan.util;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Optional;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Collector;
import java.util.stream.DoubleStream;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.CacheStream;
import org.infinispan.DoubleCacheStream;
import org.infinispan.IntCacheStream;
import org.infinispan.LongCacheStream;
import org.infinispan.commons.util.IntSet;
/**
 * Delegate that forwards all of the method calls to the underlying cache stream. This can be useful to intercept
 * a given method call. Note that primitive stream operations are not intercepted (including those from
 * {@link java.util.stream.BaseStream}), however defined {@link org.infinispan.BaseCacheStream} methods would be.
 */
public class AbstractDelegatingCacheStream<R> implements CacheStream<R> {
   // Current stream; intermediate operations replace this reference in place so the wrapper
   // instance itself is reused across the whole fluent chain. Wildcard-typed because map/flatMap
   // change the element type without creating a new wrapper.
   protected CacheStream<?> underlyingStream;
   public AbstractDelegatingCacheStream(CacheStream<R> stream) {
      this.underlyingStream = stream;
   }
   // Unchecked helper: the raw parameter lets the wildcard-typed field be used where a
   // CacheStream<R> is required. Safe as long as callers only ask for the current element type.
   private CacheStream<R> castStream(CacheStream stream) {
      return stream;
   }
   // These are methods that convert to a different AbstractDelegating*CacheStream, keeping this
   // wrapper reachable as the delegate for BaseCacheStream configuration calls.
   @Override
   public IntCacheStream mapToInt(ToIntFunction<? super R> mapper) {
      return new AbstractDelegatingIntCacheStream(this, castStream(underlyingStream).mapToInt(mapper));
   }
   @Override
   public LongCacheStream mapToLong(ToLongFunction<? super R> mapper) {
      return new AbstractDelegatingLongCacheStream(this, castStream(underlyingStream).mapToLong(mapper));
   }
   @Override
   public DoubleCacheStream mapToDouble(ToDoubleFunction<? super R> mapper) {
      return new AbstractDelegatingDoubleCacheStream(this, castStream(underlyingStream).mapToDouble(mapper));
   }
   @Override
   public IntCacheStream flatMapToInt(Function<? super R, ? extends IntStream> mapper) {
      return new AbstractDelegatingIntCacheStream(this, castStream(underlyingStream).flatMapToInt(mapper));
   }
   @Override
   public LongCacheStream flatMapToLong(Function<? super R, ? extends LongStream> mapper) {
      return new AbstractDelegatingLongCacheStream(this, castStream(underlyingStream).flatMapToLong(mapper));
   }
   @Override
   public DoubleCacheStream flatMapToDouble(Function<? super R, ? extends DoubleStream> mapper) {
      return new AbstractDelegatingDoubleCacheStream(this, castStream(underlyingStream).flatMapToDouble(mapper));
   }
   // BaseCacheStream configuration methods: each replaces the underlying stream and returns this.
   @Override
   public AbstractDelegatingCacheStream<R> sequentialDistribution() {
      underlyingStream = underlyingStream.sequentialDistribution();
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> parallelDistribution() {
      underlyingStream = underlyingStream.parallelDistribution();
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> filterKeySegments(Set<Integer> segments) {
      underlyingStream = underlyingStream.filterKeySegments(segments);
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> filterKeySegments(IntSet segments) {
      underlyingStream = underlyingStream.filterKeySegments(segments);
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> filterKeys(Set<?> keys) {
      underlyingStream = underlyingStream.filterKeys(keys);
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> distributedBatchSize(int batchSize) {
      underlyingStream = underlyingStream.distributedBatchSize(batchSize);
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> segmentCompletionListener(SegmentCompletionListener listener) {
      underlyingStream = underlyingStream.segmentCompletionListener(listener);
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> disableRehashAware() {
      underlyingStream = underlyingStream.disableRehashAware();
      return this;
   }
   @Override
   public AbstractDelegatingCacheStream<R> timeout(long timeout, TimeUnit unit) {
      underlyingStream = underlyingStream.timeout(timeout, unit);
      return this;
   }
   // Terminal operations: forwarded to the underlying stream.
   @Override
   public void forEach(Consumer<? super R> action) {
      castStream(underlyingStream).forEach(action);
   }
   @Override
   public <K, V> void forEach(BiConsumer<Cache<K, V>, ? super R> action) {
      castStream(underlyingStream).forEach(action);
   }
   @Override
   public void forEachOrdered(Consumer<? super R> action) {
      castStream(underlyingStream).forEachOrdered(action);
   }
   @Override
   public Object[] toArray() {
      return underlyingStream.toArray();
   }
   @Override
   public <A> A[] toArray(IntFunction<A[]> generator) {
      return underlyingStream.toArray(generator);
   }
   @Override
   public R reduce(R identity, BinaryOperator<R> accumulator) {
      return castStream(underlyingStream).reduce(identity, accumulator);
   }
   @Override
   public Optional<R> reduce(BinaryOperator<R> accumulator) {
      return castStream(underlyingStream).reduce(accumulator);
   }
   @Override
   public <U> U reduce(U identity, BiFunction<U, ? super R, U> accumulator, BinaryOperator<U> combiner) {
      return castStream(underlyingStream).reduce(identity, accumulator, combiner);
   }
   @Override
   public <R1> R1 collect(Supplier<R1> supplier, BiConsumer<R1, ? super R> accumulator, BiConsumer<R1, R1> combiner) {
      return castStream(underlyingStream).collect(supplier, accumulator, combiner);
   }
   @Override
   public Iterator<R> iterator() {
      return castStream(underlyingStream).iterator();
   }
   @Override
   public Spliterator<R> spliterator() {
      return castStream(underlyingStream).spliterator();
   }
   @Override
   public boolean isParallel() {
      return underlyingStream.isParallel();
   }
   @Override
   public CacheStream<R> sequential() {
      underlyingStream = underlyingStream.sequential();
      return this;
   }
   @Override
   public CacheStream<R> parallel() {
      underlyingStream = underlyingStream.parallel();
      return this;
   }
   @Override
   public CacheStream<R> unordered() {
      underlyingStream = underlyingStream.unordered();
      return this;
   }
   @Override
   public CacheStream<R> onClose(Runnable closeHandler) {
      underlyingStream = underlyingStream.onClose(closeHandler);
      return this;
   }
   @Override
   public void close() {
      underlyingStream.close();
   }
   // Intermediate operations: replace the underlying stream in place and return this wrapper.
   @Override
   public CacheStream<R> sorted() {
      underlyingStream = underlyingStream.sorted();
      return this;
   }
   @Override
   public CacheStream<R> sorted(Comparator<? super R> comparator) {
      underlyingStream = castStream(underlyingStream).sorted(comparator);
      return this;
   }
   @Override
   public CacheStream<R> peek(Consumer<? super R> action) {
      underlyingStream = castStream(underlyingStream).peek(action);
      return this;
   }
   @Override
   public CacheStream<R> limit(long maxSize) {
      underlyingStream = underlyingStream.limit(maxSize);
      return this;
   }
   @Override
   public CacheStream<R> skip(long n) {
      underlyingStream = underlyingStream.skip(n);
      return this;
   }
   @Override
   public CacheStream<R> filter(Predicate<? super R> predicate) {
      underlyingStream = castStream(underlyingStream).filter(predicate);
      return this;
   }
   // The element type changes but the same wrapper instance is reused; the cast is safe because
   // underlyingStream now produces R1 elements.
   @Override
   public <R1> CacheStream<R1> map(Function<? super R, ? extends R1> mapper) {
      underlyingStream = castStream(underlyingStream).map(mapper);
      return (CacheStream<R1>) this;
   }
   @Override
   public <R1> CacheStream<R1> flatMap(Function<? super R, ? extends Stream<? extends R1>> mapper) {
      underlyingStream = castStream(underlyingStream).flatMap(mapper);
      return (CacheStream<R1>) this;
   }
   @Override
   public CacheStream<R> distinct() {
      underlyingStream = underlyingStream.distinct();
      return this;
   }
   @Override
   public <R1, A> R1 collect(Collector<? super R, A, R1> collector) {
      return castStream(underlyingStream).collect(collector);
   }
   @Override
   public Optional<R> min(Comparator<? super R> comparator) {
      return castStream(underlyingStream).min(comparator);
   }
   @Override
   public Optional<R> max(Comparator<? super R> comparator) {
      return castStream(underlyingStream).max(comparator);
   }
   @Override
   public long count() {
      return underlyingStream.count();
   }
   @Override
   public boolean anyMatch(Predicate<? super R> predicate) {
      return castStream(underlyingStream).anyMatch(predicate);
   }
   @Override
   public boolean allMatch(Predicate<? super R> predicate) {
      return castStream(underlyingStream).allMatch(predicate);
   }
   @Override
   public boolean noneMatch(Predicate<? super R> predicate) {
      return castStream(underlyingStream).noneMatch(predicate);
   }
   @Override
   public Optional<R> findFirst() {
      return castStream(underlyingStream).findFirst();
   }
   @Override
   public Optional<R> findAny() {
      return castStream(underlyingStream).findAny();
   }
}
| 9,664
| 28.922601
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/DependencyGraph.java
|
package org.infinispan.util;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.StampedLock;
import net.jcip.annotations.ThreadSafe;
/**
 * Graph to track dependencies between objects.
 * <p>
 * An edge {@code x -> y} recorded via {@link #addDependency(Object, Object)} appears in both
 * adjacency maps: {@code outgoingEdges.get(x)} contains {@code y} and {@code incomingEdges.get(y)}
 * contains {@code x}. All access is guarded by a {@link StampedLock}.
 *
 * @author gustavonalle
 * @since 7.0
 */
@ThreadSafe
public final class DependencyGraph<T> {
   private final Map<T, Set<T>> outgoingEdges = new HashMap<>();
   private final Map<T, Set<T>> incomingEdges = new HashMap<>();
   private final StampedLock lock = new StampedLock();
   /**
    * Calculates a topological sort of the graph, in linear time.
    * <p>
    * Uses Kahn's algorithm: repeatedly emit a node with no remaining incoming edges and
    * decrement the in-degree of its successors.
    *
    * @return List<T> elements sorted respecting dependencies
    * @throws CyclicDependencyException if cycles are present in the graph and thus no topological sort is possible
    */
   public List<T> topologicalSort() throws CyclicDependencyException {
      long stamp = lock.readLock();
      try {
         List<T> result = new ArrayList<>(incomingEdges.size());
         Deque<T> noIncomingEdges = new ArrayDeque<>();
         // Remaining in-degree per node; entries are removed as nodes are emitted.
         Map<T, Integer> inDegree = new HashMap<>();
         for (Map.Entry<T, Set<T>> incoming : incomingEdges.entrySet()) {
            int size = incoming.getValue().size();
            T key = incoming.getKey();
            inDegree.put(key, size);
            if (size == 0) {
               noIncomingEdges.add(key);
            }
         }
         while (!noIncomingEdges.isEmpty()) {
            T n = noIncomingEdges.poll();
            result.add(n);
            inDegree.remove(n);
            Set<T> successors = outgoingEdges.get(n);
            if (successors != null) {
               for (T m : successors) {
                  // merge avoids the boxed get/put round-trip of a manual decrement.
                  if (inDegree.merge(m, -1, Integer::sum) == 0) {
                     noIncomingEdges.add(m);
                  }
               }
            }
         }
         if (!inDegree.isEmpty()) {
            // Nodes still holding a positive in-degree can only belong to a cycle.
            throw new CyclicDependencyException("Cycle detected");
         }
         return result;
      } finally {
         lock.unlockRead(stamp);
      }
   }
   /**
    * Add a dependency between two elements.
    *
    * @param from From element
    * @param to To element
    * @throws IllegalArgumentException if either element is null or both are equal
    */
   public void addDependency(T from, T to) {
      if (from == null || to == null || from.equals(to)) {
         throw new IllegalArgumentException("Invalid parameters");
      }
      long stamp = lock.writeLock();
      try {
         // Record the inverse edge only when this is a new edge, keeping both maps in sync.
         if (outgoingEdges.computeIfAbsent(from, k -> new HashSet<>()).add(to)) {
            incomingEdges.computeIfAbsent(to, k -> new HashSet<>()).add(from);
            // Make sure 'from' is present in the in-degree view even when nothing points to
            // it, so topologicalSort() visits every node.
            incomingEdges.computeIfAbsent(from, k -> new HashSet<>());
         }
      } finally {
         lock.unlockWrite(stamp);
      }
   }
   /**
    * Remove a dependency.
    *
    * @param from From element
    * @param to To element
    * @throws java.lang.IllegalArgumentException if the dependency does not exist
    */
   public void removeDependency(T from, T to) {
      long stamp = lock.writeLock();
      try {
         Set<T> dependencies = outgoingEdges.get(from);
         // Single remove() replaces the previous contains()+remove() double lookup.
         if (dependencies == null || !dependencies.remove(to)) {
            throw new IllegalArgumentException("Inexistent dependency");
         }
         incomingEdges.get(to).remove(from);
      } finally {
         lock.unlockWrite(stamp);
      }
   }
   /**
    * Remove every node and edge from the graph.
    */
   public void clearAll() {
      long stamp = lock.writeLock();
      try {
         outgoingEdges.clear();
         incomingEdges.clear();
      } finally {
         lock.unlockWrite(stamp);
      }
   }
   /**
    * Check if an element is depended on.
    *
    * @param element Element stored in the graph
    * @return true if exists any dependency on element; false also when the element is unknown
    */
   public boolean hasDependent(T element) {
      long stamp = lock.readLock();
      try {
         Set<T> dependents = this.incomingEdges.get(element);
         return dependents != null && !dependents.isEmpty();
      } finally {
         lock.unlockRead(stamp);
      }
   }
   /**
    * Return the dependents.
    *
    * @param element Element contained in the graph
    * @return unmodifiable set of elements depending on element; empty when none
    */
   public Set<T> getDependents(T element) {
      long stamp = lock.readLock();
      try {
         Set<T> dependents = this.incomingEdges.get(element);
         if (dependents == null || dependents.isEmpty()) {
            return Collections.emptySet();
         }
         // Reuse the already-fetched set instead of a second map lookup.
         return Collections.unmodifiableSet(dependents);
      } finally {
         lock.unlockRead(stamp);
      }
   }
   /**
    * Remove element from the graph.
    *
    * @param element the element
    */
   public void remove(T element) {
      long stamp = lock.writeLock();
      try {
         if (outgoingEdges.remove(element) != null) {
            for (Set<T> values : outgoingEdges.values()) {
               values.remove(element);
            }
         }
         if (incomingEdges.remove(element) != null) {
            for (Set<T> values : incomingEdges.values()) {
               values.remove(element);
            }
         }
      } finally {
         lock.unlockWrite(stamp);
      }
   }
}
| 5,968
| 27.2891
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CacheTopologyUtil.java
|
package org.infinispan.util;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.topology.CacheTopology;
/**
* Utility methods related to {@link CacheTopology}.
*
* @since 14.0
*/
public enum CacheTopologyUtil {
   ;
   // Commands carrying either of these flags bypass the topology-id consistency check entirely.
   private static final long SKIP_TOPOLOGY_FLAGS = FlagBitSets.SKIP_OWNERSHIP_CHECK | FlagBitSets.CACHE_MODE_LOCAL;
   /**
    * Check if the current {@link LocalizedCacheTopology} is valid for the {@link TopologyAffectedCommand}.
    *
    * @param command The {@link TopologyAffectedCommand} that will use the {@link LocalizedCacheTopology}.
    * @param current The current {@link LocalizedCacheTopology}.
    * @return The current {@link LocalizedCacheTopology}.
    */
   public static LocalizedCacheTopology checkTopology(TopologyAffectedCommand command, LocalizedCacheTopology current) {
      boolean skipCheck = command instanceof FlagAffectedCommand &&
            ((FlagAffectedCommand) command).hasAnyFlag(SKIP_TOPOLOGY_FLAGS);
      if (!skipCheck) {
         int cmdTopology = command.getTopologyId();
         // A negative command topology id means "not set" and is never rejected.
         if (cmdTopology >= 0 && cmdTopology != current.getTopologyId()) {
            // The command was created for a different topology; force a retry on the next one.
            throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
         }
      }
      return current;
   }
}
| 1,491
| 37.25641
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/ExponentialBackOffImpl.java
|
package org.infinispan.util;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* The default {@link ExponentialBackOff} implementation for IRAC (asynchronous cross-site replication).
* <p>
* An exponential back-off implementation with min interval of 500 ms and a maximum of 300'000 ms (5 min). It uses a
* multiplier of 2 (each timeslot will be increase + 100% for each consecutive retry) and the final wait time is
* randomized, +- 50% of the timeslot.
*
* @author Pedro Ruivo
* @since 12.0
*/
public class ExponentialBackOffImpl implements ExponentialBackOff {
   private static final Log log = LogFactory.getLog(ExponentialBackOffImpl.class);
   //TODO currently only used by IRAC. If required, make it configurable (those 4 constants) to cover other uses cases.
   //multiplier value (2 == +100% per retry)
   private static final double MULTIPLIER = 2;
   //initial interval value in milliseconds
   private static final int INITIAL_INTERVAL_MILLIS = 500;
   //maximum back off time in milliseconds (300 seconds == 5 min)
   private static final int MAX_INTERVAL_MILLIS = 300_000;
   //randomization factor (0.5 == 50% below and 50% above the retry interval).
   private static final double RANDOMIZATION_FACTOR = 0.5;
   //the current retry timeout. If a retry occurs, it will wait for this time +- RANDOMIZATION_FACTOR (%)
   //NOTE(review): accessed without synchronization — looks like callers are expected to invoke
   //back-off/reset sequentially from a single owner; confirm before sharing an instance.
   private int currentIntervalMillis;
   //executor used to complete the back-off future after the computed delay
   private final ScheduledExecutorService delayer;
   public ExponentialBackOffImpl(ScheduledExecutorService delayer) {
      this.delayer = delayer;
      this.currentIntervalMillis = INITIAL_INTERVAL_MILLIS;
   }
   long nextBackOffMillis() {
      //package-private for unit test purposes.
      //Once the cap is reached, keep returning the cap without further randomization.
      if (currentIntervalMillis >= MAX_INTERVAL_MILLIS) {
         if (log.isTraceEnabled()) {
            log.tracef("Next backoff time %s ms", MAX_INTERVAL_MILLIS);
         }
         return MAX_INTERVAL_MILLIS;
      }
      int randomIntervalMillis = getRandomValueFromInterval();
      incrementCurrentInterval();
      if (log.isTraceEnabled()) {
         log.tracef("Next backoff time %s ms", randomIntervalMillis);
      }
      //The randomized value can exceed the cap (up to 1.5x the interval), so clamp it.
      return Math.min(randomIntervalMillis, MAX_INTERVAL_MILLIS);
   }
   public void reset() {
      this.currentIntervalMillis = INITIAL_INTERVAL_MILLIS;
   }
   @Override
   public CompletionStage<Void> asyncBackOff() {
      CompletableFuture<Void> cf = new CompletableFuture<>();
      long sleepTime = nextBackOffMillis();
      if (log.isTraceEnabled()) {
         log.tracef("async backing-off for %s.", Util.prettyPrintTime(sleepTime));
      }
      //Complete the returned stage after the delay instead of blocking the caller thread.
      delayer.schedule(() -> cf.complete(null), sleepTime, TimeUnit.MILLISECONDS);
      return cf;
   }
   private void incrementCurrentInterval() {
      // Check for overflow, if overflow is detected set the current interval to the max interval.
      if (currentIntervalMillis >= MAX_INTERVAL_MILLIS) {
         currentIntervalMillis = MAX_INTERVAL_MILLIS;
      } else {
         currentIntervalMillis *= MULTIPLIER;
      }
   }
   private int getRandomValueFromInterval() {
      //Uniformly distributed in [0.5, 1.5) * currentIntervalMillis.
      double delta = RANDOMIZATION_FACTOR * currentIntervalMillis;
      return (int) (delta + (ThreadLocalRandom.current().nextDouble() * currentIntervalMillis));
   }
}
| 3,526
| 37.336957
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/CloseableSuppliedIterator.java
|
package org.infinispan.util;
import java.util.NoSuchElementException;
import java.util.function.Consumer;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.util.function.CloseableSupplier;
/**
 * A {@link CloseableIterator} backed by a {@link CloseableSupplier}: elements are produced by
 * repeatedly invoking the supplier until it returns {@code null}, which marks exhaustion.
 * Closing the iterator closes the supplier.
 */
public class CloseableSuppliedIterator<E> implements CloseableIterator<E> {
   private final CloseableSupplier<? extends E> supplier;
   // Single-element lookahead buffer: filled by hasNext(), drained by next()/forEachRemaining().
   private E next;
   /**
    * @param supplier source of elements; must not be null
    * @throws NullPointerException if supplier is null
    */
   public CloseableSuppliedIterator(CloseableSupplier<? extends E> supplier) {
      if (supplier == null) {
         throw new NullPointerException();
      }
      this.supplier = supplier;
   }
   @Override
   public void close() {
      supplier.close();
   }
   private E getNext() {
      return supplier.get();
   }
   @Override
   public boolean hasNext() {
      if (next == null) {
         next = getNext();
      }
      return next != null;
   }
   @Override
   public E next() {
      E e = next == null ? getNext() : next;
      if (e == null) {
         throw new NoSuchElementException();
      }
      next = null;
      return e;
   }
   @Override
   public void forEachRemaining(Consumer<? super E> action) {
      // Drain the lookahead buffer first, clearing it so a subsequent hasNext()/next() does
      // not replay an element that was already consumed here (previously the buffer was left
      // populated, causing a stale duplicate element after this method returned).
      if (next != null) {
         E buffered = next;
         next = null;
         action.accept(buffered);
      }
      E supplied;
      while ((supplied = supplier.get()) != null) {
         action.accept(supplied);
      }
   }
}
| 1,290
| 20.881356
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableCallable.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.concurrent.Callable;
/**
 * This is a functional interface that is the same as a {@link Callable} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 8.2
 */
@FunctionalInterface
public interface SerializableCallable<V> extends Serializable, Callable<V> {
}
| 375
| 22.5
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableSupplier.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.Supplier;
/**
 * This is a functional interface that is the same as a {@link Supplier} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableSupplier<T> extends Serializable, Supplier<T> {
}
| 352
| 22.533333
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoubleFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoubleFunction;
/**
 * This is a functional interface that is the same as a {@link DoubleFunction} except that it must also be
 * {@link java.io.Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableDoubleFunction<R> extends Serializable, DoubleFunction<R> {
}
| 384
| 24.666667
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntBinaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntBinaryOperator;
/**
* This is a functional interface that is the same as a {@link IntBinaryOperator} except that it must also be
* {@link Serializable}
*
* @author wburns
* @since 9.0
*/
public interface SerializableIntBinaryOperator extends Serializable, IntBinaryOperator {
}
| 382
| 24.533333
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongUnaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongUnaryOperator;
/**
 * This is a functional interface that is the same as a {@link LongUnaryOperator} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableLongUnaryOperator extends Serializable, LongUnaryOperator {
}
| 382
| 24.533333
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableRunnable.java
|
package org.infinispan.util.function;
import java.io.Serializable;
/**
 * This is a functional interface that is the same as a {@link Runnable} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 8.2
 */
@FunctionalInterface
public interface SerializableRunnable extends Serializable, Runnable {
}
| 331
| 21.133333
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoubleUnaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoubleUnaryOperator;
/**
 * This is a functional interface that is the same as a {@link DoubleUnaryOperator} except that it must also be
 * {@link java.io.Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableDoubleUnaryOperator extends Serializable, DoubleUnaryOperator {
}
| 398
| 25.6
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongBinaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongBinaryOperator;
/**
* This is a functional interface that is the same as a {@link LongBinaryOperator} except that it must also be
* {@link Serializable}
*
* @author wburns
* @since 9.0
*/
public interface SerializableLongBinaryOperator extends Serializable, LongBinaryOperator {
}
| 386
| 24.8
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/TriConsumer.java
|
package org.infinispan.util.function;
import java.util.function.Consumer;
/**
* Represents an operation that accepts three input arguments and returns no
* result. This is the three-arity specialization of {@link Consumer}.
* Unlike most other functional interfaces, {@code TriConsumer} is expected
* to operate via side-effects.
*
* <p>This is a <a href="package-summary.html">functional interface</a>
* whose functional method is {@link #accept(Object, Object, Object)}.
*
* @param <T> the type of the first argument to the operation
* @param <U> the type of the second argument to the operation
* @param <V> the type of the third argument to the operation
*
* @author wburns
* @since 8.2
*/
@FunctionalInterface
public interface TriConsumer<T, U, V> {
/**
* Performs this operation on the given arguments.
*
* @param t the first input argument
* @param u the second input argument
* @param v the third input argument
*/
void accept(T t, U u, V v);
}
| 1,001
| 30.3125
| 76
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableComparator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.Comparator;
/**
* This is a functional interface that is the same as a {@link Comparator} except that it must also be
* {@link Serializable}
*
* @author wburns
* @since 9.0
*/
public interface SerializableComparator<T> extends Serializable, Comparator<T> {
}
| 351
| 22.466667
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntFunction;
/**
 * This is a functional interface that is the same as a {@link IntFunction} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableIntFunction<R> extends Serializable, IntFunction<R> {
}
| 364
| 23.333333
| 103
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoublePredicate.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoublePredicate;
/**
 * This is a functional interface that is the same as a {@link DoublePredicate} except that it must also be
 * {@link java.io.Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableDoublePredicate extends Serializable, DoublePredicate {
}
| 382
| 24.533333
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableToLongFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.ToLongFunction;
/**
 * This is a functional interface that is the same as a {@link ToLongFunction} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableToLongFunction<T> extends Serializable, ToLongFunction<T> {
}
| 376
| 24.133333
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntToLongFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntToLongFunction;
/**
 * This is a functional interface that is the same as a {@link IntToLongFunction} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableIntToLongFunction extends Serializable, IntToLongFunction {
}
| 382
| 24.533333
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoubleConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoubleConsumer;
/**
 * This is a functional interface that is the same as a {@link DoubleConsumer} except that it must also be
 * {@link Serializable}. Lambdas whose target type is this interface are serializable lambdas.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableDoubleConsumer extends Serializable, DoubleConsumer {
}
| 370
| 23.733333
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableBinaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.BinaryOperator;
/**
* This is a functional interface that is the same as a {@link BinaryOperator} except that it must also be
* {@link Serializable}
*
* @author wburns
* @since 9.0
*/
public interface SerializableBinaryOperator<T> extends Serializable, BinaryOperator<T> {
}
| 376
| 24.133333
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableObjDoubleConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.ObjDoubleConsumer;
/**
 * This is a functional interface that is the same as a {@link ObjDoubleConsumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the object argument to the operation
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableObjDoubleConsumer<T> extends Serializable, ObjDoubleConsumer<T> {
}
| 388
| 24.933333
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntPredicate.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntPredicate;
/**
 * This is a functional interface that is the same as a {@link IntPredicate} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableIntPredicate extends Serializable, IntPredicate {
}
| 362
| 23.2
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntUnaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntUnaryOperator;
/**
 * This is a functional interface that is the same as a {@link IntUnaryOperator} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableIntUnaryOperator extends Serializable, IntUnaryOperator {
}
| 378
| 24.266667
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableObjLongConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.ObjLongConsumer;
/**
 * This is a functional interface that is the same as a {@link ObjLongConsumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the object argument to the operation
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableObjLongConsumer<T> extends Serializable, ObjLongConsumer<T> {
}
| 380
| 24.4
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongPredicate.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongPredicate;
/**
 * This is a functional interface that is the same as a {@link LongPredicate} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableLongPredicate extends Serializable, LongPredicate {
}
| 366
| 23.466667
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableBiConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.BiConsumer;
/**
 * This is a functional interface that is the same as a {@link BiConsumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the first argument to the operation
 * @param <U> the type of the second argument to the operation
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableBiConsumer<T, U> extends Serializable, BiConsumer<T, U> {
}
| 366
| 23.466667
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntToDoubleFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntToDoubleFunction;
/**
 * This is a functional interface that is the same as a {@link IntToDoubleFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableIntToDoubleFunction extends Serializable, IntToDoubleFunction {
}
| 390
| 25.066667
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.Function;
/**
 * This is a functional interface that is the same as a {@link Function} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the input to the function
 * @param <R> the type of the result of the function
 * @author wburns
 * @since 8.2
 */
@FunctionalInterface
public interface SerializableFunction<T, R> extends Serializable, Function<T, R> {
}
| 379
| 22.75
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableToIntFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.ToIntFunction;
/**
 * This is a functional interface that is the same as a {@link ToIntFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the input to the function
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableToIntFunction<T> extends Serializable, ToIntFunction<T> {
}
| 372
| 23.866667
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.Consumer;
/**
 * This is a functional interface that is the same as a {@link Consumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the input to the operation
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableConsumer<T> extends Serializable, Consumer<T> {
}
| 352
| 22.533333
| 100
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongConsumer;
/**
 * This is a functional interface that is the same as a {@link LongConsumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableLongConsumer extends Serializable, LongConsumer {
}
| 362
| 23.2
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/CloseableSupplier.java
|
package org.infinispan.util.function;
import java.util.function.Supplier;
/**
 * A {@link Supplier} that is also {@link AutoCloseable}, letting callers release any resources
 * the supplier holds once no further values are required (e.g. via try-with-resources).
 *
 * @param <T> the type of results supplied by this supplier
 */
public interface CloseableSupplier<T> extends Supplier<T>, AutoCloseable {
   /**
    * Releases resources held by this supplier. The default implementation is a no-op,
    * so implementations that hold no resources need not override it.
    */
   @Override
   default void close() {
      // Does nothing by default
   }
}
| 230
| 20
| 74
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableIntConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.IntConsumer;
/**
 * This is a functional interface that is the same as a {@link IntConsumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableIntConsumer extends Serializable, IntConsumer {
}
| 358
| 22.933333
| 103
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongToDoubleFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongToDoubleFunction;
/**
 * This is a functional interface that is the same as a {@link LongToDoubleFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableLongToDoubleFunction extends Serializable, LongToDoubleFunction {
}
| 394
| 25.333333
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableToDoubleFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.ToDoubleFunction;
/**
 * This is a functional interface that is the same as a {@link ToDoubleFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the input to the function
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableToDoubleFunction<T> extends Serializable, ToDoubleFunction<T> {
}
| 384
| 24.666667
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoubleBinaryOperator.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoubleBinaryOperator;
/**
* This is a functional interface that is the same as a {@link DoubleBinaryOperator} except that it must also be
* {@link Serializable}
*
* @author wburns
* @since 9.0
*/
public interface SerializableDoubleBinaryOperator extends Serializable, DoubleBinaryOperator {
}
| 394
| 25.333333
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableObjIntConsumer.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.ObjIntConsumer;
/**
 * This is a functional interface that is the same as a {@link ObjIntConsumer} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the object argument to the operation
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableObjIntConsumer<T> extends Serializable, ObjIntConsumer<T> {
}
| 376
| 24.133333
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableBiFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.BiFunction;
/**
* This is a functional interface that is the same as a {@link BiFunction} except that it must also be
* {@link Serializable}
*
* @author wburns
* @since 9.0
*/
public interface SerializableBiFunction<T, U, R> extends Serializable, BiFunction<T, U, R> {
}
| 372
| 23.866667
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongFunction;
/**
 * This is a functional interface that is the same as a {@link LongFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <R> the type of the result of the function
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableLongFunction<R> extends Serializable, LongFunction<R> {
}
| 368
| 23.6
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableLongToIntFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.LongToIntFunction;
/**
 * This is a functional interface that is the same as a {@link LongToIntFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableLongToIntFunction extends Serializable, LongToIntFunction {
}
| 382
| 24.533333
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoubleToIntFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoubleToIntFunction;
/**
 * This is a functional interface that is the same as a {@link DoubleToIntFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableDoubleToIntFunction extends Serializable, DoubleToIntFunction {
}
| 390
| 25.066667
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializablePredicate.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.Predicate;
/**
 * This is a functional interface that is the same as a {@link Predicate} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @param <T> the type of the input to the predicate
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializablePredicate<T> extends Serializable, Predicate<T> {
}
| 356
| 22.8
| 101
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/function/SerializableDoubleToLongFunction.java
|
package org.infinispan.util.function;
import java.io.Serializable;
import java.util.function.DoubleToLongFunction;
/**
 * This is a functional interface that is the same as a {@link DoubleToLongFunction} except that it must also be
 * {@link Serializable}, so instances (including lambdas) can be serialized.
 *
 * @author wburns
 * @since 9.0
 */
@FunctionalInterface
public interface SerializableDoubleToLongFunction extends Serializable, DoubleToLongFunction {
}
| 394
| 25.333333
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/WithinThreadExecutor.java
|
package org.infinispan.util.concurrent;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * An executor that works within the current thread.
 *
 * @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
 * @see <a href="http://jcip.net/">Java Concurrency In Practice</a>
 * @since 4.0
 */
public final class WithinThreadExecutor extends AbstractExecutorService {
   // Flipped once by shutdown()/shutdownNow(); volatile so other threads observe the change.
   private volatile boolean stopped;

   @Override
   public void execute(Runnable command) {
      // Tasks run synchronously on the caller's own thread.
      command.run();
   }

   @Override
   public void shutdown() {
      stopped = true;
   }

   @Override
   public List<Runnable> shutdownNow() {
      stopped = true;
      // Nothing ever queues, so there are no pending tasks to hand back.
      return Collections.emptyList();
   }

   @Override
   public boolean isShutdown() {
      return stopped;
   }

   @Override
   public boolean isTerminated() {
      // There are no worker threads, so shutdown implies termination.
      return stopped;
   }

   @Override
   public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
      // No asynchronous work can be outstanding; report the shutdown state immediately.
      return stopped;
   }
}
| 1,096
| 21.387755
| 93
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/package-info.java
|
/**
* Thread-safe containers and other concurrency-related utilities, designed to supplement JDK concurrency utilities
* and containers.
*/
package org.infinispan.util.concurrent;
| 184
| 25.428571
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/BlockingManager.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collector;
import org.reactivestreams.Publisher;
/**
 * Manager utility for blocking operations that runs tasks on the blocking executor and returns a
 * {@code CompletionStage} or {@code Publisher} that continues on the non-blocking executor, similar
 * to {@code stage.handleAsync(callback, blockingExecutor).whenCompleteAsync(NOOP, nonBlockingExecutor)}.
 * <p>
 * If the current thread is blocking, it blocks until the task can run, then runs the task in the current thread and returns a
 * completed {@code CompletionStage} so it <em>does not</em> continue the execution on the non-blocking executor.
 * <p>
 * Many of the methods on {@code BlockingManager} let you pass an identifier (ID) when performing the operation. This ID is
 * printed with TRACE logs. For this reason, you should provide IDs that are unique, making it easier to track the stream
 * of operations across threads if TRACE logs are used.
 */
public interface BlockingManager {
   /**
    * Replacement for {@code CompletionStage.runAsync()} that invokes the {@code Runnable} in a blocking thread
    * if the current thread is non-blocking or in the current thread if the current thread is blocking.
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking, the task is invoked in the current thread, meaning the stage is
    * always completed when returned, so any chained stage is also invoked on the current thread.
    * @param runnable blocking operation that runs some code.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @return a stage that is completed after the runnable is done or throws an exception.
    */
   CompletionStage<Void> runBlocking(Runnable runnable, Object traceId);
   /**
    * Subscribes to the provided publisher on the invoking thread. Published values are observed on a blocking thread
    * one at a time and passed to the provided consumer. The returned stage, if not complete, will resume any chained
    * stage on the non-blocking executor.
    * <p>
    * If no values are published, the returned stage will be completed upon return of this method and require no
    * thread context switches.
    * <p>
    * Note that if the current thread is blocking, everything including subscription, publication and consumption of
    * values will be done on the current thread.
    * @param publisher publisher of values to consume
    * @param consumer consumer to handle the values
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads
    * @param <E> the type of entries
    * @return a stage that is completed after all values are consumed
    */
   <E> CompletionStage<Void> subscribeBlockingConsumer(Publisher<E> publisher, Consumer<E> consumer, Object traceId);
   /**
    * Subscribes to the provided publisher on the invoking thread. Published values are observed on a blocking thread
    * one at a time and passed to the provided collector. The returned stage, if not complete, will resume any chained
    * stage on the non-blocking executor.
    * <p>
    * If no values are published, the returned stage will be completed upon return of this method and require no
    * thread context switches.
    * <p>
    * Note that if the current thread is blocking, everything including subscription, publication and collection of
    * values will be done on the current thread.
    * @param publisher publisher of values to collect
    * @param collector collector of the values
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads
    * @param <T> the type of entries
    * @param <A> accumulator type of the entries
    * @param <R> final value type
    * @return a stage that when complete contains the collected values as a single value
    */
   <T, A, R> CompletionStage<R> subscribeBlockingCollector(Publisher<T> publisher, Collector<? super T, A, R> collector,
                                                           Object traceId);
   /**
    * Replacement for {@code CompletionStage.supplyAsync()} that invokes the {@code Supplier} in a blocking thread
    * (if the current thread is non-blocking) or in the current thread (if the current thread is blocking).
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking, the task is invoked in the current thread meaning the stage is
    * always completed when returned, so any chained stage is also invoked on the current thread.
    * @param <V> the supplied type.
    * @param supplier blocking operation that returns a value.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @return a stage that, when complete, contains the value returned from the supplier or a throwable.
    */
   <V> CompletionStage<V> supplyBlocking(Supplier<V> supplier, Object traceId);
   /**
    * Replacement for {@code CompletionStage.handleAsync()} that invokes the {@code BiFunction} in a blocking thread
    * (if the current thread is non-blocking) or in the current thread (if the current thread is blocking and the stage
    * is completed).
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking and the stage is completed, the task is invoked in the current thread
    * meaning the stage is always completed when returned, so any chained stage is also invoked on the current thread.
    *
    * @param stage stage, that may or may not be complete, to handle.
    * @param function the blocking function.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @param <I> input value type to the function.
    * @param <O> output value type after being transformed via function.
    * @return a stage that, when complete, contains the value returned from the function or a throwable.
    */
   <I, O> CompletionStage<O> handleBlocking(CompletionStage<? extends I> stage,
                                            BiFunction<? super I, Throwable, ? extends O> function, Object traceId);
   /**
    * Replacement for {@link CompletionStage#thenRunAsync(Runnable)} that invokes the {@code Runnable} in a blocking thread
    * (if the current thread is non-blocking) or in the current thread (if the current thread is blocking and the stage
    * is completed).
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking and the stage is completed, the task is invoked in the current thread
    * meaning the stage is always completed when returned, so any chained stage is also invoked on the current thread.
    *
    * @param stage stage, that may or may not be complete, to apply.
    * @param runnable blocking operation that runs some code.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @param <I> input value type to the function.
    * @return a stage that is completed after the action is done or throws an exception.
    */
   <I> CompletionStage<Void> thenRunBlocking(CompletionStage<? extends I> stage, Runnable runnable, Object traceId);
   /**
    * Replacement for {@code CompletionStage.thenApplyAsync()} that invokes the {@code Function} in a blocking thread
    * (if the current thread is non-blocking) or in the current thread (if the current thread is blocking and the stage
    * is completed).
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking and the stage is completed, the task is invoked in the current thread
    * meaning the stage is always completed when returned, so any chained stage is also invoked on the current thread.
    *
    * @param stage stage, that may or may not be complete, to apply.
    * @param function the blocking function.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @param <I> input value type to the function.
    * @param <O> output value type after being transformed via function.
    * @return a stage that, when complete, contains the value returned from the function or a throwable.
    */
   <I, O> CompletionStage<O> thenApplyBlocking(CompletionStage<? extends I> stage,
                                               Function<? super I, ? extends O> function, Object traceId);
   /**
    * Replacement for {@code CompletionStage.thenComposeAsync()} that invokes the {@code Function} in a blocking thread
    * (if the current thread is non-blocking) or in the current thread (if the current thread is blocking and the stage
    * is completed).
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking and the stage is completed, the task is invoked in the current thread
    * meaning the stage is always completed when returned, so any chained stage is also invoked on the current thread.
    * <p>
    * Note this method is not normally required as the Function already returns a CompletionStage and it is recommended
    * to have the composed function just be non-blocking to begin with.
    * This method is here when invoking some method that may spuriously block to be safe.
    *
    * @param stage stage, that may or may not be complete, to compose.
    * @param function the blocking function.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @param <I> input value type to the function.
    * @param <O> output value type after being transformed via function.
    * @return a stage that, when complete, contains the value returned from the composed function or a throwable.
    */
   <I, O> CompletionStage<O> thenComposeBlocking(CompletionStage<? extends I> stage,
                                                 Function<? super I, ? extends CompletionStage<O>> function, Object traceId);
   /**
    * Replacement for {@code CompletionStage.whenCompleteAsync()} that invokes the {@code BiConsumer} in a blocking thread
    * (if the current thread is non-blocking) or in the current thread (if the current thread is blocking).
    * The returned stage, if not complete, resumes any chained stage on the non-blocking executor.
    * <p>
    * Note that if the current thread is blocking and the stage is completed, the task is invoked in the current thread
    * meaning the stage is always completed when returned, so any chained stage is also invoked on the current thread.
    *
    * @param stage stage, that may or may not be complete, to apply.
    * @param biConsumer the blocking biConsumer.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @param <V> stage value type.
    * @return a stage that is complete when the biConsumer is complete, but retains the results from the original stage.
    */
   <V> CompletionStage<V> whenCompleteBlocking(CompletionStage<V> stage,
                                               BiConsumer<? super V, ? super Throwable> biConsumer, Object traceId);
   /**
    * When the provided stage is complete, continue the completion chain of the returned CompletionStage on the
    * supplied executor. If tracing is enabled, a trace message is printed using the object as an identifier to more
    * easily track the transition between threads.
    * <p>
    * This method is useful when an asynchronous computation completes and you do not want to run further processing
    * on the thread that returned it. An example may be that some blocking operation is performed on a special blocking
    * thread pool. However when the blocking operation completes we want to continue processing that result in a thread
    * pool that is for computational tasks.
    * <p>
    * If the supplied stage is already completed when invoking this command, it returns an already completed
    * stage, which means any additional dependent stages are run in the invoking thread.
    * @param <V> return value type of the supplied stage.
    * @param delay the stage to delay the continuation until complete.
    * @param traceId the identifier to print when tracing is enabled.
    * @return a CompletionStage that, when depended upon, runs any callback in the supplied executor.
    */
   <V> CompletionStage<V> continueOnNonBlockingThread(CompletionStage<V> delay, Object traceId);
   /**
    * Wraps a publisher that is known to block when subscribed to. Thus, if the subscribing thread is a
    * non-blocking thread, we will instead subscribe on a blocking thread and observe on a non-blocking thread for each
    * published value.
    * <p>
    * If, however, the subscribing thread is a blocking thread no threading changes will be done, which
    * means the publisher will be subscribed to on the invoking thread. In this case values have no guarantee as to
    * which thread they are observed on, dependent solely on how the Publisher publishes them.
    * @param publisher the publisher that, when subscribed to, blocks the current thread.
    * @param <V> the published entry types.
    * @return publisher that does not block the current thread.
    */
   <V> Publisher<V> blockingPublisher(Publisher<V> publisher);
   /**
    * Subscribes to the provided blocking publisher using the blocking executor, ignoring all elements and returning
    * a {@link CompletionStage} with a value of null which completes on a non-blocking thread. This method is designed
    * to be used by a {@link Publisher} that when subscribed to has some type of side-effect that is blocking.
    * <p>
    * The returned {@link CompletionStage} will always be completed upon a non-blocking thread if the current thread is
    * non-blocking.
    * <p>
    * Note that if the current thread is blocking, everything including subscription, publication and collection of
    * values will be done on the current thread.
    *
    * @param publisher the publisher that, when subscribed to, blocks the current thread.
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @param <V> the published entry types.
    * @return a completion stage that completes once the publisher has completed.
    */
   <V> CompletionStage<Void> blockingPublisherToVoidStage(Publisher<V> publisher, Object traceId);
   /**
    * Returns an executor that will run the given tasks on a blocking thread as required.
    * <p>
    * Note that this executor will always submit the task to the blocking thread pool, even if the requestor
    * is a blocking thread. This is different than other methods that will invoke the task in the invoking
    * thread if the invoking thread is blocking.
    * @param name name used to identify the executor (e.g. in traces).
    * @return an executor that can run blocking commands.
    */
   Executor asExecutor(String name);
   /**
    * Provides a {@link BlockingExecutor} which is limited to the provided concurrency amount.
    *
    * @param name name of the limited blocking executor.
    * @param concurrency maximum amount of concurrent operations to be performed via the returned executor.
    * @return a blocking executor limited in the amount of concurrent invocations.
    */
   BlockingExecutor limitedBlockingExecutor(String name, int concurrency);
   /**
    * Replacement for {@link java.util.concurrent.ScheduledExecutorService#schedule(Runnable, long, TimeUnit)}} that
    * invokes the {@code Runnable} in a blocking thread only after the elapsed time.
    * <p>
    * Unlike other methods in this interface, the submitting thread does not impact this method's behavior.
    *
    * @param runnable blocking operation that runs some code.
    * @param delay the time from now to delay execution
    * @param unit the time unit of the delay parameter
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @return a stage that is completed after the runnable is done or throws an exception.
    */
   default ScheduledBlockingCompletableStage<Void> scheduleRunBlocking(Runnable runnable, long delay, TimeUnit unit, Object traceId) {
      // Adapt the Runnable to the Supplier-based overload; the stage's value is simply null.
      return scheduleRunBlocking(() -> {
         runnable.run();
         return null;
      }, delay, unit, traceId);
   }
   /**
    * Replacement for {@link java.util.concurrent.ScheduledExecutorService#schedule(java.util.concurrent.Callable, long, TimeUnit)}} that
    * invokes the {@code Supplier} in a blocking thread only after the elapsed time.
    * <p>
    * Unlike other methods in this interface, the submitting thread does not impact this method's behavior.
    *
    * @param supplier blocking operation that runs some code.
    * @param delay the time from now to delay execution
    * @param unit the time unit of the delay parameter
    * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
    * @return a stage that is completed after the supplier is done or throws an exception.
    */
   <V> ScheduledBlockingCompletableStage<V> scheduleRunBlocking(Supplier<V> supplier, long delay, TimeUnit unit, Object traceId);
   /**
    * Executor interface that submits task to a blocking pool that returns a stage that is guaranteed
    * to run any chained stages on a non-blocking thread if the stage is not yet complete.
    * <p>
    * Note that this executor runs the task in the invoking thread if the thread is a blocking thread.
    */
   interface BlockingExecutor {
      /**
       * Executes the given runnable on the blocking executor. The traceId is printed in the invoking thread, in the
       * blocking thread, and also during resumption of the non-blocking thread.
       * @param runnable blocking operation that runs some code.
       * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
       * @return a stage that is completed after the runnable is done or throws an exception.
       */
      CompletionStage<Void> execute(Runnable runnable, Object traceId);
      /**
       * Executes the given supplier on the blocking executor. The traceId is printed in the invoking thread, in the
       * blocking thread, and also during resumption of the non-blocking thread.
       * @param supplier blocking operation that returns a value.
       * @param traceId an identifier that can be used to tell in a trace when an operation moves between threads.
       * @param <V> supplier type.
       * @return a stage that, when complete, contains the value returned from the supplier or a throwable.
       */
      <V> CompletionStage<V> supply(Supplier<V> supplier, Object traceId);
   }
   /**
    * A {@link ScheduledCompletableStage} that is also a {@link ScheduledFuture}, returned by the
    * scheduleRunBlocking methods.
    */
   interface ScheduledBlockingCompletableStage<V> extends ScheduledCompletableStage<V>, ScheduledFuture<V> {
   }
}
| 19,357
| 59.118012
| 137
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/BoundedExecutors.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
 * Similar to JDK {@link java.util.concurrent.Executors} except that the factory methods here allow you to specify the
 * size of the blocking queue that backs the executor.
 *
 * @author Manik Surtani (<a href="mailto:manik AT jboss DOT org">manik AT jboss DOT org</a>)
 * @since 4.0
 */
public class BoundedExecutors {

   private BoundedExecutors() {
      // Utility class holding static factories only; not meant to be instantiated.
   }

   /**
    * Creates a thread pool that reuses a fixed set of threads operating off a shared bounded queue. If any thread
    * terminates due to a failure during execution prior to shutdown, a new one will take its place if needed to execute
    * subsequent tasks.
    *
    * @param nThreads the number of threads in the pool
    * @param boundedQueueSize size of the bounded queue
    * @return the newly created thread pool
    */
   public static ExecutorService newFixedThreadPool(int nThreads, int boundedQueueSize) {
      // core == max threads with zero keep-alive: a fixed-size pool. Excess work queues up to
      // boundedQueueSize entries before the executor starts rejecting submissions.
      return new ThreadPoolExecutor(nThreads, nThreads,
                                    0L, TimeUnit.MILLISECONDS,
                                    new LinkedBlockingQueue<>(boundedQueueSize));
   }

   /**
    * Creates a thread pool that reuses a fixed set of threads operating off a shared bounded queue, using the provided
    * ThreadFactory to create new threads when needed.
    *
    * @param nThreads the number of threads in the pool
    * @param threadFactory the factory to use when creating new threads
    * @param boundedQueueSize size of the bounded queue
    * @return the newly created thread pool
    */
   public static ExecutorService newFixedThreadPool(int nThreads, ThreadFactory threadFactory, int boundedQueueSize) {
      return new ThreadPoolExecutor(nThreads, nThreads,
                                    0L, TimeUnit.MILLISECONDS,
                                    new LinkedBlockingQueue<>(boundedQueueSize),
                                    threadFactory);
   }
}
| 2,159
| 44
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/AggregateCompletionStage.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.CompletionStage;
/**
 * Aggregation of multiple {@link CompletionStage} instances where their count is not known or when a large amount
 * of stages are required, as it uses less memory footprint per stage.
 * <p>
 * This stage allows for multiple CompletionStages to be registered via {@link #dependsOn(CompletionStage)}. This
 * AggregateCompletionStage will not complete until it is frozen via {@link #freeze()} and all of the registered
 * CompletionStages complete. If one of the stages that is being depended upon completes with an exception
 * this AggregateCompletionStage will complete with the same Throwable cause after all stages are complete.
 *
 * @param <R> the result type of the stage returned from {@link #freeze()}
 */
public interface AggregateCompletionStage<R> {
   /**
    * Adds another CompletionStage for this stage to be reliant upon.
    * <p>
    * If this AggregateCompletionStage is already frozen, it will throw an {@link IllegalStateException}
    * @param stage stage to depend on
    * @return this stage
    */
   AggregateCompletionStage<R> dependsOn(CompletionStage<?> stage);

   /**
    * Marks this composed stage as frozen, allowing it to complete when all stages it depends on complete
    * @return this stage
    */
   CompletionStage<R> freeze();
}
| 1,279
| 41.666667
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/CountDownRunnable.java
|
package org.infinispan.util.concurrent;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
/**
 * A concurrency structure that invokes a {@link Runnable} when its count reaches zero.
 * <p>
 * Method {@link #increment()} and {@link #decrement()} are available to increase or decrease the counter. When {@link
 * #freeze()} is invoked, no more {@link #increment()} are allowed to be called. It assumes a correct invocation
 * behavior, i.e. {@link #increment()} is invoked before the corresponding {@link #decrement()}.
 * <p>
 * The {@link Runnable} is executed only once.
 *
 * @since 14
 */
public class CountDownRunnable {

   // Lock-free counter access; avoids an AtomicInteger allocation per instance.
   private static final AtomicIntegerFieldUpdater<CountDownRunnable> PENDING_UPDATER = AtomicIntegerFieldUpdater.newUpdater(CountDownRunnable.class, "pending");

   // Sentinel state: the runnable has already been executed and must never run again.
   private static final int COMPLETED = -1;
   // No outstanding operations; completion is possible once frozen.
   private static final int READY = 0;

   private final Runnable runnable;
   // Count of increments not yet matched by a decrement; becomes COMPLETED after the runnable runs.
   private volatile int pending = READY;
   // Once true, increment() is rejected and completion may happen as soon as pending hits READY.
   private volatile boolean frozen;

   public CountDownRunnable(Runnable runnable) {
      this.runnable = Objects.requireNonNull(runnable);
   }

   // Registers one outstanding operation. Rejected after freeze(); note the check and the
   // increment are not atomic, which relies on the documented correct-usage assumption above.
   public void increment() {
      if (frozen) {
         throw new IllegalStateException();
      }
      PENDING_UPDATER.incrementAndGet(this);
   }

   // Marks one operation as finished. Only the decrement that brings the count back to READY
   // after freeze() may trigger the runnable.
   public void decrement() {
      if (PENDING_UPDATER.decrementAndGet(this) == READY && frozen) {
         tryComplete();
      }
   }

   // Current outstanding count; returns COMPLETED (-1) once the runnable has executed.
   public int missing() {
      return pending;
   }

   // Disallows further increments and completes immediately if nothing is outstanding.
   public void freeze() {
      frozen = true;
      tryComplete();
   }

   private void tryComplete() {
      // CAS READY -> COMPLETED guarantees at-most-once execution even if freeze() and the
      // final decrement() race: exactly one caller wins the transition.
      if (PENDING_UPDATER.compareAndSet(this, READY, COMPLETED)) {
         runnable.run();
      }
   }
}
| 1,692
| 27.694915
| 160
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/NonBlockingManagerImpl.java
|
package org.infinispan.util.concurrent;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Scheduler;
import io.reactivex.rxjava3.schedulers.Schedulers;
import net.jcip.annotations.GuardedBy;
/**
 * Implementation of {@link NonBlockingManager} backed by the timeout scheduler (for firing tasks)
 * and the non-blocking executor (for completing futures that have dependents).
 */
@Scope(Scopes.GLOBAL)
public class NonBlockingManagerImpl implements NonBlockingManager {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());

   @ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
   @Inject ScheduledExecutorService scheduler;

   @ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
   @Inject Executor executor;

   @Override
   public AutoCloseable scheduleWithFixedDelay(Supplier<CompletionStage<?>> supplier, long initialDelay, long delay,
                                               TimeUnit unit, Predicate<? super Throwable> mayRepeatOnThrowable) {
      ReschedulingTask task = new ReschedulingTask(supplier, delay, unit, mayRepeatOnThrowable);
      // Publish the initial future under the task monitor so close() always observes it and can
      // cancel a run that was scheduled but has not yet started.
      synchronized (task) {
         task.future = scheduler.schedule(task, initialDelay, unit);
      }
      return task;
   }

   @Override
   public <T> void complete(CompletableFuture<? super T> future, T value) {
      // This is just best effort to eliminate context switch if there are no dependents.
      if (future.getNumberOfDependents() > 0) {
         log.tracef("Future has a dependent, completing it in non blocking thread");
         executor.execute(() -> future.complete(value));
      } else {
         log.tracef("Future has no dependent, completing it in invoking thread");
         future.complete(value);
      }
   }

   @Override
   public void completeExceptionally(CompletableFuture<?> future, Throwable t) {
      // This is just best effort to eliminate context switch if there are no dependents.
      if (future.getNumberOfDependents() > 0) {
         log.tracef("Future has a dependent, completing it exceptionally in non blocking thread");
         executor.execute(() -> future.completeExceptionally(t));
      } else {
         log.tracef("Future has no dependent, completing it exceptionally in invoking thread");
         future.completeExceptionally(t);
      }
   }

   /**
    * Self-rescheduling task: after the supplied stage completes, it schedules itself again
    * {@code delay} units later. The {@code future} field is guarded by {@code this}; a
    * {@code null} value is the closed marker that forbids any further rescheduling.
    */
   private class ReschedulingTask implements AutoCloseable, Runnable {
      @GuardedBy("this")
      private Future<?> future;
      private final Supplier<CompletionStage<?>> supplier;
      private final long delay;
      private final TimeUnit unit;
      private final Predicate<? super Throwable> mayRepeatOnThrowable;

      private ReschedulingTask(Supplier<CompletionStage<?>> supplier, long delay, TimeUnit unit,
                               Predicate<? super Throwable> mayRepeatOnThrowable) {
         this.supplier = supplier;
         this.delay = delay;
         this.unit = unit;
         this.mayRepeatOnThrowable = mayRepeatOnThrowable;
      }

      @Override
      public void run() {
         CompletionStage<?> stage = supplier.get();
         stage.whenComplete((v, t) -> {
            if (t != null) {
               // Stop repeating unless the predicate explicitly allows rescheduling on this throwable.
               if (mayRepeatOnThrowable == null || !mayRepeatOnThrowable.test(t)) {
                  log.scheduledTaskEncounteredThrowable(supplier, t);
                  return;
               }
               log.tracef(t, "There was an error in submitted periodic non blocking task with supplier %s, configured to resubmit", supplier);
            }
            boolean isRunning;
            synchronized (this) {
               isRunning = future != null;
            }
            if (isRunning) {
               Future<?> newFuture = scheduler.schedule(this, delay, unit);
               boolean shouldCancel = false;
               // close() may have run between the isRunning check and here: only publish the new
               // future if the task is still open; otherwise cancel the run we just scheduled.
               synchronized (this) {
                  if (future == null) {
                     shouldCancel = true;
                  } else {
                     future = newFuture;
                  }
               }
               if (shouldCancel) {
                  if (log.isTraceEnabled()) {
                     log.tracef("Periodic non blocking task with supplier %s was cancelled while rescheduling.", supplier);
                  }
                  newFuture.cancel(true);
               }
            } else if (log.isTraceEnabled()) {
               log.tracef("Periodic non blocking task with supplier %s was cancelled prior to execution.", supplier);
            }
         });
      }

      @Override
      public void close() throws Exception {
         if (log.isTraceEnabled()) {
            log.tracef("Periodic non blocking task with supplier %s was cancelled.", supplier);
         }
         Future<?> cancelFuture;
         // Null out the future first (the closed marker), then cancel any pending run outside the lock.
         synchronized (this) {
            cancelFuture = future;
            future = null;
         }
         if (cancelFuture != null) {
            cancelFuture.cancel(false);
         }
      }
   }

   @Override
   public Scheduler asScheduler() {
      return Schedulers.from(executor);
   }
}
| 5,476
| 37.034722
| 142
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/ScheduledCompletableStage.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Delayed;
/**
 * A scheduled CompletionStage that may be cancelled if it has not been started yet.
 * If it is cancelled the stage will be completed exceptionally with a {@link java.util.concurrent.CancellationException}.
 * <p>
 * Extending {@link Delayed} lets callers query the remaining time until the stage is due to run.
 *
 * @param <V> The result type
 */
public interface ScheduledCompletableStage<V> extends Delayed, CompletionStage<V> {
}
| 458
| 31.785714
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/IsolationLevel.java
|
package org.infinispan.util.concurrent;
/**
 * Various transaction isolation levels as an enumerated class. Note that Infinispan
 * supports only {@link #READ_COMMITTED} and {@link #REPEATABLE_READ}, upgrading where possible.
 * <p/>
 * Also note that Infinispan defaults to {@link #READ_COMMITTED}.
 * <p/>
 *
 * @author (various)
 * @see <a href="http://infinispan.org/docs/stable/user_guide/user_guide.html#isolation_levels">Isolation levels</a>
 * @since 4.0
 */
public enum IsolationLevel {
   /**
    * No isolation.
    */
   NONE,
   /**
    * Serializable isolation. Not supported directly by Infinispan; see the class-level note on upgrading.
    */
   SERIALIZABLE,
   /**
    * Repeatable-read isolation; one of the two levels Infinispan supports.
    */
   REPEATABLE_READ,
   /**
    * Read-committed isolation; the Infinispan default.
    */
   READ_COMMITTED,
   /**
    * Read-uncommitted isolation. Not supported directly by Infinispan; see the class-level note on upgrading.
    */
   READ_UNCOMMITTED
}
| 619
| 24.833333
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/NonBlockingManager.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;
import io.reactivex.rxjava3.core.Scheduler;
/**
 * Manager utility for non-blocking operations.
 */
public interface NonBlockingManager {
   /**
    * Schedules the supplier that is executed after the <b>initialDelay</b> period and subsequently runs <b>delay</b>
    * after the previous stage completes. The supplier must not block the thread in which it runs and should immediately
    * return to avoid blocking the scheduling thread.
    * <p>
    * The supplier will not be rescheduled if the supplier or the returned stage produce a Throwable.
    *
    * @param supplier non-blocking operation supplier.
    * @param initialDelay period of time before the supplier is invoked.
    * @param delay delay between subsequent supplier invocations.
    * @param unit time unit for delays.
    * @return an AutoCloseable that cancels the scheduled task.
    */
   default AutoCloseable scheduleWithFixedDelay(Supplier<CompletionStage<?>> supplier, long initialDelay, long delay, TimeUnit unit) {
      return scheduleWithFixedDelay(supplier, initialDelay, delay, unit, null);
   }

   /**
    * Schedules the supplier that is executed after the <b>initialDelay</b> period and subsequently runs <b>delay</b>
    * after the previous stage completes. The supplier must not block the thread in which it runs and should immediately
    * return to avoid blocking the scheduling thread.
    * <p>
    * This supplier method will not be rescheduled if the supplier throws any Throwable directly.
    * If the CompletionStage returned from the supplier produces a Throwable, it is possible to reschedule the supplier
    * if the given Throwable passes the <b>mayRepeatOnThrowable</b> predicate.
    *
    * @param supplier non-blocking operation supplier.
    * @param initialDelay period of time before the supplier is invoked.
    * @param delay delay between subsequent supplier invocations.
    * @param unit time unit for delays.
    * @param mayRepeatOnThrowable whether to continue scheduling if the provided supplier returns a Throwable
    * @return an AutoCloseable that cancels the scheduled task.
    */
   AutoCloseable scheduleWithFixedDelay(Supplier<CompletionStage<?>> supplier, long initialDelay, long delay,
                                        TimeUnit unit, Predicate<? super Throwable> mayRepeatOnThrowable);

   /**
    * Completes the provided future with the given value. If the future does not have any dependents it will complete
    * it in the invoking thread. However, if there are any dependents it will complete it in a non blocking thread.
    * This is a best effort to prevent a context switch for a stage that does not yet have a dependent while also
    * handing off the dependent processing to a non blocking thread if necessary.
    *
    * @param future the future to complete
    * @param value the value to complete the future with
    * @param <T> the type of the value
    */
   <T> void complete(CompletableFuture<? super T> future, T value);

   /**
    * Exceptionally completes the provided future with the given throwable. If the future does not have any dependents
    * it will complete it in the invoking thread. However, if there are any dependents it will complete it in a non
    * blocking thread. This is a best effort to prevent a context switch for a stage that does not yet have a dependent
    * while also handing off the dependent processing to a non blocking thread if necessary.
    *
    * @param future future to complete
    * @param t throwable to complete the future with
    */
   void completeExceptionally(CompletableFuture<?> future, Throwable t);

   /**
    * Returns a scheduler to be used with RxJava {@link io.reactivex.rxjava3.core.Flowable#observeOn(Scheduler)} method
    * or similar.
    * @return scheduler to use within the RxJava ecosystem
    */
   Scheduler asScheduler();
}
| 4,073
| 49.296296
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/BlockingTaskAwareExecutorService.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.ExecutorService;
/**
 * Executor service that is aware of {@code BlockingRunnable} and only dispatches the runnable to a thread when it has
 * low (or no) probability of blocking the thread.
 * <p/>
 * However, it is not aware of state changes on its own, so you must invoke {@link #checkForReadyTasks()} to notify
 * this executor that some runnable may be ready to be processed.
 *
 * @author Pedro Ruivo
 * @since 5.3
 */
public interface BlockingTaskAwareExecutorService extends ExecutorService {

   /**
    * Executes the given command at some time in the future when the command is less likely to block a thread.
    *
    * @param runnable the command to execute
    */
   void execute(BlockingRunnable runnable);

   /**
    * It checks for tasks ready to be processed in this {@link ExecutorService}.
    *
    * The invocation is done asynchronously, so the invoker is never blocked.
    */
   void checkForReadyTasks();
}
| 995
| 30.125
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/ConditionFuture.java
|
package org.infinispan.util.concurrent;
import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.commons.IllegalLifecycleStateException;
/**
 * A mixture between a {@link CompletableFuture} and a {@link java.util.concurrent.locks.Condition}.
 * <p>
 * Callers obtain a stage via {@link #newConditionStage(Predicate, long, TimeUnit)} that completes once
 * {@link #update(Object)} (or {@link #updateAsync(Object, Executor)}) is called with a value satisfying
 * the predicate, or completes exceptionally when the timeout elapses or {@link #stop()} is invoked.
 *
 * @since 10.1
 * @author Dan Berindei
 */
public class ConditionFuture<T> {
   // Keyed by identity of the per-request Data future, mapped to the predicate that releases it.
   private final Map<Data, Predicate<T>> futures = Collections.synchronizedMap(new IdentityHashMap<>());
   private final ScheduledExecutorService timeoutExecutor;
   private volatile T lastValue;
   private volatile boolean running = true;

   public ConditionFuture(ScheduledExecutorService timeoutExecutor) {
      this.timeoutExecutor = timeoutExecutor;
   }

   /**
    * Same as {@link #newConditionStage(Predicate, Supplier, long, TimeUnit)}, completing the stage
    * with a {@link TimeoutException} when the timeout expires.
    */
   public CompletionStage<Void> newConditionStage(Predicate<T> test, long timeout, TimeUnit timeUnit) {
      return newConditionStage(test, TimeoutException::new, timeout, timeUnit);
   }

   /**
    * Create a new {@link CompletionStage} that completes after the first {@link #update(Object)} call
    * with a value satisfying the {@code test} predicate.
    *
    * @param test The predicate.
    * @param exceptionGenerator Exception generator for timeout errors.
    * @param timeout Maximum time to wait for a value satisfying the predicate.
    * @param timeUnit Timeout time unit.
    */
   public CompletionStage<Void> newConditionStage(Predicate<T> test, Supplier<Exception> exceptionGenerator, long timeout, TimeUnit timeUnit) {
      Objects.requireNonNull(test);
      if (!running) {
         return CompletableFuture.failedFuture(new IllegalLifecycleStateException());
      }
      Data data = new Data();
      data.cancelFuture = timeoutExecutor.schedule(() -> {
         // No-op if the stage has already completed normally by the time the timeout fires.
         data.completeExceptionally(exceptionGenerator.get());
         return null;
      }, timeout, timeUnit);
      Predicate<?> previous = futures.putIfAbsent(data, test);
      if (previous != null) {
         data.cancelFuture.cancel(false);
         throw new IllegalStateException("Inserting the same Data instance");
      }
      if (!running) {
         // stop() raced with the registration above: undo it and fail the stage.
         // Note: the map is keyed by Data, so the entry must be removed with data, not test.
         data.cancelFuture.cancel(false);
         futures.remove(data);
         data.completeExceptionally(new IllegalLifecycleStateException());
      }
      T localValue = lastValue;
      if (localValue != null && test.test(localValue)) {
         // The last published value already satisfies the predicate: complete immediately.
         data.cancelFuture.cancel(false);
         futures.remove(data);
         data.complete(null);
      }
      return data;
   }

   /**
    * Update the value and complete any outstanding condition stages for which the value satisfies the predicate.
    */
   public void update(T value) {
      if (!running)
         throw new IllegalLifecycleStateException();

      lastValue = Objects.requireNonNull(value);
      checkConditions(value);
   }

   /**
    * Update the value and complete any outstanding condition stages for which the value satisfies the predicate,
    * running the predicate checks and completions on the given executor.
    */
   public void updateAsync(T value, Executor executor) {
      if (!running)
         throw new IllegalLifecycleStateException();

      lastValue = Objects.requireNonNull(value);
      try {
         executor.execute(() -> checkConditions(value));
      } catch (Throwable t) {
         // The executor rejected the task; fail all waiters rather than leave them hanging.
         completeAllExceptionally(t);
      }
   }

   private void completeAllExceptionally(Throwable t) {
      List<Data> completed;
      synchronized (futures) {
         completed = new ArrayList<>(futures.keySet());
         futures.clear();
      }
      // Complete outside the synchronized block so callbacks do not run while holding the map lock.
      for (Data data : completed) {
         data.cancelFuture.cancel(false);
         data.completeExceptionally(t);
      }
   }

   private void checkConditions(T value) {
      List<Data> completed;
      synchronized (futures) {
         completed = new ArrayList<>(futures.size());
         for (Iterator<Map.Entry<Data, Predicate<T>>> iterator = futures.entrySet().iterator(); iterator.hasNext(); ) {
            Map.Entry<Data, Predicate<T>> e = iterator.next();
            if (e.getValue().test(value)) {
               Data data = e.getKey();
               completed.add(data);
               iterator.remove();
            }
         }
      }
      for (Data data : completed) {
         data.cancelFuture.cancel(false);
         data.complete(null);
      }
   }

   /**
    * Fails all outstanding condition stages and rejects any further updates or registrations.
    */
   public void stop() {
      running = false;
      lastValue = null;
      IllegalLifecycleStateException exception = new IllegalLifecycleStateException();
      completeAllExceptionally(exception);
   }

   private static class Data extends CompletableFuture<Void> {
      // Cancels the pending timeout once the stage completes (or registration is undone).
      volatile Future<Void> cancelFuture;
   }
}
| 4,985
| 31.167742
| 143
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/ReclosableLatch.java
|
package org.infinispan.util.concurrent;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
/**
 * A reusable thread gate built on {@link java.util.concurrent.locks.AbstractQueuedSynchronizer}.
 * <p/>
 * Unlike a one-shot latch, this one can be constructed in either state (open or closed) and
 * toggled any number of times: threads calling {@link #await()} park while the gate is closed
 * and proceed once it is opened.
 *
 * @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
 * @since 4.0
 */
public class ReclosableLatch extends AbstractQueuedSynchronizer {
   private static final long serialVersionUID = 1744280161777661090l;

   // AQS state encoding: the gate is either open (waiters pass through) or closed (waiters park).
   private static final int OPEN_STATE = 0, CLOSED_STATE = 1;

   public ReclosableLatch() {
      setState(CLOSED_STATE);
   }

   public ReclosableLatch(boolean defaultOpen) {
      if (defaultOpen) {
         setState(OPEN_STATE);
      } else {
         setState(CLOSED_STATE);
      }
   }

   @Override
   public final int tryAcquireShared(int ignored) {
      // Positive lets the caller proceed; negative makes AQS park the caller.
      if (getState() == OPEN_STATE) {
         return 1;
      }
      return -1;
   }

   @Override
   public final boolean tryReleaseShared(int state) {
      // Piggy-backs on the release path purely to store the new gate state.
      setState(state);
      return true;
   }

   public final void open() {
      // releaseShared (not setState) so that parked threads are woken up.
      releaseShared(OPEN_STATE);
   }

   public final void close() {
      // releaseShared (not setState) so that parked threads are woken up.
      releaseShared(CLOSED_STATE);
   }

   public boolean isOpened() {
      return getState() == OPEN_STATE;
   }

   public final void await() throws InterruptedException {
      // The argument is a dummy; tryAcquireShared ignores it.
      acquireSharedInterruptibly(1);
   }

   public final boolean await(long time, TimeUnit unit) throws InterruptedException {
      // The first argument is a dummy; tryAcquireShared ignores it.
      return tryAcquireSharedNanos(1, unit.toNanos(time));
   }

   @Override
   public String toString() {
      StringBuilder sb = new StringBuilder("ReclosableLatch [State = ");
      sb.append(getState()).append(", ");
      if (hasQueuedThreads()) {
         sb.append("non");
      }
      sb.append("empty queue]");
      return sb.toString();
   }
}
| 2,215
| 29.777778
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/TimeoutException.java
|
package org.infinispan.util.concurrent;
import org.infinispan.commons.CacheException;
/**
* Thrown when a timeout occurred. used by operations with timeouts, e.g. lock acquisition, or waiting for responses
* from all members.
*
* @author <a href="mailto:bela@jboss.org">Bela Ban</a>.
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
public class TimeoutException extends CacheException {
/**
* The serialVersionUID
*/
private static final long serialVersionUID = -8096787619908687038L;
public TimeoutException() {
super();
}
public TimeoutException(String msg) {
super(msg);
}
public TimeoutException(String msg, Throwable cause) {
super(msg, cause);
}
@Override
public String toString() {
return super.toString();
}
}
| 800
| 20.078947
| 116
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.