repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/BlockingManagerImpl.java
package org.infinispan.util.concurrent;

import java.lang.invoke.MethodHandles;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Delayed;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collector;

import org.infinispan.commons.executors.BlockingResource;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.executors.LimitedExecutor;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;

import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Scheduler;
import io.reactivex.rxjava3.schedulers.Schedulers;

/**
 * {@link BlockingManager} implementation that hands blocking work to the injected blocking {@link Executor} and
 * resumes continuations on the injected non-blocking executor.
 * <p>
 * The recurring pattern in every operation: if the calling thread is already a blocking thread (its thread group
 * implements {@link BlockingResource}, see {@link #isCurrentThreadBlocking()}), run the work inline; otherwise submit
 * it to the blocking executor and route the resulting stage back through
 * {@link #continueOnNonBlockingThread(CompletionStage, Object)} so callers never continue on a blocking thread.
 */
@Scope(Scopes.GLOBAL)
public class BlockingManagerImpl implements BlockingManager {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
   // Counter backing nextTraceId(); only allocated when trace logging is on, otherwise null.
   private final AtomicInteger id = log.isTraceEnabled() ? new AtomicInteger() : null;

   @Inject @ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
   Executor nonBlockingExecutor;
   // This should eventually be the only reference to blocking executor
   @Inject @ComponentName(KnownComponentNames.BLOCKING_EXECUTOR)
   Executor blockingExecutor;
   @Inject NonBlockingManager nonBlockingManager;
   @Inject @ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
   ScheduledExecutorService scheduledExecutorService;

   // rxjava Schedulers wrapping the two executors; assigned in start().
   private Scheduler blockingScheduler;
   private Scheduler nonBlockingScheduler;

   @Start
   void start() {
      // The blocking scheduler goes through ReentrantBlockingExecutor so work already on a blocking
      // thread is run inline rather than re-submitted.
      blockingScheduler = Schedulers.from(new ReentrantBlockingExecutor());
      nonBlockingScheduler = Schedulers.from(nonBlockingExecutor);
   }

   /**
    * Generates a unique trace id for internally-originated operations, or {@code null} when trace
    * logging is disabled (the counter itself is null in that case).
    */
   private String nextTraceId() {
      return id != null ? "-BlockingManagerImpl-" + id.getAndIncrement() : null;
   }

   /**
    * Executor backing {@link #blockingScheduler}: delegates to the blocking executor but, by passing
    * {@code requireReturnOnNonBlockingThread = false}, allows inline execution when the caller is already
    * on a blocking thread and skips the hop back to a non-blocking thread.
    */
   private class ReentrantBlockingExecutor implements Executor {
      @Override
      public void execute(Runnable command) {
         runBlockingOperation(command, nextTraceId(), blockingExecutor, false);
      }
   }

   @Override
   public CompletionStage<Void> runBlocking(Runnable runnable, Object traceId) {
      return runBlockingOperation(runnable, traceId, blockingExecutor);
   }

   @Override
   public <E> CompletionStage<Void> subscribeBlockingConsumer(Publisher<E> publisher, Consumer<E> consumer, Object traceId) {
      // observeOn moves each emitted value onto a blocking thread before the consumer sees it.
      Flowable<E> valuePublisher = Flowable.fromPublisher(publisher)
            .observeOn(blockingScheduler);
      if (log.isTraceEnabled()) {
         valuePublisher = valuePublisher.doOnNext(value -> log.tracef("Invoking blocking consumer for %s with value %s", traceId, value));
      }
      // Completion (with a null value) is delivered back on a non-blocking thread.
      return continueOnNonBlockingThread(valuePublisher
            .doOnNext(consumer::accept)
            .ignoreElements()
            .toCompletionStage(null), traceId);
   }

   @Override
   public <T, A, R> CompletionStage<R> subscribeBlockingCollector(Publisher<T> publisher, Collector<? super T, A, R> collector, Object traceId) {
      // Values are accumulated into the collector on a blocking thread.
      Flowable<T> valuePublisher = Flowable.fromPublisher(publisher)
            .observeOn(blockingScheduler);
      if (log.isTraceEnabled()) {
         valuePublisher = valuePublisher.doOnNext(value -> log.tracef("Invoking blocking collector for %s with value %s", traceId, value));
      }
      return continueOnNonBlockingThread(Flowable.fromPublisher(valuePublisher)
            // Unfortunately rxjava doesn't have the generics as they should :(
            .collect(collector)
            .toCompletionStage(), traceId);
   }

   private CompletionStage<Void> runBlockingOperation(Runnable runnable, Object traceId, Executor executor) {
      return runBlockingOperation(runnable, traceId, executor, true);
   }

   /**
    * Core run primitive. Runs inline when already on a blocking thread (exceptions become a failed
    * future); otherwise submits to {@code executor}. When {@code requireReturnOnNonBlockingThread} is
    * true the returned stage is guaranteed to complete on a non-blocking thread.
    */
   private CompletionStage<Void> runBlockingOperation(Runnable runnable, Object traceId, Executor executor,
         boolean requireReturnOnNonBlockingThread) {
      if (isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            log.tracef("Invoked run on a blocking thread, running %s in same blocking thread", traceId);
         }
         try {
            runnable.run();
            return CompletableFutures.completedNull();
         } catch (Throwable t) {
            return CompletableFuture.failedFuture(t);
         }
      }
      CompletionStage<Void> stage;
      if (log.isTraceEnabled()) {
         log.tracef("Submitting blocking run operation %s to blocking thread", traceId);
         stage = CompletableFuture.runAsync(() -> {
            log.tracef("Running blocking run operation %s", traceId);
            runnable.run();
         }, executor);
      } else {
         stage = CompletableFuture.runAsync(runnable, executor);
      }
      return requireReturnOnNonBlockingThread ? continueOnNonBlockingThread(stage, traceId) : stage;
   }

   @Override
   public <V> CompletionStage<V> supplyBlocking(Supplier<V> supplier, Object traceId) {
      return supplyBlockingOperation(supplier, traceId, blockingExecutor);
   }

   /**
    * Supplier analogue of {@link #runBlockingOperation(Runnable, Object, Executor, boolean)}: inline on a
    * blocking thread, otherwise submitted, with the result always returned on a non-blocking thread.
    */
   private <V> CompletionStage<V> supplyBlockingOperation(Supplier<V> supplier, Object traceId, Executor executor) {
      if (isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            log.tracef("Invoked supply on a blocking thread, running %s in same blocking thread", traceId);
         }
         try {
            return CompletableFuture.completedFuture(supplier.get());
         } catch (Throwable t) {
            return CompletableFuture.failedFuture(t);
         }
      }
      CompletionStage<V> stage;
      if (log.isTraceEnabled()) {
         log.tracef("Submitting blocking supply operation %s to blocking thread", traceId);
         stage = CompletableFuture.supplyAsync(() -> {
            log.tracef("Running blocking supply operation %s", traceId);
            return supplier.get();
         }, executor);
      } else {
         stage = CompletableFuture.supplyAsync(supplier, executor);
      }
      return continueOnNonBlockingThread(stage, traceId);
   }

   @Override
   public <I, O> CompletionStage<O> handleBlocking(CompletionStage<? extends I> stage, BiFunction<? super I, Throwable, ? extends O> function, Object traceId) {
      if (isCurrentThreadBlocking()) {
         I value = null;
         Throwable throwable = null;
         try {
            if (log.isTraceEnabled()) {
               log.tracef("Invoked handle on a blocking thread, joining %s in same blocking thread", traceId);
            }
            // Joining blocks this (already blocking) thread until the stage completes.
            value = CompletionStages.join(stage);
         } catch (Throwable t) {
            throwable = t;
         }
         // NOTE(review): unlike thenApplyBlocking, an exception thrown by function.apply here propagates
         // synchronously to the caller instead of producing a failed future — confirm this is intended.
         return CompletableFuture.completedFuture(function.apply(value, throwable));
      }
      return continueOnNonBlockingThread(stage.handleAsync(function, blockingExecutor), traceId);
   }

   @Override
   public <I, O> CompletionStage<O> thenApplyBlocking(CompletionStage<? extends I> stage, Function<? super I, ? extends O> function, Object traceId) {
      // Inline path only when the stage is already done AND we are on a blocking thread.
      if (CompletionStages.isCompletedSuccessfully(stage) && isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            log.tracef("Invoked thenApply on a blocking thread, joining %s in same blocking thread", traceId);
         }
         try {
            I value = CompletionStages.join(stage);
            return CompletableFuture.completedFuture(function.apply(value));
         } catch (Throwable t) {
            return CompletableFuture.failedFuture(t);
         }
      }
      return continueOnNonBlockingThread(stage.thenApplyAsync(function, blockingExecutor), traceId);
   }

   @Override
   public <I> CompletionStage<Void> thenRunBlocking(CompletionStage<? extends I> stage, Runnable runnable, Object traceId) {
      if (CompletionStages.isCompletedSuccessfully(stage) && isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            log.tracef("Invoked thenRun on a blocking thread, joining %s in same blocking thread", traceId);
         }
         try {
            CompletionStages.join(stage);
            runnable.run();
            return CompletableFutures.completedNull();
         } catch (Throwable t) {
            return CompletableFuture.failedFuture(t);
         }
      }
      return continueOnNonBlockingThread(stage.thenRunAsync(runnable, blockingExecutor), traceId);
   }

   @Override
   public <I, O> CompletionStage<O> thenComposeBlocking(CompletionStage<? extends I> stage, Function<? super I, ? extends CompletionStage<O>> function, Object traceId) {
      if (CompletionStages.isCompletedSuccessfully(stage) && isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            log.tracef("Invoked thenComposeBlocking on a blocking thread, joining %s in same blocking thread", traceId);
         }
         try {
            I value = CompletionStages.join(stage);
            // The composed stage is returned as-is; no extra non-blocking hop on this inline path.
            return function.apply(value);
         } catch (Throwable t) {
            return CompletableFuture.failedFuture(t);
         }
      }
      return continueOnNonBlockingThread(stage.thenComposeAsync(function, blockingExecutor), traceId);
   }

   @Override
   public <V> CompletionStage<V> whenCompleteBlocking(CompletionStage<V> stage, BiConsumer<? super V, ? super Throwable> biConsumer, Object traceId) {
      if (CompletionStages.isCompletedSuccessfully(stage) && isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            log.tracef("Invoked whenComplete on a blocking thread, joining %s in same blocking thread", traceId);
         }
         V value = null;
         Throwable throwable = null;
         try {
            value = CompletionStages.join(stage);
         } catch (Throwable t) {
            // Defensive: the guard above means the stage completed successfully, so this is not expected.
            throwable = t;
         }
         try {
            biConsumer.accept(value, throwable);
         } catch (Throwable t) {
            if (throwable == null) {
               return CompletableFuture.failedFuture(t);
            }
            throwable.addSuppressed(t);
            return CompletableFuture.failedFuture(throwable);
         }
         // NOTE(review): biConsumer was already invoked above, and whenComplete on this completed stage
         // will invoke it a second time — confirm this double invocation is intended (upstream variants
         // return the stage directly here).
         return stage.whenComplete(biConsumer);
      }
      return continueOnNonBlockingThread(stage.whenCompleteAsync(biConsumer, blockingExecutor), traceId);
   }

   @Override
   public <V> CompletionStage<V> continueOnNonBlockingThread(CompletionStage<V> delay, Object traceId) {
      // Already-completed stages are returned directly: any dependents the caller attaches will run on
      // the caller's own thread, so no executor hop is needed.
      if (CompletionStages.isCompletedSuccessfully(delay)) {
         if (log.isTraceEnabled()) {
            log.tracef("Stage for %s was already completed, returning in same thread", traceId);
         }
         return delay;
      }
      return delay.whenCompleteAsync((v, t) -> {
         if (t != null) {
            if (log.isTraceEnabled()) {
               log.tracef("Continuing execution of id %s with exception %s", traceId, t.getMessage());
            }
         } else if (log.isTraceEnabled()) {
            log.tracef("Continuing execution of id %s", traceId);
         }
      }, nonBlockingExecutor);
   }

   @Override
   public <V> Publisher<V> blockingPublisher(Publisher<V> publisher) {
      // defer so the blocking-thread check happens at subscription time, not at assembly time.
      return Flowable.defer(() -> {
         if (isCurrentThreadBlocking()) {
            return publisher;
         }
         if (log.isTraceEnabled()) {
            int publisherId = System.identityHashCode(publisher);
            log.tracef("Blocking publisher start %d", publisherId);
            return Flowable.fromPublisher(publisher)
                  // subscribe (and produce) on a blocking thread, deliver downstream on non-blocking.
                  .subscribeOn(blockingScheduler)
                  .observeOn(nonBlockingScheduler)
                  .doFinally(() -> log.tracef("Blocking publisher done %d", publisherId));
         }
         return Flowable.fromPublisher(publisher)
               .subscribeOn(blockingScheduler)
               .observeOn(nonBlockingScheduler);
      });
   }

   /**
    * Subscribes to the publisher (on a blocking thread unless already on one), discarding all values,
    * and returns a stage that completes on a non-blocking thread when the publisher terminates.
    */
   public <V> CompletionStage<Void> blockingPublisherToVoidStage(Publisher<V> publisher, Object traceId) {
      Flowable<V> flowable = Flowable.fromPublisher(publisher);
      if (!isCurrentThreadBlocking()) {
         if (log.isTraceEnabled()) {
            flowable = flowable.doOnSubscribe(subscription -> log.tracef("Subscribing to %s on blocking thread", traceId));
         }
         flowable = flowable.subscribeOn(blockingScheduler);
         if (log.isTraceEnabled()) {
            // Added after subscribeOn, so this logs the thread that initiates the subscription.
            flowable = flowable.doOnSubscribe(subscription -> log.tracef("Publisher %s subscribing thread is %s", traceId, Thread.currentThread()));
         }
      } else if (log.isTraceEnabled()) {
         log.tracef("Invoked on a blocking thread, subscribing %s in same blocking thread", traceId);
      }
      CompletionStage<Void> stage = flowable
            .ignoreElements()
            .toCompletionStage(null);
      return continueOnNonBlockingThread(stage, traceId);
   }

   @Override
   public Executor asExecutor(String name) {
      // Without tracing this is just the raw blocking executor; with tracing each task is wrapped
      // to log submission and execution.
      if (!log.isTraceEnabled()) {
         return blockingExecutor;
      }
      return task -> {
         log.tracef("Submitting blocking run operation %s with name %s to blocking thread", task, name);
         blockingExecutor.execute(() -> {
            log.tracef("Running blocking operation %s with name %s on blocking thread", task, name);
            task.run();
         });
      };
   }

   @Override
   public BlockingExecutor limitedBlockingExecutor(String name, int concurrentExecutions) {
      LimitedExecutor limitedExecutor = new LimitedExecutor(name, blockingExecutor, concurrentExecutions);
      return new LimitedBlockingExecutor(limitedExecutor);
   }

   /**
    * {@link BlockingExecutor} view over a {@link LimitedExecutor}, reusing the manager's run/supply
    * primitives so inline execution and non-blocking continuation behave exactly as for the shared pool.
    */
   private class LimitedBlockingExecutor implements BlockingExecutor {
      private final LimitedExecutor limitedExecutor;

      private LimitedBlockingExecutor(LimitedExecutor limitedExecutor) {
         this.limitedExecutor = limitedExecutor;
      }

      @Override
      public CompletionStage<Void> execute(Runnable runnable, Object traceId) {
         return runBlockingOperation(runnable, traceId, limitedExecutor);
      }

      @Override
      public <V> CompletionStage<V> supply(Supplier<V> supplier, Object traceId) {
         return supplyBlockingOperation(supplier, traceId, limitedExecutor);
      }
   }

   @Override
   public <V> ScheduledBlockingCompletableStage<V> scheduleRunBlocking(Supplier<V> supplier, long delay, TimeUnit unit, Object traceId) {
      var scheduledStage = new ScheduledBlockingFuture<>(supplier, traceId);
      log.tracef("Scheduling supply operation %s for %s to run in %s %s", supplier, traceId, delay, unit);
      // The stage is itself the Runnable handed to the scheduler; the ScheduledFuture is kept so the
      // stage can report its delay and cancel the pending timer.
      scheduledStage.scheduledFuture = scheduledExecutorService.schedule(scheduledStage, delay, unit);
      return scheduledStage;
   }

   /**
    * A CompletableFuture that, when its scheduled time arrives, runs the supplier on the blocking
    * executor and completes itself (via {@link NonBlockingManager}) with the result.
    */
   private class ScheduledBlockingFuture<V> extends CompletableFuture<V> implements ScheduledBlockingCompletableStage<V>, Runnable {
      // Assigned by scheduleRunBlocking immediately after construction; volatile so the timer thread
      // and callers of getDelay/cancel see the assignment.
      private volatile ScheduledFuture<?> scheduledFuture;
      private final Supplier<V> supplier;
      private final Object traceId;

      private ScheduledBlockingFuture(Supplier<V> supplier, Object traceId) {
         this.supplier = Objects.requireNonNull(supplier);
         this.traceId = Objects.requireNonNull(traceId);
      }

      @Override
      public long getDelay(TimeUnit timeUnit) {
         return scheduledFuture.getDelay(timeUnit);
      }

      @Override
      public int compareTo(Delayed delayed) {
         // Compare via the underlying ScheduledFuture, unwrapping when the other side is also ours.
         if (delayed instanceof ScheduledBlockingFuture) {
            return scheduledFuture.compareTo(((ScheduledBlockingFuture<?>) delayed).scheduledFuture);
         }
         return scheduledFuture.compareTo(delayed);
      }

      @Override
      public boolean cancel(boolean mayInterruptIfRunning) {
         // We don't actually care if the scheduled task was cancelled, only matters if we can cancel this one
         scheduledFuture.cancel(true);
         return super.cancel(mayInterruptIfRunning);
      }

      @Override
      public void run() {
         // Invoked by the timeout scheduler: hop to the blocking executor for the supplier, then
         // complete this future through NonBlockingManager so dependents resume off the blocking thread.
         CompletableFuture.supplyAsync(() -> {
            log.tracef("Running blocking supply operation %s", traceId);
            return supplier.get();
         }, blockingExecutor)
               .whenComplete((v, t) -> {
                  if (t != null) {
                     log.tracef("Operation %s completed exceptionally with message %s", traceId, t.getMessage());
                     nonBlockingManager.completeExceptionally(this, t);
                  } else {
                     log.tracef("Operation %s completed normally", traceId);
                     nonBlockingManager.complete(this, v);
                  }
               });
      }
   }

   // This method is designed to be overridden for testing purposes
   protected boolean isCurrentThreadBlocking() {
      // A thread counts as blocking when its thread group implements the BlockingResource marker.
      return Thread.currentThread().getThreadGroup() instanceof BlockingResource;
   }
}
17,375
39.598131
148
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/CommandAckCollector.java
package org.infinispan.util.concurrent; import static java.lang.String.format; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; import org.infinispan.commands.CommandInvocationId; import org.infinispan.commons.util.Util; import org.infinispan.commons.util.concurrent.CompletableFutures; import org.infinispan.configuration.cache.ClusteringConfiguration; import org.infinispan.configuration.cache.Configuration; import org.infinispan.factories.KnownComponentNames; import org.infinispan.factories.annotations.ComponentName; import org.infinispan.factories.annotations.Inject; import org.infinispan.factories.annotations.Start; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.interceptors.distribution.BiasedCollector; import org.infinispan.interceptors.distribution.Collector; import org.infinispan.interceptors.distribution.PrimaryOwnerOnlyCollector; import org.infinispan.remoting.responses.ValidResponse; import org.infinispan.remoting.transport.Address; import org.infinispan.statetransfer.OutdatedTopologyException; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; import net.jcip.annotations.GuardedBy; /** * An acknowledge collector for Triangle algorithm used in non-transactional caches for write operations. * <p> * Acknowledges are used between the owners and the originator. They signal the completion of a write operation. 
The * operation can complete successfully or not. * <p> * The acknowledges are valid on the same cache topology id. So, each acknowledge is tagged with the command topology * id. Acknowledges from previous topology id are discarded. * <p> * The acknowledges from the primary owner carry the return value of the operation. * * @author Pedro Ruivo * @since 9.0 */ @Scope(Scopes.NAMED_CACHE) public class CommandAckCollector { private static final Log log = LogFactory.getLog(CommandAckCollector.class); @Inject @ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR) ScheduledExecutorService timeoutExecutor; @Inject Configuration configuration; private final ConcurrentHashMap<Long, BaseAckTarget> collectorMap; private long timeoutNanoSeconds; private Collection<Address> currentMembers; public CommandAckCollector() { collectorMap = new ConcurrentHashMap<>(); } @Start public void start() { this.timeoutNanoSeconds = TimeUnit.MILLISECONDS.toNanos(configuration.clustering().remoteTimeout()); configuration.clustering() .attributes().attribute(ClusteringConfiguration.REMOTE_TIMEOUT) .addListener((a, ignored) -> { timeoutNanoSeconds = TimeUnit.MILLISECONDS.toNanos(a.get()); }); } /** * Creates a collector for a single key write operation. * @param id the id from {@link CommandInvocationId}. * @param backupOwners the backup owners of the key. * @param topologyId the current topology id. 
*/ public <T> Collector<T> create(long id, Collection<Address> backupOwners, int topologyId) { if (backupOwners.isEmpty()) { return new PrimaryOwnerOnlyCollector<>(); } SingleKeyCollector<T> collector = new SingleKeyCollector<>(id, backupOwners, topologyId); BaseAckTarget prev = collectorMap.put(id, collector); //is it possible the have a previous collector when the topology changes after the first collector is created //in that case, the previous collector must have a lower topology id assert prev == null || prev.topologyId < topologyId : format("replaced old collector '%s' by '%s'", prev, collector); if (log.isTraceEnabled()) { log.tracef("Created new collector for %s. BackupOwners=%s", id, backupOwners); } return collector; } public BiasedCollector createBiased(long id, int topologyId) { BiasedKeyCollector collector = new BiasedKeyCollector(id, topologyId); BaseAckTarget prev = collectorMap.put(id, collector); assert prev == null || prev.topologyId < topologyId : prev.toString(); if (log.isTraceEnabled()) { log.tracef("Created new biased collector for %d", id); } return collector; } public MultiTargetCollector createMultiTargetCollector(long id, int primaries, int topologyId) { MultiTargetCollectorImpl multiTargetCollector = new MultiTargetCollectorImpl(id, primaries, topologyId); BaseAckTarget prev = collectorMap.put(id, multiTargetCollector); assert prev == null || prev.topologyId < topologyId : prev.toString(); if (log.isTraceEnabled()) { log.tracef("Created new multi target collector for %d", id); } return multiTargetCollector; } /** * Creates a collector for {@link org.infinispan.commands.write.PutMapCommand}. * * @param id the id from {@link CommandInvocationId#getId()}. * @param backups a map between a backup owner and its segments affected. * @param topologyId the current topology id. 
*/ public <T> Collector<T> createSegmentBasedCollector(long id, Map<Address, Collection<Integer>> backups, int topologyId) { if (backups.isEmpty()) { return new PrimaryOwnerOnlyCollector<>(); } SegmentBasedCollector<T> collector = new SegmentBasedCollector<>(id, backups, topologyId); BaseAckTarget prev = collectorMap.put(id, collector); //is it possible the have a previous collector when the topology changes after the first collector is created //in that case, the previous collector must have a lower topology id assert prev == null || prev.topologyId < topologyId : format("replaced old collector '%s' by '%s'", prev, collector); if (log.isTraceEnabled()) { log.tracef("Created new collector for %s. BackupSegments=%s", id, backups); } return collector; } /** * Acknowledges a {@link org.infinispan.commands.write.PutMapCommand} completion in the backup owner. * * @param id the id from {@link CommandInvocationId#getId()}. * @param from the backup owner. * @param segment the segments affected and acknowledged. * @param topologyId the topology id. */ public void multiKeyBackupAck(long id, Address from, int segment, int topologyId) { SegmentBasedCollector collector = (SegmentBasedCollector) collectorMap.get(id); if (collector != null) { collector.backupAck(from, segment, topologyId); } } /** * Acknowledges a write operation completion in the backup owner. * * @param id the id from {@link CommandInvocationId#getId()}. * @param from the backup owner. * @param topologyId the topology id. */ public void backupAck(long id, Address from, int topologyId) { BaseAckTarget ackTarget = collectorMap.get(id); if (ackTarget instanceof SingleKeyCollector) { ((SingleKeyCollector) ackTarget).backupAck(topologyId, from); } else if (ackTarget instanceof MultiTargetCollectorImpl) { ((MultiTargetCollectorImpl) ackTarget).backupAck(topologyId, from); } } /** * Acknowledges an exception during the operation execution. * <p> * The collector is completed without waiting any further acknowledges. 
* * @param id the id from {@link CommandInvocationId#getId()}. * @param throwable the {@link Throwable}. * @param topologyId the topology id. */ public void completeExceptionally(long id, Throwable throwable, int topologyId) { BaseAckTarget ackTarget = collectorMap.get(id); if (ackTarget != null) { ackTarget.completeExceptionally(throwable, topologyId); } } /** * @return the pending ids from {@link CommandInvocationId#getId()} (testing purposes only) */ public List<Long> getPendingCommands() { return new ArrayList<>(collectorMap.keySet()); } /** * @param id the id from {@link CommandInvocationId#getId()}. * @return {@code true} if there are acknowledges pending from the backup owners, {@code false} otherwise. (testing * purposes only) */ @SuppressWarnings("BooleanMethodIsAlwaysInverted") //testing only public boolean hasPendingBackupAcks(long id) { BaseAckTarget ackTarget = collectorMap.get(id); return ackTarget != null && ackTarget.hasPendingBackupAcks(); } /** * Notifies a change in member list. * * @param members the new cluster members. */ public void onMembersChange(Collection<Address> members) { Set<Address> currentMembers = new HashSet<>(members); this.currentMembers = currentMembers; for (BaseAckTarget<?> ackTarget : collectorMap.values()) { ackTarget.onMembersChange(currentMembers); } } private TimeoutException createTimeoutException(String address, long id) { return log.timeoutWaitingForAcks(Util.prettyPrintTime(timeoutNanoSeconds, TimeUnit.NANOSECONDS), address, id); } private abstract class BaseAckTarget<T> implements Callable<Void>, BiConsumer<T, Throwable> { final long id; final int topologyId; final ScheduledFuture<?> timeoutTask; private BaseAckTarget(long id, int topologyId) { this.topologyId = topologyId; this.id = id; this.timeoutTask = timeoutExecutor.schedule(this, timeoutNanoSeconds, TimeUnit.NANOSECONDS); } /** * Invoked when the collector's future is completed, it must cleanup all task related to this collector. 
* <p> * The tasks includes removing the collector from the map and cancel the timeout task. */ public final void accept(T t, Throwable throwable) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Collector completed with ret=%s, throw=%s", id, t, throwable); } boolean removed = collectorMap.remove(id, this); assert removed; timeoutTask.cancel(false); } abstract void completeExceptionally(Throwable throwable, int topologyId); abstract boolean hasPendingBackupAcks(); abstract void onMembersChange(Collection<Address> members); } private abstract class BaseCollector<T> extends BaseAckTarget<T> implements Collector<T> { final CompletableFuture<T> future; final CompletableFuture<T> exposedFuture; volatile T primaryResult; volatile boolean primaryResultReceived = false; BaseCollector(long id, int topologyId) { super(id, topologyId); this.future = new CompletableFuture<>(); this.exposedFuture = future.whenComplete(this); } /** * Invoked by the timeout executor when the timeout expires. * <p> * It completes the future with the timeout exception. */ @Override public final synchronized Void call() { future.completeExceptionally(createTimeoutException(getAddress(), id)); return null; } protected abstract String getAddress(); @Override public final CompletableFuture<T> getFuture() { return exposedFuture; } @Override public void primaryException(Throwable throwable) { future.completeExceptionally(throwable); } final void completeExceptionally(Throwable throwable, int topologyId) { if (log.isTraceEnabled()) { log.tracef(throwable, "[Collector#%s] completed exceptionally. 
TopologyId=%s (expected=%s)", id, topologyId, this.topologyId); } if (isWrongTopologyOrIsDone(topologyId)) { return; } future.completeExceptionally(throwable); } final boolean isWrongTopologyOrIsDone(int topologyId) { return this.topologyId != topologyId || future.isDone(); } } private class SingleKeyCollector<T> extends BaseCollector<T> { final Collection<Address> backupOwners; private SingleKeyCollector(long id, Collection<Address> backupOwners, int topologyId) { super(id, topologyId); this.backupOwners = new HashSet<>(backupOwners); } @Override synchronized boolean hasPendingBackupAcks() { if (log.isTraceEnabled()) { log.tracef("Pending backup acks: %s", backupOwners); } return !backupOwners.isEmpty(); } @Override void onMembersChange(Collection<Address> members) { boolean empty; synchronized (this) { empty = backupOwners.retainAll(members) && backupOwners.isEmpty(); } if (empty && primaryResultReceived) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Some backups left the cluster.", id); } markReady(); } } @Override public void primaryResult(T result, boolean success) { primaryResult = result; primaryResultReceived = true; if (!success || !hasPendingBackupAcks()) { markReady(); } } void backupAck(int topologyId, Address from) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Backup ACK. 
Address=%s, TopologyId=%s (expected=%s)", id, from, topologyId, this.topologyId); } if (isWrongTopologyOrIsDone(topologyId)) { return; } boolean empty; synchronized (this) { empty = backupOwners.remove(from) && backupOwners.isEmpty(); } if (empty && primaryResultReceived) { markReady(); } } void markReady() { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Ready!", id); } future.complete(primaryResult); } @Override protected synchronized String getAddress() { return backupOwners.toString(); } } private class BiasedKeyCollector extends SingleKeyCollector<ValidResponse> implements BiasedCollector { @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") private Collection<Address> unsolicitedAcks; private BiasedKeyCollector(long id, int topologyId) { super(id, Collections.emptyList(), topologyId); } void backupAck(int topologyId, Address from) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Backup ACK. Address=%s, TopologyId=%s (expected=%s)", id, from, topologyId, this.topologyId); } if (isWrongTopologyOrIsDone(topologyId)) { return; } boolean empty; synchronized (this) { if (!backupOwners.remove(from)) { if (unsolicitedAcks == null) { unsolicitedAcks = new ArrayList<>(4); } log.tracef("[Collector#%s] Unsolicited ACK", id); unsolicitedAcks.add(from); } empty = backupOwners.isEmpty(); } if (empty && primaryResultReceived) { markReady(); } } @Override public synchronized void addPendingAcks(boolean success, Address[] waitFor) { if (success && waitFor != null) { Collection<Address> members = currentMembers; for (Address address : waitFor) { if (members == null || members.contains(address)) { backupOwners.add(address); } } } if (unsolicitedAcks != null) { unsolicitedAcks.removeIf(backupOwners::remove); } } } private class SegmentBasedCollector<T> extends BaseCollector<T> { @GuardedBy("this") private final Map<Address, Collection<Integer>> backups; SegmentBasedCollector(long id, Map<Address, Collection<Integer>> backups, int topologyId) { super(id, 
topologyId); this.backups = backups; } @Override public synchronized boolean hasPendingBackupAcks() { return !backups.isEmpty(); } @Override public synchronized void onMembersChange(Collection<Address> members) { if (backups.keySet().retainAll(members)) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Some backups left the cluster.", id); } checkCompleted(); } } @Override public void primaryResult(T result, boolean success) { primaryResult = result; primaryResultReceived = true; synchronized (this) { checkCompleted(); } } void backupAck(Address from, int segment, int topologyId) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] PutMap Backup ACK. Address=%s. TopologyId=%s (expected=%s). Segment=%s", id, from, topologyId, this.topologyId, segment); } if (isWrongTopologyOrIsDone(topologyId)) { return; } synchronized (this) { Collection<Integer> pendingSegments = backups.getOrDefault(from, Collections.emptyList()); if (pendingSegments.remove(segment) && pendingSegments.isEmpty()) { backups.remove(from); } checkCompleted(); } } @GuardedBy("this") private void checkCompleted() { if (primaryResultReceived && backups.isEmpty()) { if (log.isTraceEnabled()) { log.tracef("[Collector#%s] Ready! Return value=%ss.", id, primaryResult); } future.complete(primaryResult); } } @Override protected synchronized String getAddress() { return backups.keySet().toString(); } @Override public String toString() { final StringBuilder sb = new StringBuilder("SegmentBasedCollector{"); sb.append("id=").append(id); sb.append(", topologyId=").append(topologyId); sb.append(", primaryResult=").append(primaryResult); sb.append(", primaryResultReceived=").append(primaryResultReceived); sb.append(", backups=").append(backups); sb.append('}'); return sb.toString(); } } /** * Contrary to {@link MultiTargetCollectorImpl} implements the {@link Collector} interface delegating its calls * to the {@link MultiTargetCollectorImpl} which is stored in {@link #collectorMap}. 
*/
private static class SingleTargetCollectorImpl implements BiasedCollector,
      Function<Void, CompletableFuture<ValidResponse>> {
   private final MultiTargetCollectorImpl parent;
   // Completed with the primary owner's response as soon as it arrives.
   private final CompletableFuture<ValidResponse> resultFuture = new CompletableFuture<>();
   // Completes only after BOTH the primary result and all backup acks (parent.acksFuture) arrive.
   private final CompletableFuture<ValidResponse> combinedFuture;

   private SingleTargetCollectorImpl(MultiTargetCollectorImpl parent) {
      this.parent = parent;
      // 'this' is the Function<Void, CompletableFuture<ValidResponse>>: once allOf completes,
      // apply() hands back resultFuture so combinedFuture yields the primary's response.
      this.combinedFuture = CompletableFuture.allOf(resultFuture, parent.acksFuture).thenCompose(this);
   }

   @Override
   public CompletableFuture<ValidResponse> getFuture() {
      return combinedFuture;
   }

   @Override
   public void primaryException(Throwable throwable) {
      // exceptions can propagate immediately; no need to wait for backup acks
      combinedFuture.completeExceptionally(throwable);
   }

   @Override
   public void primaryResult(ValidResponse result, boolean success) {
      if (log.isTraceEnabled()) {
         log.tracef("Received result for %d, topology %d: %s", parent.id, parent.topologyId, result);
      }
      resultFuture.complete(result);
      // The parent may now be able to complete acksFuture if all primaries reported.
      parent.checkComplete();
   }

   @Override
   public CompletableFuture<ValidResponse> apply(Void nil) {
      // Invoked via thenCompose once both resultFuture and acksFuture are done.
      return resultFuture;
   }

   @Override
   public void addPendingAcks(boolean success, Address[] waitFor) {
      if (success && waitFor != null) {
         parent.addPendingAcks(waitFor);
      }
   }
}

/**
 * Hands out one {@link BiasedCollector} per primary-owner target so their acks can be
 * aggregated by a single multi-target collector.
 */
public interface MultiTargetCollector {
   BiasedCollector collectorFor(Address target);
}

/**
 * Aggregates the primary responses and backup acks of a multi-key command spanning
 * several primary owners. acksFuture completes when every primary has responded and
 * no backup ack is outstanding.
 */
private class MultiTargetCollectorImpl extends BaseAckTarget<Void> implements MultiTargetCollector {
   private final Map<Address, SingleTargetCollectorImpl> primaryCollectors = new HashMap<>();
   // Note that this is a list, since we may expect multiple acks from single node.
   private final List<Address> pendingAcks = new ArrayList<>();
   private final CompletableFuture<Void> acksFuture = new CompletableFuture<>();
   // Number of primary owners we expect a collector to be created for.
   private final int primaries;
   @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
   // Acks that arrived before the matching addPendingAcks() registration; reconciled there.
   private List<Address> unsolicitedAcks;
   private Throwable throwable;

   MultiTargetCollectorImpl(long id, int primaries, int topologyId) {
      super(id, topologyId);
      this.primaries = primaries;
      acksFuture.whenComplete(this);
   }

   @Override
   public synchronized BiasedCollector collectorFor(Address target) {
      if (throwable != null) {
         // Already failed; propagate the stored failure instead of registering more work.
         throw CompletableFutures.asCompletionException(throwable);
      } else {
         SingleTargetCollectorImpl collector = new SingleTargetCollectorImpl(this);
         Collector<ValidResponse> prev = primaryCollectors.put(target, collector);
         assert prev == null : prev.toString();
         return collector;
      }
   }

   synchronized void addPendingAcks(Address[] waitFor) {
      if (log.isTraceEnabled()) {
         log.tracef("[Collector#%s] Adding pending acks from %s, existing are %s", id,
               Arrays.toString(waitFor), pendingAcks);
      }
      Collection<Address> members = currentMembers;
      for (Address member : waitFor) {
         // Only wait for nodes that are still cluster members (null = membership unknown yet).
         if (members == null || members.contains(member)) {
            pendingAcks.add(member);
         }
      }
      if (unsolicitedAcks != null) {
         // this should work for multiple acks from same node as well
         unsolicitedAcks.removeIf(pendingAcks::remove);
      }
   }

   synchronized void backupAck(int topologyId, Address from) {
      if (log.isTraceEnabled()) {
         log.tracef("[Collector#%s] PutMap Backup ACK. Address=%s. TopologyId=%s (expected=%s).", id, from,
               topologyId, this.topologyId);
      }
      if (topologyId == this.topologyId) {
         if (!pendingAcks.remove(from)) {
            // Ack arrived before addPendingAcks() registered it; park it for later reconciliation.
            if (unsolicitedAcks == null) {
               unsolicitedAcks = new ArrayList<>(4);
            }
            unsolicitedAcks.add(from);
         }
      }
      checkComplete();
   }

   @Override
   synchronized void completeExceptionally(Throwable throwable, int topologyId) {
      if (topologyId == this.topologyId) {
         this.throwable = throwable;
         // Fail every per-target collector so their combined futures complete exceptionally.
         for (Collector<?> collector : primaryCollectors.values()) {
            collector.primaryException(throwable);
         }
      }
   }

   @Override
   synchronized boolean hasPendingBackupAcks() {
      return !pendingAcks.isEmpty();
   }

   @Override
   synchronized void onMembersChange(Collection<Address> members) {
      // Drop acks expected from nodes that left; retry against primaries that left.
      pendingAcks.retainAll(members);
      for (Map.Entry<Address, SingleTargetCollectorImpl> pair : primaryCollectors.entrySet()) {
         if (!members.contains(pair.getKey())) {
            pair.getValue().primaryException(OutdatedTopologyException.RETRY_NEXT_TOPOLOGY);
         }
      }
   }

   @Override
   public Void call() {
      // Timeout callback: fail the whole collector listing the acks still outstanding.
      completeExceptionally(createTimeoutException(getPendingAcksString(), id), topologyId);
      return null;
   }

   private synchronized String getPendingAcksString() {
      return pendingAcks.toString();
   }

   synchronized void checkComplete() {
      // Complete acksFuture only when every expected primary has a collector,
      // every primary result is in, and no backup ack is outstanding.
      if (primaries != primaryCollectors.size()) {
         return;
      }
      for (SingleTargetCollectorImpl c : primaryCollectors.values()) {
         if (!c.resultFuture.isDone()) return;
      }
      if (!hasPendingBackupAcks()) {
         acksFuture.complete(null);
      }
   }
}
}
24,792
36.059791
128
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/BlockingRunnable.java
package org.infinispan.util.concurrent; /** * A special Runnable that is only sent to a thread when it is ready to be * executed without blocking the thread * <p/> * Used in {@code org.infinispan.util.concurrent.BlockingTaskAwareExecutorService} * * @author Pedro Ruivo * @since 5.3 */ public interface BlockingRunnable extends Runnable { /** * @return true if this Runnable is ready to be executed without blocking */ boolean isReady(); }
465
22.3
82
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/ActionSequencer.java
package org.infinispan.util.concurrent;

import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.LongAdder;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;

import org.infinispan.commons.stat.DefaultSimpleStat;
import org.infinispan.commons.stat.SimpleStat;
import org.infinispan.commons.time.TimeService;

/**
 * Orders multiple actions/tasks based on a key.
 * <p>
 * It has the following properties:
 * <ul>
 *    <li>If multiple actions have disjoint ordering keys, they are executed in parallel.</li>
 *    <li>If multiple actions have the same ordering keys, deadlocks are avoided between them.</li>
 *    <li>An action is only executed after the previous one is completed.</li>
 * </ul>
 *
 * @author Pedro Ruivo
 * @since 10.0
 */
public class ActionSequencer {

   // Shared no-op collector used when statistics are disabled.
   private static final StatCollector NO_STATS = new StatCollector();
   // key -> last entry registered for that key; an entry chains itself after the one it replaces.
   private final Map<Object, SequenceEntry<?>> sequencer = new ConcurrentHashMap<>();
   private final LongAdder pendingActions = new LongAdder();
   private final LongAdder runningActions = new LongAdder();
   private final TimeService timeService;
   private final Executor executor;
   private final boolean forceExecutor;
   private final SimpleStat queueTimes = new DefaultSimpleStat();
   private final SimpleStat runningTimes = new DefaultSimpleStat();
   private volatile boolean collectStats;

   /**
    * @param executor      Executor to run submitted actions.
    * @param forceExecutor If {@code false}, run submitted actions on the submitter thread if possible. If {@code true},
    *                      always run submitted actions on the executor.
    */
   public ActionSequencer(Executor executor, boolean forceExecutor, TimeService timeService) {
      this.executor = executor;
      this.forceExecutor = forceExecutor;
      this.timeService = timeService;
   }

   // Invokes the action, converting a synchronously-thrown exception into a failed stage.
   private static <T> CompletionStage<T> safeNonBlockingCall(Callable<? extends CompletionStage<T>> action) {
      try {
         return action.call();
      } catch (Exception e) {
         return CompletableFuture.failedFuture(e);
      }
   }

   /**
    * It orders a non-blocking action.
    * <p>
    * It assumes the {@code action} does not block the invoked thread and it may execute it in this thread or, if there
    * is one or more pending actions, in a separate thread (provided by the {@code executor}).
    *
    * @param <T>    The return value type.
    * @param keys   The ordering keys.
    * @param action The {@link Callable} to invoke.
    * @return A {@link CompletableFuture} that is completed with the return value of the resulting {@link
    * CompletableFuture}.
    * @throws NullPointerException if any of the parameters is null.
    */
   public <T> CompletionStage<T> orderOnKeys(Collection<?> keys, Callable<? extends CompletionStage<T>> action) {
      checkAction(action);
      Object[] dKeys = checkKeys(keys);
      if (dKeys.length == 0) {
         // No keys => nothing to order on; run immediately.
         return safeNonBlockingCall(action);
      }
      StatCollector statCollector = newStatCollector();
      SequenceEntry<T> entry;
      if (dKeys.length == 1) {
         entry = new SingleKeyNonBlockingSequenceEntry<>(action, dKeys[0], statCollector);
      } else {
         entry = new MultiKeyNonBlockingSequenceEntry<>(action, dKeys, statCollector);
      }
      registerAction(entry);
      return entry;
   }

   /**
    * Single-key variant of {@link #orderOnKeys(Collection, Callable)}.
    *
    * @throws NullPointerException if any of the parameters is null.
    */
   public <T> CompletionStage<T> orderOnKey(Object key, Callable<? extends CompletionStage<T>> action) {
      checkAction(action);
      StatCollector statCollector = newStatCollector();
      SequenceEntry<T> entry = new SingleKeyNonBlockingSequenceEntry<>(action, checkKey(key), statCollector);
      registerAction(entry);
      return entry;
   }

   public long getPendingActions() {
      return pendingActions.longValue();
   }

   public long getRunningActions() {
      return runningActions.longValue();
   }

   public void resetStatistics() {
      runningTimes.reset();
      queueTimes.reset();
   }

   public long getAverageQueueTimeNanos() {
      return queueTimes.getAverage(-1);
   }

   public long getAverageRunningTimeNanos() {
      return runningTimes.getAverage(-1);
   }

   public void setStatisticEnabled(boolean enable) {
      collectStats = enable;
      if (!enable) {
         resetStatistics();
      }
   }

   public int getMapSize() {
      return sequencer.size();
   }

   private <T> void registerAction(SequenceEntry<T> entry) {
      entry.register();
   }

   private void checkAction(Callable<?> action) {
      Objects.requireNonNull(action, "Action cannot be null.");
   }

   // Null keys are filtered out and duplicates collapsed before ordering.
   private Object[] checkKeys(Collection<?> keys) {
      return Objects.requireNonNull(keys, "Keys cannot be null.")
            .stream().filter(Objects::nonNull).distinct().toArray();
   }

   private Object checkKey(Object key) {
      return Objects.requireNonNull(key, "Key cannot be null.");
   }

   // Two-argument remove: only removes the mapping if this entry is still the latest for the key.
   private void remove(Object key, SequenceEntry<?> entry) {
      sequencer.remove(key, entry);
   }

   private void remove(Object[] keys, SequenceEntry<?> entry) {
      for (Object key : keys) {
         sequencer.remove(key, entry);
      }
   }

   private StatCollector newStatCollector() {
      return collectStats ? new StatEnabledCollector() : NO_STATS;
   }

   // No-op base; StatEnabledCollector overrides all hooks when statistics are on.
   private static class StatCollector {
      void taskCreated() {
      }

      void taskStarted() {
      }

      void taskFinished() {
      }
   }

   private abstract class SequenceEntry<T> extends CompletableFuture<T> implements
         BiFunction<Object, Throwable, Void>, //for handleAsync (to chain on the previous entry)
         BiConsumer<T, Throwable>, //for whenComplete (to chain on the action result)
         Runnable { //executes the actions

      final Callable<? extends CompletionStage<T>> action;
      final StatCollector statCollector;

      SequenceEntry(Callable<? extends CompletionStage<T>> action, StatCollector statCollector) {
         this.action = action;
         this.statCollector = statCollector;
      }

      public void register() {
         statCollector.taskCreated();
         CompletionStage<?> previousStage = putInMap();
         if (previousStage != null) {
            // Run only after the previous entry for the same key(s) completes (success or failure).
            previousStage.handleAsync(this, executor);
         } else if (forceExecutor) {
            //execute the action in another thread.
            executor.execute(this);
         } else {
            run();
         }
      }

      @Override
      public final void accept(T o, Throwable throwable) {
         // Action finished: unregister first so a successor can take over the key(s), then complete.
         removeFromMap();
         statCollector.taskFinished();
         if (throwable == null) {
            complete(o);
         } else {
            completeExceptionally(throwable);
         }
      }

      @Override
      public final Void apply(Object o, Throwable t) {
         // Previous entry finished (its outcome is irrelevant) => run this action now.
         run();
         return null;
      }

      @Override
      public final void run() {
         statCollector.taskStarted();
         CompletionStage<T> cf = safeNonBlockingCall(action);
         cf.whenComplete(this);
      }

      /**
       * Register the current entry in the sequencer map for all the affected keys.
       *
       * @return A stage that completes when all the previous entries have completed
       */
      abstract CompletionStage<?> putInMap();

      /**
       * Remove this entry from the sequencer map for all the affected keys, unless it was already replaced by another
       * entry.
       */
      abstract void removeFromMap();
   }

   private class SingleKeyNonBlockingSequenceEntry<T> extends SequenceEntry<T> {
      private final Object key;

      SingleKeyNonBlockingSequenceEntry(Callable<? extends CompletionStage<T>> action, Object key,
                                        StatCollector statCollector) {
         super(action, statCollector);
         this.key = key;
      }

      @Override
      public CompletionStage<?> putInMap() {
         // Atomic replace: the returned previous entry (if any) is what we must wait on.
         return sequencer.put(key, this);
      }

      @Override
      public void removeFromMap() {
         remove(key, this);
      }
   }

   private class MultiKeyNonBlockingSequenceEntry<T> extends SequenceEntry<T> {
      private final Object[] keys;

      MultiKeyNonBlockingSequenceEntry(Callable<? extends CompletionStage<T>> action, Object[] keys,
                                       StatCollector statCollector) {
         super(action, statCollector);
         this.keys = keys;
      }

      @Override
      public CompletionStage<?> putInMap() {
         AggregateCompletionStage<?> previousCF = CompletionStages.aggregateCompletionStage();
         // Synchronize so multi-key registrations cannot interleave and deadlock each other.
         synchronized (ActionSequencer.this) {
            BiFunction<Object, SequenceEntry<?>, SequenceEntry<?>> mapping = (key, previousEntry) -> waitFromPrevious(
                  previousEntry, previousCF);
            for (Object key : keys) {
               sequencer.compute(key, mapping);
            }
         }
         return previousCF.freeze();
      }

      @Override
      void removeFromMap() {
         remove(keys, this);
      }

      // Collects the displaced entry (if any) into the aggregate and installs this entry in its place.
      SequenceEntry<?> waitFromPrevious(SequenceEntry<?> previousEntry, AggregateCompletionStage<?> previousCF) {
         if (previousEntry != null) {
            previousCF.dependsOn(previousEntry);
         }
         return this;
      }
   }

   private class StatEnabledCollector extends StatCollector {
      private volatile long createdTimestamp = -1;
      private volatile long startedTimestamp = -1;

      @Override
      void taskCreated() {
         pendingActions.increment();
         createdTimestamp = timeService.time();
      }

      @Override
      void taskStarted() {
         runningActions.increment();
         startedTimestamp = timeService.time();
      }

      @Override
      void taskFinished() {
         runningActions.decrement();
         pendingActions.decrement();
         long endTimestamp = timeService.time();
         queueTimes.record(startedTimestamp - createdTimestamp);
         runningTimes.record(endTimestamp - startedTimestamp);
      }
   }
}
10,186
29.963526
120
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/SynchronizedRestarter.java
package org.infinispan.util.concurrent;

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.LockSupport;

import org.infinispan.commons.api.Lifecycle;

/**
 * A class that handles restarts of components via multiple threads. Specifically, if a component needs to be restarted
 * and several threads may demand a restart but only one thread should be allowed to restart the component, then use
 * this class.
 * <p/>
 * What this class guarantees is that several threads may come in while a component is being restarted, but they will
 * block until the restart is complete.
 * <p/>
 * This is different from other techniques in that: <ul> <li>A simple compare-and-swap to check whether another thread
 * is already performing a restart will result in the requesting thread returning immediately and potentially attempting
 * to use the resource being restarted.</li> <li>A synchronized method or use of a lock would result in the thread
 * waiting for the restart to complete, but on completion will attempt to restart the component again.</li> </ul> This
 * implementation combines a compare-and-swap to detect a concurrent restart, as well as registering for notification
 * for when the restart completes and then parking the thread if the CAS variable still indicates a restart in progress,
 * and finally deregistering itself in the end.
 *
 * @author Manik Surtani
 * @since 4.0
 */
public class SynchronizedRestarter {
   // CAS gate: true while exactly one thread is performing the restart. Final: never reassigned.
   private final AtomicBoolean restartInProgress = new AtomicBoolean(false);
   // Threads parked waiting for an in-flight restart to finish; they unpark themselves on completion.
   private final Set<Thread> restartWaiters = ConcurrentHashMap.newKeySet();

   /**
    * Restarts the given component, guaranteeing that concurrent callers neither trigger a second
    * restart nor return before the in-flight restart has finished.
    *
    * @param component the component to stop and start
    * @throws Exception if {@link Lifecycle#stop()} or {@link Lifecycle#start()} throws
    */
   public void restartComponent(Lifecycle component) throws Exception {
      // will only enter this block if no one else is restarting the socket
      // and will atomically set the flag so others won't enter
      if (restartInProgress.compareAndSet(false, true)) {
         try {
            component.stop();
            component.start();
         } finally {
            // Clear the flag BEFORE unparking, so a waiter that re-checks the flag sees the
            // restart as finished and does not park again.
            restartInProgress.set(false);
            for (Thread waiter : restartWaiters) {
               try {
                  LockSupport.unpark(waiter);
               } catch (Throwable t) {
                  // do nothing; continue notifying the rest
               }
            }
         }
      } else {
         // register interest in being notified after the restart
         restartWaiters.add(Thread.currentThread());
         // LockSupport.park() is documented to be able to return spuriously (or due to a stale
         // permit), so re-check the flag in a loop instead of parking exactly once; a lost-wakeup
         // is impossible because we registered in restartWaiters before the first check.
         while (restartInProgress.get()) {
            LockSupport.park();
         }
         // de-register interest in notification
         restartWaiters.remove(Thread.currentThread());
      }
   }
}
2,786
43.951613
120
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/DataOperationOrderer.java
package org.infinispan.util.concurrent;

import java.lang.invoke.MethodHandles;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * Non-blocking, per-key ordering of concurrent data operations: every new operation
 * registers its own stage under the key and receives the previously registered stage,
 * which it must wait on before proceeding. The most recent registration always wins
 * the map slot, so waiters chain in registration order without any locking.
 *
 * @author wburns
 * @since 10.0
 */
public class DataOperationOrderer {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());

   // key -> most recently registered (still pending) operation stage for that key
   private final ConcurrentMap<Object, CompletionStage<Operation>> objectStages = new ConcurrentHashMap<>();

   public enum Operation {
      READ,
      REMOVE,
      WRITE
   }

   /**
    * Registers {@code register} as the next operation in line for {@code key}.
    *
    * @param key      delineating identifier for an operation
    * @param register stage that later registrations for this key will wait upon
    * @return the stage this caller must wait on before continuing, or {@code null}
    *         when no operation is currently outstanding for the key
    */
   public CompletionStage<Operation> orderOn(Object key, CompletionStage<Operation> register) {
      CompletionStage<Operation> previous = objectStages.put(key, register);
      if (log.isTraceEnabled()) {
         log.tracef("Ordering upcoming future %s for key %s to run after %s", register, key, previous);
      }
      return previous;
   }

   /**
    * Marks the given operation as finished and releases the orderer's reference to it.
    *
    * @param key              delineating identifier for an operation
    * @param registeredFuture the future previously handed to {@link #orderOn}
    * @param operation        the type of operation that completed
    */
   public void completeOperation(Object key, CompletableFuture<Operation> registeredFuture, Operation operation) {
      if (log.isTraceEnabled()) {
         log.tracef("Ordered future %s is completed for key %s from op %s", registeredFuture, key, operation);
      }
      // Conditional removal: a no-op here simply means a newer operation has already
      // replaced this registration in the map, which is fine.
      objectStages.remove(key, registeredFuture);
      registeredFuture.complete(operation);
   }

   /**
    * For testing purposes only.
    */
   public CompletionStage<Operation> getCurrentStage(Object key) {
      return objectStages.get(key);
   }
}
2,697
38.676471
118
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/CompletionStages.java
package org.infinispan.util.concurrent;

import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;

import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * Utility methods for handling {@link CompletionStage} instances.
 *
 * @author wburns
 * @since 10.0
 */
public class CompletionStages {
   public static final Runnable NO_OP_RUNNABLE = () -> {};

   private CompletionStages() {
   }

   private static final Log log = LogFactory.getLog(CompletionStages.class);

   /**
    * Returns a CompletionStage that also can be composed of many other CompletionStages. A stage can compose another
    * stage in it by invoking the {@link AggregateCompletionStage#dependsOn(CompletionStage)} method passing in the
    * CompletionStage. After all stages this composition stage depends upon have been added, the
    * {@link AggregateCompletionStage#freeze()} should be invoked so that the AggregateCompletionStage can finally
    * complete when all of the stages it depends upon complete.
    * <p>
    * If any stage this depends upon fails the returned stage will contain the Throwable from one of the stages.
    *
    * @return composed completion stage
    */
   public static AggregateCompletionStage<Void> aggregateCompletionStage() {
      return new VoidAggregateCompletionStage();
   }

   /**
    * Same as {@link #aggregateCompletionStage()} except that when this stage completes normally it will return
    * the value provided.
    *
    * @param valueToReturn value to return to future stage compositions
    * @param <R> the type of the value
    * @return composed completion stage that returns the value upon normal completion
    */
   public static <R> AggregateCompletionStage<R> aggregateCompletionStage(R valueToReturn) {
      return new ValueAggregateCompletionStage<>(valueToReturn);
   }

   /**
    * Aggregate stage whose result is {@code true} if any dependent stage completed with {@code true}.
    */
   public static AggregateCompletionStage<Boolean> orBooleanAggregateCompletionStage() {
      return new OrBooleanAggregateCompletionStage();
   }

   /**
    * Returns if the provided {@link CompletionStage} has already completed normally, that is not due to an exception.
    *
    * @param stage stage to check
    * @return if the stage is completed normally
    */
   public static boolean isCompletedSuccessfully(CompletionStage<?> stage) {
      CompletableFuture<?> future = stage.toCompletableFuture();
      return future.isDone() && !future.isCompletedExceptionally();
   }

   /**
    * Returns the result value when complete, or throws an (unchecked) exception if completed exceptionally.
    * To better conform with the use of common functional forms, if a computation involved in the completion of this
    * CompletionStage threw an exception, this method throws an (unchecked) CompletionException with the underlying
    * exception as its cause.
    *
    * @param stage stage to wait on
    * @param <R> the type in the stage
    * @return the result value
    * @throws CompletionException if this stage completed exceptionally or a completion computation threw an exception
    */
   public static <R> R join(CompletionStage<R> stage) {
      try {
         return CompletableFutures.await(stage.toCompletableFuture());
      } catch (ExecutionException e) {
         throw new CompletionException(e.getCause());
      } catch (InterruptedException e) {
         throw new CompletionException(e);
      }
   }

   /**
    * Returns a CompletionStage that completes when both of the provided CompletionStages complete. This method
    * may choose to return either of the arguments if the other is complete, or a new instance entirely.
    *
    * @param first the first CompletionStage
    * @param second the second CompletionStage
    * @return a CompletionStage that is complete when both of the given CompletionStages complete
    */
   public static CompletionStage<Void> allOf(CompletionStage<Void> first, CompletionStage<Void> second) {
      if (!isCompletedSuccessfully(first)) {
         if (isCompletedSuccessfully(second)) {
            // Only first is still pending: it alone determines completion.
            return first;
         } else {
            return CompletionStages.aggregateCompletionStage().dependsOn(first).dependsOn(second).freeze();
         }
      }
      return second;
   }

   /**
    * Returns a CompletionStage that completes when all of the provided stages complete, either normally or via
    * exception. If one or more stages complete exceptionally the returned CompletionStage will complete with the
    * exception of one of these. If no CompletionStages are provided, returns a CompletionStage completed with the value
    * null.
    *
    * @param stages the CompletionStages
    * @return a CompletionStage that is completed when all of the given CompletionStages complete
    */
   public static CompletionStage<Void> allOf(CompletionStage<?>... stages) {
      AggregateCompletionStage<Void> aggregateCompletionStage = null;
      for (CompletionStage<?> stage : stages) {
         if (!isCompletedSuccessfully(stage)) {
            // Lazily create the aggregate only when at least one stage is still pending.
            if (aggregateCompletionStage == null) {
               aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
            }
            aggregateCompletionStage.dependsOn(stage);
         }
      }
      return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull();
   }

   /**
    * Extend {@link CompletionStage#thenCompose(Function)} to also handle exceptions.
    */
   public static <T, U> CompletionStage<U> handleAndCompose(CompletionStage<T> stage,
         BiFunction<T, Throwable, CompletionStage<U>> handleFunction) {
      if (isCompletedSuccessfully(stage)) {
         // Fast path: invoke the function directly, translating a thrown exception into a failed stage.
         T value = join(stage);
         try {
            return handleFunction.apply(value, null);
         } catch (Throwable t) {
            return CompletableFuture.failedFuture(t);
         }
      }
      return stage.handle(handleFunction).thenCompose(Function.identity());
   }

   /**
    * Async variant of {@link #handleAndCompose(CompletionStage, BiFunction)} that runs the handler on the executor.
    */
   public static <T, U> CompletionStage<U> handleAndComposeAsync(CompletionStage<T> stage,
         BiFunction<T, Throwable, CompletionStage<U>> handleFunction, Executor executor) {
      return stage.handleAsync(handleFunction, executor).thenCompose(Function.identity());
   }

   /**
    * Runs the command on the scheduler after the delay; the returned stage completes with the
    * command's outcome (null on success, its Throwable on failure).
    */
   public static CompletionStage<Void> schedule(Runnable command, ScheduledExecutorService executor,
         long delay, TimeUnit timeUnit) {
      CompletableFuture<Void> future = new CompletableFuture<>();
      executor.schedule(() -> {
         try {
            command.run();
            future.complete(null);
         } catch (Throwable t) {
            future.completeExceptionally(t);
         }
      }, delay, timeUnit);
      return future;
   }

   /**
    * Calls the callable on the scheduler after the delay; the returned stage completes with
    * the callable's value or its Throwable.
    */
   public static <T> CompletionStage<T> schedule(Callable<T> command, ScheduledExecutorService executor,
         long delay, TimeUnit timeUnit) {
      CompletableFuture<T> future = new CompletableFuture<>();
      executor.schedule(() -> {
         try {
            T value = command.call();
            future.complete(value);
         } catch (Throwable t) {
            future.completeExceptionally(t);
         }
      }, delay, timeUnit);
      return future;
   }

   /**
    * Like {@link #schedule(Callable, ScheduledExecutorService, long, TimeUnit)} for a callable that
    * itself returns a stage; the returned stage completes when the inner stage does.
    */
   public static <T> CompletionStage<T> scheduleNonBlocking(Callable<? extends CompletionStage<T>> command,
         ScheduledExecutorService executor, long delay, TimeUnit timeUnit) {
      return schedule(command, executor, delay, timeUnit).thenCompose(Function.identity());
   }

   /**
    * Returns a stage that completes (with null) when the given stage completes, discarding its value.
    */
   public static CompletionStage<Void> ignoreValue(CompletionStage<?> stage) {
      return stage.thenRun(NO_OP_RUNNABLE);
   }

   /**
    * Blocks until the stage completes; unlike {@link #join(CompletionStage)} this propagates the
    * checked exceptions directly.
    */
   public static <T> T await(CompletionStage<T> stage) throws ExecutionException, InterruptedException {
      return CompletableFutures.await(stage.toCompletableFuture());
   }

   private static class VoidAggregateCompletionStage extends AbstractAggregateCompletionStage<Void> {
      @Override
      Void getValue() {
         return null;
      }
   }

   private static class ValueAggregateCompletionStage<R> extends AbstractAggregateCompletionStage<R> {
      private final R value;

      private ValueAggregateCompletionStage(R value) {
         this.value = value;
      }

      @Override
      R getValue() {
         return value;
      }
   }

   private static class OrBooleanAggregateCompletionStage extends AbstractAggregateCompletionStage<Boolean> {
      // Latched to true as soon as any dependent stage yields Boolean.TRUE.
      private volatile boolean value = false;

      @Override
      Boolean getValue() {
         return value;
      }

      @Override
      public void accept(Object o, Throwable t) {
         if (t != null) {
            super.accept(null, t);
            return;
         }
         if (o instanceof Boolean && (Boolean) o) {
            this.value = true;
         }
         super.accept(o, null);
      }
   }

   /**
    * Abstract {@link AggregateCompletionStage} that will keep a count of non completed stages it depends upon while
    * only registering to be notified when each completes, decrementing the counter. The returned CompletionStage
    * via {@link #freeze()} will be completed when the counter is zero, providing the value returned from
    * {@link #getValue()} as the result.
    * This class implements BiConsumer and extends CompletableFuture to avoid additional object/lambda allocation per instance
    * @param <R>
    */
   private static abstract class AbstractAggregateCompletionStage<R> extends CompletableFuture<R>
         implements AggregateCompletionStage<R>, BiConsumer<Object, Throwable> {
      private static final AtomicIntegerFieldUpdater<AbstractAggregateCompletionStage> remainingUpdater =
            AtomicIntegerFieldUpdater.newUpdater(AbstractAggregateCompletionStage.class, "remaining");
      // Updated only through remainingUpdater.
      @SuppressWarnings({"unused"})
      private volatile int remaining;
      private volatile boolean frozen = false;
      // Holds one of the failures (if any); published via volatile before remaining hits zero.
      private volatile Throwable throwable;

      @Override
      public void accept(Object o, Throwable t) {
         // Callback for each dependent stage's completion.
         if (t != null) {
            throwable = t;
         }
         // Complete only once frozen: before freeze() more dependencies may still be added.
         if (remainingUpdater.decrementAndGet(this) == 0 && frozen) {
            complete();
         }
      }

      @Override
      final public AggregateCompletionStage<R> dependsOn(CompletionStage<?> stage) {
         Objects.requireNonNull(stage);
         if (frozen) {
            throw new IllegalStateException();
         }
         // We only depend upon it if the stage wasn't complete
         if (!isCompletedSuccessfully(stage)) {
            remainingUpdater.incrementAndGet(this);
            stage.whenComplete(this);
         }
         return this;
      }

      @Override
      final public CompletionStage<R> freeze() {
         frozen = true;
         // Both freeze() and accept() may race to call complete(); CompletableFuture
         // completion methods are idempotent so a duplicate call is harmless.
         if (remainingUpdater.get(this) == 0) {
            complete();
         }
         return this;
      }

      private void complete() {
         Throwable t = throwable;
         if (t != null) {
            completeExceptionally(t);
         } else {
            complete(getValue());
         }
      }

      abstract R getValue();
   }
}
11,699
37.870432
126
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/BlockingTaskAwareExecutorServiceImpl.java
package org.infinispan.util.concurrent;

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import org.infinispan.commons.time.TimeService;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * A special executor service that accepts {@link BlockingRunnable}s. The runnable reports, via
 * {@link BlockingRunnable#isReady()}, whether it can run without blocking the thread; only when
 * it says it is ready is it handed to the real executor service, otherwise it is queued until
 * {@link #checkForReadyTasks()} finds it ready.
 *
 * @author Pedro Ruivo
 * @since 5.3
 */
@Scope(Scopes.GLOBAL)
public class BlockingTaskAwareExecutorServiceImpl extends AbstractExecutorService implements BlockingTaskAwareExecutorService {

   private static final Log log = LogFactory.getLog(BlockingTaskAwareExecutorServiceImpl.class);
   // Tasks whose isReady() returned false; they are retried by tryBlockedTasks().
   private final Queue<BlockingRunnable> blockedTasks;
   // The real executor that ultimately runs ready tasks.
   private final ExecutorService executorService;
   private final TimeService timeService;
   private volatile boolean shutdown;
   // Coordinates concurrent checkForReadyTasks() calls: only the thread that raises the counter
   // from 0 drains the queue; later callers just bump it, forcing the draining thread to re-scan.
   private final AtomicInteger requestCounter = new AtomicInteger();

   public BlockingTaskAwareExecutorServiceImpl(ExecutorService executorService, TimeService timeService) {
      this.blockedTasks = new ConcurrentLinkedQueue<>();
      this.executorService = executorService;
      this.timeService = timeService;
      this.shutdown = false;
   }

   @Stop
   void stop() {
      // This method only runs in the server; in embedded mode,
      // BlockingTaskAwareExecutorServiceImpl is wrapped in a LazyInitializingScheduledExecutorService.
      // In the server, we do not need to stop the executorService, which has its own lifecycle,
      // but we need to stop retrying tasks.
      shutdown = true;
   }

   /**
    * Executes {@code runnable} immediately if it reports ready, otherwise queues it for later.
    *
    * @throws RejectedExecutionException if this service was already shut down.
    */
   @Override
   public final void execute(BlockingRunnable runnable) {
      if (shutdown) {
         throw new RejectedExecutionException("Executor Service is already shutdown");
      }
      if (runnable.isReady()) {
         doExecute(runnable);
         if (log.isTraceEnabled()) {
            log.tracef("Added a new task directly: %d task(s) are waiting", blockedTasks.size());
         }
      } else {
         // we no longer submit directly to the executor service.
         blockedTasks.offer(runnable);
         // Re-check right away in case the task became ready between isReady() and offer().
         checkForReadyTasks();
         if (log.isTraceEnabled()) {
            log.tracef("Added a new task to the queue: %d task(s) are waiting", blockedTasks.size());
         }
      }
   }

   @Override
   public void shutdown() {
      shutdown = true;
      executorService.shutdown();
   }

   /**
    * Shuts down the delegate and returns its pending tasks plus every still-blocked task.
    */
   @Override
   public List<Runnable> shutdownNow() {
      shutdown = true;
      List<Runnable> runnableList = new LinkedList<>();
      runnableList.addAll(executorService.shutdownNow());
      runnableList.addAll(blockedTasks);
      return runnableList;
   }

   @Override
   public boolean isShutdown() {
      return shutdown;
   }

   // Terminated only once shut down, the blocked queue is drained AND the delegate has terminated.
   @Override
   public boolean isTerminated() {
      return shutdown && blockedTasks.isEmpty() && executorService.isTerminated();
   }

   @Override
   public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
      final long endTime = timeService.expectedEndTime(timeout, unit);
      long waitTime = timeService.remainingTime(endTime, TimeUnit.MILLISECONDS);
      // NOTE(review): sleeps the full remaining time per iteration; if the queue empties while
      // sleeping, this thread does not wake early — confirm this coarse polling is acceptable.
      while (!blockedTasks.isEmpty() && waitTime > 0) {
         Thread.sleep(waitTime);
         waitTime = timeService.remainingTime(endTime, TimeUnit.MILLISECONDS);
      }
      return isTerminated();
   }

   @Override
   public final void checkForReadyTasks() {
      if (!blockedTasks.isEmpty()) {
         tryBlockedTasks();
      }
   }

   /**
    * Routes {@link BlockingRunnable}s through the ready-check path; plain runnables go straight to
    * the delegate, and are queued (wrapped as always-ready) if the delegate rejects them.
    *
    * @throws RejectedExecutionException if this service was already shut down.
    */
   @Override
   public void execute(Runnable command) {
      if (shutdown) {
         throw new RejectedExecutionException("Executor Service is already shutdown");
      }
      if (command instanceof BlockingRunnable) {
         execute((BlockingRunnable) command);
      } else {
         try {
            executorService.execute(command);
         } catch (RejectedExecutionException rejected) {
            // put it back!
            blockedTasks.offer(new RunnableWrapper(command));
            checkForReadyTasks();
         }
      }
   }

   public ExecutorService getExecutorService() {
      return executorService;
   }

   /**
    * Attempts to run any blocked tasks that are now able to be ran. Note that if concurrent threads invoke this
    * method only one can run the given tasks. If an additional thread attempts to run the given tasks it will
    * restart the original one.
    */
   private void tryBlockedTasks() {
      int counter = requestCounter.getAndIncrement();
      if (counter == 0) {
         // This thread won the drain; loop until no new scan requests arrived while draining.
         do {
            int taskExecutionCount = 0;
            int remaining = 0;
            for (Iterator<BlockingRunnable> iterator = blockedTasks.iterator(); iterator.hasNext(); ) {
               BlockingRunnable runnable = iterator.next();
               boolean ready;
               try {
                  ready = runnable.isReady();
               } catch (Exception e) {
                  // A task whose ready-check throws is dropped rather than retried forever.
                  log.debugf(e, "Failed to check ready state of %s, dropping.", runnable);
                  iterator.remove();
                  continue;
               }
               boolean executed = false;
               if (ready) {
                  iterator.remove();
                  executed = doExecute(runnable);
               }
               if (executed) {
                  taskExecutionCount++;
               } else {
                  remaining++;
               }
            }
            if (log.isTraceEnabled()) {
               log.tracef("Tasks executed=%s, still pending=~%s", taskExecutionCount, remaining);
            }
            // Subtracting our claimed count leaves any increments made by other threads meanwhile;
            // a non-zero result means another scan was requested, so go around again.
         } while ((counter = requestCounter.addAndGet(-counter)) != 0);
      }
   }

   // Hands the task to the delegate; on rejection (and unless shut down) re-queues it and requests
   // another scan. Returns true only if the delegate accepted the task.
   private boolean doExecute(BlockingRunnable runnable) {
      try {
         executorService.execute(runnable);
         return true;
      } catch (RejectedExecutionException rejected) {
         if (!shutdown) {
            // put it back!
            blockedTasks.offer(runnable);
            requestCounter.incrementAndGet();
         }
         return false;
      }
   }

   /** Adapts a plain {@link Runnable} into an always-ready {@link BlockingRunnable}. */
   private static class RunnableWrapper implements BlockingRunnable {

      private final Runnable runnable;

      private RunnableWrapper(Runnable runnable) {
         this.runnable = runnable;
      }

      @Override
      public boolean isReady() {
         return true;
      }

      @Override
      public void run() {
         runnable.run();
      }

      @Override
      public String toString() {
         return "RunnableWrapper(" + runnable + ")";
      }
   }
}
7,114
31.637615
127
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/package-info.java
/**
 * Lock and synchronization related classes, tools and utilities, such as {@code LockManager},
 * {@code LockPromise} and their listener interfaces.
 */
package org.infinispan.util.concurrent.locks;
120
23.2
65
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/TransactionalRemoteLockCommand.java
package org.infinispan.util.concurrent.locks;

import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.ComponentRegistry;

/**
 * Extension of {@link RemoteLockCommand} for transactional commands, exposing the keys that may
 * have to be locked on behalf of a transaction.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface TransactionalRemoteLockCommand extends RemoteLockCommand {

   /**
    * Creates the transactional invocation context.
    *
    * @param componentRegistry the cache's component registry.
    * @return the {@link TxInvocationContext}.
    */
   TxInvocationContext<?> createContext(ComponentRegistry componentRegistry);
}
568
26.095238
98
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/DeadlockDetectedException.java
package org.infinispan.util.concurrent.locks;

import org.infinispan.commons.CacheException;

/**
 * Thrown when a deadlock between lock owners has been detected.
 *
 * @author Mircea.Markus@jboss.com
 */
public class DeadlockDetectedException extends CacheException {

   private static final long serialVersionUID = -8529876192715526744L;

   public DeadlockDetectedException(String msg) {
      super(msg);
   }
}
395
21
70
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/ExtendedLockPromise.java
package org.infinispan.util.concurrent.locks;

import java.util.function.Supplier;

import org.infinispan.interceptors.InvocationStage;
import org.infinispan.util.concurrent.TimeoutException;

/**
 * A {@link LockPromise} with extra control operations: cancellation and access to the requesting
 * and current lock owners.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface ExtendedLockPromise extends LockPromise {

   /**
    * Cancels this {@link LockPromise}, if possible.
    *
    * @param cause the cancellation cause; only {@link LockState#DEADLOCKED} and
    *              {@link LockState#TIMED_OUT} are valid.
    * @throws IllegalArgumentException if {@code cause} is not a valid value.
    */
   void cancel(LockState cause);

   /**
    * @return the lock owner associated with this {@link LockPromise}.
    */
   Object getRequestor();

   /**
    * @return the current lock owner.
    */
   Object getOwner();

   /**
    * @return an {@link InvocationStage} view of this lock acquisition.
    */
   InvocationStage toInvocationStage(Supplier<TimeoutException> timeoutSupplier);
}
1,062
25.575
106
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/PendingLockListener.java
package org.infinispan.util.concurrent.locks;

/**
 * Listener invoked when a {@link PendingLockPromise} becomes ready.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface PendingLockListener {

   /**
    * Invoked once the associated {@link PendingLockPromise} is ready.
    */
   void onReady();
}
299
16.647059
71
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/LockState.java
package org.infinispan.util.concurrent.locks;

/**
 * The possible states of an {@link org.infinispan.util.concurrent.locks.impl.InfinispanLock}.
 * <p>
 * Used by listeners to be notified when the state changes.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public enum LockState {
   /**
    * The lock owner is queued, waiting for the lock to become available.
    */
   WAITING,
   /**
    * The lock owner left the queue and may acquire the lock.
    */
   ACQUIRED,
   /**
    * The lock owner timed out while waiting in the queue.
    */
   TIMED_OUT,
   /**
    * A deadlock with another possible lock owner occurred and this owner should abort.
    */
   DEADLOCKED,
   /**
    * The lock owner released the lock.
    */
   RELEASED
}
750
21.757576
88
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/KeyAwareLockListener.java
package org.infinispan.util.concurrent.locks;

/**
 * A listener for {@link KeyAwareLockPromise}.
 * <p>
 * The event carries the key the lock owner is trying to acquire.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface KeyAwareLockListener {

   /**
    * Invoked when the lock becomes available.
    *
    * @param key   the key associated with this lock.
    * @param state the lock state: {@link LockState#ACQUIRED}, {@link LockState#TIMED_OUT} or
    *              {@link LockState#DEADLOCKED}.
    */
   void onEvent(Object key, LockState state);
}
585
25.636364
113
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/LockManager.java
package org.infinispan.util.concurrent.locks;

import java.util.Collection;
import java.util.concurrent.TimeUnit;

import org.infinispan.context.InvocationContext;
import org.infinispan.util.concurrent.locks.impl.InfinispanLock;

/**
 * Deals with all aspects of acquiring and releasing locks for cache entries.
 *
 * @author Manik Surtani (&lt;a href="mailto:manik@jboss.org"&gt;manik@jboss.org&lt;/a&gt;)
 * @author Mircea.Markus@jboss.com
 * @author Pedro Ruivo
 * @since 4.0
 */
public interface LockManager {

   /**
    * Attempts to lock {@code key} unless the lock is already held by {@code lockOwner}.
    * <p>
    * Non-blocking: it returns immediately with a {@link KeyAwareLockPromise}; the invoker can (and
    * should) use {@link LockPromise#lock()} to find out when the lock is actually acquired.
    *
    * @param key       key to lock.
    * @param lockOwner the owner of the lock.
    * @param time      the maximum time to wait for the lock.
    * @param unit      the time unit of the {@code time} argument.
    * @return the {@link KeyAwareLockPromise} associated with this key.
    */
   KeyAwareLockPromise lock(Object key, Object lockOwner, long time, TimeUnit unit);

   /**
    * Same as {@link #lock(Object, Object, long, TimeUnit)} but for multiple keys.
    * <p>
    * Guarantees no deadlocks when invoked by different lock owners over the same set (or a subset)
    * of keys.
    *
    * @param keys      keys to lock.
    * @param lockOwner the owner of the lock.
    * @param time      the maximum time to wait for the lock.
    * @param unit      the time unit of the {@code time} argument.
    * @return the {@link KeyAwareLockPromise} associated with these keys.
    */
   KeyAwareLockPromise lockAll(Collection<?> keys, Object lockOwner, long time, TimeUnit unit);

   /**
    * Releases the lock for {@code key} if {@code lockOwner} is the lock owner.
    * <p>
    * Note: this method <b>will</b> unlock a lock whose key is also the lockOwner.
    *
    * @param key       key to unlock.
    * @param lockOwner the owner of the lock.
    */
   void unlock(Object key, Object lockOwner);

   /**
    * Same as {@link #unlock(Object, Object)} but for multiple keys.
    * <p>
    * Note: this method will <b>not</b> unlock a lock whose key is also the lockOwner.
    *
    * @param keys      keys to unlock.
    * @param lockOwner the owner of the lock.
    */
   void unlockAll(Collection<?> keys, Object lockOwner);

   /**
    * Equivalent to {@code unlockAll(context.getLockedKeys(), context.getKeyLockOwner())}.
    * <p>
    * Note: this method will <b>not</b> unlock a lock whose key is also the lockOwner.
    *
    * @param context the context holding the locked keys and the lock owner.
    */
   void unlockAll(InvocationContext context);

   /**
    * Tests whether {@code lockOwner} owns a lock on {@code key}.
    *
    * @param key       key to test.
    * @param lockOwner the owner of the lock.
    * @return {@code true} if the owner holds the lock on the key, {@code false} otherwise.
    */
   boolean ownsLock(Object key, Object lockOwner);

   /**
    * Tests whether {@code key} is locked.
    *
    * @param key key to test.
    * @return {@code true} if the key is locked, {@code false} otherwise.
    */
   boolean isLocked(Object key);

   /**
    * Retrieves the owner of the lock for {@code key}.
    *
    * @return the owner of the lock, or {@code null} if not locked.
    */
   Object getOwner(Object key);

   /**
    * Prints lock information for all locks.
    *
    * @return the lock information.
    */
   String printLockInfo();

   /**
    * @return the number of locks held.
    */
   int getNumberOfLocksHeld();

   InfinispanLock getLock(Object key);
}
3,800
32.342105
120
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/LockPromise.java
package org.infinispan.util.concurrent.locks;

import org.infinispan.interceptors.InvocationStage;
import org.infinispan.util.concurrent.TimeoutException;

/**
 * A promise returned by {@link org.infinispan.util.concurrent.locks.impl.InfinispanLock}.
 * <p>
 * Holding this promise does not mean the lock is acquired: acquisition happens when {@link #lock()}
 * is invoked. The promise exposes its availability state and accepts listeners.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface LockPromise {

   /**
    * Tests whether the lock is available.
    * <p>
    * The lock counts as available once it was successfully acquired or the timeout expired; in
    * either case, once this returns {@code true}, {@link #lock()} will never block.
    *
    * @return {@code true} if the lock is available (or the timeout expired), {@code false} otherwise.
    */
   boolean isAvailable();

   /**
    * Locks the key (or keys) associated with this promise, blocking until the lock is available or
    * the timeout expires.
    *
    * @throws InterruptedException if the current thread is interrupted while acquiring the lock.
    * @throws TimeoutException     if the lock could not be acquired within the specified timeout.
    */
   void lock() throws InterruptedException, TimeoutException;

   /**
    * Adds a {@link LockListener} to be invoked when the lock becomes available.
    * <p>
    * Its {@code acquired} parameter is {@code true} when the lock was acquired and {@code false}
    * when it timed out.
    *
    * @param listener the {@link LockListener} to invoke.
    */
   void addListener(LockListener listener);

   /**
    * @return an {@link InvocationStage} view of this lock acquisition.
    */
   InvocationStage toInvocationStage();
}
1,871
34.320755
120
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/PendingLockPromise.java
package org.infinispan.util.concurrent.locks;

import org.infinispan.interceptors.InvocationStage;

/**
 * A promise returned by {@link PendingLockManager}.
 * <p>
 * When a transaction must wait for transactions from an older topology, this promise exposes the
 * waiting state. If no waiting is needed, or every older transaction has finished or timed out,
 * {@link #isReady()} returns {@code true}. Listeners can be registered to be notified when the
 * promise becomes ready.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface PendingLockPromise {

   PendingLockPromise NO_OP = new PendingLockPromise() {
      @Override
      public boolean isReady() {
         return true;
      }

      @Override
      public void addListener(PendingLockListener listener) {
         // Always ready: notify immediately in the caller's thread.
         listener.onReady();
      }

      @Override
      public boolean hasTimedOut() {
         return false;
      }

      @Override
      public long getRemainingTimeout() {
         throw new UnsupportedOperationException("This should never happen!");
      }

      @Override
      public InvocationStage toInvocationStage() {
         return InvocationStage.completedNullStage();
      }

      @Override
      public String toString() {
         return "NO_OP";
      }
   };

   /**
    * @return {@code true} once the transaction has finished waiting.
    */
   boolean isReady();

   /**
    * Adds a listener to this promise.
    * <p>
    * The listener must be non-null and is invoked exactly once. When {@link #isReady()} already
    * returns {@code true}, the {@code listener} is invoked immediately in the invoker thread.
    *
    * @param listener the {@link PendingLockListener} to add.
    */
   void addListener(PendingLockListener listener);

   /**
    * @return {@code true} if a timeout occurred while waiting for older transactions.
    */
   boolean hasTimedOut();

   /**
    * @return the remaining timeout, in millis; zero when {@link #hasTimedOut()} is {@code true}.
    */
   long getRemainingTimeout();

   /**
    * If successful, {@link #getRemainingTimeout()} will return the remaining timeout, in millis.
    *
    * If timed out, the resulting {@code InvocationStage} completes with a
    * {@link org.infinispan.util.concurrent.TimeoutException}.
    *
    * @return an {@link InvocationStage} for this lock.
    */
   InvocationStage toInvocationStage();
}
2,412
27.72619
136
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/RemoteLockCommand.java
package org.infinispan.util.concurrent.locks;

import java.util.Collection;

import org.infinispan.commands.ReplicableCommand;

/**
 * Simple interface to extract all the keys that may need to be locked.
 * <p>
 * A {@link org.infinispan.commands.remote.CacheRpcCommand} that needs to acquire locks should implement this interface.
 * This way, Infinispan tries to provide a better management to optimize the system resources usage.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface RemoteLockCommand extends ReplicableCommand {

   /**
    * It returns a {@link Collection} with the keys to be locked.
    * <p>
    * It may return an empty collection if no keys need to be locked, independently of the return value of
    * {@link #hasSkipLocking()}. It may contain duplicated keys, and {@code null} is not a valid return value.
    *
    * @return a {@link Collection} of keys to lock.
    */
   Collection<?> getKeysToLock();

   /**
    * It returns the lock owner of the key.
    * <p>
    * Usually, in transactional caches it is the {@link org.infinispan.transaction.xa.GlobalTransaction} and in
    * non-transactional caches the {@link org.infinispan.commands.CommandInvocationId}.
    *
    * @return the lock owner of the key.
    */
   Object getKeyLockOwner();

   /**
    * @return {@code true} if the locks should be acquired with zero acquisition timeout.
    */
   boolean hasZeroLockAcquisition();

   /**
    * It checks if this command should skip acquiring locks.
    *
    * @return {@code true} if lock acquisition should be skipped for the keys in {@link #getKeysToLock()}.
    */
   boolean hasSkipLocking();
}
1,617
31.36
120
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/KeyAwareLockPromise.java
package org.infinispan.util.concurrent.locks; import org.infinispan.interceptors.InvocationStage; /** * An extension of {@link LockPromise} that contains a key associated to the lock. * * @author Pedro Ruivo * @since 8.0 */ public interface KeyAwareLockPromise extends LockPromise { KeyAwareLockPromise NO_OP = new KeyAwareLockPromise() { @Override public void addListener(KeyAwareLockListener listener) { listener.onEvent(null, LockState.ACQUIRED); } public boolean isAvailable() { return true; } public void lock() {/*no-op*/} public void addListener(LockListener listener) { listener.onEvent(LockState.ACQUIRED); } @Override public InvocationStage toInvocationStage() { return InvocationStage.completedNullStage(); } }; /** * It adds the listener to this {@link LockPromise}. * <p> * The listener is invoked when the {@link LockPromise#isAvailable()} returns true. For more info, check {@link * KeyAwareLockListener}. * * @param listener the listener to add. */ void addListener(KeyAwareLockListener listener); }
1,173
23.978723
114
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/PendingLockManager.java
package org.infinispan.util.concurrent.locks;

import java.util.Collection;
import java.util.concurrent.TimeUnit;

import org.infinispan.context.impl.TxInvocationContext;

/**
 * Checks for, and waits on, older-topology transactions holding conflicting keys.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface PendingLockManager {

   /**
    * Non-blocking variant of {@link #awaitPendingTransactionsForKey(TxInvocationContext, Object, long, TimeUnit)}.
    * <p>
    * Multiple invocations with the same transaction return the same {@link PendingLockPromise}. For
    * cleanup purposes, {@link #awaitPendingTransactionsForKey(TxInvocationContext, Object, long, TimeUnit)}
    * must be invoked afterwards.
    *
    * @param ctx  the {@link TxInvocationContext}.
    * @param key  the key to check.
    * @param time timeout.
    * @param unit {@link TimeUnit} of {@code time}.
    * @return a {@link PendingLockPromise}.
    */
   PendingLockPromise checkPendingTransactionsForKey(TxInvocationContext<?> ctx, Object key, long time, TimeUnit unit);

   /**
    * Non-blocking variant of
    * {@link #awaitPendingTransactionsForAllKeys(TxInvocationContext, Collection, long, TimeUnit)}.
    * <p>
    * Multiple invocations with the same transaction return the same {@link PendingLockPromise}. For
    * cleanup purposes, {@link #awaitPendingTransactionsForAllKeys(TxInvocationContext, Collection, long, TimeUnit)}
    * must be invoked afterwards.
    *
    * @param ctx  the {@link TxInvocationContext}.
    * @param keys the keys to check.
    * @param time timeout.
    * @param unit {@link TimeUnit} of {@code time}.
    * @return a {@link PendingLockPromise}.
    */
   PendingLockPromise checkPendingTransactionsForKeys(TxInvocationContext<?> ctx, Collection<Object> keys, long time, TimeUnit unit);

   /**
    * Waits until every older-topology transaction that may hold the lock for {@code key} completes.
    *
    * @param ctx  the {@link TxInvocationContext}.
    * @param key  the key to check.
    * @param time timeout.
    * @param unit {@link TimeUnit} of {@code time}.
    * @return the remaining timeout.
    * @throws InterruptedException if the thread is interrupted while waiting.
    * @deprecated Since 10.0, the blocking variants will be removed
    */
   @Deprecated
   long awaitPendingTransactionsForKey(TxInvocationContext<?> ctx, Object key, long time, TimeUnit unit) throws InterruptedException;

   /**
    * Waits until every older-topology transaction that may hold the lock for any key in
    * {@code keys} completes.
    *
    * @param ctx  the {@link TxInvocationContext}.
    * @param keys the keys to check.
    * @param time timeout.
    * @param unit {@link TimeUnit} of {@code time}.
    * @return the remaining timeout.
    * @throws InterruptedException if the thread is interrupted while waiting.
    * @deprecated Since 10.0, the blocking variants will be removed
    */
   @Deprecated
   long awaitPendingTransactionsForAllKeys(TxInvocationContext<?> ctx, Collection<Object> keys, long time, TimeUnit unit) throws InterruptedException;
}
3,164
39.576923
133
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/DeadlockChecker.java
package org.infinispan.util.concurrent.locks;

/**
 * An interface implementing the deadlock-detection algorithm.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface DeadlockChecker {

   /**
    * Checks for a deadlock between two lock owners.
    * <p>
    * {@code pendingOwner} is the owner trying to acquire the lock; {@code currentOwner} currently
    * holds it. Return {@code true} when a deadlock is detected and the {@code pendingOwner} must
    * roll back; return {@code false} when no deadlock is found or when the {@code currentOwner}
    * must roll back instead.
    * <p>
    * This method may be invoked multiple times and from multiple threads; thread safety is advised.
    *
    * @param pendingOwner a lock owner that tries to acquire the lock.
    * @param currentOwner the current lock owner.
    * @return {@code true} if a deadlock is detected and the {@code pendingOwner} must rollback.
    */
   boolean deadlockDetected(Object pendingOwner, Object currentOwner);
}
1,018
35.392857
120
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/LockReleasedException.java
package org.infinispan.util.concurrent.locks;

import org.infinispan.commons.CacheException;

/**
 * Thrown when a lock is released while a caller is still waiting to acquire it.
 *
 * @author Pedro Ruivo
 * @since 13.0
 */
public class LockReleasedException extends CacheException {

   public LockReleasedException() {
   }

   public LockReleasedException(Throwable cause) {
      super(cause);
   }

   public LockReleasedException(String msg) {
      super(msg);
   }

   public LockReleasedException(String msg, Throwable cause) {
      super(msg, cause);
   }

   public LockReleasedException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
      super(message, cause, enableSuppression, writableStackTrace);
   }
}
780
22.666667
91
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/LockListener.java
package org.infinispan.util.concurrent.locks;

/**
 * The listener for {@link LockPromise}.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public interface LockListener {

   /**
    * Invoked when the lock becomes available.
    *
    * @param state the lock state: {@link LockState#ACQUIRED}, {@link LockState#TIMED_OUT} or
    *              {@link LockState#DEADLOCKED}.
    */
   void onEvent(LockState state);
}
430
21.684211
113
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/NoOpPendingLockManager.java
package org.infinispan.util.concurrent.locks.impl;

import java.util.Collection;
import java.util.concurrent.TimeUnit;

import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.util.concurrent.locks.PendingLockManager;
import org.infinispan.util.concurrent.locks.PendingLockPromise;

/**
 * A {@link PendingLockManager} implementation that never waits: every check yields
 * {@link PendingLockPromise#NO_OP} and every await returns immediately with the full timeout.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
public class NoOpPendingLockManager implements PendingLockManager {

   private NoOpPendingLockManager() {
   }

   public static NoOpPendingLockManager getInstance() {
      return Holder.INSTANCE;
   }

   @Override
   public PendingLockPromise checkPendingTransactionsForKey(TxInvocationContext<?> ctx, Object key, long time, TimeUnit unit) {
      return PendingLockPromise.NO_OP;
   }

   @Override
   public PendingLockPromise checkPendingTransactionsForKeys(TxInvocationContext<?> ctx, Collection<Object> keys, long time, TimeUnit unit) {
      return PendingLockPromise.NO_OP;
   }

   @Override
   public long awaitPendingTransactionsForKey(TxInvocationContext<?> ctx, Object key, long time, TimeUnit unit) throws InterruptedException {
      // Nothing to wait for: report the full timeout as still remaining.
      return unit.toMillis(time);
   }

   @Override
   public long awaitPendingTransactionsForAllKeys(TxInvocationContext<?> ctx, Collection<Object> keys, long time, TimeUnit unit) throws InterruptedException {
      return unit.toMillis(time);
   }

   /** Lazy-initialization holder for the singleton instance. */
   private static class Holder {
      private static final NoOpPendingLockManager INSTANCE = new NoOpPendingLockManager();
   }
}
1,551
30.673469
158
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/LockContainer.java
package org.infinispan.util.concurrent.locks.impl;

import java.util.concurrent.TimeUnit;

import org.infinispan.util.concurrent.locks.DeadlockChecker;
import org.infinispan.util.concurrent.locks.ExtendedLockPromise;

/**
 * A container for locks.
 *
 * @author Manik Surtani
 * @author Mircea.Markus@jboss.com
 * @author Pedro Ruivo
 * @since 4.0
 */
public interface LockContainer {

   /**
    * @param key the key to lock.
    * @return the lock to acquire for the given key; created on demand if it does not exist yet.
    */
   ExtendedLockPromise acquire(Object key, Object lockOwner, long time, TimeUnit timeUnit);

   /**
    * @param key the key to lock.
    * @return the lock for the given key, or {@code null} if no such lock exists.
    */
   InfinispanLock getLock(Object key);

   void release(Object key, Object lockOwner);

   /**
    * @return the number of locks held.
    */
   int getNumLocksHeld();

   /**
    * @param key the key to test.
    * @return {@code true} if the key is locked, {@code false} otherwise.
    */
   boolean isLocked(Object key);

   /**
    * @return the size of the shared lock pool.
    */
   int size();

   /**
    * Forces a deadlock check on all existing locks.
    */
   void deadlockCheck(DeadlockChecker deadlockChecker);
}
1,304
23.622642
104
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/InfinispanLock.java
package org.infinispan.util.concurrent.locks.impl; import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater.newUpdater; import static org.infinispan.commons.util.concurrent.CompletableFutures.await; import java.util.Objects; import java.util.Queue; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import org.infinispan.commons.time.TimeService; import org.infinispan.commons.util.ByRef; import org.infinispan.interceptors.ExceptionSyncInvocationStage; import org.infinispan.interceptors.InvocationStage; import org.infinispan.interceptors.impl.SimpleAsyncInvocationStage; import org.infinispan.util.concurrent.TimeoutException; import org.infinispan.util.concurrent.locks.DeadlockChecker; import org.infinispan.util.concurrent.locks.DeadlockDetectedException; import org.infinispan.util.concurrent.locks.ExtendedLockPromise; import org.infinispan.util.concurrent.locks.LockListener; import org.infinispan.util.concurrent.locks.LockReleasedException; import org.infinispan.util.concurrent.locks.LockState; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; /** * A special lock for Infinispan cache. * <p/> * The main different with the traditional {@link java.util.concurrent.locks.Lock} is allowing to use any object as lock * owner. It is possible to use a {@link Thread} as lock owner that makes similar to {@link * java.util.concurrent.locks.Lock}. * <p/> * In addition, it has an asynchronous interface. {@link #acquire(Object, long, TimeUnit)} will not acquire the lock * immediately (except if it is free) but will return a {@link ExtendedLockPromise}. 
This promise allow to test if the * lock is acquired asynchronously and cancel the lock acquisition, without any blocking. * * @author Pedro Ruivo * @since 8.0 */ public class InfinispanLock { private static final Log log = LogFactory.getLog(InfinispanLock.class); private static final AtomicReferenceFieldUpdater<InfinispanLock, LockRequest> OWNER_UPDATER = newUpdater(InfinispanLock.class, LockRequest.class, "current"); private static final AtomicReferenceFieldUpdater<LockPlaceHolder, LockState> STATE_UPDATER = newUpdater(LockPlaceHolder.class, LockState.class, "lockState"); private volatile Queue<LockRequest> pendingRequest; private final ConcurrentMap<Object, LockRequest> lockOwners; private final Runnable releaseRunnable; private final Executor nonBlockingExecutor; private TimeService timeService; @SuppressWarnings("CanBeFinal") private volatile LockRequest current; /** * Creates a new instance. * * @param nonBlockingExecutor executor that is resumed upon after a lock has been acquired or times out if waiting * @param timeService the {@link TimeService} to check for timeouts. */ public InfinispanLock(Executor nonBlockingExecutor, TimeService timeService) { this(nonBlockingExecutor, timeService, null); } /** * Creates a new instance. * * @param nonBlockingExecutor executor that is resumed upon after a lock has been acquired or times out if waiting * @param timeService the {@link TimeService} to check for timeouts. * @param releaseRunnable a {@link Runnable} that is invoked every time this lock is released. */ public InfinispanLock(Executor nonBlockingExecutor, TimeService timeService, Runnable releaseRunnable) { this.nonBlockingExecutor = nonBlockingExecutor; this.timeService = timeService; lockOwners = new ConcurrentHashMap<>(); current = null; this.releaseRunnable = releaseRunnable; } /** * Creates a new instance which is acquired by {@code owner}. * <p> * The {@code lockPromise} stores the reference to the {@link ExtendedLockPromise}. 
* The method {@link #acquire(Object, long, TimeUnit)} is no longer necessary to be invoked by this lock {@code owner}. * * @param nonBlockingExecutor executor that is resumed upon after a lock has been acquired or times out if waiting * @param timeService the {@link TimeService} to check for timeouts. * @param releaseRunnable a {@link Runnable} that is invoked every time this lock is released. * @param owner the lock owner. * @param lockPromise the {@link ByRef} to store the {@link ExtendedLockPromise}. */ public InfinispanLock(Executor nonBlockingExecutor, TimeService timeService, Runnable releaseRunnable, Object owner, ByRef<ExtendedLockPromise> lockPromise) { this.nonBlockingExecutor = nonBlockingExecutor; this.timeService = timeService; lockOwners = new ConcurrentHashMap<>(); this.releaseRunnable = releaseRunnable; LockAcquired promise = new LockAcquired(owner); current = promise; lockOwners.put(owner, promise); lockPromise.set(promise); if (log.isTraceEnabled()) { log.tracef("%s successfully acquired the lock.", lockPromise); } } /** * Tests purpose only! */ public void setTimeService(TimeService timeService) { if (timeService != null) { this.timeService = timeService; } } /** * It tries to acquire this lock. * <p/> * If it is invoked multiple times with the same owner, the same {@link ExtendedLockPromise} is returned until it has * timed-out or {@link #release(Object)} is invoked. * <p/> * If the lock is free, it is immediately acquired, otherwise the lock owner is queued. * * @param lockOwner the lock owner who needs to acquire the lock. * @param time the timeout value. * @param timeUnit the timeout unit. * @return an {@link ExtendedLockPromise}. * @throws NullPointerException if {@code lockOwner} or {@code timeUnit} is {@code null}. 
*/ public ExtendedLockPromise acquire(Object lockOwner, long time, TimeUnit timeUnit) { Objects.requireNonNull(lockOwner, "Lock Owner should be non-null"); Objects.requireNonNull(timeUnit, "Time Unit should be non-null"); if (log.isTraceEnabled()) { log.tracef("Acquire lock for %s. Timeout=%s (%s)", lockOwner, time, timeUnit); } LockRequest lockPlaceHolder = lockOwners.get(lockOwner); if (lockPlaceHolder != null) { if (log.isTraceEnabled()) { log.tracef("Lock owner already exists: %s", lockPlaceHolder); } return lockPlaceHolder; } lockPlaceHolder = createLockInfo(lockOwner, time, timeUnit); LockRequest other = lockOwners.putIfAbsent(lockOwner, lockPlaceHolder); if (other != null) { if (log.isTraceEnabled()) { log.tracef("Lock owner already exists: %s", other); } return other; } if (log.isTraceEnabled()) { log.tracef("Created a new one: %s", lockPlaceHolder); } addToPendingRequests(lockPlaceHolder); tryAcquire(null); return lockPlaceHolder; } /** * It tries to release the lock held by {@code lockOwner}. * <p/> * If the lock is not acquired (is waiting or timed out/deadlocked) by {@code lockOwner}, its {@link * ExtendedLockPromise} is canceled. If {@code lockOwner} is the current lock owner, the lock is released and the * next lock owner available will acquire the lock. If the {@code lockOwner} never tried to acquire the lock, this * method does nothing. * * @param lockOwner the lock owner who wants to release the lock. * @throws NullPointerException if {@code lockOwner} is {@code null}. */ public void release(Object lockOwner) { Objects.requireNonNull(lockOwner, "Lock Owner should be non-null"); if (log.isTraceEnabled()) { log.tracef("Release lock for %s.", lockOwner); } LockRequest wantToRelease = lockOwners.get(lockOwner); if (wantToRelease == null) { if (log.isTraceEnabled()) { log.tracef("%s not found!", lockOwner); } //nothing to release return; } final boolean released = wantToRelease.setReleased(); if (log.isTraceEnabled()) { log.tracef("Release lock for %s? 
%s", wantToRelease, released); } LockRequest currentLocked = current; if (currentLocked == wantToRelease) { tryAcquire(wantToRelease); } } /** * @return the current lock owner or {@code null} if it is not acquired. */ public Object getLockOwner() { LockRequest lockPlaceHolder = current; return lockPlaceHolder == null ? null : lockPlaceHolder.owner; } /** * It checks if the lock is acquired. * <p/> * A {@code false} return value does not mean the lock is free since it may have queued lock owners. * * @return {@code true} if the lock is acquired. */ public boolean isLocked() { return current != null; } /** * It forces a deadlock checking. */ public void deadlockCheck(DeadlockChecker deadlockChecker) { if (deadlockChecker == null) { return; //no-op } LockRequest holder = current; if (holder != null) { forEachPendingRequest(request -> request.checkDeadlock(deadlockChecker, holder)); } } /** * It tests if the lock has the lock owner. * <p/> * It return {@code true} if the lock owner is the current lock owner or it in the queue. * * @param lockOwner the lock owner to test. * @return {@code true} if it contains the lock owner. */ public boolean containsLockOwner(Object lockOwner) { return lockOwners.containsKey(lockOwner); } private void onCanceled(LockRequest canceled) { if (log.isTraceEnabled()) { log.tracef("Release lock for %s. 
It was canceled.", canceled.getRequestor()); } LockRequest currentLocked = current; if (currentLocked == canceled) { tryAcquire(canceled); } } private boolean casRelease(LockRequest lockPlaceHolder) { return cas(lockPlaceHolder, null); } private boolean remove(Object lockOwner) { return lockOwners.remove(lockOwner) != null; } private void triggerReleased() { if (releaseRunnable != null) { releaseRunnable.run(); } } private boolean cas(LockRequest release, LockRequest acquire) { boolean cas = OWNER_UPDATER.compareAndSet(this, release, acquire); if (log.isTraceEnabled()) { log.tracef("Lock Owner CAS(%s, %s) => %s", release, acquire, cas); } return cas; } private void tryAcquire(LockRequest release) { LockRequest toRelease = release; do { LockRequest toAcquire = peekNextPendingRequest(); if (log.isTraceEnabled()) { log.tracef("Try acquire. Next in queue=%s. Current=%s", toAcquire, current); } if (toAcquire == null && toRelease == null) { return; } else if (toAcquire == null) { //nothing to acquire, but we have to release the current. if (casRelease(toRelease)) { toRelease = null; continue; //in the meanwhile, we could have a new request. recheck! } return; } if (cas(toRelease, toAcquire)) { //we set the current lock owner, so we must remove it from the queue removeFromPendingRequest(toAcquire); if (toAcquire.setAcquire()) { if (log.isTraceEnabled()) { log.tracef("%s successfully acquired the lock.", toAcquire); } return; } if (log.isTraceEnabled()) { log.tracef("%s failed to acquire (invalid state). Retrying.", toAcquire); } //oh oh, probably the nextPending Timed-Out. we are going to retry with the next in queue toRelease = toAcquire; } else { if (log.isTraceEnabled()) { log.tracef("Unable to acquire. 
Lock is held."); } //other thread already set the current lock owner return; } } while (true); } private LockRequest createLockInfo(Object lockOwner, long time, TimeUnit timeUnit) { return new LockPlaceHolder(lockOwner, timeService.expectedEndTime(time, timeUnit)); } private void addToPendingRequests(LockRequest request) { if (pendingRequest == null) { synchronized (this) { if (pendingRequest == null) { pendingRequest = new ConcurrentLinkedQueue<>(); } } } pendingRequest.add(request); } private LockRequest peekNextPendingRequest() { if (pendingRequest == null) { return null; } return pendingRequest.peek(); } private void removeFromPendingRequest(LockRequest request) { assert pendingRequest != null; pendingRequest.remove(request); } private void forEachPendingRequest(Consumer<LockRequest> consumer) { if (pendingRequest == null) { return; } pendingRequest.forEach(consumer); } private static void checkValidCancelState(LockState state) { if (state != LockState.TIMED_OUT && state != LockState.DEADLOCKED) { throw new IllegalArgumentException("LockState " + state + " is not valid to cancel."); } } private abstract class LockRequest implements ExtendedLockPromise { final Object owner; LockRequest(Object owner) { this.owner = owner; } abstract boolean setAcquire(); abstract void checkDeadlock(DeadlockChecker deadlockChecker, LockRequest holder); abstract boolean setReleased(); @Override public final Object getRequestor() { return owner; } @Override public final Object getOwner() { return getLockOwner(); } } private class LockPlaceHolder extends LockRequest { private final long timeout; private final CompletableFuture<LockState> notifier; @SuppressWarnings("CanBeFinal") volatile LockState lockState; private LockPlaceHolder(Object owner, long timeout) { super(owner); this.timeout = timeout; lockState = LockState.WAITING; notifier = new CompletableFuture<>(); } @Override public boolean isAvailable() { checkTimeout(); return lockState != LockState.WAITING; } @Override public 
void lock() throws InterruptedException, TimeoutException { do { LockState currentState = lockState; switch (currentState) { case WAITING: checkTimeout(); await(notifier, timeService.remainingTime(timeout, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS); break; case ACQUIRED: return; //acquired! case RELEASED: throw new LockReleasedException("Requestor '" + owner + "' failed to acquire lock. Lock already released!"); case TIMED_OUT: cleanup(); throw new TimeoutException("Timeout waiting for lock."); case DEADLOCKED: cleanup(); throw new DeadlockDetectedException("DeadLock detected"); default: throw new IllegalStateException("Unknown lock state: " + currentState); } } while (true); } @Override public void addListener(LockListener listener) { if (notifier.isDone() && !notifier.isCompletedExceptionally()) { listener.onEvent(notifier.join()); } else { notifier.thenAccept(listener::onEvent); } } @Override public InvocationStage toInvocationStage() { return toInvocationStage(() -> new TimeoutException("Timeout waiting for lock.")); } @Override public void cancel(LockState state) { checkValidCancelState(state); do { LockState currentState = lockState; switch (currentState) { case WAITING: if (casState(LockState.WAITING, state)) { onCanceled(this); notifyListeners(); return; } break; case ACQUIRED: //no-op, a thread is inside the critical section. case TIMED_OUT: case DEADLOCKED: case RELEASED: return; //no-op, the lock is in final state. 
default: throw new IllegalStateException("Unknown lock state " + currentState); } } while (true); } @Override public InvocationStage toInvocationStage(Supplier<TimeoutException> timeoutSupplier) { if (notifier.isDone()) { return checkState(notifier.getNow(lockState), InvocationStage::completedNullStage, ExceptionSyncInvocationStage::new, timeoutSupplier); } return new SimpleAsyncInvocationStage(notifier.thenApplyAsync(state -> { Object rv = checkState(state, () -> null, throwable -> throwable, timeoutSupplier); if (rv != null) { throw (RuntimeException) rv; } return null; }, nonBlockingExecutor)); } @Override public String toString() { return "LockPlaceHolder{" + "lockState=" + lockState + ", owner=" + owner + '}'; } @Override public void checkDeadlock(DeadlockChecker checker, LockRequest holder) { checkTimeout(); //check timeout before checking the deadlock. check deadlock are more expensive. Object currentOwner = holder.owner; if (lockState == LockState.WAITING && //we are waiting for a lock !owner.equals(currentOwner) && //needed? just to be safe checker.deadlockDetected(owner, currentOwner) && //deadlock has been detected! 
casState(LockState.WAITING, LockState.DEADLOCKED)) { //state could have been changed to available or timed_out onCanceled(this); notifyListeners(); } } @Override public boolean setAcquire() { if (casState(LockState.WAITING, LockState.ACQUIRED)) { notifyListeners(); } return lockState == LockState.ACQUIRED; } @Override public boolean setReleased() { do { LockState state = lockState; switch (state) { case WAITING: if (casState(state, LockState.RELEASED)) { cleanup(); notifyListeners(); return true; } break; case ACQUIRED: case TIMED_OUT: case DEADLOCKED: if (casState(state, LockState.RELEASED)) { cleanup(); return true; } break; case RELEASED: return false; default: throw new IllegalStateException("Unknown lock state " + state); } } while (true); } private <T> T checkState(LockState state, Supplier<T> acquired, Function<Throwable, T> exception, Supplier<TimeoutException> timeoutSupplier) { switch (state) { case ACQUIRED: return acquired.get(); case RELEASED: return exception.apply(new LockReleasedException("Requestor '" + owner + "' failed to acquire lock. Lock already released!")); case TIMED_OUT: cleanup(); return exception.apply(timeoutSupplier.get()); case DEADLOCKED: cleanup(); return exception.apply(new DeadlockDetectedException("DeadLock detected")); default: return exception.apply(new IllegalStateException("Unknown lock state: " + state)); } } private boolean casState(LockState expect, LockState update) { boolean updated = STATE_UPDATER.compareAndSet(this, expect, update); if (updated && log.isTraceEnabled()) { log.tracef("State changed for %s. 
%s => %s", this, expect, update); } return updated; } private void cleanup() { if (remove(owner)) { triggerReleased(); } } private void checkTimeout() { if (lockState == LockState.WAITING && timeService.isTimeExpired(timeout) && casState(LockState.WAITING, LockState.TIMED_OUT)) { onCanceled(this); notifyListeners(); } } private void notifyListeners() { LockState state = lockState; if (state != LockState.WAITING) { notifier.complete(state); } } } private class LockAcquired extends LockRequest { private volatile boolean released; LockAcquired(Object owner) { super(owner); } @Override public void cancel(LockState cause) { checkValidCancelState(cause); //no-op, already acquired } @Override public InvocationStage toInvocationStage(Supplier<TimeoutException> timeoutSupplier) { return toInvocationStage(); } @Override public boolean isAvailable() { return true; } @Override public void lock() { //no-op acquired! } @Override public void addListener(LockListener listener) { listener.onEvent(released ? LockState.RELEASED : LockState.ACQUIRED); } @Override public InvocationStage toInvocationStage() { return InvocationStage.completedNullStage(); } @Override public boolean setAcquire() { throw new IllegalStateException("setAcquire() should never be invoked"); } @Override public void checkDeadlock(DeadlockChecker deadlockChecker, LockRequest holder) { throw new IllegalStateException("checkDeadlock() should never be invoked"); } @Override public boolean setReleased() { released = true; if (remove(owner)) { if (log.isTraceEnabled()) { log.tracef("State changed for %s. ACQUIRED => RELEASED", this); } triggerReleased(); return true; } return false; } @Override public String toString() { return "LockAcquired{" + "released?=" + released + ", owner=" + owner + '}'; } } }
23,176
34.06354
161
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/DefaultPendingLockManager.java
package org.infinispan.util.concurrent.locks.impl; import static org.infinispan.commons.util.Util.prettyPrintTime; import static org.infinispan.factories.KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.concurrent.Callable; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import org.infinispan.context.impl.TxInvocationContext; import org.infinispan.distribution.DistributionManager; import org.infinispan.factories.annotations.ComponentName; import org.infinispan.factories.annotations.Inject; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.interceptors.InvocationStage; import org.infinispan.interceptors.impl.SimpleAsyncInvocationStage; import org.infinispan.transaction.impl.LocalTransaction; import org.infinispan.transaction.impl.TransactionTable; import org.infinispan.transaction.xa.CacheTransaction; import org.infinispan.transaction.xa.GlobalTransaction; import org.infinispan.util.KeyValuePair; import org.infinispan.commons.time.TimeService; import org.infinispan.util.concurrent.TimeoutException; import org.infinispan.util.concurrent.locks.PendingLockListener; import org.infinispan.util.concurrent.locks.PendingLockManager; import org.infinispan.util.concurrent.locks.PendingLockPromise; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; /** * The default implementation for {@link PendingLockManager}. * <p> * In transactional caches, a transaction would wait for transaction originated in a older topology id. It can happen * when topology changes and a backup owner becomes the primary owner. 
* * @author Pedro Ruivo * @since 8.0 */ @Scope(Scopes.NAMED_CACHE) public class DefaultPendingLockManager implements PendingLockManager { private static final Log log = LogFactory.getLog(DefaultPendingLockManager.class); private static final int NO_PENDING_CHECK = -2; @Inject TransactionTable transactionTable; @Inject TimeService timeService; @Inject DistributionManager distributionManager; @Inject @ComponentName(TIMEOUT_SCHEDULE_EXECUTOR) ScheduledExecutorService timeoutExecutor; public DefaultPendingLockManager() { } @Override public PendingLockPromise checkPendingTransactionsForKey(TxInvocationContext<?> ctx, Object key, long time, TimeUnit unit) { final GlobalTransaction globalTransaction = ctx.getGlobalTransaction(); final int txTopologyId = getTopologyId(ctx); if (txTopologyId == NO_PENDING_CHECK) { if (log.isTraceEnabled()) { log.tracef("Skipping pending transactions check for transaction %s", globalTransaction); } return PendingLockPromise.NO_OP; } return createPromise(getTransactionWithLockedKey(txTopologyId, key, globalTransaction), globalTransaction, time, unit); } @Override public PendingLockPromise checkPendingTransactionsForKeys(TxInvocationContext<?> ctx, Collection<Object> keys, long time, TimeUnit unit) { final GlobalTransaction globalTransaction = ctx.getGlobalTransaction(); final int txTopologyId = getTopologyId(ctx); if (txTopologyId == NO_PENDING_CHECK) { if (log.isTraceEnabled()) { log.tracef("Skipping pending transactions check for transaction %s", globalTransaction); } return PendingLockPromise.NO_OP; } return createPromise(getTransactionWithAnyLockedKey(txTopologyId, keys, globalTransaction), globalTransaction, time, unit); } @Override public long awaitPendingTransactionsForKey(TxInvocationContext<?> ctx, Object key, long time, TimeUnit unit) throws InterruptedException { final GlobalTransaction gtx = ctx.getGlobalTransaction(); PendingLockPromise pendingLockPromise = checkPendingTransactionsForKey(ctx, key, time, unit); if 
(log.isTraceEnabled()) { log.tracef("Await for pending transactions for transaction %s using %s", gtx, pendingLockPromise); } return awaitOn(pendingLockPromise, gtx, time, unit); } @Override public long awaitPendingTransactionsForAllKeys(TxInvocationContext<?> ctx, Collection<Object> keys, long time, TimeUnit unit) throws InterruptedException { final GlobalTransaction gtx = ctx.getGlobalTransaction(); PendingLockPromise pendingLockPromise = checkPendingTransactionsForKeys(ctx, keys, time, unit); if (log.isTraceEnabled()) { log.tracef("Await for pending transactions for transaction %s using %s", gtx, pendingLockPromise); } return awaitOn(pendingLockPromise, gtx, time, unit); } private PendingLockPromise createPromise(Collection<PendingTransaction> transactions, GlobalTransaction globalTransaction, long time, TimeUnit unit) { if (transactions.isEmpty()) { if (log.isTraceEnabled()) { log.tracef("No transactions pending for transaction %s", globalTransaction); } return PendingLockPromise.NO_OP; } if (log.isTraceEnabled()) { log.tracef("Transactions pending for transaction %s are %s", globalTransaction, transactions); } PendingLockPromiseImpl pendingLockPromise = new PendingLockPromiseImpl(globalTransaction, time, unit, transactions); pendingLockPromise.scheduleTimeoutTask(); pendingLockPromise.registerListenerInCacheTransactions(); return pendingLockPromise; } private int getTopologyId(TxInvocationContext<?> context) { final CacheTransaction tx = context.getCacheTransaction(); boolean isFromStateTransfer = context.isOriginLocal() && ((LocalTransaction) tx).isFromStateTransfer(); // if the transaction is from state transfer it should not wait for the backup locks of other transactions if (!isFromStateTransfer) { final int topologyId = distributionManager.getCacheTopology().getTopologyId(); if (topologyId != TransactionTable.CACHE_STOPPED_TOPOLOGY_ID) { if (transactionTable.getMinTopologyId() < topologyId) { return topologyId; } } } return NO_PENDING_CHECK; } private 
static TimeoutException timeout(KeyValuePair<CacheTransaction, Object> lockOwner, GlobalTransaction thisGlobalTransaction, long timeout, TimeUnit timeUnit) { return log.unableToAcquireLock(prettyPrintTime(timeout, timeUnit), lockOwner.getValue(), thisGlobalTransaction, lockOwner.getKey().getGlobalTransaction() + " (pending)"); } private Collection<PendingTransaction> getTransactionWithLockedKey(int transactionTopologyId, Object key, GlobalTransaction globalTransaction) { if (key == null) { return Collections.emptyList(); } final Collection<PendingTransaction> pendingTransactions = new ArrayList<>(); forEachTransaction(transaction -> { if (transaction.getTopologyId() < transactionTopologyId && !transaction.getGlobalTransaction().equals(globalTransaction)) { CompletableFuture<Void> keyReleasedFuture = transaction.getReleaseFutureForKey(key); if (keyReleasedFuture != null) { pendingTransactions.add(new PendingTransaction(transaction, Collections.singletonMap(key, keyReleasedFuture))); } } }); return pendingTransactions.isEmpty() ? Collections.emptyList() : pendingTransactions; } private Collection<PendingTransaction> getTransactionWithAnyLockedKey(int transactionTopologyId, Collection<Object> keys, GlobalTransaction globalTransaction) { if (keys.isEmpty()) { return Collections.emptyList(); } final Collection<PendingTransaction> pendingTransactions = new ArrayList<>(); forEachTransaction(transaction -> { if (transaction.getTopologyId() < transactionTopologyId && !transaction.getGlobalTransaction().equals(globalTransaction)) { Map<Object, CompletableFuture<Void>> keyReleaseFuture = transaction.getReleaseFutureForKeys(keys); if (keyReleaseFuture != null) { pendingTransactions.add(new PendingTransaction(transaction, keyReleaseFuture)); } } }); return pendingTransactions.isEmpty() ? Collections.emptyList() : pendingTransactions; } private void forEachTransaction(Consumer<CacheTransaction> consumer) { final Collection<? 
extends CacheTransaction> localTransactions = transactionTable.getLocalTransactions(); final Collection<? extends CacheTransaction> remoteTransactions = transactionTable.getRemoteTransactions(); final int totalSize = localTransactions.size() + remoteTransactions.size(); if (totalSize == 0) { return; } if (!localTransactions.isEmpty()) { localTransactions.forEach(consumer); } if (!remoteTransactions.isEmpty()) { remoteTransactions.forEach(consumer); } } private static long awaitOn(PendingLockPromise pendingLockPromise, GlobalTransaction globalTransaction, long timeout, TimeUnit timeUnit) throws InterruptedException { if (pendingLockPromise == PendingLockPromise.NO_OP) { return timeUnit.toMillis(timeout); } assert pendingLockPromise instanceof PendingLockPromiseImpl; ((PendingLockPromiseImpl) pendingLockPromise).await(); return pendingLockPromise.getRemainingTimeout(); } private static class PendingTransaction { private final CacheTransaction cacheTransaction; private final Map<Object, CompletableFuture<Void>> keyReleased; private PendingTransaction(CacheTransaction cacheTransaction, Map<Object, CompletableFuture<Void>> keyReleased) { this.cacheTransaction = cacheTransaction; this.keyReleased = keyReleased; } @Override public String toString() { return "PendingTransaction{" + "gtx=" + cacheTransaction.getGlobalTransaction().globalId() + ", keys=" + keyReleased.keySet() + '}'; } void afterCompleted(Runnable runnable) { keyReleased.values().forEach(voidCompletableFuture -> voidCompletableFuture.thenRun(runnable)); } KeyValuePair<CacheTransaction, Object> findUnreleasedKey() { for (Map.Entry<Object, CompletableFuture<Void>> entry : keyReleased.entrySet()) { if (!entry.getValue().isDone()) { return new KeyValuePair<>(cacheTransaction, entry.getKey()); } } return null; } } private class PendingLockPromiseImpl implements PendingLockPromise, Callable<Void>, Runnable { private final GlobalTransaction globalTransaction; private final long timeoutNanos; private final 
Collection<PendingTransaction> pendingTransactions; private final long expectedEndTime; private final CompletableFuture<Void> notifier; private ScheduledFuture<Void> timeoutTask; private PendingLockPromiseImpl(GlobalTransaction globalTransaction, long timeout, TimeUnit timeUnit, Collection<PendingTransaction> pendingTransactions) { this.globalTransaction = globalTransaction; this.timeoutNanos = timeUnit.toNanos(timeout); this.pendingTransactions = pendingTransactions; this.expectedEndTime = timeService.expectedEndTime(timeoutNanos, TimeUnit.NANOSECONDS); this.notifier = new CompletableFuture<>(); } @Override public InvocationStage toInvocationStage() { return new SimpleAsyncInvocationStage(notifier); } @Override public boolean isReady() { return notifier.isDone(); } @Override public void addListener(PendingLockListener listener) { notifier.whenComplete((v, throwable) -> listener.onReady()); } @Override public boolean hasTimedOut() { return notifier.isCompletedExceptionally(); } @Override public long getRemainingTimeout() { return timeService.remainingTime(expectedEndTime, TimeUnit.MILLISECONDS); } @Override public Void call() throws Exception { //invoked when the timeout kicks. 
onRelease(); return null; } @Override public void run() { //invoked when a pending backup lock is released onRelease(); } private void onRelease() { KeyValuePair<CacheTransaction, Object> timedOutTransaction = null; for (PendingTransaction transaction : pendingTransactions) { KeyValuePair<CacheTransaction, Object> waiting = transaction.findUnreleasedKey(); if (waiting != null) { // Found a pending transaction if (timeService.isTimeExpired(expectedEndTime)) { // Timed out, complete the promise timedOutTransaction = waiting; break; } else { // Not timed out, wait some more return; } } } if (timeoutTask != null) { timeoutTask.cancel(false); } if (timedOutTransaction == null) { if (log.isTraceEnabled()) log.tracef("All pending transactions have finished for transaction %s", globalTransaction); notifier.complete(null); } else { if (log.isTraceEnabled()) log.tracef("Timed out waiting for pending transaction %s for transaction %s", timedOutTransaction, globalTransaction); notifier.completeExceptionally(timeout(timedOutTransaction, globalTransaction, timeoutNanos, TimeUnit.NANOSECONDS)); } } void registerListenerInCacheTransactions() { for (PendingTransaction transaction : pendingTransactions) { transaction.afterCompleted(this); } // Maybe one of the transactions has finished or removed a backup lock before we added the listener onRelease(); } void scheduleTimeoutTask() { if (!notifier.isDone()) { // schedule(Runnable) creates an extra Callable wrapper object timeoutTask = timeoutExecutor.schedule((Callable<Void>) this, timeoutNanos, TimeUnit.NANOSECONDS); } } void await() throws InterruptedException { try { notifier.get(timeService.remainingTime(expectedEndTime, TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS); } catch (ExecutionException e) { throw new IllegalStateException("Should never happen.", e); } catch (java.util.concurrent.TimeoutException e) { //ignore } } } }
15,611
42.608939
156
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/PerKeyLockContainer.java
package org.infinispan.util.concurrent.locks.impl;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.concurrent.locks.DeadlockChecker;
import org.infinispan.util.concurrent.locks.ExtendedLockPromise;

/**
 * A lock container that lazily creates and maintains a dedicated {@link InfinispanLock} per key.
 * <p>
 * Entries are removed from the backing map as soon as the corresponding lock is neither held nor
 * contended, keeping the map proportional to the number of active locks.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
@Scope(Scopes.NAMED_CACHE)
public class PerKeyLockContainer implements LockContainer {

   private static final int INITIAL_CAPACITY = 32;

   private final ConcurrentMap<Object, InfinispanLock> lockMap;
   @ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
   @Inject protected Executor nonBlockingExecutor;
   private TimeService timeService;

   public PerKeyLockContainer() {
      lockMap = new ConcurrentHashMap<>(INITIAL_CAPACITY);
   }

   @Inject
   void inject(TimeService timeService) {
      this.timeService = timeService;
      // propagate the (re-)injected time service to any lock created before injection ran
      for (InfinispanLock lock : lockMap.values()) {
         lock.setTimeService(timeService);
      }
   }

   @Override
   public ExtendedLockPromise acquire(Object key, Object lockOwner, long time, TimeUnit timeUnit) {
      ByRef<ExtendedLockPromise> acquiredPromise = ByRef.create(null);
      // compute() gives us atomic create-or-acquire per key
      lockMap.compute(key, (theKey, existingLock) -> {
         if (existingLock != null) {
            acquiredPromise.set(existingLock.acquire(lockOwner, time, timeUnit));
            return existingLock;
         }
         // no lock yet for this key; the new lock publishes its promise through the ByRef
         return createInfinispanLock(theKey, lockOwner, acquiredPromise);
      });
      return acquiredPromise.get();
   }

   @Override
   public InfinispanLock getLock(Object key) {
      return lockMap.get(key);
   }

   @Override
   public void release(Object key, Object lockOwner) {
      lockMap.computeIfPresent(key, (theKey, lock) -> {
         lock.release(lockOwner);
         // drop the mapping once nobody holds or waits on this lock
         return lock.isLocked() ? lock : null;
      });
   }

   @Override
   public int getNumLocksHeld() {
      return (int) lockMap.values().stream().filter(InfinispanLock::isLocked).count();
   }

   @Override
   public boolean isLocked(Object key) {
      InfinispanLock lock = lockMap.get(key);
      return lock != null && lock.isLocked();
   }

   @Override
   public int size() {
      return lockMap.size();
   }

   @Override
   public void deadlockCheck(DeadlockChecker deadlockChecker) {
      for (InfinispanLock lock : lockMap.values()) {
         lock.deadlockCheck(deadlockChecker);
      }
   }

   @Override
   public String toString() {
      return "PerKeyLockContainer{" + "locks=" + lockMap + '}';
   }

   private InfinispanLock createInfinispanLock(Object key, Object owner, ByRef<ExtendedLockPromise> promise) {
      return new InfinispanLock(nonBlockingExecutor, timeService, createReleaseRunnable(key), owner, promise);
   }

   private Runnable createReleaseRunnable(Object key) {
      // invoked by the lock when it is fully released; removes the now-idle lock from the map
      return () -> lockMap.computeIfPresent(key, (theKey, lock) -> lock.isLocked() ? lock : null);
   }
}
3,405
28.877193
110
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/StripedLockContainer.java
package org.infinispan.util.concurrent.locks.impl;

import static org.infinispan.commons.util.InfinispanCollections.forEach;

import java.util.Arrays;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

import org.infinispan.commons.time.TimeService;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.StripedHashFunction;
import org.infinispan.util.concurrent.locks.DeadlockChecker;
import org.infinispan.util.concurrent.locks.ExtendedLockPromise;

/**
 * A lock container used with lock striping: a fixed-size array of locks is shared by all keys,
 * each key being hashed onto one stripe. Distinct keys may therefore contend on the same lock.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
@Scope(Scopes.NAMED_CACHE)
public class StripedLockContainer implements LockContainer {

   private final InfinispanLock[] sharedLocks;
   private final StripedHashFunction<Object> hashFunction;

   public StripedLockContainer(int concurrencyLevel) {
      this.hashFunction = new StripedHashFunction<>(concurrencyLevel);
      sharedLocks = new InfinispanLock[hashFunction.getNumSegments()];
   }

   @Inject
   void inject(@ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR) Executor nonBlockingExecutor,
               TimeService timeService) {
      // first injection creates the stripes; a later re-injection only refreshes the time service
      for (int index = 0; index < sharedLocks.length; ++index) {
         InfinispanLock stripe = sharedLocks[index];
         if (stripe == null) {
            sharedLocks[index] = new InfinispanLock(nonBlockingExecutor, timeService);
         } else {
            stripe.setTimeService(timeService);
         }
      }
   }

   @Override
   public ExtendedLockPromise acquire(Object key, Object lockOwner, long time, TimeUnit timeUnit) {
      return getLock(key).acquire(lockOwner, time, timeUnit);
   }

   @Override
   public void release(Object key, Object lockOwner) {
      getLock(key).release(lockOwner);
   }

   @Override
   public InfinispanLock getLock(Object key) {
      // map the key onto its stripe
      return sharedLocks[hashFunction.hashToSegment(key)];
   }

   @Override
   public int getNumLocksHeld() {
      return (int) Arrays.stream(sharedLocks).filter(InfinispanLock::isLocked).count();
   }

   @Override
   public boolean isLocked(Object key) {
      return getLock(key).isLocked();
   }

   @Override
   public int size() {
      return sharedLocks.length;
   }

   @Override
   public void deadlockCheck(DeadlockChecker deadlockChecker) {
      forEach(sharedLocks, lock -> lock.deadlockCheck(deadlockChecker));
   }

   @Override
   public String toString() {
      return "StripedLockContainer{" + "locks=" + Arrays.toString(sharedLocks) + '}';
   }
}
2,747
27.625
102
java
null
infinispan-main/core/src/main/java/org/infinispan/util/concurrent/locks/impl/DefaultLockManager.java
package org.infinispan.util.concurrent.locks.impl;

import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater.newUpdater;
import static org.infinispan.commons.util.Util.toStr;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.Function;
import java.util.function.Supplier;

import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.ExceptionSyncInvocationStage;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.interceptors.impl.SimpleAsyncInvocationStage;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.concurrent.locks.DeadlockDetectedException;
import org.infinispan.util.concurrent.locks.ExtendedLockPromise;
import org.infinispan.util.concurrent.locks.KeyAwareLockListener;
import org.infinispan.util.concurrent.locks.KeyAwareLockPromise;
import org.infinispan.util.concurrent.locks.LockListener;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.concurrent.locks.LockPromise;
import org.infinispan.util.concurrent.locks.LockState;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * The default {@link LockManager} implementation for transactional and non-transactional caches.
 *
 * @author Pedro Ruivo
 * @since 8.0
 */
@MBean(objectName = "LockManager", description = "Manager that handles MVCC locks for entries")
@Scope(Scopes.NAMED_CACHE)
public class DefaultLockManager implements LockManager {

   private static final Log log = LogFactory.getLog(DefaultLockManager.class);
   // Flips CompositeLockPromise.lockState away from ACQUIRED at most once: the first failing
   // lock wins and its state is propagated to the whole composite.
   private static final AtomicReferenceFieldUpdater<CompositeLockPromise, LockState> UPDATER =
         newUpdater(CompositeLockPromise.class, LockState.class, "lockState");

   @Inject LockContainer lockContainer;
   @Inject Configuration configuration;
   // Only used to schedule lock-acquisition timeout tasks.
   @Inject @ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
   ScheduledExecutorService scheduler;
   // Used to resume continuations of async lock stages off the completing thread.
   @Inject @ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
   Executor nonBlockingExecutor;

   @Override
   public KeyAwareLockPromise lock(Object key, Object lockOwner, long time, TimeUnit unit) {
      Objects.requireNonNull(key, "Key must be non null");
      Objects.requireNonNull(lockOwner, "Lock owner must be non null");
      Objects.requireNonNull(unit, "Time unit must be non null");

      if (log.isTraceEnabled()) {
         log.tracef("Lock key=%s for owner=%s. timeout=%s (%s)", toStr(key), lockOwner, time, unit);
      }

      if (key == lockOwner) {
         // If the lock is already owned by this lock owner there is no reason to attempt the lock needlessly
         InfinispanLock lock = lockContainer.getLock(key);
         if (lock != null && lock.getLockOwner() == key) {
            if (log.isTraceEnabled()) log.tracef("Not locking key=%s as it is already held by the same lock owner", key);
            return KeyAwareLockPromise.NO_OP;
         }
      }

      ExtendedLockPromise promise = lockContainer.acquire(key, lockOwner, time, unit);
      // Wrap the raw promise so timeouts report the key, and arm the timeout task immediately.
      return new KeyAwareExtendedLockPromise(promise, key, unit.toMillis(time)).scheduleLockTimeoutTask(scheduler);
   }

   @Override
   public KeyAwareLockPromise lockAll(Collection<?> keys, Object lockOwner, long time, TimeUnit unit) {
      Objects.requireNonNull(keys, "Keys must be non null");
      Objects.requireNonNull(lockOwner, "Lock owner must be non null");
      Objects.requireNonNull(unit, "Time unit must be non null");

      if (keys.isEmpty()) {
         if (log.isTraceEnabled()) {
            log.tracef("Lock all: no keys found for owner=%s", lockOwner);
         }
         return KeyAwareLockPromise.NO_OP;
      } else if (keys.size() == 1) {
         //although will have the cost of creating an iterator, at least, we don't need to enter the synchronized section.
         return lock(keys.iterator().next(), lockOwner, time, unit);
      }

      final Set<Object> uniqueKeys = filterDistinctKeys(keys);

      if (uniqueKeys.size() == 1) {
         //although will have the cost of creating an iterator, at least, we don't need to enter the synchronized section.
         return lock(uniqueKeys.iterator().next(), lockOwner, time, unit);
      }

      if (log.isTraceEnabled()) {
         log.tracef("Lock all keys=%s for owner=%s. timeout=%s (%s)", toStr(uniqueKeys), lockOwner, time, unit);
      }

      final CompositeLockPromise compositeLockPromise = new CompositeLockPromise(uniqueKeys.size(), nonBlockingExecutor);
      //needed to avoid internal deadlock when 2 or more lock owner invokes this method with the same keys.
      //ordering will not solve the problem since acquire() is non-blocking and each lock owner can iterate faster/slower than the other.
      synchronized (this) {
         for (Object key : uniqueKeys) {
            compositeLockPromise.addLock(new KeyAwareExtendedLockPromise(lockContainer.acquire(key, lockOwner, time, unit), key, unit.toMillis(time)));
         }
      }
      // One shared timeout task for the whole group; must be armed before the listener list is sealed.
      compositeLockPromise.scheduleLockTimeoutTask(scheduler, time, unit);
      compositeLockPromise.markListAsFinal();
      return compositeLockPromise;
   }

   // Returns the keys as a Set; reuses the caller's Set when possible to avoid a copy.
   private Set<Object> filterDistinctKeys(Collection<?> collection) {
      if (collection instanceof Set) {
         //noinspection unchecked
         return (Set<Object>) collection;
      } else {
         return new HashSet<>(collection);
      }
   }

   @Override
   public void unlock(Object key, Object lockOwner) {
      if (log.isTraceEnabled()) {
         log.tracef("Release lock for key=%s. owner=%s", key, lockOwner);
      }
      lockContainer.release(key, lockOwner);
   }

   @Override
   public void unlockAll(Collection<?> keys, Object lockOwner) {
      if (log.isTraceEnabled()) {
         log.tracef("Release locks for keys=%s. owner=%s", toStr(keys), lockOwner);
      }
      if (keys.isEmpty()) {
         return;
      }
      for (Object key : keys) {
         // If the key is the lock owner that means it was explicitly locked, which can only be unlocked via the single
         // argument unlock method. This is used by a cache that has the lock owner specifically overridden
         if (key == lockOwner) {
            if (log.isTraceEnabled()) log.tracef("Ignoring key %s as it matches lock owner", key);
         } else {
            lockContainer.release(key, lockOwner);
         }
      }
   }

   @Override
   public void unlockAll(InvocationContext context) {
      unlockAll(context.getLockedKeys(), context.getLockOwner());
      context.clearLockedKeys();
      if (context instanceof TxInvocationContext<?>) {
         // this may be on overkill but if the TM's Transaction Reaper aborts a transaction and a lock is not acquired
         // (i.e it is in WAITING state) when the RollbackCommand is executed, the lock is not released.
         // In other words, WAITING state moves to ACQUIRED state and the RollbackCommand is never executed again
         // leaving the lock acquired forever
         unlockAll(((TxInvocationContext<?>) context).getAffectedKeys(), context.getLockOwner());
      }
   }

   @Override
   public boolean ownsLock(Object key, Object lockOwner) {
      Object currentOwner = getOwner(key);
      return currentOwner != null && currentOwner.equals(lockOwner);
   }

   @Override
   public boolean isLocked(Object key) {
      return getOwner(key) != null;
   }

   @Override
   public Object getOwner(Object key) {
      InfinispanLock lock = lockContainer.getLock(key);
      return lock == null ? null : lock.getLockOwner();
   }

   @Override
   public String printLockInfo() {
      return lockContainer.toString();
   }

   @Override
   @ManagedAttribute(description = "The number of exclusive locks that are held.", displayName = "Number of locks held")
   public int getNumberOfLocksHeld() {
      return lockContainer.getNumLocksHeld();
   }

   @ManagedAttribute(description = "The concurrency level that the MVCC Lock Manager has been configured with.", displayName = "Concurrency level", dataType = DataType.TRAIT)
   public int getConcurrencyLevel() {
      return configuration.locking().concurrencyLevel();
   }

   @ManagedAttribute(description = "The number of exclusive locks that are available.", displayName = "Number of locks available")
   public int getNumberOfLocksAvailable() {
      return lockContainer.size() - lockContainer.getNumLocksHeld();
   }

   @Override
   public InfinispanLock getLock(Object key) {
      return lockContainer.getLock(key);
   }

   /**
    * Decorates an {@link ExtendedLockPromise} with the key being locked so timeout errors can
    * name the key. Also implements {@link Callable} (the scheduled timeout task cancels the
    * promise with {@link LockState#TIMED_OUT}) and {@link Supplier} (builds the key-aware
    * {@link TimeoutException}).
    */
   private static class KeyAwareExtendedLockPromise implements KeyAwareLockPromise, ExtendedLockPromise, Callable<Void>,
         Supplier<TimeoutException> {

      private final ExtendedLockPromise lockPromise;
      private final Object key;
      private final long timeoutMillis;

      private KeyAwareExtendedLockPromise(ExtendedLockPromise lockPromise, Object key, long timeoutMillis) {
         this.lockPromise = lockPromise;
         this.key = key;
         this.timeoutMillis = timeoutMillis;
      }

      @Override
      public void cancel(LockState cause) {
         lockPromise.cancel(cause);
      }

      @Override
      public Object getRequestor() {
         return lockPromise.getRequestor();
      }

      @Override
      public Object getOwner() {
         return lockPromise.getOwner();
      }

      @Override
      public InvocationStage toInvocationStage(Supplier<TimeoutException> timeoutSupplier) {
         return lockPromise.toInvocationStage(timeoutSupplier);
      }

      @Override
      public boolean isAvailable() {
         return lockPromise.isAvailable();
      }

      @Override
      public void lock() throws InterruptedException, TimeoutException {
         try {
            lockPromise.lock();
         } catch (TimeoutException e) {
            // replace the generic timeout with one that reports the key, requestor and owner
            throw get();
         }
      }

      @Override
      public void addListener(LockListener listener) {
         lockPromise.addListener(listener);
      }

      @Override
      public InvocationStage toInvocationStage() {
         // 'this' is the Supplier producing the key-aware TimeoutException
         return toInvocationStage(this);
      }

      @Override
      public void addListener(KeyAwareLockListener listener) {
         lockPromise.addListener(state -> listener.onEvent(key, state));
      }

      @Override
      public Void call() throws Exception {
         // scheduled timeout fired: cancel the pending acquisition
         lockPromise.cancel(LockState.TIMED_OUT);
         return null;
      }

      @Override
      public TimeoutException get() {
         return log.unableToAcquireLock(Util.prettyPrintTime(timeoutMillis), toStr(key),
               lockPromise.getRequestor(), lockPromise.getOwner());
      }

      // Arms the timeout task for this single-key promise; the task is cancelled as soon as the
      // lock becomes available (or fails) so it normally never fires.
      KeyAwareExtendedLockPromise scheduleLockTimeoutTask(ScheduledExecutorService executorService) {
         assert executorService != null;
         if (isAvailable()) {
            return this;
         }
         if (timeoutMillis > 0) {
            ScheduledFuture<?> future = executorService.schedule(this, timeoutMillis, TimeUnit.MILLISECONDS);
            lockPromise.addListener((state -> future.cancel(false)));
         } else {
            //zero lock acquisition and we aren't available yet. Trigger timeout
            lockPromise.cancel(LockState.TIMED_OUT);
         }
         return this;
      }
   }

   /**
    * Aggregates the promises of a multi-key {@code lockAll} into a single promise that completes
    * when every underlying lock is acquired, or fails as soon as any one of them fails.
    */
   private static class CompositeLockPromise implements KeyAwareLockPromise, LockListener, Callable<Void> {

      private final List<KeyAwareExtendedLockPromise> lockPromiseList;
      // Completed with the final state; listeners are chained off this future.
      private final CompletableFuture<LockState> notifier;
      private final Executor executor;
      // Written only through UPDATER (CAS from ACQUIRED) so the first failure state wins.
      @SuppressWarnings("CanBeFinal")
      volatile LockState lockState = LockState.ACQUIRED;
      // Number of per-key promises still pending; the composite completes when it reaches zero.
      private final AtomicInteger countersLeft = new AtomicInteger();
      private volatile ScheduledFuture<Void> timeoutTask;

      private CompositeLockPromise(int size, Executor executor) {
         lockPromiseList = new ArrayList<>(size);
         this.executor = executor;
         notifier = new CompletableFuture<>();
      }

      void addLock(KeyAwareExtendedLockPromise lockPromise) {
         lockPromiseList.add(lockPromise);
      }

      // Seals the promise list and subscribes to every member; must be the last setup step.
      void markListAsFinal() {
         countersLeft.set(lockPromiseList.size());
         for (LockPromise lockPromise : lockPromiseList) {
            lockPromise.addListener(this);
         }
      }

      @Override
      public boolean isAvailable() {
         return notifier.isDone();
      }

      @Override
      public void lock() throws InterruptedException, TimeoutException {
         InterruptedException interruptedException = null;
         TimeoutException timeoutException = null;
         DeadlockDetectedException deadlockException = null;
         RuntimeException runtimeException = null;
         for (ExtendedLockPromise lockPromise : lockPromiseList) {
            try {
               //we still need to invoke lock in all the locks.
               lockPromise.lock();
            } catch (InterruptedException e) {
               interruptedException = e;
            } catch (TimeoutException e) {
               timeoutException = e;
            } catch (DeadlockDetectedException e) {
               deadlockException = e;
            } catch (RuntimeException e) {
               runtimeException = e;
            }
         }
         // rethrow by priority: interruption first, then timeout, deadlock and other runtime errors
         if (interruptedException != null) {
            throw interruptedException;
         } else if (timeoutException != null) {
            throw timeoutException;
         } else if (deadlockException != null) {
            throw deadlockException;
         } else if (runtimeException != null) {
            throw runtimeException;
         }
      }

      @Override
      public void addListener(LockListener listener) {
         notifier.thenAccept(listener::onEvent);
      }

      @Override
      public InvocationStage toInvocationStage() {
         if (notifier.isDone()) {
            return checkState(notifier.getNow(lockState), InvocationStage::completedNullStage, ExceptionSyncInvocationStage::new);
         } else {
            // resume on the executor (non-blocking pool) rather than on the lock-releasing thread
            return new SimpleAsyncInvocationStage(notifier.thenApplyAsync(lockState -> {
               Object rv = checkState(lockState, () -> null, throwable -> throwable);
               if (rv != null) {
                  throw (RuntimeException) rv;
               }
               return null;
            }, executor));
         }
      }

      @Override
      public void onEvent(LockState state) {
         if (notifier.isDone()) {
            //already finished
            return;
         }
         //each lock will invoke this
         if (state != LockState.ACQUIRED) {
            cancelAll(state);
            return;
         }
         if (countersLeft.decrementAndGet() == 0) {
            // last pending lock acquired: the composite is complete
            cancelTimeoutTask();
            notifier.complete(lockState);
         }
      }

      private void cancelAll(LockState state) {
         // CAS guarantees only the first failing state completes the composite
         if (UPDATER.compareAndSet(this, LockState.ACQUIRED, state)) {
            cancelTimeoutTask();
            //complete the future before cancel other locks. the remaining locks will be invoke onEvent()
            notifier.complete(state);
            for (KeyAwareExtendedLockPromise promise : lockPromiseList) {
               promise.cancel(state);
            }
         }
      }

      @Override
      public void addListener(KeyAwareLockListener listener) {
         for (KeyAwareExtendedLockPromise lockPromise : lockPromiseList) {
            lockPromise.addListener(listener);
         }
      }

      @Override
      public Void call() throws Exception {
         // shared timeout fired: time out every pending member promise
         for (KeyAwareExtendedLockPromise promise : lockPromiseList) {
            promise.cancel(LockState.TIMED_OUT);
         }
         return null;
      }

      /**
       * Schedule a timeout task. Must be called before {@link #markListAsFinal()}
       */
      void scheduleLockTimeoutTask(ScheduledExecutorService executorService, long time, TimeUnit unit) {
         if (time > 0 && !isAvailable()) {
            timeoutTask = executorService.schedule(this, time, unit);
         }
      }

      // Maps a final LockState to either the "acquired" result or the first exception raised by
      // re-invoking lock() on the member promises (which rethrows their failure cause).
      private <T> T checkState(LockState state, Supplier<T> acquired, Function<Throwable, T> exception) {
         if (state == LockState.ACQUIRED) {
            return acquired.get();
         }
         T rv = null;
         for (LockPromise lockPromise : lockPromiseList) {
            try {
               lockPromise.lock();
            } catch (Throwable throwable) {
               if (rv == null) {
                  rv = exception.apply(throwable);
               }
            }
         }
         return rv;
      }

      private void cancelTimeoutTask() {
         if (timeoutTask != null) {
            timeoutTask.cancel(false);
         }
      }
   }
}
17,714
36.060669
174
java
null
infinispan-main/core/src/main/java/org/infinispan/util/stream/Streams.java
package org.infinispan.util.stream;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * A collection of I/O stream related utility methods.
 * <p>
 * Exceptions that are thrown and not explicitly declared are ignored.
 *
 * @author <a href="mailto:jason@planet57.com">Jason Dillon</a>
 * @since 4.2
 * @deprecated Since 10.0. This will be removed in next major version.
 */
@Deprecated
public class Streams {
   private static final Log log = LogFactory.getLog(Streams.class);

   /////////////////////////////////////////////////////////////////////////
   //                               Closing                                //
   /////////////////////////////////////////////////////////////////////////

   /**
    * Attempt to close an <tt>InputStream</tt>.
    *
    * @param stream <tt>InputStream</tt> to attempt to close.
    * @return <tt>True</tt> if stream was closed (or stream was null), or
    *         <tt>false</tt> if an exception was thrown.
    */
   public static boolean close(final InputStream stream) {
      // do not attempt to close null stream, but return success
      if (stream == null) {
         return true;
      }
      boolean success = true;
      try {
         stream.close();
      } catch (IOException e) {
         success = false;
      }
      return success;
   }

   /**
    * Attempt to close an <tt>OutputStream</tt>.
    *
    * @param stream <tt>OutputStream</tt> to attempt to close.
    * @return <tt>True</tt> if stream was closed (or stream was null), or
    *         <tt>false</tt> if an exception was thrown.
    */
   public static boolean close(final OutputStream stream) {
      // do not attempt to close null stream, but return success
      if (stream == null) {
         return true;
      }
      boolean success = true;
      try {
         stream.close();
      } catch (IOException e) {
         success = false;
      }
      return success;
   }

   /**
    * Attempt to close an <tt>InputStream</tt> or <tt>OutputStream</tt>.
    *
    * @param stream Stream to attempt to close.
    * @return <tt>True</tt> if stream was closed (or stream was null), or
    *         <tt>false</tt> if an exception was thrown.
    * @throws IllegalArgumentException Stream is not an <tt>InputStream</tt> or
    *                                  <tt>OuputStream</tt>.
    */
   public static boolean close(final Object stream) {
      boolean success;

      if (stream instanceof InputStream) {
         success = close((InputStream) stream);
      } else if (stream instanceof OutputStream) {
         success = close((OutputStream) stream);
      } else {
         throw new IllegalArgumentException("stream is not an InputStream or OutputStream");
      }

      return success;
   }

   /**
    * Attempt to close an array of <tt>InputStream</tt>s.
    *
    * @param streams Array of <tt>InputStream</tt>s to attempt to close.
    * @return <tt>True</tt> if all streams were closed, or <tt>false</tt> if an
    *         exception was thrown.
    */
   public static boolean close(final InputStream[] streams) {
      boolean success = true;

      for (InputStream stream : streams) {
         boolean rv = close(stream);
         if (!rv) success = false;
      }

      return success;
   }

   /**
    * Attempt to close an array of <tt>OutputStream</tt>s.
    *
    * @param streams Array of <tt>OutputStream</tt>s to attempt to close.
    * @return <tt>True</tt> if all streams were closed, or <tt>false</tt> if an
    *         exception was thrown.
    */
   public static boolean close(final OutputStream[] streams) {
      boolean success = true;

      for (OutputStream stream : streams) {
         boolean rv = close(stream);
         if (!rv) success = false;
      }

      return success;
   }

   /**
    * Attempt to close an array of <tt>InputStream</tt>a and/or
    * <tt>OutputStream</tt>s.
    *
    * @param streams Array of streams to attempt to close.
    * @return <tt>True</tt> if all streams were closed, or <tt>false</tt> if an
    *         exception was thrown.
    * @throws IllegalArgumentException Stream is not an <tt>InputStream</tt> or
    *                                  <tt>OuputStream</tt>. Closing stops at
    *                                  the last valid stream object in this
    *                                  case.
    */
   public static boolean close(final Object[] streams) {
      boolean success = true;

      for (Object stream : streams) {
         boolean rv = close(stream);
         if (!rv) success = false;
      }

      return success;
   }

   /**
    * Attempt to flush and close an <tt>OutputStream</tt>.
    *
    * @param stream <tt>OutputStream</tt> to attempt to flush and close.
    * @return <tt>True</tt> if stream was flushed and closed, or <tt>false</tt>
    *         if an exception was thrown.
    */
   public static boolean fclose(final OutputStream stream) {
      return flush(stream) && close(stream);
   }

   /**
    * Attempt to flush and close an array of <tt>OutputStream</tt>s.
    *
    * @param streams <tt>OutputStream</tt>s to attempt to flush and close.
    * @return <tt>True</tt> if all streams were flushed and closed, or
    *         <tt>false</tt> if an exception was thrown.
    */
   public static boolean fclose(final OutputStream[] streams) {
      boolean success = true;

      for (OutputStream stream : streams) {
         boolean rv = fclose(stream);
         if (!rv) success = false;
      }

      return success;
   }

   /////////////////////////////////////////////////////////////////////////
   //                               Flushing                               //
   /////////////////////////////////////////////////////////////////////////

   /**
    * Attempt to flush an <tt>OutputStream</tt>.
    *
    * @param stream <tt>OutputStream</tt> to attempt to flush.
    * @return <tt>True</tt> if stream was flushed (or stream was null), or
    *         <tt>false</tt> if an exception was thrown.
    */
   public static boolean flush(final OutputStream stream) {
      // do not attempt to flush null stream, but return success
      if (stream == null) {
         return true;
      }
      boolean success = true;
      try {
         stream.flush();
      } catch (IOException e) {
         success = false;
      }
      return success;
   }

   /**
    * Attempt to flush an array of <tt>OutputStream</tt>s.
    *
    * @param streams <tt>OutputStream</tt>s to attempt to flush.
    * @return <tt>True</tt> if all streams were flushed, or <tt>false</tt> if
    *         an exception was thrown.
    */
   public static boolean flush(final OutputStream[] streams) {
      boolean success = true;

      for (OutputStream stream : streams) {
         boolean rv = flush(stream);
         if (!rv) success = false;
      }

      return success;
   }

   /////////////////////////////////////////////////////////////////////////
   //                                 Misc                                 //
   /////////////////////////////////////////////////////////////////////////

   /**
    * The default buffer size that will be used for buffered operations.
    */
   public static final int DEFAULT_BUFFER_SIZE = 2048;

   /**
    * Copy all of the bytes from the input stream to the output stream.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @param buffer The buffer to use while copying.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copy(final InputStream input,
                           final OutputStream output,
                           final byte[] buffer)
         throws IOException {
      long total = 0;
      int read;

      // hoist the trace check out of the copy loop
      final boolean trace = log.isTraceEnabled();
      if (trace) {
         log.tracef("copying %s to %s with buffer size: %d", input, output, buffer.length);
      }

      while ((read = input.read(buffer)) != -1) {
         output.write(buffer, 0, read);
         total += read;
         if (trace) {
            log.tracef("bytes read: %d; total bytes read: %d", read, total);
         }
      }

      return total;
   }

   /**
    * Copy all of the bytes from the input stream to the output stream.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @param size   The size of the buffer to use while copying.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copy(final InputStream input,
                           final OutputStream output,
                           final int size)
         throws IOException {
      return copy(input, output, new byte[size]);
   }

   /**
    * Copy all of the bytes from the input stream to the output stream.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copy(final InputStream input,
                           final OutputStream output)
         throws IOException {
      return copy(input, output, DEFAULT_BUFFER_SIZE);
   }

   /**
    * Copy all of the bytes from the input stream to the output stream wrapping
    * streams in buffers as needed.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copyb(InputStream input,
                            OutputStream output)
         throws IOException {
      if (!(input instanceof BufferedInputStream)) {
         input = new BufferedInputStream(input);
      }

      if (!(output instanceof BufferedOutputStream)) {
         output = new BufferedOutputStream(output);
      }

      long bytes = copy(input, output, DEFAULT_BUFFER_SIZE);

      output.flush();

      return bytes;
   }

   /**
    * Copy a limited number of bytes from the input stream to the output
    * stream.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @param buffer The buffer to use while copying.
    * @param length The maximum number of bytes to copy; non-positive values
    *               copy nothing.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copySome(final InputStream input,
                               final OutputStream output,
                               final byte[] buffer,
                               final long length)
         throws IOException {
      long total = 0;
      int read;
      int readLength;

      final boolean trace = log.isTraceEnabled();

      // nothing to copy for a non-positive byte budget
      if (length <= 0) {
         return 0;
      }

      // setup the initial readLength; if length is less than the buffer size
      // then we only want to read that much. Compare as longs BEFORE narrowing:
      // casting a length > Integer.MAX_VALUE to int would truncate it into a
      // bogus (possibly negative) read length.
      readLength = (int) Math.min(length, (long) buffer.length);
      if (trace) {
         log.tracef("initial read length: %d", readLength);
      }

      while (readLength != 0 && (read = input.read(buffer, 0, readLength)) != -1) {
         if (trace) log.tracef("read bytes: %d", read);
         output.write(buffer, 0, read);
         total += read;
         if (trace) log.tracef("total bytes read: %d", total);

         // update the readLength (again comparing as longs)
         readLength = (int) Math.min(length - total, (long) buffer.length);
         if (trace) log.tracef("next read length: %d", readLength);
      }

      return total;
   }

   /**
    * Copy a limited number of bytes from the input stream to the output
    * stream.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @param size   The size of the buffer to use while copying.
    * @param length The maximum number of bytes to copy.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copySome(final InputStream input,
                               final OutputStream output,
                               final int size,
                               final long length)
         throws IOException {
      return copySome(input, output, new byte[size], length);
   }

   /**
    * Copy a limited number of bytes from the input stream to the output
    * stream.
    *
    * @param input  Stream to read bytes from.
    * @param output Stream to write bytes to.
    * @param length The maximum number of bytes to copy.
    * @return The total number of bytes copied.
    * @throws IOException Failed to copy bytes.
    */
   public static long copySome(final InputStream input,
                               final OutputStream output,
                               final long length)
         throws IOException {
      return copySome(input, output, DEFAULT_BUFFER_SIZE, length);
   }
}
13,185
30.773494
91
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/package-info.java
/** * Infinispan's logging abstractions that delegate to either JDK or Log4J logging engines. */ package org.infinispan.util.logging;
136
26.4
90
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/Log.java
package org.infinispan.util.logging; import static org.jboss.logging.Logger.Level.DEBUG; import static org.jboss.logging.Logger.Level.ERROR; import static org.jboss.logging.Logger.Level.FATAL; import static org.jboss.logging.Logger.Level.INFO; import static org.jboss.logging.Logger.Level.TRACE; import static org.jboss.logging.Logger.Level.WARN; import java.io.File; import java.io.IOException; import java.lang.reflect.Method; import java.nio.file.Path; import java.security.Permission; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; import javax.transaction.xa.XAResource; import org.infinispan.commands.ReplicableCommand; import org.infinispan.commons.CacheConfigurationException; import org.infinispan.commons.CacheException; import org.infinispan.commons.CacheListenerException; import org.infinispan.commons.IllegalLifecycleStateException; import org.infinispan.commons.configuration.io.Location; import org.infinispan.commons.dataconversion.EncodingException; import org.infinispan.commons.dataconversion.MediaType; import org.infinispan.commons.marshall.AdvancedExternalizer; import org.infinispan.commons.marshall.MarshallingException; import org.infinispan.commons.util.IntSet; import org.infinispan.commons.util.TypedProperties; import org.infinispan.configuration.cache.BackupFailurePolicy; import org.infinispan.configuration.cache.CacheMode; import org.infinispan.configuration.cache.Configuration; import org.infinispan.configuration.cache.StorageType; import org.infinispan.container.versioning.EntryVersion; import org.infinispan.interceptors.impl.ContainerFullException; import org.infinispan.jmx.JmxDomainConflictException; import org.infinispan.logging.annotations.Description; import org.infinispan.partitionhandling.AvailabilityException; import org.infinispan.partitionhandling.AvailabilityMode; import 
org.infinispan.persistence.spi.NonBlockingStore; import org.infinispan.persistence.spi.PersistenceException; import org.infinispan.remoting.RemoteException; import org.infinispan.remoting.responses.Response; import org.infinispan.remoting.transport.Address; import org.infinispan.remoting.transport.jgroups.SuspectException; import org.infinispan.topology.CacheJoinException; import org.infinispan.topology.CacheTopology; import org.infinispan.topology.MissingMembersException; import org.infinispan.transaction.WriteSkewException; import org.infinispan.transaction.impl.LocalTransaction; import org.infinispan.transaction.xa.GlobalTransaction; import org.infinispan.transaction.xa.InvalidTransactionException; import org.infinispan.transaction.xa.recovery.RecoveryAwareRemoteTransaction; import org.infinispan.transaction.xa.recovery.RecoveryAwareTransaction; import org.infinispan.util.ByteString; import org.infinispan.util.concurrent.TimeoutException; import org.jboss.logging.BasicLogger; import org.jboss.logging.Logger; import org.jboss.logging.annotations.Cause; import org.jboss.logging.annotations.LogMessage; import org.jboss.logging.annotations.Message; import org.jboss.logging.annotations.MessageLogger; import org.jboss.logging.annotations.Once; import org.jboss.logging.annotations.Param; import org.jgroups.View; import jakarta.transaction.Synchronization; import jakarta.transaction.TransactionManager; /** * Infinispan's log abstraction layer on top of JBoss Logging. * <p/> * It contains explicit methods for all INFO or above levels so that they can * be internationalized. For the core module, message ids ranging from 0001 * to 0900 inclusively have been reserved. * <p/> * <code> Log log = LogFactory.getLog( getClass() ); </code> The above will get * you an instance of <tt>Log</tt>, which can be used to generate log messages * either via JBoss Logging which then can delegate to Log4J (if the libraries * are present) or (if not) the built-in JDK logger. 
* <p/> * In addition to the 6 log levels available, this framework also supports * parameter interpolation, similar to the JDKs {@link String#format(String, Object...)} * method. What this means is, that the following block: * <code> if (log.isTraceEnabled()) { log.trace("This is a message " + message + " and some other value is " + value); } * </code> * <p/> * ... could be replaced with ... * <p/> * <code> if (log.isTraceEnabled()) log.tracef("This is a message %s and some other value is %s", message, value); * </code> * <p/> * This greatly enhances code readability. * <p/> * If you are passing a <tt>Throwable</tt>, note that this should be passed in * <i>before</i> the vararg parameter list. * <p/> * * @author Manik Surtani * @api.private * @since 4.0 */ @MessageLogger(projectCode = "ISPN") public interface Log extends BasicLogger { String LOG_ROOT = "org.infinispan."; Log CONFIG = Logger.getMessageLogger(Log.class, LOG_ROOT + "CONFIG"); Log CLUSTER = Logger.getMessageLogger(Log.class, LOG_ROOT + "CLUSTER"); Log CONTAINER = Logger.getMessageLogger(Log.class, LOG_ROOT + "CONTAINER"); Log PERSISTENCE = Logger.getMessageLogger(Log.class, LOG_ROOT + "PERSISTENCE"); Log SECURITY = Logger.getMessageLogger(Log.class, LOG_ROOT + "SECURITY"); Log XSITE = Logger.getMessageLogger(Log.class, LOG_ROOT + "XSITE"); // @LogMessage(level = WARN) // @Message(value = "Unable to load %s from cache loader", id = 1) // void unableToLoadFromCacheLoader(Object key, @Cause PersistenceException cle); // @LogMessage(level = WARN) // @Message(value = "Field %s not found!!", id = 2) // void fieldNotFound(String fieldName); // @LogMessage(level = WARN) // @Message(value = "Property %s could not be replaced as intended!", id = 3) // void propertyCouldNotBeReplaced(String line); // @LogMessage(level = WARN) // @Message(value = "Unexpected error reading properties", id = 4) // void errorReadingProperties(@Cause IOException e); // // @LogMessage(level = WARN) // @Message(value = "Detected write 
skew on key [%s]. Another process has changed the entry since we last read it! Unable to copy entry for update.", id = 5) // void unableToCopyEntryForUpdate(Object key); // @LogMessage(level = WARN) // @Message(value = "Failed remote execution on node %s", id = 6) // void remoteExecutionFailed(Address address, @Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Failed local execution ", id = 7) // void localExecutionFailed(@Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Can not select %s random members for %s", id = 8) // void cannotSelectRandomMembers(int numNeeded, List<Address> members); // @LogMessage(level = INFO) // @Message(value = "DistributionManager not yet joined the cluster. Cannot do anything about other concurrent joiners.", id = 14) // void distributionManagerNotJoined(); // @LogMessage(level = WARN) // @Message(value = "DistributionManager not started after waiting up to 5 minutes! Not rehashing!", id = 15) // void distributionManagerNotStarted(); @LogMessage(level = ERROR) @Message(value = "Problem encountered when applying state for key %s!", id = 16) void problemApplyingStateForKey(Object key, @Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Unable to apply prepare %s", id = 18) // void unableToApplyPrepare(PrepareCommand pc, @Cause Throwable t); // @LogMessage(level = INFO) // @Message(value = "Couldn't acquire shared lock", id = 19) // void couldNotAcquireSharedLock(); @LogMessage(level = WARN) @Message(value = "Expected just one response; got %s", id = 21) void expectedJustOneResponse(Map<Address, Response> lr); @LogMessage(level = INFO) @Message(value = "wakeUpInterval is <= 0, not starting expired purge thread", id = 25) void notStartingEvictionThread(); @LogMessage(level = WARN) @Message(value = "Caught exception purging data container!", id = 26) void exceptionPurgingDataContainer(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "Could not acquire lock for 
eviction of %s", id = 27) // void couldNotAcquireLockForEviction(Object key, @Cause Exception e); @LogMessage(level = WARN) @Message(value = "Unable to passivate entry under %s", id = 28) void unableToPassivateEntry(Object key, @Cause Throwable e); // @LogMessage(level = INFO) // @Message(value = "Passivating all entries to disk", id = 29) // void passivatingAllEntries(); // // @LogMessage(level = INFO) // @Message(value = "Passivated %d entries in %s", id = 30) // void passivatedEntries(long numEntries, String duration); // @LogMessage(level = TRACE) // @Message(value = "MBeans were successfully registered to the platform MBean server.", id = 31) // void mbeansSuccessfullyRegistered(); // @LogMessage(level = WARN) // @Message(value = "Problems un-registering MBeans", id = 32) // void problemsUnregisteringMBeans(@Cause Exception e); // @LogMessage(level = WARN) // @Message(value = "Unable to unregister MBean %s", id = 33) // void unableToUnregisterMBean(String name, @Cause Exception e); @Message(value = "The '%s' JMX domain is already in use.", id = 34) JmxDomainConflictException jmxMBeanAlreadyRegistered(String jmxDomain, @Cause Throwable cause); // @LogMessage(level = WARN) // @Message(value = "Could not reflect field description of this class. 
Was it removed?", id = 35) // void couldNotFindDescriptionField(); @LogMessage(level = WARN) @Message(value = "Did not find attribute %s", id = 36) void couldNotFindAttribute(String name); @LogMessage(level = WARN) @Message(value = "Failed to update attribute name %s with value %s", id = 37) void failedToUpdateAttribute(String name, Object value); // @LogMessage(level = WARN) // @Message(value = "Method name %s doesn't start with \"get\", \"set\", or \"is\" " + // "but is annotated with @ManagedAttribute: will be ignored", id = 38) // void ignoringManagedAttribute(String methodName); // @LogMessage(level = WARN) // @Message(value = "Method %s must have a valid return type and zero parameters", id = 39) // void invalidManagedAttributeMethod(String methodName); // @LogMessage(level = WARN) // @Message(value = "Not adding annotated method %s since we already have read attribute", id = 40) // void readManagedAttributeAlreadyPresent(Method m); // @LogMessage(level = WARN) // @Message(value = "Not adding annotated method %s since we already have writable attribute", id = 41) // void writeManagedAttributeAlreadyPresent(String methodName); @LogMessage(level = WARN) @Message(value = "Did not find queried attribute with name %s", id = 42) void queriedAttributeNotFound(String attributeName); @LogMessage(level = WARN) @Message(value = "Exception while writing value for attribute %s", id = 43) void errorWritingValueForAttribute(String attributeName, @Cause Exception e); @LogMessage(level = WARN) @Message(value = "Could not invoke set on attribute %s with value %s", id = 44) void couldNotInvokeSetOnAttribute(String attributeName, Object value); // @LogMessage(level = ERROR) // @Message(value = "Problems encountered while purging expired", id = 45) // void problemPurgingExpired(@Cause Exception e); @LogMessage(level = ERROR) @Message(value = "Unknown responses from remote cache: %s", id = 46) void unknownResponsesFromRemoteCache(Collection<Response> responses); @LogMessage(level 
= ERROR) @Message(value = "Error while doing remote call", id = 47) void errorDoingRemoteCall(@Cause Exception e); @LogMessage(level = ERROR) @Message(value = "Interrupted or timeout while waiting for AsyncCacheWriter worker threads to push all state to the decorated store", id = 48) void interruptedWaitingAsyncStorePush(@Cause InterruptedException e); // @LogMessage(level = ERROR) // @Message(value = "Unexpected error", id = 51) // void unexpectedErrorInAsyncProcessor(@Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Interrupted on acquireLock for %d milliseconds!", id = 52) void interruptedAcquiringLock(long ms, @Cause InterruptedException e); @LogMessage(level = WARN) @Message(value = "Unable to process some async modifications after %d retries!", id = 53) void unableToProcessAsyncModifications(int retries); @LogMessage(level = ERROR) @Message(value = "Unexpected error in AsyncStoreCoordinator thread. AsyncCacheWriter is dead!", id = 55) void unexpectedErrorInAsyncStoreCoordinator(@Cause Throwable t); // @SuppressWarnings("deprecation") // @LogMessage(level = ERROR) // @Message(value = "Exception reported changing cache active status", id = 58) // void errorChangingSingletonStoreStatus(@Cause SingletonCacheWriter.PushStateException e); // @LogMessage(level = WARN) // @Message(value = "Had problems removing file %s", id = 59) // void problemsRemovingFile(File f); // @LogMessage(level = WARN) // @Message(value = "Problems purging file %s", id = 60) // void problemsPurgingFile(File buckedFile, @Cause PersistenceException e); // @LogMessage(level = WARN) // @Message(value = "Unable to acquire global lock to purge cache store", id = 61) // void unableToAcquireLockToPurgeStore(); // @LogMessage(level = ERROR) // @Message(value = "Error while reading from file: %s", id = 62) // void errorReadingFromFile(File f, @Cause Exception e); // @LogMessage(level = WARN) // @Message(value = "Problems creating the directory: %s", id = 64) // void 
problemsCreatingDirectory(File dir); // @LogMessage(level = ERROR) // @Message(value = "Exception while marshalling object: %s", id = 65) // void errorMarshallingObject(@Cause Throwable ioe, Object obj); // @LogMessage(level = ERROR) // @Message(value = "Unable to read version id from first two bytes of stream, barfing.", id = 66) // void unableToReadVersionId(); // @LogMessage(level = INFO) // @Message(value = "Will try and wait for the cache %s to start", id = 67) // void waitForCacheToStart(String cacheName); // @LogMessage(level = INFO) // @Message(value = "Cache named %s does not exist on this cache manager!", id = 68) // void namedCacheDoesNotExist(String cacheName); @LogMessage(level = WARN) @Message(value = "Caught exception when handling command %s", id = 71) void exceptionHandlingCommand(ReplicableCommand cmd, @Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Unexpected error while replicating", id = 73) void unexpectedErrorReplicating(@Cause Throwable t); // @LogMessage(level = ERROR) // @Message(value = "Message or message buffer is null or empty.", id = 77) // void msgOrMsgBufferEmpty(); @LogMessage(level = INFO) @Message(value = "Starting JGroups channel `%s`", id = 78) void startingJGroupsChannel(String cluster); @LogMessage(level = INFO) @Message(value = "Starting JGroups channel `%s` with stack `%s`", id = 78) void startingJGroupsChannel(String cluster, String stack); @LogMessage(level = INFO) @Message(value = "Channel `%s` local address is `%s`, physical addresses are `%s`", id = 79) void localAndPhysicalAddress(String cluster, Address address, List<Address> physicalAddresses); @LogMessage(level = INFO) @Message(value = "Disconnecting JGroups channel `%s`", id = 80) void disconnectJGroups(String cluster); @LogMessage(level = ERROR) @Message(value = "Problem closing channel `%s`; setting it to null", id = 81) void problemClosingChannel(@Cause Exception e, String cluster); // @LogMessage(level = INFO) // @Message(value = "Stopping 
the RpcDispatcher for channel %s", id = 82) // void stoppingRpcDispatcher(String cluster); @LogMessage(level = ERROR) @Message(value = "Class [%s] cannot be cast to JGroupsChannelLookup! Not using a channel lookup.", id = 83) void wrongTypeForJGroupsChannelLookup(String channelLookupClassName, @Cause Exception e); @LogMessage(level = ERROR) @Message(value = "Errors instantiating [%s]! Not using a channel lookup.", id = 84) void errorInstantiatingJGroupsChannelLookup(String channelLookupClassName, @Cause Exception e); @Message(value = "Error while trying to create a channel using the specified configuration file: %s", id = 85) CacheConfigurationException errorCreatingChannelFromConfigFile(String cfg, @Cause Exception e); @Message(value = "Error while trying to create a channel using the specified configuration XML: %s", id = 86) CacheConfigurationException errorCreatingChannelFromXML(String cfg, @Cause Exception e); @Message(value = "Error while trying to create a channel using the specified configuration string: %s", id = 87) CacheConfigurationException errorCreatingChannelFromConfigString(String cfg, @Cause Exception e); @LogMessage(level = INFO) @Message(value = "Unable to use any JGroups configuration mechanisms provided in properties %s. 
" + "Using default JGroups configuration!", id = 88) void unableToUseJGroupsPropertiesProvided(TypedProperties props); @LogMessage(level = ERROR) @Message(value = "getCoordinator(): Interrupted while waiting for members to be set", id = 89) void interruptedWaitingForCoordinator(@Cause InterruptedException e); // @LogMessage(level = WARN) // @Message(value = "Channel not set up properly!", id = 92) // void channelNotSetUp(); @LogMessage(level = INFO) @Message(value = "Received new, MERGED cluster view for channel %s: %s", id = 93) void receivedMergedView(String cluster, View newView); @LogMessage(level = INFO) @Message(value = "Received new cluster view for channel %s: %s", id = 94) void receivedClusterView(String cluster, View newView); @LogMessage(level = ERROR) @Message(value = "Error while processing a prepare in a single-phase transaction", id = 97) void errorProcessing1pcPrepareCommand(@Cause Throwable e); @LogMessage(level = ERROR) @Message(value = "Exception during rollback", id = 98) void errorRollingBack(@Cause Throwable e); // @LogMessage(level = ERROR) // @Message(value = "Unprocessed Transaction Log Entries! 
= %d", id = 99) // void unprocessedTxLogEntries(int size); @LogMessage(level = WARN) @Message(value = "Stopping, but there are %s local transactions and %s remote transactions that did not finish in time.", id = 100) void unfinishedTransactionsRemain(int localTransactions, int remoteTransactions); @LogMessage(level = WARN) @Message(value = "Failed synchronization registration", id = 101) void failedSynchronizationRegistration(@Cause Exception e); @LogMessage(level = WARN) @Message(value = "Unable to roll back global transaction %s", id = 102) void unableToRollbackGlobalTx(GlobalTransaction gtx, @Cause Throwable e); // @LogMessage(level = ERROR) // @Message(value = "A remote transaction with the given id was already registered!!!", id = 103) // void remoteTxAlreadyRegistered(); @LogMessage(level = INFO) @Message(value = "Using EmbeddedTransactionManager", id = 104) void fallingBackToEmbeddedTm(); @LogMessage(level = ERROR) @Message(value = "Failed creating initial JNDI context", id = 105) void failedToCreateInitialCtx(@Cause Throwable e); // @LogMessage(level = ERROR) // @Message(value = "Found WebSphere TransactionManager factory class [%s], but " + // "couldn't invoke its static 'getTransactionManager' method", id = 106) // void unableToInvokeWebsphereStaticGetTmMethod(@Cause Exception ex, String className); @LogMessage(level = INFO) @Message(value = "Retrieving transaction manager %s", id = 107) void retrievingTm(TransactionManager tm); @LogMessage(level = ERROR) @Message(value = "Error enlisting resource", id = 108) void errorEnlistingResource(@Cause Throwable e); @LogMessage(level = ERROR) @Message(value = "beforeCompletion() failed for %s", id = 109) void beforeCompletionFailed(Synchronization s, @Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Unexpected error from resource manager!", id = 110) void unexpectedErrorFromResourceManager(@Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "afterCompletion() failed for %s", id = 111) 
void afterCompletionFailed(Synchronization s, @Cause Throwable t); @LogMessage(level = WARN) @Message(value = "exception while committing", id = 112) void errorCommittingTx(@Cause Throwable e); // @LogMessage(level = ERROR) // @Message(value = "Unbinding of DummyTransactionManager failed", id = 113) // void unbindingDummyTmFailed(@Cause NamingException e); // @LogMessage(level = ERROR) // @Message(value = "Unsupported combination (dldEnabled, recoveryEnabled, xa) = (%s, %s, %s)", id = 114) // void unsupportedTransactionConfiguration(boolean dldEnabled, boolean recoveryEnabled, boolean xa); @LogMessage(level = WARN) @Message(value = "Recovery call will be ignored as recovery is disabled. " + "More on recovery: http://community.jboss.org/docs/DOC-16646", id = 115) void recoveryIgnored(); @LogMessage(level = WARN) @Message(value = "Missing the list of prepared transactions from node %s. " + "Received response is %s", id = 116) void missingListPreparedTransactions(Object key, Object value); @LogMessage(level = ERROR) @Message(value = "There's already a prepared transaction with this xid: %s. " + "New transaction is %s. Are there two different transactions having same Xid in the cluster?", id = 117) void preparedTxAlreadyExists(RecoveryAwareTransaction previous, RecoveryAwareRemoteTransaction remoteTransaction); // @LogMessage(level = WARN) // @Message(value = "Could not load module at URL %s", id = 118) // void couldNotLoadModuleAtUrl(URL url, @Cause Exception ex); // @LogMessage(level = WARN) // @Message(value = "Module %s loaded, but could not be initialized", id = 119) // void couldNotInitializeModule(Object key, @Cause Exception ex); // @LogMessage(level = WARN) // @Message(value = "Invocation of %s threw an exception %s. 
Exception is ignored.", id = 120) // void ignoringException(String methodName, String exceptionName, @Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Unable to set value!", id = 121) void unableToSetValue(@Cause Exception e); // @LogMessage(level = WARN) // @Message(value = "Unable to convert string property [%s] to an int! Using default value of %d", id = 122) // void unableToConvertStringPropertyToInt(String value, int defaultValue); // @LogMessage(level = WARN) // @Message(value = "Unable to convert string property [%s] to a long! Using default value of %d", id = 123) // void unableToConvertStringPropertyToLong(String value, long defaultValue); // @LogMessage(level = WARN) // @Message(value = "Unable to convert string property [%s] to a boolean! Using default value of %b", id = 124) // void unableToConvertStringPropertyToBoolean(String value, boolean defaultValue); // @LogMessage(level = WARN) // @Message(value = "Unable to invoke getter %s on Configuration.class!", id = 125) // void unableToInvokeGetterOnConfiguration(Method getter, @Cause Exception e); @LogMessage(level = WARN) @Message(value = "Attempted to stop() from FAILED state, but caught exception", id = 126) void failedToCallStopAfterFailure(@Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Needed to call stop() before destroying but stop() threw exception. Proceeding to destroy", id = 127) // void stopBeforeDestroyFailed(@Cause CacheException e); @LogMessage(level = DEBUG) @Message(value = "Infinispan version: %s", id = 128) void version(String version); // @LogMessage(level = WARN) // @Message(value = "Received a remote call but the cache is not in STARTED state - ignoring call.", id = 129) // void cacheNotStarted(); // @LogMessage(level = ERROR) // @Message(value = "Caught exception! 
Aborting join.", id = 130) // void abortingJoin(@Cause Exception e); // @LogMessage(level = INFO) // @Message(value = "%s completed join rehash in %s!", id = 131) // void joinRehashCompleted(Address self, String duration); // @LogMessage(level = INFO) // @Message(value = "%s aborted join rehash after %s!", id = 132) // void joinRehashAborted(Address self, String duration); @LogMessage(level = WARN) @Message(value = "Attempted to register listener of class %s, but no valid, " + "public methods annotated with method-level event annotations found! " + "Ignoring listener.", id = 133) void noAnnotateMethodsFoundInListener(Class<?> listenerClass); @LogMessage(level = WARN) @Message(value = "Unable to invoke method %s on Object instance %s - " + "removing this target object from list of listeners!", id = 134) void unableToInvokeListenerMethodAndRemoveListener(Method m, Object target, @Cause Throwable e); @LogMessage(level = WARN) @Message(value = "Could not lock key %s in order to invalidate from L1 at node %s, skipping....", id = 135) void unableToLockToInvalidate(Object key, Address address); @LogMessage(level = ERROR) @Message(value = "Error executing command %s on %s, writing keys %s", id = 136) void executionError(String commandType, String cacheName, String affectedKeys, @Cause Throwable t); @LogMessage(level = INFO) @Message(value = "Failed invalidating remote cache", id = 137) void failedInvalidatingRemoteCache(@Cause Throwable e); // @LogMessage(level = INFO) // @Message(value = "Could not register object with name: %s", id = 138) // void couldNotRegisterObjectName(Object objectName, @Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "Infinispan configuration schema could not be resolved locally nor fetched from URL. 
Local path=%s, schema path=%s, schema URL=%s", id = 139) // void couldNotResolveConfigurationSchema(String localPath, String schemaPath, String schemaURL); // @LogMessage(level = WARN) // @Message(value = "Lazy deserialization configuration is deprecated, please use storeAsBinary instead", id = 140) // void lazyDeserializationDeprecated(); @LogMessage(level = WARN) @Message(value = "Could not rollback prepared 1PC transaction. This transaction will be rolled back by the recovery process, if enabled. Transaction: %s", id = 141) void couldNotRollbackPrepared1PcTransaction(LocalTransaction localTransaction, @Cause Throwable e1); // @LogMessage(level = WARN) // @Message(value = "Received a key that doesn't map to this node: %s, mapped to %s", id = 143) // void keyDoesNotMapToLocalNode(Object key, Collection<Address> nodes); @LogMessage(level = WARN) @Message(value = "Failed loading value for key %s from cache store", id = 144) void failedLoadingValueFromCacheStore(Object key, @Cause Exception e); @LogMessage(level = ERROR) @Message(value = "Error invalidating keys from L1 after rehash", id = 147) void failedToInvalidateKeys(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "Invalid %s value of %s. It can not be higher than %s which is %s", id = 148) // void invalidTimeoutValue(Object configName1, Object value1, Object configName2, Object value2); @LogMessage(level = WARN) @Message(value = "Fetch persistent state and purge on startup are both disabled, cache may contain stale entries on startup", id = 149) void staleEntriesWithoutFetchPersistentStateOrPurgeOnStartup(); // @LogMessage(level = FATAL) // @Message(value = "Rehash command received on non-distributed cache. 
All the nodes in the cluster should be using the same configuration.", id = 150) // void rehashCommandReceivedOnNonDistributedCache(); // @LogMessage(level = ERROR) // @Message(value = "Error flushing to file: %s", id = 151) // void errorFlushingToFileChannel(FileChannel f, @Cause Exception e); @LogMessage(level = INFO) @Message(value = "Passivation configured without an eviction policy being selected. " + "Only manually evicted entities will be passivated.", id = 152) void passivationWithoutEviction(); // Warning ISPN000153 removed as per ISPN-2554 // @LogMessage(level = ERROR) // @Message(value = "Unable to unlock keys %2$s for transaction %1$s after they were rebalanced off node %3$s", id = 154) // void unableToUnlockRebalancedKeys(GlobalTransaction gtx, List<Object> keys, Address self, @Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Unblocking transactions failed", id = 159) // void errorUnblockingTransactions(@Cause Exception e); @LogMessage(level = WARN) @Message(value = "Could not complete injected transaction.", id = 160) void couldNotCompleteInjectedTransaction(@Cause Throwable t); @LogMessage(level = INFO) @Message(value = "Using a batchMode transaction manager", id = 161) void usingBatchModeTransactionManager(); @LogMessage(level = INFO) @Message(value = "Could not instantiate transaction manager", id = 162) void couldNotInstantiateTransactionManager(@Cause Exception e); // @LogMessage(level = WARN) // @Message(value = "FileCacheStore ignored an unexpected file %2$s in path %1$s. 
The store path should be dedicated!", id = 163) // void cacheLoaderIgnoringUnexpectedFile(Object parentPath, String name); // @LogMessage(level = ERROR) // @Message(value = "Rolling back to cache view %d, but last committed view is %d", id = 164) // void cacheViewRollbackIdMismatch(int committedViewId, int committedView); // @LogMessage(level = INFO) // @Message(value = "Strict peer-to-peer is enabled but the JGroups channel was started externally - this is very likely to result in RPC timeout errors on startup", id = 171) // void warnStrictPeerToPeerWithInjectedChannel(); // @LogMessage(level = ERROR) // @Message(value = "Custom interceptor %s has used @Inject, @Start or @Stop. These methods will not be processed. Please extend org.infinispan.interceptors.base.BaseCustomInterceptor instead, and your custom interceptor will have access to a cache and cacheManager. Override stop() and start() for lifecycle methods.", id = 173) // void customInterceptorExpectsInjection(String customInterceptorFQCN); // @LogMessage(level = WARN) // @Message(value = "Unexpected error reading configuration", id = 174) // void errorReadingConfiguration(@Cause Exception e); // @LogMessage(level = WARN) // @Message(value = "Unexpected error closing resource", id = 175) // void failedToCloseResource(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "The 'wakeUpInterval' attribute of the 'eviction' configuration XML element is deprecated. Setting the 'wakeUpInterval' attribute of the 'expiration' configuration XML element to %d instead", id = 176) // void evictionWakeUpIntervalDeprecated(Long wakeUpInterval); @LogMessage(level = WARN) @Message(value = "%s has been deprecated as a synonym for %s. 
Use one of %s instead", id = 177) void randomCacheModeSynonymsDeprecated(String candidate, String mode, List<String> synonyms); // @LogMessage(level = WARN) // @Message(value = "stateRetrieval's 'alwaysProvideInMemoryState' attribute is no longer in use, " + // "instead please make sure all instances of this named cache in the cluster have 'fetchInMemoryState' attribute enabled", id = 178) // void alwaysProvideInMemoryStateDeprecated(); // @LogMessage(level = WARN) // @Message(value = "stateRetrieval's 'initialRetryWaitTime' attribute is no longer in use.", id = 179) // void initialRetryWaitTimeDeprecated(); // @LogMessage(level = WARN) // @Message(value = "stateRetrieval's 'logFlushTimeout' attribute is no longer in use.", id = 180) // void logFlushTimeoutDeprecated(); // @LogMessage(level = WARN) // @Message(value = "stateRetrieval's 'maxProgressingLogWrites' attribute is no longer in use.", id = 181) // void maxProgressingLogWritesDeprecated(); // @LogMessage(level = WARN) // @Message(value = "stateRetrieval's 'numRetries' attribute is no longer in use.", id = 182) // void numRetriesDeprecated(); // @LogMessage(level = WARN) // @Message(value = "stateRetrieval's 'retryWaitTimeIncreaseFactor' attribute is no longer in use.", id = 183) // void retryWaitTimeIncreaseFactorDeprecated(); // @LogMessage(level = INFO) // @Message(value = "The stateRetrieval configuration element has been deprecated, " + // "we're assuming you meant stateTransfer. Please see XML schema for more information.", id = 184) // void stateRetrievalConfigurationDeprecated(); // @LogMessage(level = INFO) // @Message(value = "hash's 'rehashEnabled' attribute has been deprecated. Please use stateTransfer.fetchInMemoryState instead", id = 185) // void hashRehashEnabledDeprecated(); // @LogMessage(level = INFO) // @Message(value = "hash's 'rehashRpcTimeout' attribute has been deprecated. 
Please use stateTransfer.timeout instead", id = 186) // void hashRehashRpcTimeoutDeprecated(); // @LogMessage(level = INFO) // @Message(value = "hash's 'rehashWait' attribute has been deprecated. Please use stateTransfer.timeout instead", id = 187) // void hashRehashWaitDeprecated(); @LogMessage(level = ERROR) @Message(value = "Error while processing a commit in a two-phase transaction", id = 188) void errorProcessing2pcCommitCommand(@Cause Throwable e); @LogMessage(level = WARN) @Message(value = "While stopping a cache or cache manager, one of its components failed to stop", id = 189) void componentFailedToStop(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "Use of the 'loader' element to configure a store is deprecated, please use the 'store' element instead", id = 190) // void deprecatedLoaderAsStoreConfiguration(); // @LogMessage(level = DEBUG) // @Message(value = "When indexing locally a cache with shared cache loader, preload must be enabled", id = 191) // void localIndexingWithSharedCacheLoaderRequiresPreload(); // @LogMessage(level = WARN) // @Message(value = "hash's 'numVirtualNodes' attribute has been deprecated. Please use hash.numSegments instead", id = 192) // void hashNumVirtualNodesDeprecated(); // @LogMessage(level = WARN) // @Message(value = "hash's 'consistentHash' attribute has been deprecated. 
Please use hash.consistentHashFactory instead", id = 193) // void consistentHashDeprecated(); @LogMessage(level = WARN) @Message(value = "Failed loading keys from cache store", id = 194) void failedLoadingKeysFromCacheStore(@Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Error during rebalance for cache %s on node %s, topology id = %d", id = 195) void rebalanceError(String cacheName, Address node, int topologyId, @Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Failed to recover cluster state after the current node became the coordinator (or after merge), will retry", id = 196) void failedToRecoverClusterState(@Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Error updating cluster member list for view %d, waiting for next view", id = 197) void errorUpdatingMembersList(int viewId, @Cause Throwable cause); // @LogMessage(level = INFO) // @Message(value = "Unable to register MBeans for default cache", id = 198) // void unableToRegisterMBeans(); // @LogMessage(level = INFO) // @Message(value = "Unable to register MBeans for named cache %s", id = 199) // void unableToRegisterMBeans(String cacheName); // @LogMessage(level = INFO) // @Message(value = "Unable to register MBeans for cache manager", id = 200) // void unableToRegisterCacheManagerMBeans(); @LogMessage(level = TRACE) @Message(value = "This cache is configured to backup to its own site (%s).", id = 201) void cacheBackupsDataToSameSite(String siteName); @LogMessage(level = WARN) @Message(value = "Encountered issues while backing up data for cache %s to site %s", id = 202) @Description("This message indicates an issue has occurred with state transfer operations. First check that the site is online and if any network issues have occurred. Confirm that the relay nodes in the cluster are not overloaded with cross-site replication requests. In some cases garbage collection pauses can also interrupt backup operations. 
You can either increase the amount of memory available to relay nodes or increase the number of relay nodes in the cluster.") void warnXsiteBackupFailed(String cacheName, String siteName, @Cause Throwable throwable); @LogMessage(level = WARN) @Message(value = "The rollback request for tx %s cannot be processed by the cache %s as this cache is not transactional!", id = 203) void cannotRespondToRollback(GlobalTransaction globalTransaction, String cacheName); @LogMessage(level = WARN) @Message(value = "The commit request for tx %s cannot be processed by the cache %s as this cache is not transactional!", id = 204) void cannotRespondToCommit(GlobalTransaction globalTransaction, String cacheName); @LogMessage(level = WARN) @Message(value = "Trying to bring back an non-existent site (%s)!", id = 205) void tryingToBringOnlineNonexistentSite(String siteName); // @LogMessage(level = WARN) // @Message(value = "Could not execute cancellation command locally", id = 206) // void couldNotExecuteCancellationLocally(@Cause Throwable e); @LogMessage(level = WARN) @Message(value = "Could not interrupt as no thread found for command uuid %s", id = 207) void couldNotInterruptThread(UUID id); @LogMessage(level = ERROR) @Message(value = "No live owners found for segments %s of cache %s. 
Excluded owners: %s", id = 208) void noLiveOwnersFoundForSegments(Collection<Integer> segments, String cacheName, Collection<Address> faultySources); @LogMessage(level = WARN) @Message(value = "Failed to retrieve transactions of cache %s from node %s, segments %s", id = 209) void failedToRetrieveTransactionsForSegments(String cacheName, Address source, Collection<Integer> segments, @Cause Throwable t); @LogMessage(level = WARN) @Message(value = "Failed to request state of cache %s from node %s, segments %s", id = 210) void failedToRequestSegments(String cacheName, Address source, Collection<Integer> segments, @Cause Throwable e); // @LogMessage(level = ERROR) // @Message(value = "Unable to load %s from any of the following classloaders: %s", id=213) // void unableToLoadClass(String classname, String classloaders, @Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Unable to remove entry under %s from cache store after activation", id = 214) void unableToRemoveEntryAfterActivation(Object key, @Cause Throwable e); @Message(value = "Unknown migrator %s", id = 215) IllegalArgumentException unknownMigrator(String migratorName); @LogMessage(level = INFO) @Message(value = "%d entries migrated to cache %s in %s", id = 216) void entriesMigrated(long count, String name, String prettyTime); @Message(value = "Received exception from %s, see cause for remote stack trace", id = 217) RemoteException remoteException(Address sender, @Cause Throwable t); @LogMessage(level = INFO) @Message(value = "Timeout while waiting for the transaction validation. The command will not be processed. " + "Transaction is %s", id = 218) void timeoutWaitingUntilTransactionPrepared(String globalTx); // @LogMessage(level = WARN) // @Message(value = "Problems un-marshalling remote command from byte buffer", id = 220) // void errorUnMarshallingCommand(@Cause Throwable throwable); //@LogMessage(level = WARN) //@Message(value = "Unknown response value [%s]. 
Expected [%s]", id = 221) //void unexpectedResponse(String actual, String expected); @Message(value = "Custom interceptor missing class", id = 222) CacheConfigurationException customInterceptorMissingClass(); @LogMessage(level = WARN) @Message(value = "Custom interceptor '%s' does not extend BaseCustomInterceptor, which is recommended", id = 223) void suggestCustomInterceptorInheritance(String customInterceptorClassName); @Message(value = "Custom interceptor '%s' specifies more than one position", id = 224) CacheConfigurationException multipleCustomInterceptorPositions(String customInterceptorClassName); @Message(value = "Custom interceptor '%s' doesn't specify a position", id = 225) CacheConfigurationException missingCustomInterceptorPosition(String customInterceptorClassName); // @Message(value = "Error while initializing SSL context", id = 226) // CacheConfigurationException sslInitializationException(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "Support for concurrent updates can no longer be configured (it is always enabled by default)", id = 227) // void warnConcurrentUpdateSupportCannotBeConfigured(); @LogMessage(level = ERROR) @Message(value = "Failed to recover cache %s state after the current node became the coordinator", id = 228) void failedToRecoverCacheState(String cacheName, @Cause Throwable cause); @Message(value = "Unexpected initial version type (only NumericVersion instances supported): %s", id = 229) IllegalArgumentException unexpectedInitialVersion(String className); @LogMessage(level = ERROR) @Message(value = "Failed to start rebalance for cache %s", id = 230) void rebalanceStartError(String cacheName, @Cause Throwable cause); // @Message(value="Cache mode should be DIST or REPL, rather than %s", id = 231) // IllegalStateException requireDistOrReplCache(String cacheType); // @Message(value="Cache is in an invalid state: %s", id = 232) // IllegalStateException invalidCacheState(String cacheState); @LogMessage(level = 
WARN) @Message(value = "Root element for %s already registered in ParserRegistry by %s. Cannot install %s.", id = 234) void parserRootElementAlreadyRegistered(String qName, String oldParser, String newParser); @Message(value = "Configuration parser %s does not declare any Namespace annotations", id = 235) CacheConfigurationException parserDoesNotDeclareNamespaces(String name); // @Message(value = "Purging expired entries failed", id = 236) // PersistenceException purgingExpiredEntriesFailed(@Cause Throwable cause); // @Message(value = "Waiting for expired entries to be purge timed out", id = 237) // PersistenceException timedOutWaitingForExpiredEntriesToBePurged(@Cause Throwable cause); @Message(value = "Directory %s does not exist and cannot be created!", id = 238) CacheConfigurationException directoryCannotBeCreated(String path); // @Message(value="Cache manager is shutting down, so type write externalizer for type=%s cannot be resolved", id = 239) // IOException externalizerTableStopped(String className); // @Message(value="Cache manager is shutting down, so type (id=%d) cannot be resolved. Interruption being pushed up.", id = 240) // IOException pushReadInterruptionDueToCacheManagerShutdown(int readerIndex, @Cause InterruptedException cause); // @Message(value="Cache manager is %s and type (id=%d) cannot be resolved (thread not interrupted)", id = 241) // CacheException cannotResolveExternalizerReader(ComponentStatus status, int readerIndex); @Message(value = "Missing foreign externalizer with id=%s, either externalizer was not configured by client, or module lifecycle implementation adding externalizer was not loaded properly", id = 242) CacheException missingForeignExternalizer(int foreignId); @Message(value = "Type of data read is unknown. 
Id=%d is not amongst known reader indexes.", id = 243) CacheException unknownExternalizerReaderIndex(int readerIndex); @Message(value = "AdvancedExternalizer's getTypeClasses for externalizer %s must return a non-empty set", id = 244) CacheConfigurationException advanceExternalizerTypeClassesUndefined(String className); @Message(value = "Duplicate id found! AdvancedExternalizer id=%d for %s is shared by another externalizer (%s). Reader index is %d", id = 245) CacheConfigurationException duplicateExternalizerIdFound(int externalizerId, Class<?> typeClass, String otherExternalizer, int readerIndex); @Message(value = "Internal %s externalizer is using an id(%d) that exceeded the limit. It needs to be smaller than %d", id = 246) CacheConfigurationException internalExternalizerIdLimitExceeded(AdvancedExternalizer<?> ext, int externalizerId, int maxId); @Message(value = "Foreign %s externalizer is using a negative id(%d). Only positive id values are allowed.", id = 247) CacheConfigurationException foreignExternalizerUsingNegativeId(AdvancedExternalizer<?> ext, int externalizerId); // @Message(value = "Invalid cache loader configuration!! Only ONE cache loader may have fetchPersistentState set " + // "to true. 
Cache will not start!", id = 248) // CacheConfigurationException multipleCacheStoresWithFetchPersistentState(); @Message(value = "The cache loader configuration %s does not specify the loader class using @ConfigurationFor", id = 249) CacheConfigurationException loaderConfigurationDoesNotSpecifyLoaderClass(String className); // @Message(value = "Invalid configuration, expecting '%s' got '%s' instead", id = 250) // CacheConfigurationException incompatibleLoaderConfiguration(String expected, String actual); // @Message(value = "Cache Loader configuration cannot be null", id = 251) // CacheConfigurationException cacheLoaderConfigurationCannotBeNull(); @LogMessage(level = ERROR) @Message(value = "Error executing parallel store task", id = 252) void errorExecutingParallelStoreTask(@Cause Throwable cause); // @Message(value = "Invalid Cache Loader class: %s", id = 253) // CacheConfigurationException invalidCacheLoaderClass(String name); // @LogMessage(level = WARN) // @Message(value = "The transport element's 'strictPeerToPeer' attribute is no longer in use.", id = 254) // void strictPeerToPeerDeprecated(); @LogMessage(level = ERROR) @Message(value = "Error while processing prepare", id = 255) void errorProcessingPrepare(@Cause Throwable e); // @LogMessage(level = ERROR) // @Message(value = "Configurator SAXParse error", id = 256) // void configuratorSAXParseError(@Cause Exception e); // // @LogMessage(level = ERROR) // @Message(value = "Configurator SAX error", id = 257) // void configuratorSAXError(@Cause Exception e); // // @LogMessage(level = ERROR) // @Message(value = "Configurator general error", id = 258) // void configuratorError(@Cause Exception e); @LogMessage(level = ERROR) @Message(value = "Async store executor did not stop properly", id = 259) void errorAsyncStoreNotStopped(); // @LogMessage(level = ERROR) // @Message(value = "Exception executing command", id = 260) // void exceptionExecutingInboundCommand(@Cause Exception e); @LogMessage(level = ERROR) 
@Message(value = "Failed to execute outbound transfer", id = 261) void failedOutBoundTransferExecution(@Cause Throwable e); @LogMessage(level = ERROR) @Message(value = "Failed to enlist TransactionXaAdapter to transaction", id = 262) void failedToEnlistTransactionXaAdapter(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "FIFO strategy is deprecated, LRU will be used instead", id = 263) // void warnFifoStrategyIsDeprecated(); @LogMessage(level = WARN) @Message(value = "Not using an L1 invalidation reaper thread. This could lead to memory leaks as the requestors map may grow indefinitely!", id = 264) void warnL1NotHavingReaperThread(); @LogMessage(level = WARN) @Message(value = "Problems creating interceptor %s", id = 267) void unableToCreateInterceptor(Class<?> type, @Cause Exception e); @LogMessage(level = WARN) @Message(value = "Unable to broadcast invalidations as a part of the prepare phase. Rolling back.", id = 268) void unableToRollbackInvalidationsDuringPrepare(@Cause Throwable e); @LogMessage(level = WARN) @Message(value = "Cache used for Grid metadata should be synchronous.", id = 269) void warnGridFSMetadataCacheRequiresSync(); @LogMessage(level = WARN) @Message(value = "Could not commit local tx %s", id = 270) void warnCouldNotCommitLocalTx(Object transactionDescription, @Cause Throwable e); @LogMessage(level = WARN) @Message(value = "Could not rollback local tx %s", id = 271) void warnCouldNotRollbackLocalTx(Object transactionDescription, @Cause Throwable e); @LogMessage(level = WARN) @Message(value = "Exception removing recovery information", id = 272) void warnExceptionRemovingRecovery(@Cause Throwable e); @Message(value = "Indexing can not be enabled on caches in Invalidation mode", id = 273) CacheConfigurationException invalidConfigurationIndexingWithInvalidation(); @LogMessage(level = ERROR) @Message(value = "Persistence enabled without any CacheLoaderInterceptor in InterceptorChain!", id = 274) void 
persistenceWithoutCacheLoaderInterceptor(); @LogMessage(level = ERROR) @Message(value = "Persistence enabled without any CacheWriteInterceptor in InterceptorChain!", id = 275) void persistenceWithoutCacheWriteInterceptor(); // @Message(value = "Could not find migration data in cache %s", id = 276) // CacheException missingMigrationData(String name); @LogMessage(level = WARN) @Message(value = "Could not migrate key %s", id = 277) void keyMigrationFailed(String key, @Cause Throwable cause); @Message(value = "Indexing can only be enabled if infinispan-query.jar is available on your classpath, and this jar has not been detected.", id = 278) CacheConfigurationException invalidConfigurationIndexingWithoutModule(); @Message(value = "Failed to read stored entries from file. Error in file %s at offset %d", id = 279) PersistenceException errorReadingFileStore(String path, long offset); @Message(value = "Caught exception [%s] while invoking method [%s] on listener instance: %s", id = 280) CacheListenerException exceptionInvokingListener(String name, Method m, Object target, @Cause Throwable cause); @Message(value = "%s reported that a third node was suspected, see cause for info on the node that was suspected", id = 281) SuspectException thirdPartySuspected(Address sender, @Cause SuspectException e); @Message(value = "Cannot enable Invocation Batching when the Transaction Mode is NON_TRANSACTIONAL, set the transaction mode to TRANSACTIONAL", id = 282) CacheConfigurationException invocationBatchingNeedsTransactionalCache(); @Message(value = "A cache configured with invocation batching can't have recovery enabled", id = 283) CacheConfigurationException invocationBatchingCannotBeRecoverable(); @LogMessage(level = WARN) @Message(value = "Problem encountered while installing cluster listener", id = 284) void clusterListenerInstallationFailure(@Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Issue when retrieving cluster listeners from %s response was %s", id = 
285) void unsuccessfulResponseForClusterListeners(Address address, Response response); @LogMessage(level = WARN) @Message(value = "Issue when retrieving cluster listeners from %s", id = 286) void exceptionDuringClusterListenerRetrieval(Address address, @Cause Throwable cause); @Message(value = "Unauthorized access: subject '%s' lacks '%s' permission", id = 287) SecurityException unauthorizedAccess(String subject, String permission); @Message(value = "A principal-to-role mapper has not been specified", id = 288) CacheConfigurationException invalidPrincipalRoleMapper(); @LogMessage(level = WARN) @Message(value = "Cannot send cross-site state chunk to '%s'.", id = 289) @Description("During a state transfer operation it was not possible to transfer a batch of cache entries. First check that the site is online and if any network issues have occurred. Confirm that the relay nodes in the cluster are not overloaded with cross-site replication requests. In some cases garbage collection pauses can also interrupt backup operations. You can either increase the amount of memory available to relay nodes or increase the number of relay nodes in the cluster.") void unableToSendXSiteState(String site, @Cause Throwable cause); // @LogMessage(level = WARN) // @Message(value = "Unable to wait for X-Site state chunk ACKs from '%s'.", id = 290) // void unableToWaitForXSiteStateAcks(String site, @Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Cannot apply cross-site state chunk.", id = 291) @Description("During a state transfer operation it was not possible to apply a batch of cache entries. Ensure that sites are online and check network status.") void unableToApplyXSiteState(@Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Unrecognized attribute '%s'. Please check your configuration. 
Ignoring!", id = 292) void unrecognizedAttribute(String property); @LogMessage(level = INFO) @Message(value = "Ignoring attribute %s at %s, please remove from configuration file", id = 293) void ignoreAttribute(Object attribute, Location location); @LogMessage(level = INFO) @Message(value = "Ignoring element %s at %s, please remove from configuration file", id = 294) void ignoreXmlElement(Object element, Location location); @Message(value = "No thread pool with name '%s' found", id = 295) CacheConfigurationException undefinedThreadPoolName(String name); @Message(value = "Attempt to add a %s permission to a SecurityPermissionCollection", id = 296) IllegalArgumentException invalidPermission(Permission permission); @Message(value = "Attempt to add a permission to a read-only SecurityPermissionCollection", id = 297) SecurityException readOnlyPermissionCollection(); // @LogMessage(level = DEBUG) // @Message(value = "Using internal security checker", id = 298) // void authorizationEnabledWithoutSecurityManager(); @Message(value = "Unable to acquire lock after %s for key %s and requestor %s. Lock is held by %s", id = 299) TimeoutException unableToAcquireLock(String timeout, Object key, Object requestor, Object owner); @Message(value = "There was an exception while processing retrieval of entry values", id = 300) CacheException exceptionProcessingEntryRetrievalValues(@Cause Throwable cause); // @Message(value = "Iterator response for identifier %s encountered unexpected exception", id = 301) // CacheException exceptionProcessingIteratorResponse(UUID identifier, @Cause Throwable cause); @LogMessage(level = WARN) @Message(value = "Issue when retrieving transactions from %s, response was %s", id = 302) void unsuccessfulResponseRetrievingTransactionsForSegments(Address address, Response response); @LogMessage(level = WARN) @Message(value = "More than one configuration file with specified name on classpath. 
The first one will be used:\n %s", id = 304) void ambiguousConfigurationFiles(String files); @Message(value = "Cluster is operating in degraded mode because of node failures.", id = 305) AvailabilityException partitionDegraded(); @Message(value = "Key '%s' is not available. Not all owners are in this partition", id = 306) AvailabilityException degradedModeKeyUnavailable(Object key); @Message(value = "Cannot clear when the cluster is partitioned", id = 307) AvailabilityException clearDisallowedWhilePartitioned(); @LogMessage(level = INFO) @Message(value = "Rebalancing enabled", id = 308) void rebalancingEnabled(); @LogMessage(level = INFO) @Message(value = "Rebalancing suspended", id = 309) void rebalancingSuspended(); @LogMessage(level = DEBUG) @Message(value = "Starting new rebalance phase for cache %s, topology %s", id = 310) void startingRebalancePhase(String cacheName, CacheTopology cacheTopology); // Messages between 312 and 320 have been moved to the org.infinispan.util.logging.events.Messages class @LogMessage(level = WARN) @Message(value = "Cyclic dependency detected between caches, stop order ignored", id = 321) void stopOrderIgnored(); @LogMessage(level = WARN) @Message(value = "Cannot restart cross-site state transfer to site %s", id = 322) @Description("It was not possible to resume a state transfer operation to a backup location. Ensure that sites are online and check network status.") void failedToRestartXSiteStateTransfer(String siteName, @Cause Throwable cause); @Message(value = "%s is in '%s' state and so it does not accept new invocations. " + "Either restart it or recreate the cache container.", id = 323) IllegalLifecycleStateException cacheIsTerminated(String cacheName, String state); @Message(value = "%s is in 'STOPPING' state and this is an invocation not belonging to an on-going transaction, so it does not accept new invocations. 
" + "Either restart it or recreate the cache container.", id = 324) IllegalLifecycleStateException cacheIsStopping(String cacheName); @Message(value = "Creating tmp cache %s timed out waiting for rebalancing to complete on node %s ", id = 325) RuntimeException creatingTmpCacheTimedOut(String cacheName, Address address); @LogMessage(level = WARN) @Message(value = "Remote transaction %s timed out. Rolling back after %d ms", id = 326) void remoteTransactionTimeout(GlobalTransaction gtx, long ageMilliSeconds); @Message(value = "Cannot find a parser for element '%s' in namespace '%s' at %s. Check that your configuration is up-to-date for Infinispan '%s' and you have the proper dependency in the classpath", id = 327) CacheConfigurationException unsupportedConfiguration(String element, String namespaceUri, Location location, String version); @LogMessage(level = DEBUG) @Message(value = "Rebalance phase %s confirmed for cache %s on node %s, topology id = %d", id = 328) void rebalancePhaseConfirmedOnNode(CacheTopology.Phase phase, String cacheName, Address node, int topologyId); @LogMessage(level = WARN) @Message(value = "Unable to read rebalancing status from coordinator %s", id = 329) void errorReadingRebalancingStatus(Address coordinator, @Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Distributed task failed at %s. 
The task is failing over to be executed at %s", id = 330) // void distributedTaskFailover(Address failedAtAddress, Address failoverTarget, @Cause Exception e); @LogMessage(level = WARN) @Message(value = "Unable to invoke method %s on Object instance %s ", id = 331) void unableToInvokeListenerMethod(Method m, Object target, @Cause Throwable e); // @Message(value = "Remote transaction %s rolled back because originator is no longer in the cluster", id = 332) // CacheException orphanTransactionRolledBack(GlobalTransaction gtx); // @Message(value = "The site must be specified.", id = 333) // CacheConfigurationException backupSiteNullName(); // @Message(value = "Using a custom failure policy requires a failure policy class to be specified.", id = 334) // CacheConfigurationException customBackupFailurePolicyClassNotSpecified(); @Message(value = "Two-phase commit can only be used with synchronous backup strategy.", id = 335) CacheConfigurationException twoPhaseCommitAsyncBackup(); @LogMessage(level = DEBUG) @Message(value = "Finished rebalance for cache %s, topology %s", id = 336) void finishedRebalance(String cacheName, CacheTopology topology); @Message(value = "Backup configuration must include a 'site'.", id = 337) @Description("Caches that use cross-site replication must include a site in the configuration. Edit the cache and specify the name of the site in the backup configuration.") CacheConfigurationException backupMissingSite(); @Message(value = "You must specify a 'failure-policy-class' to use a custom backup failure policy for backup '%s'.", id = 338) @Description("The backup configuration for the cache uses a custom failure policy but does not include the fully qualified class of a custom failure policy implementation. 
Specify the failure policy class in the backup configuration or use a different failure policy.") CacheConfigurationException missingBackupFailurePolicyClass(String remoteSite); @Message(value = "Remote cache name is missing or null in backup configuration.", id = 339) @Description("Cross-site replication backs up data to caches with the same name by default. If you want to backup to a cache with a different name, you must specify the name of the remote cache in the 'backup-for' configuration. Modify cache configuration to include the name of the remote cache.") CacheConfigurationException backupForNullCache(); @Message(value = "Remote cache name and remote site is missing or null in backup configuration.", id = 340) @Description("Cross-site replication backs up data to caches with the same name by default. If you want to backup to a cache with a different name, you must specify the name of the remote cache and the remote site in the 'backup-for' configuration. Modify cache configuration to include the name of the remote cache and remote site.") CacheConfigurationException backupForMissingParameters(); // @Message(value = "Cannot configure async properties for an sync cache. Set the cache mode to async first.", id = 341) // IllegalStateException asyncPropertiesConfigOnSyncCache(); // @Message(value = "Cannot configure sync properties for an async cache. 
Set the cache mode to sync first.", id = 342) // IllegalStateException syncPropertiesConfigOnAsyncCache(); @Message(value = "Must have a transport set in the global configuration in " + "order to define a clustered cache", id = 343) CacheConfigurationException missingTransportConfiguration(); @Message(value = "reaperWakeUpInterval must be >= 0, we got %d", id = 344) CacheConfigurationException invalidReaperWakeUpInterval(long timeout); @Message(value = "completedTxTimeout must be >= 0, we got %d", id = 345) CacheConfigurationException invalidCompletedTxTimeout(long timeout); // @Message(value = "Total Order based protocol not available for transaction mode %s", id = 346) // CacheConfigurationException invalidTxModeForTotalOrder(TransactionMode transactionMode); // @Message(value = "Cache mode %s is not supported by Total Order based protocol", id = 347) // CacheConfigurationException invalidCacheModeForTotalOrder(String friendlyCacheModeString); // @Message(value = "Total Order based protocol not available with recovery", id = 348) // CacheConfigurationException unavailableTotalOrderWithTxRecovery(); // @Message(value = "Total Order based protocol not available with %s", id = 349) // CacheConfigurationException invalidLockingModeForTotalOrder(LockingMode lockingMode); @Message(value = "Enabling the L1 cache is only supported when using DISTRIBUTED as a cache mode. 
Your cache mode is set to %s", id = 350) CacheConfigurationException l1OnlyForDistributedCache(String cacheMode); @Message(value = "Using a L1 lifespan of 0 or a negative value is meaningless", id = 351) CacheConfigurationException l1InvalidLifespan(); @Message(value = "Enabling the L1 cache is not supported when using EXCEPTION based eviction.", id = 352) CacheConfigurationException l1NotValidWithExpirationEviction(); @Message(value = "Cannot define both interceptor class (%s) and interceptor instance (%s)", id = 354) CacheConfigurationException interceptorClassAndInstanceDefined(String customInterceptorClassName, String customInterceptor); @Message(value = "Unable to instantiate loader/writer instance for StoreConfiguration %s", id = 355) CacheConfigurationException unableToInstantiateClass(Class<?> storeConfigurationClass); // @Message(value = "Maximum data container size is currently 2^48 - 1, the number provided was %s", id = 356) // CacheConfigurationException evictionSizeTooLarge(long value); @LogMessage(level = ERROR) @Message(value = "end() failed for %s", id = 357) void xaResourceEndFailed(XAResource resource, @Cause Throwable t); @Message(value = "A cache configuration named %s already exists. This cannot be configured externally by the user.", id = 358) CacheConfigurationException existingConfigForInternalCache(String name); @Message(value = "Keys '%s' are not available. 
Not all owners are in this partition", id = 359) AvailabilityException degradedModeKeysUnavailable(Collection<?> keys); @LogMessage(level = WARN) @Message(value = "The xml element eviction-executor has been deprecated and replaced by expiration-executor, please update your configuration file.", id = 360) void evictionExecutorDeprecated(); @Message(value = "Cannot commit remote transaction %s as it was already rolled back", id = 361) CacheException remoteTransactionAlreadyRolledBack(GlobalTransaction gtx); @Message(value = "Could not find status for remote transaction %s, please increase transaction.completedTxTimeout", id = 362) TimeoutException remoteTransactionStatusMissing(GlobalTransaction gtx); @LogMessage(level = WARN) @Message(value = "No filter indexing service provider found for indexed filter of type %s", id = 363) void noFilterIndexingServiceProviderFound(String filterClassName); @Message(value = "Attempted to register cluster listener of class %s, but listener is annotated as only observing pre events!", id = 364) CacheException clusterListenerRegisteredWithOnlyPreEvents(Class<?> listenerClass); @Message(value = "Could not find the specified JGroups configuration file '%s'", id = 365) CacheConfigurationException jgroupsConfigurationNotFound(String cfg); @Message(value = "Unable to add a 'null' Custom Cache Store", id = 366) IllegalArgumentException unableToAddNullCustomStore(); // @LogMessage(level = ERROR) // @Message(value = "There was an issue with topology update for topology: %s", id = 367) // void topologyUpdateError(int topologyId, @Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Memory approximation calculation for eviction is unsupported for the '%s' Java VM", id = 368) // void memoryApproximationUnsupportedVM(String javaVM); // @LogMessage(level = WARN) // @Message(value = "Ignoring asyncMarshalling configuration", id = 369) // void ignoreAsyncMarshalling(); // @Message(value = "Cache name '%s' cannot be used as it is a 
reserved, internal name", id = 370) // IllegalArgumentException illegalCacheName(String name); @Message(value = "Cannot remove cache configuration '%s' because it is in use", id = 371) IllegalStateException configurationInUse(String configurationName); @Message(value = "Statistics are enabled while attribute 'available' is set to false.", id = 372) CacheConfigurationException statisticsEnabledNotAvailable(); @Message(value = "Attempted to start a cache using configuration template '%s'", id = 373) CacheConfigurationException templateConfigurationStartAttempt(String cacheName); @Message(value = "No such template '%s' when declaring '%s'", id = 374) CacheConfigurationException undeclaredConfiguration(String template, String name); @Message(value = "No such template/configuration '%s'", id = 375) CacheConfigurationException noConfiguration(String extend); @Message(value = "Interceptor stack is not supported in simple cache", id = 376) UnsupportedOperationException interceptorStackNotSupported(); @Message(value = "Explicit lock operations are not supported in simple cache", id = 377) UnsupportedOperationException lockOperationsNotSupported(); @Message(value = "Invocation batching not enabled in current configuration! Please enable it.", id = 378) CacheConfigurationException invocationBatchingNotEnabled(); // @Message(value = "Distributed Executors Framework is not supported in simple cache", id = 380) // CacheConfigurationException distributedExecutorsNotSupported(); @Message(value = "This configuration is not supported for simple cache", id = 381) CacheConfigurationException notSupportedInSimpleCache(); @LogMessage(level = WARN) @Message(value = "Global state persistence was enabled without specifying a location", id = 382) void missingGlobalStatePersistentLocation(); // @LogMessage(level = WARN) // @Message(value = "The eviction max-entries attribute has been deprecated. 
Please use the size attribute instead", id = 383) // void evictionMaxEntriesDeprecated(); @Message(value = "Unable to broadcast invalidation messages", id = 384) RuntimeException unableToBroadcastInvalidation(@Cause Throwable e); // @LogMessage(level = WARN) // @Message(value = "The data container class configuration has been deprecated. This has no current replacement", id = 385) // void dataContainerConfigurationDeprecated(); @Message(value = "Failed to read persisted state from file %s. Aborting.", id = 386) CacheConfigurationException failedReadingPersistentState(@Cause IOException e, File stateFile); @Message(value = "Failed to write state to file %s.", id = 387) CacheConfigurationException failedWritingGlobalState(@Cause IOException e, File stateFile); @Message(value = "The state file %s is not writable. Aborting.", id = 388) CacheConfigurationException nonWritableStateFile(File stateFile); @LogMessage(level = INFO) @Message(value = "Loaded global state, version=%s timestamp=%s", id = 389) void globalStateLoad(String version, String timestamp); @LogMessage(level = INFO) @Message(value = "Persisted state, version=%s timestamp=%s", id = 390) void globalStateWrite(String version, String timestamp); @Message(value = "Recovery not supported with non transactional cache", id = 391) CacheConfigurationException recoveryNotSupportedWithNonTxCache(); @Message(value = "Recovery not supported with Synchronization", id = 392) CacheConfigurationException recoveryNotSupportedWithSynchronization(); //@Message(value = "Recovery not supported with Asynchronous %s cache mode", id = 393) //CacheConfigurationException recoveryNotSupportedWithAsync(String cacheMode); //@Message(value = "Recovery not supported with asynchronous commit phase", id = 394) //CacheConfigurationException recoveryNotSupportedWithAsyncCommit(); @LogMessage(level = INFO) @Message(value = "Transaction notifications are disabled. 
This prevents cluster listeners from working properly!", id = 395) void transactionNotificationsDisabled(); @LogMessage(level = DEBUG) @Message(value = "Received unsolicited state from node %s for segment %d of cache %s", id = 396) void ignoringUnsolicitedState(Address node, int segment, String cacheName); // @Message(value = "Could not migrate data for cache %s, check remote store config in the target cluster. Make sure only one remote store is present and is pointing to the source cluster", id = 397) // CacheException couldNotMigrateData(String name); @Message(value = "CH Factory '%s' cannot restore a persisted CH of class '%s'", id = 398) IllegalStateException persistentConsistentHashMismatch(String hashFactory, String consistentHashClass); @Message(value = "Timeout while waiting for %d members in cluster. Last view had %s", id = 399) TimeoutException timeoutWaitingForInitialNodes(int initialClusterSize, List<?> members); @Message(value = "Node %s was suspected", id = 400) @Description("A node in the cluster is offline or cannot be reached on the network. If you are using cross-site replication this message indicates that the relay nodes are not reachable. Check network settings for all nodes in the cluster.") SuspectException remoteNodeSuspected(Address address); @Message(value = "Node %s timed out, time : %s %s", id = 401) TimeoutException remoteNodeTimedOut(Address address, long time, TimeUnit unit); @Message(value = "Timeout waiting for view %d. Current view is %d, current status is %s", id = 402) TimeoutException coordinatorTimeoutWaitingForView(int expectedViewId, int currentViewId, Object clusterManagerStatus); @Message(value = "No indexable classes were defined for this indexed cache. The configuration must contain " + "classes or protobuf message types annotated with '@Indexed'", id = 403) CacheConfigurationException noIndexableClassesDefined(); // @Message(value = "The configured entity class %s is not indexable. 
Please remove it from the indexing configuration.", id = 404) // CacheConfigurationException classNotIndexable(String className); @LogMessage(level = ERROR) @Message(value = "Caught exception while invoking a cache manager listener!", id = 405) void failedInvokingCacheManagerListener(@Cause Throwable e); @LogMessage(level = WARN) @Message(value = "The replication queue is no longer supported since version 9.0. Attribute %s on line %d will be ignored.", id = 406) void ignoredReplicationQueueAttribute(String attributeName, int line); @Message(value = "Extraneous members %s are attempting to join cache %s, as they were not members of the persisted state", id = 407) CacheJoinException extraneousMembersJoinRestoredCache(List<Address> extraneousMembers, String cacheName); @Message(value = "Node %s with persistent state attempting to join cache %s on cluster without state", id = 408) CacheJoinException nodeWithPersistentStateJoiningClusterWithoutState(Address joiner, String cacheName); @Message(value = "Node %s without persistent state attempting to join cache %s on cluster with state", id = 409) CacheJoinException nodeWithoutPersistentStateJoiningCacheWithState(Address joiner, String cacheName); @Message(value = "Node %s attempting to join cache %s with incompatible state", id = 410) CacheJoinException nodeWithIncompatibleStateJoiningCache(Address joiner, String cacheName); // @LogMessage(level = WARN) // @Message(value = "Classpath does not look correct. 
Make sure you are not mixing uber and jars", id = 411) // void warnAboutUberJarDuplicates(); @Message(value = "Cannot determine a synthetic transaction configuration from mode=%s, xaEnabled=%s, recoveryEnabled=%s, batchingEnabled=%s", id = 412) CacheConfigurationException unknownTransactionConfiguration(org.infinispan.transaction.TransactionMode mode, boolean xaEnabled, boolean recoveryEnabled, boolean batchingEnabled); @Message(value = "Unable to instantiate serializer for %s", id = 413) CacheConfigurationException unableToInstantiateSerializer(Class<?> storeConfigurationClass); @Message(value = "Global security authorization should be enabled if cache authorization enabled.", id = 414) CacheConfigurationException globalSecurityAuthShouldBeEnabled(); @LogMessage(level = WARN) @Message(value = "The %s is no longer supported since version %s. Attribute %s on line %d will be ignored.", id = 415) void ignoredAttribute(String componentName, String version, String attributeName, int line); // @LogMessage(level = ERROR) // @Message(value = "Error executing submitted store task", id = 416) // void errorExecutingSubmittedStoreTask(@Cause Throwable cause); @Message(value = "It is not possible for a store to be transactional in a non-transactional cache. ", id = 417) CacheConfigurationException transactionalStoreInNonTransactionalCache(); @Message(value = "It is not possible for a store to be transactional when passivation is enabled. 
", id = 418) CacheConfigurationException transactionalStoreInPassivatedCache(); @LogMessage(level = WARN) @Message(value = "Eviction of an entry invoked without an explicit eviction strategy for cache %s", id = 419) void evictionDisabled(String cacheName); @Message(value = "Cannot enable '%s' in invalidation caches!", id = 420) CacheConfigurationException attributeNotAllowedInInvalidationMode(String attributeName); @LogMessage(level = ERROR) @Message(value = "Error while handling view %s", id = 421) void viewHandlingError(int viewId, @Cause Throwable t); @Message(value = "Failed waiting for topology %d", id = 422) TimeoutException failedWaitingForTopology(int requestTopologyId); @Message(value = "Duplicate id found! AdvancedExternalizer id=%d is shared by another externalizer (%s)", id = 423) CacheConfigurationException duplicateExternalizerIdFound(int externalizerId, String otherExternalizer); @Message(value = "Memory eviction is enabled, please specify a maximum size or count greater than zero", id = 424) CacheConfigurationException invalidEvictionSize(); // @Message(value = "Eviction cannot use memory-based approximation with LIRS", id = 425) // CacheConfigurationException memoryEvictionInvalidStrategyLIRS(); //removed unused message (id=426) @Message(value = "Timeout after %s waiting for acks (%s). Id=%s", id = 427) TimeoutException timeoutWaitingForAcks(String timeout, String address, long id); @LogMessage(level = WARN) @Message(value = "'%1$s' at %3$s has been deprecated. 
Please use '%2$s' instead", id = 428) void configDeprecatedUseOther(Enum<?> element, Enum<?> other, Location location); @Message(value = "On key %s previous read version (%s) is different from currently read version (%s)", id = 429) WriteSkewException writeSkewOnRead(@Param Object key, Object key2, EntryVersion lastVersion, EntryVersion remoteVersion); @Message(value = "%s cannot be shared", id = 430) CacheConfigurationException nonSharedStoreConfiguredAsShared(String storeType); // @LogMessage(level = WARN) // @Message(value = "Unable to validate %s's configuration as the @Store annotation is missing", id = 431) // void warnStoreAnnotationMissing(String name); @Message(value = "Missing configuration for default cache '%s' declared on container", id = 432) CacheConfigurationException missingDefaultCacheDeclaration(String defaultCache); @Message(value = "A default cache has been requested, but no cache has been set as default for this container", id = 433) CacheConfigurationException noDefaultCache(); // @LogMessage(level = WARN) // @Message(value = "Direct usage of the ___defaultcache name to retrieve the default cache is deprecated", id = 434) // void deprecatedDefaultCache(); @Message(value = "Cache manager initialized with a default cache configuration but without a name for it. 
Set it in the GlobalConfiguration.", id = 435) CacheConfigurationException defaultCacheConfigurationWithoutName(); @Message(value = "Cache '%s' has been requested, but no matching cache configuration exists", id = 436) CacheConfigurationException noSuchCacheConfiguration(String name); @LogMessage(level = WARN) @Message(value = "Unable to validate '%s' with the implementing store as the @ConfigurationFor annotation is missing", id = 437) void warnConfigurationForAnnotationMissing(String name); @Message(value = "Cache with name '%s' is defined more than once!", id = 438) CacheConfigurationException duplicateCacheName(String name); @LogMessage(level = INFO) @Message(value = "Received new cross-site view: %s", id = 439) @Description("A cluster has either joined or left the global cluster view.") void receivedXSiteClusterView(Collection<String> view); @LogMessage(level = ERROR) @Message(value = "Error sending response for request %d@%s, command %s", id = 440) void errorSendingResponse(long requestId, org.jgroups.Address origin, ReplicableCommand command); @Message(value = "Unsupported async cache mode '%s' for transactional caches", id = 441) CacheConfigurationException unsupportedAsyncCacheMode(CacheMode cacheMode); // @Message(value = "Invalid cache loader configuration for '%s'. If a cache loader is configured as a singleton , the cache loader cannot be shared in a cluster!", id = 442) // CacheConfigurationException singletonStoreCannotBeShared(String name); @Message(value = "Invalid cache loader configuration for '%s'. In order for a cache loader to be transactional, it must also be shared.", id = 443) CacheConfigurationException clusteredTransactionalStoreMustBeShared(String simpleName); @Message(value = "Invalid cache loader configuration for '%s'. 
A cache loader cannot be both Asynchronous and transactional.", id = 444) CacheConfigurationException transactionalStoreCannotBeAsync(String simpleName); // @Message(value = "At most one store can be set to 'fetchPersistentState'!", id = 445) // CacheConfigurationException onlyOneFetchPersistentStoreAllowed(); @Message(value = "Multiple sites have the same name '%s'. This configuration is not valid.", id = 446) @Description("The name for each cluster that participates in cross-site replication must have a unique site name. Modify JGroups RELAY2 configuration and specify a unique site name for each backup location.") CacheConfigurationException multipleSitesWithSameName(String site); // @Message(value = "The site '%s' must be defined within the set of backups!", id = 447) // CacheConfigurationException siteMustBeInBackups(String site); @Message(value = "'awaitInitialTransfer' can be enabled only if cache mode is distributed or replicated.", id = 448) CacheConfigurationException awaitInitialTransferOnlyForDistOrRepl(); @Message(value = "Timeout value for cross-site replication state transfer must be equal to or greater than one.", id = 449) @Description("The value of the timeout attribute is zero or a negative number. Specify a value of at least one for the timeout attribute in the cross-site state transfer configuration for your cache.") CacheConfigurationException invalidXSiteStateTransferTimeout(); @Message(value = "Wait time between retries for cross-site replication state transfer must be equal to or greater than one.", id = 450) @Description("The value of the wait-time attribute is zero or a negative number. 
Specify a value of at least one for the wait-time attribute in the cross-site state transfer configuration for your cache.") CacheConfigurationException invalidXSiteStateTransferWaitTime(); @Message(value = "Timed out waiting for view %d, current view is %d", id = 451) TimeoutException timeoutWaitingForView(int expectedViewId, int currentViewId); @LogMessage(level = ERROR) @Message(value = "Failed to update topology for cache %s", id = 452) void topologyUpdateError(String cacheName, @Cause Throwable cause); @Message(value = "Attempt to define configuration for cache %s which already exists", id = 453) CacheConfigurationException configAlreadyDefined(String cacheName); @LogMessage(level = ERROR) @Message(value = "Failure during leaver transactions cleanup", id = 455) void transactionCleanupError(@Cause Throwable e); // @Message(value = "Cache does not contain the atomic map.", id = 456) // IllegalStateException atomicMapDoesNotExist(); // @Message(value = "Cache contains %s which is not of expected type %s", id = 457) // IllegalStateException atomicMapHasWrongType(Object value, Class<?> type); // @Message(value = "Fine grained maps require clustering.hash.groups enabled.", id = 458) // IllegalStateException atomicFineGrainedNeedsGroups(); // @Message(value = "Fine grained maps require transactional cache.", id = 459) // IllegalStateException atomicFineGrainedNeedsTransactions(); // @Message(value = "Fine grained maps require explicit transaction or auto-commit enabled", id = 460) // IllegalStateException atomicFineGrainedNeedsExplicitTxOrAutoCommit(); @Message(value = "Class %s should be a subclass of %s", id = 461) CacheException invalidEncodingClass(Class<?> configured, Class<?> required); @Message(value = "ConflictManager.getConflicts() already in progress", id = 462) IllegalStateException getConflictsAlreadyInProgress(); @Message(value = "Unable to retrieve conflicts as StateTransfer is currently in progress for cache '%s'", id = 463) IllegalStateException
getConflictsStateTransferInProgress(String cacheName); @LogMessage(level = WARN) @Message(value = "The partition handling 'enabled' attribute has been deprecated. Please update your configuration to use 'when-split' instead", id = 464) void partitionHandlingConfigurationEnabledDeprecated(); // @Message(value = "Keys '%s' are not available. No owners exist in this partition", id = 465) // AvailabilityException degradedModeNoOwnersExist(Object key); @LogMessage(level = WARN) @Message(value = "Exception encountered when trying to resolve conflict on Keys '%s': %s", id = 466) void exceptionDuringConflictResolution(Object key, Throwable t); @Message(value = "Cache manager is stopping", id = 472) IllegalLifecycleStateException cacheManagerIsStopping(); @LogMessage(level = ERROR) @Message(value = "Invalid message type %s received from %s", id = 473) void invalidMessageType(int messageType, org.jgroups.Address origin); @LogMessage(level = ERROR) @Message(value = "Error processing request %d@%s", id = 474) void errorProcessingRequest(long requestId, org.jgroups.Address origin, @Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Error processing response for request %d from %s", id = 475) void errorProcessingResponse(long requestId, org.jgroups.Address sender, @Cause Throwable t); @Message(value = "Timed out waiting for responses for request %d from %s after %s", id = 476) TimeoutException requestTimedOut(long requestId, String targetsWithoutResponses, String elapsed); @LogMessage(level = ERROR) @Message(value = "Cannot perform operation %s for site %s", id = 477) @Description("It was not possible to successfully complete an operation on a backup location. 
Set logging levels to TRACE to analyze and troubleshoot the issue.") void xsiteAdminOperationError(String operationName, String siteName, @Cause Throwable t); @Message(value = "Couldn't find a local transaction corresponding to remote site transaction %s", id = 478) CacheException unableToFindRemoteSiteTransaction(GlobalTransaction globalTransaction); @Message(value = "LocalTransaction not found but present in the tx table for remote site transaction %s", id = 479) IllegalStateException unableToFindLocalTransactionFromRemoteSiteTransaction(GlobalTransaction globalTransaction); @LogMessage(level = WARN) @Message(value = "Ignoring versions invalidation from topology %d, current topology is %d", id = 480) void ignoringInvalidateVersionsFromOldTopology(int invalidationTopology, int currentTopologyId); @Message(value = "Cannot create remote transaction %s, the originator is not in the cluster view", id = 481) CacheException remoteTransactionOriginatorNotInView(GlobalTransaction gtx); @Message(value = "Cannot create remote transaction %s, already completed", id = 482) CacheException remoteTransactionAlreadyCompleted(GlobalTransaction gtx); // @Message(value = "Class %s not found", id = 483) // CacheConfigurationException classNotFound(String name); @Message(value = "Wildcards not allowed in cache names: '%s'", id = 484) CacheConfigurationException wildcardsNotAllowedInCacheNames(String name); @Message(value = "Configuration '%s' matches multiple wildcard templates", id = 485) CacheConfigurationException configurationNameMatchesMultipleWildcards(String name); @Message(value = "Cannot register Wrapper: duplicate Id %d", id = 486) EncodingException duplicateIdWrapper(byte id); @Message(value = "Wrapper with class '%s' not found", id = 487) EncodingException wrapperClassNotFound(Class<?> wrapperClass); @Message(value = "Wrapper with Id %d not found", id = 488) EncodingException wrapperIdNotFound(byte id); @Message(value = "Cannot register Encoder: duplicate Id %d", id = 489) 
EncodingException duplicateIdEncoder(short id); @Message(value = "Encoder with class '%s' not found", id = 490) EncodingException encoderClassNotFound(Class<?> wrapperClass); @Message(value = "Encoder with Id %d not found", id = 491) EncodingException encoderIdNotFound(short id); @Message(value = "Cannot find transcoder between '%s' to '%s'", id = 492) EncodingException cannotFindTranscoder(MediaType mediaType, MediaType another); // @Message(value = "Invalid text format: '%s'", id = 493) // EncodingException invalidTextFormat(Object content); // @Message(value = "Invalid binary format: '%s'", id = 494) // EncodingException invalidBinaryFormat(Object content); @Message(value = "%s encountered error transcoding content", id = 495) EncodingException errorTranscoding(String transcoderName, @Cause Throwable cause); // @Message(value = "Error transcoding content '%s'", id = 496) // EncodingException errorTranscodingContent(@Cause Throwable cause, Object content); @Message(value = "%s encountered unsupported content '%s' during transcoding", id = 497) EncodingException unsupportedContent(String transcoderName, Object content); // @LogMessage(level = WARN) // @Message(value = "Indexing mode ALL without owning all data locally (replicated mode).", id = 498) // void allIndexingInNonReplicatedCache(); @Message(value = "Could not serialize the configuration of cache '%s' (%s)", id = 499) CacheConfigurationException configurationSerializationFailed(String cacheName, Configuration configuration, @Cause Exception e); @Message(value = "Cannot create clustered configuration for cache '%s' because configuration %n%s%n is incompatible with the existing configuration %n%s", id = 500) CacheConfigurationException incompatibleClusterConfiguration(String cacheName, Configuration configuration, Configuration existing); @Message(value = "Cannot persist cache configuration as global state is disabled", id = 501) CacheConfigurationException globalStateDisabled(); @Message(value = "Error 
while persisting global configuration state", id = 502) CacheConfigurationException errorPersistingGlobalConfiguration(@Cause Throwable cause); @Message(value = "Size (bytes) based eviction needs either off-heap or a binary compatible storage configured in the cache encoding", id = 504) CacheConfigurationException offHeapMemoryEvictionNotSupportedWithObject(); @Message(value = "Cache %s already exists", id = 507) CacheConfigurationException cacheExists(String cacheName); @Message(value = "Cannot rename file %s to %s", id = 508) CacheConfigurationException cannotRenamePersistentFile(String absolutePath, File persistentFile, @Cause Throwable cause); @Message(value = "Unable to add a 'null' EntryMergePolicyFactory", id = 509) IllegalArgumentException unableToAddNullEntryMergePolicyFactory(); @Message(value = "ConfigurationStrategy set to CUSTOM, but none specified", id = 510) CacheConfigurationException customStorageStrategyNotSet(); @Message(value = "ConfigurationStrategy cannot be set to MANAGED in embedded mode", id = 511) CacheConfigurationException managerConfigurationStorageUnavailable(); @Message(value = "Cannot acquire lock '%s' for persistent global state", id = 512) CacheConfigurationException globalStateCannotAcquireLockFile(@Cause Throwable cause, File lockFile); @Message(value = "Exception based eviction requires a transactional cache that doesn't allow for 1 phase commit or synchronizations", id = 513) CacheConfigurationException exceptionBasedEvictionOnlySupportedInTransactionalCaches(); @Message(value = "Container eviction limit %d reached, write operation(s) is blocked", id = 514) ContainerFullException containerFull(long size); @Message(value = "The configuration is immutable", id = 515) UnsupportedOperationException immutableConfiguration(); @Message(value = "The state file for '%s' is invalid. 
Startup halted to prevent further corruption of persistent state", id = 516) CacheConfigurationException invalidPersistentState(String globalScope); @LogMessage(level = WARN) @Message(value = "Ignoring cache topology from %s during merge: %s", id = 517) void ignoringCacheTopology(Collection<Address> sender, CacheTopology topology); @LogMessage(level = DEBUG) @Message(value = "Updating topology for cache %s, topology %s, availability mode %s", id = 518) void updatingTopology(String cacheName, CacheTopology currentTopology, AvailabilityMode availabilityMode); @LogMessage(level = DEBUG) @Message(value = "Updating stable topology for cache %s, topology %s", id = 519) void updatingStableTopology(String cacheName, CacheTopology currentTopology); @LogMessage(level = DEBUG) @Message(value = "Updating availability mode for cache %s from %s to %s, topology %s", id = 520) void updatingAvailabilityMode(String cacheName, AvailabilityMode oldMode, AvailabilityMode newMode, CacheTopology topology); @LogMessage(level = DEBUG) @Message(value = "Cache %s recovered after merge with topology = %s, availability mode %s", id = 521) void cacheRecoveredAfterMerge(String cacheName, CacheTopology currentTopology, AvailabilityMode availabilityMode); @LogMessage(level = DEBUG) @Message(value = "Conflict resolution starting for cache %s with topology %s", id = 522) void startingConflictResolution(String cacheName, CacheTopology currentTopology); @LogMessage(level = DEBUG) @Message(value = "Conflict resolution finished for cache %s with topology %s", id = 523) void finishedConflictResolution(String cacheName, CacheTopology currentTopology); @LogMessage(level = ERROR) @Message(value = "Conflict resolution failed for cache %s with topology %s", id = 524) void failedConflictResolution(String cacheName, CacheTopology currentTopology, @Cause Throwable t); @LogMessage(level = DEBUG) @Message(value = "Conflict resolution cancelled for cache %s with topology %s", id = 525) void 
cancelledConflictResolution(String cacheName, CacheTopology currentTopology); @Message(value = "Maximum startup attempts exceeded for store %s", id = 527) PersistenceException storeStartupAttemptsExceeded(String storeName, @Cause Throwable t); @Message(value = "Cannot acquire lock %s as this partition is DEGRADED", id = 528) AvailabilityException degradedModeLockUnavailable(Object key); @Message(value = "Class '%s' blocked by deserialization allow list. Include the class name in the server allow list to authorize.", id = 529) CacheException errorDeserializing(String className); @LogMessage(level = WARN) @Message(value = "Unsupported async cache mode '%s' for transactional caches, forcing %s", id = 530) void unsupportedAsyncCacheMode(CacheMode unsupportedCacheMode, CacheMode forcedCacheMode); @Message(value = "Store or loader %s must implement SegmentedLoadWriteStore or its config must extend AbstractSegmentedStoreConfiguration if configured as segmented", id = 531) CacheConfigurationException storeNotSegmented(Class<?> implementedClass); @Message(value = "Invalid cache loader configuration for '%s'. 
If a cache loader is configured with passivation, the cache loader cannot be shared in a cluster!", id = 532) CacheConfigurationException passivationStoreCannotBeShared(String name); @Message(value = "Content '%s (MediaType: '%s') cannot be converted to '%s'", id = 533) EncodingException cannotConvertContent(Object content, MediaType contentType, MediaType destination, @Cause Throwable e); @Message(value = "Grouping requires OBJECT storage type but was: %s", id = 534) CacheConfigurationException groupingOnlyCompatibleWithObjectStorage(StorageType storageType); @Message(value = "Grouping requires application/x-java-object storage type but was: {key=%s, value=%s}", id = 535) CacheConfigurationException groupingOnlyCompatibleWithObjectStorage(MediaType keyMediaType, MediaType valueMediaType); @Message(value = "Factory doesn't know how to construct component %s", id = 537) CacheConfigurationException factoryCannotConstructComponent(String componentName); @LogMessage(level = ERROR) @Message(value = "Error stopping module %s", id = 538) void moduleStopError(String module, @Cause Throwable t); @Message(value = "Duplicate JGroups stack '%s'", id = 539) CacheConfigurationException duplicateJGroupsStack(String name); @Message(value = "No such JGroups stack '%s'", id = 540) CacheConfigurationException missingJGroupsStack(String name); @Message(value = "Error while trying to create a channel using the specified configuration '%s'", id = 541) CacheConfigurationException errorCreatingChannelFromConfigurator(String configurator, @Cause Throwable t); @Message(value = "Invalid parser scope. 
Expected '%s' but was '%s'", id = 542) CacheConfigurationException invalidScope(String expected, String found); @Message(value = "Cannot use stack.position when stack.combine is '%s'", id = 543) CacheConfigurationException jgroupsNoStackPosition(String combineMode); @Message(value = "The protocol '%s' does not exist in the base stack for operation '%s'", id = 544) CacheConfigurationException jgroupsNoSuchProtocol(String protocolName, String combineMode); @Message(value = "Inserting protocol '%s' in a JGroups stack requires the 'stack.position' attribute", id = 545) CacheConfigurationException jgroupsInsertRequiresPosition(String protocolName); @Message(value = "Duplicate remote site '%s' in stack '%s'", id = 546) @Description("The name for each cluster that participates in cross-site replication must have a unique site name. Modify JGroups RELAY2 configuration and specify a unique site name for each backup location.") CacheConfigurationException duplicateRemoteSite(String remoteSite, String name); @Message(value = "JGroups stack '%s' declares remote sites but does not include the RELAY2 protocol.", id = 547) @Description("Cross-site replication requires the JGroups RELAY2 protocol. Modify the JGroups configuration to include a RELAY2 stack.") CacheConfigurationException jgroupsRemoteSitesWithoutRelay(String name); @Message(value = "JGroups stack '%s' has a RELAY2 protocol without remote sites.", id = 548) @Description("Each cluster that participates in cross-site replication must be identified with a site name in the RELAY2 stack. 
Modify JGroups configuration and specify a unique site name for each backup location.") CacheConfigurationException jgroupsRelayWithoutRemoteSites(String name); @Message(value = "A store cannot be shared when utilised with a local cache.", id = 549) CacheConfigurationException sharedStoreWithLocalCache(); @Message(value = "Invalidation mode only supports when-split=ALLOW_READ_WRITES", id = 550) CacheConfigurationException invalidationPartitionHandlingNotSuported(); @LogMessage(level = WARN) @Message(value = "The custom interceptors configuration has been deprecated and will be ignored in the future", id = 551) void customInterceptorsDeprecated(); // @LogMessage(level = WARN) // @Message(value = "Module '%s' provides an instance of the deprecated ModuleCommandInitializer. Commands that require initialization should implement the InitializableCommand interface", id = 552) // void warnModuleCommandInitializerDeprecated(String module); @LogMessage(level = WARN) @Message(value = "Ignoring 'marshaller' attribute. 
Common marshallers are already available at runtime, and to deploy a custom marshaller, consult the 'Encoding' section in the user guide", id = 553) void marshallersNotSupported(); @LogMessage(level = WARN) @Message(value = "jboss-marshalling is deprecated and planned for removal", id = 554) void jbossMarshallingDetected(); @LogMessage(level = ERROR) @Message(value = "Unable to set method %s accessible", id = 555) void unableToSetAccessible(Method m, @Cause Exception e); @LogMessage(level = INFO) @Message(value = "Starting user marshaller '%s'", id = 556) void startingUserMarshaller(String marshallerClass); @Message(value = "Unable to configure JGroups Stack '%s'", id = 557) CacheConfigurationException unableToAddJGroupsStack(String name, @Cause Exception e); @Message(value = "The store location '%s' is not a child of the global persistent location '%s'", id = 558) CacheConfigurationException forbiddenStoreLocation(Path location, Path global); @LogMessage(level = WARN) @Message(value = "Cannot marshall '%s'", id = 559) void cannotMarshall(Class<?> aClass, @Cause Throwable t); @LogMessage(level = WARN) @Message(value = "The AdvancedExternalizer configuration has been deprecated and will be removed in the future", id = 560) void advancedExternalizerDeprecated(); @Message(value = "Chunk size must be positive, got %d", id = 561) CacheConfigurationException invalidChunkSize(int chunkSize); @Message(value = "Invalid cache loader configuration for '%s'. If a cache loader is configured with purgeOnStartup, the cache loader cannot be shared in a cluster!", id = 562) CacheConfigurationException sharedStoreShouldNotBePurged(String name); @Message(value = "Invalid cache loader configuration for '%s'. 
This implementation does not support being segmented!", id = 563) CacheConfigurationException storeDoesNotSupportBeingSegmented(String name); @LogMessage(level = WARN) @Message(value = "Configured store '%s' is segmented and may use a large number of file descriptors", id = 564) void segmentedStoreUsesManyFileDescriptors(String storeName); @Message(value = "Index.%s is no longer supported. Please update your configuration!", id = 565) CacheConfigurationException indexModeNotSupported(String indexMode); @Message(value = "Thread Pool Factory %s is blocking, but pool %s requires non blocking threads", id = 566) CacheConfigurationException threadPoolFactoryIsBlocking(String name, String poolName); // @LogMessage(level = WARN) // @Message(value = "SerializationConfiguration Version is deprecated since version 10.1 and will be removed in the future. The configured value has no affect on Infinispan marshalling.", id = 567) // void serializationVersionDeprecated(); // @Message(value = "Failed to initialize base and vendor metrics from JMX MBeans", id = 568) // IllegalStateException failedToInitBaseAndVendorMetrics(@Cause Exception e); @LogMessage(level = WARN) @Message(value = "Unable to persist Infinispan internal caches as no global state enabled", id = 569) @Once void warnUnableToPersistInternalCaches(); @Message(value = "Unexpected response from %s: %s", id = 570) IllegalArgumentException unexpectedResponse(Address target, Response response); @Message(value = "RELAY2 not found in the protocol stack. Cannot perform cross-site operations.", id = 571) @Description("To back up caches from one site to another the cluster transport uses the JGroups RELAY2 protocol. Add RELAY2 to your cluster transport configuration.") CacheConfigurationException crossSiteUnavailable(); @LogMessage(level = WARN) @Message(value = "index mode attribute is deprecated and should no longer be specified because its value is automatically detected. 
Most previously supported values are no longer supported. Please check the upgrade guide.", id = 572) void indexModeDeprecated(); @Message(value = "Cannot recreate persisted configuration for cache '%s' because configuration %n%s%n is incompatible with the existing configuration %n%s", id = 573) CacheConfigurationException incompatiblePersistedConfiguration(String cacheName, Configuration configuration, Configuration existing); @LogMessage(level = WARN) @Message(value = "Global state cannot persisted because it is incomplete (usually caused by errors at startup).", id = 574) void incompleteGlobalState(); @Message(value = "PartitionStrategy must be ALLOW_READ_WRITES when numOwners is 1", id = 575) CacheConfigurationException singleOwnerNotSetToAllowReadWrites(); @Message(value = "Cross-site replication not available for local cache.", id = 576) @Description("Cross-site replication capabilities do not apply to local cache mode. Either remove the backup configuration from the local cache or use a distributed or replicated cache mode.") CacheConfigurationException xsiteInLocalCache(); @Message(value = "Converting from unwrapped protostream payload requires the 'type' parameter to be supplied in the destination MediaType", id = 577) MarshallingException missingTypeForUnwrappedPayload(); @LogMessage(level = INFO) @Message(value = "Migrating '%s' persisted data to new format...", id = 578) void startMigratingPersistenceData(String cacheName); @LogMessage(level = INFO) @Message(value = "'%s' persisted data successfully migrated.", id = 579) void persistedDataSuccessfulMigrated(String cacheName); @Message(value = "Failed to migrate '%s' persisted data.", id = 580) PersistenceException persistedDataMigrationFailed(String cacheName, @Cause Throwable cause); @Message(value = "The indexing 'enabled' and the legacy 'index' configs attributes are mutually exclusive", id = 581) CacheConfigurationException indexEnabledAndIndexModeAreExclusive(); @Message(value = "A single indexing 
directory provider is allowed per cache configuration. Setting multiple individual providers for the indexes belonging to a cache is not allowed.", id = 582) CacheConfigurationException foundMultipleDirectoryProviders(); @Message(value = "Cannot configure both maxCount and maxSize in memory configuration", id = 583) CacheConfigurationException cannotProvideBothSizeAndCount(); @Message(value = "The memory attribute(s) %s have been deprecated and cannot be used in conjunction with the new configuration", id = 584) CacheConfigurationException cannotUseDeprecatedAndReplacement(String legacyName); @LogMessage(level = WARN) @Message(value = "Single media-type was specified for keys and values, ignoring individual configurations", id = 585) void ignoringSpecificMediaTypes(); @LogMessage(level = WARN) @Message(value = "The memory configuration element '%s' has been deprecated. Please update your configuration", id = 586) void warnUsingDeprecatedMemoryConfigs(String element); @Message(value = "Cannot change max-size since max-count is already defined", id = 587) CacheConfigurationException cannotChangeMaxSize(); @Message(value = "Cannot change max-count since max-size is already defined", id = 588) CacheConfigurationException cannotChangeMaxCount(); @Message(value = "A store cannot be configured with both preload and purgeOnStartup", id = 589) CacheConfigurationException preloadAndPurgeOnStartupConflict(); @Message(value = "Store cannot be configured with both read and write only!", id = 590) CacheConfigurationException storeBothReadAndWriteOnly(); @Message(value = "Store cannot be configured with purgeOnStartup, shared or passivation if it is read only!", id = 591) CacheConfigurationException storeReadOnlyExceptions(); @Message(value = "Store cannot be configured with fetchPersistenceState or preload if it is write only!", id = 592) CacheConfigurationException storeWriteOnlyExceptions(); @Message(value = "Store %s cannot be configured to be %s as the implementation 
specifies it is already %s!", id = 593) CacheConfigurationException storeConfiguredHasBothReadAndWriteOnly(String storeClassName, NonBlockingStore.Characteristic configured, NonBlockingStore.Characteristic implSpecifies); @Message(value = "At most one store can be set to 'preload'!", id = 594) CacheConfigurationException onlyOnePreloadStoreAllowed(); @LogMessage(level = WARN) @Message(value = "ClusterLoader has been deprecated and will be removed in a future version with no direct replacement", id = 595) void warnUsingDeprecatedClusterLoader(); // @LogMessage(level = WARN) // @Message(value = "Indexing auto-config attribute is deprecated. Please check the upgrade guide.", id = 596) // void autoConfigDeprecated(); @Message(value = "Store %s cannot be configured to be transactional as it does not contain the TRANSACTIONAL characteristic", id = 597) CacheConfigurationException storeConfiguredTransactionalButCharacteristicNotPresent(String storeClassName); @Message(value = "Store must specify a location when global state is disabled", id = 598) CacheConfigurationException storeLocationRequired(); @Message(value = "Store '%s' must specify the '%s' attribute when global state is disabled", id = 598) CacheConfigurationException storeLocationRequired(String storeType, String attributeName); @LogMessage(level = WARN) @Message(value = "Configuration for cache '%s' does not define the encoding for keys or values. 
" + "If you use operations that require data conversion or queries, you should configure the " + "cache with a specific MediaType for keys or values.", id = 599) void unknownEncoding(String cacheName); @Message(value = "Store %s cannot be configured to be shared as it does not contain the SHARED characteristic", id = 600) CacheConfigurationException storeConfiguredSharedButCharacteristicNotPresent(String storeClassName); @Message(value = "Store %s cannot be configured to be segmented as it does not contain the SEGMENTABLE characteristic", id = 601) CacheConfigurationException storeConfiguredSegmentedButCharacteristicNotPresent(String storeClassName); @LogMessage(level = WARN) @Message(value = "Conversions between JSON and Java Objects are deprecated and will be removed in a future version. " + "To read/write values as JSON, it is recommended to define a protobuf schema and store data in the cache using " + "'application/x-protostream' as MediaType", id = 602) void jsonObjectConversionDeprecated(); @Message(value = "Cannot handle cross-site request from site '%s'. Cache '%s' not found.", id = 603) @Description("A remote cluster attempted to replicate data to a cache that does not exist on the local cluster. Either create the cache or modify the backup configuration for the cache on the remote site.") CacheConfigurationException xsiteCacheNotFound(String remoteSite, ByteString cacheName); @Message(value = "Cannot handle cross-site request from site '%s'. Cache '%s' is stopped.", id = 604) @Description("A remote cluster attempted to replicate data to a cache that is not available. Start, or restart, the cache.") CacheConfigurationException xsiteCacheNotStarted(String origin, ByteString cacheName); @Message(value = "Cannot handle cross-site request from site '%s'. Cache '%s' is not clustered.", id = 605) @Description("A remote cluster attempted to replicate data to a local cache. 
Either recreate the cache with a distributed or replicated mode or remove the backup configuration.") CacheConfigurationException xsiteInLocalCache(String origin, ByteString cacheName); @LogMessage(level = ERROR) @Message(value = "Remote site '%s' has an invalid cache configuration. Taking the backup location offline.", id = 606) @Description("An attempt was made to replicate data to a cache that does not have a valid configuration. Check the cache at the remote site and recreate it with a valid distributed or replicated configuration.") void xsiteInvalidConfigurationRemoteSite(String siteName, @Cause CacheConfigurationException exception); @Message(value = "The XSiteEntryMergePolicy is missing. The cache configuration must include a merge policy.", id = 607) @Description("To resolve conflicting entries between backup locations cache configuration must include a merge policy. Recreate the cache and specify a merge policy from the org.infinispan.xsite.spi.XSiteMergePolicy enum or use a conflict resolution algorithm.") CacheConfigurationException missingXSiteEntryMergePolicy(); @LogMessage(level = FATAL) @Message(value = "[IRAC] Unexpected error occurred.", id = 608) @Description("During conflict resolution for cross-site replication an unexpected error occurred. To ensure data consistency between backup locations you should initiate state transfer to synchronize data between clusters.") void unexpectedErrorFromIrac(@Cause Throwable t); @LogMessage(level = DEBUG) @Message(value = "Cannot obtain cache '%s' as it is in FAILED state. 
Please check the configuration", id = 609) void cannotObtainFailedCache(String name, @Cause Throwable t); @Message(value = "Cache configuration must not declare indexed entities if it is not indexed", id = 610) CacheConfigurationException indexableClassesDefined(); @Message(value = "Invalid index storage", id = 611) CacheConfigurationException invalidIndexStorage(); @LogMessage(level = WARN) @Message(value = "Indexing configuration using properties has been deprecated and will be removed in a future " + "version, please consult the docs for the replacements. The following properties have been found: '%s'", id = 612) @Once void indexingPropertiesDeprecated(Properties properties); @LogMessage(level = WARN) @Message(value = "Indexing configuration using properties has been deprecated and will be removed in a future " + "version, please use the <index-writer> and <index-reader> elements to configure indexing behavior.", id = 613) @Once void deprecatedIndexProperties(); @Message(value = "It is not allowed to have different indexing configuration for each indexed type in a cache.", id = 614) CacheConfigurationException foundDifferentIndexConfigPerType(); @Message(value = "Unable to unmarshall '%s' as a marshaller is not present in the user or global SerializationContext", id = 615) MarshallingException marshallerMissingFromUserAndGlobalContext(String type); @Message(value = "Unsupported persisted data version: %s", id = 616) PersistenceException persistedDataMigrationUnsupportedVersion(String magic); @Message(value = "Site '%s' not found.", id = 617) @Description("A backup location that is configured as a site in the JGroups RELAY2 stack is not available. Check the JGroups configuration and cache configuration to ensure that the remote site is configured correctly. 
If the configuration is correct then check that the backup location is online.") IllegalArgumentException siteNotFound(String siteName); @LogMessage(level = WARN) @Message(value = "Cleanup failed for cross-site state transfer. Invoke the cancel-push-state(%s) command if any nodes indicate pending operations to push state.", id = 618) @Description("When cross-site state transfer operations complete or fail due to a network timeout or other exception, the coordinator node sends a cancel-push-state command to other nodes. If any nodes indicate that there are pending operations to push state to a remote site, you can invoke the cancel-push-state command again on those nodes.") void xsiteCancelSendFailed(@Cause Throwable throwable, String remoteSite); @LogMessage(level = WARN) @Message(value = "Cleanup failed for cross-site state transfer. Invoke the cancel-receive(%s) command in site %s if any nodes indicate pending operations to receive state.", id = 619) @Description("When cross-site state transfer operations complete or fail due to a network timeout or other exception, the coordinator node sends a cancel-receive command to nodes. If any nodes indicate that there are pending operations to receive state from a remote site, you can invoke the cancel-receive command again.") void xsiteCancelReceiveFailed(@Cause Throwable throwable, String localSite, String remoteSite); @Message(value = "Cross-site state transfer to '%s' already started", id = 620) @Description("An attempt was made to initiate cross-site state transfer while the operation was already in progress. Wait for the state transfer operation to complete before initiating a subsequent operation. Alternatively you can cancel the cross-site state transfer operation that is in progress.") CacheException xsiteStateTransferAlreadyInProgress(String site); @Message(value = "Element '%1$s' has been removed at %3$s. 
Please use element '%2$s' instead", id = 621) CacheConfigurationException elementRemovedUseOther(String elementName, String newElementName, Location location); @Message(value = "Element '%s' at %s has been removed with no replacement", id = 622) CacheConfigurationException elementRemoved(String elementName, Location location); @Message(value = "Attribute '%1$s' has been removed at %3$s. Please use attribute '%2$s' instead", id = 623) CacheConfigurationException attributeRemovedUseOther(String attributeName, String newAttributeName, Location location); @Message(value = "Attribute '%s' at %s has been removed with no replacement", id = 624) CacheConfigurationException attributeRemoved(String attributeName, Location location); @LogMessage(level = WARN) @Message(value = "Index path not provided and global state disabled, will use the current working directory for storage.", id = 625) void indexLocationWorkingDir(); @LogMessage(level = WARN) @Message(value = "Index path '%s' is not absolute and global state is disabled, will use a dir relative to the current working directory.", id = 626) void indexRelativeWorkingDir(String path); @Message(value = "Invalid cache loader configuration for '%s'. This implementation only supports being segmented!", id = 627) CacheConfigurationException storeRequiresBeingSegmented(String name); @Message(value = "Invalid cache roles '%s'", id = 628) CacheConfigurationException noSuchGlobalRoles(Set<String> cacheRoles); @LogMessage(level = WARN) @Message(value = "Exception completing partial completed transaction %s. Retrying later.", id = 629) void failedPartitionHandlingTxCompletion(GlobalTransaction globalTransaction, @Cause Throwable t); // @LogMessage(level = WARN) // @Message(value = "Another partition or topology changed for while completing partial completed transaction. 
Retrying later.", id = 630) // void topologyChangedPartitionHandlingTxCompletion(); @Message(value = "Cross-site state transfer mode cannot be null.", id = 633) @Description("The mode attribute for cross-site state transfer configuration must have a value of AUTO or MANUAL. Modify the cache configuration with a valid value for the mode attribute.") CacheConfigurationException invalidXSiteStateTransferMode(); @Message(value = "Cross-site automatic state transfer is not compatible with SYNC backup strategy.", id = 634) @Description("Automatic state transfer is not possible if the backup strategy for cross-site replication is synchronous. Modify the cache configuration and set the state transfer mode to MANUAL. Alternatively you can change the backup strategy to use asynchronous mode.") CacheConfigurationException autoXSiteStateTransferModeNotAvailableInSync(); @LogMessage(level = WARN) @Message(value = "[%s] Failed to receive a response from any nodes. Automatic cross-site state transfer to site '%s' is not started.", id = 635) @Description("Before it starts automatic cross-site state transfer operations, the coordinator node checks all local nodes to determine if state transfer is necessary. This error occurs when the coordinator node gets an exception from one or more local nodes. 
Check that nodes in the cluster are online and operating as expected.") void unableToStartXSiteAutStateTransfer(String cacheName, String targetSite, @Cause Throwable t); @Message(value = "State transfer timeout (%d) must be greater than or equal to the remote timeout (%d)", id = 636) CacheConfigurationException invalidStateTransferTimeout(Long stateTransferTimeout, Long remoteTimeout); @Message(value = "Timeout waiting for topology %d, current topology is %d", id = 637) TimeoutException topologyTimeout(int expectedTopologyId, int currentTopologyId); @Message(value = "Timeout waiting for topology %d transaction data", id = 638) TimeoutException transactionDataTimeout(int expectedTopologyId); // @LogMessage(level = ERROR) // @Message(value = "Failed to send remove request to remote site(s). Reason: tombstone was lost. Key='%s'", id = 639) // void sendFailMissingTombstone(Object key); @LogMessage(level = WARN) @Message(value = "SingleFileStore has been deprecated and will be removed in a future version, replaced by SoftIndexFileStore", id = 640) void warnUsingDeprecatedSingleFileStore(); @Message(value = "The transaction %s is already rolled back", id = 641) InvalidTransactionException transactionAlreadyRolledBack(GlobalTransaction gtx); @LogMessage(level = INFO) @Message(value = "Attempting to recover possibly corrupted data file %s", id = 642) void startRecoveringCorruptPersistenceData(String cacheName); @LogMessage(level = INFO) @Message(value = "'%s' persisted data successfully recovered %d entries.", id = 643) void corruptDataSuccessfulMigrated(String cacheName, int entries); @Message(value = "Failed to recover '%s' persisted data.", id = 644) PersistenceException corruptDataMigrationFailed(String cacheName, @Cause Throwable cause); @Message(value = "Asynchronous cache modes, such as %s, cannot use SYNC touch mode for maximum idle expiration.", id = 645) CacheConfigurationException invalidTouchMode(CacheMode cacheMode); @Message(value = "capacityFactor must be 
positive", id = 646) IllegalArgumentException illegalCapacityFactor(); @Message(value = "The configuration for internal cache '%s' cannot be modified", id = 647) IllegalArgumentException cannotUpdateInternalCache(String name); @Message(value = "Cache '%s' is non empty, cannot add store.", id = 648) PersistenceException cannotAddStore(String cacheName); @Message(value = "SingleFileStore does not support max-entries when segmented", id = 649) CacheConfigurationException segmentedSingleFileStoreDoesNotSupportMaxEntries(); @Message(value = "Read invalid data in SingleFileStore file %s, please remove the file and retry", id = 650) PersistenceException invalidSingleFileStoreData(String path); @Message(value = "Max idle is not allowed while using a store without passivation", id = 651) CacheConfigurationException maxIdleNotAllowedWithoutPassivation(); @LogMessage(level = WARN) @Message(value = "Max idle is not supported when using a store", id = 652) void maxIdleNotTestedWithPassivation(); @LogMessage(level = WARN) @Message(value = "The '%s' attribute on the '%s' element has been deprecated. Please use the '%s' attribute instead", id = 653) void attributeDeprecatedUseOther(Enum<?> attr, Enum<?> element, Enum<?> other); @Message(value = "Problem encountered when preloading key %s!", id = 654) PersistenceException problemPreloadingKey(Object key, @Cause Throwable t); @Message(value = "Unable to convert text content to JSON: '%s'", id = 655) EncodingException invalidJson(String s); @Message(value = "The backup '%s' configuration 'failure-policy=%s' is not valid with an ASYNC backup strategy.", id = 656) @Description("Only the 'WARN' and 'IGNORE' failure policies are compatible with asynchronous backups for cross-site replication. 
Modify the backup configuration for the cache to change the failure policy or use the synchronous backup strategy.") CacheConfigurationException invalidPolicyWithAsyncStrategy(String remoteSite, BackupFailurePolicy policy); @Message(value = "The backup '%s' configuration 'failure-policy-class' is not compatible with 'failure-policy=%s'. Use 'failure-policy=\"CUSTOM\"'", id = 657) @Description("The backup configuration for the cache specifies the fully qualified class of a custom failure policy implementation. This is valid with the custom failure policy only. Change the cache configuration to use 'failure-policy=\"CUSTOM\"' or remove the failure policy class.") CacheConfigurationException failurePolicyClassNotCompatibleWith(String remoteSite, BackupFailurePolicy policy); @Message(value = "Initial state transfer timed out for cache %s on %s", id = 658) TimeoutException initialStateTransferTimeout(String cacheName, Address localAddress); @Message(value = "Component %s failed to start", id = 659) CacheConfigurationException componentFailedToStart(String componentName, @Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "%s start failed, stopping any running components", id = 660) void startFailure(String registryName, @Cause Throwable t); @LogMessage(level = WARN) @Message(value = "'%s' has been deprecated with no replacement.", id = 661) void configDeprecated(Enum<?> element); @LogMessage(level = WARN) @Message(value = "Failed to transfer cross-site tombstones to %s for segments %s.", id = 662) @Description("Cross-site tombstones are metadata that ensure data consistency. This error indicates that it was not possible to replicate tombstones for some segments during normal operations. 
No action necessary.") void failedToTransferTombstones(Address requestor, IntSet segments, @Cause Throwable t); @Message(value = "Name must be less than 256 bytes, current name '%s' exceeds the size.", id = 663) CacheConfigurationException invalidNameSize(String name); @Message(value = "Invalid index startup mode: %s", id = 664) CacheConfigurationException invalidIndexStartUpMode(String invalidValue); @LogMessage(level = ERROR) @Message(value = "There was an error in submitted periodic task with %s, not rescheduling.", id = 665) void scheduledTaskEncounteredThrowable(Object identifier, @Cause Throwable t); @Message(value = "Transport clusterName cannot be null.", id = 666) CacheConfigurationException requireNonNullClusterName(); @Message(value = "Transport node-name is not set.", id = 667) CacheConfigurationException requireNodeName(); @Message(value = "Transport node-name must be present in raft-members: %s", id = 668) CacheConfigurationException nodeNameNotInRaftMembers(String members); @Message(value = "FORK protocol required on JGroups channel.", id = 669) IllegalArgumentException forkProtocolRequired(); @Message(value = "Error creating fork channel for %s", id = 670) @LogMessage(level = ERROR) void errorCreatingForkChannel(String name, @Cause Throwable throwable); @Message(value = "RAFT protocol is not available. 
Reason: %s", id = 671) @LogMessage(level = WARN) void raftProtocolUnavailable(String reason); @Message(value = "RAFT protocol is available.", id = 672) @LogMessage(level = INFO) void raftProtocolAvailable(); @Message(value = "Cannot persist RAFT data as global state is disabled", id = 673) CacheConfigurationException raftGlobalStateDisabled(); @Message(value = "There was an error when resetting the SIFS index for cache %s", id = 674) PersistenceException issueEncounteredResettingIndex(String cacheName, @Cause Throwable t); @LogMessage(level = ERROR) @Message(value = "Caught exception while invoking a event logger listener!", id = 675) void failedInvokingEventLoggerListener(@Cause Throwable e); @LogMessage(level = WARN) @Message(value = "Store '%s'#isAvailable check threw Exception", id = 676) void storeIsAvailableCheckThrewException(@Cause Throwable e, String storeImpl); @LogMessage(level = WARN) @Message(value = "Store '%s'#isAvailable completed Exceptionally", id = 677) void storeIsAvailableCompletedExceptionally(@Cause Throwable e, String storeImpl); @LogMessage(level = WARN) @Message(value = "Persistence is unavailable because of store %s", id = 678) void persistenceUnavailable(String storeImpl); @LogMessage(level = INFO) @Message(value = "Persistence is now available", id = 679) void persistenceAvailable(); @Message(value = "Expiration (Max idle or Lifespan) is not allowed while using a store '%s' that does not support expiration, unless it is configured as read only", id = 680) CacheConfigurationException expirationNotAllowedWhenStoreDoesNotSupport(String storeImpl); @Message(value = "Missing required property '%s' for attribute '%s' at %s", id = 681) CacheConfigurationException missingRequiredProperty(String property, String name, Location location); @Message(value = "Attribute '%2$s' of element '%1$s' has an illegal value '%3$s' at %4$s: %5$s", id = 686) CacheConfigurationException invalidAttributeValue(String element, String attribute, String value, 
Location location, String message); @Message(value = "Attribute '%2$s' of element '%1$s' has an illegal value '%3$s' at %5$s. Expecting one of %4$s.", id = 687) CacheConfigurationException invalidAttributeEnumValue(String element, String attribute, String value, String world, Location location); @LogMessage(level = WARN) @Message(value = "Attribute '%s' of element '%s' has been deprecated since schema version %d.%d. Refer to the upgrade guide", id = 688) void attributeDeprecated(String name, String element, int major, int minor); @LogMessage(level = WARN) @Message(value = "Recovering cache '%s' but there are missing members, known members %s of a total of %s", id = 689) void recoverFromStateMissingMembers(String cacheName, List<Address> members, int total); MissingMembersException recoverFromStateMissingMembers(String cacheName, List<Address> members, String total); @LogMessage(level = DEBUG) @Message(value = "We cannot find a configuration for the cache '%s' in the available configurations: '%s'. " + "This cache has been probably removed by another thread. Skip to writing it.", id = 690) void configurationNotFound(String cacheName, Collection<String> definedConfigurations); @Message(value = "Indexed entity name must not be null or empty", id = 691) CacheConfigurationException indexedEntityNameMissing(); @LogMessage(level = INFO) @Message(value = "Flushed ACL Cache", id = 692) void flushedACLCache(); @Message(value = "Dangling lock file '%s' in persistent global state, probably left behind by an unclean shutdown. ", id = 693) CacheConfigurationException globalStateLockFilePresent(File lockFile); @Message(value = "Cache '%s' has number of owners %d but is missing too many members (%d/%d) to reinstall topology", id = 694) MissingMembersException missingTooManyMembers(String cacheName, int owners, int missing, int total); }
131,344
54.373103
490
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/LogSupplier.java
package org.infinispan.util.logging;

/**
 * Supplies the {@link Log} instance a component should write to, together with a
 * cached answer for whether TRACE-level logging is currently active on it.
 *
 * @author Pedro Ruivo
 * @since 12.0
 */
public interface LogSupplier {

   /**
    * @return {@code true} if "TRACE" is enabled in this {@link Log} instance, {@code false} otherwise.
    */
   boolean isTraceEnabled();

   /**
    * @return The {@link Log} instance.
    */
   Log getLog();
}
383
17.285714
103
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/LogFactory.java
package org.infinispan.util.logging;

import org.infinispan.util.ByteString;
import org.jboss.logging.Logger;
import org.jboss.logging.NDC;

/**
 * Factory that creates {@link Log} instances, delegating to JBoss Logging.
 * Also offers small helpers to push/pop the cache name onto the logging NDC,
 * guarded by the caller's trace flag so the NDC is only touched when tracing.
 *
 * @author Manik Surtani
 * @since 4.0
 */
public class LogFactory {

   /**
    * @return the typed {@link Log} message logger whose category is the given class name.
    */
   public static Log getLog(Class<?> clazz) {
      return Logger.getMessageLogger(Log.class, clazz.getName());
   }

   /**
    * @return a message logger of the requested logger interface, categorized by the given class name.
    */
   public static <T> T getLog(Class<?> clazz, Class<T> logClass) {
      return Logger.getMessageLogger(logClass, clazz.getName());
   }

   /**
    * @return a message logger of the requested logger interface under {@code Log.LOG_ROOT + category}.
    */
   public static <T> T getLog(String category, Class<T> logClass) {
      return Logger.getMessageLogger(logClass, Log.LOG_ROOT + category);
   }

   /**
    * @return a plain JBoss {@link Logger} under {@code Log.LOG_ROOT + category}.
    */
   public static Logger getLogger(String category) {
      return Logger.getLogger(Log.LOG_ROOT + category);
   }

   /**
    * Pushes the cache name onto the NDC, but only when tracing is enabled.
    */
   public static void pushNDC(String cacheName, boolean isTrace) {
      if (!isTrace) {
         return;
      }
      NDC.push(cacheName);
   }

   /**
    * Pushes the cache name onto the NDC, but only when tracing is enabled.
    */
   public static void pushNDC(ByteString cacheName, boolean isTrace) {
      if (!isTrace) {
         return;
      }
      NDC.push(cacheName.toString());
   }

   /**
    * Pops the current NDC entry, but only when tracing is enabled
    * (mirrors the guard used by {@code pushNDC} so push/pop stay balanced).
    */
   public static void popNDC(boolean isTrace) {
      if (!isTrace) {
         return;
      }
      NDC.pop();
   }
}
1,137
23.212766
72
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/Messages.java
package org.infinispan.util.logging.events;

import static org.jboss.logging.Messages.getBundle;

import java.util.Collection;

import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageBundle;

/**
 * Messages.
 * <p>
 * JBoss Logging message bundle for event-log text. The {@code value} strings are
 * user-visible runtime output and the {@code id}s form the "ISPN" message codes;
 * neither may be changed casually.
 * <p>
 * NOTE(review): ids fall in two visible ranges (312-320 and 100000+) — confirm the
 * historical split before renumbering anything.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
@MessageBundle(projectCode = "ISPN")
public interface Messages {
   Messages MESSAGES = getBundle(Messages.class);

   // Decorations prepended to event-log messages (no message ids).
   @Message(value = "[Context=%s]")
   String eventLogContext(String ctx);

   @Message(value = "[User=%s]")
   String eventLogWho(String who);

   @Message(value = "[Scope=%s]")
   String eventLogScope(String scope);

   @Message(value = "Node %s joined the cluster", id = 100000)
   String nodeJoined(Address joiner);

   @Message(value = "Node %s left the cluster", id = 100001)
   String nodeLeft(Address leaver);

   @Message(value = "Starting rebalance with members %s, phase %s, topology id %d", id = 100002)
   String cacheRebalanceStart(Collection<Address> members, CacheTopology.Phase phase, int topologyId);

//   @Message(value = "Node %s finished rebalance phase %s with topology id %d", id = 100003)
//   String rebalancePhaseConfirmedOnNode(Address node, CacheTopology.Phase phase, int topologyId);

   @Message(value = "Lost data because of graceful leaver %s", id = 312)
   String lostDataBecauseOfGracefulLeaver(Address leaver);

   @Message(value = "Lost data because of abrupt leavers %s", id = 313)
   String lostDataBecauseOfAbruptLeavers(Collection<Address> leavers);

   @Message(value = "Lost at least half of the stable members, possible split brain causing data inconsistency. Current members are %s, lost members are %s, stable members are %s", id = 314)
   String minorityPartition(Collection<Address> currentMembers, Collection<Address> lostMembers, Collection<Address> stableMembers);

   @Message(value = "Unexpected availability mode %s, partition %s", id = 315)
   String unexpectedAvailabilityMode(AvailabilityMode availabilityMode, CacheTopology cacheTopology);

   @Message(value = "Lost data because of graceful leaver %s, entering degraded mode", id = 316)
   String enteringDegradedModeGracefulLeaver(Address leaver);

   @Message(value = "Lost data because of abrupt leavers %s, assuming a network split and entering degraded mode", id = 317)
   String enteringDegradedModeLostData(Collection<Address> leavers);

   @Message(value = "Lost at least half of the stable members, assuming a network split and entering degraded mode. Current members are %s, lost members are %s, stable members are %s", id = 318)
   String enteringDegradedModeMinorityPartition(Collection<Address> currentMembers, Collection<Address> lostMembers, Collection<Address> stableMembers);

   @Message(value = "After merge (or coordinator change), cache still hasn't recovered all its data and must stay in degraded mode. Current members are %s, lost members are %s, stable members are %s", id = 319)
   String keepingDegradedModeAfterMergeDataLost(Collection<Address> currentMembers, Collection<Address> lostMembers, Collection<Address> stableMembers);

   @Message(value = "After merge (or coordinator change), cache still hasn't recovered a majority of members and must stay in degraded mode. Current members are %s, lost members are %s, stable members are %s", id = 320)
   String keepingDegradedModeAfterMergeMinorityPartition(Collection<Address> currentMembers, Collection<Address> lostMembers, Collection<Address> stableMembers);

   @Message(value = "After merge (or coordinator change), the coordinator failed to recover cluster. Cluster members are %s.", id = 100004)
   String clusterRecoveryFailed(Collection<Address> members);

   @Message(value = "Site '%s' is online.", id = 100005)
   String siteOnline(String siteName);

   @Message(value = "Site '%s' is offline.", id = 100006)
   String siteOffline(String siteName);

   @Message(value = "After merge (or coordinator change), recovered members %s with topology id %d", id = 100007)
   String cacheRecoveredAfterMerge(Collection<Address> members, int topologyId);

   @Message(value = "Updating cache members list %s, topology id %d", id = 100008)
   String cacheMembersUpdated(Collection<Address> members, int topologyId);

   @Message(value = "Advancing to rebalance phase %s, topology id %d", id = 100009)
   String cacheRebalancePhaseChange(CacheTopology.Phase phase, int topologyId);

   @Message(value = "Finished rebalance with members %s, topology id %s", id = 100010)
   String rebalanceFinished(Collection<Address> members, int topologyId);

   @Message(value = "Entering availability mode %s, topology id %s", id = 100011)
   String cacheAvailabilityModeChange(AvailabilityMode availabilityMode, int topologyId);

   @Message(value = "Starting conflict resolution with members %s, topology id %d", id = 100012)
   String conflictResolutionStarting(Collection<Address> members, int topologyId);

   @Message(value = "Finished conflict resolution with members %s, topology id %d", id = 100013)
   String conflictResolutionFinished(Collection<Address> members, int topologyId);

   @Message(value = "Failed conflict resolution with members %s, topology id %d: %s", id = 100014)
   String conflictResolutionFailed(Collection<Address> members, int topologyId, String errorMessage);

   @Message(value = "Cancelled conflict resolution with members %s, topology id %s", id = 100015)
   String conflictResolutionCancelled(Collection<Address> members, int topologyId);

   @Message(value = "Rebalance failed with members %s, topology id %s and cause %s", id = 100016)
   String rebalanceFinishedWithFailure(Collection<Address> members, int topologyId, Throwable t);
}
5,813
51.854545
219
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLogCategory.java
package org.infinispan.util.logging.events;

import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.protostream.annotations.ProtoEnumValue;
import org.infinispan.protostream.annotations.ProtoTypeId;

/**
 * EventLogCategory.
 * <p>
 * The broad category an event log entry belongs to. Marshalled via ProtoStream: each
 * constant's {@code number} is part of the wire format and must stay stable.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
@ProtoTypeId(ProtoStreamTypeIds.EVENT_LOG_CATEGORY)
public enum EventLogCategory {

   @ProtoEnumValue(number = 0)
   LIFECYCLE,

   @ProtoEnumValue(number = 1)
   CLUSTER,

   @ProtoEnumValue(number = 2)
   SECURITY,

   @ProtoEnumValue(number = 3)
   TASKS
}
560
19.035714
61
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLogManager.java
package org.infinispan.util.logging.events;

import static java.util.Objects.requireNonNull;

import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.actions.SecurityActions;

/**
 * EventLogManager.
 *
 * This is the entry point to the event logger.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
public interface EventLogManager {

   /**
    * @return the event logger for the given {@link EmbeddedCacheManager}
    * @throws IllegalLifecycleStateException if the cache manager is not running
    */
   static EventLogger getEventLogger(EmbeddedCacheManager cacheManager) {
      requireNonNull(cacheManager, "EmbeddedCacheManager can't be null.");
      if (cacheManager.getStatus() != ComponentStatus.RUNNING) {
         throw new IllegalLifecycleStateException();
      }
      // Resolve the running EventLogManager component from the manager's global registry.
      BasicComponentRegistry components = SecurityActions.getGlobalComponentRegistry(cacheManager)
            .getComponent(BasicComponentRegistry.class);
      return components.getComponent(EventLogManager.class).running().getEventLogger();
   }

   /**
    * @return the event logger
    */
   EventLogger getEventLogger();

   /**
    * Replaces the event logger with the provided one.
    *
    * @return the previous logger
    */
   EventLogger replaceEventLogger(EventLogger newLogger);
}
1,479
29.204082
80
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLoggerNotifier.java
package org.infinispan.util.logging.events;

import java.util.concurrent.CompletionStage;

import org.infinispan.notifications.Listenable;

/**
 * Notifies registered listeners about logged events.
 */
public interface EventLoggerNotifier extends Listenable {

   /**
    * Notify the listeners about logged information. This method notifies about any type of logged
    * information, without filtering for level or category. It is up to the listeners to filter the
    * desired events.
    *
    * @param log the logged information
    * @return a {@link CompletionStage} which completes when the notification has been sent
    */
   CompletionStage<Void> notifyEventLogged(EventLog log);
}
626
33.833333
119
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLog.java
package org.infinispan.util.logging.events;

import java.time.Instant;
import java.util.Optional;

/**
 * EventLog describes an event log's attributes.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
public interface EventLog extends Comparable<EventLog> {

   /**
    * @return the instant when the event occurred
    */
   Instant getWhen();

   /**
    * @return the level of this event's severity
    */
   EventLogLevel getLevel();

   /**
    * @return the message of the event.
    */
   String getMessage();

   /**
    * @return the category of the event
    */
   EventLogCategory getCategory();

   /**
    * @return the detail of the event, e.g. a stack trace.
    */
   Optional<String> getDetail();

   /**
    * @return the name of the principal if the event occurred within a security context.
    */
   Optional<String> getWho();

   /**
    * @return the context of the event (e.g. the name of a cache).
    */
   Optional<String> getContext();

   /**
    * @return the scope of the event. If the event is specific to a node in the cluster, then this
    *         will be the node's address. If the event is global to the entire cluster this will be
    *         {@link Optional#empty()}
    */
   Optional<String> getScope();
}
1,249
21.727273
99
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLogger.java
package org.infinispan.util.logging.events;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.security.Principal;
import java.time.Instant;
import java.util.List;
import java.util.Optional;

import javax.security.auth.Subject;

import org.infinispan.Cache;
import org.infinispan.notifications.Listenable;
import org.infinispan.remoting.transport.Address;
import org.infinispan.security.Security;

/**
 * EventLogger provides an interface for logging event messages.
 * <p>
 * The {@code scope}/{@code context}/{@code detail}/{@code who} decorator methods all
 * default to no-ops returning {@code this}; implementations that record decorations
 * override them (see {@code DecoratedEventLogger}-style implementations).
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
public interface EventLogger extends Listenable {

   /**
    * Logs a message to the event log with the specified level.
    *
    * @param level    the severity level of the event
    * @param category the category of the event
    * @param message  the message to log
    */
   void log(EventLogLevel level, EventLogCategory category, String message);

   /**
    * Logs a message to the event log using the {@link EventLogLevel#INFO} severity.
    *
    * @param message the message to log
    */
   default void info(EventLogCategory category, String message) {
      log(EventLogLevel.INFO, category, message);
   }

   /**
    * Logs a message to the event log using the {@link EventLogLevel#WARN} severity.
    *
    * @param message the message to log
    */
   default void warn(EventLogCategory category, String message) {
      log(EventLogLevel.WARN, category, message);
   }

   /**
    * Logs a message to the event log using the {@link EventLogLevel#ERROR} severity.
    *
    * @param message the message to log
    */
   default void error(EventLogCategory category, String message) {
      log(EventLogLevel.ERROR, category, message);
   }

   /**
    * Logs a message to the event log using the {@link EventLogLevel#FATAL} severity.
    *
    * @param message the message to log
    */
   default void fatal(EventLogCategory category, String message) {
      log(EventLogLevel.FATAL, category, message);
   }

   /**
    * Sets the scope of this event log, e.g. a node address. This should be used for events which
    * reference a single node in the cluster.
    *
    * @param scope a scope
    * @return the event logger
    */
   default EventLogger scope(String scope) {
      return this;
   }

   /**
    * Sets a node address as the scope of this event log.
    *
    * @param scope the address of the node
    * @return the event logger
    */
   default EventLogger scope(Address scope) {
      return this;
   }

   /**
    * Sets a cache as context of this event log. The name of the cache will be used to indicate the
    * context.
    *
    * @param cache the cache to set as context
    * @return the event logger
    */
   default EventLogger context(Cache<?, ?> cache) {
      return context(cache.getName());
   }

   /**
    * Sets a context of this event log.
    *
    * @param context the name of the context
    * @return the event logger
    */
   default EventLogger context(String context) {
      return this;
   }

   /**
    * Sets a detail for this event log which could include additional information.
    *
    * @param detail the event log detail
    * @return the event logger
    */
   default EventLogger detail(String detail) {
      return this;
   }

   /**
    * Sets a throwable to include as detail for this event. Both the localized message of the
    * Throwable as well as its stack trace will be recorded as the event's detail.
    *
    * @param detail a throwable
    * @return the event logger
    */
   default EventLogger detail(Throwable t) {
      // Render the message followed by the full stack trace into a single detail string.
      StringWriter sw = new StringWriter();
      PrintWriter pw = new PrintWriter(sw);
      pw.println(t.getLocalizedMessage());
      t.printStackTrace(pw);
      return detail(sw.toString());
   }

   /**
    * Sets a security subject for this event log. The name of the main user principal of the
    * subject will be recorded in the log.
    *
    * @param subject the security subject
    * @return the event logger
    */
   default EventLogger who(Subject subject) {
      if (subject != null) {
         return this.who(Security.getSubjectUserPrincipal(subject));
      } else {
         return this;
      }
   }

   /**
    * Sets a security principal for this event log. The name of the principal will be recorded in
    * the log.
    *
    * @param principal the security principal
    * @return the event logger
    */
   default EventLogger who(Principal principal) {
      if (principal != null) {
         return this.who(principal.getName());
      } else {
         return this;
      }
   }

   /**
    * Sets a security name for this event log.
    *
    * @param s the security name
    * @return the event logger
    */
   default EventLogger who(String s) {
      return this;
   }

   /**
    * Retrieves the event logs from the cluster within the specified range.
    *
    * @param start    the instant from which to retrieve the logs
    * @param count    the number of logs to retrieve
    * @param category an optional category filter
    * @param level    an optional level filter
    * @return a list of {@link EventLog}s
    */
   List<EventLog> getEvents(Instant start, int count, Optional<EventLogCategory> category, Optional<EventLogLevel> level);
}
5,432
26.029851
122
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLogSerializer.java
package org.infinispan.util.logging.events;

import java.util.Optional;

import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.configuration.serializing.ConfigurationSerializer;

/**
 * Serializes an {@link EventLog} as a {@code log} element containing nested
 * {@code content} and {@code meta} elements.
 *
 * @since 14.0
 */
public class EventLogSerializer implements ConfigurationSerializer<EventLog> {

   @Override
   public void serialize(ConfigurationWriter writer, EventLog event) {
      writer.writeStartElement("log");
      writer.writeAttribute("category", event.getCategory().name());
      writeContent(writer, event);
      writeMeta(writer, event);
      // closes the `log` element
      writer.writeEndElement();
   }

   // Writes the `content` element: severity, message and (possibly absent) detail.
   private void writeContent(ConfigurationWriter writer, EventLog event) {
      writer.writeStartElement("content");
      writer.writeAttribute("level", event.getLevel().name());
      writer.writeAttribute("message", event.getMessage());
      writer.writeAttribute("detail", event.getDetail().orElse(null));
      writer.writeEndElement();
   }

   // Writes the `meta` element: timestamp plus optional context, scope and principal.
   private void writeMeta(ConfigurationWriter writer, EventLog event) {
      writer.writeStartElement("meta");
      writer.writeAttribute("instant", event.getWhen().toString());
      writer.writeAttribute("context", event.getContext().orElse(null));
      writer.writeAttribute("scope", event.getScope().orElse(null));
      writer.writeAttribute("who", event.getWho().orElse(null));
      writer.writeEndElement();
   }
}
1,289
31.25
78
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/EventLogLevel.java
package org.infinispan.util.logging.events;

import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.protostream.annotations.ProtoEnumValue;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.jboss.logging.Logger.Level;

/**
 * EventLogLevel.
 * <p>
 * Severity of an event log entry, with a mapping onto the equivalent JBoss Logging
 * {@link Level}. Marshalled via ProtoStream: each constant's {@code number} is part of
 * the wire format and must stay stable.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
@ProtoTypeId(ProtoStreamTypeIds.EVENT_LOG_LEVEL)
public enum EventLogLevel {

   @ProtoEnumValue(number = 0)
   INFO(Level.INFO),

   @ProtoEnumValue(number = 1)
   WARN(Level.WARN),

   @ProtoEnumValue(number = 2)
   ERROR(Level.ERROR),

   @ProtoEnumValue(number = 3)
   FATAL(Level.FATAL);

   // The JBoss Logging level this event level is written at.
   private final Level loggerLevel;

   EventLogLevel(Level loggerLevel) {
      this.loggerLevel = loggerLevel;
   }

   /**
    * @return the equivalent JBoss Logging {@link Level}
    */
   public Level toLoggerLevel() {
      return loggerLevel;
   }
}
815
19.923077
61
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/impl/EventLogManagerImpl.java
package org.infinispan.util.logging.events.impl; import org.infinispan.commons.time.TimeService; import org.infinispan.factories.annotations.Inject; import org.infinispan.factories.annotations.Start; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.util.logging.events.EventLogManager; import org.infinispan.util.logging.events.EventLogger; import org.infinispan.util.logging.events.EventLoggerNotifier; /** * EventLogManagerImpl. The default implementation of the EventLogManager. * * @author Tristan Tarrant * @since 8.2 */ @Scope(Scopes.GLOBAL) public class EventLogManagerImpl implements EventLogManager { @Inject protected EventLoggerNotifier notifier; @Inject protected TimeService timeService; private EventLogger logger; @Start public void start() { this.logger = new BasicEventLogger(notifier, timeService); } @Override public EventLogger replaceEventLogger(EventLogger newLogger) { EventLogger oldLogger = logger; logger = newLogger; return oldLogger; } @Override public EventLogger getEventLogger() { return logger; } }
1,175
26.348837
74
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/impl/BaseEventLog.java
package org.infinispan.util.logging.events.impl;

import java.time.Instant;
import java.util.Optional;

import net.jcip.annotations.Immutable;

import org.infinispan.util.logging.events.EventLog;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogLevel;

/**
 * Immutable value implementation of {@link EventLog}. The optional attributes
 * (detail, who, context, scope) are stored as nullable fields and surfaced as
 * {@link Optional}s by the accessors.
 */
@Immutable
public class BaseEventLog implements EventLog {
   protected final Instant when;
   protected final EventLogLevel level;
   protected final EventLogCategory category;
   protected final String message;
   // Nullable optional attributes.
   protected final String detail;
   protected final String who;
   protected final String context;
   protected final String scope;

   public BaseEventLog(Instant when, EventLogLevel level, EventLogCategory category, String message, String detail,
         String context, String who, String scope) {
      this.when = when;
      this.level = level;
      this.category = category;
      this.message = message;
      this.detail = detail;
      this.who = who;
      this.context = context;
      this.scope = scope;
   }

   // Convenience constructor for events without optional attributes.
   public BaseEventLog(Instant when, EventLogLevel level, EventLogCategory category, String message) {
      this(when, level, category, message, null, null, null, null);
   }

   @Override
   public Instant getWhen() {
      return when;
   }

   @Override
   public EventLogLevel getLevel() {
      return level;
   }

   @Override
   public String getMessage() {
      return message;
   }

   @Override
   public EventLogCategory getCategory() {
      return category;
   }

   @Override
   public Optional<String> getDetail() {
      return Optional.ofNullable(detail);
   }

   @Override
   public Optional<String> getWho() {
      return Optional.ofNullable(who);
   }

   @Override
   public Optional<String> getContext() {
      return Optional.ofNullable(context);
   }

   @Override
   public Optional<String> getScope() {
      return Optional.ofNullable(scope);
   }

   @Override
   public int compareTo(EventLog that) {
      // Intentionally backwards: sorting yields newest-first ordering.
      // NOTE(review): compareTo is not consistent with equals (no equals/hashCode
      // override) — confirm callers never rely on sorted-set semantics.
      return that.getWhen().compareTo(this.when);
   }

   @Override
   public String toString() {
      return "BaseEventLog{" +
            "when=" + when +
            ", level=" + level +
            ", category=" + category +
            ", message='" + message + '\'' +
            ", detail='" + detail + '\'' +
            ", who='" + who + '\'' +
            ", context='" + context + '\'' +
            ", scope='" + scope + '\'' +
            '}';
   }
}
2,498
24.762887
115
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/impl/DecoratedEventLogger.java
package org.infinispan.util.logging.events.impl;

import static org.infinispan.util.logging.events.Messages.MESSAGES;

import java.time.Instant;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;

import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.events.EventLog;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogLevel;
import org.infinispan.util.logging.events.EventLogger;

/**
 * DecoratedEventLogger. Provides a way to decorate an EventLog with additional information.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
public class DecoratedEventLogger implements EventLogger {
   private static final String LOCAL_SCOPE = "local";
   private final EventLogger delegate;
   protected String detail;
   protected String context;
   protected String scope;
   protected String who;

   protected DecoratedEventLogger(EventLogger delegate) {
      this.delegate = delegate;
   }

   @Override
   public void log(EventLogLevel level, EventLogCategory category, String message) {
      StringBuilder decorated = new StringBuilder();
      addLogsToBuilder(decorated);
      // We don't include detail in this implementation
      decorated.append(' ').append(message);
      delegate.log(level, category, decorated.toString());
   }

   // Prepends whichever of context/scope/who decorations have been set.
   protected void addLogsToBuilder(StringBuilder sb) {
      if (context != null) {
         sb.append(MESSAGES.eventLogContext(context));
      }
      if (scope != null) {
         sb.append(MESSAGES.eventLogScope(scope));
      }
      if (who != null) {
         sb.append(MESSAGES.eventLogWho(who));
      }
   }

   @Override
   public EventLogger who(String who) {
      this.who = who;
      return this;
   }

   @Override
   public EventLogger scope(String scope) {
      this.scope = scope;
      return this;
   }

   @Override
   public EventLogger scope(Address scope) {
      // A null address marks the event as local to this node.
      if (scope == null) {
         this.scope = LOCAL_SCOPE;
      } else {
         this.scope = scope.toString();
      }
      return this;
   }

   @Override
   public EventLogger context(String context) {
      this.context = context;
      return this;
   }

   @Override
   public EventLogger detail(String detail) {
      this.detail = detail;
      return this;
   }

   // Everything below is delegated untouched.

   @Override
   public List<EventLog> getEvents(Instant start, int count, Optional<EventLogCategory> category, Optional<EventLogLevel> level) {
      return delegate.getEvents(start, count, category, level);
   }

   @Override
   public CompletionStage<Void> addListenerAsync(Object listener) {
      return delegate.addListenerAsync(listener);
   }

   @Override
   public CompletionStage<Void> removeListenerAsync(Object listener) {
      return delegate.removeListenerAsync(listener);
   }

   @Override
   public Set<Object> getListeners() {
      return delegate.getListeners();
   }
}
2,814
26.871287
130
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/impl/BasicEventLogger.java
package org.infinispan.util.logging.events.impl;

import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;

import org.infinispan.commons.time.TimeService;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.util.logging.events.EventLog;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogLevel;
import org.infinispan.util.logging.events.EventLogger;
import org.infinispan.util.logging.events.EventLoggerNotifier;

/**
 * BasicEventLogger. An event logger which doesn't do anything aside from sending events to the
 * logger and notifying the listeners.
 *
 * @author Tristan Tarrant
 * @since 8.2
 */
public class BasicEventLogger implements EventLogger {
   private final EventLoggerNotifier notifier;
   private final TimeService timeService;

   public BasicEventLogger(EventLoggerNotifier notifier, TimeService timeService) {
      this.notifier = notifier;
      this.timeService = timeService;
   }

   // Each decoration request wraps this logger in a fresh decorator.
   private DecoratedEventLogger decorate() {
      return new DecoratedEventLogger(this);
   }

   @Override
   public EventLogger scope(String scope) {
      return decorate().scope(scope);
   }

   @Override
   public EventLogger context(String context) {
      return decorate().context(context);
   }

   @Override
   public EventLogger detail(String detail) {
      return decorate().detail(detail);
   }

   @Override
   public EventLogger who(String who) {
      return decorate().who(who);
   }

   @Override
   public void log(EventLogLevel level, EventLogCategory category, String message) {
      // Write to the regular category logger first, then notify listeners and wait
      // for the notification to complete.
      LogFactory.getLogger(category.toString()).log(level.toLoggerLevel(), message);
      EventLog event = new BaseEventLog(timeService.instant(), level, category, message);
      CompletionStages.join(notifier.notifyEventLogged(event));
   }

   /**
    * The basic event logger doesn't collect anything.
    */
   @Override
   public List<EventLog> getEvents(Instant start, int count, Optional<EventLogCategory> category, Optional<EventLogLevel> level) {
      return Collections.emptyList();
   }

   @Override
   public CompletionStage<Void> addListenerAsync(Object listener) {
      return notifier.addListenerAsync(listener);
   }

   @Override
   public CompletionStage<Void> removeListenerAsync(Object listener) {
      return notifier.removeListenerAsync(listener);
   }

   @Override
   public Set<Object> getListeners() {
      return notifier.getListeners();
   }
}
2,591
29.494118
130
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/events/impl/EventLoggerNotifierImpl.java
package org.infinispan.util.logging.events.impl;

import java.lang.annotation.Annotation;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CopyOnWriteArrayList;

import jakarta.transaction.Transaction;

import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.impl.AbstractListenerImpl;
import org.infinispan.notifications.impl.ListenerInvocation;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.util.logging.annotation.impl.Logged;
import org.infinispan.util.logging.events.EventLog;
import org.infinispan.util.logging.events.EventLoggerNotifier;

/**
 * Default {@link EventLoggerNotifier}: dispatches logged {@link EventLog}s to listener
 * methods annotated with {@link Logged}.
 */
@Scope(Scopes.GLOBAL)
public class EventLoggerNotifierImpl extends AbstractListenerImpl<EventLog, ListenerInvocation<EventLog>>
      implements EventLoggerNotifier {
   private static final Log log = LogFactory.getLog(EventLoggerNotifierImpl.class);
   // The only listener annotation this notifier accepts: @Logged methods taking an EventLog.
   private static final Map<Class<? extends Annotation>, Class<?>> allowedListeners = new HashMap<>(1);
   // Invocations for the @Logged annotation; copy-on-write since reads vastly outnumber writes.
   private final List<ListenerInvocation<EventLog>> listeners = new CopyOnWriteArrayList<>();

   static {
      allowedListeners.put(Logged.class, EventLog.class);
   }

   public EventLoggerNotifierImpl() {
      // Register our invocation list in the inherited per-annotation map.
      listenersMap.put(Logged.class, listeners);
   }

   // Builds plain synchronous/asynchronous listener invocations; no extra wrapping needed here.
   private class DefaultBuilder extends AbstractInvocationBuilder {
      @Override
      public ListenerInvocation<EventLog> build() {
         return new ListenerInvocationImpl<>(target, method, sync, classLoader, subject);
      }
   }

   @Override
   protected Log getLog() {
      return log;
   }

   @Override
   protected Map<Class<? extends Annotation>, Class<?>> getAllowedMethodAnnotations(Listener l) {
      return allowedListeners;
   }

   @Override
   public CompletionStage<Void> addListenerAsync(Object listener) {
      // Validation happens synchronously; the returned stage is already complete.
      validateAndAddListenerInvocations(listener, new DefaultBuilder());
      return CompletableFutures.completedNull();
   }

   @Override
   public CompletionStage<Void> removeListenerAsync(Object listener) {
      removeListenerFromMaps(listener);
      return CompletableFutures.completedNull();
   }

   @Override
   protected Transaction suspendIfNeeded() {
      // Event-log notifications are not transactional: nothing to suspend.
      return null;
   }

   @Override
   protected void resumeIfNeeded(Transaction transaction) {
      // no-op
   }

   @Override
   protected void handleException(Throwable t) {
      log.failedInvokingEventLoggerListener(t);
   }

   @Override
   public CompletionStage<Void> notifyEventLogged(EventLog log) {
      // Fast path: nothing registered, nothing to invoke.
      if (!listeners.isEmpty()) {
         return invokeListeners(log, listeners);
      }
      return CompletableFutures.completedNull();
   }
}
2,864
30.141304
138
java
null
infinispan-main/core/src/main/java/org/infinispan/util/logging/annotation/impl/Logged.java
package org.infinispan.util.logging.annotation.impl;

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

/**
 * <b>This annotation is for internal use only!</b>
 * <p>
 * This annotation should be used on methods that need to be notified when information is logged by the
 * {@link org.infinispan.util.logging.events.EventLogger}. There is no distinction between the log level or category.
 * <p>
 * Methods annotated with this annotation should accept a single parameter, an {@link
 * org.infinispan.util.logging.events.EventLog}, otherwise a {@link
 * org.infinispan.notifications.IncorrectListenerException} will be thrown when registering your listener.
 * <p>
 * Any exceptions thrown by the listener will abort the call. Any other listeners not yet called will not be called.
 *
 * @see org.infinispan.notifications.Listener
 * @since 14.0
 */
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface Logged {
}
1,057
38.185185
117
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/package-info.java
/**
 * Implementations of different executors used for asynchronous operation.
 */
package org.infinispan.executors;
117
22.6
74
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/LimitedExecutor.java
package org.infinispan.executors; import java.util.ArrayDeque; import java.util.Deque; import java.util.IdentityHashMap; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.Executor; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiConsumer; import java.util.function.Supplier; import org.infinispan.commons.IllegalLifecycleStateException; import org.infinispan.commons.util.concurrent.CompletableFutures; import org.infinispan.util.concurrent.WithinThreadExecutor; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; import org.jboss.logging.NDC; import net.jcip.annotations.GuardedBy; /** * Executes tasks in the given executor, but never has more than {@code maxConcurrentTasks} tasks running at the same * time. * * <p>A task can finish running without allowing another task to run in its stead, with {@link #executeAsync(Supplier)}. * A new task will only start after the {@code CompletableFuture} returned by the task has completed.</p> * * <p><em>Blocking mode.</em> If the executor is a {@link WithinThreadExecutor}, tasks will run in the thread that * submitted them. 
If there are no available permits, the caller thread will block until a permit becomes available.</p> * * @author Dan Berindei * @since 9.0 */ public class LimitedExecutor implements Executor { private static final Log log = LogFactory.getLog(LimitedExecutor.class); private final Lock lock = new ReentrantLock(); private final Condition taskFinishedCondition = lock.newCondition(); private final String name; private final Executor executor; private final boolean blocking; private final Runner runner = new Runner(); private volatile boolean running = true; @GuardedBy("lock") private int availablePermits; @GuardedBy("lock") private Map<Thread, Object> threads; @GuardedBy("lock") private final Deque<Runnable> queue = new ArrayDeque<>(); public LimitedExecutor(String name, Executor executor, int maxConcurrentTasks) { this.name = name; this.executor = executor; this.availablePermits = maxConcurrentTasks; this.blocking = executor instanceof WithinThreadExecutor; threads = new IdentityHashMap<>(maxConcurrentTasks); } /** * Stops the executor and cancels any queued tasks. * * Stop and interrupt any tasks that have already been handed to the underlying executor. 
*/ public void shutdownNow() { log.tracef("Stopping limited executor %s", name); running = false; acquireLock(); try { queue.clear(); for (Thread t : threads.keySet()) { t.interrupt(); } } finally { unlockLock(); } } @Override public void execute(Runnable command) { if (!running) throw new IllegalLifecycleStateException("Limited executor " + name + " is not running!"); if (blocking) { CompletableFuture<Void> f1 = new CompletableFuture<>(); executeInternal(() -> { f1.complete(null); removePermit(); }); try { CompletableFutures.await(f1); command.run(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new IllegalLifecycleStateException(ie); } catch (Exception e) { log.error("Exception in task", e); } finally { addPermit(); tryExecute(); } return; } executeInternal(command); } private void executeInternal(Runnable command) { acquireLock(); try { queue.add(command); } finally { unlockLock(); } tryExecute(); } /** * Similar to {@link #execute(Runnable)}, but the task can continue executing asynchronously, * without blocking the OS thread, while still counting against this executor's limit. * * @param asyncCommand A task that returns a non-null {@link CompletionStage}, * which may be already completed or may complete at some point in the future. 
*/ public void executeAsync(Supplier<CompletionStage<Void>> asyncCommand) { execute(() -> { CompletionStage<Void> future = asyncCommand.get(); // The current permit will be released automatically // If the future is null, don't reserve another permit assert future != null; removePermit(); future.whenComplete(runner); }); } private void tryExecute() { boolean addRunner = false; acquireLock(); try { if (availablePermits > 0) { availablePermits--; addRunner = true; } } finally { unlockLock(); } if (addRunner) { executor.execute(runner); } } private void runTasks() { runnerStarting(); while (running) { Runnable runnable = null; acquireLock(); try { // If the previous task was asynchronous, we can't execute a new one on the same thread if (availablePermits >= 0) { runnable = queue.poll(); } if (runnable == null) { availablePermits++; break; } } finally { unlockLock(); } try { NDC.push(name); actualRun(runnable); } catch (Throwable t) { log.error("Exception in task", t); } finally { NDC.pop(); } } runnerFinished(); } /** * This method is here solely for byte code augmentation via BlockHound, since the runnable should not * block in a non blocking thread, but other parts of LimitedExecutor are okay doing so. 
* @param runnable the runnable to run */ private void actualRun(Runnable runnable) { runnable.run(); } private void runnerStarting() { acquireLock(); try { Thread thread = Thread.currentThread(); threads.put(thread, thread); } finally { unlockLock(); } } private void runnerFinished() { acquireLock(); try { Thread thread = Thread.currentThread(); threads.remove(thread); taskFinishedCondition.signalAll(); } finally { unlockLock(); } } private void removePermit() { acquireLock(); try { availablePermits--; } finally { unlockLock(); } } private void addPermit() { acquireLock(); try { availablePermits++; } finally { unlockLock(); } } private class Runner implements Runnable, BiConsumer<Void, Throwable> { @Override public void run() { runTasks(); } @Override public void accept(Void aVoid, Throwable throwable) { addPermit(); tryExecute(); } } // Here for instrumentation of blockhound private void acquireLock() { lock.lock(); } // Here for symmetry of acquireLock private void unlockLock() { lock.unlock(); } }
7,372
27.688716
120
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/WithinThreadExecutorFactory.java
package org.infinispan.executors; import java.util.Properties; import java.util.concurrent.ExecutorService; import org.infinispan.commons.executors.ExecutorFactory; import org.infinispan.util.concurrent.WithinThreadExecutor; /** * Executor factory that creates WithinThreadExecutor. This executor executes the tasks in the caller thread. * * @author Pedro Ruivo * @since 5.3 */ public class WithinThreadExecutorFactory implements ExecutorFactory { @Override public ExecutorService getExecutor(Properties p) { return new WithinThreadExecutor(); } }
572
25.045455
109
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/LazyInitializingBlockingTaskAwareExecutorService.java
package org.infinispan.executors; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.AbstractExecutorService; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.infinispan.commons.executors.ThreadPoolExecutorFactory; import org.infinispan.commons.time.TimeService; import org.infinispan.factories.annotations.Stop; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.util.concurrent.BlockingRunnable; import org.infinispan.util.concurrent.BlockingTaskAwareExecutorService; import org.infinispan.util.concurrent.BlockingTaskAwareExecutorServiceImpl; /** * A delegating executor that lazily constructs and initializes the underlying executor. 
* * @author Pedro Ruivo * @since 5.3 */ @Scope(Scopes.GLOBAL) public final class LazyInitializingBlockingTaskAwareExecutorService extends ManageableExecutorService<ExecutorService> implements BlockingTaskAwareExecutorService { private static final BlockingTaskAwareExecutorService STOPPED; static { STOPPED = new EmptyBlockingTaskAwareExecutorService(); } static final class EmptyBlockingTaskAwareExecutorService extends AbstractExecutorService implements BlockingTaskAwareExecutorService { @Override public void execute(BlockingRunnable runnable) { throw new RejectedExecutionException(); } @Override public void checkForReadyTasks() { } @Override public void shutdown() { } @Override public List<Runnable> shutdownNow() { return Collections.emptyList(); } @Override public boolean isShutdown() { return true; } @Override public boolean isTerminated() { return true; } @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { return true; } @Override public void execute(Runnable command) { throw new RejectedExecutionException(); } } private final ThreadPoolExecutorFactory<ExecutorService> executorFactory; private final ThreadFactory threadFactory; private final TimeService timeService; private volatile BlockingTaskAwareExecutorService blockingExecutor; public LazyInitializingBlockingTaskAwareExecutorService(ThreadPoolExecutorFactory<ExecutorService> executorFactory, ThreadFactory threadFactory, TimeService timeService) { this.executorFactory = executorFactory; this.threadFactory = threadFactory; this.timeService = timeService; } @Override public void execute(BlockingRunnable runnable) { initIfNeeded(); blockingExecutor.execute(runnable); } @Override public void checkForReadyTasks() { if (blockingExecutor != null) { blockingExecutor.checkForReadyTasks(); } } @Override public void shutdown() { synchronized (this) { if (blockingExecutor == null) { blockingExecutor = STOPPED; } blockingExecutor.shutdown(); } } @Stop @Override public 
List<Runnable> shutdownNow() { synchronized (this) { if (blockingExecutor == null) { blockingExecutor = STOPPED; } return blockingExecutor.shutdownNow(); } } @Override public boolean isShutdown() { return blockingExecutor == null || blockingExecutor.isShutdown(); } @Override public boolean isTerminated() { return blockingExecutor == null || blockingExecutor.isTerminated(); } @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { if (blockingExecutor == null) return true; else return blockingExecutor.awaitTermination(timeout, unit); } @Override public <T> Future<T> submit(Callable<T> task) { initIfNeeded(); return blockingExecutor.submit(task); } @Override public <T> Future<T> submit(Runnable task, T result) { initIfNeeded(); return blockingExecutor.submit(task, result); } @Override public Future<?> submit(Runnable task) { initIfNeeded(); return blockingExecutor.submit(task); } @Override public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException { initIfNeeded(); return blockingExecutor.invokeAll(tasks); } @Override public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException { initIfNeeded(); return blockingExecutor.invokeAll(tasks, timeout, unit); } @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException { initIfNeeded(); return blockingExecutor.invokeAny(tasks); } @Override public <T> T invokeAny(Collection<? 
extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { initIfNeeded(); return blockingExecutor.invokeAny(tasks, timeout, unit); } @Override public void execute(Runnable command) { initIfNeeded(); blockingExecutor.execute(command); } public BlockingTaskAwareExecutorService getExecutorService() { return blockingExecutor; } private void initIfNeeded() { if (blockingExecutor == null) { synchronized (this) { if (blockingExecutor == null) { // The superclass methods only work if the blockingExecutor is a ThreadPoolExecutor this.executor = executorFactory.createExecutor(threadFactory); this.blockingExecutor = new BlockingTaskAwareExecutorServiceImpl(executor, timeService); } } } } }
6,350
28.67757
163
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/ScheduledExecutorFactory.java
package org.infinispan.executors; import java.util.Properties; import java.util.concurrent.ScheduledExecutorService; /** * Used to configure and create scheduled executors * * @author Manik Surtani * @since 4.0 */ public interface ScheduledExecutorFactory { ScheduledExecutorService getScheduledExecutor(Properties p); }
331
21.133333
63
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/DefaultScheduledExecutorFactory.java
package org.infinispan.executors; import java.util.Properties; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; import org.infinispan.commons.util.TypedProperties; /** * Creates scheduled executors using the JDK Executors service * * @author Manik Surtani * @author Tristan Tarrant * @since 4.0 */ public class DefaultScheduledExecutorFactory implements ScheduledExecutorFactory { final static AtomicInteger counter = new AtomicInteger(0); @Override public ScheduledExecutorService getScheduledExecutor(Properties p) { TypedProperties tp = new TypedProperties(p); final String threadNamePrefix = p.getProperty("threadNamePrefix", p.getProperty("componentName", "Thread")); final int threadPrio = tp.getIntProperty("threadPriority", Thread.MIN_PRIORITY); return Executors.newSingleThreadScheduledExecutor(new ThreadFactory() { public Thread createThread(Runnable r) { Thread th = new Thread(r, "Scheduled-" + threadNamePrefix + "-" + counter.getAndIncrement()); th.setDaemon(true); th.setPriority(threadPrio); return th; } @Override public Thread newThread(Runnable r) { final Runnable runnable = r; return createThread(runnable); } }); } }
1,449
31.954545
114
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/SemaphoreCompletionService.java
package org.infinispan.executors; import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.CompletionService; import java.util.concurrent.Executor; import java.util.concurrent.Future; import java.util.concurrent.FutureTask; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.Semaphore; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.TimeUnit; import org.infinispan.commons.IllegalLifecycleStateException; import org.infinispan.util.concurrent.WithinThreadExecutor; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; /** * Executes tasks in the given executor, but never has more than {@code maxConcurrentTasks} tasks running at the same time. * * @author Dan Berindei * @since 7.2 */ public class SemaphoreCompletionService<T> implements CompletionService<T> { private static final Log log = LogFactory.getLog(SemaphoreCompletionService.class); private final Executor executor; private final CustomSemaphore semaphore; private final BlockingQueue<QueueingTask> queue; private final BlockingQueue<QueueingTask> completionQueue = new LinkedBlockingQueue<>(); private final boolean blocking; public SemaphoreCompletionService(Executor executor, int maxConcurrentTasks) { this.executor = executor; this.semaphore = new CustomSemaphore(maxConcurrentTasks); // Users of WithinThreadExecutor expect the tasks to execute in the thread that submitted them // But with a LinkedBlockingQueue, they could execute on any thread calling backgroundTaskFinished. this.blocking = executor instanceof WithinThreadExecutor; this.queue = blocking ? new SynchronousQueue<>() : new LinkedBlockingQueue<>(); } public List<? extends Future<T>> drainCompletionQueue() { List<QueueingTask> list = new ArrayList<QueueingTask>(); completionQueue.drainTo(list); return list; } /** * When stopping, cancel any queued tasks. 
*/ public void cancelQueuedTasks() { ArrayList<QueueingTask> queuedTasks = new ArrayList<QueueingTask>(); queue.drainTo(queuedTasks); for (QueueingTask task : queuedTasks) { task.cancel(false); } } /** * Called from a task to remove the permit that would otherwise be freed when the task finishes running * * When the asynchronous part of the task finishes, it must call {@link #backgroundTaskFinished(Callable)} * to make the permit available again. */ public void continueTaskInBackground() { if (log.isTraceEnabled()) log.tracef("Moving task to background, available permits %d", semaphore.availablePermits()); // Prevent other tasks from running with this task's permit semaphore.removePermit(); } /** * Signal that a task that called {@link #continueTaskInBackground()} has finished and * optionally execute another task on the just-freed thread. */ public Future<T> backgroundTaskFinished(final Callable<T> cleanupTask) { QueueingTask futureTask = null; if (cleanupTask != null) { if (log.isTraceEnabled()) log.tracef("Background task finished, executing cleanup task"); futureTask = new QueueingTask(cleanupTask); executor.execute(futureTask); } else { semaphore.release(); if (log.isTraceEnabled()) log.tracef("Background task finished, available permits %d", semaphore.availablePermits()); executeFront(); } return futureTask; } @Override public Future<T> submit(final Callable<T> task) { QueueingTask futureTask = new QueueingTask(task); return doSubmit(futureTask); } @Override public Future<T> submit(final Runnable task, T result) { QueueingTask futureTask = new QueueingTask(task, result); return doSubmit(futureTask); } private Future<T> doSubmit(QueueingTask futureTask) { if (blocking) { try { semaphore.acquire(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IllegalLifecycleStateException(); } try { futureTask.run(); } finally { semaphore.release(); } } else { queue.add(futureTask); if (log.isTraceEnabled()) log.tracef("New task submitted, 
tasks in queue %d, available permits %d", queue.size(), semaphore.availablePermits()); executeFront(); } return futureTask; } private void executeFront() { while (!queue.isEmpty() && semaphore.tryAcquire()) { QueueingTask next = queue.poll(); if (next != null) { // Execute the task, and it will release the permit when it finishes executor.execute(next); // Only execute one task, if there are other tasks and permits available, they will be scheduled // to be executed either by the threads that released the permits or by the threads that added // the tasks. return; } else { // Perform another iteration, in case someone adds a task and skips executing it just before // we release the permit semaphore.release(); } } } @Override public Future<T> take() throws InterruptedException { return completionQueue.take(); } @Override public Future<T> poll() { return completionQueue.poll(); } @Override public Future<T> poll(long timeout, TimeUnit unit) throws InterruptedException { return completionQueue.poll(timeout, unit); } private class QueueingTask extends FutureTask<T> { public QueueingTask(Callable<T> task) { super(task); } public QueueingTask(Runnable runnable, Object result) { super(runnable, (T) result); } @Override public void run() { try { QueueingTask next = this; do { next.runInternal(); // Don't run another task if the current task called startBackgroundTask() // and there are no more permits available if (semaphore.availablePermits() < 0) break; next = queue.poll(); } while (next != null); } finally { semaphore.release(); // In case we just got a new task between queue.poll() and semaphore.release() if (!queue.isEmpty()) { executeFront(); } } } private void runInternal() { try { if (log.isTraceEnabled()) log.tracef("Task started, tasks in queue %d, available permits %d", queue.size(), semaphore.availablePermits()); super.run(); } finally { completionQueue.offer(this); if (log.isTraceEnabled()) log.tracef("Task finished, tasks in queue %d, available permits %d", 
queue.size(), semaphore.availablePermits()); } } } /** * Extend {@code Semaphore} to expose the {@code reducePermits(int)} method. */ private static class CustomSemaphore extends Semaphore { CustomSemaphore(int permits) { super(permits); } void removePermit() { super.reducePermits(1); } } }
7,454
33.513889
151
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/ExecutorAllCompletionService.java
package org.infinispan.executors; import java.util.concurrent.Callable; import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; /** * Exectues given tasks in provided executor. * * @author Radim Vansa &lt;rvansa@redhat.com&gt; */ public class ExecutorAllCompletionService implements CompletionService<Void> { private ExecutorCompletionService executorService; private AtomicReference<ExecutionException> firstException = new AtomicReference<ExecutionException>(); private AtomicLong scheduled = new AtomicLong(); private AtomicLong completed = new AtomicLong(); public ExecutorAllCompletionService(Executor executor) { this.executorService = new ExecutorCompletionService(executor); } @Override public Future<Void> submit(final Callable<Void> task) { scheduled.incrementAndGet(); Future<Void> future = executorService.submit(task); pollUntilEmpty(); return future; } @Override public Future<Void> submit(final Runnable task, Void result) { scheduled.incrementAndGet(); Future<Void> future = executorService.submit(task, result); pollUntilEmpty(); return future; } private void pollUntilEmpty() { Future<Void> completedFuture; while ((completedFuture = executorService.poll()) != null) { try { completedFuture.get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } catch (ExecutionException e) { if (firstException.get() == null) { firstException.compareAndSet(null, e); } } finally { completed.incrementAndGet(); } } } /** * @return True if all currently scheduled tasks have already been completed, false otherwise; */ public boolean isAllCompleted() { pollUntilEmpty(); return completed.get() >= scheduled.get(); } public long getScheduledTasks() { return 
scheduled.get(); } public long getCompletedTasks() { return completed.get(); } public void waitUntilAllCompleted() { while (completed.get() < scheduled.get()) { // Here is a race - if we poll the last scheduled entry elsewhere, we may wait // another 100 ms until we realize that everything has already completed. // Nevertheless, that's not so bad. try { Future<Void> future = poll(100, TimeUnit.MILLISECONDS); if (future != null) { future.get(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); return; } catch (ExecutionException e) { if (firstException.get() == null) { firstException.compareAndSet(null, e); } } } } public boolean isExceptionThrown() { return firstException.get() != null; } public ExecutionException getFirstException() { return firstException.get(); } @Override public Future<Void> take() throws InterruptedException { Future<Void> future = executorService.take(); completed.incrementAndGet(); return future; } @Override public Future<Void> poll() { Future<Void> future = executorService.poll(); if (future != null) { completed.incrementAndGet(); } return future; } @Override public Future<Void> poll(long timeout, TimeUnit unit) throws InterruptedException { Future<Void> future = executorService.poll(timeout, unit); if (future != null) { completed.incrementAndGet(); } return future; } }
3,963
29.259542
106
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/LazyInitializingExecutorService.java
package org.infinispan.executors; import java.util.Collection; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.infinispan.commons.executors.ThreadPoolExecutorFactory; import org.infinispan.factories.annotations.Stop; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.util.concurrent.WithinThreadExecutor; /** * A delegating executor that lazily constructs and initializes the underlying executor, since unused JDK executors * are expensive. * * @author Manik Surtani * @since 5.1 */ @Scope(Scopes.GLOBAL) public final class LazyInitializingExecutorService extends ManageableExecutorService<ExecutorService> implements ExecutorService { private static final ExecutorService STOPPED; static { STOPPED = new WithinThreadExecutor(); STOPPED.shutdown(); } private final ThreadPoolExecutorFactory<ExecutorService> executorFactory; private final ThreadFactory threadFactory; public LazyInitializingExecutorService( ThreadPoolExecutorFactory<ExecutorService> executorFactory, ThreadFactory threadFactory) { this.executorFactory = executorFactory; this.threadFactory = threadFactory; } private void initIfNeeded() { if (executor == null) { synchronized (this) { if (executor == null) { executor = executorFactory.createExecutor(threadFactory); } } } } @Override public void shutdown() { synchronized (this) { if (executor == null) { executor = STOPPED; } executor.shutdown(); } } @Stop @Override public List<Runnable> shutdownNow() { synchronized (this) { if (executor == null) { executor = STOPPED; } return executor.shutdownNow(); } } @Override public boolean isShutdown() { return executor == null || executor.isShutdown(); } @Override public boolean 
isTerminated() { return executor == null || executor.isTerminated(); } @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { if (executor == null) return true; else return executor.awaitTermination(timeout, unit); } @Override public <T> Future<T> submit(Callable<T> task) { initIfNeeded(); return executor.submit(task); } @Override public <T> Future<T> submit(Runnable task, T result) { initIfNeeded(); return executor.submit(task, result); } @Override public Future<?> submit(Runnable task) { initIfNeeded(); return executor.submit(task); } @Override public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException { initIfNeeded(); return executor.invokeAll(tasks); } @Override public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException { initIfNeeded(); return executor.invokeAll(tasks, timeout, unit); } @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException { initIfNeeded(); return executor.invokeAny(tasks); } @Override public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { initIfNeeded(); return executor.invokeAny(tasks, timeout, unit); } @Override public void execute(Runnable command) { initIfNeeded(); executor.execute(command); } }
3,989
27.297872
163
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/DefaultExecutorFactory.java
package org.infinispan.executors; import java.util.Properties; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.infinispan.commons.executors.ExecutorFactory; import org.infinispan.commons.util.TypedProperties; import org.infinispan.factories.threads.BlockingThreadFactory; import org.infinispan.factories.threads.NonBlockingThreadFactory; /** * Default executor factory that creates executors using the JDK Executors service. * * @author Manik Surtani * @author Tristan Tarrant * @since 4.0 */ public class DefaultExecutorFactory implements ExecutorFactory { private final AtomicInteger counter = new AtomicInteger(0); @Override public ExecutorService getExecutor(Properties p) { TypedProperties tp = TypedProperties.toTypedProperties(p); int maxThreads = tp.getIntProperty("maxThreads", 1); int queueSize = tp.getIntProperty("queueSize", 100000); int coreThreads = queueSize == 0 ? 1 : tp.getIntProperty("coreThreads", maxThreads); long keepAliveTime = tp.getLongProperty("keepAliveTime", 60000); final int threadPrio = tp.getIntProperty("threadPriority", Thread.MIN_PRIORITY); final String threadNamePrefix = tp.getProperty("threadNamePrefix", tp.getProperty("componentName", "Thread")); final String threadNameSuffix = tp.getProperty("threadNameSuffix", ""); String blocking = tp.getProperty("blocking"); ThreadGroup threadGroup; if (blocking == null) { threadGroup = Thread.currentThread().getThreadGroup(); } else { threadGroup = Boolean.parseBoolean(blocking) ? 
new BlockingThreadFactory.ISPNBlockingThreadGroup(threadNamePrefix + "-group") : new NonBlockingThreadFactory.ISPNNonBlockingThreadGroup(threadNamePrefix + "-group"); } BlockingQueue<Runnable> queue = queueSize == 0 ? new SynchronousQueue<>() : new LinkedBlockingQueue<>(queueSize); ThreadFactory tf = new ThreadFactory() { private Thread createThread(Runnable r) { String threadName = threadNamePrefix + "-" + counter.getAndIncrement() + threadNameSuffix; Thread th = new Thread(threadGroup, r, threadName); th.setDaemon(true); th.setPriority(threadPrio); return th; } @Override public Thread newThread(Runnable r) { return createThread(r); } }; return new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.MILLISECONDS, queue, tf, new ThreadPoolExecutor.CallerRunsPolicy()); } }
2,892
41.544118
136
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/ManageableExecutorService.java
package org.infinispan.executors; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.jmx.annotations.DataType; import org.infinispan.jmx.annotations.MBean; import org.infinispan.jmx.annotations.ManagedAttribute; /** * @author Tristan Tarrant &lt;tristan@infinispan.org&gt; * @since 10.0 **/ @MBean @Scope(Scopes.GLOBAL) public abstract class ManageableExecutorService<T extends ExecutorService> { // volatile so reads don't have to be in a synchronized block protected volatile T executor; @ManagedAttribute( description = "Returns the number of threads in this executor.", displayName = "Number of executor threads", dataType = DataType.TRAIT ) public int getPoolSize() { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getPoolSize(); } else { return -1; } } @ManagedAttribute( description = "Returns the number of active executor threads.", displayName = "Number of active executor threads", dataType = DataType.TRAIT ) public int getActiveCount() { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getActiveCount(); } else { return -1; } } @ManagedAttribute( description = "Returns the maximum number of executor threads.", displayName = "Maximum number of executor threads", dataType = DataType.TRAIT, writable = true ) public int getMaximumPoolSize() { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getMaximumPoolSize(); } else { return -1; } } public void setMaximumPoolSize(int maximumPoolSize) { if (executor instanceof ThreadPoolExecutor) { ((ThreadPoolExecutor) executor).setMaximumPoolSize(maximumPoolSize); if (!(((ThreadPoolExecutor)executor).getQueue() instanceof SynchronousQueue)) { ((ThreadPoolExecutor) 
executor).setCorePoolSize(maximumPoolSize); } } else { throw new UnsupportedOperationException(); } } @ManagedAttribute( description = "Returns the largest ever number of executor threads.", displayName = "Largest number of executor threads", dataType = DataType.TRAIT ) public int getLargestPoolSize() { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getLargestPoolSize(); } else { return -1; } } @ManagedAttribute( description = "Returns the number of elements in this executor's queue.", displayName = "Elements in the queue", dataType = DataType.TRAIT ) public int getQueueSize() { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getQueue().size(); } else { return -1; } } @ManagedAttribute( description = "Returns the keep-alive time for this pool's threads", displayName = "Keep-alive for pooled threads", dataType = DataType.TRAIT ) public long getKeepAliveTime() { if (executor instanceof ThreadPoolExecutor) { return ((ThreadPoolExecutor) executor).getKeepAliveTime(TimeUnit.MILLISECONDS); } else { return -1; } } public void setKeepAliveTime(long milliseconds) { if (executor instanceof ThreadPoolExecutor) { ((ThreadPoolExecutor) executor).setKeepAliveTime(milliseconds, TimeUnit.MILLISECONDS); } else { throw new UnsupportedOperationException(); } } }
3,907
30.772358
95
java
null
infinispan-main/core/src/main/java/org/infinispan/executors/LazyInitializingScheduledExecutorService.java
package org.infinispan.executors;

import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.infinispan.commons.executors.ThreadPoolExecutorFactory;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;

/**
 * A delegating scheduled executor that lazily constructs and initializes the underlying scheduled
 * executor, since unused JDK executors are expensive.
 *
 * @author Manik Surtani
 * @since 5.1
 */
@Scope(Scopes.GLOBAL)
public class LazyInitializingScheduledExecutorService
      extends ManageableExecutorService<ScheduledExecutorService> implements ScheduledExecutorService {

   // Sentinel installed by shutdown()/shutdownNow() when the real executor was never created.
   // It is already terminated, so any later submission is rejected instead of lazily starting
   // a new pool after shutdown.
   private static final ScheduledExecutorService STOPPED;

   static {
      STOPPED = new ScheduledThreadPoolExecutor(0);
      STOPPED.shutdown();
   }

   private final ThreadPoolExecutorFactory<ScheduledExecutorService> executorFactory;
   private final ThreadFactory threadFactory;

   public LazyInitializingScheduledExecutorService(
         ThreadPoolExecutorFactory<ScheduledExecutorService> executorFactory, ThreadFactory threadFactory) {
      this.executorFactory = executorFactory;
      this.threadFactory = threadFactory;
   }

   /**
    * Creates the delegate on first use. Double-checked locking is safe here because the
    * {@code executor} field inherited from {@code ManageableExecutorService} is volatile.
    */
   private void initIfNeeded() {
      if (executor == null) {
         synchronized (this) {
            if (executor == null) {
               executor = executorFactory.createExecutor(threadFactory);
            }
         }
      }
   }

   @Override
   public void shutdown() {
      synchronized (this) {
         // Never-started service: install the terminated sentinel so it cannot start later.
         if (executor == null) {
            executor = STOPPED;
         }
         executor.shutdown();
      }
   }

   @Stop
   @Override
   public List<Runnable> shutdownNow() {
      synchronized (this) {
         // Never-started service: install the terminated sentinel so it cannot start later.
         if (executor == null) {
            executor = STOPPED;
         }
         return executor.shutdownNow();
      }
   }

   @Override
   public boolean isShutdown() {
      // A service that was never started reports as shut down.
      return executor == null || executor.isShutdown();
   }

   @Override
   public boolean isTerminated() {
      // A service that was never started reports as terminated.
      return executor == null || executor.isTerminated();
   }

   @Override
   public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
      if (executor == null)
         return true;
      else
         return executor.awaitTermination(timeout, unit);
   }

   // All submission/scheduling methods below lazily create the delegate, then forward to it.

   @Override
   public <T> Future<T> submit(Callable<T> task) {
      initIfNeeded();
      return executor.submit(task);
   }

   @Override
   public <T> Future<T> submit(Runnable task, T result) {
      initIfNeeded();
      return executor.submit(task, result);
   }

   @Override
   public Future<?> submit(Runnable task) {
      initIfNeeded();
      return executor.submit(task);
   }

   @Override
   public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
      initIfNeeded();
      return executor.invokeAll(tasks);
   }

   @Override
   public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
         throws InterruptedException {
      initIfNeeded();
      return executor.invokeAll(tasks, timeout, unit);
   }

   @Override
   public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
      initIfNeeded();
      return executor.invokeAny(tasks);
   }

   @Override
   public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
         throws InterruptedException, ExecutionException, TimeoutException {
      initIfNeeded();
      return executor.invokeAny(tasks, timeout, unit);
   }

   @Override
   public void execute(Runnable command) {
      initIfNeeded();
      executor.execute(command);
   }

   @Override
   public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
      initIfNeeded();
      return executor.schedule(command, delay, unit);
   }

   @Override
   public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
      initIfNeeded();
      return executor.schedule(callable, delay, unit);
   }

   @Override
   public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
      initIfNeeded();
      return executor.scheduleAtFixedRate(command, initialDelay, period, unit);
   }

   @Override
   public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
      initIfNeeded();
      return executor.scheduleWithFixedDelay(command, initialDelay, delay, unit);
   }
}
4,972
28.957831
163
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/CacheJmxRegistration.java
package org.infinispan.jmx; import javax.management.ObjectName; import org.infinispan.cache.impl.CacheImpl; import org.infinispan.configuration.cache.Configuration; import org.infinispan.factories.KnownComponentNames; import org.infinispan.factories.annotations.ComponentName; import org.infinispan.factories.annotations.Inject; import org.infinispan.factories.annotations.Start; import org.infinispan.factories.annotations.Stop; import org.infinispan.factories.annotations.SurvivesRestarts; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; /** * If {@link org.infinispan.configuration.cache.Configuration#statistics()} is enabled, then class will register all * the MBeans from cache local's ConfigurationRegistry to the MBean server. * * @author Mircea.Markus@jboss.com * @author Galder Zamarreño * @since 4.0 */ @Scope(Scopes.NAMED_CACHE) @SurvivesRestarts public final class CacheJmxRegistration extends AbstractJmxRegistration { private static final String GROUP_PATTERN = TYPE + "=Cache," + NAME + "=%s," + MANAGER + "=%s"; @Inject Configuration cacheConfiguration; @Inject CacheManagerJmxRegistration globalJmxRegistration; @ComponentName(KnownComponentNames.CACHE_NAME) @Inject String cacheName; public CacheJmxRegistration() { super(CacheImpl.OBJECT_NAME); } @Start(priority = 14) @Override public void start() { // prevent double lookup of MBeanServer on eventual restart if (mBeanServer == null && globalJmxRegistration.mBeanServer != null) { groupName = initGroup(); // grab domain and MBean server from container mBeanServer = globalJmxRegistration.mBeanServer; } super.start(); } @Stop @Override public void stop() { super.stop(); } @Override protected String initGroup() { return String.format(GROUP_PATTERN, ObjectName.quote(cacheName + "(" + cacheConfiguration.clustering().cacheModeString().toLowerCase() + ")"), ObjectName.quote(globalConfig.cacheManagerName())); } }
2,106
29.1
118
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/package-info.java
/**
 * Helpers that allow easy exposure of attributes and operations via JMX.
 * <p>
 * Components described by the annotations in {@code org.infinispan.jmx.annotations} are wrapped as
 * {@link org.infinispan.jmx.ResourceDMBean} instances and registered with an MBean server by the
 * registration helpers in this package (e.g. {@link org.infinispan.jmx.CacheJmxRegistration}).
 */
package org.infinispan.jmx;
110
21.2
73
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/ResourceDMBean.java
package org.infinispan.jmx;

import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.DynamicMBean;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanOperationInfo;
import javax.management.MBeanParameterInfo;
import javax.management.MBeanRegistration;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.ServiceNotFoundException;

import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.ReflectionUtil;
import org.infinispan.factories.impl.MBeanMetadata;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * This class was copied from JGroups and adapted.
 * <p/>
 * The original JGroup's ResourceDMBean logic has been modified so that invoke() method checks whether the operation
 * called has been exposed as a {@link ManagedOperation}, otherwise the call fails. JGroups deviated from this logic on
 * purpose because they liked the fact that you could expose all class methods by simply annotating class with {@link
 * MBean} annotation.
 *
 * @author Mircea.Markus@jboss.com
 * @author Galder Zamarreño
 * @since 4.0
 */
public final class ResourceDMBean implements DynamicMBean, MBeanRegistration {

   private static final Log log = LogFactory.getLog(ResourceDMBean.class);

   // The wrapped component instance and its runtime class.
   private final Object obj;
   private final Class<?> objectClass;

   // JMX descriptors for the exposed operations; opNames[i] corresponds to opInfos[i].
   private final MBeanOperationInfo[] opInfos;
   private final String[] opNames;

   // JMX descriptors for the exposed attributes, plus a name -> invoker lookup map.
   private final MBeanAttributeInfo[] attInfos;
   private final Map<String, InvokableMBeanAttributeInfo> atts = new HashMap<>(2);

   private final String mbeanName;
   private final String description;

   /**
    * This is the name under which this MBean was registered.
    */
   private ObjectName objectName;

   // Process-wide reflection caches keyed by "<class>#<field>" (and "#s#"/"#g#" for setters/getters),
   // shared across all ResourceDMBean instances to avoid repeated reflective lookups.
   private static final Map<String, Field> FIELD_CACHE = new ConcurrentHashMap<>(64);
   private static final Map<String, Method> METHOD_CACHE = new ConcurrentHashMap<>(64);

   /**
    * Wraps {@code instance} as a DynamicMBean using the attribute/operation metadata in
    * {@code mBeanMetadata}. The MBean name comes from the metadata's JMX object name or, if absent,
    * from {@code componentName}; at least one of the two must be non-null.
    */
   ResourceDMBean(Object instance, MBeanMetadata mBeanMetadata, String componentName) {
      if (instance == null) {
         throw new NullPointerException("Cannot make an MBean wrapper for null instance");
      }
      this.obj = instance;
      this.objectClass = instance.getClass();

      if (mBeanMetadata.getJmxObjectName() != null) {
         mbeanName = mBeanMetadata.getJmxObjectName();
      } else if (componentName != null) {
         mbeanName = componentName;
      } else {
         throw new IllegalArgumentException("MBean.objectName and componentName cannot be both null");
      }
      this.description = mBeanMetadata.getDescription();

      // Load up all fields.
      int i = 0;
      attInfos = new MBeanAttributeInfo[mBeanMetadata.getAttributes().size()];
      for (MBeanMetadata.AttributeMetadata attributeMetadata : mBeanMetadata.getAttributes()) {
         String attributeName = attributeMetadata.getName();
         if (atts.containsKey(attributeName)) {
            throw new IllegalArgumentException("Component " + objectClass.getName() +
                  " metadata has a duplicate attribute: " + attributeName);
         }
         InvokableMBeanAttributeInfo info = toJmxInfo(attributeMetadata);
         atts.put(attributeName, info);
         attInfos[i++] = info.attributeInfo;
         if (log.isTraceEnabled())
            log.tracef("Attribute %s [r=%b,w=%b,is=%b,type=%s]", attributeName,
                  info.attributeInfo.isReadable(), info.attributeInfo.isWritable(),
                  info.attributeInfo.isIs(), info.attributeInfo.getType());
      }

      // And operations
      opInfos = new MBeanOperationInfo[mBeanMetadata.getOperations().size()];
      opNames = new String[opInfos.length];
      i = 0;
      for (MBeanMetadata.OperationMetadata operation : mBeanMetadata.getOperations()) {
         opNames[i] = operation.getOperationName();
         MBeanOperationInfo op = toJmxInfo(operation);
         opInfos[i++] = op;
         if (log.isTraceEnabled()) log.tracef("Operation %s %s", op.getReturnType(), op.getName());
      }
   }

   /**
    * The name assigned via {@link MBean#objectName} or generated based on default rules if missing.
    */
   String getMBeanName() {
      return mbeanName;
   }

   /**
    * The ObjectName. Only available if the MBean was registered.
    */
   public ObjectName getObjectName() {
      return objectName;
   }

   // Cached reflective field lookup; only successful lookups are cached.
   private static Field findField(Class<?> objectClass, String fieldName) {
      String key = objectClass.getName() + "#" + fieldName;
      Field f = FIELD_CACHE.get(key);
      if (f == null) {
         f = ReflectionUtil.getField(fieldName, objectClass);
         if (f != null) FIELD_CACHE.put(key, f);
      }
      return f;
   }

   // Cached reflective setter lookup; only successful lookups are cached.
   private static Method findSetter(Class<?> objectClass, String fieldName) {
      String key = objectClass.getName() + "#s#" + fieldName;
      Method m = METHOD_CACHE.get(key);
      if (m == null) {
         m = ReflectionUtil.findSetterForField(objectClass, fieldName);
         if (m != null) METHOD_CACHE.put(key, m);
      }
      return m;
   }

   // Cached reflective getter lookup; only successful lookups are cached.
   private static Method findGetter(Class<?> objectClass, String fieldName) {
      String key = objectClass.getName() + "#g#" + fieldName;
      Method m = METHOD_CACHE.get(key);
      if (m == null) {
         m = ReflectionUtil.findGetterForField(objectClass, fieldName);
         if (m != null) METHOD_CACHE.put(key, m);
      }
      return m;
   }

   /**
    * Builds the attribute invoker: prefers direct field access when the metadata does not mandate a
    * setter and the field exists; otherwise falls back to getter/setter reflection.
    */
   private InvokableMBeanAttributeInfo toJmxInfo(MBeanMetadata.AttributeMetadata attributeMetadata) {
      if (!attributeMetadata.isUseSetter()) {
         Field field = findField(objectClass, attributeMetadata.getName());
         if (field != null) {
            return new InvokableFieldBasedMBeanAttributeInfo(attributeMetadata.getName(), attributeMetadata.getType(),
                  attributeMetadata.getDescription(), true, attributeMetadata.isWritable(),
                  attributeMetadata.isIs(), field);
         }
      }
      Method setter = null;
      Method getter = null;
      try {
         setter = attributeMetadata.isWritable() ? findSetter(objectClass, attributeMetadata.getName()) : null;
         getter = findGetter(objectClass, attributeMetadata.getName());
      } catch (NoClassDefFoundError ignored) {
         // missing dependency
      }
      return new InvokableSetterBasedMBeanAttributeInfo(attributeMetadata.getName(), attributeMetadata.getType(),
            attributeMetadata.getDescription(), true, attributeMetadata.isWritable(),
            attributeMetadata.isIs(), getter, setter);
   }

   // Converts operation metadata into the JMX operation descriptor.
   private MBeanOperationInfo toJmxInfo(MBeanMetadata.OperationMetadata operationMetadata) {
      MBeanMetadata.OperationParameterMetadata[] parameters = operationMetadata.getMethodParameters();
      MBeanParameterInfo[] params = new MBeanParameterInfo[parameters.length];
      for (int i = 0; i < parameters.length; i++) {
         params[i] = new MBeanParameterInfo(parameters[i].getName(), parameters[i].getType(),
               parameters[i].getDescription());
      }
      return new MBeanOperationInfo(operationMetadata.getMethodName(), operationMetadata.getDescription(), params,
            operationMetadata.getReturnType(), MBeanOperationInfo.UNKNOWN);
   }

   @Override
   public MBeanInfo getMBeanInfo() {
      return new MBeanInfo(objectClass.getName(), description, attInfos, null, opInfos, null);
   }

   @Override
   public Object getAttribute(String name) throws AttributeNotFoundException {
      if (name == null || name.length() == 0)
         throw new NullPointerException("Invalid attribute requested " + name);
      Attribute attr = getNamedAttribute(name);
      if (attr == null) {
         throw new AttributeNotFoundException("Unknown attribute '" + name
               + "'. Known attributes names are: " + atts.keySet());
      }
      return attr.getValue();
   }

   @Override
   public synchronized void setAttribute(Attribute attribute) throws AttributeNotFoundException, MBeanException {
      if (attribute == null || attribute.getName() == null)
         throw new NullPointerException("Invalid attribute requested " + attribute);
      setNamedAttribute(attribute);
   }

   @Override
   public synchronized AttributeList getAttributes(String[] names) {
      AttributeList al = new AttributeList();
      for (String name : names) {
         Attribute attr = getNamedAttribute(name);
         if (attr != null) {
            al.add(attr);
         } else {
            log.couldNotFindAttribute(name);
            //todo [anistor] is it ok to ignore missing attributes ?
         }
      }
      return al;
   }

   @Override
   public synchronized AttributeList setAttributes(AttributeList list) {
      // Per the DynamicMBean contract, failed writes are logged and skipped; only the
      // successfully applied attributes are returned.
      AttributeList results = new AttributeList();
      for (Object aList : list) {
         Attribute attr = (Attribute) aList;
         try {
            setNamedAttribute(attr);
            results.add(attr);
         } catch (Exception e) {
            log.failedToUpdateAttribute(attr.getName(), attr.getValue());
         }
      }
      return results;
   }

   /**
    * Invokes a managed operation. Only operations present in the metadata-derived {@link #opNames}
    * list may be called; anything else fails with a {@link ServiceNotFoundException} wrapped in an
    * {@link MBeanException} (see the class javadoc for why this deviates from JGroups).
    */
   @Override
   public Object invoke(String name, Object[] args, String[] sig) throws MBeanException {
      if (log.isDebugEnabled()) {
         log.debugf("Invoke method called on %s", name);
      }

      MBeanOperationInfo opInfo = null;
      for (int i = 0; i < opNames.length; i++) {
         if (opNames[i].equals(name)) {
            opInfo = opInfos[i];
            break;
         }
      }
      if (opInfo == null) {
         final String msg = "Operation " + name + " not amongst operations in " + Arrays.toString(opInfos);
         throw new MBeanException(new ServiceNotFoundException(msg), msg);
      }

      // Argument type transformation according to signatures
      for (int i = 0; i < sig.length; i++) {
         // Some clients (e.g. RHQ) will pass the arguments as java.lang.String but we need some fields to be numbers
         if (args[i] != null) {
            if (log.isDebugEnabled())
               log.debugf("Argument value before transformation: %s and its class: %s. " +
                     "For method.invoke we need it to be class: %s", args[i], args[i].getClass(), sig[i]);
            if (sig[i].equals(int.class.getName()) || sig[i].equals(Integer.class.getName())) {
               if (args[i].getClass() != Integer.class && args[i].getClass() != int.class)
                  args[i] = Integer.parseInt((String) args[i]);
            } else if (sig[i].equals(Long.class.getName()) || sig[i].equals(long.class.getName())) {
               if (args[i].getClass() != Long.class && args[i].getClass() != long.class)
                  args[i] = Long.parseLong((String) args[i]);
            }
         }
      }

      try {
         Class<?>[] classes = new Class[sig.length];
         for (int i = 0; i < classes.length; i++) {
            classes[i] = ReflectionUtil.getClassForName(sig[i], null);
         }
         Method method = objectClass.getMethod(opInfo.getName(), classes);
         return method.invoke(obj, args);
      } catch (Exception e) {
         // Unwrap to the root cause so the client sees the real failure, not the reflection wrapper.
         throw new MBeanException(new Exception(getRootCause(e)));
      }
   }

   // Walks the cause chain to its deepest throwable.
   private static Throwable getRootCause(Throwable throwable) {
      Throwable cause;
      while ((cause = throwable.getCause()) != null) {
         throwable = cause;
      }
      return throwable;
   }

   // Reads a single attribute; returns null when the attribute is unknown.
   private Attribute getNamedAttribute(String name) {
      Attribute result = null;
      InvokableMBeanAttributeInfo i = atts.get(name);
      if (i == null && name.length() > 0) {
         // This is legacy. Earlier versions used an upper-case starting letter for *some* attributes.
         char firstChar = name.charAt(0);
         if (Character.isUpperCase(firstChar)) {
            name = Character.toLowerCase(firstChar) + name.substring(1);
            i = atts.get(name);
         }
      }
      if (i != null) {
         try {
            result = new Attribute(name, i.invoke(null));
            if (log.isTraceEnabled())
               log.tracef("Attribute %s has r=%b,w=%b,is=%b and value %s", name, i.attributeInfo.isReadable(),
                     i.attributeInfo.isWritable(), i.attributeInfo.isIs(), result.getValue());
         } catch (Exception e) {
            log.debugf(e, "Exception while reading value of attribute %s", name);
            throw new CacheException(e);
         }
      } else {
         log.queriedAttributeNotFound(name); //todo [anistor] why not throw an AttributeNotFoundException ?
      }
      return result;
   }

   // Writes a single attribute; mirrors getNamedAttribute's legacy upper-case fallback.
   private void setNamedAttribute(Attribute attribute) throws MBeanException, AttributeNotFoundException {
      if (log.isDebugEnabled()) {
         log.debugf("Invoking set on attribute %s with value %s", attribute.getName(), attribute.getValue());
      }
      String name = attribute.getName();
      InvokableMBeanAttributeInfo i = atts.get(name);
      if (i == null && name.length() > 0) {
         // This is legacy. Earlier versions used an upper-case starting letter for *some* attributes.
         char firstChar = name.charAt(0);
         if (Character.isUpperCase(firstChar)) {
            name = name.replaceFirst(Character.toString(firstChar),
                  Character.toString(Character.toLowerCase(firstChar)));
            i = atts.get(name);
         }
      }
      if (i != null) {
         try {
            i.invoke(attribute);
         } catch (Exception e) {
            log.errorWritingValueForAttribute(name, e);
            throw new MBeanException(e, "Error invoking setter for attribute " + name);
         }
      } else {
         log.couldNotInvokeSetOnAttribute(name, attribute.getValue());
         throw new AttributeNotFoundException("Could not find attribute " + name);
      }
   }

   // MBeanRegistration callbacks: capture/clear the ObjectName assigned by the server.
   @Override
   public ObjectName preRegister(MBeanServer server, ObjectName name) {
      objectName = name;
      return name;
   }

   @Override
   public void postRegister(Boolean registrationDone) {
   }

   @Override
   public void preDeregister() {
   }

   @Override
   public void postDeregister() {
      objectName = null;
   }

   /**
    * Pairs a JMX attribute descriptor with the reflective machinery used to read/write it.
    * {@code invoke(null)} reads the attribute; {@code invoke(attr)} writes it.
    */
   private static abstract class InvokableMBeanAttributeInfo {

      private final MBeanAttributeInfo attributeInfo;

      InvokableMBeanAttributeInfo(String name, String type, String description, boolean isReadable,
                                  boolean isWritable, boolean isIs) {
         attributeInfo = new MBeanAttributeInfo(name, type, description, isReadable, isWritable, isIs);
      }

      abstract Object invoke(Attribute a) throws IllegalAccessException, InvocationTargetException;
   }

   // Attribute backed by direct field access.
   private final class InvokableFieldBasedMBeanAttributeInfo extends InvokableMBeanAttributeInfo {
      private final Field field;

      InvokableFieldBasedMBeanAttributeInfo(String name, String type, String description, boolean isReadable,
                                            boolean isWritable, boolean isIs, Field field) {
         super(name, type, description, isReadable, isWritable, isIs);
         this.field = field;
      }

      @Override
      Object invoke(Attribute a) throws IllegalAccessException {
         if (!Modifier.isPublic(field.getModifiers()))
            field.setAccessible(true);
         if (a == null) {
            return field.get(obj);
         } else {
            field.set(obj, a.getValue());
            return null;
         }
      }
   }

   // Attribute backed by getter/setter methods.
   private final class InvokableSetterBasedMBeanAttributeInfo extends InvokableMBeanAttributeInfo {
      private final Method setter;
      private final Method getter;

      InvokableSetterBasedMBeanAttributeInfo(String name, String type, String description, boolean isReadable,
                                             boolean isWritable, boolean isIs, Method getter, Method setter) {
         super(name, type, description, isReadable, isWritable, isIs);
         this.setter = setter;
         this.getter = getter;
      }

      @Override
      Object invoke(Attribute a) throws IllegalAccessException, InvocationTargetException {
         if (a == null) {
            if (!Modifier.isPublic(getter.getModifiers()))
               getter.setAccessible(true);
            return getter.invoke(obj);
         } else {
            if (!Modifier.isPublic(setter.getModifiers()))
               setter.setAccessible(true);
            return setter.invoke(obj, a.getValue());
         }
      }
   }

   @Override
   public boolean equals(Object o) {
      if (this == o) return true;
      if (o == null || o.getClass() != ResourceDMBean.class) return false;
      ResourceDMBean that = (ResourceDMBean) o;
      // Two wrappers are equal only when they wrap the very same instance.
      return obj == that.obj;  // == is intentional
   }

   @Override
   public int hashCode() {
      return obj.hashCode();
   }

   @Override
   public String toString() {
      return "ResourceDMBean{" +
            "obj=" + System.identityHashCode(obj) +
            ", objectClass=" + objectClass +
            ", mbeanName='" + mbeanName + '\'' +
            ", description='" + description + '\'' +
            ", objectName=" + objectName +
            '}';
   }
}
17,716
37.43167
135
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/JmxDomainConflictException.java
package org.infinispan.jmx;

import org.infinispan.commons.CacheConfigurationException;

/**
 * Signals a conflict over a JMX domain. Extends {@link CacheConfigurationException} so it is
 * surfaced as a configuration error.
 *
 * @author Mircea.Markus@jboss.com
 */
public class JmxDomainConflictException extends CacheConfigurationException {

   private static final long serialVersionUID = 8057798477119623578L;

   public JmxDomainConflictException(String msg) {
      super(msg);
   }
}
358
21.4375
77
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/AbstractJmxRegistration.java
package org.infinispan.jmx;

import static org.infinispan.util.logging.Log.CONTAINER;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

import org.infinispan.commons.CacheException;
import org.infinispan.commons.jmx.MBeanServerLookup;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalJmxConfiguration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.impl.MBeanMetadata;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * Parent class for JMX component registration.
 *
 * @author Galder Zamarreño
 * @since 4.0
 */
@Scope(Scopes.NONE)
abstract class AbstractJmxRegistration implements ObjectNameKeys {

   private static final Log log = LogFactory.getLog(AbstractJmxRegistration.class);

   @Inject GlobalConfiguration globalConfig;
   @Inject BasicComponentRegistry basicComponentRegistry;

   // Non-null only when JMX is effectively enabled; also acts as the "already started" flag.
   volatile MBeanServer mBeanServer;

   String groupName;

   // MBeans registered on start and automatically unregistered on stop.
   private List<ResourceDMBean> resourceDMBeans;

   /**
    * The component to be registered first for domain reservation.
    */
   private final String mainComponent;

   AbstractJmxRegistration(String mainComponent) {
      this.mainComponent = mainComponent;
   }

   /**
    * Looks up the MBean server and initializes domain and group. Overriders must ensure they call super.
    */
   public void start() {
      // prevent double lookup of MBeanServer on eventual restart
      if (mBeanServer == null) {
         MBeanServer server = lookupMBeanServer();
         if (server != null) {
            // first time!
            groupName = initGroup();
            resourceDMBeans = Collections.synchronizedList(getResourceDMBeansFromComponents());
            this.mBeanServer = server;
            // register those beans, Jack
            try {
               registerResourceDMBeans(server);
            } catch (InstanceAlreadyExistsException | IllegalArgumentException e) {
               throw CONTAINER.jmxMBeanAlreadyRegistered(globalConfig.jmx().domain(), e);
            } catch (Exception e) {
               throw new CacheException("Failure while registering MBeans", e);
            }
         }
      } else {
         // restart
         resourceDMBeans = Collections.synchronizedList(getResourceDMBeansFromComponents());
         try {
            registerResourceDMBeans(mBeanServer);
         } catch (Exception e) {
            throw new CacheException("Failure while registering MBeans", e);
         }
      }
   }

   /**
    * Performs the MBean server lookup if JMX is enabled in the configuration.
    *
    * @return the located server, or null if JMX is disabled, no lookup is configured or the lookup failed
    */
   private MBeanServer lookupMBeanServer() {
      try {
         GlobalJmxConfiguration jmx = globalConfig.jmx();
         MBeanServerLookup lookup = jmx.mbeanServerLookup();
         if (jmx.enabled() && lookup != null) {
            return lookup.getMBeanServer(jmx.properties());
         }
      } catch (Exception e) {
         // lookup failures are not fatal; JMX just stays disabled
         CONTAINER.warn("Ignoring exception in MBean server lookup", e);
      }
      return null;
   }

   /**
    * Registers all currently tracked ResourceDMBeans with the given server.
    */
   private void registerResourceDMBeans(MBeanServer server) throws Exception {
      for (ResourceDMBean resourceDMBean : resourceDMBeans) {
         ObjectName objectName = getObjectName(groupName, resourceDMBean.getMBeanName());
         register(resourceDMBean, objectName, server);
      }
   }

   /**
    * Unregisters the MBeans that were registered on start. Overriders must ensure they call super.
    */
   public void stop() {
      if (mBeanServer != null && resourceDMBeans != null) {
         try {
            for (ResourceDMBean resourceDMBean : resourceDMBeans) {
               ObjectName objectName = resourceDMBean.getObjectName();
               if (objectName != null) {
                  unregisterMBean(objectName);
               }
            }
            resourceDMBeans = null;
         } catch (Exception e) {
            throw new CacheException("Failure while unregistering MBeans", e);
         }
      }
   }

   /**
    * Subclasses must implement this hook to initialize {@link #groupName} during start.
    */
   protected abstract String initGroup();

   /**
    * Checks that JMX is effectively enabled.
    */
   public final boolean enabled() {
      return mBeanServer != null;
   }

   /**
    * Gets the domain name. This should not be called unless JMX is enabled.
    */
   public final String getDomain() {
      if (mBeanServer == null) {
         throw new IllegalStateException("MBean server not initialized");
      }
      return globalConfig.jmx().domain();
   }

   /**
    * Gets the group name. This should not be called unless JMX is enabled.
    */
   public final String getGroupName() {
      if (mBeanServer == null) {
         throw new IllegalStateException("MBean server not initialized");
      }
      return groupName;
   }

   /**
    * Gets the MBean server. This should not be called unless JMX is enabled.
    */
   public final MBeanServer getMBeanServer() {
      if (mBeanServer == null) {
         throw new IllegalStateException("MBean server not initialized");
      }
      return mBeanServer;
   }

   /**
    * Creates an ObjectName based on given domain, group and component name.
    */
   private static ObjectName getObjectName(String domain, String groupName, String resourceName)
         throws MalformedObjectNameException {
      if (domain == null) {
         throw new IllegalArgumentException("domain cannot be null");
      }
      if (groupName == null) {
         throw new IllegalArgumentException("groupName cannot be null");
      }
      if (resourceName == null) {
         throw new IllegalArgumentException("resourceName cannot be null");
      }
      return new ObjectName(domain + ":" + groupName + "," + COMPONENT + "=" + resourceName);
   }

   /**
    * Creates an ObjectName based on given group and component name.
    */
   private ObjectName getObjectName(String groupName, String resourceName) throws MalformedObjectNameException {
      return getObjectName(getDomain(), groupName, resourceName);
   }

   /**
    * Gathers all components from registry that have MBeanMetadata and creates ResourceDMBeans for them. The first
    * component is always the main component, ie. the cache/cache manager.
    */
   private List<ResourceDMBean> getResourceDMBeansFromComponents() {
      Collection<ComponentRef<?>> components = basicComponentRegistry.getRegisteredComponents();
      List<ResourceDMBean> beans = new ArrayList<>(components.size());
      for (ComponentRef<?> component : components) {
         if (!component.isAlias()) {
            Object instance = component.wired();
            if (instance != null) {
               ResourceDMBean resourceDMBean = getResourceDMBean(instance, component.getName());
               if (resourceDMBean != null) {
                  // not all components have MBeanMetadata
                  if (mainComponent.equals(resourceDMBean.getMBeanName())) {
                     // main component goes first so it reserves the domain
                     beans.add(0, resourceDMBean);
                  } else {
                     beans.add(resourceDMBean);
                  }
               }
            }
         }
      }
      if (beans.isEmpty()) {
         throw new IllegalStateException("No MBeans found in component registry!");
      }
      return beans;
   }

   private ResourceDMBean getResourceDMBean(Object instance, String componentName) {
      MBeanMetadata beanMetadata = basicComponentRegistry.getMBeanMetadata(instance.getClass().getName());
      return beanMetadata == null ? null : new ResourceDMBean(instance, beanMetadata, componentName);
   }

   /**
    * Checks the server is available and wraps the component in a ResourceDMBean, failing fast otherwise.
    *
    * @throws IllegalStateException    if JMX is not enabled
    * @throws IllegalArgumentException if the component has no MBean metadata
    */
   private ResourceDMBean toResourceDMBean(Object managedComponent) {
      if (mBeanServer == null) {
         throw new IllegalStateException("MBean server not initialized");
      }
      ResourceDMBean resourceDMBean = getResourceDMBean(managedComponent, null);
      if (resourceDMBean == null) {
         throw new IllegalArgumentException("No MBean metadata found for " + managedComponent.getClass().getName());
      }
      return resourceDMBean;
   }

   /**
    * Registers a MBean, but does not track it to perform automatic unregistration on stop. The caller is expected to
    * perform unregistration using the returned ObjectName.
    */
   public ObjectName registerExternalMBean(Object managedComponent, String groupName) throws Exception {
      ResourceDMBean resourceDMBean = toResourceDMBean(managedComponent);
      ObjectName objectName = getObjectName(groupName, resourceDMBean.getMBeanName());
      register(resourceDMBean, objectName, mBeanServer);
      return objectName;
   }

   /**
    * Registers a MBean (and tracks it to perform automatic unregistration on stop). This method should be used for
    * components that are registered after the startup of the component registry and did not get registered
    * automatically.
    */
   public void registerMBean(Object managedComponent) throws Exception {
      registerMBean(managedComponent, groupName);
   }

   /**
    * Registers a MBean (and tracks it to perform automatic unregistration on stop). This method should be used only for
    * components that are registered after the startup of the component registry and did not get registered
    * automatically.
    */
   public void registerMBean(Object managedComponent, String groupName) throws Exception {
      ResourceDMBean resourceDMBean = toResourceDMBean(managedComponent);
      ObjectName objectName = getObjectName(groupName, resourceDMBean.getMBeanName());
      register(resourceDMBean, objectName, mBeanServer);
      resourceDMBeans.add(resourceDMBean);
   }

   /**
    * Registers the JMX MBean.
    *
    * @param resourceDMBean MBean to register
    * @param objectName     {@link ObjectName} under which to register the MBean.
    * @throws Exception If registration could not be completed.
    */
   private void register(ResourceDMBean resourceDMBean, ObjectName objectName, MBeanServer mBeanServer) throws Exception {
      mBeanServer.registerMBean(resourceDMBean, objectName);
      if (log.isTraceEnabled()) {
         log.tracef("Registered MBean %s under %s", resourceDMBean, objectName);
      }
   }

   /**
    * Unregisters the MBean located under the given {@link ObjectName}, if it exists.
    *
    * @param objectName {@link ObjectName} where the MBean is registered
    * @throws Exception If unregistration could not be completed.
    */
   public void unregisterMBean(ObjectName objectName) throws Exception {
      if (mBeanServer.isRegistered(objectName)) {
         mBeanServer.unregisterMBean(objectName);
         if (log.isTraceEnabled()) {
            log.tracef("Unregistered MBean: %s", objectName);
         }
      } else {
         log.debugf("MBean not registered: %s", objectName);
      }
   }
}
11,375
36.668874
134
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/ObjectNameKeys.java
package org.infinispan.jmx;

/**
 * Keys used when composing JMX {@code ObjectName}s for Infinispan components.
 *
 * @author anistor@redhat.com
 * @since 10.0
 */
public interface ObjectNameKeys {
   // name of the cache or cache manager
   String NAME = "name";
   String TYPE = "type"; // Cache, CacheManager, Query, RemoteQuery, Server, etc.
   // the individual component within a cache/cache manager
   String COMPONENT = "component";
   String MANAGER = "manager";
}
297
16.529412
83
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/MBeanServerLookup.java
package org.infinispan.jmx;

/**
 * Implementors of this should return an MBeanServer to which MBeans will be registered.
 * <p>
 * This is a backwards-compatibility bridge that only extends the relocated interface; it adds no members.
 *
 * @author Mircea.Markus@jboss.com
 * @see org.infinispan.jmx.PlatformMBeanServerLookup
 * @since 4.0
 * @deprecated Use {@link org.infinispan.commons.jmx.MBeanServerLookup} instead
 */
@Deprecated
public interface MBeanServerLookup extends org.infinispan.commons.jmx.MBeanServerLookup {
}
415
28.714286
89
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/PlatformMBeanServerLookup.java
package org.infinispan.jmx;

/**
 * Default implementation for {@link MBeanServerLookup}, will return the platform MBean server.
 * <p/>
 * Note: to enable platform MBeanServer the following system property should be passed to the Sun JVM:
 * <b>-Dcom.sun.management.jmxremote</b>.
 * <p/>
 * This is a backwards-compatibility bridge that only extends the relocated class; it adds no behavior.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.0
 * @deprecated Use {@link org.infinispan.commons.jmx.PlatformMBeanServerLookup} instead
 */
@Deprecated
public class PlatformMBeanServerLookup extends org.infinispan.commons.jmx.PlatformMBeanServerLookup
      implements MBeanServerLookup {
}
571
34.75
130
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/CacheManagerJmxRegistration.java
package org.infinispan.jmx;

import javax.management.ObjectName;

import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.manager.DefaultCacheManager;

/**
 * Registers all the components from global component registry to the mbean server.
 *
 * @author Mircea.Markus@jboss.com
 * @author Galder Zamarreño
 * @since 4.0
 */
@Scope(Scopes.GLOBAL)
@SurvivesRestarts
public final class CacheManagerJmxRegistration extends AbstractJmxRegistration {

   // group format: type=CacheManager,name=<quoted cache manager name>
   private static final String GROUP_PATTERN = TYPE + "=CacheManager," + NAME + "=%s";

   public CacheManagerJmxRegistration() {
      super(DefaultCacheManager.OBJECT_NAME);
   }

   @Override
   protected String initGroup() {
      // quote the name because it may contain characters that are illegal in an ObjectName
      String quotedName = ObjectName.quote(globalConfig.cacheManagerName());
      return String.format(GROUP_PATTERN, quotedName);
   }
}
897
27.0625
93
java
null
infinispan-main/core/src/main/java/org/infinispan/jmx/JmxStatisticsExposer.java
package org.infinispan.jmx;

/**
 * Interface containing common cache management operations
 *
 * @author Jerry Gauthier
 * @since 4.0
 */
public interface JmxStatisticsExposer {
   /**
    * Returns whether an interceptor's statistics are being captured.
    *
    * @return true if statistics are captured
    */
   boolean getStatisticsEnabled();

   /**
    * Enables an interceptor's cache statistics. If true, the interceptor will capture statistics and make them
    * available through the mbean.
    *
    * @param enabled true if statistics should be captured
    */
   void setStatisticsEnabled(boolean enabled);

   /**
    * Resets an interceptor's cache statistics
    */
   void resetStatistics();
}
714
22.833333
120
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/package-info.java
/** * Notifications and eventing for listeners on both the Cache and CacheManager interfaces. * * @api.public */ package org.infinispan.notifications;
155
21.285714
90
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/DataConversionAwareListenable.java
package org.infinispan.notifications;

import java.lang.annotation.Annotation;
import java.util.Set;
import java.util.concurrent.CompletionStage;

import org.infinispan.notifications.cachelistener.ListenerHolder;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.util.concurrent.CompletionStages;

/**
 * A listenable that accepts a {@link ListenerHolder} instead of a bare listener object.
 *
 * @since 9.1
 */
public interface DataConversionAwareListenable<K, V> extends ClassLoaderAwareFilteringListenable<K, V> {

   /**
    * Blocking variant of {@link #addListenerAsync(ListenerHolder, CacheEventFilter, CacheEventConverter, ClassLoader)};
    * waits for the returned stage to complete.
    */
   default <C> void addListener(ListenerHolder listenerHolder, CacheEventFilter<? super K, ? super V> filter,
                                CacheEventConverter<? super K, ? super V, C> converter, ClassLoader classLoader) {
      CompletionStages.join(addListenerAsync(listenerHolder, filter, converter, classLoader));
   }

   /**
    * Registers the held listener with the given filter, converter and context class loader.
    *
    * @return a CompletionStage that completes when the listener is fully installed
    */
   <C> CompletionStage<Void> addListenerAsync(ListenerHolder listenerHolder, CacheEventFilter<? super K, ? super V> filter,
                                              CacheEventConverter<? super K, ? super V, C> converter, ClassLoader classLoader);

   /**
    * Blocking variant of {@link #addFilteredListenerAsync(ListenerHolder, CacheEventFilter, CacheEventConverter, Set)};
    * waits for the returned stage to complete.
    */
   default <C> void addFilteredListener(ListenerHolder listenerHolder,
                                        CacheEventFilter<? super K, ? super V> filter,
                                        CacheEventConverter<? super K, ? super V, C> converter,
                                        Set<Class<? extends Annotation>> filterAnnotations) {
      CompletionStages.join(addFilteredListenerAsync(listenerHolder, filter, converter, filterAnnotations));
   }

   /**
    * Registers the held listener restricted to the given cache-entry annotations.
    *
    * @return a CompletionStage that completes when the listener is fully installed
    */
   <C> CompletionStage<Void> addFilteredListenerAsync(ListenerHolder listenerHolder,
                                                      CacheEventFilter<? super K, ? super V> filter,
                                                      CacheEventConverter<? super K, ? super V, C> converter,
                                                      Set<Class<? extends Annotation>> filterAnnotations);
}
1,679
53.193548
205
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/IncorrectListenerException.java
package org.infinispan.notifications;

import org.infinispan.commons.CacheException;

/**
 * Thrown when an incorrectly annotated class is added as a cache listener using the
 * {@link org.infinispan.notifications.Listenable#addListener(Object)} API.
 *
 * @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
 * @since 4.0
 */
public class IncorrectListenerException extends CacheException {
   private static final long serialVersionUID = 3847404572671886703L;

   /**
    * @param message description of the listener annotation problem
    */
   public IncorrectListenerException(String message) {
      super(message);
   }
}
543
26.2
91
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/FilteringListenable.java
package org.infinispan.notifications;

import java.lang.annotation.Annotation;
import java.util.Set;
import java.util.concurrent.CompletionStage;

import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.util.concurrent.CompletionStages;

/**
 * A Listenable that can also filter events based on key
 *
 * @author Manik Surtani
 * @since 6.0
 */
public interface FilteringListenable<K, V> extends Listenable {
   /**
    * Registers a listener that will be notified on events that pass the filter condition. The value presented in the
    * notifications will be first converted using the provided converter if there is one.
    * <p>
    * Some implementations may provide optimizations when a
    * {@link org.infinispan.notifications.cachelistener.filter.CacheEventFilterConverter} is provided as both arguments
    * to the <b>filter</b> and <b>converter</b> arguments. Note the provided object must have reference equality ie. (==)
    * to be recognized. This allows for the filter and conversion step to take place in the same method call reducing
    * possible overhead.
    * @param listener The listener to callback upon event notifications. Must not be null.
    * @param filter The filter to see if the notification should be sent to the listener. Can be null.
    * @param converter The converter to apply to the entry before being sent to the listener. Can be null.
    * @param <C> The type of the resultant value after being converted
    * @throws NullPointerException if the specified listener is null
    */
   default <C> void addListener(Object listener, CacheEventFilter<? super K, ? super V> filter,
                                CacheEventConverter<? super K, ? super V, C> converter) {
      CompletionStages.join(addListenerAsync(listener, filter, converter));
   }

   /**
    * Asynchronous version of {@link #addListener(Object, CacheEventFilter, CacheEventConverter)}
    * @param listener listener to add, must not be null
    * @param filter the filter to see if the notification should be sent to the listener, may be null
    * @param converter the converter to apply to the entry before being sent to the listener, may be null
    * @param <C> the type of the resultant value after being converted
    * @return CompletionStage that when complete the listener is fully installed
    */
   <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter,
                                              CacheEventConverter<? super K, ? super V, C> converter);

   /**
    * Registers a listener limiting the cache-entry specific events only to
    * annotations that are passed in as parameter.
    * <p/>
    * For example, if the listener passed in contains callbacks for
    * {@link CacheEntryCreated} and {@link CacheEntryModified},
    * and filtered annotations contains only {@link CacheEntryCreated},
    * then the listener will be registered only for {@link CacheEntryCreated}
    * callbacks.
    * <p/>
    * Callback filtering only applies to {@link CacheEntryCreated},
    * {@link CacheEntryModified}, {@link CacheEntryRemoved}
    * and {@link CacheEntryExpired} annotations.
    * If the listener contains other annotations, these are preserved.
    * <p/>
    * This methods enables dynamic registration of listener interests at
    * runtime without the need to create several different listener classes.
    *
    * @param listener The listener to callback upon event notifications. Must not be null.
    * @param filter The filter to see if the notification should be sent to the listener. Can be null.
    * @param converter The converter to apply to the entry before being sent to the listener. Can be null.
    * @param filterAnnotations cache-entry annotations to allow listener to be registered on. Must not be null.
    * @param <C> The type of the resultant value after being converted
    */
   default <C> void addFilteredListener(Object listener,
                                        CacheEventFilter<? super K, ? super V> filter,
                                        CacheEventConverter<? super K, ? super V, C> converter,
                                        Set<Class<? extends Annotation>> filterAnnotations) {
      CompletionStages.join(addFilteredListenerAsync(listener, filter, converter, filterAnnotations));
   }

   /**
    * Asynchronous version of {@link #addFilteredListener(Object, CacheEventFilter, CacheEventConverter, Set)}
    */
   <C> CompletionStage<Void> addFilteredListenerAsync(Object listener,
                                                      CacheEventFilter<? super K, ? super V> filter,
                                                      CacheEventConverter<? super K, ? super V, C> converter,
                                                      Set<Class<? extends Annotation>> filterAnnotations);

   /**
    * Same as {@link #addFilteredListener(Object, CacheEventFilter, CacheEventConverter, Set)}, but assumes the filter
    * and/or the converter will be done in the same data format as it's stored in the cache.
    */
   default <C> void addStorageFormatFilteredListener(Object listener,
                                                     CacheEventFilter<? super K, ? super V> filter,
                                                     CacheEventConverter<? super K, ? super V, C> converter,
                                                     Set<Class<? extends Annotation>> filterAnnotations) {
      CompletionStages.join(addStorageFormatFilteredListenerAsync(listener, filter, converter, filterAnnotations));
   }

   /**
    * Asynchronous version of {@link #addStorageFormatFilteredListener(Object, CacheEventFilter, CacheEventConverter, Set)}
    */
   <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener,
                                                                   CacheEventFilter<? super K, ? super V> filter,
                                                                   CacheEventConverter<? super K, ? super V, C> converter,
                                                                   Set<Class<? extends Annotation>> filterAnnotations);
}
5,741
55.294118
186
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/ClassLoaderAwareFilteringListenable.java
package org.infinispan.notifications;

import java.util.concurrent.CompletionStage;

import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.util.concurrent.CompletionStages;

/**
 * Interface that enhances {@link FilteringListenable} with the possibility of specifying the
 * {@link ClassLoader} which should be set as the context class loader for the invoked
 * listener method
 *
 * @author Manik Surtani
 * @since 6.0
 * @see ClassLoaderAwareListenable
 * @see FilteringListenable
 */
public interface ClassLoaderAwareFilteringListenable<K, V> extends FilteringListenable<K, V> {
   /**
    * Adds a listener with the provided filter and converter and using a given classloader when invoked. See
    * {@link org.infinispan.notifications.FilteringListenable#addListener(Object,
    * org.infinispan.notifications.cachelistener.filter.CacheEventFilter,
    * org.infinispan.notifications.cachelistener.filter.CacheEventConverter)}
    * for more details.
    * <p/>
    * @param listener must not be null. The listener to callback on when an event is raised
    * @param filter The filter to apply for the entry to see if the event should be raised
    * @param converter The converter to convert the filtered entry to a new value
    * @param classLoader The class loader to use when the event is fired
    * @param <C> The type that the converter returns. The listener must handle this type in any methods that handle
    *            events being returned
    */
   default <C> void addListener(Object listener, CacheEventFilter<? super K, ? super V> filter,
                                CacheEventConverter<? super K, ? super V, C> converter, ClassLoader classLoader) {
      CompletionStages.join(addListenerAsync(listener, filter, converter, classLoader));
   }

   /**
    * Asynchronous version of {@link #addListener(Object, CacheEventFilter, CacheEventConverter, ClassLoader)}.
    *
    * @return a CompletionStage that completes when the listener is fully installed
    */
   <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter,
                                              CacheEventConverter<? super K, ? super V, C> converter, ClassLoader classLoader);
}
2,089
47.604651
116
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/Listener.java
package org.infinispan.notifications; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import org.infinispan.configuration.global.GlobalConfiguration; /** * Class-level annotation used to annotate an object as being a valid cache listener. Used with the {@link * org.infinispan.Cache#addListener(Object)} and related APIs. * * <p/> Note that even if a class is annotated with this * annotation, it still needs method-level annotation (such as {@link org.infinispan.notifications.cachemanagerlistener.annotation.CacheStarted}) * to actually receive notifications. * <p/> Objects annotated with this annotation - listeners - can be attached to a * running {@link org.infinispan.Cache} so users can be notified of {@link org.infinispan.Cache} events. * <p/> <p/> There can be multiple methods that are annotated to receive the same event, and a method may receive * multiple events by using a super type. * * <h4>Delivery Semantics</h4> * An event is delivered immediately after the respective * operation, sometimes before as well, but must complete before the underlying cache call returns. For this reason it * is important to keep listener processing * logic short-lived. If a long running task needs to be performed, it's recommended to invoke this in a non blocking * way or to use an async listener. * * <h4>Transactional Semantics</h4> * Since the event is delivered during the actual cache call, the transactional * outcome is not yet known. For this reason, <i>events are always delivered, even if the changes they represent are * discarded by their containing transaction</i>. 
For applications that must only process events that represent changes * in a completed transaction, {@link org.infinispan.notifications.cachelistener.event.TransactionalEvent#getGlobalTransaction()} * can be used, along with {@link org.infinispan.notifications.cachelistener.event.TransactionCompletedEvent#isTransactionSuccessful()} * to record events and later process them once the transaction has been successfully committed. Example 4 demonstrates * this. * * <h4>Listener Modes</h4> * A listener can be configured to run in two different modes: sync or async. * <p>The first, non-blocking, is a mode where the listener is notified in the invoking thread. Operations in this mode * should be used when either the listener operation is expected to complete extremely fast or when the operation can be * performed in a non-blocking manner by returning a CompletionStage to delay * the operation until the stage is complete. This mode is the default mode, overrided by the {@link Listener#sync()} * property. A method is non blocking if it declares that it returns a {@link java.util.concurrent.CompletionStage} or * one of its subtypes. Note that the stage may return a value, but it will be ignored. The user <b>must</b> be very * careful that no blocking or long running operation is done while in a sync listener as it can cause thread * starvation. You should instead use your own thread pool to execute the blocking or long running operation and * return a {@link java.util.concurrent.CompletionStage} signifying when it is complete. * <p>The second, async, is pretty much identical to sync except that the original operation can continue and complete * while the listener is notified in a different thread. Listeners that throw exceptions are always logged and are not * propagated to the user. This mode is enabled when the listener has specified <code>sync</code> as <b>false</b> and * the return value is always ignored. 
* * <h4>Locking semantics</h4> * The sync mode will guarantee that listeners are notified for mutations on the same key sequentially, since * the lock for the key will be held when notifying the listener. Async however can have events notified in any order * so they should not be used when this ordering is required. If however the notification thread pool size is limited * to one, this will provide ordering for async events, but the throughput of async events may be reduced. * <p>Because the key lock is held for the entire execution of sync listeners (until the completion stage is done), * sync listeners should be as short as possible. * Acquiring additional locks is not recommended, as it could lead to deadlocks. * * <h4>Threading Semantics</h4> * A listener implementation must be capable of handling concurrent * invocations. Local sync notifications reuse the calling thread; remote sync notifications reuse the * network thread. If a listener is async, it will be invoked in the notification thread pool. * <h4>Notification Pool</h4> * Async events are made in a <i>separate</i> notification thread, which will not cause any blocking on the * caller or network thread. The separate thread for async listeners is taken from a pool, which can be * configured using {@link GlobalConfiguration#listenerThreadPool()}. The * default values can be found in the {@link org.infinispan.factories.KnownComponentNames} class. * * <h4>Clustered Listeners</h4> * Listeners by default are classified as a local listener. That is that they only receive events that are generated * on the node to which they were registered. They also receive pre and post notification events. A clustered listener, * configured with <code>clustered=true</code>, receives a subset of events but from any node that * generated the given event, not just the one they were registered on. 
The events that a clustered listener can receive are: * {@link org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent}, * {@link org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent}, * {@link org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent} and * {@link org.infinispan.notifications.cachelistener.event.CacheEntryExpiredEvent}. * For performance reasons, a clustered listener only receives post events. * * <h4>Summary of Notification Annotations</h4> * <table border="1" cellpadding="1" cellspacing="1" summary="Summary of notification annotations"> * <tr> * <th bgcolor="#CCCCFF" align="left">Annotation</th> * <th bgcolor="#CCCCFF" align="left">Event</th> * <th bgcolor="#CCCCFF" align="left">Description</th> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachemanagerlistener.annotation.CacheStarted}</td> * <td valign="top">{@link org.infinispan.notifications.cachemanagerlistener.event.CacheStartedEvent}</td> * <td valign="top">A cache was started</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachemanagerlistener.annotation.CacheStopped}</td> * <td valign="top">{@link org.infinispan.notifications.cachemanagerlistener.event.CacheStoppedEvent}</td> * <td valign="top">A cache was stopped</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryModified}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent}</td> * <td valign="top">A cache entry was modified</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent}</td> * <td valign="top">A cache entry was created</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved}</td> * <td 
valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent}</td> * <td valign="top">A cache entry was removed</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryExpiredEvent}</td> * <td valign="top">A cache entry was expired</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryVisited}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryVisitedEvent}</td> * <td valign="top">A cache entry was visited</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryLoaded}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryLoadedEvent}</td> * <td valign="top">A cache entry was loaded</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntriesEvicted}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntriesEvictedEvent}</td> * <td valign="top">A cache entries were evicted</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryActivated}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryActivatedEvent}</td> * <td valign="top">A cache entry was activated</td>\ * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryPassivated}</td> * <td valign="top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryPassivatedEvent}</td> * <td valign="top">One or more cache entries were passivated</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged}</td> * <td valign="top">{@link 
org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent}</td> * <td valign="top">A view change event was detected</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.TransactionRegistered}</td> * <td valign@="top">{@link org.infinispan.notifications.cachelistener.event.TransactionRegisteredEvent}</td> * <td valign="top">The cache has started to participate in a transaction</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.TransactionCompleted}</td> * <td valign=@"top">{@link org.infinispan.notifications.cachelistener.event.TransactionCompletedEvent}</td> * <td valign="top">The cache has completed its participation in a transaction</td> * </tr> * <tr> * <td valign="top">{@link org.infinispan.notifications.cachelistener.annotation.CacheEntryInvalidated}</td> * <td valign=@"top">{@link org.infinispan.notifications.cachelistener.event.CacheEntryInvalidatedEvent}</td> * <td valign="top">A cache entry was invalidated by a remote cache. Only if cache mode is INVALIDATION_SYNC or INVALIDATION_ASYNC.</td> * </tr> * <p/> * </table> * <p/> * * <h4>Example 1 - Method receiving a single event, sync</h4> * <pre> * &#064;Listener * public class SingleEventListener * { * &#064;CacheStarted * public CompletionStage&lt;Void&gt; doSomething(Event event) * { * System.out.println(&quot;Cache started. Details = &quot; + event); * return null; * } * } * </pre> * <p/> * * <h4>Example 2 - Method receiving multiple events - sync</h4> * <pre> * &#064;Listener * public class MultipleEventListener * { * &#064;CacheStarted * &#064;CacheStopped * public void doSomething(Event event) * { * if (event.getType() == Event.Type.CACHE_STARTED) * System.out.println(&quot;Cache started. Details = &quot; + event); * else if (event.getType() == Event.Type.CACHE_STOPPED) * System.out.println(&quot;Cache stopped. 
Details = &quot; + event); * } * } * </pre> * <p/> * * <h4>Example 3 - Multiple methods receiving the same event - async</h4> * <pre> * &#064;Listener(sync=false) * public class SingleEventListener * { * &#064;CacheStarted * public void handleStart(Event event) * { * System.out.println(&quot;Cache started&quot;); * } * <p/> * &#064;CacheStarted * &#064;CacheStopped * &#064;CacheBlocked * &#064;CacheUnblocked * &#064;ViewChanged * public void logEvent(Event event) * { * logSystem.logEvent(event.getType()); * } * } * </pre> * <p/> * <p/> * * <h4>Example 4 - Processing only events with a committed transaction - sync/non-blocking</h4> * <p/> * <pre> * &#064;Listener * public class EventHandler * { * private ConcurrentMap&lt;GlobalTransaction, Queue&lt;Event&gt;&gt; map = new ConcurrentHashMap&lt;GlobalTransaction, Queue&lt;Event&gt;&gt;(); * * &#064;TransactionRegistered * public CompletionStage&lt;Void&gt; startTransaction(TransactionRegisteredEvent event) * { * map.put(event.getGlobalTransaction(), new ConcurrentLinkedQueue&lt;Event&gt;()); * return null; * } * * &#064;CacheEntryCreated * &#064;CacheEntryModified * &#064;CacheEntryRemoved * public CompletionStage&lt;Void&gt; addEvent(TransactionalEvent event) * { * map.get(event.getGlobalTransaction()).add(event);' * return null; * } * * &#064;TransactionCompleted * public CompletionStage&lt;Void&gt; endTransaction(TransactionCompletedEvent event) * { * Queue&lt;Event&gt; events = map.get(event.getGlobalTransaction()); * map.remove(event.getGlobalTransaction()); * * System.out.println("Ended transaction " + event.getGlobalTransaction().getId()); * * if(event.isTransactionSuccessful()) * { * // Lets say we want to remotely log the events for the transaction - if this has an async or non blocking * // API you can use that and not block the thread and wait until it returns to complete the Stage. 
* CompletionStage&lt;Void&gt; stage = performRemoteEventUpdateNonBlocking(events); * return stage; * } else { * return null; * } * } * } * </pre> * * @author <a href="mailto:manik@jboss.org">Manik Surtani</a> * @author Jason T. Greene * @author William Burns * @see org.infinispan.notifications.cachemanagerlistener.annotation.CacheStarted * @see org.infinispan.notifications.cachemanagerlistener.annotation.CacheStopped * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryModified * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryVisited * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryLoaded * @see org.infinispan.notifications.cachelistener.annotation.CacheEntriesEvicted * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryActivated * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryPassivated * @see org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged * @see org.infinispan.notifications.cachelistener.annotation.TransactionCompleted * @see org.infinispan.notifications.cachelistener.annotation.TransactionRegistered * @see org.infinispan.notifications.cachelistener.annotation.CacheEntryInvalidated * @see org.infinispan.notifications.cachelistener.annotation.DataRehashed * @see org.infinispan.notifications.cachelistener.annotation.TopologyChanged * @see org.infinispan.notifications.cachelistener.annotation.PartitionStatusChanged * @see org.infinispan.notifications.cachelistener.annotation.PersistenceAvailabilityChanged * @since 4.0 */ @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) public @interface Listener { /** * Specifies whether callbacks on any class annotated with this annotation happens 
synchronously or asynchronously. * Please see the appropriate section on the {@link Listener} class for more details. * Defaults to <tt>true</tt>. * * @return true if the expectation is that the operation waits until the callbacks complete before continuing; * false if the operation can continue immediately. * @since 4.0 */ boolean sync() default true; /** * Specifies whether the event should be fired on the primary data owner of the affected key, or all nodes that see * the update. * <p> * Note that is value is ignored when {@link org.infinispan.notifications.Listener#clustered()} is true. * @return true if the expectation is that only the primary data owner will fire the event, false if all nodes that * see the update fire the event. * * @since 5.3 */ boolean primaryOnly() default false; /** * Defines whether the annotated listener is clustered or not. * Important: Clustered listener can only be notified for * {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved}, * {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated}, * {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved} * and {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired} events. * @return true if the expectation is that this listener is to be a cluster listener, as in it will receive * all notifications for data modifications * @since 7.0 */ boolean clustered() default false; /** * If set to true then the entire existing state within the cluster is * evaluated. For existing matches of the value, an @CacheEntryCreated event is triggered against the listener * during registration. This is only supported if the listener is also * {@link org.infinispan.notifications.Listener#clustered()}. * <p> * If using a distributed clustered cache it is possible to retrieve new events before the initial transfer is * completed. 
This is handled since only new events are queued until the segment it belongs to is completed * for iteration. This also will help reduce memory strain since a distributed clustered listener will need * to eventually retrieve all values from the cache. * @return true if the expectation is that when the listener is installed that all of the current data is sent * as new events to the listener before receiving new events * @since 7.0 **/ boolean includeCurrentState() default false; /** * Returns the type of observation level this listener defines. * @return the observation level of this listener * @see Observation * @since 7.2 */ Observation observation() default Observation.BOTH; /** * Enumeration that defines when a listener event can be observed. A listener can receive an event before and/or * after an operation has occurred. If the user wishes to be notified just before the operation completes * the listener should observe using {@link Observation#PRE} level. If the user wishes to be notified only * after the operation completes the listener should observe using {@link Observation#POST} level. If the user * wishes to have an event before and after they should observe using {@link Observation#BOTH} level. */ enum Observation { /** * Observation level used to only be notified of an operation before it completes */ PRE() { @Override public boolean shouldInvoke(boolean pre) { return pre; } }, /** * Observation level used to only be notified of an operation after it has completed */ POST() { @Override public boolean shouldInvoke(boolean pre) { return !pre; } }, /** * Observation level used to be notified of an operation before and after it occurs */ BOTH() { @Override public boolean shouldInvoke(boolean pre) { return true; } }; public abstract boolean shouldInvoke(boolean pre); } }
20,629
50.575
151
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/ClassLoaderAwareListenable.java
package org.infinispan.notifications; import java.util.concurrent.CompletionStage; import org.infinispan.util.concurrent.CompletionStages; /** * Interface that enhances {@link Listenable} with the possibility of specifying the * {@link ClassLoader} which should be set as the context class loader for the invoked * listener method * * @author Tristan Tarrant * @since 5.2 */ public interface ClassLoaderAwareListenable extends Listenable { /** * Adds a listener along with a class loader to use for the invocation * @param listener listener to add, must not be null * @param classLoader classloader, must not be null */ default void addListener(Object listener, ClassLoader classLoader) { CompletionStages.join(addListenerAsync(listener, classLoader)); } /** * Asynchronous version of {@link #addListener(Object, ClassLoader)} * @param listener listener to add, must not be null * @param classLoader classloader, must not be null * @return CompletionStage that when complete the listener is fully installed */ CompletionStage<Void> addListenerAsync(Object listener, ClassLoader classLoader); }
1,162
34.242424
86
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/Listenable.java
package org.infinispan.notifications; import java.util.Set; import java.util.concurrent.CompletionStage; import org.infinispan.commons.IllegalLifecycleStateException; import org.infinispan.util.concurrent.CompletionStages; /** * Interface that denotes that the implementation can have listeners attached to it. * * @author Manik Surtani * @since 4.0 */ public interface Listenable { /** * Adds a listener to the component. Typically, listeners would need to be annotated with {@link Listener} and * further to that, contain methods annotated appropriately, otherwise the listener will not be registered. * <p/> * See the {@link Listener} annotation for more information. * <p/> * * @param listener listener to add, must not be null */ default void addListener(Object listener) { CompletionStages.join(addListenerAsync(listener)); } /** * Asynchronous version of {@link #addListener(Object)} * @param listener listener to add, must not be null * @return CompletionStage that when complete the listener is fully installed */ CompletionStage<Void> addListenerAsync(Object listener); /** * Removes a listener from the component. * * @param listener listener to remove. Must not be null. * @throws IllegalLifecycleStateException may be thrown if the {@code Listenable} is stopped. */ default void removeListener(Object listener) { CompletionStages.join(removeListenerAsync(listener)); } /** * Asynchronous version of {@link #removeListener(Object)} * @param listener listener to remove, must not be null * @return CompletionStage that when complete the listener is fully removed */ CompletionStage<Void> removeListenerAsync(Object listener); /** * @return a set of all listeners registered on this component. * * @deprecated Since 10.0, with no replacement */ @Deprecated Set<Object> getListeners(); }
1,963
30.677419
114
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/CacheEntryListenerInvocation.java
package org.infinispan.notifications.cachelistener; import java.lang.annotation.Annotation; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletionStage; import org.infinispan.encoding.DataConversion; import org.infinispan.notifications.Listener; import org.infinispan.notifications.cachelistener.event.CacheEntryEvent; import org.infinispan.notifications.cachelistener.event.Event; import org.infinispan.notifications.cachelistener.filter.CacheEventConverter; import org.infinispan.notifications.cachelistener.filter.CacheEventFilter; import org.infinispan.notifications.impl.ListenerInvocation; /** * Additional listener methods specific to caches. * * @author wburns * @since 7.0 */ public interface CacheEntryListenerInvocation<K, V> extends ListenerInvocation<Event<K, V>> { /** * Invokes the event * @param event * @param isLocalNodePrimaryOwner * @return null if event was ignored or already complete otherwise all listeners for the event will be notified when * the provided stage is completed */ CompletionStage<Void> invoke(EventWrapper<K, V, CacheEntryEvent<K, V>> event, boolean isLocalNodePrimaryOwner); /** * Invokes the event without applying filters or converters * @param wrappedEvent * @param skipQueue * @param skipConverter * @param needsTransform * @return null if event was ignored or already complete otherwise all listeners for the event will be notified when * the provided stage is completed */ CompletionStage<Void> invokeNoChecks(EventWrapper<K, V, CacheEntryEvent<K, V>> wrappedEvent, boolean skipQueue, boolean skipConverter, boolean needsTransform); boolean isClustered(); boolean isSync(); UUID getIdentifier(); Listener.Observation getObservation(); Class<? extends Annotation> getAnnotation(); CacheEventFilter<? super K, ? super V> getFilter(); <C> CacheEventConverter<? super K, ? super V, C> getConverter(); Set<Class<? 
extends Annotation>> getFilterAnnotations(); DataConversion getKeyDataConversion(); DataConversion getValueDataConversion(); /** * @return true if the filter/converter should be done in the storage format */ boolean useStorageFormat(); }
2,256
31.710145
162
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/package-info.java
/** * {@link org.infinispan.Cache}-specific notifications and eventing. */ package org.infinispan.notifications.cachelistener;
129
25
68
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/BaseQueueingSegmentListener.java
package org.infinispan.notifications.cachelistener; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReferenceArray; import org.infinispan.commands.SegmentSpecificCommand; import org.infinispan.container.entries.CacheEntry; import org.infinispan.distribution.ch.KeyPartitioner; import org.infinispan.notifications.cachelistener.event.Event; import org.infinispan.reactive.RxJavaInterop; import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier; import org.infinispan.util.logging.Log; import org.reactivestreams.Publisher; import io.reactivex.rxjava3.core.Flowable; /** * This is the base class for use when listening to segment completions when doing initial event * retrieval. This will handle keeping track of concurrent key updates as well as iteration by calling * appropriate methods at the given time. * <p> * This base class provides a working set for tracking of entries as they are iterated on, assuming * the {@link QueueingSegmentListener#apply(SegmentPublisherSupplier.Notification)} * method is invoked for each event serially (includes both segment and entries). 
* * @author wburns * @since 7.0 */ abstract class BaseQueueingSegmentListener<K, V, E extends Event<K, V>> implements QueueingSegmentListener<K, V, E> { protected final AtomicBoolean completed = new AtomicBoolean(false); protected final AtomicReferenceArray<ConcurrentMap<K, Object>> notifiedKeys; protected final KeyPartitioner keyPartitioner; protected BaseQueueingSegmentListener(int numSegments, KeyPartitioner keyPartitioner) { this.notifiedKeys = new AtomicReferenceArray<>(numSegments); this.keyPartitioner = keyPartitioner; for (int i = 0; i < numSegments; ++i) { notifiedKeys.set(i, new ConcurrentHashMap<>()); } } int segmentFromEventWrapper(EventWrapper<K, V, E> eventWrapper) { return SegmentSpecificCommand.extractSegment(eventWrapper.getCommand(), eventWrapper.getKey(), keyPartitioner); } @Override public Publisher<CacheEntry<K, V>> apply(SegmentPublisherSupplier.Notification<CacheEntry<K, V>> cacheEntryNotification) throws Throwable { if (cacheEntryNotification.isSegmentComplete()) { return segmentComplete(cacheEntryNotification.completedSegment()); } int segment = cacheEntryNotification.valueSegment(); CacheEntry<K, V> cacheEntry = cacheEntryNotification.value(); K key = cacheEntry.getKey(); // By putting the NOTIFIED value it has signaled that any more updates for this key have to be enqueued instead // of taking the last one Object value = notifiedKeys.get(segment).put(key, NOTIFIED); if (value == null) return Flowable.just(cacheEntry); if (getLog().isTraceEnabled()) { getLog().tracef("Processing key %s as a concurrent update occurred with value %s", key, value); } return value != QueueingSegmentListener.REMOVED ? 
Flowable.just(((CacheEntry<K, V>) value)) : Flowable.empty(); } Flowable<CacheEntry<K, V>> segmentComplete(int segment) { ConcurrentMap<K, Object> map = notifiedKeys.get(segment); // Ensure `addEvent` below knows if the value was added or not synchronized (map) { notifiedKeys.set(segment, null); } return Flowable.fromIterable(map.entrySet()) // We only process entries we can remove ourselves to guarantee consistency with atomic updates // Normally we would use iterator, but remove for iterator doesn't notify us if it actually removed the entry .filter(e -> map.remove(e.getKey()) != null) .map(RxJavaInterop.entryToValueFunction()) .filter(v -> v != NOTIFIED && v != REMOVED) .map(v -> (CacheEntry<K, V>) v); } protected boolean addEvent(K key, int segment, Object value) { ConcurrentMap<K, Object> map = notifiedKeys.get(segment); if (map == null) { return false; } synchronized (map) { // Need to double check inside synchronized just in case concurrent segmentComplete occurred if (notifiedKeys.get(segment) == map) { return map.compute(key, (k, v) -> v == NOTIFIED ? v : value) == value; } } return false; } protected abstract Log getLog(); }
4,432
42.038835
142
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/QueueingSegmentListener.java
package org.infinispan.notifications.cachelistener; import java.util.concurrent.CompletionStage; import org.infinispan.container.entries.CacheEntry; import org.infinispan.notifications.cachelistener.event.Event; import org.infinispan.notifications.impl.ListenerInvocation; import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier; import org.reactivestreams.Publisher; import io.reactivex.rxjava3.functions.Function; /** * This interface describes methods needed for a segment listener that is used when iterating over the current * events and be able to queue them properly * * @author wburns * @since 7.0 */ public interface QueueingSegmentListener<K, V, E extends Event<K, V>> extends Function<SegmentPublisherSupplier.Notification<CacheEntry<K, V>>, Publisher<CacheEntry<K, V>>> { // This is to be used as a placeholder when a value has been iterated and now is being processed by the caller // This is considered to be the completed state for the key and should never change from this static final Object NOTIFIED = new Object(); // This is to be used as a placeholder for a removed value. This is needed so that we know // a value is removed. The caller will get this back when processing a key and should then ignore // or do it's own special processing for removed values static final Object REMOVED = new Object(); /** * This should be invoked on a notification before actually processing the data. * Note this method modifies the underlying listener state. * It will return a Publisher with the entries that need to be notified or an empty Publisher if none * * @param cacheEntryNotification The notification being processed * @return a Publisher that returns all the CacheEntries that need to be notified */ @Override Publisher<CacheEntry<K, V>> apply(SegmentPublisherSupplier.Notification<CacheEntry<K, V>> cacheEntryNotification) throws Throwable; /** * This should be called by any listener when an event is generated to possibly queue it. 
If it is not * queued, then the caller should take appropriate action such as manually firing the invocation. * @param wrapper The event that was just raised * @param invocation The invocation the event would be fired on * @return Whether or not it was queued. If it wasn't queued the invocation should be fired manually */ public boolean handleEvent(EventWrapper<K, V, E> wrapper, ListenerInvocation<Event<K, V>> invocation); /** * This is needed to tell the handler when the complete iteration is done. Depending on the implementation * this could also fire all queued events that are remaining. */ public CompletionStage<Void> transferComplete(); }
2,744
48.909091
174
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/EventWrapper.java
package org.infinispan.notifications.cachelistener;

import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.notifications.cachelistener.event.Event;

/**
 * Simple wrapper pairing the original key with the converted event and the command that produced
 * it. The original key is required for purposes such as key tracking. The command may be null,
 * since an event can originate without any command.
 *
 * @author wburns
 * @since 9.0
 */
public class EventWrapper<K, V, E extends Event<K, V>> {
   private final K key;
   private final FlagAffectedCommand command;
   // Mutable on purpose: the event may be swapped for a converted variant after construction.
   private E event;

   public EventWrapper(K key, E event, FlagAffectedCommand command) {
      this.key = key;
      this.event = event;
      this.command = command;
   }

   public K getKey() {
      return key;
   }

   public E getEvent() {
      return event;
   }

   public void setEvent(E event) {
      this.event = event;
   }

   public FlagAffectedCommand getCommand() {
      return command;
   }
}
986
23.675
119
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/QueueingAllSegmentListener.java
package org.infinispan.notifications.cachelistener;

import java.lang.invoke.MethodHandles;
import java.util.Iterator;
import java.util.Queue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.notifications.cachelistener.event.CacheEntryEvent;
import org.infinispan.notifications.cachelistener.event.Event;
import org.infinispan.notifications.impl.ListenerInvocation;
import org.infinispan.util.KeyValuePair;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

/**
 * This handler is to be used when all the events must be queued until the iteration process is
 * complete. This is required for any local listener or non distributed caches. The local case is
 * required since we could have other events that are interrelated, such as tx start/stop, that
 * must all be queued together in the order they were provided.
 *
 * @author wburns
 * @since 7.0
 */
class QueueingAllSegmentListener<K, V> extends BaseQueueingSegmentListener<K, V, Event<K, V>> {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
   // Single global queue: all events are retained until transferComplete fires them in order.
   protected final Queue<KeyValuePair<Event<K, V>, ListenerInvocation<Event<K, V>>>> queue =
         new ConcurrentLinkedQueue<>();
   protected final InternalEntryFactory entryFactory;

   QueueingAllSegmentListener(InternalEntryFactory entryFactory, int segment, KeyPartitioner keyPartitioner) {
      super(segment, keyPartitioner);
      this.entryFactory = entryFactory;
   }

   @Override
   public boolean handleEvent(EventWrapper<K, V, Event<K, V>> wrapper, ListenerInvocation<Event<K, V>> invocation) {
      // Once the transfer has completed, nothing is queued and the caller fires the event itself.
      boolean queued = !completed.get();
      if (queued) {
         boolean continueQueueing = true;
         Event<K, V> event = wrapper.getEvent();
         if (event instanceof CacheEntryEvent) {
            CacheEntryEvent<K, V> cacheEvent = (CacheEntryEvent<K, V>) event;
            CacheEntry<K, V> cacheEntry = entryFactory.create(cacheEvent.getKey(), cacheEvent.getValue(),
                  cacheEvent.getMetadata());
            // addEvent records the latest value per key; if it accepted the value the iteration
            // will surface it later, so this event does not need to be queued separately.
            if (addEvent(wrapper.getKey(), segmentFromEventWrapper(wrapper),
                  cacheEntry.getValue() != null ? cacheEntry : REMOVED)) {
               continueQueueing = false;
            }
         }
         if (continueQueueing) {
            KeyValuePair<Event<K, V>, ListenerInvocation<Event<K, V>>> eventPair =
                  new KeyValuePair<Event<K, V>, ListenerInvocation<Event<K, V>>>(event, invocation);
            queue.add(eventPair);
            // If it completed since we last added and ours is in the queue, we have to run the
            // event - so say it wasn't queued, so caller has to run it. The remove() guards
            // against double delivery when transferComplete drained our pair concurrently.
            if (completed.get() && queue.remove(eventPair)) {
               return false;
            }
         }
      }
      return queued;
   }

   @Override
   public CompletionStage<Void> transferComplete() {
      // Drain the queue in insertion order, firing each invocation and aggregating any async
      // completions; marking completed afterwards flips handleEvent to pass-through mode.
      AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
      Iterator<KeyValuePair<Event<K, V>, ListenerInvocation<Event<K, V>>>> iterator = queue.iterator();
      while (iterator.hasNext()) {
         KeyValuePair<Event<K, V>, ListenerInvocation<Event<K, V>>> eventPair = iterator.next();
         CompletionStage<Void> eventStage = eventPair.getValue().invoke(eventPair.getKey());
         if (eventStage != null) {
            aggregateCompletionStage.dependsOn(eventStage);
         }
         iterator.remove();
      }
      completed.set(true);
      return aggregateCompletionStage.freeze();
   }

   @Override
   protected Log getLog() {
      return log;
   }
}
3,980
42.747253
133
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/NotifyHelper.java
package org.infinispan.notifications.cachelistener;

import java.util.Collections;
import java.util.concurrent.CompletionStage;

import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.functional.impl.EntryViews;
import org.infinispan.functional.impl.FunctionalNotifier;
import org.infinispan.metadata.Metadata;
import org.infinispan.commons.util.concurrent.CompletableFutures;

/**
 * Helper that translates a committed cache entry into the appropriate cache-listener and
 * functional-listener notifications.
 */
public class NotifyHelper {

   /**
    * Notifies listeners that an entry was committed, dispatching to the event type implied by
    * the command and the {@code created}/{@code removed}/{@code expired} flags.
    *
    * @param notifier           receiver of the classic cache-listener events
    * @param functionalNotifier receiver of the functional-listener events
    * @param created            whether the commit created the entry
    * @param removed            whether the commit removed the entry
    * @param expired            whether the removal was caused by expiration
    * @param entry              the committed entry
    * @param ctx                invocation context of the committing operation
    * @param command            the command that caused the commit; may be null
    * @param previousValue      value before the commit, if any
    * @param previousMetadata   metadata before the commit, if any
    * @param evictionManager    used to raise eviction notifications for {@link EvictCommand}
    * @return a stage completing when all listener invocations have completed
    */
   public static CompletionStage<Void> entryCommitted(CacheNotifier notifier, FunctionalNotifier functionalNotifier,
         boolean created, boolean removed, boolean expired, CacheEntry entry, InvocationContext ctx,
         FlagAffectedCommand command, Object previousValue, Metadata previousMetadata,
         EvictionManager evictionManager) {
      // We only notify if there is no state transfer flag
      if (FlagBitSets.extractStateTransferFlag(ctx, command) != null) {
         return CompletableFutures.completedNull();
      }
      // A write-only command only writes and so can't 100% guarantee access to the previous
      // value; functional read-write events are suppressed for it.
      boolean isWriteOnly = (command instanceof WriteCommand) && ((WriteCommand) command).isWriteOnly();
      if (removed) {
         return notifyRemoval(notifier, functionalNotifier, expired, entry, ctx, command, previousValue,
               previousMetadata, evictionManager, isWriteOnly);
      }
      return notifyWrite(notifier, functionalNotifier, created, entry, ctx, command, previousValue,
            previousMetadata, isWriteOnly);
   }

   /** Dispatches removal-style events: expiration, eviction, removal and invalidation. */
   private static CompletionStage<Void> notifyRemoval(CacheNotifier notifier, FunctionalNotifier functionalNotifier,
         boolean expired, CacheEntry entry, InvocationContext ctx, FlagAffectedCommand command,
         Object previousValue, Metadata previousMetadata, EvictionManager evictionManager, boolean isWriteOnly) {
      if (command instanceof RemoveExpiredCommand) {
         // It is possible this command was generated from a store and the value is not in memory,
         // thus we have to fall back to the command value and metadata if not present
         RemoveExpiredCommand expiredCommand = (RemoveExpiredCommand) command;
         Object expiredValue = previousValue != null ? previousValue : expiredCommand.getValue();
         Metadata expiredMetadata = entry.getMetadata() != null ? entry.getMetadata() : expiredCommand.getMetadata();
         return notifier.notifyCacheEntryExpired(entry.getKey(), expiredValue, expiredMetadata, ctx);
      }
      if (command instanceof EvictCommand) {
         return evictionManager.onEntryEviction(Collections.singletonMap(entry.getKey(), entry), command);
      }
      if (command instanceof RemoveCommand) {
         return notifier.notifyCacheEntryRemoved(entry.getKey(), previousValue, entry.getMetadata(), false, ctx,
               command);
      }
      if (command instanceof InvalidateCommand) {
         return notifier.notifyCacheEntryInvalidated(entry.getKey(), previousValue, entry.getMetadata(), false, ctx,
               command);
      }
      CompletionStage<Void> stage;
      if (expired) {
         stage = notifier.notifyCacheEntryExpired(entry.getKey(), previousValue, previousMetadata, ctx);
      } else {
         stage = notifier.notifyCacheEntryRemoved(entry.getKey(), previousValue, previousMetadata, false, ctx,
               command);
      }
      // A write-only command only writes and so can't 100% guarantee to be able to retrieve the
      // previous value when removed, so only send the remove event when the command is read-write.
      if (!isWriteOnly)
         functionalNotifier.notifyOnRemove(EntryViews.readOnly(entry.getKey(), previousValue, previousMetadata));
      functionalNotifier.notifyOnWriteRemove(entry.getKey());
      return stage;
   }

   /** Dispatches create/modify events, fired after the container has been updated. */
   private static CompletionStage<Void> notifyWrite(CacheNotifier notifier, FunctionalNotifier functionalNotifier,
         boolean created, CacheEntry entry, InvocationContext ctx, FlagAffectedCommand command,
         Object previousValue, Metadata previousMetadata, boolean isWriteOnly) {
      CompletionStage<Void> stage;
      if (created) {
         stage = notifier.notifyCacheEntryCreated(entry.getKey(), entry.getValue(), entry.getMetadata(), false, ctx,
               command);
         // A write-only command only writes and so can't 100% guarantee that an entry has been
         // created, so only send the create event when the command is read-write.
         if (!isWriteOnly)
            functionalNotifier.notifyOnCreate(entry);
      } else {
         stage = notifier.notifyCacheEntryModified(entry.getKey(), entry.getValue(), entry.getMetadata(),
               previousValue, previousMetadata, false, ctx, command);
         // Likewise, only send the modify event when the command is read-write.
         if (!isWriteOnly)
            functionalNotifier.notifyOnModify(entry, previousValue, previousMetadata);
      }
      functionalNotifier.notifyOnWrite(entry);
      return stage;
   }
}
4,845
53.449438
138
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/ListenerHolder.java
package org.infinispan.notifications.cachelistener;

import org.infinispan.encoding.DataConversion;

/**
 * Bundles a listener instance together with the key/value {@link DataConversion} instances it was
 * registered with, plus whether its filter should operate on the storage format.
 *
 * @since 9.1
 */
public class ListenerHolder {

   private final Object listener;
   private final DataConversion keyDataConversion;
   private final DataConversion valueDataConversion;
   private final boolean filterOnStorageFormat;

   public ListenerHolder(Object listener, DataConversion keyDataConversion, DataConversion valueDataConversion,
                         boolean filterOnStorageFormat) {
      this.listener = listener;
      this.keyDataConversion = keyDataConversion;
      this.valueDataConversion = valueDataConversion;
      this.filterOnStorageFormat = filterOnStorageFormat;
   }

   public Object getListener() {
      return listener;
   }

   public DataConversion getKeyDataConversion() {
      return keyDataConversion;
   }

   public DataConversion getValueDataConversion() {
      return valueDataConversion;
   }

   public boolean isFilterOnStorageFormat() {
      return filterOnStorageFormat;
   }
}
1,017
25.789474
144
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/CacheNotifier.java
package org.infinispan.notifications.cachelistener;

import java.lang.annotation.Annotation;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CompletionStage;

import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.ClassLoaderAwareListenable;
import org.infinispan.notifications.DataConversionAwareListenable;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.topology.CacheTopology;
import org.infinispan.transaction.xa.GlobalTransaction;

/**
 * Public interface with all allowed notifications. Each notify method returns a
 * {@link CompletionStage} that completes once all registered listeners have been invoked.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.0
 */
@Scope(Scopes.NAMED_CACHE)
public interface CacheNotifier<K, V> extends DataConversionAwareListenable<K, V>, ClassLoaderAwareListenable {

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryCreated(K key, V value, Metadata metadata, boolean pre,
                                                 InvocationContext ctx, FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryModified(K key, V value, Metadata metadata, V previousValue,
                                                  Metadata previousMetadata, boolean pre, InvocationContext ctx,
                                                  FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryRemoved(K key, V previousValue, Metadata previousMetadata, boolean pre,
                                                 InvocationContext ctx, FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryVisitedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryVisited(K key, V value, boolean pre, InvocationContext ctx,
                                                 FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntriesEvictedEvent}
    * event.
    */
   // DataContainer eviction is sync
   CompletionStage<Void> notifyCacheEntriesEvicted(Collection<Map.Entry<K, V>> entries, InvocationContext ctx,
                                                   FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a CacheEntryExpired event.
    */
   CompletionStage<Void> notifyCacheEntryExpired(K key, V value, Metadata metadata, InvocationContext ctx);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryInvalidatedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryInvalidated(K key, V value, Metadata metadata, boolean pre,
                                                     InvocationContext ctx, FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryLoadedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryLoaded(K key, V value, boolean pre, InvocationContext ctx,
                                                FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryActivatedEvent}
    * event.
    */
   CompletionStage<Void> notifyCacheEntryActivated(K key, V value, boolean pre, InvocationContext ctx,
                                                   FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a {@link org.infinispan.notifications.cachelistener.event.CacheEntryPassivatedEvent}
    * event.
    */
   // Callers of passivation are sync
   CompletionStage<Void> notifyCacheEntryPassivated(K key, V value, boolean pre, InvocationContext ctx,
                                                    FlagAffectedCommand command);

   /**
    * Notifies all registered listeners of a transaction completion event.
    *
    * @param transaction the transaction that has just completed
    * @param successful  if true, the transaction committed. If false, this is a rollback event
    */
   CompletionStage<Void> notifyTransactionCompleted(GlobalTransaction transaction, boolean successful,
                                                    InvocationContext ctx);

   /**
    * Notifies all registered listeners of a transaction registration event.
    *
    * @param globalTransaction the transaction that has just been registered
    */
   // Sync local transaction registered
   CompletionStage<Void> notifyTransactionRegistered(GlobalTransaction globalTransaction, boolean isOriginLocal);

   // Callers sync - until additional parts of topology updates - not in user thread
   CompletionStage<Void> notifyDataRehashed(ConsistentHash oldCH, ConsistentHash newCH, ConsistentHash unionCH,
                                            int newTopologyId, boolean pre);

   // Callers sync - until additional parts of topology updates - not in user thread
   CompletionStage<Void> notifyTopologyChanged(CacheTopology oldTopology, CacheTopology newTopology,
                                               int newTopologyId, boolean pre);

   // Callers sync - until additional parts of topology updates - not in user thread
   CompletionStage<Void> notifyPartitionStatusChanged(AvailabilityMode mode, boolean pre);

   // Callers sync - done in periodic persistence thread - not in user thread
   CompletionStage<Void> notifyPersistenceAvailabilityChanged(boolean available);

   /**
    * Returns whether there is at least one listener registered for the given annotation.
    *
    * @param annotationClass annotation to test for
    * @return true if there is a listener mapped to the annotation, otherwise false
    */
   boolean hasListener(Class<? extends Annotation> annotationClass);
}
6,047
44.473684
149
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/DistributedQueueingSegmentListener.java
package org.infinispan.notifications.cachelistener;

import java.lang.invoke.MethodHandles;
import java.util.Queue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicReferenceArray;

import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.notifications.cachelistener.event.CacheEntryEvent;
import org.infinispan.notifications.cachelistener.event.Event;
import org.infinispan.notifications.impl.ListenerInvocation;
import org.infinispan.util.KeyValuePair;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;

import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;

/**
 * This handler is to be used with a clustered distributed cache. This handler does special
 * optimizations to allow queueing to occur per segment: each segment's queue is drained and
 * dropped as soon as that segment finishes transferring, so we don't retain all new events in
 * memory until the whole iteration is complete, unlike {@link QueueingAllSegmentListener}.
 *
 * @author wburns
 * @since 7.0
 */
class DistributedQueueingSegmentListener<K, V> extends BaseQueueingSegmentListener<K, V, CacheEntryEvent<K, V>> {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());

   // One queue per segment; a null slot means that segment's transfer already completed and its
   // queued events were (or are being) delivered.
   private final AtomicReferenceArray<Queue<KeyValuePair<CacheEntryEvent<K, V>, ListenerInvocation<Event<K, V>>>>> queues;

   protected final InternalEntryFactory entryFactory;

   public DistributedQueueingSegmentListener(InternalEntryFactory entryFactory, int numSegments, KeyPartitioner keyPartitioner) {
      super(numSegments, keyPartitioner);
      this.entryFactory = entryFactory;
      // we assume the # of segments won't change between different consistent hashes
      this.queues = new AtomicReferenceArray<>(numSegments);
      for (int i = 0; i < queues.length(); ++i) {
         queues.set(i, new ConcurrentLinkedQueue<>());
      }
   }

   @Override
   public boolean handleEvent(EventWrapper<K, V, CacheEntryEvent<K, V>> wrapped, ListenerInvocation<Event<K, V>> invocation) {
      // If we already completed, don't even attempt to enqueue
      if (completed.get()) {
         return false;
      }
      K key = wrapped.getKey();
      CacheEntryEvent<K, V> event = wrapped.getEvent();
      int segment = segmentFromEventWrapper(wrapped);

      CacheEntry<K, V> cacheEntry = entryFactory.create(event.getKey(), event.getValue(), event.getMetadata());
      boolean enqueued = true;
      // addEvent records the latest value per key; if it accepted the value the iteration will
      // surface it later, so this event does not need a separate queue entry.
      if (!addEvent(key, segment, cacheEntry.getValue() != null ? cacheEntry : REMOVED)) {
         // If it wasn't added it means we haven't processed this value yet, so add it to the queue for this segment
         Queue<KeyValuePair<CacheEntryEvent<K, V>, ListenerInvocation<Event<K, V>>>> queue;
         // If the queue is not null, try to see if we can add to it
         if ((queue = queues.get(segment)) != null) {
            KeyValuePair<CacheEntryEvent<K, V>, ListenerInvocation<Event<K, V>>> eventPair =
                  new KeyValuePair<>(event, invocation);
            queue.add(eventPair);
            // If the queue was removed, that means we had a concurrent completion, so we need to verify if we
            // have to run the event manually (the remove() guards against double delivery)
            if (queues.get(segment) == null) {
               if (queue.remove(eventPair)) {
                  enqueued = false;
               }
            }
         } else {
            // if the queue is already null that means it was transferred so just raise the notification
            enqueued = false;
         }
      }
      return enqueued;
   }

   @Override
   public CompletionStage<Void> transferComplete() {
      // All per-segment queues should already have been drained via segmentComplete; this just
      // flips the completed flag and sanity-checks that nothing was left behind.
      completed.set(true);
      for (int i = 0; i < notifiedKeys.length(); ++i) {
         assert notifiedKeys.get(i) == null;
         assert queues.get(i) == null;
      }
      return CompletableFutures.completedNull();
   }

   @Override
   Flowable<CacheEntry<K, V>> segmentComplete(int segment) {
      // After the base completion, drain and drop this segment's queue so its events are
      // delivered and later events for the segment bypass queueing entirely.
      return super.segmentComplete(segment)
            .concatWith(Completable.defer(() -> Completable.fromCompletionStage(completeSegment(segment))));
   }

   // Atomically detaches the segment's queue (marking the segment done for handleEvent) and
   // fires every invocation that was queued for it.
   private CompletionStage<Void> completeSegment(int segment) {
      Queue<KeyValuePair<CacheEntryEvent<K, V>, ListenerInvocation<Event<K, V>>>> queue = queues.getAndSet(segment, null);
      AggregateCompletionStage<Void> aggregateCompletionStage = null;
      if (queue != null) {
         if (!queue.isEmpty()) {
            aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
            for (KeyValuePair<CacheEntryEvent<K, V>, ListenerInvocation<Event<K, V>>> event : queue) {
               // The InitialTransferInvocation already did the converter if needed
               aggregateCompletionStage.dependsOn(event.getValue().invoke(event.getKey()));
            }
         }
      }
      return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull();
   }

   @Override
   protected Log getLog() {
      return log;
   }
}
5,334
43.831933
129
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/CacheNotifierImpl.java
package org.infinispan.notifications.cachelistener; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_ACTIVATED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_CREATED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_EVICTED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_EXPIRED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_INVALIDATED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_LOADED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_MODIFIED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_PASSIVATED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_REMOVED; import static org.infinispan.notifications.cachelistener.event.Event.Type.CACHE_ENTRY_VISITED; import static org.infinispan.notifications.cachelistener.event.Event.Type.DATA_REHASHED; import static org.infinispan.notifications.cachelistener.event.Event.Type.PARTITION_STATUS_CHANGED; import static org.infinispan.notifications.cachelistener.event.Event.Type.PERSISTENCE_AVAILABILITY_CHANGED; import static org.infinispan.notifications.cachelistener.event.Event.Type.TOPOLOGY_CHANGED; import static org.infinispan.notifications.cachelistener.event.Event.Type.TRANSACTION_COMPLETED; import static org.infinispan.notifications.cachelistener.event.Event.Type.TRANSACTION_REGISTERED; import static org.infinispan.util.logging.Log.CONTAINER; import java.lang.annotation.Annotation; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletionStage; import 
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Function; import org.infinispan.AdvancedCache; import org.infinispan.cache.impl.EncoderEntryMapper; import org.infinispan.commands.CommandInvocationId; import org.infinispan.commands.FlagAffectedCommand; import org.infinispan.commands.SegmentSpecificCommand; import org.infinispan.commands.write.WriteCommand; import org.infinispan.commons.CacheException; import org.infinispan.commons.CacheListenerException; import org.infinispan.commons.dataconversion.MediaType; import org.infinispan.commons.dataconversion.Transcoder; import org.infinispan.commons.dataconversion.Wrapper; import org.infinispan.commons.util.EnumUtil; import org.infinispan.commons.util.ServiceFinder; import org.infinispan.commons.util.Util; import org.infinispan.commons.util.concurrent.CompletableFutures; import org.infinispan.configuration.cache.CacheMode; import org.infinispan.configuration.cache.Configuration; import org.infinispan.configuration.global.GlobalConfiguration; import org.infinispan.container.entries.CacheEntry; import org.infinispan.container.impl.InternalEntryFactory; import org.infinispan.context.InvocationContext; import org.infinispan.context.impl.FlagBitSets; import org.infinispan.context.impl.TxInvocationContext; import org.infinispan.distribution.ch.ConsistentHash; import org.infinispan.distribution.ch.KeyPartitioner; import org.infinispan.encoding.DataConversion; import org.infinispan.factories.annotations.Inject; import org.infinispan.factories.annotations.Start; import org.infinispan.factories.impl.BasicComponentRegistry; import org.infinispan.factories.impl.ComponentRef; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.filter.CacheFilters; import org.infinispan.interceptors.AsyncInterceptorChain; import 
org.infinispan.interceptors.locking.ClusteringDependentLogic; import org.infinispan.manager.ClusterExecutor; import org.infinispan.marshall.core.EncoderRegistry; import org.infinispan.metadata.Metadata; import org.infinispan.notifications.Listener; import org.infinispan.notifications.cachelistener.annotation.CacheEntriesEvicted; import org.infinispan.notifications.cachelistener.annotation.CacheEntryActivated; import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated; import org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired; import org.infinispan.notifications.cachelistener.annotation.CacheEntryInvalidated; import org.infinispan.notifications.cachelistener.annotation.CacheEntryLoaded; import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified; import org.infinispan.notifications.cachelistener.annotation.CacheEntryPassivated; import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved; import org.infinispan.notifications.cachelistener.annotation.CacheEntryVisited; import org.infinispan.notifications.cachelistener.annotation.DataRehashed; import org.infinispan.notifications.cachelistener.annotation.PartitionStatusChanged; import org.infinispan.notifications.cachelistener.annotation.PersistenceAvailabilityChanged; import org.infinispan.notifications.cachelistener.annotation.TopologyChanged; import org.infinispan.notifications.cachelistener.annotation.TransactionCompleted; import org.infinispan.notifications.cachelistener.annotation.TransactionRegistered; import org.infinispan.notifications.cachelistener.cluster.ClusterCacheNotifier; import org.infinispan.notifications.cachelistener.cluster.ClusterEvent; import org.infinispan.notifications.cachelistener.cluster.ClusterEventManager; import org.infinispan.notifications.cachelistener.cluster.ClusterListenerRemoveCallable; import org.infinispan.notifications.cachelistener.cluster.ClusterListenerReplicateCallable; import 
org.infinispan.notifications.cachelistener.cluster.RemoteClusterListener; import org.infinispan.notifications.cachelistener.event.CacheEntriesEvictedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryActivatedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryCreatedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryExpiredEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryInvalidatedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryLoadedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryPassivatedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent; import org.infinispan.notifications.cachelistener.event.CacheEntryVisitedEvent; import org.infinispan.notifications.cachelistener.event.DataRehashedEvent; import org.infinispan.notifications.cachelistener.event.Event; import org.infinispan.notifications.cachelistener.event.PartitionStatusChangedEvent; import org.infinispan.notifications.cachelistener.event.PersistenceAvailabilityChangedEvent; import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent; import org.infinispan.notifications.cachelistener.event.TransactionCompletedEvent; import org.infinispan.notifications.cachelistener.event.TransactionRegisteredEvent; import org.infinispan.notifications.cachelistener.event.impl.EventImpl; import org.infinispan.notifications.cachelistener.filter.CacheEventConverter; import org.infinispan.notifications.cachelistener.filter.CacheEventConverterAsConverter; import org.infinispan.notifications.cachelistener.filter.CacheEventFilter; import org.infinispan.notifications.cachelistener.filter.CacheEventFilterAsKeyValueFilter; import 
org.infinispan.notifications.cachelistener.filter.CacheEventFilterConverter; import org.infinispan.notifications.cachelistener.filter.CacheEventFilterConverterAsKeyValueFilterConverter; import org.infinispan.notifications.cachelistener.filter.DelegatingCacheEntryListenerInvocation; import org.infinispan.notifications.cachelistener.filter.EventType; import org.infinispan.notifications.cachelistener.filter.FilterIndexingServiceProvider; import org.infinispan.notifications.cachelistener.filter.IndexedFilter; import org.infinispan.notifications.impl.AbstractListenerImpl; import org.infinispan.notifications.impl.ListenerInvocation; import org.infinispan.partitionhandling.AvailabilityMode; import org.infinispan.reactive.publisher.PublisherTransformers; import org.infinispan.reactive.publisher.impl.ClusterPublisherManager; import org.infinispan.reactive.publisher.impl.DeliveryGuarantee; import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier; import org.infinispan.remoting.rpc.RpcManager; import org.infinispan.remoting.transport.Address; import org.infinispan.remoting.transport.jgroups.SuspectException; import org.infinispan.security.actions.SecurityActions; import org.infinispan.stream.impl.CacheIntermediatePublisher; import org.infinispan.stream.impl.intops.IntermediateOperation; import org.infinispan.stream.impl.intops.object.FilterOperation; import org.infinispan.stream.impl.intops.object.MapOperation; import org.infinispan.topology.CacheTopology; import org.infinispan.transaction.xa.GlobalTransaction; import org.infinispan.util.concurrent.AggregateCompletionStage; import org.infinispan.util.function.TriConsumer; import org.infinispan.util.logging.Log; import org.infinispan.util.logging.LogFactory; import io.reactivex.rxjava3.core.Completable; import io.reactivex.rxjava3.core.Flowable; import jakarta.transaction.Status; import jakarta.transaction.Transaction; import jakarta.transaction.TransactionManager; /** * Helper class that handles all 
notifications to registered listeners. * * @author Manik Surtani (manik AT infinispan DOT org) * @author Mircea.Markus@jboss.com * @author William Burns * @author anistor@redhat.com * @since 4.0 */ @Scope(Scopes.NAMED_CACHE) public class CacheNotifierImpl<K, V> extends AbstractListenerImpl<Event<K, V>, CacheEntryListenerInvocation<K, V>> implements ClusterCacheNotifier<K, V> { private static final Log log = LogFactory.getLog(CacheNotifierImpl.class); private static final Map<Class<? extends Annotation>, Class<?>> allowedListeners = new HashMap<>(16); private static final Map<Class<? extends Annotation>, Class<?>> clusterAllowedListeners = new HashMap<>(4); static { allowedListeners.put(CacheEntryCreated.class, CacheEntryCreatedEvent.class); allowedListeners.put(CacheEntryRemoved.class, CacheEntryRemovedEvent.class); allowedListeners.put(CacheEntryVisited.class, CacheEntryVisitedEvent.class); allowedListeners.put(CacheEntryModified.class, CacheEntryModifiedEvent.class); allowedListeners.put(CacheEntryActivated.class, CacheEntryActivatedEvent.class); allowedListeners.put(CacheEntryPassivated.class, CacheEntryPassivatedEvent.class); allowedListeners.put(CacheEntryLoaded.class, CacheEntryLoadedEvent.class); allowedListeners.put(CacheEntriesEvicted.class, CacheEntriesEvictedEvent.class); allowedListeners.put(TransactionRegistered.class, TransactionRegisteredEvent.class); allowedListeners.put(TransactionCompleted.class, TransactionCompletedEvent.class); allowedListeners.put(CacheEntryInvalidated.class, CacheEntryInvalidatedEvent.class); allowedListeners.put(CacheEntryExpired.class, CacheEntryExpiredEvent.class); allowedListeners.put(DataRehashed.class, DataRehashedEvent.class); allowedListeners.put(TopologyChanged.class, TopologyChangedEvent.class); allowedListeners.put(PartitionStatusChanged.class, PartitionStatusChangedEvent.class); allowedListeners.put(PersistenceAvailabilityChanged.class, PersistenceAvailabilityChangedEvent.class); 
clusterAllowedListeners.put(CacheEntryCreated.class, CacheEntryCreatedEvent.class); clusterAllowedListeners.put(CacheEntryModified.class, CacheEntryModifiedEvent.class); clusterAllowedListeners.put(CacheEntryRemoved.class, CacheEntryRemovedEvent.class); clusterAllowedListeners.put(CacheEntryExpired.class, CacheEntryExpiredEvent.class); } final List<CacheEntryListenerInvocation<K, V>> cacheEntryCreatedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryRemovedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryVisitedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryModifiedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryActivatedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryPassivatedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryLoadedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryInvalidatedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntryExpiredListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> cacheEntriesEvictedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> transactionRegisteredListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> transactionCompletedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> dataRehashedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> topologyChangedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> partitionChangedListeners = new CopyOnWriteArrayList<>(); final List<CacheEntryListenerInvocation<K, V>> 
persistenceChangedListeners = new CopyOnWriteArrayList<>(); @Inject TransactionManager transactionManager; @Inject Configuration config; @Inject GlobalConfiguration globalConfiguration; @Inject InternalEntryFactory entryFactory; @Inject ClusterEventManager<K, V> eventManager; @Inject BasicComponentRegistry componentRegistry; @Inject KeyPartitioner keyPartitioner; @Inject RpcManager rpcManager; @Inject EncoderRegistry encoderRegistry; @Inject ComponentRef<AdvancedCache<K, V>> cache; @Inject ComponentRef<ClusteringDependentLogic> clusteringDependentLogic; @Inject ComponentRef<AsyncInterceptorChain> interceptorChain; @Inject ComponentRef<ClusterPublisherManager<K, V>> publisherManager; private ClusterExecutor clusterExecutor; private final Map<Object, UUID> clusterListenerIDs = new ConcurrentHashMap<>(); private Collection<FilterIndexingServiceProvider> filterIndexingServiceProviders; /** * This map is used to store the handler used when a listener is registered which has includeCurrentState and * is only used for that listener during the initial state transfer */ private final ConcurrentMap<UUID, QueueingSegmentListener<K, V, ? extends Event<K, V>>> segmentHandler; public CacheNotifierImpl() { this(new ConcurrentHashMap<>()); } CacheNotifierImpl(ConcurrentMap<UUID, QueueingSegmentListener<K, V, ? 
extends Event<K, V>>> handler) { segmentHandler = handler; listenersMap.put(CacheEntryCreated.class, cacheEntryCreatedListeners); listenersMap.put(CacheEntryRemoved.class, cacheEntryRemovedListeners); listenersMap.put(CacheEntryVisited.class, cacheEntryVisitedListeners); listenersMap.put(CacheEntryModified.class, cacheEntryModifiedListeners); listenersMap.put(CacheEntryActivated.class, cacheEntryActivatedListeners); listenersMap.put(CacheEntryPassivated.class, cacheEntryPassivatedListeners); listenersMap.put(CacheEntryLoaded.class, cacheEntryLoadedListeners); listenersMap.put(CacheEntriesEvicted.class, cacheEntriesEvictedListeners); listenersMap.put(CacheEntryExpired.class, cacheEntryExpiredListeners); listenersMap.put(TransactionRegistered.class, transactionRegisteredListeners); listenersMap.put(TransactionCompleted.class, transactionCompletedListeners); listenersMap.put(CacheEntryInvalidated.class, cacheEntryInvalidatedListeners); listenersMap.put(DataRehashed.class, dataRehashedListeners); listenersMap.put(TopologyChanged.class, topologyChangedListeners); listenersMap.put(PartitionStatusChanged.class, partitionChangedListeners); listenersMap.put(PersistenceAvailabilityChanged.class, persistenceChangedListeners); } @Start(priority = 9) public void start() { if (!config.simpleCache()) { clusterExecutor = SecurityActions.getClusterExecutor(cache.wired()); } Collection<FilterIndexingServiceProvider> providers = ServiceFinder.load(FilterIndexingServiceProvider.class); filterIndexingServiceProviders = new ArrayList<>(providers.size()); for (FilterIndexingServiceProvider provider : providers) { componentRegistry.wireDependencies(provider, false); provider.start(); filterIndexingServiceProviders.add(provider); } } @Override public void stop() { super.stop(); // The other nodes will remove the listener automatically clusterListenerIDs.clear(); if (filterIndexingServiceProviders != null) { for (FilterIndexingServiceProvider provider : filterIndexingServiceProviders) { 
provider.stop(); } filterIndexingServiceProviders = null; } } @Override protected Log getLog() { return log; } @Override protected Map<Class<? extends Annotation>, Class<?>> getAllowedMethodAnnotations(Listener l) { if (l.clustered()) { // Cluster listeners only allow a subset of types return clusterAllowedListeners; } return allowedListeners; } private K convertKey(CacheEntryListenerInvocation listenerInvocation, K key) { if (key == null) return null; DataConversion keyDataConversion = listenerInvocation.getKeyDataConversion(); Wrapper wrp = keyDataConversion.getWrapper(); Object unwrappedKey = keyDataConversion.getEncoder().fromStorage(wrp.unwrap(key)); CacheEventFilter filter = listenerInvocation.getFilter(); CacheEventConverter converter = listenerInvocation.getConverter(); if (filter == null && converter == null) { if (listenerInvocation.useStorageFormat()) { return (K) unwrappedKey; } // If no filter is present, convert to the requested format directly return (K) keyDataConversion.fromStorage(key); } MediaType convertFormat = filter == null ? 
converter.format() : filter.format(); if (listenerInvocation.useStorageFormat() || convertFormat == null) { // Filter will be run on the storage format, return the unwrapped key return (K) unwrappedKey; } // Filter has a specific format to run, convert to that format return (K) encoderRegistry.convert(unwrappedKey, keyDataConversion.getStorageMediaType(), convertFormat); } private V convertValue(CacheEntryListenerInvocation listenerInvocation, V value) { if (value == null) return null; DataConversion valueDataConversion = listenerInvocation.getValueDataConversion(); Wrapper wrp = valueDataConversion.getWrapper(); Object unwrappedValue = valueDataConversion.getEncoder().fromStorage(wrp.unwrap(value)); CacheEventFilter filter = listenerInvocation.getFilter(); CacheEventConverter converter = listenerInvocation.getConverter(); if (filter == null && converter == null) { if (listenerInvocation.useStorageFormat()) { return (V) unwrappedValue; } // If no filter is present, convert to the requested format directly return (V) valueDataConversion.fromStorage(value); } MediaType convertFormat = filter == null ? 
converter.format() : filter.format(); if (listenerInvocation.useStorageFormat() || convertFormat == null) { // Filter will be run on the storage format, return the unwrapped key return (V) unwrappedValue; } // Filter has a specific format to run, convert to that format return (V) encoderRegistry.convert(unwrappedValue, valueDataConversion.getStorageMediaType(), convertFormat); } @Override protected final Transaction suspendIfNeeded() { if (transactionManager == null) { return null; } try { switch (transactionManager.getStatus()) { case Status.STATUS_NO_TRANSACTION: return null; case Status.STATUS_ACTIVE: case Status.STATUS_MARKED_ROLLBACK: case Status.STATUS_PREPARED: case Status.STATUS_COMMITTED: case Status.STATUS_ROLLEDBACK: case Status.STATUS_UNKNOWN: case Status.STATUS_PREPARING: case Status.STATUS_COMMITTING: case Status.STATUS_ROLLING_BACK: default: //suspend in default and in unknown status to be safer return transactionManager.suspend(); } } catch (Exception e) { if (log.isTraceEnabled()) { log.trace("An error occurred while trying to suspend a transaction.", e); } return null; } } @Override protected final void resumeIfNeeded(Transaction transaction) { if (transaction == null || transactionManager == null) { return; } try { transactionManager.resume(transaction); } catch (Exception e) { if (log.isTraceEnabled()) { log.tracef(e, "An error occurred while trying to resume a suspended transaction. 
tx=%s", transaction); } } } int extractSegment(FlagAffectedCommand command, Object key) { return SegmentSpecificCommand.extractSegment(command, key, keyPartitioner); } @Override public CompletionStage<Void> notifyCacheEntryCreated(K key, V value, Metadata metadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryCreatedListeners)) { return resumeOnCPU(doNotifyCreated(key, value, metadata, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyCreated(K key, V value, Metadata metadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (clusteringDependentLogic.running().commitType(command, ctx, extractSegment(command, key), false).isLocal() && (command == null || !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER))) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_CREATED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); Object batchIdentifier = ctx.isInTxScope() ? 
null : Thread.currentThread(); try { AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : cacheEntryCreatedListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, metadata, pre, ctx, command, null, null); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } if (batchIdentifier != null) { return sendEvents(batchIdentifier, aggregateCompletionStage); } else if (aggregateCompletionStage != null) { return aggregateCompletionStage.freeze(); } } finally { if (batchIdentifier != null) { eventManager.dropEvents(batchIdentifier); } } } return CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyCacheEntryModified(K key, V value, Metadata metadata, V previousValue, Metadata previousMetadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryModifiedListeners)) { return resumeOnCPU(doNotifyModified(key, value, metadata, previousValue, previousMetadata, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyModified(K key, V value, Metadata metadata, V previousValue, Metadata previousMetadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (clusteringDependentLogic.running().commitType(command, ctx, extractSegment(command, key), false).isLocal() && (command == null || !command.hasAnyFlag(FlagBitSets.PUT_FOR_STATE_TRANSFER))) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_MODIFIED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); Object batchIdentifier = ctx.isInTxScope() ? 
null : Thread.currentThread(); try { AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : cacheEntryModifiedListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, metadata, pre, ctx, command, previousValue, previousMetadata); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } if (batchIdentifier != null) { return sendEvents(batchIdentifier, aggregateCompletionStage); } else if (aggregateCompletionStage != null) { return aggregateCompletionStage.freeze(); } } finally { if (batchIdentifier != null) { eventManager.dropEvents(batchIdentifier); } } } return CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyCacheEntryRemoved(K key, V previousValue, Metadata previousMetadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryRemovedListeners)) { return resumeOnCPU(doNotifyRemoved(key, previousValue, previousMetadata, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyRemoved(K key, V previousValue, Metadata previousMetadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (clusteringDependentLogic.running().commitType(command, ctx, extractSegment(command, key), true).isLocal()) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_REMOVED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); Object batchIdentifier = ctx.isInTxScope() ? 
null : Thread.currentThread(); try { AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : cacheEntryRemovedListeners) { // Need a wrapper per invocation since converter could modify the entry in it if (pre) { configureEvent(listener, e, key, previousValue, previousMetadata, true, ctx, command, previousValue, previousMetadata); } else { // to be consistent it would be better to pass null as previousMetadata but certain server code // depends on ability to retrieve these metadata when pre=false from CacheEntryEvent.getMetadata // instead of having proper method getOldMetadata() there. configureEvent(listener, e, key, null, previousMetadata, false, ctx, command, previousValue, previousMetadata); } aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } if (batchIdentifier != null) { return sendEvents(batchIdentifier, aggregateCompletionStage); } else if (aggregateCompletionStage != null) { return aggregateCompletionStage.freeze(); } } finally { if (batchIdentifier != null) { eventManager.dropEvents(batchIdentifier); } } } return CompletableFutures.completedNull(); } /** * Configure event data. Currently used for 'created', 'modified', 'removed', 'invalidated' events. */ private void configureEvent(CacheEntryListenerInvocation listenerInvocation, EventImpl<K, V> e, K key, V value, Metadata metadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command, V previousValue, Metadata previousMetadata) { key = convertKey(listenerInvocation, key); value = convertValue(listenerInvocation, value); previousValue = convertValue(listenerInvocation, previousValue); e.setOriginLocal(ctx.isOriginLocal()); e.setPre(pre); e.setValue(pre ? 
previousValue : value); e.setNewValue(value); e.setOldValue(previousValue); e.setOldMetadata(previousMetadata); e.setMetadata(metadata); if (command != null && command.hasAnyFlag(FlagBitSets.COMMAND_RETRY)) { e.setCommandRetried(true); } e.setKey(key); setSource(e, ctx, command); } /** * Configure event data. Currently used for 'activated', 'loaded', 'visited' events. */ private void configureEvent(CacheEntryListenerInvocation listenerInvocation, EventImpl<K, V> e, K key, V value, boolean pre, InvocationContext ctx) { e.setPre(pre); e.setKey(convertKey(listenerInvocation, key)); e.setValue(convertValue(listenerInvocation, value)); e.setOriginLocal(ctx.isOriginLocal()); setSource(e, ctx, null); } /** * Configure event data. Currently used for 'expired' events. */ private void configureEvent(CacheEntryListenerInvocation listenerInvocation, EventImpl<K, V> e, K key, V value, Metadata metadata, InvocationContext ctx) { e.setKey(convertKey(listenerInvocation, key)); e.setValue(convertValue(listenerInvocation, value)); e.setMetadata(metadata); e.setOriginLocal(true); e.setPre(false); setSource(e, ctx, null); } @Override public CompletionStage<Void> notifyCacheEntryVisited(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryVisitedListeners)) { return resumeOnCPU(doNotifyVisited(key, value, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyVisited(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { AggregateCompletionStage<Void> aggregateCompletionStage = null; EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_VISITED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); for (CacheEntryListenerInvocation<K, V> listener : cacheEntryVisitedListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, pre, 
ctx); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyCacheEntriesEvicted(Collection<Map.Entry<K, V>> entries, InvocationContext ctx, FlagAffectedCommand command) { if (!entries.isEmpty() && isNotificationAllowed(command, cacheEntriesEvictedListeners)) { return resumeOnCPU(doNotifyEvicted(entries), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyEvicted(Collection<Map.Entry<K, V>> entries) { AggregateCompletionStage<Void> aggregateCompletionStage = null; EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_EVICTED); for (CacheEntryListenerInvocation<K, V> listener : cacheEntriesEvictedListeners) { Map<K, V> evictedKeysAndValues = new HashMap<>(); for (Map.Entry<? extends K, ? extends V> entry : entries) { evictedKeysAndValues.put(convertKey(listener, entry.getKey()), convertValue(listener, entry.getValue())); } e.setEntries(evictedKeysAndValues); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e)); } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } private CompletionStage<Void> sendEvents(Object batchIdentifier, AggregateCompletionStage<Void> aggregateCompletionStage) { CompletionStage<Void> managerStage = eventManager.sendEvents(batchIdentifier); if (aggregateCompletionStage != null) { if (managerStage != null) { aggregateCompletionStage.dependsOn(managerStage); } return aggregateCompletionStage.freeze(); } return managerStage; } @Override public CompletionStage<Void> notifyCacheEntryExpired(K key, V value, Metadata metadata, InvocationContext ctx) { if (!cacheEntryExpiredListeners.isEmpty()) { return resumeOnCPU(doNotifyExpired(key, value, metadata, ctx), key); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyExpired(K key, V value, Metadata metadata, InvocationContext ctx) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_EXPIRED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); AggregateCompletionStage<Void> aggregateCompletionStage = null; Object batchIdentifier = (ctx != null && ctx.isInTxScope()) ? null : Thread.currentThread(); try { for (CacheEntryListenerInvocation<K, V> listener : cacheEntryExpiredListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, metadata, ctx); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, null), isLocalNodePrimaryOwner)); } if (batchIdentifier != null) { return sendEvents(batchIdentifier, aggregateCompletionStage); } } finally { if (batchIdentifier != null) { eventManager.dropEvents(batchIdentifier); } } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyCacheEntryInvalidated(final K key, V value, Metadata metadata, final boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryInvalidatedListeners)) { return resumeOnCPU(doNotifyInvalidated(key, value, metadata, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyInvalidated(K key, V value, Metadata metadata, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { AggregateCompletionStage<Void> aggregateCompletionStage = null; EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_INVALIDATED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); for (CacheEntryListenerInvocation<K, V> listener : cacheEntryInvalidatedListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, metadata, pre, ctx, command, value, metadata); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyCacheEntryLoaded(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryLoadedListeners)) { return resumeOnCPU(doNotifyLoaded(key, value, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyLoaded(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { AggregateCompletionStage<Void> aggregateCompletionStage = null; EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_LOADED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); for (CacheEntryListenerInvocation<K, V> listener : cacheEntryLoadedListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, pre, ctx); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyCacheEntryActivated(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryActivatedListeners)) { return resumeOnCPU(doNotifyActivated(key, value, pre, ctx, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyActivated(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { AggregateCompletionStage<Void> aggregateCompletionStage = null; EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_ACTIVATED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); for (CacheEntryListenerInvocation<K, V> listener : cacheEntryActivatedListeners) { // Need a wrapper per invocation since converter could modify the entry in it configureEvent(listener, e, key, value, pre, ctx); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } private void setSource(EventImpl<K, V> e, InvocationContext ctx, FlagAffectedCommand command) { if (ctx != null && ctx.isInTxScope()) { GlobalTransaction tx = ((TxInvocationContext) ctx).getGlobalTransaction(); e.setSource(tx); } else if (command instanceof WriteCommand) { CommandInvocationId invocationId = ((WriteCommand) command).getCommandInvocationId(); e.setSource(invocationId); } } @Override public CompletionStage<Void> notifyCacheEntryPassivated(K key, V value, boolean pre, InvocationContext ctx, FlagAffectedCommand command) { if (isNotificationAllowed(command, cacheEntryPassivatedListeners)) { return resumeOnCPU(doNotifyPassivated(key, value, pre, command), command); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyPassivated(K key, V value, boolean pre, FlagAffectedCommand command) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_PASSIVATED); boolean isLocalNodePrimaryOwner = isLocalNodePrimaryOwner(key); AggregateCompletionStage aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : cacheEntryPassivatedListeners) { // Need a wrapper per invocation since converter could modify the entry in it key = convertKey(listener, key); value = convertValue(listener, value); e.setPre(pre); e.setKey(key); e.setValue(value); aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(new EventWrapper<>(key, e, command), isLocalNodePrimaryOwner)); } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } private boolean isLocalNodePrimaryOwner(K key) { return clusteringDependentLogic.running().getCacheTopology().getDistribution(key).isPrimary(); } @Override public CompletionStage<Void> notifyTransactionCompleted(GlobalTransaction transaction, boolean successful, InvocationContext ctx) { if (!transactionCompletedListeners.isEmpty()) { return resumeOnCPU(doNotifyTransactionCompleted(transaction, successful, ctx), transaction); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyTransactionCompleted(GlobalTransaction transaction, boolean successful, InvocationContext ctx) { boolean isOriginLocal = ctx.isOriginLocal(); EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), TRANSACTION_COMPLETED); e.setOriginLocal(isOriginLocal); e.setTransactionId(transaction); e.setTransactionSuccessful(successful); AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : transactionCompletedListeners) { aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e)); } if (ctx.isInTxScope()) { if (successful) { return sendEvents(transaction, aggregateCompletionStage); } else { eventManager.dropEvents(transaction); } } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyTransactionRegistered(GlobalTransaction globalTransaction, boolean isOriginLocal) { if (!transactionRegisteredListeners.isEmpty()) { return resumeOnCPU(doNotifyTransactionRegistered(globalTransaction, isOriginLocal), globalTransaction); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyTransactionRegistered(GlobalTransaction globalTransaction, boolean isOriginLocal) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), TRANSACTION_REGISTERED); e.setOriginLocal(isOriginLocal); e.setTransactionId(globalTransaction); AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : transactionRegisteredListeners) { aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e)); } return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyDataRehashed(ConsistentHash oldCH, ConsistentHash newCH, ConsistentHash unionCH, int newTopologyId, boolean pre) { if (!dataRehashedListeners.isEmpty()) { return resumeOnCPU(doNotifyDataRehashed(oldCH, newCH, unionCH, newTopologyId, pre), newTopologyId); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyDataRehashed(ConsistentHash oldCH, ConsistentHash newCH, ConsistentHash unionCH, int newTopologyId, boolean pre) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), DATA_REHASHED); e.setPre(pre); e.setReadConsistentHashAtStart(oldCH); e.setWriteConsistentHashAtStart(oldCH); e.setReadConsistentHashAtEnd(newCH); e.setWriteConsistentHashAtEnd(newCH); e.setUnionConsistentHash(unionCH); e.setNewTopologyId(newTopologyId); AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : dataRehashedListeners) { 
aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e)); } return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyTopologyChanged(CacheTopology oldTopology, CacheTopology newTopology, int newTopologyId, boolean pre) { if (!topologyChangedListeners.isEmpty()) { return resumeOnCPU(doNotifyTopologyChanged(oldTopology, newTopology, newTopologyId, pre), newTopology.getTopologyId()); } return CompletableFutures.completedNull(); } private CompletionStage<Void> doNotifyTopologyChanged(CacheTopology oldTopology, CacheTopology newTopology, int newTopologyId, boolean pre) { EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), TOPOLOGY_CHANGED); e.setPre(pre); if (oldTopology != null) { e.setReadConsistentHashAtStart(oldTopology.getReadConsistentHash()); e.setWriteConsistentHashAtStart(oldTopology.getWriteConsistentHash()); } e.setReadConsistentHashAtEnd(newTopology.getReadConsistentHash()); e.setWriteConsistentHashAtEnd(newTopology.getWriteConsistentHash()); e.setNewTopologyId(newTopologyId); AggregateCompletionStage<Void> aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> listener : topologyChangedListeners) { aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e)); } return aggregateCompletionStage != null ? 
aggregateCompletionStage.freeze() : CompletableFutures.completedNull();
   }

   @Override
   public CompletionStage<Void> notifyPartitionStatusChanged(AvailabilityMode mode, boolean pre) {
      if (!partitionChangedListeners.isEmpty()) {
         return resumeOnCPU(doNotifyPartitionStatusChanged(mode, pre), mode);
      }
      return CompletableFutures.completedNull();
   }

   /**
    * Notifies all partition-status listeners of the new availability mode,
    * aggregating their completion stages.
    */
   private CompletionStage<Void> doNotifyPartitionStatusChanged(AvailabilityMode mode, boolean pre) {
      EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), PARTITION_STATUS_CHANGED);
      e.setPre(pre);
      e.setAvailabilityMode(mode);
      // Parameterized (was raw) so freeze() yields CompletionStage<Void> like the sibling doNotify* methods
      AggregateCompletionStage<Void> aggregateCompletionStage = null;
      for (CacheEntryListenerInvocation<K, V> listener : partitionChangedListeners) {
         aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e));
      }
      return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull();
   }

   @Override
   public CompletionStage<Void> notifyPersistenceAvailabilityChanged(boolean available) {
      if (!persistenceChangedListeners.isEmpty()) {
         return resumeOnCPU(doNotifyPersistenceAvailabilityChanged(available), available);
      }
      return CompletableFutures.completedNull();
   }

   /**
    * Notifies all persistence-availability listeners, aggregating their completion stages.
    */
   private CompletionStage<Void> doNotifyPersistenceAvailabilityChanged(boolean available) {
      EventImpl<K, V> e = EventImpl.createEvent(cache.wired(), PERSISTENCE_AVAILABILITY_CHANGED);
      e.setAvailable(available);
      AggregateCompletionStage<Void> aggregateCompletionStage = null;
      for (CacheEntryListenerInvocation<K, V> listener : persistenceChangedListeners) {
         aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invoke(e));
      }
      return aggregateCompletionStage != null ?
aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public CompletionStage<Void> notifyClusterListeners(Collection<ClusterEvent<K, V>> events, UUID uuid) { AggregateCompletionStage<Void> aggregateCompletionStage = null; // We don't need to unwrap key or value as the node where the event originated did this already for (ClusterEvent<K, V> event : events) { if (event.isPre()) { throw new IllegalArgumentException("Events for cluster listener should never be pre change"); } switch (event.getType()) { case CACHE_ENTRY_MODIFIED: for (CacheEntryListenerInvocation<K, V> listener : cacheEntryModifiedListeners) { if (listener.isClustered() && uuid.equals(listener.getIdentifier())) { // We force invocation, since it means the owning node passed filters already and they // already converted so don't run converter either aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invokeNoChecks(new EventWrapper<>(event.getKey(), event, null), false, true, false)); break; } } break; case CACHE_ENTRY_CREATED: for (CacheEntryListenerInvocation<K, V> listener : cacheEntryCreatedListeners) { if (listener.isClustered() && uuid.equals(listener.getIdentifier())) { // We force invocation, since it means the owning node passed filters already and they // already converted so don't run converter either aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invokeNoChecks(new EventWrapper<>(event.getKey(), event, null), false, true, false)); break; } } break; case CACHE_ENTRY_REMOVED: for (CacheEntryListenerInvocation<K, V> listener : cacheEntryRemovedListeners) { if (listener.isClustered() && uuid.equals(listener.getIdentifier())) { // We force invocation, since it means the owning node passed filters already and they // already converted so don't run converter either aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invokeNoChecks(new EventWrapper<>(event.getKey(), event, 
null), false, true, false)); break; } } break; case CACHE_ENTRY_EXPIRED: for (CacheEntryListenerInvocation<K, V> listener : cacheEntryExpiredListeners) { if (listener.isClustered() && uuid.equals(listener.getIdentifier())) { // We force invocation, since it means the owning node passed filters already and they // already converted so don't run converter either aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, listener.invokeNoChecks(new EventWrapper<>(event.getKey(), event, null), false, true, false)); break; } } break; default: throw new IllegalArgumentException("Unexpected event type encountered!"); } } return aggregateCompletionStage != null ? resumeOnCPU(aggregateCompletionStage.freeze(), uuid) : CompletableFutures.completedNull(); } @Override public Collection<ClusterListenerReplicateCallable<K, V>> retrieveClusterListenerCallablesToInstall() { Set<Object> enlistedAlready = new HashSet<>(); Set<ClusterListenerReplicateCallable<K, V>> callables = new HashSet<>(); if (log.isTraceEnabled()) { log.tracef("Request received to get cluster listeners currently registered"); } registerClusterListenerCallablesToInstall(enlistedAlready, callables, cacheEntryModifiedListeners); registerClusterListenerCallablesToInstall(enlistedAlready, callables, cacheEntryCreatedListeners); registerClusterListenerCallablesToInstall(enlistedAlready, callables, cacheEntryRemovedListeners); if (log.isTraceEnabled()) { log.tracef("Cluster listeners found %s", callables); } return callables; } private void registerClusterListenerCallablesToInstall(Set<Object> enlistedAlready, Set<ClusterListenerReplicateCallable<K, V>> callables, List<CacheEntryListenerInvocation<K, V>> listenerInvocations) { for (CacheEntryListenerInvocation<K, V> listener : listenerInvocations) { if (!enlistedAlready.contains(listener.getTarget())) { // If clustered means it is local - so use our address if (listener.isClustered()) { Set<Class<? 
extends Annotation>> filterAnnotations = listener.getFilterAnnotations(); callables.add(new ClusterListenerReplicateCallable(cache.wired().getName(), listener.getIdentifier(), rpcManager.getAddress(), listener.getFilter(), listener.getConverter(), listener.isSync(), filterAnnotations, listener.getKeyDataConversion(), listener.getValueDataConversion(), listener.useStorageFormat())); enlistedAlready.add(listener.getTarget()); } else if (listener.getTarget() instanceof RemoteClusterListener) { RemoteClusterListener lcl = (RemoteClusterListener) listener.getTarget(); Set<Class<? extends Annotation>> filterAnnotations = listener.getFilterAnnotations(); callables.add(new ClusterListenerReplicateCallable(cache.wired().getName(), lcl.getId(), lcl.getOwnerAddress(), listener.getFilter(), listener.getConverter(), listener.isSync(), filterAnnotations, listener.getKeyDataConversion(), listener.getValueDataConversion(), listener.useStorageFormat())); enlistedAlready.add(listener.getTarget()); } } } } public boolean isNotificationAllowed(FlagAffectedCommand cmd, List<CacheEntryListenerInvocation<K, V>> listeners) { return !listeners.isEmpty() && (cmd == null || !cmd.hasAnyFlag(FlagBitSets.SKIP_LISTENER_NOTIFICATION)); } @Override public CompletionStage<Void> addListenerAsync(Object listener) { return addListenerAsync(listener, null, null, null); } @Override public CompletionStage<Void> addListenerAsync(Object listener, ClassLoader classLoader) { return addListenerAsync(listener, null, null, classLoader); } private <C> CompletionStage<Void> addListenerInternal(Object listener, DataConversion keyDataConversion, DataConversion valueDataConversion, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? 
super V, C> converter, ClassLoader classLoader, boolean useStorageFormat) { final Listener l = testListenerClassValidity(listener.getClass()); final UUID generatedId = Util.threadLocalRandomUUID(); final CacheMode cacheMode = config.clustering().cacheMode(); FilterIndexingServiceProvider indexingProvider = null; boolean foundMethods = false; // We use identity for null as this means it was invoked by a non encoder cache DataConversion keyConversion = keyDataConversion == null ? DataConversion.IDENTITY_KEY : keyDataConversion; DataConversion valueConversion = valueDataConversion == null ? DataConversion.IDENTITY_VALUE : valueDataConversion; Set<Class<? extends Annotation>> filterAnnotations = findListenerCallbacks(listener); if (filter instanceof IndexedFilter) { indexingProvider = findIndexingServiceProvider((IndexedFilter) filter); if (indexingProvider != null) { DelegatingCacheInvocationBuilder builder = new DelegatingCacheInvocationBuilder(indexingProvider); adjustCacheInvocationBuilder(builder, filter, converter, filterAnnotations, l, useStorageFormat, generatedId, keyConversion, valueConversion, classLoader); foundMethods = validateAndAddListenerInvocations(listener, builder); builder.registerListenerInvocations(); } } if (indexingProvider == null) { CacheInvocationBuilder builder = new CacheInvocationBuilder(); adjustCacheInvocationBuilder(builder, filter, converter, filterAnnotations, l, useStorageFormat, generatedId, keyConversion, valueConversion, classLoader); foundMethods = validateAndAddListenerInvocations(listener, builder); } CompletionStage<Void> stage = CompletableFutures.completedNull(); if (foundMethods && l.clustered()) { if (l.observation() == Listener.Observation.PRE) { throw CONTAINER.clusterListenerRegisteredWithOnlyPreEvents(listener.getClass()); } else if (cacheMode.isInvalidation()) { throw new UnsupportedOperationException("Cluster listeners cannot be used with Invalidation Caches!"); } else if (clusterListenerOnPrimaryOnly()) { 
clusterListenerIDs.put(listener, generatedId); Address ourAddress; List<Address> members; if (rpcManager != null) { ourAddress = rpcManager.getAddress(); members = rpcManager.getMembers(); } else { ourAddress = null; members = null; } // If we are the only member don't even worry about sending listeners if (members != null && members.size() > 1) { stage = registerClusterListeners(members, generatedId, ourAddress, filter, converter, l, listener, keyDataConversion, valueDataConversion, useStorageFormat); } } } // If we have a segment listener handler, it means we have to do initial state QueueingSegmentListener<K, V, ? extends Event<K, V>> handler = segmentHandler.remove(generatedId); if (handler != null) { if (log.isTraceEnabled()) { log.tracef("Listener %s requests initial state for cache", generatedId); } Collection<IntermediateOperation<?, ?, ?, ?>> intermediateOperations = new ArrayList<>(); if (keyDataConversion != DataConversion.IDENTITY_KEY && valueDataConversion != DataConversion.IDENTITY_VALUE) { intermediateOperations.add(new MapOperation<>(EncoderEntryMapper.newCacheEntryMapper( keyDataConversion, valueDataConversion, entryFactory))); } if (filter instanceof CacheEventFilterConverter && (filter == converter || converter == null)) { intermediateOperations.add(new MapOperation<>(CacheFilters.converterToFunction( new CacheEventFilterConverterAsKeyValueFilterConverter<>((CacheEventFilterConverter<?, ?, ?>) filter)))); intermediateOperations.add(new FilterOperation<>(CacheFilters.notNullCacheEntryPredicate())); } else { if (filter != null) { intermediateOperations.add(new FilterOperation<>(CacheFilters.predicate( new CacheEventFilterAsKeyValueFilter<>(filter)))); } if (converter != null) { intermediateOperations.add(new MapOperation<>(CacheFilters.function( new CacheEventConverterAsConverter<>(converter)))); } } stage = handlePublisher(stage, intermediateOperations, handler, generatedId, l, null, null); } return stage; } private CompletionStage<Void> 
handlePublisher(CompletionStage<Void> currentStage, Collection<IntermediateOperation<?, ?, ?, ?>> intermediateOperations, QueueingSegmentListener<K, V, ? extends Event<K, V>> handler, UUID generatedId, Listener l, Function<Object, Object> kc, Function<Object, Object> kv) { SegmentPublisherSupplier<CacheEntry<K, V>> publisher = publisherManager.running().entryPublisher( null, null, null, EnumUtil.EMPTY_BIT_SET, // TODO: do we really need EXACTLY_ONCE? AT_LEAST_ONCE should be fine I think DeliveryGuarantee.EXACTLY_ONCE, config.clustering().stateTransfer().chunkSize(), intermediateOperations.isEmpty() ? PublisherTransformers.identity() : new CacheIntermediatePublisher(intermediateOperations)); currentStage = currentStage.thenCompose(ignore -> Flowable.fromPublisher(publisher.publisherWithSegments()) .concatMap(handler) .flatMapCompletable(ice -> Completable.fromCompletionStage( raiseEventForInitialTransfer(generatedId, ice, l.clustered(), kc, kv)), false, 20) .toCompletionStage(null)); currentStage = currentStage.thenCompose(ignore -> handler.transferComplete()); if (log.isTraceEnabled()) { currentStage = currentStage.whenComplete((v, t) -> log.tracef("Listener %s initial state for cache completed", generatedId)); } return currentStage; } private <C> CompletionStage<Void> registerClusterListeners(List<Address> members, UUID generatedId, Address ourAddress, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? 
super V, C> converter, Listener l, Object listener, DataConversion keyDataConversion, DataConversion valueDataConversion, boolean useStorageFormat) { if (log.isTraceEnabled()) { log.tracef("Replicating cluster listener to other nodes %s for cluster listener with id %s", members, generatedId); } ClusterListenerReplicateCallable<K, V> callable = new ClusterListenerReplicateCallable(cache.wired().getName(), generatedId, ourAddress, filter, converter, l.sync(), findListenerCallbacks(listener), keyDataConversion, valueDataConversion, useStorageFormat); TriConsumer<Address, Void, Throwable> handleSuspect = (a, ignore, t) -> { if (t != null && !(t instanceof SuspectException)) { log.debugf(t, "Address: %s encountered an exception while adding cluster listener", a); throw new CacheListenerException(t); } }; // Send to all nodes but ours CompletionStage<Void> completionStage = clusterExecutor.filterTargets(a -> !ourAddress.equals(a)) .submitConsumer(callable, handleSuspect); // We have to try any nodes that have been added since we sent the request - as they may not have requested // the listener - unfortunately if there are no nodes it throws a SuspectException, so we ignore that return completionStage.thenCompose(v -> clusterExecutor.filterTargets(a -> !members.contains(a) && !a.equals(ourAddress)) .submitConsumer(callable, handleSuspect).exceptionally(t -> { // Ignore any suspect exception if (!(t instanceof SuspectException)) { throw new CacheListenerException(t); } return null; }) ); } /** * Adds the listener using the provided filter converter and class loader. The provided builder is used to add * additional configuration including (clustered, onlyPrimary & identifier) which can be used after this method is * completed to see what values were used in the addition of this listener */ @Override public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? 
super V, C> converter, ClassLoader classLoader) { return addListenerInternal(listener, DataConversion.IDENTITY_KEY, DataConversion.IDENTITY_VALUE, filter, converter, classLoader, false); } /** * Gets a suitable indexing provider for the given indexed filter. * * @param indexedFilter the filter * @return the FilterIndexingServiceProvider that supports the given IndexedFilter or {@code null} if none was found */ private FilterIndexingServiceProvider findIndexingServiceProvider(IndexedFilter indexedFilter) { if (filterIndexingServiceProviders != null) { for (FilterIndexingServiceProvider provider : filterIndexingServiceProviders) { if (provider.supportsFilter(indexedFilter)) { return provider; } } } log.noFilterIndexingServiceProviderFound(indexedFilter.getClass().getName()); return null; } @Override public List<CacheEntryListenerInvocation<K, V>> getListenerCollectionForAnnotation(Class<? extends Annotation> annotation) { return super.getListenerCollectionForAnnotation(annotation); } private CompletionStage<Void> raiseEventForInitialTransfer(UUID identifier, CacheEntry entry, boolean clustered, Function<Object, Object> kc, Function<Object, Object> kv) { EventImpl preEvent; if (kc == null) kc = Function.identity(); if (kv == null) kv = Function.identity(); if (clustered) { // In clustered mode we only send post event preEvent = null; } else { preEvent = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_CREATED); preEvent.setKey(kc.apply(entry.getKey())); preEvent.setPre(true); preEvent.setCurrentState(true); } EventImpl postEvent = EventImpl.createEvent(cache.wired(), CACHE_ENTRY_CREATED); postEvent.setKey(kc.apply(entry.getKey())); postEvent.setValue(kv.apply(entry.getValue())); postEvent.setMetadata(entry.getMetadata()); postEvent.setPre(false); postEvent.setCurrentState(true); AggregateCompletionStage aggregateCompletionStage = null; for (CacheEntryListenerInvocation<K, V> invocation : cacheEntryCreatedListeners) { // Now notify all our methods of the creates if 
(invocation.getIdentifier() == identifier) { if (preEvent != null) { // Non clustered notifications are done twice aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, invocation.invokeNoChecks(new EventWrapper<>(null, preEvent, null), true, true, false)); } aggregateCompletionStage = composeStageIfNeeded(aggregateCompletionStage, invocation.invokeNoChecks(new EventWrapper<>(null, postEvent, null), true, true, false)); } } return aggregateCompletionStage != null ? aggregateCompletionStage.freeze() : CompletableFutures.completedNull(); } @Override public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter) { return addListenerAsync(listener, filter, converter, null); } @Override public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) { return addFilteredListenerInternal(listener, null, null, filter, converter, filterAnnotations, false); } @Override public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) { return addFilteredListenerInternal(listener, null, null, filter, converter, filterAnnotations, false); } @Override public <C> CompletionStage<Void> addListenerAsync(ListenerHolder listenerHolder, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? 
super V, C> converter, ClassLoader classLoader) { return addListenerInternal(listenerHolder.getListener(), listenerHolder.getKeyDataConversion(), listenerHolder.getValueDataConversion(), filter, converter, classLoader, false); } @Override public <C> CompletionStage<Void> addFilteredListenerAsync(ListenerHolder listenerHolder, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) { return addFilteredListenerInternal(listenerHolder.getListener(), listenerHolder.getKeyDataConversion(), listenerHolder.getValueDataConversion(), filter, converter, filterAnnotations, listenerHolder.isFilterOnStorageFormat()); } protected boolean clusterListenerOnPrimaryOnly() { CacheMode mode = config.clustering().cacheMode(); boolean zeroCapacity = config.clustering().hash().capacityFactor() == 0f || globalConfiguration.isZeroCapacityNode(); return mode.isDistributed() || (mode.isReplicated() && zeroCapacity); } private <C> CompletionStage<Void> addFilteredListenerInternal(Object listener, DataConversion keyDataConversion, DataConversion valueDataConversion, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations, boolean useStorageFormat) { final Listener l = testListenerClassValidity(listener.getClass()); final UUID generatedId = Util.threadLocalRandomUUID(); final CacheMode cacheMode = config.clustering().cacheMode(); FilterIndexingServiceProvider indexingProvider = null; boolean foundMethods = false; // We use identity for null as this means it was invoked by a non encoder cache DataConversion keyConversion = keyDataConversion == null ? DataConversion.IDENTITY_KEY : keyDataConversion; DataConversion valueConversion = valueDataConversion == null ? 
DataConversion.IDENTITY_VALUE : valueDataConversion; if (filter instanceof IndexedFilter) { indexingProvider = findIndexingServiceProvider((IndexedFilter) filter); if (indexingProvider != null) { DelegatingCacheInvocationBuilder builder = new DelegatingCacheInvocationBuilder(indexingProvider); adjustCacheInvocationBuilder(builder, filter, converter, filterAnnotations, l, useStorageFormat, generatedId, keyConversion, valueConversion, null); foundMethods = validateAndAddFilterListenerInvocations(listener, builder, filterAnnotations); builder.registerListenerInvocations(); } } if (indexingProvider == null) { CacheInvocationBuilder builder = new CacheInvocationBuilder(); adjustCacheInvocationBuilder(builder, filter, converter, filterAnnotations, l, useStorageFormat, generatedId, keyConversion, valueConversion, null); foundMethods = validateAndAddFilterListenerInvocations(listener, builder, filterAnnotations); } CompletionStage<Void> stage = CompletableFutures.completedNull(); if (foundMethods && l.clustered()) { if (l.observation() == Listener.Observation.PRE) { throw CONTAINER.clusterListenerRegisteredWithOnlyPreEvents(listener.getClass()); } else if (cacheMode.isInvalidation()) { throw new UnsupportedOperationException("Cluster listeners cannot be used with Invalidation Caches!"); } else if (clusterListenerOnPrimaryOnly()) { clusterListenerIDs.put(listener, generatedId); // This way we only retrieve members of the cache itself Address ourAddress = rpcManager.getAddress(); List<Address> members = rpcManager.getMembers(); // If we are the only member don't even worry about sending listeners if (members != null && members.size() > 1) { stage = registerClusterListeners(members, generatedId, ourAddress, filter, converter, l, listener, keyDataConversion, valueDataConversion, useStorageFormat); } } } // If we have a segment listener handler, it means we have to do initial state QueueingSegmentListener<K, V, ? 
extends Event<K, V>> handler = segmentHandler.remove(generatedId); if (handler != null) { if (log.isTraceEnabled()) { log.tracef("Listener %s requests initial state for cache", generatedId); } Collection<IntermediateOperation<?, ?, ?, ?>> intermediateOperations = new ArrayList<>(); MediaType storage = valueConversion.getStorageMediaType(); MediaType keyReq = keyConversion.getRequestMediaType(); MediaType valueReq = valueConversion.getRequestMediaType(); AdvancedCache advancedCache = cache.running(); DataConversion chainedKeyDataConversion = advancedCache.getKeyDataConversion(); DataConversion chainedValueDataConversion = advancedCache.getValueDataConversion(); if (keyReq != null && valueReq != null) { chainedKeyDataConversion = chainedKeyDataConversion.withRequestMediaType(keyReq); chainedValueDataConversion = chainedValueDataConversion.withRequestMediaType(valueReq); } boolean hasFilter = false; MediaType filterMediaType = null; if (filter != null) { hasFilter = true; filterMediaType = useStorageFormat ? null : filter.format(); if (filterMediaType == null) { // iterate in the storage format chainedKeyDataConversion = chainedKeyDataConversion.withRequestMediaType(storage); chainedValueDataConversion = chainedValueDataConversion.withRequestMediaType(storage); } else { // iterate in the filter format chainedKeyDataConversion = chainedKeyDataConversion.withRequestMediaType(filterMediaType); chainedValueDataConversion = chainedValueDataConversion.withRequestMediaType(filterMediaType); } } if (converter != null) { hasFilter = true; filterMediaType = useStorageFormat ? 
null : converter.format(); if (filterMediaType == null) { // iterate in the storage format chainedKeyDataConversion = chainedKeyDataConversion.withRequestMediaType(storage); chainedValueDataConversion = chainedValueDataConversion.withRequestMediaType(storage); } else { // iterate in the filter format chainedKeyDataConversion = chainedKeyDataConversion.withRequestMediaType(filterMediaType); chainedValueDataConversion = chainedValueDataConversion.withRequestMediaType(filterMediaType); } } if (!Objects.equals(chainedKeyDataConversion, keyDataConversion) || !Objects.equals(chainedValueDataConversion, valueDataConversion)) { componentRegistry.wireDependencies(chainedKeyDataConversion, false); componentRegistry.wireDependencies(chainedValueDataConversion, false); intermediateOperations.add(new MapOperation<>(EncoderEntryMapper.newCacheEntryMapper(chainedKeyDataConversion, chainedValueDataConversion, entryFactory))); } if (filter instanceof CacheEventFilterConverter && (filter == converter || converter == null)) { intermediateOperations.add(new MapOperation<>(CacheFilters.converterToFunction( new CacheEventFilterConverterAsKeyValueFilterConverter<>((CacheEventFilterConverter<?, ?, ?>) filter)))); intermediateOperations.add(new FilterOperation<>(CacheFilters.notNullCacheEntryPredicate())); } else { if (filter != null) { intermediateOperations.add(new FilterOperation<>(CacheFilters.predicate( new CacheEventFilterAsKeyValueFilter<>(filter)))); } if (converter != null) { intermediateOperations.add(new MapOperation<>(CacheFilters.function( new CacheEventConverterAsConverter<>(converter)))); } } boolean finalHasFilter = hasFilter; MediaType finalFilterMediaType = filterMediaType; Function<Object, Object> kc = k -> { if (!finalHasFilter) return k; if (finalFilterMediaType == null || useStorageFormat || keyReq == null) { return keyDataConversion.fromStorage(k); } return encoderRegistry.convert(k, finalFilterMediaType, keyDataConversion.getRequestMediaType()); }; Function<Object, 
Object> kv = v -> { if (!finalHasFilter) return v; if (finalFilterMediaType == null || useStorageFormat || valueReq == null) { return valueConversion.fromStorage(v); } return encoderRegistry.convert(v, finalFilterMediaType, valueConversion.getRequestMediaType()); }; stage = handlePublisher(stage, intermediateOperations, handler, generatedId, l, kc, kv); } return stage; } private <C> void adjustCacheInvocationBuilder(CacheInvocationBuilder builder, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations, Listener l, boolean useStorageFormat, UUID generatedId, DataConversion keyConversion, DataConversion valueConversion, ClassLoader classLoader) { builder .setIncludeCurrentState(l.includeCurrentState()) .setClustered(l.clustered()) .setOnlyPrimary(l.clustered() ? clusterListenerOnPrimaryOnly() : l.primaryOnly()) .setObservation(l.clustered() ? Listener.Observation.POST : l.observation()) .setFilter(filter) .setConverter(converter) .useStorageFormat(useStorageFormat) .setKeyDataConversion(keyConversion) .setValueDataConversion(valueConversion) .setIdentifier(generatedId) .setClassLoader(classLoader); builder.setFilterAnnotations(filterAnnotations); } protected class CacheInvocationBuilder extends AbstractInvocationBuilder { CacheEventFilter<? super K, ? super V> filter; CacheEventConverter<? super K, ? super V, ?> converter; boolean onlyPrimary; boolean clustered; boolean includeCurrentState; UUID identifier; DataConversion keyDataConversion; DataConversion valueDataConversion; Listener.Observation observation; Set<Class<? extends Annotation>> filterAnnotations; boolean storageFormat; public CacheEventFilter<? super K, ? super V> getFilter() { return filter; } public CacheInvocationBuilder setFilter(CacheEventFilter<? super K, ? super V> filter) { this.filter = filter; return this; } public CacheEventConverter<? super K, ? 
super V, ?> getConverter() { return converter; } public CacheInvocationBuilder setConverter(CacheEventConverter<? super K, ? super V, ?> converter) { this.converter = converter; return this; } public CacheInvocationBuilder useStorageFormat(boolean useStorageFormat) { this.storageFormat = useStorageFormat; return this; } public boolean isOnlyPrimary() { return onlyPrimary; } public CacheInvocationBuilder setOnlyPrimary(boolean onlyPrimary) { this.onlyPrimary = onlyPrimary; return this; } public boolean isClustered() { return clustered; } public CacheInvocationBuilder setClustered(boolean clustered) { this.clustered = clustered; return this; } public UUID getIdentifier() { return identifier; } public CacheInvocationBuilder setIdentifier(UUID identifier) { this.identifier = identifier; return this; } public CacheInvocationBuilder setKeyDataConversion(DataConversion dataConversion) { this.keyDataConversion = dataConversion; return this; } public CacheInvocationBuilder setValueDataConversion(DataConversion dataConversion) { this.valueDataConversion = dataConversion; return this; } public boolean isIncludeCurrentState() { return includeCurrentState; } public CacheInvocationBuilder setIncludeCurrentState(boolean includeCurrentState) { this.includeCurrentState = includeCurrentState; return this; } public Listener.Observation getObservation() { return observation; } public CacheInvocationBuilder setObservation(Listener.Observation observation) { this.observation = observation; return this; } public CacheInvocationBuilder setFilterAnnotations(Set<Class<? 
extends Annotation>> filterAnnotations) { this.filterAnnotations = filterAnnotations; return this; } @Override public CacheEntryListenerInvocation<K, V> build() { ListenerInvocation<Event<K, V>> invocation = new ListenerInvocationImpl(target, method, sync, classLoader, subject); wireDependencies(filter, converter); // If we are dealing with clustered events that forces the cluster listener to only use primary only else we would // have duplicate events CacheEntryListenerInvocation<K, V> returnValue; if (includeCurrentState) { // If it is a clustered listener and distributed cache we can do some extra optimizations if (clustered) { QueueingSegmentListener handler = segmentHandler.get(identifier); if (handler == null) { int segments = config.clustering().hash().numSegments(); if (clusterListenerOnPrimaryOnly()) { handler = new DistributedQueueingSegmentListener(entryFactory, segments, keyPartitioner); } else { handler = new QueueingAllSegmentListener(entryFactory, segments, keyPartitioner); } QueueingSegmentListener currentQueue = segmentHandler.putIfAbsent(identifier, handler); if (currentQueue != null) { handler = currentQueue; } } returnValue = new ClusteredListenerInvocation<>(encoderRegistry, invocation, handler, filter, converter, annotation, onlyPrimary, identifier, sync, observation, filterAnnotations, keyDataConversion, valueDataConversion, storageFormat); } else { // TODO: this is removed until non cluster listeners are supported // QueueingSegmentListener handler = segmentHandler.get(identifier); // if (handler == null) { // handler = new QueueingAllSegmentListener(); // QueueingSegmentListener currentQueue = segmentHandler.putIfAbsent(identifier, handler); // if (currentQueue != null) { // handler = currentQueue; // } // } // returnValue = new NonClusteredListenerInvocation(invocation, handler, filter, converter, annotation, // onlyPrimary, identifier, sync); returnValue = new BaseCacheEntryListenerInvocation(encoderRegistry, invocation, filter, 
converter, annotation, onlyPrimary, clustered, identifier, sync, observation, filterAnnotations, keyDataConversion, valueDataConversion, storageFormat); } } else { // If no includeCurrentState just use the base listener invocation which immediately passes all notifications // off returnValue = new BaseCacheEntryListenerInvocation(encoderRegistry, invocation, filter, converter, annotation, onlyPrimary, clustered, identifier, sync, observation, filterAnnotations, keyDataConversion, valueDataConversion, storageFormat); } return returnValue; } protected <C> void wireDependencies(CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter) { if (filter != null) { componentRegistry.wireDependencies(filter, false); } if (converter != null && converter != filter) { componentRegistry.wireDependencies(converter, false); } if (keyDataConversion != null) { componentRegistry.wireDependencies(keyDataConversion, false); } if (valueDataConversion != null) { componentRegistry.wireDependencies(valueDataConversion, false); } } } protected class DelegatingCacheInvocationBuilder extends CacheInvocationBuilder { private final FilterIndexingServiceProvider provider; private final Map<Class<? 
extends Annotation>, List<DelegatingCacheEntryListenerInvocation<K, V>>> listeners = new HashMap<>(3); DelegatingCacheInvocationBuilder(FilterIndexingServiceProvider provider) { this.provider = provider; } @Override public DelegatingCacheEntryListenerInvocation<K, V> build() { DelegatingCacheEntryListenerInvocation<K, V> invocation = provider.interceptListenerInvocation(super.build()); List<DelegatingCacheEntryListenerInvocation<K, V>> invocations = listeners.get(invocation.getAnnotation()); if (invocations == null) { invocations = new ArrayList<>(2); listeners.put(invocation.getAnnotation(), invocations); } invocations.add(invocation); return invocation; } void registerListenerInvocations() { if (!listeners.isEmpty()) { boolean filterAndConvert = filter == converter || converter == null; provider.registerListenerInvocations(clustered, onlyPrimary, filterAndConvert, (IndexedFilter<?, ?, ?>) filter, listeners, this.keyDataConversion, this.valueDataConversion); } } } /** * This class is to be used with cluster listener invocations only when they have included current state. Thus we * can assume all types are CacheEntryEvent, since it doesn't allow other types. */ protected class ClusteredListenerInvocation<K, V> extends BaseCacheEntryListenerInvocation<K, V> { private final QueueingSegmentListener<K, V, CacheEntryEvent<K, V>> handler; public ClusteredListenerInvocation(EncoderRegistry encoderRegistry, ListenerInvocation<Event<K, V>> invocation, QueueingSegmentListener<K, V, CacheEntryEvent<K, V>> handler, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, ?> converter, Class<? extends Annotation> annotation, boolean onlyPrimary, UUID identifier, boolean sync, Listener.Observation observation, Set<Class<? 
extends Annotation>> filterAnnotations, DataConversion keyDataConversion, DataConversion valueDataConversion, boolean useStorageFormat) { super(encoderRegistry, invocation, filter, converter, annotation, onlyPrimary, true, identifier, sync, observation, filterAnnotations, keyDataConversion, valueDataConversion, useStorageFormat); this.handler = handler; } @Override public CompletionStage<Void> invoke(Event<K, V> event) { throw new UnsupportedOperationException("Clustered initial transfer don't support regular events!"); } @Override protected CompletionStage<Void> doRealInvocation(EventWrapper<K, V, CacheEntryEvent<K, V>> wrapped) { // This is only used with clusters and such we can safely cast this here if (!handler.handleEvent(wrapped, invocation)) { return super.doRealInvocation(wrapped.getEvent()); } return null; } @Override public String toString() { return "ClusteredListenerInvocation{id=" + identifier + '}'; } } protected class BaseCacheEntryListenerInvocation<K, V> implements CacheEntryListenerInvocation<K, V> { private final EncoderRegistry encoderRegistry; protected final ListenerInvocation<Event<K, V>> invocation; protected final CacheEventFilter<? super K, ? super V> filter; protected final CacheEventConverter<? super K, ? super V, ?> converter; private final DataConversion keyDataConversion; private final DataConversion valueDataConversion; private final boolean useStorageFormat; protected final boolean onlyPrimary; protected final boolean clustered; protected final UUID identifier; protected final Class<? extends Annotation> annotation; protected final boolean sync; protected final boolean filterAndConvert; protected final Listener.Observation observation; protected final Set<Class<? extends Annotation>> filterAnnotations; protected BaseCacheEntryListenerInvocation(EncoderRegistry encoderRegistry, ListenerInvocation<Event<K, V>> invocation, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, ?> converter, Class<? 
extends Annotation> annotation, boolean onlyPrimary, boolean clustered, UUID identifier, boolean sync, Listener.Observation observation, Set<Class<? extends Annotation>> filterAnnotations, DataConversion keyDataConversion, DataConversion valueDataConversion, boolean useStorageFormat) { this.encoderRegistry = encoderRegistry; this.invocation = invocation; this.filter = filter; this.converter = converter; this.keyDataConversion = keyDataConversion; this.valueDataConversion = valueDataConversion; this.useStorageFormat = useStorageFormat; this.filterAndConvert = filter instanceof CacheEventFilterConverter && (filter == converter || converter == null); this.onlyPrimary = onlyPrimary; this.clustered = clustered; this.identifier = identifier; this.annotation = annotation; this.sync = sync; this.observation = observation; this.filterAnnotations = filterAnnotations; } @Override public CompletionStage<Void> invoke(Event<K, V> event) { if (shouldInvoke(event)) { return doRealInvocation(event); } return null; } /** * This is the entry point for local listeners firing events * * @param wrapped * @param isLocalNodePrimaryOwner */ @Override public CompletionStage<Void> invoke(EventWrapper<K, V, CacheEntryEvent<K, V>> wrapped, boolean isLocalNodePrimaryOwner) { // See if this should be filtered first before evaluating CacheEntryEvent<K, V> resultingEvent = shouldInvoke(wrapped.getEvent(), isLocalNodePrimaryOwner); if (resultingEvent != null) { wrapped.setEvent(resultingEvent); return invokeNoChecks(wrapped, false, filterAndConvert, false); } return null; } /** * This is the entry point for remote listener events being fired * * @param wrapped * @param skipQueue */ @Override public CompletionStage<Void> invokeNoChecks(EventWrapper<K, V, CacheEntryEvent<K, V>> wrapped, boolean skipQueue, boolean skipConverter, boolean needsTransform) { // We run the converter first, this way the converter doesn't have to run serialized when enqueued and also // the handler doesn't have to worry 
about it if (!skipConverter) { wrapped.setEvent(convertValue(converter, wrapped.getEvent())); } if (needsTransform) { CacheEntryEvent<K, V> event = wrapped.getEvent(); EventImpl<K, V> eventImpl = (EventImpl<K, V>) event; wrapped.setEvent(convertEventToRequestFormat(eventImpl, filter, converter, eventImpl.getValue())); } if (skipQueue) { return invocation.invoke(wrapped.getEvent()); } else { return doRealInvocation(wrapped); } } protected CompletionStage<Void> doRealInvocation(EventWrapper<K, V, CacheEntryEvent<K, V>> event) { return doRealInvocation(event.getEvent()); } protected CompletionStage<Void> doRealInvocation(Event<K, V> event) { return invocation.invoke(event); } protected boolean shouldInvoke(Event<K, V> event) { return observation.shouldInvoke(event.isPre()); } protected CacheEntryEvent<K, V> shouldInvoke(CacheEntryEvent<K, V> event, boolean isLocalNodePrimaryOwner) { if (log.isTraceEnabled()) { log.tracef("Should invoke %s (filter %s)? (onlyPrimary=%s, isPrimary=%s)", event, filter, onlyPrimary, isLocalNodePrimaryOwner); } if (onlyPrimary && !isLocalNodePrimaryOwner) return null; if (event instanceof EventImpl) { EventImpl<K, V> eventImpl = (EventImpl<K, V>) event; if (!shouldInvoke(event)) return null; EventType eventType; // Only use the filter if it was provided and we have an event that we can filter properly if (filter != null && (eventType = getEvent(eventImpl)) != null) { if (filterAndConvert) { Object newValue = ((CacheEventFilterConverter) filter).filterAndConvert(eventImpl.getKey(), eventImpl.getOldValue(), eventImpl.getOldMetadata(), eventImpl.getValue(), eventImpl.getMetadata(), eventType); return newValue != null ? 
convertEventToRequestFormat(eventImpl, filter, null, newValue) : null; } else { boolean accept = filter.accept(eventImpl.getKey(), eventImpl.getOldValue(), eventImpl.getOldMetadata(), eventImpl.getValue(), eventImpl.getMetadata(), eventType); if (!accept) { return null; } if (converter == null) { return convertEventToRequestFormat(eventImpl, filter, null, eventImpl.getValue()); } } } } return event; } // We can't currently filter events that don't implement CacheEntryEvent or CACHE_ENTRY_EVICTED events. Basically // events that have a single key value pair only private EventType getEvent(EventImpl<K, V> event) { switch (event.getType()) { case CACHE_ENTRY_ACTIVATED: case CACHE_ENTRY_CREATED: case CACHE_ENTRY_INVALIDATED: case CACHE_ENTRY_LOADED: case CACHE_ENTRY_MODIFIED: case CACHE_ENTRY_PASSIVATED: case CACHE_ENTRY_REMOVED: case CACHE_ENTRY_VISITED: case CACHE_ENTRY_EXPIRED: return new EventType(event.isCommandRetried(), event.isPre(), event.getType()); default: return null; } } @Override public Object getTarget() { return invocation.getTarget(); } @Override public CacheEventFilter<? super K, ? super V> getFilter() { return filter; } @Override public Set<Class<? extends Annotation>> getFilterAnnotations() { return filterAnnotations; } @Override public DataConversion getKeyDataConversion() { return keyDataConversion; } @Override public DataConversion getValueDataConversion() { return valueDataConversion; } @Override public boolean useStorageFormat() { return useStorageFormat; } @Override public CacheEventConverter<? super K, ? super V, ?> getConverter() { return converter; } @Override public boolean isClustered() { return clustered; } @Override public UUID getIdentifier() { return identifier; } @Override public Listener.Observation getObservation() { return observation; } @Override public Class<? extends Annotation> getAnnotation() { return annotation; } protected CacheEntryEvent<K, V> convertValue(CacheEventConverter<? super K, ? 
super V, ?> converter, CacheEntryEvent<K, V> event) { CacheEntryEvent<K, V> returnedEvent; if (converter != null) { if (event instanceof EventImpl) { // This is a bit hacky to let the C type be passed in for the V type EventImpl<K, V> eventImpl = (EventImpl<K, V>) event; EventType evType = new EventType(eventImpl.isCommandRetried(), eventImpl.isPre(), eventImpl.getType()); Object newValue; if (converter.useRequestFormat()) { eventImpl = convertEventToRequestFormat(eventImpl, null, converter, eventImpl.getValue()); newValue = converter.convert(eventImpl.getKey(), (V) eventImpl.getOldValue(), eventImpl.getOldMetadata(), (V) eventImpl.getValue(), eventImpl.getMetadata(), evType); eventImpl.setValue((V) newValue); } else { newValue = converter.convert(eventImpl.getKey(), (V) eventImpl.getOldValue(), eventImpl.getOldMetadata(), (V) eventImpl.getValue(), eventImpl.getMetadata(), evType); } if (!converter.useRequestFormat()) { // Convert from the filter output to the request output return convertEventToRequestFormat(eventImpl, null, converter, newValue); } else { returnedEvent = eventImpl; } } else { throw new IllegalArgumentException("Provided event should be org.infinispan.notifications.cachelistener.event.impl.EventImpl " + "when a converter is being used!"); } } else { returnedEvent = event; } return returnedEvent; } private EventImpl<K, V> convertEventToRequestFormat(EventImpl<K, V> eventImpl, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? 
super V, ?> converter, Object newValue) { MediaType keyFromFormat = keyDataConversion.getStorageMediaType(); MediaType valueFromFormat = valueDataConversion.getStorageMediaType(); if (converter != null) { if (converter.format() != null && !useStorageFormat) { keyFromFormat = converter.format(); valueFromFormat = converter.format(); } } else { if (filter != null) { if (filter.format() != null && !useStorageFormat) { keyFromFormat = filter.format(); valueFromFormat = filter.format(); } } } Object convertedKey = convertToRequestFormat(eventImpl.getKey(), keyFromFormat, keyDataConversion); Object convertedValue = convertToRequestFormat(newValue, valueFromFormat, valueDataConversion); Object convertedOldValue = convertToRequestFormat(eventImpl.getOldValue(), valueFromFormat, valueDataConversion); EventImpl<K, V> clone = eventImpl.clone(); clone.setKey((K) convertedKey); clone.setValue((V) convertedValue); clone.setOldValue((V) convertedOldValue); return clone; } private Object convertToRequestFormat(Object object, MediaType objectFormat, DataConversion dataConversion) { if (object == null) return null; MediaType requestMediaType = dataConversion.getRequestMediaType(); if (requestMediaType == null) return dataConversion.fromStorage(object); Transcoder transcoder = encoderRegistry.getTranscoder(objectFormat, requestMediaType); return transcoder.transcode(object, objectFormat, requestMediaType); } @Override public boolean isSync() { return sync; } @Override public String toString() { return "BaseCacheEntryListenerInvocation{id=" + identifier + '}'; } } @Override public CompletionStage<Void> removeListenerAsync(Object listener) { removeListenerFromMaps(listener); UUID id = clusterListenerIDs.remove(listener); if (id != null) { return clusterExecutor.submitConsumer(new ClusterListenerRemoveCallable( cache.wired().getName(), id), (a, ignore, t) -> { if (t != null) { throw new CacheException(t); } }); } return CompletableFutures.completedNull(); } @Override protected 
Set<CacheEntryListenerInvocation<K, V>> removeListenerInvocation(Class<? extends Annotation> annotation, Object listener) { Set<CacheEntryListenerInvocation<K, V>> markedForRemoval = super.removeListenerInvocation(annotation, listener); for (CacheEntryListenerInvocation<K, V> li : markedForRemoval) { if (li instanceof DelegatingCacheEntryListenerInvocation) { ((DelegatingCacheEntryListenerInvocation<K, V>) li).unregister(); } } return markedForRemoval; } }
104,025
50.625806
247
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/DelegatingCacheEntryListenerInvocation.java
package org.infinispan.notifications.cachelistener.filter;

import java.lang.annotation.Annotation;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletionStage;

import org.infinispan.encoding.DataConversion;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.CacheEntryListenerInvocation;
import org.infinispan.notifications.cachelistener.EventWrapper;
import org.infinispan.notifications.cachelistener.event.CacheEntryEvent;
import org.infinispan.notifications.cachelistener.event.Event;

/**
 * A wrapper around a {@link CacheEntryListenerInvocation} that keeps a reference to the {@link
 * FilterIndexingServiceProvider} instance that handles this invocation. All methods are delegated to the wrapped
 * invocation except {@link CacheEntryListenerInvocation#invoke(EventWrapper, boolean)}, which deliberately does
 * nothing here (see the method comment below). FilterIndexingServiceProvider implementors must extends this class and
 * implement its abstract {@link #unregister} method.
 *
 * @param <K> cache key type
 * @param <V> cache value type
 * @author anistor@redhat.com
 * @since 7.2
 */
public abstract class DelegatingCacheEntryListenerInvocation<K, V> implements CacheEntryListenerInvocation<K, V> {

   // The wrapped invocation that almost all methods forward to.
   protected final CacheEntryListenerInvocation<K, V> invocation;

   protected DelegatingCacheEntryListenerInvocation(CacheEntryListenerInvocation<K, V> invocation) {
      this.invocation = invocation;
   }

   /**
    * Stops handling the invocation. This is called when the listener is being unregistered.
    */
   public abstract void unregister();

   @Override
   public Object getTarget() {
      return invocation.getTarget();
   }

   @Override
   public CompletionStage<Void> invoke(Event<K, V> event) {
      return invocation.invoke(event);
   }

   @Override
   public CompletionStage<Void> invoke(EventWrapper<K, V, CacheEntryEvent<K, V>> event, boolean isLocalNodePrimaryOwner) {
      // Intentionally NOT delegated: returning null short-circuits the regular local notification
      // path for this invocation — presumably the owning FilterIndexingServiceProvider delivers
      // matching events through its own channel instead. NOTE(review): confirm against provider
      // implementations before relying on this.
      return null;
   }

   @Override
   public CompletionStage<Void> invokeNoChecks(EventWrapper<K, V, CacheEntryEvent<K, V>> event, boolean skipQueue,
                                               boolean skipConverter, boolean needsTransform) {
      return invocation.invokeNoChecks(event, skipQueue, skipConverter, needsTransform);
   }

   @Override
   public boolean isClustered() {
      return invocation.isClustered();
   }

   @Override
   public boolean isSync() {
      return invocation.isSync();
   }

   @Override
   public UUID getIdentifier() {
      return invocation.getIdentifier();
   }

   @Override
   public Listener.Observation getObservation() {
      return invocation.getObservation();
   }

   @Override
   public Class<? extends Annotation> getAnnotation() {
      return invocation.getAnnotation();
   }

   @Override
   public CacheEventFilter<? super K, ? super V> getFilter() {
      return invocation.getFilter();
   }

   @Override
   public <C> CacheEventConverter<? super K, ? super V, C> getConverter() {
      return invocation.getConverter();
   }

   @Override
   public Set<Class<? extends Annotation>> getFilterAnnotations() {
      return invocation.getFilterAnnotations();
   }

   @Override
   public DataConversion getKeyDataConversion() {
      return invocation.getKeyDataConversion();
   }

   @Override
   public DataConversion getValueDataConversion() {
      return invocation.getValueDataConversion();
   }

   @Override
   public boolean useStorageFormat() {
      return invocation.useStorageFormat();
   }
}
3,497
29.417391
163
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/package-info.java
/**
 * {@link org.infinispan.Cache}-specific notification and event-filtering classes. These classes
 * can be used by end users to limit which notifications are raised for a given cache event.
 *
 * @api.public
 */
package org.infinispan.notifications.cachelistener.filter;
275
38.428571
101
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/CacheEventFilterConverter.java
package org.infinispan.notifications.cachelistener.filter;

import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.metadata.Metadata;

/**
 * This interface is an optimization that can be used when an event filter and converter are most efficiently used as
 * the same object composing the filtering and conversion in the same method invocation.
 *
 * @author wburns
 * @since 7.0
 */
public interface CacheEventFilterConverter<K, V, C> extends CacheEventFilter<K, V>, CacheEventConverter<K, V, C> {
   /**
    * Will both filter the entry and if passed subsequently convert the value to a new value. A returned value of {@code
    * null} will symbolize the value not passing the filter, so ensure your conversion will not return {@code null} if
    * you want this entry to be returned.
    *
    * @param key The key for the entry that was changed for the event
    * @param oldValue The previous value before the event takes place
    * @param oldMetadata The old metadata before the event takes place
    * @param newValue The new value for the entry after the event takes place
    * @param newMetadata The new metadata for the entry after the event takes place
    * @param eventType The type of event that is being raised
    * @return A non {@code null} value converted value when it also passes the filter or {@code null} for when it
    *         doesn't pass the filter
    */
   C filterAndConvert(K key, V oldValue, Metadata oldMetadata, V newValue, Metadata newMetadata, EventType eventType);

   // Default format mirrors CacheEventFilter/CacheEventConverter: operate on deserialized objects.
   @Override
   default MediaType format() {
      return MediaType.APPLICATION_OBJECT;
   }
}
1,633
44.388889
120
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/PostCacheEventFilter.java
package org.infinispan.notifications.cachelistener.filter;

import org.infinispan.metadata.Metadata;

/**
 * A Filter that only allows post events to be accepted.
 *
 * @author wburns
 * @since 7.0
 */
public class PostCacheEventFilter<K, V> implements CacheEventFilter<K, V> {
   @Override
   public boolean accept(K key, V oldValue, Metadata oldMetadata, V newValue, Metadata newMetadata, EventType eventType) {
      // Reject pre-notifications; everything else (i.e. post events) passes.
      boolean preEvent = eventType.isPreEvent();
      return !preEvent;
   }
}
459
26.058824
122
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/CacheEventFilter.java
package org.infinispan.notifications.cachelistener.filter;

import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.metadata.Metadata;

/**
 * A filter that can be used to tell if an event should be raised or not. This filter allows for filtering based
 * on the previous value as well as the new updated value. The old value and old metadata are the previous values and
 * the new value and new metadata are the new values even for pre and post events.
 *
 * @author wburns
 * @since 7.0
 */
public interface CacheEventFilter<K, V> {
   /**
    * Whether or not this event should be raised to the listener it is attached to.
    *
    * @param key The key for the entry that was changed for the event
    * @param oldValue The previous value before the event takes place
    * @param oldMetadata The old metadata before the event takes place
    * @param newValue The new value for the entry after the event takes place
    * @param newMetadata The new metadata for the entry after the event takes place
    * @param eventType The type of event that is being raised
    * @return Whether or not to notify the listener
    */
   // 'public' dropped: interface members are implicitly public (JLS 9.4); the redundant
   // modifier is flagged by standard linters.
   boolean accept(K key, V oldValue, Metadata oldMetadata, V newValue, Metadata newMetadata, EventType eventType);

   /**
    * @return The desired data format to be used in the accept operation. If null, the filter will receive data as it's stored.
    */
   default MediaType format() {
      return MediaType.APPLICATION_OBJECT;
   }
}
1,488
42.794118
127
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/CacheEventConverterFactory.java
package org.infinispan.notifications.cachelistener.filter;

/**
 * Factory that can produce CacheEventConverters.
 *
 * @author wburns
 * @since 7.0
 */
public interface CacheEventConverterFactory {
   /**
    * Retrieves a cache event converter instance from this factory.
    *
    * @param params parameters for the factory to be used to create converter instances
    * @return a {@link org.infinispan.notifications.cachelistener.filter.CacheEventConverter} instance used
    *         to reduce size of event payloads
    */
   <K, V, C> CacheEventConverter<K, V, C> getConverter(Object[] params);
}
596
30.421053
107
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/CompositeCacheEventFilter.java
package org.infinispan.notifications.cachelistener.filter; import org.infinispan.factories.ComponentRegistry; import org.infinispan.factories.annotations.Inject; import org.infinispan.factories.scopes.Scope; import org.infinispan.factories.scopes.Scopes; import org.infinispan.metadata.Metadata; /** * Allows AND-composing several cache event filters. * * @author wburns * @since 7.0 */ @Scope(Scopes.NONE) public class CompositeCacheEventFilter<K, V> implements CacheEventFilter<K, V> { private final CacheEventFilter<? super K, ? super V>[] filters; public CompositeCacheEventFilter(CacheEventFilter<? super K, ? super V>... filters) { this.filters = filters; } @Override public boolean accept(K key, V oldValue, Metadata oldMetadata, V newValue, Metadata newMetadata, EventType eventType) { for (CacheEventFilter<? super K, ? super V> f : filters) if (!f.accept(key, oldValue, oldMetadata, newValue, newMetadata, eventType)) return false; return true; } @Inject protected void injectDependencies(ComponentRegistry cr) { for (CacheEventFilter<? super K, ? super V> f : filters) { cr.wireDependencies(f); } } }
1,197
31.378378
122
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/EventType.java
package org.infinispan.notifications.cachelistener.filter;

import org.infinispan.notifications.cachelistener.event.Event;

/**
 * Carries information allowing an event consumer to know the event's type, whether it is a
 * pre- or post-notification, and whether it was generated due to a retry (usually caused by a
 * topology change while replicating).
 *
 * @author wburns
 * @since 7.0
 */
public class EventType {
   // The underlying cache event type (created/modified/removed/expired/...).
   private final Event.Type type;
   // Whether this event was raised again due to a retried operation.
   private final boolean retried;
   // Whether this is the pre-notification (before the operation completes).
   private final boolean pre;

   public EventType(boolean retried, boolean pre, Event.Type type) {
      this.retried = retried;
      this.pre = pre;
      this.type = type;
   }

   /**
    * @return {@code true} if this is the pre-notification of the operation
    */
   public boolean isPreEvent() {
      return pre;
   }

   /**
    * @return {@code true} if this event was raised as part of a retried operation
    */
   public boolean isRetry() {
      return retried;
   }

   public Event.Type getType() {
      return type;
   }

   public boolean isCreate() {
      return type == Event.Type.CACHE_ENTRY_CREATED;
   }

   public boolean isModified() {
      return type == Event.Type.CACHE_ENTRY_MODIFIED;
   }

   public boolean isRemove() {
      return type == Event.Type.CACHE_ENTRY_REMOVED;
   }

   public boolean isExpired() {
      return type == Event.Type.CACHE_ENTRY_EXPIRED;
   }
}
1,153
22.08
115
java
null
infinispan-main/core/src/main/java/org/infinispan/notifications/cachelistener/filter/FilterIndexingServiceProvider.java
package org.infinispan.notifications.cachelistener.filter;

import java.lang.annotation.Annotation;
import java.util.List;
import java.util.Map;

import org.infinispan.encoding.DataConversion;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.CacheEntryListenerInvocation;

/**
 * A service provider for filter indexing services. It is expected to perform the filtering
 * operation more efficiently than directly executing the filter via the
 * {@link org.infinispan.notifications.cachelistener.filter.CacheEventFilterConverter#filterAndConvert(Object,
 * Object, Metadata, Object, Metadata, EventType)} method. Implementations are discovered through
 * the {@link java.util.ServiceLoader} or {@link org.infinispan.commons.util.ServiceFinder}
 * mechanism and may have their dependencies injected using the
 * {@link org.infinispan.factories.annotations.Inject} annotation.
 *
 * @author anistor@redhat.com
 * @since 7.2
 */
public interface FilterIndexingServiceProvider {

   /**
    * Starts the provider. Invoked after dependency injection has completed.
    */
   void start();

   /**
    * Indicates whether this provider can handle the given filter.
    *
    * @param indexedFilter an indexable filter
    * @return {@code true} if the filter is supported, {@code false} otherwise
    */
   boolean supportsFilter(IndexedFilter<?, ?, ?> indexedFilter);

   /**
    * Begins handling an invocation that uses an {@link IndexedFilter}.
    *
    * @param invocation the invocation to handle
    * @param <K> cache key type
    * @param <V> cache value type
    * @return the wrapped invocation
    */
   <K, V> DelegatingCacheEntryListenerInvocation<K, V> interceptListenerInvocation(CacheEntryListenerInvocation<K, V> invocation);

   <K, V> void registerListenerInvocations(boolean isClustered, boolean isPrimaryOnly, boolean filterAndConvert,
                                           IndexedFilter<?, ?, ?> indexedFilter,
                                           Map<Class<? extends Annotation>, List<DelegatingCacheEntryListenerInvocation<K, V>>> listeners,
                                           DataConversion keyDataConversion, DataConversion valueDataConversion);

   /**
    * Stops the provider.
    */
   void stop();
}
2,321
40.464286
174
java