infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/FileProvider.java
package org.infinispan.persistence.sifs;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.persistence.sifs.pmem.PmemUtilWrapper;
import org.infinispan.util.logging.LogFactory;
/**
* Provides resource management for files: only a limited number of files may be open at any moment, and an open file
* must not be deleted. Also allows generating file indexes.
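* <p>
* A minimal usage sketch (illustrative only; {@code provider} is an existing FileProvider and {@code fileId} a known
* file index):
* <pre>{@code
* FileProvider.Handle handle = provider.getFile(fileId);
* if (handle != null) {
*    try {
*       ByteBuffer buffer = ByteBuffer.allocate(64);
*       handle.read(buffer, 0); // positional read at offset 0
*    } finally {
*       handle.close(); // releases the handle so the file may be closed or deleted
*    }
* }
* }</pre>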
*
* @author Radim Vansa <rvansa@redhat.com>
*/
public class FileProvider {
private static final org.infinispan.persistence.sifs.Log log =
LogFactory.getLog(FileProvider.class, org.infinispan.persistence.sifs.Log.class);
private static final String REGEX_FORMAT = "^%s[0-9]+$";
private static final boolean ATTEMPT_PMEM;
private final File dataDir;
private final int openFileLimit;
private final ArrayBlockingQueue<Record> recordQueue;
private final ConcurrentMap<Integer, Record> openFiles = new ConcurrentHashMap<>();
private final AtomicInteger currentOpenFiles = new AtomicInteger(0);
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private final Set<Integer> logFiles = new HashSet<>();
private final Set<FileIterator> iterators = ConcurrentHashMap.newKeySet();
private final String prefix;
private final int maxFileSize;
private int nextFileId = 0;
static {
boolean attemptPmem = false;
try {
Class.forName("io.mashona.logwriting.PmemUtil");
// use persistent memory if available, otherwise fallback to regular file.
attemptPmem = true;
} catch (ClassNotFoundException e) {
log.debug("Persistent Memory not in classpath, not attempting");
}
ATTEMPT_PMEM = attemptPmem;
}
public FileProvider(Path dataDir, int openFileLimit, String prefix, int maxFileSize) {
this.openFileLimit = openFileLimit;
this.recordQueue = new ArrayBlockingQueue<>(openFileLimit);
this.dataDir = dataDir.toFile();
this.prefix = prefix;
this.maxFileSize = maxFileSize;
try {
Files.createDirectories(dataDir);
} catch (IOException e) {
throw PERSISTENCE.directoryCannotBeCreated(this.dataDir.getAbsolutePath());
}
}
public boolean isLogFile(int fileId) {
lock.readLock().lock();
try {
return logFiles.contains(fileId);
} finally {
lock.readLock().unlock();
}
}
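// Sketch of the open protocol used below: a slot is reserved in currentOpenFiles first (closing
// another file via tryCloseFile() when at the limit), then the Record is published in openFiles;
// a racing opener either reuses the winner's record or releases its reserved slot.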
public Handle getFile(int fileId) throws IOException {
lock.readLock().lock();
try {
for (; ; ) {
Record record = openFiles.get(fileId);
if (record == null) {
for (; ; ) {
int open = currentOpenFiles.get();
if (open >= openFileLimit) {
// we can continue only after some other file is closed
if (tryCloseFile()) break;
} else {
if (currentOpenFiles.compareAndSet(open, open + 1)) {
break;
}
}
}
// now we have either closed some other open file or incremented the counter while it was below the limit
for (;;) {
FileChannel fileChannel;
try {
fileChannel = openChannel(fileId);
} catch (FileNotFoundException e) {
currentOpenFiles.decrementAndGet();
log.debugf(e, "File %d was not found", fileId);
return null;
}
Record newRecord = new Record(fileChannel, fileId);
Record other = openFiles.putIfAbsent(fileId, newRecord);
if (other != null) {
fileChannel.close();
synchronized (other) {
if (other.isOpen()) {
// we reserved a slot for opening a new file but are reusing an already open record, so release the slot
currentOpenFiles.decrementAndGet();
return new Handle(other);
}
}
} else {
Handle handle;
synchronized (newRecord) {
// the new file cannot be closed but it can be simultaneously fetched multiple times
if (!newRecord.isOpen()) {
throw new IllegalStateException();
}
handle = new Handle(newRecord);
}
try {
recordQueue.put(newRecord);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
return handle;
}
}
}
synchronized (record) {
if (record.isOpen()) {
return new Handle(record);
}
}
}
} finally {
lock.readLock().unlock();
}
}
public long getFileSize(int file) {
lock.readLock().lock();
try {
if (logFiles.contains(file)) {
return -1;
}
return newFile(file).length();
} finally {
lock.readLock().unlock();
}
}
private String fileIdToString(int fileId) {
return prefix + fileId;
}
// Package private for tests
File newFile(int fileId) {
return new File(dataDir, fileIdToString(fileId));
}
private boolean tryCloseFile() throws IOException {
Record removed;
try {
removed = recordQueue.take();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
synchronized (removed) {
if (removed.isUsed()) {
try {
recordQueue.put(removed);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} else {
if (removed.isOpen()) {
// if the file was marked deleteOnClose it may already have been closed, but it could not be removed from
// the queue
removed.close();
openFiles.remove(removed.getFileId(), removed);
}
return true;
}
}
return false;
}
protected FileChannel openChannel(int fileId) throws FileNotFoundException {
return openChannel(newFile(fileId), false, true);
}
protected FileChannel openChannel(File file, boolean create, boolean readSharedMetadata) throws FileNotFoundException {
log.debugf("openChannel(%s)", file.getAbsolutePath());
FileChannel fileChannel = ATTEMPT_PMEM ? PmemUtilWrapper.pmemChannelFor(file, maxFileSize, create, readSharedMetadata) : null;
if (fileChannel == null) {
if (create) {
fileChannel = new FileOutputStream(file).getChannel();
} else {
fileChannel = new RandomAccessFile(file, "rw").getChannel();
}
}
return fileChannel;
}
public Log getFileForLog() throws IOException {
lock.writeLock().lock();
try {
for (;;) {
File f = newFile(nextFileId);
if (f.exists()) {
if (nextFileId == Integer.MAX_VALUE) {
nextFileId = 0;
} else {
nextFileId++;
}
} else {
logFiles.add(nextFileId);
for (FileIterator it : iterators) {
it.add(nextFileId);
}
// use persistent memory if available, otherwise fallback to regular file.
FileChannel fileChannel = openChannel(f, true, false);
if (fileChannel == null) {
fileChannel = new FileOutputStream(f).getChannel();
}
return new Log(nextFileId, fileChannel);
}
}
} finally {
lock.writeLock().unlock();
}
}
public CloseableIterator<Integer> getFileIterator() {
String regex = String.format(REGEX_FORMAT, prefix);
lock.readLock().lock();
try {
Set<Integer> set = new HashSet<>();
for (String file : dataDir.list()) {
if (file.matches(regex)) {
set.add(Integer.parseInt(file.substring(prefix.length())));
}
}
FileIterator iterator = new FileIterator(set.iterator());
iterators.add(iterator);
return iterator;
} finally {
lock.readLock().unlock();
}
}
public boolean hasFiles() {
String regex = String.format(REGEX_FORMAT, prefix);
lock.readLock().lock();
try {
for (String file : dataDir.list()) {
if (file.matches(regex)) {
return true;
}
}
return false;
} finally {
lock.readLock().unlock();
}
}
public void clear() throws IOException {
lock.writeLock().lock();
try {
log.debug("Dropping all data");
while (currentOpenFiles.get() > 0) {
if (tryCloseFile()) {
if (currentOpenFiles.decrementAndGet() == 0) {
break;
}
}
}
if (!recordQueue.isEmpty()) throw new IllegalStateException();
if (!openFiles.isEmpty()) throw new IllegalStateException();
File[] files = dataDir.listFiles();
if (files != null) {
for (File file : files) {
Files.delete(file.toPath());
}
}
} finally {
lock.writeLock().unlock();
}
}
public void deleteFile(int fileId) {
lock.readLock().lock();
try {
for (;;) {
Record newRecord = new Record(null, fileId);
Record record = openFiles.putIfAbsent(fileId, newRecord);
if (record == null) {
try {
newRecord.delete();
} catch (IOException e) {
log.cannotCloseDeleteFile(fileId, e);
}
openFiles.remove(fileId, newRecord);
return;
}
synchronized (record) {
if (openFiles.get(fileId) == record) {
try {
record.deleteOnClose();
} catch (IOException e) {
log.cannotCloseDeleteFile(fileId, e);
}
break;
}
}
}
} finally {
lock.readLock().unlock();
}
}
public void stop() {
int open = currentOpenFiles.get();
while (open > 0) {
try {
if (tryCloseFile()) {
open = currentOpenFiles.decrementAndGet();
} else {
// we can't close any further file
break;
}
} catch (IOException e) {
log.cannotCloseFile(e);
}
}
if (currentOpenFiles.get() != 0) {
for (Map.Entry<Integer, Record> entry : openFiles.entrySet()) {
log.debugf("File %d has %d open handles", entry.getKey().intValue(), entry.getValue().handleCount);
}
}
}
public final class Log implements Closeable {
public final int fileId;
public final FileChannel fileChannel;
public Log(int fileId, FileChannel fileChannel) {
this.fileId = fileId;
this.fileChannel = fileChannel;
}
@Override
public void close() throws IOException {
fileChannel.close();
lock.writeLock().lock();
try {
logFiles.remove(fileId);
} finally {
lock.writeLock().unlock();
}
}
}
public static final class Handle implements Closeable {
private boolean usable = true;
private final Record record;
private Handle(Record record) {
this.record = record;
record.increaseHandleCount();
}
public int read(ByteBuffer buffer, long offset) throws IOException {
if (!usable) throw new IllegalStateException();
return record.getFileChannel().read(buffer, offset);
}
@Override
public void close() throws IOException {
usable = false;
synchronized (record) {
record.decreaseHandleCount();
}
}
public long getFileSize() throws IOException {
return record.fileChannel.size();
}
public int getFileId() {
return record.getFileId();
}
}
private class Record {
private final int fileId;
private FileChannel fileChannel;
private int handleCount;
private boolean deleteOnClose = false;
private Record(FileChannel fileChannel, int fileId) {
this.fileChannel = fileChannel;
this.fileId = fileId;
}
FileChannel getFileChannel() {
return fileChannel;
}
void increaseHandleCount() {
handleCount++;
}
void decreaseHandleCount() throws IOException {
handleCount--;
if (handleCount == 0 && deleteOnClose) {
// we cannot easily remove the record from queue - keep it there until collection,
// but physically close and delete the file
fileChannel.close();
fileChannel = null;
openFiles.remove(fileId, this);
delete();
}
}
boolean isOpen() {
return fileChannel != null;
}
boolean isUsed() {
return handleCount > 0;
}
public int getFileId() {
return fileId;
}
public void close() throws IOException {
fileChannel.close();
fileChannel = null;
if (deleteOnClose) {
delete();
}
}
public void delete() throws IOException {
log.debugf("Deleting file %s", fileIdToString(fileId));
//noinspection ResultOfMethodCallIgnored
Files.deleteIfExists(newFile(fileId).toPath());
}
public void deleteOnClose() throws IOException {
if (handleCount == 0) {
if (fileChannel != null) {
fileChannel.close();
fileChannel = null;
}
openFiles.remove(fileId, this);
delete();
} else {
log.debug("Marking file " + fileId + " for deletion");
deleteOnClose = true;
}
}
}
private class FileIterator implements CloseableIterator<Integer> {
private final Iterator<Integer> diskFiles;
private final ConcurrentLinkedQueue<Integer> addedFiles = new ConcurrentLinkedQueue<>();
private FileIterator(Iterator<Integer> diskFiles) {
this.diskFiles = diskFiles;
}
public void add(int file) {
addedFiles.add(file);
}
@Override
public void close() {
iterators.remove(this);
}
@Override
public boolean hasNext() {
return diskFiles.hasNext() || !addedFiles.isEmpty();
}
@Override
public Integer next() {
return diskFiles.hasNext() ? diskFiles.next() : addedFiles.poll();
}
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/LogRequest.java
package org.infinispan.persistence.sifs;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.persistence.spi.MarshallableEntry;
/**
* Request to persist an entry in a log file, or a control request executed by the log appender thread.
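* <p>
* Illustrative creation and use (a sketch; {@code entry} stands for a MarshallableEntry):
* <pre>{@code
* LogRequest store = LogRequest.storeRequest(segment, entry);
* // LogRequest extends CompletableFuture<Void>, so callers may block or chain on completion:
* store.join();
* }</pre>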
*
* @author Radim Vansa <rvansa@redhat.com>
* @author William Burns <wburns@redhat.com>
*/
class LogRequest extends CompletableFuture<Void> {
enum Type {
STORE,
DELETE,
CLEAR_ALL,
PAUSE,
RESUME
}
private final Type type;
private final int segment;
private final Object key;
private final long expirationTime;
private final ByteBuffer serializedKey;
private final ByteBuffer serializedMetadata;
private final ByteBuffer serializedValue;
private final ByteBuffer serializedInternalMetadata;
private final long created;
private final long lastUsed;
private volatile int file;
private volatile int fileOffset;
private volatile IndexRequest indexRequest;
private LogRequest(Type type, int segment, Object key, long expirationTime, ByteBuffer serializedKey, ByteBuffer serializedMetadata,
ByteBuffer serializedInternalMetadata, ByteBuffer serializedValue, long created, long lastUsed) {
this.segment = segment;
this.key = key;
this.expirationTime = expirationTime;
this.serializedKey = serializedKey;
this.serializedMetadata = serializedMetadata;
this.serializedInternalMetadata = serializedInternalMetadata;
this.serializedValue = serializedValue;
this.created = created;
this.lastUsed = lastUsed;
this.type = type;
}
private LogRequest(Type type) {
this(type, -1, null, 0, null, null, null, null, -1, -1);
}
public static LogRequest storeRequest(int segment, MarshallableEntry entry) {
return new LogRequest(Type.STORE, segment, entry.getKey(), entry.expiryTime(), entry.getKeyBytes(), entry.getMetadataBytes(),
entry.getInternalMetadataBytes(), entry.getValueBytes(), entry.created(), entry.lastUsed());
}
public static LogRequest deleteRequest(int segment, Object key, ByteBuffer serializedKey) {
return new LogRequest(Type.DELETE, segment, key, -1, serializedKey, null, null, null, -1, -1);
}
public static LogRequest clearRequest() {
return new LogRequest(Type.CLEAR_ALL);
}
public static LogRequest pauseRequest() {
return new LogRequest(Type.PAUSE);
}
public static LogRequest resumeRequest() {
return new LogRequest(Type.RESUME);
}
public int length() {
return EntryHeader.HEADER_SIZE_11_0 + serializedKey.getLength()
+ (serializedValue != null ? serializedValue.getLength() : 0)
+ EntryMetadata.size(serializedMetadata)
+ (serializedInternalMetadata != null ? serializedInternalMetadata.getLength() : 0);
}
public Object getKey() {
return key;
}
public int getSegment() {
return segment;
}
public ByteBuffer getSerializedKey() {
return serializedKey;
}
public ByteBuffer getSerializedMetadata() {
return serializedMetadata;
}
public ByteBuffer getSerializedInternalMetadata() {
return serializedInternalMetadata;
}
public ByteBuffer getSerializedValue() {
return serializedValue;
}
public long getCreated() {
return created;
}
public long getLastUsed() {
return lastUsed;
}
public long getExpiration() {
return expirationTime;
}
public boolean isClear() {
return type == Type.CLEAR_ALL;
}
public boolean isPause() {
return type == Type.PAUSE;
}
public boolean isResume() {
return type == Type.RESUME;
}
public void setIndexRequest(IndexRequest indexRequest) {
this.indexRequest = indexRequest;
}
public int getFile() {
return file;
}
public void setFile(int file) {
this.file = file;
}
public int getFileOffset() {
return fileOffset;
}
public void setFileOffset(int fileOffset) {
this.fileOffset = fileOffset;
}
public IndexRequest getIndexRequest() {
return indexRequest;
}
@Override
public String toString() {
return "LogRequest{" +
"type=" + type +
", segment=" + segment +
", key=" + key +
", file=" + file +
", fileOffset=" + fileOffset +
'}';
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/Index.java
package org.infinispan.persistence.sifs;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PrimitiveIterator;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLongArray;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.NonBlockingManager;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.functions.Action;
import io.reactivex.rxjava3.functions.Consumer;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
import io.reactivex.rxjava3.schedulers.Schedulers;
/**
* Keeps the entry positions persisted in a file. It consists of several segments, each covering one modulo-range of
* the keys' hashcodes (according to the DataContainer's key equivalence configuration). Writes to each index segment
* are performed by a single thread; having multiple segments spreads the load between them.
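* <p>
* A request is routed to a segment by key hash (a sketch of the routing used below):
* <pre>{@code
* int segment = (key.hashCode() & Integer.MAX_VALUE) % segments.length;
* }</pre>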
*
* @author Radim Vansa <rvansa@redhat.com>
*/
class Index {
private static final Log log = LogFactory.getLog(Index.class, Log.class);
// PRE ISPN 13 GRACEFULLY VALUE = 0x512ACEF0;
private static final int GRACEFULLY = 0x512ACEF1;
private static final int DIRTY = 0xD112770C;
// 4 bytes for graceful shutdown
// 4 bytes for segment max (this way the index can be regenerated if the number of segments changes)
// 8 bytes root offset
// 2 bytes root occupied
// 8 bytes free block offset
// 8 bytes number of elements
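// total: 4 + 4 + 8 + 2 + 8 + 8 = 34 bytes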
private static final int INDEX_FILE_HEADER_SIZE = 34;
private final NonBlockingManager nonBlockingManager;
private final FileProvider fileProvider;
private final Path indexDir;
private final Compactor compactor;
private final int minNodeSize;
private final int maxNodeSize;
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private final Segment[] segments;
private final TimeService timeService;
private final File indexSizeFile;
public final AtomicLongArray sizePerSegment;
private final FlowableProcessor<IndexRequest>[] flowableProcessors;
private final IndexNode.OverwriteHook movedHook = new IndexNode.OverwriteHook() {
@Override
public boolean check(IndexRequest request, int oldFile, int oldOffset) {
return oldFile == request.getPrevFile() && oldOffset == request.getPrevOffset();
}
@Override
public void setOverwritten(IndexRequest request, int cacheSegment, boolean overwritten, int prevFile, int prevOffset) {
if (overwritten && request.getOffset() < 0 && request.getPrevOffset() >= 0) {
sizePerSegment.decrementAndGet(cacheSegment);
}
}
};
private final IndexNode.OverwriteHook updateHook = new IndexNode.OverwriteHook() {
@Override
public void setOverwritten(IndexRequest request, int cacheSegment, boolean overwritten, int prevFile, int prevOffset) {
nonBlockingManager.complete(request, overwritten);
if (request.getOffset() >= 0 && prevOffset < 0) {
sizePerSegment.incrementAndGet(cacheSegment);
} else if (request.getOffset() < 0 && prevOffset >= 0) {
sizePerSegment.decrementAndGet(cacheSegment);
}
}
};
private final IndexNode.OverwriteHook droppedHook = new IndexNode.OverwriteHook() {
@Override
public void setOverwritten(IndexRequest request, int cacheSegment, boolean overwritten, int prevFile, int prevOffset) {
if (request.getPrevFile() == prevFile && request.getPrevOffset() == prevOffset) {
sizePerSegment.decrementAndGet(cacheSegment);
}
}
};
public Index(NonBlockingManager nonBlockingManager, FileProvider fileProvider, Path indexDir, int segments,
int cacheSegments, int minNodeSize, int maxNodeSize, TemporaryTable temporaryTable, Compactor compactor,
TimeService timeService) throws IOException {
this.nonBlockingManager = nonBlockingManager;
this.fileProvider = fileProvider;
this.compactor = compactor;
this.timeService = timeService;
this.indexDir = indexDir;
this.minNodeSize = minNodeSize;
this.maxNodeSize = maxNodeSize;
this.sizePerSegment = new AtomicLongArray(cacheSegments);
indexDir.toFile().mkdirs();
this.indexSizeFile = new File(indexDir.toFile(), "index-count");
this.segments = new Segment[segments];
this.flowableProcessors = new FlowableProcessor[segments];
for (int i = 0; i < segments; ++i) {
UnicastProcessor<IndexRequest> flowableProcessor = UnicastProcessor.create();
Segment segment = new Segment(this, i, temporaryTable);
this.segments[i] = segment;
// It is possible to write from multiple threads
this.flowableProcessors[i] = flowableProcessor.toSerialized();
}
}
private boolean checkForExistingIndexSizeFile() {
int storeSegments = flowableProcessors.length;
int cacheSegments = sizePerSegment.length();
boolean validCount = false;
try (RandomAccessFile indexCount = new RandomAccessFile(indexSizeFile, "r")) {
int storeSegmentsCount = UnsignedNumeric.readUnsignedInt(indexCount);
int cacheSegmentsCount = UnsignedNumeric.readUnsignedInt(indexCount);
if (storeSegmentsCount == storeSegments && cacheSegmentsCount == cacheSegments) {
for (int i = 0; i < sizePerSegment.length(); ++i) {
long value = UnsignedNumeric.readUnsignedLong(indexCount);
sizePerSegment.set(i, value);
}
validCount = true;
} else {
log.tracef("Previous index file store segments " + storeSegmentsCount + " doesn't match configured" +
" store segments " + storeSegments + " or index file cache segments " + cacheSegmentsCount + " doesn't match configured" +
" cache segments " + cacheSegments);
}
} catch (IOException e) {
log.tracef("Encountered IOException %s while reading index count file, assuming index dirty", e.getMessage());
}
// Delete the file now; it is rewritten at stop. If the file isn't present at startup, the index is considered dirty
indexSizeFile.delete();
return validCount;
}
public static byte[] toIndexKey(int cacheSegment, org.infinispan.commons.io.ByteBuffer buffer) {
return toIndexKey(cacheSegment, buffer.getBuf(), buffer.getOffset(), buffer.getLength());
}
static byte[] toIndexKey(int cacheSegment, byte[] bytes) {
return toIndexKey(cacheSegment, bytes, 0, bytes.length);
}
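// An index key is the unsigned-varint encoding of the cache segment followed by the raw key
// bytes, so the same key stored under different cache segments maps to distinct index keys.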
static byte[] toIndexKey(int cacheSegment, byte[] bytes, int offset, int length) {
int segmentBytes = UnsignedNumeric.sizeUnsignedInt(cacheSegment);
byte[] indexKey = new byte[length + segmentBytes];
UnsignedNumeric.writeUnsignedInt(indexKey, 0, cacheSegment);
System.arraycopy(bytes, offset, indexKey, segmentBytes, length);
return indexKey;
}
/**
* @return True if the index was loaded from well persisted state
*/
public boolean load() {
if (!checkForExistingIndexSizeFile()) {
return false;
}
try {
File statsFile = new File(indexDir.toFile(), "index.stats");
if (!statsFile.exists()) {
return false;
}
try (FileChannel statsChannel = new RandomAccessFile(statsFile, "rw").getChannel()) {
// id / length / free / expirationTime
ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 4 + 8);
while (read(statsChannel, buffer)) {
buffer.flip();
int id = buffer.getInt();
int length = buffer.getInt();
int free = buffer.getInt();
long expirationTime = buffer.getLong();
if (!compactor.addFreeFile(id, length, free, expirationTime, false)) {
log.tracef("Unable to add free file: %s ", id);
return false;
}
log.tracef("Loading file info for file: %s with total: %s, free: %s", id, length, free);
buffer.flip();
}
}
// No reason to keep around after loading
statsFile.delete();
for (Segment segment : segments) {
if (!segment.load()) return false;
}
return true;
} catch (IOException e) {
log.trace("Exception encountered while attempting to load index, assuming index is bad", e);
return false;
}
}
public void reset() throws IOException {
for (Segment segment : segments) {
segment.reset();
}
}
/**
* Get record or null if expired
*/
public EntryRecord getRecord(Object key, int cacheSegment, org.infinispan.commons.io.ByteBuffer serializedKey) throws IOException {
return getRecord(key, cacheSegment, toIndexKey(cacheSegment, serializedKey), IndexNode.ReadOperation.GET_RECORD);
}
/**
* Get record (even if expired) or null if not present
*/
public EntryRecord getRecordEvenIfExpired(Object key, int cacheSegment, byte[] serializedKey) throws IOException {
return getRecord(key, cacheSegment, toIndexKey(cacheSegment, serializedKey), IndexNode.ReadOperation.GET_EXPIRED_RECORD);
}
private EntryRecord getRecord(Object key, int cacheSegment, byte[] indexKey, IndexNode.ReadOperation readOperation) throws IOException {
int segment = (key.hashCode() & Integer.MAX_VALUE) % segments.length;
lock.readLock().lock();
try {
return IndexNode.applyOnLeaf(segments[segment], cacheSegment, indexKey, segments[segment].rootReadLock(), readOperation);
} finally {
lock.readLock().unlock();
}
}
/**
* Get position or null if expired
*/
public EntryPosition getPosition(Object key, int cacheSegment, org.infinispan.commons.io.ByteBuffer serializedKey) throws IOException {
int segment = (key.hashCode() & Integer.MAX_VALUE) % segments.length;
lock.readLock().lock();
try {
return IndexNode.applyOnLeaf(segments[segment], cacheSegment, toIndexKey(cacheSegment, serializedKey), segments[segment].rootReadLock(), IndexNode.ReadOperation.GET_POSITION);
} finally {
lock.readLock().unlock();
}
}
/**
* Get position + numRecords, without expiration
*/
public EntryInfo getInfo(Object key, int cacheSegment, byte[] serializedKey) throws IOException {
int segment = (key.hashCode() & Integer.MAX_VALUE) % segments.length;
lock.readLock().lock();
try {
return IndexNode.applyOnLeaf(segments[segment], cacheSegment, toIndexKey(cacheSegment, serializedKey), segments[segment].rootReadLock(), IndexNode.ReadOperation.GET_INFO);
} finally {
lock.readLock().unlock();
}
}
public CompletionStage<Void> clear() {
lock.writeLock().lock();
try {
AggregateCompletionStage<Void> stage = CompletionStages.aggregateCompletionStage();
for (FlowableProcessor<IndexRequest> processor : flowableProcessors) {
IndexRequest clearRequest = IndexRequest.clearRequest();
processor.onNext(clearRequest);
stage.dependsOn(clearRequest);
}
for (int i = 0; i < sizePerSegment.length(); ++i) {
sizePerSegment.set(i, 0);
}
return stage.freeze();
} finally {
lock.writeLock().unlock();
}
}
public CompletionStage<Object> handleRequest(IndexRequest indexRequest) {
int processor = (indexRequest.getKey().hashCode() & Integer.MAX_VALUE) % segments.length;
flowableProcessors[processor].onNext(indexRequest);
return indexRequest;
}
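// Submits one shared countdown request to every processor; the runnable fires only after the
// last processor has drained the requests submitted before it.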
public void ensureRunOnLast(Runnable runnable) {
AtomicInteger count = new AtomicInteger(flowableProcessors.length);
IndexRequest request = IndexRequest.syncRequest(() -> {
if (count.decrementAndGet() == 0) {
runnable.run();
}
});
for (FlowableProcessor<IndexRequest> flowableProcessor : flowableProcessors) {
flowableProcessor.onNext(request);
}
}
public void deleteFileAsync(int fileId) {
ensureRunOnLast(() -> {
// After all indexes have ensured they have processed all requests - the last one will delete the file
// This guarantees that the index can't see an outdated value
fileProvider.deleteFile(fileId);
compactor.releaseStats(fileId);
});
}
public CompletionStage<Void> stop() throws InterruptedException {
for (FlowableProcessor<IndexRequest> flowableProcessor : flowableProcessors) {
flowableProcessor.onComplete();
}
AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
for (Segment segment : segments) {
aggregateCompletionStage.dependsOn(segment);
}
// After all SIFS segments are complete we write the size
return aggregateCompletionStage.freeze().thenRun(() -> {
try {
// Create the file first as it should not be present as we deleted during startup
indexSizeFile.createNewFile();
try (FileOutputStream indexCountStream = new FileOutputStream(indexSizeFile)) {
UnsignedNumeric.writeUnsignedInt(indexCountStream, segments.length);
UnsignedNumeric.writeUnsignedInt(indexCountStream, this.sizePerSegment.length());
for (int i = 0; i < sizePerSegment.length(); ++i) {
UnsignedNumeric.writeUnsignedLong(indexCountStream, sizePerSegment.get(i));
}
}
ConcurrentMap<Integer, Compactor.Stats> map = compactor.getFileStats();
File statsFile = new File(indexDir.toFile(), "index.stats");
try (FileChannel statsChannel = new RandomAccessFile(statsFile, "rw").getChannel()) {
statsChannel.truncate(0);
// Maximum size that all ints and long can add up to
ByteBuffer buffer = ByteBuffer.allocate(4 + 4 + 4 + 8);
for (Map.Entry<Integer, Compactor.Stats> entry : map.entrySet()) {
int file = entry.getKey();
int total = entry.getValue().getTotal();
if (total == -1) {
total = (int) fileProvider.getFileSize(file);
}
int free = entry.getValue().getFree();
if (total == free) {
log.tracef("Deleting file %s since it has no free bytes in it", file);
// No reason to keep an empty file around
fileProvider.deleteFile(file);
continue;
}
buffer.putInt(file);
buffer.putInt(total);
buffer.putInt(free);
buffer.putLong(entry.getValue().getNextExpirationTime());
buffer.flip();
write(statsChannel, buffer);
buffer.flip();
}
}
} catch (IOException e) {
throw CompletableFutures.asCompletionException(e);
}
});
}
public long approximateSize(IntSet cacheSegments) {
long size = 0;
for (PrimitiveIterator.OfInt segIter = cacheSegments.iterator(); segIter.hasNext(); ) {
int cacheSegment = segIter.nextInt();
size += sizePerSegment.get(cacheSegment);
if (size < 0) {
return Long.MAX_VALUE;
}
}
return size;
}
public long getMaxSeqId() throws IOException {
long maxSeqId = 0;
lock.readLock().lock();
try {
for (Segment seg : segments) {
maxSeqId = Math.max(maxSeqId, IndexNode.calculateMaxSeqId(seg, seg.rootReadLock()));
}
} finally {
lock.readLock().unlock();
}
return maxSeqId;
}
public void start(Executor executor) {
for (int i = 0; i < segments.length; ++i) {
Segment segment = segments[i];
flowableProcessors[i]
.observeOn(Schedulers.from(executor))
.subscribe(segment, segment::completeExceptionally, segment);
}
}
static boolean read(FileChannel channel, ByteBuffer buffer) throws IOException {
do {
int read = channel.read(buffer);
if (read < 0) {
return false;
}
} while (buffer.position() < buffer.limit());
return true;
}
private static void write(FileChannel indexFile, ByteBuffer buffer) throws IOException {
do {
int written = indexFile.write(buffer);
if (written < 0) {
throw new IllegalStateException("Cannot write to index file!");
}
} while (buffer.position() < buffer.limit());
}
static class Segment extends CompletableFuture<Void> implements Consumer<IndexRequest>, Action {
final Index index;
private final TemporaryTable temporaryTable;
private final TreeMap<Short, List<IndexSpace>> freeBlocks = new TreeMap<>();
private final ReadWriteLock rootLock = new ReentrantReadWriteLock();
private final FileChannel indexFile;
private long indexFileSize;
private volatile IndexNode root;
private Segment(Index index, int id, TemporaryTable temporaryTable) throws IOException {
this.index = index;
this.temporaryTable = temporaryTable;
File indexFileFile = new File(index.indexDir.toFile(), "index." + id);
this.indexFile = new RandomAccessFile(indexFileFile, "rw").getChannel();
// Just to init to empty
root = IndexNode.emptyWithLeaves(this);
}
boolean load() throws IOException {
int segmentMax = temporaryTable.getSegmentMax();
indexFile.position(0);
ByteBuffer buffer = ByteBuffer.allocate(INDEX_FILE_HEADER_SIZE);
boolean loaded;
if (indexFile.size() >= INDEX_FILE_HEADER_SIZE && read(indexFile, buffer)
&& buffer.getInt(0) == GRACEFULLY && buffer.getInt(4) == segmentMax) {
long rootOffset = buffer.getLong(8);
short rootOccupied = buffer.getShort(16);
long freeBlocksOffset = buffer.getLong(18);
root = new IndexNode(this, rootOffset, rootOccupied);
loadFreeBlocks(freeBlocksOffset);
indexFileSize = freeBlocksOffset;
loaded = true;
} else {
this.indexFile.truncate(0);
root = IndexNode.emptyWithLeaves(this);
loaded = false;
// reserve space for shutdown
indexFileSize = INDEX_FILE_HEADER_SIZE;
}
buffer.putInt(0, DIRTY);
buffer.position(0);
buffer.limit(4);
indexFile.position(0);
write(indexFile, buffer);
return loaded;
}
void reset() throws IOException {
this.indexFile.truncate(0);
root = IndexNode.emptyWithLeaves(this);
// reserve space for shutdown
indexFileSize = INDEX_FILE_HEADER_SIZE;
ByteBuffer buffer = ByteBuffer.allocate(INDEX_FILE_HEADER_SIZE);
buffer.putInt(0, DIRTY);
buffer.position(0);
buffer.limit(4);
indexFile.position(0);
write(indexFile, buffer);
}
@Override
public void accept(IndexRequest request) throws Throwable {
if (log.isTraceEnabled()) log.tracef("Indexing %s", request);
IndexNode.OverwriteHook overwriteHook;
IndexNode.RecordChange recordChange;
switch (request.getType()) {
case CLEAR:
root = IndexNode.emptyWithLeaves(this);
indexFile.truncate(0);
indexFileSize = INDEX_FILE_HEADER_SIZE;
freeBlocks.clear();
index.nonBlockingManager.complete(request, null);
return;
case SYNC_REQUEST:
Runnable runnable = (Runnable) request.getKey();
runnable.run();
index.nonBlockingManager.complete(request, null);
return;
case MOVED:
recordChange = IndexNode.RecordChange.MOVE;
overwriteHook = index.movedHook;
break;
case UPDATE:
recordChange = IndexNode.RecordChange.INCREASE;
overwriteHook = index.updateHook;
break;
case DROPPED:
recordChange = IndexNode.RecordChange.DECREASE;
overwriteHook = index.droppedHook;
break;
case FOUND_OLD:
recordChange = IndexNode.RecordChange.INCREASE_FOR_OLD;
overwriteHook = IndexNode.NOOP_HOOK;
break;
default:
throw new IllegalArgumentException(request.toString());
}
try {
IndexNode.setPosition(root, request, overwriteHook, recordChange);
} catch (IllegalStateException e) {
request.completeExceptionally(e);
}
temporaryTable.removeConditionally(request.getSegment(), request.getKey(), request.getFile(), request.getOffset());
if (request.getType() != IndexRequest.Type.UPDATE) {
// The update type will complete it in the switch statement above
index.nonBlockingManager.complete(request, null);
}
}
// This is run when the flowable ends, either via normal termination or error
@Override
public void run() throws IOException {
try {
IndexSpace rootSpace = allocateIndexSpace(root.length());
root.store(rootSpace);
indexFile.position(indexFileSize);
ByteBuffer buffer = ByteBuffer.allocate(4);
buffer.putInt(0, freeBlocks.size());
write(indexFile, buffer);
for (Map.Entry<Short, List<IndexSpace>> entry : freeBlocks.entrySet()) {
List<IndexSpace> list = entry.getValue();
int requiredSize = 8 + list.size() * 10;
buffer = buffer.capacity() < requiredSize ? ByteBuffer.allocate(requiredSize) : buffer;
buffer.position(0);
buffer.limit(requiredSize);
// TODO: change this to short
buffer.putInt(entry.getKey());
buffer.putInt(list.size());
for (IndexSpace space : list) {
buffer.putLong(space.offset);
buffer.putShort(space.length);
}
buffer.flip();
write(indexFile, buffer);
}
int headerWithoutMagic = INDEX_FILE_HEADER_SIZE - 8;
buffer = buffer.capacity() < headerWithoutMagic ? ByteBuffer.allocate(headerWithoutMagic) : buffer;
buffer.position(0);
// we need to set limit ahead, otherwise the putLong could throw IndexOutOfBoundsException
buffer.limit(headerWithoutMagic);
buffer.putLong(0, rootSpace.offset);
buffer.putShort(8, rootSpace.length);
buffer.putLong(10, indexFileSize);
indexFile.position(8);
write(indexFile, buffer);
buffer.position(0);
buffer.limit(8);
buffer.putInt(0, GRACEFULLY);
buffer.putInt(4, temporaryTable.getSegmentMax());
indexFile.position(0);
write(indexFile, buffer);
complete(null);
} catch (Throwable t) {
completeExceptionally(t);
}
}
private void loadFreeBlocks(long freeBlocksOffset) throws IOException {
indexFile.position(freeBlocksOffset);
ByteBuffer buffer = ByteBuffer.allocate(8);
buffer.limit(4);
if (!read(indexFile, buffer)) {
throw new IOException("Cannot read free blocks lists!");
}
int numLists = buffer.getInt(0);
for (int i = 0; i < numLists; ++i) {
buffer.position(0);
buffer.limit(8);
if (!read(indexFile, buffer)) {
throw new IOException("Cannot read free blocks lists!");
}
// TODO: change this to short
int blockLength = buffer.getInt(0);
assert blockLength <= Short.MAX_VALUE;
int listSize = buffer.getInt(4);
// Ignore any free block that had no entries as it adds time complexity to our lookup
if (listSize > 0) {
int requiredSize = 10 * listSize;
buffer = buffer.capacity() < requiredSize ? ByteBuffer.allocate(requiredSize) : buffer;
buffer.position(0);
buffer.limit(requiredSize);
if (!read(indexFile, buffer)) {
throw new IOException("Cannot read free blocks lists!");
}
buffer.flip();
ArrayList<IndexSpace> list = new ArrayList<>(listSize);
for (int j = 0; j < listSize; ++j) {
list.add(new IndexSpace(buffer.getLong(), buffer.getShort()));
}
freeBlocks.put((short) blockLength, list);
}
}
}
public FileChannel getIndexFile() {
return indexFile;
}
public FileProvider getFileProvider() {
return index.fileProvider;
}
public Compactor getCompactor() {
return index.compactor;
}
public IndexNode getRoot() {
// this has to be called with rootLock locked!
return root;
}
public void setRoot(IndexNode root) {
rootLock.writeLock().lock();
this.root = root;
rootLock.writeLock().unlock();
}
public int getMaxNodeSize() {
return index.maxNodeSize;
}
public int getMinNodeSize() {
return index.minNodeSize;
}
// this should be accessed only from the updater thread
IndexSpace allocateIndexSpace(short length) {
// Use tailMap so that we only require O(logN) to find the iterator
// This avoids an additional O(logN) to do an entry removal
Iterator<Map.Entry<Short, List<IndexSpace>>> iter = freeBlocks.tailMap(length).entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<Short, List<IndexSpace>> entry = iter.next();
short spaceLength = entry.getKey();
// Only use the space if it is only 25% larger to avoid too much fragmentation
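// e.g. for length = 100, free blocks longer than 125 bytes (100 + 100/4) are not reused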
if ((length + (length >> 2)) < spaceLength) {
break;
}
List<IndexSpace> list = entry.getValue();
if (!list.isEmpty()) {
IndexSpace spaceToReturn = list.remove(list.size() - 1);
if (list.isEmpty()) {
iter.remove();
}
return spaceToReturn;
}
iter.remove();
}
long oldSize = indexFileSize;
indexFileSize += length;
return new IndexSpace(oldSize, length);
}
// this should be accessed only from the updater thread
void freeIndexSpace(long offset, short length) {
if (length <= 0) throw new IllegalArgumentException("Offset=" + offset + ", length=" + length);
// TODO: fragmentation!
// TODO: memory bounds!
if (offset + length < indexFileSize) {
freeBlocks.computeIfAbsent(length, k -> new ArrayList<>()).add(new IndexSpace(offset, length));
} else {
indexFileSize -= length;
try {
indexFile.truncate(indexFileSize);
} catch (IOException e) {
log.cannotTruncateIndex(e);
}
}
}
Lock rootReadLock() {
return rootLock.readLock();
}
public TimeService getTimeService() {
return index.timeService;
}
}
/**
* Offset-length pair
*/
static class IndexSpace {
protected long offset;
protected short length;
IndexSpace(long offset, short length) {
this.offset = offset;
this.length = length;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof IndexSpace)) return false;
IndexSpace innerNode = (IndexSpace) o;
return length == innerNode.length && offset == innerNode.offset;
}
@Override
public int hashCode() {
int result = (int) (offset ^ (offset >>> 32));
result = 31 * result + length;
return result;
}
@Override
public String toString() {
return String.format("[%d-%d(%d)]", offset, offset + length, length);
}
}
<V> Flowable<EntryRecord> publish(IntSet cacheSegments, boolean loadValues) {
return Flowable.fromArray(segments)
.concatMap(segment -> segment.root.publish(cacheSegments, loadValues));
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/EntryInfo.java
package org.infinispan.persistence.sifs;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
public class EntryInfo extends EntryPosition {
public final short numRecords;
public final int cacheSegment;
public EntryInfo(int file, int offset, short numRecords, int cacheSegment) {
super(file, offset);
this.numRecords = numRecords;
this.cacheSegment = cacheSegment;
}
public String toString() {
return String.format("[%d:%d] containing %d records in segment %d", file, offset, numRecords, cacheSegment);
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/EntryHeader.java
package org.infinispan.persistence.sifs;
import java.nio.ByteBuffer;
/**
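* Fixed-size header that precedes every entry in a data file.
* <p>
* Illustrative write/read round trip (a sketch; the length values are placeholders):
* <pre>{@code
* ByteBuffer buf = ByteBuffer.allocate(EntryHeader.HEADER_SIZE_11_0);
* EntryHeader.writeHeader(buf, (short) 8, (short) 0, 16, (short) 0, 1L, -1L);
* buf.flip();
* EntryHeader header = new EntryHeader(buf); // header.keyLength() == 8, etc.
* }</pre>
*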
* @author Radim Vansa <rvansa@redhat.com>
*/
public class EntryHeader {
private static final byte MAGIC = 0x01;
/* 1 byte - magic key
* 2 bytes - key length
* 2 bytes - metadata length
* 4 bytes - value length
* 8 bytes - seq id
* 8 bytes - expiration time
*/
static final int HEADER_SIZE_10_1 = 24;
/* 1 byte - magic key
* 2 bytes - key length
* 2 bytes - metadata length
* 4 bytes - value length
* 2 bytes - internal metadata length
* 8 bytes - seq id
* 8 bytes - expiration time
*/
static final int HEADER_SIZE_11_0 = 27;
private final int keyLength;
private final int valueLength;
private final int metadataLength;
private final long seqId;
private final long expiration;
private final int internalMetadataLength;
private final int headerLength;
public EntryHeader(ByteBuffer buffer) {
this(buffer, false);
}
public EntryHeader(ByteBuffer buffer, boolean oldFormat) {
byte magicByte;
if (!oldFormat && (magicByte = buffer.get()) != MAGIC) {
throw new IllegalStateException("Magic byte was: " + magicByte);
}
this.keyLength = buffer.getShort();
this.metadataLength = buffer.getShort();
this.valueLength = buffer.getInt();
this.internalMetadataLength = oldFormat ? 0 : buffer.getShort();
this.seqId = buffer.getLong();
this.expiration = buffer.getLong();
this.headerLength = oldFormat ? HEADER_SIZE_10_1 : HEADER_SIZE_11_0;
}
public int keyLength() {
return keyLength;
}
public int metadataLength() {
return metadataLength;
}
public int internalMetadataLength() {
return internalMetadataLength;
}
public int valueLength() {
return valueLength;
}
public long seqId() {
return seqId;
}
public long expiryTime() {
return expiration;
}
public int getHeaderLength() {
return headerLength;
}
@Override
public String toString() {
return String.format("[keyLength=%d, valueLength=%d, metadataLength=%d, internalMetadataLength=%d,seqId=%d, expiration=%d]", keyLength, valueLength, metadataLength, internalMetadataLength, seqId, expiration);
}
public int totalLength() {
return keyLength + metadataLength + internalMetadataLength + valueLength + headerLength;
}
public static void writeHeader(ByteBuffer buf, short keyLength, short metadataLength, int valueLength, short internalMetadataLength, long seqId, long expiration) {
buf.put(EntryHeader.MAGIC);
buf.putShort(keyLength);
buf.putShort(metadataLength);
buf.putInt(valueLength);
buf.putShort(internalMetadataLength);
buf.putLong(seqId);
buf.putLong(expiration);
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/IndexNode.java
package org.infinispan.persistence.sifs;
import java.io.IOException;
import java.lang.ref.SoftReference;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.infinispan.commons.io.ByteBufferImpl;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.reactive.FlowableCreate;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.BackpressureStrategy;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.FlowableEmitter;
/**
* The recursive index structure. References to children are held in soft references,
* which allows JVM-handled caching and reduces the amount of reads required while
* evading OOMs if the index gets too big.
* This structure is a B+ tree, described at https://en.wikipedia.org/wiki/B%2B_tree
* <p>
* Each node holds either innerNodes together with keyParts (pointers to further nodes), or leafNodes, which contain
* the actual values. The keyParts dictate which innerNode should be followed when looking up a specific key, giving
* log(N) lookups. A leaf node contains the actual value for the given key.
* <p>
* An IndexNode cannot have both innerNodes and leafNodes
* <p>
* Each index node is linked to a specific SoftIndexFileStore segment that is not the same thing as a cache segment.
* Whenever a cache segment is used its variable will be named cacheSegment or something similar to help prevent
* ambiguity.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
class IndexNode {
private static final Log log = LogFactory.getLog(IndexNode.class, Log.class);
private static final byte HAS_LEAVES = 1;
private static final byte HAS_NODES = 2;
// Prefix length (short) + keyNode length (short) + flag (byte)
private static final int INNER_NODE_HEADER_SIZE = 5;
private static final int INNER_NODE_REFERENCE_SIZE = 10;
private static final int LEAF_NODE_REFERENCE_SIZE = 14;
public static final int RESERVED_SPACE
= INNER_NODE_HEADER_SIZE + 2 * Math.max(INNER_NODE_REFERENCE_SIZE, LEAF_NODE_REFERENCE_SIZE);
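// On-disk node layout, as produced by store(): prefix length (short) + prefix bytes, flags
// (byte), number of keyParts (short), each keyPart as length (short) + bytes, then either
// inner references (offset long + length short, 10 bytes each) or leaf references
// (file int + offset int + numRecords short + cacheSegment int, 14 bytes each).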
private final Index.Segment segment;
private byte[] prefix;
private byte[][] keyParts;
private InnerNode[] innerNodes;
private LeafNode[] leafNodes = LeafNode.EMPTY_ARRAY;
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private long offset = -1;
private short keyPartsLength = -1;
private short contentLength = -1;
private short totalLength = -1;
private short occupiedSpace;
public enum RecordChange {
INCREASE,
INCREASE_FOR_OLD,
MOVE,
DECREASE,
}
IndexNode(Index.Segment segment, long offset, short occupiedSpace) throws IOException {
this.segment = segment;
this.offset = offset;
this.occupiedSpace = occupiedSpace;
ByteBuffer buffer = loadBuffer(segment.getIndexFile(), offset, occupiedSpace);
prefix = new byte[buffer.getShort()];
buffer.get(prefix);
byte flags = buffer.get();
int numKeyParts = buffer.getShort();
int afterHeaderPos = buffer.position();
keyParts = new byte[numKeyParts][];
for (int i = 0; i < numKeyParts; ++i) {
keyParts[i] = new byte[buffer.getShort()];
buffer.get(keyParts[i]);
}
assert (buffer.position() - afterHeaderPos) < Short.MAX_VALUE;
keyPartsLength = (short) (buffer.position() - afterHeaderPos);
if ((flags & HAS_LEAVES) != 0) {
leafNodes = new LeafNode[numKeyParts + 1];
for (int i = 0; i < numKeyParts + 1; ++i) {
leafNodes[i] = new LeafNode(buffer.getInt(), buffer.getInt(), buffer.getShort(), buffer.getInt());
}
} else if ((flags & HAS_NODES) != 0) {
innerNodes = new InnerNode[numKeyParts + 1];
for (int i = 0; i < numKeyParts + 1; ++i) {
innerNodes[i] = new InnerNode(buffer.getLong(), buffer.getShort());
}
}
assert (buffer.position() - afterHeaderPos) < Short.MAX_VALUE;
contentLength = (short) (buffer.position() - afterHeaderPos);
if (log.isTraceEnabled()) {
log.tracef("Loaded %08x from %d:%d (length %d)", System.identityHashCode(this), offset, occupiedSpace, length());
}
}
private static ByteBuffer loadBuffer(FileChannel indexFile, long offset, int occupiedSpace) throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(occupiedSpace);
int read = 0;
do {
int nowRead = indexFile.read(buffer, offset + read);
if (nowRead < 0) {
throw new IOException("Cannot read record [" + offset + ":" + occupiedSpace + "] (already read "
+ read + "), file size is " + indexFile.size());
}
read += nowRead;
} while (read < occupiedSpace);
buffer.rewind();
return buffer;
}
private IndexNode(Index.Segment segment, byte[] newPrefix, byte[][] newKeyParts, LeafNode[] newLeafNodes) {
this.segment = segment;
this.prefix = newPrefix;
this.keyParts = newKeyParts;
this.leafNodes = newLeafNodes;
}
private IndexNode(Index.Segment segment, byte[] newPrefix, byte[][] newKeyParts, InnerNode[] newInnerNodes) {
this.segment = segment;
this.prefix = newPrefix;
this.keyParts = newKeyParts;
this.innerNodes = newInnerNodes;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IndexNode indexNode = (IndexNode) o;
if (!Arrays.equals(innerNodes, indexNode.innerNodes)) return false;
if (!Arrays.equals(leafNodes, indexNode.leafNodes)) return false;
if (!Arrays.equals(prefix, indexNode.prefix)) return false;
if (!Arrays.deepEquals(keyParts, indexNode.keyParts)) return false;
return true;
}
/**
* Can be called only from the single writer thread (therefore the write lock guards only against other readers)
*/
private void replaceContent(IndexNode other) throws IOException {
try {
lock.writeLock().lock();
this.prefix = other.prefix;
this.keyParts = other.keyParts;
this.innerNodes = other.innerNodes;
this.leafNodes = other.leafNodes;
this.contentLength = -1;
this.keyPartsLength = -1;
this.totalLength = -1;
} finally {
lock.writeLock().unlock();
}
// don't have to acquire any lock here
// the only node with offset < 0 is the root - we can't lose reference to it
if (offset >= 0) {
store(new Index.IndexSpace(offset, occupiedSpace));
}
}
// called only internally or for root
void store(Index.IndexSpace indexSpace) throws IOException {
this.offset = indexSpace.offset;
this.occupiedSpace = indexSpace.length;
ByteBuffer buffer = ByteBuffer.allocate(length());
buffer.putShort((short) prefix.length);
buffer.put(prefix);
byte flags = 0;
if (innerNodes != null && innerNodes.length != 0) {
flags |= HAS_NODES;
} else if (leafNodes != null && leafNodes.length != 0) {
flags |= HAS_LEAVES;
}
buffer.put(flags);
buffer.putShort((short) keyParts.length);
for (byte[] keyPart : keyParts) {
buffer.putShort((short) keyPart.length);
buffer.put(keyPart);
}
if (innerNodes != null) {
for (InnerNode innerNode : innerNodes) {
buffer.putLong(innerNode.offset);
buffer.putShort(innerNode.length);
}
} else {
for (LeafNode leafNode : leafNodes) {
buffer.putInt(leafNode.file);
buffer.putInt(leafNode.offset);
buffer.putShort(leafNode.numRecords);
buffer.putInt(leafNode.cacheSegment);
}
}
assert buffer.position() == buffer.limit() : "Buffer position: " + buffer.position() + " limit: " + buffer.limit();
buffer.flip();
segment.getIndexFile().write(buffer, offset);
if (log.isTraceEnabled()) {
log.tracef("Persisted %08x (length %d, %d %s) to %d:%d", System.identityHashCode(this), length(),
innerNodes != null ? innerNodes.length : leafNodes.length,
innerNodes != null ? "children" : "leaves", offset, occupiedSpace);
}
}
private static class Path {
IndexNode node;
public int index;
private Path(IndexNode node, int index) {
this.node = node;
this.index = index;
}
}
private static boolean entryKeyEqualsBuffer(EntryRecord headerAndKey, org.infinispan.commons.io.ByteBuffer buffer) {
byte[] key = headerAndKey.getKey();
return Util.arraysEqual(key, 0, key.length, buffer.getBuf(), buffer.getOffset(), buffer.getOffset() + buffer.getLength());
}
public enum ReadOperation {
GET_RECORD {
@Override
protected EntryRecord apply(LeafNode leafNode, org.infinispan.commons.io.ByteBuffer key, FileProvider fileProvider, TimeService timeService) throws IOException, IndexNodeOutdatedException {
return leafNode.loadRecord(fileProvider, key, timeService);
}
},
GET_EXPIRED_RECORD {
@Override
protected EntryRecord apply(LeafNode leafNode, org.infinispan.commons.io.ByteBuffer key, FileProvider fileProvider, TimeService timeService) throws IOException, IndexNodeOutdatedException {
return leafNode.loadRecord(fileProvider, key, null);
}
},
GET_POSITION {
@Override
protected EntryPosition apply(LeafNode leafNode, org.infinispan.commons.io.ByteBuffer key, FileProvider fileProvider, TimeService timeService) throws IOException, IndexNodeOutdatedException {
EntryRecord hak = leafNode.loadHeaderAndKey(fileProvider);
if (entryKeyEqualsBuffer(hak, key)) {
if (hak.getHeader().expiryTime() > 0 && hak.getHeader().expiryTime() <= timeService.wallClockTime()) {
if (log.isTraceEnabled()) {
log.tracef("Found node on %d:%d but it is expired", leafNode.file, leafNode.offset);
}
return null;
}
return leafNode;
} else {
if (log.isTraceEnabled()) {
log.tracef("Found node on %d:%d but key does not match", leafNode.file, leafNode.offset);
}
}
return null;
}
},
GET_INFO {
@Override
protected EntryInfo apply(LeafNode leafNode, org.infinispan.commons.io.ByteBuffer key, FileProvider fileProvider, TimeService timeService) throws IOException, IndexNodeOutdatedException {
EntryRecord hak = leafNode.loadHeaderAndKey(fileProvider);
if (entryKeyEqualsBuffer(hak, key)) {
log.tracef("Found matching leafNode %s", leafNode);
return leafNode;
} else {
if (log.isTraceEnabled()) {
log.tracef("Found node on %d:%d but key does not match", leafNode.file, leafNode.offset);
}
return null;
}
}
};
protected abstract <T> T apply(LeafNode leafNode, org.infinispan.commons.io.ByteBuffer key, FileProvider fileProvider, TimeService timeService) throws IOException, IndexNodeOutdatedException;
}
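// Walks from the root down to a leaf using hand-over-hand read locking: each child node is
// read-locked before the parent's lock is released, so a reader never holds a reference to a
// node that has been freed. An IndexNodeOutdatedException causes a bounded retry with a pause.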
public static <T> T applyOnLeaf(Index.Segment segment, int cacheSegment, byte[] indexKey, Lock rootLock, ReadOperation operation) throws IOException {
int attempts = 0;
for (; ; ) {
rootLock.lock();
IndexNode node = segment.getRoot();
Lock parentLock = rootLock, currentLock = null;
try {
while (node.innerNodes != null) {
currentLock = node.lock.readLock();
currentLock.lock();
parentLock.unlock();
parentLock = currentLock;
int insertionPoint = node.getInsertionPoint(indexKey);
node = node.innerNodes[insertionPoint].getIndexNode(segment);
if (node == null) {
return null;
}
}
currentLock = node.lock.readLock();
currentLock.lock();
if (node.leafNodes.length == 0) {
log.tracef("No leaf nodes found that maps to provided key");
return null;
}
int insertionPoint = node.getInsertionPoint(indexKey);
int cacheSegmentBytesSize = UnsignedNumeric.sizeUnsignedInt(cacheSegment);
return operation.apply(node.leafNodes[insertionPoint], ByteBufferImpl.create(indexKey, cacheSegmentBytesSize, indexKey.length - cacheSegmentBytesSize),
segment.getFileProvider(), segment.getTimeService());
} catch (IndexNodeOutdatedException e) {
try {
if (attempts > 10) {
throw log.indexLooksCorrupt(e);
}
Thread.sleep(1000);
attempts++;
} catch (InterruptedException e1) {
Thread.currentThread().interrupt();
}
// noop, we'll simply retry
} finally {
if (parentLock != currentLock) parentLock.unlock();
if (currentLock != null) currentLock.unlock();
}
}
}
public static long calculateMaxSeqId(Index.Segment segment, Lock lock) throws IOException {
lock.lock();
try {
return calculateMaxSeqId(segment.getRoot(), segment);
} finally {
lock.unlock();
}
}
private static long calculateMaxSeqId(IndexNode node, Index.Segment segment) throws IOException {
long maxSeqId = 0;
node.lock.readLock().lock();
try {
if (node.leafNodes != null) {
for (LeafNode ln : node.leafNodes) {
EntryRecord record = ln.loadHeaderAndKey(segment.getFileProvider());
maxSeqId = Math.max(maxSeqId, record.getHeader().seqId());
}
}
if (node.innerNodes != null) {
for (InnerNode in : node.innerNodes) {
maxSeqId = Math.max(maxSeqId, calculateMaxSeqId(in.getIndexNode(segment), segment));
}
}
} catch (IndexNodeOutdatedException e) {
throw log.indexLooksCorrupt(e);
} finally {
node.lock.readLock().unlock();
}
return maxSeqId;
}
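// Rewrites in place only the first 10 bytes (file int + offset int + numRecords short) of a
// single leaf reference; the trailing cacheSegment int of the 14-byte reference is untouched.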
private void updateFileOffsetInFile(int leafOffset, int newFile, int newOffset, short numRecords) throws IOException {
// Root is -1, so that means the beginning of the file
long offset = this.offset >= 0 ? this.offset : 0;
offset += headerLength();
offset += keyPartsLength();
offset += (long) leafOffset * LEAF_NODE_REFERENCE_SIZE;
ByteBuffer buffer = ByteBuffer.allocate(10);
buffer.putInt(newFile);
buffer.putInt(newOffset);
buffer.putShort(numRecords);
buffer.flip();
this.segment.getIndexFile().write(buffer, offset);
}
private static IndexNode findParentNode(IndexNode root, byte[] indexKey, Deque<Path> stack) throws IOException {
IndexNode node = root;
while (node.innerNodes != null) {
int insertionPoint = node.getInsertionPoint(indexKey);
if (stack != null) stack.push(new Path(node, insertionPoint));
if (log.isTraceEnabled()) {
log.tracef("Pushed %08x (length %d, %d children) to stack (insertion point %d)", System.identityHashCode(node), node.length(), node.innerNodes.length, insertionPoint);
}
node = node.innerNodes[insertionPoint].getIndexNode(root.segment);
}
return node;
}
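   /**
    * Copy-on-write update path for a single key: finds the leaf-level node, creates a modified
    * copy and propagates the resulting joins/splits up the recorded stack of parents, possibly
    * installing a new root. Replaced nodes are collected as garbage and their index space is
    * freed once the update completes.
    */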
public static void setPosition(IndexNode root, IndexRequest request, OverwriteHook overwriteHook, RecordChange recordChange) throws IOException {
int cacheSegment = request.getSegment();
// TODO: maybe we can optimize not copying this?
byte[] indexKey = Index.toIndexKey(cacheSegment, request.getSerializedKey());
Deque<Path> stack = new ArrayDeque<>();
IndexNode node = findParentNode(root, indexKey, stack);
IndexNode copy = node.copyWith(request, cacheSegment, indexKey, overwriteHook, recordChange);
if (copy == node) {
// no change was executed
return;
}
if (log.isTraceEnabled()) {
log.tracef("Created %08x (length %d) from %08x (length %d), stack size %d",
System.identityHashCode(copy), copy.length(), System.identityHashCode(node), node.length(), stack.size());
}
Deque<IndexNode> garbage = new ArrayDeque<>();
try {
JoinSplitResult result = manageLength(root.segment, stack, node, copy, garbage);
if (result == null) {
return;
}
if (log.isTraceEnabled()) {
log.tracef("Created (1) %d new nodes, GC %08x", result.newNodes.size(), System.identityHashCode(node));
}
garbage.push(node);
for (;;) {
if (stack.isEmpty()) {
IndexNode newRoot;
if (result.newNodes.size() == 1) {
newRoot = result.newNodes.get(0);
if (log.isTraceEnabled()) {
log.tracef("Setting new root %08x (index has shrunk)", System.identityHashCode(newRoot));
}
} else {
newRoot = IndexNode.emptyWithInnerNodes(root.segment).copyWith(0, 0, result.newNodes);
root.segment.getIndexFile().force(false);
if (log.isTraceEnabled()) {
log.tracef("Setting new root %08x (index has grown)", System.identityHashCode(newRoot));
}
}
newRoot.segment.setRoot(newRoot);
return;
}
Path path = stack.pop();
copy = path.node.copyWith(result.from, result.to, result.newNodes);
if (log.isTraceEnabled()) {
log.tracef("Created %08x (length %d) from %08x with the %d new nodes (%d - %d)",
System.identityHashCode(copy), copy.length(), System.identityHashCode(path.node), result.newNodes.size(), result.from, result.to);
}
result = manageLength(path.node.segment, stack, path.node, copy, garbage);
if (result == null) {
if (log.isTraceEnabled()) {
log.tracef("No more index updates required");
}
return;
}
if (log.isTraceEnabled()) {
log.tracef("Created (2) %d new nodes, GC %08x", result.newNodes.size(), System.identityHashCode(path.node));
}
garbage.push(path.node);
}
} finally {
while (!garbage.isEmpty()) {
IndexNode oldNode = garbage.pop();
oldNode.lock.writeLock().lock();
try {
if (oldNode.offset >= 0) {
oldNode.segment.freeIndexSpace(oldNode.offset, oldNode.occupiedSpace);
oldNode.offset = -1;
oldNode.occupiedSpace = -1;
}
} finally {
oldNode.lock.writeLock().unlock();
}
}
}
}
private static class JoinSplitResult {
public final int from;
public final int to;
final List<IndexNode> newNodes;
private JoinSplitResult(int from, int to, List<IndexNode> newNodes) {
this.from = from;
this.to = to;
this.newNodes = newNodes;
}
}
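   /**
    * Balances a freshly copied node. If it shrank below the segment's minimum node size and has a
    * sibling, it is joined with a neighbour picked by a size heuristic; if the copy still fits
    * into the space occupied by the old node, the old node is rewritten in place and {@code null}
    * is returned (no parent update needed); if it grew beyond the maximum node size, it is split.
    * The returned {@link JoinSplitResult} tells the caller which range of children to replace
    * with which new nodes.
    */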
private static JoinSplitResult manageLength(Index.Segment segment, Deque<Path> stack, IndexNode node, IndexNode copy, Deque<IndexNode> garbage) throws IOException {
int from, to;
if (copy.length() < segment.getMinNodeSize() && !stack.isEmpty()) {
Path parent = stack.peek();
if (parent.node.innerNodes.length == 1) {
// we have no siblings - we can't merge with them even when we're really short
if (copy.length() <= node.occupiedSpace) {
node.replaceContent(copy);
return null;
} else {
return new JoinSplitResult(parent.index, parent.index, Collections.singletonList(copy));
}
}
int sizeWithLeft = Integer.MAX_VALUE;
int sizeWithRight = Integer.MAX_VALUE;
if (parent.index > 0) {
sizeWithLeft = copy.length() + parent.node.innerNodes[parent.index - 1].length - INNER_NODE_HEADER_SIZE;
}
if (parent.index < parent.node.innerNodes.length - 1) {
sizeWithRight = copy.length() + parent.node.innerNodes[parent.index + 1].length - INNER_NODE_HEADER_SIZE;
}
int joinWith;
// this is just some kind of heuristic, may be changed later
if (sizeWithLeft == Integer.MAX_VALUE) {
joinWith = parent.index + 1;
} else if (sizeWithRight == Integer.MAX_VALUE) {
joinWith = parent.index - 1;
} else if (sizeWithLeft > segment.getMaxNodeSize() && sizeWithRight > segment.getMaxNodeSize()) {
joinWith = sizeWithLeft >= sizeWithRight ? parent.index - 1 : parent.index + 1;
} else {
joinWith = sizeWithLeft <= sizeWithRight ? parent.index - 1 : parent.index + 1;
}
if (joinWith < 0 || joinWith >= parent.node.innerNodes.length) {
throw new IllegalStateException(String.format("parent %08x, %08x -> %08x: cannot join to %d, with left %d, with right %d, max %d",
System.identityHashCode(parent.node), System.identityHashCode(node), System.identityHashCode(copy),
joinWith, sizeWithLeft, sizeWithRight, segment.getMaxNodeSize()));
}
IndexNode joiner = parent.node.innerNodes[joinWith].getIndexNode(segment);
byte[] middleKey = concat(parent.node.prefix, parent.node.keyParts[joinWith < parent.index ? parent.index - 1 : parent.index]);
if (joinWith < parent.index) {
copy = join(joiner, middleKey, copy);
from = joinWith;
to = parent.index;
} else {
copy = join(copy, middleKey, joiner);
from = parent.index;
to = joinWith;
}
garbage.push(joiner);
} else if (copy.length() <= node.occupiedSpace) {
if (copy.innerNodes != null && copy.innerNodes.length == 1 && stack.isEmpty()) {
IndexNode child = copy.innerNodes[0].getIndexNode(copy.segment);
return new JoinSplitResult(0, 0, Collections.singletonList(child));
} else {
// special case where we only overwrite the key
node.replaceContent(copy);
return null;
}
} else if (stack.isEmpty()) {
from = to = 0;
} else {
from = to = stack.peek().index;
}
if (copy.length() <= segment.getMaxNodeSize()) {
return new JoinSplitResult(from, to, Collections.singletonList(copy));
} else {
return new JoinSplitResult(from, to, copy.split());
}
}
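   /**
    * Merges two sibling nodes into one, inserting {@code middleKey} (the separator taken from the
    * parent) between the key parts of the left and right node and recomputing the common prefix.
    */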
private static IndexNode join(IndexNode left, byte[] middleKey, IndexNode right) throws IOException {
byte[] newPrefix = commonPrefix(left.prefix, right.prefix);
byte[][] newKeyParts = new byte[left.keyParts.length + right.keyParts.length + 1][];
newPrefix = commonPrefix(newPrefix, middleKey);
copyKeyParts(left.keyParts, 0, newKeyParts, 0, left.keyParts.length, left.prefix, newPrefix);
byte[] rightmostKey;
try {
rightmostKey = left.rightmostKey();
} catch (IndexNodeOutdatedException e) {
throw new IllegalStateException(e);
}
int commonLength = Math.abs(compare(middleKey, rightmostKey));
newKeyParts[left.keyParts.length] = substring(middleKey, newPrefix.length, commonLength);
copyKeyParts(right.keyParts, 0, newKeyParts, left.keyParts.length + 1, right.keyParts.length, right.prefix, newPrefix);
if (left.innerNodes != null && right.innerNodes != null) {
InnerNode[] newInnerNodes = new InnerNode[left.innerNodes.length + right.innerNodes.length];
System.arraycopy(left.innerNodes, 0, newInnerNodes, 0, left.innerNodes.length);
System.arraycopy(right.innerNodes, 0, newInnerNodes, left.innerNodes.length, right.innerNodes.length);
return new IndexNode(left.segment, newPrefix, newKeyParts, newInnerNodes);
} else if (left.leafNodes != null && right.leafNodes != null) {
LeafNode[] newLeafNodes = new LeafNode[left.leafNodes.length + right.leafNodes.length];
System.arraycopy(left.leafNodes, 0, newLeafNodes, 0, left.leafNodes.length);
System.arraycopy(right.leafNodes, 0, newLeafNodes, left.leafNodes.length, right.leafNodes.length);
return new IndexNode(left.segment, newPrefix, newKeyParts, newLeafNodes);
} else {
throw new IllegalArgumentException("Cannot join " + left + " and " + right);
}
}
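   /**
    * Creates a copy of this inner node with children {@code oldNodesFrom..oldNodesTo} replaced by
    * {@code newNodes}. Each new child is persisted into freshly allocated index space, and the
    * separator keys (leftmost keys of the new children) and the common prefix are recomputed.
    */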
private IndexNode copyWith(int oldNodesFrom, int oldNodesTo, List<IndexNode> newNodes) throws IOException {
InnerNode[] newInnerNodes = new InnerNode[innerNodes.length + newNodes.size() - 1 - oldNodesTo + oldNodesFrom];
System.arraycopy(innerNodes, 0, newInnerNodes, 0, oldNodesFrom);
System.arraycopy(innerNodes, oldNodesTo + 1, newInnerNodes, oldNodesFrom + newNodes.size(), innerNodes.length - oldNodesTo - 1);
for (int i = 0; i < newNodes.size(); ++i) {
IndexNode node = newNodes.get(i);
Index.IndexSpace space = segment.allocateIndexSpace(node.length());
node.store(space);
newInnerNodes[i + oldNodesFrom] = new InnerNode(node);
}
byte[][] newKeys = new byte[newNodes.size() - 1][];
byte[] newPrefix = prefix;
for (int i = 0; i < newKeys.length; ++i) {
try {
// TODO: if all keys within the subtree are null (deleted), the new key will be null
// will be fixed with proper index reduction
newKeys[i] = newNodes.get(i + 1).leftmostKey();
if (newKeys[i] == null) {
throw new IllegalStateException();
}
} catch (IndexNodeOutdatedException e) {
throw new IllegalStateException("Index cannot be outdated for segment updater thread", e);
}
newPrefix = commonPrefix(newPrefix, newKeys[i]);
}
byte[][] newKeyParts = new byte[keyParts.length + newNodes.size() - 1 - oldNodesTo + oldNodesFrom][];
copyKeyParts(keyParts, 0, newKeyParts, 0, oldNodesFrom, prefix, newPrefix);
copyKeyParts(keyParts, oldNodesTo, newKeyParts, oldNodesFrom + newKeys.length, keyParts.length - oldNodesTo, prefix, newPrefix);
for (int i = 0; i < newKeys.length; ++i) {
newKeyParts[i + oldNodesFrom] = substring(newKeys[i], newPrefix.length, newKeys[i].length);
}
return new IndexNode(segment, newPrefix, newKeyParts, newInnerNodes);
}
private byte[] leftmostKey() throws IOException, IndexNodeOutdatedException {
if (innerNodes != null) {
for (InnerNode innerNode : innerNodes) {
byte[] key = innerNode.getIndexNode(segment).leftmostKey();
if (key != null) return key;
}
} else {
for (LeafNode leafNode : leafNodes) {
EntryRecord hak = leafNode.loadHeaderAndKey(segment.getFileProvider());
if (hak.getKey() != null) return Index.toIndexKey(leafNode.cacheSegment, hak.getKey());
}
}
return null;
}
private byte[] rightmostKey() throws IOException, IndexNodeOutdatedException {
if (innerNodes != null) {
for (int i = innerNodes.length - 1; i >= 0; --i) {
byte[] key = innerNodes[i].getIndexNode(segment).rightmostKey();
if (key != null) return key;
}
} else {
for (int i = leafNodes.length - 1; i >= 0; --i) {
EntryRecord hak = leafNodes[i].loadHeaderAndKey(segment.getFileProvider());
if (hak.getKey() != null) return Index.toIndexKey(leafNodes[i].cacheSegment, hak.getKey());
}
}
return null;
}
/**
* Called on the most bottom node
*/
private IndexNode copyWith(IndexRequest request, int cacheSegment, byte[] indexKey, OverwriteHook overwriteHook, RecordChange recordChange) throws IOException {
if (leafNodes == null) throw new IllegalArgumentException();
byte[] newPrefix;
int file = request.getFile();
int offset = request.getOffset();
int size = request.getSize();
if (leafNodes.length == 0) {
overwriteHook.setOverwritten(request, cacheSegment, false, -1, -1);
if (overwriteHook.check(request, -1, -1)) {
return new IndexNode(segment, prefix, keyParts, new LeafNode[]{new LeafNode(file, offset, (short) 1, cacheSegment)});
} else {
segment.getCompactor().free(file, size);
return this;
}
}
int insertPart = getInsertionPoint(indexKey);
LeafNode oldLeafNode = leafNodes[insertPart];
short numRecords = oldLeafNode.numRecords;
switch (recordChange) {
case INCREASE:
case INCREASE_FOR_OLD:
if (numRecords == Short.MAX_VALUE) {
throw new IllegalStateException("Too many records for this key (short overflow)");
}
numRecords++;
break;
case MOVE:
break;
case DECREASE:
numRecords--;
break;
}
byte[][] newKeyParts;
LeafNode[] newLeafNodes;
EntryRecord hak;
try {
hak = oldLeafNode.loadHeaderAndKey(segment.getFileProvider());
} catch (IndexNodeOutdatedException e) {
throw new IllegalStateException("Index cannot be outdated for segment updater thread", e);
}
byte[] oldIndexKey = Index.toIndexKey(oldLeafNode.cacheSegment, hak.getKey());
int keyComp = compare(oldIndexKey, indexKey);
Object objectKey = request.getKey();
if (keyComp == 0) {
if (numRecords > 0) {
if (overwriteHook.check(request, oldLeafNode.file, oldLeafNode.offset)) {
if (recordChange == RecordChange.INCREASE || recordChange == RecordChange.MOVE) {
if (log.isTraceEnabled()) {
log.trace(String.format("Overwriting %s %d:%d with %d:%d (%d)", objectKey,
oldLeafNode.file, oldLeafNode.offset, file, offset, numRecords));
}
updateFileOffsetInFile(insertPart, file, offset, numRecords);
segment.getCompactor().free(oldLeafNode.file, hak.getHeader().totalLength());
} else {
if (log.isTraceEnabled()) {
log.trace(String.format("Updating num records for %s %d:%d to %d", objectKey, oldLeafNode.file, oldLeafNode.offset, numRecords));
}
if (recordChange == RecordChange.INCREASE_FOR_OLD) {
// Mark old files as freed for compactor when rebuilding index
segment.getCompactor().free(file, size);
}
// We don't need to update the file as the file and position are the same, only the numRecords
// has been updated for REMOVED
file = oldLeafNode.file;
offset = oldLeafNode.offset;
}
lock.writeLock().lock();
try {
leafNodes[insertPart] = new LeafNode(file, offset, numRecords, cacheSegment);
} finally {
lock.writeLock().unlock();
}
overwriteHook.setOverwritten(request, cacheSegment, true, oldLeafNode.file, oldLeafNode.offset);
return this;
} else {
overwriteHook.setOverwritten(request, cacheSegment, false, -1, -1);
segment.getCompactor().free(file, size);
return this;
}
} else {
overwriteHook.setOverwritten(request, cacheSegment, true, oldLeafNode.file, oldLeafNode.offset);
if (keyParts.length <= 1) {
newPrefix = Util.EMPTY_BYTE_ARRAY;
newKeyParts = Util.EMPTY_BYTE_ARRAY_ARRAY;
} else {
newPrefix = prefix;
newKeyParts = new byte[keyParts.length - 1][];
if (insertPart == keyParts.length) {
System.arraycopy(keyParts, 0, newKeyParts, 0, newKeyParts.length);
} else {
System.arraycopy(keyParts, 0, newKeyParts, 0, insertPart);
System.arraycopy(keyParts, insertPart + 1, newKeyParts, insertPart, newKeyParts.length - insertPart);
}
}
if (leafNodes.length > 0) {
newLeafNodes = new LeafNode[leafNodes.length - 1];
System.arraycopy(leafNodes, 0, newLeafNodes, 0, insertPart);
System.arraycopy(leafNodes, insertPart + 1, newLeafNodes, insertPart, newLeafNodes.length - insertPart);
} else {
newLeafNodes = leafNodes;
}
segment.getCompactor().free(oldLeafNode.file, hak.getHeader().totalLength());
}
} else {
// IndexRequest cannot be MOVED or DROPPED when the key is not in the index
assert recordChange == RecordChange.INCREASE;
overwriteHook.setOverwritten(request, cacheSegment, false, -1, -1);
// We have to insert the record even if this is a delete request and the key was not found
         // because otherwise we would have an incorrect numRecords count. Eventually the Compactor will
         // drop the tombstone and update the index, removing this node
if (keyParts.length == 0) {
// TODO: we may use unnecessarily long keys here and the key is never shortened
newPrefix = keyComp > 0 ? indexKey : oldIndexKey;
} else {
newPrefix = commonPrefix(prefix, indexKey);
}
newKeyParts = new byte[keyParts.length + 1][];
newLeafNodes = new LeafNode[leafNodes.length + 1];
copyKeyParts(keyParts, 0, newKeyParts, 0, insertPart, prefix, newPrefix);
copyKeyParts(keyParts, insertPart, newKeyParts, insertPart + 1, keyParts.length - insertPart, prefix, newPrefix);
if (keyComp > 0) {
newKeyParts[insertPart] = substring(indexKey, newPrefix.length, keyComp);
System.arraycopy(leafNodes, 0, newLeafNodes, 0, insertPart + 1);
System.arraycopy(leafNodes, insertPart + 1, newLeafNodes, insertPart + 2, leafNodes.length - insertPart - 1);
log.tracef("Creating new leafNode for %s at %d:%d", objectKey, file, offset);
newLeafNodes[insertPart + 1] = new LeafNode(file, offset, (short) 1, cacheSegment);
} else {
newKeyParts[insertPart] = substring(oldIndexKey, newPrefix.length, -keyComp);
System.arraycopy(leafNodes, 0, newLeafNodes, 0, insertPart);
System.arraycopy(leafNodes, insertPart, newLeafNodes, insertPart + 1, leafNodes.length - insertPart);
log.tracef("Creating new leafNode for %s at %d:%d", objectKey, file, offset);
newLeafNodes[insertPart] = new LeafNode(file, offset, (short) 1, cacheSegment);
}
}
return new IndexNode(segment, newPrefix, newKeyParts, newLeafNodes);
}
private int getIterationPoint(byte[] key, int cacheSegment) {
int comp = compare(key, prefix, prefix.length);
int insertionPoint;
if (comp > 0) {
insertionPoint = 0;
} else if (comp < 0) {
insertionPoint = keyParts.length;
} else {
byte[] keyPostfix = substring(key, prefix.length, key.length);
insertionPoint = Arrays.binarySearch(keyParts, keyPostfix, REVERSED_COMPARE_TO);
if (insertionPoint < 0) {
insertionPoint = -insertionPoint - 1;
} else {
int cacheSegmentToUse = cacheSegment < 0 ? UnsignedNumeric.readUnsignedInt(key, 0) : cacheSegment;
if (UnsignedNumeric.sizeUnsignedInt(cacheSegmentToUse) < key.length) {
               // When the key is longer than just the cache segment bytes, the index prefix identifies a
               // specific key; when it matches exactly we have to skip two positions
// Example:
// KeyParts
// 84 = {byte[12]@9221} [-100, 1, -104, 1, 2, -118, 1, 5, 10, 3, 40, -71]
// 85 = {byte[12]@9222} [-100, 1, -104, 1, 2, -118, 1, 5, 10, 3, 40, -60]
// 86 = {byte[13]@9223} [-100, 1, -104, 1, 2, -118, 1, 5, 10, 3, 40, -60, 14]
// 87 = {byte[12]@9224} [-100, 1, -104, 1, 2, -118, 1, 5, 10, 3, 40, -54]
// 88 = {byte[12]@9225} [-100, 1, -104, 1, 2, -118, 1, 5, 10, 3, 40, -48]
// Segment Prefix
// {byte[13] [-100, 1, -104, 1, 2, -118, 1, 5, 10, 3, 40, -60, 14]
// The actual value is stored at 87 in this case per `getInsertionPoint` so we need to skip to 88
// CacheSegment is -1 for an innerNode because we have to find where in the leaf node the value is
// CacheSegment is > 0 for a leafNode
insertionPoint += cacheSegment < 0 ? 1 : 2;
}
}
}
return insertionPoint;
}
private int getInsertionPoint(byte[] key) {
int comp = compare(key, prefix, prefix.length);
int insertionPoint;
if (comp > 0) {
insertionPoint = 0;
} else if (comp < 0) {
insertionPoint = keyParts.length;
} else {
byte[] keyPostfix = substring(key, prefix.length, key.length);
insertionPoint = Arrays.binarySearch(keyParts, keyPostfix, REVERSED_COMPARE_TO);
if (insertionPoint < 0) {
insertionPoint = -insertionPoint - 1;
} else {
insertionPoint++; // identical elements must go to the right
}
}
return insertionPoint;
}
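   /**
    * Splits this node into several nodes whose serialized length stays below the segment's
    * maximum node size, aiming for roughly equal part sizes. Each part's prefix can only be
    * extended relative to this node's prefix, which is accounted for when estimating part sizes.
    */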
private List<IndexNode> split() {
int headerLength = headerLength();
int contentLength = contentLength();
int maxLength = segment.getMaxNodeSize();
int targetParts = contentLength / Math.max(maxLength - headerLength, 1) + 1;
int targetLength = contentLength / targetParts + headerLength;
List<IndexNode> list = new ArrayList<>();
int childLength = innerNodes != null ? INNER_NODE_REFERENCE_SIZE : LEAF_NODE_REFERENCE_SIZE;
      byte[] prefixExtension = keyParts[0]; // the prefix can only be extended
int currentLength = INNER_NODE_HEADER_SIZE + prefix.length + prefixExtension.length + 2 * childLength + 2;
int nodeFrom = 0;
// TODO: under certain circumstances this algorithm can end up by splitting node into very uneven parts
// such as having one part with only 1 child, therefore only 15 bytes long
for (int i = 1; i < keyParts.length; ++i) {
int newLength;
byte[] newPrefixExtension = commonPrefix(prefixExtension, keyParts[i]);
if (newPrefixExtension.length != prefixExtension.length) {
newLength = currentLength + (prefixExtension.length - newPrefixExtension.length) * (i - nodeFrom - 1);
} else {
newLength = currentLength;
}
newLength += keyParts[i].length - newPrefixExtension.length + childLength + 2;
if (newLength < targetLength) {
currentLength = newLength;
} else {
IndexNode subNode;
if (newLength > maxLength) {
subNode = subNode(prefixExtension, nodeFrom, i);
++i;
} else {
subNode = subNode(newPrefixExtension, nodeFrom, i + 1);
i += 2;
}
list.add(subNode);
if (i < keyParts.length) {
newPrefixExtension = keyParts[i];
}
currentLength = INNER_NODE_HEADER_SIZE + prefix.length + newPrefixExtension.length + 2 * childLength + 2;
nodeFrom = i;
}
prefixExtension = newPrefixExtension;
}
if (nodeFrom <= keyParts.length) {
list.add(subNode(prefixExtension, nodeFrom, keyParts.length));
}
return list;
}
private IndexNode subNode(byte[] newPrefixExtension, int childFrom, int childTo) {
      // The first node takes children up to child[to + 1]; the others do not take child[from] == child[previousTo + 1].
// If the new node has > 1 keyParts, it ignores the first keyPart, otherwise it just sets the first child to be
// deleted (empty entry)
byte[][] newKeyParts = new byte[childTo - childFrom][];
if (newPrefixExtension.length > 0) {
for (int i = childFrom; i < childTo; ++i) {
newKeyParts[i - childFrom] = substring(keyParts[i], newPrefixExtension.length, keyParts[i].length);
}
} else {
System.arraycopy(keyParts, childFrom, newKeyParts, 0, childTo - childFrom);
}
byte[] newPrefix = childFrom == childTo ? Util.EMPTY_BYTE_ARRAY : concat(prefix, newPrefixExtension);
if (innerNodes != null) {
InnerNode[] newInnerNodes = new InnerNode[childTo - childFrom + 1];
System.arraycopy(innerNodes, childFrom, newInnerNodes, 0, childTo - childFrom + 1);
return new IndexNode(segment, newPrefix, newKeyParts, newInnerNodes);
} else if (leafNodes != null) {
LeafNode[] newLeafNodes = new LeafNode[childTo - childFrom + 1];
System.arraycopy(leafNodes, childFrom, newLeafNodes, 0, childTo - childFrom + 1);
return new IndexNode(segment, newPrefix, newKeyParts, newLeafNodes);
}
throw new IllegalStateException();
}
private static byte[] concat(byte[] first, byte[] second) {
if (first == null || first.length == 0) return second;
if (second == null || second.length == 0) return first;
byte[] result = new byte[first.length + second.length];
System.arraycopy(first, 0, result, 0, first.length);
System.arraycopy(second, 0, result, first.length, second.length);
return result;
}
private static void copyKeyParts(byte[][] src, int srcIndex, byte[][] dest, int destIndex, int length, byte[] oldPrefix, byte[] common) {
if (oldPrefix.length == common.length) {
System.arraycopy(src, srcIndex, dest, destIndex, length);
} else {
for (int i = 0; i < length; ++i) {
dest[destIndex + i] = findNewKeyPart(oldPrefix, src[srcIndex + i], common);
}
}
}
private static byte[] findNewKeyPart(byte[] oldPrefix, byte[] oldKeyPart, byte[] common) {
byte[] newPart = new byte[oldKeyPart.length + oldPrefix.length - common.length];
System.arraycopy(oldPrefix, common.length, newPart, 0, oldPrefix.length - common.length);
System.arraycopy(oldKeyPart, 0, newPart, oldPrefix.length - common.length, oldKeyPart.length);
return newPart;
}
private static byte[] substring(byte[] key, int begin, int end) {
if (end <= begin) return Util.EMPTY_BYTE_ARRAY;
if (begin == 0 && end == key.length) {
return key;
}
byte[] sub = new byte[end - begin];
System.arraycopy(key, begin, sub, 0, end - begin);
return sub;
}
private static byte[] commonPrefix(byte[] oldPrefix, byte[] newKey) {
int i = Arrays.mismatch(oldPrefix, newKey);
if (i == oldPrefix.length) {
return oldPrefix;
}
if (i == newKey.length) {
return newKey;
}
if (i == 0) {
return Util.EMPTY_BYTE_ARRAY;
}
byte[] prefix = new byte[i];
System.arraycopy(oldPrefix, 0, prefix, 0, i);
return prefix;
}
   // Compares first with the initial secondLength bytes of second. Unlike a regular compare,
   // when first matches that whole prefix the arrays are treated as equal, even if first is longer.
private static int compare(byte[] first, byte[] second, int secondLength) {
if (secondLength == 0) {
return 0;
}
int mismatchPos = Arrays.mismatch(first, 0, first.length, second, 0, secondLength);
if (mismatchPos == -1 || mismatchPos == secondLength) {
return 0;
} else if (mismatchPos >= first.length) {
return first.length + 1;
}
return second[mismatchPos] > first[mismatchPos] ? mismatchPos + 1 : -mismatchPos - 1;
}
public static final Comparator<byte[]> REVERSED_COMPARE_TO = ((Comparator<byte[]>) IndexNode::compare).reversed();
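   /**
    * Compares two byte arrays. Returns 0 when equal, a positive value when {@code first} sorts
    * before {@code second} and a negative value otherwise. The magnitude encodes the index of the
    * first differing byte plus one, which callers such as {@link #join} use (via
    * {@code Math.abs}) as the length of the distinguishing prefix.
    */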
private static int compare(byte[] first, byte[] second) {
// Use Arrays.mismatch as it doesn't do boundary check for every byte and uses vectorized comparison for arrays
// larger than 7
int mismatchPos = Arrays.mismatch(first, second);
if (mismatchPos == -1) {
return 0;
} else if (mismatchPos >= first.length) {
return first.length + 1;
} else if (mismatchPos >= second.length) {
return -second.length - 1;
}
return second[mismatchPos] > first[mismatchPos] ? mismatchPos + 1 : -mismatchPos - 1;
}
private short headerLength() {
int headerLength = INNER_NODE_HEADER_SIZE + prefix.length;
assert headerLength <= Short.MAX_VALUE;
return (short) headerLength;
}
private short keyPartsLength() {
if (keyPartsLength >= 0) {
return keyPartsLength;
}
int sum = 0;
for (byte[] keyPart : keyParts) {
sum += 2 + keyPart.length;
}
assert sum <= Short.MAX_VALUE;
return keyPartsLength = (short) sum;
}
private short contentLength() {
if (contentLength >= 0) {
return contentLength;
}
int sum = keyPartsLength();
if (innerNodes != null) {
sum += INNER_NODE_REFERENCE_SIZE * innerNodes.length;
} else if (leafNodes != null) {
sum += LEAF_NODE_REFERENCE_SIZE * leafNodes.length;
} else {
throw new IllegalStateException();
}
assert sum <= Short.MAX_VALUE;
return contentLength = (short) sum;
}
public short length() {
if (totalLength >= 0) return totalLength;
int totalLength = headerLength() + contentLength();
assert totalLength >= 0 && totalLength <= Short.MAX_VALUE;
return this.totalLength = (short) totalLength;
}
public static IndexNode emptyWithLeaves(Index.Segment segment) {
return new IndexNode(segment, Util.EMPTY_BYTE_ARRAY, Util.EMPTY_BYTE_ARRAY_ARRAY, LeafNode.EMPTY_ARRAY);
}
private static IndexNode emptyWithInnerNodes(Index.Segment segment) {
return new IndexNode(segment, Util.EMPTY_BYTE_ARRAY, Util.EMPTY_BYTE_ARRAY_ARRAY, new InnerNode[]{new InnerNode(-1L, (short) -1)});
}
static final OverwriteHook NOOP_HOOK = (IndexRequest request, int cacheSegment, boolean overwritten, int prevFile, int prevOffset) -> { };
public interface OverwriteHook {
default boolean check(IndexRequest request, int oldFile, int oldOffset) {
return true;
}
void setOverwritten(IndexRequest request, int cacheSegment, boolean overwritten, int prevFile, int prevOffset);
}
static class InnerNode extends Index.IndexSpace {
private volatile SoftReference<IndexNode> reference;
InnerNode(long offset, short length) {
super(offset, length);
}
InnerNode(IndexNode node) {
super(node.offset, node.occupiedSpace);
reference = new SoftReference<>(node);
}
IndexNode getIndexNode(Index.Segment segment) throws IOException {
IndexNode node;
if (reference == null || (node = reference.get()) == null) {
synchronized (this) {
if (reference == null || (node = reference.get()) == null) {
if (offset < 0) return null;
// Is this okay?
node = new IndexNode(segment, offset, length);
reference = new SoftReference<>(node);
if (log.isTraceEnabled()) {
log.trace("Loaded inner node from " + offset + " - " + length);
}
}
}
}
return node;
}
}
private static class LeafNode extends EntryInfo {
private static final LeafNode[] EMPTY_ARRAY = new LeafNode[0];
private volatile SoftReference<EntryRecord> keyReference;
LeafNode(int file, int offset, short numRecords, int cacheSegment) {
super(file, offset, numRecords, cacheSegment);
}
public EntryRecord loadHeaderAndKey(FileProvider fileProvider) throws IOException, IndexNodeOutdatedException {
return getHeaderAndKey(fileProvider, null);
}
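      /**
       * Double-checked lazy load of the entry header and key, cached through a
       * {@link SoftReference} so the cached keys can be dropped under memory pressure. Throws
       * {@link IndexNodeOutdatedException} when the backing data file is no longer available.
       */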
private EntryRecord getHeaderAndKey(FileProvider fileProvider, FileProvider.Handle handle) throws IOException, IndexNodeOutdatedException {
EntryRecord headerAndKey;
if (keyReference == null || (headerAndKey = keyReference.get()) == null) {
synchronized (this) {
if (keyReference == null || (headerAndKey = keyReference.get()) == null) {
boolean ownHandle = false;
if (handle == null) {
ownHandle = true;
handle = fileProvider.getFile(file);
if (handle == null) {
throw new IndexNodeOutdatedException(file + ":" + offset + " (" + numRecords + ")");
}
}
try {
int readOffset = offset < 0 ? ~offset : offset;
EntryHeader header = EntryRecord.readEntryHeader(handle, readOffset);
if (header == null) {
throw new IllegalStateException("Error reading header from " + file + ":" + readOffset + " | " + handle.getFileSize());
}
byte[] key = EntryRecord.readKey(handle, header, readOffset);
if (key == null) {
throw new IllegalStateException("Error reading key from " + file + ":" + readOffset);
}
headerAndKey = new EntryRecord(header, key);
keyReference = new SoftReference<>(headerAndKey);
} finally {
if (ownHandle) {
handle.close();
}
}
}
}
}
assert headerAndKey.getKey() != null;
return headerAndKey;
}
public EntryRecord loadRecord(FileProvider fileProvider, org.infinispan.commons.io.ByteBuffer key, TimeService timeService) throws IOException, IndexNodeOutdatedException {
FileProvider.Handle handle = fileProvider.getFile(file);
int readOffset = offset < 0 ? ~offset : offset;
if (handle == null) {
throw new IndexNodeOutdatedException(file + ":" + readOffset);
}
try {
boolean trace = log.isTraceEnabled();
EntryRecord headerAndKey = getHeaderAndKey(fileProvider, handle);
if (key != null && !entryKeyEqualsBuffer(headerAndKey, key)) {
if (trace) {
log.trace("Key on " + file + ":" + readOffset + " not matched.");
}
return null;
}
if (headerAndKey.getHeader().valueLength() <= 0) {
if (trace) {
log.trace("Entry " + file + ":" + readOffset + " matched, it is a tombstone.");
}
return null;
}
if (timeService != null && headerAndKey.getHeader().expiryTime() > 0 && headerAndKey.getHeader().expiryTime() <= timeService.wallClockTime()) {
if (trace) {
log.trace("Key on " + file + ":" + readOffset + " matched but expired.");
}
return null;
}
if (trace) {
log.trace("Loaded from " + file + ":" + readOffset);
}
return headerAndKey.loadMetadataAndValue(handle, readOffset, key != null);
} finally {
handle.close();
}
}
}
private static class IndexNodeOutdatedException extends Exception {
IndexNodeOutdatedException(String message) {
super(message);
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i <= keyParts.length; ++i) {
sb.append('\n');
if (leafNodes != null && i < leafNodes.length) {
sb.append(" [").append(leafNodes[i].file).append(':').append(leafNodes[i].offset).append(':').append(leafNodes[i].cacheSegment).append("] ");
} else {
sb.append(" [").append(innerNodes[i].offset).append(':').append(innerNodes[i].length).append("] ");
}
if (i < keyParts.length) {
sb.append(new String(concat(prefix, keyParts[i])));
}
}
sb.append('\n');
return sb.toString();
}
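   /**
    * Publishes all live entries belonging to {@code cacheSegments} from this (root) node. Segment
    * prefixes are sorted so iteration proceeds left to right through the tree; the deque of
    * remaining prefixes is mutated as segments complete, and the outer loop retries while
    * {@code done} is set but prefixes remain (e.g. after an outdated node or a backpressure pause).
    */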
Flowable<EntryRecord> publish(IntSet cacheSegments, boolean loadValues) {
long currentTime = segment.getTimeService().wallClockTime();
int cacheSegmentSize = cacheSegments.size();
if (cacheSegmentSize == 0) {
return Flowable.empty();
}
// Needs defer as we mutate the deque so publisher can be subscribed to multiple times
return Flowable.defer(() -> {
// First sort all the cacheSegments by their unsigned numeric byte[] values.
// This allows us to start at the left most node, and we can iterate within the nodes if the cacheSegments are
// contiguous in the data
Deque<byte[]> sortedSegmentPrefixes = cacheSegments.intStream()
.filter(cacheSegment -> segment.index.sizePerSegment.get(cacheSegment) != 0)
.mapToObj(cacheSegment -> {
byte[] segmentPrefix = new byte[UnsignedNumeric.sizeUnsignedInt(cacheSegment)];
UnsignedNumeric.writeUnsignedInt(segmentPrefix, 0, cacheSegment);
return segmentPrefix;
}).sorted(REVERSED_COMPARE_TO)
.collect(Collectors.toCollection(ArrayDeque::new));
if (sortedSegmentPrefixes.isEmpty()) {
return Flowable.empty();
}
return new FlowableCreate<>(emitter -> {
         // Set to true in 3 different cases: the cacheSegment didn't map to the next entry, the emitter has no more requests, or the emitter was cancelled
ByRef.Boolean done = new ByRef.Boolean(false);
do {
// Reset so we can loop
done.set(false);
recursiveNode(this, segment, sortedSegmentPrefixes, emitter, loadValues, currentTime, new ByRef.Boolean(false), done, false);
// This handles two of the done cases - in which case we can't continue
if (emitter.requested() == 0 || emitter.isCancelled()) {
return;
}
} while (done.get() && !sortedSegmentPrefixes.isEmpty());
emitter.onComplete();
}, BackpressureStrategy.ERROR);
});
}
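   /**
    * Iterates one node of the tree on behalf of {@link #publish}. {@code foundData} tracks whether
    * the starting point for the current segment prefix has already been located (subsequent nodes
    * iterate from index 0); {@code done} is set whenever iteration must stop and possibly be
    * retried, e.g. on a segment-prefix mismatch, an outdated node, or an exhausted or cancelled
    * emitter; {@code firstNodeAttempted} marks the first child tried at the parent's iteration
    * point.
    */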
void recursiveNode(IndexNode node, Index.Segment segment, Deque<byte[]> segmentPrefixes, FlowableEmitter<EntryRecord> emitter,
boolean loadValues, long currentTime, ByRef.Boolean foundData, ByRef.Boolean done, boolean firstNodeAttempted) throws IOException {
Lock readLock = node.lock.readLock();
readLock.lock();
try {
byte[] previousKey = null;
int previousSegment = -1;
if (node.innerNodes != null) {
final int point = foundData.get() ? 0 : node.getIterationPoint(segmentPrefixes.getFirst(), -1);
// Need to search all inner nodes starting from that point until we hit the last entry for the segment
for (int i = point; !segmentPrefixes.isEmpty() && i < node.innerNodes.length && !done.get(); ++i) {
recursiveNode(node.innerNodes[i].getIndexNode(segment), segment, segmentPrefixes, emitter, loadValues,
currentTime, foundData, done, i == point);
}
} else if (node.leafNodes != null) {
int suggestedIteration;
byte[] segmentPrefix = segmentPrefixes.getFirst();
int cacheSegment = UnsignedNumeric.readUnsignedInt(segmentPrefix, 0);
boolean firstData = !foundData.get();
if (firstData) {
suggestedIteration = node.getIterationPoint(segmentPrefix, cacheSegment);
foundData.set(true);
} else {
suggestedIteration = 0;
}
for (int i = suggestedIteration; i < node.leafNodes.length; ++i) {
LeafNode leafNode = node.leafNodes[i];
if (leafNode.cacheSegment != cacheSegment) {
                  // The suggested iteration point may be off by one if the index key prefix is longer than the segment prefix but equal over the compared length
if (i == suggestedIteration && firstData
&& segmentPrefix.length == UnsignedNumeric.sizeUnsignedInt(cacheSegment)) {
// No entry for the given segment, make sure to try next segment
if (i != node.leafNodes.length - 1
&& (i == node.keyParts.length ||
(i < node.keyParts.length &&
compare(node.keyParts[i], segmentPrefix, Math.min(segmentPrefix.length, node.keyParts[i].length)) == 0)))
continue;
// The cache segment does not map to the current innerNode, we are at the end of the leafNodes,
// and this is the first innerNode attempted. We need to also check the first leaf of the next innerNode if present.
if (i == node.leafNodes.length - 1 && firstNodeAttempted) {
return;
}
}
segmentPrefixes.removeFirst();
// If the data maps to the next segment in our ordered queue, we can continue reading,
// otherwise we end and the retry will kick in
segmentPrefix = segmentPrefixes.peekFirst();
if (segmentPrefix != null) {
cacheSegment = UnsignedNumeric.readUnsignedInt(segmentPrefix, 0);
}
// Next cacheSegment doesn't match either, thus we have to retry with the next prefix
// Note that if segmentPrefix is null above, this will always be true
if (leafNode.cacheSegment != cacheSegment) {
done.set(true);
return;
}
}
EntryRecord record;
try {
if (loadValues) {
log.tracef("Loading record for leafNode: %s", leafNode);
record = leafNode.loadRecord(segment.getFileProvider(), null, segment.getTimeService());
} else {
log.tracef("Loading header and key for leafNode: %s", leafNode);
record = leafNode.getHeaderAndKey(segment.getFileProvider(), null);
}
} catch (IndexNodeOutdatedException e) {
// Current key was outdated, we have to try from the previous entry we saw (note it is skipped)
if (previousKey != null) {
byte[] currentIndexKey = Index.toIndexKey(previousSegment, previousKey);
segmentPrefixes.removeFirst();
segmentPrefixes.addFirst(currentIndexKey);
}
done.set(true);
return;
}
if (record != null && record.getHeader().valueLength() > 0) {
                  // The very first entry looked up may be a value we already emitted; if it is equal
                  // we must skip it so it is not returned twice.
// The current segmentPrefix will match the element's key bytes excluding the segment bytes
if (firstData && i == suggestedIteration) {
int keyLength = record.getHeader().keyLength();
int lengthDiff = segmentPrefix.length - keyLength;
if (lengthDiff > 0) {
byte[] keyArray = record.getKey();
if (Util.arraysEqual(keyArray, 0, keyArray.length, segmentPrefix, lengthDiff, segmentPrefix.length)) {
continue;
}
}
}
long expiryTime = record.getHeader().expiryTime();
if (expiryTime < 0 || expiryTime > currentTime) {
emitter.onNext(record);
if (emitter.requested() == 0) {
// Store the current key as the next prefix when we can't retrieve more values, so
// the next request will get the next value after this one
byte[] currentIndexKey = Index.toIndexKey(cacheSegment, record.getKey());
segmentPrefixes.removeFirst();
segmentPrefixes.addFirst(currentIndexKey);
done.set(true);
return;
} else if (emitter.isCancelled()) {
done.set(true);
return;
}
}
previousKey = record.getKey();
previousSegment = cacheSegment;
}
}
// We are continuing with the next innerNode, save the previous key, just in case we get an outdated
// exception on the first entry
if (previousKey != null) {
byte[] currentIndexKey = Index.toIndexKey(previousSegment, previousKey);
segmentPrefixes.removeFirst();
segmentPrefixes.addFirst(currentIndexKey);
}
}
} finally {
readLock.unlock();
}
}
}
| 62,891
| 43.954968
| 200
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/SoftIndexFileStoreConfigurationBuilder.java
|
package org.infinispan.persistence.sifs.configuration;
import static org.infinispan.configuration.cache.AbstractStoreConfiguration.SEGMENTED;
import static org.infinispan.persistence.sifs.configuration.SoftIndexFileStoreConfiguration.COMPACTION_THRESHOLD;
import static org.infinispan.persistence.sifs.configuration.SoftIndexFileStoreConfiguration.OPEN_FILES_LIMIT;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfigurationBuilder;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalStateConfiguration;
import org.infinispan.persistence.PersistenceUtil;
import org.infinispan.persistence.sifs.Log;
import org.infinispan.persistence.sifs.NonBlockingSoftIndexFileStore;
import org.infinispan.util.logging.LogFactory;
/**
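 * <p>
 * A minimal usage sketch; the paths are hypothetical and the surrounding
 * {@code ConfigurationBuilder} plumbing is the standard Infinispan configuration API, while the
 * store-specific methods are the ones defined in this class:
 * <pre>{@code
 * ConfigurationBuilder builder = new ConfigurationBuilder();
 * builder.persistence()
 *        .addStore(SoftIndexFileStoreConfigurationBuilder.class)
 *        .dataLocation("/var/lib/infinispan/data")    // hypothetical path
 *        .indexLocation("/var/lib/infinispan/index")  // hypothetical path
 *        .maxFileSize(16 * 1024 * 1024)
 *        .compactionThreshold(0.5)
 *        .syncWrites(false);
 * }</pre>
 *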
* @author Radim Vansa <rvansa@redhat.com>
*/
public class SoftIndexFileStoreConfigurationBuilder extends AbstractStoreConfigurationBuilder<SoftIndexFileStoreConfiguration, SoftIndexFileStoreConfigurationBuilder> {
private static final Log log = LogFactory.getLog(SoftIndexFileStoreConfigurationBuilder.class, Log.class);
protected final IndexConfigurationBuilder index = new IndexConfigurationBuilder();
protected final DataConfigurationBuilder data = new DataConfigurationBuilder();
public SoftIndexFileStoreConfigurationBuilder(PersistenceConfigurationBuilder builder) {
this(builder, SoftIndexFileStoreConfiguration.attributeDefinitionSet(),
AsyncStoreConfiguration.attributeDefinitionSet());
}
public SoftIndexFileStoreConfigurationBuilder(PersistenceConfigurationBuilder builder, AttributeSet attributeSet,
AttributeSet asyncAttributeSet) {
super(builder, attributeSet, asyncAttributeSet);
}
/**
* The path where the Soft-Index store will keep its data files. Under this location the store will create
* a directory named after the cache name, under which a <code>data</code> directory will be created.
*
* The default behaviour is to use the {@link GlobalStateConfiguration#persistentLocation()}.
*/
   public SoftIndexFileStoreConfigurationBuilder dataLocation(String dataLocation) {
data.dataLocation(dataLocation);
return this;
}
/**
* The path where the Soft-Index store will keep its index files. Under this location the store will create
* a directory named after the cache name, under which a <code>index</code> directory will be created.
*
* The default behaviour is to use the {@link GlobalStateConfiguration#persistentLocation()}.
*/
   public SoftIndexFileStoreConfigurationBuilder indexLocation(String indexLocation) {
index.indexLocation(indexLocation);
return this;
}
/**
* Number of index segment files. Increasing this value improves throughput but requires more threads to be spawned.
* <p>
* Defaults to <code>16</code>.
*/
   public SoftIndexFileStoreConfigurationBuilder indexSegments(int indexSegments) {
index.indexSegments(indexSegments);
return this;
}
/**
* Sets the maximum size of single data file with entries, in bytes.
*
* Defaults to <code>16777216</code> (16MB).
*/
   public SoftIndexFileStoreConfigurationBuilder maxFileSize(int maxFileSize) {
data.maxFileSize(maxFileSize);
return this;
}
/**
* If the size of the node (continuous block on filesystem used in index implementation) drops below this threshold,
* the node will try to balance its size with some neighbour node, possibly causing join of multiple nodes.
*
* Defaults to <code>0</code>.
*/
   public SoftIndexFileStoreConfigurationBuilder minNodeSize(int minNodeSize) {
index.minNodeSize(minNodeSize);
return this;
}
/**
* Max size of node (continuous block on filesystem used in index implementation), in bytes.
*
* Defaults to <code>4096</code>.
*/
   public SoftIndexFileStoreConfigurationBuilder maxNodeSize(int maxNodeSize) {
index.maxNodeSize(maxNodeSize);
return this;
}
/**
* Sets the maximum number of entry writes that are waiting to be written to the index, per index segment.
*
* Defaults to <code>1000</code>.
*/
   public SoftIndexFileStoreConfigurationBuilder indexQueueLength(int indexQueueLength) {
index.indexQueueLength(indexQueueLength);
return this;
}
/**
 * Sets whether writes should wait to be fsynced to disk.
*
* Defaults to <code>false</code>.
*/
   public SoftIndexFileStoreConfigurationBuilder syncWrites(boolean syncWrites) {
data.syncWrites(syncWrites);
return this;
}
/**
* Sets the maximum number of open files.
*
* Defaults to <code>1000</code>.
*/
   public SoftIndexFileStoreConfigurationBuilder openFilesLimit(int openFilesLimit) {
attributes.attribute(OPEN_FILES_LIMIT).set(openFilesLimit);
return this;
}
/**
* If the amount of unused space in some data file gets above this threshold, the file is compacted - entries from that file are copied to a new file and the old file is deleted.
*
* Defaults to <code>0.5</code> (50%).
*/
   public SoftIndexFileStoreConfigurationBuilder compactionThreshold(double compactionThreshold) {
attributes.attribute(COMPACTION_THRESHOLD).set(compactionThreshold);
return this;
}
@Override
   public SoftIndexFileStoreConfiguration create() {
return new SoftIndexFileStoreConfiguration(attributes.protect(), async.create(), index.create(), data.create());
}
@Override
   public Builder<?> read(SoftIndexFileStoreConfiguration template, Combine combine) {
super.read(template, combine);
index.read(template.index(), combine);
data.read(template.data(), combine);
return this;
}
@Override
   public SoftIndexFileStoreConfigurationBuilder self() {
return this;
}
@Override
   protected void validate(boolean skipClassChecks) {
Attribute<Boolean> segmentedAttribute = attributes.attribute(SEGMENTED);
if (segmentedAttribute.isModified() && !segmentedAttribute.get()) {
throw org.infinispan.util.logging.Log.CONFIG.storeRequiresBeingSegmented(NonBlockingSoftIndexFileStore.class.getSimpleName());
}
super.validate(skipClassChecks);
index.validate();
double compactionThreshold = attributes.attribute(COMPACTION_THRESHOLD).get();
if (compactionThreshold <= 0 || compactionThreshold > 1) {
throw log.invalidCompactionThreshold(compactionThreshold);
}
}
@Override
   public void validate(GlobalConfiguration globalConfig) {
PersistenceUtil.validateGlobalStateStoreLocation(globalConfig, NonBlockingSoftIndexFileStore.class.getSimpleName(),
data.attributes().attribute(DataConfiguration.DATA_LOCATION),
index.attributes().attribute(IndexConfiguration.INDEX_LOCATION));
super.validate(globalConfig);
}
@Override
   public String toString() {
return "SoftIndexFileStoreConfigurationBuilder{" +
"index=" + index +
", data=" + data +
", attributes=" + attributes +
", async=" + async +
'}';
}
}
| 8,073
| 40.405128
| 184
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/package-info.java
|
/**
* Configuration for {@link org.infinispan.persistence.sifs.SoftIndexFileStore}.
*
* @api.public
*/
package org.infinispan.persistence.sifs.configuration;
| 162
| 22.285714
| 80
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/IndexConfigurationBuilder.java
|
package org.infinispan.persistence.sifs.configuration;
import static org.infinispan.persistence.sifs.configuration.IndexConfiguration.INDEX_LOCATION;
import static org.infinispan.persistence.sifs.configuration.IndexConfiguration.INDEX_QUEUE_LENGTH;
import static org.infinispan.persistence.sifs.configuration.IndexConfiguration.INDEX_SEGMENTS;
import static org.infinispan.persistence.sifs.configuration.IndexConfiguration.MAX_NODE_SIZE;
import static org.infinispan.persistence.sifs.configuration.IndexConfiguration.MIN_NODE_SIZE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.persistence.sifs.Log;
import org.infinispan.util.logging.LogFactory;
/**
* @since 10.0
*/
public class IndexConfigurationBuilder implements Builder<IndexConfiguration> {
private static final Log log = LogFactory.getLog(IndexConfigurationBuilder.class, Log.class);
private final AttributeSet attributes;
public IndexConfigurationBuilder() {
this.attributes = IndexConfiguration.attributeDefinitionSet();
}
public AttributeSet attributes() {
return attributes;
}
public IndexConfigurationBuilder indexLocation(String indexLocation) {
attributes.attribute(INDEX_LOCATION).set(indexLocation);
return this;
}
public IndexConfigurationBuilder indexSegments(int indexSegments) {
attributes.attribute(INDEX_SEGMENTS).set(indexSegments);
return this;
}
public IndexConfigurationBuilder minNodeSize(int minNodeSize) {
attributes.attribute(MIN_NODE_SIZE).set(minNodeSize);
return this;
}
public IndexConfigurationBuilder maxNodeSize(int maxNodeSize) {
attributes.attribute(MAX_NODE_SIZE).set(maxNodeSize);
return this;
}
public IndexConfigurationBuilder indexQueueLength(int indexQueueLength) {
attributes.attribute(INDEX_QUEUE_LENGTH).set(indexQueueLength);
return this;
}
@Override
public IndexConfiguration create() {
return new IndexConfiguration(attributes.protect());
}
@Override
public Builder<?> read(IndexConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
@Override
public void validate() {
int minNodeSize = attributes.attribute(MIN_NODE_SIZE).get();
int maxNodeSize = attributes.attribute(MAX_NODE_SIZE).get();
if (maxNodeSize <= 0 || maxNodeSize > Short.MAX_VALUE) {
throw log.maxNodeSizeLimitedToShort(maxNodeSize);
} else if (minNodeSize < 0 || minNodeSize > maxNodeSize) {
throw log.minNodeSizeMustBeLessOrEqualToMax(minNodeSize, maxNodeSize);
}
}
@Override
public String toString() {
return "IndexConfigurationBuilder{" +
"attributes=" + attributes +
'}';
}
}
| 2,921
| 32.586207
| 98
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/DataConfigurationBuilder.java
|
package org.infinispan.persistence.sifs.configuration;
import static org.infinispan.persistence.sifs.configuration.DataConfiguration.DATA_LOCATION;
import static org.infinispan.persistence.sifs.configuration.DataConfiguration.MAX_FILE_SIZE;
import static org.infinispan.persistence.sifs.configuration.DataConfiguration.SYNC_WRITES;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public class DataConfigurationBuilder implements Builder<DataConfiguration> {
private final AttributeSet attributes;
public DataConfigurationBuilder() {
this.attributes = DataConfiguration.attributeDefinitionSet();
}
public AttributeSet attributes() {
return attributes;
}
public DataConfigurationBuilder dataLocation(String dataLocation) {
attributes.attribute(DATA_LOCATION).set(dataLocation);
return this;
}
public DataConfigurationBuilder maxFileSize(int maxFileSize) {
attributes.attribute(MAX_FILE_SIZE).set(maxFileSize);
return this;
}
public DataConfigurationBuilder syncWrites(boolean syncWrites) {
attributes.attribute(SYNC_WRITES).set(syncWrites);
return this;
}
@Override
public DataConfiguration create() {
return new DataConfiguration(attributes.protect());
}
@Override
public Builder<?> read(DataConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}
| 1,555
| 30.755102
| 92
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/SoftIndexFileStoreConfiguration.java
|
package org.infinispan.persistence.sifs.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.ConfigurationFor;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.parsing.Attribute;
import org.infinispan.configuration.parsing.Element;
import org.infinispan.persistence.sifs.NonBlockingSoftIndexFileStore;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
@BuiltBy(SoftIndexFileStoreConfigurationBuilder.class)
@ConfigurationFor(NonBlockingSoftIndexFileStore.class)
public class SoftIndexFileStoreConfiguration extends AbstractStoreConfiguration<SoftIndexFileStoreConfiguration> {
public static final AttributeDefinition<Integer> OPEN_FILES_LIMIT = AttributeDefinition.builder(Attribute.OPEN_FILES_LIMIT, 1000).immutable().build();
public static final AttributeDefinition<Double> COMPACTION_THRESHOLD = AttributeDefinition.builder(Attribute.COMPACTION_THRESHOLD, 0.5d).immutable().build();
private final IndexConfiguration index;
private final DataConfiguration data;
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(SoftIndexFileStoreConfiguration.class, AbstractStoreConfiguration.attributeDefinitionSet(), OPEN_FILES_LIMIT, COMPACTION_THRESHOLD);
}
public SoftIndexFileStoreConfiguration(AttributeSet attributes,
AsyncStoreConfiguration async,
IndexConfiguration indexConfiguration,
DataConfiguration dataConfiguration) {
super(Element.FILE_STORE, attributes, async, indexConfiguration, dataConfiguration);
index = indexConfiguration;
data = dataConfiguration;
}
public String dataLocation() {
return data.dataLocation();
}
public String indexLocation() {
return index.indexLocation();
}
public int indexSegments() {
return index.indexSegments();
}
public int maxFileSize() {
return data.maxFileSize();
}
public int minNodeSize() {
return index.minNodeSize();
}
public int maxNodeSize() {
return index.maxNodeSize();
}
public int indexQueueLength() {
return index.indexQueueLength();
}
public boolean syncWrites() {
return data.syncWrites();
}
public int openFilesLimit() {
return attributes.attribute(OPEN_FILES_LIMIT).get();
}
public double compactionThreshold() {
return attributes.attribute(COMPACTION_THRESHOLD).get();
}
public IndexConfiguration index() {
return index;
}
public DataConfiguration data() {
return data;
}
}
| 2,927
| 33.046512
| 162
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/DataConfiguration.java
|
package org.infinispan.persistence.sifs.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.attributes.ConfigurationElement;
import org.infinispan.configuration.parsing.Attribute;
import org.infinispan.configuration.parsing.Element;
public class DataConfiguration extends ConfigurationElement<DataConfiguration> {
public static final AttributeDefinition<String> DATA_LOCATION = AttributeDefinition.builder(Attribute.PATH, null, String.class).immutable().autoPersist(false).build();
public static final AttributeDefinition<Integer> MAX_FILE_SIZE = AttributeDefinition.builder(Attribute.MAX_FILE_SIZE, 16 * 1024 * 1024).immutable().autoPersist(false).build();
public static final AttributeDefinition<Boolean> SYNC_WRITES = AttributeDefinition.builder(Attribute.SYNC_WRITES, false).immutable().autoPersist(false).build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(DataConfiguration.class, DATA_LOCATION, MAX_FILE_SIZE, SYNC_WRITES);
}
DataConfiguration(AttributeSet attributes) {
super(Element.DATA, attributes);
}
public int maxFileSize() {
return attributes.attribute(MAX_FILE_SIZE).get();
}
public boolean syncWrites() {
return attributes.attribute(SYNC_WRITES).get();
}
public String dataLocation() {
return attributes.attribute(DATA_LOCATION).get();
}
public void setDataLocation(String newLocation) {
attributes.attribute(DATA_LOCATION).set(newLocation);
}
}
| 1,639
| 42.157895
| 178
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/configuration/IndexConfiguration.java
|
package org.infinispan.persistence.sifs.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.attributes.ConfigurationElement;
import org.infinispan.configuration.parsing.Attribute;
import org.infinispan.configuration.parsing.Element;
public class IndexConfiguration extends ConfigurationElement<IndexConfiguration> {
public static final AttributeDefinition<String> INDEX_LOCATION = AttributeDefinition.builder(Attribute.PATH, null, String.class).immutable().autoPersist(false).build();
public static final AttributeDefinition<Integer> INDEX_QUEUE_LENGTH = AttributeDefinition.builder(Attribute.INDEX_QUEUE_LENGTH, 1000).immutable().autoPersist(false).build();
public static final AttributeDefinition<Integer> INDEX_SEGMENTS = AttributeDefinition.builder(Attribute.SEGMENTS, 3).immutable().autoPersist(false).build();
public static final AttributeDefinition<Integer> MIN_NODE_SIZE = AttributeDefinition.builder(Attribute.MIN_NODE_SIZE, 0).immutable().autoPersist(false).build();
public static final AttributeDefinition<Integer> MAX_NODE_SIZE = AttributeDefinition.builder(Attribute.MAX_NODE_SIZE, 4096).immutable().autoPersist(false).build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(IndexConfiguration.class, INDEX_LOCATION, INDEX_QUEUE_LENGTH, INDEX_SEGMENTS, MIN_NODE_SIZE, MAX_NODE_SIZE);
}
public IndexConfiguration(AttributeSet attributes) {
super(Element.INDEX, attributes);
}
public String indexLocation() {
return attributes.attribute(INDEX_LOCATION).get();
}
public void setLocation(String location) {
attributes.attribute(INDEX_LOCATION).set(location);
}
public int indexSegments() {
return attributes.attribute(INDEX_SEGMENTS).get();
}
public int minNodeSize() {
return attributes.attribute(MIN_NODE_SIZE).get();
}
public int maxNodeSize() {
return attributes.attribute(MAX_NODE_SIZE).get();
}
public int indexQueueLength() {
return attributes.attribute(INDEX_QUEUE_LENGTH).get();
}
}
| 2,206
| 44.040816
| 176
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/pmem/PmemUtilWrapper.java
|
package org.infinispan.persistence.sifs.pmem;
import java.io.File;
import java.io.FileNotFoundException;
import java.nio.channels.FileChannel;
import io.mashona.logwriting.PmemUtil;
/**
* This class is here solely for the purpose of encapsulating the {@link PmemUtil} class so we do not load it unless
* necessary, allowing this to be an optional dependency. Any code that invokes a method in this class should first
* check if the {@link PmemUtil} can be loaded via {@link Class#forName(String)} otherwise a {@link ClassNotFoundException}
* may be thrown when loading this class.
*/
public class PmemUtilWrapper {
/**
* Same as {@link PmemUtil#pmemChannelFor(File, int, boolean, boolean)}.
*/
   public static FileChannel pmemChannelFor(File file, int length, boolean create, boolean readSharedMetadata) throws FileNotFoundException {
return PmemUtil.pmemChannelFor(file, length, create, readSharedMetadata);
}
}
| 942
| 40
| 141
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/internal/PersistenceUtil.java
|
package org.infinispan.persistence.internal;
import static org.infinispan.util.logging.Log.CONFIG;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.configuration.ConfigurationFor;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.AbstractSegmentedStoreConfiguration;
import org.infinispan.configuration.cache.CustomStoreConfiguration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.metadata.Metadata;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.support.ComposedSegmentedLoadWriteStore;
import org.infinispan.persistence.support.NonBlockingStoreAdapter;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Scheduler;
/**
* Persistence utilities for internal classes. Methods that depend on non-public classes, such as
* PersistenceManager, should normally go in here.
* @author William Burns
* @since 9.4
*/
public class PersistenceUtil {
private static final Log log = LogFactory.getLog(PersistenceUtil.class);
private static final int SEGMENT_NOT_PROVIDED = -1;
public static <K, V> InternalCacheEntry<K,V> loadAndStoreInDataContainer(DataContainer<K, V> dataContainer,
final PersistenceManager persistenceManager, K key, final InvocationContext ctx, final TimeService timeService,
final AtomicReference<Boolean> isLoaded) {
return loadAndStoreInDataContainer(dataContainer, SEGMENT_NOT_PROVIDED, persistenceManager, key, ctx, timeService,
isLoaded);
}
public static <K, V> InternalCacheEntry<K,V> loadAndStoreInDataContainer(DataContainer<K, V> dataContainer, int segment,
final PersistenceManager persistenceManager, K key, final InvocationContext ctx, final TimeService timeService,
final AtomicReference<Boolean> isLoaded) {
return loadAndComputeInDataContainer(dataContainer, segment, persistenceManager, key, ctx, timeService, null, isLoaded);
}
public static <K, V> InternalCacheEntry<K,V> loadAndComputeInDataContainer(DataContainer<K, V> dataContainer,
int segment, final PersistenceManager persistenceManager, K key, final InvocationContext ctx,
final TimeService timeService, DataContainer.ComputeAction<K, V> action) {
return loadAndComputeInDataContainer(dataContainer, segment, persistenceManager, key, ctx, timeService, action, null);
}
private static <K, V> InternalCacheEntry<K, V> loadAndComputeInDataContainer(DataContainer<K, V> dataContainer,
int segment, final PersistenceManager persistenceManager, K key, final InvocationContext ctx,
final TimeService timeService, DataContainer.ComputeAction<K, V> action, final AtomicReference<Boolean> isLoaded) {
final ByRef<Boolean> expired = new ByRef<>(null);
DataContainer.ComputeAction<K, V> computeAction = (k, oldEntry, factory) -> {
InternalCacheEntry<K, V> entryToUse;
//under the lock, check if the entry exists in the DataContainer
if (oldEntry != null) {
if (oldEntry.canExpire() && oldEntry.isExpired(timeService.wallClockTime())) {
// If it was expired we can check CacheLoaders - since they can have different
// metadata than a store
MarshallableEntry<K, V> loaded = loadAndCheckExpiration(persistenceManager, key, segment, ctx, false);
if (loaded != null) {
if (isLoaded != null) {
isLoaded.set(Boolean.TRUE); //loaded!
}
entryToUse = convert(loaded, factory);
} else {
if (isLoaded != null) {
isLoaded.set(Boolean.FALSE); //not loaded
}
expired.set(Boolean.TRUE);
// Return the original entry - so it doesn't remove expired entry early
return oldEntry;
}
} else {
if (isLoaded != null) {
isLoaded.set(null); //no attempt to load
}
entryToUse = oldEntry;
}
} else {
// There was no entry in memory so check all the stores to see if it is there
MarshallableEntry<K, V> loaded = loadAndCheckExpiration(persistenceManager, key, segment, ctx, true);
if (loaded != null) {
if (isLoaded != null) {
isLoaded.set(Boolean.TRUE); //loaded!
}
entryToUse = convert(loaded, factory);
} else {
if (isLoaded != null) {
isLoaded.set(Boolean.FALSE); //not loaded
}
entryToUse = null;
}
}
if (action != null) {
return action.compute(k, entryToUse, factory);
} else {
return entryToUse;
}
};
InternalCacheEntry<K,V> entry;
if (segment != SEGMENT_NOT_PROVIDED && dataContainer instanceof InternalDataContainer) {
entry = ((InternalDataContainer<K, V>) dataContainer).compute(segment, key, computeAction);
} else {
entry = dataContainer.compute(key, computeAction);
}
if (expired.get() == Boolean.TRUE) {
return null;
} else {
return entry;
}
}
public static <K, V> MarshallableEntry<K, V> loadAndCheckExpiration(PersistenceManager persistenceManager, Object key,
int segment, InvocationContext context) {
return loadAndCheckExpiration(persistenceManager, key, segment, context, true);
}
private static <K, V> MarshallableEntry<K, V> loadAndCheckExpiration(PersistenceManager persistenceManager, Object key,
int segment, InvocationContext context, boolean includeStores) {
final MarshallableEntry<K, V> loaded;
if (segment != SEGMENT_NOT_PROVIDED) {
loaded = CompletionStages.join(persistenceManager.loadFromAllStores(key, segment, context.isOriginLocal(), includeStores));
} else {
loaded = CompletionStages.join(persistenceManager.loadFromAllStores(key, context.isOriginLocal(), includeStores));
}
if (log.isTraceEnabled()) {
log.tracef("Loaded %s for key %s from persistence.", loaded, key);
}
return loaded;
}
public static <K, V> InternalCacheEntry<K, V> convert(MarshallableEntry<K, V> loaded, InternalEntryFactory factory) {
Metadata metadata = loaded.getMetadata();
InternalCacheEntry<K, V> ice;
if (metadata != null) {
ice = factory.create(loaded.getKey(), loaded.getValue(), metadata, loaded.created(), metadata.lifespan(),
loaded.lastUsed(), metadata.maxIdle());
} else {
ice = factory.create(loaded.getKey(), loaded.getValue(), (Metadata) null, loaded.created(), -1, loaded.lastUsed(), -1);
}
ice.setInternalMetadata(loaded.getInternalMetadata());
return ice;
}
public static <K> Predicate<? super K> combinePredicate(IntSet segments, KeyPartitioner keyPartitioner, Predicate<? super K> filter) {
if (segments != null) {
Predicate<Object> segmentFilter = k -> segments.contains(keyPartitioner.getSegment(k));
return filter == null ? segmentFilter : filter.and(segmentFilter);
}
return filter;
}
public static <R> Flowable<R> parallelizePublisher(IntSet segments, Scheduler scheduler,
IntFunction<Publisher<R>> publisherFunction) {
Flowable<Publisher<R>> flowable = Flowable.fromStream(segments.intStream().mapToObj(publisherFunction));
// We internally support removing rxjava empty flowables - don't waste thread on them
flowable = flowable.filter(f -> f != Flowable.empty());
return flowable.parallel()
.runOn(scheduler)
.flatMap(RxJavaInterop.identityFunction())
.sequential();
}
/**
* @deprecated This method is only public for use with prior Store classes, use
* {@link #storeFromConfiguration(StoreConfiguration)} when dealing with {@link NonBlockingStore} instances
*/
@SuppressWarnings("unchecked")
public static <T> T createStoreInstance(StoreConfiguration config) {
Class<?> classBasedOnConfigurationAnnotation = getClassBasedOnConfigurationAnnotation(config);
try {
Object instance = Util.getInstance(classBasedOnConfigurationAnnotation);
return (T) instance;
} catch (CacheConfigurationException unableToInstantiate) {
throw PERSISTENCE.unableToInstantiateClass(config.getClass());
}
}
public static <K, V> NonBlockingStore<K, V> storeFromConfiguration(StoreConfiguration cfg) {
final Object bareInstance;
if (cfg.segmented() && cfg instanceof AbstractSegmentedStoreConfiguration) {
bareInstance = new ComposedSegmentedLoadWriteStore<>((AbstractSegmentedStoreConfiguration) cfg);
} else {
bareInstance = PersistenceUtil.createStoreInstance(cfg);
}
if (!(bareInstance instanceof NonBlockingStore)) {
// All prior stores implemented at least Lifecycle
return new NonBlockingStoreAdapter<>((Lifecycle) bareInstance);
}
return (NonBlockingStore<K, V>) bareInstance;
}
private static Class<?> getClassBasedOnConfigurationAnnotation(StoreConfiguration cfg) {
ConfigurationFor annotation = cfg.getClass().getAnnotation(ConfigurationFor.class);
Class<?> classAnnotation = null;
if (annotation == null) {
if (cfg instanceof CustomStoreConfiguration) {
classAnnotation = ((CustomStoreConfiguration)cfg).customStoreClass();
}
} else {
classAnnotation = annotation.value();
}
if (classAnnotation == null) {
throw CONFIG.loaderConfigurationDoesNotSpecifyLoaderClass(cfg.getClass().getName());
}
return classAnnotation;
}
}
| 11,111
| 46.084746
| 137
|
java
|
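
parallelizePublisher above fans out one publisher per segment onto a scheduler and merges the results back into a single stream. A minimal runnable sketch; the per-segment data is fabricated for illustration.

import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.persistence.internal.PersistenceUtil;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.schedulers.Schedulers;

public class ParallelPublisherExample {
   public static void main(String[] args) {
      IntSet segments = IntSets.immutableRangeSet(4); // segments 0..3
      Flowable<String> merged = PersistenceUtil.parallelizePublisher(
            segments,
            Schedulers.computation(),
            segment -> Flowable.just("entry-from-segment-" + segment));
      // Blocking is acceptable in a demo; callers inside Infinispan stay reactive
      merged.blockingForEach(System.out::println);
   }
}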
null |
infinispan-main/core/src/main/java/org/infinispan/stats/package-info.java
|
/**
* Cache statistics.
*
* @api.public
*/
package org.infinispan.stats;
| 77
| 10.142857
| 29
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/ClusterCacheStats.java
|
package org.infinispan.stats;
/**
* Similar to {@link Stats} but cluster wide.
*
* @author Vladimir Blagojevic
* @since 7.1
*/
public interface ClusterCacheStats extends Stats, ClusterStats {
String OBJECT_NAME = "ClusterCacheStats";
/**
* @return cluster wide read/writes ratio for the cache
*/
double getReadWriteRatio();
/**
* @return cluster wide total percentage hit/(hit+miss) ratio for this cache
*/
double getHitRatio();
/**
* @return the total number of exclusive locks available in the cluster
*/
int getNumberOfLocksAvailable();
/**
* @return the total number of exclusive locks held in the cluster
*/
int getNumberOfLocksHeld();
/**
* @return the total number of invalidations in the cluster
*/
long getInvalidations();
/**
* @return the total number of activations in the cluster
*/
long getActivations();
/**
* @return the total number of passivations in the cluster
*/
long getPassivations();
/**
* @return the total number of persistence load operations in the cluster
*/
long getCacheLoaderLoads();
/**
* @return the total number of persistence misses in the cluster
*/
long getCacheLoaderMisses();
/**
* @return the total number of persistence store operations in the cluster
*/
long getStoreWrites();
/**
* @return the approximate number of entries.
*
* Each owner's copy is counted separately, except entries
* in shared stores.
*/
@Override
long getApproximateEntries();
/**
* @return the approximate number of entries in memory.
*
* Each owner's copy is counted separately.
*/
@Override
long getApproximateEntriesInMemory();
/**
* @return the approximate number of unique entries.
*/
@Override
long getApproximateEntriesUnique();
}
| 1,888
| 20.965116
| 79
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/CacheContainerStats.java
|
package org.infinispan.stats;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* Similar to {@link Stats} but in the scope of a single per node CacheContainer.
*
* @author Vladimir Blagojevic
* @since 7.1
* @deprecated Since 10.1.3. This mixes statistics across unrelated caches, so the reported numbers don't have much
* relevance. Please use {@link org.infinispan.stats.Stats} or {@link org.infinispan.stats.ClusterCacheStats} instead.
*/
@Scope(Scopes.GLOBAL)
@Deprecated
public interface CacheContainerStats extends Stats {
String OBJECT_NAME = "CacheContainerStats";
double getHitRatio();
double getReadWriteRatio();
}
| 696
| 28.041667
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/ClusterContainerStats.java
|
package org.infinispan.stats;
/**
* Cluster wide container statistics.
*
* @author Ryan Emerson
* @since 9.0
*/
public interface ClusterContainerStats extends ClusterStats {
String OBJECT_NAME = "ClusterContainerStats";
/**
* @return the maximum amount of free memory in bytes across the cluster JVMs.
*/
long getMemoryAvailable();
/**
* @return the maximum amount of memory that JVMs across the cluster will attempt to utilise in bytes.
*/
long getMemoryMax();
/**
* @return the total amount of memory in the JVMs across the cluster in bytes.
*/
long getMemoryTotal();
/**
* @return the amount of memory used by JVMs across the cluster in bytes.
*/
long getMemoryUsed();
}
| 744
| 21.575758
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/Stats.java
|
package org.infinispan.stats;
import org.infinispan.commons.dataconversion.internal.JsonSerialization;
/**
* Stats.
*
* @author Galder Zamarreño
* @since 4.0
*/
public interface Stats extends JsonSerialization {
/**
* @return Number of seconds since cache started.
*/
long getTimeSinceStart();
/**
* @return Number of seconds since stats were reset
*/
long getTimeSinceReset();
/**
* Returns the approximate number of entries in this cache that exist in memory or persistent storage.
*
* When the cache is configured with distribution, this method only returns the
* number of entries in the local cache instance. In other words, it does
* not communicate with other nodes to find out about data
* stored in the cluster and not available locally.
*
* @return Number of entries currently in the cache, including passivated entries.
*/
long getApproximateEntries();
/**
* The same as {@link #getApproximateEntries()}, however passivated entries are not included.
*/
long getApproximateEntriesInMemory();
/**
* The same as {@link #getApproximateEntries()}, however only entries owned as primary are counted.
*
* This is different from {@link #getApproximateEntries()} only in distributed and replicated caches.
*/
long getApproximateEntriesUnique();
/**
* Returns the number of entries currently in this cache instance. When
* the cache is configured with distribution, this method only returns the
* number of entries in the local cache instance. In other words, it does
* not attempt to communicate with other nodes to find out about the data
* stored in other nodes in the cluster that is not available locally.
*
* @return Number of entries currently in the cache, including passivated entries.
* @deprecated Since 14.0, please use {@link #getApproximateEntries()} or {@link #getApproximateEntriesUnique()} instead.
*/
@Deprecated
int getCurrentNumberOfEntries();
/**
* The same as {@link #getCurrentNumberOfEntries()}, however passivated entries are not included.
* @deprecated Since 14.0, please use {@link #getApproximateEntriesInMemory()} instead.
*/
@Deprecated
int getCurrentNumberOfEntriesInMemory();
/**
* The amount of off-heap memory used by this cache, or -1 if the cache stores data in the heap.
*/
long getOffHeapMemoryUsed();
/**
* Returns the amount of memory that the current eviction algorithm estimates is in use for data. This method returns a
* number 0 or greater if memory eviction is in use. If memory eviction is not enabled, this method always returns -1.
* @return memory in use or -1 if memory eviction is not enabled
*/
long getDataMemoryUsed();
/**
* @return Number of entries stored in cache since start.
*/
long getStores();
/**
* @return Number of entries read from the cache since start.
*/
long getRetrievals();
/**
* @return Number of cache get hits.
*/
long getHits();
/**
* @return Number of cache get misses.
*/
long getMisses();
/**
* @return Number of cache removal hits.
*/
long getRemoveHits();
/**
* @return Number of cache removal misses.
*/
long getRemoveMisses();
/**
* @return Number of cache eviction.
*/
long getEvictions();
/**
* @return Average number of milliseconds for a cache get on the cache
*/
long getAverageReadTime();
/**
* @return Average number of nanoseconds for a cache get on the cache
*/
long getAverageReadTimeNanos();
/**
* @return Average number of milliseconds for a cache put on the cache
* @deprecated Since 14.0, please use {@link #getAverageWriteTimeNanos()} instead.
*/
@Deprecated
long getAverageWriteTime();
/**
* @return Average number of nanoseconds for a cache put on the cache
*/
long getAverageWriteTimeNanos();
/**
* @return Average number of milliseconds for a cache remove on the cache
* @deprecated Since 14.0, please use {@link #getAverageRemoveTimeNanos()} instead.
*/
@Deprecated
long getAverageRemoveTime();
/**
* @return Average number of nanoseconds for a cache remove on the cache
*/
long getAverageRemoveTimeNanos();
/**
* @return Required minimum number of nodes to guarantee data consistency
*/
int getRequiredMinimumNumberOfNodes();
/**
* Reset statistics
*/
void reset();
/**
* Enables or disables statistics at runtime.
*
* @param enabled boolean indicating whether statistics should be enable or not
*/
void setStatisticsEnabled(boolean enabled);
}
| 4,740
| 27.560241
| 124
|
java
|
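
A minimal sketch of reading these statistics from an embedded cache, assuming a default cache manager and statistics enabled on the cache configuration.

import java.io.IOException;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.stats.Stats;

public class StatsExample {
   public static void main(String[] args) throws IOException {
      try (DefaultCacheManager manager = new DefaultCacheManager()) {
         ConfigurationBuilder builder = new ConfigurationBuilder();
         builder.statistics().enable();
         manager.defineConfiguration("demo", builder.build());
         Cache<String, String> cache = manager.getCache("demo");
         cache.put("k", "v");
         cache.get("k");      // one hit
         cache.get("absent"); // one miss
         Stats stats = cache.getAdvancedCache().getStats();
         System.out.println("hits=" + stats.getHits()
               + " misses=" + stats.getMisses()
               + " stores=" + stats.getStores());
      }
   }
}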
null |
infinispan-main/core/src/main/java/org/infinispan/stats/ClusterStats.java
|
package org.infinispan.stats;
/**
* @author Ryan Emerson
* @since 9.0
*/
interface ClusterStats {
/**
* @return The time in milliseconds to wait between requests before re-retrieving cluster wide stats
*/
long getStaleStatsThreshold();
/**
* @param threshold the time in milliseconds to wait between requests before re-retrieving cluster wide stats
*/
void setStaleStatsThreshold(long threshold);
/**
* Reset the collected statistics
*/
void reset();
}
| 503
| 20.913043
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/StatsCollector.java
|
package org.infinispan.stats.impl;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.offheap.OffHeapMemoryAllocator;
import org.infinispan.context.Flag;
import org.infinispan.eviction.EvictionType;
import org.infinispan.factories.AbstractNamedCacheComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.impl.CacheMgmtInterceptor;
import org.infinispan.jmx.JmxStatisticsExposer;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.jmx.annotations.MeasurementType;
import org.infinispan.jmx.annotations.Units;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.stats.Stats;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
@MBean(objectName = "Statistics", description = "General statistics such as timings, hit/miss ratio, etc.")
@Scope(Scopes.NAMED_CACHE)
public final class StatsCollector implements Stats, JmxStatisticsExposer {
public static final IntSet SEGMENT_0 = IntSets.immutableSet(0);
private final LongAdder hitTimes = new LongAdder();
private final LongAdder missTimes = new LongAdder();
private final LongAdder storeTimes = new LongAdder();
private final LongAdder removeTimes = new LongAdder();
private final LongAdder hits = new LongAdder();
private final LongAdder misses = new LongAdder();
private final LongAdder stores = new LongAdder();
private final LongAdder evictions = new LongAdder();
private final AtomicLong startNanoseconds = new AtomicLong(0);
private final AtomicLong resetNanoseconds = new AtomicLong(0);
private final LongAdder removeHits = new LongAdder();
private final LongAdder removeMisses = new LongAdder();
@Inject ComponentRef<AdvancedCache<?, ?>> cache;
@Inject TimeService timeService;
@Inject ComponentRef<InternalDataContainer<?, ?>> dataContainer;
@Inject OffHeapMemoryAllocator allocator;
@Inject Configuration configuration;
@Inject ComponentRegistry componentRegistry;
@Inject ComponentRef<PersistenceManager> persistenceManager;
@Start
public void start() {
statisticsEnabled = configuration.statistics().enabled();
}
// stats do not need to be perfectly accurate, so this field is deliberately not volatile
private boolean statisticsEnabled = false;
@ManagedAttribute(
description = "Number of cache attribute hits",
displayName = "Number of cache hits",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getHits() {
return hits.sum();
}
@ManagedAttribute(
description = "Number of cache attribute misses",
displayName = "Number of cache misses",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getMisses() {
return misses.sum();
}
@ManagedAttribute(
description = "Number of cache removal hits",
displayName = "Number of cache removal hits",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getRemoveHits() {
return removeHits.sum();
}
@ManagedAttribute(
description = "Number of cache removals where keys were not found",
displayName = "Number of cache removal misses",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getRemoveMisses() {
return removeMisses.sum();
}
@ManagedAttribute(
description = "Number of cache attribute put operations",
displayName = "Number of cache puts" ,
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getStores() {
return stores.sum();
}
@Override
public long getRetrievals() {
return hits.longValue() + misses.longValue();
}
@ManagedAttribute(
description = "Number of cache eviction operations",
displayName = "Number of cache evictions",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getEvictions() {
return evictions.sum();
}
@ManagedAttribute(
description = "Percentage hit/(hit+miss) ratio for the cache",
displayName = "Hit ratio",
units = Units.PERCENTAGE
)
public double getHitRatio() {
long hitsL = hits.sum();
double total = hitsL + misses.sum();
// The reason for <= is that equality checks
// should be avoided for floating point numbers.
if (total <= 0)
return 0;
return hitsL / total;
}
@ManagedAttribute(
description = "Read/writes ratio for the cache",
displayName = "Read/write ratio",
units = Units.PERCENTAGE
)
public double getReadWriteRatio() {
long sum = stores.sum();
if (sum == 0)
return 0;
return (double) (hits.sum() + misses.sum()) / (double) sum;
}
@ManagedAttribute(
description = "Average number of milliseconds for a read operation on the cache",
displayName = "Average read time",
units = Units.MILLISECONDS
)
@Override
public long getAverageReadTime() {
return TimeUnit.NANOSECONDS.toMillis(getAverageReadTimeNanos());
}
@ManagedAttribute(
description = "Average number of nanoseconds for a read operation on the cache",
displayName = "Average read time (ns)",
units = Units.NANOSECONDS
)
@Override
public long getAverageReadTimeNanos() {
long total = hits.sum() + misses.sum();
if (total == 0)
return 0;
return (hitTimes.sum() + missTimes.sum()) / total;
}
@ManagedAttribute(
description = "Average number of milliseconds for a write operation in the cache",
displayName = "Average write time",
units = Units.MILLISECONDS
)
@Override
public long getAverageWriteTime() {
return TimeUnit.NANOSECONDS.toMillis(getAverageWriteTimeNanos());
}
@ManagedAttribute(
description = "Average number of nanoseconds for a write operation in the cache",
displayName = "Average write time (ns)",
units = Units.NANOSECONDS
)
@Override
public long getAverageWriteTimeNanos() {
long sum = stores.sum();
if (sum == 0)
return 0;
return (storeTimes.sum()) / sum;
}
@ManagedAttribute(
description = "Average number of milliseconds for a remove operation in the cache",
displayName = "Average remove time",
units = Units.MILLISECONDS
)
@Override
public long getAverageRemoveTime() {
return TimeUnit.NANOSECONDS.toMillis(getAverageRemoveTimeNanos());
}
@ManagedAttribute(
description = "Average number of nanoseconds for a remove operation in the cache",
displayName = "Average remove time (ns)",
units = Units.NANOSECONDS
)
@Override
public long getAverageRemoveTimeNanos() {
long removes = getRemoveHits();
if (removes == 0)
return 0;
return removeTimes.sum() / removes;
}
@ManagedAttribute(description = "Required minimum number of nodes to hold current cache data",
displayName = "Required minimum number of nodes"
)
@Override
public int getRequiredMinimumNumberOfNodes() {
return CacheMgmtInterceptor.calculateRequiredMinimumNumberOfNodes(cache.wired(), componentRegistry);
}
@Override
public void reset() {
resetStatistics();
}
@Override
public boolean getStatisticsEnabled() {
return statisticsEnabled;
}
@ManagedAttribute(description = "Enables or disables the gathering of statistics by this component", writable = true)
@Override
public void setStatisticsEnabled(boolean enabled) {
statisticsEnabled = enabled;
}
@ManagedAttribute(
description = "Number of entries in the cache including passivated entries",
displayName = "Number of current cache entries"
)
@Deprecated
public int getNumberOfEntries() {
return cache.wired().withFlags(Flag.CACHE_MODE_LOCAL).size();
}
@ManagedAttribute(
description = "Number of entries currently in-memory excluding expired entries",
displayName = "Number of in-memory cache entries"
)
@Override
@Deprecated
public int getCurrentNumberOfEntriesInMemory() {
return dataContainer.running().size();
}
@ManagedAttribute(
description = "Number of seconds since cache started",
displayName = "Seconds since cache started",
units = Units.SECONDS,
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getTimeSinceStart() {
return timeService.timeDuration(startNanoseconds.get(), TimeUnit.SECONDS);
}
@ManagedAttribute(
description = "Number of seconds since the cache statistics were last reset",
displayName = "Seconds since cache statistics were reset",
units = Units.SECONDS
)
@Override
public long getTimeSinceReset() {
return timeService.timeDuration(resetNanoseconds.get(), TimeUnit.SECONDS);
}
@ManagedAttribute(
description = "Approximate current number of entries in the cache, including persisted and expired entries.",
displayName = "Approximate number of entries"
)
public long getApproximateEntries() {
return dataContainer.running().sizeIncludingExpired();
}
@ManagedAttribute(
description = "Approximate current number of entries in memory, including expired entries.",
displayName = "Approximate number of cache entries in memory"
)
public long getApproximateEntriesInMemory() {
return dataContainer.running().sizeIncludingExpired();
}
@ManagedAttribute(
description = "Approximate current number of entries in the cache, including persisted and expired entries.",
displayName = "Approximate number of entries"
)
public long getApproximateEntriesUnique() {
return getApproximateEntries();
}
@Override
public int getCurrentNumberOfEntries() {
return getNumberOfEntries();
}
@ManagedAttribute(
description = "Amount of memory in bytes allocated for use in eviction for data in the cache",
displayName = "Memory used by data in the cache"
)
@Override
public long getDataMemoryUsed() {
if (configuration.memory().isEvictionEnabled() && configuration.memory().evictionType() == EvictionType.MEMORY) {
return dataContainer.running().evictionSize();
}
return -1L;
}
@ManagedAttribute(
description = "Amount off-heap memory used by this cache (bytes)",
displayName = "Off-Heap memory used"
)
@Override
public long getOffHeapMemoryUsed() {
return allocator.getAllocatedAmount();
}
@ManagedOperation(
description = "Resets statistics gathered by this component",
displayName = "Reset Statistics (Statistics)"
)
@Override
public void resetStatistics() {
hits.reset();
misses.reset();
stores.reset();
evictions.reset();
hitTimes.reset();
missTimes.reset();
storeTimes.reset();
removeHits.reset();
removeTimes.reset();
removeMisses.reset();
resetNanoseconds.set(timeService.time());
}
public void recordMisses(int misses, long time) {
this.misses.add(misses);
this.missTimes.add(time);
}
public void recordHits(int hits, long time) {
this.hits.add(hits);
this.hitTimes.add(time);
}
public void recordEviction() {
evictions.increment();
}
public void recordEvictions(int evicted) {
evictions.add(evicted);
}
public void recordStores(int stores, long time) {
this.stores.add(stores);
this.storeTimes.add(time);
}
public void recordRemoveHits(int removes, long time) {
this.removeHits.add(removes);
this.removeTimes.add(time);
}
public void recordRemoveMisses(int removes) {
this.removeMisses.add(removes);
}
@Override
public Json toJson() {
throw new UnsupportedOperationException();
}
@DefaultFactoryFor(classes = StatsCollector.class)
@SurvivesRestarts
public static class Factory extends AbstractNamedCacheComponentFactory implements AutoInstantiableFactory {
@Override
public Object construct(String componentName) {
if (componentName.equals(StatsCollector.class.getName())) {
if (configuration.simpleCache()) {
return new StatsCollector();
} else {
return null;
}
} else {
throw CONTAINER.factoryCannotConstructComponent(componentName);
}
}
}
}
| 13,746
| 31.498818
| 120
|
java
|
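
StatsCollector computes its ratios on demand from LongAdder sums instead of maintaining them incrementally. A standalone sketch of that arithmetic, with names that are illustrative rather than taken from the repository:

import java.util.concurrent.atomic.LongAdder;

public class HitRatioSketch {
   private final LongAdder hits = new LongAdder();
   private final LongAdder misses = new LongAdder();

   void recordHit() { hits.increment(); }
   void recordMiss() { misses.increment(); }

   double hitRatio() {
      long h = hits.sum();
      double total = h + misses.sum();
      // <= guards the zero denominator without an exact floating-point equality check
      return total <= 0 ? 0 : h / total;
   }

   public static void main(String[] args) {
      HitRatioSketch s = new HitRatioSketch();
      s.recordHit();
      s.recordHit();
      s.recordMiss();
      System.out.println(s.hitRatio()); // 2 / (2 + 1) ~= 0.667
   }
}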
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/StatsImpl.java
|
package org.infinispan.stats.impl;
import static org.infinispan.stats.impl.StatKeys.APPROXIMATE_ENTRIES;
import static org.infinispan.stats.impl.StatKeys.APPROXIMATE_ENTRIES_IN_MEMORY;
import static org.infinispan.stats.impl.StatKeys.APPROXIMATE_ENTRIES_UNIQUE;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_READ_TIME;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_READ_TIME_NANOS;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_REMOVE_TIME;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_REMOVE_TIME_NANOS;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_WRITE_TIME;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_WRITE_TIME_NANOS;
import static org.infinispan.stats.impl.StatKeys.DATA_MEMORY_USED;
import static org.infinispan.stats.impl.StatKeys.EVICTIONS;
import static org.infinispan.stats.impl.StatKeys.HITS;
import static org.infinispan.stats.impl.StatKeys.MISSES;
import static org.infinispan.stats.impl.StatKeys.NUMBER_OF_ENTRIES;
import static org.infinispan.stats.impl.StatKeys.NUMBER_OF_ENTRIES_IN_MEMORY;
import static org.infinispan.stats.impl.StatKeys.OFF_HEAP_MEMORY_USED;
import static org.infinispan.stats.impl.StatKeys.REMOVE_HITS;
import static org.infinispan.stats.impl.StatKeys.REMOVE_MISSES;
import static org.infinispan.stats.impl.StatKeys.REQUIRED_MIN_NODES;
import static org.infinispan.stats.impl.StatKeys.RETRIEVALS;
import static org.infinispan.stats.impl.StatKeys.STORES;
import static org.infinispan.stats.impl.StatKeys.TIME_SINCE_RESET;
import static org.infinispan.stats.impl.StatKeys.TIME_SINCE_START;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.impl.CacheMgmtInterceptor;
import org.infinispan.stats.Stats;
import net.jcip.annotations.Immutable;
/**
* StatsImpl.
*
* @author Galder Zamarreño
* @since 4.0
*/
@Immutable
public class StatsImpl implements Stats {
private static final String[] ATTRIBUTES = {
TIME_SINCE_RESET, TIME_SINCE_START, NUMBER_OF_ENTRIES, NUMBER_OF_ENTRIES_IN_MEMORY, OFF_HEAP_MEMORY_USED,
DATA_MEMORY_USED, RETRIEVALS, STORES, HITS, MISSES, REMOVE_HITS, REMOVE_MISSES, EVICTIONS,
AVERAGE_READ_TIME, AVERAGE_REMOVE_TIME, AVERAGE_WRITE_TIME, AVERAGE_READ_TIME_NANOS,
AVERAGE_REMOVE_TIME_NANOS, AVERAGE_WRITE_TIME_NANOS, REQUIRED_MIN_NODES,
APPROXIMATE_ENTRIES, APPROXIMATE_ENTRIES_IN_MEMORY, APPROXIMATE_ENTRIES_UNIQUE};
private final Map<String, Long> statsMap = new HashMap<>();
// at most one of mgmtInterceptor and source may be non-null
private final CacheMgmtInterceptor mgmtInterceptor;
private final Stats source;
/**
* Use this factory to create a Stats object from the configuration and the interceptor chain.
*
* @param configuration the cache configuration, used to check whether statistics are available
* @param chain the interceptor chain from which the CacheMgmtInterceptor is looked up
* @return Stats object
*/
public static Stats create(Configuration configuration, AsyncInterceptorChain chain) {
if (!configuration.statistics().available()) {
return new StatsImpl();
}
return new StatsImpl(chain.findInterceptorExtending(CacheMgmtInterceptor.class));
}
/**
* Use this factory to create a Stats object from a {@link StatsCollector}.
*
* @param collector the collector backing the statistics, may be null
* @return a snapshot of the collector's statistics, or empty stats if the collector is null or disabled
*/
public static Stats create(StatsCollector collector) {
if (collector == null || !collector.getStatisticsEnabled()) {
return new StatsImpl();
}
return new StatsImpl(collector);
}
/**
* Empty stats.
*/
private StatsImpl() {
this.source = null;
this.mgmtInterceptor = null;
emptyStats();
}
private StatsImpl(CacheMgmtInterceptor mgmtInterceptor) {
this.source = null;
this.mgmtInterceptor = mgmtInterceptor;
if (mgmtInterceptor != null && mgmtInterceptor.getStatisticsEnabled()) {
statsMap.put(TIME_SINCE_RESET, mgmtInterceptor.getTimeSinceReset());
statsMap.put(TIME_SINCE_START, mgmtInterceptor.getTimeSinceStart());
statsMap.put(APPROXIMATE_ENTRIES, mgmtInterceptor.getApproximateEntries());
statsMap.put(APPROXIMATE_ENTRIES_IN_MEMORY, mgmtInterceptor.getApproximateEntriesInMemory());
statsMap.put(APPROXIMATE_ENTRIES_UNIQUE, mgmtInterceptor.getApproximateEntriesUnique());
statsMap.put(NUMBER_OF_ENTRIES, (long) mgmtInterceptor.getNumberOfEntries());
statsMap.put(NUMBER_OF_ENTRIES_IN_MEMORY, (long) mgmtInterceptor.getNumberOfEntriesInMemory());
statsMap.put(DATA_MEMORY_USED, mgmtInterceptor.getDataMemoryUsed());
statsMap.put(OFF_HEAP_MEMORY_USED, mgmtInterceptor.getOffHeapMemoryUsed());
statsMap.put(RETRIEVALS, mgmtInterceptor.getHits() + mgmtInterceptor.getMisses());
statsMap.put(STORES, mgmtInterceptor.getStores());
statsMap.put(HITS, mgmtInterceptor.getHits());
statsMap.put(MISSES, mgmtInterceptor.getMisses());
statsMap.put(REMOVE_HITS, mgmtInterceptor.getRemoveHits());
statsMap.put(REMOVE_MISSES, mgmtInterceptor.getRemoveMisses());
statsMap.put(EVICTIONS, mgmtInterceptor.getEvictions());
statsMap.put(AVERAGE_READ_TIME, mgmtInterceptor.getAverageReadTime());
statsMap.put(AVERAGE_REMOVE_TIME, mgmtInterceptor.getAverageRemoveTime());
statsMap.put(AVERAGE_WRITE_TIME, mgmtInterceptor.getAverageWriteTime());
statsMap.put(AVERAGE_READ_TIME_NANOS, mgmtInterceptor.getAverageReadTimeNanos());
statsMap.put(AVERAGE_REMOVE_TIME_NANOS, mgmtInterceptor.getAverageRemoveTimeNanos());
statsMap.put(AVERAGE_WRITE_TIME_NANOS, mgmtInterceptor.getAverageWriteTimeNanos());
statsMap.put(REQUIRED_MIN_NODES, (long) mgmtInterceptor.getRequiredMinimumNumberOfNodes());
} else {
emptyStats();
}
}
private StatsImpl(Stats other) {
this.source = other;
this.mgmtInterceptor = null;
statsMap.put(TIME_SINCE_RESET, other.getTimeSinceReset());
statsMap.put(TIME_SINCE_START, other.getTimeSinceStart());
statsMap.put(APPROXIMATE_ENTRIES, other.getApproximateEntries());
statsMap.put(APPROXIMATE_ENTRIES_IN_MEMORY, other.getApproximateEntriesInMemory());
statsMap.put(APPROXIMATE_ENTRIES_UNIQUE, other.getApproximateEntriesUnique());
statsMap.put(NUMBER_OF_ENTRIES, (long) other.getCurrentNumberOfEntries());
statsMap.put(NUMBER_OF_ENTRIES_IN_MEMORY, (long) other.getCurrentNumberOfEntriesInMemory());
statsMap.put(DATA_MEMORY_USED, other.getDataMemoryUsed());
statsMap.put(OFF_HEAP_MEMORY_USED, other.getOffHeapMemoryUsed());
statsMap.put(RETRIEVALS, other.getHits() + other.getMisses());
statsMap.put(STORES, other.getStores());
statsMap.put(HITS, other.getHits());
statsMap.put(MISSES, other.getMisses());
statsMap.put(REMOVE_HITS, other.getRemoveHits());
statsMap.put(REMOVE_MISSES, other.getRemoveMisses());
statsMap.put(EVICTIONS, other.getEvictions());
statsMap.put(AVERAGE_READ_TIME, other.getAverageReadTime());
statsMap.put(AVERAGE_REMOVE_TIME, other.getAverageRemoveTime());
statsMap.put(AVERAGE_WRITE_TIME, other.getAverageWriteTime());
statsMap.put(AVERAGE_READ_TIME_NANOS, other.getAverageReadTimeNanos());
statsMap.put(AVERAGE_REMOVE_TIME_NANOS, other.getAverageRemoveTimeNanos());
statsMap.put(AVERAGE_WRITE_TIME_NANOS, other.getAverageWriteTimeNanos());
statsMap.put(REQUIRED_MIN_NODES, (long) other.getRequiredMinimumNumberOfNodes());
}
private void emptyStats() {
for (String key : ATTRIBUTES)
statsMap.put(key, -1L);
}
@Override
public long getTimeSinceStart() {
return statsMap.get(TIME_SINCE_START);
}
@Override
public long getTimeSinceReset() {
return statsMap.get(TIME_SINCE_RESET);
}
@Override
public long getApproximateEntries() {
return statsMap.get(APPROXIMATE_ENTRIES);
}
@Override
public long getApproximateEntriesInMemory() {
return statsMap.get(APPROXIMATE_ENTRIES_IN_MEMORY);
}
@Override
public long getApproximateEntriesUnique() {
return statsMap.get(APPROXIMATE_ENTRIES_UNIQUE);
}
@Override
public int getCurrentNumberOfEntries() {
return Math.toIntExact(statsMap.get(NUMBER_OF_ENTRIES));
}
@Override
public int getCurrentNumberOfEntriesInMemory() {
return Math.toIntExact(statsMap.get(NUMBER_OF_ENTRIES_IN_MEMORY));
}
@Override
public long getDataMemoryUsed() {
return statsMap.get(DATA_MEMORY_USED);
}
@Override
public long getOffHeapMemoryUsed() {
return statsMap.get(OFF_HEAP_MEMORY_USED);
}
@Override
public long getRetrievals() {
return statsMap.get(RETRIEVALS);
}
@Override
public long getStores() {
return statsMap.get(STORES);
}
@Override
public long getHits() {
return statsMap.get(HITS);
}
@Override
public long getMisses() {
return statsMap.get(MISSES);
}
@Override
public long getRemoveHits() {
return statsMap.get(REMOVE_HITS);
}
@Override
public long getRemoveMisses() {
return statsMap.get(REMOVE_MISSES);
}
@Override
public long getEvictions() {
return statsMap.get(EVICTIONS);
}
@Override
public long getAverageReadTime() {
return statsMap.get(AVERAGE_READ_TIME);
}
@Override
public long getAverageWriteTime() {
return statsMap.get(AVERAGE_WRITE_TIME);
}
@Override
public long getAverageRemoveTime() {
return statsMap.get(AVERAGE_REMOVE_TIME);
}
@Override
public long getAverageReadTimeNanos() {
return statsMap.get(AVERAGE_READ_TIME_NANOS);
}
@Override
public long getAverageWriteTimeNanos() {
return statsMap.get(AVERAGE_WRITE_TIME_NANOS);
}
@Override
public long getAverageRemoveTimeNanos() {
return statsMap.get(AVERAGE_REMOVE_TIME_NANOS);
}
@Override
public int getRequiredMinimumNumberOfNodes() {
return Math.toIntExact(statsMap.get(REQUIRED_MIN_NODES));
}
@Override
public void reset() {
if (mgmtInterceptor != null) {
mgmtInterceptor.resetStatistics();
} else if (source != null) {
source.reset();
}
}
@Override
public void setStatisticsEnabled(boolean enabled) {
if (mgmtInterceptor != null) {
mgmtInterceptor.setStatisticsEnabled(enabled);
} else if (source != null) {
source.setStatisticsEnabled(enabled);
}
}
@Override
public Json toJson() {
return Json.object()
.set("time_since_start", getTimeSinceStart())
.set("time_since_reset", getTimeSinceReset())
.set("approximate_entries", getApproximateEntries())
.set("approximate_entries_in_memory", getApproximateEntriesInMemory())
.set("approximate_entries_unique", getApproximateEntriesUnique())
.set("current_number_of_entries", getCurrentNumberOfEntries())
.set("current_number_of_entries_in_memory", getCurrentNumberOfEntriesInMemory())
.set("off_heap_memory_used", getOffHeapMemoryUsed())
.set("data_memory_used", getDataMemoryUsed())
.set("stores", getStores())
.set("retrievals", getRetrievals())
.set("hits", getHits())
.set("misses", getMisses())
.set("remove_hits", getRemoveHits())
.set("remove_misses", getRemoveMisses())
.set("evictions", getEvictions())
.set("average_read_time", getAverageReadTime())
.set("average_read_time_nanos", getAverageReadTimeNanos())
.set("average_write_time", getAverageWriteTime())
.set("average_write_time_nanos", getAverageRemoveTimeNanos())
.set("average_remove_time", getAverageRemoveTime())
.set("average_remove_time_nanos", getAverageRemoveTimeNanos())
.set("required_minimum_number_of_nodes", getRequiredMinimumNumberOfNodes());
}
}
| 12,160
| 36.418462
| 114
|
java
|
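
Because StatsImpl copies every value into its map at construction time, each instance is an immutable point-in-time snapshot. A minimal sketch, assuming a running cache with statistics enabled as in the earlier Stats example:

import org.infinispan.Cache;
import org.infinispan.stats.Stats;

public class StatsSnapshotSketch {
   static void printSnapshot(Cache<?, ?> cache) {
      // getStats() materialises a fresh immutable snapshot on every call
      Stats snapshot = cache.getAdvancedCache().getStats();
      // Stats extends JsonSerialization, so the whole snapshot renders as JSON
      System.out.println(snapshot.toJson());
   }
}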
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/ClusterCacheStatsImpl.java
|
package org.infinispan.stats.impl;
import static org.infinispan.stats.impl.StatKeys.ACTIVATIONS;
import static org.infinispan.stats.impl.StatKeys.APPROXIMATE_ENTRIES;
import static org.infinispan.stats.impl.StatKeys.APPROXIMATE_ENTRIES_IN_MEMORY;
import static org.infinispan.stats.impl.StatKeys.APPROXIMATE_ENTRIES_UNIQUE;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_READ_TIME;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_READ_TIME_NANOS;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_REMOVE_TIME;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_REMOVE_TIME_NANOS;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_WRITE_TIME;
import static org.infinispan.stats.impl.StatKeys.AVERAGE_WRITE_TIME_NANOS;
import static org.infinispan.stats.impl.StatKeys.CACHE_LOADER_LOADS;
import static org.infinispan.stats.impl.StatKeys.CACHE_LOADER_MISSES;
import static org.infinispan.stats.impl.StatKeys.CACHE_WRITER_STORES;
import static org.infinispan.stats.impl.StatKeys.DATA_MEMORY_USED;
import static org.infinispan.stats.impl.StatKeys.EVICTIONS;
import static org.infinispan.stats.impl.StatKeys.HITS;
import static org.infinispan.stats.impl.StatKeys.INVALIDATIONS;
import static org.infinispan.stats.impl.StatKeys.MISSES;
import static org.infinispan.stats.impl.StatKeys.NUMBER_OF_ENTRIES;
import static org.infinispan.stats.impl.StatKeys.NUMBER_OF_ENTRIES_IN_MEMORY;
import static org.infinispan.stats.impl.StatKeys.NUMBER_OF_LOCKS_AVAILABLE;
import static org.infinispan.stats.impl.StatKeys.NUMBER_OF_LOCKS_HELD;
import static org.infinispan.stats.impl.StatKeys.OFF_HEAP_MEMORY_USED;
import static org.infinispan.stats.impl.StatKeys.PASSIVATIONS;
import static org.infinispan.stats.impl.StatKeys.REMOVE_HITS;
import static org.infinispan.stats.impl.StatKeys.REMOVE_MISSES;
import static org.infinispan.stats.impl.StatKeys.REQUIRED_MIN_NODES;
import static org.infinispan.stats.impl.StatKeys.STORES;
import static org.infinispan.stats.impl.StatKeys.TIME_SINCE_START;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.context.Flag;
import org.infinispan.eviction.impl.ActivationManager;
import org.infinispan.eviction.impl.PassivationManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.impl.CacheLoaderInterceptor;
import org.infinispan.interceptors.impl.CacheMgmtInterceptor;
import org.infinispan.interceptors.impl.CacheWriterInterceptor;
import org.infinispan.interceptors.impl.InvalidationInterceptor;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.MeasurementType;
import org.infinispan.jmx.annotations.Units;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.LocalModeAddress;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.stats.ClusterCacheStats;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.function.TriConsumer;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
@MBean(objectName = ClusterCacheStats.OBJECT_NAME, description = "General cluster statistics such as timings, hit/miss ratio, etc. for a cache.")
@Scope(Scopes.NAMED_CACHE)
public class ClusterCacheStatsImpl extends AbstractClusterStats implements ClusterCacheStats {
private static final String[] LONG_ATTRIBUTES = {EVICTIONS, HITS, MISSES, OFF_HEAP_MEMORY_USED, REMOVE_HITS,
REMOVE_MISSES, INVALIDATIONS, PASSIVATIONS, ACTIVATIONS, CACHE_LOADER_LOADS, CACHE_LOADER_MISSES, CACHE_WRITER_STORES,
STORES, DATA_MEMORY_USED};
private static final Log log = LogFactory.getLog(ClusterCacheStatsImpl.class);
@Inject AdvancedCache<?, ?> cache;
@Inject Configuration cacheConfiguration;
@Inject GlobalConfiguration globalConfiguration;
ClusterExecutor clusterExecutor;
private double readWriteRatio;
private double hitRatio;
ClusterCacheStatsImpl() {
super(log);
}
public void start() {
this.statisticsEnabled = cacheConfiguration.statistics().enabled();
this.clusterExecutor = SecurityActions.getClusterExecutor(cache);
}
@Override
void updateStats() {
if (clusterExecutor == null) {
// Attempt to retrieve cluster stats before component has been initialized
return;
}
ConcurrentMap<Address, Map<String, Number>> resultMap = new ConcurrentHashMap<>();
TriConsumer<Address, Map<String, Number>, Throwable> triConsumer = (a, v, t) -> {
if (t != null) {
if (t instanceof CacheConfigurationException || t instanceof IllegalLifecycleStateException) {
log.tracef(t,"Exception encountered on %s whilst trying to calculate stats for cache %s", a, cache.getName());
return;
}
throw new CacheException(t);
}
if (a == null) {
// Local cache manager reports null for address
a = LocalModeAddress.INSTANCE;
}
if (!v.isEmpty())
resultMap.put(a, v);
};
boolean accurateSize = globalConfiguration.metrics().accurateSize();
DistributedCacheStatsCallable task = new DistributedCacheStatsCallable(cache.getName(), accurateSize);
try {
CompletableFuture<Void> future = clusterExecutor.submitConsumer(task, triConsumer);
future.join();
Collection<Map<String, Number>> responseList = resultMap.values();
for (String att : LONG_ATTRIBUTES)
putLongAttributes(responseList, att);
putLongAttributesAverage(responseList, AVERAGE_WRITE_TIME);
putLongAttributesAverage(responseList, AVERAGE_WRITE_TIME_NANOS);
putLongAttributesAverage(responseList, AVERAGE_READ_TIME);
putLongAttributesAverage(responseList, AVERAGE_READ_TIME_NANOS);
putLongAttributesAverage(responseList, AVERAGE_REMOVE_TIME);
putLongAttributesAverage(responseList, AVERAGE_REMOVE_TIME_NANOS);
putLongAttributesAverage(responseList, OFF_HEAP_MEMORY_USED);
putIntAttributes(responseList, NUMBER_OF_LOCKS_HELD);
putIntAttributes(responseList, NUMBER_OF_LOCKS_AVAILABLE);
putIntAttributesMax(responseList, REQUIRED_MIN_NODES);
putLongAttributes(responseList, APPROXIMATE_ENTRIES);
putLongAttributes(responseList, APPROXIMATE_ENTRIES_IN_MEMORY);
putLongAttributes(responseList, APPROXIMATE_ENTRIES_UNIQUE);
if (accurateSize) {
// Count each entry only once
long numberOfEntriesInMemory = cache.withFlags(Flag.SKIP_CACHE_LOAD).size();
statsMap.put(NUMBER_OF_ENTRIES_IN_MEMORY, numberOfEntriesInMemory);
int numberOfEntries = cache.size();
statsMap.put(NUMBER_OF_ENTRIES, (long) numberOfEntries);
} else {
statsMap.put(NUMBER_OF_ENTRIES_IN_MEMORY, -1L);
statsMap.put(NUMBER_OF_ENTRIES, -1L);
}
updateTimeSinceStart(responseList);
updateRatios(responseList);
} catch (CompletionException e) {
log.debug("Error while collecting cluster-wide cache stats", e.getCause());
}
}
// -------------------------------------------- JMX information -----------------------------------------------
@Override
@ManagedAttribute(description = "Average number of milliseconds for a read operation on the cache across the cluster",
displayName = "Cluster-wide total average read time (ms)",
units = Units.MILLISECONDS
)
public long getAverageReadTime() {
return getStatAsLong(AVERAGE_READ_TIME);
}
@Override
@ManagedAttribute(description = "Average number of nanoseconds for a read operation on the cache across the cluster",
displayName = "Cluster-wide average read time (ns)",
units = Units.NANOSECONDS
)
public long getAverageReadTimeNanos() {
return getStatAsLong(AVERAGE_READ_TIME_NANOS);
}
@Override
@ManagedAttribute(description = "Average number of milliseconds for a remove operation in the cache across the cluster",
displayName = "Cluster-wide average remove time (ms)",
units = Units.MILLISECONDS
)
public long getAverageRemoveTime() {
return getStatAsLong(AVERAGE_REMOVE_TIME);
}
@Override
@ManagedAttribute(description = "Average number of nanoseconds for a remove operation in the cache across the cluster",
displayName = "Cluster-wide average remove time (ns)",
units = Units.NANOSECONDS
)
public long getAverageRemoveTimeNanos() {
return getStatAsLong(AVERAGE_REMOVE_TIME_NANOS);
}
@Override
@ManagedAttribute(description = "Average number of milliseconds for a write operation in the cache across the cluster",
displayName = "Cluster-wide average write time (ms)",
units = Units.MILLISECONDS
)
public long getAverageWriteTime() {
return getStatAsLong(AVERAGE_WRITE_TIME);
}
@Override
@ManagedAttribute(description = "Average number of nanoseconds for a write operation in the cache across the cluster",
displayName = "Cluster-wide average write time (ns)",
units = Units.NANOSECONDS
)
public long getAverageWriteTimeNanos() {
return getStatAsLong(AVERAGE_WRITE_TIME_NANOS);
}
@ManagedAttribute(description = "Minimum number of nodes to avoid losing data",
displayName = "Required minimum number of nodes"
)
@Override
public int getRequiredMinimumNumberOfNodes() {
return getStatAsInt(REQUIRED_MIN_NODES);
}
@ManagedAttribute(description = "Total number of cache eviction operations across the cluster",
displayName = "Cluster-wide total number of cache evictions",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getEvictions() {
return getStatAsLong(EVICTIONS);
}
@ManagedAttribute(description = "Total number of cache read hits across the cluster",
displayName = "Cluster-wide total number of cache read hits",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getHits() {
return getStatAsLong(HITS);
}
@ManagedAttribute(description = "Percentage hit/(hit+miss) ratio for this cache",
displayName = "Cluster-wide hit ratio",
units = Units.PERCENTAGE
)
@Override
public double getHitRatio() {
if (isStatisticsEnabled()) {
fetchClusterWideStatsIfNeeded();
return hitRatio;
} else {
return -1;
}
}
@Override
@ManagedAttribute(description = "Total number of cache read misses",
displayName = "Cluster-wide number of cache read misses",
measurementType = MeasurementType.TRENDSUP
)
public long getMisses() {
return getStatAsLong(MISSES);
}
@ManagedAttribute(description = "Approximate number of entry replicas in the cache across the cluster, including passivated entries",
displayName = "Cluster-wide approximate number of entry replicas"
)
public long getApproximateEntries() {
return getStatAsLong(APPROXIMATE_ENTRIES);
}
@Override
@ManagedAttribute(description = "Approximate number of entry replicas in memory across the cluster",
displayName = "Cluster-wide approximate number of entry replicas in memory"
)
public long getApproximateEntriesInMemory() {
return getStatAsLong(APPROXIMATE_ENTRIES_IN_MEMORY);
}
@Override
@ManagedAttribute(description = "Approximate number of unique entries in the cache across the cluster, ignoring duplicate replicas",
displayName = "Cluster-wide approximate number of unique entries"
)
public long getApproximateEntriesUnique() {
return getStatAsLong(APPROXIMATE_ENTRIES_UNIQUE);
}
@ManagedAttribute(description = "Current number of entries in the cache across the cluster, including passivated entries",
displayName = "Cluster-wide number of current cache entries"
)
public int getNumberOfEntries() {
return getStatAsInt(NUMBER_OF_ENTRIES);
}
@Override
@ManagedAttribute(description = "Current number of entries in memory across the cluster",
displayName = "Cluster-wide number of entries in memory"
)
public int getCurrentNumberOfEntriesInMemory() {
return getStatAsInt(NUMBER_OF_ENTRIES_IN_MEMORY);
}
@ManagedAttribute(description = "Cluster-wide read/writes ratio for the cache",
displayName = "Cluster-wide read/write ratio",
units = Units.PERCENTAGE
)
@Override
public double getReadWriteRatio() {
if (isStatisticsEnabled()) {
fetchClusterWideStatsIfNeeded();
return readWriteRatio;
} else {
return -1;
}
}
@ManagedAttribute(description = "Cluster-wide total number of cache removal hits",
displayName = "Cluster-wide total number of cache removal hits",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getRemoveHits() {
return getStatAsLong(REMOVE_HITS);
}
@ManagedAttribute(description = "Cluster-wide total number of cache removals where keys were not found",
displayName = "Cluster-wide total number of cache removal misses",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getRemoveMisses() {
return getStatAsLong(REMOVE_MISSES);
}
@ManagedAttribute(description = "Cluster-wide total number of cache put operations",
displayName = "Cluster-wide total number of cache puts",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getStores() {
return getStatAsLong(STORES);
}
@ManagedAttribute(description = "Number of seconds since the first cache node started",
displayName = "Number of seconds since the first cache node started",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getTimeSinceStart() {
return getStatAsLong(TIME_SINCE_START);
}
@Override
public int getCurrentNumberOfEntries() {
return getNumberOfEntries();
}
@ManagedAttribute(
description = "Amount in bytes of memory used across the cluster for entries in this cache with eviction",
displayName = "Cluster-wide memory used by eviction"
)
@Override
public long getDataMemoryUsed() {
return getStatAsLong(DATA_MEMORY_USED);
}
@ManagedAttribute(
description = "Amount in bytes of off-heap memory used across the cluster for this cache",
displayName = "Cluster-wide off-heap memory used"
)
@Override
public long getOffHeapMemoryUsed() {
return getStatAsLong(OFF_HEAP_MEMORY_USED);
}
@Override
public long getRetrievals() {
return getHits() + getMisses();
}
@Override
public void reset() {
super.reset();
readWriteRatio = 0;
hitRatio = 0;
}
@ManagedAttribute(description = "Current number of exclusive locks available across the cluster",
displayName = "Cluster-wide number of locks available"
)
@Override
public int getNumberOfLocksAvailable() {
return getStatAsInt(NUMBER_OF_LOCKS_AVAILABLE);
}
@ManagedAttribute(description = "Current number of locks held across the cluster",
displayName = "Cluster-wide number of locks held"
)
@Override
public int getNumberOfLocksHeld() {
return getStatAsInt(NUMBER_OF_LOCKS_HELD);
}
@Override
@ManagedAttribute(description = "The total number of invalidations in the cluster",
displayName = "Cluster-wide total number of invalidations",
measurementType = MeasurementType.TRENDSUP
)
public long getInvalidations() {
return getStatAsLong(INVALIDATIONS);
}
@ManagedAttribute(description = "The total number of activations across the cluster",
displayName = "Cluster-wide total number of activations",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getActivations() {
return getStatAsLong(ACTIVATIONS);
}
@ManagedAttribute(description = "The total number of passivations across the cluster",
displayName = "Cluster-wide total number of passivations",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getPassivations() {
return getStatAsLong(PASSIVATIONS);
}
@ManagedAttribute(description = "The total number of persistence load operations in the cluster",
displayName = "Cluster-wide total number of persistence loads",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getCacheLoaderLoads() {
return getStatAsLong(CACHE_LOADER_LOADS);
}
@ManagedAttribute(description = "The total number of cacheloader load misses in the cluster",
displayName = "Cluster-wide total number of cacheloader misses",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getCacheLoaderMisses() {
return getStatAsLong(CACHE_LOADER_MISSES);
}
@ManagedAttribute(description = "The total number of cachestore store operations in the cluster",
displayName = "Cluster-wide total number of cachestore stores",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getStoreWrites() {
return getStatAsLong(CACHE_WRITER_STORES);
}
private void updateTimeSinceStart(Collection<Map<String, Number>> responseList) {
long timeSinceStartMax = 0;
for (Map<String, Number> m : responseList) {
Number timeSinceStart = m.get(TIME_SINCE_START);
if (timeSinceStart.longValue() > timeSinceStartMax) {
timeSinceStartMax = timeSinceStart.longValue();
}
}
statsMap.put(TIME_SINCE_START, timeSinceStartMax);
}
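   // Derives the cluster-wide hit ratio and read/write ratio from the per-node hit, miss and store counts.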
private void updateRatios(Collection<Map<String, Number>> responseList) {
long totalHits = 0;
long totalRetrievals = 0;
long sumOfAllReads = 0;
long sumOfAllWrites = 0;
for (Map<String, Number> m : responseList) {
long hits = m.get(HITS).longValue();
long misses = m.get(MISSES).longValue();
totalHits += hits;
         sumOfAllReads += (hits + misses); // this node's reads, not the running hit total
sumOfAllWrites += m.get(STORES).longValue();
totalRetrievals += (hits + misses);
}
this.hitRatio = totalRetrievals > 0 ? (double) totalHits / totalRetrievals : 0;
this.readWriteRatio = sumOfAllWrites > 0 ? (double) sumOfAllReads / sumOfAllWrites : 0;
}
private static <T extends AsyncInterceptor> T getFirstInterceptorWhichExtends(AdvancedCache<?, ?> cache,
Class<T> interceptorClass) {
return cache.getAsyncInterceptorChain().findInterceptorExtending(interceptorClass);
}
@Override
public Json toJson() {
//TODO
throw new UnsupportedOperationException();
}
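   /**
    * Function executed on each node in the cluster to collect that node's statistics for the named cache.
    */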
private static class DistributedCacheStatsCallable implements Function<EmbeddedCacheManager, Map<String, Number>> {
private final String cacheName;
private final boolean accurateSize;
private DistributedCacheStatsCallable(String cacheName, boolean accurateSize) {
this.cacheName = cacheName;
this.accurateSize = accurateSize;
}
@Override
public Map<String, Number> apply(EmbeddedCacheManager embeddedCacheManager) {
if (!embeddedCacheManager.cacheExists(cacheName))
return Collections.emptyMap();
AdvancedCache<Object, Object> remoteCache =
SecurityActions.getUnwrappedCache(embeddedCacheManager.getCache(cacheName)).getAdvancedCache();
CacheMgmtInterceptor stats = getFirstInterceptorWhichExtends(remoteCache, CacheMgmtInterceptor.class);
Map<String, Number> map = new HashMap<>();
map.put(AVERAGE_READ_TIME, stats.getAverageReadTime());
map.put(AVERAGE_READ_TIME_NANOS, stats.getAverageReadTimeNanos());
map.put(AVERAGE_WRITE_TIME, stats.getAverageWriteTime());
map.put(AVERAGE_WRITE_TIME_NANOS, stats.getAverageWriteTimeNanos());
map.put(AVERAGE_REMOVE_TIME, stats.getAverageRemoveTime());
map.put(AVERAGE_REMOVE_TIME_NANOS, stats.getAverageRemoveTimeNanos());
map.put(EVICTIONS, stats.getEvictions());
map.put(HITS, stats.getHits());
map.put(MISSES, stats.getMisses());
map.put(APPROXIMATE_ENTRIES, stats.getApproximateEntries());
map.put(APPROXIMATE_ENTRIES_IN_MEMORY, stats.getApproximateEntriesInMemory());
map.put(APPROXIMATE_ENTRIES_UNIQUE, stats.getApproximateEntriesUnique());
map.put(DATA_MEMORY_USED, stats.getDataMemoryUsed());
map.put(OFF_HEAP_MEMORY_USED, stats.getOffHeapMemoryUsed());
map.put(REQUIRED_MIN_NODES, stats.getRequiredMinimumNumberOfNodes());
map.put(STORES, stats.getStores());
map.put(REMOVE_HITS, stats.getRemoveHits());
map.put(REMOVE_MISSES, stats.getRemoveMisses());
map.put(TIME_SINCE_START, stats.getTimeSinceStart());
LockManager lockManager = remoteCache.getLockManager();
map.put(NUMBER_OF_LOCKS_HELD, lockManager.getNumberOfLocksHeld());
//number of locks available is not exposed through the LockManager interface
map.put(NUMBER_OF_LOCKS_AVAILABLE, 0);
//invalidations
InvalidationInterceptor invalidationInterceptor = getFirstInterceptorWhichExtends(remoteCache,
InvalidationInterceptor.class);
if (invalidationInterceptor != null) {
map.put(INVALIDATIONS, invalidationInterceptor.getInvalidations());
} else {
map.put(INVALIDATIONS, 0);
}
//passivations
PassivationManager pManager = remoteCache.getComponentRegistry().getComponent(PassivationManager.class);
if (pManager != null) {
map.put(PASSIVATIONS, pManager.getPassivations());
} else {
map.put(PASSIVATIONS, 0);
}
//activations
ActivationManager aManager = remoteCache.getComponentRegistry().getComponent(ActivationManager.class);
if (aManager != null) {
map.put(ACTIVATIONS, aManager.getActivationCount());
} else {
map.put(ACTIVATIONS, 0);
}
//cache loaders
         CacheLoaderInterceptor loaderInterceptor = getFirstInterceptorWhichExtends(remoteCache, CacheLoaderInterceptor.class);
         if (loaderInterceptor != null) {
            map.put(CACHE_LOADER_LOADS, loaderInterceptor.getCacheLoaderLoads());
            map.put(CACHE_LOADER_MISSES, loaderInterceptor.getCacheLoaderMisses());
} else {
map.put(CACHE_LOADER_LOADS, 0);
map.put(CACHE_LOADER_MISSES, 0);
}
//cache store
         CacheWriterInterceptor writerInterceptor = getFirstInterceptorWhichExtends(remoteCache, CacheWriterInterceptor.class);
         if (writerInterceptor != null) {
            map.put(CACHE_WRITER_STORES, writerInterceptor.getWritesToTheStores());
} else {
map.put(CACHE_WRITER_STORES, 0);
}
return map;
}
}
public static class DistributedCacheStatsCallableExternalizer implements AdvancedExternalizer<DistributedCacheStatsCallable> {
@Override
public Set<Class<? extends DistributedCacheStatsCallable>> getTypeClasses() {
return Util.asSet(DistributedCacheStatsCallable.class);
}
@Override
public Integer getId() {
return Ids.DISTRIBUTED_CACHE_STATS_CALLABLE;
}
@Override
public void writeObject(ObjectOutput output, DistributedCacheStatsCallable object) throws IOException {
output.writeUTF(object.cacheName);
output.writeBoolean(object.accurateSize);
}
@Override
public DistributedCacheStatsCallable readObject(ObjectInput input) throws IOException, ClassNotFoundException {
String cacheName = input.readUTF();
boolean accurateSize = input.readBoolean();
return new DistributedCacheStatsCallable(cacheName, accurateSize);
}
}
}
| 25,476
| 39.058176
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/CacheContainerStatsImpl.java
|
package org.infinispan.stats.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.JmxStatisticsExposer;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.MeasurementType;
import org.infinispan.jmx.annotations.Units;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.stats.CacheContainerStats;
import org.infinispan.stats.Stats;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Cache container statistics needed for admin console
*
* @author Vladimir Blagojevic
* @since 7.1
 * @deprecated Since 10.1.3. This mixes statistics across unrelated caches, so the reported numbers have little
 * relevance. Please use {@link org.infinispan.stats.Stats} or {@link org.infinispan.stats.ClusterCacheStats} instead.
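 * <p>
 * A minimal per-cache alternative (illustrative sketch; {@code cache} stands for any running cache):
 * <pre>{@code
 * Stats stats = cache.getAdvancedCache().getStats();
 * long hits = stats.getHits();
 * long misses = stats.getMisses();
 * }</pre>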
*/
@MBean(objectName = CacheContainerStats.OBJECT_NAME, description = "General cache container statistics such as timings, hit/miss ratio, etc. for a single node.")
@Scope(Scopes.GLOBAL)
@Deprecated
public class CacheContainerStatsImpl implements CacheContainerStats, JmxStatisticsExposer {
private static final Log log = LogFactory.getLog(CacheContainerStatsImpl.class);
private final EmbeddedCacheManager cm;
private final AtomicLong resetNanoseconds = new AtomicLong(0);
private boolean statisticsEnabled = false;
@Inject TimeService timeService;
private volatile StatsHolder enabledStats;
public CacheContainerStatsImpl(EmbeddedCacheManager cm) {
this.cm = cm;
}
@Start
void start() {
setStatisticsEnabled(SecurityActions.getCacheManagerConfiguration(cm).statistics());
}
@Override
public void setStatisticsEnabled(boolean enabled) {
this.statisticsEnabled = enabled;
if (enabled) {
//yes technically we do not reset stats but we initialize them
resetNanoseconds.set(timeService.time());
}
}
@Override
public boolean getStatisticsEnabled() {
return statisticsEnabled;
}
@Override
public void resetStatistics() {
if (getStatisticsEnabled()) {
getEnabledStats().forEach(Stats::reset);
resetNanoseconds.set(timeService.time());
}
}
@ManagedAttribute(description = "Enables or disables the gathering of statistics by this component",
displayName = "Statistics enabled",
dataType = DataType.TRAIT,
writable = true)
public boolean isStatisticsEnabled() {
return getStatisticsEnabled();
}
   @ManagedAttribute(description = "Cache container total average number of milliseconds for all read operations in this cache container",
displayName = "Cache container total average read time",
units = Units.MILLISECONDS)
@Override
public long getAverageReadTime() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateAverageReadTime();
}
return result;
}
private long calculateAverageReadTime() {
long totalAverageReadTime = 0;
int includedCacheCounter = 0;
for (Stats stats : getEnabledStats()) {
long averageReadTime = stats.getAverageReadTime();
if (averageReadTime > 0) {
includedCacheCounter++;
totalAverageReadTime += averageReadTime;
}
}
if (includedCacheCounter > 0) {
totalAverageReadTime = totalAverageReadTime / includedCacheCounter;
}
return totalAverageReadTime;
}
   @ManagedAttribute(description = "Cache container total average number of nanoseconds for all read operations in this cache container",
displayName = "Cache container total average read time (ns)",
units = Units.NANOSECONDS
)
@Override
public long getAverageReadTimeNanos() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateAverageReadTimeNanos();
}
return result;
}
private long calculateAverageReadTimeNanos() {
long totalAverageReadTime = 0;
int includedCacheCounter = 0;
for (Stats stats : getEnabledStats()) {
long averageReadTime = stats.getAverageReadTimeNanos();
if (averageReadTime > 0) {
includedCacheCounter++;
totalAverageReadTime += averageReadTime;
}
}
if (includedCacheCounter > 0) {
totalAverageReadTime = totalAverageReadTime / includedCacheCounter;
}
return totalAverageReadTime;
}
@ManagedAttribute(description = "Required minimum number of nodes to hold current cache data",
displayName = "Required minimum number of nodes"
)
@Override
public int getRequiredMinimumNumberOfNodes() {
int result = -1;
for (Stats stats : getEnabledStats()) {
result = Math.max(result, stats.getRequiredMinimumNumberOfNodes());
}
return result;
}
   @ManagedAttribute(description = "Cache container total average number of milliseconds for all remove operations in this cache container",
displayName = "Cache container total average remove time",
units = Units.MILLISECONDS
)
@Override
public long getAverageRemoveTime() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateAverageRemoveTime();
}
return result;
}
private long calculateAverageRemoveTime() {
long totalAverageRemoveTime = 0;
int includedCacheCounter = 0;
for (Stats stats : getEnabledStats()) {
long averageRemoveTime = stats.getAverageRemoveTime();
if (averageRemoveTime > 0) {
includedCacheCounter++;
totalAverageRemoveTime += averageRemoveTime;
}
}
if (includedCacheCounter > 0) {
totalAverageRemoveTime = totalAverageRemoveTime / includedCacheCounter;
}
return totalAverageRemoveTime;
}
   @ManagedAttribute(description = "Cache container total average number of nanoseconds for all remove operations in this cache container",
displayName = "Cache container total average remove time (ns)",
units = Units.NANOSECONDS
)
@Override
public long getAverageRemoveTimeNanos() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateAverageRemoveTimeNanos();
}
return result;
}
private long calculateAverageRemoveTimeNanos() {
long totalAverageRemoveTime = 0;
int includedCacheCounter = 0;
for (Stats stats : getEnabledStats()) {
long averageRemoveTime = stats.getAverageRemoveTimeNanos();
if (averageRemoveTime > 0) {
includedCacheCounter++;
totalAverageRemoveTime += averageRemoveTime;
}
}
if (includedCacheCounter > 0) {
totalAverageRemoveTime = totalAverageRemoveTime / includedCacheCounter;
}
return totalAverageRemoveTime;
}
   @ManagedAttribute(description = "Cache container average number of milliseconds for all write operations in this cache container",
displayName = "Cache container average write time",
units = Units.MILLISECONDS
)
@Override
public long getAverageWriteTime() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateAverageWriteTime();
}
return result;
}
private long calculateAverageWriteTime() {
long totalAverageWriteTime = 0;
int includedCacheCounter = 0;
for (Stats stats : getEnabledStats()) {
long averageWriteTime = stats.getAverageWriteTime();
if (averageWriteTime > 0) {
includedCacheCounter++;
totalAverageWriteTime += averageWriteTime;
}
}
if (includedCacheCounter > 0) {
totalAverageWriteTime = totalAverageWriteTime / includedCacheCounter;
}
return totalAverageWriteTime;
}
   @ManagedAttribute(description = "Cache container average number of nanoseconds for all write operations in this cache container",
         displayName = "Cache container average write time (ns)",
         units = Units.NANOSECONDS
)
@Override
public long getAverageWriteTimeNanos() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateAverageWriteTimeNanos();
}
return result;
}
private long calculateAverageWriteTimeNanos() {
long totalAverageWriteTime = 0;
int includedCacheCounter = 0;
for (Stats stats : getEnabledStats()) {
long averageWriteTime = stats.getAverageWriteTimeNanos();
if (averageWriteTime > 0) {
includedCacheCounter++;
totalAverageWriteTime += averageWriteTime;
}
}
if (includedCacheCounter > 0) {
totalAverageWriteTime = totalAverageWriteTime / includedCacheCounter;
}
return totalAverageWriteTime;
}
@ManagedAttribute(
description = "Cache container total number of cache eviction operations",
displayName = "Cache container total number of cache evictions",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getEvictions() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateEvictions();
}
return result;
}
private long calculateEvictions() {
long totalEvictions = 0;
for (Stats stats : getEnabledStats()) {
long evictions = stats.getEvictions();
if (evictions > 0) {
totalEvictions += evictions;
}
}
return totalEvictions;
}
@ManagedAttribute(
description = "Cache container total number of cache attribute hits",
displayName = "Cache container total number of cache hits",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getHits() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateHits();
}
return result;
}
private long calculateHits() {
long totalHits = 0;
for (Stats stats : getEnabledStats()) {
long hits = stats.getHits();
if (hits > 0) {
totalHits += hits;
}
}
return totalHits;
}
@ManagedAttribute(
      description = "Cache container total percentage hit/(hit+miss) ratio for this cache container",
displayName = "Cache container total hit ratio",
units = Units.PERCENTAGE
)
@Override
public double getHitRatio() {
double result = -1d;
if (getStatisticsEnabled()) {
result = calculateHitRatio();
}
return result;
}
   private double calculateHitRatio() {
      long totalHits = 0;
      double totalRequests = 0;
      double hitRatio = 0;
      for (Stats stats : getEnabledStats()) {
         long requests = stats.getRetrievals();
         if (requests > 0) {
            totalHits += stats.getHits();
            totalRequests += requests;
         }
      }
      if (totalRequests > 0) {
         hitRatio = totalHits / totalRequests;
      }
      return hitRatio;
   }
@ManagedAttribute(
description = "Cache container total number of cache attribute misses",
displayName = "Cache container total number of cache misses",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getMisses() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateMisses();
}
return result;
}
   private long calculateMisses() {
      long totalMisses = 0;
      for (Stats stats : getEnabledStats()) {
         long misses = stats.getMisses();
         if (misses > 0) {
            totalMisses += misses;
         }
      }
      return totalMisses;
   }
@ManagedAttribute(
description = "Cache container total number of entries currently in all caches from this cache container",
displayName = "Cache container total number of all cache entries"
)
public int getNumberOfEntries() {
int result = statisticsEnabled ? 0 : -1;
if (statisticsEnabled) {
for (Stats stats : getEnabledStats()) {
int numOfEntries = stats.getCurrentNumberOfEntries();
if (numOfEntries > 0) {
result += numOfEntries;
}
}
}
return result;
}
@ManagedAttribute(
description = "Cache container total number of entries currently in-memory for all caches in this cache container",
displayName = "Cache container total number of in-memory cache entries"
)
public int getCurrentNumberOfEntriesInMemory() {
int result = statisticsEnabled ? 0 : -1;
if (statisticsEnabled) {
for (Stats stats : getEnabledStats()) {
int numOfEntries = stats.getCurrentNumberOfEntriesInMemory();
if (numOfEntries > 0) {
result += numOfEntries;
}
}
}
return result;
}
@ManagedAttribute(
      description = "Cache container read/write ratio for all caches in this cache container",
displayName = "Cache container read/write ratio",
units = Units.PERCENTAGE
)
@Override
public double getReadWriteRatio() {
double result = -1d;
if (getStatisticsEnabled()) {
result = calculateReadWriteRatio();
}
return result;
}
private double calculateReadWriteRatio() {
long sumOfAllReads = 0;
long sumOfAllWrites = 0;
double rwRatio = 0;
for (Stats stats : getEnabledStats()) {
long stores = stats.getStores();
if (stores > 0) {
sumOfAllReads += stats.getRetrievals();
sumOfAllWrites += stores;
}
}
if (sumOfAllWrites > 0) {
rwRatio = (double) sumOfAllReads / sumOfAllWrites;
}
return rwRatio;
}
@ManagedAttribute(
description = "Cache container total number of cache removal hits",
displayName = "Cache container total number of cache removal hits",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getRemoveHits() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateRemoveHits();
}
return result;
}
private long calculateRemoveHits() {
long totalRemoveHits = 0;
for (Stats stats : getEnabledStats()) {
long removeHits = stats.getRemoveHits();
if (removeHits > 0) {
totalRemoveHits += removeHits;
}
}
return totalRemoveHits;
}
@ManagedAttribute(
description = "Cache container total number of cache removals where keys were not found",
displayName = "Cache container total number of cache removal misses",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getRemoveMisses() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateRemoveMisses();
}
return result;
}
private long calculateRemoveMisses() {
long totalRemoveMisses = 0;
for (Stats stats : getEnabledStats()) {
long removeMisses = stats.getRemoveMisses();
if (removeMisses > 0) {
totalRemoveMisses += removeMisses;
}
}
return totalRemoveMisses;
}
@ManagedAttribute(
description = "Cache container total number of cache put operations",
displayName = "Cache container total number of cache puts",
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getStores() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateStores();
}
return result;
}
@ManagedAttribute(
description = "Number of seconds since the cache container statistics were last reset",
displayName = "Seconds since cache container statistics were reset",
units = Units.SECONDS
)
@Override
public long getTimeSinceReset() {
long result = -1;
if (getStatisticsEnabled()) {
result = timeService.timeDuration(resetNanoseconds.get(), TimeUnit.SECONDS);
}
return result;
}
@Override
public long getApproximateEntries() {
long result = statisticsEnabled ? 0 : -1;
if (statisticsEnabled) {
for (Stats stats : getEnabledStats()) {
long numOfEntries = stats.getApproximateEntries();
if (numOfEntries > 0) {
result += numOfEntries;
}
}
}
return result;
}
@Override
public long getApproximateEntriesInMemory() {
long result = statisticsEnabled ? 0 : -1;
if (statisticsEnabled) {
for (Stats stats : getEnabledStats()) {
long numOfEntries = stats.getApproximateEntriesInMemory();
if (numOfEntries > 0) {
result += numOfEntries;
}
}
}
return result;
}
@Override
public long getApproximateEntriesUnique() {
long result = statisticsEnabled ? 0 : -1;
if (statisticsEnabled) {
for (Stats stats : getEnabledStats()) {
long numOfEntries = stats.getApproximateEntriesUnique();
if (numOfEntries > 0) {
result += numOfEntries;
}
}
}
return result;
}
private long calculateStores() {
long totalStores = 0;
for (Stats stats : getEnabledStats()) {
long stores = stats.getStores();
if (stores > 0) {
totalStores += stores;
}
}
return totalStores;
}
@ManagedAttribute(
description = "Number of seconds since cache started",
displayName = "Seconds since cache started",
units = Units.SECONDS,
measurementType = MeasurementType.TRENDSUP
)
@Override
public long getTimeSinceStart() {
long result = -1;
if (getStatisticsEnabled()) {
result = calculateTimeSinceStart();
}
return result;
}
private long calculateTimeSinceStart() {
long longestRunning = 0;
for (Stats stats : getEnabledStats()) {
long runningTime = stats.getTimeSinceStart();
if (runningTime > longestRunning) {
longestRunning = runningTime;
}
}
return longestRunning;
}
@Override
public int getCurrentNumberOfEntries() {
return getNumberOfEntries();
}
@ManagedAttribute(
description = "Amount in bytes of memory used in a given cache container for entries with eviction",
displayName = "Container memory used by eviction"
)
@Override
public long getDataMemoryUsed() {
return calculateDataMemoryUsed();
}
private long calculateDataMemoryUsed() {
long totalMemoryUsed = 0;
for (Stats stats : getEnabledStats()) {
long memoryUsed = stats.getDataMemoryUsed();
if (memoryUsed > 0) {
totalMemoryUsed += memoryUsed;
}
}
return totalMemoryUsed;
}
@ManagedAttribute(
description = "Amount in bytes of off-heap memory used by this cache container",
displayName = "Off-Heap memory used"
)
@Override
public long getOffHeapMemoryUsed() {
return calculateOffHeapUsed();
}
private long calculateOffHeapUsed() {
long totalOffHeapUsed = 0;
for (Stats stats : getEnabledStats()) {
long offHeapUsed = stats.getOffHeapMemoryUsed();
if (offHeapUsed > 0) {
totalOffHeapUsed += offHeapUsed;
}
}
return totalOffHeapUsed;
}
@Override
public long getRetrievals() {
return getHits() + getMisses();
}
@Override
public void reset() {
resetStatistics();
}
private AdvancedCache<?, ?> getCache(String cacheName) {
try {
return SecurityActions.getUnwrappedCache(cm.getCache(cacheName)).getAdvancedCache();
} catch (CacheException t) {
log.cannotObtainFailedCache(cacheName, t);
}
return null;
}
private List<Stats> getEnabledStats() {
if (enabledStats != null && !enabledStats.isExpired())
return enabledStats.stats;
List<Stats> stats = new ArrayList<>();
for (String cn : cm.getCacheNames()) {
if (cm.isRunning(cn)) {
AdvancedCache<?, ?> cache = getCache(cn);
if (cache != null) {
Configuration cfg = cache.getCacheConfiguration();
if (cfg.statistics().enabled()) {
stats.add(cache.getStats());
}
}
}
}
this.enabledStats = new StatsHolder(stats);
return stats;
}
@Override
public Json toJson() {
return Json.object()
.set("statistics_enabled", statisticsEnabled)
.set("number_of_entries", getNumberOfEntries())
.set("hit_ratio", getHitRatio())
.set("read_write_ratio", getReadWriteRatio())
.set("time_since_start", getTimeSinceStart())
.set("time_since_reset", getTimeSinceReset())
.set("current_number_of_entries", getCurrentNumberOfEntries())
.set("current_number_of_entries_in_memory", getCurrentNumberOfEntriesInMemory())
.set("off_heap_memory_used", getOffHeapMemoryUsed())
.set("data_memory_used", getDataMemoryUsed())
.set("stores", getStores())
.set("retrievals", getRetrievals())
.set("hits", getHits())
.set("misses", getMisses())
.set("remove_hits", getRemoveHits())
.set("remove_misses", getRemoveMisses())
.set("evictions", getEvictions())
.set("average_read_time", getAverageReadTime())
.set("average_read_time_nanos", getAverageReadTimeNanos())
.set("average_write_time", getAverageWriteTime())
.set("average_write_time_nanos", getAverageWriteTimeNanos())
.set("average_remove_time", getAverageRemoveTime())
.set("average_remove_time_nanos", getAverageRemoveTimeNanos())
.set("required_minimum_number_of_nodes", getRequiredMinimumNumberOfNodes());
}
private final class StatsHolder {
final long expiration;
final List<Stats> stats;
StatsHolder(List<Stats> stats) {
this.expiration = timeService.expectedEndTime(1, TimeUnit.SECONDS);
this.stats = stats;
}
boolean isExpired() {
return timeService.isTimeExpired(expiration);
}
}
}
| 23,257
| 30.816689
| 161
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/AbstractContainerStats.java
|
package org.infinispan.stats.impl;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.util.logging.Log;
/**
* An abstract class to expose statistics of the local JVM. Concrete classes must override {@link #statistics()}
* to return the complete list of statistics.
*
* @author José Bolina
* @since 14.0
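 * <p>
 * A minimal sketch of a concrete subclass (illustrative only; {@code SingleNodeStats} is hypothetical):
 * <pre>{@code
 * public class SingleNodeStats extends AbstractContainerStats {
 *    SingleNodeStats(Log log) {
 *       super(log);
 *    }
 *
 *    @Override
 *    protected List<Map<String, Number>> statistics() {
 *       // report only this JVM's memory figures
 *       return Collections.singletonList(getLocalStatMaps());
 *    }
 * }
 * }</pre>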
*/
public abstract class AbstractContainerStats extends AbstractClusterStats {
// Memory
protected static final String MEMORY_AVAILABLE = "memoryAvailable";
protected static final String MEMORY_MAX = "memoryMax";
protected static final String MEMORY_TOTAL = "memoryTotal";
protected static final String MEMORY_USED = "memoryUsed";
private static final String[] LONG_ATTRIBUTES = {MEMORY_AVAILABLE, MEMORY_MAX, MEMORY_TOTAL, MEMORY_USED};
AbstractContainerStats(Log log) {
super(log);
}
protected static Map<String, Number> getLocalStatMaps() {
Map<String, Number> map = new HashMap<>();
long available = Runtime.getRuntime().freeMemory();
long total = Runtime.getRuntime().totalMemory();
long max = Runtime.getRuntime().maxMemory();
map.put(MEMORY_AVAILABLE, available);
map.put(MEMORY_MAX, max);
map.put(MEMORY_TOTAL, total);
map.put(MEMORY_USED, total - available);
return map;
}
protected abstract List<Map<String, Number>> statistics() throws Exception;
@Override
void updateStats() throws Exception {
List<Map<String, Number>> memoryMap = statistics();
for (String attr: LONG_ATTRIBUTES) {
putLongAttributes(memoryMap, attr);
}
}
}
| 1,626
| 30.288462
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/ClusterCacheStatsFactory.java
|
package org.infinispan.stats.impl;
import org.infinispan.factories.AbstractNamedCacheComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.stats.ClusterCacheStats;
/**
* ClusterCacheStatsFactory is a default factory class for {@link ClusterCacheStats}.
* <p>
* This is an internal class, not intended to be used by clients.
*
* @author Vladimir Blagojevic
* @since 7.1
*/
@DefaultFactoryFor(classes = ClusterCacheStats.class)
public class ClusterCacheStatsFactory extends AbstractNamedCacheComponentFactory implements AutoInstantiableFactory {
@Override
public Object construct(String componentName) {
return new ClusterCacheStatsImpl();
}
}
| 773
| 31.25
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/ClusterContainerStatsFactory.java
|
package org.infinispan.stats.impl;
import static org.infinispan.stats.impl.LocalContainerStatsImpl.LOCAL_CONTAINER_STATS;
import org.infinispan.factories.AbstractComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.stats.ClusterContainerStats;
/**
* @author Ryan Emerson
* @since 9.0
*/
@DefaultFactoryFor(classes = ClusterContainerStats.class, names = LOCAL_CONTAINER_STATS)
public class ClusterContainerStatsFactory extends AbstractComponentFactory implements AutoInstantiableFactory {
@Override
public Object construct(String componentName) {
if (componentName.equals(LOCAL_CONTAINER_STATS)) {
return new LocalContainerStatsImpl();
}
return new ClusterContainerStatsImpl();
}
}
| 831
| 32.28
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/AbstractClusterStats.java
|
package org.infinispan.stats.impl;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.infinispan.commons.time.TimeService;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.JmxStatisticsExposer;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.jmx.annotations.Units;
import org.infinispan.util.logging.Log;
/**
* @author Ryan Emerson
* @since 9.0
*/
@MBean
@Scope(Scopes.NONE)
public abstract class AbstractClusterStats implements JmxStatisticsExposer {
public static final long DEFAULT_STALE_STATS_THRESHOLD = 3000;
@Inject TimeService timeService;
private volatile long staleStatsThreshold = DEFAULT_STALE_STATS_THRESHOLD;
private volatile long statsUpdateTimestamp = 0;
volatile boolean statisticsEnabled = false;
private final Log log;
private final AtomicLong resetNanoseconds = new AtomicLong(0);
final HashMap<String, Number> statsMap = new HashMap<>();
AbstractClusterStats(Log log) {
this.log = log;
}
abstract void updateStats() throws Exception;
@Start
void start() {
setStatisticsEnabled(statisticsEnabled);
}
public void reset() {
statsMap.clear();
}
@ManagedAttribute(description = "Gets the threshold for cluster wide stats refresh (milliseconds)",
displayName = "Stale Stats Threshold",
dataType = DataType.TRAIT,
writable = true)
public long getStaleStatsThreshold() {
return staleStatsThreshold;
}
public void setStaleStatsThreshold(long staleStatsThreshold) {
this.staleStatsThreshold = staleStatsThreshold;
}
@Override
@ManagedOperation(description = "Resets statistics gathered by this component", displayName = "Reset statistics")
public void resetStatistics() {
if (isStatisticsEnabled()) {
reset();
resetNanoseconds.set(timeService.time());
}
}
@ManagedAttribute(
description = "Number of seconds since the cluster-wide statistics were last reset",
displayName = "Seconds since cluster-wide statistics were reset",
units = Units.SECONDS
)
public long getTimeSinceReset() {
long result = -1;
if (isStatisticsEnabled()) {
result = timeService.timeDuration(resetNanoseconds.get(), TimeUnit.SECONDS);
}
return result;
}
@Override
public void setStatisticsEnabled(boolean enabled) {
this.statisticsEnabled = enabled;
if (enabled) {
//yes technically we do not reset stats but we initialize them
resetNanoseconds.set(timeService.time());
}
}
@Override
public boolean getStatisticsEnabled() {
return statisticsEnabled;
}
@ManagedAttribute(description = "Enables or disables the gathering of statistics by this component",
displayName = "Statistics enabled",
dataType = DataType.TRAIT,
writable = true)
public boolean isStatisticsEnabled() {
return getStatisticsEnabled();
}
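   // Refreshes the aggregated snapshot at most once per staleStatsThreshold milliseconds; synchronized so that
   // concurrent callers trigger a single update.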
synchronized void fetchClusterWideStatsIfNeeded() {
long duration = timeService.timeDuration(statsUpdateTimestamp, timeService.time(), TimeUnit.MILLISECONDS);
if (duration > staleStatsThreshold) {
try {
updateStats();
} catch (Exception e) {
            log.error("Could not execute cluster-wide cache stats operation", e);
} finally {
statsUpdateTimestamp = timeService.time();
}
}
}
   long addLongAttributes(Collection<Map<String, Number>> responseList, String attribute) {
      long total = 0;
      for (Map<String, Number> m : responseList) {
         Number value = m.get(attribute);
         long longValue = value.longValue();
         if (longValue >= 0) {
            total += longValue;
         } else {
            // a node with statistics disabled reports -1, so the aggregate is unknown
            total = -1;
            break;
         }
      }
      return total;
   }
   private int addIntAttributes(Collection<Map<String, Number>> responseList, String attribute) {
      int total = 0;
      for (Map<String, Number> m : responseList) {
         Number value = m.get(attribute);
         int intValue = value.intValue();
         if (intValue >= 0) {
            total += intValue;
         } else {
            // a node with statistics disabled reports -1, so the aggregate is unknown
            total = -1;
            break;
         }
      }
      return total;
   }
private int maxIntAttributes(Collection<Map<String, Number>> responseList, String attribute) {
int max = -1;
for (Map<String, Number> m : responseList) {
Number value = m.get(attribute);
int intValue = value.intValue();
max = Math.max(max, intValue);
}
return max;
}
void putLongAttributesAverage(Collection<Map<String, Number>> responseList, String attribute) {
long numValues = 0;
long total = 0;
for (Map<String, Number> m : responseList) {
Number value = m.get(attribute);
long longValue = value.longValue();
if (longValue >= 0) {
total += longValue;
numValues++;
}
}
if (numValues > 0) {
long average = total / numValues;
statsMap.put(attribute, average);
}
}
void putLongAttributes(Collection<Map<String, Number>> responseList, String attribute) {
statsMap.put(attribute, addLongAttributes(responseList, attribute));
}
void putIntAttributes(Collection<Map<String, Number>> responseList, String attribute) {
statsMap.put(attribute, addIntAttributes(responseList, attribute));
}
void putIntAttributesMax(Collection<Map<String, Number>> responseList, String attribute) {
statsMap.put(attribute, maxIntAttributes(responseList, attribute));
}
long getStatAsLong(String attribute) {
return getStat(attribute).longValue();
}
int getStatAsInt(String attribute) {
return getStat(attribute).intValue();
}
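   // -1 signals that statistics are disabled; otherwise the cluster-wide snapshot is refreshed when stale.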
private Number getStat(String attribute) {
if (isStatisticsEnabled()) {
fetchClusterWideStatsIfNeeded();
return statsMap.getOrDefault(attribute, 0);
} else {
return -1;
}
}
}
| 6,466
| 29.942584
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/LocalContainerStatsImpl.java
|
package org.infinispan.stats.impl;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.stats.ClusterContainerStats;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Provides statistics of the local JVM instance. When statistics collection is disabled, -1 is returned.
*
* @author José Bolina
* @since 14.0
*/
@Scope(Scopes.GLOBAL)
@MBean(objectName = LocalContainerStatsImpl.LOCAL_CONTAINER_STATS, description = "General statistics of the local container.")
public class LocalContainerStatsImpl extends AbstractContainerStats implements ClusterContainerStats {
public static final String LOCAL_CONTAINER_STATS = "LocalContainerStats";
private static final Log log = LogFactory.getLog(LocalContainerStatsImpl.class);
LocalContainerStatsImpl() {
super(log);
}
@Inject
public void init(GlobalConfiguration configuration) {
this.statisticsEnabled = configuration.statistics();
}
@Override
protected List<Map<String, Number>> statistics() throws Exception {
return Collections.singletonList(getLocalStatMaps());
}
   @ManagedAttribute(description = "The maximum amount of free memory in bytes in the local JVM",
displayName = "Local available memory.")
@Override
public long getMemoryAvailable() {
return getStatAsLong(MEMORY_AVAILABLE);
}
   @ManagedAttribute(description = "The maximum amount of memory, in bytes, that the local JVM will attempt to utilise",
displayName = "Local JVM max memory")
@Override
public long getMemoryMax() {
return getStatAsLong(MEMORY_MAX);
}
@ManagedAttribute(description = "The total amount of memory in the local JVM in bytes",
displayName = "Local total memory")
@Override
public long getMemoryTotal() {
return getStatAsLong(MEMORY_TOTAL);
}
@ManagedAttribute(description = "The amount of memory used by the local JVM in bytes",
displayName = "Local memory utilisation")
@Override
public long getMemoryUsed() {
return getStatAsLong(MEMORY_USED);
}
}
| 2,428
| 33.211268
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/StatKeys.java
|
package org.infinispan.stats.impl;
/**
* @author Ryan Emerson
* @since 9.0
*/
final class StatKeys {
static final String TIME_SINCE_RESET = "timeSinceReset";
static final String TIME_SINCE_START = "timeSinceStart";
static final String REMOVE_MISSES = "removeMisses";
static final String REMOVE_HITS = "removeHits";
static final String AVERAGE_WRITE_TIME = "averageWriteTime";
static final String AVERAGE_READ_TIME = "averageReadTime";
static final String AVERAGE_REMOVE_TIME = "averageRemoveTime";
static final String AVERAGE_WRITE_TIME_NANOS = "averageWriteTimeNanos";
static final String AVERAGE_READ_TIME_NANOS = "averageReadTimeNanos";
static final String AVERAGE_REMOVE_TIME_NANOS = "averageRemoveTimeNanos";
static final String EVICTIONS = "evictions";
static final String HITS = "hits";
static final String MISSES = "misses";
static final String NUMBER_OF_ENTRIES = "numberOfEntries";
static final String NUMBER_OF_ENTRIES_IN_MEMORY = "numberOfEntriesInMemory";
static final String APPROXIMATE_ENTRIES = "approximateEntries";
static final String APPROXIMATE_ENTRIES_IN_MEMORY = "approximateEntriesInMemory";
static final String APPROXIMATE_ENTRIES_UNIQUE = "approximateEntriesUnique";
static final String DATA_MEMORY_USED = "dataMemoryUsed";
static final String OFF_HEAP_MEMORY_USED = "offHeapMemoryUsed";
static final String RETRIEVALS = "retrievals";
static final String STORES = "stores";
static final String REQUIRED_MIN_NODES = "minRequiredNodes";
//LockManager
static final String NUMBER_OF_LOCKS_HELD = "numberOfLocksHeld";
static final String NUMBER_OF_LOCKS_AVAILABLE = "numberOfLocksAvailable";
//Invalidation/passivation/activation
static final String INVALIDATIONS = "invalidations";
static final String PASSIVATIONS = "passivations";
static final String ACTIVATIONS = "activations";
//cache loaders
static final String CACHE_LOADER_LOADS = "cacheLoaderLoads";
static final String CACHE_LOADER_MISSES = "cacheLoaderMisses";
static final String CACHE_WRITER_STORES = "cacheWriterStores";
}
| 2,117
| 45.043478
| 84
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/stats/impl/ClusterContainerStatsImpl.java
|
package org.infinispan.stats.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.stats.ClusterContainerStats;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
* @since 9.0
*/
@Scope(Scopes.GLOBAL)
@MBean(objectName = ClusterContainerStats.OBJECT_NAME, description = "General container statistics aggregated across the cluster.")
public class ClusterContainerStatsImpl extends AbstractContainerStats implements ClusterContainerStats {
private static final Log log = LogFactory.getLog(ClusterContainerStatsImpl.class);
private ClusterExecutor clusterExecutor;
private EmbeddedCacheManager cacheManager;
ClusterContainerStatsImpl() {
super(log);
}
@Inject
public void init(EmbeddedCacheManager cacheManager, GlobalConfiguration configuration) {
this.cacheManager = cacheManager;
this.statisticsEnabled = configuration.statistics();
}
@Override
public void start() {
this.clusterExecutor = SecurityActions.getClusterExecutor(cacheManager);
}
@Override
protected List<Map<String, Number>> statistics() throws Exception {
final List<Map<String, Number>> successfulResponseMaps = new ArrayList<>();
// protect against stats collection before the component is ready
if (clusterExecutor != null) {
CompletableFutures.await(clusterExecutor.submitConsumer(ignore -> getLocalStatMaps(), (addr, stats, t) -> {
if (t == null) {
successfulResponseMaps.add(stats);
}
}));
}
return successfulResponseMaps;
}
@ManagedAttribute(description = "The maximum amount of free memory in bytes across the cluster JVMs",
displayName = "Cluster wide available memory.")
@Override
public long getMemoryAvailable() {
return getStatAsLong(MEMORY_AVAILABLE);
}
@ManagedAttribute(description = "The maximum amount of memory that JVMs across the cluster will attempt to utilise in bytes",
displayName = "Cluster wide max memory of JVMs")
@Override
public long getMemoryMax() {
return getStatAsLong(MEMORY_MAX);
}
@ManagedAttribute(description = "The total amount of memory in the JVMs across the cluster in bytes",
displayName = "Cluster wide total memory")
@Override
public long getMemoryTotal() {
return getStatAsLong(MEMORY_TOTAL);
}
@ManagedAttribute(description = "The amount of memory used by JVMs across the cluster in bytes",
displayName = "Cluster wide memory utilisation")
@Override
public long getMemoryUsed() {
return getStatAsLong(MEMORY_USED);
}
}
| 3,249
| 34.714286
| 131
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationSuccessAction.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* Callback interface for {@link BaseAsyncInterceptor#invokeNextThenAccept(InvocationContext, VisitableCommand, InvocationSuccessAction)}.
*
* @author Dan Berindei
* @since 9.0
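 * <p>
 * Typical usage from inside a {@link BaseAsyncInterceptor} subclass (illustrative sketch; assumes a {@code log} field):
 * <pre>{@code
 * return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) ->
 *       log.tracef("Command %s returned %s", rCommand, rv));
 * }</pre>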
*/
@FunctionalInterface
public interface InvocationSuccessAction<C extends VisitableCommand> extends InvocationCallback<C> {
/**
* Process the result from a successful invocation stage and possibly throw an exception.
*/
void accept(InvocationContext rCtx, C rCommand, Object rv) throws Throwable;
@Override
default Object apply(InvocationContext rCtx, C rCommand, Object rv, Throwable throwable) throws Throwable {
if (throwable == null) {
accept(rCtx, rCommand, rv);
return rv;
} else {
throw throwable;
}
}
}
| 903
| 30.172414
| 138
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/package-info.java
|
/**
* Infinispan is designed around a set of interceptors around a data container. These interceptors
* add behavioral aspects to the data container.
*/
package org.infinispan.interceptors;
| 194
| 31.5
| 99
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/BaseCustomAsyncInterceptor.java
|
package org.infinispan.interceptors;
import org.infinispan.Cache;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.manager.EmbeddedCacheManager;
/**
* Anyone using the {@link AsyncInterceptorChain#addInterceptor(AsyncInterceptor, int)} method (or any of its
* overloaded forms) or registering custom interceptors via XML should extend this base class when creating their own
* custom interceptors.
* <p>
* Annotations on custom interceptors, including {@link Inject}, {@link Start} and {@link Stop}
* will not be respected and callbacks will not be made.
* <p>
* Instead, custom interceptor authors should extend this base class to gain access to {@link Cache} and {@link EmbeddedCacheManager},
* from which other components may be accessed. Further, lifecycle should be implemented by overriding {@link #start()}
* and {@link #stop()} as defined in this class.
*
* @author Dan Berindei
* @since 9.0
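 * <p>
 * A minimal sketch (illustrative; {@code AuditInterceptor} is hypothetical):
 * <pre>{@code
 * public class AuditInterceptor extends BaseCustomAsyncInterceptor {
 *    @Override
 *    protected void start() {
 *       // cache and embeddedCacheManager are available here
 *    }
 * }
 * }</pre>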
*/
public class BaseCustomAsyncInterceptor extends DDAsyncInterceptor {
@Inject ComponentRef<Cache<?, ?>> cacheRef;
@Inject protected EmbeddedCacheManager embeddedCacheManager;
protected Cache<?, ?> cache;
@Start(priority = 1)
void setup() {
// Needed for backwards compatibility
this.cache = cacheRef.wired();
}
@Start
protected void start() {
// Meant to be overridden
}
@Stop
protected void stop() {
// Meant to be overridden
}
}
| 1,591
| 32.87234
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationCallback.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* Base interface for all callbacks used by {@link BaseAsyncInterceptor} and {@link InvocationStage} methods.
*
* @author Dan Berindei
* @since 9.0
*/
@FunctionalInterface
public interface InvocationCallback<C extends VisitableCommand> {
/**
* Process the result or the exception from an invocation stage and either return a simple value,
* return a new {@link InvocationStage}, or throw an exception.
*/
Object apply(InvocationContext rCtx, C rCommand, Object rv, Throwable throwable) throws Throwable;
}
| 669
| 32.5
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationExceptionFunction.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* Callback interface for {@link BaseAsyncInterceptor#invokeNextAndExceptionally(InvocationContext, VisitableCommand, InvocationExceptionFunction)}.
*
* @author Dan Berindei
* @since 9.0
*/
@FunctionalInterface
public interface InvocationExceptionFunction<C extends VisitableCommand> extends InvocationCallback<C> {
/**
* Process the result from a successful invocation stage and either return a simple value,
* return a new {@link InvocationStage}, or throw an exception.
*/
Object apply(InvocationContext rCtx, C rCommand, Throwable throwable) throws Throwable;
@Override
default Object apply(InvocationContext rCtx, C rCommand, Object rv, Throwable throwable) throws Throwable {
if (throwable == null)
return rv;
return apply(rCtx, rCommand, throwable);
}
}
| 956
| 33.178571
| 148
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationFinallyAction.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* Callback interface for {@link BaseAsyncInterceptor#invokeNextAndFinally(InvocationContext, VisitableCommand, InvocationFinallyAction)}.
*
* @author Dan Berindei
* @since 9.0
*/
@FunctionalInterface
public interface InvocationFinallyAction<C extends VisitableCommand> extends InvocationCallback<C> {
/**
* Process the result or the exception from an invocation stage and possibly throw an exception.
*/
void accept(InvocationContext rCtx, C rCommand, Object rv, Throwable throwable) throws Throwable;
@Override
default Object apply(InvocationContext rCtx, C rCommand, Object rv, Throwable throwable) throws Throwable {
accept(rCtx, rCommand, rv, throwable);
if (throwable == null) {
return rv;
} else {
throw throwable;
}
}
}
| 939
| 31.413793
| 138
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/AsyncInterceptorChain.java
|
package org.infinispan.interceptors;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commons.util.Experimental;
import org.infinispan.context.InvocationContext;
/**
* Interceptor chain using {@link AsyncInterceptor}s.
*
* Experimental: The ability to modify the interceptors at runtime may be removed in future versions.
*
* @author Dan Berindei
* @since 9.0
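 * <p>
 * Example of inspecting and modifying the chain at runtime (illustrative sketch; {@code AuditInterceptor} is
 * hypothetical):
 * <pre>{@code
 * if (!chain.containsInterceptorType(AuditInterceptor.class)) {
 *    chain.addInterceptor(new AuditInterceptor(), 0);
 * }
 * }</pre>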
*/
@Experimental
public interface AsyncInterceptorChain {
/**
* @return An immutable list of the current interceptors.
*/
List<AsyncInterceptor> getInterceptors();
/**
* Inserts the given interceptor at the specified position in the chain (0 based indexing).
*
* @throws IllegalArgumentException if the position is invalid (e.g. 5 and there are only 2 interceptors
* in the chain)
*/
void addInterceptor(AsyncInterceptor interceptor, int position);
/**
* Removes the interceptor at the given position.
*
* @throws IllegalArgumentException if the position is invalid (e.g. 5 and there are only 2 interceptors
* in the chain)
*/
void removeInterceptor(int position);
/**
* Returns the number of interceptors in the chain.
*/
int size();
/**
* Removes all the occurrences of supplied interceptor type from the chain.
*/
void removeInterceptor(Class<? extends AsyncInterceptor> clazz);
/**
* Adds a new interceptor in list after an interceptor of a given type.
*
* @return true if the interceptor was added; i.e. the {@code afterInterceptor} exists
*/
boolean addInterceptorAfter(AsyncInterceptor toAdd, Class<? extends
AsyncInterceptor> afterInterceptor);
/**
* Adds a new interceptor in list before an interceptor of a given type.
*
* @return true if the interceptor was added; i.e. the {@code beforeInterceptor} exists
*/
boolean addInterceptorBefore(AsyncInterceptor toAdd, Class<? extends AsyncInterceptor> beforeInterceptor);
/**
* Replaces an existing interceptor of the given type in the interceptor chain with a new interceptor
* instance passed as parameter.
*
* @param replacingInterceptor the interceptor to add to the interceptor chain
* @param toBeReplacedInterceptorType the type of interceptor that should be swapped with the new one
* @return true if the interceptor was replaced
*/
boolean replaceInterceptor(AsyncInterceptor replacingInterceptor,
Class<? extends AsyncInterceptor> toBeReplacedInterceptorType);
/**
* Appends at the end.
*/
void appendInterceptor(AsyncInterceptor ci, boolean isCustom);
/**
* Walks the command through the interceptor chain. The received ctx is being passed in.
*
* <p>Note: Reusing the context for multiple invocations is allowed, however most context implementations are not
* thread-safe.</p>
*/
Object invoke(InvocationContext ctx, VisitableCommand command);
/**
* Walks the command through the interceptor chain. The received ctx is being passed in.
*
* <p>Note: Reusing the context for multiple invocations is allowed, however most context implementations are not
* thread-safe.</p>
*/
CompletableFuture<Object> invokeAsync(InvocationContext ctx, VisitableCommand command);
/**
* Walks the command through the interceptor chain. The received ctx is being passed in.
*
* <p>Note: Reusing the context for multiple invocations is allowed, however most context implementations are not
* thread-safe.</p>
*/
InvocationStage invokeStage(InvocationContext ctx, VisitableCommand command);
/**
* Returns the first interceptor extending the given class, or {@code null} if there is none.
*/
<T extends AsyncInterceptor> T findInterceptorExtending(Class<T> interceptorClass);
/**
* Returns the first interceptor with the given class, or {@code null} if there is none.
*/
<T extends AsyncInterceptor> T findInterceptorWithClass(Class<T> interceptorClass);
/**
* Checks whether the chain contains the supplied interceptor instance.
*/
boolean containsInstance(AsyncInterceptor interceptor);
/**
* Checks whether the chain contains an interceptor with the given class.
*/
boolean containsInterceptorType(Class<? extends AsyncInterceptor> interceptorType);
/**
* Checks whether the chain contains an interceptor with the given class, or a subclass.
*/
boolean containsInterceptorType(Class<? extends AsyncInterceptor> interceptorType,
boolean alsoMatchSubClasses);
}
| 4,706
| 34.659091
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/ExceptionSyncInvocationStage.java
|
package org.infinispan.interceptors;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* A sync {@link InvocationStage} for {@link Throwable}.
* <p>
* It is similar to {@link SyncInvocationStage} but instead of being used with a successful value, it accepts a {@link
 * Throwable}.
*
* @author Pedro Ruivo
* @since 10.0
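 * <p>
 * For example, an interceptor can short-circuit the chain with a failed stage (illustrative sketch):
 * <pre>{@code
 * return new ExceptionSyncInvocationStage(new CacheException("validation failed"));
 * }</pre>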
*/
public class ExceptionSyncInvocationStage extends InvocationStage {
private final Throwable throwable;
public ExceptionSyncInvocationStage(Throwable throwable) {
this.throwable = CompletableFutures.extractException(throwable);
}
@Override
public Object thenApply(InvocationContext ctx, VisitableCommand command, InvocationSuccessFunction function) {
return this;
}
@Override
public Object thenAccept(InvocationContext ctx, VisitableCommand command, InvocationSuccessAction function) {
return this;
}
@Override
public Object andExceptionally(InvocationContext ctx, VisitableCommand command,
InvocationExceptionFunction function) {
try {
return function.apply(ctx, command, throwable);
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
@Override
public Object andFinally(InvocationContext ctx, VisitableCommand command, InvocationFinallyAction action) {
try {
action.accept(ctx, command, null, throwable);
return this;
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
@Override
public Object andHandle(InvocationContext ctx, VisitableCommand command, InvocationFinallyFunction function) {
try {
return function.apply(ctx, command, null, throwable);
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
@Override
public Object thenReturn(InvocationContext ctx, VisitableCommand command, Object returnValue) {
return this;
}
@Override
public Object get() throws Throwable {
throw throwable;
}
@Override
public boolean isDone() {
return true;
}
@Override
public CompletableFuture<Object> toCompletableFuture() {
return CompletableFuture.failedFuture(throwable);
}
@Override
public Object addCallback(InvocationContext ctx, VisitableCommand command, InvocationCallback function) {
try {
return function.apply(ctx, command, null, throwable);
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
}
| 2,685
| 27.574468
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/EmptyAsyncInterceptorChain.java
|
package org.infinispan.interceptors;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* @author Dan Berindei
* @since 9.0
*/
public class EmptyAsyncInterceptorChain implements AsyncInterceptorChain {
public static final EmptyAsyncInterceptorChain INSTANCE = new EmptyAsyncInterceptorChain();
@Override
public List<AsyncInterceptor> getInterceptors() {
return Collections.emptyList();
}
@Override
public void addInterceptor(AsyncInterceptor interceptor, int position) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public void removeInterceptor(int position) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public int size() {
return 0;
}
@Override
public void removeInterceptor(Class<? extends AsyncInterceptor> clazz) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public boolean addInterceptorAfter(AsyncInterceptor toAdd,
Class<? extends AsyncInterceptor> afterInterceptor) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public boolean addInterceptorBefore(AsyncInterceptor toAdd,
Class<? extends AsyncInterceptor> beforeInterceptor) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public boolean replaceInterceptor(AsyncInterceptor replacingInterceptor,
Class<? extends AsyncInterceptor> toBeReplacedInterceptorType) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public void appendInterceptor(AsyncInterceptor ci, boolean isCustom) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public Object invoke(InvocationContext ctx, VisitableCommand command) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public CompletableFuture<Object> invokeAsync(InvocationContext ctx, VisitableCommand command) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public InvocationStage invokeStage(InvocationContext ctx, VisitableCommand command) {
throw CONTAINER.interceptorStackNotSupported();
}
@Override
public <T extends AsyncInterceptor> T findInterceptorExtending(Class<T> interceptorClass) {
return null;
}
@Override
public <T extends AsyncInterceptor> T findInterceptorWithClass(Class<T> interceptorClass) {
return null;
}
@Override
public boolean containsInstance(AsyncInterceptor interceptor) {
return false;
}
@Override
public boolean containsInterceptorType(Class<? extends AsyncInterceptor> interceptorType) {
return false;
}
@Override
public boolean containsInterceptorType(Class<? extends AsyncInterceptor> interceptorType,
boolean alsoMatchSubClasses) {
return false;
}
}
| 3,030
| 27.064815
| 98
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationSuccessFunction.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* Callback interface for {@link BaseAsyncInterceptor#invokeNextThenApply(InvocationContext, VisitableCommand, InvocationSuccessFunction)}.
*
* @author Dan Berindei
* @since 9.0
*/
@FunctionalInterface
public interface InvocationSuccessFunction<C extends VisitableCommand> extends InvocationCallback<C> {
/**
* Process the result from a successful invocation stage and either return a simple value,
* return a new {@link InvocationStage}, or throw an exception.
*/
Object apply(InvocationContext rCtx, C rCommand, Object rv) throws Throwable;
@Override
default Object apply(InvocationContext rCtx, C rCommand, Object rv, Throwable throwable) throws Throwable {
if (throwable != null)
throw throwable;
return apply(rCtx, rCommand, rv);
}
}
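/*
 * A minimal usage sketch: since this is a functional interface, the success callback is
 * typically written as a lambda inside an interceptor. The names below are illustrative,
 * not part of this interface:
 *
 *    return invokeNextThenApply(ctx, command,
 *          (rCtx, rCommand, rv) -> rv == null ? defaultValue : rv);
 */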
| 934
| 32.392857
| 139
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/BaseAsyncInterceptor.java
|
package org.infinispan.interceptors;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commons.util.Experimental;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.impl.SimpleAsyncInvocationStage;
import org.infinispan.util.concurrent.CompletionStages;
/**
* Base class for an interceptor in the new asynchronous invocation chain.
*
* @author Dan Berindei
* @since 9.0
*/
@Experimental
@Scope(Scopes.NAMED_CACHE)
public abstract class BaseAsyncInterceptor implements AsyncInterceptor {
private final InvocationSuccessFunction<VisitableCommand> invokeNextFunction = (rCtx, rCommand, rv) -> invokeNext(rCtx, rCommand);
@Inject protected Configuration cacheConfiguration;
private AsyncInterceptor nextInterceptor;
private DDAsyncInterceptor nextDDInterceptor;
/**
* Used internally to set up the interceptor.
*/
@Override
public final void setNextInterceptor(AsyncInterceptor nextInterceptor) {
this.nextInterceptor = nextInterceptor;
this.nextDDInterceptor =
nextInterceptor instanceof DDAsyncInterceptor ? (DDAsyncInterceptor) nextInterceptor : null;
}
/**
* Invoke the next interceptor, possibly with a new command.
*
* <p>Use {@link #invokeNextThenApply(InvocationContext, VisitableCommand, InvocationSuccessFunction)}
* or {@link #invokeNextThenAccept(InvocationContext, VisitableCommand, InvocationSuccessAction)} instead
* if you need to process the return value of the next interceptor.</p>
*
* <p>Note: {@code invokeNext(ctx, command)} does not throw exceptions. In order to handle exceptions from the
* next interceptors, you <em>must</em> use
* {@link #invokeNextAndHandle(InvocationContext, VisitableCommand, InvocationFinallyFunction)},
* {@link #invokeNextAndFinally(InvocationContext, VisitableCommand, InvocationFinallyAction)},
* or {@link #invokeNextAndExceptionally(InvocationContext, VisitableCommand, InvocationExceptionFunction)}.</p>
*/
public final Object invokeNext(InvocationContext ctx, VisitableCommand command) {
try {
if (nextDDInterceptor != null) {
return command.acceptVisitor(ctx, nextDDInterceptor);
} else {
return nextInterceptor.visitCommand(ctx, command);
}
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
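   /*
    * A minimal sketch of handling a failure from later interceptors, assuming a hypothetical
    * subclass; a plain try/catch around invokeNext() would miss failures reported through the
    * returned stage, hence the dedicated callback:
    *
    *    return invokeNextAndExceptionally(ctx, command, (rCtx, rCommand, t) -> {
    *       releaseLocks(rCommand); // hypothetical cleanup helper
    *       throw t;                // rethrow so the invocation still fails
    *    });
    */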
/**
* Invoke the next interceptor, possibly with a new command, and execute an {@link InvocationCallback}
* after all the interceptors have finished successfully.
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final <C extends VisitableCommand> Object invokeNextThenApply(InvocationContext ctx, C command,
InvocationSuccessFunction<C> function) {
try {
Object rv;
if (nextDDInterceptor != null) {
rv = command.acceptVisitor(ctx, nextDDInterceptor);
} else {
rv = nextInterceptor.visitCommand(ctx, command);
}
if (rv instanceof InvocationStage) {
return ((InvocationStage) rv).thenApply(ctx, command, function);
}
return function.apply(ctx, command, rv);
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
/**
* Invoke the next interceptor, possibly with a new command, and execute an {@link InvocationCallback}
* after all the interceptors have finished successfully.
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final <C extends VisitableCommand> Object invokeNextThenAccept(InvocationContext ctx, C command,
InvocationSuccessAction<C> action) {
try {
Object rv;
if (nextDDInterceptor != null) {
rv = command.acceptVisitor(ctx, nextDDInterceptor);
} else {
rv = nextInterceptor.visitCommand(ctx, command);
}
if (rv instanceof InvocationStage) {
return ((InvocationStage) rv).thenAccept(ctx, command, action);
}
action.accept(ctx, command, rv);
return rv;
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
/**
* Invoke the next interceptor, possibly with a new command, and execute an {@link InvocationCallback}
* after all the interceptors have finished with an exception.
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final <C extends VisitableCommand> Object invokeNextAndExceptionally(InvocationContext ctx, C command,
InvocationExceptionFunction<C> function) {
try {
Object rv;
if (nextDDInterceptor != null) {
rv = command.acceptVisitor(ctx, nextDDInterceptor);
} else {
rv = nextInterceptor.visitCommand(ctx, command);
}
if (rv instanceof InvocationStage) {
return ((InvocationStage) rv).andExceptionally(ctx, command, function);
}
// No exception
return rv;
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
/**
* Invoke the next interceptor, possibly with a new command, and execute an {@link InvocationCallback}
* after all the interceptors have finished, with or without an exception.
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final <C extends VisitableCommand> Object invokeNextAndFinally(InvocationContext ctx, C command,
InvocationFinallyAction<C> action) {
try {
Object rv;
Throwable throwable;
try {
if (nextDDInterceptor != null) {
rv = command.acceptVisitor(ctx, nextDDInterceptor);
} else {
rv = nextInterceptor.visitCommand(ctx, command);
}
throwable = null;
if (rv instanceof InvocationStage) {
return ((InvocationStage) rv).andFinally(ctx, command, action);
}
} catch (Throwable t) {
rv = null;
throwable = t;
}
action.accept(ctx, command, rv, throwable);
return throwable == null ? rv : new ExceptionSyncInvocationStage(throwable);
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
/**
* Invoke the next interceptor, possibly with a new command, and execute an {@link InvocationCallback}
* after all the interceptors have finished, with or without an exception.
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final <C extends VisitableCommand> Object invokeNextAndHandle(InvocationContext ctx, C command,
InvocationFinallyFunction<C> function) {
try {
Object rv;
Throwable throwable;
try {
if (nextDDInterceptor != null) {
rv = command.acceptVisitor(ctx, nextDDInterceptor);
} else {
rv = nextInterceptor.visitCommand(ctx, command);
}
throwable = null;
if (rv instanceof InvocationStage) {
return ((InvocationStage) rv).andHandle(ctx, command, function);
}
} catch (Throwable t) {
rv = null;
throwable = t;
}
return function.apply(ctx, command, rv, throwable);
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
/**
* Suspend the invocation until {@code valueFuture} completes, then return its result without running
* the remaining interceptors.
*
* <p>The caller can add a callback that will run when {@code valueFuture} completes, e.g.
* {@code asyncValue(v).thenApply(ctx, command, (rCtx, rCommand, rv, t) -> invokeNext(rCtx, rCommand))}.
* For this particular scenario, however, it's simpler to use
* {@link #asyncInvokeNext(InvocationContext, VisitableCommand, CompletionStage)}.</p>
*/
public static InvocationStage asyncValue(CompletionStage<?> valueFuture) {
return new SimpleAsyncInvocationStage(valueFuture);
}
/**
* Suspend the invocation until {@code delay} completes, then if successful invoke the next interceptor.
*
* <p>If {@code delay} is null or already completed normally, immediately invoke the next interceptor in this thread.</p>
*
* <p>If {@code delay} completes exceptionally, skip the next interceptor and continue with the exception.</p>
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final Object asyncInvokeNext(InvocationContext ctx, VisitableCommand command,
CompletionStage<?> delay) {
if (delay == null || CompletionStages.isCompletedSuccessfully(delay)) {
return invokeNext(ctx, command);
}
return asyncValue(delay).thenApply(ctx, command, invokeNextFunction);
}
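   /*
    * A minimal usage sketch, assuming a hypothetical non-blocking notification that returns a
    * CompletionStage; the next interceptor runs only after it completes, and no stage object
    * is allocated when the notification has already finished:
    *
    *    CompletionStage<Void> delay = notifier.notifyPreWrite(command.getKey()); // hypothetical API
    *    return asyncInvokeNext(ctx, command, delay);
    */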
/**
* Suspend the invocation until {@code invocationStage} completes, then if successful invoke the next interceptor.
*
* <p>If {@code invocationStage} completes exceptionally, skip the next interceptor and continue with the exception.</p>
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final Object asyncInvokeNext(InvocationContext ctx, VisitableCommand command,
InvocationStage invocationStage) {
return invocationStage.thenApply(ctx, command, invokeNextFunction);
}
/**
* Suspend invocation until all {@code delays} complete, then if successful invoke the next interceptor.
* If the list is empty or null, invoke the next interceptor immediately.
*
* <p>If any of {@code delays} completes exceptionally, skip the next interceptor and continue with the exception.</p>
*
* <p>You need to wrap the result with {@link #makeStage(Object)} if you need to add another handler.</p>
*/
public final Object asyncInvokeNext(InvocationContext ctx, VisitableCommand command,
Collection<? extends CompletionStage<?>> delays) {
if (delays == null || delays.isEmpty()) {
return invokeNext(ctx, command);
} else if (delays.size() == 1) {
return asyncInvokeNext(ctx, command, delays.iterator().next());
} else {
CompletableFuture<Void> delay = CompletableFuture.allOf(delays.stream()
.map(CompletionStage::toCompletableFuture)
.toArray(CompletableFuture[]::new));
return asyncInvokeNext(ctx, command, delay);
}
}
/**
    * Return the value if {@code throwable == null}, throw the exception otherwise.
*/
public static Object valueOrException(Object rv, Throwable throwable) throws Throwable {
if (throwable == null) {
return rv;
} else {
throw throwable;
}
}
/**
* Encode the result of an {@link #invokeNext(InvocationContext, VisitableCommand)} in an {@link InvocationStage}.
*
    * <p>Does not create a new instance if the result is already an {@code InvocationStage}.
*/
public static InvocationStage makeStage(Object rv) {
if (rv instanceof InvocationStage) {
return (InvocationStage) rv;
} else {
return new SyncInvocationStage(rv);
}
}
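   /*
    * A minimal sketch of makeStage(), assuming a hypothetical subclass that must attach another
    * handler to whatever invokeNext() returned, be it a plain value or an InvocationStage:
    *
    *    Object rv = invokeNext(ctx, command);
    *    return makeStage(rv).andFinally(ctx, command,
    *          (rCtx, rCommand, rRv, t) -> releaseResources(rCommand)); // hypothetical helper
    */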
/**
    * Returns an {@link InvocationStage} if the provided {@link CompletionStage} is not yet completed or has
    * completed exceptionally; otherwise (the stage is {@code null} or completed successfully) the sync value
    * is returned directly.
    * @param stage stage to wait for, may be {@code null}
    * @param syncValue value to return directly, or as the result of the returned stage
    * @return invocation stage or sync value
*/
public static Object delayedValue(CompletionStage<?> stage, Object syncValue) {
if (stage != null) {
CompletableFuture<?> future = stage.toCompletableFuture();
if (!future.isDone()) {
return asyncValue(stage.thenApply(v -> syncValue));
}
if (future.isCompletedExceptionally()) {
return asyncValue(stage);
}
}
return syncValue;
}
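   /*
    * A minimal usage sketch (the listener call is hypothetical): when the stage has already
    * completed successfully, the plain value is returned and no stage is allocated:
    *
    *    CompletionStage<Void> notified = listeners.notifyModified(key, value); // hypothetical
    *    return delayedValue(notified, previousValue);
    */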
/**
    * This method should be used instead of {@link #delayedValue(CompletionStage, Object)} when an
    * {@link InvocationFinallyFunction} is used, so that a pending exception is propagated properly.
    * @param stage stage to wait for, may be {@code null}
    * @param syncValue value to return when no exception is present
    * @param throwable exception thrown by the previous stage or callback, if any
    * @return invocation stage or sync value
*/
public static Object delayedValue(CompletionStage<?> stage, Object syncValue, Throwable throwable) {
if (throwable == null) {
return delayedValue(stage, syncValue);
}
if (stage != null) {
CompletableFuture<?> future = stage.toCompletableFuture();
if (!future.isDone() || future.isCompletedExceptionally()) {
return asyncValue(
stage.handle((ignore, t) -> {
if (t != null) {
throwable.addSuppressed(t);
}
return null;
}).thenCompose(ignore -> CompletableFuture.failedFuture(throwable))
);
}
}
return new ExceptionSyncInvocationStage(throwable);
}
/**
    * The same as {@link #delayedValue(CompletionStage, Object)}, except that it optimizes the case where the
    * return value is null.
* @param stage wait for completion of this if not null
* @return invocation stage or null sync value
*/
public static Object delayedNull(CompletionStage<Void> stage) {
      // If the stage is null (meaning we didn't notify) or already completed, there is no reason to create a stage instance
if (stage == null || CompletionStages.isCompletedSuccessfully(stage)) {
return null;
} else {
return asyncValue(stage);
}
}
protected static boolean isSuccessfullyDone(Object maybeStage) {
if (maybeStage instanceof InvocationStage) {
InvocationStage stage = (InvocationStage) maybeStage;
return stage.isDone() && !stage.toCompletableFuture().isCompletedExceptionally();
}
return true;
}
}
| 15,206
| 40.663014
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/SyncInvocationStage.java
|
package org.infinispan.interceptors;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* @author Dan Berindei
* @since 9.0
*/
public class SyncInvocationStage extends InvocationStage {
   static final SyncInvocationStage COMPLETED_NULL_STAGE = new SyncInvocationStage();
private final Object rv;
public SyncInvocationStage(Object rv) {
this.rv = rv;
}
public SyncInvocationStage() {
this.rv = null;
}
@Override
public Object get() throws Throwable {
return rv;
}
@Override
public boolean isDone() {
return true;
}
@Override
public CompletableFuture<Object> toCompletableFuture() {
return CompletableFuture.completedFuture(rv);
}
@Override
public <C extends VisitableCommand> Object thenApply(InvocationContext ctx, C command,
InvocationSuccessFunction<C> function) {
try {
return function.apply(ctx, command, rv);
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
@Override
public <C extends VisitableCommand> Object thenAccept(InvocationContext ctx, C command,
InvocationSuccessAction<C> action) {
return thenAcceptMakeStage(ctx, command, action);
}
   @Override
   public <C extends VisitableCommand> Object andExceptionally(InvocationContext ctx, C command,
InvocationExceptionFunction<C> function) {
return this;
}
   @Override
   public <C extends VisitableCommand> Object andFinally(InvocationContext ctx, C command,
InvocationFinallyAction<C> action) {
return andFinallyMakeStage(ctx, command, action);
}
   @Override
   public <C extends VisitableCommand> Object andHandle(InvocationContext ctx, C command,
InvocationFinallyFunction<C> function) {
try {
return function.apply(ctx, command, rv, null);
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
@Override
public <C extends VisitableCommand> Object addCallback(InvocationContext ctx, C command, InvocationCallback<C> function) {
try {
return function.apply(ctx, command, rv, null);
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
/**
* After the current stage completes successfully, invoke {@code function} and return its result.
*
* The result may be either a plain value, or a new {@link InvocationStage}.
*/
public <C extends VisitableCommand> InvocationStage thenApplyMakeStage(InvocationContext ctx, C command,
InvocationSuccessFunction<C> function) {
try {
return makeStage(function.apply(ctx, command, rv));
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
public <C extends VisitableCommand> InvocationStage thenAcceptMakeStage(InvocationContext ctx, C command,
InvocationSuccessAction<C> action) {
try {
action.accept(ctx, command, rv);
return this;
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
public <C extends VisitableCommand> InvocationStage andExceptionallyMakeStage(InvocationContext ctx, C command,
InvocationExceptionFunction<C> function) {
return this;
}
public <C extends VisitableCommand> InvocationStage andFinallyMakeStage(InvocationContext ctx, C command,
InvocationFinallyAction<C> action) {
try {
action.accept(ctx, command, rv, null);
return this;
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
public <C extends VisitableCommand> InvocationStage andHandleMakeStage(InvocationContext ctx, C command,
InvocationFinallyFunction<C> function) {
try {
return makeStage(function.apply(ctx, command, rv, null));
} catch (Throwable throwable) {
return new ExceptionSyncInvocationStage(throwable);
}
}
@Override
public Object thenReturn(InvocationContext ctx, VisitableCommand command, Object returnValue) {
return returnValue;
}
@Override
public String toString() {
return "SyncInvocationStage(" + rv + ')';
}
}
| 4,672
| 31.908451
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/AsyncInterceptor.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commons.util.Experimental;
import org.infinispan.context.InvocationContext;
/**
* Interface for sequential interceptors.
*
* @author Dan Berindei
* @since 9.0
*/
@Experimental
public interface AsyncInterceptor {
/**
* Perform some work for a command invocation.
*
* The interceptor is responsible for invoking the next interceptor in the chain, using
* {@link BaseAsyncInterceptor#invokeNext(InvocationContext, VisitableCommand)} or the other methods in
* {@link BaseAsyncInterceptor}.
*
* @return Either a regular value, or an {@link InvocationStage} created by the {@link BaseAsyncInterceptor} methods.
*/
Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable;
/**
* Sets up the interceptor. Do not call explicitly.
*/
void setNextInterceptor(AsyncInterceptor interceptorStage);
}
| 983
| 30.741935
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/DDAsyncInterceptor.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.Visitor;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.read.EntrySetCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.read.KeySetCommand;
import org.infinispan.commands.read.SizeCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.expiration.impl.TouchCommand;
/**
* Interface for async interceptors using double-dispatch.
*
* @author Dan Berindei
* @since 9.0
*/
public abstract class DDAsyncInterceptor extends BaseAsyncInterceptor implements Visitor {
@Override
public final Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
return command.acceptVisitor(ctx, this);
}
protected Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable {
return invokeNext(ctx, command);
}
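   /*
    * A minimal sketch of a double-dispatch subclass, hypothetical and for illustration only;
    * every command type except the overridden one falls through to handleDefault():
    *
    *    public class PutCountingInterceptor extends DDAsyncInterceptor {
    *       private final AtomicLong puts = new AtomicLong(); // java.util.concurrent.atomic
    *
    *       @Override
    *       public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) {
    *          puts.incrementAndGet();
    *          return invokeNext(ctx, command);
    *       }
    *    }
    */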
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitEvictCommand(InvocationContext ctx, EvictCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitSizeCommand(InvocationContext ctx, SizeCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitKeySetCommand(InvocationContext ctx, KeySetCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitEntrySetCommand(InvocationContext ctx, EntrySetCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitInvalidateL1Command(InvocationContext ctx, InvalidateL1Command command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitUnknownCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx,
WriteOnlyManyEntriesCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command)
throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx,
ReadWriteManyEntriesCommand command) throws Throwable {
return handleDefault(ctx, command);
}
@Override
public Object visitTouchCommand(InvocationContext ctx, TouchCommand command) throws Throwable {
return handleDefault(ctx, command);
}
}
| 8,454
| 34.52521
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationFinallyFunction.java
|
package org.infinispan.interceptors;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* Callback interface for {@link BaseAsyncInterceptor#invokeNextAndHandle(InvocationContext, VisitableCommand, InvocationFinallyFunction)}.
*
* @author Dan Berindei
* @since 9.0
*/
@FunctionalInterface
public interface InvocationFinallyFunction<C extends VisitableCommand> extends InvocationCallback<C> {
}
| 452
| 29.2
| 139
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/InvocationStage.java
|
package org.infinispan.interceptors;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
/**
* A partial command invocation, either completed or in progress.
* <p>
* It is similar to a {@link java.util.concurrent.CompletionStage}, but it allows more callback functions
* to be stateless by passing the context and the invoked command as parameters.
* <p>
* Unlike {@link java.util.concurrent.CompletionStage}, adding a callback <em>can</em> delay the completion
* of the initial stage and change its result.
*
* @author Dan Berindei
* @since 9.0
*/
public abstract class InvocationStage {
/**
* Wait for the invocation to complete and return its value.
*
* @throws Throwable Any exception raised during the invocation.
*/
public abstract Object get() throws Throwable;
/**
* @return {@code true} if the invocation is complete.
*/
public abstract boolean isDone();
/**
* {@link CompletableFuture} conversion.
*/
public abstract CompletableFuture<Object> toCompletableFuture();
/**
* After the current stage completes successfully, invoke {@code function} and return its result.
* <p>
* The result may be either a plain value, {@code this}, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> Object thenApply(InvocationContext ctx, C command, InvocationSuccessFunction<C> function) {
return addCallback(ctx, command, function);
}
/**
* After the current stage completes successfully, invoke {@code action}.
* <p>
* The result may be either a plain value, {@code this}, or a new {@link InvocationStage}.
* If {@code action} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> Object thenAccept(InvocationContext ctx, C command, InvocationSuccessAction<C> action) {
return addCallback(ctx, command, action);
}
/**
* After the current stage completes exceptionally, invoke {@code function} and return its result.
* <p>
* The result may be either a plain value, {@code this}, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> Object andExceptionally(InvocationContext ctx, C command,
InvocationExceptionFunction<C> function) {
return addCallback(ctx, command, function);
}
/**
* After the current stage completes, invoke {@code action}.
* <p>
* The result may be either a plain value, {@code this}, or a new {@link InvocationStage}.
* If {@code action} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> Object andFinally(InvocationContext ctx, C command, InvocationFinallyAction<C> action) {
return addCallback(ctx, command, action);
}
/**
* After the current stage completes, invoke {@code function} and return its result.
* <p>
* The result may be either a plain value, {@code this}, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> Object andHandle(InvocationContext ctx, C command, InvocationFinallyFunction<C> function) {
return addCallback(ctx, command, function);
}
/**
* After the current stage completes, invoke {@code function} and return its result.
* <p>
* The result may be either a plain value, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public abstract <C extends VisitableCommand> Object addCallback(InvocationContext ctx, C command, InvocationCallback<C> function);
/**
* After the current stage completes successfully, invoke {@code function} and return its result.
* <p>
* The result may be either {@code this}, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> InvocationStage thenApplyMakeStage(InvocationContext ctx, C command,
InvocationSuccessFunction<C> function) {
return makeStage(thenApply(ctx, command, function));
}
/**
* After the current stage completes successfully, invoke {@code action}.
* <p>
* The result may be either {@code this}, or a new {@link InvocationStage}.
* If {@code action} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> InvocationStage thenAcceptMakeStage(InvocationContext ctx, C command,
InvocationSuccessAction<C> action) {
return makeStage(thenAccept(ctx, command, action));
}
/**
* After the current stage completes exceptionally, invoke {@code function} and return its result.
* <p>
* The result may be either {@code this}, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> InvocationStage andExceptionallyMakeStage(InvocationContext ctx, C command,
InvocationExceptionFunction<C> function) {
return makeStage(andExceptionally(ctx, command, function));
}
/**
* After the current stage completes, invoke {@code action}.
* <p>
* The result may be either {@code this}, or a new {@link InvocationStage}.
* If {@code action} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> InvocationStage andFinallyMakeStage(InvocationContext ctx, C command,
InvocationFinallyAction<C> action) {
return makeStage(andFinally(ctx, command, action));
}
/**
* After the current stage completes, invoke {@code function} and return its result.
* <p>
* The result may be either {@code this}, or a new {@link InvocationStage}.
* If {@code function} throws an exception, the result {@link InvocationStage} will complete with the same exception.
*/
public <C extends VisitableCommand> InvocationStage andHandleMakeStage(InvocationContext ctx, C command,
InvocationFinallyFunction<C> function) {
return makeStage(andHandle(ctx, command, function));
}
/**
* If {@code maybeStage} is not an {@code InvocationStage}, wrap it, otherwise cast it to an {@code InvocationStage}.
*/
public static InvocationStage makeStage(Object maybeStage) {
if (maybeStage instanceof InvocationStage) {
return (InvocationStage) maybeStage;
} else {
return new SyncInvocationStage(maybeStage);
}
}
/**
* @return an {@code InvocationStage} instance completed successfully with value {@code null}.
*/
public static InvocationStage completedNullStage() {
return SyncInvocationStage.COMPLETED_NULL_STAGE;
}
/**
* Overrides the return value of this {@link InvocationStage} if it is completed successfully.
*
    * The result may be either {@code returnValue}, a new {@link InvocationStage}, or {@code this}.
*/
public Object thenReturn(InvocationContext ctx, VisitableCommand command, Object returnValue) {
return thenApply(ctx, command, (rCtx, rCommand, rv) -> returnValue);
}
}
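/*
 * A minimal chaining sketch, assuming a hypothetical interceptor; passing the context and
 * command into each callback keeps the lambdas free of captured state:
 *
 *    InvocationStage stage = InvocationStage.makeStage(invokeNext(ctx, command));
 *    return stage.thenApply(ctx, command,
 *          (rCtx, rCommand, rv) -> normalize(rv)); // normalize() is illustrative
 */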
| 8,030
| 43.865922
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/ReadWriteManyEntriesHelper.java
|
package org.infinispan.interceptors.distribution;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.util.ReadOnlySegmentAwareMap;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.transport.Address;
class ReadWriteManyEntriesHelper extends WriteManyCommandHelper<ReadWriteManyEntriesCommand, Map<Object, Object>, Map.Entry<Object, Object>> {
ReadWriteManyEntriesHelper(Function<WriteManyCommandHelper<ReadWriteManyEntriesCommand, ?, ?>, InvocationSuccessFunction<ReadWriteManyEntriesCommand>> createRemoteCallback) {
super(createRemoteCallback);
}
@Override
public ReadWriteManyEntriesCommand copyForLocal(ReadWriteManyEntriesCommand cmd, Map<Object, Object> entries) {
return new ReadWriteManyEntriesCommand(cmd).withArguments(entries);
}
@Override
public ReadWriteManyEntriesCommand copyForPrimary(ReadWriteManyEntriesCommand cmd, LocalizedCacheTopology topology, IntSet segments) {
return new ReadWriteManyEntriesCommand(cmd)
.withArguments(new ReadOnlySegmentAwareMap<>(cmd.getArguments(), topology, segments));
}
@Override
public ReadWriteManyEntriesCommand copyForBackup(ReadWriteManyEntriesCommand cmd, LocalizedCacheTopology topology,
Address target, IntSet segments) {
ReadWriteManyEntriesCommand copy = new ReadWriteManyEntriesCommand(cmd)
            .withArguments(new ReadOnlySegmentAwareMap<>(cmd.getArguments(), topology, segments));
copy.setForwarded(true);
return copy;
}
@Override
public Collection<Map.Entry<Object, Object>> getItems(ReadWriteManyEntriesCommand cmd) {
return cmd.getArguments().entrySet();
}
@Override
public Object item2key(Map.Entry<Object, Object> entry) {
return entry.getKey();
}
@Override
public Map<Object, Object> newContainer() {
      // Make sure that iteration over the container is ordered
return new LinkedHashMap<>();
}
@Override
public void accumulate(Map<Object, Object> map, Map.Entry<Object, Object> entry) {
map.put(entry.getKey(), entry.getValue());
}
@Override
public int containerSize(Map<Object, Object> map) {
return map.size();
}
@Override
public Iterable<Object> toKeys(Map<Object, Object> map) {
return map.keySet();
}
@Override
public boolean shouldRegisterRemoteCallback(ReadWriteManyEntriesCommand cmd) {
return !cmd.isForwarded();
}
@Override
public Object transformResult(Object[] results) {
return results == null ? null : Arrays.asList(results);
}
}
| 2,925
| 34.682927
| 177
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/WriteManyCommandHelper.java
|
package org.infinispan.interceptors.distribution;
import java.util.Collection;
import java.util.function.Function;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.transport.Address;
public abstract class WriteManyCommandHelper<C extends WriteCommand, Container, Item> {
protected final InvocationSuccessFunction<C> remoteCallback;
protected WriteManyCommandHelper(Function<WriteManyCommandHelper<C, ?, ?>, InvocationSuccessFunction<C>> createRemoteCallback) {
this.remoteCallback = createRemoteCallback.apply(this);
}
public InvocationSuccessFunction<C> getRemoteCallback() {
return remoteCallback;
}
public abstract C copyForLocal(C cmd, Container container);
public abstract C copyForPrimary(C cmd, LocalizedCacheTopology topology, IntSet segments);
public abstract C copyForBackup(C cmd, LocalizedCacheTopology topology, Address target, IntSet segments);
public abstract Collection<Item> getItems(C cmd);
public abstract Object item2key(Item item);
public abstract Container newContainer();
public abstract void accumulate(Container container, Item item);
public abstract int containerSize(Container container);
public abstract Iterable<Object> toKeys(Container container);
public abstract boolean shouldRegisterRemoteCallback(C cmd);
public abstract Object transformResult(Object[] results);
}
| 1,572
| 33.955556
| 131
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/PrimaryOwnerOnlyCollector.java
|
package org.infinispan.interceptors.distribution;
import java.util.concurrent.CompletableFuture;
/**
* A {@link Collector} implementation that only waits for the primary owner.
*
* @author Pedro Ruivo
* @since 9.0
*/
public class PrimaryOwnerOnlyCollector<T> implements Collector<T> {
private final CompletableFuture<T> future;
public PrimaryOwnerOnlyCollector() {
future = new CompletableFuture<>();
}
public CompletableFuture<T> getFuture() {
return future;
}
@Override
public void primaryException(Throwable throwable) {
future.completeExceptionally(throwable);
}
@Override
public void primaryResult(T result, boolean success) {
future.complete(result);
}
}
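/*
 * A minimal usage sketch (the caller shown is hypothetical): the collector completes as soon
 * as the primary owner responds, without waiting for backup acknowledgements:
 *
 *    PrimaryOwnerOnlyCollector<Object> collector = new PrimaryOwnerOnlyCollector<>();
 *    collector.primaryResult(value, true);    // invoked when the primary replies
 *    Object v = collector.getFuture().join(); // observers wait on the future
 */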
| 730
| 21.151515
| 76
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/package-info.java
|
/**
* Interceptors dealing with command replication in distributed/replicated mode.
*
* @api.private
*/
package org.infinispan.interceptors.distribution;
| 158
| 21.714286
| 80
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/VersionedResults.java
|
package org.infinispan.interceptors.distribution;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.Util;
import org.infinispan.container.versioning.EntryVersion;
public class VersionedResults {
public final Object[] values;
public final EntryVersion[] versions;
public VersionedResults(Object[] values, EntryVersion[] versions) {
this.values = values;
this.versions = versions;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("VersionedResults{");
for (int i = 0; i < values.length; ++i) {
sb.append(values[i]).append(" (").append(versions[i]).append(')');
if (i != values.length - 1) sb.append(", ");
}
sb.append('}');
return sb.toString();
}
public static class Externalizer implements AdvancedExternalizer<VersionedResults> {
@Override
public Set<Class<? extends VersionedResults>> getTypeClasses() {
return Util.asSet(VersionedResults.class);
}
@Override
public Integer getId() {
return Ids.VERSIONED_RESULTS;
}
@Override
public void writeObject(ObjectOutput output, VersionedResults object) throws IOException {
output.writeInt(object.values.length);
// TODO: we could optimize this if all objects are of the same type
for (Object value : object.values) output.writeObject(value);
for (EntryVersion version : object.versions) output.writeObject(version);
}
@Override
public VersionedResults readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int length = input.readInt();
Object[] values = new Object[length];
for (int i = 0; i < length; ++i) {
values[i] = input.readObject();
}
EntryVersion[] versions = new EntryVersion[length];
for (int i = 0; i < length; ++i) {
versions[i] = (EntryVersion) input.readObject();
}
return new VersionedResults(values, versions);
}
}
}
| 2,244
| 32.014706
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/MergingCompletableFuture.java
|
package org.infinispan.interceptors.distribution;
import java.lang.reflect.Array;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Function;
import org.infinispan.statetransfer.OutdatedTopologyException;
class MergingCompletableFuture<T> extends CountDownCompletableFuture {
private final Function<T[], Object> transform;
protected final T[] results;
protected volatile boolean hasUnsureResponse;
protected volatile boolean lostData;
MergingCompletableFuture(int participants, T[] results, Function<T[], Object> transform) {
super(participants);
// results can be null if the command has flag IGNORE_RETURN_VALUE
this.results = results;
this.transform = transform;
}
static BiConsumer<MergingCompletableFuture<Object>, Object> moveListItemsToFuture(int myOffset) {
return (f, rv) -> moveListItemsToFuture(rv, f, myOffset);
}
static void moveListItemsToFuture(Object rv, MergingCompletableFuture<Object> f, int myOffset) {
Collection<?> items;
if (rv == null && f.results == null) {
return;
} else if (rv instanceof Map) {
items = ((Map) rv).entrySet();
} else if (rv instanceof Collection) {
items = (Collection<?>) rv;
} else if (rv != null && rv.getClass().isArray() && !rv.getClass().getComponentType().isPrimitive()) {
System.arraycopy(rv, 0, f.results, myOffset, Array.getLength(rv));
return;
} else {
f.completeExceptionally(new IllegalArgumentException("Unexpected result value " + rv));
return;
}
Iterator<?> it = items.iterator();
for (int i = 0; it.hasNext(); ++i) {
f.results[myOffset + i] = it.next();
}
}
@Override
protected Object result() {
      // If we've lost data but did not get any unsure responses, we can return the limited results.
      // If we got an unsure response but did not lose any data, there is no problem: another
      // response has delivered the results.
      // Only when the two combine do we throw OutdatedTopologyException and retry.
if (hasUnsureResponse && lostData) {
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
return transform == null || results == null ? null : transform.apply(results);
}
}
| 2,369
| 37.225806
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/L1TxInterceptor.java
|
package org.infinispan.interceptors.distribution;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* Interceptor that handles L1 logic for transactional caches.
*
* @author William Burns
*/
public class L1TxInterceptor extends L1NonTxInterceptor {
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
return performCommandWithL1WriteIfAble(ctx, command, false, true, true);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
      // TODO: need to figure out if we should do anything here - is the prepare/commit L1 invalidation sufficient?
return invokeNext(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return performCommandWithL1WriteIfAble(ctx, command, false, true, true);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return performCommandWithL1WriteIfAble(ctx, command, false, true, false);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return performCommandWithL1WriteIfAble(ctx, command, false, true, false);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return performCommandWithL1WriteIfAble(ctx, command, false, true, false);
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
if (command.isOnePhaseCommit() && shouldFlushL1(ctx)) {
return flushL1CachesAndInvokeNext(ctx, command);
}
return invokeNext(ctx, command);
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
if (shouldFlushL1(ctx)) {
return flushL1CachesAndInvokeNext(ctx, command);
}
return invokeNext(ctx, command);
}
@Override
protected boolean skipL1Lookup(FlagAffectedCommand command, Object key) {
      // TODO: need to skip L1 lookups when the command doesn't require the value to be returned, e.g. unsafe return values or write skew checks
return super.skipL1Lookup(command, key);
}
private boolean shouldFlushL1(TxInvocationContext ctx) {
return !ctx.getAffectedKeys().isEmpty();
}
private Object flushL1CachesAndInvokeNext(TxInvocationContext ctx, VisitableCommand command) {
CompletableFuture<?> f = (CompletableFuture<?>) l1Manager.flushCache(ctx.getAffectedKeys(), ctx.getOrigin(), true);
if (f != null && !f.isDone()) {
return asyncInvokeNext(ctx, command, f.exceptionally(throwable -> {
getLog().failedInvalidatingRemoteCache(throwable);
throw CompletableFutures.asCompletionException(throwable);
}));
} else {
return invokeNext(ctx, command);
}
}
}
| 3,764
| 38.21875
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/TxDistributionInterceptor.java
|
package org.infinispan.interceptors.distribution;
import static org.infinispan.transaction.impl.WriteSkewHelper.mergePrepareResponses;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.function.BiFunction;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.SegmentSpecificCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.functional.FunctionalCommand;
import org.infinispan.commands.functional.Mutation;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.TxReadOnlyKeyCommand;
import org.infinispan.commands.functional.TxReadOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.tx.TransactionBoundaryCommand;
import org.infinispan.commands.tx.VersionedCommitCommand;
import org.infinispan.commands.write.AbstractDataWriteCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.ValueMatcher;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.LocalTxInvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.functional.EntryView;
import org.infinispan.functional.impl.EntryViews;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.PrepareResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.CacheTopologyUtil;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Handles the distribution of the transactional caches.
*
* @author Mircea Markus
* @author Dan Berindei
*/
public class TxDistributionInterceptor extends BaseDistributionInterceptor {
private static final Log log = LogFactory.getLog(TxDistributionInterceptor.class);
private static final long SKIP_REMOTE_FLAGS = FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_REMOTE_LOOKUP;
@Inject PartitionHandlingManager partitionHandlingManager;
@Inject CommandsFactory commandsFactory;
private boolean forceRemoteReadForFunctionalCommands;
private final TxReadOnlyManyHelper txReadOnlyManyHelper = new TxReadOnlyManyHelper();
private final ReadWriteManyHelper readWriteManyHelper = new ReadWriteManyHelper();
private final ReadWriteManyEntriesHelper readWriteManyEntriesHelper = new ReadWriteManyEntriesHelper();
@Override
public void configure() {
super.configure();
      // When cross-site replication is enabled, we need to retrieve the previous value from the remote node
// even for functional commands; we will need to send the modified value to backup sites and therefore
// we need it in the context.
forceRemoteReadForFunctionalCommands = cacheConfiguration.sites().hasBackups();
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return handleTxWriteCommand(ctx, command, command.getKey());
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
      // Contrary to functional commands, compute() needs to return the new value produced by the remapping function.
      // Since we can assume that the old and new values are comparable in size, fetching the old value into the context
      // is acceptable, and it is more efficient when we execute further modifications than sending the return value.
return handleTxWriteCommand(ctx, command, command.getKey());
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
      // Contrary to functional commands, computeIfAbsent() needs to return the new value produced by the mapping function.
      // Since we can assume that the old and new values are comparable in size, fetching the old value into the context
      // is acceptable, and it is more efficient when we execute further modifications than sending the return value.
return handleTxWriteCommand(ctx, command, command.getKey());
}
private void updateMatcherForRetry(WriteCommand command) {
// The command is already included in PrepareCommand.modifications - when the command is executed on the remote
// owners it should not behave conditionally anymore because its success/failure is defined on originator.
command.setValueMatcher(command.isSuccessful() ? ValueMatcher.MATCH_ALWAYS : ValueMatcher.MATCH_NEVER);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return handleTxWriteCommand(ctx, command, command.getKey());
}
@Override
public Object visitRemoveExpiredCommand(InvocationContext ctx, RemoveExpiredCommand command) throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
if (command.hasAnyFlag(FlagBitSets.PUT_FOR_EXTERNAL_READ)) {
return handleNonTxWriteCommand(ctx, command);
}
return handleTxWriteCommand(ctx, command, command.getKey());
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return handleTxWriteManyEntriesCommand(ctx, command, command.getMap(),
(c, entries) -> new PutMapCommand(c).withMap(entries));
}
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command)
throws Throwable {
if (ctx.isOriginLocal()) {
TxInvocationContext<LocalTransaction> localTxCtx = (TxInvocationContext<LocalTransaction>) ctx;
Collection<Address> affectedNodes = CacheTopologyUtil.checkTopology(command, getCacheTopology()).getWriteOwners(command.getKeys());
localTxCtx.getCacheTransaction().locksAcquired(affectedNodes);
log.tracef("Registered remote locks acquired %s", affectedNodes);
RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
MapResponseCollector collector = MapResponseCollector.ignoreLeavers(affectedNodes.size());
CompletionStage<Map<Address, Response>> remoteInvocation = isReplicated ?
rpcManager.invokeCommandOnAll(command, collector, rpcOptions) :
rpcManager.invokeCommand(affectedNodes, command, collector, rpcOptions);
return asyncValue(remoteInvocation.thenApply(responses -> {
checkTxCommandResponses(responses, command, localTxCtx, localTxCtx.getCacheTransaction().getRemoteLocksAcquired(), null);
return null;
}));
}
return invokeNext(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return handleTxFunctionalCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) throws Throwable {
return handleTxFunctionalCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) throws Throwable {
return handleTxFunctionalCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) throws Throwable {
return handleTxWriteManyEntriesCommand(ctx, command, command.getArguments(), (c, entries) -> new WriteOnlyManyEntriesCommand(c).withArguments(entries));
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) throws Throwable {
return handleTxFunctionalCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) throws Throwable {
return handleTxWriteManyCommand(ctx, command, command.getAffectedKeys(), (c, keys) -> new WriteOnlyManyCommand(c).withKeys(keys));
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
if (ctx.isOriginLocal()) {
if (forceRemoteReadForFunctionalCommands && !command.hasAnyFlag(FlagBitSets.SKIP_XSITE_BACKUP)) {
CompletionStage<Void> cf = remoteGetMany(ctx, command, command.getAffectedKeys());
return asyncInvokeNext(ctx, command, cf);
} else {
return handleFunctionalReadManyCommand(ctx, command, readWriteManyHelper);
}
} else {
return handleTxWriteManyCommand(ctx, command, command.getAffectedKeys(), readWriteManyHelper::copyForLocal);
}
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) throws Throwable {
if (ctx.isOriginLocal()) {
if (forceRemoteReadForFunctionalCommands && !command.hasAnyFlag(FlagBitSets.SKIP_XSITE_BACKUP)) {
CompletionStage<Void> cf = remoteGetMany(ctx, command, command.getAffectedKeys());
return asyncInvokeNext(ctx, command, cf);
} else {
return handleFunctionalReadManyCommand(ctx, command, readWriteManyEntriesHelper);
}
} else {
return handleTxWriteManyEntriesCommand(ctx, command, command.getArguments(),
(c, entries) -> new ReadWriteManyEntriesCommand<>(c).withArguments(entries));
}
}
// ---- TX boundary commands
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
return handleSecondPhaseCommand(ctx, command);
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
if (!ctx.isOriginLocal()) {
return invokeNext(ctx, command);
}
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
if (!shouldInvokeRemoteTxCommand((TxInvocationContext) rCtx)) {
return rv;
}
TxInvocationContext<LocalTransaction> localTxCtx = (TxInvocationContext<LocalTransaction>) rCtx;
LocalTransaction localTx = localTxCtx.getCacheTransaction();
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(rCommand, getCacheTopology());
Collection<Address> writeOwners = cacheTopology.getWriteOwners(localTxCtx.getAffectedKeys());
localTx.locksAcquired(writeOwners);
Collection<Address> recipients = isReplicated ? null : localTx.getCommitNodes(writeOwners, cacheTopology);
CompletionStage<Object> remotePrepare =
prepareOnAffectedNodes(localTxCtx, rCommand, recipients);
return asyncValue(remotePrepare);
});
}
protected CompletionStage<Object> prepareOnAffectedNodes(TxInvocationContext<?> ctx, PrepareCommand command,
Collection<Address> recipients) {
try {
CompletionStage<Map<Address, Response>> remoteInvocation;
if (recipients != null) {
MapResponseCollector collector =
MapResponseCollector.ignoreLeavers(recipients.size());
remoteInvocation = rpcManager.invokeCommand(recipients, command, collector, rpcManager.getSyncRpcOptions());
} else {
MapResponseCollector collector =
MapResponseCollector.ignoreLeavers(rpcManager.getMembers().size());
remoteInvocation = rpcManager.invokeCommandOnAll(command, collector, rpcManager.getSyncRpcOptions());
}
return remoteInvocation.handle((responses, t) -> {
transactionRemotelyPrepared(ctx);
CompletableFutures.rethrowExceptionIfPresent(t);
PrepareResponse prepareResponse = new PrepareResponse();
checkTxCommandResponses(responses, command, (LocalTxInvocationContext) ctx, recipients, prepareResponse);
for (Response r : responses.values()) mergePrepareResponses(r, prepareResponse);
return prepareResponse;
});
} catch (Throwable t) {
transactionRemotelyPrepared(ctx);
throw t;
}
}
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
return handleSecondPhaseCommand(ctx, command);
}
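/**
* Handles the second phase of a transaction (commit or rollback): if remote invocation is required, the command
* is sent synchronously to the commit nodes (or to all members when replicated) and the responses are validated
* before returning.
*/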
private Object handleSecondPhaseCommand(TxInvocationContext ctx, TransactionBoundaryCommand command) {
if (shouldInvokeRemoteTxCommand(ctx)) {
Collection<Address> recipients = getCommitNodes(ctx, command);
CompletionStage<Map<Address, Response>> remoteInvocation;
if (recipients != null) {
MapResponseCollector collector = MapResponseCollector.ignoreLeavers(recipients.size());
remoteInvocation = rpcManager.invokeCommand(recipients, command, collector, rpcManager.getSyncRpcOptions());
} else {
MapResponseCollector collector = MapResponseCollector.ignoreLeavers();
remoteInvocation = rpcManager.invokeCommandOnAll(command, collector, rpcManager.getSyncRpcOptions());
}
InvocationStage remoteResponse = asyncValue(remoteInvocation.thenAccept(responses ->
checkTxCommandResponses(responses, command, ctx, recipients, null)));
return invokeNextThenApply(ctx, command, remoteResponse::thenReturn);
}
return invokeNext(ctx, command);
}
private Collection<Address> getCommitNodes(TxInvocationContext ctx, TopologyAffectedCommand command) {
LocalTransaction localTx = (LocalTransaction) ctx.getCacheTransaction();
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
Collection<Address> affectedNodes =
isReplicated ? null : cacheTopology.getWriteOwners(ctx.getAffectedKeys());
return localTx.getCommitNodes(affectedNodes, cacheTopology);
}
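/**
* Validates the responses of a transaction boundary command. A {@code CacheNotFoundResponse} from a current
* member either registers a partial commit/rollback with the {@link PartitionHandlingManager} or forces a retry
* in the next topology; an {@code UnsureResponse} always forces a retry in the next topology.
*/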
protected void checkTxCommandResponses(Map<Address, Response> responseMap,
TransactionBoundaryCommand command, TxInvocationContext<LocalTransaction> context,
Collection<Address> recipients, PrepareResponse prepareResponse) {
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
for (Map.Entry<Address, Response> e : responseMap.entrySet()) {
Address recipient = e.getKey();
Response response = e.getValue();
mergePrepareResponses(response, prepareResponse);
if (response == CacheNotFoundResponse.INSTANCE) {
// Prepare/Commit commands are sent to all affected nodes, including the ones that left the cluster.
// We must not register a partial commit when receiving a CacheNotFoundResponse from one of those.
if (!cacheTopology.getMembers().contains(recipient)) {
if (log.isTraceEnabled()) log.tracef("Ignoring response from node not targeted %s", recipient);
} else {
if (checkCacheNotFoundResponseInPartitionHandling(command, context, recipients)) {
if (log.isTraceEnabled()) log.tracef("Cache not running on node %s, or the node is missing. It will be handled by the PartitionHandlingManager", recipient);
return;
} else {
if (log.isTraceEnabled()) log.tracef("Cache not running on node %s, or the node is missing", recipient);
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
}
} else if (response == UnsureResponse.INSTANCE) {
if (log.isTraceEnabled()) log.tracef("Node %s has a newer topology id", recipient);
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
}
}
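/**
* Attempts to register the transaction as a partial commit/rollback with the {@link PartitionHandlingManager}.
* Returns {@code true} if the partition handling manager took ownership of completing the transaction.
*/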
private boolean checkCacheNotFoundResponseInPartitionHandling(TransactionBoundaryCommand command,
TxInvocationContext<LocalTransaction> context, Collection<Address> recipients) {
final GlobalTransaction globalTransaction = command.getGlobalTransaction();
final Collection<Object> lockedKeys = context.getLockedKeys();
if (command instanceof RollbackCommand) {
return partitionHandlingManager.addPartialRollbackTransaction(globalTransaction, recipients, lockedKeys);
} else if (command instanceof PrepareCommand) {
if (((PrepareCommand) command).isOnePhaseCommit()) {
return partitionHandlingManager.addPartialCommit1PCTransaction(globalTransaction, recipients, lockedKeys,
((PrepareCommand) command).getModifications());
}
} else if (command instanceof CommitCommand) {
Map<Object, IncrementableEntryVersion> newVersion = null;
if (command instanceof VersionedCommitCommand) {
newVersion = ((VersionedCommitCommand) command).getUpdatedVersions();
}
return partitionHandlingManager.addPartialCommit2PCTransaction(globalTransaction, recipients, lockedKeys, newVersion);
}
return false;
}
/**
* If we are within a transaction we won't do any replication, as replication is only performed at commit
* time. If the operation didn't originate locally, we won't do any replication either.
*/
private Object handleTxWriteCommand(InvocationContext ctx, AbstractDataWriteCommand command,
Object key) throws Throwable {
try {
if (!ctx.isOriginLocal()) {
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
// Ignore any remote command when we aren't the owner
if (!cacheTopology.isSegmentWriteOwner(command.getSegment())) {
return null;
}
}
CacheEntry entry = ctx.lookupEntry(command.getKey());
if (entry == null) {
if (isLocalModeForced(command) || command.hasAnyFlag(FlagBitSets.SKIP_REMOTE_LOOKUP) || !needsPreviousValue(ctx, command)) {
// in transactional mode, we always need the entry wrapped
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
// we need to retrieve the value locally regardless of load type; in transactional mode all operations
// execute on the origin.
// Also, operations that need the value on backups [delta write] need to do the remote lookup even on
// non-origin nodes.
Object result = asyncInvokeNext(ctx, command, remoteGetSingleKey(ctx, command, command.getKey(), true));
return makeStage(result)
.andFinally(ctx, command, (rCtx, rCommand, rv, t) ->
updateMatcherForRetry(rCommand));
}
}
// already wrapped, we can continue
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> updateMatcherForRetry(rCommand));
} catch (Throwable t) {
updateMatcherForRetry(command);
throw t;
}
}
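/**
* Wraps the entries relevant on this node into the context (fetching the previous values remotely when needed)
* and invokes a copy of the command that contains only those entries.
*/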
protected <C extends TopologyAffectedCommand & FlagAffectedCommand, K, V> Object
handleTxWriteManyEntriesCommand(InvocationContext ctx, C command, Map<K, V> entries,
BiFunction<C, Map<K, V>, C> copyCommand) {
boolean ignorePreviousValue = command.hasAnyFlag(SKIP_REMOTE_FLAGS) || command.loadType() == VisitableCommand.LoadType.DONT_LOAD;
Map<K, V> filtered = new HashMap<>(entries.size());
Collection<Object> remoteKeys = new ArrayList<>();
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
for (Map.Entry<K, V> e : entries.entrySet()) {
K key = e.getKey();
if (ctx.isOriginLocal() || cacheTopology.isWriteOwner(key)) {
if (ctx.lookupEntry(key) == null) {
if (ignorePreviousValue) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
remoteKeys.add(key);
}
}
filtered.put(key, e.getValue());
}
}
CompletionStage<Void> remoteGet = remoteGetMany(ctx, command, remoteKeys);
return asyncInvokeNext(ctx, copyCommand.apply(command, filtered), remoteGet);
}
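/**
* Key-based variant of {@link #handleTxWriteManyEntriesCommand}: wraps the relevant keys into the context
* (fetching the previous values remotely when needed) and invokes a copy of the command with only those keys.
*/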
protected <C extends VisitableCommand & FlagAffectedCommand & TopologyAffectedCommand, K> Object handleTxWriteManyCommand(
InvocationContext ctx, C command, Collection<K> keys, BiFunction<C, List<K>, C> copyCommand) {
boolean ignorePreviousValue = command.hasAnyFlag(SKIP_REMOTE_FLAGS) || command.loadType() == VisitableCommand.LoadType.DONT_LOAD;
List<K> filtered = new ArrayList<>(keys.size());
List<Object> remoteKeys = null;
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
for (K key : keys) {
if (ctx.isOriginLocal() || cacheTopology.isWriteOwner(key)) {
if (ctx.lookupEntry(key) == null) {
if (ignorePreviousValue) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
if (remoteKeys == null) {
remoteKeys = new ArrayList<>();
}
remoteKeys.add(key);
}
}
filtered.add(key);
}
}
CompletionStage<?> remoteGetMany = remoteKeys != null ? remoteGetMany(ctx, command, remoteKeys) : CompletableFutures.completedNull();
return asyncInvokeNext(ctx, copyCommand.apply(command, filtered), remoteGetMany);
}
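/**
* Handles a single-key functional write in a transaction. On the origin, if the entry is not in the context and
* cannot be ignored, a write owner fetches the value into the context, while a non-owner builds a
* {@link TxReadOnlyKeyCommand} carrying all previous mutations on the key and executes it remotely on the read
* owners, so that the function is applied on top of the transaction's pending modifications.
*/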
public <C extends AbstractDataWriteCommand & FunctionalCommand> Object handleTxFunctionalCommand(InvocationContext ctx, C command) {
Object key = command.getKey();
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
if (ctx.isOriginLocal()) {
CacheEntry entry = ctx.lookupEntry(key);
if (entry == null) {
if (command.hasAnyFlag(SKIP_REMOTE_FLAGS) || command.loadType() == VisitableCommand.LoadType.DONT_LOAD) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
return invokeNext(ctx, command);
}
int segment = command.getSegment();
DistributionInfo distributionInfo = cacheTopology.getSegmentDistribution(segment);
// If this node is a write owner we're obliged to apply the value locally even if we can't read it - otherwise
// we could have stale value after state transfer.
if (distributionInfo.isWriteOwner() || forceRemoteReadForFunctionalCommands && !command.hasAnyFlag(FlagBitSets.SKIP_XSITE_BACKUP)) {
return asyncInvokeNext(ctx, command, remoteGetSingleKey(ctx, command, key, true));
}
List<Mutation<Object, Object, ?>> mutationsOnKey = getMutationsOnKey((TxInvocationContext) ctx, command, key);
mutationsOnKey.add(command.toMutation(key));
TxReadOnlyKeyCommand remoteRead = commandsFactory.buildTxReadOnlyKeyCommand(key, null, mutationsOnKey, segment,
command.getParams(), command.getKeyDataConversion(), command.getValueDataConversion());
remoteRead.setTopologyId(command.getTopologyId());
CompletionStage<SuccessfulResponse> remoteGet =
rpcManager.invokeCommandStaggered(distributionInfo.readOwners(), remoteRead,
new RemoteGetSingleKeyCollector(), rpcManager.getSyncRpcOptions());
return asyncValue(remoteGet).thenApply(ctx, command, (rCtx, rCommand, response) -> {
Object responseValue = ((SuccessfulResponse) response).getResponseValue();
return unwrapFunctionalResultOnOrigin(rCtx, rCommand.getKey(), responseValue);
});
}
// It's possible that this is not an owner, but the entry was loaded from L1 - let the command run
return invokeNext(ctx, command);
} else {
if (!cacheTopology.isWriteOwner(key)) {
return null;
}
CacheEntry entry = ctx.lookupEntry(key);
if (entry == null) {
if (command.hasAnyFlag(SKIP_REMOTE_FLAGS) || command.loadType() == VisitableCommand.LoadType.DONT_LOAD) {
// in transactional mode, we always need the entry wrapped
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
return asyncInvokeNext(ctx, command, remoteGetSingleKey(ctx, command, command.getKey(), true));
}
}
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) ->
wrapFunctionalResultOnNonOriginOnReturn(rv, entry));
}
}
private boolean needsPreviousValue(InvocationContext ctx, FlagAffectedCommand command) {
switch (command.loadType()) {
case DONT_LOAD:
return false;
case PRIMARY:
// In a transactional cache, the result is determined on the origin
return ctx.isOriginLocal();
case OWNER:
return true;
default:
throw new IllegalStateException();
}
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command) throws Throwable {
return handleFunctionalReadManyCommand(ctx, command, txReadOnlyManyHelper);
}
@Override
protected ReadOnlyKeyCommand remoteReadOnlyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) {
if (!ctx.isInTxScope()) {
return command;
}
List<Mutation<Object, Object, ?>> mutations = getMutationsOnKey((TxInvocationContext) ctx, null, command.getKey());
return commandsFactory.buildTxReadOnlyKeyCommand(command.getKey(), command.getFunction(), mutations, command.getSegment(),
command.getParams(), command.getKeyDataConversion(), command.getValueDataConversion());
}
@Override
protected <C extends FlagAffectedCommand & TopologyAffectedCommand> CompletionStage<Void> remoteGetSingleKey(
InvocationContext ctx, C command, Object key, boolean isWrite) {
CompletionStage<Void> cf = super.remoteGetSingleKey(ctx, command, key, isWrite);
// If remoteGetSingleKey is executed on a non-origin node, the mutations list already contains all modifications
// and we are just trying to execute all of them from EntryWrappingInterceptor$EntryWrappingVisitor
if (!ctx.isOriginLocal() || !ctx.isInTxScope()) {
return cf;
}
List<Mutation<Object, Object, ?>> mutationsOnKey = getMutationsOnKey((TxInvocationContext) ctx, command instanceof WriteCommand ? (WriteCommand) command : null, key);
if (mutationsOnKey.isEmpty()) {
return cf;
}
return cf.thenCompose(ignore -> {
// TODO Dan: apply the modifications before wrapping the entry in the context
CompletionStage<Void> stage = entryFactory.wrapEntryForWriting(ctx, key, SegmentSpecificCommand.extractSegment(command, key, keyPartitioner),
false, true, CompletableFutures.completedNull());
return stage.thenRun(() -> {
MVCCEntry cacheEntry = (MVCCEntry) ctx.lookupEntry(key);
for (Mutation mutation : mutationsOnKey) {
EntryView.ReadWriteEntryView readWriteEntryView =
EntryViews.readWrite(cacheEntry, mutation.keyDataConversion(), mutation.valueDataConversion());
mutation.apply(readWriteEntryView);
cacheEntry.updatePreviousValue();
}
});
});
}
@Override
protected void handleRemotelyRetrievedKeys(InvocationContext ctx, WriteCommand appliedCommand, List<?> remoteKeys) {
if (!ctx.isInTxScope()) {
return;
}
List<List<Mutation<Object, Object, ?>>> mutations = getMutations(ctx, appliedCommand, remoteKeys);
if (mutations == null || mutations.isEmpty()) {
return;
}
Iterator<?> keysIterator = remoteKeys.iterator();
Iterator<List<Mutation<Object, Object, ?>>> mutationsIterator = mutations.iterator();
while (keysIterator.hasNext() && mutationsIterator.hasNext()) {
Object key = keysIterator.next();
CompletionStage<Void> stage = entryFactory.wrapEntryForWriting(ctx, key, keyPartitioner.getSegment(key), false, true, CompletableFutures.completedNull());
// We rely on the fact that when isOwner is false this never blocks
assert CompletionStages.isCompletedSuccessfully(stage);
MVCCEntry cacheEntry = (MVCCEntry) ctx.lookupEntry(key);
EntryView.ReadWriteEntryView readWriteEntryView = EntryViews.readWrite(cacheEntry, DataConversion.IDENTITY_KEY, DataConversion.IDENTITY_VALUE);
for (Mutation mutation : mutationsIterator.next()) {
mutation.apply(readWriteEntryView);
cacheEntry.updatePreviousValue();
}
}
assert !keysIterator.hasNext();
assert !mutationsIterator.hasNext();
}
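/**
* Collects the mutations already performed on the given key by the current transaction, stopping at
* {@code untilCommand} (used when replaying a prepared transaction).
*/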
private static List<Mutation<Object, Object, ?>> getMutationsOnKey(TxInvocationContext ctx, WriteCommand untilCommand, Object key) {
List<Mutation<Object, Object, ?>> mutations = new ArrayList<>();
// We don't use getAllModifications() because these mutations go remote, and local-only modifications
// should not affect them
for (WriteCommand write : ctx.getCacheTransaction().getModifications()) {
if (write == untilCommand) {
// We've reached this command in the modifications list; this happens when we're replaying a prepared
// transaction - see EntryWrappingInterceptor.wrapEntriesForPrepareAndApply
break;
}
if (write.getAffectedKeys().contains(key)) {
if (write instanceof FunctionalCommand) {
mutations.add(((FunctionalCommand) write).toMutation(key));
} else {
// A non-functional modification must have already retrieved the value into the context, so we should not
// do any remote reads!
throw new IllegalStateException("Attempt to remote functional read after non-functional modification! " +
"key=" + key + ", modification=" + write);
}
}
}
return mutations;
}
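/**
* Multi-key variant of {@link #getMutationsOnKey}: returns one mutation list per key (empty lists for keys
* without mutations), or {@code null} outside a transaction scope.
*/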
private static List<List<Mutation<Object, Object,?>>> getMutations(InvocationContext ctx, WriteCommand untilCommand, List<?> keys) {
if (!ctx.isInTxScope()) {
return null;
}
log.tracef("Looking up mutations for %s", keys);
TxInvocationContext txCtx = (TxInvocationContext) ctx;
List<List<Mutation<Object, Object,?>>> mutations = new ArrayList<>(keys.size());
for (int i = keys.size(); i > 0; --i) mutations.add(Collections.emptyList());
for (WriteCommand write : txCtx.getCacheTransaction().getModifications()) {
if (write == untilCommand) {
// We've reached this command in the modifications list; this happens when we're replaying a prepared
// transaction - see EntryWrappingInterceptor.wrapEntriesForPrepareAndApply
break;
}
for (int i = 0; i < keys.size(); ++i) {
Object key = keys.get(i);
if (write.getAffectedKeys().contains(key)) {
if (write instanceof FunctionalCommand) {
List<Mutation<Object, Object,?>> list = mutations.get(i);
if (list.isEmpty()) {
list = new ArrayList<>();
mutations.set(i, list);
}
list.add(((FunctionalCommand) write).toMutation(key));
} else {
// A non-functional modification must have already retrieved the value into the context, so we should not
// do any remote reads!
throw new IllegalStateException("Attempt to remote functional read after non-functional modification! " +
"key=" + key + ", modification=" + write);
}
}
}
}
return mutations;
}
private class TxReadOnlyManyHelper extends ReadOnlyManyHelper {
@Override
public ReadOnlyManyCommand copyForRemote(ReadOnlyManyCommand command, List<Object> keys, InvocationContext ctx) {
List<List<Mutation<Object, Object,?>>> mutations = getMutations(ctx, null, keys);
if (mutations == null) {
return new ReadOnlyManyCommand<>(command).withKeys(keys);
} else {
return new TxReadOnlyManyCommand(command, mutations).withKeys(keys);
}
}
}
private abstract class BaseFunctionalWriteHelper<C extends FunctionalCommand & WriteCommand> implements ReadManyCommandHelper<C> {
@Override
public Collection<?> keys(C command) {
return command.getAffectedKeys();
}
@Override
public ReadOnlyManyCommand<?, ?, ?> copyForRemote(C cmd, List<Object> keys, InvocationContext ctx) {
List<List<Mutation<Object, Object,?>>> mutations = getMutations(ctx, cmd, keys);
// a write command is always executed in a transactional scope
assert mutations != null;
for (int i = 0; i < keys.size(); ++i) {
List<Mutation<Object, Object,?>> list = mutations.get(i);
Mutation mutation = cmd.toMutation(keys.get(i));
if (list.isEmpty()) {
mutations.set(i, Collections.singletonList(mutation));
} else {
list.add(mutation);
}
}
return commandsFactory.buildTxReadOnlyManyCommand(keys, mutations, cmd.getParams(), cmd.getKeyDataConversion(), cmd.getValueDataConversion());
}
@Override
public void applyLocalResult(MergingCompletableFuture allFuture, Object rv) {
int pos = 0;
for (Object value : ((List) rv)) {
allFuture.results[pos++] = value;
}
}
@Override
public Object transformResult(Object[] results) {
return Arrays.asList(results);
}
@Override
public Object apply(InvocationContext rCtx, C rCommand, Object rv) throws Throwable {
return wrapFunctionalManyResultOnNonOrigin(rCtx, rCommand.getAffectedKeys(), ((List) rv).toArray());
}
@Override
public CompletionStage<Void> fetchRequiredKeys(LocalizedCacheTopology cacheTopology, Map<Address, List<Object>> requestedKeys, List<Object> availableKeys, InvocationContext ctx, C command) {
List<Object> fetchedKeys = null;
for (Map.Entry<Address, List<Object>> addressKeys : requestedKeys.entrySet()) {
for (Iterator<Object> iterator = addressKeys.getValue().iterator(); iterator.hasNext(); ) {
Object key = iterator.next();
if (cacheTopology.getDistribution(key).isWriteOwner()) {
iterator.remove();
availableKeys.add(key);
if (fetchedKeys == null) {
fetchedKeys = new ArrayList<>();
}
fetchedKeys.add(key);
}
}
}
if (fetchedKeys != null) {
return remoteGetMany(ctx, command, fetchedKeys);
} else {
return null;
}
}
}
private class ReadWriteManyHelper extends BaseFunctionalWriteHelper<ReadWriteManyCommand> {
@Override
public ReadWriteManyCommand copyForLocal(ReadWriteManyCommand command, List<Object> keys) {
return new ReadWriteManyCommand(command).withKeys(keys);
}
}
private class ReadWriteManyEntriesHelper extends BaseFunctionalWriteHelper<ReadWriteManyEntriesCommand> {
@Override
public ReadWriteManyEntriesCommand copyForLocal(ReadWriteManyEntriesCommand command, List<Object> keys) {
return new ReadWriteManyEntriesCommand(command).withArguments(filterEntries(command.getArguments(), keys));
}
private <K, V> Map<K, V> filterEntries(Map<K, V> originalEntries, List<K> keys) {
Map<K, V> entries = new HashMap<>(keys.size());
for (K key : keys) {
entries.put(key, originalEntries.get(key));
}
return entries;
}
}
}
| 39,371
| 49.868217
| 196
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/NonTxDistributionInterceptor.java
|
package org.infinispan.interceptors.distribution;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.BiConsumer;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.interceptors.InvocationFinallyAction;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.remoting.transport.impl.SingletonMapResponseCollector;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.util.CacheTopologyUtil;
/**
* Non-transactional interceptor used by distributed caches that support concurrent writes.
* It is implemented based on lock forwarding. E.g.
* - 'k' is written on node A, owners(k)={B,C}
* - A forwards the given command to B
* - B acquires a lock on 'k' then it forwards it to the remaining owners: C
* - C applies the change and returns to B (no lock acquisition is needed)
* - B applies the result as well, releases the lock and returns the result of the operation to A.
* <p>
* Note that even though this introduces an additional RPC (the forwarding), it behaves very well in
* conjunction with consistent-hash-aware Hot Rod clients, which connect directly to the lock owner.
*
* @author Mircea Markus
* @author Dan Berindei
* @since 8.1
*/
public class NonTxDistributionInterceptor extends BaseDistributionInterceptor {
private final PutMapHelper putMapHelper = new PutMapHelper(this::createRemoteCallback);
private final ReadWriteManyHelper readWriteManyHelper = new ReadWriteManyHelper(this::createRemoteCallback);
private final ReadWriteManyEntriesHelper readWriteManyEntriesHelper = new ReadWriteManyEntriesHelper(this::createRemoteCallback);
private final WriteOnlyManyEntriesHelper writeOnlyManyEntriesHelper = new WriteOnlyManyEntriesHelper(this::createRemoteCallback);
private final WriteOnlyManyHelper writeOnlyManyHelper = new WriteOnlyManyHelper(this::createRemoteCallback);
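/**
* Maps each member of the consistent hash to the set of segments it primarily owns; members without primary
* segments are omitted.
*/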
private Map<Address, IntSet> primaryOwnersOfSegments(ConsistentHash ch) {
Map<Address, IntSet> map = new HashMap<>(ch.getMembers().size());
for (Address member : ch.getMembers()) {
Set<Integer> segments = ch.getPrimarySegmentsForOwner(member);
if (!segments.isEmpty()) {
map.put(member, IntSets.from(segments));
}
}
return map;
}
// we're assuming that this function runs on the primary owner of the given segments
private Map<Address, IntSet> backupOwnersOfSegments(LocalizedCacheTopology topology, IntSet segments) {
Map<Address, IntSet> map = new HashMap<>(topology.getMembers().size() * 3 / 2);
if (topology.getReadConsistentHash().isReplicated()) {
// Use writeOwners to exclude zero-capacity members
Collection<Address> writeOwners = topology.getSegmentDistribution(0).writeOwners();
for (Address writeOwner : writeOwners) {
if (!writeOwner.equals(topology.getLocalAddress())) {
map.put(writeOwner, segments);
}
}
} else {
int numSegments = topology.getNumSegments();
for (PrimitiveIterator.OfInt iter = segments.iterator(); iter.hasNext(); ) {
int segment = iter.nextInt();
Collection<Address> backupOwners = topology.getSegmentDistribution(segment).writeBackups();
for (Address backupOwner : backupOwners) {
if (!backupOwner.equals(topology.getLocalAddress())) {
map.computeIfAbsent(backupOwner, o -> IntSets.mutableEmptySet(numSegments)).set(segment);
}
}
}
}
return map;
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws
Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command)
throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command)
throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command)
throws Throwable {
return handleReadWriteManyCommand(ctx, command, putMapHelper);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx,
WriteOnlyManyEntriesCommand command) throws Throwable {
return handleWriteOnlyManyCommand(ctx, command, writeOnlyManyEntriesHelper);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx,
WriteOnlyManyCommand command) throws Throwable {
return handleWriteOnlyManyCommand(ctx, command, writeOnlyManyHelper);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx,
ReadWriteManyCommand command) throws Throwable {
return handleReadWriteManyCommand(ctx, command, readWriteManyHelper);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx,
ReadWriteManyEntriesCommand command) throws Throwable {
return handleReadWriteManyCommand(ctx, command, readWriteManyEntriesHelper);
}
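/**
* On the originator, splits the command by the primary owner of each segment: the local part is executed
* in-line while per-owner copies are invoked remotely, with a countdown future completing when every part is
* done. On remote nodes, missing entries are wrapped and the command is simply executed.
*/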
private <C extends WriteCommand, Container, Item> Object handleWriteOnlyManyCommand(
InvocationContext ctx, C command, WriteManyCommandHelper<C, Container, Item> helper) throws Exception {
// TODO: due to the possible repetition of the operation (after OutdatedTopologyException is thrown)
// it is possible that the function will be applied multiple times on some of the nodes.
// There is no general solution for this ATM; a proper solution will probably record the CommandInvocationId
// in the entry and implement some housekeeping
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
ConsistentHash ch = cacheTopology.getWriteConsistentHash();
if (ctx.isOriginLocal()) {
Map<Address, IntSet> segmentMap = primaryOwnersOfSegments(ch);
CountDownCompletableFuture allFuture = new CountDownCompletableFuture(segmentMap.size());
// Go through all members, for this node invokeNext (if this node is an owner of some keys),
// for the others (that own some keys) issue a remote call.
// Everything is finished when allFuture is completed
for (Entry<Address, IntSet> pair : segmentMap.entrySet()) {
Address member = pair.getKey();
IntSet segments = pair.getValue();
handleSegmentsForWriteOnlyManyCommand(ctx, command, helper, allFuture, member, segments, cacheTopology);
}
return asyncValue(allFuture);
} else { // origin is not local
// check that we have all the data we need
return handleRemoteWriteOnlyManyCommand(ctx, command, helper);
}
}
private <C extends WriteCommand, Container, Item> void handleSegmentsForWriteOnlyManyCommand(
InvocationContext ctx, C command, WriteManyCommandHelper<C, Container, Item> helper,
CountDownCompletableFuture allFuture, Address member, IntSet segments, LocalizedCacheTopology topology) {
if (member.equals(rpcManager.getAddress())) {
Container myItems = filterAndWrap(ctx, command, segments, helper);
C localCommand = helper.copyForLocal(command, myItems);
localCommand.setTopologyId(command.getTopologyId());
// Local keys are backed up in the handler, and counters on allFuture are decremented when the backup
// calls complete.
invokeNextAndFinally(ctx, localCommand,
createLocalInvocationHandler(allFuture, segments, helper, (f, rv) -> {}, topology));
return;
}
C copy = helper.copyForPrimary(command, topology, segments);
copy.setTopologyId(command.getTopologyId());
int size = helper.getItems(copy).size();
if (size <= 0) {
allFuture.countDown();
return;
}
SingletonMapResponseCollector collector = SingletonMapResponseCollector.validOnly();
rpcManager.invokeCommand(member, copy, collector, rpcManager.getSyncRpcOptions())
.whenComplete((responseMap, throwable) -> {
if (throwable != null) {
allFuture.completeExceptionally(throwable);
} else {
// FIXME Dan: The response cannot be a CacheNotFoundResponse at this point
if (getSuccessfulResponseOrFail(responseMap, allFuture,
rsp -> allFuture.completeExceptionally(OutdatedTopologyException.RETRY_NEXT_TOPOLOGY)) == null) {
return;
}
allFuture.countDown();
}
});
}
private <C extends WriteCommand, Item> Object handleRemoteWriteOnlyManyCommand(
InvocationContext ctx, C command, WriteManyCommandHelper<C, ?, Item> helper) {
for (Object key : command.getAffectedKeys()) {
if (ctx.lookupEntry(key) == null) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
}
}
if (helper.shouldRegisterRemoteCallback(command)) {
return invokeNextThenApply(ctx, command, helper.getRemoteCallback());
} else {
return invokeNext(ctx, command);
}
}
private <C extends WriteCommand, Container, Item> Container filterAndWrap(
InvocationContext ctx, C command, IntSet segments,
WriteManyCommandHelper<C, Container, Item> helper) {
// Filter command keys/entries into the collection, and wrap null for those that are not in context yet
Container myItems = helper.newContainer();
for (Item item : helper.getItems(command)) {
Object key = helper.item2key(item);
if (segments.contains(keyPartitioner.getSegment(key))) {
helper.accumulate(myItems, item);
CacheEntry entry = ctx.lookupEntry(key);
if (entry == null) {
// executed only by write-only commands
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
}
}
}
return myItems;
}
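/**
* Read-write variant of {@link #handleWriteOnlyManyCommand}: in addition to splitting the command by primary
* owner, it merges the per-owner return values into a single result array at the proper offsets.
*/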
protected <C extends WriteCommand, Container, Item> Object handleReadWriteManyCommand(
InvocationContext ctx, C command, WriteManyCommandHelper<C, Item, Container> helper) throws Exception {
// TODO: due to the possible repetition of the operation (after OutdatedTopologyException is thrown)
// it is possible that the function will be applied multiple times on some of the nodes.
// There is no general solution for this ATM; a proper solution will probably record the CommandInvocationId
// in the entry and implement some housekeeping
LocalizedCacheTopology topology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
ConsistentHash ch = topology.getWriteConsistentHash();
if (ctx.isOriginLocal()) {
Map<Address, IntSet> segmentMap = primaryOwnersOfSegments(ch);
Object[] results = null;
if (!command.hasAnyFlag(FlagBitSets.IGNORE_RETURN_VALUES)) {
results = new Object[helper.getItems(command).size()];
}
MergingCompletableFuture<Object> allFuture
= new MergingCompletableFuture<>(segmentMap.size(), results, helper::transformResult);
MutableInt offset = new MutableInt();
// Go through all members, for this node invokeNext (if this node is an owner of some keys),
// for the others (that own some keys) issue a remote call.
// Everything is finished when allFuture is completed
for (Entry<Address, IntSet> pair : segmentMap.entrySet()) {
Address member = pair.getKey();
IntSet segments = pair.getValue();
if (member.equals(rpcManager.getAddress())) {
handleLocalSegmentsForReadWriteManyCommand(ctx, command, helper, allFuture, offset, segments, topology);
} else {
handleRemoteSegmentsForReadWriteManyCommand(command, helper, allFuture, offset, member, segments, topology);
}
}
return asyncValue(allFuture);
} else { // origin is not local
return handleRemoteReadWriteManyCommand(ctx, command, helper);
}
}
private <C extends WriteCommand, Container, Item> void handleLocalSegmentsForReadWriteManyCommand(
InvocationContext ctx, C command, WriteManyCommandHelper<C, Container, Item> helper,
MergingCompletableFuture<Object> allFuture, MutableInt offset, IntSet segments,
LocalizedCacheTopology topology) {
Container myItems = helper.newContainer();
List<Object> remoteKeys = null;
// Filter command keys/entries into the collection, and record remote retrieval for those that are not
// in the context yet
for (Item item : helper.getItems(command)) {
Object key = helper.item2key(item);
if (segments.contains(keyPartitioner.getSegment(key))) {
helper.accumulate(myItems, item);
CacheEntry cacheEntry = ctx.lookupEntry(key);
if (cacheEntry == null) {
// this should be a rare situation, so we don't mind being a bit inefficient with the remote gets
if (command.hasAnyFlag(FlagBitSets.SKIP_REMOTE_LOOKUP | FlagBitSets.CACHE_MODE_LOCAL)) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
if (remoteKeys == null) {
remoteKeys = new ArrayList<>();
}
remoteKeys.add(key);
}
}
}
}
CompletionStage<Void> retrievals = remoteKeys != null ? remoteGetMany(ctx, command, remoteKeys) : null;
int size = helper.containerSize(myItems);
if (size == 0) {
allFuture.countDown();
return;
}
final int myOffset = offset.value;
offset.value += size;
C localCommand = helper.copyForLocal(command, myItems);
localCommand.setTopologyId(command.getTopologyId());
InvocationFinallyAction<C> handler =
createLocalInvocationHandler(allFuture, segments, helper, MergingCompletableFuture.moveListItemsToFuture(myOffset), topology);
// It's safe to ignore the invocation stages below, because handleRemoteSegmentsForReadWriteManyCommand
// does not touch the context.
if (retrievals == null) {
invokeNextAndFinally(ctx, localCommand, handler);
} else {
// We must wait until all retrievals finish before proceeding with the local command
Object result = asyncInvokeNext(ctx, command, retrievals);
makeStage(result).andFinally(ctx, command, handler);
}
// Local keys are backed up in the handler, and counters on allFuture are decremented when the backup
// calls complete.
}
private <C extends WriteCommand, Item> void handleRemoteSegmentsForReadWriteManyCommand(
C command, WriteManyCommandHelper<C, ?, Item> helper, MergingCompletableFuture<Object> allFuture,
MutableInt offset, Address member, IntSet segments, LocalizedCacheTopology topology) {
final int myOffset = offset.value;
// TODO: here we iterate through all entries - is the ReadOnlySegmentAwareMap really worth it?
C copy = helper.copyForPrimary(command, topology, segments);
copy.setTopologyId(command.getTopologyId());
int size = helper.getItems(copy).size();
offset.value += size;
if (size <= 0) {
allFuture.countDown();
return;
}
// Send the command to primary owner
SingletonMapResponseCollector collector = SingletonMapResponseCollector.validOnly();
rpcManager.invokeCommand(member, copy, collector, rpcManager.getSyncRpcOptions())
.whenComplete((responses, throwable) -> {
if (throwable != null) {
allFuture.completeExceptionally(throwable);
} else {
// FIXME Dan: The response cannot be a CacheNotFoundResponse at this point
SuccessfulResponse response = getSuccessfulResponseOrFail(responses, allFuture,
rsp -> allFuture.completeExceptionally(OutdatedTopologyException.RETRY_NEXT_TOPOLOGY));
if (response == null) {
return;
}
Object responseValue = response.getResponseValue();
MergingCompletableFuture.moveListItemsToFuture(responseValue, allFuture, myOffset);
allFuture.countDown();
}
});
}
private <C extends WriteCommand, Item> Object handleRemoteReadWriteManyCommand(
InvocationContext ctx, C command, WriteManyCommandHelper<C, ?, Item> helper) throws Exception {
List<Object> remoteKeys = null;
// check that we have all the data we need
for (Object key : command.getAffectedKeys()) {
CacheEntry cacheEntry = ctx.lookupEntry(key);
if (cacheEntry == null) {
// this should be a rare situation, so we don't mind being a bit inefficient with the remote gets
if (command.hasAnyFlag(FlagBitSets.SKIP_REMOTE_LOOKUP | FlagBitSets.CACHE_MODE_LOCAL)) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
if (remoteKeys == null) {
remoteKeys = new ArrayList<>();
}
remoteKeys.add(key);
}
}
}
Object result;
if (remoteKeys != null) {
result = asyncInvokeNext(ctx, command, remoteGetMany(ctx, command, remoteKeys));
} else {
result = invokeNext(ctx, command);
}
if (helper.shouldRegisterRemoteCallback(command)) {
return makeStage(result).thenApply(ctx, command, helper.getRemoteCallback());
} else {
return result;
}
}
private <C extends WriteCommand, F extends CountDownCompletableFuture, Item>
InvocationFinallyAction<C> createLocalInvocationHandler(
F allFuture, IntSet segments, WriteManyCommandHelper<C, ?, Item> helper,
BiConsumer<F, Object> returnValueConsumer, LocalizedCacheTopology topology) {
return (rCtx, rCommand, rv, throwable) -> {
if (throwable != null) {
allFuture.completeExceptionally(throwable);
} else try {
returnValueConsumer.accept(allFuture, rv);
Map<Address, IntSet> backupOwners = backupOwnersOfSegments(topology, segments);
for (Entry<Address, IntSet> backup : backupOwners.entrySet()) {
// rCommand is the original command
C backupCopy = helper.copyForBackup(rCommand, topology, backup.getKey(), backup.getValue());
backupCopy.setTopologyId(rCommand.getTopologyId());
if (helper.getItems(backupCopy).isEmpty()) continue;
Address backupOwner = backup.getKey();
if (isSynchronous(backupCopy)) {
allFuture.increment();
rpcManager.invokeCommand(backupOwner, backupCopy, SingleResponseCollector.validOnly(),
rpcManager.getSyncRpcOptions())
.whenComplete((response, remoteThrowable) -> {
if (remoteThrowable != null) {
allFuture.completeExceptionally(remoteThrowable);
} else {
allFuture.countDown();
}
});
} else {
rpcManager.sendTo(backupOwner, backupCopy, DeliverOrder.PER_SENDER);
}
}
allFuture.countDown();
} catch (Throwable t) {
allFuture.completeExceptionally(t);
}
};
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command)
throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command)
throws Throwable {
return handleNonTxWriteCommand(ctx, command);
}
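/**
* Simple mutable counter used to track the offset of each owner's results in the merged result array.
*/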
private final static class MutableInt {
public int value;
}
private <C extends WriteCommand> Object writeManyRemoteCallback(WriteManyCommandHelper<C, ?, ?> helper, InvocationContext ctx, C command, Object rv) {
// The node running this method must be primary owner for all the command's keys
// Check that the command topology is actual, so we can assume that we really are primary owner
LocalizedCacheTopology topology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
Map<Address, IntSet> backups = backupOwnersOfSegments(topology, extractCommandSegments(command, topology));
if (backups.isEmpty()) {
return rv;
}
boolean isSync = isSynchronous(command);
CompletableFuture[] futures = isSync ? new CompletableFuture[backups.size()] : null;
int future = 0;
for (Entry<Address, IntSet> backup : backups.entrySet()) {
C copy = helper.copyForBackup(command, topology, backup.getKey(), backup.getValue());
copy.setTopologyId(command.getTopologyId());
Address backupOwner = backup.getKey();
if (isSync) {
futures[future++] = rpcManager
.invokeCommand(backupOwner, copy, SingleResponseCollector.validOnly(),
rpcManager.getSyncRpcOptions())
.toCompletableFuture();
} else {
rpcManager.sendTo(backupOwner, copy, DeliverOrder.PER_SENDER);
}
}
return isSync ? asyncValue(CompletableFuture.allOf(futures).thenApply(nil -> rv)) : rv;
}
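/**
* Computes the set of segments touched by the command's affected keys.
*/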
private <C extends WriteCommand> IntSet extractCommandSegments(C command, LocalizedCacheTopology topology) {
IntSet keySegments = IntSets.mutableEmptySet(topology.getNumSegments());
for (Object key : command.getAffectedKeys()) {
keySegments.set(keyPartitioner.getSegment(key));
}
return keySegments;
}
private <C extends WriteCommand> InvocationSuccessFunction createRemoteCallback(WriteManyCommandHelper<C, ?, ?> helper) {
return (ctx, command, rv) -> writeManyRemoteCallback(helper, ctx, (C) command, rv);
}
}
| 25,435
| 46.722326
| 153
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/L1LastChanceInterceptor.java
|
package org.infinispan.interceptors.distribution;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.L1Manager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.interceptors.impl.BaseRpcInterceptor;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* L1-based interceptor that flushes the L1 cache after a transaction/entry is committed to the data
* container but before the lock has been released. This is here to asynchronously clear any L1-cached values that were
* retrieved between when the data was updated, causing an L1 invalidation, and when the data was put into the data
* container.
*
* @author wburns
*/
public class L1LastChanceInterceptor extends BaseRpcInterceptor {
private static final Log log = LogFactory.getLog(L1LastChanceInterceptor.class);
@Inject L1Manager l1Manager;
@Inject ClusteringDependentLogic cdl;
private final InvocationSuccessFunction<DataWriteCommand> handleDataWriteCommandEntryInL1 = this::handleDataWriteCommandEntryInL1;
private final InvocationSuccessFunction<DataWriteCommand> handleDataWriteCommandEntryNotInL1 = this::handleDataWriteCommandEntryNotInL1;
private final InvocationSuccessFunction<WriteCommand> handleWriteManyCommand = this::handleWriteManyCommand;
private final InvocationSuccessFunction<PrepareCommand> handlePrepareCommand = this::handlePrepareCommand;
private final InvocationSuccessFunction<CommitCommand> handleCommitCommand = this::handleCommitCommand;
private boolean nonTransactional;
@Start
public void start() {
nonTransactional = !cacheConfiguration.transaction().transactionMode().isTransactional();
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, true);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, true);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, true);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handleWriteManyCommand);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command, false);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handleWriteManyCommand);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handleWriteManyCommand);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handleWriteManyCommand);
}
public Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command, boolean assumeOriginKeptEntryInL1) throws Throwable {
return invokeNextThenApply(ctx, command, assumeOriginKeptEntryInL1 ? handleDataWriteCommandEntryInL1 : handleDataWriteCommandEntryNotInL1);
}
private Object handleDataWriteCommand(InvocationContext rCtx, DataWriteCommand writeCommand, Object rv, boolean assumeOriginKeptEntryInL1) {
Object key = writeCommand.getKey();
if (shouldUpdateOnWriteCommand(writeCommand) && writeCommand.isSuccessful() &&
cdl.getCacheTopology().isWriteOwner(key)) {
if (log.isTraceEnabled()) {
log.trace("Sending additional invalidation for requestors if necessary.");
}
// Send out a last-attempt L1 invalidation in case someone cached the L1
// value after they already received an invalidation
CompletableFuture<?> f = l1Manager.flushCache(Collections.singleton(key), rCtx.getOrigin(), assumeOriginKeptEntryInL1);
return asyncReturnValue(f, rv);
}
return rv;
}
private Object handleDataWriteCommandEntryInL1(InvocationContext rCtx, DataWriteCommand rCommand, Object rv) {
return handleDataWriteCommand(rCtx, rCommand, rv, true);
}
private Object handleDataWriteCommandEntryNotInL1(InvocationContext rCtx, DataWriteCommand rCommand, Object rv) {
return handleDataWriteCommand(rCtx, rCommand, rv, false);
}
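/**
* Returns {@code rv} directly when the L1 flush has already completed (or was not needed); otherwise returns an
* async value that completes with {@code rv} once the flush finishes, logging any invalidation failure.
*/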
private Object asyncReturnValue(CompletableFuture<?> f, Object rv) {
if (f == null || f.isDone()) {
return rv;
}
return asyncValue(f.handle((nil, throwable) -> {
if (throwable != null) {
getLog().failedInvalidatingRemoteCache(throwable);
throw CompletableFutures.asCompletionException(throwable);
}
return rv;
}));
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handleWriteManyCommand);
}
private Object handleWriteManyCommand(InvocationContext rCtx, WriteCommand command, Object rv) {
if (shouldUpdateOnWriteCommand(command)) {
Collection<?> keys = command.getAffectedKeys();
Set<Object> toInvalidate = new HashSet<>(keys.size());
for (Object k : keys) {
if (cdl.getCacheTopology().isWriteOwner(k)) {
toInvalidate.add(k);
}
}
if (!toInvalidate.isEmpty()) {
if (log.isTraceEnabled()) {
log.trace("Sending additional invalidation for requestors if necessary.");
}
CompletableFuture<?> f = l1Manager.flushCache(toInvalidate, rCtx.getOrigin(), true);
return asyncReturnValue(f, rv);
}
}
return rv;
}
private boolean shouldUpdateOnWriteCommand(WriteCommand command) {
return nonTransactional && !command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL);
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handlePrepareCommand);
}
private Object handlePrepareCommand(InvocationContext rCtx, PrepareCommand rCommand, Object rv) {
if (rCommand.isOnePhaseCommit()) {
CompletableFuture<?> f = handleLastChanceL1InvalidationOnCommit(((TxInvocationContext<?>) rCtx));
return asyncReturnValue(f, rv);
}
return rv;
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, handleCommitCommand);
}
private Object handleCommitCommand(InvocationContext rCtx, CommitCommand rCommand, Object rv) {
CompletableFuture<?> f = handleLastChanceL1InvalidationOnCommit((TxInvocationContext<?>) rCtx);
return asyncReturnValue(f, rv);
}
private CompletableFuture<?> handleLastChanceL1InvalidationOnCommit(TxInvocationContext<?> ctx) {
if (shouldFlushL1(ctx)) {
if (log.isTraceEnabled()) {
log.trace("Sending additional invalidation for requestors if necessary.");
}
return l1Manager.flushCache(ctx.getAffectedKeys(), ctx.getOrigin(), true);
}
return null;
}
private boolean shouldFlushL1(TxInvocationContext ctx) {
return !ctx.getAffectedKeys().isEmpty();
}
@Override
protected Log getLog() {
return log;
}
}
| 10,678
| 41.545817
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/RemoteGetSingleKeyCollector.java
|
package org.infinispan.interceptors.distribution;
import static org.infinispan.util.logging.Log.CLUSTER;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.ResponseCollectors;
import org.infinispan.statetransfer.OutdatedTopologyException;
/**
* Return the first successful response for a staggered remote get, used in dist mode.
*
* Throw an {@link OutdatedTopologyException} if all responses are either {@link UnsureResponse} or
* {@link CacheNotFoundResponse}.
* Throw an exception immediately if a response is exceptional or unexpected.
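 * <p>
 * A condensed sketch of how a caller is expected to drive this collector; the names
 * {@code staggeredTargets} and {@code responseFrom} are illustrative placeholders, not part of this class:
 * <pre>{@code
 * RemoteGetSingleKeyCollector collector = new RemoteGetSingleKeyCollector();
 * for (Address target : staggeredTargets) {
 *    SuccessfulResponse hit = collector.addResponse(target, responseFrom(target));
 *    if (hit != null) {
 *       return hit; // first successful response wins
 *    }
 * }
 * return collector.finish(); // no success: always throws an OutdatedTopologyException
 * }</pre>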
*
* @author Dan Berindei
* @since 9.4
*/
public class RemoteGetSingleKeyCollector implements ResponseCollector<SuccessfulResponse> {
private boolean hasSuspectResponse;
@Override
public SuccessfulResponse addResponse(Address sender, Response response) {
if (response.isSuccessful()) {
return (SuccessfulResponse) response;
}
if (response instanceof ExceptionResponse) {
throw ResponseCollectors.wrapRemoteException(sender, ((ExceptionResponse) response).getException());
}
if (response instanceof UnsureResponse) {
return null;
} else if (response instanceof CacheNotFoundResponse) {
hasSuspectResponse = true;
return null;
}
throw CLUSTER.unexpectedResponse(sender, response);
}
@Override
public SuccessfulResponse finish() {
// We got UnsureResponse or CacheNotFoundResponse from all the targets: all of them either have a newer
// topology or are no longer in the cluster.
if (hasSuspectResponse) {
         // We got at least one CacheNotFoundResponse, but we don't give up because write owners might have a copy.
// Wait for a new topology to avoid an infinite loop.
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
} else {
// Safe to retry in the same topology without an infinite loop, see the javadoc.
throw OutdatedTopologyException.RETRY_SAME_TOPOLOGY;
}
}
}
| 2,403
| 39.066667
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/L1WriteSynchronizer.java
|
package org.infinispan.interceptors.distribution;
import static org.infinispan.commons.util.Util.toStr;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.L1Metadata;
import org.infinispan.statetransfer.StateTransferLock;
import org.jboss.logging.Logger;
/**
* A write synchronizer that allows for a single thread to run the L1 update while others can block until it is
* completed. Also allows for someone to attempt to cancel the write to the L1. If they are unable to, they should
* really wait until the L1 write has completed so they can guarantee their update will be ordered properly.
*
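 * A minimal usage sketch (the callers shown are hypothetical and the timeout is an arbitrary example):
 * <pre>{@code
 * L1WriteSynchronizer sync = new L1WriteSynchronizer(dataContainer, l1Lifespan, stateTransferLock, cdl);
 * // thread performing the remote get: publishes the retrieved entry and runs the L1 update unless cancelled
 * sync.runL1UpdateIfPossible(remotelyRetrievedEntry);
 * // concurrent reader: blocks for the same value instead of issuing its own remote get
 * Object value = sync.get(10, TimeUnit.SECONDS);
 * // concurrent writer/invalidation: cancel the L1 update, or wait for it so the stale entry can be removed
 * if (!sync.trySkipL1Update()) {
 *    sync.get();
 * }
 * }</pre>
 *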
* @author wburns
* @since 6.0
*/
public class L1WriteSynchronizer {
private static final Logger log = Logger.getLogger(L1WriteSynchronizer.class);
private final L1WriteSync sync = new L1WriteSync();
private final long l1Lifespan;
private final InternalDataContainer<Object, Object> dc;
private final StateTransferLock stateTransferLock;
private final ClusteringDependentLogic cdl;
public L1WriteSynchronizer(InternalDataContainer dc, long l1Lifespan, StateTransferLock stateTransferLock,
ClusteringDependentLogic cdl) {
//noinspection unchecked
this.dc = dc;
this.l1Lifespan = l1Lifespan;
this.stateTransferLock = stateTransferLock;
this.cdl = cdl;
}
private static class L1WriteSync extends AbstractQueuedSynchronizer {
private static final int READY = 0;
private static final int RUNNING = 1;
private static final int SKIP = 2;
private static final int COMPLETED = 4;
private Object result;
private Throwable exception;
/**
* Implements AQS base acquire to succeed when completed
*/
protected int tryAcquireShared(int ignore) {
return getState() == COMPLETED ? 1 : -1;
}
/**
* Implements AQS base release to always signal after setting
* value
*/
protected boolean tryReleaseShared(int ignore) {
return true;
}
/**
* Attempt to update the sync to signal that we want to update L1 with value
* @return whether it should continue with running L1 update
*/
boolean attemptUpdateToRunning() {
// Multiple invocations should say it is marked as running
if (getState() == RUNNING) {
return true;
}
return compareAndSetState(READY, RUNNING);
}
/**
* Attempt to update the sync to signal that we want to cancel the L1 update
* @return whether the L1 run was skipped
*/
boolean attemptToSkipFullRun() {
// Multiple invocations should say it skipped
if (getState() == SKIP) {
return true;
}
return compareAndSetState(READY, SKIP);
}
Object innerGet() throws InterruptedException, ExecutionException {
acquireSharedInterruptibly(0);
if (exception != null) {
throw new ExecutionException(exception);
}
return result;
}
Object innerGet(long time, TimeUnit unit) throws InterruptedException, TimeoutException, ExecutionException {
if (!tryAcquireSharedNanos(0, unit.toNanos(time))) {
throw new TimeoutException();
}
if (exception != null) {
throw new ExecutionException(exception);
}
return result;
}
void innerSet(Object value) {
// This should never have to loop, but just in case :P
for (;;) {
int s = getState();
if (s == COMPLETED) {
return;
}
if (compareAndSetState(s, COMPLETED)) {
result = value;
releaseShared(0);
return;
}
}
}
void innerException(Throwable t) {
// This should never have to loop, but just in case :P
for (;;) {
int s = getState();
if (s == COMPLETED) {
return;
}
if (compareAndSetState(s, COMPLETED)) {
exception = t;
releaseShared(0);
return;
}
}
}
}
public Object get() throws InterruptedException, ExecutionException {
return sync.innerGet();
}
public Object get(long time, TimeUnit unit) throws TimeoutException, InterruptedException, ExecutionException {
return sync.innerGet(time, unit);
}
/**
* Attempts to mark the L1 update to only retrieve the value and not to actually update the L1 cache.
* If the L1 skipping is not successful, that means it is currently running, which means for consistency
* any writes should wait until this update completes since the update doesn't acquire any locks
* @return Whether or not it was successful in skipping L1 update
*/
public boolean trySkipL1Update() {
return sync.attemptToSkipFullRun();
}
public void retrievalEncounteredException(Throwable t) {
sync.innerException(t);
}
/**
 * Attempts to run the L1 update and set the value. If the L1 update was marked as skipped, this will instead
 * just set the value to release any blocked threads.
 * A null value can be provided; it will not run the L1 update but will still alert other waiters that a null
 * was retrieved.
*/
public void runL1UpdateIfPossible(InternalCacheEntry ice) {
try {
if (ice != null) {
Object key;
if (sync.attemptUpdateToRunning() && !dc.containsKey((key = ice.getKey()))) {
runL1Update(key, ice);
}
}
}
finally {
sync.innerSet(ice);
}
}
private void runL1Update(Object key, InternalCacheEntry ice) {
// Acquire the transfer lock to ensure that we don't have a rehash and change to become an owner,
// note we check the ownership in following if
stateTransferLock.acquireSharedTopologyLock();
try {
// Now we can update the L1 if there isn't a value already there and we haven't now become a write
// owner
if (!dc.containsKey(key) && !cdl.getCacheTopology().isWriteOwner(key)) {
log.tracef("Caching remotely retrieved entry for key %s in L1", toStr(key));
long lifespan = ice.getLifespan() < 0 ? l1Lifespan : Math.min(ice.getLifespan(), l1Lifespan);
// Make a copy of the metadata stored internally, adjust
// lifespan/maxIdle settings and send them a modification
Metadata newMetadata = ice.getMetadata().builder()
.lifespan(lifespan).maxIdle(-1).build();
dc.put(key, ice.getValue(), new L1Metadata(newMetadata));
} else {
log.tracef("Data container contained value after rehash for key %s", key);
}
}
finally {
stateTransferLock.releaseSharedTopologyLock();
}
}
}
| 7,371
| 34.613527
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/TriangleDistributionInterceptor.java
|
package org.infinispan.interceptors.distribution;
import static org.infinispan.commands.VisitableCommand.LoadType.OWNER;
import static org.infinispan.commands.VisitableCommand.LoadType.PRIMARY;
import static org.infinispan.util.TriangleFunctionsUtil.filterBySegment;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.commands.CommandInvocationId;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.triangle.BackupNoopCommand;
import org.infinispan.commands.triangle.BackupWriteCommand;
import org.infinispan.commands.write.BackupAckCommand;
import org.infinispan.commands.write.BackupMultiKeyAckCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.util.InfinispanCollections;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.TriangleOrderManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.interceptors.ExceptionSyncInvocationStage;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.UnsuccessfulResponse;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.statetransfer.StateTransferInterceptor;
import org.infinispan.util.CacheTopologyUtil;
import org.infinispan.util.TriangleFunctionsUtil;
import org.infinispan.util.concurrent.CommandAckCollector;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Non-transactional interceptor used by distributed caches that supports concurrent writes.
* <p>
* It is implemented based on the Triangle algorithm.
* <p>
* The {@link GetKeyValueCommand} reads the value locally if it is available (the node is an owner or the value is
 * stored in L1). If it isn't available, a remote request is made. The {@link DataWriteCommand} is performed as
 * follows:
 * <ul> <li>The command is forwarded to the primary owner of the key.</li> <li>The primary owner locks the key and
 * executes the operation; sends the {@link BackupWriteCommand} to the backup owners; releases the lock; sends the
 * {@link SuccessfulResponse} or {@link UnsuccessfulResponse} back to the originator.</li>
 * <li>The backup owner applies the update and sends a {@link
 * BackupAckCommand} back to the originator.</li> <li>The originator collects the acks from all the owners and
 * returns.</li> </ul> The {@link PutMapCommand} is performed in a similar way: <ul> <li>The map is split into
 * subsets by primary owner.</li> <li>The primary owner locks the keys and executes the command; splits the keys
 * by backup owner and sends them; and replies to the originator.</li> <li>The backup owner applies the update and
 * sends back the {@link BackupMultiKeyAckCommand} to the originator.</li> <li>The originator collects the
 * acknowledgements from all owners and returns.</li> </ul> Acknowledgement management is done by the
 * {@link CommandAckCollector}.
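 * <p>
 * A condensed sketch of the primary owner's side for a single key, simplified from
 * {@code localPrimaryOwnerWrite} below (error handling and flag checks omitted):
 * <pre>{@code
 * Collector<Object> collector = commandAckCollector.create(id.getId(), backupOwners, topologyId);
 * checkTopologyId(topologyId, collector);                          // fail fast on a stale topology
 * sendToBackups(distributionInfo.segmentId(), command, backupOwners, backupBuilder);
 * collector.primaryResult(rv, true);                               // unblocks once all backup acks arrive
 * return asyncValue(collector.getFuture());
 * }</pre>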
* <p>
 * If the topology changes while a command is executing, an {@link OutdatedTopologyException} is thrown. The {@link
 * StateTransferInterceptor} will catch it and retry the command.
* <p>
* TODO: finish the wiki page and add a link to it!
*
* @author Pedro Ruivo
* @since 9.0
*/
public class TriangleDistributionInterceptor extends BaseDistributionInterceptor {
private static final Log log = LogFactory.getLog(TriangleDistributionInterceptor.class);
@Inject CommandAckCollector commandAckCollector;
@Inject CommandsFactory commandsFactory;
@Inject TriangleOrderManager triangleOrderManager;
private Address localAddress;
@Start
public void start() {
localAddress = rpcManager.getAddress();
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
      return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command)
throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command)
throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return handleSingleKeyWriteCommand(ctx, command, TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return ctx.isOriginLocal() ?
handleLocalManyKeysCommand(ctx, command,
TriangleFunctionsUtil::copy,
TriangleFunctionsUtil::mergeHashMap,
HashMap::new,
TriangleFunctionsUtil::backupFrom) :
handleRemoteManyKeysCommand(ctx, command,
PutMapCommand::isForwarded,
TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) {
return ctx.isOriginLocal() ?
handleLocalManyKeysCommand(ctx, command,
TriangleFunctionsUtil::copy,
TriangleFunctionsUtil::voidMerge,
() -> null,
TriangleFunctionsUtil::backupFrom) :
handleRemoteManyKeysCommand(ctx, command,
WriteOnlyManyEntriesCommand::isForwarded,
TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) {
return ctx.isOriginLocal() ?
handleLocalManyKeysCommand(ctx, command,
TriangleFunctionsUtil::copy,
TriangleFunctionsUtil::voidMerge,
() -> null,
TriangleFunctionsUtil::backupFrom) :
handleRemoteManyKeysCommand(ctx, command,
WriteOnlyManyCommand::isForwarded,
TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
return ctx.isOriginLocal() ?
handleLocalManyKeysCommand(ctx, command,
TriangleFunctionsUtil::copy,
TriangleFunctionsUtil::mergeList,
LinkedList::new,
TriangleFunctionsUtil::backupFrom) :
handleRemoteManyKeysCommand(ctx, command,
ReadWriteManyCommand::isForwarded,
TriangleFunctionsUtil::backupFrom);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command)
throws Throwable {
return ctx.isOriginLocal() ?
handleLocalManyKeysCommand(ctx, command,
TriangleFunctionsUtil::copy,
TriangleFunctionsUtil::mergeList,
LinkedList::new,
TriangleFunctionsUtil::backupFrom) :
handleRemoteManyKeysCommand(ctx, command,
ReadWriteManyEntriesCommand::isForwarded,
TriangleFunctionsUtil::backupFrom);
}
private <R, C extends WriteCommand> Object handleLocalManyKeysCommand(InvocationContext ctx, C command,
SubsetCommandCopy<C> commandCopy,
MergeResults<R> mergeResults,
Supplier<R> emptyResult,
MultiKeyBackupBuilder<C> backupBuilder) {
//local command. we need to split by primary owner to send the command to them
final LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
final PrimaryOwnerClassifier filter = new PrimaryOwnerClassifier(cacheTopology, command.getAffectedKeys());
return isSynchronous(command) ?
syncLocalManyKeysWrite(ctx, command, cacheTopology, filter, commandCopy, mergeResults, emptyResult,
backupBuilder) :
asyncLocalManyKeysWrite(ctx, command, cacheTopology, filter, commandCopy, backupBuilder);
}
private <C extends WriteCommand> Object handleRemoteManyKeysCommand(InvocationContext ctx, C command,
Predicate<C> isBackup,
MultiKeyBackupBuilder<C> backupBuilder) {
return isBackup.test(command) ?
remoteBackupManyKeysWrite(ctx, command, InfinispanCollections.toObjectSet(command.getAffectedKeys())) :
remotePrimaryManyKeysWrite(ctx, command, InfinispanCollections.toObjectSet(command.getAffectedKeys()),
backupBuilder);
}
private <C extends WriteCommand> Object remoteBackupManyKeysWrite(InvocationContext ctx, C command,
Set<Object> keys) {
//backup & remote
final LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
return asyncInvokeNext(ctx, command,
checkRemoteGetIfNeeded(ctx, command, keys, cacheTopology, command.loadType() == OWNER));
}
private <C extends WriteCommand> Object remotePrimaryManyKeysWrite(InvocationContext ctx, C command,
Set<Object> keys,
MultiKeyBackupBuilder<C> backupBuilder) {
//primary owner & remote
final LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
//primary, we need to send the command to the backups ordered!
sendToBackups(command, keys, cacheTopology, backupBuilder);
return asyncInvokeNext(ctx, command,
checkRemoteGetIfNeeded(ctx, command, keys, cacheTopology, command.loadType() == OWNER));
}
private <R, C extends WriteCommand> Object syncLocalManyKeysWrite(InvocationContext ctx, C command,
LocalizedCacheTopology cacheTopology,
PrimaryOwnerClassifier filter,
SubsetCommandCopy<C> commandCopy,
MergeResults<R> mergeResults,
Supplier<R> emptyResult,
MultiKeyBackupBuilder<C> backupBuilder) {
//local & sync
final Set<Object> localKeys = filter.primaries.remove(localAddress);
Collector<R> collector = commandAckCollector.createSegmentBasedCollector(command.getCommandInvocationId().getId(),
filter.backups, command.getTopologyId());
CompletableFuture<R> localResult = new CompletableFuture<>();
try {
forwardToPrimaryOwners(command, filter, localResult, mergeResults, commandCopy)
.handle((result, throwable) -> {
if (throwable != null) {
collector.primaryException(throwable);
} else {
collector.primaryResult(result, true);
}
return null;
});
} catch (Throwable t) {
collector.primaryException(t);
}
if (localKeys != null) {
return makeStage(invokeNextWriteManyKeysInPrimary(ctx, command, localKeys, cacheTopology, commandCopy,
backupBuilder))
.andHandle(ctx, command, (rCtx, rCommand, rv, throwable) -> {
if (throwable != null) {
localResult.completeExceptionally(CompletableFutures.extractException(throwable));
} else {
//noinspection unchecked
localResult.complete((R) rv);
}
return asyncValue(collector.getFuture());
});
} else {
localResult.complete(command.hasAnyFlag(FlagBitSets.IGNORE_RETURN_VALUES) ? null : emptyResult.get());
return asyncValue(collector.getFuture());
}
}
private <C extends WriteCommand> Object asyncLocalManyKeysWrite(InvocationContext ctx, C command,
LocalizedCacheTopology cacheTopology,
PrimaryOwnerClassifier filter,
SubsetCommandCopy<C> commandCopy,
MultiKeyBackupBuilder<C> backupBuilder) {
//local & async
final Set<Object> localKeys = filter.primaries.remove(localAddress);
forwardToPrimaryOwners(command, filter, commandCopy);
return localKeys != null ?
invokeNextWriteManyKeysInPrimary(ctx, command, localKeys, cacheTopology, commandCopy, backupBuilder) :
null; //no local keys to handle
}
private <C extends WriteCommand> Object invokeNextWriteManyKeysInPrimary(InvocationContext ctx, C command,
Set<Object> keys,
LocalizedCacheTopology cacheTopology,
SubsetCommandCopy<C> commandCopy,
MultiKeyBackupBuilder<C> backupBuilder) {
try {
sendToBackups(command, keys, cacheTopology, backupBuilder);
final VisitableCommand.LoadType loadType = command.loadType();
C primaryCmd = commandCopy.copySubset(command, keys);
return asyncInvokeNext(ctx, primaryCmd,
checkRemoteGetIfNeeded(ctx, primaryCmd, keys, cacheTopology,
loadType == PRIMARY || loadType == OWNER));
} catch (Throwable t) {
// Wrap marshalling exception in an invocation stage
return new ExceptionSyncInvocationStage(t);
}
}
private <C extends WriteCommand> void sendToBackups(C command, Collection<Object> keysToSend,
LocalizedCacheTopology cacheTopology, MultiKeyBackupBuilder<C> backupBuilder) {
int topologyId = command.getTopologyId();
for (Map.Entry<Integer, Collection<Object>> entry : filterBySegment(cacheTopology, keysToSend).entrySet()) {
int segmentId = entry.getKey();
Collection<Address> backups = cacheTopology.getSegmentDistribution(segmentId).writeBackups();
if (backups.isEmpty()) {
// Only the primary owner. Other segments may have more than one owner, e.g. during rebalance.
continue;
}
long sequence = triangleOrderManager.next(segmentId, topologyId);
try {
BackupWriteCommand backupCommand = backupBuilder.build(commandsFactory, command, entry.getValue());
backupCommand.setSequence(sequence);
backupCommand.setSegmentId(segmentId);
if (log.isTraceEnabled()) {
log.tracef("Command %s got sequence %s for segment %s", command.getCommandInvocationId(), segmentId,
sequence);
}
rpcManager.sendToMany(backups, backupCommand, DeliverOrder.NONE);
} catch (Throwable t) {
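            // the sequence number was already reserved for this segment: send a noop so the backups
            // do not block forever waiting for a command with this sequence that will never arrive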
sendBackupNoopCommand(command, backups, segmentId, sequence);
throw t;
}
}
}
private <C extends WriteCommand> void forwardToPrimaryOwners(C command, PrimaryOwnerClassifier splitter,
SubsetCommandCopy<C> commandCopy) {
for (Map.Entry<Address, Set<Object>> entry : splitter.primaries.entrySet()) {
C copy = commandCopy.copySubset(command, entry.getValue());
copy.setTopologyId(command.getTopologyId());
rpcManager.sendTo(entry.getKey(), copy, DeliverOrder.NONE);
}
}
private <R, C extends WriteCommand> CompletableFuture<R> forwardToPrimaryOwners(C command,
PrimaryOwnerClassifier splitter,
CompletableFuture<R> localResult,
MergeResults<R> mergeResults,
SubsetCommandCopy<C> commandCopy) {
CompletableFuture<R> future = localResult;
for (Map.Entry<Address, Set<Object>> entry : splitter.primaries.entrySet()) {
C copy = commandCopy.copySubset(command, entry.getValue());
copy.setTopologyId(command.getTopologyId());
CompletionStage<ValidResponse> remoteFuture = rpcManager.invokeCommand(entry.getKey(), copy,
SingleResponseCollector.validOnly(),
rpcManager.getSyncRpcOptions());
future = remoteFuture.toCompletableFuture().thenCombine(future, mergeResults);
}
return future;
}
private <C extends DataWriteCommand> Object handleSingleKeyWriteCommand(InvocationContext context, C command,
BackupBuilder<C> backupBuilder) {
assert !context.isInTxScope();
if (command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
//don't go through the triangle
return invokeNext(context, command);
}
LocalizedCacheTopology topology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
DistributionInfo distributionInfo = topology.getDistributionForSegment(command.getSegment());
if (distributionInfo.isPrimary()) {
assert context.lookupEntry(command.getKey()) != null;
return context.isOriginLocal() ?
localPrimaryOwnerWrite(context, command, distributionInfo, backupBuilder) :
remotePrimaryOwnerWrite(context, command, distributionInfo, backupBuilder);
} else if (distributionInfo.isWriteBackup()) {
return context.isOriginLocal() ?
localWriteInvocation(context, command, distributionInfo) :
remoteBackupOwnerWrite(context, command);
} else {
//always local!
assert context.isOriginLocal();
return localWriteInvocation(context, command, distributionInfo);
}
}
private Object remoteBackupOwnerWrite(InvocationContext context, DataWriteCommand command) {
CacheEntry entry = context.lookupEntry(command.getKey());
if (entry == null) {
if (command.loadType() == OWNER) {
return asyncInvokeNext(context, command, remoteGetSingleKey(context, command, command.getKey(), true));
}
entryFactory.wrapExternalEntry(context, command.getKey(), null, false, true);
}
return invokeNext(context, command);
}
private <C extends DataWriteCommand> Object localPrimaryOwnerWrite(InvocationContext context, C command,
DistributionInfo distributionInfo, BackupBuilder<C> backupBuilder) {
if (command.hasAnyFlag(FlagBitSets.COMMAND_RETRY)) {
command.setValueMatcher(command.getValueMatcher().matcherForRetry());
}
return invokeNextThenApply(context, command, (rCtx, rCommand, rv) -> {
//noinspection unchecked
final C dwCommand = (C) rCommand;
final CommandInvocationId id = dwCommand.getCommandInvocationId();
Collection<Address> backupOwners = distributionInfo.writeBackups();
if (!dwCommand.isSuccessful() || backupOwners.isEmpty()) {
if (log.isTraceEnabled()) {
log.tracef("Not sending command %s to backups", id);
}
return rv;
}
final int topologyId = dwCommand.getTopologyId();
final boolean sync = isSynchronous(dwCommand);
if (sync || dwCommand.isReturnValueExpected()) {
Collector<Object> collector = commandAckCollector.create(id.getId(),
sync ? backupOwners : Collections.emptyList(),
topologyId);
try {
//check the topology after registering the collector.
               //if we don't, the collector may wait forever (i.e. until it times out) for acknowledgements that will never arrive.
checkTopologyId(topologyId, collector);
sendToBackups(distributionInfo.segmentId(), dwCommand, backupOwners, backupBuilder);
collector.primaryResult(rv, true);
} catch (Throwable t) {
collector.primaryException(t);
}
return asyncValue(collector.getFuture());
} else {
sendToBackups(distributionInfo.segmentId(), dwCommand, backupOwners, backupBuilder);
return rv;
}
});
}
private void sendBackupNoopCommand(WriteCommand command, Collection<Address> targets, int segment, long sequence) {
BackupNoopCommand noopCommand = commandsFactory.buildBackupNoopCommand();
noopCommand.setWriteCommand(command);
noopCommand.setSegmentId(segment);
noopCommand.setSequence(sequence);
rpcManager.sendToMany(targets, noopCommand, DeliverOrder.NONE);
}
private <C extends DataWriteCommand> Object remotePrimaryOwnerWrite(InvocationContext context, C command,
final DistributionInfo distributionInfo, BackupBuilder<C> backupBuilder) {
      //we are the primary owner: we need to execute the command, check if it was successful, send it to the backups and reply to the originator if needed.
if (command.hasAnyFlag(FlagBitSets.COMMAND_RETRY)) {
command.setValueMatcher(command.getValueMatcher().matcherForRetry());
}
return invokeNextThenApply(context, command, (rCtx, rCommand, rv) -> {
//noinspection unchecked
final C dwCommand = (C) rCommand;
final CommandInvocationId id = dwCommand.getCommandInvocationId();
Collection<Address> backupOwners = distributionInfo.writeBackups();
if (!dwCommand.isSuccessful() || backupOwners.isEmpty()) {
if (log.isTraceEnabled()) {
log.tracef("Command %s not successful in primary owner.", id);
}
return rv;
}
sendToBackups(distributionInfo.segmentId(), dwCommand, backupOwners, backupBuilder);
return rv;
});
}
private <C extends DataWriteCommand> void sendToBackups(int segmentId, C command, Collection<Address> backupOwners,
BackupBuilder<C> backupBuilder) {
CommandInvocationId id = command.getCommandInvocationId();
if (log.isTraceEnabled()) {
log.tracef("Command %s send to backup owner %s.", id, backupOwners);
}
long sequenceNumber = triangleOrderManager.next(segmentId, command.getTopologyId());
try {
BackupWriteCommand backupCommand = backupBuilder.build(commandsFactory, command);
backupCommand.setSequence(sequenceNumber);
backupCommand.setSegmentId(segmentId);
if (log.isTraceEnabled()) {
log.tracef("Command %s got sequence %s for segment %s", id, sequenceNumber, segmentId);
}
// TODO Should we use sendToAll in replicated mode?
// we must send the message only after the collector is registered in the map
rpcManager.sendToMany(backupOwners, backupCommand, DeliverOrder.NONE);
} catch (Throwable t) {
sendBackupNoopCommand(command, backupOwners, segmentId, sequenceNumber);
throw t;
}
}
private Object localWriteInvocation(InvocationContext context, DataWriteCommand command,
DistributionInfo distributionInfo) {
assert context.isOriginLocal();
final CommandInvocationId invocationId = command.getCommandInvocationId();
final boolean sync = isSynchronous(command);
if (sync || command.isReturnValueExpected() && !command.hasAnyFlag(FlagBitSets.PUT_FOR_EXTERNAL_READ)) {
final int topologyId = command.getTopologyId();
Collector<Object> collector = commandAckCollector.create(invocationId.getId(),
sync ? distributionInfo.writeBackups() : Collections.emptyList(),
topologyId);
try {
//check the topology after registering the collector.
            //if we don't, the collector may wait forever (i.e. until it times out) for acknowledgements that will never arrive.
checkTopologyId(topologyId, collector);
forwardToPrimary(command, distributionInfo, collector);
return asyncValue(collector.getFuture());
} catch (Throwable t) {
collector.primaryException(t);
throw t;
}
} else {
rpcManager.sendTo(distributionInfo.primary(), command, DeliverOrder.NONE);
return null;
}
}
private void forwardToPrimary(DataWriteCommand command, DistributionInfo distributionInfo,
Collector<Object> collector) {
CompletionStage<ValidResponse> remoteInvocation =
rpcManager.invokeCommand(distributionInfo.primary(), command, SingleResponseCollector.validOnly(),
rpcManager.getSyncRpcOptions());
remoteInvocation.handle((response, throwable) -> {
if (throwable != null) {
collector.primaryException(CompletableFutures.extractException(throwable));
} else {
if (!response.isSuccessful()) {
command.fail();
}
collector.primaryResult(response.getResponseValue(), response.isSuccessful());
}
return null;
});
}
private <C extends FlagAffectedCommand & TopologyAffectedCommand> CompletionStage<?> checkRemoteGetIfNeeded(
InvocationContext ctx, C command, Set<Object> keys, LocalizedCacheTopology cacheTopology,
boolean needsPreviousValue) {
List<Object> remoteKeys = null;
for (Object key : keys) {
CacheEntry cacheEntry = ctx.lookupEntry(key);
if (cacheEntry == null && cacheTopology.isWriteOwner(key)) {
if (!needsPreviousValue || command.hasAnyFlag(FlagBitSets.SKIP_REMOTE_LOOKUP | FlagBitSets.CACHE_MODE_LOCAL)) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
} else {
if (remoteKeys == null) {
remoteKeys = new ArrayList<>();
}
remoteKeys.add(key);
}
}
}
return remoteKeys != null ? remoteGetMany(ctx, command, remoteKeys) : CompletableFutures.completedNull();
}
private void checkTopologyId(int topologyId, Collector<?> collector) {
int currentTopologyId = distributionManager.getCacheTopology().getTopologyId();
if (currentTopologyId != topologyId && topologyId != -1) {
collector.primaryException(OutdatedTopologyException.RETRY_NEXT_TOPOLOGY);
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
}
private interface SubsetCommandCopy<T> {
T copySubset(T t, Collection<Object> keys);
}
private interface MergeResults<T> extends BiFunction<ValidResponse, T, T> {
}
private interface BackupBuilder<C> {
BackupWriteCommand build(CommandsFactory factory, C command);
}
private interface MultiKeyBackupBuilder<C> {
BackupWriteCommand build(CommandsFactory factory, C command, Collection<Object> keys);
}
/**
 * Classifies the keys by primary owner (address => keys) and backup owners (address => segments).
* <p>
* The first map is used to forward the command to the primary owner with the subset of keys.
* <p>
 * The second map is used to initialize the {@link CommandAckCollector} to wait for the backup acknowledgements.
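 * <p>
 * Illustrative example (hypothetical addresses A and B): for keys {k1, k2} both owned primarily by A and backed
 * up by B, the classification yields
 * <pre>
 * primaries = { A -> {k1, k2} }
 * backups   = { B -> {segment(k1), segment(k2)} }
 * </pre>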
*/
private static class PrimaryOwnerClassifier {
private final Map<Address, Collection<Integer>> backups;
private final Map<Address, Set<Object>> primaries;
private final LocalizedCacheTopology cacheTopology;
private final int entryCount;
private PrimaryOwnerClassifier(LocalizedCacheTopology cacheTopology, Collection<?> keys) {
this.cacheTopology = cacheTopology;
int memberSize = cacheTopology.getMembers().size();
this.backups = new HashMap<>(memberSize);
this.primaries = new HashMap<>(memberSize);
Set<Object> distinctKeys = new HashSet<>(keys);
this.entryCount = distinctKeys.size();
distinctKeys.forEach(this::check);
}
private void check(Object key) {
int segment = cacheTopology.getSegment(key);
DistributionInfo distributionInfo = cacheTopology.getDistributionForSegment(segment);
final Address primaryOwner = distributionInfo.primary();
primaries.computeIfAbsent(primaryOwner, address -> new HashSet<>(entryCount))
.add(key);
for (Address backup : distributionInfo.writeBackups()) {
backups.computeIfAbsent(backup, address -> new HashSet<>(entryCount)).add(segment);
}
}
}
}
| 31,528
| 47.0625
| 137
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/CountDownCompletableFuture.java
|
package org.infinispan.interceptors.distribution;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
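/**
 * A {@link CompletableFuture} that completes once {@link #countDown()} has been invoked once per registered
 * participant. {@link #increment()} registers an extra participant and may only be called before the count
 * reaches zero; subclasses can override {@link #result()} to supply the completion value.
 */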
class CountDownCompletableFuture extends CompletableFuture<Object> {
protected final AtomicInteger counter;
public CountDownCompletableFuture(int participants) {
this.counter = new AtomicInteger(participants);
assert participants != 0;
}
public void countDown() {
if (counter.decrementAndGet() == 0) {
Object result = null;
try {
result = result();
} catch (Throwable t) {
completeExceptionally(t);
} finally {
// no-op when completed with exception
complete(result);
}
}
}
public void increment() {
int preValue = counter.getAndIncrement();
if (preValue == 0) {
throw new IllegalStateException();
}
}
protected Object result() {
return null;
}
}
| 977
| 24.076923
| 68
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/ConcurrentChangeException.java
|
package org.infinispan.interceptors.distribution;
import org.infinispan.commons.CacheException;
/**
* Thrown when the version of entry has changed between loading the entry to the context and committing new value.
* @see ScatteredDistributionInterceptor
*
* @author Radim Vansa <rvansa@redhat.com>
*/
public class ConcurrentChangeException extends CacheException {
/**
* Throwing this exception is cheaper because it does not fill in the stack trace.
*/
public ConcurrentChangeException() {
super(null, null, false, false);
}
}
| 565
| 27.3
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/PutMapHelper.java
|
package org.infinispan.interceptors.distribution;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.util.ReadOnlySegmentAwareMap;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.transport.Address;
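/**
 * {@link WriteManyCommandHelper} implementation for {@link PutMapCommand}: copies the command with the subset of
 * entries relevant to each owner and merges the per-owner results back into a single map.
 */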
class PutMapHelper extends WriteManyCommandHelper<PutMapCommand, Map<Object, Object>, Map.Entry<Object, Object>> {
PutMapHelper(Function<WriteManyCommandHelper<PutMapCommand, ?, ?>, InvocationSuccessFunction<PutMapCommand>> createRemoteCallback) {
super(createRemoteCallback);
}
@Override
public PutMapCommand copyForLocal(PutMapCommand cmd, Map<Object, Object> container) {
return new PutMapCommand(cmd).withMap(container);
}
@Override
public PutMapCommand copyForPrimary(PutMapCommand cmd, LocalizedCacheTopology topology, IntSet segments) {
return new PutMapCommand(cmd).withMap(new ReadOnlySegmentAwareMap<>(cmd.getMap(), topology, segments));
}
@Override
public PutMapCommand copyForBackup(PutMapCommand cmd, LocalizedCacheTopology topology,
Address target, IntSet segments) {
      PutMapCommand copy = new PutMapCommand(cmd).withMap(new ReadOnlySegmentAwareMap<>(cmd.getMap(), topology, segments));
copy.setForwarded(true);
return copy;
}
@Override
public Collection<Map.Entry<Object, Object>> getItems(PutMapCommand cmd) {
return cmd.getMap().entrySet();
}
@Override
public Object item2key(Map.Entry<Object, Object> entry) {
return entry.getKey();
}
@Override
public Map<Object, Object> newContainer() {
return new LinkedHashMap<>();
}
@Override
public void accumulate(Map<Object, Object> map, Map.Entry<Object, Object> entry) {
map.put(entry.getKey(), entry.getValue());
}
@Override
public int containerSize(Map<Object, Object> map) {
return map.size();
}
@Override
public Iterable<Object> toKeys(Map<Object, Object> map) {
return map.keySet();
}
@Override
public boolean shouldRegisterRemoteCallback(PutMapCommand cmd) {
return !cmd.isForwarded();
}
@Override
public Object transformResult(Object[] results) {
if (results == null) return null;
Map<Object, Object> result = new HashMap<>();
for (Object r : results) {
Map.Entry<Object, Object> entry = (Map.Entry<Object, Object>) r;
result.put(entry.getKey(), entry.getValue());
}
return result;
}
}
| 2,760
| 31.482353
| 135
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/VersionedResult.java
|
package org.infinispan.interceptors.distribution;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.Util;
import org.infinispan.container.versioning.EntryVersion;
public class VersionedResult {
public final Object result;
public final EntryVersion version;
public VersionedResult(Object result, EntryVersion version) {
this.result = result;
this.version = version;
}
@Override
public String toString() {
return new StringBuilder("VersionedResult{").append(result).append(" (").append(version).append(")}").toString();
}
public static class Externalizer implements AdvancedExternalizer<VersionedResult> {
@Override
public Set<Class<? extends VersionedResult>> getTypeClasses() {
return Util.asSet(VersionedResult.class);
}
@Override
public Integer getId() {
return Ids.VERSIONED_RESULT;
}
@Override
public void writeObject(ObjectOutput output, VersionedResult object) throws IOException {
output.writeObject(object.result);
output.writeObject(object.version);
}
@Override
public VersionedResult readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new VersionedResult(input.readObject(), (EntryVersion) input.readObject());
}
}
}
| 1,526
| 29.54
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/BiasedCollector.java
|
package org.infinispan.interceptors.distribution;
import org.infinispan.remoting.RpcException;
import org.infinispan.remoting.responses.BiasRevocationResponse;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.ResponseCollectors;
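/**
 * A {@link Collector} that also serves as the {@link ResponseCollector} for the primary owner's response,
 * recording any pending acknowledgements carried by a {@link BiasRevocationResponse} via
 * {@link #addPendingAcks(boolean, Address[])}.
 */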
public interface BiasedCollector extends Collector<ValidResponse>, ResponseCollector<ValidResponse> {
void addPendingAcks(boolean success, Address[] waitFor);
@Override
default ValidResponse addResponse(Address sender, Response response) {
if (response instanceof ValidResponse) {
if (response instanceof BiasRevocationResponse) {
addPendingAcks(response.isSuccessful(), ((BiasRevocationResponse) response).getWaitList());
}
ValidResponse valid = (ValidResponse) response;
primaryResult(valid, response.isSuccessful());
return valid;
} else if (response instanceof ExceptionResponse) {
primaryException(ResponseCollectors.wrapRemoteException(sender, ((ExceptionResponse) response).getException()));
} else if (response instanceof CacheNotFoundResponse) {
primaryException(ResponseCollectors.remoteNodeSuspected(sender));
} else {
primaryException(new RpcException("Unknown response type: " + response));
}
// There won't be any further targets so finish() will be called
return null;
}
@Override
default ValidResponse finish() {
return null;
}
}
| 1,765
| 42.073171
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/ReadWriteManyHelper.java
|
package org.infinispan.interceptors.distribution;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.function.Function;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.util.ReadOnlySegmentAwareCollection;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.transport.Address;
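/**
 * {@link WriteManyCommandHelper} implementation for {@link ReadWriteManyCommand}: copies the command with the
 * subset of keys relevant to each owner and concatenates the per-owner return values into a single list.
 */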
class ReadWriteManyHelper extends WriteManyCommandHelper<ReadWriteManyCommand, Collection<Object>, Object> {
ReadWriteManyHelper(Function<WriteManyCommandHelper<ReadWriteManyCommand, ?, ?>, InvocationSuccessFunction<ReadWriteManyCommand>> createRemoteCallback) {
super(createRemoteCallback);
}
@Override
public ReadWriteManyCommand copyForLocal(ReadWriteManyCommand cmd, Collection<Object> keys) {
return new ReadWriteManyCommand(cmd).withKeys(keys);
}
@Override
public ReadWriteManyCommand copyForPrimary(ReadWriteManyCommand cmd, LocalizedCacheTopology topology, IntSet segments) {
return new ReadWriteManyCommand(cmd).withKeys(new ReadOnlySegmentAwareCollection(cmd.getAffectedKeys(), topology, segments));
}
@Override
public ReadWriteManyCommand copyForBackup(ReadWriteManyCommand cmd, LocalizedCacheTopology topology,
Address target, IntSet segments) {
ReadWriteManyCommand copy = new ReadWriteManyCommand(cmd).withKeys(
new ReadOnlySegmentAwareCollection(cmd.getAffectedKeys(), topology, segments));
copy.setForwarded(true);
return copy;
}
@Override
public Collection<Object> getItems(ReadWriteManyCommand cmd) {
return cmd.getAffectedKeys();
}
@Override
public Object item2key(Object key) {
return key;
}
@Override
public Collection<Object> newContainer() {
return new ArrayList<>();
}
@Override
public void accumulate(Collection<Object> list, Object key) {
list.add(key);
}
@Override
public int containerSize(Collection<Object> list) {
return list.size();
}
@Override
public Iterable<Object> toKeys(Collection<Object> list) {
return list;
}
@Override
public boolean shouldRegisterRemoteCallback(ReadWriteManyCommand cmd) {
return !cmd.isForwarded();
}
@Override
public Object transformResult(Object[] results) {
return results == null ? null : Arrays.asList(results);
}
}
| 2,558
| 31.392405
| 156
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/Collector.java
|
package org.infinispan.interceptors.distribution;
import java.util.concurrent.CompletableFuture;
/**
* Represents the ack collector for a write operation in triangle algorithm.
*
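 * A minimal life-cycle sketch (the collector instance is typically obtained from a
 * {@code CommandAckCollector}; the names used here are placeholders):
 * <pre>{@code
 * Collector<Object> collector = commandAckCollector.create(id, backupOwners, topologyId);
 * try {
 *    collector.primaryResult(localReturnValue, true);
 * } catch (Throwable t) {
 *    collector.primaryException(t);
 * }
 * return collector.getFuture(); // completes when the primary result and all backup acks are in
 * }</pre>
 *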
* @author Pedro Ruivo
* @since 9.0
*/
public interface Collector<T> {
/**
* @return The {@link CompletableFuture} that will be completed when all the acks are received.
*/
CompletableFuture<T> getFuture();
/**
* The exception results of the primary owner.
*
* @param throwable the {@link Throwable} throw by the primary owner
*/
void primaryException(Throwable throwable);
/**
* The write operation's return value.
*
* @param result the operation's return value
* @param success {@code true} if it was successful, {@code false} otherwise (for conditional operations).
*/
void primaryResult(T result, boolean success);
}
| 873
| 24.705882
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/L1NonTxInterceptor.java
|
package org.infinispan.interceptors.distribution;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.read.AbstractDataCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.EntryFactory;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.L1Manager;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.interceptors.impl.BaseRpcInterceptor;
import org.infinispan.interceptors.impl.MultiSubCommandInvoker;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Interceptor that handles L1 logic for non-transactional caches.
*
* @author Mircea Markus
* @author William Burns
*/
public class L1NonTxInterceptor extends BaseRpcInterceptor {
private static final Log log = LogFactory.getLog(L1NonTxInterceptor.class);
@Inject protected L1Manager l1Manager;
@Inject protected ClusteringDependentLogic cdl;
@Inject protected EntryFactory entryFactory;
@Inject protected CommandsFactory commandsFactory;
@Inject protected InternalDataContainer dataContainer;
@Inject protected StateTransferLock stateTransferLock;
@Inject protected KeyPartitioner keyPartitioner;
private long l1Lifespan;
private long replicationTimeout;
/**
 * This map holds all the current write synchronizers registered for a given key. An entry is only added to this
 * map when an operation is invoked that would cause a remote get to occur (which is controlled by the
 * {@link L1NonTxInterceptor#skipL1Lookup(FlagAffectedCommand, Object)} method).
 * The inserted value <b>MUST</b> be removed in a finally block after the remote get completes, to
 * prevent reference leaks.
 * <p>
 * Having a value in this map allows other concurrent operations that require the remote value to avoid
 * performing a remote get of their own while the first thread is already doing so. Any subsequent operation
 * wanting the remote value can simply call the
 * {@link L1WriteSynchronizer#get()} method or one of its overloads.
 * Note that the way to tell whether another thread is performing the remote get is to use the
 * {@link ConcurrentMap#putIfAbsent(Object, Object)} method and check whether the return value is null.
 * <p>
 * Having a value in this map also allows a concurrent write or L1 invalidation to try to stop the synchronizer
 * from updating the L1 value by invoking its
 * {@link L1WriteSynchronizer#trySkipL1Update()} method. If this method
 * returns false, then the write or L1 invalidation <b>MUST</b> wait for the synchronizer to complete before
 * continuing, to ensure it is able to remove the newly cached L1 value as it is now invalid. This waiting should
 * be done by calling the {@link L1WriteSynchronizer#get()} method or one of its
 * overloads. Failure to wait for the update to occur could cause an L1 data inconsistency as the
 * invalidation may not invalidate the new value.
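 * <p>
 * A condensed sketch of this pattern (it mirrors {@code performL1Lookup} below):
 * <pre>{@code
 * L1WriteSynchronizer sync = new L1WriteSynchronizer(dataContainer, l1Lifespan, stateTransferLock, cdl);
 * L1WriteSynchronizer existing = concurrentWrites.putIfAbsent(key, sync);
 * if (existing == null) {
 *    try {
 *       // this thread owns the remote get; sync is completed with the value or the exception
 *    } finally {
 *       concurrentWrites.remove(key); // mandatory, prevents reference leaks
 *    }
 * } else {
 *    Object value = existing.get(replicationTimeout, TimeUnit.MILLISECONDS);
 * }
 * }</pre>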
*/
private final ConcurrentMap<Object, L1WriteSynchronizer> concurrentWrites = new ConcurrentHashMap<>();
@Start
public void start() {
l1Lifespan = cacheConfiguration.clustering().l1().lifespan();
replicationTimeout = cacheConfiguration.clustering().remoteTimeout();
cacheConfiguration.clustering()
.attributes().attribute(ClusteringConfiguration.REMOTE_TIMEOUT)
.addListener((a, ignored) -> {
replicationTimeout = a.get();
});
}
@Override
public final Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command)
throws Throwable {
return visitDataReadCommand(ctx, command, false);
}
@Override
public final Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command)
throws Throwable {
return visitDataReadCommand(ctx, command, true);
}
private Object visitDataReadCommand(InvocationContext ctx, AbstractDataCommand command,
boolean isEntry) throws Throwable {
return performCommandWithL1WriteIfAble(ctx, command, isEntry, false, true);
}
protected Object performCommandWithL1WriteIfAble(InvocationContext ctx, DataCommand command,
boolean isEntry, boolean shouldAlwaysRunNextInterceptor, boolean registerL1) throws Throwable {
if (ctx.isOriginLocal()) {
Object key = command.getKey();
// If the command isn't going to return a remote value - just pass it down the interceptor chain
if (!skipL1Lookup(command, key)) {
return performL1Lookup(ctx, command, shouldAlwaysRunNextInterceptor, key, isEntry);
}
} else {
// If this is a remote command, and we found a value in our cache
// we store it so that we can later invalidate it
if (registerL1) {
l1Manager.addRequestor(command.getKey(), ctx.getOrigin());
}
}
return invokeNext(ctx, command);
}
private Object performL1Lookup(InvocationContext ctx, VisitableCommand command,
boolean runInterceptorOnConflict, Object key, boolean isEntry) throws Throwable {
      // Most of the time the putIfAbsent will succeed, so we don't do a get first
L1WriteSynchronizer l1WriteSync = new L1WriteSynchronizer(dataContainer, l1Lifespan, stateTransferLock,
cdl);
L1WriteSynchronizer presentSync = concurrentWrites.putIfAbsent(key, l1WriteSync);
// If the sync was null that means we are the first to register for the given key. If not that means there is
// a concurrent request that also wants to do a remote get for the key. If there was another thread requesting
// the key we should wait until they get the value instead of doing another remote get.
if (presentSync == null) {
// Note this is the same synchronizer we just created that is registered with the L1Manager
l1Manager.registerL1WriteSynchronizer(key, l1WriteSync);
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (t != null) {
l1WriteSync.retrievalEncounteredException(t);
}
// TODO Do we need try/finally here?
l1Manager.unregisterL1WriteSynchronizer(key, l1WriteSync);
concurrentWrites.remove(key);
});
} else {
if (log.isTraceEnabled()) {
log.tracef("Found current request for key %s, waiting for their invocation's response", key);
}
Object returnValue;
try {
returnValue = presentSync.get(replicationTimeout, TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
// This should never be required since the status is always set in a try catch above - but IBM
// doesn't...
log.warnf("Synchronizer didn't return in %s milliseconds - running command normally!",
replicationTimeout);
// Always run next interceptor if a timeout occurs
return invokeNext(ctx, command);
} catch (ExecutionException e) {
throw e.getCause();
}
if (runInterceptorOnConflict) {
// The command needs to write something. Execute the rest of the invocation chain.
return invokeNext(ctx, command);
} else if (!isEntry && returnValue instanceof InternalCacheEntry) {
// The command is read-only, and we found the value in the L1 cache. Return it.
returnValue = ((InternalCacheEntry) returnValue).getValue();
}
return returnValue;
}
}
protected boolean skipL1Lookup(FlagAffectedCommand command, Object key) {
return command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL) || command.hasAnyFlag(FlagBitSets.SKIP_REMOTE_LOOKUP)
|| command.hasAnyFlag(FlagBitSets.IGNORE_RETURN_VALUES) || cdl.getCacheTopology().isWriteOwner(key)
|| dataContainer.containsKey(key);
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
throws Throwable {
return handleDataWriteCommand(ctx, command, true);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, false);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, false);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, false);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, false);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, false);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, true);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, true);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return handleDataWriteCommand(ctx, command, true);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command);
}
private Object handleWriteManyCommand(InvocationContext ctx, WriteCommand command) {
Collection<?> keys = command.getAffectedKeys();
Set<Object> toInvalidate = new HashSet<>(keys.size());
for (Object k : keys) {
if (cdl.getCacheTopology().isWriteOwner(k)) {
toInvalidate.add(k);
}
}
CompletableFuture<?> invalidationFuture =
!toInvalidate.isEmpty() ? l1Manager.flushCache(toInvalidate, ctx.getOrigin(), true) : null;
//we also need to remove from L1 the keys that are not ours
Iterator<VisitableCommand> subCommands = keys.stream()
.filter(k -> !cdl.getCacheTopology().isWriteOwner(k))
// TODO: To be fixed in https://issues.redhat.com/browse/ISPN-11125
.map(k -> CompletionStages.join(removeFromL1Command(ctx, k, keyPartitioner.getSegment(k)))).iterator();
return invokeNextAndHandle(ctx, command, (InvocationContext rCtx, WriteCommand writeCommand, Object rv, Throwable ex) -> {
if (ex != null) {
if (mustSyncInvalidation(invalidationFuture, writeCommand)) {
return asyncValue(invalidationFuture).thenApply(rCtx, writeCommand, (rCtx1, rCommand1, rv1) -> {
throw ex;
});
}
throw ex;
} else {
if (mustSyncInvalidation(invalidationFuture, writeCommand)) {
return asyncValue(invalidationFuture).thenApply(null, null,
(rCtx2, rCommand2, rv2) -> MultiSubCommandInvoker.invokeEach(rCtx, subCommands, this, rv));
} else {
return MultiSubCommandInvoker.invokeEach(rCtx, subCommands, this, rv);
}
}
});
}
@Override
public Object visitInvalidateL1Command(InvocationContext ctx, InvalidateL1Command invalidateL1Command)
throws Throwable {
CompletableFuture<Void> initialStage = new CompletableFuture<>();
CompletionStage<Void> currentStage = initialStage;
for (Object key : invalidateL1Command.getKeys()) {
abortL1UpdateOrWait(key);
// If our invalidation was sent when the value wasn't yet cached but is still being requested the context
// may not have the value - if so we need to add it then now that we know we waited for the get response
// to complete
if (ctx.lookupEntry(key) == null) {
currentStage = entryFactory.wrapEntryForWriting(ctx, key, keyPartitioner.getSegment(key),
true, false, currentStage);
}
}
return asyncInvokeNext(ctx, invalidateL1Command, EntryFactory.expirationCheckDelay(currentStage, initialStage));
}
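   /**
    * Aborts a pending L1 update for the given key if it has not started writing yet; otherwise blocks
    * until the in-flight update completes, so the subsequent invalidation cannot be overwritten by a
    * stale L1 write.
    */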
private void abortL1UpdateOrWait(Object key) {
L1WriteSynchronizer sync = concurrentWrites.remove(key);
if (sync != null) {
if (sync.trySkipL1Update()) {
if (log.isTraceEnabled()) {
log.tracef("Aborted possible L1 update due to concurrent invalidation for key %s", key);
}
} else {
if (log.isTraceEnabled()) {
log.tracef("L1 invalidation found a pending update for key %s - need to block until finished", key);
}
// We have to wait for the pending L1 update to complete before we can properly invalidate. Any additional
// gets that come in after this invalidation we ignore for now.
boolean success;
try {
sync.get();
success = true;
} catch (InterruptedException e) {
success = false;
// Save the interruption status, but don't throw an explicit exception
Thread.currentThread().interrupt();
            } catch (ExecutionException e) {
// We don't care what the L1 update exception was
success = false;
}
if (log.isTraceEnabled()) {
log.tracef("Pending L1 update completed successfully: %b - L1 invalidation can occur for key %s", success, key);
}
}
}
}
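   /**
    * Handles a single-key write: if this node is a write owner, L1 copies on the other nodes are flushed,
    * and if this node originated the command without owning the key, the entry is also removed from the
    * local L1. For synchronous caches the invalidation is awaited before the result is returned.
    */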
private Object handleDataWriteCommand(InvocationContext ctx, DataWriteCommand command,
boolean assumeOriginKeptEntryInL1) {
if (command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
if (log.isTraceEnabled()) {
log.tracef("local mode forced, suppressing L1 calls.");
}
return invokeNext(ctx, command);
}
CompletableFuture<?> l1InvalidationFuture = invalidateL1InCluster(ctx, command, assumeOriginKeptEntryInL1);
return invokeNextAndHandle(ctx, command, (InvocationContext rCtx, DataWriteCommand dataWriteCommand, Object rv, Throwable ex) -> {
if (ex != null) {
if (mustSyncInvalidation(l1InvalidationFuture, dataWriteCommand)) {
return asyncValue(l1InvalidationFuture).thenApply(rCtx, dataWriteCommand, (rCtx1, rCommand1, rv1) -> {
throw ex;
});
}
throw ex;
} else {
if (mustSyncInvalidation(l1InvalidationFuture, dataWriteCommand)) {
if (shouldRemoveFromLocalL1(rCtx, dataWriteCommand)) {
CompletionStage<VisitableCommand> removeFromL1CommandStage = removeFromL1Command(rCtx, dataWriteCommand.getKey(),
dataWriteCommand.getSegment());
// TODO: To be fixed in https://issues.redhat.com/browse/ISPN-11125
VisitableCommand removeFromL1Command = CompletionStages.join(removeFromL1CommandStage);
return makeStage(asyncInvokeNext(rCtx, removeFromL1Command, l1InvalidationFuture))
.thenApply(null, null, (rCtx2, rCommand2, rv2) -> rv);
} else {
return asyncValue(l1InvalidationFuture).thenApply(rCtx, dataWriteCommand, (rCtx1, rCommand1, rv1) -> rv);
}
} else if (shouldRemoveFromLocalL1(rCtx, dataWriteCommand)) {
CompletionStage<VisitableCommand> removeFromL1CommandStage = removeFromL1Command(rCtx, dataWriteCommand.getKey(),
dataWriteCommand.getSegment());
// TODO: To be fixed in https://issues.redhat.com/browse/ISPN-11125
VisitableCommand removeFromL1Command = CompletionStages.join(removeFromL1CommandStage);
return invokeNextThenApply(rCtx, removeFromL1Command, (rCtx2, rCommand2, rv2) -> rv);
} else if (log.isTraceEnabled()) {
log.trace("Allowing entry to commit as local node is owner");
}
}
return rv;
});
}
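   /**
    * @return {@code true} when the L1 invalidation has to complete before returning, i.e. the
    * invalidation future exists, is not done yet, and the command is synchronous.
    */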
private boolean mustSyncInvalidation(CompletableFuture<?> invalidationFuture, WriteCommand writeCommand) {
return invalidationFuture != null && !invalidationFuture.isDone() && isSynchronous(writeCommand);
}
private boolean shouldRemoveFromLocalL1(InvocationContext ctx, DataWriteCommand command) {
return ctx.isOriginLocal() && !cdl.getCacheTopology().isWriteOwner(command.getKey());
}
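   /**
    * Builds an invalidate-from-L1 command for the given key, after aborting (or waiting for) any
    * concurrent L1 update and re-wrapping the entry in the context for writing.
    */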
private CompletionStage<VisitableCommand> removeFromL1Command(InvocationContext ctx, Object key, int segment) {
if (log.isTraceEnabled()) {
log.tracef("Removing entry from L1 for key %s", key);
}
abortL1UpdateOrWait(key);
ctx.removeLookedUpEntry(key);
CompletionStage<Void> stage = entryFactory.wrapEntryForWriting(ctx, key, segment, true, false, CompletableFutures.completedNull());
return stage.thenApply(ignore -> commandsFactory.buildInvalidateFromL1Command(EnumUtil.EMPTY_BIT_SET,
Collections.singleton(key)));
}
private CompletableFuture<?> invalidateL1InCluster(InvocationContext ctx, DataWriteCommand command, boolean assumeOriginKeptEntryInL1) {
CompletableFuture<?> l1InvalidationFuture = null;
if (cdl.getCacheTopology().isWriteOwner(command.getKey())) {
l1InvalidationFuture = l1Manager.flushCache(Collections.singletonList(command.getKey()), ctx.getOrigin(), assumeOriginKeptEntryInL1);
} else if (log.isTraceEnabled()) {
log.tracef("Not invalidating key '%s' as local node(%s) is not owner", command.getKey(), rpcManager.getAddress());
}
return l1InvalidationFuture;
}
@Override
protected Log getLog() {
return log;
}
}
| 21,520
| 47.580135
| 142
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/VersionedDistributionInterceptor.java
|
package org.infinispan.interceptors.distribution;
import static org.infinispan.transaction.impl.WriteSkewHelper.versionFromEntry;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.container.versioning.InequalVersionComparisonResult;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.remoting.responses.PrepareResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.transaction.impl.AbstractCacheTransaction;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.xa.CacheTransaction;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * A version of the {@link TxDistributionInterceptor} that adds logic for handling prepares when entries are
 * versioned.
*
* @author Manik Surtani
* @author Dan Berindei
*/
public class VersionedDistributionInterceptor extends TxDistributionInterceptor {
private static final Log log = LogFactory.getLog(VersionedDistributionInterceptor.class);
@Inject VersionGenerator versionGenerator;
@Override
protected Log getLog() {
return log;
}
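   /**
    * Before wrapping a remotely fetched entry in a transactional context, compares its version with any
    * version this transaction has already read for the key: a mismatch on a not-yet-wrapped key fails
    * fast with a write skew exception, while an already wrapped key simply ignores the newly fetched value.
    */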
@Override
protected void wrapRemoteEntry(InvocationContext ctx, Object key, CacheEntry ice, boolean isWrite) {
if (ctx.isInTxScope()) {
AbstractCacheTransaction cacheTransaction = ((TxInvocationContext<?>) ctx).getCacheTransaction();
EntryVersion seenVersion = cacheTransaction.getVersionsRead().get(key);
if (seenVersion != null) {
IncrementableEntryVersion newVersion = versionFromEntry(ice);
if (newVersion == null) {
throw new IllegalStateException("Wrapping entry without version");
}
if (seenVersion.compareTo(newVersion) != InequalVersionComparisonResult.EQUAL) {
if (ctx.lookupEntry(key) == null) {
                  // We have read the entry using a functional command on a remote node and now we want
                  // the full entry, but we cannot provide the same version as the one already read.
throw CONTAINER.writeSkewOnRead(key, key, seenVersion, newVersion);
} else {
                  // We have retrieved a remote entry even though the key is already wrapped: that can
                  // happen with GetKeysInGroupCommand, which does not know in advance which entries it
                  // will fetch. We can safely ignore the newly fetched value.
// We can safely ignore the newly fetched value.
return;
}
}
}
}
super.wrapRemoteEntry(ctx, key, ice, isWrite);
}
@Override
protected Object wrapFunctionalResultOnNonOriginOnReturn(Object rv, CacheEntry entry) {
IncrementableEntryVersion version = versionFromEntry(entry);
return new VersionedResult(rv, version == null ? versionGenerator.nonExistingVersion() : version);
}
@Override
protected Object wrapFunctionalManyResultOnNonOrigin(InvocationContext ctx, Collection<?> keys, Object[] values) {
// note: this relies on the fact that keys are already ordered on remote node
EntryVersion[] versions = new EntryVersion[keys.size()];
int i = 0;
for (Object key : keys) {
IncrementableEntryVersion version = versionFromEntry(ctx.lookupEntry(key));
versions[i++] = version == null ? versionGenerator.nonExistingVersion() : version;
}
return new VersionedResults(values, versions);
}
@Override
protected Object[] unwrapFunctionalManyResultOnOrigin(InvocationContext ctx, List<Object> keys, Object responseValue) {
if (responseValue instanceof VersionedResults) {
VersionedResults vrs = (VersionedResults) responseValue;
if (ctx.isInTxScope()) {
AbstractCacheTransaction tx = ((TxInvocationContext<?>) ctx).getCacheTransaction();
for (int i = 0; i < vrs.versions.length; ++i) {
checkAndAddReadVersion(tx, keys.get(i), vrs.versions[i]);
}
}
return vrs.values;
} else {
return null;
}
}
@Override
protected Object unwrapFunctionalResultOnOrigin(InvocationContext ctx, Object key, Object responseValue) {
VersionedResult vr = (VersionedResult) responseValue;
// As an optimization, read-only single-key commands are executed in SingleKeyNonTxInvocationContext
if (ctx.isInTxScope()) {
AbstractCacheTransaction tx = ((TxInvocationContext) ctx).getCacheTransaction();
checkAndAddReadVersion(tx, key, vr.version);
}
return vr.result;
}
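   /**
    * Records the version read for the given key in the transaction, throwing a write skew exception when
    * the transaction has already seen a different version of that key.
    */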
private void checkAndAddReadVersion(AbstractCacheTransaction tx, Object key, EntryVersion version) {
// TODO: should we check the write skew configuration here?
// TODO: version seen or looked up remote version?
EntryVersion lastVersionSeen = tx.getVersionsRead().get(key);
if (lastVersionSeen != null && lastVersionSeen.compareTo(version) != InequalVersionComparisonResult.EQUAL) {
throw CONTAINER.writeSkewOnRead(key, key, lastVersionSeen, version);
}
tx.addVersionRead(key, version);
}
@Override
protected CompletionStage<Object> prepareOnAffectedNodes(TxInvocationContext<?> ctx, PrepareCommand command, Collection<Address> recipients) {
CompletionStage<Map<Address, Response>> remoteInvocation;
if (recipients != null) {
MapResponseCollector collector = MapResponseCollector.ignoreLeavers(recipients.size());
remoteInvocation = rpcManager.invokeCommand(recipients, command, collector, rpcManager.getSyncRpcOptions());
} else {
MapResponseCollector collector = MapResponseCollector.ignoreLeavers();
remoteInvocation = rpcManager.invokeCommandOnAll(command, collector, rpcManager.getSyncRpcOptions());
}
return remoteInvocation.handle((responses, t) -> {
transactionRemotelyPrepared(ctx);
CompletableFutures.rethrowExceptionIfPresent(t);
PrepareResponse prepareResponse = new PrepareResponse();
checkTxCommandResponses(responses, command, (TxInvocationContext<LocalTransaction>) ctx, recipients,
prepareResponse);
// Now store newly generated versions from lock owners for use during the commit phase.
CacheTransaction ct = ctx.getCacheTransaction();
ct.setUpdatedEntryVersions(prepareResponse.mergeEntryVersions(ct.getUpdatedEntryVersions()));
return prepareResponse;
});
}
}
| 7,173
| 44.987179
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/WriteOnlyManyHelper.java
|
package org.infinispan.interceptors.distribution;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.function.Function;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.util.ReadOnlySegmentAwareCollection;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.transport.Address;
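/**
 * Helper that splits a {@link WriteOnlyManyCommand} across owners: per-node copies carry only the keys
 * that map to the relevant segments, and copies sent to backup owners are flagged as forwarded.
 */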
class WriteOnlyManyHelper extends WriteManyCommandHelper<WriteOnlyManyCommand, Collection<Object>, Object> {
WriteOnlyManyHelper(Function<WriteManyCommandHelper<WriteOnlyManyCommand, ?, ?>, InvocationSuccessFunction<WriteOnlyManyCommand>> createRemoteCallback) {
super(createRemoteCallback);
}
@Override
public WriteOnlyManyCommand copyForLocal(WriteOnlyManyCommand cmd, Collection<Object> keys) {
return new WriteOnlyManyCommand(cmd).withKeys(keys);
}
@Override
public WriteOnlyManyCommand copyForPrimary(WriteOnlyManyCommand cmd, LocalizedCacheTopology topology, IntSet segments) {
return new WriteOnlyManyCommand(cmd)
.withKeys(new ReadOnlySegmentAwareCollection(cmd.getAffectedKeys(), topology, segments));
}
@Override
public WriteOnlyManyCommand copyForBackup(WriteOnlyManyCommand cmd, LocalizedCacheTopology topology,
Address target, IntSet segments) {
WriteOnlyManyCommand copy = new WriteOnlyManyCommand(cmd)
.withKeys(new ReadOnlySegmentAwareCollection(cmd.getAffectedKeys(), topology, segments));
copy.setForwarded(true);
return copy;
}
@Override
public Collection<Object> getItems(WriteOnlyManyCommand cmd) {
return cmd.getAffectedKeys();
}
@Override
public Object item2key(Object key) {
return key;
}
@Override
public Collection<Object> newContainer() {
return new ArrayList<>();
}
@Override
public void accumulate(Collection<Object> list, Object key) {
list.add(key);
}
@Override
public int containerSize(Collection<Object> list) {
return list.size();
}
@Override
public Iterable<Object> toKeys(Collection<Object> list) {
return list;
}
@Override
public boolean shouldRegisterRemoteCallback(WriteOnlyManyCommand cmd) {
return !cmd.isForwarded();
}
@Override
public Object transformResult(Object[] results) {
return results == null ? null : Arrays.asList(results);
}
}
| 2,571
| 31.15
| 156
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/WriteOnlyManyEntriesHelper.java
|
package org.infinispan.interceptors.distribution;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.util.ReadOnlySegmentAwareMap;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.remoting.transport.Address;
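/**
 * Helper that splits a {@link WriteOnlyManyEntriesCommand} across owners: per-node copies see only the
 * subset of the argument map that falls into the relevant segments, and copies sent to backup owners are
 * flagged as forwarded.
 */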
class WriteOnlyManyEntriesHelper extends WriteManyCommandHelper<WriteOnlyManyEntriesCommand, Map<Object, Object>, Map.Entry<Object, Object>> {
WriteOnlyManyEntriesHelper(Function<WriteManyCommandHelper<WriteOnlyManyEntriesCommand, ?, ?>, InvocationSuccessFunction<WriteOnlyManyEntriesCommand>> createRemoteCallback) {
super(createRemoteCallback);
}
@Override
public WriteOnlyManyEntriesCommand copyForLocal(WriteOnlyManyEntriesCommand cmd, Map<Object, Object> entries) {
return new WriteOnlyManyEntriesCommand(cmd).withArguments(entries);
}
@Override
public WriteOnlyManyEntriesCommand copyForPrimary(WriteOnlyManyEntriesCommand cmd, LocalizedCacheTopology topology, IntSet segments) {
return new WriteOnlyManyEntriesCommand(cmd)
.withArguments(new ReadOnlySegmentAwareMap<>(cmd.getArguments(), topology, segments));
}
@Override
public WriteOnlyManyEntriesCommand copyForBackup(WriteOnlyManyEntriesCommand cmd, LocalizedCacheTopology topology,
Address target, IntSet segments) {
WriteOnlyManyEntriesCommand copy = new WriteOnlyManyEntriesCommand(cmd)
            .withArguments(new ReadOnlySegmentAwareMap<>(cmd.getArguments(), topology, segments));
copy.setForwarded(true);
return copy;
}
@Override
public Collection<Map.Entry<Object, Object>> getItems(WriteOnlyManyEntriesCommand cmd) {
return cmd.getArguments().entrySet();
}
@Override
public Object item2key(Map.Entry<Object, Object> entry) {
return entry.getKey();
}
@Override
public Map<Object, Object> newContainer() {
      // Use a LinkedHashMap so that iteration over the container follows insertion order
return new LinkedHashMap<>();
}
@Override
public void accumulate(Map<Object, Object> map, Map.Entry<Object, Object> entry) {
map.put(entry.getKey(), entry.getValue());
}
@Override
public int containerSize(Map<Object, Object> map) {
return map.size();
}
@Override
public Iterable<Object> toKeys(Map<Object, Object> map) {
return map.keySet();
}
@Override
public boolean shouldRegisterRemoteCallback(WriteOnlyManyEntriesCommand cmd) {
return !cmd.isForwarded();
}
@Override
public Object transformResult(Object[] results) {
return results == null ? null : Arrays.asList(results);
}
}
| 2,925
| 34.682927
| 177
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/distribution/BaseDistributionInterceptor.java
|
package org.infinispan.interceptors.distribution;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.stream.Stream;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.SegmentSpecificCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.read.AbstractDataCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.remote.BaseClusteredReadCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commands.write.AbstractDataWriteCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.ValueMatcher;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ArrayCollector;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.RemoteValueRetrievedListener;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.expiration.impl.InternalExpirationManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.interceptors.impl.ClusteringInterceptor;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.remoting.transport.impl.SingletonMapResponseCollector;
import org.infinispan.remoting.transport.impl.VoidResponseCollector;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.CacheTopologyUtil;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Base class for distribution of entries across a cluster.
*
* @author Manik Surtani
* @author Mircea.Markus@jboss.com
* @author Pete Muir
* @author Dan Berindei <dan@infinispan.org>
*/
public abstract class BaseDistributionInterceptor extends ClusteringInterceptor {
private static final Log log = LogFactory.getLog(BaseDistributionInterceptor.class);
private static final Object LOST_PLACEHOLDER = new Object();
@Inject protected RemoteValueRetrievedListener rvrl;
@Inject protected KeyPartitioner keyPartitioner;
@Inject protected TimeService timeService;
@Inject protected InternalExpirationManager<Object, Object> expirationManager;
protected boolean isL1Enabled;
protected boolean isReplicated;
private final ReadOnlyManyHelper readOnlyManyHelper = new ReadOnlyManyHelper();
private final InvocationSuccessFunction<AbstractDataWriteCommand> primaryReturnHandler = this::primaryReturnHandler;
@Override
protected Log getLog() {
return log;
}
@Start
public void configure() {
      // Can't rely on the super injectConfiguration() to be called before our injectDependencies() method
isL1Enabled = cacheConfiguration.clustering().l1().enabled();
isReplicated = cacheConfiguration.clustering().cacheMode().isReplicated();
}
@Override
public final Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
if (ctx.isOriginLocal() && !isLocalModeForced(command)) {
if (isSynchronous(command)) {
RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
return asyncInvokeNext(ctx, command,
rpcManager.invokeCommandOnAll(command, MapResponseCollector.ignoreLeavers(),
rpcOptions));
} else {
rpcManager.sendToAll(command, DeliverOrder.PER_SENDER);
return invokeNext(ctx, command);
}
}
return invokeNext(ctx, command);
}
protected DistributionInfo retrieveDistributionInfo(LocalizedCacheTopology topology, ReplicableCommand command, Object key) {
return topology.getSegmentDistribution(SegmentSpecificCommand.extractSegment(command, key, keyPartitioner));
}
/**
* Fetch a key from its remote owners and store it in the context.
*
* <b>Not thread-safe</b>. The invocation context should not be accessed concurrently from multiple threads,
* so this method should only be used for single-key commands.
*/
protected <C extends FlagAffectedCommand & TopologyAffectedCommand> CompletionStage<Void> remoteGetSingleKey(
InvocationContext ctx, C command, Object key, boolean isWrite) {
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
int topologyId = cacheTopology.getTopologyId();
DistributionInfo info = retrieveDistributionInfo(cacheTopology, command, key);
if (info.isReadOwner()) {
if (log.isTraceEnabled()) {
log.tracef("Key %s became local after wrapping, retrying command. Command topology is %d, current topology is %d",
key, command.getTopologyId(), topologyId);
}
         // The topology has changed between EntryWrappingInterceptor and this interceptor, let's retry
if (command.getTopologyId() == topologyId) {
throw new IllegalStateException();
}
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
}
if (log.isTraceEnabled()) {
log.tracef("Perform remote get for key %s. currentTopologyId=%s, owners=%s",
key, topologyId, info.readOwners());
}
ClusteredGetCommand getCommand = cf.buildClusteredGetCommand(key, info.segmentId(), command.getFlagsBitSet());
getCommand.setTopologyId(topologyId);
getCommand.setWrite(isWrite);
return rpcManager.invokeCommandStaggered(info.readOwners(), getCommand, new RemoteGetSingleKeyCollector(),
rpcManager.getSyncRpcOptions())
.thenAccept(response -> {
Object responseValue = response.getResponseValue();
if (responseValue == null) {
if (rvrl != null) {
rvrl.remoteValueNotFound(key);
}
wrapRemoteEntry(ctx, key, NullCacheEntry.getInstance(), isWrite);
return;
}
InternalCacheEntry ice = ((InternalCacheValue) responseValue).toInternalCacheEntry(key);
if (rvrl != null) {
rvrl.remoteValueFound(ice);
}
wrapRemoteEntry(ctx, key, ice, isWrite);
});
}
protected void wrapRemoteEntry(InvocationContext ctx, Object key, CacheEntry ice, boolean isWrite) {
entryFactory.wrapExternalEntry(ctx, key, ice, true, isWrite);
}
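   /**
    * Routes a non-transactional single-key write: a non-owner originator forwards the command to the
    * primary owner, the primary applies it locally and replicates it to the backups via
    * {@link #primaryReturnHandler}, and a backup simply applies the forwarded command (fetching the
    * previous value remotely first when the command needs it).
    */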
protected final Object handleNonTxWriteCommand(InvocationContext ctx, AbstractDataWriteCommand command) {
Object key = command.getKey();
CacheEntry entry = ctx.lookupEntry(key);
if (isLocalModeForced(command)) {
if (entry == null) {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
}
return invokeNext(ctx, command);
}
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
DistributionInfo info = cacheTopology.getSegmentDistribution(SegmentSpecificCommand.extractSegment(command, key,
keyPartitioner));
if (isReplicated && command.hasAnyFlag(FlagBitSets.BACKUP_WRITE) && !info.isWriteOwner()) {
// Replicated caches receive broadcast commands even when they are not owners (e.g. zero capacity nodes)
// The originator will ignore the UnsuccessfulResponse
command.fail();
return null;
}
if (entry == null) {
boolean load = shouldLoad(ctx, command, info);
if (info.isPrimary()) {
throw new IllegalStateException("Primary owner in writeCH should always be an owner in readCH as well.");
} else if (ctx.isOriginLocal()) {
return invokeRemotely(ctx, command, info.primary());
} else {
if (load) {
CompletionStage<?> remoteGet = remoteGetSingleKey(ctx, command, command.getKey(), true);
return asyncInvokeNext(ctx, command, remoteGet);
} else {
entryFactory.wrapExternalEntry(ctx, key, null, false, true);
return invokeNext(ctx, command);
}
}
} else {
if (info.isPrimary()) {
return invokeNextThenApply(ctx, command, primaryReturnHandler);
} else if (ctx.isOriginLocal()) {
return invokeRemotely(ctx, command, info.primary());
} else {
return invokeNext(ctx, command);
}
}
}
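   /**
    * Runs on the primary owner after the local write: replication is skipped for failed conditional
    * commands or when there are no backups; otherwise the command is replicated to the backup owners
    * (fire-and-forget for asynchronous caches) with the value matcher relaxed, and then switched to its
    * retry variant in case the command is retried on a new primary.
    */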
protected Object primaryReturnHandler(InvocationContext ctx, AbstractDataWriteCommand command, Object localResult) {
if (!command.isSuccessful()) {
if (log.isTraceEnabled()) log.tracef("Skipping the replication of the conditional command as it did not succeed on primary owner (%s).", command);
return localResult;
}
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
int segment = SegmentSpecificCommand.extractSegment(command, command.getKey(), keyPartitioner);
DistributionInfo distributionInfo = cacheTopology.getSegmentDistribution(segment);
Collection<Address> owners = distributionInfo.writeOwners();
if (owners.size() == 1) {
// There are no backups, skip the replication part.
return localResult;
}
      // Cache the matcher and reset it if we get an OutdatedTopologyException (or any other exception) from a backup
ValueMatcher originalMatcher = command.getValueMatcher();
// Ignore the previous value on the backup owners
command.setValueMatcher(ValueMatcher.MATCH_ALWAYS);
if (!isSynchronous(command)) {
if (isReplicated) {
rpcManager.sendToAll(command, DeliverOrder.PER_SENDER);
} else {
rpcManager.sendToMany(owners, command, DeliverOrder.PER_SENDER);
}
// Switch to the retry policy, in case the primary owner changes before we commit locally
command.setValueMatcher(originalMatcher.matcherForRetry());
return localResult;
}
VoidResponseCollector collector = VoidResponseCollector.ignoreLeavers();
RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
// Mark the command as a backup write so it can skip some checks
command.addFlags(FlagBitSets.BACKUP_WRITE);
CompletionStage<Void> remoteInvocation = isReplicated ?
rpcManager.invokeCommandOnAll(command, collector, rpcOptions) :
rpcManager.invokeCommand(owners, command, collector, rpcOptions);
return asyncValue(remoteInvocation.handle((ignored, t) -> {
// Unset the backup write bit as the command will be retried
command.setFlagsBitSet(command.getFlagsBitSet() & ~FlagBitSets.BACKUP_WRITE);
// Switch to the retry policy, in case the primary owner changed and the write already succeeded on the new primary
command.setValueMatcher(originalMatcher.matcherForRetry());
CompletableFutures.rethrowExceptionIfPresent(t);
return localResult;
}));
}
@Override
public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable {
if (command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_REMOTE_LOOKUP)) {
for (Object key : command.getKeys()) {
if (ctx.lookupEntry(key) == null) {
entryFactory.wrapExternalEntry(ctx, key, NullCacheEntry.getInstance(), true, false);
}
}
return invokeNext(ctx, command);
}
if (!ctx.isOriginLocal()) {
for (Object key : command.getKeys()) {
if (ctx.lookupEntry(key) == null) {
return UnsureResponse.INSTANCE;
}
}
return invokeNext(ctx, command);
}
CompletionStage<Void> remoteGetFuture = remoteGetMany(ctx, command, command.getKeys());
return asyncInvokeNext(ctx, command, remoteGetFuture);
}
protected <C extends FlagAffectedCommand & TopologyAffectedCommand>
CompletionStage<Void> remoteGetMany(InvocationContext ctx, C command, Collection<?> keys) {
return doRemoteGetMany(ctx, command, keys, null, false);
}
private <C extends FlagAffectedCommand & TopologyAffectedCommand>
CompletionStage<Void> doRemoteGetMany(InvocationContext ctx, C command, Collection<?> keys,
Map<Object, Collection<Address>> unsureOwners, boolean hasSuspectedOwner) {
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
Map<Address, List<Object>> requestedKeys = getKeysByOwner(ctx, keys, cacheTopology, null, unsureOwners);
if (requestedKeys.isEmpty()) {
for (Object key : keys) {
if (ctx.lookupEntry(key) == null) {
// We got an UnsureResponse or CacheNotFoundResponse from all the owners, retry
if (hasSuspectedOwner) {
// After all the owners are lost, we must wait for a new topology in case the key is still available
throw OutdatedTopologyException.RETRY_NEXT_TOPOLOGY;
} else {
// If we got only UnsureResponses we can retry without waiting, see RETRY_SAME_TOPOLOGY javadoc
throw OutdatedTopologyException.RETRY_SAME_TOPOLOGY;
}
}
}
return CompletableFutures.completedNull();
}
GlobalTransaction gtx = ctx.isInTxScope() ? ((TxInvocationContext) ctx).getGlobalTransaction() : null;
ClusteredReadCommandGenerator commandGenerator =
new ClusteredReadCommandGenerator(requestedKeys, command.getFlagsBitSet(), command.getTopologyId(), gtx);
RemoteGetManyKeyCollector collector = new RemoteGetManyKeyCollector(requestedKeys, ctx, command, unsureOwners,
hasSuspectedOwner);
// We cannot retry in the collector, because it can't return a CompletionStage
return rpcManager.invokeCommands(requestedKeys.keySet(), commandGenerator, collector,
rpcManager.getSyncRpcOptions())
.thenCompose(unsureOwners1 -> {
Collection<?> keys1 = unsureOwners1 != null ? unsureOwners1.keySet() : Collections.emptyList();
return doRemoteGetMany(ctx, command, keys1, unsureOwners1, collector.hasSuspectedOwner());
});
}
protected void handleRemotelyRetrievedKeys(InvocationContext ctx, WriteCommand appliedCommand, List<?> remoteKeys) {
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command) throws Throwable {
return handleFunctionalReadManyCommand(ctx, command, readOnlyManyHelper);
}
protected <C extends TopologyAffectedCommand & FlagAffectedCommand> Object handleFunctionalReadManyCommand(
InvocationContext ctx, C command, ReadManyCommandHelper<C> helper) {
      // We cannot merge this method with visitGetAllCommand because this command can't wrap entries into the context
// TODO: repeatable-reads are not implemented - see visitReadOnlyKeyCommand
if (command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_REMOTE_LOOKUP)) {
return handleLocalOnlyReadManyCommand(ctx, command, helper.keys(command));
}
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
Collection<?> keys = helper.keys(command);
if (!ctx.isOriginLocal()) {
return handleRemoteReadManyCommand(ctx, command, keys, helper);
}
if (keys.isEmpty()) {
return Stream.empty();
}
ConsistentHash ch = cacheTopology.getReadConsistentHash();
int estimateForOneNode = 2 * keys.size() / ch.getMembers().size();
List<Object> availableKeys = new ArrayList<>(estimateForOneNode);
Map<Address, List<Object>> requestedKeys = getKeysByOwner(ctx, keys, cacheTopology, availableKeys, null);
CompletionStage<Void> requiredKeysFuture = helper.fetchRequiredKeys(cacheTopology, requestedKeys, availableKeys,
ctx, command);
if (requiredKeysFuture == null) {
return asyncValue(fetchAndApplyValues(ctx, command, helper, keys, availableKeys, requestedKeys));
} else {
// We need to run the requiredKeysFuture and fetchAndApplyValues futures serially for two reasons
// a) adding the values to context after fetching the values remotely is not synchronized
// b) fetchAndApplyValues invokes the command on availableKeys and stores the result
return asyncValue(requiredKeysFuture.thenCompose(nil -> fetchAndApplyValues(ctx, command, helper, keys, availableKeys, requestedKeys)));
}
}
private <C extends TopologyAffectedCommand & FlagAffectedCommand> MergingCompletableFuture<Object> fetchAndApplyValues(InvocationContext ctx, C command, ReadManyCommandHelper<C> helper, Collection<?> keys, List<Object> availableKeys, Map<Address, List<Object>> requestedKeys) {
// TODO: while this works in a non-blocking way, the returned stream is not lazy as the functional
// contract suggests. Traversable is also not honored as it is executed only locally on originator.
// On FutureMode.ASYNC, there should be one command per target node going from the top level
// to allow retries in StateTransferInterceptor in case of topology change.
MergingCompletableFuture<Object> allFuture = new MergingCompletableFuture<>(
requestedKeys.size() + (availableKeys.isEmpty() ? 0 : 1),
new Object[keys.size()], helper::transformResult);
handleLocallyAvailableKeys(ctx, command, availableKeys, allFuture, helper);
int pos = availableKeys.size();
for (Map.Entry<Address, List<Object>> addressKeys : requestedKeys.entrySet()) {
List<Object> keysForAddress = addressKeys.getValue();
ReadOnlyManyCommand remoteCommand = helper.copyForRemote(command, keysForAddress, ctx);
remoteCommand.setTopologyId(command.getTopologyId());
Address target = addressKeys.getKey();
rpcManager.invokeCommand(target, remoteCommand, SingletonMapResponseCollector.ignoreLeavers(),
rpcManager.getSyncRpcOptions())
.whenComplete(
new ReadManyHandler(target, allFuture, ctx, command, keysForAddress, null, pos, helper));
pos += keysForAddress.size();
}
return allFuture;
}
private Object handleLocalOnlyReadManyCommand(InvocationContext ctx, VisitableCommand command, Collection<?> keys) {
for (Object key : keys) {
if (ctx.lookupEntry(key) == null) {
entryFactory.wrapExternalEntry(ctx, key, NullCacheEntry.getInstance(), true, false);
}
}
return invokeNext(ctx, command);
}
private <C extends TopologyAffectedCommand & VisitableCommand> Object handleRemoteReadManyCommand(
InvocationContext ctx, C command, Collection<?> keys, InvocationSuccessFunction<C> remoteReturnHandler) {
for (Object key : keys) {
if (ctx.lookupEntry(key) == null) {
return UnsureResponse.INSTANCE;
}
}
return invokeNextThenApply(ctx, command, remoteReturnHandler);
}
private class ReadManyHandler<C extends FlagAffectedCommand & TopologyAffectedCommand> implements BiConsumer<Map<Address, Response>, Throwable> {
private final Address target;
private final MergingCompletableFuture<Object> allFuture;
private final InvocationContext ctx;
private final C command;
private final List<Object> keys;
private final int destinationIndex;
private final Map<Object, Collection<Address>> contactedNodes;
private final ReadManyCommandHelper<C> helper;
private ReadManyHandler(Address target, MergingCompletableFuture<Object> allFuture, InvocationContext ctx, C command, List<Object> keys,
Map<Object, Collection<Address>> contactedNodes, int destinationIndex, ReadManyCommandHelper<C> helper) {
this.target = target;
this.allFuture = allFuture;
this.ctx = ctx;
this.command = command;
this.keys = keys;
this.destinationIndex = destinationIndex;
this.contactedNodes = contactedNodes;
this.helper = helper;
}
@Override
public void accept(Map<Address, Response> responseMap, Throwable throwable) {
if (throwable != null) {
allFuture.completeExceptionally(throwable);
return;
}
SuccessfulResponse response = getSuccessfulResponseOrFail(responseMap, allFuture, this::handleMissingResponse);
if (response == null) {
return;
}
try {
Object responseValue = response.getResponseValue();
Object[] values = unwrapFunctionalManyResultOnOrigin(ctx, keys, responseValue);
if (values != null) {
System.arraycopy(values, 0, allFuture.results, destinationIndex, values.length);
allFuture.countDown();
} else {
allFuture.completeExceptionally(new IllegalStateException("Unexpected response value " + responseValue));
}
} catch (Throwable t) {
allFuture.completeExceptionally(t);
}
}
private void handleMissingResponse(Response response) {
if (response instanceof UnsureResponse) {
allFuture.hasUnsureResponse = true;
}
Map<Object, Collection<Address>> contactedNodes = this.contactedNodes == null ? new HashMap<>() : this.contactedNodes;
Map<Address, List<Object>> requestedKeys;
synchronized (contactedNodes) {
for (Object key : keys) {
contactedNodes.computeIfAbsent(key, k -> new ArrayList<>(4)).add(target);
}
requestedKeys = getKeysByOwner(ctx, keys, CacheTopologyUtil.checkTopology(command, getCacheTopology()), null, contactedNodes);
}
int pos = destinationIndex;
for (Map.Entry<Address, List<Object>> addressKeys : requestedKeys.entrySet()) {
allFuture.increment();
List<Object> keysForAddress = addressKeys.getValue();
ReadOnlyManyCommand remoteCommand = helper.copyForRemote(command, keysForAddress, ctx);
remoteCommand.setTopologyId(command.getTopologyId());
Address target = addressKeys.getKey();
rpcManager.invokeCommand(target, remoteCommand, SingletonMapResponseCollector.ignoreLeavers(),
rpcManager.getSyncRpcOptions())
.whenComplete(new ReadManyHandler(target, allFuture, ctx, command, keysForAddress,
contactedNodes, pos, helper));
pos += keysForAddress.size();
}
Arrays.fill(allFuture.results, pos, destinationIndex + keys.size(), LOST_PLACEHOLDER);
allFuture.lostData = true;
allFuture.countDown();
}
}
private <C extends VisitableCommand> void handleLocallyAvailableKeys(
InvocationContext ctx, C command, List<Object> availableKeys,
MergingCompletableFuture<Object> allFuture, ReadManyCommandHelper<C> helper) {
if (availableKeys.isEmpty()) {
return;
}
C localCommand = helper.copyForLocal(command, availableKeys);
invokeNextAndHandle(ctx, localCommand, (rCtx, rCommand, rv, throwable) -> {
if (throwable != null) {
allFuture.completeExceptionally(throwable);
} else {
try {
helper.applyLocalResult(allFuture, rv);
allFuture.countDown();
} catch (Throwable t) {
allFuture.completeExceptionally(t);
}
}
return asyncValue(allFuture);
});
}
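   /**
    * Groups the keys that are not yet in the invocation context by the read owner they should be
    * requested from, preferring owners that already have keys assigned in order to minimize the number
    * of messages, skipping any ignored owners, and adding keys that are already available locally to
    * {@code availableKeys}.
    */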
private Map<Address, List<Object>> getKeysByOwner(InvocationContext ctx, Collection<?> keys,
LocalizedCacheTopology cacheTopology,
List<Object> availableKeys,
Map<Object, Collection<Address>> ignoredOwners) {
int capacity = cacheTopology.getMembers().size();
Map<Address, List<Object>> requestedKeys = new HashMap<>(capacity);
int estimateForOneNode = 2 * keys.size() / capacity;
for (Object key : keys) {
CacheEntry entry = ctx.lookupEntry(key);
if (entry == null) {
DistributionInfo distributionInfo = cacheTopology.getDistribution(key);
            // Try to minimize the number of messages by preferring an owner to which we've already
            // decided to send a message
boolean foundExisting = false;
Collection<Address> ignoreForKey = null;
for (Address address : distributionInfo.readOwners()) {
if (address.equals(rpcManager.getAddress())) {
throw new IllegalStateException("Entry should already be wrapped on read owners!");
} else if (ignoredOwners != null) {
if (ignoreForKey == null) {
ignoreForKey = ignoredOwners.get(key);
}
if (ignoreForKey != null && ignoreForKey.contains(address)) {
continue;
}
}
List<Object> list = requestedKeys.get(address);
if (list != null) {
list.add(key);
foundExisting = true;
break;
}
}
if (!foundExisting) {
Address target = null;
if (ignoredOwners == null) {
target = distributionInfo.primary();
} else {
for (Address address : distributionInfo.readOwners()) {
if (ignoreForKey == null) {
ignoreForKey = ignoredOwners.get(key);
}
if (ignoreForKey == null || !ignoreForKey.contains(address)) {
target = address;
break;
}
}
}
// If all read owners should be ignored we won't put that entry anywhere
if (target != null) {
List<Object> list = new ArrayList<>(estimateForOneNode);
list.add(key);
requestedKeys.put(target, list);
}
}
} else if (availableKeys != null) {
availableKeys.add(key);
}
}
return requestedKeys;
}
protected Object wrapFunctionalManyResultOnNonOrigin(InvocationContext rCtx, Collection<?> keys, Object[] values) {
return values;
}
protected Object[] unwrapFunctionalManyResultOnOrigin(InvocationContext ctx, List<Object> keys, Object responseValue) {
return responseValue instanceof Object[] ? (Object[]) responseValue : null;
}
private Object visitGetCommand(InvocationContext ctx, AbstractDataCommand command) throws Throwable {
if (ctx.lookupEntry(command.getKey()) != null) {
return invokeNext(ctx, command);
}
if (!ctx.isOriginLocal())
return UnsureResponse.INSTANCE;
if (!readNeedsRemoteValue(command))
return null;
return asyncInvokeNext(ctx, command, remoteGetSingleKey(ctx, command, command.getKey(), false));
}
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command)
throws Throwable {
return visitGetCommand(ctx, command);
}
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx,
GetCacheEntryCommand command) throws Throwable {
return visitGetCommand(ctx, command);
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command)
throws Throwable {
// TODO: repeatable-reads are not implemented, these need to keep the read values on remote side for the duration
// of the transaction, and that requires synchronous invocation of the readonly command on all owners.
// For better consistency, use versioning and write skew check that will fail the transaction when we apply
// the function on different version of the entry than the one previously read
Object key = command.getKey();
CacheEntry entry = ctx.lookupEntry(key);
if (entry != null) {
if (ctx.isOriginLocal()) {
// the entry is owned locally (it is NullCacheEntry if it was not found), no need to go remote
return invokeNext(ctx, command);
} else {
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) ->
wrapFunctionalResultOnNonOriginOnReturn(rv, entry));
}
}
if (!ctx.isOriginLocal()) {
return UnsureResponse.INSTANCE;
}
if (readNeedsRemoteValue(command)) {
LocalizedCacheTopology cacheTopology = CacheTopologyUtil.checkTopology(command, getCacheTopology());
Collection<Address> owners = cacheTopology.getDistribution(key).readOwners();
if (log.isTraceEnabled())
log.tracef("Doing a remote get for key %s in topology %d to %s", key, cacheTopology.getTopologyId(), owners);
ReadOnlyKeyCommand remoteCommand = remoteReadOnlyCommand(ctx, command);
         // make sure that the command topology is set to the value according to which we route it
remoteCommand.setTopologyId(cacheTopology.getTopologyId());
CompletionStage<SuccessfulResponse> rpc =
rpcManager.invokeCommandStaggered(owners, remoteCommand, new RemoteGetSingleKeyCollector(),
rpcManager.getSyncRpcOptions());
return asyncValue(rpc).thenApply(ctx, command, (rCtx, rCommand, response) -> {
Object responseValue = ((SuccessfulResponse) response).getResponseValue();
return unwrapFunctionalResultOnOrigin(rCtx, rCommand.getKey(), responseValue);
});
} else {
// This has LOCAL flags, just wrap NullCacheEntry and let the command run
entryFactory.wrapExternalEntry(ctx, key, NullCacheEntry.getInstance(), true, false);
return invokeNext(ctx, command);
}
}
protected ReadOnlyKeyCommand remoteReadOnlyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) {
return command;
}
protected Object wrapFunctionalResultOnNonOriginOnReturn(Object rv, CacheEntry entry) {
return rv;
}
protected Object unwrapFunctionalResultOnOrigin(InvocationContext ctx, Object key, Object responseValue) {
return responseValue;
}
protected Object invokeRemotely(InvocationContext ctx, DataWriteCommand command, Address primaryOwner) {
if (log.isTraceEnabled()) getLog().tracef("I'm not the primary owner, so sending the command to the primary owner(%s) in order to be forwarded", primaryOwner);
boolean isSyncForwarding = isSynchronous(command) || command.isReturnValueExpected();
if (!isSyncForwarding) {
rpcManager.sendTo(primaryOwner, command, DeliverOrder.PER_SENDER);
return null;
}
CompletionStage<ValidResponse> remoteInvocation;
try {
remoteInvocation = rpcManager.invokeCommand(primaryOwner, command, SingleResponseCollector.validOnly(),
rpcManager.getSyncRpcOptions());
} catch (Throwable t) {
command.setValueMatcher(command.getValueMatcher().matcherForRetry());
throw t;
}
return asyncValue(remoteInvocation).andHandle(ctx, command, (rCtx, dataWriteCommand, rv, t) -> {
dataWriteCommand.setValueMatcher(dataWriteCommand.getValueMatcher().matcherForRetry());
CompletableFutures.rethrowExceptionIfPresent(t);
Response response = ((Response) rv);
if (!response.isSuccessful()) {
dataWriteCommand.fail();
// FIXME A response cannot be successful and not valid
} else if (!(response instanceof ValidResponse)) {
throw unexpected(primaryOwner, response);
}
// We expect only successful/unsuccessful responses, not unsure
return ((ValidResponse) response).getResponseValue();
});
}
/**
* @return {@code true} if the value is not available on the local node and a read command is allowed to
* fetch it from a remote node. Does not check if the value is already in the context.
*/
protected boolean readNeedsRemoteValue(FlagAffectedCommand command) {
return !command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL | FlagBitSets.SKIP_REMOTE_LOOKUP);
}
protected interface ReadManyCommandHelper<C extends VisitableCommand> extends InvocationSuccessFunction<C> {
Collection<?> keys(C command);
C copyForLocal(C command, List<Object> keys);
ReadOnlyManyCommand copyForRemote(C command, List<Object> keys, InvocationContext ctx);
void applyLocalResult(MergingCompletableFuture allFuture, Object rv);
Object transformResult(Object[] results);
CompletionStage<Void> fetchRequiredKeys(LocalizedCacheTopology cacheTopology,
Map<Address, List<Object>> requestedKeys, List<Object> availableKeys,
InvocationContext ctx, C command);
}
protected class ReadOnlyManyHelper implements ReadManyCommandHelper<ReadOnlyManyCommand> {
@Override
public Object apply(InvocationContext rCtx, ReadOnlyManyCommand rCommand, Object rv) throws Throwable {
return wrapFunctionalManyResultOnNonOrigin(rCtx, rCommand.getKeys(), ((Stream) rv).toArray());
}
@Override
public Collection<?> keys(ReadOnlyManyCommand command) {
return command.getKeys();
}
@Override
public ReadOnlyManyCommand copyForLocal(ReadOnlyManyCommand command, List<Object> keys) {
return new ReadOnlyManyCommand(command).withKeys(keys);
}
@Override
public ReadOnlyManyCommand copyForRemote(ReadOnlyManyCommand command, List<Object> keys, InvocationContext ctx) {
return new ReadOnlyManyCommand(command).withKeys(keys);
}
@Override
public void applyLocalResult(MergingCompletableFuture allFuture, Object rv) {
((Stream) rv).collect(new ArrayCollector(allFuture.results));
}
@Override
public Object transformResult(Object[] results) {
return Arrays.stream(results).filter(o -> o != LOST_PLACEHOLDER);
}
@Override
public CompletionStage<Void> fetchRequiredKeys(LocalizedCacheTopology cacheTopology,
Map<Address, List<Object>> requestedKeys,
List<Object> availableKeys, InvocationContext ctx,
ReadOnlyManyCommand command) {
return null;
}
}
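   /**
    * Generates one clustered get-all command per target node, carrying only the keys requested from that
    * node and pinned to the topology id used when the routing decision was made.
    */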
private class ClusteredReadCommandGenerator implements Function<Address, ReplicableCommand> {
private final Map<Address, List<Object>> requestedKeys;
private final long flags;
private final int topologyId;
private final GlobalTransaction gtx;
public ClusteredReadCommandGenerator(Map<Address, List<Object>> requestedKeys, long flags, int topologyId,
GlobalTransaction gtx) {
this.requestedKeys = requestedKeys;
this.flags = flags;
this.topologyId = topologyId;
this.gtx = gtx;
}
@Override
public ReplicableCommand apply(Address target) {
List<Object> targetKeys = requestedKeys.get(target);
assert !targetKeys.isEmpty();
BaseClusteredReadCommand getCommand = cf.buildClusteredGetAllCommand(targetKeys, flags, gtx);
getCommand.setTopologyId(topologyId);
return getCommand;
}
}
/**
* Response collector for multi-key multi-target remote read commands.
*
* <p>Wrap in the context all received values, unless receiving an exception or unexpected response.
* Throw an exception immediately if a response is exceptional or unexpected.
* After processing all responses, if any of them were either {@link UnsureResponse} or
* {@link CacheNotFoundResponse}, throw an {@link OutdatedTopologyException}.</p>
*/
private class RemoteGetManyKeyCollector implements ResponseCollector<Map<Object, Collection<Address>>> {
private final Map<Address, List<Object>> requestedKeys;
private final InvocationContext ctx;
private final ReplicableCommand command;
private Map<Object, Collection<Address>> unsureOwners;
private boolean hasSuspectedOwner;
public RemoteGetManyKeyCollector(Map<Address, List<Object>> requestedKeys, InvocationContext ctx,
ReplicableCommand command, Map<Object, Collection<Address>> unsureOwners,
boolean hasSuspectedOwner) {
this.requestedKeys = requestedKeys;
this.ctx = ctx;
this.command = command;
this.unsureOwners = unsureOwners;
this.hasSuspectedOwner = hasSuspectedOwner;
}
@Override
public Map<Object, Collection<Address>> addResponse(Address sender, Response response) {
if (!(response instanceof SuccessfulResponse)) {
if (response instanceof CacheNotFoundResponse) {
hasSuspectedOwner = true;
addUnsureOwner(sender);
return null;
} else if (response instanceof UnsureResponse) {
addUnsureOwner(sender);
return null;
} else {
if (response instanceof ExceptionResponse) {
throw CompletableFutures.asCompletionException(((ExceptionResponse) response).getException());
} else {
throw unexpected(sender, response);
}
}
}
SuccessfulResponse successfulResponse = (SuccessfulResponse) response;
Object responseValue = successfulResponse.getResponseValue();
if (!(responseValue instanceof InternalCacheValue[])) {
throw CompletableFutures.asCompletionException(
new IllegalStateException("Unexpected response value: " + responseValue));
}
List<Object> senderKeys = requestedKeys.get(sender);
InternalCacheValue[] values = (InternalCacheValue[]) responseValue;
for (int i = 0; i < senderKeys.size(); ++i) {
Object key = senderKeys.get(i);
InternalCacheValue value = values[i];
CacheEntry entry = value == null ? NullCacheEntry.getInstance() : value.toInternalCacheEntry(key);
wrapRemoteEntry(ctx, key, entry, false);
}
// TODO Dan: handleRemotelyRetrievedKeys could call wrapRemoteEntry itself after transforming the entries
handleRemotelyRetrievedKeys(ctx, command instanceof WriteCommand ? (WriteCommand) command : null, senderKeys);
return null;
}
public void addUnsureOwner(Address sender) {
if (unsureOwners == null) {
unsureOwners = new HashMap<>();
}
List<Object> senderKeys = requestedKeys.get(sender);
for (Object key : senderKeys) {
Collection<Address> keyUnsureOwners = unsureOwners.get(key);
if (keyUnsureOwners == null) {
keyUnsureOwners = new ArrayList<>();
unsureOwners.put(key, keyUnsureOwners);
}
keyUnsureOwners.add(sender);
}
}
@Override
public Map<Object, Collection<Address>> finish() {
return unsureOwners;
}
public boolean hasSuspectedOwner() {
return hasSuspectedOwner;
}
}
}
| 42,560
| 47.200453
| 280
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/package-info.java
|
/**
* Interceptors dealing with locking.
*
* @api.private
*/
package org.infinispan.interceptors.locking;
| 110
| 14.857143
| 44
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/AbstractLockingInterceptor.java
|
package org.infinispan.interceptors.locking;
import static org.infinispan.commons.util.Util.toStr;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.InvocationFinallyAction;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.concurrent.locks.KeyAwareLockPromise;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
/**
* Base class for various locking interceptors in this package.
*
* @author Mircea Markus
*/
public abstract class AbstractLockingInterceptor extends DDAsyncInterceptor {
final InvocationFinallyAction<VisitableCommand> unlockAllReturnHandler = this::handleUnlockAll;
@Inject protected LockManager lockManager;
@Inject protected ClusteringDependentLogic cdl;
protected boolean invalidationMode;
@Start
public void start() {
invalidationMode = cacheConfiguration.clustering().cacheMode().isInvalidation();
}
protected abstract Log getLog();
@Override
public final Object visitClearCommand(InvocationContext ctx, ClearCommand command) {
return invokeNext(ctx, command);
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
if (command.hasAnyFlag(FlagBitSets.PUT_FOR_EXTERNAL_READ)) {
// Cache.putForExternalRead() is non-transactional
return visitNonTxDataWriteCommand(ctx, command);
}
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitRemoveExpiredCommand(InvocationContext ctx, RemoveExpiredCommand command) throws Throwable {
return visitNonTxDataWriteCommand(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
return visitNonTxDataWriteCommand(ctx, command);
}
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
return visitDataReadCommand(ctx, command);
}
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
return visitDataReadCommand(ctx, command);
}
protected abstract Object visitDataReadCommand(InvocationContext ctx, DataCommand command) throws Throwable;
protected abstract Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command) throws Throwable;
// We need this method in here because of putForExternalRead
final Object visitNonTxDataWriteCommand(InvocationContext ctx, DataWriteCommand command) {
// Non-tx invalidation mode ignores the primary owner, always locks on the originator
boolean shouldLockKey = invalidationMode ? ctx.isOriginLocal() : shouldLockKey(command);
if (hasSkipLocking(command) || !shouldLockKey) {
return invokeNext(ctx, command);
}
InvocationStage lockStage = lockAndRecord(ctx, command, command.getKey(), getLockTimeoutMillis(command));
return nonTxLockAndInvokeNext(ctx, command, lockStage, unlockAllReturnHandler);
}
@Override
public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand command) {
if (hasSkipLocking(command)) {
return invokeNext(ctx, command);
}
InvocationStage lockStage = lockAllAndRecord(ctx, command, Arrays.asList(command.getKeys()),
getLockTimeoutMillis(command));
return nonTxLockAndInvokeNext(ctx, command, lockStage, unlockAllReturnHandler);
}
@Override
public final Object visitInvalidateL1Command(InvocationContext ctx, InvalidateL1Command command) throws Throwable {
if (command.isCausedByALocalWrite(cdl.getAddress())) {
if (getLog().isTraceEnabled()) getLog().trace("Skipping invalidation as the write operation originated here.");
return null;
}
if (hasSkipLocking(command)) {
return invokeNext(ctx, command);
}
final Object[] keys = command.getKeys();
if (keys == null || keys.length < 1) {
return null;
}
ArrayList<Object> keysToInvalidate = new ArrayList<>(keys.length);
for (Object key : keys) {
try {
// Not blocking because the timeout is 0, although LockManager.tryLock() would have been nice
KeyAwareLockPromise lockPromise = lockManager.lock(key, ctx.getLockOwner(), 0, TimeUnit.MILLISECONDS);
lockPromise.lock();
ctx.addLockedKey(key);
keysToInvalidate.add(key);
} catch (TimeoutException te) {
getLog().unableToLockToInvalidate(key, cdl.getAddress());
}
}
if (keysToInvalidate.isEmpty()) {
return null;
}
command.setKeys(keysToInvalidate.toArray());
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
rCommand.setKeys(keys);
if (!rCtx.isInTxScope()) lockManager.unlockAll(rCtx);
});
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command, command.getMap().keySet(), command.isForwarded());
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) throws Throwable {
return visitDataReadCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command, command.getAffectedKeys(), command.isForwarded());
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command, command.getAffectedKeys(), command.isForwarded());
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command, command.getAffectedKeys(), command.isForwarded());
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) throws Throwable {
return handleWriteManyCommand(ctx, command, command.getAffectedKeys(), command.isForwarded());
}
@Override
public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable {
return handleReadManyCommand(ctx, command, command.getKeys());
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command) throws Throwable {
return handleReadManyCommand(ctx, command, command.getKeys());
}
protected abstract Object handleReadManyCommand(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) throws Throwable;
protected abstract <K> Object handleWriteManyCommand(InvocationContext ctx, WriteCommand command, Collection<K> keys, boolean forwarded) throws Throwable;
protected final long getLockTimeoutMillis(FlagAffectedCommand command) {
return command.hasAnyFlag(FlagBitSets.ZERO_LOCK_ACQUISITION_TIMEOUT) ? 0 :
cacheConfiguration.locking().lockAcquisitionTimeout();
}
final boolean shouldLockKey(DataWriteCommand command) {
return shouldLockKey(command.getSegment());
}
final boolean shouldLockKey(Object key) {
//only the primary owner acquires the lock.
boolean shouldLock = isLockOwner(key);
if (getLog().isTraceEnabled()) getLog().tracef("Are (%s) we the lock owners for key '%s'? %s", cdl.getAddress(), toStr(key), shouldLock);
return shouldLock;
}
final boolean shouldLockKey(int keySegment) {
//only the primary owner acquires the lock.
boolean shouldLock = isLockOwner(keySegment);
if (getLog().isTraceEnabled()) getLog().tracef("Are (%s) we the lock owners for segment '%s'? %s", cdl.getAddress(), keySegment, shouldLock);
return shouldLock;
}
final boolean isLockOwner(Object key) {
return cdl.getCacheTopology().getDistribution(key).isPrimary();
}
final boolean isLockOwner(int keySegment) {
return cdl.getCacheTopology().getSegmentDistribution(keySegment).isPrimary();
}
protected final InvocationStage lockAndRecord(InvocationContext context, VisitableCommand command, Object key,
long timeout) {
return lockManager.lock(key, context.getLockOwner(), timeout, TimeUnit.MILLISECONDS).toInvocationStage()
.thenAcceptMakeStage(context, command, (rCtx, rCommand, rv) -> rCtx.addLockedKey(key));
}
final InvocationStage lockAllAndRecord(InvocationContext context, VisitableCommand command, Collection<?> keys,
long timeout) {
return lockManager.lockAll(keys, context.getLockOwner(), timeout, TimeUnit.MILLISECONDS).toInvocationStage()
.andFinallyMakeStage(context, command, (rCtx, rCommand, rv, throwable) -> {
if (throwable == null) {
rCtx.addLockedKeys(keys);
} else {
// Clean up in case lockAll acquired one lock and timed out on another
lockManager.unlockAll(keys, rCtx.getLockOwner());
}
});
}
final boolean hasSkipLocking(FlagAffectedCommand command) {
return command.hasAnyFlag(FlagBitSets.SKIP_LOCKING);
}
/**
* Locks and invokes the next interceptor for non-transactional commands.
*/
final Object nonTxLockAndInvokeNext(InvocationContext ctx, VisitableCommand command,
InvocationStage lockStage, InvocationFinallyAction<VisitableCommand> finallyFunction) {
return lockStage.andHandle(ctx, command, (rCtx, rCommand, rv, throwable) -> {
if (throwable != null) {
lockManager.unlockAll(rCtx);
throw throwable;
} else {
return invokeNextAndFinally(rCtx, rCommand, finallyFunction);
}
});
}
private void handleUnlockAll(InvocationContext rCtx, VisitableCommand rCommand, Object rv, Throwable throwable) {
lockManager.unlockAll(rCtx);
}
}
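// --- Illustrative sketch (not part of Infinispan) ---
// A minimal, hypothetical subclass showing how the template methods above are meant to be
// combined for a single-key write: check SKIP_LOCKING and primary ownership, acquire the key
// lock with the configured timeout, then run the rest of the chain, releasing on completion.
abstract class ExampleLockingSketch extends AbstractLockingInterceptor {
@Override
protected Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command) {
// Only the primary owner of the key's segment locks; SKIP_LOCKING bypasses locking entirely.
if (hasSkipLocking(command) || !shouldLockKey(command)) {
return invokeNext(ctx, command);
}
// Acquire and record the lock, then invoke the chain; unlockAllReturnHandler releases the locks.
InvocationStage lockStage = lockAndRecord(ctx, command, command.getKey(), getLockTimeoutMillis(command));
return nonTxLockAndInvokeNext(ctx, command, lockStage, unlockAllReturnHandler);
}
}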
| 13,953 | 41.672783 | 157 | java |
null | infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/ClusteringDependentLogic.java |
package org.infinispan.interceptors.locking;
import static org.infinispan.transaction.impl.WriteSkewHelper.performWriteSkewCheckAndReturnNewVersions;
import java.lang.invoke.MethodHandles;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.SegmentSpecificCommand;
import org.infinispan.commands.tx.VersionedPrepareCommand;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.MemoryConfiguration;
import org.infinispan.configuration.cache.PersistenceConfiguration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ClearCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.eviction.impl.ActivationManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.functional.impl.FunctionalNotifier;
import org.infinispan.interceptors.impl.CacheLoaderInterceptor;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.L1Metadata;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.NotifyHelper;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.manager.PersistenceManager.StoreChangeListener;
import org.infinispan.persistence.manager.PersistenceStatus;
import org.infinispan.persistence.util.EntryLoader;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.LocalModeAddress;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.statetransfer.CommitManager;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.transaction.impl.WriteSkewHelper;
import org.infinispan.transaction.xa.CacheTransaction;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.DataOperationOrderer;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Abstraction for logic related to different clustering modes: replicated or distributed. This implements the <a
* href="http://en.wikipedia.org/wiki/Bridge_pattern">Bridge</a> pattern as described by the GoF: it plays the role of
* the <b>Implementor</b> and the various LockingInterceptors are the <b>Abstraction</b>.
*
* @author Mircea Markus
* @author Pedro Ruivo
*/
@Scope(Scopes.NAMED_CACHE)
public interface ClusteringDependentLogic {
enum Commit {
/**
* Do not commit the entry.
*/
NO_COMMIT(false, false),
/**
* Commit the entry but this node is not an owner, therefore, listeners should not be fired.
*/
COMMIT_NON_LOCAL(true, false),
/**
* Commit the entry, this is the owner.
*/
COMMIT_LOCAL(true, true);
private final boolean commit;
private final boolean local;
Commit(boolean commit, boolean local) {
this.commit = commit;
this.local = local;
}
public boolean isCommit() {
return commit;
}
public boolean isLocal() {
return local;
}
}
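// How the implementations below interpret a Commit value (see commitSingleEntry in
// ReplicationLogic and DistributionLogic): isCommit() gates the data-container write, and
// isLocal() additionally gates listener notifications. For example, an L1 write on a
// non-owner commits as COMMIT_NON_LOCAL, so the entry is stored but no events fire there.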
/**
* Starts the object; it must first be wired via the component registry.
*/
void start();
/**
* @return information about the location of keys.
*/
LocalizedCacheTopology getCacheTopology();
/**
* Commits the entry to the data container. The commit operation itself is always performed synchronously in the
* current thread. However, notifications for the operation may run asynchronously; the returned CompletionStage
* completes when those notifications, if any, have completed.
* @param entry the entry to commit
* @param command the command that produced the entry
* @param ctx the invocation context
* @param trackFlag flag used to track the commit, e.g. during state transfer
* @param l1Invalidation whether the commit is caused by an L1 invalidation
* @return completion stage that is complete when all notifications for the commit are complete, or null if already complete
*/
CompletionStage<Void> commitEntry(CacheEntry entry, FlagAffectedCommand command, InvocationContext ctx, Flag trackFlag, boolean l1Invalidation);
/**
* Determines what type of commit this is: no commit at all, or a commit because this node owns the key,
* either locally (fire listeners) or as a non-local owner (no listeners).
* @param command the command that produced the entry, may be null
* @param ctx the invocation context
* @param segment if 0 or greater, assumes the underlying container is segmented
* @param removed whether the entry was removed
* @return the type of commit to perform
*/
Commit commitType(FlagAffectedCommand command, InvocationContext ctx, int segment, boolean removed);
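/**
* Creates new versions for the keys modified by a versioned prepare and performs the write skew check
* against the versions read by the transaction. The returned stage completes with the map of updated
* versions, merged into the transaction, or with null when no versions changed (see the implementation
* in AbstractClusteringDependentLogic below).
*/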
CompletionStage<Map<Object, IncrementableEntryVersion>> createNewVersionsAndCheckForWriteSkews(VersionGenerator versionGenerator, TxInvocationContext context, VersionedPrepareCommand prepareCommand);
Address getAddress();
<K, V> EntryLoader<K, V> getEntryLoader();
@Scope(Scopes.NAMED_CACHE)
abstract class AbstractClusteringDependentLogic implements ClusteringDependentLogic, StoreChangeListener {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
@Inject protected ComponentRegistry componentRegistry;
@Inject protected DistributionManager distributionManager;
@Inject protected InternalDataContainer<Object, Object> dataContainer;
@Inject protected CacheNotifier<Object, Object> notifier;
@Inject protected CommitManager commitManager;
@Inject protected PersistenceManager persistenceManager;
@Inject protected TimeService timeService;
@Inject protected FunctionalNotifier<Object, Object> functionalNotifier;
@Inject protected Configuration configuration;
@Inject protected KeyPartitioner keyPartitioner;
@Inject protected EvictionManager<?,?> evictionManager;
@Inject protected DataOperationOrderer orderer;
@Inject protected ActivationManager activationManager;
private WriteSkewHelper.KeySpecificLogic keySpecificLogic;
private EntryLoader<?, ?> entryLoader;
private volatile boolean writeOrdering = false;
private volatile boolean passivation = false;
@Start
public void start() {
updateOrdering(configuration.persistence().usingStores());
this.keySpecificLogic = initKeySpecificLogic();
CacheLoaderInterceptor<?, ?> cli = componentRegistry.getComponent(CacheLoaderInterceptor.class);
if (cli != null) {
entryLoader = cli;
} else {
entryLoader = (ctx, key, segment, cmd) -> {
InternalCacheEntry<Object, Object> ice = dataContainer.peek(segment, key);
if (ice != null && ice.canExpire() && ice.isExpired(timeService.wallClockTime())) {
ice = null;
}
return CompletableFuture.completedFuture(ice);
};
}
persistenceManager.addStoreListener(this);
}
@Stop
public void stop() {
persistenceManager.removeStoreListener(this);
}
@Override
public void storeChanged(PersistenceStatus persistenceStatus) {
synchronized (this) {
updateOrdering(persistenceStatus.isEnabled());
}
}
private void updateOrdering(boolean usingStores) {
MemoryConfiguration memoryConfiguration = configuration.memory();
PersistenceConfiguration persistenceConfiguration = configuration.persistence();
// If eviction is enabled or stores are in use, we have to order writes with other concurrent operations:
// eviction needs it to perform eviction notifications in a non-blocking fashion, and stores need it to
// write entries to the data container atomically after loading them.
if (memoryConfiguration.isEvictionEnabled() || usingStores) {
writeOrdering = true;
// Passivation also has some additional things required when doing writes
passivation = usingStores && persistenceConfiguration.passivation();
}
}
@Override
public CompletionStage<Map<Object, IncrementableEntryVersion>> createNewVersionsAndCheckForWriteSkews(VersionGenerator versionGenerator, TxInvocationContext context, VersionedPrepareCommand prepareCommand) {
// Perform a write skew check on mapped entries.
CompletionStage<Map<Object, IncrementableEntryVersion>> uv = performWriteSkewCheckAndReturnNewVersions(prepareCommand, entryLoader, versionGenerator, context,
keySpecificLogic, keyPartitioner);
return uv.thenApply(evm -> {
CacheTransaction cacheTransaction = context.getCacheTransaction();
Map<Object, IncrementableEntryVersion> uvOld = cacheTransaction.getUpdatedEntryVersions();
if (uvOld != null && !uvOld.isEmpty()) {
uvOld.putAll(evm);
evm = uvOld;
}
cacheTransaction.setUpdatedEntryVersions(evm);
return (evm.isEmpty()) ? null : evm;
});
}
@Override
public final CompletionStage<Void> commitEntry(CacheEntry entry, FlagAffectedCommand command, InvocationContext ctx, Flag trackFlag, boolean l1Invalidation) {
if (entry instanceof ClearCacheEntry) {
return commitClearCommand(dataContainer, ctx, command);
}
if (!writeOrdering) {
return commitSingleEntry(entry, command, ctx, trackFlag, l1Invalidation);
}
if (passivation) {
return commitEntryPassivation(entry, command, ctx, trackFlag, l1Invalidation);
}
return commitEntryOrdered(entry, command, ctx, trackFlag, l1Invalidation);
}
private CompletionStage<Void> commitClearCommand(DataContainer<Object, Object> dataContainer, InvocationContext context,
FlagAffectedCommand command) {
if (notifier.hasListener(CacheEntryRemoved.class)) {
Iterator<InternalCacheEntry<Object, Object>> iterator = dataContainer.iteratorIncludingExpired();
AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
while (iterator.hasNext()) {
InternalCacheEntry entry = iterator.next();
// Iterator doesn't support remove
dataContainer.remove(entry.getKey());
aggregateCompletionStage.dependsOn(notifier.notifyCacheEntryRemoved(entry.getKey(), entry.getValue(), entry.getMetadata(), false, context, command));
}
return aggregateCompletionStage.freeze();
} else {
dataContainer.clear();
return CompletableFutures.completedNull();
}
}
private CompletionStage<Void> commitEntryPassivation(CacheEntry entry, FlagAffectedCommand command, InvocationContext ctx,
Flag trackFlag, boolean l1Invalidation) {
// To clarify the section below, these operations must be done in order and cannot be reordered, otherwise
// it can cause data guarantee issues with other operations
// 1. Acquire the order guarantee via orderer.orderOn
// 2. Query the data container if the entry is in memory
// 3. Update the in memory contents
// 4. Remove the entry from the store if the entry was not in memory
// 5. Complete/release the order guarantee
Object key = entry.getKey();
int segment = SegmentSpecificCommand.extractSegment(command, key, keyPartitioner);
CompletableFuture<DataOperationOrderer.Operation> ourFuture = new CompletableFuture<>();
// If this future is not null it means there is another pending read/write/eviction for this key, thus
// we have to wait on it before performing our commit to ensure data is updated properly
CompletionStage<DataOperationOrderer.Operation> waitingFuture = orderer.orderOn(key, ourFuture);
// We don't want to waste time removing an entry from the store if it is in the data container
// We use peek here instead of containsKey as the value could be expired - if so we want to make sure
// passivation manager knows the key is not in the store
CompletionStage<Void> chainedStage;
if (waitingFuture != null) {
// We have to wait on another operation to complete before doing the update
chainedStage = waitingFuture.thenCompose(ignore -> activateKey(key, segment, entry, command, ctx, trackFlag, l1Invalidation));
} else {
chainedStage = activateKey(key, segment, entry, command, ctx, trackFlag, l1Invalidation);
}
// After everything is done we have to make sure to complete our future
return chainedStage.whenComplete((ignore, ignoreT) -> orderer.completeOperation(key, ourFuture, operation(entry)));
}
private static DataOperationOrderer.Operation operation(CacheEntry entry) {
return entry.isRemoved() ? DataOperationOrderer.Operation.REMOVE : DataOperationOrderer.Operation.WRITE;
}
private CompletionStage<Void> activateKey(Object key, int segment, CacheEntry entry, FlagAffectedCommand command,
InvocationContext ctx, Flag trackFlag, boolean l1Invalidation) {
// If entry wasn't in container we should activate to remove from store
boolean shouldActivate = dataContainer.peek(segment, key) == null;
CompletionStage<Void> commitStage = commitSingleEntry(entry, command, ctx, trackFlag, l1Invalidation);
if (shouldActivate) {
return commitStage.thenCompose(ignore1 -> {
if (log.isTraceEnabled()) {
log.tracef("Activating entry for key %s due to update in dataContainer", key);
}
return activationManager.activateAsync(key, segment);
});
} else if (log.isTraceEnabled()) {
log.tracef("Skipping removal from store as %s was in the data container", key);
}
return commitStage;
}
private CompletionStage<Void> commitEntryOrdered(CacheEntry entry, FlagAffectedCommand command, InvocationContext ctx,
Flag trackFlag, boolean l1Invalidation) {
Object key = entry.getKey();
CompletableFuture<DataOperationOrderer.Operation> ourFuture = new CompletableFuture<>();
// If this future is not null it means there is another pending read/write/eviction for this key, thus
// we have to wait on it before performing our commit to ensure data is updated properly
CompletionStage<DataOperationOrderer.Operation> waitingFuture = orderer.orderOn(key, ourFuture);
CompletionStage<Void> chainedStage;
// We have to wait on another operation to complete before doing the update
if (waitingFuture != null) {
chainedStage = waitingFuture.thenCompose(ignore -> commitSingleEntry(entry, command, ctx, trackFlag, l1Invalidation));
} else {
chainedStage = commitSingleEntry(entry, command, ctx, trackFlag, l1Invalidation);
}
// After everything is done we have to make sure to complete our future
if (CompletionStages.isCompletedSuccessfully(chainedStage)) {
orderer.completeOperation(key, ourFuture, operation(entry));
return CompletableFutures.completedNull();
} else {
return chainedStage.whenComplete((ignore, ignoreT) -> orderer.completeOperation(key, ourFuture, operation(entry)));
}
}
protected abstract CompletionStage<Void> commitSingleEntry(CacheEntry entry, FlagAffectedCommand command,
InvocationContext ctx, Flag trackFlag, boolean l1Invalidation);
protected Commit clusterCommitType(FlagAffectedCommand command, InvocationContext ctx, int segment, boolean removed) {
// ignore locality for removals, even if skipOwnershipCheck is not true
if (command != null && command.hasAnyFlag(FlagBitSets.SKIP_OWNERSHIP_CHECK)) {
return Commit.COMMIT_LOCAL;
}
// Non-transactional caches do not write the entry on the originator when the originator is a backup owner,
// but that check is done in NonTx/TriangleDistributionInterceptor, so we don't check here again.
// We also want to allow the command to commit when the originator starts as primary but becomes a backup
// after the backups acked the write, so the command doesn't have to be retried.
if (getCacheTopology().isSegmentWriteOwner(segment)) {
return Commit.COMMIT_LOCAL;
}
return Commit.NO_COMMIT;
}
@Override
public Commit commitType(FlagAffectedCommand command, InvocationContext ctx, int segment, boolean removed) {
return clusterCommitType(command, ctx, segment, removed);
}
protected abstract WriteSkewHelper.KeySpecificLogic initKeySpecificLogic();
@Override
public LocalizedCacheTopology getCacheTopology() {
return distributionManager.getCacheTopology();
}
@Override
public Address getAddress() {
return getCacheTopology().getLocalAddress();
}
@Override
public final <K, V> EntryLoader<K, V> getEntryLoader() {
//noinspection unchecked
return (EntryLoader<K, V>) entryLoader;
}
}
/**
* This logic is used in local mode caches.
*/
class LocalLogic extends AbstractClusteringDependentLogic {
private LocalizedCacheTopology localTopology;
@Inject
public void init(Transport transport, Configuration configuration, KeyPartitioner keyPartitioner) {
Address address = transport != null ? transport.getAddress() : LocalModeAddress.INSTANCE;
boolean segmented = configuration.persistence().stores().stream().anyMatch(StoreConfiguration::segmented);
if (segmented) {
this.localTopology = LocalizedCacheTopology.makeSegmentedSingletonTopology(keyPartitioner,
configuration.clustering().hash().numSegments(), address);
} else {
this.localTopology = LocalizedCacheTopology.makeSingletonTopology(CacheMode.LOCAL, address);
}
}
@Override
public LocalizedCacheTopology getCacheTopology() {
return localTopology;
}
@Override
public Address getAddress() {
return localTopology.getLocalAddress();
}
@Override
public Commit commitType(FlagAffectedCommand command, InvocationContext ctx, int segment, boolean removed) {
return Commit.COMMIT_LOCAL;
}
@Override
protected CompletionStage<Void> commitSingleEntry(CacheEntry entry, FlagAffectedCommand command, InvocationContext ctx,
Flag trackFlag, boolean l1Invalidation) {
// Cache flags before they're reset
// TODO: Can the reset be done after notification instead?
boolean created = entry.isCreated();
boolean removed = entry.isRemoved();
boolean expired = removed && entry instanceof MVCCEntry && ((MVCCEntry) entry).isExpired();
Object key = entry.getKey();
int segment = SegmentSpecificCommand.extractSegment(command, key, keyPartitioner);
InternalCacheEntry previousEntry = dataContainer.peek(segment, entry.getKey());
Object previousValue;
Metadata previousMetadata;
if (previousEntry != null) {
previousValue = previousEntry.getValue();
previousMetadata = previousEntry.getMetadata();
} else {
previousValue = null;
previousMetadata = null;
}
CompletionStage<Void> stage = commitManager.commit(entry, trackFlag, segment, l1Invalidation, ctx);
// Notify after events if necessary
return stage.thenCompose(ignore -> NotifyHelper.entryCommitted(notifier, functionalNotifier, created, removed, expired,
entry, ctx, command, previousValue, previousMetadata, evictionManager));
}
@Override
protected WriteSkewHelper.KeySpecificLogic initKeySpecificLogic() {
return WriteSkewHelper.ALWAYS_TRUE_LOGIC;
}
}
/**
* This logic is used in invalidation mode caches.
*/
class InvalidationLogic extends AbstractClusteringDependentLogic {
@Override
public Commit commitType(FlagAffectedCommand command, InvocationContext ctx, int segment, boolean removed) {
return Commit.COMMIT_LOCAL;
}
@Override
protected CompletionStage<Void> commitSingleEntry(CacheEntry entry, FlagAffectedCommand command,
InvocationContext ctx, Flag trackFlag, boolean l1Invalidation) {
// Cache flags before they're reset
// TODO: Can the reset be done after notification instead?
boolean created = entry.isCreated();
boolean removed = entry.isRemoved();
boolean expired;
if (removed && entry instanceof MVCCEntry) {
expired = ((MVCCEntry) entry).isExpired();
} else {
expired = false;
}
Object key = entry.getKey();
int segment = SegmentSpecificCommand.extractSegment(command, key, keyPartitioner);
InternalCacheEntry previousEntry = dataContainer.peek(segment, entry.getKey());
Object previousValue;
Metadata previousMetadata;
if (previousEntry != null) {
previousValue = previousEntry.getValue();
previousMetadata = previousEntry.getMetadata();
} else {
previousValue = null;
previousMetadata = null;
}
CompletionStage<Void> stage = commitManager.commit(entry, trackFlag, segment, l1Invalidation, ctx);
// Notify after events if necessary
return stage.thenCompose(ignore -> NotifyHelper.entryCommitted(notifier, functionalNotifier, created, removed, expired,
entry, ctx, command, previousValue, previousMetadata, evictionManager));
}
@Override
protected WriteSkewHelper.KeySpecificLogic initKeySpecificLogic() {
return null; //not used because write skew check is not allowed with invalidation
}
}
/**
* This logic is used in replicated mode caches.
*/
class ReplicationLogic extends InvalidationLogic {
@Inject StateTransferLock stateTransferLock;
private final WriteSkewHelper.KeySpecificLogic localNodeIsPrimaryOwner =
segment -> getCacheTopology().getSegmentDistribution(segment).isPrimary();
@Override
public Commit commitType(FlagAffectedCommand command, InvocationContext ctx, int segment, boolean removed) {
return clusterCommitType(command, ctx, segment, removed);
}
@Override
protected CompletionStage<Void> commitSingleEntry(CacheEntry entry, FlagAffectedCommand command,
InvocationContext ctx, Flag trackFlag, boolean l1Invalidation) {
Object key = entry.getKey();
int segment = SegmentSpecificCommand.extractSegment(command, key, keyPartitioner);
Commit doCommit;
Object previousValue = null;
Metadata previousMetadata = null;
CompletionStage<Void> stage = null;
// Don't allow the CH to change (and state transfer to invalidate entries)
// between the ownership check and the commit
stateTransferLock.acquireSharedTopologyLock();
try {
doCommit = commitType(command, ctx, segment, entry.isRemoved());
if (doCommit.isCommit()) {
InternalCacheEntry previousEntry = dataContainer.peek(segment, key);
if (previousEntry != null) {
previousValue = previousEntry.getValue();
previousMetadata = previousEntry.getMetadata();
}
stage = commitManager.commit(entry, trackFlag, segment, l1Invalidation, ctx);
}
} finally {
stateTransferLock.releaseSharedTopologyLock();
}
if (doCommit.isCommit() && doCommit.isLocal()) {
boolean created = entry.isCreated();
boolean removed = entry.isRemoved();
boolean expired;
if (removed && entry instanceof MVCCEntry) {
expired = ((MVCCEntry) entry).isExpired();
} else {
expired = false;
}
if (stage == null || CompletionStages.isCompletedSuccessfully(stage)) {
return NotifyHelper.entryCommitted(notifier, functionalNotifier, created, removed, expired,
entry, ctx, command, previousValue, previousMetadata, evictionManager);
} else {
Object finalPreviousValue = previousValue;
Metadata finalPreviousMetadata = previousMetadata;
return stage.thenCompose(ignore -> NotifyHelper.entryCommitted(notifier, functionalNotifier, created, removed, expired,
entry, ctx, command, finalPreviousValue, finalPreviousMetadata, evictionManager));
}
}
return stage == null ? CompletableFutures.completedNull() : stage;
}
@Override
protected WriteSkewHelper.KeySpecificLogic initKeySpecificLogic() {
//in two phase commit, only the primary owner should perform the write skew check
return localNodeIsPrimaryOwner;
}
}
/**
* This logic is used in distributed mode caches.
*/
class DistributionLogic extends AbstractClusteringDependentLogic {
@Inject StateTransferLock stateTransferLock;
private final WriteSkewHelper.KeySpecificLogic localNodeIsPrimaryOwner = segment -> getCacheTopology().getSegmentDistribution(segment).isPrimary();
@Override
protected CompletionStage<Void> commitSingleEntry(CacheEntry entry, FlagAffectedCommand command,
InvocationContext ctx, Flag trackFlag, boolean l1Invalidation) {
Object key = entry.getKey();
int segment = SegmentSpecificCommand.extractSegment(command, key, keyPartitioner);
Commit doCommit;
Object previousValue = null;
Metadata previousMetadata = null;
// Don't allow the CH to change (and state transfer to invalidate entries)
// between the ownership check and the commit
stateTransferLock.acquireSharedTopologyLock();
CompletionStage<Void> stage = null;
try {
doCommit = commitType(command, ctx, segment, entry.isRemoved());
boolean isL1Write = false;
if (!doCommit.isCommit() && configuration.clustering().l1().enabled()) {
// transform for L1
if (!entry.isRemoved()) {
long lifespan = entry.getLifespan();
if (lifespan < 0 || lifespan > configuration.clustering().l1().lifespan()) {
Metadata metadata = entry.getMetadata().builder()
.lifespan(configuration.clustering().l1().lifespan())
.build();
entry.setMetadata(new L1Metadata(metadata));
}
}
isL1Write = true;
doCommit = Commit.COMMIT_NON_LOCAL;
} else if (doCommit.isCommit() && entry.getMetadata() instanceof L1Metadata) {
throw new IllegalStateException("Local entries must not have L1 metadata");
}
if (doCommit.isCommit()) {
// TODO use value from the entry
InternalCacheEntry previousEntry = dataContainer.peek(segment, key);
if (previousEntry != null) {
previousValue = previousEntry.getValue();
previousMetadata = previousEntry.getMetadata();
}
// don't overwrite non-L1 entry with L1 (e.g. when originator == backup
// and therefore we have two contexts on one node)
boolean skipL1Write = isL1Write && previousEntry != null && !previousEntry.isL1Entry();
if (!skipL1Write) {
stage = commitManager.commit(entry, trackFlag, segment, l1Invalidation || isL1Write, ctx);
}
}
} finally {
stateTransferLock.releaseSharedTopologyLock();
}
if (doCommit.isCommit() && doCommit.isLocal()) {
boolean created = entry.isCreated();
boolean removed = entry.isRemoved();
boolean expired;
if (removed && entry instanceof MVCCEntry) {
expired = ((MVCCEntry) entry).isExpired();
} else {
expired = false;
}
if (stage == null || CompletionStages.isCompletedSuccessfully(stage)) {
return NotifyHelper.entryCommitted(notifier, functionalNotifier, created, removed, expired,
entry, ctx, command, previousValue, previousMetadata, evictionManager);
} else {
Object finalPreviousValue = previousValue;
Metadata finalPreviousMetadata = previousMetadata;
return stage.thenCompose(ignore -> NotifyHelper.entryCommitted(notifier, functionalNotifier, created,
removed, expired, entry, ctx, command, finalPreviousValue, finalPreviousMetadata, evictionManager));
}
}
return stage == null ? CompletableFutures.completedNull() : stage;
}
@Override
protected WriteSkewHelper.KeySpecificLogic initKeySpecificLogic() {
//in two phase commit, only the primary owner should perform the write skew check
return localNodeIsPrimaryOwner;
}
}
}
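// --- Illustrative note (not part of Infinispan) ---
// commitEntry dispatch in AbstractClusteringDependentLogic above, summarized:
//   ClearCacheEntry                    -> commitClearCommand (clear, firing removed events if listeners exist)
//   no stores and no eviction          -> commitSingleEntry directly (writeOrdering == false)
//   passivation enabled                -> commitEntryPassivation (order on key, commit, maybe activate)
//   stores or eviction, no passivation -> commitEntryOrdered (order on key, commit)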
| 31,422 | 45.760417 | 213 | java |
null | infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/NonTransactionalLockingInterceptor.java |
package org.infinispan.interceptors.locking;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.infinispan.InvalidCacheUsageException;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Locking interceptor to be used for non-transactional caches.
*
* @author Mircea Markus
*/
public class NonTransactionalLockingInterceptor extends AbstractLockingInterceptor {
private static final Log log = LogFactory.getLog(NonTransactionalLockingInterceptor.class);
@Override
protected Log getLog() {
return log;
}
@Override
protected final Object visitDataReadCommand(InvocationContext ctx, DataCommand command) throws Throwable {
assertNonTransactional(ctx);
return invokeNext(ctx, command);
}
@Override
protected Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command) throws Throwable {
assertNonTransactional(ctx);
return visitNonTxDataWriteCommand(ctx, command);
}
@Override
public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand command) {
// Non-transactional invalidation caches only lock the key on the originator
if (!ctx.isOriginLocal())
return invokeNext(ctx, command);
return super.visitInvalidateCommand(ctx, command);
}
@Override
protected Object handleReadManyCommand(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
assertNonTransactional(ctx);
return invokeNext(ctx, command);
}
@Override
protected <K> Object handleWriteManyCommand(InvocationContext ctx, WriteCommand command, Collection<K> keys, boolean forwarded) throws Throwable {
assertNonTransactional(ctx);
if (forwarded || hasSkipLocking(command)) {
return invokeNext(ctx, command);
}
List<K> keysToLock = Collections.emptyList();
for (K key : keys) {
if (shouldLockKey(key)) {
if (keysToLock == Collections.emptyList()) {
keysToLock = new ArrayList<>();
}
keysToLock.add(key);
}
}
InvocationStage lockStage = lockAllAndRecord(ctx, command, keysToLock, getLockTimeoutMillis(command));
return nonTxLockAndInvokeNext(ctx, command, lockStage, unlockAllReturnHandler);
}
private void assertNonTransactional(InvocationContext ctx) {
//this only happens if the cache is used in a transaction's scope
if (ctx.isInTxScope()) {
throw new InvalidCacheUsageException(
"This is a non-transactional cache and cannot be accessed with a transactional InvocationContext.");
}
}
}
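// --- Illustrative note (not part of Infinispan) ---
// Hypothetical walk-through of handleWriteManyCommand above for a putAll spanning several
// segments: each primary owner locks only the keys it owns (shouldLockKey(key) == true),
// invokes the chain and unlocks on completion, while the copies forwarded to other owners
// arrive with forwarded == true and therefore lock nothing.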
| 3,085 | 34.883721 | 149 | java |
null | infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/AbstractTxLockingInterceptor.java |
package org.infinispan.interceptors.locking;
import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.transaction.impl.AbstractCacheTransaction;
import org.infinispan.util.concurrent.locks.PendingLockManager;
import org.infinispan.util.concurrent.locks.PendingLockPromise;
/**
* Base class for transaction based locking interceptors.
*
* @author Mircea.Markus@jboss.com
*/
public abstract class AbstractTxLockingInterceptor extends AbstractLockingInterceptor {
@Inject PartitionHandlingManager partitionHandlingManager;
@Inject PendingLockManager pendingLockManager;
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, unlockAllReturnHandler);
}
@Override
protected Object handleReadManyCommand(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
if (ctx.isInTxScope())
return invokeNext(ctx, command);
return invokeNextAndFinally(ctx, command, unlockAllReturnHandler);
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (t instanceof OutdatedTopologyException)
throw t;
releaseLockOnTxCompletion(((TxInvocationContext<?>) rCtx));
});
}
/**
* The backup (non-primary) owners keep a "backup lock" for each key they received in a lock/prepare command.
* Normally many transactions can hold the backup lock for a key at the same time, but when a backup owner
* becomes the primary owner, a new transaction trying to obtain the "real" lock has to wait for all the
* backup locks to be released. A backup lock is released either by a commit/rollback/unlock command or by
* the originator leaving the cluster (if recovery is disabled).
*/
final InvocationStage lockOrRegisterBackupLock(TxInvocationContext<?> ctx, VisitableCommand command, Object key,
long lockTimeout) {
switch (cdl.getCacheTopology().getDistribution(key).writeOwnership()) {
case PRIMARY:
return checkPendingAndLockKey(ctx, command, key, lockTimeout);
case BACKUP:
ctx.getCacheTransaction().addBackupLockForKey(key);
// fallthrough
default:
return InvocationStage.completedNullStage();
}
}
/**
* Same as {@link #lockOrRegisterBackupLock(TxInvocationContext, VisitableCommand, Object, long)}, but for a
* collection of keys.
*
* @return an invocation stage that completes once the primary-owned keys have been locked.
*/
final InvocationStage lockAllOrRegisterBackupLock(TxInvocationContext<?> ctx, VisitableCommand command,
Collection<?> keys, long lockTimeout) {
if (keys.isEmpty()) {
return InvocationStage.completedNullStage();
}
Collection<Object> keysToLock = new ArrayList<>(keys.size());
AbstractCacheTransaction cacheTransaction = ctx.getCacheTransaction();
LocalizedCacheTopology cacheTopology = cdl.getCacheTopology();
for (Object key : keys) {
// Skip keys that are already locked (when retrying a lock/prepare command)
if (cacheTransaction.ownsLock(key))
continue;
switch (cacheTopology.getDistribution(key).writeOwnership()) {
case PRIMARY:
keysToLock.add(key);
break;
case BACKUP:
cacheTransaction.addBackupLockForKey(key);
break;
default:
break;
}
}
if (keysToLock.isEmpty()) {
return InvocationStage.completedNullStage();
}
return checkPendingAndLockAllKeys(ctx, command, keysToLock, lockTimeout);
}
/**
* Besides acquiring a lock, this method also handles the following situation:
* 1. consistentHash("k") == {A, B}, tx1 prepared on A and B. Then node A crashed (A == single lock owner)
* 2. at this point tx2 which also writes "k" tries to prepare on B.
* 3. tx2 has to determine that "k" is already locked by another tx (i.e. tx1) and it has to wait for that tx to finish before acquiring the lock.
*
* The algorithm used at step 3 is:
* - the transaction table(TT) associates the current topology id with every remote and local transaction it creates
* - TT also keeps track of the minimal value of all the topology ids of all the transactions still present in the cache (minTopologyId)
* - when a tx wants to acquire lock "k":
* - if tx.topologyId > TT.minTopologyId then "k" might be a key whose owner crashed. If so:
* - obtain the list LT of transactions that started in a previous topology (txTable.getTransactionsPreparedBefore)
* - for each t in LT:
* - if t wants to write "k" then block until t finishes (CacheTransaction.waitForTransactionsToFinishIfItWritesToKey)
* - only then try to acquire lock on "k"
* - if tx.topologyId == TT.minTopologyId try to acquire lock straight away.
*
* Note: The algorithm described above only applies when nodes leave the cluster, so it doesn't add a performance
* burden when the cluster is stable.
*/
private InvocationStage checkPendingAndLockKey(TxInvocationContext<?> ctx, VisitableCommand command, Object key,
long lockTimeout) {
PendingLockPromise pendingLockPromise =
pendingLockManager.checkPendingTransactionsForKey(ctx, key, lockTimeout, TimeUnit.MILLISECONDS);
if (pendingLockPromise.isReady()) {
//if it has already timed-out, do not try to acquire the lock
return pendingLockPromise.hasTimedOut() ?
pendingLockPromise.toInvocationStage() :
lockAndRecord(ctx, command, key, lockTimeout);
}
return pendingLockPromise.toInvocationStage().thenApplyMakeStage(ctx, command, (rCtx, rCommand, rv) -> {
long remaining = pendingLockPromise.getRemainingTimeout();
return lockAndRecord(ctx, command, key, remaining);
});
}
private InvocationStage checkPendingAndLockAllKeys(TxInvocationContext<?> ctx, VisitableCommand command,
Collection<Object> keys, long lockTimeout) {
PendingLockPromise pendingLockPromise =
pendingLockManager.checkPendingTransactionsForKeys(ctx, keys, lockTimeout, TimeUnit.MILLISECONDS);
if (pendingLockPromise.isReady()) {
//if it has already timed-out, do not try to acquire the lock
return pendingLockPromise.hasTimedOut() ?
pendingLockPromise.toInvocationStage() :
lockAllAndRecord(ctx, command, keys, lockTimeout);
}
return pendingLockPromise.toInvocationStage().thenApplyMakeStage(ctx, command, ((rCtx, rCommand, rv) -> {
long remaining = pendingLockPromise.getRemainingTimeout();
return lockAllAndRecord(ctx, command, keys, remaining);
}));
}
void releaseLockOnTxCompletion(TxInvocationContext<?> ctx) {
boolean shouldReleaseLocks = ctx.isOriginLocal() &&
!partitionHandlingManager.isTransactionPartiallyCommitted(ctx.getGlobalTransaction());
if (shouldReleaseLocks) {
lockManager.unlockAll(ctx);
}
}
}
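// --- Illustrative note (not part of Infinispan) ---
// Backup-lock timeline sketch for key k with owners {A = primary, B = backup}:
//   tx1 prepare: A locks k, B records a backup lock for k (lockOrRegisterBackupLock).
//   A crashes and B becomes the primary owner.
//   tx2 prepare on B: checkPendingAndLockKey first waits for tx1's backup lock to be released
//   (via pendingLockManager), then acquires the real lock with the remaining timeout.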
| 8,107 | 45.597701 | 149 | java |
null | infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/OptimisticLockingInterceptor.java |
package org.infinispan.interceptors.locking;
import java.util.Collection;
import org.infinispan.InvalidCacheUsageException;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.InvocationFinallyAction;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Locking interceptor to be used by optimistic transactional caches.
*
* @author Mircea Markus
*/
public class OptimisticLockingInterceptor extends AbstractTxLockingInterceptor {
private static final Log log = LogFactory.getLog(OptimisticLockingInterceptor.class);
private final InvocationFinallyAction<VisitableCommand> releaseLockOnCompletionAction = (rCtx, rCommand, rv, throwable) -> releaseLockOnTxCompletion((TxInvocationContext<?>) rCtx);
private final InvocationSuccessFunction<PrepareCommand> onePhaseCommitFunction = (rCtx, rCommand, rv) -> invokeNextAndFinally(rCtx, rCommand, releaseLockOnCompletionAction);
@Override
protected Log getLog() {
return log;
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
final Collection<?> keysToLock = command.getKeysToLock();
InvocationStage lockStage = InvocationStage.completedNullStage();
((TxInvocationContext<?>) ctx).addAllAffectedKeys(command.getAffectedKeys());
if (!keysToLock.isEmpty()) {
if (command.isRetriedCommand()) {
// Don't keep backup locks if the local node is the primary owner in the current topology
// The lock/prepare command is being retried, so it's not a "pending" transaction
// However, keep the backup locks if the prepare command is being replayed because of state transfer
ctx.getCacheTransaction().cleanupBackupLocks();
}
lockStage = lockAllOrRegisterBackupLock(ctx, command, keysToLock, command.hasZeroLockAcquisition() ? 0 :
cacheConfiguration.locking().lockAcquisitionTimeout());
}
if (command.isOnePhaseCommit()) {
return lockStage.thenApply(ctx, command, onePhaseCommitFunction);
} else {
return asyncInvokeNext(ctx, command, lockStage);
}
}
@Override
protected Object visitDataReadCommand(InvocationContext ctx, DataCommand command) throws Throwable {
return invokeNext(ctx, command);
}
@Override
protected Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, unlockAllReturnHandler);
}
@Override
protected Object handleReadManyCommand(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
return invokeNext(ctx, command);
}
@Override
protected <K> Object handleWriteManyCommand(InvocationContext ctx, WriteCommand command,
Collection<K> keys, boolean forwarded) throws Throwable {
// TODO: can locks be acquired here with optimistic locking at all? Shouldn't we unlock only when exception is thrown?
return invokeNextAndFinally(ctx, command, unlockAllReturnHandler);
}
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) throws Throwable {
throw new InvalidCacheUsageException("Explicit locking is not allowed with optimistic caches!");
}
}
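// --- Illustrative note (not part of Infinispan) ---
// Lock lifecycle with optimistic transactions, as wired above: the prepare acquires the locks
// (lockAllOrRegisterBackupLock); with one-phase commit they are released right after the chain
// completes (onePhaseCommitFunction), otherwise the CommitCommand/RollbackCommand handlers in
// AbstractTxLockingInterceptor release them on transaction completion.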
| 3,936 | 44.77907 | 183 | java |
null | infinispan-main/core/src/main/java/org/infinispan/interceptors/locking/PessimisticLockingInterceptor.java |
package org.infinispan.interceptors.locking;
import java.util.Collection;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.interceptors.InvocationSuccessAction;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.topology.CacheTopology;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Locking interceptor to be used by pessimistic caches.
* Design note: when a lock on key "k" needs to be acquired (e.g. cache.put("k", "v")) and the lock owner is the
* local node, no remote call is made to migrate the locking logic to the other (numOwners - 1) lock owners. This is
* a good optimisation for in-VM transactions: if the local node crashes before prepare, the replicated lock
* information would be useless anyway, as the tx is rolled back. OTOH, for remote Hot Rod transactions this
* additional RPC makes sense because there is no transaction originator node, so this might become a configuration
* option once Hot Rod transactions are in place.
*
* Implementation note: the current implementation acquires locks remotely first and then locally. This is required
* by the deadlock detection logic, but might not be optimal: acquiring locks locally first might help to fail fast
* when keys are already locked.
*
* @author Mircea Markus
*/
public class PessimisticLockingInterceptor extends AbstractTxLockingInterceptor {
private static final Log log = LogFactory.getLog(PessimisticLockingInterceptor.class);
private final InvocationSuccessFunction<LockControlCommand> localLockCommandWork =
(rCtx, rCommand, rv) -> localLockCommandWork((TxInvocationContext) rCtx, rCommand);
private final InvocationSuccessAction<PrepareCommand> releaseLockOnCompletion =
(rCtx, rCommand, rv) -> releaseLockOnTxCompletion((TxInvocationContext) rCtx);
@Inject CommandsFactory cf;
@Override
protected Log getLog() {
return log;
}
@Override
protected final Object visitDataReadCommand(InvocationContext ctx, DataCommand command)
throws Throwable {
if (!readNeedsLock(ctx, command)) {
return invokeNext(ctx, command);
}
Object key = command.getKey();
if (!needRemoteLocks(ctx, key, command)) {
return acquireLocalLockAndInvokeNext(ctx, command);
}
TxInvocationContext txContext = (TxInvocationContext) ctx;
LockControlCommand lcc = cf.buildLockControlCommand(key, command.getFlagsBitSet(),
txContext.getGlobalTransaction());
lcc.setTopologyId(command.getTopologyId());
// This invokes the chain down using the lock control command and then after it is acquired invokes
// the chain again with the actual command
return invokeNextThenApply(ctx, lcc, (rCtx, rCommand, rv) -> acquireLocalLockAndInvokeNext(rCtx, command));
}
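// Remote first, then local: the LockControlCommand built above acquires the lock on the remote
// primary owner before acquireLocalLockAndInvokeNext runs locally, matching the deadlock
// detection requirement described in the class javadoc.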
private boolean readNeedsLock(InvocationContext ctx, FlagAffectedCommand command) {
return ctx.isInTxScope() && command.hasAnyFlag(FlagBitSets.FORCE_WRITE_LOCK) && !hasSkipLocking(command);
}
private InvocationStage acquireLocalLock(InvocationContext ctx, DataCommand command) {
final TxInvocationContext txContext = (TxInvocationContext) ctx;
Object key = command.getKey();
txContext.addAffectedKey(key);
// Don't keep backup locks if the local node is the primary owner in the current topology
// The lock/prepare command is being retried, so it's not a "pending" transaction
txContext.getCacheTransaction().removeBackupLock(key);
return lockOrRegisterBackupLock(txContext, command, key, getLockTimeoutMillis(command));
}
@Override
protected Object handleReadManyCommand(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
Object maybeStage;
if (!readNeedsLock(ctx, command)) {
maybeStage = invokeNext(ctx, command);
} else {
maybeStage = lockAndRecordForManyKeysCommand(ctx, command, keys);
}
return maybeStage;
}
private InvocationStage acquireLocalLocks(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
final TxInvocationContext<?> txContext = (TxInvocationContext) ctx;
txContext.addAllAffectedKeys(keys);
// Don't keep backup locks if the local node is the primary owner in the current topology
// The read/write many command is being retried, so it's not a "pending" transaction
txContext.getCacheTransaction().removeBackupLocks(keys);
return lockAllOrRegisterBackupLock(txContext, command, keys, getLockTimeoutMillis(command));
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
if (!command.isOnePhaseCommit()) {
return invokeNext(ctx, command);
}
// Don't release the locks on exception, the RollbackCommand will do it
return invokeNextThenAccept(ctx, command, releaseLockOnCompletion);
}
@Override
protected <K> Object handleWriteManyCommand(InvocationContext ctx, WriteCommand command, Collection<K> keys, boolean forwarded) {
Object maybeStage;
if (hasSkipLocking(command)) {
maybeStage = invokeNext(ctx, command);
} else {
maybeStage = lockAndRecordForManyKeysCommand(ctx, command, keys);
}
return maybeStage;
}
@Override
protected Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command)
throws Throwable {
Object maybeStage;
Object key = command.getKey();
if (hasSkipLocking(command)) {
// Non-modifying functional write commands are executed in non-transactional context on non-originators
if (ctx.isInTxScope()) {
// Mark the key as affected even with SKIP_LOCKING
((TxInvocationContext<?>) ctx).addAffectedKey(key);
}
maybeStage = invokeNext(ctx, command);
} else {
if (!needRemoteLocks(ctx, key, command)) {
maybeStage = acquireLocalLockAndInvokeNext(ctx, command);
} else {
final TxInvocationContext txContext = (TxInvocationContext) ctx;
LockControlCommand lcc = cf.buildLockControlCommand(key, command.getFlagsBitSet(),
txContext.getGlobalTransaction());
lcc.setTopologyId(command.getTopologyId());
// This invokes the chain down using the lock control command and then after it is acquired invokes
// the chain again with the actual command
return invokeNextThenApply(ctx, lcc, (rCtx, rCommand, rv) -> acquireLocalLockAndInvokeNext(rCtx, command));
}
}
return maybeStage;
}
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command) {
if (!ctx.isInTxScope())
throw new IllegalStateException("Locks should only be acquired within the scope of a transaction!");
boolean skipLocking = hasSkipLocking(command);
if (skipLocking) {
return false;
}
// First go through the distribution interceptor to acquire the remote lock - required by DLD.
// Only acquire remote lock if multiple keys or the single key primary owner is not the local node.
if (ctx.isOriginLocal()) {
final boolean isSingleKeyAndLocal =
!command.multipleKeys() && cdl.getCacheTopology().getDistribution(command.getSingleKey()).isPrimary();
boolean needBackupLocks = !isSingleKeyAndLocal || isStateTransferInProgress();
if (needBackupLocks && !command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
LocalTransaction localTx = (LocalTransaction) ctx.getCacheTransaction();
if (localTx.getAffectedKeys().containsAll(command.getKeys())) {
if (log.isTraceEnabled())
log.tracef("Already own locks on keys: %s, skipping remote call", command.getKeys());
return true;
}
} else {
if (log.isTraceEnabled())
log.tracef("Single key %s and local, skipping remote call", command.getSingleKey());
return localLockCommandWork(ctx, command);
}
}
return invokeNextThenApply(ctx, command, localLockCommandWork);
}
private Object localLockCommandWork(TxInvocationContext<?> ctx, LockControlCommand command) {
if (ctx.isOriginLocal()) {
ctx.addAllAffectedKeys(command.getKeys());
}
if (command.isUnlock()) {
if (ctx.isOriginLocal()) throw new AssertionError(
"There's no advancedCache.unlock so this must have originated remotely.");
return false;
}
// Don't keep backup locks if the local node is the primary owner in the current topology
// The lock/prepare command is being retried, so it's not a "pending" transaction
ctx.getCacheTransaction().removeBackupLocks(command.getKeys());
return lockAllOrRegisterBackupLock(ctx, command, command.getKeys(), getLockTimeoutMillis(command))
.thenApply(ctx, command, (rCtx, rCommand, rv) -> true);
}
private boolean needRemoteLocks(InvocationContext ctx, Collection<?> keys,
FlagAffectedCommand command) {
boolean needBackupLocks = ctx.isOriginLocal() && (!isLockOwner(keys) || isStateTransferInProgress());
boolean needRemoteLock = false;
if (needBackupLocks && !command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
final TxInvocationContext txContext = (TxInvocationContext) ctx;
LocalTransaction localTransaction = (LocalTransaction) txContext.getCacheTransaction();
needRemoteLock = !localTransaction.getAffectedKeys().containsAll(keys);
if (!needRemoteLock) {
if (log.isTraceEnabled()) log.tracef("We already have lock for keys %s, skip remote lock acquisition", keys);
}
}
return needRemoteLock;
}
private boolean needRemoteLocks(InvocationContext ctx, Object key, FlagAffectedCommand command) {
boolean needBackupLocks = ctx.isOriginLocal() && (!isLockOwner(key) || isStateTransferInProgress());
boolean needRemoteLock = false;
if (needBackupLocks && !command.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
final TxInvocationContext txContext = (TxInvocationContext) ctx;
LocalTransaction localTransaction = (LocalTransaction) txContext.getCacheTransaction();
needRemoteLock = !localTransaction.getAffectedKeys().contains(key);
if (!needRemoteLock) {
if (log.isTraceEnabled())
log.tracef("We already have lock for key %s, skip remote lock acquisition", key);
}
} else {
if (log.isTraceEnabled())
log.tracef("Don't need backup locks for key %s", key);
}
return needRemoteLock;
}
private boolean isLockOwner(Collection<?> keys) {
for (Object key : keys) {
if (!isLockOwner(key)) {
return false;
}
}
return true;
}
private boolean isStateTransferInProgress() {
return cdl.getCacheTopology().getPhase() == CacheTopology.Phase.READ_OLD_WRITE_ALL;
}
private Object lockAndRecordForManyKeysCommand(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
if (!needRemoteLocks(ctx, keys, command)) {
return acquireLocalLocksAndInvokeNext(ctx, command, keys);
} else {
final TxInvocationContext txContext = (TxInvocationContext) ctx;
LockControlCommand lcc = cf.buildLockControlCommand(keys, command.getFlagsBitSet(),
txContext.getGlobalTransaction());
if (command instanceof TopologyAffectedCommand) {
lcc.setTopologyId(((TopologyAffectedCommand) command).getTopologyId());
}
// This invokes the chain down using the lock control command and then after it is acquired invokes
// the chain again with the actual command
return invokeNextThenApply(ctx, lcc,
(rCtx, rCommand, rv) -> acquireLocalLocksAndInvokeNext(rCtx, command, keys));
}
}
private Object acquireLocalLocksAndInvokeNext(InvocationContext ctx, FlagAffectedCommand command, Collection<?> keys) {
InvocationStage lockStage = acquireLocalLocks(ctx, command, keys);
return asyncInvokeNext(ctx, command, lockStage);
}
private Object acquireLocalLockAndInvokeNext(InvocationContext ctx, DataCommand command) {
InvocationStage lockStage = acquireLocalLock(ctx, command);
return asyncInvokeNext(ctx, command, lockStage);
}
}
| 13,400
| 45.53125
| 132
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/PassivationWriterInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.BOTH;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.PRIVATE;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Handles store write operations, when passivation is enabled, that don't entail reading the entry first
*
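* A minimal configuration sketch under which this interceptor is installed (the store type and location are
* illustrative, and builder method names may vary slightly between versions):
* <pre>{@code
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.persistence()
*        .passivation(true)
*        .addSoftIndexFileStore()
*        .dataLocation("/tmp/data");
* }</pre>
*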
* @author William Burns
* @since 9.0
*/
public class PassivationWriterInterceptor extends DDAsyncInterceptor {
@Inject protected PersistenceManager persistenceManager;
private static final Log log = LogFactory.getLog(PassivationWriterInterceptor.class);
protected Log getLog() {
return log;
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
if (isStoreEnabled(command) && !ctx.isInTxScope())
return asyncInvokeNext(ctx, command, persistenceManager.clearAllStores(ctx.isOriginLocal() ? BOTH : PRIVATE));
return invokeNext(ctx, command);
}
protected boolean isStoreEnabled(FlagAffectedCommand command) {
if (command.hasAnyFlag(FlagBitSets.SKIP_CACHE_STORE)) {
if (log.isTraceEnabled()) {
log.trace("Skipping cache store since the call contain a skip cache store flag");
}
return false;
}
return true;
}
}
| 1,762
| 34.979592
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/ContainerFullException.java
|
package org.infinispan.interceptors.impl;
import org.infinispan.commons.CacheException;
/**
* Exception that is thrown when exception-based eviction is enabled and the cache is full
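* <p>
* A minimal sketch of a configuration that can surface this exception (the count limit is illustrative):
* <pre>{@code
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.memory()
*        .maxCount(1000)
*        .whenFull(EvictionStrategy.EXCEPTION);
* }</pre>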
* @author wburns
* @since 9.0
*/
public class ContainerFullException extends CacheException {
public ContainerFullException(String msg) {
super(msg);
}
}
| 354
| 22.666667
| 90
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/ClusteredCacheLoaderInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.commons.util.Util.toStr;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* The same as a regular cache loader interceptor, except that it contains additional logic to force loading from the
* cache loader on a remote node under certain conditions.
*
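* A minimal sketch of a setup in which this interceptor is active (the cache mode and store type are
* illustrative):
* <pre>{@code
* ConfigurationBuilder builder = new ConfigurationBuilder();
* builder.clustering().cacheMode(CacheMode.DIST_SYNC)
*        .persistence().addSoftIndexFileStore();
* }</pre>
*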
* @author Manik Surtani
* @since 9.0
*/
public class ClusteredCacheLoaderInterceptor<K, V> extends CacheLoaderInterceptor<K, V> {
private static final Log log = LogFactory.getLog(ClusteredCacheLoaderInterceptor.class);
@Inject DistributionManager distributionManager;
private boolean transactional;
@Start(priority = 15)
void startClusteredCacheLoaderInterceptor() {
transactional = cacheConfiguration.transaction().transactionMode().isTransactional();
}
@Override
protected boolean skipLoadForWriteCommand(WriteCommand cmd, Object key, InvocationContext ctx) {
if (transactional) {
// LoadType.OWNER is used when the previous value is required to produce new value itself (functional commands
// or delta-aware), therefore, we have to load them into context. Other load types have checked the value
// already on the originator and therefore the value is loaded only for WSC (without this interceptor)
if (!ctx.isOriginLocal() && cmd.loadType() != VisitableCommand.LoadType.OWNER) {
return true;
}
} else {
switch (cmd.loadType()) {
case DONT_LOAD:
return true;
case PRIMARY:
if (cmd.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
return cmd.hasAnyFlag(FlagBitSets.SKIP_CACHE_LOAD);
}
if (!distributionManager.getCacheTopology().getDistribution(key).isPrimary()) {
if (log.isTraceEnabled()) {
log.tracef("Skip load for command %s. This node is not the primary owner of %s", cmd, toStr(key));
}
return true;
}
break;
case OWNER:
if (cmd.hasAnyFlag(FlagBitSets.CACHE_MODE_LOCAL)) {
return cmd.hasAnyFlag(FlagBitSets.SKIP_CACHE_LOAD);
}
// TODO [Dan] I'm not sure using the write CH is OK here
DistributionInfo info = distributionManager.getCacheTopology().getDistribution(key);
if (!info.isPrimary() && (!info.isWriteOwner() || ctx.isOriginLocal())) {
if (log.isTraceEnabled()) {
log.tracef("Skip load for command %s. This node is neither the primary owner nor non-origin backup of %s", cmd, toStr(key));
}
return true;
}
break;
}
}
return super.skipLoadForWriteCommand(cmd, key, ctx);
}
@Override
protected boolean canLoad(Object key, int segment) {
// Don't load the value if we are using distributed mode and aren't in the read CH
LocalizedCacheTopology cacheTopology = distributionManager.getCacheTopology();
return cacheTopology != null && cacheTopology.isSegmentReadOwner(segment);
}
}
| 3,720
| 41.770115
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/TxInterceptor.java
|
package org.infinispan.interceptors.impl;
import java.util.concurrent.atomic.AtomicLong;
import jakarta.transaction.Status;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import org.infinispan.Cache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.read.EntrySetCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.read.KeySetCommand;
import org.infinispan.commands.read.SizeCommand;
import org.infinispan.commands.tx.AbstractTransactionBoundaryCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.LocalTxInvocationContext;
import org.infinispan.context.impl.RemoteTxInvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.jmx.JmxStatisticsExposer;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.jmx.annotations.MeasurementType;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.xa.CacheTransaction;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.transaction.xa.recovery.RecoveryManager;
import org.infinispan.util.concurrent.locks.LockReleasedException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Interceptor in charge of handling transaction-related operations, e.g. enlisting the cache as a transaction
* participant and propagating remotely initiated changes.
*
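* A minimal sketch of the JTA flow this interceptor participates in (the cache and the literals are
* hypothetical):
* <pre>{@code
* TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
* tm.begin();
* try {
*    cache.put("k", "v"); // enlists the cache and records the modification
*    tm.commit();         // drives prepare/commit through this interceptor
* } catch (Exception e) {
*    tm.rollback();
* }
* }</pre>
*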
* @author <a href="mailto:manik@jboss.org">Manik Surtani (manik@jboss.org)</a>
* @author Mircea.Markus@jboss.com
* @see org.infinispan.transaction.xa.TransactionXaAdapter
* @since 9.0
*/
@MBean(objectName = "Transactions", description = "Component that manages the cache's participation in JTA transactions.")
public class TxInterceptor<K, V> extends DDAsyncInterceptor implements JmxStatisticsExposer {
private static final Log log = LogFactory.getLog(TxInterceptor.class);
private final AtomicLong prepares = new AtomicLong(0);
private final AtomicLong commits = new AtomicLong(0);
private final AtomicLong rollbacks = new AtomicLong(0);
@Inject CommandsFactory commandsFactory;
@Inject ComponentRef<Cache<K, V>> cache;
@Inject RecoveryManager recoveryManager;
@Inject TransactionTable txTable;
@Inject KeyPartitioner keyPartitioner;
private boolean useOnePhaseForAutoCommitTx;
private boolean useVersioning;
private boolean statisticsEnabled;
private static void checkTransactionThrowable(CacheTransaction tx, Throwable throwable) {
if (tx.isMarkedForRollback() && throwable instanceof LockReleasedException) {
throw log.transactionAlreadyRolledBack(tx.getGlobalTransaction());
}
}
@Start
public void start() {
statisticsEnabled = cacheConfiguration.statistics().enabled();
useOnePhaseForAutoCommitTx = cacheConfiguration.transaction().use1PcForAutoCommitTransactions();
useVersioning = Configurations.isTxVersioned(cacheConfiguration);
}
@Override
public Object visitPrepareCommand(@SuppressWarnings("rawtypes") TxInvocationContext ctx, PrepareCommand command) throws Throwable {
return handlePrepareCommand(ctx, command);
}
private Object handlePrepareCommand(TxInvocationContext<?> ctx, PrepareCommand command) {
// Debugging for ISPN-5379
ctx.getCacheTransaction().freezeModifications();
//if it is remote and 2PC then first log the tx only after replaying the mods
if (this.statisticsEnabled) prepares.incrementAndGet();
if (!ctx.isOriginLocal()) {
((RemoteTransaction) ctx.getCacheTransaction()).setLookedUpEntriesTopology(command.getTopologyId());
Object verifyResult = invokeNextAndHandle(ctx, command, (rCtx, rCommand, rv, throwable) -> {
if (!rCtx.isOriginLocal()) {
return verifyRemoteTransaction((RemoteTxInvocationContext) rCtx, rCommand, rv, throwable);
}
return valueOrException(rv, throwable);
});
return makeStage(verifyResult).thenAccept(ctx, command, (rCtx, prepareCommand, rv) -> {
if (prepareCommand.isOnePhaseCommit()) {
txTable.remoteTransactionCommitted(prepareCommand.getGlobalTransaction(), true);
} else {
txTable.remoteTransactionPrepared(prepareCommand.getGlobalTransaction());
}
});
} else {
return invokeNext(ctx, command);
}
}
@Override
public Object visitCommitCommand(@SuppressWarnings("rawtypes") TxInvocationContext ctx, CommitCommand command) throws Throwable {
// TODO The local origin check is needed for CommitFailsTest, but it doesn't appear correct to roll back an in-doubt tx
if (!ctx.isOriginLocal()) {
GlobalTransaction gtx = ctx.getGlobalTransaction();
if (txTable.isTransactionCompleted(gtx)) {
if (log.isTraceEnabled()) log.tracef("Transaction %s already completed, skipping commit", gtx);
return null;
}
InvocationStage replayStage = replayRemoteTransactionIfNeeded((RemoteTxInvocationContext) ctx,
command.getTopologyId());
if (replayStage != null) {
return replayStage.andHandle(ctx, command, (rCtx, rCommand, rv, t) ->
finishCommit((TxInvocationContext<?>) rCtx, rCommand));
} else {
return finishCommit(ctx, command);
}
}
return finishCommit(ctx, command);
}
private Object finishCommit(TxInvocationContext<?> ctx, VisitableCommand command) {
GlobalTransaction gtx = ctx.getGlobalTransaction();
if (this.statisticsEnabled) commits.incrementAndGet();
return invokeNextThenAccept(ctx, command, (rCtx, rCommand, rv) -> {
if (!rCtx.isOriginLocal()) {
txTable.remoteTransactionCommitted(gtx, false);
}
});
}
@Override
public Object visitRollbackCommand(@SuppressWarnings("rawtypes") TxInvocationContext ctx, RollbackCommand command) throws Throwable {
if (this.statisticsEnabled) rollbacks.incrementAndGet();
// The transaction was marked as completed in RollbackCommand.prepare()
if (!ctx.isOriginLocal()) {
txTable.remoteTransactionRollback(command.getGlobalTransaction());
}
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
//for txs that roll back we do not send a TxCompletionNotification, so we should clean up
// the recovery info here
if (recoveryManager != null) {
GlobalTransaction gtx = rCommand.getGlobalTransaction();
recoveryManager.removeRecoveryInformation(gtx.getXid());
}
});
}
@Override
public Object visitLockControlCommand(@SuppressWarnings("rawtypes") TxInvocationContext ctx, LockControlCommand command)
throws Throwable {
enlistIfNeeded(ctx);
if (ctx.isOriginLocal()) {
command.setGlobalTransaction(ctx.getGlobalTransaction());
}
return invokeNextAndHandle(ctx, command, (rCtx, rCommand, rv, throwable) -> {
if (!rCtx.isOriginLocal()) {
return verifyRemoteTransaction((RemoteTxInvocationContext) rCtx, rCommand, rv, throwable);
}
checkTransactionThrowable(((TxInvocationContext<?>) rCtx).getCacheTransaction(), throwable);
return valueOrException(rv, throwable);
});
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitRemoveExpiredCommand(InvocationContext ctx, RemoveExpiredCommand command) {
// Remove expired is non transactional
return invokeNext(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
return invokeNext(ctx, command);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitSizeCommand(InvocationContext ctx, SizeCommand command) throws Throwable {
if (!ctx.isOriginLocal() || !ctx.isInTxScope())
return invokeNext(ctx, command);
enlistIfNeeded(ctx);
// If we have any entries looked up - even for reads - we can't allow size optimizations
if (ctx.isInTxScope() && ctx.lookedUpEntriesCount() > 0) {
command.addFlags(FlagBitSets.SKIP_SIZE_OPTIMIZATION);
}
return invokeNext(ctx, command);
}
@Override
public Object visitKeySetCommand(InvocationContext ctx, KeySetCommand command) throws Throwable {
if (ctx.isOriginLocal() && ctx.isInTxScope()) {
enlistIfNeeded(ctx);
}
return invokeNext(ctx, command);
}
@Override
public Object visitEntrySetCommand(InvocationContext ctx, EntrySetCommand command) throws Throwable {
if (ctx.isOriginLocal() && ctx.isInTxScope()) {
enlistIfNeeded(ctx);
}
return invokeNext(ctx, command);
}
@Override
public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand invalidateCommand)
throws Throwable {
return handleWriteCommand(ctx, invalidateCommand);
}
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command)
throws Throwable {
enlistIfNeeded(ctx);
return invokeNext(ctx, command);
}
@Override
public final Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command)
throws Throwable {
enlistIfNeeded(ctx);
return invokeNext(ctx, command);
}
@Override
public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable {
enlistIfNeeded(ctx);
return invokeNext(ctx, command);
}
private void enlistIfNeeded(InvocationContext ctx) throws SystemException {
if (shouldEnlist(ctx)) {
assert ctx instanceof LocalTxInvocationContext;
enlist((LocalTxInvocationContext) ctx);
}
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) throws Throwable {
enlistIfNeeded(ctx);
return invokeNext(ctx, command);
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command) throws Throwable {
enlistIfNeeded(ctx);
return invokeNext(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
private Object handleWriteCommand(InvocationContext ctx, WriteCommand command)
throws Throwable {
if (shouldEnlist(ctx)) {
assert ctx instanceof LocalTxInvocationContext;
LocalTransaction localTransaction = enlist((LocalTxInvocationContext) ctx);
boolean implicitWith1Pc = useOnePhaseForAutoCommitTx && localTransaction.isImplicitTransaction();
if (implicitWith1Pc) {
//in this situation we don't support concurrent updates so skip locking entirely
command.addFlags(FlagBitSets.SKIP_LOCKING);
}
}
return invokeNextAndFinally(ctx, command, (rCtx, writeCommand, rv, t) -> {
// We shouldn't mark the transaction for rollback if it's going to be retried
if (t != null && !(t instanceof OutdatedTopologyException)) {
// Don't mark the transaction for rollback if it's fail silent (i.e. putForExternalRead)
if (rCtx.isOriginLocal() && rCtx.isInTxScope() && !writeCommand.hasAnyFlag(FlagBitSets.FAIL_SILENTLY)) {
TxInvocationContext<?> txCtx = (TxInvocationContext<?>) rCtx;
// avoid invoke setRollbackOnly() if the transaction is already rolled back
checkTransactionThrowable(txCtx.getCacheTransaction(), t);
txCtx.getTransaction().setRollbackOnly();
}
}
if (t == null && shouldEnlist(rCtx) && writeCommand.isSuccessful()) {
assert rCtx instanceof LocalTxInvocationContext;
((LocalTxInvocationContext) rCtx).getCacheTransaction().addModification(writeCommand);
}
});
}
private LocalTransaction enlist(LocalTxInvocationContext ctx) throws SystemException {
Transaction transaction = ctx.getTransaction();
if (transaction == null) throw new IllegalStateException("This should only be called in an tx scope");
LocalTransaction localTransaction = ctx.getCacheTransaction();
if (localTransaction.isFromStateTransfer()) {
return localTransaction;
}
int status = transaction.getStatus();
if (isNotValid(status)) {
if (!localTransaction.isEnlisted()) {
// This transaction wouldn't be removed by TM.commit() or TM.rollback()
txTable.removeLocalTransaction(localTransaction);
}
throw new IllegalStateException("Transaction " + transaction +
" is not in a valid state to be invoking cache operations on.");
}
txTable.enlist(transaction, localTransaction);
return localTransaction;
}
private boolean isNotValid(int status) {
return status != Status.STATUS_ACTIVE
&& status != Status.STATUS_PREPARING
&& status != Status.STATUS_COMMITTING;
}
private static boolean shouldEnlist(InvocationContext ctx) {
return ctx.isInTxScope() && ctx.isOriginLocal();
}
@Override
public boolean getStatisticsEnabled() {
return isStatisticsEnabled();
}
@Override
public void setStatisticsEnabled(boolean enabled) {
statisticsEnabled = enabled;
}
@Override
@ManagedOperation(
description = "Resets statistics gathered by this component",
displayName = "Reset Statistics"
)
public void resetStatistics() {
prepares.set(0);
commits.set(0);
rollbacks.set(0);
}
@ManagedAttribute(
displayName = "Statistics enabled",
dataType = DataType.TRAIT,
writable = true
)
public boolean isStatisticsEnabled() {
return this.statisticsEnabled;
}
@ManagedAttribute(
description = "Number of transaction prepares performed since last reset",
displayName = "Prepares",
measurementType = MeasurementType.TRENDSUP
)
public long getPrepares() {
return prepares.get();
}
@ManagedAttribute(
description = "Number of transaction commits performed since last reset",
displayName = "Commits",
measurementType = MeasurementType.TRENDSUP
)
public long getCommits() {
return commits.get();
}
@ManagedAttribute(
description = "Number of transaction rollbacks performed since last reset",
displayName = "Rollbacks",
measurementType = MeasurementType.TRENDSUP
)
public long getRollbacks() {
return rollbacks.get();
}
private Object verifyRemoteTransaction(RemoteTxInvocationContext ctx, AbstractTransactionBoundaryCommand command,
Object rv, Throwable throwable) throws Throwable {
final GlobalTransaction globalTransaction = command.getGlobalTransaction();
// It is also possible that the LCC timed out on the originator's end and this node has processed
// a TxCompletionNotification. So we need to check the presence of the remote transaction to
// see if we need to clean up any acquired locks on our end.
boolean alreadyCompleted = txTable.isTransactionCompleted(globalTransaction) || !txTable.containRemoteTx(globalTransaction);
if (log.isTraceEnabled()) {
log.tracef("Verifying transaction: alreadyCompleted=%s", alreadyCompleted);
}
if (alreadyCompleted) {
if (log.isTraceEnabled()) {
log.tracef("Rolling back remote transaction %s because it was already completed",
globalTransaction);
}
// The rollback command only marks the transaction as completed in invokeAsync()
txTable.markTransactionCompleted(globalTransaction, false);
RollbackCommand rollback = commandsFactory.buildRollbackCommand(command.getGlobalTransaction());
return invokeNextAndFinally(ctx, rollback, (rCtx, rCommand, rv1, throwable1) -> {
//noinspection unchecked
RemoteTransaction remoteTx = ((TxInvocationContext<RemoteTransaction>) rCtx).getCacheTransaction();
remoteTx.markForRollback(true);
txTable.removeRemoteTransaction(globalTransaction);
});
}
return valueOrException(rv, throwable);
}
private InvocationStage replayRemoteTransactionIfNeeded(RemoteTxInvocationContext ctx, int topologyId)
throws Throwable {
// If a commit is received for a transaction that doesn't have its 'lookedUpEntries' populated
// we know for sure this transaction is 2PC and was received via state transfer but the preceding PrepareCommand
// was not received by the local node because it was executed on the previous key owners. We need to re-prepare
// the transaction on the local node to ensure its locks are acquired and lookedUpEntries is properly populated.
RemoteTransaction remoteTx = ctx.getCacheTransaction();
if (log.isTraceEnabled()) {
log.tracef("Remote tx topology id %d and command topology is %d", remoteTx.lookedUpEntriesTopology(), topologyId);
}
if (remoteTx.lookedUpEntriesTopology() < topologyId) {
PrepareCommand prepareCommand;
if (useVersioning) {
prepareCommand = commandsFactory.buildVersionedPrepareCommand(ctx.getGlobalTransaction(), ctx.getModifications(), false);
} else {
prepareCommand = commandsFactory.buildPrepareCommand(ctx.getGlobalTransaction(), ctx.getModifications(), false);
}
prepareCommand.markTransactionAsRemote(true);
prepareCommand.setOrigin(ctx.getOrigin());
if (log.isTraceEnabled()) {
log.tracef("Replaying the transactions received as a result of state transfer %s",
prepareCommand);
}
return makeStage(handlePrepareCommand(ctx, prepareCommand));
}
return null;
}
}
| 22,884
| 40.913919
| 136
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/package-info.java
|
/**
* Basic interceptors
*
* @since 9.0
*
* @api.private
*/
package org.infinispan.interceptors.impl;
| 108
| 11.111111
| 41
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/NonTxIracRemoteSiteInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.util.IracUtils.logUpdateDiscarded;
import static org.infinispan.util.IracUtils.setPrivateMetadata;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.container.versioning.irac.IracTombstoneManager;
import org.infinispan.container.versioning.irac.IracVersionGenerator;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.Ownership;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.InvocationSuccessAction;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.transaction.impl.WriteSkewHelper;
import org.infinispan.util.logging.LogSupplier;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.irac.IracManager;
import org.infinispan.xsite.spi.SiteEntry;
import org.infinispan.xsite.spi.XSiteEntryMergePolicy;
/**
* Interceptor to handle updates from remote sites.
* <p>
* Remote sites only send {@link PutKeyValueCommand} or {@link RemoveCommand}.
*
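* A minimal sketch of a custom conflict resolver that this interceptor can invoke; the class name and the
* "keep local" policy are illustrative:
* <pre>{@code
* public class KeepLocalMergePolicy implements XSiteEntryMergePolicy<String, String> {
*    @Override
*    public CompletionStage<SiteEntry<String>> merge(String key, SiteEntry<String> localEntry,
*                                                    SiteEntry<String> remoteEntry) {
*       // Deterministically resolve conflicts by keeping the local site's entry.
*       return CompletableFuture.completedFuture(localEntry);
*    }
* }
* }</pre>
*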
* @author Pedro Ruivo
* @since 11.0
*/
public class NonTxIracRemoteSiteInterceptor extends DDAsyncInterceptor implements LogSupplier {
private static final Log log = LogFactory.getLog(NonTxIracRemoteSiteInterceptor.class);
private final boolean needsVersions;
private final InvocationSuccessAction<DataWriteCommand> setMetadataForOwnerAction = this::setIracMetadataForOwner;
@Inject XSiteEntryMergePolicy<Object, Object> mergePolicy;
@Inject IracVersionGenerator iracVersionGenerator;
@Inject IracTombstoneManager iracTombstoneManager;
@Inject VersionGenerator versionGenerator;
@Inject ClusteringDependentLogic clusteringDependentLogic;
@Inject IracManager iracManager;
public NonTxIracRemoteSiteInterceptor(boolean needsVersions) {
this.needsVersions = needsVersions;
}
private static SiteEntry<Object> createSiteEntryFrom(CacheEntry<?, ?> entry, String siteName) {
assert entry instanceof MVCCEntry;
MVCCEntry<?, ?> mvccEntry = (MVCCEntry<?, ?>) entry;
return new SiteEntry<>(siteName, mvccEntry.getOldValue(), mvccEntry.getOldMetadata());
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
Ownership ownership = getOwnership(command.getSegment());
switch (ownership) {
case PRIMARY:
// we are on primary and the lock is acquired
// if the update is discarded, command.isSuccessful() will return false.
CompletionStage<Boolean> validationResult = validateOnPrimary(ctx, command);
if (CompletionStages.isCompletedSuccessfully(validationResult)) {
return validate(validationResult.toCompletableFuture().join(), ctx, command);
}
return validationResult.thenApply(isValid -> validate(isValid, ctx, command));
case BACKUP:
if (!ctx.isOriginLocal()) {
// backups only commit when the command is remote (i.e. after it has been
// validated by the originator)
return invokeNextThenAccept(ctx, command, setMetadataForOwnerAction);
}
}
return invokeNext(ctx, command);
}
@Override
public boolean isTraceEnabled() {
return log.isTraceEnabled();
}
@Override
public Log getLog() {
return log;
}
private Object validate(boolean isValid, InvocationContext ctx, DataWriteCommand command) {
return isValid ? invokeNextThenAccept(ctx, command, setMetadataForOwnerAction) : null;
}
/**
* Invoked on the primary owner, it determines whether the remote site update is valid.
* <p>
* It also performs a conflict resolution if a conflict is found.
*/
private CompletionStage<Boolean> validateOnPrimary(InvocationContext ctx, IracPutKeyValueCommand command) {
final Object key = command.getKey();
CacheEntry<?, ?> entry = ctx.lookupEntry(key);
IracMetadata localMetadata = getIracMetadata(entry);
if (localMetadata == null) {
localMetadata = iracTombstoneManager.getTombstone(key);
}
if (needsVersions) {
// we are in the primary owner with the lock acquired.
// create a version for write-skew check before validating and sending to backup
// owners.
PrivateMetadata metadata = PrivateMetadata.getBuilder(command.getInternalMetadata())
.entryVersion(generateWriteSkewVersion(entry)).build();
command.setInternalMetadata(key, metadata);
}
if (localMetadata != null) {
return validateRemoteUpdate(entry, command, localMetadata);
}
return CompletableFutures.completedTrue();
}
/**
* Invoked by backup owners, it makes sure the entry has the same version as set by the primary owner.
*/
private void setIracMetadataForOwner(InvocationContext ctx, DataWriteCommand command,
@SuppressWarnings("unused") Object rv) {
final Object key = command.getKey();
PrivateMetadata metadata = command.getInternalMetadata();
iracVersionGenerator.updateVersion(command.getSegment(), metadata.iracMetadata().getVersion());
setPrivateMetadata(ctx.lookupEntry(key), command.getSegment(), metadata, iracTombstoneManager, this);
}
private CompletionStage<Boolean> validateRemoteUpdate(CacheEntry<?, ?> entry, IracPutKeyValueCommand command,
IracMetadata localMetadata) {
IracMetadata remoteMetadata = command.getInternalMetadata().iracMetadata();
assert remoteMetadata != null;
if (log.isTraceEnabled()) {
log.tracef("[IRAC] Comparing local and remote metadata: %s and %s", localMetadata, remoteMetadata);
}
IracEntryVersion localVersion = localMetadata.getVersion();
IracEntryVersion remoteVersion = remoteMetadata.getVersion();
if (command.isExpiration()) {
// expiration rules
// if the version is newer or equal, then the remove can continue
// if not, or if there is a conflict, we abort
switch (remoteVersion.compareTo(localVersion)) {
case AFTER:
case EQUAL:
return CompletableFutures.completedTrue();
default:
iracManager.incrementNumberOfDiscards();
discardUpdate(entry, command, remoteMetadata);
return CompletableFutures.completedFalse();
}
}
switch (remoteVersion.compareTo(localVersion)) {
case CONFLICTING:
return resolveConflict(entry, command, localMetadata, remoteMetadata);
case EQUAL:
case BEFORE:
iracManager.incrementNumberOfDiscards();
discardUpdate(entry, command, remoteMetadata);
return CompletableFutures.completedFalse();
}
return CompletableFutures.completedTrue();
}
private CompletionStage<Boolean> resolveConflict(CacheEntry<?, ?> entry, IracPutKeyValueCommand command,
IracMetadata localMetadata, IracMetadata remoteMetadata) {
if (log.isTraceEnabled()) {
log.tracef("[IRAC] Conflict found between local and remote metadata: %s and %s", localMetadata,
remoteMetadata);
}
// same site? conflict?
SiteEntry<Object> localSiteEntry = createSiteEntryFrom(entry, localMetadata.getSite());
SiteEntry<Object> remoteSiteEntry = command.createSiteEntry(remoteMetadata.getSite());
return mergePolicy.merge(entry.getKey(), localSiteEntry, remoteSiteEntry).thenApply(resolved -> {
if (log.isTraceEnabled()) {
log.tracef("[IRAC] resolve(%s, %s) = %s", localSiteEntry, remoteSiteEntry, resolved);
}
// fast track: the resolved entry is the same as the one already stored locally. do nothing!
if (resolved.equals(localSiteEntry)) {
discardUpdate(entry, command, remoteMetadata);
iracManager.incrementNumberOfConflictLocalWins();
return false;
} else if (!resolved.equals(remoteSiteEntry)) {
// new value/metadata to store. Change the command!
Object key = entry.getKey();
command.updateCommand(resolved);
PrivateMetadata.Builder builder = PrivateMetadata.getBuilder(command.getInternalMetadata())
.iracMetadata(mergeVersion(resolved.getSiteName(), localMetadata.getVersion(),
remoteMetadata.getVersion()));
command.setInternalMetadata(key, builder.build());
iracManager.incrementNumberOfConflictMerged();
} else {
iracManager.incrementNumberOfConflictRemoteWins();
}
return true;
});
}
private IracMetadata mergeVersion(String siteName, IracEntryVersion localVersion, IracEntryVersion remoteVersion) {
return new IracMetadata(siteName, localVersion.merge(remoteVersion));
}
private IncrementableEntryVersion generateWriteSkewVersion(CacheEntry<?, ?> entry) {
IncrementableEntryVersion version = WriteSkewHelper.incrementVersion(entry, versionGenerator);
if (log.isTraceEnabled()) {
log.tracef("[IRAC] Generated Write Skew version for %s=%s", entry.getKey(), version);
}
return version;
}
private void discardUpdate(CacheEntry<?, ?> entry, DataWriteCommand command, IracMetadata metadata) {
final Object key = entry.getKey();
logUpdateDiscarded(key, metadata, this);
assert metadata != null : "[IRAC] Metadata must not be null!";
command.fail(); // this prevents the sending to the backup owners
entry.setChanged(false); // this prevents the local node from applying the changes.
// we are discarding the update, but we still make its version visible to the next
// write operation
iracVersionGenerator.updateVersion(command.getSegment(), metadata.getVersion());
}
private IracMetadata getIracMetadata(CacheEntry<?, ?> entry) {
PrivateMetadata privateMetadata = entry.getInternalMetadata();
if (privateMetadata == null) { // new entry!
return iracTombstoneManager.getTombstone(entry.getKey());
}
IracMetadata metadata = privateMetadata.iracMetadata();
return metadata == null ? iracTombstoneManager.getTombstone(entry.getKey()) : metadata;
}
private Ownership getOwnership(int segment) {
return clusteringDependentLogic.getCacheTopology().getSegmentDistribution(segment).writeOwnership();
}
}
| 11,351
| 44.227092
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/Stages.java
|
package org.infinispan.interceptors.impl;
/**
* @author Dan Berindei
* @since 9.0
*/
class Stages {
static String className(Object o) {
if (o == null) return "null";
String fullName = o.getClass().getName();
return fullName.substring(fullName.lastIndexOf('.') + 1);
}
}
| 300
| 17.8125
| 63
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/SimpleAsyncInvocationStage.java
|
package org.infinispan.interceptors.impl;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.ExceptionSyncInvocationStage;
import org.infinispan.interceptors.InvocationCallback;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.commons.util.logging.TraceException;
/**
* Invocation stage representing a computation that may or may not be done yet.
*
* It is only meant to support the simplest asynchronous invocation,
* {@link org.infinispan.interceptors.BaseAsyncInterceptor#asyncValue(CompletionStage)}.
*
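* A minimal sketch of how such a stage typically originates (the interceptor method body and the async lookup
* helper are hypothetical):
* <pre>{@code
* public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) {
*    CompletionStage<Object> remoteValue = lookupRemoteValue(command.getKey());
*    return asyncValue(remoteValue); // wraps the stage for the interceptor chain
* }
* }</pre>
*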
* @author Dan Berindei
* @since 9.0
*/
public class SimpleAsyncInvocationStage extends InvocationStage {
protected final CompletableFuture<Object> future;
@SuppressWarnings("unchecked")
public SimpleAsyncInvocationStage(CompletionStage<?> future) {
this.future = (CompletableFuture<Object>) future;
}
@Override
public Object get() throws Throwable {
try {
return CompletableFutures.await(future);
} catch (ExecutionException e) {
Throwable cause = e.getCause();
cause.addSuppressed(new TraceException());
throw cause;
}
}
@Override
public boolean isDone() {
return future.isDone();
}
@Override
public CompletableFuture<Object> toCompletableFuture() {
return future;
}
@Override
public <C extends VisitableCommand> Object addCallback(InvocationContext ctx, C command,
InvocationCallback<C> function) {
if (future.isDone()) {
Object rv;
Throwable throwable;
try {
rv = future.getNow(null);
throwable = null;
} catch (Throwable t) {
rv = null;
throwable = CompletableFutures.extractException(t);
}
try {
return function.apply(ctx, command, rv, throwable);
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
return new QueueAsyncInvocationStage(ctx, command, future, function);
}
@Override
public Object thenReturn(InvocationContext ctx, VisitableCommand command, Object returnValue) {
if (future.isDone()) {
return future.isCompletedExceptionally() ? this : returnValue;
}
return new QueueAsyncInvocationStage(ctx, command, future,
(InvocationSuccessFunction) (rCtx, rCommand, rv) -> returnValue);
}
@Override
public String toString() {
return "SimpleAsyncInvocationStage(" + future + ')';
}
}
| 2,881
| 31.022222
| 98
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/AsyncInterceptorChainImpl.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.commons.util.Immutables.immutableListAdd;
import static org.infinispan.commons.util.Immutables.immutableListRemove;
import static org.infinispan.commons.util.Immutables.immutableListReplace;
import java.util.List;
import java.util.ListIterator;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.locks.ReentrantLock;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.ImmutableListCopy;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.ExceptionSyncInvocationStage;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Knows how to build and manage a chain of interceptors. Also in charge of invoking methods on the chain.
*
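* A minimal usage sketch ({@code MyInterceptor} is hypothetical; the chain is normally obtained from the
* cache's component registry):
* <pre>{@code
* chain.addInterceptorBefore(new MyInterceptor(), CallInterceptor.class);
* Object rv = chain.invoke(ctx, command); // blocks until the invocation completes
* }</pre>
*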
* @author Dan Berindei
* @since 9.0
*/
@Scope(Scopes.NAMED_CACHE)
@SuppressWarnings("deprecation")
public class AsyncInterceptorChainImpl implements AsyncInterceptorChain {
// Using the same list type everywhere may help with the optimization of the invocation context methods
private static final ImmutableListCopy<AsyncInterceptor> EMPTY_INTERCEPTORS_LIST =
new ImmutableListCopy<>();
private static final Log log = LogFactory.getLog(AsyncInterceptorChainImpl.class);
private final ReentrantLock lock = new ReentrantLock();
// Modifications are guarded with "lock", but reads do not need synchronization
private volatile List<AsyncInterceptor> interceptors = EMPTY_INTERCEPTORS_LIST;
private volatile AsyncInterceptor firstInterceptor = null;
@Start
void printChainInfo() {
if (log.isDebugEnabled()) {
log.debugf("Interceptor chain size: %d", size());
log.debugf("Interceptor chain is: %s", toString());
}
}
private void validateCustomInterceptor(Class<? extends AsyncInterceptor> i) {
// Do nothing, custom interceptors extending internal interceptors no longer "inherit" the annotations
}
/**
* Ensures that the interceptor of type passed in isn't already added
*
* @param clazz type of interceptor to check for
*/
private void checkInterceptor(Class<? extends AsyncInterceptor> clazz) {
if (containsInterceptorType(clazz, false))
throw new CacheConfigurationException("Detected interceptor of type [" + clazz.getName() +
"] being added to the interceptor chain " +
System.identityHashCode(this) + " more than once!");
}
@Override
public void addInterceptor(AsyncInterceptor interceptor, int position) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
Class<? extends AsyncInterceptor> interceptorClass = interceptor.getClass();
checkInterceptor(interceptorClass);
validateCustomInterceptor(interceptorClass);
interceptors = immutableListAdd(interceptors, position, interceptor);
rebuildInterceptors();
} finally {
lock.unlock();
}
}
@Override
public void removeInterceptor(int position) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
interceptors = immutableListRemove(interceptors, position);
rebuildInterceptors();
} finally {
lock.unlock();
}
}
@Override
public int size() {
return interceptors.size();
}
@Override
public void removeInterceptor(Class<? extends AsyncInterceptor> clazz) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
for (int i = 0; i < interceptors.size(); i++) {
if (interceptorMatches(interceptors.get(i), clazz)) {
removeInterceptor(i);
break;
}
}
} finally {
lock.unlock();
}
}
private boolean interceptorMatches(AsyncInterceptor interceptor,
Class<? extends AsyncInterceptor> clazz) {
Class<? extends AsyncInterceptor> interceptorType = interceptor.getClass();
return clazz == interceptorType;
}
@Override
public boolean addInterceptorAfter(AsyncInterceptor toAdd,
Class<? extends AsyncInterceptor> afterInterceptor) {
lock.lock();
try {
Class<? extends AsyncInterceptor> interceptorClass = toAdd.getClass();
checkInterceptor(interceptorClass);
validateCustomInterceptor(interceptorClass);
for (int i = 0; i < interceptors.size(); i++) {
if (interceptorMatches(interceptors.get(i), afterInterceptor)) {
interceptors = immutableListAdd(interceptors, i + 1, toAdd);
rebuildInterceptors();
return true;
}
}
return false;
} finally {
lock.unlock();
}
}
@Deprecated
public boolean addInterceptorBefore(AsyncInterceptor toAdd,
Class<? extends AsyncInterceptor> beforeInterceptor,
boolean isCustom) {
if (isCustom)
validateCustomInterceptor(toAdd.getClass());
return addInterceptorBefore(toAdd, beforeInterceptor);
}
@Override
public boolean addInterceptorBefore(AsyncInterceptor toAdd,
Class<? extends AsyncInterceptor> beforeInterceptor) {
lock.lock();
try {
Class<? extends AsyncInterceptor> interceptorClass = toAdd.getClass();
checkInterceptor(interceptorClass);
validateCustomInterceptor(interceptorClass);
for (int i = 0; i < interceptors.size(); i++) {
if (interceptorMatches(interceptors.get(i), beforeInterceptor)) {
interceptors = immutableListAdd(interceptors, i, toAdd);
rebuildInterceptors();
return true;
}
}
return false;
} finally {
lock.unlock();
}
}
@Override
public boolean replaceInterceptor(AsyncInterceptor replacingInterceptor,
Class<? extends AsyncInterceptor> existingInterceptorType) {
final ReentrantLock lock = this.lock;
lock.lock();
try {
Class<? extends AsyncInterceptor> interceptorClass = replacingInterceptor.getClass();
checkInterceptor(interceptorClass);
validateCustomInterceptor(interceptorClass);
for (int i = 0; i < interceptors.size(); i++) {
if (interceptorMatches(interceptors.get(i), existingInterceptorType)) {
interceptors = immutableListReplace(interceptors, i, replacingInterceptor);
rebuildInterceptors();
return true;
}
}
return false;
} finally {
lock.unlock();
}
}
@Override
public void appendInterceptor(AsyncInterceptor ci, boolean isCustom) {
lock.lock();
try {
Class<? extends AsyncInterceptor> interceptorClass = ci.getClass();
if (isCustom)
validateCustomInterceptor(interceptorClass);
checkInterceptor(interceptorClass);
// Called when building interceptor chain and so concurrent start calls are protected already
interceptors = immutableListAdd(interceptors, interceptors.size(), ci);
rebuildInterceptors();
} finally {
lock.unlock();
}
}
@Override
public CompletableFuture<Object> invokeAsync(InvocationContext ctx, VisitableCommand command) {
try {
Object result = firstInterceptor.visitCommand(ctx, command);
if (result instanceof InvocationStage) {
return ((InvocationStage) result).toCompletableFuture();
} else {
// Don't allocate future if result was already null
if (result == null) {
return CompletableFutures.completedNull();
}
return CompletableFuture.completedFuture(result);
}
} catch (Throwable t) {
return CompletableFuture.failedFuture(t);
}
}
@Override
public InvocationStage invokeStage(InvocationContext ctx, VisitableCommand command) {
try {
return InvocationStage.makeStage(firstInterceptor.visitCommand(ctx, command));
} catch (Throwable t) {
return new ExceptionSyncInvocationStage(t);
}
}
@Override
public Object invoke(InvocationContext ctx, VisitableCommand command) {
try {
Object result = firstInterceptor.visitCommand(ctx, command);
if (result instanceof InvocationStage) {
return ((InvocationStage) result).get();
} else {
return result;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CacheException(e);
} catch (TimeoutException e) {
// Create a new exception here for easier debugging
throw new TimeoutException(e.getMessage(), e);
} catch (RuntimeException e) {
throw e;
} catch (Throwable throwable) {
throw new CacheException(throwable);
}
}
@Override
public <T extends AsyncInterceptor> T findInterceptorExtending(Class<T> interceptorClass) {
List<AsyncInterceptor> localInterceptors = this.interceptors;
for (AsyncInterceptor interceptor : localInterceptors) {
boolean isSubclass = interceptorClass.isInstance(interceptor);
if (isSubclass) {
return interceptorClass.cast(interceptor);
}
}
return null;
}
@Override
public <T extends AsyncInterceptor> T findInterceptorWithClass(Class<T> interceptorClass) {
List<AsyncInterceptor> localInterceptors = this.interceptors;
for (AsyncInterceptor interceptor : localInterceptors) {
if (interceptorMatches(interceptor, interceptorClass)) {
return interceptorClass.cast(interceptor);
}
}
return null;
}
public String toString() {
StringBuilder sb = new StringBuilder();
List<AsyncInterceptor> localInterceptors = this.interceptors;
for (AsyncInterceptor interceptor : localInterceptors) {
sb.append("\n\t>> ");
sb.append(interceptor);
}
return sb.toString();
}
@Override
public boolean containsInstance(AsyncInterceptor interceptor) {
List<AsyncInterceptor> localInterceptors = this.interceptors;
for (AsyncInterceptor current : localInterceptors) {
if (current == interceptor) {
return true;
}
}
return false;
}
@Override
public boolean containsInterceptorType(Class<? extends AsyncInterceptor> interceptorType) {
return containsInterceptorType(interceptorType, false);
}
@Override
public boolean containsInterceptorType(Class<? extends AsyncInterceptor> interceptorType,
boolean alsoMatchSubClasses) {
List<AsyncInterceptor> localInterceptors = this.interceptors;
for (AsyncInterceptor interceptor : localInterceptors) {
Class<? extends AsyncInterceptor> currentInterceptorType = interceptor.getClass();
if (alsoMatchSubClasses) {
if (interceptorType.isAssignableFrom(currentInterceptorType)) {
return true;
}
} else {
if (interceptorType == currentInterceptorType) {
return true;
}
}
}
return false;
}
@Override
public List<AsyncInterceptor> getInterceptors() {
return interceptors;
}
private void rebuildInterceptors() {
ListIterator<AsyncInterceptor> it = interceptors.listIterator(interceptors.size());
// The CallInterceptor
AsyncInterceptor nextInterceptor = it.previous();
while (it.hasPrevious()) {
AsyncInterceptor interceptor = it.previous();
interceptor.setNextInterceptor(nextInterceptor);
nextInterceptor = interceptor;
}
this.firstInterceptor = nextInterceptor;
}
}
| 12,715
| 35.22792
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/PessimisticTxIracLocalInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.util.IracUtils.getIracVersionFromCacheEntry;
import java.util.Iterator;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.irac.IracMetadataRequestCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.LocalTxInvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollectors;
import org.infinispan.remoting.transport.ValidSingleResponseCollector;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CompletionStages;
/**
* Interceptor used by IRAC for pessimistic transactional caches to handle the local site updates.
* <p>
* On each successful write, a request is made to the primary owner to generate a new {@link IracMetadata}. At this
* moment, the lock is acquired so no other transaction can change the key.
* <p>
 * On prepare, the transaction originator waits for all the replies to the requests made while the transaction was
 * running, sets the resulting metadata in the {@link WriteCommand} and sends the {@link PrepareCommand} to all the owners.
* <p>
* The owners only have to retrieve the {@link IracMetadata} from the {@link WriteCommand} and store it.
*
* @author Pedro Ruivo
* @since 11.0
*/
public class PessimisticTxIracLocalInterceptor extends AbstractIracLocalSiteInterceptor {
private static final IracMetadataResponseCollector RESPONSE_COLLECTOR = new IracMetadataResponseCollector();
@Inject CommandsFactory commandsFactory;
@Inject RpcManager rpcManager;
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) {
return command.hasAnyFlag(FlagBitSets.PUT_FOR_EXTERNAL_READ) ?
visitNonTxDataWriteCommand(ctx, command) :
visitDataWriteCommand(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) {
return visitDataWriteCommand(ctx, command);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) {
return visitWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) {
return visitDataWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) {
return visitDataWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) {
return visitDataWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) {
return visitWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) {
return visitDataWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) {
return visitWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) {
return visitWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) {
return visitWriteCommand(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) {
if (ctx.isOriginLocal()) {
return onLocalPrepare(asLocalTxInvocationContext(ctx), command);
} else {
//noinspection unchecked
return onRemotePrepare(ctx, command);
}
}
@SuppressWarnings("rawtypes")
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) {
throw new UnsupportedOperationException();
}
@SuppressWarnings("rawtypes")
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) {
//nothing extra to be done for rollback.
return invokeNext(ctx, command);
}
private Object visitDataWriteCommand(InvocationContext ctx, DataWriteCommand command) {
final Object key = command.getKey();
if (isIracState(command)) {
setMetadataToCacheEntry(ctx.lookupEntry(key), command.getSegment(), command.getInternalMetadata(key).iracMetadata());
return invokeNext(ctx, command);
}
return skipCommand(ctx, command) ?
invokeNext(ctx, command) :
invokeNextThenAccept(ctx, command, this::afterVisitDataWriteCommand);
}
private void afterVisitDataWriteCommand(InvocationContext ctx, DataWriteCommand command, Object rv) {
if (!command.isSuccessful()) {
return;
}
// at this point, the primary owner has the lock acquired
// we send a request for new IracMetadata
// Only wait for the reply in prepare.
setMetadataForWrite(asLocalTxInvocationContext(ctx), command, command.getKey());
}
private Object visitWriteCommand(InvocationContext ctx, WriteCommand command) {
return skipCommand(ctx, command) ?
invokeNext(ctx, command) :
invokeNextThenAccept(ctx, command, this::afterVisitWriteCommand);
}
private void afterVisitWriteCommand(InvocationContext ctx, WriteCommand command, Object rv) {
if (!command.isSuccessful()) {
return;
}
// at this point, the primary owner has the lock acquired
// we send a request for new IracMetadata
// Only wait for the reply in prepare.
LocalTxInvocationContext txCtx = asLocalTxInvocationContext(ctx);
for (Object key : command.getAffectedKeys()) {
setMetadataForWrite(txCtx, command, key);
}
}
private Object onLocalPrepare(LocalTxInvocationContext ctx, PrepareCommand command) {
if (log.isTraceEnabled()) {
log.tracef("[IRAC] On local prepare for tx %s", command.getGlobalTransaction());
}
//on prepare, we need to wait for all replies from the primary owners that contain the new IracMetadata
//this is required because pessimistic transactions commit in a single phase!
AggregateCompletionStage<Void> allStages = CompletionStages.aggregateCompletionStage();
Iterator<StreamData> iterator = streamKeysFromModifications(command.getModifications().stream()).iterator();
while (iterator.hasNext()) {
StreamData data = iterator.next();
CompletionStage<IracMetadata> rsp = ctx.getIracMetadata(data.key);
allStages.dependsOn(rsp.thenAccept(iracMetadata -> setMetadataBeforeSendingPrepare(ctx, data, iracMetadata)));
}
return asyncInvokeNext(ctx, command, allStages.freeze());
}
private Object onRemotePrepare(TxInvocationContext<RemoteTransaction> ctx, PrepareCommand command) {
//on the remote side, we need to merge the IRAC metadata from the WriteCommand into the CacheEntry
Iterator<StreamData> iterator = streamKeysFromModifications(command.getModifications().stream())
.filter(this::isWriteOwner)
.iterator();
while (iterator.hasNext()) {
StreamData data = iterator.next();
setMetadataToCacheEntry(ctx.lookupEntry(data.key), data.segment, data.command.getInternalMetadata(data.key).iracMetadata());
}
return invokeNext(ctx, command);
}
private void setMetadataBeforeSendingPrepare(LocalTxInvocationContext ctx, StreamData data, IracMetadata metadata) {
CacheEntry<?, ?> entry = ctx.lookupEntry(data.key);
assert entry != null;
updateCommandMetadata(data.key, data.command, metadata);
if (isWriteOwner(data)) {
setMetadataToCacheEntry(entry, data.segment, metadata);
}
}
private void setMetadataForWrite(LocalTxInvocationContext ctx, WriteCommand command, Object key) {
if (ctx.hasIracMetadata(key)) {
return;
}
int segment = getSegment(command, key);
CompletionStage<IracMetadata> metadata = requestNewMetadata(segment, ctx.lookupEntry(key));
ctx.storeIracMetadata(key, metadata);
}
private CompletionStage<IracMetadata> requestNewMetadata(int segment, CacheEntry<?, ?> cacheEntry) {
LocalizedCacheTopology cacheTopology = getCacheTopology();
DistributionInfo dInfo = cacheTopology.getSegmentDistribution(segment);
if (dInfo.isPrimary()) {
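// this node is the primary owner of the segment: generate the metadata locally, no RPC needed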
IracEntryVersion versionSeen = getIracVersionFromCacheEntry(cacheEntry);
return CompletableFuture.completedFuture(iracVersionGenerator.generateNewMetadata(segment, versionSeen));
} else {
return requestNewMetadataFromPrimaryOwner(dInfo, cacheTopology.getTopologyId(), cacheEntry);
}
}
private CompletionStage<IracMetadata> requestNewMetadataFromPrimaryOwner(DistributionInfo dInfo, int topologyId,
CacheEntry<?, ?> cacheEntry) {
IracEntryVersion versionSeen = getIracVersionFromCacheEntry(cacheEntry);
IracMetadataRequestCommand cmd = commandsFactory.buildIracMetadataRequestCommand(dInfo.segmentId(), versionSeen);
cmd.setTopologyId(topologyId);
RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
return rpcManager.invokeCommand(dInfo.primary(), cmd, RESPONSE_COLLECTOR, rpcOptions);
}
private static boolean skipCommand(InvocationContext ctx, FlagAffectedCommand command) {
return !ctx.isOriginLocal() || command.hasAnyFlag(FlagBitSets.IRAC_UPDATE);
}
private static class IracMetadataResponseCollector extends ValidSingleResponseCollector<IracMetadata> {
@Override
protected IracMetadata withValidResponse(Address sender, ValidResponse response) {
Object rv = response.getResponseValue();
assert rv instanceof IracMetadata : "[IRAC] invalid response! Expects IracMetadata but got " + rv;
return (IracMetadata) rv;
}
@Override
protected IracMetadata targetNotFound(Address sender) {
throw ResponseCollectors.remoteNodeSuspected(sender);
}
}
}
| 12,992
| 41.740132
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/VersionInterceptor.java
|
package org.infinispan.interceptors.impl;
import org.infinispan.commands.MetadataAwareCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.metadata.Metadata;
/**
 * Interceptor installed when compatibility is enabled.
* @author wburns
* @since 9.0
*/
public class VersionInterceptor extends DDAsyncInterceptor {
@Inject protected VersionGenerator versionGenerator;
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
addVersionIfNeeded(command);
return super.visitReplaceCommand(ctx, command);
}
protected void addVersionIfNeeded(MetadataAwareCommand cmd) {
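// attach a generated version only when the command's metadata does not already carry one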
Metadata metadata = cmd.getMetadata();
if (metadata.version() == null) {
Metadata newMetadata = metadata.builder()
.version(versionGenerator.generateNew())
.build();
cmd.setMetadata(newMetadata);
}
}
}
| 1,169
| 31.5
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/NotificationInterceptor.java
|
package org.infinispan.interceptors.impl;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.annotation.TransactionCompleted;
/**
* The interceptor in charge of firing off notifications to cache listeners
*
* @author <a href="mailto:manik@jboss.org">Manik Surtani</a>
* @since 9.0
*/
public class NotificationInterceptor extends DDAsyncInterceptor {
@Inject CacheNotifier notifier;
private final InvocationSuccessFunction<VisitableCommand> commitSuccessAction = new InvocationSuccessFunction<VisitableCommand>() {
@Override
public Object apply(InvocationContext rCtx, VisitableCommand rCommand, Object rv)
throws Throwable {
return delayedValue(notifier.notifyTransactionCompleted(((TxInvocationContext) rCtx).getGlobalTransaction(), true, rCtx), rv);
}
};
private final InvocationSuccessFunction<RollbackCommand> rollbackSuccessAction = new InvocationSuccessFunction<RollbackCommand>() {
@Override
public Object apply(InvocationContext rCtx, RollbackCommand rCommand, Object rv)
throws Throwable {
return delayedValue(notifier.notifyTransactionCompleted(((TxInvocationContext) rCtx).getGlobalTransaction(), false, rCtx), rv);
}
};
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
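// only a one-phase prepare completes the transaction; with two phases the notification fires on commit instead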
if (!command.isOnePhaseCommit() || !notifier.hasListener(TransactionCompleted.class)) {
return invokeNext(ctx, command);
}
return invokeNextThenApply(ctx, command, commitSuccessAction);
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
if (!notifier.hasListener(TransactionCompleted.class)) {
return invokeNext(ctx, command);
}
return invokeNextThenApply(ctx, command, commitSuccessAction);
}
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
if (!notifier.hasListener(TransactionCompleted.class)) {
return invokeNext(ctx, command);
}
return invokeNextThenApply(ctx, command, rollbackSuccessAction);
}
}
| 2,723
| 42.238095
| 136
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/BatchingInterceptor.java
|
package org.infinispan.interceptors.impl;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.batch.BatchContainer;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Interceptor that captures batched calls and attaches contexts.
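 *
 * <p>A minimal usage sketch, assuming invocation batching is enabled in the cache configuration
 * (the {@code cache} reference and keys are hypothetical):</p>
 * <pre>{@code
 * cache.startBatch();
 * cache.put("k1", "v1");
 * cache.put("k2", "v2");
 * cache.endBatch(true); // true commits the batch, false rolls it back
 * }</pre>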
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @since 9.0
*/
public class BatchingInterceptor extends DDAsyncInterceptor {
@Inject BatchContainer batchContainer;
@Inject TransactionManager transactionManager;
@Inject InvocationContextFactory invocationContextFactory;
private static final Log log = LogFactory.getLog(BatchingInterceptor.class);
@Override
public Object visitEvictCommand(InvocationContext ctx, EvictCommand command) {
// eviction is non-tx, so this interceptor should be a no-op for EvictCommands
return invokeNext(ctx, command);
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) {
//clear is non-transactional and suspends any running tx before invocation; nothing to do here.
return invokeNext(ctx, command);
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
return command.hasAnyFlag(FlagBitSets.PUT_FOR_EXTERNAL_READ) ?
invokeNext(ctx, command) :
handleDefault(ctx, command);
}
@Override
public Object visitRemoveExpiredCommand(InvocationContext ctx, RemoveExpiredCommand command) throws Throwable {
return invokeNext(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
//IRAC updates aren't transactional
return invokeNext(ctx, command);
}
/**
* Simply check if there is an ongoing tx. <ul> <li>If there is one, this is a no-op and just passes the call up the
* chain.</li> <li>If there isn't one and there is a batch in progress, resume the batch's tx, pass up, and finally
* suspend the batch's tx.</li> <li>If there is no batch in progress, just pass the call up the chain.</li> </ul>
*/
@Override
public Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable {
if (!ctx.isOriginLocal()) {
// Nothing to do for remote calls
return invokeNext(ctx, command);
}
Transaction tx;
if (transactionManager.getTransaction() != null || (tx = batchContainer.getBatchTransaction()) == null) {
// The active transaction means we are in an auto-batch.
// No batch means a read-only auto-batch.
// Either way, we don't need to do anything
return invokeNext(ctx, command);
}
try {
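// resume the batch's transaction for the duration of this invocation; it is suspended again in the finally block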
transactionManager.resume(tx);
if (ctx.isInTxScope()) {
return invokeNext(ctx, command);
}
log.tracef("Called with a non-tx invocation context: %s", ctx);
InvocationContext txInvocationContext = invocationContextFactory.createInvocationContext(true, -1);
return invokeNext(txInvocationContext, command);
} finally {
suspendTransaction();
}
}
private void suspendTransaction() throws SystemException {
if (transactionManager.getTransaction() != null && batchContainer.isSuspendTxAfterInvocation())
transactionManager.suspend();
}
}
| 4,107
| 38.5
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/CacheMgmtInterceptor.java
|
package org.infinispan.interceptors.impl;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.functional.AbstractWriteManyCommand;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.read.AbstractDataCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.stat.TimerTracker;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.StripedCounters;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.offheap.OffHeapMemoryAllocator;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.eviction.EvictionStrategy;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.functional.impl.StatsEnvelope;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.MeasurementType;
import org.infinispan.jmx.annotations.Units;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.manager.PersistenceManager.AccessMode;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.concurrent.CompletionStages;
/**
* Captures cache management statistics.
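 *
 * <p>A minimal sketch of reading these statistics at runtime, assuming statistics are enabled and
 * {@code cache} is a hypothetical running cache instance:</p>
 * <pre>{@code
 * Stats stats = cache.getAdvancedCache().getStats();
 * long hits = stats.getHits();
 * long misses = stats.getMisses();
 * }</pre>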
*
* @author Jerry Gauthier
* @since 9.0
*/
@MBean(objectName = "Statistics", description = "General statistics such as timings, hit/miss ratio, and so on.")
public final class CacheMgmtInterceptor extends JmxStatsCommandInterceptor {
@Inject ComponentRef<AdvancedCache<?, ?>> cache;
@Inject InternalDataContainer<?, ?> dataContainer;
@Inject TimeService timeService;
@Inject OffHeapMemoryAllocator allocator;
@Inject ComponentRegistry componentRegistry;
@Inject GlobalConfiguration globalConfiguration;
@Inject ComponentRef<PersistenceManager> persistenceManager;
@Inject DistributionManager distributionManager;
private final AtomicLong startNanoseconds = new AtomicLong(0);
private final AtomicLong resetNanoseconds = new AtomicLong(0);
private final StripedCounters<StripeB> counters = new StripedCounters<>(StripeC::new);
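// counters are striped to reduce contention between threads updating statistics concurrently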
private TimerTracker hitTimes;
private TimerTracker missTimes;
private TimerTracker storeTimes;
private TimerTracker removeTimes;
@Start
public void start() {
startNanoseconds.set(timeService.time());
resetNanoseconds.set(startNanoseconds.get());
}
@ManagedAttribute(description = "Hit Times", displayName = "Hit Times", dataType = DataType.TIMER, units = Units.NANOSECONDS)
public void setHitTimes(TimerTracker hitTimes) {
this.hitTimes = hitTimes;
}
@ManagedAttribute(description = "Miss Times", displayName = "Miss Times", dataType = DataType.TIMER, units = Units.NANOSECONDS)
public void setMissTimes(TimerTracker missTimes) {
this.missTimes = missTimes;
}
@ManagedAttribute(description = "Store Times", displayName = "Store Times", dataType = DataType.TIMER, units = Units.NANOSECONDS)
public void setStoreTimes(TimerTracker storeTimes) {
this.storeTimes = storeTimes;
}
@ManagedAttribute(description = "Remove Times", displayName = "Remove Times", dataType = DataType.TIMER, units = Units.NANOSECONDS)
public void setRemoveTimes(TimerTracker removeTimes) {
this.removeTimes = removeTimes;
}
@Override
public Object visitEvictCommand(InvocationContext ctx, EvictCommand command) throws Throwable {
// Evictions are counted in the ClusteringDependentLogic via NotifyHelper and EvictionManager,
// so there is nothing extra to record here
return super.visitEvictCommand(ctx, command);
}
@Override
public final Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) {
return visitDataReadCommand(ctx, command);
}
@Override
public final Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) {
return visitDataReadCommand(ctx, command);
}
public void addDataRead(boolean foundValue, long timeNanoSeconds) {
StripeB stripe = counters.stripeForCurrentThread();
if (foundValue) {
counters.add(StripeB.hitTimesFieldUpdater, stripe, timeNanoSeconds);
counters.increment(StripeB.hitsFieldUpdater, stripe);
if (hitTimes != null) hitTimes.update(Duration.ofNanos(timeNanoSeconds));
} else {
counters.add(StripeB.missTimesFieldUpdater, stripe, timeNanoSeconds);
counters.increment(StripeB.missesFieldUpdater, stripe);
if (missTimes != null) missTimes.update(Duration.ofNanos(timeNanoSeconds));
}
}
private Object visitDataReadCommand(InvocationContext ctx, AbstractDataCommand command) {
boolean statisticsEnabled = getStatisticsEnabled(command);
if (!statisticsEnabled || !ctx.isOriginLocal())
return invokeNext(ctx, command);
long start = timeService.time();
return invokeNextAndFinally(ctx, command,
(rCtx, rCommand, rv, t) -> addDataRead(rv != null, timeService.timeDuration(start, TimeUnit.NANOSECONDS)));
}
@Override
public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) {
boolean statisticsEnabled = getStatisticsEnabled(command);
if (!statisticsEnabled || !ctx.isOriginLocal())
return invokeNext(ctx, command);
long start = timeService.time();
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
int requests = rCommand.getKeys().size();
int hitCount = 0;
if (t == null) {
for (Entry<?, ?> entry : ((Map<?, ?>) rv).entrySet()) {
if (entry.getValue() != null) {
hitCount++;
}
}
}
int missCount = requests - hitCount;
StripeB stripe = counters.stripeForCurrentThread();
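// attribute the measured interval proportionally between hits and misses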
if (hitCount > 0) {
long hitTimesNanos = intervalNanos * hitCount / requests;
counters.add(StripeB.hitsFieldUpdater, stripe, hitCount);
counters.add(StripeB.hitTimesFieldUpdater, stripe, hitTimesNanos);
if (hitTimes != null) hitTimes.update(Duration.ofNanos(hitTimesNanos));
}
if (missCount > 0) {
long missTimesNanos = intervalNanos * missCount / requests;
counters.add(StripeB.missesFieldUpdater, stripe, missCount);
counters.add(StripeB.missTimesFieldUpdater, stripe, missTimesNanos);
if (missTimes != null) missTimes.update(Duration.ofNanos(missTimesNanos));
}
});
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) {
boolean statisticsEnabled = getStatisticsEnabled(command);
if (!statisticsEnabled || !ctx.isOriginLocal())
return invokeNext(ctx, command);
long start = timeService.time();
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
final long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
final Map<Object, Object> data = rCommand.getMap();
if (data != null && !data.isEmpty()) {
StripeB stripe = counters.stripeForCurrentThread();
counters.add(StripeB.storeTimesFieldUpdater, stripe, intervalNanos);
counters.add(StripeB.storesFieldUpdater, stripe, data.size());
if (storeTimes != null) storeTimes.update(Duration.ofNanos(intervalNanos));
}
});
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) {
return updateStoreStatistics(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) throws Throwable {
return updateStoreStatistics(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) {
return updateStoreStatistics(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) {
boolean statisticsEnabled = getStatisticsEnabled(command);
if (!statisticsEnabled || !ctx.isOriginLocal())
return invokeNext(ctx, command);
long start = timeService.time();
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (rv == null && rCommand.isSuccessful()) {
increaseRemoveMisses();
} else if (rCommand.isSuccessful()) {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
counters.add(StripeB.storeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.storesFieldUpdater, stripe);
if (storeTimes != null) storeTimes.update(Duration.ofNanos(intervalNanos));
}
});
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) {
return updateStoreStatistics(ctx, command);
}
private Object updateStoreStatistics(InvocationContext ctx, WriteCommand command) {
boolean statisticsEnabled = getStatisticsEnabled(command);
if (!statisticsEnabled || !ctx.isOriginLocal())
return invokeNext(ctx, command);
long start = timeService.time();
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (rCommand.isSuccessful()) {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
counters.add(StripeB.storeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.storesFieldUpdater, stripe);
if (storeTimes != null) storeTimes.update(Duration.ofNanos(intervalNanos));
}
});
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) {
if (!ctx.isOriginLocal() || command.hasAnyFlag(FlagBitSets.SKIP_STATISTICS))
return invokeNext(ctx, command);
if (!getStatisticsEnabled())
return invokeNextThenApply(ctx, command, StatsEnvelope::unpack);
long start = timeService.time();
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
StatsEnvelope envelope = (StatsEnvelope) rv;
if (envelope.isMiss()) {
counters.add(StripeB.missTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.missesFieldUpdater, stripe);
if (missTimes != null) missTimes.update(Duration.ofNanos(intervalNanos));
} else if (envelope.isHit()) {
counters.add(StripeB.hitTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.hitsFieldUpdater, stripe);
if (hitTimes != null) hitTimes.update(Duration.ofNanos(intervalNanos));
}
return envelope.value();
});
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command) {
if (!ctx.isOriginLocal() || command.hasAnyFlag(FlagBitSets.SKIP_STATISTICS))
return invokeNext(ctx, command);
if (!getStatisticsEnabled())
return invokeNextThenApply(ctx, command, StatsEnvelope::unpackStream);
long start = timeService.time();
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
ByRef.Integer hitCount = new ByRef.Integer(0);
ByRef.Integer missCount = new ByRef.Integer(0);
int numResults = rCommand.getKeys().size();
Collection<Object> retvals = new ArrayList<>(numResults);
((Stream<StatsEnvelope<Object>>) rv).forEach(e -> {
if (e.isHit()) hitCount.inc();
if (e.isMiss()) missCount.inc();
retvals.add(e.value());
});
if (missCount.get() > 0) {
long missTimesNanos = missCount.get() * intervalNanos / numResults;
counters.add(StripeB.missTimesFieldUpdater, stripe, missTimesNanos);
counters.add(StripeB.missesFieldUpdater, stripe, missCount.get());
if (missTimes != null) missTimes.update(Duration.ofNanos(missTimesNanos));
}
if (hitCount.get() > 0) {
long hitTimesNanos = hitCount.get() * intervalNanos / numResults;
counters.add(StripeB.hitTimesFieldUpdater, stripe, hitTimesNanos);
counters.add(StripeB.hitsFieldUpdater, stripe, hitCount.get());
if (hitTimes != null) hitTimes.update(Duration.ofNanos(hitTimesNanos));
}
return retvals.stream();
});
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) {
return updateStatisticsWriteOnly(ctx, command);
}
private Object updateStatisticsWriteOnly(InvocationContext ctx, AbstractDataCommand command) {
if (!ctx.isOriginLocal() || command.hasAnyFlag(FlagBitSets.SKIP_STATISTICS)) {
return invokeNext(ctx, command);
}
if (!getStatisticsEnabled())
return invokeNextThenApply(ctx, command, StatsEnvelope::unpack);
long start = timeService.time();
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
StatsEnvelope<?> envelope = (StatsEnvelope<?>) rv;
if (envelope.isDelete()) {
counters.add(StripeB.removeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.removeHitsFieldUpdater, stripe);
if (removeTimes != null) removeTimes.update(Duration.ofNanos(intervalNanos));
} else if ((envelope.flags() & (StatsEnvelope.CREATE | StatsEnvelope.UPDATE)) != 0) {
counters.add(StripeB.storeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.storesFieldUpdater, stripe);
if (storeTimes != null) storeTimes.update(Duration.ofNanos(intervalNanos));
}
assert envelope.value() == null;
return null;
});
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command) {
return updateStatisticsReadWrite(ctx, command);
}
private Object updateStatisticsReadWrite(InvocationContext ctx, AbstractDataCommand command) {
if (!ctx.isOriginLocal() || command.hasAnyFlag(FlagBitSets.SKIP_STATISTICS)) {
return invokeNext(ctx, command);
}
if (!getStatisticsEnabled())
return invokeNextThenApply(ctx, command, StatsEnvelope::unpack);
long start = timeService.time();
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
// FAIL_SILENTLY makes the return value null
if (rv == null && !rCommand.isSuccessful() && rCommand.hasAnyFlag(FlagBitSets.FAIL_SILENTLY))
return null;
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
StatsEnvelope<?> envelope = (StatsEnvelope<?>) rv;
if (envelope.isDelete()) {
counters.add(StripeB.removeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.removeHitsFieldUpdater, stripe);
if (removeTimes != null) removeTimes.update(Duration.ofNanos(intervalNanos));
} else if ((envelope.flags() & (StatsEnvelope.CREATE | StatsEnvelope.UPDATE)) != 0) {
counters.add(StripeB.storeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.storesFieldUpdater, stripe);
if (storeTimes != null) storeTimes.update(Duration.ofNanos(intervalNanos));
}
if (envelope.isHit()) {
counters.add(StripeB.hitTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.hitsFieldUpdater, stripe);
if (hitTimes != null) hitTimes.update(Duration.ofNanos(intervalNanos));
} else if (envelope.isMiss()) {
counters.add(StripeB.missTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.missesFieldUpdater, stripe);
if (missTimes != null) missTimes.update(Duration.ofNanos(intervalNanos));
}
return envelope.value();
});
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command) {
return updateStatisticsReadWrite(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) {
return updateStatisticsWriteOnly(ctx, command);
}
// TODO: WriteOnlyManyCommand and WriteOnlyManyEntriesCommand not implemented as the rest of stack
// does not pass the return value.
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) {
return updateStatisticsReadWrite(ctx, command);
}
private Object updateStatisticsReadWrite(InvocationContext ctx, AbstractWriteManyCommand command) {
if (!ctx.isOriginLocal() || command.hasAnyFlag(FlagBitSets.SKIP_STATISTICS)) {
return invokeNext(ctx, command);
}
if (!getStatisticsEnabled())
return invokeNextThenApply(ctx, command, StatsEnvelope::unpackCollection);
long start = timeService.time();
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
int hits = 0;
int misses = 0;
int stores = 0;
int removals = 0;
int numResults = rCommand.getAffectedKeys().size();
List<Object> results = new ArrayList<>(numResults);
for (StatsEnvelope<?> envelope : ((Collection<StatsEnvelope<?>>) rv)) {
if (envelope.isDelete()) {
removals++;
} else if ((envelope.flags() & (StatsEnvelope.CREATE | StatsEnvelope.UPDATE)) != 0) {
stores++;
}
if (envelope.isHit()) {
hits++;
} else if (envelope.isMiss()) {
misses++;
}
results.add(envelope.value());
}
if (removals > 0) {
long removalsTimeNanos = removals * intervalNanos / numResults;
counters.add(StripeB.removeTimesFieldUpdater, stripe, removalsTimeNanos);
counters.add(StripeB.removeHitsFieldUpdater, stripe, removals);
if (removeTimes != null) removeTimes.update(Duration.ofNanos(removalsTimeNanos));
}
if (stores > 0) {
long storesTimeNanos = stores * intervalNanos / numResults;
counters.add(StripeB.storeTimesFieldUpdater, stripe, storesTimeNanos);
counters.add(StripeB.storesFieldUpdater, stripe, stores);
if (storeTimes != null) storeTimes.update(Duration.ofNanos(storesTimeNanos));
}
if (misses > 0) {
long missTimesNanos = misses * intervalNanos / numResults;
counters.add(StripeB.missTimesFieldUpdater, stripe, missTimesNanos);
counters.add(StripeB.missesFieldUpdater, stripe, misses);
if (missTimes != null) missTimes.update(Duration.ofNanos(missTimesNanos));
}
if (hits > 0) {
long hitTimesNanos = hits * intervalNanos / numResults;
counters.add(StripeB.hitTimesFieldUpdater, stripe, hitTimesNanos);
counters.add(StripeB.hitsFieldUpdater, stripe, hits);
if (hitTimes != null) hitTimes.update(Duration.ofNanos(hitTimesNanos));
}
return results;
});
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) {
return updateStatisticsReadWrite(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) {
boolean statisticsEnabled = getStatisticsEnabled(command);
if (!statisticsEnabled || !ctx.isOriginLocal())
return invokeNext(ctx, command);
long start = timeService.time();
return invokeNextAndFinally(ctx, command, (rCtx, removeCommand, rv, t) -> {
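// conditional removes report hit/miss via command success; unconditional removes hit only when a previous value existed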
if (removeCommand.isConditional()) {
if (removeCommand.isSuccessful())
increaseRemoveHits(start);
else
increaseRemoveMisses();
} else {
if (rv == null)
increaseRemoveMisses();
else
increaseRemoveHits(start);
}
});
}
private void increaseRemoveHits(long start) {
long intervalNanos = timeService.timeDuration(start, TimeUnit.NANOSECONDS);
StripeB stripe = counters.stripeForCurrentThread();
counters.add(StripeB.removeTimesFieldUpdater, stripe, intervalNanos);
counters.increment(StripeB.removeHitsFieldUpdater, stripe);
if (removeTimes != null) removeTimes.update(Duration.ofNanos(intervalNanos));
}
private void increaseRemoveMisses() {
counters.increment(StripeB.removeMissesFieldUpdater, counters.stripeForCurrentThread());
}
@ManagedAttribute(
description = "Number of cache attribute hits",
displayName = "Number of cache hits",
measurementType = MeasurementType.TRENDSUP)
public long getHits() {
return counters.get(StripeB.hitsFieldUpdater);
}
@ManagedAttribute(
description = "Number of cache attribute misses",
displayName = "Number of cache misses",
measurementType = MeasurementType.TRENDSUP
)
public long getMisses() {
return counters.get(StripeB.missesFieldUpdater);
}
@ManagedAttribute(
description = "Number of cache removal hits",
displayName = "Number of cache removal hits",
measurementType = MeasurementType.TRENDSUP
)
public long getRemoveHits() {
return counters.get(StripeB.removeHitsFieldUpdater);
}
@ManagedAttribute(
description = "Number of cache removals where keys were not found",
displayName = "Number of cache removal misses",
measurementType = MeasurementType.TRENDSUP
)
public long getRemoveMisses() {
return counters.get(StripeB.removeMissesFieldUpdater);
}
@ManagedAttribute(
description = "Number of cache attribute put operations",
displayName = "Number of cache puts",
measurementType = MeasurementType.TRENDSUP
)
public long getStores() {
return counters.get(StripeB.storesFieldUpdater);
}
@ManagedAttribute(
description = "Number of cache eviction operations",
displayName = "Number of cache evictions",
measurementType = MeasurementType.TRENDSUP
)
public long getEvictions() {
return counters.get(StripeB.evictionsFieldUpdater);
}
@ManagedAttribute(
description = "Percentage hit/(hit+miss) ratio for the cache",
displayName = "Hit ratio",
units = Units.PERCENTAGE
)
public double getHitRatio() {
long hitsL = counters.get(StripeB.hitsFieldUpdater);
double total = hitsL + counters.get(StripeB.missesFieldUpdater);
// The reason for <= is that equality checks
// should be avoided for floating point numbers.
if (total <= 0)
return 0;
return (hitsL / total);
}
@ManagedAttribute(
description = "Read/writes ratio for the cache",
displayName = "Read/write ratio",
units = Units.PERCENTAGE
)
public double getReadWriteRatio() {
long sum = counters.get(StripeB.storesFieldUpdater);
if (sum == 0)
return 0;
return (double) (counters.get(StripeB.hitsFieldUpdater) + counters.get(StripeB.missesFieldUpdater)) / (double) sum;
}
@ManagedAttribute(
description = "Average number of milliseconds for a read operation on the cache",
displayName = "Average read time",
units = Units.MILLISECONDS
)
public long getAverageReadTime() {
long total = counters.get(StripeB.hitsFieldUpdater) + counters.get(StripeB.missesFieldUpdater);
if (total == 0)
return 0;
total = (counters.get(StripeB.hitTimesFieldUpdater) + counters.get(StripeB.missTimesFieldUpdater)) / total;
return TimeUnit.NANOSECONDS.toMillis(total);
}
@ManagedAttribute(
description = "Average number of nanoseconds for a read operation on the cache",
displayName = "Average read time",
units = Units.NANOSECONDS
)
public long getAverageReadTimeNanos() {
long total = counters.get(StripeB.hitsFieldUpdater) + counters.get(StripeB.missesFieldUpdater);
if (total == 0)
return 0;
return (counters.get(StripeB.hitTimesFieldUpdater) + counters.get(StripeB.missTimesFieldUpdater)) / total;
}
@ManagedAttribute(
description = "Average number of milliseconds for a write operation in the cache",
displayName = "Average write time",
units = Units.MILLISECONDS
)
public long getAverageWriteTime() {
long sum = counters.get(StripeB.storesFieldUpdater);
if (sum == 0)
return 0;
return TimeUnit.NANOSECONDS.toMillis(counters.get(StripeB.storeTimesFieldUpdater) / sum);
}
@ManagedAttribute(
description = "Average number of nanoseconds for a write operation in the cache",
displayName = "Average write time",
units = Units.NANOSECONDS
)
public long getAverageWriteTimeNanos() {
long sum = counters.get(StripeB.storesFieldUpdater);
if (sum == 0)
return 0;
return counters.get(StripeB.storeTimesFieldUpdater) / sum;
}
@ManagedAttribute(
description = "Average number of milliseconds for a remove operation in the cache",
displayName = "Average remove time",
units = Units.MILLISECONDS
)
public long getAverageRemoveTime() {
long removes = getRemoveHits();
if (removes == 0)
return 0;
return TimeUnit.NANOSECONDS.toMillis(counters.get(StripeB.removeTimesFieldUpdater) / removes);
}
@ManagedAttribute(
description = "Average number of nanoseconds for a remove operation in the cache",
displayName = "Average remove time",
units = Units.NANOSECONDS
)
public long getAverageRemoveTimeNanos() {
long removes = getRemoveHits();
if (removes == 0)
return 0;
return counters.get(StripeB.removeTimesFieldUpdater) / removes;
}
@ManagedAttribute(
description = "Approximate number of entries currently in the cache, including persisted and expired entries",
displayName = "Approximate number of entries"
)
public long getApproximateEntries() {
// Don't restrict the segments in case some writes used CACHE_MODE_LOCAL
IntSet allSegments = IntSets.immutableRangeSet(cacheConfiguration.clustering().hash().numSegments());
// Do restrict the segments when counting entries in shared stores
IntSet writeSegments;
if (distributionManager != null) {
writeSegments = distributionManager.getCacheTopology().getLocalWriteSegments();
} else {
writeSegments = allSegments;
}
long persistenceSize = CompletionStages.join(approximatePersistenceSize(allSegments, writeSegments));
return approximateTotalSize(persistenceSize, allSegments);
}
private CompletionStage<Long> approximatePersistenceSize(IntSet privateSegments, IntSet sharedSegments) {
return persistenceManager.running().approximateSize(AccessMode.PRIVATE, privateSegments)
.thenCompose(privateSize -> {
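// a negative size means no private store could provide an estimate, so fall back to shared stores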
if (privateSize >= 0)
return CompletableFuture.completedFuture(privateSize);
return persistenceManager.running().approximateSize(AccessMode.SHARED, sharedSegments);
});
}
private long approximateTotalSize(long persistenceSize, IntSet segments) {
if (cacheConfiguration.persistence().passivation()) {
long inMemorySize = dataContainer.sizeIncludingExpired(segments);
if (persistenceSize >= 0) {
return inMemorySize + persistenceSize;
} else {
return inMemorySize;
}
} else {
if (persistenceSize >= 0) {
return persistenceSize;
} else {
return dataContainer.sizeIncludingExpired(segments);
}
}
}
@ManagedAttribute(
description = "Approximate number of entries currently in memory, including expired entries",
displayName = "Approximate number of cache entries in memory"
)
public long getApproximateEntriesInMemory() {
return dataContainer.sizeIncludingExpired();
}
@ManagedAttribute(
description = "Approximate number of entries currently in the cache for which the local node is a primary " +
"owner, including persisted and expired entries",
displayName = "Approximate number of entries owned as primary"
)
public long getApproximateEntriesUnique() {
IntSet primarySegments;
if (distributionManager != null) {
LocalizedCacheTopology cacheTopology = distributionManager.getCacheTopology();
primarySegments = cacheTopology.getLocalPrimarySegments();
} else {
primarySegments = IntSets.immutableRangeSet(cacheConfiguration.clustering().hash().numSegments());
}
long persistenceSize = CompletionStages.join(approximatePersistenceSize(primarySegments, primarySegments));
return approximateTotalSize(persistenceSize, primarySegments);
}
@ManagedAttribute(
description = "Number of entries in the cache including passivated entries",
displayName = "Number of current cache entries"
)
@Deprecated
public int getNumberOfEntries() {
return globalConfiguration.metrics().accurateSize() ? cache.wired().withFlags(Flag.CACHE_MODE_LOCAL).size() : -1;
}
@ManagedAttribute(
description = "Number of entries currently in-memory excluding expired entries",
displayName = "Number of in-memory cache entries"
)
@Deprecated
public int getNumberOfEntriesInMemory() {
return globalConfiguration.metrics().accurateSize() ? dataContainer.size() : -1;
}
@ManagedAttribute(
description = "Amount of memory in bytes allocated for use in eviction for data in the cache",
displayName = "Memory used by data in the cache"
)
public long getDataMemoryUsed() {
if (cacheConfiguration.memory().isEvictionEnabled() && cacheConfiguration.memory().maxSizeBytes() > 0) {
return dataContainer.evictionSize();
}
return -1L;
}
@ManagedAttribute(
description = "Amount off-heap memory used by this cache (bytes)",
displayName = "Off-Heap memory used"
)
public long getOffHeapMemoryUsed() {
return allocator.getAllocatedAmount();
}
@ManagedAttribute(
description = "Amount of nodes required to guarantee data consistency",
displayName = "Required Minimum Nodes"
)
public int getRequiredMinimumNumberOfNodes() {
return calculateRequiredMinimumNumberOfNodes(cache.wired(), componentRegistry);
}
public static int calculateRequiredMinimumNumberOfNodes(AdvancedCache<?, ?> cache, ComponentRegistry componentRegistry) {
Configuration config = cache.getCacheConfiguration();
ClusteringConfiguration clusteringConfiguration = config.clustering();
CacheMode mode = clusteringConfiguration.cacheMode();
if (mode.isReplicated() || !mode.isClustered()) {
// Local and replicated modes only require a single node to keep the data
return 1;
}
CacheTopology cacheTopology = cache.getDistributionManager().getCacheTopology();
if (mode.isInvalidation()) {
// Invalidation requires all members, as we don't know which node holds which data
return cacheTopology.getMembers().size();
}
int numMembers = cacheTopology.getMembers().size();
int numOwners = clusteringConfiguration.hash().numOwners();
int minNodes = numMembers - numOwners + 1;
long maxSize = config.memory().size();
int evictionRestrictedNodes;
if (maxSize > 0) {
EvictionStrategy evictionStrategy = config.memory().evictionStrategy();
long totalData;
long capacity;
switch (evictionStrategy) {
case REMOVE:
DataContainer dataContainer = cache.getDataContainer();
totalData = dataContainer.evictionSize() * numOwners;
capacity = dataContainer.capacity();
break;
case EXCEPTION:
TransactionalExceptionEvictionInterceptor exceptionInterceptor = componentRegistry.getComponent(
TransactionalExceptionEvictionInterceptor.class);
totalData = exceptionInterceptor.getCurrentSize();
capacity = exceptionInterceptor.getMaxSize();
break;
default:
throw new IllegalArgumentException("We only support remove or exception based strategy here");
}
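// ceiling division: the minimum node count whose combined capacity can hold the total data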
evictionRestrictedNodes = (int) (totalData / capacity) + (totalData % capacity != 0 ? 1 : 0);
} else {
evictionRestrictedNodes = 1;
}
return Math.max(evictionRestrictedNodes, minNodes);
}
@ManagedAttribute(
description = "Number of seconds since cache started",
displayName = "Seconds since cache started",
units = Units.SECONDS,
measurementType = MeasurementType.TRENDSUP
)
public long getTimeSinceStart() {
return timeService.timeDuration(startNanoseconds.get(), TimeUnit.SECONDS);
}
/**
* Returns number of seconds since cache started
*
* @deprecated use {@link #getTimeSinceStart()} instead.
* @return number of seconds since cache started
*/
@ManagedAttribute(
description = "Number of seconds since cache started",
displayName = "Seconds since cache started",
units = Units.SECONDS,
measurementType = MeasurementType.TRENDSUP
)
@Deprecated
public long getElapsedTime() {
// backward compatibility as we renamed ElapsedTime to TimeSinceStart
return getTimeSinceStart();
}
@ManagedAttribute(
description = "Number of seconds since the cache statistics were last reset",
displayName = "Seconds since cache statistics were reset",
units = Units.SECONDS
)
public long getTimeSinceReset() {
return timeService.timeDuration(resetNanoseconds.get(), TimeUnit.SECONDS);
}
@Override
public void resetStatistics() {
counters.reset(StripeB.hitsFieldUpdater);
counters.reset(StripeB.missesFieldUpdater);
counters.reset(StripeB.storesFieldUpdater);
counters.reset(StripeB.evictionsFieldUpdater);
counters.reset(StripeB.hitTimesFieldUpdater);
counters.reset(StripeB.missTimesFieldUpdater);
counters.reset(StripeB.storeTimesFieldUpdater);
counters.reset(StripeB.removeHitsFieldUpdater);
counters.reset(StripeB.removeTimesFieldUpdater);
counters.reset(StripeB.removeMissesFieldUpdater);
resetNanoseconds.set(timeService.time());
//TODO [anistor] how do we reset Micrometer metrics?
}
private boolean getStatisticsEnabled(FlagAffectedCommand cmd) {
return super.getStatisticsEnabled() && !cmd.hasAnyFlag(FlagBitSets.SKIP_STATISTICS);
}
public void addEvictions(long numEvictions) {
counters.add(StripeB.evictionsFieldUpdater, counters.stripeForCurrentThread(), numEvictions);
}
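// StripeA and StripeC contain only padding: the unused slack fields keep each stripe's
// counters on their own cache lines, preventing false sharing between threads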
private static class StripeA {
@SuppressWarnings("unused")
private long slack1, slack2, slack3, slack4, slack5, slack6, slack7, slack8;
}
@SuppressWarnings("VolatileLongOrDoubleField")
private static class StripeB extends StripeA {
static final AtomicLongFieldUpdater<StripeB> hitTimesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "hitTimes");
static final AtomicLongFieldUpdater<StripeB> missTimesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "missTimes");
static final AtomicLongFieldUpdater<StripeB> storeTimesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "storeTimes");
static final AtomicLongFieldUpdater<StripeB> removeHitsFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "removeHits");
static final AtomicLongFieldUpdater<StripeB> removeMissesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "removeMisses");
static final AtomicLongFieldUpdater<StripeB> storesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "stores");
static final AtomicLongFieldUpdater<StripeB> evictionsFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "evictions");
static final AtomicLongFieldUpdater<StripeB> missesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "misses");
static final AtomicLongFieldUpdater<StripeB> hitsFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "hits");
static final AtomicLongFieldUpdater<StripeB> removeTimesFieldUpdater =
AtomicLongFieldUpdater.newUpdater(StripeB.class, "removeTimes");
private volatile long hits = 0;
private volatile long hitTimes = 0;
private volatile long misses = 0;
private volatile long missTimes = 0;
private volatile long stores = 0;
private volatile long storeTimes = 0;
private volatile long evictions = 0;
private volatile long removeHits = 0;
private volatile long removeMisses = 0;
private volatile long removeTimes = 0;
}
private static final class StripeC extends StripeB {
@SuppressWarnings("unused")
private long slack1, slack2, slack3, slack4, slack5, slack6, slack7, slack8;
}
}
| 41,196
| 42.13822
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/CacheWriterInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.BOTH;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.PRIVATE;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.atomic.AtomicLong;
import jakarta.transaction.InvalidTransactionException;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.SegmentSpecificCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.FunctionalCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.tx.AbstractTransactionBoundaryCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.functional.Param;
import org.infinispan.functional.Param.PersistenceMode;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.MeasurementType;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.transaction.impl.AbstractCacheTransaction;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Writes modifications back to the store on the way out: modifications are persisted either after
 * each method call (non-transactional invocations), or at transaction commit.
*
* Only used for LOCAL and INVALIDATION caches.
*
* @author Bela Ban
* @author Dan Berindei
* @author Mircea Markus
* @since 9.0
*/
@MBean(objectName = "CacheStore", description = "Component that handles storing of entries to a CacheStore from memory.")
public class CacheWriterInterceptor extends JmxStatsCommandInterceptor {
private static final Log log = LogFactory.getLog(CacheWriterInterceptor.class);
@Inject protected PersistenceManager persistenceManager;
@Inject InternalEntryFactory entryFactory;
@Inject TransactionManager transactionManager;
@Inject KeyPartitioner keyPartitioner;
@Inject MarshallableEntryFactory<?, ?> marshalledEntryFactory;
final AtomicLong cacheStores = new AtomicLong(0);
private volatile boolean usingTransactionalStores;
protected final InvocationSuccessFunction<PutMapCommand> handlePutMapCommandReturn = this::handlePutMapCommandReturn;
private final InvocationSuccessFunction<AbstractTransactionBoundaryCommand> afterCommit = this::afterCommit;
protected Log getLog() {
return log;
}
@Start(priority = 15)
protected void start() {
this.setStatisticsEnabled(cacheConfiguration.statistics().enabled());
if (cacheConfiguration.transaction().transactionMode().isTransactional()) {
persistenceManager.addStoreListener(persistenceStatus -> {
usingTransactionalStores = persistenceStatus.usingTransactionalStore();
});
usingTransactionalStores = persistenceManager.hasStore(StoreConfiguration::transactional);
}
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
if (usingTransactionalStores) {
// Handled by TransactionalStoreInterceptor
return invokeNext(ctx, command);
}
//note: commit the data after invoking next interceptor.
//The IRAC interceptor will set the versions in the context entries and it is placed later in the chain.
return invokeNextThenApply(ctx, command, afterCommit);
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
if (usingTransactionalStores) {
// Handled by TransactionalStoreInterceptor
return invokeNext(ctx, command);
}
if (command.isOnePhaseCommit()) {
//note: commit the data after invoking next interceptor.
//The IRAC interceptor will set the versions in the context entries and it is placed later in the chain.
return invokeNextThenApply(ctx, command, afterCommit);
}
return invokeNext(ctx, command);
}
protected InvocationStage commitModifications(TxInvocationContext<AbstractCacheTransaction> ctx) throws Throwable {
List<WriteCommand> allModifications = ctx.getCacheTransaction().getAllModifications();
if (!allModifications.isEmpty()) {
GlobalTransaction tx = ctx.getGlobalTransaction();
if (log.isTraceEnabled()) getLog().tracef("Persisting transaction %s modifications: %s",
tx, allModifications);
Transaction xaTx = null;
try {
xaTx = suspendRunningTx(ctx);
return store(ctx);
} finally {
resumeRunningTx(xaTx);
}
} else {
return null;
}
}
private Object afterCommit(InvocationContext context, VisitableCommand command, Object rv) throws Throwable {
InvocationStage stage = commitModifications((TxInvocationContext<AbstractCacheTransaction>) context);
return stage == null ? rv : stage.thenReturn(context, command, rv);
}
private void resumeRunningTx(Transaction xaTx) throws InvalidTransactionException, SystemException {
if (transactionManager != null && xaTx != null) {
transactionManager.resume(xaTx);
}
}
private Transaction suspendRunningTx(TxInvocationContext<?> ctx) throws SystemException {
Transaction xaTx = null;
if (transactionManager != null) {
xaTx = transactionManager.suspend();
if (xaTx != null && !ctx.isOriginLocal())
throw new IllegalStateException("It is only possible to be in the context of an JRA transaction in the local node.");
}
return xaTx;
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, (rCtx, removeCommand, rv) -> {
if (!isStoreEnabled(removeCommand) || rCtx.isInTxScope() || !removeCommand.isSuccessful() ||
!isProperWriter(rCtx, removeCommand, removeCommand.getKey())) {
return rv;
}
Object key = removeCommand.getKey();
CompletionStage<?> stage = persistenceManager.deleteFromAllStores(key, removeCommand.getSegment(), BOTH);
if (log.isTraceEnabled()) {
stage = stage.thenAccept(removed ->
getLog().tracef("Removed entry under key %s and got response %s from CacheStore", key, removed));
}
return delayedValue(stage, rv);
});
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) {
if (isStoreEnabled(command) && !ctx.isInTxScope()) {
return asyncInvokeNext(ctx, command, persistenceManager.clearAllStores(ctx.isOriginLocal() ? BOTH : PRIVATE));
} else {
return invokeNext(ctx, command);
}
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) throws Throwable {
return visitDataWriteCommandToStore(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command) throws Throwable {
return visitDataWriteCommandToStore(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
return visitDataWriteCommandToStore(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, (rCtx, computeCommand, rv) -> {
if (!isStoreEnabled(computeCommand) || rCtx.isInTxScope() || !computeCommand.isSuccessful() ||
!isProperWriter(rCtx, computeCommand, computeCommand.getKey()))
return rv;
Object key = computeCommand.getKey();
CompletionStage<?> resultStage;
         if (rv == null) {
CompletionStage<Boolean> stage = persistenceManager.deleteFromAllStores(key, computeCommand.getSegment(), BOTH);
if (log.isTraceEnabled()) {
resultStage = stage.thenAccept(removed ->
getLog().tracef("Removed entry under key %s and got response %s from CacheStore", key, removed));
} else {
resultStage = stage;
}
} else {
resultStage = storeEntry(rCtx, key, computeCommand);
}
return delayedValue(resultStage, rv);
});
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return invokeNextThenApply(ctx, command, (rCtx, computeIfAbsentCommand, rv) -> {
if (!isStoreEnabled(computeIfAbsentCommand) || rCtx.isInTxScope() || !computeIfAbsentCommand.isSuccessful())
return rv;
if (!isProperWriter(rCtx, computeIfAbsentCommand, computeIfAbsentCommand.getKey()))
return rv;
if (rv != null) {
Object key = computeIfAbsentCommand.getKey();
return delayedValue(storeEntry(rCtx, key, computeIfAbsentCommand), rv);
}
return null;
});
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
if (!isStoreEnabled(command) || ctx.isInTxScope())
return invokeNext(ctx, command);
return invokeNextThenApply(ctx, command, handlePutMapCommandReturn);
}
protected Object handlePutMapCommandReturn(InvocationContext rCtx, PutMapCommand putMapCommand, Object rv) {
CompletionStage<Long> putMapStage = persistenceManager.writeMapCommand(putMapCommand, rCtx,
((writeCommand, o) -> isProperWriter(rCtx, writeCommand, o)));
if (getStatisticsEnabled()) {
putMapStage.thenAccept(cacheStores::getAndAdd);
}
return delayedValue(putMapStage, rv);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command)
throws Throwable {
return visitWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx, ReadWriteKeyValueCommand command)
throws Throwable {
return visitWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command)
throws Throwable {
return visitWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command)
throws Throwable {
return visitWriteCommand(ctx, command);
}
private <T extends DataWriteCommand & FunctionalCommand> Object visitWriteCommand(InvocationContext ctx, T command) {
return invokeNextThenApply(ctx, command, (rCtx, dataWriteCommand, rv) -> {
if (!isStoreEnabled(dataWriteCommand) || rCtx.isInTxScope() || !dataWriteCommand.isSuccessful() ||
!isProperWriter(rCtx, dataWriteCommand, dataWriteCommand.getKey()))
return rv;
CompletionStage<?> stage = CompletableFutures.completedNull();
Param<PersistenceMode> persistMode = dataWriteCommand.getParams().get(PersistenceMode.ID);
switch (persistMode.get()) {
case LOAD_PERSIST:
case SKIP_LOAD:
Object key = dataWriteCommand.getKey();
CacheEntry<?, ?> entry = rCtx.lookupEntry(key);
if (entry != null) {
if (entry.isRemoved()) {
stage = persistenceManager.deleteFromAllStores(key, dataWriteCommand.getSegment(), BOTH);
if (log.isTraceEnabled()) {
stage = stage.thenAccept(removed ->
getLog().tracef("Removed entry under key %s and got response %s from CacheStore", key, removed));
}
} else if (entry.isChanged()) {
stage = storeEntry(rCtx, key, dataWriteCommand);
if (log.isTraceEnabled()) {
stage = stage.thenAccept(removed ->
getLog().tracef("Stored entry for key %s in CacheStore", key));
}
                  } else if (log.isTraceEnabled()) {
                     getLog().tracef("Skipping write for key %s as entry wasn't changed", key);
                  }
               } else {
                  log.trace("Skipping cache store since entry was not found in context");
               }
               break;
case SKIP_PERSIST:
case SKIP:
log.trace("Skipping cache store since persistence mode parameter is SKIP");
break;
}
return delayedValue(stage, rv);
});
}
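   // Illustrative only (a hedged sketch, not part of this class): the PersistenceMode param
   // consulted above is supplied by callers of the functional API. "functionalMap" and the
   // key/value names are hypothetical:
   //
   //    WriteOnlyMap<K, V> wo = WriteOnlyMapImpl.create(functionalMap)
   //          .withParams(Param.PersistenceMode.SKIP_PERSIST);
   //    wo.eval(key, view -> view.set(value));   // applied in memory, never written to the store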
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command)
throws Throwable {
return visitWriteManyCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command)
throws Throwable {
return visitWriteManyCommand(ctx, command);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command)
throws Throwable {
return visitWriteManyCommand(ctx, command);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command)
throws Throwable {
return visitWriteManyCommand(ctx, command);
}
private <T extends WriteCommand & FunctionalCommand> Object visitWriteManyCommand(InvocationContext ctx, T command) {
return invokeNextThenApply(ctx, command, (rCtx, manyEntriesCommand, rv) -> {
if (!isStoreEnabled(manyEntriesCommand) || rCtx.isInTxScope())
return rv;
CompletionStage<Void> stage = CompletableFutures.completedNull();
Param<PersistenceMode> persistMode = manyEntriesCommand.getParams().get(PersistenceMode.ID);
switch (persistMode.get()) {
case LOAD_PERSIST:
case SKIP_LOAD:
AggregateCompletionStage<Void> composedCompletionStage = CompletionStages.aggregateCompletionStage();
int storedCount = 0;
for (Object key : manyEntriesCommand.getAffectedKeys()) {
CacheEntry<?, ?> entry = rCtx.lookupEntry(key);
if (entry != null) {
if (entry.isRemoved()) {
CompletionStage<?> innerStage = persistenceManager.deleteFromAllStores(key,
keyPartitioner.getSegment(key), BOTH);
if (log.isTraceEnabled()) {
innerStage = innerStage.thenAccept(removed ->
getLog().tracef("Removed entry under key %s and got response %s from CacheStore", key, removed));
}
composedCompletionStage.dependsOn(innerStage);
} else {
if (entry.isChanged() && isProperWriter(rCtx, manyEntriesCommand, key)) {
composedCompletionStage.dependsOn(storeEntry(rCtx, key, manyEntriesCommand, false));
storedCount++;
}
}
}
}
if (getStatisticsEnabled())
cacheStores.getAndAdd(storedCount);
stage = composedCompletionStage.freeze();
break;
case SKIP_PERSIST:
case SKIP:
log.trace("Skipping cache store since persistence mode parameter is SKIP");
break;
}
return delayedValue(stage, rv);
});
}
protected final InvocationStage store(TxInvocationContext<AbstractCacheTransaction> ctx) throws Throwable {
CompletionStage<Long> batchStage = persistenceManager.performBatch(ctx, ((writeCommand, o) -> isProperWriter(ctx, writeCommand, o)));
if (getStatisticsEnabled()) {
batchStage.thenAccept(cacheStores::addAndGet);
}
return asyncValue(batchStage);
}
protected boolean isStoreEnabled(FlagAffectedCommand command) {
if (command.hasAnyFlag(FlagBitSets.SKIP_CACHE_STORE)) {
log.trace("Skipping cache store since the call contain a skip cache store flag");
return false;
}
return true;
}
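   // Illustrative only (a hedged sketch): the SKIP_CACHE_STORE flag tested above is set by
   // callers through the AdvancedCache API; "cache", "key" and "value" are hypothetical:
   //
   //    cache.getAdvancedCache()
   //         .withFlags(Flag.SKIP_CACHE_STORE)
   //         .put(key, value);   // written to memory only; this interceptor skips the store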
protected boolean isProperWriter(InvocationContext ctx, FlagAffectedCommand command, Object key) {
return true;
}
@Override
public void resetStatistics() {
cacheStores.set(0);
}
@ManagedAttribute(
description = "Number of writes to the store",
displayName = "Number of writes to the store",
measurementType = MeasurementType.TRENDSUP
)
public long getWritesToTheStores() {
return cacheStores.get();
}
@ManagedAttribute(
description = "Number of entries currently persisted excluding expired entries",
displayName = "Number of persisted entries"
)
public int getNumberOfPersistedEntries() {
long size = CompletionStages.join(persistenceManager.size());
return (int) Math.min(size, Integer.MAX_VALUE);
}
CompletionStage<Void> storeEntry(InvocationContext ctx, Object key, FlagAffectedCommand command) {
return storeEntry(ctx, key, command, true);
}
CompletionStage<Void> storeEntry(InvocationContext ctx, Object key, FlagAffectedCommand command, boolean incrementStats) {
if (persistenceManager.isReadOnly())
return CompletableFutures.completedNull();
MarshallableEntry<?,?> entry = marshalledEntry(ctx, key);
if (entry != null) {
CompletionStage<Void> stage = persistenceManager.writeToAllNonTxStores(entry,
SegmentSpecificCommand.extractSegment(command, key, keyPartitioner),
skipSharedStores(ctx, key, command) ? PRIVATE : BOTH, command.getFlagsBitSet());
if (log.isTraceEnabled()) {
stage = stage.thenAccept(ignore ->
getLog().tracef("Stored entry %s under key %s", entry.getValue(), key));
}
if (incrementStats && getStatisticsEnabled()) {
stage = stage.thenAccept(ignore ->
cacheStores.incrementAndGet());
}
return stage;
}
return CompletableFutures.completedNull();
}
MarshallableEntry<Object, Object> marshalledEntry(InvocationContext ctx, Object key) {
InternalCacheValue<?> sv = entryFactory.getValueFromCtx(key, ctx);
return sv != null ? marshalledEntryFactory.create(key, (InternalCacheValue) sv) : null;
}
protected boolean skipSharedStores(InvocationContext ctx, Object key, FlagAffectedCommand command) {
return !ctx.isOriginLocal() ||
command.hasAnyFlag(FlagBitSets.SKIP_SHARED_CACHE_STORE);
}
private Object visitDataWriteCommandToStore(InvocationContext ctx, DataWriteCommand command) {
return invokeNextThenApply(ctx, command, (rCtx, cmd, rv) -> {
if (!isStoreEnabled(cmd) || rCtx.isInTxScope() || !cmd.isSuccessful())
return rv;
if (!isProperWriter(rCtx, cmd, cmd.getKey()))
return rv;
Object key = cmd.getKey();
return delayedValue(storeEntry(rCtx, key, cmd), rv);
});
}
}
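// A rough trace of the non-transactional write path implemented above (a summary of the
// existing calls, not additional behavior):
//
//   1. cache.put(k, v)            -> a PutKeyValueCommand enters the interceptor chain
//   2. visitPutKeyValueCommand    -> invokeNextThenApply(...) commits the entry to memory first
//   3. storeEntry(ctx, k, cmd)    -> persistenceManager.writeToAllNonTxStores(...)
//   4. delayedValue(stage, rv)    -> the caller observes rv only once the store write completes
//
// Transactional writes instead go through commitModifications(...)/store(...), which batch all
// modifications into persistenceManager.performBatch(...) at commit time.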
| 21,767
| 42.276342
| 139
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/OptimisticTxIracLocalSiteInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.remoting.responses.PrepareResponse.asPrepareResponse;
import static org.infinispan.util.IracUtils.getIracVersionFromCacheEntry;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.RemoteTxInvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.InvocationSuccessAction;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.remoting.responses.PrepareResponse;
/**
* Interceptor used by IRAC for optimistic transactional caches to handle the local site updates.
* <p>
* On prepare, if successful, the primary owners generate the {@link IracMetadata} to commit and send it back to the
* transaction originator. When committing, the {@link IracMetadata} is set in the context entries to be stored.
*
* @author Pedro Ruivo
* @since 11.0
*/
public class OptimisticTxIracLocalSiteInterceptor extends AbstractIracLocalSiteInterceptor {
private final InvocationSuccessAction<PrepareCommand> afterLocalPrepare = this::afterLocalTwoPhasePrepare;
private final InvocationSuccessFunction<PrepareCommand> afterRemotePrepare = this::afterRemoteTwoPhasePrepare;
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) {
if (command.hasAnyFlag(FlagBitSets.PUT_FOR_EXTERNAL_READ)) {
return visitNonTxDataWriteCommand(ctx, command);
}
final Object key = command.getKey();
if (isIracState(command)) {
// if this is a state transfer from a remote site, we set the versions here
setMetadataToCacheEntry(ctx.lookupEntry(key), command.getSegment(), command.getInternalMetadata(key).iracMetadata());
}
return invokeNext(ctx, command);
}
@SuppressWarnings("rawtypes")
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) {
//note: both methods ignore PutKeyValueCommand from state transfer. That's why the IracMetadata is set above!
// if the prepare fails, (exception) the methods aren't invoked.
if (ctx.isOriginLocal()) {
return invokeNextThenAccept(ctx, command, afterLocalPrepare);
} else {
return invokeNextThenApply(ctx, command, afterRemotePrepare);
}
}
@SuppressWarnings("rawtypes")
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) {
if (ctx.isOriginLocal()) {
return onLocalCommitCommand(ctx, command);
} else {
return onRemoteCommitCommand(ctx, command);
}
}
@SuppressWarnings("rawtypes")
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) {
//nothing extra to be done for rollback.
return invokeNext(ctx, command);
}
private void afterLocalTwoPhasePrepare(InvocationContext ctx, PrepareCommand command, Object rv) {
if (isTraceEnabled()) {
getLog().tracef("[IRAC] After successful local prepare for tx %s. Return Value: %s",
command.getGlobalTransaction(), rv);
}
PrepareResponse prepareResponse = asPrepareResponse(rv);
Iterator<StreamData> iterator = streamKeysFromModifications(command.getModifications()).iterator();
Map<Integer, IracMetadata> segmentMetadata = new HashMap<>();
while (iterator.hasNext()) {
StreamData data = iterator.next();
IracMetadata metadata;
if (isPrimaryOwner(data)) {
IracEntryVersion versionSeen = getIracVersionFromCacheEntry(ctx.lookupEntry(data.key));
metadata = segmentMetadata.computeIfAbsent(data.segment, segment -> iracVersionGenerator.generateNewMetadata(segment, versionSeen));
} else {
metadata = segmentMetadata.computeIfAbsent(data.segment, prepareResponse::getIracMetadata);
}
assert metadata != null : "[IRAC] metadata is null after successful prepare! Data=" + data;
updateCommandMetadata(data.key, data.command, metadata);
}
}
private Object afterRemoteTwoPhasePrepare(InvocationContext ctx, PrepareCommand command, Object rv) {
if (isTraceEnabled()) {
getLog().tracef("[IRAC] After successful remote prepare for tx %s. Return Value: %s",
command.getGlobalTransaction(), rv);
}
PrepareResponse rsp = PrepareResponse.asPrepareResponse(rv);
Iterator<StreamData> iterator = streamKeysFromModifications(command.getModifications())
.filter(this::isPrimaryOwner)
.distinct()
.iterator();
Map<Integer, IracEntryVersion> maxVersionSeen = new HashMap<>();
while (iterator.hasNext()) {
StreamData data = iterator.next();
IracEntryVersion versionSeen = getIracVersionFromCacheEntry(ctx.lookupEntry(data.key));
if (versionSeen != null) {
maxVersionSeen.merge(data.segment, versionSeen, IracEntryVersion::merge);
} else {
maxVersionSeen.putIfAbsent(data.segment, null);
}
}
Map<Integer, IracMetadata> segmentMetadata = new HashMap<>();
maxVersionSeen.forEach((segment, version) ->
segmentMetadata.put(segment, iracVersionGenerator.generateNewMetadata(segment, version)));
rsp.setNewIracMetadata(segmentMetadata);
if (isTraceEnabled()) {
getLog().tracef("[IRAC] After successful remote prepare for tx %s. New Return Value: %s",
command.getGlobalTransaction(), rsp);
}
return rsp;
}
private Object onLocalCommitCommand(TxInvocationContext<?> ctx, CommitCommand command) {
if (isTraceEnabled()) {
getLog().tracef("[IRAC] On local Commit for tx %s", command.getGlobalTransaction());
}
Iterator<StreamData> iterator = streamKeysFromModifications(ctx.getModifications()).iterator();
while (iterator.hasNext()) {
StreamData data = iterator.next();
IracMetadata metadata = data.command.getInternalMetadata(data.key).iracMetadata();
command.addIracMetadata(data.segment, metadata);
if (isWriteOwner(data)) {
setMetadataToCacheEntry(ctx.lookupEntry(data.key), data.segment, metadata);
}
}
return invokeNext(ctx, command);
}
private Object onRemoteCommitCommand(TxInvocationContext<?> context, CommitCommand command) {
if (isTraceEnabled()) {
getLog().tracef("[IRAC] On remote Commit for tx %s", command.getGlobalTransaction());
}
RemoteTxInvocationContext ctx = asRemoteTxInvocationContext(context);
Iterator<StreamData> iterator = streamKeysFromModifications(ctx.getModifications())
.filter(this::isWriteOwner)
.iterator();
while (iterator.hasNext()) {
StreamData data = iterator.next();
IracMetadata metadata = command.getIracMetadata(data.segment);
setMetadataToCacheEntry(ctx.lookupEntry(data.key), data.segment, metadata);
}
return invokeNext(ctx, command);
}
private Stream<StreamData> streamKeysFromModifications(List<WriteCommand> mods) {
return streamKeysFromModifications(mods.stream());
}
}
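// Flow summary for the interceptor above (derived from the handlers, no new behavior):
//
//   prepare, originator -> afterLocalTwoPhasePrepare: primary owners generate per-segment
//                          IracMetadata; non-primary segments take it from the PrepareResponse
//   prepare, remote     -> afterRemoteTwoPhasePrepare: the max IracEntryVersion seen per segment
//                          is merged and fresh metadata is shipped back in the PrepareResponse
//   commit, originator  -> onLocalCommitCommand: metadata is attached to the CommitCommand and
//                          applied to the entries this node write-owns
//   commit, remote      -> onRemoteCommitCommand: metadata is read from the CommitCommand and
//                          applied to write-owned entries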
| 7,802
| 44.104046
| 144
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/VersionedEntryWrappingInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.remoting.responses.PrepareResponse.asPrepareResponse;
import static org.infinispan.transaction.impl.WriteSkewHelper.mergeInPrepareResponse;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.VersionedCommitCommand;
import org.infinispan.commands.tx.VersionedPrepareCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.container.versioning.VersionGenerator;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.InvocationFinallyFunction;
import org.infinispan.interceptors.InvocationStage;
import org.infinispan.interceptors.InvocationSuccessFunction;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Interceptor in charge of wrapping entries and adding them to the caller's context.
*
* @author Mircea Markus
* @since 9.0
*/
public class VersionedEntryWrappingInterceptor extends EntryWrappingInterceptor {
private static final Log log = LogFactory.getLog(VersionedEntryWrappingInterceptor.class);
@Inject protected VersionGenerator versionGenerator;
private final InvocationSuccessFunction<VersionedPrepareCommand> prepareHandler = this::prepareHandler;
private final InvocationSuccessFunction<VersionedPrepareCommand> afterPrepareHandler = this::afterPrepareHandler;
private final InvocationFinallyFunction<VersionedCommitCommand> commitHandler = this::commitHandler;
@SuppressWarnings("rawtypes")
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
VersionedPrepareCommand versionedPrepareCommand = (VersionedPrepareCommand) command;
if (ctx.isOriginLocal()) {
versionedPrepareCommand.setVersionsSeen(ctx.getCacheTransaction().getVersionsRead());
}
return wrapEntriesForPrepareAndApply(ctx, versionedPrepareCommand, prepareHandler);
}
private Object prepareHandler(InvocationContext nonTxCtx, VersionedPrepareCommand command, Object nil) {
TxInvocationContext<?> ctx = (TxInvocationContext<?>) nonTxCtx;
CompletionStage<Map<Object, IncrementableEntryVersion>> originVersionData;
if (ctx.getCacheTransaction().isFromStateTransfer()) {
storeEntryVersionForStateTransfer(ctx);
originVersionData = CompletableFutures.completedNull();
} else if (ctx.isOriginLocal()) {
originVersionData = checkWriteSkew(ctx, command);
} else {
originVersionData = CompletableFutures.completedNull();
}
InvocationStage originVersionStage = makeStage(asyncInvokeNext(ctx, command, originVersionData));
InvocationStage newVersionStage = originVersionStage.thenApplyMakeStage(ctx, command, (rCtx, rCommand, rv) -> {
CompletionStage<Map<Object, IncrementableEntryVersion>> stage = rCtx.isOriginLocal() ?
originVersionData :
checkWriteSkew((TxInvocationContext<?>) rCtx, rCommand);
return asyncValue(stage.thenApply(vMap -> mergeInPrepareResponse(vMap, asPrepareResponse(rv))));
});
return newVersionStage.thenApply(ctx, command, afterPrepareHandler);
}
private Object afterPrepareHandler(InvocationContext ctx, VersionedPrepareCommand command, Object rv) {
if (command.isOnePhaseCommit()) {
TxInvocationContext<?> txCtx = (TxInvocationContext<?>) ctx;
txCtx.getCacheTransaction().setUpdatedEntryVersions(command.getVersionsSeen());
CompletionStage<Void> stage = commitContextEntries(ctx, null);
return delayedValue(stage, rv);
}
return rv;
}
@SuppressWarnings("rawtypes")
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
VersionedCommitCommand versionedCommitCommand = (VersionedCommitCommand) command;
if (ctx.isOriginLocal()) {
versionedCommitCommand.setUpdatedVersions(ctx.getCacheTransaction().getUpdatedEntryVersions());
}
return invokeNextAndHandle(ctx, versionedCommitCommand, commitHandler);
}
@Override
protected CompletionStage<Void> commitContextEntry(CacheEntry<?, ?> entry, InvocationContext ctx,
FlagAffectedCommand command, Flag stateTransferFlag, boolean l1Invalidation) {
if (ctx.isInTxScope() && stateTransferFlag == null) {
storeEntryVersion(entry, (TxInvocationContext<?>) ctx);
}
return cdl.commitEntry(entry, command, ctx, stateTransferFlag, l1Invalidation);
}
private void storeEntryVersion(CacheEntry<?, ?> entry, TxInvocationContext<?> ctx) {
IncrementableEntryVersion entryVersion = ctx.getCacheTransaction().getUpdatedEntryVersions().get(entry.getKey());
if (entryVersion == null) {
return; //nothing to set
}
PrivateMetadata.Builder builder = PrivateMetadata.getBuilder(entry.getInternalMetadata());
builder.entryVersion(entryVersion);
entry.setInternalMetadata(builder.build());
}
private void storeEntryVersionForStateTransfer(TxInvocationContext<?> ctx) {
      //the write command carries the PrivateMetadata with the version when it is received from other nodes (state transfer).
      //we need to copy that PrivateMetadata to the context entry.
for (WriteCommand cmd : ctx.getCacheTransaction().getAllModifications()) {
for (Object key : cmd.getAffectedKeys()) {
PrivateMetadata metadata = cmd.getInternalMetadata(key);
assert metadata != null;
IncrementableEntryVersion entryVersion = metadata.entryVersion();
assert entryVersion != null;
CacheEntry<?, ?> entry = ctx.lookupEntry(key);
PrivateMetadata.Builder builder = PrivateMetadata.getBuilder(entry.getInternalMetadata());
entry.setInternalMetadata(builder.entryVersion(entryVersion).build());
if (log.isTraceEnabled()) {
log.tracef("Updated entry from state transfer: %s", entry);
}
}
}
}
private Object commitHandler(InvocationContext ctx, VersionedCommitCommand command, Object rv, Throwable t) {
return delayedValue(doCommit(ctx, command), rv, t);
}
private CompletionStage<Map<Object, IncrementableEntryVersion>> checkWriteSkew(TxInvocationContext<?> ctx,
VersionedPrepareCommand command) {
return cdl.createNewVersionsAndCheckForWriteSkews(versionGenerator, ctx, command);
}
private CompletionStage<Void> doCommit(InvocationContext ctx, VersionedCommitCommand command) {
if (!ctx.isOriginLocal()) {
((TxInvocationContext<?>) ctx).getCacheTransaction().setUpdatedEntryVersions(command.getUpdatedVersions());
}
return commitContextEntries(ctx, null);
}
}
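// Version flow implemented above, in short (a summary of the existing calls):
//
//   prepare -> checkWriteSkew(...) delegates to cdl.createNewVersionsAndCheckForWriteSkews(...);
//              the resulting version map is merged into the PrepareResponse
//   commit  -> storeEntryVersion(...) copies each updated IncrementableEntryVersion into the
//              entry's PrivateMetadata before cdl.commitEntry(...) applies it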
| 7,363
| 46.509677
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/TransactionalStoreInterceptor.java
|
package org.infinispan.interceptors.impl;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.BOTH;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
/**
* An interceptor which ensures that writes to an underlying transactional store are prepared->committed/rolledback as part
* of the 2PC, therefore ensuring that the cache and transactional store(s) remain consistent.
*
* @author Ryan Emerson
* @since 9.0
*/
public class TransactionalStoreInterceptor extends DDAsyncInterceptor {
@Inject PersistenceManager persistenceManager;
@Inject InternalEntryFactory entryFactory;
@Inject MarshallableEntryFactory marshalledEntryFactory;
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) {
if (ctx.isOriginLocal()) {
if (!command.isOnePhaseCommit()) {
return asyncInvokeNext(ctx, command, persistenceManager.prepareAllTxStores(ctx, BOTH));
} else {
return invokeNextThenApply(ctx, command, (rCtx, rCommand, rv) -> {
               if (!rCommand.isSuccessful())
                  return rv;
               // Persist the modifications in one phase,
               // after they were successfully applied in the data container
return asyncValue(persistenceManager.performBatch(ctx, (writeCommand, o) -> true));
});
}
}
return invokeNext(ctx, command);
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) {
if (ctx.isOriginLocal()) {
return asyncInvokeNext(ctx, command, persistenceManager.commitAllTxStores(ctx, BOTH));
}
return invokeNext(ctx, command);
}
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) {
if (ctx.isOriginLocal()) {
return asyncInvokeNext(ctx, command, persistenceManager.rollbackAllTxStores(ctx, BOTH));
}
return invokeNext(ctx, command);
}
}
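// 2PC mapping implemented above, in short (a summary, not new behavior):
//
//   PrepareCommand (two-phase) -> persistenceManager.prepareAllTxStores(ctx, BOTH)
//   CommitCommand              -> persistenceManager.commitAllTxStores(ctx, BOTH)
//   RollbackCommand            -> persistenceManager.rollbackAllTxStores(ctx, BOTH)
//   PrepareCommand (one-phase) -> persistenceManager.performBatch(...) after a successful prepare
//
// so a transaction that spans the cache and a transactional store commits in both or in neither.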
| 2,457
| 38.645161
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/PassivationCacheLoaderInterceptor.java
|
package org.infinispan.interceptors.impl;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.eviction.impl.ActivationManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.DataOperationOrderer;
import org.infinispan.util.concurrent.DataOperationOrderer.Operation;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
public class PassivationCacheLoaderInterceptor<K, V> extends CacheLoaderInterceptor<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
@Inject DataOperationOrderer orderer;
@Inject ActivationManager activationManager;
@Override
public CompletionStage<InternalCacheEntry<K, V>> loadAndStoreInDataContainer(InvocationContext ctx, Object key,
int segment, FlagAffectedCommand cmd) {
CompletableFuture<Operation> future = new CompletableFuture<>();
CompletionStage<Operation> delayStage = orderer.orderOn(key, future);
CompletionStage<InternalCacheEntry<K, V>> retrievalStage;
if (delayStage != null && !CompletionStages.isCompletedSuccessfully(delayStage)) {
retrievalStage = delayStage.thenCompose(ignore -> super.loadAndStoreInDataContainer(ctx, key, segment, cmd));
} else {
retrievalStage = super.loadAndStoreInDataContainer(ctx, key, segment, cmd);
}
if (CompletionStages.isCompletedSuccessfully(retrievalStage)) {
InternalCacheEntry<K, V> ice = CompletionStages.join(retrievalStage);
activateAfterLoad(key, segment, orderer, activationManager, future, ice, null);
return retrievalStage;
} else {
return retrievalStage.whenComplete((value, t) -> {
activateAfterLoad(key, segment, orderer, activationManager, future, value, t);
});
}
}
static <K, V> void activateAfterLoad(Object key, int segment, DataOperationOrderer orderer, ActivationManager activationManager, CompletableFuture<Operation> future, InternalCacheEntry<K, V> value, Throwable t) {
if (value != null) {
if (log.isTraceEnabled()) {
log.tracef("Activating key: %s - not waiting for response", value.getKey());
}
// Note we don't wait on this to be removed, which allows the load to continue ahead.
// However, we can't release the orderer acquisition until the remove is complete
CompletionStage<Void> activationStage = activationManager.activateAsync(value.getKey(), segment);
if (!CompletionStages.isCompletedSuccessfully(activationStage)) {
activationStage.whenComplete((ignore, throwable) -> {
if (throwable != null) {
log.warnf("Activation of key %s failed for some reason", t);
}
orderer.completeOperation(key, future, Operation.READ);
});
} else {
orderer.completeOperation(key, future, Operation.READ);
}
} else {
orderer.completeOperation(key, future, Operation.READ);
}
}
}
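// Ordering sketch for the load/activation sequence above (a summary of the existing calls):
//
//   loadAndStoreInDataContainer(k):
//      orderer.orderOn(k, future)          // queue behind any in-flight operation on k
//      super.loadAndStoreInDataContainer   // read the passivated entry into memory
//      activationManager.activateAsync     // asynchronously remove the store copy
//      orderer.completeOperation(k, ...)   // release the next operation queued on k
//
// The orderer acquisition is held until the activation completes, so a concurrent passivation
// of the same key cannot interleave with the removal.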
| 3,459
| 48.428571
| 215
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/interceptors/impl/BaseStateTransferInterceptor.java
|
package org.infinispan.interceptors.impl;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.function.BiFunction;
import org.infinispan.commands.AbstractVisitor;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.interceptors.InvocationFinallyFunction;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.statetransfer.AllOwnersLostException;
import org.infinispan.statetransfer.OutdatedTopologyException;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * A base class for state transfer interceptors. It contains the code shared by the two current
 * implementations, to avoid duplication.
 * <p/>
 * It also provides utility methods for the most common logic.
*
* @author Pedro Ruivo
* @since 9.0
*/
public abstract class BaseStateTransferInterceptor extends DDAsyncInterceptor {
private final InvocationFinallyFunction<VisitableCommand> handleReadCommandReturn = this::handleReadCommandReturn;
@Inject Configuration configuration;
@Inject protected StateTransferLock stateTransferLock;
@Inject @ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
Executor nonBlockingExecutor;
@Inject DistributionManager distributionManager;
@Inject @ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
ScheduledExecutorService timeoutExecutor;
private long transactionDataTimeout;
@Start
public void start() {
transactionDataTimeout = configuration.clustering().remoteTimeout();
configuration.clustering().attributes().attribute(ClusteringConfiguration.REMOTE_TIMEOUT)
.addListener((a, ignored) -> {
transactionDataTimeout = a.get();
});
}
protected final void logRetry(int currentTopologyId, TopologyAffectedCommand cmd) {
if (getLog().isTraceEnabled())
getLog().tracef("Retrying command because of topology change, current topology is %d, command topology %d: %s",
currentTopologyId, cmd.getTopologyId(), cmd);
}
protected final int currentTopologyId() {
final CacheTopology cacheTopology = distributionManager.getCacheTopology();
return cacheTopology == null ? -1 : cacheTopology.getTopologyId();
}
protected final void updateTopologyId(TopologyAffectedCommand command) {
// set the topology id if it was not set before (ie. this is local command)
// TODO Make tx commands extend FlagAffectedCommand so we can use CACHE_MODE_LOCAL in TransactionTable.cleanupStaleTransactions
if (command.getTopologyId() == -1) {
CacheTopology cacheTopology = distributionManager.getCacheTopology();
// Before the topology is set in STM/StateConsumer the topology in DistributionManager is 0
int topologyId = cacheTopology == null ? 0 : cacheTopology.getTopologyId();
if (getLog().isTraceEnabled()) getLog().tracef("Setting command topology to %d", topologyId);
command.setTopologyId(topologyId);
}
}
protected <T extends VisitableCommand> Object retryWhenDone(CompletionStage<Void> stage, int topologyId,
InvocationContext ctx, T command,
InvocationFinallyFunction<T> callback) {
CompletableFuture<Void> future = stage.toCompletableFuture();
      if (future.isDone()) {
getLog().tracef("Retrying command %s for topology %d", command, topologyId);
return invokeNextAndHandle(ctx, command, callback);
} else {
CancellableRetry<T> cancellableRetry = new CancellableRetry<>(command, topologyId);
// We have to use handleAsync and rethrow the exception in the handler, rather than
// thenComposeAsync(), because if `future` completes with an exception we want to continue in remoteExecutor
CompletableFuture<Void> retryFuture = future.handleAsync(cancellableRetry, nonBlockingExecutor);
cancellableRetry.setRetryFuture(retryFuture);
// We want to time out the current command future, not the main topology-waiting future,
// but the command future can take longer time to finish.
ScheduledFuture<?> timeoutFuture = timeoutExecutor.schedule(cancellableRetry, transactionDataTimeout, TimeUnit.MILLISECONDS);
cancellableRetry.setTimeoutFuture(timeoutFuture);
return makeStage(asyncInvokeNext(ctx, command, retryFuture)).andHandle(ctx, command, callback);
}
}
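   // The retry/timeout race above is resolved by CancellableRetry (defined below): the topology
   // future and the scheduled timeout compete over atomic fields, so exactly one wins:
   //
   //   topology future completes first -> apply(...) cancels the timeout and lets the retry run
   //   timeout fires first             -> run() completes retryFuture exceptionally with a
   //                                      TimeoutException, so the command fails instead of retrying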
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
return handleReadCommand(ctx, command);
}
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command)
throws Throwable {
return handleReadCommand(ctx, command);
}
@Override
public Object visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable {
return handleReadCommand(ctx, command);
}
protected <C extends VisitableCommand & TopologyAffectedCommand & FlagAffectedCommand> Object handleReadCommand(
InvocationContext ctx, C command) {
updateTopologyId(command);
return invokeNextAndHandle(ctx, command, handleReadCommandReturn);
}
private Object handleExceptionOnReadCommandReturn(InvocationContext rCtx, VisitableCommand rCommand, Throwable t) throws Throwable {
Throwable ce = t;
while (ce instanceof RemoteException) {
ce = ce.getCause();
}
TopologyAffectedCommand cmd = (TopologyAffectedCommand) rCommand;
final CacheTopology cacheTopology = distributionManager.getCacheTopology();
int currentTopologyId = cacheTopology.getTopologyId();
int requestedTopologyId;
if (ce instanceof SuspectException) {
// Read commands must ignore CacheNotFoundResponses
throw new IllegalStateException("Read commands must ignore leavers");
} else if (ce instanceof OutdatedTopologyException) {
logRetry(currentTopologyId, cmd);
// We can get OTE for dist reads even if current topology information is sufficient:
// 1. A has topology in phase READ_ALL_WRITE_ALL, sends message to both old owner B and new C
// 2. C has old topology with READ_OLD_WRITE_ALL, so it responds with UnsureResponse
// 3. C updates topology to READ_ALL_WRITE_ALL, B updates to READ_NEW_WRITE_ALL
// 4. B receives the read, but it already can't read: responds with UnsureResponse
// 5. A receives two unsure responses and throws OTE
// However, now we are sure that we can immediately retry the request, because C must have updated its topology
OutdatedTopologyException ote = (OutdatedTopologyException) ce;
requestedTopologyId = cmd.getTopologyId() + ote.topologyIdDelta;
} else if (ce instanceof AllOwnersLostException) {
if (getLog().isTraceEnabled())
getLog().tracef("All owners for command %s have been lost.", cmd);
         // During a partition the exception is already handled in PartitionHandlingInterceptor,
         // and if partition handling is not enabled, we have no choice but to return null.
requestedTopologyId = cmd.getTopologyId() + 1;
} else {
throw t;
}
// Only retry once if currentTopologyId > cmdTopologyId + 1
int retryTopologyId = Math.max(currentTopologyId, requestedTopologyId);
cmd.setTopologyId(retryTopologyId);
((FlagAffectedCommand) cmd).addFlags(FlagBitSets.COMMAND_RETRY);
if (retryTopologyId == currentTopologyId) {
return invokeNextAndHandle(rCtx, rCommand, handleReadCommandReturn);
} else {
return makeStage(asyncInvokeNext(rCtx, rCommand, stateTransferLock.transactionDataFuture(retryTopologyId)))
.andHandle(rCtx, rCommand, handleReadCommandReturn);
}
}
private Object handleReadCommandReturn(InvocationContext rCtx, VisitableCommand rCommand, Object rv, Throwable t)
throws Throwable {
if (t == null)
return rv;
// Separate method to allow for inlining of this method since exception should rarely occur
return handleExceptionOnReadCommandReturn(rCtx, rCommand, t);
}
protected int getNewTopologyId(Throwable ce, int currentTopologyId, TopologyAffectedCommand command) {
int requestedDelta;
if (ce instanceof OutdatedTopologyException) {
requestedDelta = ((OutdatedTopologyException) ce).topologyIdDelta;
} else {
// SuspectException
requestedDelta = 1;
}
return Math.max(currentTopologyId, command.getTopologyId() + requestedDelta);
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) throws Throwable {
return handleReadCommand(ctx, command);
}
@Override
public Object visitReadOnlyManyCommand(InvocationContext ctx, ReadOnlyManyCommand command) throws Throwable {
return handleReadCommand(ctx, command);
}
protected abstract Log getLog();
private static class CancellableRetry<T extends VisitableCommand> implements BiFunction<Void, Throwable, Void>, Runnable {
private static final AtomicReferenceFieldUpdater<CancellableRetry, Throwable> cancellableRetryUpdater
= AtomicReferenceFieldUpdater.newUpdater(CancellableRetry.class, Throwable.class, "cancelled");
private static final AtomicReferenceFieldUpdater<CancellableRetry, Object> timeoutFutureUpdater
= AtomicReferenceFieldUpdater.newUpdater(CancellableRetry.class, Object.class, "timeoutFuture");
private static final Log log = LogFactory.getLog(CancellableRetry.class);
private static final Throwable DUMMY = new Throwable("Command is retried"); // should not be ever thrown
private final T command;
private final int topologyId;
private volatile Throwable cancelled = null;
// retryFuture is not volatile because it is used only in the timeout handler = run()
// and that is scheduled after retryFuture is set
private CompletableFuture<Void> retryFuture;
// ScheduledFuture does not have any dummy implementations, so we'll use plain Object as the field
@SuppressWarnings("unused")
private volatile Object timeoutFuture;
CancellableRetry(T command, int topologyId) {
this.command = command;
this.topologyId = topologyId;
}
/**
* This is called when the topology future completes (successfully or exceptionally)
*/
@Override
public Void apply(Void nil, Throwable throwable) {
if (!timeoutFutureUpdater.compareAndSet(this, null, DUMMY)) {
((ScheduledFuture) timeoutFuture).cancel(false);
}
if (throwable != null) {
throw CompletableFutures.asCompletionException(throwable);
}
if (!cancellableRetryUpdater.compareAndSet(this, null, DUMMY)) {
log.tracef("Not retrying command %s as it has been cancelled.", command);
throw CompletableFutures.asCompletionException(cancelled);
}
log.tracef("Retrying command %s for topology %d", command, topologyId);
return null;
}
/**
* This is called when the timeout elapses.
*/
@Override
public void run() {
TimeoutException timeoutException = new TimeoutException("Timed out waiting for topology " + topologyId);
if (cancellableRetryUpdater.compareAndSet(this, null, timeoutException)) {
retryFuture.completeExceptionally(timeoutException);
}
}
void setRetryFuture(CompletableFuture<Void> retryFuture) {
this.retryFuture = retryFuture;
}
void setTimeoutFuture(ScheduledFuture<?> timeoutFuture) {
if (!timeoutFutureUpdater.compareAndSet(this, null, timeoutFuture)) {
timeoutFuture.cancel(false);
}
}
}
// We don't need to implement GetAllCommand or ReadManyCommand here because these don't throw AllOwnersLostException
protected static class LostDataVisitor extends AbstractVisitor {
public static final LostDataVisitor INSTANCE = new LostDataVisitor();
@Override
public Object visitGetKeyValueCommand(InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
return null;
}
@Override
public Object visitGetCacheEntryCommand(InvocationContext ctx, GetCacheEntryCommand command) throws Throwable {
return null;
}
@Override
public Object visitReadOnlyKeyCommand(InvocationContext ctx, ReadOnlyKeyCommand command) throws Throwable {
return command.performOnLostData();
}
}
}
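// Read-retry summary for handleExceptionOnReadCommandReturn above: a read that fails with
// OutdatedTopologyException or AllOwnersLostException is re-issued with
// topologyId = max(currentTopologyId, requestedTopologyId) plus the COMMAND_RETRY flag; when the
// required topology has not arrived yet, the retry is chained on
// stateTransferLock.transactionDataFuture(retryTopologyId).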
| 14,395
| 46.511551
| 135
|
java
|