infinispan-main/core/src/main/java/org/infinispan/distribution/ch/AffinityTaggedKey.java
package org.infinispan.distribution.ch;
public interface AffinityTaggedKey {
/**
* This numeric id is used exclusively for storage affinity in Infinispan.
*
* @return the segment id to be used for storage, or -1 to fall back to normal owner selection.
*/
int getAffinitySegmentId();
}
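
A minimal sketch of a key class implementing this interface; OrderKey and its fields are hypothetical, for illustration only, and not part of Infinispan.

import java.util.Objects;
import org.infinispan.distribution.ch.AffinityTaggedKey;

public final class OrderKey implements AffinityTaggedKey {
   private final String orderId;
   private final int affinitySegment; // -1 falls back to normal owner selection

   public OrderKey(String orderId, int affinitySegment) {
      this.orderId = orderId;
      this.affinitySegment = affinitySegment;
   }

   @Override
   public int getAffinitySegmentId() {
      return affinitySegment; // used exclusively for storage affinity
   }

   @Override
   public boolean equals(Object o) {
      return o instanceof OrderKey && ((OrderKey) o).orderId.equals(orderId);
   }

   @Override
   public int hashCode() {
      return Objects.hash(orderId);
   }
}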

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/KeyPartitioner.java
package org.infinispan.distribution.ch;
import java.util.function.ToIntFunction;
import org.infinispan.commons.configuration.attributes.Matchable;
import org.infinispan.configuration.cache.HashConfiguration;
/**
* Map keys to segments.
*
* @author Dan Berindei
* @since 8.2
*/
public interface KeyPartitioner extends Matchable<KeyPartitioner>, ToIntFunction<Object> {
/**
* Initialization.
*
* <p>The partitioner can also use injection to access other cache-level or global components.
* This method will be called before any other injection methods.</p>
*
* <p>Does not need to be thread-safe (Infinispan safely publishes the instance after initialization).</p>
* @param configuration the cache's hash configuration
*/
default void init(HashConfiguration configuration) {
// Do nothing
}
default void init(KeyPartitioner other) {
// Do nothing
}
@Override
default int applyAsInt(Object value) {
return getSegment(value);
}
/**
* Obtains the segment for a key.
*
* Must be thread-safe.
*/
int getSegment(Object key);
@Override
default boolean matches(KeyPartitioner other) {
if (this == other)
return true;
if (other == null || getClass() != other.getClass())
return false;
return true;
}
}
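
Below is a minimal sketch of a custom partitioner, assuming HashConfiguration.numSegments() exposes the configured segment count; the class is hypothetical and for illustration only.

import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;

public class ModuloKeyPartitioner implements KeyPartitioner {
   private int numSegments = 1;

   @Override
   public void init(HashConfiguration configuration) {
      // Called once, before the instance is published to other threads.
      numSegments = configuration.numSegments();
   }

   @Override
   public int getSegment(Object key) {
      // Thread-safe: no mutable state after init().
      return (key.hashCode() & Integer.MAX_VALUE) % numSegments;
   }
}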

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/ConsistentHash.java
package org.infinispan.distribution.ch;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.UnaryOperator;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.remoting.transport.Address;
/**
* A consistent hash algorithm implementation. Implementations would typically be constructed via a
* {@link ConsistentHashFactory}.
*
* A consistent hash assigns each key a list of owners; the number of owners is defined at creation time,
* but the consistent hash is free to return a smaller or a larger number of owners, depending on
* circumstances.
*
* The first element in the list of owners is the "primary owner". The other owners are called "backup owners".
* Some implementations guarantee that there will always be a primary owner, others do not.
*
* This interface gives access to some implementation details of the consistent hash.
*
* Our consistent hashes work by splitting the hash space (the set of possible hash codes) into
* fixed segments and then assigning those segments to nodes dynamically. The number of segments
* is defined at creation time, and the mapping of keys to segments never changes.
* The mapping of segments to nodes can change as the membership of the cache changes.
*
* Normally application code doesn't need to know about this implementation detail, but some
* applications may benefit from the knowledge that all the keys that map to one segment are
* always located on the same server.
*
* @see <a href="https://community.jboss.org/wiki/Non-BlockingStateTransferV2">Non-BlockingStateTransferV2</a>
*
* @author Manik Surtani
* @author Mircea.Markus@jboss.com
* @author Dan Berindei
* @author anistor@redhat.com
* @since 4.0
*/
public interface ConsistentHash {
/**
* @return The actual number of hash space segments. Note that it may not be the same as the number
* of segments passed in at creation time.
*/
int getNumSegments();
/**
* Should return the addresses of the nodes used to create this consistent hash.
*
* @return set of node addresses.
*/
List<Address> getMembers();
/**
* @return All the nodes that own a given hash space segment, first address is the primary owner. The returned list is unmodifiable.
*/
List<Address> locateOwnersForSegment(int segmentId);
/**
* @return The primary owner of a given hash space segment. This is equivalent to {@code locateOwnersForSegment(segmentId).get(0)} but is more efficient.
*/
Address locatePrimaryOwnerForSegment(int segmentId);
/**
* Check if a segment is local to a given member.
*
* <p>Implementation note: normally key-based methods are implemented on top of segment-based methods.
* Here, however, we need a default implementation for the segment-based method for
* backwards-compatibility reasons.</p>
*
* @since 8.2
*/
default boolean isSegmentLocalToNode(Address nodeAddress, int segmentId) {
return locateOwnersForSegment(segmentId).contains(nodeAddress);
}
/**
* @return {@code true} if every member owns every segment. This allows callers to skip computing the
* segment of a key in some cases.
*/
default boolean isReplicated() {
// Returning true is only an optimization, so it's ok to return false by default.
return false;
}
/**
* Returns the segments owned by a cache member.
*
* @param owner the address of the member
* @return a non-null set of segment IDs; it may or may not be unmodifiable, and must not be modified by the caller.
* The set is empty if {@code owner} is not a member of the consistent hash.
*/
Set<Integer> getSegmentsForOwner(Address owner);
/**
* Returns the segments that this cache member is the primary owner for.
* @param owner the address of the member
* @return a non-null set of segment IDs; it may or may not be unmodifiable, and must not be modified by the caller.
* The set is empty if {@code owner} is not a member of the consistent hash.
*/
Set<Integer> getPrimarySegmentsForOwner(Address owner);
/**
* Returns a string containing all the segments and their associated addresses.
*/
String getRoutingTableAsString();
/**
* Writes this ConsistentHash to the specified scoped state. Before invoking this method, the ConsistentHash
* addresses will have to be replaced with their corresponding {@link org.infinispan.topology.PersistentUUID}s.
*
* @param state the state to which this ConsistentHash will be written
*/
default void toScopedState(ScopedPersistentState state) {
throw new UnsupportedOperationException();
}
/**
* Returns a new ConsistentHash with the addresses remapped according to the provided {@link UnaryOperator}.
* If an address cannot be remapped (i.e., the remapper returns null), this method should return null.
*
* @param remapper the remapper which, given an address, replaces it with another one
* @return the remapped ConsistentHash or null if one of the remapped addresses is null
*/
default ConsistentHash remapAddresses(UnaryOperator<Address> remapper) {
throw new UnsupportedOperationException();
}
/**
* Same as {@link #remapAddresses(UnaryOperator)} but skips missing members.
*
* @param remapper the remapper which, given an address, replaces it with another one
* @return the remapped ConsistentHash
*/
default ConsistentHash remapAddressRemoveMissing(UnaryOperator<Address> remapper) {
throw new UnsupportedOperationException();
}
/**
* The capacity factor of each member. Determines the relative capacity of each node compared to the others.
* If {@code null}, all the members are assumed to have a capacity factor of 1.
*/
default Map<Address, Float> getCapacityFactors() {
return null;
}
}
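
A short sketch of how these pieces fit together: the KeyPartitioner maps a key to a segment (a mapping that never changes), and the ConsistentHash maps that segment to its current owners. The helper below is hypothetical, not Infinispan API.

// Hypothetical helper, not Infinispan API.
static Address primaryOwnerOf(Object key, KeyPartitioner partitioner, ConsistentHash ch) {
   int segment = partitioner.getSegment(key);                   // key -> segment, never changes
   List<Address> owners = ch.locateOwnersForSegment(segment);   // segment -> nodes, changes with membership
   return owners.get(0);                                        // the first owner is the primary owner
}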

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/ConsistentHashFactory.java
package org.infinispan.distribution.ch;
import java.util.List;
import java.util.Map;
import org.infinispan.commons.hash.Hash;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.remoting.transport.Address;
/**
* Factory for {@link ConsistentHash} instances.
*
* <p>We say a consistent hash {@code ch} is <em>balanced</em> iff {@code rebalance(ch).equals(ch)}.
*
* <p>The consistent hashes created by {@link #create(int, int, List, Map)} must be balanced,
* but the ones created by {@link #updateMembers(ConsistentHash, List, Map)} and
* {@link #union(ConsistentHash, ConsistentHash)} will likely be unbalanced.
*
* @see <a href="https://community.jboss.org/wiki/Non-BlockingStateTransferV2">Non-BlockingStateTransferV2</a>
*
* @author Dan Berindei
* @since 5.2
* @deprecated Since 11.0. Will be removed in 14.0, the segment allocation will no longer be customizable.
*/
@Deprecated
public interface ConsistentHashFactory<CH extends ConsistentHash> {
/**
* Create a new consistent hash instance.
*
* The consistent hash will be <em>balanced</em>.
*
* @param numOwners The ideal number of owners for each key. The created consistent hash
* can have more or less owners, but each key will have at least one owner.
* @param numSegments Number of hash-space segments. The implementation may round up the number
* of segments for performance, or may ignore the parameter altogether.
* @param members A list of addresses representing the new cache members.
* @param capacityFactors The capacity factor of each member. Determines the relative capacity of each node compared
* to the others. The implementation may ignore this parameter.
* If {@code null}, all the members are assumed to have a capacity factor of 1.
* @deprecated since 11.0. hashFunction is ignored, use {@link #create(int, int, List, Map)} instead.
*/
@Deprecated
default CH create(Hash hashFunction, int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors) {
return create(numOwners, numSegments, members, capacityFactors);
}
/**
* Create a new consistent hash instance.
*
* The consistent hash will be <em>balanced</em>.
*
* @param numOwners The ideal number of owners for each key. The created consistent hash
* can have more or less owners, but each key will have at least one owner.
* @param numSegments Number of hash-space segments. The implementation may round up the number
* of segments for performance, or may ignore the parameter altogether.
* @param members A list of addresses representing the new cache members.
* @param capacityFactors The capacity factor of each member. Determines the relative capacity of each node compared
* to the others. The implementation may ignore this parameter.
* If {@code null}, all the members are assumed to have a capacity factor of 1.
*/
CH create(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors);
/**
* Updates an existing consistent hash instance to remove owners that are not in the {@code newMembers} list.
*
* <p>If a segment has at least one owner in {@code newMembers}, this method will not add another owner.
* This guarantees that the new consistent hash can be used immediately, without transferring any state.
*
* <p>If a segment has no owners in {@code newMembers} and the {@link ConsistentHash} implementation
* (e.g. {@link org.infinispan.distribution.ch.impl.DefaultConsistentHash}) requires
* at least one owner for each segment, this method may add one or more owners for that segment.
* Since the data in that segment was lost, the new consistent hash can still be used without transferring state.
*
* @param baseCH An existing consistent hash instance, should not be {@code null}
* @param newMembers A list of addresses representing the new cache members.
* @param capacityFactors The capacity factor of each member. Determines the relative capacity of each node compared
* to the others. The implementation may ignore this parameter.
* If {@code null}, all the members are assumed to have a capacity factor of 1.
* @return A new {@link ConsistentHash} instance, or {@code baseCH} if the existing instance
* does not need any changes.
*/
CH updateMembers(CH baseCH, List<Address> newMembers, Map<Address, Float> capacityFactors);
/**
* Create a new consistent hash instance, based on an existing instance, but <em>balanced</em> according to
* the implementation's rules.
*
* @param baseCH An existing consistent hash instance, should not be {@code null}
* @return A new {@link ConsistentHash} instance, or {@code baseCH} if the existing instance
* does not need any changes.
*/
CH rebalance(CH baseCH);
/**
* Creates a union of two compatible ConsistentHashes (i.e. they use the same hashing function and have the
* same configuration parameters).
*
* <p>The owners of a segment {@code s} in {@code union(ch1, ch2)} will include both the owners of {@code s}
* in {@code ch1} and the owners of {@code s} in {@code ch2}, so a cache can switch from using
* {@code union(ch1, ch2)} to using {@code ch2} without transferring any state.
*/
CH union(CH ch1, CH ch2);
/**
* Recreates a ConsistentHash from a previously stored persistent state. The returned ConsistentHash will not have
* proper addresses, but {@link org.infinispan.topology.PersistentUUID}s instead so they will need to be replaced
* @param state the state to restore
*/
default CH fromPersistentState(ScopedPersistentState state) {
throw new UnsupportedOperationException();
}
}
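
A sketch of the factory lifecycle described above, using SyncConsistentHashFactory and DefaultConsistentHash (both shown later in this section); the member lists are assumptions. create() yields a balanced hash, updateMembers() prunes leavers without requiring state transfer, and rebalance() restores balance.

// Hypothetical usage; `members` and `remaining` are assumed lists of Address.
static DefaultConsistentHash rebuildAfterLeave(List<Address> members, List<Address> remaining) {
   SyncConsistentHashFactory factory = new SyncConsistentHashFactory();
   DefaultConsistentHash ch = factory.create(2, 256, members, null);          // balanced by contract
   DefaultConsistentHash pruned = factory.updateMembers(ch, remaining, null); // usable without state transfer
   return factory.rebalance(pruned);                                          // balanced again; may move segments
}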

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/DefaultConsistentHashFactory.java
package org.infinispan.distribution.ch.impl;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
/**
* Default implementation of {@link ConsistentHashFactory}.
*
* All methods except {@link #union(DefaultConsistentHash, DefaultConsistentHash)} return a consistent hash
* with floor(numOwners*numSegments/numNodes) <= segments per owner <= ceil(numOwners*numSegments/numNodes).
*
* @author Dan Berindei
* @author anistor@redhat.com
* @since 5.2
*/
public class DefaultConsistentHashFactory extends AbstractConsistentHashFactory<DefaultConsistentHash> {
@Override
public DefaultConsistentHash create(int numOwners, int numSegments,
List<Address> members, Map<Address, Float> capacityFactors) {
if (members.size() == 0)
throw new IllegalArgumentException("Can't construct a consistent hash without any members");
if (numOwners <= 0)
throw new IllegalArgumentException("The number of owners should be greater than 0");
checkCapacityFactors(members, capacityFactors);
// Use the CH rebalance algorithm to get an even spread
// Round robin doesn't work properly because a segment's owners must be unique.
Builder builder = new Builder(numOwners, numSegments, members, capacityFactors);
rebalanceBuilder(builder);
return builder.build();
}
@Override
public DefaultConsistentHash fromPersistentState(ScopedPersistentState state) {
String consistentHashClass = state.getProperty("consistentHash");
if (!DefaultConsistentHash.class.getName().equals(consistentHashClass))
throw CONTAINER.persistentConsistentHashMismatch(this.getClass().getName(), consistentHashClass);
return new DefaultConsistentHash(state);
}
/**
* Leavers are removed and segments without owners are assigned new owners. Joiners might get some of the un-owned
* segments but otherwise they are not taken into account (that should happen during a rebalance).
*
* @param baseCH An existing consistent hash instance, should not be {@code null}
* @param actualMembers A list of addresses representing the new cache members.
* @return A new {@link DefaultConsistentHash} instance, or {@code baseCH} if no changes were needed.
*/
@Override
public DefaultConsistentHash updateMembers(DefaultConsistentHash baseCH, List<Address> actualMembers,
Map<Address, Float> actualCapacityFactors) {
if (actualMembers.size() == 0)
throw new IllegalArgumentException("Can't construct a consistent hash without any members");
checkCapacityFactors(actualMembers, actualCapacityFactors);
boolean sameCapacityFactors = actualCapacityFactors == null ? baseCH.getCapacityFactors() == null :
actualCapacityFactors.equals(baseCH.getCapacityFactors());
if (actualMembers.equals(baseCH.getMembers()) && sameCapacityFactors)
return baseCH;
// The builder constructor automatically removes leavers
Builder builder = new Builder(baseCH, actualMembers, actualCapacityFactors);
// If there are segments with 0 owners, fix them
// Try to assign the same owners for those segments as a future rebalance call would.
Builder balancedBuilder = null;
for (int segment = 0; segment < baseCH.getNumSegments(); segment++) {
if (builder.getOwners(segment).isEmpty()) {
if (balancedBuilder == null) {
balancedBuilder = new Builder(builder);
rebalanceBuilder(balancedBuilder);
}
builder.addOwners(segment, balancedBuilder.getOwners(segment));
}
}
return builder.build();
}
@Override
public DefaultConsistentHash rebalance(DefaultConsistentHash baseCH) {
// This method assigns new owners to the segments so that
// * num_owners(s) == numOwners, for each segment s
// * floor(numSegments/numNodes) <= num_segments_primary_owned(n) for each node n
// * num_segments_primary_owned(n) <= ceil(numSegments/numNodes) for each node n
// * floor(numSegments*numOwners/numNodes) <= num_segments_owned(n) for each node n
// * num_segments_owned(n) <= ceil(numSegments*numOwners/numNodes) for each node n
Builder builder = new Builder(baseCH);
rebalanceBuilder(builder);
DefaultConsistentHash balancedCH = builder.build();
// we should return the base CH if we didn't change anything
return balancedCH.equals(baseCH) ? baseCH : balancedCH;
}
/**
* Merges two consistent hash objects that have the same number of segments, numOwners and hash function.
* For each segment, the primary owner of the first CH has priority, the other primary owners become backups.
*/
@Override
public DefaultConsistentHash union(DefaultConsistentHash dch1, DefaultConsistentHash dch2) {
return dch1.union(dch2);
}
protected void rebalanceBuilder(Builder builder) {
addPrimaryOwners(builder);
addBackupOwners(builder);
}
protected void addPrimaryOwners(Builder builder) {
addFirstOwner(builder);
// 1. Try to replace primary owners with too many segments with the backups in those segments.
swapPrimaryOwnersWithBackups(builder);
// 2. For segments that don't have enough owners, try to add a new owner as the primary owner.
int actualNumOwners = builder.getActualNumOwners();
replacePrimaryOwners(builder, actualNumOwners);
// 3. If some primary owners still have too many segments, allow adding an extra owner as the primary owner.
// Since a segment only has 1 primary owner, this will be enough to give us a "proper" primary owner
// for each segment.
replacePrimaryOwners(builder, actualNumOwners + 1);
}
private void addFirstOwner(Builder builder) {
for (int segment = 0; segment < builder.getNumSegments(); segment++) {
if (builder.getOwners(segment).size() > 0)
continue;
Address newPrimary = findNewPrimaryOwner(builder, builder.getMembers(), null);
if (newPrimary != null) {
builder.addPrimaryOwner(segment, newPrimary);
}
}
}
protected void replacePrimaryOwners(Builder builder, int maxOwners) {
// Find the node with the worst primary-owned-segments-to-capacity ratio W.
// Iterate over all the segments primary-owned by W, and if possible replace it with another node.
// After replacing, check that W is still the worst node. If not, repeat with the new worst node.
// Ideally we would keep track of the segments where we already replaced the primary owner, so we don't do it twice.
boolean primaryOwnerReplaced = true;
while (primaryOwnerReplaced) {
Address worstNode = findWorstPrimaryOwner(builder, builder.getMembers());
primaryOwnerReplaced = false;
for (int segment = builder.getNumSegments() - 1; segment >= 0; segment--) {
if (builder.getOwners(segment).size() >= maxOwners)
continue;
// Only replace if the worst node is the primary owner
if (!builder.getPrimaryOwner(segment).equals(worstNode))
continue;
Address newPrimary = findNewPrimaryOwner(builder, builder.getMembers(), worstNode);
if (newPrimary != null && !builder.getOwners(segment).contains(newPrimary)) {
builder.addPrimaryOwner(segment, newPrimary);
primaryOwnerReplaced = true;
worstNode = findWorstPrimaryOwner(builder, builder.getMembers());
}
}
}
}
protected void swapPrimaryOwnersWithBackups(Builder builder) {
// If a segment has primaryOwned(primaryOwner(segment)) > maxPrimarySegments,
// try to swap the primary owner with one of the backup owners.
// The new primary owner must primary-own < minPrimarySegments segments.
// Iterate in reverse order so the CH looks more stable in the logs as we add nodes
for (int segment = builder.getNumSegments() - 1; segment >= 0; segment--) {
if (builder.getOwners(segment).isEmpty())
continue;
Address primaryOwner = builder.getPrimaryOwner(segment);
Address newPrimary = findNewPrimaryOwner(builder, builder.getBackupOwners(segment), primaryOwner);
if (newPrimary != null) {
// actually replaces the primary owner
builder.replacePrimaryOwnerWithBackup(segment, newPrimary);
}
}
}
protected void addBackupOwners(Builder builder) {
// 1. Remove extra owners (could be leftovers from addPrimaryOwners).
removeExtraBackupOwners(builder);
// 2. If owners(segment) < numOwners, add new owners.
// We always add the member that is not an owner already and has the best owned-segments-to-capacity ratio.
doAddBackupOwners(builder);
// 3. Now owners(segment) == numOwners for every segment because of steps 1 and 2.
// For each owner, if there exists a non-owner with a better owned-segments-to-capacity-ratio, replace it.
replaceBackupOwners(builder);
}
protected void removeExtraBackupOwners(Builder builder) {
// Find the node with the worst segments-to-capacity ratio, and replace it in one of the owner lists
// Repeat with the next-worst node, and so on.
List<Address> untestedNodes = new ArrayList<Address>(builder.getMembers());
while (!untestedNodes.isEmpty()) {
boolean ownerRemoved = false;
Address worstNode = findWorstBackupOwner(builder, untestedNodes);
// Iterate in reverse order so the CH looks more stable in the logs as we add nodes
for (int segment = builder.getNumSegments() - 1; segment >= 0; segment--) {
List<Address> owners = builder.getOwners(segment);
if (owners.size() <= builder.getActualNumOwners())
continue;
int ownerIdx = owners.indexOf(worstNode);
// Don't remove the primary
if (ownerIdx > 0) {
builder.removeOwner(segment, worstNode);
ownerRemoved = true;
// The worst node might have changed.
untestedNodes = new ArrayList<Address>(builder.getMembers());
worstNode = findWorstBackupOwner(builder, untestedNodes);
}
}
if (!ownerRemoved) {
untestedNodes.remove(worstNode);
}
}
}
/**
* @return The worst backup owner, or {@code null} if the remaining nodes own 0 segments.
*/
private Address findWorstBackupOwner(Builder builder, List<Address> nodes) {
Address worst = null;
float maxSegmentsPerCapacity = -1;
for (Address owner : nodes) {
float capacityFactor = builder.getCapacityFactor(owner);
if (worst == null || builder.getOwned(owner) - 1 >= capacityFactor * maxSegmentsPerCapacity) {
worst = owner;
maxSegmentsPerCapacity = capacityFactor != 0 ? (builder.getOwned(owner) - 1) / capacityFactor : 0;
}
}
return worst;
}
protected void doAddBackupOwners(Builder builder) {
for (int segment = 0; segment < builder.getNumSegments(); segment++) {
List<Address> owners = builder.getOwners(segment);
while (owners.size() < builder.getActualNumOwners()) {
Address newOwner = findNewBackupOwner(builder, owners, null);
builder.addOwner(segment, newOwner);
}
}
}
protected void replaceBackupOwners(Builder builder) {
// Find the node with the worst segments-to-capacity ratio, and replace it in one of the owner lists.
// If it's not possible to replace any owner with the worst node, remove the worst from the untested nodes
// list and try with the new worst, repeating as necessary. After replacing one owner,
// go back to the original untested nodes list.
List<Address> untestedNodes = new ArrayList<Address>(builder.getMembers());
while (!untestedNodes.isEmpty()) {
Address worstNode = findWorstBackupOwner(builder, untestedNodes);
boolean backupOwnerReplaced = false;
// Iterate in reverse order so the CH looks more stable in the logs as we add nodes
for (int segment = builder.getNumSegments() - 1; segment >= 0; segment--) {
List<Address> owners = builder.getOwners(segment);
int ownerIdx = owners.indexOf(worstNode);
// Don't replace the primary
if (ownerIdx <= 0)
continue;
// Surely there is a better node to replace this owner with...
Address replacement = findNewBackupOwner(builder, owners, worstNode);
if (replacement != null) {
//log.tracef("Segment %3d: replacing owner %s with %s", segment, worstNode, replacement);
builder.removeOwner(segment, worstNode);
builder.addOwner(segment, replacement);
backupOwnerReplaced = true;
// The worst node might have changed.
untestedNodes = new ArrayList<Address>(builder.getMembers());
worstNode = findWorstBackupOwner(builder, untestedNodes);
}
}
if (!backupOwnerReplaced) {
untestedNodes.remove(worstNode);
}
}
}
/**
* @return The member with the best (lowest) owned-segments-to-capacity ratio that is not in the excludes list, or {@code null} if no candidate is better than the current owner.
*/
protected Address findNewBackupOwner(Builder builder, Collection<Address> excludes, Address owner) {
// We want the owned/capacity ratio of the actual owner after removing the current segment to be bigger
// than the owned/capacity ratio of the new owner after adding the current segment, so that a future pass
// won't try to switch them back.
Address best = null;
float initialCapacityFactor = owner != null ? builder.getCapacityFactor(owner) : 0;
float bestSegmentsPerCapacity = initialCapacityFactor != 0 ? (builder.getOwned(owner) - 1 ) / initialCapacityFactor :
Float.MAX_VALUE;
for (Address candidate : builder.getMembers()) {
if (excludes == null || !excludes.contains(candidate)) {
int owned = builder.getOwned(candidate);
float capacityFactor = builder.getCapacityFactor(candidate);
if ((owned + 1) <= capacityFactor * bestSegmentsPerCapacity) {
best = candidate;
bestSegmentsPerCapacity = (owned + 1) / capacityFactor;
}
}
}
return best;
}
protected static class Builder extends AbstractConsistentHashFactory.Builder {
private final int initialNumOwners;
private final int actualNumOwners;
private final List<Address>[] segmentOwners;
public Builder(int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors) {
super(new OwnershipStatistics(members), members, capacityFactors);
this.initialNumOwners = numOwners;
this.actualNumOwners = computeActualNumOwners(numOwners, members, capacityFactors);
this.segmentOwners = new List[numSegments];
for (int segment = 0; segment < numSegments; segment++) {
segmentOwners[segment] = new ArrayList<Address>(actualNumOwners);
}
}
public Builder(DefaultConsistentHash baseCH, List<Address> actualMembers,
Map<Address, Float> actualCapacityFactors) {
super(new OwnershipStatistics(baseCH, actualMembers), actualMembers, actualCapacityFactors);
int numSegments = baseCH.getNumSegments();
Set<Address> actualMembersSet = new HashSet<Address>(actualMembers);
List[] owners = new List[numSegments];
for (int segment = 0; segment < numSegments; segment++) {
owners[segment] = new ArrayList<>(baseCH.locateOwnersForSegment(segment));
owners[segment].retainAll(actualMembersSet);
}
this.initialNumOwners = baseCH.getNumOwners();
this.actualNumOwners = computeActualNumOwners(initialNumOwners, actualMembers, actualCapacityFactors);
this.segmentOwners = owners;
}
public Builder(DefaultConsistentHash baseCH) {
this(baseCH, baseCH.getMembers(), baseCH.getCapacityFactors());
}
public Builder(Builder other) {
super(other);
int numSegments = other.getNumSegments();
List[] owners = new List[numSegments];
for (int segment = 0; segment < numSegments; segment++) {
owners[segment] = new ArrayList<Address>(other.segmentOwners[segment]);
}
this.initialNumOwners = other.initialNumOwners;
this.actualNumOwners = other.actualNumOwners;
this.segmentOwners = owners;
}
public int getActualNumOwners() {
return actualNumOwners;
}
public int getNumSegments() {
return segmentOwners.length;
}
public List<Address> getOwners(int segment) {
return segmentOwners[segment];
}
public Address getPrimaryOwner(int segment) {
return segmentOwners[segment].get(0);
}
public List<Address> getBackupOwners(int segment) {
return segmentOwners[segment].subList(1, segmentOwners[segment].size());
}
public boolean addOwner(int segment, Address owner) {
modCount++;
List<Address> thisSegmentOwners = segmentOwners[segment];
if (thisSegmentOwners.contains(owner))
return false;
thisSegmentOwners.add(owner);
stats.incOwned(owner);
if (thisSegmentOwners.size() == 1) {
// the first owner
stats.incPrimaryOwned(owner);
}
return true;
}
public boolean addOwners(int segment, Collection<Address> newOwners) {
boolean modified = false;
for (Address owner : newOwners) {
modified |= addOwner(segment, owner);
}
return modified;
}
public boolean removeOwner(int segment, Address owner) {
modCount++;
List<Address> segmentOwners = this.segmentOwners[segment];
if (segmentOwners.get(0).equals(owner)) {
stats.decPrimaryOwned(owner);
}
boolean modified = segmentOwners.remove(owner);
if (modified) {
stats.decOwned(owner);
}
return modified;
}
public void addPrimaryOwner(int segment, Address newPrimaryOwner) {
modCount++;
List<Address> segmentOwners = this.segmentOwners[segment];
int ownerIndex = segmentOwners.indexOf(newPrimaryOwner);
if (ownerIndex >= 0) {
throw new IllegalArgumentException("The new primary owner must not be a backup already");
}
if (!segmentOwners.isEmpty()) {
Address oldPrimaryOwner = segmentOwners.get(0);
stats.decPrimaryOwned(oldPrimaryOwner);
}
segmentOwners.add(0, newPrimaryOwner);
stats.incOwned(newPrimaryOwner);
stats.incPrimaryOwned(newPrimaryOwner);
}
public void replacePrimaryOwnerWithBackup(int segment, Address newPrimaryOwner) {
modCount++;
List<Address> segmentOwners = this.segmentOwners[segment];
int ownerIndex = segmentOwners.indexOf(newPrimaryOwner);
if (ownerIndex <= 0) {
throw new IllegalArgumentException("The new primary owner must already be a backup owner");
}
Address oldPrimaryOwner = segmentOwners.get(0);
stats.decPrimaryOwned(oldPrimaryOwner);
segmentOwners.remove(ownerIndex);
segmentOwners.add(0, newPrimaryOwner);
stats.incPrimaryOwned(newPrimaryOwner);
}
public DefaultConsistentHash build() {
return new DefaultConsistentHash(initialNumOwners, segmentOwners.length, members, capacityFactors,
segmentOwners);
}
public int getPrimaryOwned(Address node) {
return stats.getPrimaryOwned(node);
}
public int getOwned(Address node) {
return stats.getOwned(node);
}
public int computeActualNumOwners(int numOwners, List<Address> members, Map<Address, Float> capacityFactors) {
int nodesWithLoad = members.size();
if (capacityFactors != null) {
nodesWithLoad = 0;
for (Address node : members) {
if (capacityFactors.get(node) != 0) {
nodesWithLoad++;
}
}
}
return Math.min(numOwners, nodesWithLoad);
}
}
@Override
public boolean equals(Object other) {
return other != null && other.getClass() == getClass();
}
@Override
public int hashCode() {
return 3853;
}
public static class Externalizer extends AbstractExternalizer<DefaultConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, DefaultConsistentHashFactory chf) throws IOException {
}
@Override
@SuppressWarnings("unchecked")
public DefaultConsistentHashFactory readObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
return new DefaultConsistentHashFactory();
}
@Override
public Integer getId() {
return Ids.DEFAULT_CONSISTENT_HASH_FACTORY;
}
@Override
public Set<Class<? extends DefaultConsistentHashFactory>> getTypeClasses() {
return Collections.singleton(DefaultConsistentHashFactory.class);
}
}
}
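
A sketch that exercises the balance bounds stated in the class javadoc, using OwnershipStatistics (shown later in this section). With numOwners = 2, numSegments = 256 and 3 nodes of equal capacity, floor(2*256/3) = 170 and ceil(2*256/3) = 171; the members list is an assumption.

// Hypothetical check; `members` is assumed to hold 3 addresses with equal capacity.
static void checkBalanceBounds(List<Address> members) {
   DefaultConsistentHash ch = new DefaultConsistentHashFactory().create(2, 256, members, null);
   OwnershipStatistics stats = new OwnershipStatistics(ch);
   for (Address node : ch.getMembers()) {
      int owned = stats.getOwned(node);
      assert 170 <= owned && owned <= 171 : node + " owns " + owned + " segments";
   }
}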

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/TopologyAwareSyncConsistentHashFactory.java
package org.infinispan.distribution.ch.impl;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.distribution.topologyaware.TopologyInfo;
import org.infinispan.distribution.topologyaware.TopologyLevel;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
/**
* A {@link org.infinispan.distribution.ch.ConsistentHashFactory} implementation that guarantees caches
* with the same members have the same consistent hash and also tries to distribute segments based on the
* topology information in {@link org.infinispan.configuration.global.TransportConfiguration}.
* <p/>
* It has a drawback compared to {@link org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory}:
* it can potentially move a lot more segments during a rebalance than strictly necessary.
* <p/>
* Using the {@code TopologyAwareSyncConsistentHashFactory} with a very small number of segments is not
* recommended. The distribution of segments to owners improves with a higher number of segments, and is
* especially bad when {@code numSegments < numNodes}.
*
* @author Dan Berindei
* @since 5.2
*/
public class TopologyAwareSyncConsistentHashFactory extends SyncConsistentHashFactory {
@Override
protected Builder createBuilder(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
return new Builder(numOwners, numSegments, members, capacityFactors);
}
protected static class Builder extends SyncConsistentHashFactory.Builder {
final Map<Address, Float> capacityFactorsMap;
protected final TopologyInfo topologyInfo;
// Speed up the site/rack/machine checks by mapping each to an integer
// and comparing only integers in nodeCanOwnSegment()
final int numSites;
final int numRacks;
final int numMachines;
final int[] siteLookup;
final int[] rackLookup;
final int[] machineLookup;
final int[][] ownerSiteIndices;
final int[][] ownerRackIndices;
final int[][] ownerMachineIndices;
protected Builder(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
super(numOwners, numSegments, members, capacityFactors);
capacityFactorsMap = capacityFactors;
topologyInfo = new TopologyInfo(numSegments, this.actualNumOwners, members, capacityFactors);
numSites = topologyInfo.getDistinctLocationsCount(TopologyLevel.SITE);
numRacks = topologyInfo.getDistinctLocationsCount(TopologyLevel.RACK);
numMachines = topologyInfo.getDistinctLocationsCount(TopologyLevel.MACHINE);
siteLookup = new int[numNodes];
rackLookup = new int[numNodes];
machineLookup = new int[numNodes];
for (int n = 0; n < numNodes; n++) {
Address address = sortedMembers.get(n);
siteLookup[n] = topologyInfo.getSiteIndex(address);
rackLookup[n] = topologyInfo.getRackIndex(address);
machineLookup[n] = topologyInfo.getMachineIndex(address);
}
ownerSiteIndices = new int[numSegments][];
ownerRackIndices = new int[numSegments][];
ownerMachineIndices = new int[numSegments][];
for (int s = 0; s < numSegments; s++) {
ownerSiteIndices[s] = new int[actualNumOwners];
ownerRackIndices[s] = new int[actualNumOwners];
ownerMachineIndices[s] = new int[actualNumOwners];
}
}
@Override
int[] computeExpectedSegments(int expectedOwners, float totalCapacity, int iteration) {
TopologyInfo topologyInfo = new TopologyInfo(numSegments, expectedOwners, sortedMembers, capacityFactorsMap);
int[] expectedSegments = new int[numNodes];
float averageSegments = (float) numSegments * expectedOwners / numNodes;
for (int n = 0; n < numNodes; n++) {
float idealOwnedSegments = topologyInfo.getExpectedOwnedSegments(sortedMembers.get(n));
expectedSegments[n] = fudgeExpectedSegments(idealOwnedSegments, averageSegments, iteration);
}
return expectedSegments;
}
@Override
boolean nodeCanOwnSegment(int segment, int ownerPosition, int nodeIndex) {
if (ownerPosition == 0) {
return true;
} else if (ownerPosition < numSites) {
// Must be different site
return !intArrayContains(ownerSiteIndices[segment], ownerPosition, siteLookup[nodeIndex]);
} else if (ownerPosition < numRacks) {
// Must be different rack
return !intArrayContains(ownerRackIndices[segment], ownerPosition, rackLookup[nodeIndex]);
} else if (ownerPosition < numMachines) {
// Must be different machine
return !intArrayContains(ownerMachineIndices[segment], ownerPosition, machineLookup[nodeIndex]);
} else {
// Must be different nodes
return !intArrayContains(ownerIndices[segment], ownerPosition, nodeIndex);
}
}
@Override
protected void assignOwner(int segment, int ownerPosition, int nodeIndex,
int[] nodeSegmentsWanted) {
super.assignOwner(segment, ownerPosition, nodeIndex, nodeSegmentsWanted);
ownerSiteIndices[segment][ownerPosition] = siteLookup[nodeIndex];
ownerRackIndices[segment][ownerPosition] = rackLookup[nodeIndex];
ownerMachineIndices[segment][ownerPosition] = machineLookup[nodeIndex];
}
}
public static class Externalizer extends AbstractExternalizer<TopologyAwareSyncConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, TopologyAwareSyncConsistentHashFactory chf) {
}
@Override
public TopologyAwareSyncConsistentHashFactory readObject(ObjectInput unmarshaller) {
return new TopologyAwareSyncConsistentHashFactory();
}
@Override
public Integer getId() {
return Ids.TOPOLOGY_AWARE_SYNC_CONSISTENT_HASH_FACTORY;
}
@Override
public Set<Class<? extends TopologyAwareSyncConsistentHashFactory>> getTypeClasses() {
return Collections.singleton(TopologyAwareSyncConsistentHashFactory.class);
}
}
}

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/CRC16HashFunctionPartitioner.java
package org.infinispan.distribution.ch.impl;
import org.infinispan.commons.hash.CRC16;
import org.infinispan.commons.hash.Hash;
/**
* Implementation of {@link HashFunctionPartitioner} using {@link CRC16}.
*
* @since 15.0
* @see HashFunctionPartitioner
*/
public class CRC16HashFunctionPartitioner extends HashFunctionPartitioner {
@Override
protected Hash getHash() {
return CRC16.getInstance();
}
}

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/KeyPartitionerDelegate.java
package org.infinispan.distribution.ch.impl;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.persistence.manager.PersistenceManager.StoreChangeListener;
import org.infinispan.persistence.manager.PersistenceStatus;
/**
* A {@link KeyPartitioner} delegate that returns segment 0 while segments are not needed, and switches to the real partitioner once the configuration or an added segmented store requires them.
* @since 13.0
*/
class KeyPartitionerDelegate implements KeyPartitioner, StoreChangeListener {
private final KeyPartitioner keyPartitioner;
private volatile boolean needSegments;
public KeyPartitionerDelegate(KeyPartitioner keyPartitioner, Configuration configuration) {
this.keyPartitioner = keyPartitioner;
this.needSegments = Configurations.needSegments(configuration);
}
@Override
public int getSegment(Object key) {
return needSegments ? keyPartitioner.getSegment(key) : 0;
}
@Override
public void storeChanged(PersistenceStatus persistenceStatus) {
synchronized (this) {
needSegments = needSegments || persistenceStatus.usingSegmentedStore();
}
}
}

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/AbstractConsistentHashFactory.java
package org.infinispan.distribution.ch.impl;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.remoting.transport.Address;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
public abstract class AbstractConsistentHashFactory<CH extends ConsistentHash> implements ConsistentHashFactory<CH> {
protected void checkCapacityFactors(List<Address> members, Map<Address, Float> capacityFactors) {
if (capacityFactors != null) {
float totalCapacity = 0;
for (Address node : members) {
Float capacityFactor = capacityFactors.get(node);
if (capacityFactor == null || capacityFactor < 0)
throw new IllegalArgumentException("Invalid capacity factor for node " + node);
totalCapacity += capacityFactor;
}
if (totalCapacity == 0)
throw new IllegalArgumentException("There must be at least one node with a non-zero capacity factor");
}
}
/**
* @return The worst primary owner, or {@code null} if the remaining nodes own 0 segments.
*/
protected Address findWorstPrimaryOwner(Builder builder, List<Address> nodes) {
Address worst = null;
float maxSegmentsPerCapacity = -1;
for (Address owner : nodes) {
float capacityFactor = builder.getCapacityFactor(owner);
if (builder.getPrimaryOwned(owner) - 1 >= capacityFactor * maxSegmentsPerCapacity) {
worst = owner;
maxSegmentsPerCapacity = capacityFactor != 0 ? (builder.getPrimaryOwned(owner) - 1) / capacityFactor : 0;
}
}
return worst;
}
/**
* @return The candidate with the best (lowest) primary-owned-segments-to-capacity ratio, or {@code null} if no candidate is better than the current primary owner.
*/
protected Address findNewPrimaryOwner(Builder builder, Collection<Address> candidates,
Address primaryOwner) {
float initialCapacityFactor = primaryOwner != null ? builder.getCapacityFactor(primaryOwner) : 0;
// We want the owned/capacity ratio of the actual primary owner after removing the current segment to be bigger
// than the owned/capacity ratio of the new primary owner after adding the current segment, so that a future pass
// won't try to switch them back.
Address best = null;
float bestSegmentsPerCapacity = initialCapacityFactor != 0 ? (builder.getPrimaryOwned(primaryOwner) - 1) /
initialCapacityFactor : Float.MAX_VALUE;
for (Address candidate : candidates) {
int primaryOwned = builder.getPrimaryOwned(candidate);
float capacityFactor = builder.getCapacityFactor(candidate);
if ((primaryOwned + 1) <= capacityFactor * bestSegmentsPerCapacity) {
best = candidate;
bestSegmentsPerCapacity = (primaryOwned + 1) / capacityFactor;
}
}
return best;
}
static abstract class Builder {
protected final OwnershipStatistics stats;
protected final List<Address> members;
protected final Map<Address, Float> capacityFactors;
// For debugging
protected int modCount = 0;
public Builder(OwnershipStatistics stats, List<Address> members, Map<Address, Float> capacityFactors) {
this.stats = stats;
this.members = members;
this.capacityFactors = capacityFactors;
}
public Builder(Builder other) {
this.members = other.members;
this.capacityFactors = other.capacityFactors;
this.stats = new OwnershipStatistics(other.stats);
}
public List<Address> getMembers() {
return members;
}
public int getNumNodes() {
return getMembers().size();
}
public abstract int getPrimaryOwned(Address candidate);
public Map<Address, Float> getCapacityFactors() {
return capacityFactors;
}
public float getCapacityFactor(Address node) {
return capacityFactors != null ? capacityFactors.get(node) : 1;
}
}
}
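
A worked example of the ratio test in findNewPrimaryOwner, derived from the code above: with capacity factors of 1.0 everywhere, a current primary owner that primary-owns 10 segments gives bestSegmentsPerCapacity = (10 - 1) / 1.0 = 9; a candidate that primary-owns 5 segments qualifies because (5 + 1) <= 1.0 * 9, and the bound then tightens to (5 + 1) / 1.0 = 6, so any later candidate must primary-own at most 5 segments to win.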

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/OwnershipStatistics.java
package org.infinispan.distribution.ch.impl;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.remoting.transport.Address;
/**
* This class holds statistics about a consistent hash. It counts how many segments are owned or primary-owned by each
* member.
*
* @author Dan Berindei
* @since 5.2
*/
public class OwnershipStatistics {
private final List<Address> nodes;
private final Map<Address, Integer> nodesMap;
private final int[] primaryOwned;
private final int[] owned;
private int sumPrimary;
private int sumOwned;
public OwnershipStatistics(List<Address> nodes) {
this.nodes = nodes;
this.nodesMap = new HashMap<>(nodes.size());
for (int i = 0; i < nodes.size(); i++) {
this.nodesMap.put(nodes.get(i), i);
}
if (this.nodesMap.size() != nodes.size()) {
throw new IllegalArgumentException("Nodes are not distinct: " + nodes);
}
this.primaryOwned = new int[nodes.size()];
this.owned = new int[nodes.size()];
}
public OwnershipStatistics(ConsistentHash ch, List<Address> activeNodes) {
this(activeNodes);
for (int i = 0; i < ch.getNumSegments(); i++) {
List<Address> owners = ch.locateOwnersForSegment(i);
for (int j = 0; j < owners.size(); j++) {
Address address = owners.get(j);
Integer nodeIndex = nodesMap.get(address);
if (nodeIndex != null) {
if (j == 0) {
primaryOwned[nodeIndex]++;
sumPrimary++;
}
owned[nodeIndex]++;
sumOwned++;
}
}
}
}
public OwnershipStatistics(ConsistentHash ch) {
this(ch, ch.getMembers());
}
public OwnershipStatistics(OwnershipStatistics other) {
this.nodes = other.nodes;
this.nodesMap = other.nodesMap;
this.primaryOwned = Arrays.copyOf(other.primaryOwned, other.primaryOwned.length);
this.owned = Arrays.copyOf(other.owned, other.owned.length);
this.sumPrimary = other.sumPrimary;
this.sumOwned = other.sumOwned;
}
public int getPrimaryOwned(Address a) {
Integer i = nodesMap.get(a);
if (i == null)
return 0;
return primaryOwned[i];
}
public int getOwned(Address a) {
Integer i = nodesMap.get(a);
if (i == null)
return 0;
return owned[i];
}
public void incPrimaryOwned(Address a) {
Integer i = nodesMap.get(a);
if (i == null)
throw new IllegalArgumentException("Trying to modify statistics for a node that doesn't exist: " + a);
primaryOwned[i]++;
sumPrimary++;
}
public void incOwned(Address a) {
Integer i = nodesMap.get(a);
if (i == null)
throw new IllegalArgumentException("Trying to modify statistics for a node that doesn't exist: " + a);
owned[i]++;
sumOwned++;
}
public void decPrimaryOwned(Address a) {
Integer i = nodesMap.get(a);
if (i == null)
throw new IllegalArgumentException("Trying to modify statistics for a node that doesn't exist: " + a);
primaryOwned[i]--;
sumPrimary--;
}
public void decOwned(Address a) {
Integer i = nodesMap.get(a);
if (i == null)
throw new IllegalArgumentException("Trying to modify statistics for a node that doesn't exist: " + a);
owned[i]--;
sumOwned--;
}
public int getPrimaryOwned(int nodeIndex) {
return primaryOwned[nodeIndex];
}
public int getOwned(int nodeIndex) {
return owned[nodeIndex];
}
public void incPrimaryOwned(int nodeIndex) {
primaryOwned[nodeIndex]++;
sumPrimary++;
}
public void incOwned(int nodeIndex) {
owned[nodeIndex]++;
sumOwned++;
}
public void incOwned(int nodeIndex, boolean primary) {
owned[nodeIndex]++;
sumOwned++;
if (primary) {
incPrimaryOwned(nodeIndex);
}
}
public void decPrimaryOwned(int nodeIndex) {
primaryOwned[nodeIndex]--;
sumPrimary--;
}
public void decOwned(int nodeIndex) {
owned[nodeIndex]--;
sumOwned--;
}
public int sumPrimaryOwned() {
return sumPrimary;
}
public int sumOwned() {
return sumOwned;
}
public String toString() {
StringBuilder sb = new StringBuilder("OwnershipStatistics{");
boolean isFirst = true;
for (Address node : nodes) {
if (!isFirst) {
sb.append(", ");
}
Integer index = nodesMap.get(node);
sb.append(node).append(": ")
.append(owned[index]).append('(')
.append(primaryOwned[index]).append("p)");
isFirst = false;
}
sb.append('}');
return sb.toString();
}
}
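
A sketch of typical usage, printing per-node ownership; the `ch` variable is assumed to be an existing ConsistentHash.

// Hypothetical usage; `ch` is assumed to be an existing ConsistentHash.
OwnershipStatistics stats = new OwnershipStatistics(ch);
for (Address node : ch.getMembers()) {
   System.out.printf("%s owns %d segments (%d as primary)%n",
         node, stats.getOwned(node), stats.getPrimaryOwned(node));
}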

infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/SyncConsistentHashFactory.java
package org.infinispan.distribution.ch.impl;
import static java.lang.Math.min;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
import org.infinispan.commons.hash.MurmurHash3;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.topology.PersistentUUID;
import org.jgroups.util.UUID;
/**
* {@link org.infinispan.distribution.ch.ConsistentHashFactory} implementation
* that guarantees that multiple caches with the same members will
* have the same consistent hash (unlike {@link DefaultConsistentHashFactory}).
*
* <p>It has a drawback compared to {@link DefaultConsistentHashFactory} though:
* it can potentially move a lot more segments during a rebalance than
* strictly necessary.
* E.g. {0:AB, 1:BA, 2:CD, 3:DA} could turn into {0:BC, 1:CA, 2:CB, 3:AB} when D leaves,
* even though {0:AB, 1:BA, 2:CB, 3:AC} would require fewer segment ownership changes.
*
* <p>It may also reorder the owners of a segment, e.g. AB -> BA
* (same as {@linkplain DefaultConsistentHashFactory}).
*
* @author Dan Berindei
* @since 5.2
*/
public class SyncConsistentHashFactory implements ConsistentHashFactory<DefaultConsistentHash> {
@Override
public DefaultConsistentHash create(int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors) {
checkCapacityFactors(members, capacityFactors);
Builder builder = createBuilder(numOwners, numSegments, members, capacityFactors);
builder.populateOwners();
return new DefaultConsistentHash(numOwners, numSegments, members, capacityFactors, builder.segmentOwners);
}
@Override
public DefaultConsistentHash fromPersistentState(ScopedPersistentState state) {
String consistentHashClass = state.getProperty("consistentHash");
if (!DefaultConsistentHash.class.getName().equals(consistentHashClass))
throw CONTAINER.persistentConsistentHashMismatch(this.getClass().getName(), consistentHashClass);
return new DefaultConsistentHash(state);
}
Builder createBuilder(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
return new Builder(numOwners, numSegments, members, capacityFactors);
}
void checkCapacityFactors(List<Address> members, Map<Address, Float> capacityFactors) {
if (capacityFactors != null) {
float totalCapacity = 0;
for (Address node : members) {
Float capacityFactor = capacityFactors.get(node);
if (capacityFactor == null || capacityFactor < 0)
throw new IllegalArgumentException("Invalid capacity factor for node " + node + ": " + capacityFactor);
totalCapacity += capacityFactor;
}
if (totalCapacity == 0)
throw new IllegalArgumentException("There must be at least one node with a non-zero capacity factor");
}
}
@Override
public DefaultConsistentHash updateMembers(DefaultConsistentHash baseCH, List<Address> newMembers,
Map<Address, Float> actualCapacityFactors) {
checkCapacityFactors(newMembers, actualCapacityFactors);
// The ConsistentHashFactory contract says we should return the same instance if we're not making changes
boolean sameCapacityFactors = actualCapacityFactors == null ? baseCH.getCapacityFactors() == null :
actualCapacityFactors.equals(baseCH.getCapacityFactors());
if (newMembers.equals(baseCH.getMembers()) && sameCapacityFactors)
return baseCH;
int numSegments = baseCH.getNumSegments();
int numOwners = baseCH.getNumOwners();
// We assume leavers are far fewer than members, so it makes sense to check for leavers
HashSet<Address> leavers = new HashSet<>(baseCH.getMembers());
leavers.removeAll(newMembers);
// Create a new "balanced" CH in case we need to allocate new owners for segments with 0 owners
DefaultConsistentHash rebalancedCH = null;
// Remove leavers
List<Address>[] newSegmentOwners = new List[numSegments];
for (int s = 0; s < numSegments; s++) {
List<Address> owners = new ArrayList<>(baseCH.locateOwnersForSegment(s));
owners.removeAll(leavers);
if (!owners.isEmpty()) {
newSegmentOwners[s] = owners;
} else {
// this segment has 0 owners, fix it
if (rebalancedCH == null) {
rebalancedCH = create(numOwners, numSegments, newMembers, actualCapacityFactors);
}
newSegmentOwners[s] = rebalancedCH.locateOwnersForSegment(s);
}
}
return new DefaultConsistentHash(numOwners, numSegments, newMembers,
actualCapacityFactors, newSegmentOwners);
}
@Override
public DefaultConsistentHash rebalance(DefaultConsistentHash baseCH) {
DefaultConsistentHash rebalancedCH = create(baseCH.getNumOwners(), baseCH.getNumSegments(), baseCH.getMembers(),
baseCH.getCapacityFactors());
// the ConsistentHashFactory contract says we should return the same instance if we're not making changes
if (rebalancedCH.equals(baseCH))
return baseCH;
return rebalancedCH;
}
@Override
public DefaultConsistentHash union(DefaultConsistentHash ch1, DefaultConsistentHash ch2) {
return ch1.union(ch2);
}
@Override
public boolean equals(Object other) {
return other != null && other.getClass() == getClass();
}
@Override
public int hashCode() {
return -10007;
}
static class Builder {
static final int NO_NODE = -1;
// Input
final int numOwners;
final int numSegments;
// Output
final List<Address>[] segmentOwners;
final int[][] ownerIndices;
// Constant data
final List<Address> sortedMembers;
final int numNodes;
final float[] sortedCapacityFactors;
final float[] distanceFactors;
final float totalCapacity;
final int actualNumOwners;
final int numNodeHashes;
// Hashes use only 63 bits, or the interval 0..2^63-1
final long segmentSize;
final long[] segmentHashes;
final long[][] nodeHashes;
int nodeDistanceUpdates;
final OwnershipStatistics stats;
Builder(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
this.numSegments = numSegments;
this.numOwners = numOwners;
this.sortedMembers = sortMembersByCapacity(members, capacityFactors);
this.sortedCapacityFactors = capacityFactorsToArray(sortedMembers, capacityFactors);
this.totalCapacity = computeTotalCapacity();
numNodes = sortedMembers.size();
actualNumOwners = min(numOwners, numNodes);
distanceFactors = capacityFactorsToDistanceFactors();
segmentOwners = new List[numSegments];
ownerIndices = new int[numSegments][];
for (int s = 0; s < numSegments; s++) {
segmentOwners[s] = new ArrayList<>(actualNumOwners);
ownerIndices[s] = new int[actualNumOwners];
}
segmentSize = Long.MAX_VALUE / numSegments;
segmentHashes = computeSegmentHashes(numSegments);
// If we ever make the number of segments dynamic, the number of hashes should be fixed.
// Otherwise the extra hashes would cause extra segments to move on segment number changes.
numNodeHashes = 32 - Integer.numberOfLeadingZeros(numSegments);
nodeHashes = computeNodeHashes();
stats = new OwnershipStatistics(sortedMembers);
}
private float[] capacityFactorsToDistanceFactors() {
// Nodes with capacity factor 0 have been removed
float minCapacity = sortedCapacityFactors[numNodes - 1];
float[] distanceFactors = new float[numNodes];
for (int n = 0; n < numNodes; n++) {
distanceFactors[n] = minCapacity / sortedCapacityFactors[n];
}
return distanceFactors;
}
private float[] capacityFactorsToArray(List<Address> sortedMembers, Map<Address, Float> capacityFactors) {
float[] capacityFactorsArray = new float[sortedMembers.size()];
for (int n = 0; n < sortedMembers.size(); n++) {
capacityFactorsArray[n] = capacityFactors != null ? capacityFactors.get(sortedMembers.get(n)) : 1f;
}
return capacityFactorsArray;
}
private List<Address> sortMembersByCapacity(List<Address> members, Map<Address, Float> capacityFactors) {
if (capacityFactors == null)
return members;
// Only add members with non-zero capacity
List<Address> sortedMembers = new ArrayList<>();
for (Address member : members) {
if (!capacityFactors.get(member).equals(0f)) {
sortedMembers.add(member);
}
}
// Sort in descending order
sortedMembers.sort((a1, a2) -> Float.compare(capacityFactors.get(a2), capacityFactors.get(a1)));
return sortedMembers;
}
int[] computeExpectedSegments(int expectedOwners, float totalCapacity, int iteration) {
int[] expected = new int[numNodes];
float remainingCapacity = totalCapacity;
int remainingCopies = expectedOwners * numSegments;
float averageSegments = (float) remainingCopies / numNodes;
for (int n = 0; n < numNodes; n++) {
float capacityFactor = sortedCapacityFactors[n];
if (capacityFactor == 0f) {
expected[n] = 0;
continue;
}
float idealOwnedSegments = remainingCopies * capacityFactor / remainingCapacity;
if (idealOwnedSegments > numSegments) {
remainingCapacity -= capacityFactor;
remainingCopies -= numSegments;
expected[n] = numSegments;
} else {
// All the nodes from now on will have less than numSegments segments,
// so we can stop updating remainingCapacity/remainingCopies
expected[n] = fudgeExpectedSegments(idealOwnedSegments, averageSegments, iteration);
}
}
return expected;
}
static int fudgeExpectedSegments(float idealOwnedSegments, float averageSegments, int iteration) {
// In the first rounds reduce the number of expected segments so every node has a chance
// In the later rounds increase the number of expected segments so every segment eventually finds an owner
// It's harder to allocate the last segments to the nodes with large capacity,
// so the step by which we reduce/increase is not linear with the number of ideal expected segments
// But assign at least one extra segment per node every 5 iterations, in case there are too few segments
float step = Math.max(Math.min(averageSegments * 0.05f, idealOwnedSegments * 0.15f), 1f);
return Math.max((int) (idealOwnedSegments + (iteration - 2.5f ) * step), 0);
}
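      // Worked example (added for clarity): with averageSegments = 100 and
      // idealOwnedSegments = 80, step = max(min(100 * 0.05, 80 * 0.15), 1) = 5;
      // iteration 0 allows (int) (80 - 2.5 * 5) = 67 segments, iteration 5 allows
      // (int) (80 + 2.5 * 5) = 92, growing by one step per iteration.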
private long[] computeSegmentHashes(int numSegments) {
assert segmentSize != 0;
long[] segmentHashes = new long[numSegments];
long currentSegmentHash = segmentSize >> 1;
for (int s = 0; s < numSegments; s++) {
segmentHashes[s] = currentSegmentHash;
currentSegmentHash += segmentSize;
}
return segmentHashes;
}
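      // Note (added for clarity): segment s is hashed to the midpoint of its slice
      // of the ring, i.e. segmentHashes[s] = segmentSize / 2 + s * segmentSize.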
private long[][] computeNodeHashes() {
long[][] nodeHashes = new long[numNodes][];
for (int n = 0; n < numNodes; n++) {
nodeHashes[n] = new long[this.numNodeHashes];
for (int h = 0; h < this.numNodeHashes; h++) {
nodeHashes[n][h] = nodeHash(sortedMembers.get(n), h);
}
Arrays.sort(nodeHashes[n]);
}
return nodeHashes;
}
float computeTotalCapacity() {
if (sortedCapacityFactors == null)
return sortedMembers.size();
float totalCapacity = 0;
for (float sortedCapacityFactor : sortedCapacityFactors) {
totalCapacity += sortedCapacityFactor;
}
return totalCapacity;
}
long nodeHash(Address address, int virtualNode) {
// 64-bit hashes derived from 32-bit hashes have a non-negligible chance of collision,
// so we try to use all 128 bits of UUID addresses
long[] key = new long[2];
if (address instanceof JGroupsAddress) {
org.jgroups.Address jGroupsAddress = ((JGroupsAddress) address).getJGroupsAddress();
if (jGroupsAddress instanceof UUID) {
key[0] = ((UUID) jGroupsAddress).getLeastSignificantBits();
key[1] = ((UUID) jGroupsAddress).getMostSignificantBits();
} else {
key[0] = address.hashCode();
}
} else if (address instanceof PersistentUUID) {
key[0] = ((PersistentUUID) address).getLeastSignificantBits();
key[1] = ((PersistentUUID) address).getMostSignificantBits();
} else {
key[0] = address.hashCode();
}
return MurmurHash3.MurmurHash3_x64_64(key, virtualNode) & Long.MAX_VALUE;
}
/**
* @return distance between 2 points in the 0..2^63-1 range, max 2^62-1
*/
long distance(long a, long b) {
long distance = a < b ? b - a : a - b;
if ((distance & (1L << 62)) != 0) {
distance = -distance - Long.MIN_VALUE;
}
// For the -2^63..2^63-1 range, the code would be
// if (distance < 0) {
// distance = -distance;
// }
return distance;
}
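      // Equivalence sketch (added for illustration, not part of the original class):
      // the bit test above computes the circular distance on a ring of size 2^63.
      // A more literal version, assuming both inputs are already masked into
      // 0..2^63-1 as nodeHash() guarantees:
      static long distanceSketch(long a, long b) {
         long d = a < b ? b - a : a - b;       // |a - b|, always in 0..2^63-1
         if (d == 0)
            return 0;
         long around = Long.MAX_VALUE - d + 1; // 2^63 - d, the other way round the ring
         return Math.min(d, around);           // the shorter way round, at most 2^62
      }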
void populateOwners() {
// Queue k contains each segment's (k+1)-th closest available node (queue 0 holds the closest)
PriorityQueue<SegmentInfo>[] segmentQueues = new PriorityQueue[Math.max(1, actualNumOwners)];
for (int i = 0; i < segmentQueues.length; i++) {
segmentQueues[i] = new PriorityQueue<>(numSegments);
}
// Temporary priority queue for one segment's potential owners
PriorityQueue<SegmentInfo> temporaryQueue = new PriorityQueue<>(numNodes);
assignSegments(1, totalCapacity, 1, segmentQueues, temporaryQueue);
assert stats.sumPrimaryOwned() == numSegments;
// The minimum queue count we can use is actualNumOwners - 1
// A bigger queue count improves stability, i.e. a rebalance after a join/leave moves less segments around
// However, the queueCount==1 case is optimized, so actualNumOwners-1 has better performance for numOwners=2
assignSegments(actualNumOwners, totalCapacity, actualNumOwners - 1, segmentQueues, temporaryQueue);
assert stats.sumOwned() == actualNumOwners * numSegments;
}
private void assignSegments(int currentNumOwners, float totalCapacity, int queuesCount,
PriorityQueue<SegmentInfo>[] segmentQueues,
PriorityQueue<SegmentInfo> temporaryQueue) {
int totalCopies = currentNumOwners * numSegments;
// We try to assign the closest node as the first owner, then the 2nd closest node etc.
// But we also try to keep the number of owned segments per node close to the "ideal" number,
// so we start by allocating a smaller number of segments to each node and slowly allow more segments.
for (int loadIteration = 0; stats.sumOwned() < totalCopies; loadIteration++) {
int[] nodeSegmentsToAdd = computeExpectedSegments(currentNumOwners, totalCapacity, loadIteration);
int iterationCopies = 0;
for (int n = 0; n < numNodes; n++) {
iterationCopies += nodeSegmentsToAdd[n];
nodeSegmentsToAdd[n] -= stats.getOwned(n);
}
iterationCopies = Math.max(iterationCopies, totalCopies);
for (int distanceIteration = 0; distanceIteration < numNodes; distanceIteration++) {
if (stats.sumOwned() >= iterationCopies)
break;
populateQueues(currentNumOwners, nodeSegmentsToAdd, queuesCount, segmentQueues, temporaryQueue);
if (!assignQueuedOwners(currentNumOwners, nodeSegmentsToAdd, queuesCount, iterationCopies, segmentQueues))
break;
}
}
}
// Useful for debugging
private BitSet[] computeAvailableSegmentsPerNode(int currentNumOwners) {
BitSet[] nodeSegmentsAvailable = new BitSet[numNodes];
for (int s = 0; s < numSegments; s++) {
if (!segmentIsAvailable(s, currentNumOwners))
continue;
for (int n = 0; n < numNodes; n++) {
if (nodeCanOwnSegment(s, segmentOwners[s].size(), n)) {
if (nodeSegmentsAvailable[n] == null) {
nodeSegmentsAvailable[n] = new BitSet();
}
nodeSegmentsAvailable[n].set(s);
}
}
}
return nodeSegmentsAvailable;
}
private boolean assignQueuedOwners(int currentNumOwners, int[] nodeSegmentsToAdd, int queuesCount,
int iterationCopies, PriorityQueue<SegmentInfo>[] segmentQueues) {
boolean assigned = false;
for (int i = 0; i < queuesCount; i++) {
SegmentInfo si;
while ((si = segmentQueues[i].poll()) != null) {
int ownerPosition = segmentOwners[si.segment].size();
if (nodeSegmentsToAdd[si.nodeIndex] <= 0)
continue;
if (i == 0 ||
segmentIsAvailable(si.segment, currentNumOwners) &&
nodeCanOwnSegment(si.segment, ownerPosition, si.nodeIndex)) {
assignOwner(si.segment, ownerPosition, si.nodeIndex, nodeSegmentsToAdd);
assigned = true;
}
if (stats.sumOwned() >= iterationCopies) {
return assigned;
}
}
segmentQueues[i].clear();
}
return assigned;
}
private void populateQueues(int currentNumOwners, int[] nodeSegmentsToAdd, int queueCount,
PriorityQueue<SegmentInfo>[] segmentQueues,
PriorityQueue<SegmentInfo> temporaryQueue) {
// Bypass the temporary queue if the queue count is 1
SegmentInfo best = null;
for (int s = 0; s < numSegments; s++) {
if (!segmentIsAvailable(s, currentNumOwners))
continue;
for (int n = 0; n < numNodes; n++) {
if (nodeSegmentsToAdd[n] > 0 && nodeCanOwnSegment(s, segmentOwners[s].size(), n)) {
long scaledDistance = nodeSegmentDistance(n, segmentHashes[s]);
if (queueCount > 1) {
SegmentInfo si = new SegmentInfo(s, n, scaledDistance);
temporaryQueue.add(si);
} else {
if (best == null) {
best = new SegmentInfo(s, n, scaledDistance);
} else if (scaledDistance < best.distance) {
best.update(n, scaledDistance);
}
}
}
}
if (queueCount > 1) {
for (int i = 0; i < queueCount && !temporaryQueue.isEmpty(); i++) {
segmentQueues[i].add(temporaryQueue.remove());
}
temporaryQueue.clear();
} else {
if (best != null) {
segmentQueues[0].add(best);
}
best = null;
}
}
}
private boolean segmentIsAvailable(int segment, int currentNumOwners) {
return segmentOwners[segment].size() < currentNumOwners;
}
private long nodeSegmentDistance(int nodeIndex, long segmentHash) {
nodeDistanceUpdates++;
long[] currentNodeHashes = nodeHashes[nodeIndex];
int hashIndex = Arrays.binarySearch(currentNodeHashes, segmentHash);
long scaledDistance;
if (hashIndex >= 0) {
// Found an exact match
scaledDistance = 0L;
} else {
// Flip to get the insertion point
hashIndex = -(hashIndex + 1);
long hashBefore = hashIndex > 0 ? currentNodeHashes[hashIndex - 1] : currentNodeHashes[numNodeHashes - 1];
long hashAfter = hashIndex < numNodeHashes ? currentNodeHashes[hashIndex] : currentNodeHashes[0];
long distance = min(distance(hashBefore, segmentHash), distance(hashAfter, segmentHash));
scaledDistance = (long) (distance * distanceFactors[nodeIndex]);
}
return scaledDistance;
}
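      // Note (added for clarity): when the insertion point falls before the first or
      // after the last virtual-node hash, the neighbour lookup above wraps around to
      // the other end of the sorted array, treating the hashes as points on a ring.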
protected void assignOwner(int segment, int ownerPosition, int nodeIndex, int[] nodeSegmentsWanted) {
assert nodeSegmentsWanted[nodeIndex] > 0;
// One less segment needed for the assigned node
--nodeSegmentsWanted[nodeIndex];
assert segmentOwners[segment].size() == ownerPosition;
segmentOwners[segment].add(sortedMembers.get(nodeIndex));
ownerIndices[segment][ownerPosition] = nodeIndex;
stats.incOwned(nodeIndex, ownerPosition == 0);
// System.out.printf("owners[%d][%d] = %s (%d)\n", segment, ownerPosition, sortedMembers.get(nodeIndex), nodeIndex);
}
boolean nodeCanOwnSegment(int segment, int ownerPosition, int nodeIndex) {
// Return false if the node is already in the segment's owner list
return !intArrayContains(ownerIndices[segment], ownerPosition, nodeIndex);
}
boolean intArrayContains(int[] array, int end, int value) {
for (int i = 0; i < end; i++) {
if (array[i] == value)
return true;
}
return false;
}
static class SegmentInfo implements Comparable<SegmentInfo> {
static final int NO_AVAILABLE_OWNERS = -2;
final int segment;
int nodeIndex;
long distance;
SegmentInfo(int segment) {
this.segment = segment;
reset();
}
public SegmentInfo(int segment, int nodeIndex, long distance) {
this.segment = segment;
this.nodeIndex = nodeIndex;
this.distance = distance;
}
void update(int closestNode, long minDistance) {
this.nodeIndex = closestNode;
this.distance = minDistance;
}
boolean isValid() {
return nodeIndex >= 0;
}
void reset() {
update(NO_NODE, Long.MAX_VALUE);
}
boolean hasNoAvailableOwners() {
return nodeIndex == NO_AVAILABLE_OWNERS;
}
void markNoPotentialOwners() {
update(NO_AVAILABLE_OWNERS, Long.MAX_VALUE);
}
@Override
public int compareTo(SegmentInfo o) {
// Sort ascending by distance
return Long.compare(distance, o.distance);
}
@Override
public String toString() {
if (nodeIndex >= 0) {
return String.format("SegmentInfo#%d{n=%d, distance=%016x}", segment, nodeIndex, distance);
}
return String.format("SegmentInfo#%d{%s}", segment, segmentDescription());
}
private String segmentDescription() {
switch (nodeIndex) {
case NO_NODE:
return "NO_NODE";
case NO_AVAILABLE_OWNERS:
return "NO_AVAILABLE_OWNERS";
default:
return String.valueOf(segment);
}
}
}
}
public static class Externalizer extends AbstractExternalizer<SyncConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, SyncConsistentHashFactory chf) {
}
@Override
public SyncConsistentHashFactory readObject(ObjectInput unmarshaller) {
return new SyncConsistentHashFactory();
}
@Override
public Integer getId() {
return Ids.SYNC_CONSISTENT_HASH_FACTORY;
}
@Override
public Set<Class<? extends SyncConsistentHashFactory>> getTypeClasses() {
return Collections.singleton(SyncConsistentHashFactory.class);
}
}
}
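// Minimal usage sketch (added for illustration; addrA and addrB are hypothetical
// cluster Addresses, not defined in this file):
//   SyncConsistentHashFactory chf = new SyncConsistentHashFactory();
//   DefaultConsistentHash ch = chf.create(2, 256, List.of(addrA, addrB), null);
//   // every segment now has two owners, the closest scaled virtual node first
//   assert ch.locateOwnersForSegment(0).size() == 2;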
| 25,118
| 39.977162
| 124
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/ReplicatedConsistentHash.java
|
package org.infinispan.distribution.ch.impl;
import static org.infinispan.distribution.ch.impl.AbstractConsistentHash.STATE_CAPACITY_FACTOR;
import static org.infinispan.distribution.ch.impl.AbstractConsistentHash.STATE_CAPACITY_FACTORS;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.UnaryOperator;
import org.infinispan.commons.marshall.InstanceReusingAdvancedExternalizer;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.PersistentUUID;
/**
* Special implementation of {@link org.infinispan.distribution.ch.ConsistentHash} for replicated caches.
* The hash-space has several segments owned by all members and the primary ownership of each segment is evenly
* spread between members.
*
* @author Dan Berindei
* @author anistor@redhat.com
* @since 5.2
*/
public class ReplicatedConsistentHash implements ConsistentHash {
private static final String STATE_PRIMARY_OWNERS = "primaryOwners.%d";
private static final String STATE_PRIMARY_OWNERS_COUNT = "primaryOwners";
private final int[] primaryOwners;
private final List<Address> members;
private final List<Address> membersWithState;
private final Set<Address> membersWithStateSet;
private final List<Address> membersWithoutState;
private final Map<Address, Float> capacityFactors;
private final Set<Integer> segments;
public ReplicatedConsistentHash(List<Address> members, int[] primaryOwners) {
this(members, null, Collections.emptyList(), primaryOwners);
}
public ReplicatedConsistentHash(List<Address> members, Map<Address, Float> capacityFactors, List<Address> membersWithoutState, int[] primaryOwners) {
this.members = Immutables.immutableListCopy(members);
this.membersWithoutState = Immutables.immutableListCopy(membersWithoutState);
this.membersWithState = computeMembersWithState(members, membersWithoutState);
this.membersWithStateSet = Immutables.immutableSetConvert(this.membersWithState);
this.primaryOwners = primaryOwners;
this.capacityFactors = Immutables.immutableMapCopy(capacityFactors);
this.segments = IntSets.immutableRangeSet(primaryOwners.length);
}
public ReplicatedConsistentHash union(ReplicatedConsistentHash ch2) {
if (this.getNumSegments() != ch2.getNumSegments())
throw new IllegalArgumentException("The consistent hash objects must have the same number of segments");
List<Address> unionMembers = new ArrayList<>(this.members);
for (Address member : ch2.getMembers()) {
if (!members.contains(member)) {
unionMembers.add(member);
}
}
List<Address> unionMembersWithoutState = new ArrayList<>(this.membersWithoutState);
for (Address member : ch2.membersWithoutState) {
// Only keep the member in the without-state list if this CH has no state for it either
if (!this.membersWithStateSet.contains(member) && !unionMembersWithoutState.contains(member)) {
unionMembersWithoutState.add(member);
}
}
int[] primaryOwners = new int[this.getNumSegments()];
for (int segmentId = 0; segmentId < primaryOwners.length; segmentId++) {
Address primaryOwner = this.locatePrimaryOwnerForSegment(segmentId);
int primaryOwnerIndex = unionMembers.indexOf(primaryOwner);
primaryOwners[segmentId] = primaryOwnerIndex;
}
Map<Address, Float> unionCapacityFactors;
if (capacityFactors == null && ch2.capacityFactors == null) {
unionCapacityFactors = null;
} else if (capacityFactors == null) {
unionCapacityFactors = new HashMap<>(ch2.capacityFactors);
for (Address address : members) {
unionCapacityFactors.put(address, 1.0f);
}
} else if (ch2.capacityFactors == null) {
unionCapacityFactors = new HashMap<>(capacityFactors);
for (Address address : ch2.members) {
unionCapacityFactors.put(address, 1.0f);
}
} else {
unionCapacityFactors = new HashMap<>(capacityFactors);
unionCapacityFactors.putAll(ch2.capacityFactors);
}
return new ReplicatedConsistentHash(unionMembers, unionCapacityFactors, unionMembersWithoutState, primaryOwners);
}
ReplicatedConsistentHash(ScopedPersistentState state) {
List<Address> members = parseMembers(state, ConsistentHashPersistenceConstants.STATE_MEMBERS,
ConsistentHashPersistenceConstants.STATE_MEMBER);
List<Address> membersWithoutState = parseMembers(state, ConsistentHashPersistenceConstants.STATE_MEMBERS_NO_ENTRIES,
ConsistentHashPersistenceConstants.STATE_MEMBER_NO_ENTRIES);
Map<Address, Float> capacityFactors = parseCapacityFactors(state, members);
int[] primaryOwners = parsePrimaryOwners(state);
this.members = Immutables.immutableListCopy(members);
this.membersWithoutState = Immutables.immutableListCopy(membersWithoutState);
this.membersWithState = computeMembersWithState(members, membersWithoutState);
this.membersWithStateSet = Immutables.immutableSetConvert(this.membersWithState);
this.primaryOwners = primaryOwners;
this.capacityFactors = Immutables.immutableMapCopy(capacityFactors);
this.segments = IntSets.immutableRangeSet(this.primaryOwners.length);
}
private static List<Address> parseMembers(ScopedPersistentState state, String numMembersPropertyName,
String memberPropertyFormat) {
String property = state.getProperty(numMembersPropertyName);
if (property == null) {
return Collections.emptyList();
}
int numMembers = Integer.parseInt(property);
List<Address> members = new ArrayList<>(numMembers);
for (int i = 0; i < numMembers; i++) {
PersistentUUID uuid = PersistentUUID.fromString(state.getProperty(String.format(memberPropertyFormat, i)));
members.add(uuid);
}
return members;
}
private static Map<Address, Float> parseCapacityFactors(ScopedPersistentState state,
List<Address> members) {
String numCapacityFactorsString = state.getProperty(STATE_CAPACITY_FACTORS);
if (numCapacityFactorsString == null) {
// Cache state version 11 did not have capacity factors
Map<Address, Float> map = new HashMap<>();
for (Address a : members) {
map.put(a, 1f);
}
return map;
}
int numCapacityFactors = Integer.parseInt(numCapacityFactorsString);
Map<Address, Float> capacityFactors = new HashMap<>(numCapacityFactors * 2);
for (int i = 0; i < numCapacityFactors; i++) {
float capacityFactor = Float.parseFloat(state.getProperty(String.format(STATE_CAPACITY_FACTOR, i)));
capacityFactors.put(members.get(i), capacityFactor);
}
return capacityFactors;
}
private static int[] parsePrimaryOwners(ScopedPersistentState state) {
int numPrimaryOwners = state.getIntProperty(STATE_PRIMARY_OWNERS_COUNT);
int[] primaryOwners = new int[numPrimaryOwners];
for (int i = 0; i < numPrimaryOwners; i++) {
primaryOwners[i] = state.getIntProperty(String.format(STATE_PRIMARY_OWNERS, i));
}
return primaryOwners;
}
@Override
public int getNumSegments() {
return primaryOwners.length;
}
public int getNumOwners() {
return membersWithState.size();
}
@Override
public List<Address> getMembers() {
return members;
}
@Override
public List<Address> locateOwnersForSegment(int segmentId) {
Address primaryOwner = locatePrimaryOwnerForSegment(segmentId);
List<Address> owners = new ArrayList<>(membersWithState.size());
owners.add(primaryOwner);
for (Address member : membersWithState) {
if (!member.equals(primaryOwner)) {
owners.add(member);
}
}
return owners;
}
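   // Example (added for clarity): with members [A, B, C] all holding state and
   // primaryOwners[5] == 1, locateOwnersForSegment(5) returns [B, A, C]: the
   // primary owner first, followed by every other member with state.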
@Override
public Address locatePrimaryOwnerForSegment(int segmentId) {
return members.get(primaryOwners[segmentId]);
}
@Override
public Set<Integer> getSegmentsForOwner(Address owner) {
if (owner == null) {
throw new IllegalArgumentException("owner cannot be null");
}
if (membersWithStateSet.contains(owner))
return segments;
return IntSets.immutableEmptySet();
}
@Override
public Set<Integer> getPrimarySegmentsForOwner(Address owner) {
int index = members.indexOf(owner);
if (index == -1) {
return IntSets.immutableEmptySet();
}
IntSet primarySegments = IntSets.mutableEmptySet(primaryOwners.length);
for (int i = 0; i < primaryOwners.length; ++i) {
if (primaryOwners[i] == index) {
primarySegments.set(i);
}
}
return primarySegments;
}
@Override
public String getRoutingTableAsString() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < primaryOwners.length; i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(i).append(": ").append(primaryOwners[i]);
}
if (!membersWithoutState.isEmpty()) {
sb.append("none:");
for (Address a : membersWithoutState) {
sb.append(' ').append(a);
}
}
return sb.toString();
}
@Override
public boolean isSegmentLocalToNode(Address nodeAddress, int segmentId) {
return membersWithStateSet.contains(nodeAddress);
}
@Override
public boolean isReplicated() {
return true;
}
public void toScopedState(ScopedPersistentState state) {
state.setProperty(ConsistentHashPersistenceConstants.STATE_CONSISTENT_HASH, this.getClass().getName());
state.setProperty(ConsistentHashPersistenceConstants.STATE_MEMBERS, Integer.toString(members.size()));
for (int i = 0; i < members.size(); i++) {
state.setProperty(String.format(ConsistentHashPersistenceConstants.STATE_MEMBER, i),
members.get(i).toString());
}
state.setProperty(ConsistentHashPersistenceConstants.STATE_MEMBERS_NO_ENTRIES, Integer.toString(membersWithoutState.size()));
for (int i = 0; i < membersWithoutState.size(); i++) {
state.setProperty(String.format(ConsistentHashPersistenceConstants.STATE_MEMBER_NO_ENTRIES, i),
membersWithoutState.get(i).toString());
}
state.setProperty(STATE_CAPACITY_FACTORS, Integer.toString(capacityFactors.size()));
for (int i = 0; i < members.size(); i++) {
state.setProperty(String.format(STATE_CAPACITY_FACTOR, i),
capacityFactors.get(members.get(i)).toString());
}
state.setProperty(STATE_PRIMARY_OWNERS_COUNT, Integer.toString(primaryOwners.length));
for (int i = 0; i < primaryOwners.length; i++) {
state.setProperty(String.format(STATE_PRIMARY_OWNERS, i), Integer.toString(primaryOwners[i]));
}
}
@Override
public ConsistentHash remapAddresses(UnaryOperator<Address> remapper) {
List<Address> remappedMembers = new ArrayList<>(members.size());
for (Address member : members) {
Address a = remapper.apply(member);
if (a == null) {
return null;
}
remappedMembers.add(a);
}
List<Address> remappedMembersWithoutState = new ArrayList<>(membersWithoutState.size());
for (Address member : membersWithoutState) {
Address a = remapper.apply(member);
if (a == null) {
return null;
}
remappedMembersWithoutState.add(a);
}
Map<Address, Float> remappedCapacityFactors = null;
if (capacityFactors != null) {
remappedCapacityFactors = new HashMap<>(members.size());
for (Address member : members) {
remappedCapacityFactors.put(remapper.apply(member), capacityFactors.get(member));
}
}
return new ReplicatedConsistentHash(remappedMembers, remappedCapacityFactors, remappedMembersWithoutState, primaryOwners);
}
@Override
public Map<Address, Float> getCapacityFactors() {
return capacityFactors;
}
private List<Address> computeMembersWithState(List<Address> members, List<Address> membersWithoutState) {
if (membersWithoutState.isEmpty()) {
return members;
} else {
List<Address> membersWithState = new ArrayList<>(members);
membersWithState.removeAll(membersWithoutState);
return Immutables.immutableListCopy(membersWithState);
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("ReplicatedConsistentHash{");
sb.append("ns = ").append(primaryOwners.length);
sb.append(", owners = (").append(members.size()).append(")[");
int[] primaryOwned = new int[members.size()];
for (int primaryOwner : primaryOwners) {
primaryOwned[primaryOwner]++;
}
boolean first = true;
for (int i = 0; i < members.size(); i++) {
Address a = members.get(i);
if (first) {
first = false;
} else {
sb.append(", ");
}
sb.append(a).append(": ").append(primaryOwned[i]);
sb.append("+");
if (membersWithStateSet.contains(a)) {
sb.append(getNumSegments() - primaryOwned[i]);
} else {
sb.append("0");
}
}
sb.append("]}");
return sb.toString();
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((members == null) ? 0 : members.hashCode());
result = prime * result + ((membersWithoutState == null) ? 0 : membersWithoutState.hashCode());
result = prime * result + Arrays.hashCode(primaryOwners);
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ReplicatedConsistentHash other = (ReplicatedConsistentHash) obj;
if (members == null) {
if (other.members != null)
return false;
} else if (!members.equals(other.members))
return false;
if (membersWithoutState == null) {
if (other.membersWithoutState != null)
return false;
} else if (!membersWithoutState.equals(other.membersWithoutState))
return false;
if (!Arrays.equals(primaryOwners, other.primaryOwners))
return false;
return true;
}
public static class Externalizer extends InstanceReusingAdvancedExternalizer<ReplicatedConsistentHash> {
@Override
public void doWriteObject(ObjectOutput output, ReplicatedConsistentHash ch) throws IOException {
output.writeObject(ch.members);
output.writeObject(ch.capacityFactors);
output.writeObject(ch.membersWithoutState);
output.writeObject(ch.primaryOwners);
}
@Override
@SuppressWarnings("unchecked")
public ReplicatedConsistentHash doReadObject(ObjectInput unmarshaller) throws IOException,
ClassNotFoundException {
List<Address> members = (List<Address>) unmarshaller.readObject();
Map<Address, Float> capacityFactors = (Map<Address, Float>) unmarshaller.readObject();
List<Address> membersWithoutState = (List<Address>) unmarshaller.readObject();
int[] primaryOwners = (int[]) unmarshaller.readObject();
return new ReplicatedConsistentHash(members, capacityFactors, membersWithoutState, primaryOwners);
}
@Override
public Integer getId() {
return Ids.REPLICATED_CONSISTENT_HASH;
}
@Override
public Set<Class<? extends ReplicatedConsistentHash>> getTypeClasses() {
return Collections.singleton(ReplicatedConsistentHash.class);
}
}
}
| 16,582
| 38.296209
| 152
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/ReplicatedConsistentHashFactory.java
|
package org.infinispan.distribution.ch.impl;
import static org.infinispan.distribution.ch.impl.SyncReplicatedConsistentHashFactory.computeMembersWithoutState;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayDeque;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
/**
* Factory for ReplicatedConsistentHash.
*
* @author Dan Berindei
* @author anistor@redhat.com
* @since 5.2
*/
public class ReplicatedConsistentHashFactory implements ConsistentHashFactory<ReplicatedConsistentHash> {
@Override
public ReplicatedConsistentHash create(int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors) {
List<Address> membersWithoutState = computeMembersWithoutState(members, null, capacityFactors);
int[] primaryOwners = new int[numSegments];
int nextPrimaryOwner = 0;
for (int i = 0; i < numSegments; i++) {
// computeMembersWithoutState ensures that there is at least one member *with* state
while (membersWithoutState.contains(members.get(nextPrimaryOwner))) {
nextPrimaryOwner++;
if (nextPrimaryOwner == members.size()) {
nextPrimaryOwner = 0;
}
}
primaryOwners[i] = nextPrimaryOwner;
// Advance so the next segment gets the next eligible member (round-robin)
nextPrimaryOwner++;
if (nextPrimaryOwner == members.size()) {
nextPrimaryOwner = 0;
}
}
return new ReplicatedConsistentHash(members, capacityFactors, membersWithoutState, primaryOwners);
}
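   // Worked example (added for clarity): with members [A, B, C] where C has no
   // state and numSegments = 4, the loop above yields primaryOwners = [0, 1, 0, 1]:
   // primaries alternate round-robin between A and B while C is skipped.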
@Override
public ReplicatedConsistentHash fromPersistentState(ScopedPersistentState state) {
String consistentHashClass = state.getProperty("consistentHash");
if (!ReplicatedConsistentHash.class.getName().equals(consistentHashClass))
throw CONTAINER.persistentConsistentHashMismatch(this.getClass().getName(), consistentHashClass);
return new ReplicatedConsistentHash(state);
}
@Override
public ReplicatedConsistentHash updateMembers(ReplicatedConsistentHash baseCH, List<Address> newMembers,
Map<Address, Float> actualCapacityFactors) {
if (newMembers.equals(baseCH.getMembers()))
return baseCH;
return updateCH(baseCH, newMembers, actualCapacityFactors, false);
}
private ReplicatedConsistentHash updateCH(ReplicatedConsistentHash baseCH, List<Address> newMembers,
Map<Address, Float> actualCapacityFactors, boolean rebalance) {
// New members missing from the old CH or with capacity factor 0 should not become primary or backup owners
List<Address> membersWithoutState = computeMembersWithoutState(newMembers, baseCH.getMembers(),
actualCapacityFactors);
// recompute primary ownership based on the new list of members (removes leavers)
int numSegments = baseCH.getNumSegments();
int[] primaryOwners = new int[numSegments];
int[] nodeUsage = new int[newMembers.size()];
boolean foundOrphanSegments = false;
for (int segmentId = 0; segmentId < numSegments; segmentId++) {
Address primaryOwner = baseCH.locatePrimaryOwnerForSegment(segmentId);
int primaryOwnerIndex = newMembers.indexOf(primaryOwner);
primaryOwners[segmentId] = primaryOwnerIndex;
if (primaryOwnerIndex == -1) {
foundOrphanSegments = true;
} else {
nodeUsage[primaryOwnerIndex]++;
}
}
if (!foundOrphanSegments && !rebalance) {
// The primary owners don't need to change
return new ReplicatedConsistentHash(newMembers, actualCapacityFactors, membersWithoutState, primaryOwners);
}
// Exclude members without state by setting their usage to a very high value
for (int i = 0; i < newMembers.size(); i++) {
Address a = newMembers.get(i);
if (membersWithoutState.contains(a)) {
nodeUsage[i] = Integer.MAX_VALUE;
}
}
// ensure leavers are replaced with existing members so no segments are orphan
if (foundOrphanSegments) {
for (int i = 0; i < numSegments; i++) {
if (primaryOwners[i] == -1) {
int leastUsed = findLeastUsedNode(nodeUsage);
primaryOwners[i] = leastUsed;
nodeUsage[leastUsed]++;
}
}
}
// ensure even spread of ownership
int minSegmentsPerNode = numSegments / newMembers.size();
Queue<Integer>[] segmentsByNode = new Queue[newMembers.size()];
for (int segmentId = 0; segmentId < primaryOwners.length; ++segmentId) {
int owner = primaryOwners[segmentId];
Queue<Integer> segments = segmentsByNode[owner];
if (segments == null) {
segmentsByNode[owner] = segments = new ArrayDeque<>(minSegmentsPerNode);
}
segments.add(segmentId);
}
int mostUsedNode = 0;
for (int node = 0; node < nodeUsage.length; node++) {
while (nodeUsage[node] < minSegmentsPerNode) {
// we can take a segment from any node that has more than minSegmentsPerNode + 1 segments, not only the most used one
if (nodeUsage[mostUsedNode] <= minSegmentsPerNode + 1) {
mostUsedNode = findMostUsedNode(nodeUsage);
}
int segmentId = segmentsByNode[mostUsedNode].poll();
// we don't have to add the segmentId to the new owner's queue
primaryOwners[segmentId] = node;
nodeUsage[mostUsedNode]--;
nodeUsage[node]++;
}
}
return new ReplicatedConsistentHash(newMembers, actualCapacityFactors, membersWithoutState, primaryOwners);
}
private int findLeastUsedNode(int[] nodeUsage) {
int res = 0;
for (int node = 1; node < nodeUsage.length; node++) {
if (nodeUsage[node] < nodeUsage[res]) {
res = node;
}
}
return res;
}
private int findMostUsedNode(int[] nodeUsage) {
int res = 0;
for (int node = 1; node < nodeUsage.length; node++) {
if (nodeUsage[node] > nodeUsage[res]) {
res = node;
}
}
return res;
}
@Override
public ReplicatedConsistentHash rebalance(ReplicatedConsistentHash baseCH) {
return updateCH(baseCH, baseCH.getMembers(), baseCH.getCapacityFactors(), true);
}
@Override
public ReplicatedConsistentHash union(ReplicatedConsistentHash ch1, ReplicatedConsistentHash ch2) {
return ch1.union(ch2);
}
@Override
public boolean equals(Object other) {
return other != null && other.getClass() == getClass();
}
@Override
public int hashCode() {
return -6053;
}
public static class Externalizer extends AbstractExternalizer<ReplicatedConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, ReplicatedConsistentHashFactory chf) {
}
@Override
public ReplicatedConsistentHashFactory readObject(ObjectInput unmarshaller) {
return new ReplicatedConsistentHashFactory();
}
@Override
public Integer getId() {
return Ids.REPLICATED_CONSISTENT_HASH_FACTORY;
}
@Override
public Set<Class<? extends ReplicatedConsistentHashFactory>> getTypeClasses() {
return Util.asSet(ReplicatedConsistentHashFactory.class);
}
}
}
| 7,780
| 37.330049
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/DefaultConsistentHash.java
|
package org.infinispan.distribution.ch.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.UnaryOperator;
import org.infinispan.commons.marshall.InstanceReusingAdvancedExternalizer;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.PersistentUUID;
import net.jcip.annotations.Immutable;
/**
* Default {@link ConsistentHash} implementation. This object is immutable.
*
* Every segment must have a primary owner.
*
* @author Dan Berindei
* @author anistor@redhat.com
* @since 5.2
*/
@Immutable
public class DefaultConsistentHash extends AbstractConsistentHash {
// State constants
private static final String STATE_NUM_OWNERS = "numOwners";
private static final String STATE_SEGMENT_OWNER = "segmentOwner.%d.%d";
private static final String STATE_SEGMENT_OWNERS = "segmentOwners";
private static final String STATE_SEGMENT_OWNER_COUNT = "segmentOwner.%d.num";
private final int numOwners;
/**
* The routing table.
*/
private final List<Address>[] segmentOwners;
public DefaultConsistentHash(int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors, List<Address>[] segmentOwners) {
super(numSegments, members, capacityFactors);
if (numOwners < 1)
throw new IllegalArgumentException("The number of owners must be strictly positive");
this.numOwners = numOwners;
this.segmentOwners = new List[numSegments];
for (int s = 0; s < numSegments; s++) {
if (segmentOwners[s] == null || segmentOwners[s].isEmpty()) {
throw new IllegalArgumentException("Segment owner list cannot be null or empty");
}
this.segmentOwners[s] = Immutables.immutableListCopy(segmentOwners[s]);
}
}
// Only used by the externalizer, so we can skip copying collections
private DefaultConsistentHash(int numOwners, int numSegments, List<Address> members,
float[] capacityFactors, List<Address>[] segmentOwners) {
super(numSegments, members, capacityFactors);
if (numOwners < 1)
throw new IllegalArgumentException("The number of owners must be strictly positive");
this.numOwners = numOwners;
for (int i = 0; i < numSegments; i++) {
if (segmentOwners[i] == null || segmentOwners[i].isEmpty()) {
throw new IllegalArgumentException("Segment owner list cannot be null or empty");
}
}
this.segmentOwners = segmentOwners;
}
DefaultConsistentHash(ScopedPersistentState state) {
super(state);
this.numOwners = Integer.parseInt(state.getProperty(STATE_NUM_OWNERS));
int numSegments = parseNumSegments(state);
this.segmentOwners = new List[numSegments];
for (int i = 0; i < segmentOwners.length; i++) {
int segmentOwnerCount = Integer.parseInt(state.getProperty(String.format(STATE_SEGMENT_OWNER_COUNT, i)));
segmentOwners[i] = new ArrayList<>();
for (int j = 0; j < segmentOwnerCount; j++) {
PersistentUUID uuid = PersistentUUID.fromString(state.getProperty(String.format(STATE_SEGMENT_OWNER, i, j)));
segmentOwners[i].add(uuid);
}
}
}
@Override
public int getNumSegments() {
return segmentOwners.length;
}
@Override
public Set<Integer> getSegmentsForOwner(Address owner) {
if (owner == null) {
throw new IllegalArgumentException("owner cannot be null");
}
if (!members.contains(owner)) {
return IntSets.immutableEmptySet();
}
IntSet segments = IntSets.mutableEmptySet(segmentOwners.length);
for (int segment = 0; segment < segmentOwners.length; segment++) {
if (segmentOwners[segment].contains(owner)) {
segments.set(segment);
}
}
return segments;
}
@Override
public Set<Integer> getPrimarySegmentsForOwner(Address owner) {
if (owner == null) {
throw new IllegalArgumentException("owner cannot be null");
}
if (!members.contains(owner)) {
return IntSets.immutableEmptySet();
}
IntSet segments = IntSets.mutableEmptySet(segmentOwners.length);
for (int segment = 0; segment < segmentOwners.length; segment++) {
if (owner.equals(segmentOwners[segment].get(0))) {
segments.set(segment);
}
}
return segments;
}
@Override
public List<Address> locateOwnersForSegment(int segmentId) {
return segmentOwners[segmentId];
}
@Override
public Address locatePrimaryOwnerForSegment(int segmentId) {
return segmentOwners[segmentId].get(0);
}
public int getNumOwners() {
return numOwners;
}
@Override
public boolean isSegmentLocalToNode(Address nodeAddress, int segmentId) {
return segmentOwners[segmentId].contains(nodeAddress);
}
@Override
public int hashCode() {
int result = numOwners;
result = 31 * result + members.hashCode();
result = 31 * result + Arrays.hashCode(segmentOwners);
return result;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DefaultConsistentHash that = (DefaultConsistentHash) o;
if (numOwners != that.numOwners) return false;
if (segmentOwners.length != that.segmentOwners.length) return false;
if (!members.equals(that.members)) return false;
for (int i = 0; i < segmentOwners.length; i++) {
if (!segmentOwners[i].equals(that.segmentOwners[i]))
return false;
}
return true;
}
@Override
public String toString() {
OwnershipStatistics stats = new OwnershipStatistics(this, members);
StringBuilder sb = new StringBuilder("DefaultConsistentHash{");
sb.append("ns=").append(segmentOwners.length);
sb.append(", owners = (").append(members.size()).append(")[");
boolean first = true;
for (Address a : members) {
if (first) {
first = false;
} else {
sb.append(", ");
}
int primaryOwned = stats.getPrimaryOwned(a);
int owned = stats.getOwned(a);
sb.append(a).append(": ").append(primaryOwned).append('+').append(owned - primaryOwned);
}
sb.append("]}");
return sb.toString();
}
@Override
public String getRoutingTableAsString() {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < segmentOwners.length; i++) {
if (i > 0) {
sb.append(", ");
}
sb.append(i).append(':');
for (int j = 0; j < segmentOwners[i].size(); j++) {
sb.append(' ').append(members.indexOf(segmentOwners[i].get(j)));
}
}
return sb.toString();
}
/**
* Merges two consistent hash objects that have the same number of segments, numOwners and hash function.
* For each segment, the primary owner of the first CH has priority, the other primary owners become backups.
*/
public DefaultConsistentHash union(DefaultConsistentHash dch2) {
checkSameHashAndSegments(dch2);
if (numOwners != dch2.getNumOwners()) {
throw new IllegalArgumentException("The consistent hash objects must have the same number of owners");
}
List<Address> unionMembers = new ArrayList<>(this.members);
mergeLists(unionMembers, dch2.getMembers());
List<Address>[] unionSegmentOwners = new List[segmentOwners.length];
for (int i = 0; i < segmentOwners.length; i++) {
unionSegmentOwners[i] = new ArrayList<>(locateOwnersForSegment(i));
mergeLists(unionSegmentOwners[i], dch2.locateOwnersForSegment(i));
}
Map<Address, Float> unionCapacityFactors = unionCapacityFactors(dch2);
return new DefaultConsistentHash(numOwners, unionSegmentOwners.length, unionMembers, unionCapacityFactors, unionSegmentOwners);
}
public String prettyPrintOwnership() {
StringBuilder sb = new StringBuilder();
for (Address member : getMembers()) {
sb.append("\n").append(member).append(":");
for (int segment = 0; segment < segmentOwners.length; segment++) {
int index = segmentOwners[segment].indexOf(member);
if (index >= 0) {
sb.append(' ').append(segment);
if (index == 0) {
sb.append('\'');
}
}
}
}
return sb.toString();
}
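   // Sample output shape (illustrative): one line per member listing the segments
   // it owns, with a quote after each segment it primary-owns, e.g.
   //   NodeA: 0' 1
   //   NodeB: 0 1' 2'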
@Override
public void toScopedState(ScopedPersistentState state) {
super.toScopedState(state);
state.setProperty(STATE_NUM_OWNERS, numOwners);
state.setProperty(STATE_SEGMENT_OWNERS, segmentOwners.length);
for (int i = 0; i < segmentOwners.length; i++) {
List<Address> segmentOwnerAddresses = segmentOwners[i];
state.setProperty(String.format(STATE_SEGMENT_OWNER_COUNT, i), segmentOwnerAddresses.size());
for(int j = 0; j < segmentOwnerAddresses.size(); j++) {
state.setProperty(String.format(STATE_SEGMENT_OWNER, i, j),
segmentOwnerAddresses.get(j).toString());
}
}
}
@Override
public ConsistentHash remapAddresses(UnaryOperator<Address> remapper) {
List<Address> remappedMembers = remapMembers(remapper, false);
if (remappedMembers == null) return null;
// At this point, all members are present in the remapper.
Map<Address, Float> remappedCapacityFactors = remapCapacityFactors(remapper, false);
List<Address>[] remappedSegmentOwners = remapSegmentOwners(remapper, false);
return new DefaultConsistentHash(this.numOwners, this.segmentOwners.length, remappedMembers,
remappedCapacityFactors, remappedSegmentOwners);
}
@Override
public ConsistentHash remapAddressRemoveMissing(UnaryOperator<Address> remapper) {
List<Address> remappedMembers = remapMembers(remapper, true);
if (remappedMembers == null) return null;
Map<Address, Float> remappedCapacityFactors = remapCapacityFactors(remapper, true);
List<Address>[] remappedSegmentOwners = remapSegmentOwners(remapper, true);
return new DefaultConsistentHash(this.numOwners, this.segmentOwners.length, remappedMembers,
remappedCapacityFactors, remappedSegmentOwners);
}
private List<Address>[] remapSegmentOwners(UnaryOperator<Address> remapper, boolean allowMissing) {
List<Address>[] remappedSegmentOwners = new List[segmentOwners.length];
for(int i=0; i < segmentOwners.length; i++) {
List<Address> remappedOwners = new ArrayList<>(segmentOwners[i].size());
for (Address address : segmentOwners[i]) {
Address a = remapper.apply(address);
if (a == null) {
if (allowMissing) continue;
return null;
}
remappedOwners.add(a);
}
remappedSegmentOwners[i] = remappedOwners;
}
return remappedSegmentOwners;
}
public static class Externalizer extends InstanceReusingAdvancedExternalizer<DefaultConsistentHash> {
@Override
public void doWriteObject(ObjectOutput output, DefaultConsistentHash ch) throws IOException {
output.writeInt(ch.segmentOwners.length);
output.writeInt(ch.numOwners);
output.writeObject(ch.members);
output.writeObject(ch.capacityFactors);
// Avoid computing the identityHashCode for every ImmutableListCopy/Address
HashMap<Address, Integer> memberIndexes = getMemberIndexMap(ch.members);
for (int i = 0; i < ch.segmentOwners.length; i++) {
List<Address> owners = ch.segmentOwners[i];
output.writeInt(owners.size());
for (Address owner : owners) {
output.writeInt(memberIndexes.get(owner));
}
}
}
@Override
@SuppressWarnings("unchecked")
public DefaultConsistentHash doReadObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
int numSegments = unmarshaller.readInt();
int numOwners = unmarshaller.readInt();
List<Address> members = (List<Address>) unmarshaller.readObject();
float[] capacityFactors = (float[]) unmarshaller.readObject();
List<Address>[] segmentOwners = new List[numSegments];
for (int i = 0; i < numSegments; i++) {
int size = unmarshaller.readInt();
Address[] owners = new Address[size];
for (int j = 0; j < size; j++) {
int ownerIndex = unmarshaller.readInt();
owners[j] = members.get(ownerIndex);
}
segmentOwners[i] = Immutables.immutableListWrap(owners);
}
return new DefaultConsistentHash(numOwners, numSegments, members, capacityFactors, segmentOwners);
}
private HashMap<Address, Integer> getMemberIndexMap(List<Address> members) {
HashMap<Address, Integer> memberIndexes = new HashMap<>(members.size());
for (int i = 0; i < members.size(); i++) {
memberIndexes.put(members.get(i), i);
}
return memberIndexes;
}
@Override
public Integer getId() {
return Ids.DEFAULT_CONSISTENT_HASH;
}
@Override
public Set<Class<? extends DefaultConsistentHash>> getTypeClasses() {
return Collections.singleton(DefaultConsistentHash.class);
}
}
}
| 14,073
| 36.631016
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/SingleSegmentKeyPartitioner.java
|
package org.infinispan.distribution.ch.impl;
import org.infinispan.distribution.ch.KeyPartitioner;
/**
* KeyPartitioner that maps every key to segment 0. This can be useful when segments are not in use, such
* as in local or invalidation caches.
* @author wburns
* @since 9.3
*/
public class SingleSegmentKeyPartitioner implements KeyPartitioner {
private SingleSegmentKeyPartitioner() { }
private static final SingleSegmentKeyPartitioner INSTANCE = new SingleSegmentKeyPartitioner();
public static SingleSegmentKeyPartitioner getInstance() {
return INSTANCE;
}
@Override
public int getSegment(Object key) {
return 0;
}
}
| 671
| 25.88
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/KeyPartitionerFactory.java
|
package org.infinispan.distribution.ch.impl;
import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.group.impl.GroupManager;
import org.infinispan.distribution.group.impl.GroupingPartitioner;
import org.infinispan.factories.AbstractNamedCacheComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.factories.annotations.Inject;
/**
* Key partitioner factory that uses the hash function defined in the configuration.
*
* In the future, we will probably remove the hash function from the configuration and leave only the
* key partitioner.
*
* @author Dan Berindei
* @since 8.2
*/
@DefaultFactoryFor(classes = KeyPartitioner.class)
public class KeyPartitionerFactory extends AbstractNamedCacheComponentFactory
implements AutoInstantiableFactory {
@Inject GroupManager groupManager;
private KeyPartitioner getConfiguredPartitioner() {
HashConfiguration hashConfiguration = configuration.clustering().hash();
KeyPartitioner partitioner = hashConfiguration.keyPartitioner();
partitioner.init(hashConfiguration);
return partitioner;
}
@Override
public Object construct(String componentName) {
KeyPartitioner partitioner = getConfiguredPartitioner();
if (groupManager == null)
return new KeyPartitionerDelegate(partitioner, configuration);
// Grouping is enabled. Since the configured partitioner will not be registered in the component
// registry, we need to inject dependencies explicitly.
basicComponentRegistry.wireDependencies(partitioner, false);
return new KeyPartitionerDelegate(new GroupingPartitioner(partitioner, groupManager), configuration);
}
}
| 1,858
| 40.311111
| 107
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/SyncReplicatedConsistentHashFactory.java
|
package org.infinispan.distribution.ch.impl;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
/**
* {@link SyncConsistentHashFactory} adapted for replicated caches, so that the primary owner of a key
* is the same in replicated and distributed caches.
*
* @author Dan Berindei
* @since 8.2
*/
public class SyncReplicatedConsistentHashFactory implements ConsistentHashFactory<ReplicatedConsistentHash> {
private static final SyncConsistentHashFactory syncCHF = new SyncConsistentHashFactory();
@Override
public ReplicatedConsistentHash create(int numOwners, int numSegments,
List<Address> members, Map<Address, Float> capacityFactors) {
DefaultConsistentHash dch = syncCHF.create(1, numSegments, members, capacityFactors);
List<Address> membersWithoutState = computeMembersWithoutState(members, null, capacityFactors);
return replicatedFromDefault(dch, membersWithoutState);
}
@Override
public ReplicatedConsistentHash fromPersistentState(ScopedPersistentState state) {
String consistentHashClass = state.getProperty("consistentHash");
if (!ReplicatedConsistentHash.class.getName().equals(consistentHashClass))
throw CONTAINER.persistentConsistentHashMismatch(this.getClass().getName(), consistentHashClass);
return new ReplicatedConsistentHash(state);
}
private ReplicatedConsistentHash replicatedFromDefault(DefaultConsistentHash dch,
List<Address> membersWithoutState) {
int numSegments = dch.getNumSegments();
List<Address> members = dch.getMembers();
int[] primaryOwners = new int[numSegments];
for (int segment = 0; segment < numSegments; segment++) {
primaryOwners[segment] = members.indexOf(dch.locatePrimaryOwnerForSegment(segment));
}
return new ReplicatedConsistentHash(members, dch.getCapacityFactors(), membersWithoutState, primaryOwners);
}
@Override
public ReplicatedConsistentHash updateMembers(ReplicatedConsistentHash baseCH, List<Address> newMembers,
Map<Address, Float> actualCapacityFactors) {
DefaultConsistentHash baseDCH = defaultFromReplicated(baseCH);
DefaultConsistentHash dch = syncCHF.updateMembers(baseDCH, newMembers, actualCapacityFactors);
List<Address> membersWithoutState = computeMembersWithoutState(newMembers, baseCH.getMembers(), actualCapacityFactors);
return replicatedFromDefault(dch, membersWithoutState);
}
private DefaultConsistentHash defaultFromReplicated(ReplicatedConsistentHash baseCH) {
int numSegments = baseCH.getNumSegments();
List<Address>[] baseSegmentOwners = new List[numSegments];
for (int segment = 0; segment < numSegments; segment++) {
baseSegmentOwners[segment] = Collections.singletonList(baseCH.locatePrimaryOwnerForSegment(segment));
}
return new DefaultConsistentHash(1,
numSegments, baseCH.getMembers(), baseCH.getCapacityFactors(), baseSegmentOwners);
}
@Override
public ReplicatedConsistentHash rebalance(ReplicatedConsistentHash baseCH) {
return create(baseCH.getNumOwners(), baseCH.getNumSegments(), baseCH.getMembers(), baseCH.getCapacityFactors());
}
@Override
public ReplicatedConsistentHash union(ReplicatedConsistentHash ch1, ReplicatedConsistentHash ch2) {
return ch1.union(ch2);
}
static List<Address> computeMembersWithoutState(List<Address> newMembers, List<Address> oldMembers, Map<Address, Float> capacityFactors) {
List<Address> membersWithoutState = Collections.emptyList();
if (capacityFactors != null) {
boolean hasNodeWithCapacity = false;
for (Address a : newMembers) {
float capacityFactor = capacityFactors.get(a);
if (capacityFactor != 0f && capacityFactor != 1f) {
throw new IllegalArgumentException("Invalid replicated cache capacity factor for node " + a);
}
if (capacityFactor == 0f || (oldMembers != null && !oldMembers.contains(a))) {
if (membersWithoutState.isEmpty()) {
membersWithoutState = new ArrayList<>();
}
membersWithoutState.add(a);
} else {
hasNodeWithCapacity = true;
}
}
if (!hasNodeWithCapacity) {
throw new IllegalArgumentException("There must be at least one node with a non-zero capacity factor");
}
}
return membersWithoutState;
}
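   // Worked example (added for clarity): newMembers = [A, B, C] with capacity
   // factors {A=1, B=0, C=1} and oldMembers = [A, B] yields [B, C]: B because its
   // capacity factor is 0, C because it is a new joiner that has no state yet.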
public static class Externalizer extends AbstractExternalizer<SyncReplicatedConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, SyncReplicatedConsistentHashFactory chf) {
}
@Override
public SyncReplicatedConsistentHashFactory readObject(ObjectInput unmarshaller) {
return new SyncReplicatedConsistentHashFactory();
}
@Override
public Integer getId() {
return Ids.SYNC_REPLICATED_CONSISTENT_HASH_FACTORY;
}
@Override
public Set<Class<? extends SyncReplicatedConsistentHashFactory>> getTypeClasses() {
return Collections.singleton(SyncReplicatedConsistentHashFactory.class);
}
}
}
| 5,707
| 41.917293
| 141
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/HashFunctionPartitioner.java
|
package org.infinispan.distribution.ch.impl;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.infinispan.commons.hash.Hash;
import org.infinispan.commons.hash.MurmurHash3;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;
/**
* Key partitioner that computes a key's segment based on a hash function.
*
* @author Dan Berindei
* @since 8.2
*/
public class HashFunctionPartitioner implements KeyPartitioner, Cloneable {
private Hash hashFunction;
private int numSegments;
private int segmentSize;
public HashFunctionPartitioner() {}
// Should only be used by tests
public HashFunctionPartitioner(int numSegments) {
init(numSegments);
}
public static HashFunctionPartitioner instance(int numSegments) {
HashFunctionPartitioner partitioner = new HashFunctionPartitioner();
partitioner.init(numSegments);
return partitioner;
}
@Override
public void init(HashConfiguration configuration) {
Objects.requireNonNull(configuration);
init(configuration.numSegments());
}
@Override
public void init(KeyPartitioner other) {
if (other instanceof HashFunctionPartitioner) {
HashFunctionPartitioner o = (HashFunctionPartitioner) other;
if (o.numSegments > 0) { // The other HFP has been initialized, so we can use it
init(o.numSegments);
}
}
}
private void init(int numSegments) {
if (numSegments <= 0) {
throw new IllegalArgumentException("numSegments must be strictly positive");
}
this.hashFunction = getHash();
this.numSegments = numSegments;
this.segmentSize = Util.getSegmentSize(numSegments);
}
@Override
public int getSegment(Object key) {
// The result must always be positive, so we make sure the dividend is positive first
return (hashFunction.hash(key) & Integer.MAX_VALUE) / segmentSize;
}
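   // Worked example (added for clarity): with numSegments = 256, segmentSize is
   // 2^31 / 256 = 8388608, so a key whose masked hash is 25000000 maps to
   // segment 25000000 / 8388608 = 2.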
protected Hash getHash() {
return MurmurHash3.getInstance();
}
public List<Integer> getSegmentEndHashes() {
List<Integer> hashes = new ArrayList<>(numSegments);
for (int i = 0; i < numSegments; i++) {
hashes.add(((i + 1) % numSegments) * segmentSize);
}
return hashes;
}
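   // Worked example (added for clarity): with numSegments = 4, segmentSize is
   // 2^31 / 4 = 536870912 and the end hashes are [536870912, 1073741824,
   // 1610612736, 0]; the last segment's end wraps around to 0.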
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
HashFunctionPartitioner that = (HashFunctionPartitioner) o;
if (numSegments != that.numSegments)
return false;
return Objects.equals(hashFunction, that.hashFunction);
}
@Override
public int hashCode() {
int result = hashFunction != null ? hashFunction.hashCode() : 0;
result = 31 * result + numSegments;
return result;
}
@Override
public String toString() {
return "HashFunctionPartitioner{" +
"hashFunction=" + hashFunction +
", ns=" + numSegments +
'}';
}
}
| 3,109
| 27.272727
| 91
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/AbstractConsistentHash.java
|
package org.infinispan.distribution.ch.impl;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.function.UnaryOperator;
import org.infinispan.commons.hash.Hash;
import org.infinispan.commons.util.Util;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.PersistentUUID;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
public abstract class AbstractConsistentHash implements ConsistentHash {
// State constants
protected static final String STATE_CAPACITY_FACTOR = "capacityFactor.%d";
protected static final String STATE_CAPACITY_FACTORS = "capacityFactors";
protected static final String STATE_NUM_SEGMENTS = "numSegments";
/**
* The membership of the cache topology that uses this CH.
*/
protected final List<Address> members;
protected final float[] capacityFactors;
protected AbstractConsistentHash(int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
if (numSegments < 1)
throw new IllegalArgumentException("The number of segments must be strictly positive");
this.members = new ArrayList<>(members);
if (capacityFactors == null) {
this.capacityFactors = null;
} else {
this.capacityFactors = new float[members.size()];
for (int i = 0; i < this.capacityFactors.length; i++) {
this.capacityFactors[i] = capacityFactors.get(members.get(i));
}
}
}
protected AbstractConsistentHash(int numSegments, List<Address> members, float[] capacityFactors) {
if (numSegments < 1)
throw new IllegalArgumentException("The number of segments must be strictly positive");
this.members = members;
this.capacityFactors = capacityFactors;
}
protected AbstractConsistentHash(ScopedPersistentState state) {
this(parseNumSegments(state), parseMembers(state), parseCapacityFactors(state));
}
protected static int parseNumSegments(ScopedPersistentState state) {
return state.getIntProperty(STATE_NUM_SEGMENTS);
}
protected static List<Address> parseMembers(ScopedPersistentState state) {
int numMembers = Integer.parseInt(state.getProperty(ConsistentHashPersistenceConstants.STATE_MEMBERS));
List<Address> members = new ArrayList<>(numMembers);
for(int i = 0; i < numMembers; i++) {
PersistentUUID uuid = PersistentUUID.fromString(state.getProperty(String.format(ConsistentHashPersistenceConstants.STATE_MEMBER, i)));
members.add(uuid);
}
return members;
}
protected static Hash parseHashFunction(ScopedPersistentState state) {
return Util.getInstance(state.getProperty(ConsistentHashPersistenceConstants.STATE_HASH_FUNCTION), null);
}
protected static float[] parseCapacityFactors(ScopedPersistentState state) {
int numCapacityFactors = Integer.parseInt(state.getProperty(STATE_CAPACITY_FACTORS));
float[] capacityFactors = new float[numCapacityFactors];
for (int i = 0; i < numCapacityFactors; i++) {
capacityFactors[i] = Float.parseFloat(state.getProperty(String.format(STATE_CAPACITY_FACTOR, i)));
}
return capacityFactors;
}
@Override
public void toScopedState(ScopedPersistentState state) {
state.setProperty(ConsistentHashPersistenceConstants.STATE_CONSISTENT_HASH, this.getClass().getName());
state.setProperty(STATE_NUM_SEGMENTS, getNumSegments());
state.setProperty(ConsistentHashPersistenceConstants.STATE_MEMBERS, members.size());
for (int i = 0; i < members.size(); i++) {
state.setProperty(String.format(ConsistentHashPersistenceConstants.STATE_MEMBER, i),
members.get(i).toString());
}
state.setProperty(STATE_CAPACITY_FACTORS, capacityFactors.length);
for (int i = 0; i < capacityFactors.length; i++) {
state.setProperty(String.format(STATE_CAPACITY_FACTOR, i), capacityFactors[i]);
}
}
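   // Hedged sketch (added comment, not in the original source): for a consistent
   // hash over two members A and B with capacity factors 1.0 and 2.0 and 256
   // segments, the scoped state written above looks roughly like this (the exact
   // member strings depend on the Address/PersistentUUID form):
   //
   //    consistentHash=org.infinispan.distribution.ch.impl.DefaultConsistentHash
   //    numSegments=256
   //    members=2
   //    member.0=<uuid of A>
   //    member.1=<uuid of B>
   //    capacityFactors=2
   //    capacityFactor.0=1.0
   //    capacityFactor.1=2.0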
@Override
public List<Address> getMembers() {
return members;
}
/**
* Adds all elements from <code>src</code> list that do not already exist in <code>dest</code> list to the latter.
*
* @param dest List where elements are added
* @param src List of elements to add - this is never modified
*/
protected static void mergeLists(List<Address> dest, List<Address> src) {
for (Address node : src) {
if (!dest.contains(node)) {
dest.add(node);
}
}
}
static HashMap<Address, Integer> getMemberIndexMap(List<Address> members) {
HashMap<Address, Integer> memberIndexes = new HashMap<>(members.size());
for (int i = 0; i < members.size(); i++) {
memberIndexes.put(members.get(i), i);
}
return memberIndexes;
}
public Map<Address, Float> getCapacityFactors() {
if (capacityFactors == null)
return null;
Map<Address, Float> capacityFactorsMap = new HashMap<>(members.size());
for (int i = 0; i < members.size(); i++) {
capacityFactorsMap.put(members.get(i), capacityFactors[i]);
}
return capacityFactorsMap;
}
protected Map<Address, Float> unionCapacityFactors(AbstractConsistentHash ch2) {
Map<Address, Float> unionCapacityFactors = null;
if (this.capacityFactors != null || ch2.capacityFactors != null) {
unionCapacityFactors = new HashMap<>();
if (this.capacityFactors != null) {
unionCapacityFactors.putAll(this.getCapacityFactors());
} else {
for (Address node : this.members) {
unionCapacityFactors.put(node, 1.0f);
}
}
if (ch2.capacityFactors != null) {
unionCapacityFactors.putAll(ch2.getCapacityFactors());
} else {
for (Address node : ch2.members) {
unionCapacityFactors.put(node, 1.0f);
}
}
}
return unionCapacityFactors;
}
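   // Hedged example (added comment, not in the original source): members of a CH
   // without explicit capacity factors default to 1.0f in the union, and later
   // puts win for shared members. E.g. this CH with factors {A=2.0} merged with
   // a ch2 over members {A, B} that has null factors yields {A=1.0, B=1.0},
   // because ch2's defaults are written after this CH's explicit factors.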
protected void checkSameHashAndSegments(AbstractConsistentHash dch2) {
int numSegments = getNumSegments();
if (numSegments != dch2.getNumSegments()) {
throw new IllegalArgumentException("The consistent hash objects must have the same number of segments");
}
}
protected Map<Address, Float> remapCapacityFactors(UnaryOperator<Address> remapper, boolean allowMissing) {
Map<Address, Float> remappedCapacityFactors = null;
if (capacityFactors != null) {
remappedCapacityFactors = new HashMap<>(members.size());
for(int i=0; i < members.size(); i++) {
Address a = remapper.apply(members.get(i));
if (a == null) {
if (allowMissing) continue;
return null;
}
remappedCapacityFactors.put(a, capacityFactors[i]);
}
}
return remappedCapacityFactors;
}
protected List<Address> remapMembers(UnaryOperator<Address> remapper, boolean allowMissing) {
List<Address> remappedMembers = new ArrayList<>(members.size());
for(Iterator<Address> i = members.iterator(); i.hasNext(); ) {
Address a = remapper.apply(i.next());
if (a == null) {
if (allowMissing) continue;
return null;
}
remappedMembers.add(a);
}
return remappedMembers;
}
}
| 7,520
| 37.177665
| 143
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/ConsistentHashPersistenceConstants.java
|
package org.infinispan.distribution.ch.impl;
/**
* Constants used as keys within a persisted consistent hash
*
* @author Tristan Tarrant
* @since 8.2
*/
public class ConsistentHashPersistenceConstants {
public static final String STATE_CONSISTENT_HASH = "consistentHash";
public static final String STATE_HASH_FUNCTION = "hashFunction";
public static final String STATE_MEMBER = "member.%d";
public static final String STATE_MEMBERS = "members";
public static final String STATE_MEMBER_NO_ENTRIES = "memberNoEntries.%d";
public static final String STATE_MEMBERS_NO_ENTRIES = "membersNoEntries";
}
| 627
| 35.941176
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/TopologyAwareConsistentHashFactory.java
|
package org.infinispan.distribution.ch.impl;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.distribution.topologyaware.TopologyInfo;
import org.infinispan.distribution.topologyaware.TopologyLevel;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.TopologyAwareAddress;
import org.infinispan.util.KeyValuePair;
/**
* Default topology-aware consistent hash factory implementation.
*
* @author Dan Berindei
* @since 5.2
*/
public class TopologyAwareConsistentHashFactory extends DefaultConsistentHashFactory {
@Override
protected void addBackupOwners(Builder builder) {
TopologyInfo topologyInfo = new TopologyInfo(builder.getNumSegments(), builder.getActualNumOwners(),
builder.getMembers(), builder.getCapacityFactors());
// 1. Remove extra owners (could be leftovers from addPrimaryOwners).
// Don't worry about location information yet.
removeExtraBackupOwners(builder);
// 2. If owners(segment) < numOwners, add new owners.
// Unlike the parent class, we allow many more segments for one node just in order to get
// as many different sites, racks and machines in the same owner list.
addBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.SITE);
addBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.RACK);
addBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.MACHINE);
addBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.NODE);
// 3. Now owners(segment) == numOwners for every segment because of steps 1 and 2.
replaceBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.SITE);
replaceBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.RACK);
replaceBackupOwnersForLevel(builder, topologyInfo, TopologyLevel.MACHINE);
// Replace owners that have too many segments with owners that have too few.
replaceBackupOwnerNoLevel(builder, topologyInfo);
}
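   // Hedged illustration (added comment, not in the original source): the level
   // ordering above prefers diversity top-down. For candidates
   //    n1(site=s1, rack=r1), n2(site=s1, rack=r2), n3(site=s2, rack=r1)
   // and a segment currently owned only by n1, n3 is picked as the first backup
   // because it adds a new site; n2 would only add a new rack within s1 and is
   // only considered at the RACK pass that follows.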
private void addBackupOwnersForLevel(Builder builder, TopologyInfo topologyInfo, TopologyLevel level) {
      // In the first phase, a new owner must own fewer segments than its expected share.
      // It may not be possible to fill all the segments with numOwners owners this way,
      // so we repeat this in a loop, each iteration with a higher limit of owned segments.
int extraSegments = 0;
while (doAddBackupOwnersForLevel(builder, topologyInfo, level, extraSegments)) {
extraSegments++;
}
}
private boolean doAddBackupOwnersForLevel(Builder builder, TopologyInfo topologyInfo, TopologyLevel level, int extraSegments) {
boolean sufficientOwners = true;
for (int segment = 0; segment < builder.getNumSegments(); segment++) {
List<Address> owners = builder.getOwners(segment);
if (owners.size() >= builder.getActualNumOwners())
continue;
int maxDistinctLocations = Math.min(topologyInfo.getDistinctLocationsCount(level),
builder.getActualNumOwners());
int distinctLocations = topologyInfo.getDistinctLocationsCount(level, owners);
if (distinctLocations == maxDistinctLocations)
continue;
float totalCapacity = topologyInfo.computeTotalCapacity(builder.getMembers(), builder.getCapacityFactors());
for (Address candidate : builder.getMembers()) {
float nodeExtraSegments = extraSegments * builder.getCapacityFactor(candidate) / totalCapacity;
int maxSegments = (int) (topologyInfo.getExpectedOwnedSegments(candidate) + nodeExtraSegments);
if (builder.getOwned(candidate) < maxSegments) {
if (!topologyInfo.duplicateLocation(level, owners, candidate, false)) {
builder.addOwner(segment, candidate);
distinctLocations++;
// The owners list is live, no need to query it again
if (owners.size() >= builder.getActualNumOwners())
break;
}
}
}
if (distinctLocations < maxDistinctLocations && owners.size() < builder.getActualNumOwners()) {
sufficientOwners = false;
}
}
return !sufficientOwners;
}
private void replaceBackupOwnersForLevel(Builder builder, TopologyInfo topologyInfo, TopologyLevel level) {
int extraSegments = 0;
while (doReplaceBackupOwnersForLevel(builder, topologyInfo, level, extraSegments)) {
extraSegments++;
}
}
private boolean doReplaceBackupOwnersForLevel(Builder builder, TopologyInfo topologyInfo,
TopologyLevel level, int extraSegments) {
boolean sufficientLocations = true;
// At this point each segment already has actualNumOwners owners.
for (int segment = 0; segment < builder.getNumSegments(); segment++) {
List<Address> owners = builder.getOwners(segment);
int maxDistinctLocations = Math.min(topologyInfo.getDistinctLocationsCount(level),
builder.getActualNumOwners());
int distinctLocations = topologyInfo.getDistinctLocationsCount(level, owners);
if (distinctLocations == maxDistinctLocations)
continue;
float totalCapacity = topologyInfo.computeTotalCapacity(builder.getMembers(), builder.getCapacityFactors());
for (int i = owners.size() - 1; i >= 1; i--) {
Address owner = owners.get(i);
if (topologyInfo.duplicateLocation(level, owners, owner, true)) {
// Got a duplicate site/rack/machine, we might have an alternative for it.
for (Address candidate : builder.getMembers()) {
float expectedSegments = topologyInfo.getExpectedOwnedSegments(candidate);
float nodeExtraSegments = extraSegments * builder.getCapacityFactor(candidate) / totalCapacity;
int maxSegments = (int) (expectedSegments + nodeExtraSegments);
if (builder.getOwned(candidate) < maxSegments) {
if (!topologyInfo.duplicateLocation(level, owners, candidate, false)) {
builder.addOwner(segment, candidate);
builder.removeOwner(segment, owner);
distinctLocations++;
// The owners list is live, no need to query it again
break;
}
}
}
}
}
if (distinctLocations < maxDistinctLocations) {
sufficientLocations = false;
}
}
return !sufficientLocations;
}
private void replaceBackupOwnerNoLevel(Builder builder, TopologyInfo topologyInfo) {
// 3.1. If there is an owner with owned(owner) > maxSegments, find another node
// with owned(node) < maxSegments and replace that owner with it.
doReplaceBackupOwnersNoLevel(builder, topologyInfo, -1, 0);
// 3.2. Same as step 3.1, but also replace owners that own maxSegments segments.
// Doing this in a separate iteration minimizes the number of moves from nodes with
// owned(node) == maxSegments, when numOwners*numSegments doesn't divide evenly with numNodes.
doReplaceBackupOwnersNoLevel(builder, topologyInfo, -1, -1);
// 3.3. Same as step 3.1, but allow replacing with nodes that already have owned(node) = maxSegments - 1.
// Necessary when numOwners*numSegments doesn't divide evenly with numNodes,
// because all nodes could own maxSegments - 1 segments and yet one node could own
// maxSegments + (numOwners*numSegments % numNodes) segments.
doReplaceBackupOwnersNoLevel(builder, topologyInfo, 0, 0);
}
private void doReplaceBackupOwnersNoLevel(Builder builder, TopologyInfo topologyInfo,
int minSegmentsDiff, int maxSegmentsDiff) {
// Iterate over the owners in the outer loop so that we minimize the number of owner changes
// for the same segment. At this point each segment already has actualNumOwners owners.
for (int ownerIdx = builder.getActualNumOwners() - 1; ownerIdx >= 1; ownerIdx--) {
for (int segment = 0; segment < builder.getNumSegments(); segment++) {
List<Address> owners = builder.getOwners(segment);
Address owner = owners.get(ownerIdx);
int maxSegments = (int) (topologyInfo.getExpectedOwnedSegments(owner) + maxSegmentsDiff);
if (builder.getOwned(owner) > maxSegments) {
// Owner has too many segments. Find another node to replace it with.
for (Address candidate : builder.getMembers()) {
int minSegments = (int) (topologyInfo.getExpectedOwnedSegments(candidate) + minSegmentsDiff);
if (builder.getOwned(candidate) < minSegments) {
if (!owners.contains(candidate) && maintainsDiversity(owners, candidate, owner)) {
builder.addOwner(segment, candidate);
builder.removeOwner(segment, owner);
// The owners list is live, no need to query it again
break;
}
}
}
}
}
}
}
private Object getLocationId(Address address, TopologyLevel level) {
TopologyAwareAddress taa = (TopologyAwareAddress) address;
Object locationId;
switch (level) {
case SITE:
locationId = taa.getSiteId();
break;
case RACK:
locationId = new KeyValuePair<>(taa.getSiteId(), taa.getRackId());
break;
case MACHINE:
locationId = new KeyValuePair<>(taa.getSiteId(), new KeyValuePair<>(taa.getRackId(), taa.getMachineId()));
break;
case NODE:
locationId = address;
break;
default:
throw new IllegalStateException("Unknown level: " + level);
}
return locationId;
}
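   // Hedged example (added comment, not in the original source): for an address
   // with siteId "s1", rackId "r1", machineId "m1" the location ids nest as
   //    SITE    -> "s1"
   //    RACK    -> KeyValuePair("s1", "r1")
   //    MACHINE -> KeyValuePair("s1", KeyValuePair("r1", "m1"))
   //    NODE    -> the address itself
   // so two racks that are both named "r1" but live in different sites still
   // compare as distinct locations.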
private boolean maintainsDiversity(List<Address> owners, Address candidate, Address replaced) {
return maintainsDiversity(owners, candidate, replaced, TopologyLevel.SITE)
&& maintainsDiversity(owners, candidate, replaced, TopologyLevel.RACK)
&& maintainsDiversity(owners, candidate, replaced, TopologyLevel.MACHINE);
}
private boolean maintainsDiversity(List<Address> owners, Address candidate, Address replaced, TopologyLevel level) {
Set<Object> oldLocations = new HashSet<>(owners.size());
Set<Object> newLocations = new HashSet<>(owners.size());
newLocations.add(getLocationId(candidate, level));
for (Address node : owners) {
oldLocations.add(getLocationId(node, level));
if (!node.equals(replaced)) {
newLocations.add(getLocationId(node, level));
}
}
return newLocations.size() >= oldLocations.size();
}
public static class Externalizer extends AbstractExternalizer<TopologyAwareConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, TopologyAwareConsistentHashFactory chf) {
}
@Override
@SuppressWarnings("unchecked")
public TopologyAwareConsistentHashFactory readObject(ObjectInput unmarshaller) {
return new TopologyAwareConsistentHashFactory();
}
@Override
public Integer getId() {
return Ids.TOPOLOGY_AWARE_CONSISTENT_HASH_FACTORY;
}
@Override
public Set<Class<? extends TopologyAwareConsistentHashFactory>> getTypeClasses() {
return Collections.<Class<? extends TopologyAwareConsistentHashFactory>>singleton(TopologyAwareConsistentHashFactory.class);
}
}
}
| 12,133
| 46.584314
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/distribution/ch/impl/AffinityPartitioner.java
|
package org.infinispan.distribution.ch.impl;
import org.infinispan.distribution.ch.AffinityTaggedKey;
/**
* Key partitioner that maps keys to segments using information contained in {@link AffinityTaggedKey}.
 * <p>If the segment is not defined (value -1) or the key is not an {@link AffinityTaggedKey}, this partitioner falls back to a {@link HashFunctionPartitioner}.</p>
*
* @author gustavonalle
* @since 8.2
*/
public class AffinityPartitioner extends HashFunctionPartitioner {
@Override
public int getSegment(Object key) {
if (key instanceof AffinityTaggedKey) {
int affinitySegmentId = ((AffinityTaggedKey) key).getAffinitySegmentId();
if (affinitySegmentId != -1) {
return affinitySegmentId;
}
}
return super.getSegment(key);
}
}
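// Hedged usage sketch (added, not in the original source): a key can pin itself to
// a segment; any other key, or an affinity tag of -1, is routed through the hash fallback.
//
//    class PinnedKey implements AffinityTaggedKey {         // hypothetical key type
//       private final String id;
//       private final int segment;                          // -1 = no affinity
//       PinnedKey(String id, int segment) { this.id = id; this.segment = segment; }
//       @Override public int getAffinitySegmentId() { return segment; }
//    }
//
//    // partitioner.getSegment(new PinnedKey("k", 7)) == 7
//    // partitioner.getSegment(new PinnedKey("k", -1)) uses the hash of the key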
| 785
| 29.230769
| 137
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateTransferLock.java
|
package org.infinispan.statetransfer;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.configuration.cache.StateTransferConfiguration;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* We use the state transfer lock for three different things:
* <ol>
* <li>We don't want to execute a command until we have the transaction table for that topology id.
* For this purpose it works like a latch, commands wait on the latch and state transfer opens the latch
* when it has received all the transaction data for that topology id.</li>
* <li>Do not write anything to the data container in a segment that we have already removed.
* For this purpose, ownership checks and data container writes acquire a shared lock, and
* the segment removal acquires an exclusive lock.</li>
* <li>We want to handle state requests only after we have installed the same topology id, because
* this guarantees that we also have installed the corresponding view id and we have all the joiners
* in our JGroups view. Here it works like a latch as well, state requests wait on the latch and state
* transfer opens the latch when it has received all the transaction data for that topology id.</li>
* </ol>
*
* @author anistor@redhat.com
* @author Dan Berindei
* @since 5.2
*/
@Scope(Scopes.NAMED_CACHE)
public interface StateTransferLock {
// topology change lock
void acquireExclusiveTopologyLock();
void releaseExclusiveTopologyLock();
void acquireSharedTopologyLock();
void releaseSharedTopologyLock();
// transaction data latch
void notifyTransactionDataReceived(int topologyId);
/**
* @return a stage that completes successfully when topology {@code expectedTopologyId}
* has been installed and transaction data has been received,
* or with a {@link org.infinispan.util.concurrent.TimeoutException}
* after {@link ClusteringConfiguration#remoteTimeout()} expires.
*/
CompletionStage<Void> transactionDataFuture(int expectedTopologyId);
boolean transactionDataReceived(int expectedTopologyId);
// topology installation latch
// TODO move this to Cluster/LocalTopologyManagerImpl and don't start requesting state until every node has the jgroups view with the local node
void notifyTopologyInstalled(int topologyId);
/**
* @return a stage that completes successfully when topology {@code expectedTopologyId}
* has been installed, or with a {@link org.infinispan.util.concurrent.TimeoutException}
* after {@link StateTransferConfiguration#timeout()} expires.
*/
CompletionStage<Void> topologyFuture(int expectedTopologyId);
@Deprecated
default void waitForTopology(int expectedTopologyId, long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
try {
CompletionStage<Void> topologyFuture = topologyFuture(expectedTopologyId);
topologyFuture.toCompletableFuture().get(timeout, unit);
} catch (ExecutionException e) {
throw new CacheException(e.getCause());
}
}
boolean topologyReceived(int expectedTopologyId);
}
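// Hedged usage sketch (added, not in the original source): a caller that must not
// run a command before the transaction data for its topology has arrived can chain
// on the latch-style future instead of blocking:
//
//    CompletionStage<Void> ready = stateTransferLock.transactionDataFuture(cmdTopologyId);
//    return ready.thenCompose(ignored -> invokeNext(ctx, command));   // hypothetical continuation
//
// while short sections that write to the data container bracket their work with
// acquireSharedTopologyLock()/releaseSharedTopologyLock().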
| 3,431
| 41.37037
| 147
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/OutboundTransferTask.java
|
package org.infinispan.statetransfer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
/**
* Outbound state transfer task. Pushes data segments to another cluster member on request. Instances of
* OutboundTransferTask are created and managed by StateTransferManagerImpl. There should be at most
* one such task per destination at any time.
*
* @author anistor@redhat.com
* @since 5.2
*/
public class OutboundTransferTask {
private static final Log log = LogFactory.getLog(OutboundTransferTask.class);
private final Consumer<Collection<StateChunk>> onChunkReplicated;
private final int topologyId;
private final Address destination;
private final IntSet segments;
private final int chunkSize;
private final RpcManager rpcManager;
private final CommandsFactory commandsFactory;
private final long timeout;
private final String cacheName;
private final boolean applyState;
private final RpcOptions rpcOptions;
private volatile boolean cancelled;
public OutboundTransferTask(Address destination, IntSet segments, int segmentCount, int chunkSize, int topologyId,
Consumer<Collection<StateChunk>> onChunkReplicated, RpcManager rpcManager,
CommandsFactory commandsFactory, long timeout, String cacheName, boolean applyState) {
if (segments == null || segments.isEmpty()) {
throw new IllegalArgumentException("Segments must not be null or empty");
}
if (destination == null) {
throw new IllegalArgumentException("Destination address cannot be null");
}
if (chunkSize <= 0) {
throw new IllegalArgumentException("chunkSize must be greater than 0");
}
this.onChunkReplicated = onChunkReplicated;
this.destination = destination;
this.segments = IntSets.concurrentCopyFrom(segments, segmentCount);
this.chunkSize = chunkSize;
this.topologyId = topologyId;
this.rpcManager = rpcManager;
this.commandsFactory = commandsFactory;
this.timeout = timeout;
this.cacheName = cacheName;
this.applyState = applyState;
this.rpcOptions = new RpcOptions(DeliverOrder.NONE, timeout, TimeUnit.MILLISECONDS);
}
public Address getDestination() {
return destination;
}
public IntSet getSegments() {
return segments;
}
public int getTopologyId() {
return topologyId;
}
/**
* Starts sending entries from the data container and the first loader with fetch persistent data enabled
* to the target node.
*
* @return a completion stage that completes when all the entries have been sent.
* @param notifications a {@code Flowable} with all the entries that need to be sent
*/
public CompletionStage<Void> execute(Flowable<SegmentPublisherSupplier.Notification<InternalCacheEntry<?, ?>>> notifications) {
return notifications
.buffer(chunkSize)
.takeUntil(batch -> cancelled)
            // Here we receive a batch of notifications, a list of up to chunkSize elements.
            // The list does not hold only data entries: notifications also signal lost and
            // completed segments. This means that even though we batch the data, the final
            // chunk for a segment can be smaller than chunkSize.
            // This could be improved.
.concatMapCompletable(batch -> {
Map<Integer, StateChunk> chunks = new HashMap<>();
for(SegmentPublisherSupplier.Notification<InternalCacheEntry<?, ?>> notification: batch) {
if (notification.isValue()) {
StateChunk chunk = chunks.computeIfAbsent(
notification.valueSegment(), segment -> new StateChunk(segment, new ArrayList<>(), false));
chunk.getCacheEntries().add(notification.value());
}
               // If the notification indicates the segment is complete, we mark its chunk as the last chunk.
if (notification.isSegmentComplete()) {
int segment = notification.completedSegment();
chunks.compute(segment, (s, previous) -> previous == null
? new StateChunk(s, Collections.emptyList(), true)
: new StateChunk(segment, previous.getCacheEntries(), true));
}
}
return Completable.fromCompletionStage(sendChunks(chunks));
}, 1)
.toCompletionStage(null);
}
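   // Hedged illustration (added comment, not in the original source): with
   // chunkSize = 3, a notification stream
   //    value(seg=0, e1), value(seg=0, e2), segmentComplete(seg=0)
   // fills one buffer but produces a single StateChunk(0, [e1, e2], isLastChunk=true),
   // so the chunk actually sent carries only two entries even though the buffer
   // itself was "full".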
private CompletionStage<Void> sendChunks(Map<Integer, StateChunk> chunks) {
if (chunks.isEmpty())
return CompletableFutures.completedNull();
if (log.isTraceEnabled()) {
long entriesSize = chunks.values().stream().mapToInt(v -> v.getCacheEntries().size()).sum();
log.tracef("Sending to node %s %d cache entries from segments %s", destination, entriesSize, chunks.keySet());
}
StateResponseCommand cmd = commandsFactory.buildStateResponseCommand(topologyId, chunks.values(), applyState);
try {
return rpcManager.invokeCommand(destination, cmd, SingleResponseCollector.validOnly(), rpcOptions)
.handle((response, throwable) -> {
if (throwable == null) {
onChunkReplicated.accept(chunks.values());
return null;
}
logSendException(throwable);
cancel();
return null;
});
} catch (IllegalLifecycleStateException e) {
// Manager is shutting down, ignore the error
cancel();
} catch (Exception e) {
logSendException(e);
cancel();
}
return CompletableFutures.completedNull();
}
private void logSendException(Throwable throwable) {
Throwable t = CompletableFutures.extractException(throwable);
if (t instanceof SuspectException) {
log.debugf("Node %s left cache %s while we were sending state to it, cancelling transfer.",
destination, cacheName);
} else if (isCancelled()) {
log.debugf("Stopping cancelled transfer to node %s, segments %s", destination, segments);
} else {
log.errorf(t, "Failed to send entries to node %s: %s", destination, t.getMessage());
}
}
/**
* Cancel some of the segments. If all segments get cancelled then the whole task will be cancelled.
*
* @param cancelledSegments segments to cancel.
*/
void cancelSegments(IntSet cancelledSegments) {
if (segments.removeAll(cancelledSegments)) {
if (log.isTraceEnabled()) {
log.tracef("Cancelling outbound transfer to node %s, segments %s (remaining segments %s)",
destination, cancelledSegments, segments);
}
if (segments.isEmpty()) {
cancel();
}
}
}
/**
* Cancel the whole task.
*/
public void cancel() {
if (!cancelled) {
log.debugf("Cancelling outbound transfer to node %s, segments %s", destination, segments);
cancelled = true;
}
}
public boolean isCancelled() {
return cancelled;
}
@Override
public String toString() {
return "OutboundTransferTask{" +
"topologyId=" + topologyId +
", destination=" + destination +
", segments=" + segments +
", chunkSize=" + chunkSize +
", timeout=" + timeout +
", cacheName='" + cacheName + '\'' +
'}';
}
}
| 8,998
| 37.788793
| 130
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateConsumer.java
|
package org.infinispan.statetransfer;
import java.util.Collection;
import java.util.concurrent.CompletionStage;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
/**
* Handles inbound state transfers.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Scope(Scopes.NAMED_CACHE)
public interface StateConsumer {
boolean isStateTransferInProgress();
boolean isStateTransferInProgressForKey(Object key);
/**
* Returns the number of in-flight requested segments.
*/
long inflightRequestCount();
/**
* Returns the number of in-flight transactional requested segments.
*/
long inflightTransactionSegmentCount();
/**
    * Receive notification of topology changes. {@link org.infinispan.commands.statetransfer.StateTransferStartCommand}s
    * are issued for the segments that are new to this
    * member, and the segments that are no longer owned are discarded.
*
* @return completion stage that is completed when the topology update is processed,
* wrapping another completion stage that is completed when the state transfer has finished
*/
CompletionStage<CompletionStage<Void>> onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance);
CompletionStage<?> applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks);
/**
* Cancels all incoming state transfers. The already received data is not discarded.
* This is executed when the cache is shutting down.
*/
void stop();
/**
* Stops applying incoming state. Also stops tracking updated keys. Should be called at the end of state transfer or
* when a ClearCommand is committed during state transfer.
*
* @param topologyId Topology id at the end of state transfer
*/
void stopApplyingState(int topologyId);
/**
* @return true if this node has already received the first rebalance command
*/
boolean ownsData();
}
| 2,055
| 30.630769
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/package-info.java
|
/**
* Transfer of state to new caches in a cluster.
*
* @api.private
*/
package org.infinispan.statetransfer;
| 114
| 15.428571
| 48
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/AllOwnersLostException.java
|
package org.infinispan.statetransfer;
import org.infinispan.commons.CacheException;
/**
* Signals that all owners of a key have been lost.
*/
public class AllOwnersLostException extends CacheException {
public static final AllOwnersLostException INSTANCE = new AllOwnersLostException();
private AllOwnersLostException() {
super(null, null, false, false);
}
}
| 379
| 24.333333
| 86
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateTransferManager.java
|
package org.infinispan.statetransfer;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.configuration.cache.StateTransferConfiguration;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
/**
* A component that manages the state transfer when the topology of the cluster changes.
*
* @author Dan Berindei <dan@infinispan.org>
* @author Mircea Markus
* @author anistor@redhat.com
* @since 5.1
*/
@Scope(Scopes.NAMED_CACHE)
public interface StateTransferManager {
//todo [anistor] this is inaccurate. this node does not hold state yet in current implementation
boolean isJoinComplete();
/**
* Checks if an inbound state transfer is in progress.
*/
boolean isStateTransferInProgress();
/**
* Returns the number of requested segments to be transferred.
*/
long getInflightSegmentTransferCount();
/**
* Returns the number of transactional segments requested which are still in-flight.
*/
long getInflightTransactionalSegmentCount();
/**
* Checks if an inbound state transfer is in progress for a given key.
*
* @deprecated since 10.0; to be removed in next major version
*/
@Deprecated
default boolean isStateTransferInProgressForKey(Object key) {
return getStateConsumer().isStateTransferInProgressForKey(key);
}
void start() throws Exception;
/**
* Wait for the local cache to receive initial state from the other members.
*
* <p>Does nothing if {@link StateTransferConfiguration#awaitInitialTransfer()} is disabled.</p>
*/
void waitForInitialStateTransferToComplete();
void stop();
/**
    * If there is a state transfer happening at the moment, this method forwards the supplied command to the nodes that
    * are the new owners of the data, in order to ensure consistency.
*
* @deprecated Since 14.0. To be removed without replacement.
*/
@Deprecated
default Map<Address, Response> forwardCommandIfNeeded(TopologyAffectedCommand command, Set<Object> affectedKeys, Address origin) {
return Collections.emptyMap();
}
String getRebalancingStatus() throws Exception;
StateConsumer getStateConsumer();
StateProvider getStateProvider();
}
| 2,452
| 29.283951
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/InboundTransferTask.java
|
package org.infinispan.statetransfer;
import static org.infinispan.util.concurrent.CompletionStages.handleAndCompose;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.PassthroughSingleResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
/**
 * Inbound state transfer task. Fetches multiple data segments from a remote source node and applies them to the local
* cache. Instances of InboundTransferTask are created and managed by StateTransferManagerImpl. StateTransferManagerImpl
* must have zero or one such task for each segment.
*
* @author anistor@redhat.com
* @since 5.2
*/
public class InboundTransferTask {
private static final Log log = LogFactory.getLog(InboundTransferTask.class);
@GuardedBy("segments")
private final IntSet segments;
@GuardedBy("segments")
private final IntSet unfinishedSegments;
private final Address source;
private volatile boolean isCancelled = false;
/**
* This latch is counted down when all segments are completely received or in case of task cancellation.
*/
private final CompletableFuture<Void> completionFuture = new CompletableFuture<>();
private final int topologyId;
private final RpcManager rpcManager;
private final CommandsFactory commandsFactory;
private final long timeout;
private final String cacheName;
private final boolean applyState;
private final RpcOptions rpcOptions;
public InboundTransferTask(IntSet segments, Address source, int topologyId, RpcManager rpcManager,
CommandsFactory commandsFactory, long timeout, String cacheName, boolean applyState) {
if (segments == null || segments.isEmpty()) {
throw new IllegalArgumentException("segments must not be null or empty");
}
if (source == null) {
throw new IllegalArgumentException("Source address cannot be null");
}
this.segments = IntSets.mutableCopyFrom(segments);
this.unfinishedSegments = IntSets.mutableCopyFrom(segments);
this.source = source;
this.topologyId = topologyId;
this.rpcManager = rpcManager;
this.commandsFactory = commandsFactory;
this.timeout = timeout;
this.cacheName = cacheName;
this.applyState = applyState;
this.rpcOptions = new RpcOptions(DeliverOrder.NONE, timeout, TimeUnit.MILLISECONDS);
}
/**
* Returns a copy of segments currently tied to this task
* @return copy of segments
*/
public IntSet getSegments() {
synchronized (segments) {
return IntSets.mutableCopyFrom(segments);
}
}
/**
* @return a copy of the unfinished segments
*/
public IntSet getUnfinishedSegments() {
synchronized (segments) {
return IntSets.mutableCopyFrom(unfinishedSegments);
}
}
public Address getSource() {
return source;
}
/**
* Send START_STATE_TRANSFER request to source node.
*
* @return a {@code CompletableFuture} that completes when the transfer is done.
*/
public CompletionStage<Void> requestSegments() {
return startTransfer(applyState ?
segments -> commandsFactory.buildStateTransferStartCommand(topologyId, segments) :
segments -> commandsFactory.buildConflictResolutionStartCommand(topologyId, segments));
}
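   // Hedged usage sketch (added comment, not in the original source): a typical
   // lifecycle, assuming the chunk accessors shown exist on StateChunk:
   //
   //    InboundTransferTask task = new InboundTransferTask(segments, source, topologyId,
   //          rpcManager, commandsFactory, timeout, cacheName, true);
   //    CompletionStage<Void> done = task.requestSegments();
   //    // for every StateChunk received from the source:
   //    task.onStateReceived(chunk.getSegmentId(), chunk.isLastChunk());
   //    // 'done' completes once the last chunk of every segment has arrived.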
/**
* Request the segments from the source
*
* @return A {@code CompletionStage} that completes when the segments have been applied
*/
private CompletionStage<Void> startTransfer(Function<IntSet, CacheRpcCommand> transferCommand) {
if (isCancelled)
return completionFuture;
IntSet segmentsCopy = getSegments();
if (segmentsCopy.isEmpty()) {
if (log.isTraceEnabled()) log.tracef("Segments list is empty, skipping source %s", source);
completionFuture.complete(null);
return completionFuture;
}
CacheRpcCommand cmd = transferCommand.apply(segmentsCopy);
if (log.isTraceEnabled()) {
log.tracef("Requesting state (%s) from node %s for segments %s", cmd, source, segmentsCopy);
}
CompletionStage<Response> remoteStage =
rpcManager.invokeCommand(source, cmd, PassthroughSingleResponseCollector.INSTANCE, rpcOptions);
return handleAndCompose(remoteStage, (response, throwable) -> {
if (throwable != null) {
if (!isCancelled) {
log.failedToRequestSegments(cacheName, source, segmentsCopy, throwable);
completionFuture.completeExceptionally(throwable);
}
} else if (response instanceof SuccessfulResponse) {
if (log.isTraceEnabled()) {
log.tracef("Successfully requested state (%s) from node %s for segments %s",
cmd, source, segmentsCopy);
}
} else if (response instanceof CacheNotFoundResponse) {
if (log.isTraceEnabled()) log.tracef("State source %s was suspected, another source will be selected", source);
completionFuture.completeExceptionally(new SuspectException());
} else {
Exception e = new CacheException(String.valueOf(response));
log.failedToRequestSegments(cacheName, source, segmentsCopy, e);
completionFuture.completeExceptionally(e);
}
return completionFuture;
});
}
/**
* Cancels a set of segments and marks them as finished.
*
* If all segments are cancelled then the whole task is cancelled, as if {@linkplain #cancel()} was called.
*
* @param cancelledSegments the segments to be cancelled
*/
public void cancelSegments(IntSet cancelledSegments) {
if (isCancelled) {
throw new IllegalArgumentException("The task is already cancelled.");
}
if (log.isTraceEnabled()) {
log.tracef("Partially cancelling inbound state transfer from node %s, segments %s", source, cancelledSegments);
}
synchronized (segments) {
// healthy paranoia
if (!segments.containsAll(cancelledSegments)) {
throw new IllegalArgumentException("Some of the specified segments cannot be cancelled because they were not previously requested");
}
unfinishedSegments.removeAll(cancelledSegments);
if (unfinishedSegments.isEmpty()) {
isCancelled = true;
}
}
sendCancelCommand(cancelledSegments);
if (isCancelled) {
notifyCompletion(false);
}
}
/**
* Cancels all the segments and marks them as finished, sends a cancel command, then completes the task.
*/
public void cancel() {
if (!isCancelled) {
isCancelled = true;
IntSet segmentsCopy = getUnfinishedSegments();
synchronized (segments) {
unfinishedSegments.clear();
}
if (log.isTraceEnabled()) {
log.tracef("Cancelling inbound state transfer from %s with unfinished segments %s", source, segmentsCopy);
}
sendCancelCommand(segmentsCopy);
notifyCompletion(false);
}
}
public boolean isCancelled() {
return isCancelled;
}
private void sendCancelCommand(IntSet cancelledSegments) {
CacheRpcCommand cmd = commandsFactory.buildStateTransferCancelCommand(topologyId, cancelledSegments);
try {
rpcManager.sendTo(source, cmd, DeliverOrder.NONE);
} catch (Exception e) {
// Ignore exceptions here, the worst that can happen is that the provider will send some extra state
log.debugf("Caught an exception while cancelling state transfer from node %s for segments %s",
source, cancelledSegments);
}
}
public void onStateReceived(int segmentId, boolean isLastChunk) {
if (!isCancelled && isLastChunk) {
boolean isCompleted = false;
synchronized (segments) {
if (segments.contains(segmentId)) {
unfinishedSegments.remove(segmentId);
if (unfinishedSegments.isEmpty()) {
log.debugf("Finished receiving state for segments %s", segments);
isCompleted = true;
}
}
}
if (isCompleted) {
notifyCompletion(true);
}
}
}
private void notifyCompletion(boolean success) {
if (success) {
completionFuture.complete(null);
} else {
completionFuture.completeExceptionally(new CancellationException("Inbound transfer was cancelled"));
}
}
public boolean isCompletedSuccessfully() {
return completionFuture.isDone() && !completionFuture.isCompletedExceptionally();
}
/**
    * Terminate abruptly regardless of whether the segments were received or not. This is used when the source node
* is no longer alive.
*/
public void terminate() {
notifyCompletion(false);
}
@Override
public String toString() {
synchronized (segments) {
return "InboundTransferTask{" +
"segments=" + segments +
", unfinishedSegments=" + unfinishedSegments +
", source=" + source +
", isCancelled=" + isCancelled +
", completionFuture=" + completionFuture +
", topologyId=" + topologyId +
", timeout=" + timeout +
", cacheName=" + cacheName +
'}';
}
}
}
| 10,430
| 34.359322
| 144
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateProvider.java
|
package org.infinispan.statetransfer;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commons.util.IntSet;
import org.infinispan.conflict.impl.StateReceiver;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.cachelistener.cluster.ClusterListenerReplicateCallable;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
/**
* Handles outbound state transfers.
*
* @author anistor@redhat.com
* @since 5.2
*/
@Scope(Scopes.NAMED_CACHE)
public interface StateProvider {
boolean isStateTransferInProgress();
/**
* Receive notification of topology changes. Cancels all outbound transfers to destinations that are no longer members.
* The other outbound transfers remain unaffected.
* @param cacheTopology
* @param isRebalance
*/
CompletableFuture<Void> onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance);
/**
* Gets the list of transactions that affect keys from the given segments. This is invoked in response to a
* {@link org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand}.
*
* @param destination the address of the requester
* @param topologyId required topology before we can start collecting transactions
* @param segments only return transactions affecting these segments
* @return a {@code CompletionStage} that completes with the list transactions and locks for the given segments
*/
CompletionStage<List<TransactionInfo>> getTransactionsForSegments(Address destination, int topologyId, IntSet segments);
Collection<ClusterListenerReplicateCallable<Object, Object>> getClusterListenersToInstall();
/**
* Start to send cache entries that belong to the given set of segments. This is invoked in response to a
* {@link org.infinispan.commands.statetransfer.StateTransferStartCommand}.
*
* If the applyState field is set to false, then upon delivery at the destination the cache entries are processed
* by a {@link StateReceiver} and are not applied to the local cache.
* @param destination the address of the requester
* @param topologyId
* @param segments
* @param applyState
*/
void startOutboundTransfer(Address destination, int topologyId, IntSet segments, boolean applyState);
/**
* Cancel sending of cache entries that belong to the given set of segments. This is invoked in response to a
* {@link org.infinispan.commands.statetransfer.StateTransferCancelCommand}.
*
* @param destination the address of the requester
* @param topologyId
* @param segments the segments that we have to cancel transfer for
*/
void cancelOutboundTransfer(Address destination, int topologyId, IntSet segments);
void start();
/**
* Cancels all outbound state transfers.
* This is executed when the cache is shutting down.
*/
void stop();
}
| 3,094
| 38.177215
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/TransactionSynchronizerInterceptor.java
|
package org.infinispan.statetransfer;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.TransactionBoundaryCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* With the Non-Blocking State Transfer (NBST) in place it is possible for a transactional command to be forwarded
* multiple times, concurrently to the same node. This interceptor makes sure that for any given transaction, the
 * interceptor chain, post {@link StateTransferInterceptor}, only allows a single thread to amend a transaction.
 * <p>
* E.g. of when this situation might occur:
* <ul>
* <li>1) Node A broadcasts PrepareCommand to nodes B, C </li>
* <li>2) Node A leaves cluster, causing new topology to be installed </li>
* <li>3) The command arrives to B and C, with lower topology than the current one</li>
* <li>4) Both B and C forward the command to node D</li>
 * <li>5) D executes the two commands in parallel and finds out that A has left, therefore executing a RollbackCommand</li>
* </ul>
* <p/>
 * This interceptor must be placed after the logic that handles command forwarding ({@link StateTransferInterceptor}),
* otherwise we can end up in deadlocks when a command is forwarded in a loop to the same cache: e.g. A→B→C→A. This
* scenario is possible when we have chained topology changes (see <a href="https://issues.jboss.org/browse/ISPN-2578">ISPN-2578</a>).
*
* @author Mircea Markus
* @since 5.2
*/
public class TransactionSynchronizerInterceptor extends BaseAsyncInterceptor {
private static final Log log = LogFactory.getLog(TransactionSynchronizerInterceptor.class);
@Override
public Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
if (ctx.isOriginLocal() || !(command instanceof TransactionBoundaryCommand)) {
return invokeNext(ctx, command);
}
CompletableFuture<Void> releaseFuture = new CompletableFuture<>();
RemoteTransaction remoteTransaction = ((TxInvocationContext<RemoteTransaction>) ctx).getCacheTransaction();
Object result = asyncInvokeNext(ctx, command, remoteTransaction.enterSynchronizationAsync(releaseFuture));
return makeStage(result).andFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
log.tracef("Completing tx command release future for %s", remoteTransaction);
releaseFuture.complete(null);
});
}
}
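// Hedged note (added, not in the original source): the pattern above is effectively
// a per-transaction queue. Each forwarded TransactionBoundaryCommand enters
// remoteTransaction.enterSynchronizationAsync(releaseFuture) and only completes
// releaseFuture in andFinally, so a second command forwarded concurrently for the
// same transaction chains behind the first instead of racing it.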
| 2,739
| 50.698113
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateTransferManagerImpl.java
|
package org.infinispan.statetransfer;
import static org.infinispan.globalstate.GlobalConfigurationManager.CONFIG_STATE_CACHE_NAME;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.Function;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.container.versioning.irac.IracVersionGenerator;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.SyncConsistentHashFactory;
import org.infinispan.distribution.ch.impl.SyncReplicatedConsistentHashFactory;
import org.infinispan.distribution.ch.impl.TopologyAwareSyncConsistentHashFactory;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.globalstate.GlobalStateManager;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.persistence.manager.PreloadManager;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.topology.CacheJoinInfo;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.CacheTopologyHandler;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.irac.IracManager;
/**
* {@link StateTransferManager} implementation.
*
* @author anistor@redhat.com
* @since 5.2
*/
@MBean(objectName = "StateTransferManager", description = "Component that handles state transfer")
@Scope(Scopes.NAMED_CACHE)
public class StateTransferManagerImpl implements StateTransferManager {
private static final Log log = LogFactory.getLog(StateTransferManagerImpl.class);
@ComponentName(KnownComponentNames.CACHE_NAME)
@Inject protected String cacheName;
@Inject StateConsumer stateConsumer;
@Inject StateProvider stateProvider;
@Inject PartitionHandlingManager partitionHandlingManager;
@Inject DistributionManager distributionManager;
@Inject CacheNotifier<?, ?> cacheNotifier;
@Inject Configuration configuration;
@Inject GlobalConfiguration globalConfiguration;
@Inject RpcManager rpcManager;
@Inject LocalTopologyManager localTopologyManager;
@Inject KeyPartitioner keyPartitioner;
@Inject GlobalStateManager globalStateManager;
// Only join the cluster after preloading
@Inject PreloadManager preloadManager;
// Make sure we can handle incoming requests before joining
@Inject PerCacheInboundInvocationHandler inboundInvocationHandler;
@Inject IracManager iracManager;
@Inject IracVersionGenerator iracVersionGenerator;
private final CompletableFuture<Void> initialStateTransferComplete = new CompletableFuture<>();
@Start(priority = 60)
@Override
public void start() throws Exception {
if (log.isTraceEnabled()) {
log.tracef("Starting StateTransferManager of cache %s on node %s", cacheName, rpcManager.getAddress());
}
Optional<Integer> persistentStateChecksum;
if (globalStateManager != null) {
persistentStateChecksum = globalStateManager.readScopedState(cacheName).map(ScopedPersistentState::getChecksum);
} else {
persistentStateChecksum = Optional.empty();
}
float capacityFactor = globalConfiguration.isZeroCapacityNode() && !CONFIG_STATE_CACHE_NAME.equals(cacheName) ? 0.0f :
configuration.clustering().hash().capacityFactor();
CacheJoinInfo joinInfo = new CacheJoinInfo(pickConsistentHashFactory(globalConfiguration, configuration),
configuration.clustering().hash().numSegments(),
configuration.clustering().hash().numOwners(),
configuration.clustering().stateTransfer().timeout(),
configuration.clustering().cacheMode(),
capacityFactor,
localTopologyManager.getPersistentUUID(),
persistentStateChecksum);
CompletionStage<CacheTopology> stage = localTopologyManager.join(cacheName, joinInfo, new CacheTopologyHandler() {
@Override
public CompletionStage<Void> updateConsistentHash(CacheTopology cacheTopology) {
return doTopologyUpdate(cacheTopology, false);
}
@Override
public CompletionStage<Void> rebalance(CacheTopology cacheTopology) {
return doTopologyUpdate(cacheTopology, true);
}
}, partitionHandlingManager);
CacheTopology initialTopology = CompletionStages.join(stage);
if (log.isTraceEnabled()) {
log.tracef("StateTransferManager of cache %s on node %s received initial topology %s", cacheName, rpcManager.getAddress(), initialTopology);
}
}
/**
    * If no ConsistentHashFactory was explicitly configured, we choose a suitable one based on the cache mode.
*/
public static ConsistentHashFactory pickConsistentHashFactory(GlobalConfiguration globalConfiguration, Configuration configuration) {
ConsistentHashFactory factory = configuration.clustering().hash().consistentHashFactory();
if (factory == null) {
CacheMode cacheMode = configuration.clustering().cacheMode();
if (cacheMode.isClustered()) {
if (cacheMode.isDistributed()) {
if (globalConfiguration.transport().hasTopologyInfo()) {
factory = new TopologyAwareSyncConsistentHashFactory();
} else {
factory = new SyncConsistentHashFactory();
}
} else if (cacheMode.isReplicated() || cacheMode.isInvalidation()) {
factory = new SyncReplicatedConsistentHashFactory();
} else {
throw new CacheException("Unexpected cache mode: " + cacheMode);
}
}
}
return factory;
}
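   // Hedged summary (added comment, not in the original source) of the selection above:
   //
   //    DIST  + topology info     -> TopologyAwareSyncConsistentHashFactory
   //    DIST  without topology    -> SyncConsistentHashFactory
   //    REPL / INVALIDATION       -> SyncReplicatedConsistentHashFactory
   //    LOCAL (not clustered)     -> null (no consistent hash factory needed)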
private CompletionStage<Void> doTopologyUpdate(CacheTopology newCacheTopology, boolean isRebalance) {
CacheTopology oldCacheTopology = distributionManager.getCacheTopology();
int newTopologyId = newCacheTopology.getTopologyId();
if (oldCacheTopology != null && oldCacheTopology.getTopologyId() > newTopologyId) {
throw new IllegalStateException(
"Old topology is higher: old=" + oldCacheTopology + ", new=" + newCacheTopology);
}
if (log.isTraceEnabled()) {
log.tracef("Installing new cache topology %s on cache %s", newCacheTopology, cacheName);
}
// No need for extra synchronization here, since LocalTopologyManager already serializes topology updates.
if (newCacheTopology.getMembers().contains(rpcManager.getAddress())) {
if (!distributionManager.getCacheTopology().isConnected() ||
!distributionManager.getCacheTopology().getMembersSet().contains(rpcManager.getAddress())) {
if (log.isTraceEnabled())
log.tracef("This is the first topology %d in which the local node is a member", newTopologyId);
inboundInvocationHandler.setFirstTopologyAsMember(newTopologyId);
}
}
int newRebalanceId = newCacheTopology.getRebalanceId();
CacheTopology.Phase phase = newCacheTopology.getPhase();
iracManager.onTopologyUpdate(oldCacheTopology, newCacheTopology);
return cacheNotifier.notifyTopologyChanged(oldCacheTopology, newCacheTopology, newTopologyId, true)
.thenCompose(
ignored -> updateProviderAndConsumer(isRebalance, newTopologyId, newCacheTopology, newRebalanceId, phase)
).thenCompose(
ignored -> cacheNotifier.notifyTopologyChanged(oldCacheTopology, newCacheTopology, newTopologyId, false)
).thenRun(() -> {
completeInitialTransferIfNeeded(newCacheTopology, phase);
partitionHandlingManager.onTopologyUpdate(newCacheTopology);
iracVersionGenerator.onTopologyChange(newCacheTopology);
});
}
private CompletionStage<?> updateProviderAndConsumer(boolean isRebalance, int newTopologyId, CacheTopology newCacheTopology,
int newRebalanceId, CacheTopology.Phase phase) {
CompletionStage<CompletionStage<Void>> consumerUpdateFuture =
stateConsumer.onTopologyUpdate(newCacheTopology, isRebalance);
CompletionStage<Void> consumerTransferFuture = consumerUpdateFuture.thenCompose(Function.identity());
CompletableFuture<Void> providerFuture = stateProvider.onTopologyUpdate(newCacheTopology, isRebalance);
consumerTransferFuture.runAfterBoth(providerFuture, () -> {
switch (phase) {
case READ_OLD_WRITE_ALL:
case READ_ALL_WRITE_ALL:
case READ_NEW_WRITE_ALL:
localTopologyManager.confirmRebalancePhase(cacheName, newTopologyId, newRebalanceId, null);
}
});
// Block topology updates until the consumer finishes applying the topology update
return consumerUpdateFuture;
}
private void completeInitialTransferIfNeeded(CacheTopology newCacheTopology, CacheTopology.Phase phase) {
if (!initialStateTransferComplete.isDone()) {
assert distributionManager.getCacheTopology().getTopologyId() == newCacheTopology.getTopologyId();
boolean isJoined = phase == CacheTopology.Phase.NO_REBALANCE &&
newCacheTopology.getReadConsistentHash().getMembers().contains(rpcManager.getAddress());
if (isJoined) {
initialStateTransferComplete.complete(null);
log.tracef("Initial state transfer complete for cache %s on node %s", cacheName, rpcManager.getAddress());
}
}
}
@Override
public void waitForInitialStateTransferToComplete() {
if (configuration.clustering().stateTransfer().awaitInitialTransfer()) {
try {
if (!localTopologyManager.isCacheRebalancingEnabled(cacheName) ||
partitionHandlingManager.getAvailabilityMode() == AvailabilityMode.DEGRADED_MODE) {
initialStateTransferComplete.complete(null);
}
if (log.isTraceEnabled())
log.tracef("Waiting for initial state transfer to finish for cache %s on %s", cacheName,
rpcManager.getAddress());
initialStateTransferComplete.get(configuration.clustering().stateTransfer().timeout(),
TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
throw log.initialStateTransferTimeout(cacheName, rpcManager.getAddress());
} catch (CacheException e) {
throw e;
} catch (Exception e) {
throw new CacheException(e);
}
}
}
@Stop(priority = 0)
@Override
public void stop() {
if (log.isTraceEnabled()) {
log.tracef("Shutting down StateTransferManager of cache %s on node %s", cacheName, rpcManager.getAddress());
}
initialStateTransferComplete.complete(null);
localTopologyManager.leave(cacheName, configuration.clustering().remoteTimeout());
}
@ManagedAttribute(description = "If true, the node has successfully joined the grid and is considered to hold state. If false, the join process is still in progress.", displayName = "Is join completed?", dataType = DataType.TRAIT)
@Override
public boolean isJoinComplete() {
return initialStateTransferComplete.isDone();
}
@ManagedAttribute(description = "Retrieves the rebalancing status for this cache. Possible values are PENDING, SUSPENDED, IN_PROGRESS, COMPLETE", displayName = "Rebalancing progress", dataType = DataType.TRAIT)
@Override
public String getRebalancingStatus() throws Exception {
return localTopologyManager.getRebalancingStatus(cacheName).toString();
}
@ManagedAttribute(description = "Checks whether the local node is receiving state from other nodes", displayName = "Is state transfer in progress?", dataType = DataType.TRAIT)
@Override
public boolean isStateTransferInProgress() {
return stateConsumer.isStateTransferInProgress();
}
@ManagedAttribute(description = "The number of in-flight segments the local node requested from other nodes", displayName = "In-flight requested segments", dataType = DataType.MEASUREMENT)
@Override
public long getInflightSegmentTransferCount() {
return stateConsumer.inflightRequestCount();
}
@ManagedAttribute(description = "The number of in-flight transactional segments the local node requested from other nodes", displayName = "In-flight requested transactional segments", dataType = DataType.MEASUREMENT)
@Override
public long getInflightTransactionalSegmentCount() {
return stateConsumer.inflightTransactionSegmentCount();
}
@Override
public StateConsumer getStateConsumer() {
return stateConsumer;
}
@Override
public StateProvider getStateProvider() {
return stateProvider;
}
@Override
public String toString() {
return "StateTransferManagerImpl [" + cacheName + "@" + rpcManager.getAddress() + "]";
}
}
| 14,204
| 46.508361
| 234
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateTransferInterceptor.java
|
package org.infinispan.statetransfer;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.tx.TransactionBoundaryCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.InvalidateL1Command;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.InvocationFinallyFunction;
import org.infinispan.interceptors.impl.BaseStateTransferInterceptor;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.responses.UnsureResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* This interceptor has two tasks:
* <ol>
* <li>If the command's topology id is higher than the current topology id,
* wait for the node to receive transaction data for the new topology id.</li>
* <li>If the topology id changed during a command's execution, retry the command, but only on the
* originator (which replicates it to the new owners).</li>
* </ol>
*
* If the cache is configured with asynchronous replication, owners cannot signal to the originator that they
* saw a new topology, so instead each owner forwards the command to all the other owners in the new topology.
*
* @author anistor@redhat.com
*/
public class StateTransferInterceptor extends BaseStateTransferInterceptor {
private static final Log log = LogFactory.getLog(StateTransferInterceptor.class);
private final InvocationFinallyFunction<TransactionBoundaryCommand> handleTxReturn = this::handleTxReturn;
private final InvocationFinallyFunction<WriteCommand> handleTxWriteReturn = this::handleTxWriteReturn;
private final InvocationFinallyFunction<WriteCommand> handleNonTxWriteReturn = this::handleNonTxWriteReturn;
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command)
throws Throwable {
return handleTxCommand(ctx, command);
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command)
throws Throwable {
return handleTxCommand(ctx, command);
}
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command)
throws Throwable {
return handleTxCommand(ctx, command);
}
@Override
public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command)
throws Throwable {
if (log.isTraceEnabled()) log.tracef("handleTxCommand for command %s, origin %s", command, getOrigin(ctx));
updateTopologyId(command);
return invokeNextAndHandle(ctx, command, handleTxReturn);
}
@Override
public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitIracPutKeyValueCommand(InvocationContext ctx, IracPutKeyValueCommand command) {
return handleNonTxWriteCommand(ctx, command);
}
@Override
public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitComputeCommand(InvocationContext ctx, ComputeCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitComputeIfAbsentCommand(InvocationContext ctx, ComputeIfAbsentCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitInvalidateCommand(InvocationContext ctx, InvalidateCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitInvalidateL1Command(InvocationContext ctx, InvalidateL1Command command)
throws Throwable {
// no need to forward this command
return invokeNext(ctx, command);
}
@Override
public Object visitEvictCommand(InvocationContext ctx, EvictCommand command)
throws Throwable {
// it's not necessary to propagate eviction to the new owners in case of state transfer
return invokeNext(ctx, command);
}
@Override
public Object visitReadWriteKeyValueCommand(InvocationContext ctx,
ReadWriteKeyValueCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteKeyCommand(InvocationContext ctx, ReadWriteKeyCommand command)
throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyCommand(InvocationContext ctx, WriteOnlyKeyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyEntriesCommand(InvocationContext ctx, WriteOnlyManyEntriesCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyKeyValueCommand(InvocationContext ctx, WriteOnlyKeyValueCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitWriteOnlyManyCommand(InvocationContext ctx, WriteOnlyManyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteManyCommand(InvocationContext ctx, ReadWriteManyCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
@Override
public Object visitReadWriteManyEntriesCommand(InvocationContext ctx, ReadWriteManyEntriesCommand command) throws Throwable {
return handleWriteCommand(ctx, command);
}
/**
* Special processing required for transaction commands.
*/
private Object handleTxCommand(TxInvocationContext ctx, TransactionBoundaryCommand command) {
if (log.isTraceEnabled()) log.tracef("handleTxCommand for command %s, origin %s", command, getOrigin(ctx));
updateTopologyId(command);
return invokeNextAndHandle(ctx, command, handleTxReturn);
}
private Address getOrigin(TxInvocationContext ctx) {
// For local commands we may not have a GlobalTransaction yet
return ctx.isOriginLocal() ? ctx.getOrigin() : ctx.getGlobalTransaction().getAddress();
}
private Object handleTxReturn(InvocationContext ctx, TransactionBoundaryCommand txCommand, Object rv, Throwable t)
throws Throwable {
int retryTopologyId = -1;
int currentTopology = currentTopologyId();
if (t instanceof OutdatedTopologyException || t instanceof AllOwnersLostException) {
// This can only happen on the originator
retryTopologyId = Math.max(currentTopology, txCommand.getTopologyId() + 1);
} else if (t != null) {
throw t;
}
if (ctx.isOriginLocal()) {
// On the originator, we only retry if we got an OutdatedTopologyException
// Which could be caused either by an owner leaving or by an owner having a newer topology
// No need to retry just because we have a new topology on the originator, all entries were
// wrapped anyway
if (retryTopologyId > 0) {
// Only the originator can retry the command
txCommand.setTopologyId(retryTopologyId);
if (txCommand instanceof PrepareCommand) {
((PrepareCommand) txCommand).setRetriedCommand(true);
}
CompletionStage<Void> transactionDataStage = stateTransferLock.transactionDataFuture(retryTopologyId);
return retryWhenDone(transactionDataStage, retryTopologyId, ctx, txCommand, handleTxReturn);
}
} else {
if (currentTopology > txCommand.getTopologyId()) {
// Signal the originator to retry
return UnsureResponse.INSTANCE;
}
}
return rv;
}
private Object handleWriteCommand(InvocationContext ctx, WriteCommand command) {
if (ctx.isInTxScope()) {
return handleTxWriteCommand(ctx, command);
} else {
return handleNonTxWriteCommand(ctx, command);
}
}
private Object handleTxWriteCommand(InvocationContext ctx, WriteCommand command) {
if (log.isTraceEnabled()) log.tracef("handleTxWriteCommand for command %s, origin %s", command, ctx.getOrigin());
updateTopologyId(command);
return invokeNextAndHandle(ctx, command, handleTxWriteReturn);
}
private Object handleTxWriteReturn(InvocationContext rCtx, WriteCommand writeCommand, Object rv, Throwable t)
throws Throwable {
int retryTopologyId = -1;
if (t instanceof OutdatedTopologyException || t instanceof AllOwnersLostException) {
// This can only happen on the originator
retryTopologyId = Math.max(currentTopologyId(), writeCommand.getTopologyId() + 1);
} else if (t != null) {
throw t;
}
if (rCtx.isOriginLocal()) {
// On the originator, we only retry if we got an OutdatedTopologyException
// Which could be caused either by an owner leaving or by an owner having a newer topology
// No need to retry just because we have a new topology on the originator, all entries were
// wrapped anyway
if (retryTopologyId > 0) {
// Only the originator can retry the command
writeCommand.setTopologyId(retryTopologyId);
CompletionStage<Void> transactionDataStage = stateTransferLock.transactionDataFuture(retryTopologyId);
return retryWhenDone(transactionDataStage, retryTopologyId, rCtx, writeCommand, handleTxWriteReturn);
}
} else {
if (currentTopologyId() > writeCommand.getTopologyId()) {
// Signal the originator to retry
return UnsureResponse.INSTANCE;
}
}
return rv;
}
/**
* For non-tx write commands, we retry the command locally if the topology changed.
* But we only retry on the originator, and only if the command doesn't have
* the {@code CACHE_MODE_LOCAL} flag.
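*
* <p>A rough sketch of the retry path (simplified from {@code handleExceptionOnNonTxWriteReturn}
* below; the real topology id comes from {@code getNewTopologyId}):</p>
* <pre>{@code
* command.setTopologyId(newTopologyId);
* command.addFlags(FlagBitSets.COMMAND_RETRY);
* CompletionStage<Void> ready = stateTransferLock.transactionDataFuture(newTopologyId);
* return retryWhenDone(ready, newTopologyId, ctx, command, handleNonTxWriteReturn);
* }</pre>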
*/
private Object handleNonTxWriteCommand(InvocationContext ctx, WriteCommand command) {
if (log.isTraceEnabled()) log.tracef("handleNonTxWriteCommand for command %s, topology id %d", command, command.getTopologyId());
updateTopologyId(command);
// Only catch OutdatedTopologyExceptions on the originator
if (!ctx.isOriginLocal()) {
return invokeNext(ctx, command);
}
return invokeNextAndHandle(ctx, command, handleNonTxWriteReturn);
}
private Object handleExceptionOnNonTxWriteReturn(InvocationContext rCtx, WriteCommand writeCommand, Throwable t) throws Throwable {
Throwable ce = t;
while (ce instanceof RemoteException) {
ce = ce.getCause();
}
if (!(ce instanceof OutdatedTopologyException) && !(ce instanceof SuspectException) && !(ce instanceof AllOwnersLostException))
throw t;
// We increment the topology id so that updateTopologyIdAndWaitForTransactionData waits for the
// next topology.
// Without this, we could retry the command too fast and we could get the
// OutdatedTopologyException again.
int currentTopologyId = currentTopologyId();
int newTopologyId = getNewTopologyId(ce, currentTopologyId, writeCommand);
if (log.isTraceEnabled())
log.tracef("Retrying command because of %s, current topology is %d (requested: %d): %s",
ce, currentTopologyId, newTopologyId, writeCommand);
writeCommand.setTopologyId(newTopologyId);
writeCommand.addFlags(FlagBitSets.COMMAND_RETRY);
// In non-tx context, waiting for transaction data is equal to waiting for topology
CompletionStage<Void> transactionDataStage = stateTransferLock.transactionDataFuture(newTopologyId);
return retryWhenDone(transactionDataStage, newTopologyId, rCtx, writeCommand, handleNonTxWriteReturn);
}
private Object handleNonTxWriteReturn(InvocationContext rCtx, WriteCommand rCommand, Object rv, Throwable t)
throws Throwable {
if (t == null)
return rv;
// Kept as a separate method so that handleNonTxWriteReturn can be inlined, since exceptions should occur rarely
return handleExceptionOnNonTxWriteReturn(rCtx, rCommand, t);
}
@Override
public Object handleDefault(InvocationContext ctx, VisitableCommand command)
throws Throwable {
if (command instanceof TopologyAffectedCommand) {
return handleTopologyAffectedCommand(ctx, command, ctx.getOrigin());
} else {
return invokeNext(ctx, command);
}
}
private Object handleTopologyAffectedCommand(InvocationContext ctx,
VisitableCommand command, Address origin) {
if (log.isTraceEnabled()) log.tracef("handleTopologyAffectedCommand for command %s, origin %s", command, origin);
updateTopologyId((TopologyAffectedCommand) command);
return invokeNext(ctx, command);
}
@Override
protected Log getLog() {
return log;
}
}
| 15,303
| 40.250674
| 135
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/OutdatedTopologyException.java
|
package org.infinispan.statetransfer;
import org.infinispan.commons.CacheException;
/**
* An exception signalling that a command should be retried because a newer topology was seen during execution.
*
* <p>Most of the time, read commands can be retried in the same topology, so they use a delta of 0,
* see {@link #RETRY_SAME_TOPOLOGY}.
* Write commands cannot be retried in the same topology, so they always use a delta of 1 (or more).</p>
*
* <p>This exception can be thrown very often when node is joining or leaving, so it has not stack trace information,
* and using the constants is preferred.</p>
*
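* <p>A minimal sketch (caller-side names assumed) of how an interceptor derives the topology to
* retry in from the delta:</p>
* <pre>{@code
* int retryTopologyId = Math.max(currentTopologyId, command.getTopologyId() + e.topologyIdDelta);
* command.setTopologyId(retryTopologyId);
* }</pre>
*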
* @author Dan Berindei
* @since 6.0
*/
public class OutdatedTopologyException extends CacheException {
private static final long serialVersionUID = -7405935610562980779L;
public final int topologyIdDelta;
/**
* A cached instance that requests the command's topology id + 1.
*/
public static final OutdatedTopologyException RETRY_NEXT_TOPOLOGY =
new OutdatedTopologyException("Retry in the next topology", 1);
/**
* A cached instance, used for read commands that need to be retried in the same topology.
*
* <p>This happens because we read from backup owners when the primary owners no longer have the entry,
* so we only retry when all of the owners reply with an UnsureResponse.
* Topologies T and T+1 always have at least one read owner in common, so receiving UnsureResponse from all the
* owners means either one owner had topology T+2 and by now we have at least T+1, or one owner had topology T-1
* and another had T+1, and by now all should have at least T.</p>
*/
public static final OutdatedTopologyException RETRY_SAME_TOPOLOGY =
new OutdatedTopologyException("Retry command in the same topology", 0);
private OutdatedTopologyException(String message, int topologyIdDelta) {
super(message, null, false, false);
this.topologyIdDelta = topologyIdDelta;
}
/**
* Request the next topology (delta = 1) and use a custom message.
*
* @deprecated Since 10.0, please use the constants
*/
@Deprecated
public OutdatedTopologyException(String msg) {
super(msg, null, false, false);
this.topologyIdDelta = 1;
}
/**
* Request retrying the command in an explicitly set topology (or a later one).
*
* @deprecated Since 10.0, the explicit topology is ignored and the delta is set to 1
*/
@Deprecated
public OutdatedTopologyException(int topologyIdDelta) {
this(null, 1);
}
}
| 2,551
| 37.089552
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateTransferLockImpl.java
|
package org.infinispan.statetransfer;
import static org.infinispan.factories.KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR;
import static org.infinispan.util.logging.Log.CLUSTER;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.StampedLock;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.ConditionFuture;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* {@code StateTransferLock} implementation.
*
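* <p>A minimal usage sketch (assuming a caller that needs transaction data for a given topology;
* {@code retryCommand} is hypothetical):</p>
* <pre>{@code
* stateTransferLock.transactionDataFuture(expectedTopologyId)
*       .thenRun(() -> retryCommand()); // runs once transaction data for the topology has arrived
* }</pre>
*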
* @author anistor@redhat.com
* @author Dan Berindei
* @since 5.2
*/
@Scope(Scopes.NAMED_CACHE)
public class StateTransferLockImpl implements StateTransferLock {
private static final Log log = LogFactory.getLog(StateTransferLockImpl.class);
private static final int TOPOLOGY_ID_STOPPED = Integer.MAX_VALUE;
private final StampedLock ownershipLock = new StampedLock();
private final Lock writeLock = ownershipLock.asWriteLock();
private final Lock readLock = ownershipLock.asReadLock();
private volatile int topologyId = -1;
private ConditionFuture<StateTransferLockImpl> topologyFuture;
private volatile int transactionDataTopologyId = -1;
private ConditionFuture<StateTransferLockImpl> transactionDataFuture;
private long stateTransferTimeout;
private long remoteTimeout;
@Inject
void inject(@ComponentName(TIMEOUT_SCHEDULE_EXECUTOR) ScheduledExecutorService timeoutExecutor,
Configuration configuration) {
topologyFuture = new ConditionFuture<>(timeoutExecutor);
transactionDataFuture = new ConditionFuture<>(timeoutExecutor);
stateTransferTimeout = configuration.clustering().stateTransfer().timeout();
remoteTimeout = configuration.clustering().remoteTimeout();
configuration.clustering()
.attributes().attribute(ClusteringConfiguration.REMOTE_TIMEOUT)
.addListener((a, ignored) -> {
remoteTimeout = a.get();
});
}
@Stop
void stop() {
notifyTopologyInstalled(TOPOLOGY_ID_STOPPED);
notifyTransactionDataReceived(TOPOLOGY_ID_STOPPED);
}
@SuppressWarnings("LockAcquiredButNotSafelyReleased")
@Override
public void acquireExclusiveTopologyLock() {
if (log.isTraceEnabled()) log.tracef("Acquire exclusive state transfer lock, readers = %d", ownershipLock.getReadLockCount());
writeLock.lock();
}
@Override
public void releaseExclusiveTopologyLock() {
if (log.isTraceEnabled()) log.tracef("Release exclusive state transfer lock");
writeLock.unlock();
}
@SuppressWarnings("LockAcquiredButNotSafelyReleased")
@Override
public void acquireSharedTopologyLock() {
readLock.lock();
}
@Override
public void releaseSharedTopologyLock() {
readLock.unlock();
}
@Override
public void notifyTransactionDataReceived(int topologyId) {
if (topologyId < transactionDataTopologyId) {
log.debugf("Trying to set a topology id (%d) that is lower than the current one (%d)", topologyId,
this.topologyId);
return;
}
if (log.isTraceEnabled()) {
log.tracef("Signalling transaction data received for topology %d", topologyId);
}
transactionDataTopologyId = topologyId;
transactionDataFuture.update(this);
}
@Override
public CompletionStage<Void> transactionDataFuture(int expectedTopologyId) {
if (topologyId == TOPOLOGY_ID_STOPPED)
return CompletableFuture.failedFuture(new IllegalLifecycleStateException());
if (transactionDataTopologyId >= expectedTopologyId)
return CompletableFutures.completedNull();
if (log.isTraceEnabled()) {
log.tracef("Waiting for transaction data for topology %d, current topology is %d", expectedTopologyId,
transactionDataTopologyId);
}
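// The condition stage completes when transactionDataTopologyId catches up with the expected topology,
// or fails with the supplied TimeoutException after remoteTimeout milliseconds.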
return transactionDataFuture.newConditionStage(stli -> stli.transactionDataTopologyId >= expectedTopologyId,
() -> transactionDataTimeoutException(expectedTopologyId),
remoteTimeout, TimeUnit.MILLISECONDS);
}
private TimeoutException transactionDataTimeoutException(int expectedTopologyId) {
int currentTopologyId = this.topologyId;
if (expectedTopologyId > currentTopologyId) {
return CLUSTER.transactionDataTimeout(expectedTopologyId);
} else {
return CLUSTER.topologyTimeout(expectedTopologyId, currentTopologyId);
}
}
@Override
public boolean transactionDataReceived(int expectedTopologyId) {
if (log.isTraceEnabled()) log.tracef("Checking if transaction data was received for topology %s, current topology is %s",
expectedTopologyId, transactionDataTopologyId);
return transactionDataTopologyId >= expectedTopologyId;
}
@Override
public void notifyTopologyInstalled(int topologyId) {
if (topologyId < this.topologyId) {
log.debugf("Trying to set a topology id (%d) that is lower than the current one (%d)", topologyId,
this.topologyId);
return;
}
if (log.isTraceEnabled()) {
log.tracef("Signalling topology %d is installed", topologyId);
}
this.topologyId = topologyId;
topologyFuture.update(this);
}
@Override
public CompletionStage<Void> topologyFuture(int expectedTopologyId) {
if (topologyId == TOPOLOGY_ID_STOPPED)
return CompletableFuture.failedFuture(new IllegalLifecycleStateException());
if (topologyId >= expectedTopologyId)
return CompletableFutures.completedNull();
if (log.isTraceEnabled()) {
log.tracef("Waiting for topology %d to be installed, current topology is %d", expectedTopologyId, topologyId);
}
return topologyFuture.newConditionStage(stli -> stli.topologyId >= expectedTopologyId,
() -> CLUSTER.topologyTimeout(expectedTopologyId, topologyId),
stateTransferTimeout, TimeUnit.MILLISECONDS);
}
@Override
public boolean topologyReceived(int expectedTopologyId) {
return topologyId >= expectedTopologyId;
}
}
| 7,009
| 37.306011
| 132
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateProviderImpl.java
|
package org.infinispan.statetransfer;
import static org.infinispan.context.Flag.STATE_TRANSFER_PROGRESS;
import static org.infinispan.util.logging.Log.CLUSTER;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Consumer;
import java.util.function.Function;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.cachelistener.cluster.ClusterCacheNotifier;
import org.infinispan.notifications.cachelistener.cluster.ClusterListenerReplicateCallable;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.reactive.publisher.impl.DeliveryGuarantee;
import org.infinispan.reactive.publisher.impl.LocalPublisherManager;
import org.infinispan.reactive.publisher.impl.SegmentAwarePublisherSupplier;
import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.CacheTopology;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.TransactionOriginatorChecker;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.xa.CacheTransaction;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Flowable;
/**
* {@link StateProvider} implementation.
*
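* <p>A minimal sketch of the outbound flow (method names from this class; the trigger normally
* comes from the requesting node's inbound transfer):</p>
* <pre>{@code
* stateProvider.startOutboundTransfer(destination, requestTopologyId, segments, true);
* // -> addTransfer(task); task.execute(readEntries(segments)); onTaskCompletion(task)
* }</pre>
*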
* @author anistor@redhat.com
* @since 5.2
*/
@Scope(Scopes.NAMED_CACHE)
public class StateProviderImpl implements StateProvider {
private static final Log log = LogFactory.getLog(StateProviderImpl.class);
@ComponentName(KnownComponentNames.CACHE_NAME)
@Inject protected String cacheName;
@Inject Configuration configuration;
@Inject protected RpcManager rpcManager;
@Inject protected CommandsFactory commandsFactory;
@Inject ClusterCacheNotifier clusterCacheNotifier;
@Inject TransactionTable transactionTable; // optional
@Inject protected InternalDataContainer<Object, Object> dataContainer;
@Inject protected PersistenceManager persistenceManager; // optional
@Inject protected StateTransferLock stateTransferLock;
@Inject protected InternalEntryFactory entryFactory;
@Inject protected KeyPartitioner keyPartitioner;
@Inject protected DistributionManager distributionManager;
@Inject protected TransactionOriginatorChecker transactionOriginatorChecker;
@Inject protected LocalPublisherManager<?, ?> localPublisherManager;
@ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
@Inject ScheduledExecutorService timeoutExecutor;
protected long timeout;
protected int chunkSize;
/**
* A map that keeps track of current outbound state transfers by destination address. There could be multiple transfers
* flowing to the same destination (but for different segments) so the values are lists.
*/
private final Map<Address, List<OutboundTransferTask>> transfersByDestination = new HashMap<>();
/**
* Flags used when requesting the local publisher for the entries.
*/
private static final long STATE_TRANSFER_ENTRIES_FLAGS = EnumUtil.bitSetOf(
// Instructs the command not to use shared stores.
STATE_TRANSFER_PROGRESS
);
public StateProviderImpl() {
}
public boolean isStateTransferInProgress() {
synchronized (transfersByDestination) {
return !transfersByDestination.isEmpty();
}
}
public CompletableFuture<Void> onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
// Cancel outbound state transfers for destinations that are no longer members in new topology
// If the rebalance was cancelled, stop every outbound transfer. This will prevent "leaking" transfers
// from one rebalance to the next.
Set<Address> members = new HashSet<>(cacheTopology.getWriteConsistentHash().getMembers());
synchronized (transfersByDestination) {
for (Iterator<Map.Entry<Address, List<OutboundTransferTask>>> it = transfersByDestination.entrySet().iterator(); it.hasNext(); ) {
Map.Entry<Address, List<OutboundTransferTask>> destination = it.next();
Address address = destination.getKey();
if (!members.contains(address)) {
List<OutboundTransferTask> transfers = destination.getValue();
it.remove();
for (OutboundTransferTask outboundTransfer : transfers) {
outboundTransfer.cancel();
}
}
}
}
return CompletableFutures.completedNull();
//todo [anistor] must cancel transfers for all segments that we no longer own
}
// Must start before StateTransferManager sends the join request
@Start(priority = 50)
@Override
public void start() {
timeout = configuration.clustering().stateTransfer().timeout();
chunkSize = configuration.clustering().stateTransfer().chunkSize();
}
@Stop(priority = 0)
@Override
public void stop() {
if (log.isTraceEnabled()) {
log.tracef("Shutting down StateProvider of cache %s on node %s", cacheName, rpcManager.getAddress());
}
// cancel all outbound transfers
try {
synchronized (transfersByDestination) {
for (Iterator<List<OutboundTransferTask>> it = transfersByDestination.values().iterator(); it.hasNext(); ) {
List<OutboundTransferTask> transfers = it.next();
it.remove();
for (OutboundTransferTask outboundTransfer : transfers) {
outboundTransfer.cancel();
}
}
}
} catch (Throwable t) {
log.errorf(t, "Failed to stop StateProvider of cache %s on node %s", cacheName, rpcManager.getAddress());
}
}
public CompletionStage<List<TransactionInfo>> getTransactionsForSegments(Address destination, int requestTopologyId,
IntSet segments) {
if (log.isTraceEnabled()) {
log.tracef("Received request for transactions from node %s for cache %s, topology id %d, segments %s",
destination, cacheName, requestTopologyId, segments);
}
return getCacheTopology(requestTopologyId, destination, true)
.thenApply(topology -> {
final ConsistentHash readCh = topology.getReadConsistentHash();
IntSet ownedSegments = IntSets.from(readCh.getSegmentsForOwner(rpcManager.getAddress()));
if (!ownedSegments.containsAll(segments)) {
segments.removeAll(ownedSegments);
throw new IllegalArgumentException(
"Segments " + segments + " are not owned by " + rpcManager.getAddress());
}
List<TransactionInfo> transactions = new ArrayList<>();
//we migrate locks only if the cache is transactional and distributed
if (configuration.transaction().transactionMode().isTransactional()) {
collectTransactionsToTransfer(destination, transactions, transactionTable.getRemoteTransactions(),
segments,
topology);
collectTransactionsToTransfer(destination, transactions, transactionTable.getLocalTransactions(),
segments,
topology);
if (log.isTraceEnabled()) {
log.tracef("Found %d transaction(s) to transfer", transactions.size());
}
}
return transactions;
});
}
@Override
public Collection<ClusterListenerReplicateCallable<Object, Object>> getClusterListenersToInstall() {
return clusterCacheNotifier.retrieveClusterListenerCallablesToInstall();
}
private CompletionStage<CacheTopology> getCacheTopology(int requestTopologyId, Address destination,
boolean isReqForTransactions) {
CacheTopology cacheTopology = distributionManager.getCacheTopology();
int currentTopologyId = cacheTopology.getTopologyId();
if (requestTopologyId < currentTopologyId) {
if (isReqForTransactions)
log.debugf("Transactions were requested by node %s with topology %d, older than the local topology (%d)",
destination, requestTopologyId, currentTopologyId);
else
log.debugf("Segments were requested by node %s with topology %d, older than the local topology (%d)",
destination, requestTopologyId, currentTopologyId);
} else if (requestTopologyId > currentTopologyId) {
if (log.isTraceEnabled()) {
log.tracef("%s were requested by node %s with topology %d, greater than the local " +
"topology (%d). Waiting for topology %d to be installed locally.",
isReqForTransactions ? "Transactions" : "Segments", destination,
requestTopologyId, currentTopologyId, requestTopologyId);
}
return stateTransferLock.topologyFuture(requestTopologyId)
.exceptionally(throwable -> {
throw CLUSTER.failedWaitingForTopology(requestTopologyId);
})
.thenApply(ignored -> distributionManager.getCacheTopology());
}
return CompletableFuture.completedFuture(cacheTopology);
}
private void collectTransactionsToTransfer(Address destination,
List<TransactionInfo> transactionsToTransfer,
Collection<? extends CacheTransaction> transactions,
IntSet segments, CacheTopology cacheTopology) {
int topologyId = cacheTopology.getTopologyId();
Set<Address> members = new HashSet<>(cacheTopology.getMembers());
// no need to filter out state transfer generated transactions because there should not be any such transactions running for any of the requested segments
for (CacheTransaction tx : transactions) {
final GlobalTransaction gtx = tx.getGlobalTransaction();
// Skip transactions whose originators left. The topology id check is needed for joiners.
// Also skip transactions that originated after the state transfer started.
if (tx.getTopologyId() == topologyId ||
(transactionOriginatorChecker.isOriginatorMissing(gtx, members))) {
if (log.isTraceEnabled()) log.tracef("Skipping transaction %s as it was started in the current topology or by a leaver", tx);
continue;
}
// transfer only locked keys that belong to requested segments
Set<Object> filteredLockedKeys = new HashSet<>();
// Avoids the warning about synchronizing on a local variable
// and allows us to change the CacheTransaction internals without having to worry about it.
Consumer<Object> lockFilter = key -> {
if (segments.contains(keyPartitioner.getSegment(key))) {
filteredLockedKeys.add(key);
}
};
tx.forEachLock(lockFilter);
tx.forEachBackupLock(lockFilter);
if (filteredLockedKeys.isEmpty()) {
if (log.isTraceEnabled()) log.tracef("Skipping transaction %s because the state requestor %s doesn't own any key",
tx, destination);
continue;
}
if (log.isTraceEnabled()) log.tracef("Sending transaction %s to new owner %s", tx, destination);
// If a key affected by a local transaction has a new owner, we must add the new owner to the transaction's
// affected nodes set, so that it receives the commit/rollback command. See ISPN-3389.
if (tx instanceof LocalTransaction) {
LocalTransaction localTx = (LocalTransaction) tx;
localTx.locksAcquired(Collections.singleton(destination));
if (log.isTraceEnabled()) log.tracef("Adding affected node %s to transferred transaction %s (keys %s)", destination,
gtx, filteredLockedKeys);
}
transactionsToTransfer.add(new TransactionInfo(gtx, tx.getTopologyId(), tx.getModifications(),
filteredLockedKeys));
}
}
@Override
public void startOutboundTransfer(Address destination, int requestTopologyId, IntSet segments, boolean applyState) {
if (log.isTraceEnabled()) {
log.tracef("Starting outbound transfer to node %s for cache %s, topology id %d, segments %s", destination,
cacheName, requestTopologyId, segments);
}
// the destination node must already have an InboundTransferTask waiting for these segments
OutboundTransferTask outboundTransfer =
new OutboundTransferTask(destination, segments, this.configuration.clustering().hash().numSegments(),
chunkSize, requestTopologyId, chunks -> {}, rpcManager,
commandsFactory, timeout, cacheName, applyState);
addTransfer(outboundTransfer);
outboundTransfer.execute(readEntries(segments))
.whenComplete((ignored, throwable) -> {
if (throwable != null) {
logError(outboundTransfer, throwable);
}
onTaskCompletion(outboundTransfer);
});
}
protected Flowable<SegmentPublisherSupplier.Notification<InternalCacheEntry<?, ?>>> readEntries(IntSet segments) {
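// Streams the entries of the requested segments as notifications tagged with their segment id
// (shared stores are skipped via STATE_TRANSFER_ENTRIES_FLAGS), so the outbound task can chunk
// and complete segments individually.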
SegmentAwarePublisherSupplier<?> publisher =
localPublisherManager.entryPublisher(segments, null, null,
STATE_TRANSFER_ENTRIES_FLAGS, DeliveryGuarantee.AT_MOST_ONCE, Function.identity());
return Flowable.fromPublisher(publisher.publisherWithSegments())
.map(notification -> (SegmentPublisherSupplier.Notification<InternalCacheEntry<?, ?>>) notification);
}
protected void addTransfer(OutboundTransferTask transferTask) {
if (log.isTraceEnabled()) {
log.tracef("Adding outbound transfer to %s for segments %s", transferTask.getDestination(),
transferTask.getSegments());
}
synchronized (transfersByDestination) {
List<OutboundTransferTask> transfers = transfersByDestination
.computeIfAbsent(transferTask.getDestination(), k -> new ArrayList<>());
transfers.add(transferTask);
}
}
@Override
public void cancelOutboundTransfer(Address destination, int topologyId, IntSet segments) {
if (log.isTraceEnabled()) {
log.tracef("Cancelling outbound transfer to node %s for cache %s, topology id %d, segments %s", destination,
cacheName, topologyId, segments);
}
// get the outbound transfers for this address and given segments and cancel the transfers
synchronized (transfersByDestination) {
List<OutboundTransferTask> transferTasks = transfersByDestination.get(destination);
if (transferTasks != null) {
// get an array copy of the collection to avoid ConcurrentModificationException if the entire task gets cancelled and removeTransfer(transferTask) is called
OutboundTransferTask[] taskListCopy = transferTasks.toArray(new OutboundTransferTask[0]);
for (OutboundTransferTask transferTask : taskListCopy) {
if (transferTask.getTopologyId() == topologyId) {
transferTask.cancelSegments(segments); //this can potentially result in a call to removeTransfer(transferTask)
}
}
}
}
}
private void removeTransfer(OutboundTransferTask transferTask) {
synchronized (transfersByDestination) {
List<OutboundTransferTask> transferTasks = transfersByDestination.get(transferTask.getDestination());
if (transferTasks != null) {
transferTasks.remove(transferTask);
if (transferTasks.isEmpty()) {
transfersByDestination.remove(transferTask.getDestination());
}
}
}
}
protected void onTaskCompletion(OutboundTransferTask transferTask) {
if (log.isTraceEnabled()) {
log.tracef("Removing %s outbound transfer of segments to %s for cache %s, segments %s",
transferTask.isCancelled() ? "cancelled" : "completed", transferTask.getDestination(),
cacheName, transferTask.getSegments());
}
removeTransfer(transferTask);
}
protected void logError(OutboundTransferTask task, Throwable t) {
if (task.isCancelled()) {
// ignore eventual exceptions caused by cancellation or by the node stopping
if (log.isTraceEnabled()) {
log.tracef("Ignoring error in already cancelled transfer to node %s, segments %s",
task.getDestination(), task.getSegments());
}
} else {
log.failedOutBoundTransferExecution(t);
}
}
private InternalCacheEntry<Object, Object> defaultMapEntryFromStore(MarshallableEntry<Object, Object> me) {
InternalCacheEntry<Object, Object> entry = entryFactory.create(me.getKey(), me.getValue(), me.getMetadata());
entry.setInternalMetadata(me.getInternalMetadata());
return entry;
}
}
| 19,151
| 48.488372
| 168
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateConsumerImpl.java
|
package org.infinispan.statetransfer;
import static org.infinispan.context.Flag.CACHE_MODE_LOCAL;
import static org.infinispan.context.Flag.IGNORE_RETURN_VALUES;
import static org.infinispan.context.Flag.IRAC_STATE;
import static org.infinispan.context.Flag.PUT_FOR_STATE_TRANSFER;
import static org.infinispan.context.Flag.SKIP_LOCKING;
import static org.infinispan.context.Flag.SKIP_OWNERSHIP_CHECK;
import static org.infinispan.context.Flag.SKIP_REMOTE_LOOKUP;
import static org.infinispan.context.Flag.SKIP_SHARED_CACHE_STORE;
import static org.infinispan.context.Flag.SKIP_XSITE_BACKUP;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.PRIVATE;
import static org.infinispan.util.concurrent.CompletionStages.handleAndCompose;
import static org.infinispan.util.concurrent.CompletionStages.ignoreValue;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;
import org.infinispan.Cache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.conflict.impl.InternalConflictManager;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.LocalTxInvocationContext;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.TriangleOrderManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.executors.LimitedExecutor;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.metadata.impl.InternalMetadataImpl;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.annotation.DataRehashed;
import org.infinispan.notifications.cachelistener.cluster.ClusterListenerReplicateCallable;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.reactive.publisher.impl.LocalPublisherManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.PassthroughSingleResponseCollector;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.impl.FakeJTATransaction;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.xa.CacheTransaction;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CommandAckCollector;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.statetransfer.XSiteStateTransferManager;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import net.jcip.annotations.GuardedBy;
/**
* {@link StateConsumer} implementation.
*
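* <p>A sketch of the nested stage contract of {@code onTopologyUpdate} (this mirrors how
* {@code StateTransferManagerImpl} consumes it):</p>
* <pre>{@code
* CompletionStage<CompletionStage<Void>> topologyApplied =
*       stateConsumer.onTopologyUpdate(newTopology, isRebalance);
* // outer stage: topology installed locally; inner stage: all requested state received
* CompletionStage<Void> stateApplied = topologyApplied.thenCompose(Function.identity());
* }</pre>
*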
* @author anistor@redhat.com
* @since 5.2
*/
@Scope(Scopes.NAMED_CACHE)
public class StateConsumerImpl implements StateConsumer {
private static final Log log = LogFactory.getLog(StateConsumerImpl.class);
protected static final int NO_STATE_TRANSFER_IN_PROGRESS = -1;
protected static final long STATE_TRANSFER_FLAGS = EnumUtil.bitSetOf(PUT_FOR_STATE_TRANSFER, CACHE_MODE_LOCAL,
IGNORE_RETURN_VALUES, SKIP_REMOTE_LOOKUP,
SKIP_SHARED_CACHE_STORE, SKIP_OWNERSHIP_CHECK,
SKIP_XSITE_BACKUP, SKIP_LOCKING, IRAC_STATE);
protected static final long INVALIDATE_FLAGS = STATE_TRANSFER_FLAGS & ~FlagBitSets.PUT_FOR_STATE_TRANSFER;
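// The same flag set without PUT_FOR_STATE_TRANSFER, presumably for the invalidation commands issued
// when segments are removed (assumption: the usage site is not shown in this excerpt).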
public static final String NO_KEY = "N/A";
@Inject protected ComponentRef<Cache<Object, Object>> cache;
@Inject protected LocalTopologyManager localTopologyManager;
@Inject protected Configuration configuration;
@Inject protected RpcManager rpcManager;
@Inject protected TransactionManager transactionManager; // optional
@Inject protected CommandsFactory commandsFactory;
@Inject protected TransactionTable transactionTable; // optional
@Inject protected InternalDataContainer<Object, Object> dataContainer;
@Inject protected PersistenceManager persistenceManager;
@Inject protected AsyncInterceptorChain interceptorChain;
@Inject protected InvocationContextFactory icf;
@Inject protected StateTransferLock stateTransferLock;
@Inject protected CacheNotifier<?, ?> cacheNotifier;
@Inject protected CommitManager commitManager;
@Inject @ComponentName(NON_BLOCKING_EXECUTOR)
protected Executor nonBlockingExecutor;
@Inject protected CommandAckCollector commandAckCollector;
@Inject protected TriangleOrderManager triangleOrderManager;
@Inject protected DistributionManager distributionManager;
@Inject protected KeyPartitioner keyPartitioner;
@Inject protected InternalConflictManager<?, ?> conflictManager;
@Inject protected LocalPublisherManager<Object, Object> localPublisherManager;
@Inject PerCacheInboundInvocationHandler inboundInvocationHandler;
@Inject XSiteStateTransferManager xSiteStateTransferManager;
protected String cacheName;
protected long timeout;
protected boolean isFetchEnabled;
protected boolean isTransactional;
protected boolean isInvalidationMode;
protected volatile KeyInvalidationListener keyInvalidationListener; //for test purpose only!
protected volatile CacheTopology cacheTopology;
/**
* Indicates if there is a state transfer in progress. It is set to the new topology id when onTopologyUpdate with
* isRebalance==true is called.
* It is changed back to NO_STATE_TRANSFER_IN_PROGRESS when a topology update with a null pending CH is received.
*/
protected final AtomicInteger stateTransferTopologyId = new AtomicInteger(NO_STATE_TRANSFER_IN_PROGRESS);
/**
* Indicates if there is a rebalance in progress for which the local node has not yet received
* all the new segments. It is set to true when the rebalance starts and becomes false when all inbound transfers
* have completed (before stateTransferTopologyId is set back to NO_STATE_TRANSFER_IN_PROGRESS).
*/
protected final AtomicBoolean waitingForState = new AtomicBoolean(false);
protected CompletableFuture<Void> stateTransferFuture = CompletableFutures.completedNull();
protected final Object transferMapsLock = new Object();
/**
* A map that keeps track of current inbound state transfers by source address. There could be multiple transfers
* flowing in from the same source (but for different segments) so the values are lists. This works in tandem with
* transfersBySegment so they always need to be kept in sync and updates to both of them need to be atomic.
*/
@GuardedBy("transferMapsLock")
private final Map<Address, List<InboundTransferTask>> transfersBySource = new HashMap<>();
/**
* A map that keeps track of current inbound state transfers by segment id. There is at most one transfer per segment.
* This works in tandem with transfersBySource so they always need to be kept in sync and updates to both of them
* need to be atomic.
*/
@GuardedBy("transferMapsLock")
protected final Map<Integer, List<InboundTransferTask>> transfersBySegment = new HashMap<>();
/**
* A set identifying the transactional segments requested by the cache. This is a set so a segment is counted only
* once.
*/
private IntSet requestedTransactionalSegments;
/**
* Limit to one state request at a time.
*/
protected LimitedExecutor stateRequestExecutor;
private volatile boolean ownsData = false;
// Use the state transfer timeout for RPCs instead of the regular remote timeout
protected RpcOptions rpcOptions;
private volatile boolean running;
private int numSegments;
public StateConsumerImpl() {
}
/**
* Stops applying incoming state. Also stops tracking updated keys. Should be called at the end of state transfer or
* when a ClearCommand is committed during state transfer.
*/
@Override
public void stopApplyingState(int topologyId) {
if (log.isTraceEnabled()) log.tracef("Stop keeping track of changed keys for state transfer in topology %d", topologyId);
commitManager.stopTrack(PUT_FOR_STATE_TRANSFER);
}
public boolean hasActiveTransfers() {
synchronized (transferMapsLock) {
return !transfersBySource.isEmpty();
}
}
@Override
public boolean isStateTransferInProgress() {
return stateTransferTopologyId.get() != NO_STATE_TRANSFER_IN_PROGRESS;
}
@Override
public boolean isStateTransferInProgressForKey(Object key) {
if (isInvalidationMode) {
// In invalidation mode it is not of much relevance whether the key is actually being transferred right now.
// A false response to this will just mean the usual remote lookup before a write operation is not
// performed and a null is assumed. But in invalidation mode the user must expect the data can disappear
// from cache at any time so this null previous value should not cause any trouble.
return false;
}
DistributionInfo distributionInfo = distributionManager.getCacheTopology().getDistribution(key);
return distributionInfo.isWriteOwner() && !distributionInfo.isReadOwner();
}
@Override
public long inflightRequestCount() {
synchronized(transferMapsLock) {
return transfersBySegment.size();
}
}
@Override
public long inflightTransactionSegmentCount() {
return requestedTransactionalSegments.size();
}
@Override
public boolean ownsData() {
return ownsData;
}
@Override
public CompletionStage<CompletionStage<Void>> onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
final ConsistentHash newWriteCh = cacheTopology.getWriteConsistentHash();
final CacheTopology previousCacheTopology = this.cacheTopology;
final ConsistentHash previousWriteCh =
previousCacheTopology != null ? previousCacheTopology.getWriteConsistentHash() : null;
IntSet newWriteSegments = getOwnedSegments(newWriteCh);
Address address = rpcManager.getAddress();
final boolean isMember = cacheTopology.getMembers().contains(address);
final boolean wasMember = previousWriteCh != null &&
previousWriteCh.getMembers().contains(address);
if (log.isTraceEnabled())
log.tracef("Received new topology for cache %s, isRebalance = %b, isMember = %b, topology = %s", cacheName,
isRebalance, isMember, cacheTopology);
if (!ownsData && isMember) {
ownsData = true;
} else if (ownsData && !isMember) {
// This can happen after a merge, if the local node was in a minority partition.
ownsData = false;
}
// If a member leaves/crashes immediately after a rebalance was started, the new CH_UPDATE
// command may be executed before the REBALANCE_START command, so it has to start the rebalance.
boolean addedPendingCH = cacheTopology.getPendingCH() != null && wasMember &&
previousCacheTopology.getPendingCH() == null;
boolean startConflictResolution =
!isRebalance && cacheTopology.getPhase() == CacheTopology.Phase.CONFLICT_RESOLUTION;
boolean startStateTransfer = isRebalance || (addedPendingCH && !startConflictResolution);
if (startStateTransfer && !isRebalance) {
if (log.isTraceEnabled()) log.tracef("Forcing startRebalance = true");
}
CompletionStage<Void> stage = CompletableFutures.completedNull();
if (startStateTransfer) {
// Only update the rebalance topology id when starting the rebalance, as we're going to ignore any state
// response with a smaller topology id
stateTransferTopologyId.compareAndSet(NO_STATE_TRANSFER_IN_PROGRESS, cacheTopology.getTopologyId());
conflictManager.cancelVersionRequests();
if (cacheNotifier.hasListener(DataRehashed.class)) {
stage = cacheNotifier.notifyDataRehashed(cacheTopology.getCurrentCH(), cacheTopology.getPendingCH(),
cacheTopology.getUnionCH(), cacheTopology.getTopologyId(), true);
}
}
stage = stage.thenCompose(ignored -> {
if (startConflictResolution) {
// This stops state being applied from a prior rebalance and also prevents tracking from being stopped
stateTransferTopologyId.set(NO_STATE_TRANSFER_IN_PROGRESS);
}
// Make sure we don't send a REBALANCE_CONFIRM command before we've added all the transfer tasks
// even if some of the tasks are removed and re-added
waitingForState.set(false);
stateTransferFuture = new CompletableFuture<>();
beforeTopologyInstalled(cacheTopology.getTopologyId(), previousWriteCh, newWriteCh);
if (!configuration.clustering().cacheMode().isInvalidation()) {
// Owned segments
dataContainer.addSegments(newWriteSegments);
// TODO Should we throw an exception if addSegments() returns false?
return ignoreValue(persistenceManager.addSegments(newWriteSegments));
}
return CompletableFutures.completedNull();
});
stage = stage.thenCompose(ignored -> {
// We need to track changes so that user puts during conflict resolution are prioritised over
// state transfer or conflict resolution updates
// Tracking is stopped once the state transfer completes (i.e. all the entries have been inserted)
if (startStateTransfer || startConflictResolution) {
if (commitManager.isTracking(PUT_FOR_STATE_TRANSFER)) {
log.debug("Starting state transfer but key tracking is already enabled");
} else {
if (log.isTraceEnabled()) log.tracef("Start keeping track of keys for state transfer");
commitManager.startTrack(PUT_FOR_STATE_TRANSFER);
}
}
// Ensures writes to the data container use the right consistent hash
// Writers block on the state transfer shared lock, so we keep the exclusive lock as short as possible
stateTransferLock.acquireExclusiveTopologyLock();
try {
this.cacheTopology = cacheTopology;
distributionManager.setCacheTopology(cacheTopology);
} finally {
stateTransferLock.releaseExclusiveTopologyLock();
}
stateTransferLock.notifyTopologyInstalled(cacheTopology.getTopologyId());
inboundInvocationHandler.checkForReadyTasks();
xSiteStateTransferManager.onTopologyUpdated(cacheTopology, isStateTransferInProgress());
if (!wasMember && isMember) {
return fetchClusterListeners(cacheTopology);
}
return CompletableFutures.completedNull();
});
stage = stage.thenCompose(ignored -> {
// fetch transactions and data segments from other owners if this is enabled
if (startConflictResolution || (!isTransactional && !isFetchEnabled)) {
return CompletableFutures.completedNull();
}
IntSet addedSegments, removedSegments;
if (previousWriteCh == null) {
// If we have any segments assigned in the initial CH, it means we are the first member.
// If we are not the first member, we can only add segments via rebalance.
removedSegments = IntSets.immutableEmptySet();
addedSegments = IntSets.immutableEmptySet();
if (log.isTraceEnabled()) {
log.tracef("On cache %s we have: added segments: %s", cacheName, addedSegments);
}
} else {
IntSet previousSegments = getOwnedSegments(previousWriteCh);
if (newWriteSegments.size() == numSegments) {
// Optimization for replicated caches
removedSegments = IntSets.immutableEmptySet();
} else {
removedSegments = IntSets.mutableCopyFrom(previousSegments);
removedSegments.removeAll(newWriteSegments);
}
// This is a rebalance, we need to request the segments we own in the new CH.
addedSegments = IntSets.mutableCopyFrom(newWriteSegments);
addedSegments.removeAll(previousSegments);
if (log.isTraceEnabled()) {
log.tracef("On cache %s we have: new segments: %s; old segments: %s", cacheName, newWriteSegments,
previousSegments);
log.tracef("On cache %s we have: added segments: %s; removed segments: %s", cacheName,
addedSegments, removedSegments);
}
// remove inbound transfers for segments we no longer own
cancelTransfers(removedSegments);
if (!startStateTransfer && !addedSegments.isEmpty()) {
// If the last owner of a segment leaves the cluster, a new set of owners is assigned,
// but the new owners should not try to retrieve the segment from each other.
// If this happens during a rebalance, we might have already sent our rebalance
// confirmation, so the coordinator won't wait for us to retrieve those segments anyway.
log.debugf("Not requesting segments %s because the last owner left the cluster",
addedSegments);
addedSegments.clear();
}
// check if any of the existing transfers should be restarted from a different source because
// the initial source is no longer a member
restartBrokenTransfers(cacheTopology, addedSegments);
}
IntSet transactionOnlySegments = computeTransactionOnlySegments(cacheTopology, address);
return handleSegments(startStateTransfer, addedSegments, removedSegments, transactionOnlySegments);
});
stage = stage.thenCompose(ignored -> {
int stateTransferTopologyId = this.stateTransferTopologyId.get();
if (log.isTraceEnabled())
log.tracef("Topology update processed, stateTransferTopologyId = %d, startRebalance = %s, pending CH = %s",
(Object) stateTransferTopologyId, startStateTransfer, cacheTopology.getPendingCH());
if (stateTransferTopologyId != NO_STATE_TRANSFER_IN_PROGRESS && !startStateTransfer &&
!cacheTopology.getPhase().isRebalance()) {
// we have received a topology update without a pending CH, signalling the end of the rebalance
boolean changed = this.stateTransferTopologyId.compareAndSet(stateTransferTopologyId,
NO_STATE_TRANSFER_IN_PROGRESS);
// if the coordinator changed, we might get two concurrent topology updates,
// but we only want to notify the @DataRehashed listeners once
if (changed) {
stopApplyingState(stateTransferTopologyId);
if (cacheNotifier.hasListener(DataRehashed.class)) {
return cacheNotifier.notifyDataRehashed(previousCacheTopology.getCurrentCH(),
previousCacheTopology.getPendingCH(),
previousCacheTopology.getUnionCH(),
cacheTopology.getTopologyId(), false);
}
}
}
return CompletableFutures.completedNull();
});
return handleAndCompose(stage, (ignored, throwable) -> {
if (log.isTraceEnabled()) {
log.tracef("Unlock State Transfer in Progress for topology ID %s", cacheTopology.getTopologyId());
}
stateTransferLock.notifyTransactionDataReceived(cacheTopology.getTopologyId());
inboundInvocationHandler.checkForReadyTasks();
// Only set the flag here, after all the transfers have been added to the transfersBySource map
if (stateTransferTopologyId.get() != NO_STATE_TRANSFER_IN_PROGRESS && isMember) {
waitingForState.set(true);
}
notifyEndOfStateTransferIfNeeded();
// Remove the transactions whose originators have left the cache.
// Need to do it now, after we have applied any transactions from other nodes,
// and after notifyTransactionDataReceived - otherwise the RollbackCommands would block.
try {
if (transactionTable != null) {
transactionTable.cleanupLeaverTransactions(rpcManager.getTransport().getMembers());
}
} catch (Exception e) {
// Do not fail state transfer when the cleanup fails. See ISPN-7437 for details.
log.transactionCleanupError(e);
}
commandAckCollector.onMembersChange(newWriteCh.getMembers());
// The rebalance (READ_OLD_WRITE_ALL/TRANSITORY) is completed through notifyEndOfStateTransferIfNeeded
// and STABLE does not have to be confirmed at all
switch (cacheTopology.getPhase()) {
case READ_ALL_WRITE_ALL:
case READ_NEW_WRITE_ALL:
stateTransferFuture.complete(null);
}
// Any data for segments we do not own should be removed from data container and cache store
// We need to discard data from all segments we don't own, not just those we previously owned,
// when we lose membership (e.g. because there was a merge, the local partition was in degraded mode
// and the other partition was available) or when L1 is enabled.
if ((isMember || wasMember) && cacheTopology.getPhase() == CacheTopology.Phase.NO_REBALANCE) {
int numSegments = newWriteCh.getNumSegments();
IntSet removedSegments = IntSets.mutableEmptySet(numSegments);
IntSet newSegments = getOwnedSegments(newWriteCh);
for (int i = 0; i < numSegments; ++i) {
if (!newSegments.contains(i)) {
removedSegments.set(i);
}
}
return removeStaleData(removedSegments)
.thenApply(ignored1 -> {
conflictManager.restartVersionRequests();
// rethrow the original exception, if any
CompletableFutures.rethrowExceptionIfPresent(throwable);
return stateTransferFuture;
});
}
CompletableFutures.rethrowExceptionIfPresent(throwable);
return CompletableFuture.completedFuture(stateTransferFuture);
});
}
private IntSet computeTransactionOnlySegments(CacheTopology cacheTopology, Address address) {
if (configuration.transaction().transactionMode() != TransactionMode.TRANSACTIONAL ||
configuration.transaction().lockingMode() != LockingMode.PESSIMISTIC ||
cacheTopology.getPhase() != CacheTopology.Phase.READ_OLD_WRITE_ALL ||
!cacheTopology.getCurrentCH().getMembers().contains(address)) {
return IntSets.immutableEmptySet();
}
// In pessimistic caches, the originator does not send the lock command to backups
// if it is the primary owner for all the keys.
// The idea is that the locks can only be lost completely if the originator crashes (rolling back the tx)
// But this means when the owners of a segment change from AB -> BA or AB -> BC,
// B needs to request the transactions affecting that segment from A,
// even though B already has the entries of that segment
IntSet transactionOnlySegments = IntSets.mutableEmptySet(numSegments);
Set<Integer> pendingPrimarySegments = cacheTopology.getPendingCH().getPrimarySegmentsForOwner(address);
for (Integer segment : pendingPrimarySegments) {
List<Address> currentOwners = cacheTopology.getCurrentCH().locateOwnersForSegment(segment);
if (currentOwners.get(0).equals(address)) {
// Already primary
continue;
}
if (!currentOwners.contains(address)) {
// Not a backup, will receive transactions the normal way
continue;
}
transactionOnlySegments.add(segment);
}
return transactionOnlySegments;
}
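// Worked example for the method above (hypothetical addresses, illustration only): for a
// segment whose owners change from [A, B] to [B, A], node B already stores the segment's
// entries, so no state chunk is requested, but B becomes primary and must still pull the
// pessimistic transactions touching that segment from A, whose locks were never replicated
// to the backups. Owners changing from [A, B] to [B, C] behave the same way for B, while C
// receives both the entries and the transactions through the regular rebalance path.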
private CompletionStage<Void> fetchClusterListeners(CacheTopology cacheTopology) {
if (!configuration.clustering().cacheMode().isDistributed()) {
return CompletableFutures.completedNull();
}
return getClusterListeners(cacheTopology.getTopologyId(), cacheTopology.getReadConsistentHash().getMembers())
.thenAccept(callables -> {
Cache<Object, Object> cache = this.cache.wired();
for (ClusterListenerReplicateCallable<Object, Object> callable : callables) {
try {
// TODO: need security check?
// We have to invoke a separate method because we can't retrieve the cache while it is still starting
callable.accept(cache.getCacheManager(), cache);
} catch (Exception e) {
log.clusterListenerInstallationFailure(e);
}
}
});
}
protected void beforeTopologyInstalled(int topologyId, ConsistentHash previousWriteCh,
ConsistentHash newWriteCh) {
}
protected boolean notifyEndOfStateTransferIfNeeded() {
if (waitingForState.get()) {
if (hasActiveTransfers()) {
if (log.isTraceEnabled())
log.tracef("No end of state transfer notification, active transfers still exist");
return false;
}
if (waitingForState.compareAndSet(true, false)) {
int topologyId = stateTransferTopologyId.get();
log.debugf("Finished receiving segments for cache %s for topology %d.", cacheName, topologyId);
stopApplyingState(topologyId);
stateTransferFuture.complete(null);
} else if (log.isTraceEnabled()) {
log.tracef("No end of state transfer notification, waitingForState already set to false by another thread");
}
return false;
}
if (log.isTraceEnabled())
log.tracef("No end of state transfer notification, waitingForState is already false");
return true;
}
protected IntSet getOwnedSegments(ConsistentHash consistentHash) {
Address address = rpcManager.getAddress();
return IntSets.from(consistentHash.getSegmentsForOwner(address));
}
@Override
public CompletionStage<?> applyState(final Address sender, int topologyId, Collection<StateChunk> stateChunks) {
ConsistentHash wCh = cacheTopology.getWriteConsistentHash();
// Ignore responses received after we are no longer a member
if (!wCh.getMembers().contains(rpcManager.getAddress())) {
if (log.isTraceEnabled()) {
log.tracef("Ignoring received state because we are no longer a member of cache %s", cacheName);
}
return CompletableFutures.completedNull();
}
// Ignore segments that we requested for a previous rebalance
// Can happen when the coordinator leaves, and the new coordinator cancels the rebalance in progress
int rebalanceTopologyId = stateTransferTopologyId.get();
if (rebalanceTopologyId == NO_STATE_TRANSFER_IN_PROGRESS) {
log.debugf("Discarding state response with topology id %d for cache %s, we don't have a state transfer in progress",
topologyId, cacheName);
return CompletableFutures.completedNull();
}
if (topologyId < rebalanceTopologyId) {
log.debugf("Discarding state response with old topology id %d for cache %s, state transfer request topology was %b",
topologyId, cacheName, waitingForState);
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Before applying the received state the data container of cache %s has %d keys", cacheName,
dataContainer.sizeIncludingExpired());
}
IntSet mySegments = IntSets.from(wCh.getSegmentsForOwner(rpcManager.getAddress()));
Iterator<StateChunk> iterator = stateChunks.iterator();
return applyStateIteration(sender, mySegments, iterator).whenComplete((v, t) -> {
if (log.isTraceEnabled()) {
log.tracef("After applying the received state the data container of cache %s has %d keys", cacheName,
dataContainer.sizeIncludingExpired());
synchronized (transferMapsLock) {
log.tracef("Segments not received yet for cache %s: %s", cacheName, transfersBySource);
}
}
});
}
private CompletionStage<?> applyStateIteration(Address sender, IntSet mySegments,
Iterator<StateChunk> iterator) {
CompletionStage<?> chunkStage = CompletableFutures.completedNull();
// Replace recursion with iteration if the state was applied synchronously
while (iterator.hasNext() && CompletionStages.isCompletedSuccessfully(chunkStage)) {
StateChunk stateChunk = iterator.next();
chunkStage = applyChunk(sender, mySegments, stateChunk);
}
if (!iterator.hasNext())
return chunkStage;
return chunkStage.thenCompose(v -> applyStateIteration(sender, mySegments, iterator));
}
private CompletionStage<Void> applyChunk(Address sender, IntSet mySegments, StateChunk stateChunk) {
if (!mySegments.contains(stateChunk.getSegmentId())) {
log.debugf("Discarding received cache entries for segment %d of cache %s because they do not belong to this node.", stateChunk.getSegmentId(), cacheName);
return CompletableFutures.completedNull();
}
// Notify the inbound task that a chunk of cache entries was received
InboundTransferTask inboundTransfer;
synchronized (transferMapsLock) {
List<InboundTransferTask> inboundTransfers = transfersBySegment.get(stateChunk.getSegmentId());
if (inboundTransfers != null) {
inboundTransfer = inboundTransfers.stream().filter(task -> task.getSource().equals(sender)).findFirst().orElse(null);
} else {
inboundTransfer = null;
}
}
if (inboundTransfer != null) {
return doApplyState(sender, stateChunk.getSegmentId(), stateChunk.getCacheEntries())
.thenAccept(v -> {
boolean lastChunk = stateChunk.isLastChunk();
inboundTransfer.onStateReceived(stateChunk.getSegmentId(), lastChunk);
if (lastChunk) {
onCompletedSegment(stateChunk.getSegmentId(), inboundTransfer);
}
});
} else {
if (cache.wired().getStatus().allowInvocations()) {
log.ignoringUnsolicitedState(sender, stateChunk.getSegmentId(), cacheName);
}
}
return CompletableFutures.completedNull();
}
private void onCompletedSegment(int segmentId, InboundTransferTask inboundTransfer) {
synchronized (transferMapsLock) {
List<InboundTransferTask> innerTransfers = transfersBySegment.get(segmentId);
if (innerTransfers != null && innerTransfers.remove(inboundTransfer) && innerTransfers.isEmpty()) {
commitManager.stopTrackFor(PUT_FOR_STATE_TRANSFER, segmentId);
transfersBySegment.remove(segmentId);
}
}
}
private CompletionStage<?> doApplyState(Address sender, int segmentId,
Collection<InternalCacheEntry<?, ?>> cacheEntries) {
if (cacheEntries == null || cacheEntries.isEmpty())
return CompletableFutures.completedNull();
if (log.isTraceEnabled()) log.tracef(
"Applying new state chunk for segment %d of cache %s from node %s: received %d cache entries",
segmentId, cacheName, sender, cacheEntries.size());
// CACHE_MODE_LOCAL avoids handling by StateTransferInterceptor and any potential locks in StateTransferLock
boolean transactional = transactionManager != null;
if (transactional) {
Object key = NO_KEY;
Transaction transaction = new FakeJTATransaction();
InvocationContext ctx = icf.createInvocationContext(transaction, false);
LocalTransaction localTransaction = ((LocalTxInvocationContext) ctx).getCacheTransaction();
try {
localTransaction.setStateTransferFlag(PUT_FOR_STATE_TRANSFER);
for (InternalCacheEntry<?, ?> e : cacheEntries) {
key = e.getKey();
CompletableFuture<?> future = invokePut(segmentId, ctx, e);
if (!future.isDone()) {
throw new IllegalStateException("State transfer in-tx put should always be synchronous");
}
}
} catch (Throwable t) {
logApplyException(t, key);
return invokeRollback(localTransaction).handle((rv, t1) -> {
transactionTable.removeLocalTransaction(localTransaction);
if (t1 != null) {
t.addSuppressed(t1);
}
return null;
});
}
return invoke1PCPrepare(localTransaction).whenComplete((rv, t) -> {
transactionTable.removeLocalTransaction(localTransaction);
if (t != null) {
logApplyException(t, NO_KEY);
}
});
} else {
// non-tx cache
AggregateCompletionStage<Void> aggregateStage = CompletionStages.aggregateCompletionStage();
for (InternalCacheEntry<?, ?> e : cacheEntries) {
InvocationContext ctx = icf.createSingleKeyNonTxInvocationContext();
CompletionStage<?> putStage = invokePut(segmentId, ctx, e);
aggregateStage.dependsOn(putStage.exceptionally(t -> {
logApplyException(t, e.getKey());
return null;
}));
}
return aggregateStage.freeze();
}
}
private CompletionStage<?> invoke1PCPrepare(LocalTransaction localTransaction) {
PrepareCommand prepareCommand;
if (Configurations.isTxVersioned(configuration)) {
prepareCommand = commandsFactory.buildVersionedPrepareCommand(localTransaction.getGlobalTransaction(),
localTransaction.getModifications(), true);
} else {
prepareCommand = commandsFactory.buildPrepareCommand(localTransaction.getGlobalTransaction(),
localTransaction.getModifications(), true);
}
LocalTxInvocationContext ctx = icf.createTxInvocationContext(localTransaction);
return interceptorChain.invokeAsync(ctx, prepareCommand);
}
private CompletionStage<?> invokeRollback(LocalTransaction localTransaction) {
RollbackCommand prepareCommand = commandsFactory.buildRollbackCommand(localTransaction.getGlobalTransaction());
LocalTxInvocationContext ctx = icf.createTxInvocationContext(localTransaction);
return interceptorChain.invokeAsync(ctx, prepareCommand);
}
private CompletableFuture<?> invokePut(int segmentId, InvocationContext ctx, InternalCacheEntry<?, ?> e) {
// CallInterceptor will preserve the timestamps if the metadata is an InternalMetadataImpl instance
InternalMetadataImpl metadata = new InternalMetadataImpl(e);
PutKeyValueCommand put = commandsFactory.buildPutKeyValueCommand(e.getKey(), e.getValue(), segmentId,
metadata, STATE_TRANSFER_FLAGS);
put.setInternalMetadata(e.getInternalMetadata());
ctx.setLockOwner(put.getKeyLockOwner());
return interceptorChain.invokeAsync(ctx, put);
}
private void logApplyException(Throwable t, Object key) {
if (!cache.wired().getStatus().allowInvocations()) {
log.tracef("Cache %s is shutting down, stopping state transfer", cacheName);
} else {
log.problemApplyingStateForKey(key, t);
}
}
private void applyTransactions(Address sender, Collection<TransactionInfo> transactions, int topologyId) {
log.debugf("Applying %d transactions for cache %s transferred from node %s", transactions.size(), cacheName, sender);
if (isTransactional) {
for (TransactionInfo transactionInfo : transactions) {
GlobalTransaction gtx = transactionInfo.getGlobalTransaction();
if (rpcManager.getAddress().equals(gtx.getAddress())) {
continue; // it is a transaction originated in this node. can happen with partition handling
}
// Mark the global transaction as remote. Only used for logging, hashCode/equals ignore it.
gtx.setRemote(true);
CacheTransaction tx = transactionTable.getLocalTransaction(gtx);
if (tx == null) {
tx = transactionTable.getRemoteTransaction(gtx);
if (tx == null) {
try {
// just in case, set the previous topology id to make the current topology id check for pending locks.
tx = transactionTable.getOrCreateRemoteTransaction(gtx, transactionInfo.getModifications(), topologyId - 1);
// Force this node to replay the given transaction data by making it think it is 1 behind
((RemoteTransaction) tx).setLookedUpEntriesTopology(topologyId - 1);
} catch (Throwable t) {
if (log.isTraceEnabled())
log.tracef(t, "Failed to create remote transaction %s", gtx);
}
}
}
if (tx != null) {
transactionInfo.getLockedKeys().forEach(tx::addBackupLockForKey);
}
}
}
}
// Must run after the PersistenceManager
@Start(priority = 20)
public void start() {
cacheName = cache.wired().getName();
isInvalidationMode = configuration.clustering().cacheMode().isInvalidation();
isTransactional = configuration.transaction().transactionMode().isTransactional();
timeout = configuration.clustering().stateTransfer().timeout();
numSegments = configuration.clustering().hash().numSegments();
isFetchEnabled = isFetchEnabled();
rpcOptions = new RpcOptions(DeliverOrder.NONE, timeout, TimeUnit.MILLISECONDS);
requestedTransactionalSegments = IntSets.concurrentSet(numSegments);
stateRequestExecutor = new LimitedExecutor("StateRequest-" + cacheName, nonBlockingExecutor, 1);
running = true;
}
private boolean isFetchEnabled() {
return configuration.clustering().cacheMode().needsStateTransfer() &&
configuration.clustering().stateTransfer().fetchInMemoryState();
}
@Stop(priority = 0)
@Override
public void stop() {
if (log.isTraceEnabled()) {
log.tracef("Shutting down StateConsumer of cache %s on node %s", cacheName, rpcManager.getAddress());
}
running = false;
try {
synchronized (transferMapsLock) {
// cancel all inbound transfers
// make a copy and then clear both maps so that cancel doesn't interfere with the iteration
Collection<List<InboundTransferTask>> transfers = new ArrayList<>(transfersBySource.values());
transfersBySource.clear();
transfersBySegment.clear();
for (List<InboundTransferTask> inboundTransfers : transfers) {
inboundTransfers.forEach(InboundTransferTask::cancel);
}
}
requestedTransactionalSegments.clear();
stateRequestExecutor.shutdownNow();
} catch (Throwable t) {
log.errorf(t, "Failed to stop StateConsumer of cache %s on node %s", cacheName, rpcManager.getAddress());
}
}
public void setKeyInvalidationListener(KeyInvalidationListener keyInvalidationListener) {
this.keyInvalidationListener = keyInvalidationListener;
}
protected CompletionStage<Void> handleSegments(boolean startRebalance, IntSet addedSegments,
IntSet removedSegments, IntSet transactionOnlySegments) {
if (addedSegments.isEmpty() && transactionOnlySegments.isEmpty()) {
return CompletableFutures.completedNull();
}
// add transfers for new or restarted segments
log.debugf("Adding inbound state transfer for segments %s", addedSegments);
// the set of nodes that reported errors when fetching data from them - these will not be retried in this topology
Set<Address> excludedSources = new HashSet<>();
// the sources and segments we are going to get from each source
Map<Address, IntSet> sources = new HashMap<>();
CompletionStage<Void> stage = CompletableFutures.completedNull();
if (isTransactional) {
stage = requestTransactions(addedSegments, transactionOnlySegments, sources, excludedSources);
}
if (isFetchEnabled) {
stage = stage.thenRun(() -> requestSegments(addedSegments, sources, excludedSources));
}
return stage;
}
private void findSources(IntSet segments, Map<Address, IntSet> sources, Set<Address> excludedSources,
boolean ignoreOwnedSegments) {
if (cache.wired().getStatus().isTerminated())
return;
IntSet segmentsWithoutSource = IntSets.mutableEmptySet(numSegments);
for (PrimitiveIterator.OfInt iter = segments.iterator(); iter.hasNext(); ) {
int segmentId = iter.nextInt();
Address source = findSource(segmentId, excludedSources, ignoreOwnedSegments);
// ignore all segments for which there are no other owners to pull data from.
// these segments are considered empty (or lost) and do not require a state transfer
if (source != null) {
IntSet segmentsFromSource = sources.computeIfAbsent(source, k -> IntSets.mutableEmptySet(numSegments));
segmentsFromSource.set(segmentId);
} else {
segmentsWithoutSource.set(segmentId);
}
}
if (!segmentsWithoutSource.isEmpty()) {
log.noLiveOwnersFoundForSegments(segmentsWithoutSource, cacheName, excludedSources);
}
}
private Address findSource(int segmentId, Set<Address> excludedSources, boolean ignoreOwnedSegment) {
List<Address> owners = cacheTopology.getReadConsistentHash().locateOwnersForSegment(segmentId);
if (!ignoreOwnedSegment || !owners.contains(rpcManager.getAddress())) {
// We prefer that transactions are sourced from primary owners.
// Needed in pessimistic mode: if the originator is the primary owner of the key, then the lock
// command is not replicated to the backup owners. See PessimisticDistributionInterceptor
// .acquireRemoteIfNeeded.
for (Address o : owners) {
if (!o.equals(rpcManager.getAddress()) && !excludedSources.contains(o)) {
return o;
}
}
}
return null;
}
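// Selection sketch for findSource (hypothetical owners, illustration only): with read owners
// [P, B1, B2] for a segment, the local node being B1 and ignoreOwnedSegment == true, the
// segment is skipped entirely because the local node is already an owner. With
// ignoreOwnedSegment == false, P is returned unless it is excluded or is the local node, in
// which case B2 is tried next; null is returned (segment reported lost) when no eligible
// owner remains.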
private CompletionStage<Void> requestTransactions(IntSet dataSegments, IntSet transactionOnlySegments,
Map<Address, IntSet> sources,
Set<Address> excludedSources) {
// TODO Remove excludedSources and always request transactions/segments only from the primary owner
findSources(dataSegments, sources, excludedSources, true);
AggregateCompletionStage<Void> aggregateStage = CompletionStages.aggregateCompletionStage();
IntSet failedSegments = IntSets.concurrentSet(numSegments);
Set<Address> sourcesToExclude = ConcurrentHashMap.newKeySet();
int topologyId = cacheTopology.getTopologyId();
sources.forEach((source, segmentsFromSource) -> {
CompletionStage<Response> sourceStage =
requestAndApplyTransactions(failedSegments, sourcesToExclude, topologyId, source, segmentsFromSource);
aggregateStage.dependsOn(sourceStage);
});
Map<Address, IntSet> transactionOnlySources = new HashMap<>();
findSources(transactionOnlySegments, transactionOnlySources, excludedSources, false);
transactionOnlySources.forEach((source, segmentsFromSource) -> {
CompletionStage<Response> sourceStage =
requestAndApplyTransactions(failedSegments, sourcesToExclude, topologyId, source, segmentsFromSource);
aggregateStage.dependsOn(sourceStage);
});
return aggregateStage.freeze().thenCompose(ignored -> {
if (failedSegments.isEmpty()) {
return CompletableFutures.completedNull();
}
excludedSources.addAll(sourcesToExclude);
// look for other sources for all failed segments
sources.clear();
return requestTransactions(dataSegments, transactionOnlySegments, sources, excludedSources);
});
}
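// Retry flow of requestTransactions (sketch): the per-source requests run concurrently;
// segments whose request failed are collected in failedSegments, and sources that answered
// with CacheNotFoundResponse are excluded. If anything failed, the method recurses with the
// same segment sets and the grown exclusion set, so findSources() picks replacement sources
// on the next pass under the same topology.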
private CompletionStage<Response> requestAndApplyTransactions(IntSet failedSegments, Set<Address> sourcesToExclude,
int topologyId, Address source,
IntSet segmentsFromSource) {
// Register the requested transactional segments
requestedTransactionalSegments.addAll(segmentsFromSource);
return getTransactions(source, segmentsFromSource, topologyId)
.whenComplete((response, throwable) -> {
processTransactionsResponse(failedSegments, sourcesToExclude, topologyId,
source, segmentsFromSource, response, throwable);
requestedTransactionalSegments.removeAll(segmentsFromSource);
});
}
private void processTransactionsResponse(IntSet failedSegments,
Set<Address> sourcesToExclude, int topologyId, Address source,
IntSet segmentsFromSource, Response response, Throwable throwable) {
boolean failed = false;
boolean exclude = false;
if (throwable != null) {
if (cache.wired().getStatus().isTerminated()) {
log.debugf("Cache %s has stopped while requesting transactions", cacheName);
return;
} else {
log.failedToRetrieveTransactionsForSegments(cacheName, source, segmentsFromSource, throwable);
}
// The primary owner is still in the cluster, so we can't exclude it - see ISPN-4091
failed = true;
}
if (response instanceof SuccessfulResponse) {
List<TransactionInfo> transactions =
(List<TransactionInfo>) ((SuccessfulResponse) response).getResponseValue();
applyTransactions(source, transactions, topologyId);
} else if (response instanceof CacheNotFoundResponse) {
log.debugf("Cache %s was stopped on node %s before sending transaction information", cacheName, source);
failed = true;
exclude = true;
} else {
log.unsuccessfulResponseRetrievingTransactionsForSegments(source, response);
failed = true;
}
// If requesting the transactions failed we need to retry
if (failed) {
failedSegments.addAll(segmentsFromSource);
}
if (exclude) {
sourcesToExclude.add(source);
}
}
private CompletionStage<Collection<ClusterListenerReplicateCallable<Object, Object>>> getClusterListeners(
int topologyId, List<Address> sources) {
// Try the first member. If the request fails, fall back to the second member and so on.
if (sources.isEmpty()) {
if (log.isTraceEnabled()) // TODO Ignore self again
log.trace("Unable to acquire cluster listeners from other members, assuming none are present");
return CompletableFuture.completedFuture(Collections.emptySet());
}
Address source = sources.get(0);
// Don't send the request to self
if (sources.get(0).equals(rpcManager.getAddress())) {
return getClusterListeners(topologyId, sources.subList(1, sources.size()));
}
if (log.isTraceEnabled())
log.tracef("Requesting cluster listeners of cache %s from node %s", cacheName, sources);
CacheRpcCommand cmd = commandsFactory.buildStateTransferGetListenersCommand(topologyId);
CompletionStage<ValidResponse> remoteStage =
rpcManager.invokeCommand(source, cmd, SingleResponseCollector.validOnly(), rpcOptions);
return handleAndCompose(remoteStage, (response, throwable) -> {
if (throwable != null) {
log.exceptionDuringClusterListenerRetrieval(source, throwable);
}
if (response instanceof SuccessfulResponse) {
return CompletableFuture.completedFuture(
(Collection<ClusterListenerReplicateCallable<Object, Object>>) response.getResponseValue());
} else {
log.unsuccessfulResponseForClusterListeners(source, response);
return getClusterListeners(topologyId, sources.subList(1, sources.size()));
}
});
}
private CompletionStage<Response> getTransactions(Address source, IntSet segments, int topologyId) {
if (log.isTraceEnabled()) {
log.tracef("Requesting transactions from node %s for segments %s", source, segments);
}
// get transactions and locks
CacheRpcCommand cmd = commandsFactory.buildStateTransferGetTransactionsCommand(topologyId, segments);
return rpcManager.invokeCommand(source, cmd, PassthroughSingleResponseCollector.INSTANCE, rpcOptions);
}
private void requestSegments(IntSet segments, Map<Address, IntSet> sources, Set<Address> excludedSources) {
if (sources.isEmpty()) {
findSources(segments, sources, excludedSources, true);
}
for (Map.Entry<Address, IntSet> e : sources.entrySet()) {
addTransfer(e.getKey(), e.getValue());
}
if (log.isTraceEnabled()) log.tracef("Finished adding inbound state transfer for segments %s", segments, cacheName);
}
/**
* Cancel transfers for segments we no longer own.
*
* @param removedSegments segments to be cancelled
*/
protected void cancelTransfers(IntSet removedSegments) {
synchronized (transferMapsLock) {
List<Integer> segmentsToCancel = new ArrayList<>(removedSegments);
while (!segmentsToCancel.isEmpty()) {
int segmentId = segmentsToCancel.remove(0);
List<InboundTransferTask> inboundTransfers = transfersBySegment.get(segmentId);
if (inboundTransfers != null) { // we need to check the transfer was not already completed
for (InboundTransferTask inboundTransfer : inboundTransfers) {
IntSet cancelledSegments = IntSets.mutableCopyFrom(removedSegments);
cancelledSegments.retainAll(inboundTransfer.getSegments());
segmentsToCancel.removeAll(cancelledSegments);
transfersBySegment.keySet().removeAll(cancelledSegments);
//this will also remove it from transfersBySource if the entire task gets cancelled
inboundTransfer.cancelSegments(cancelledSegments);
if (inboundTransfer.isCancelled()) {
removeTransfer(inboundTransfer);
}
}
}
}
}
}
protected CompletionStage<Void> removeStaleData(final IntSet removedSegments) {
// Invalidation doesn't ever remove stale data
if (configuration.clustering().cacheMode().isInvalidation()) {
return CompletableFutures.completedNull();
}
log.debugf("Removing no longer owned entries for cache %s", cacheName);
if (keyInvalidationListener != null) {
keyInvalidationListener.beforeInvalidation(removedSegments, IntSets.immutableEmptySet());
}
// This has to be invoked before removing the segments on the data container
localPublisherManager.segmentsLost(removedSegments);
dataContainer.removeSegments(removedSegments);
// We have to invoke removeSegments above on the data container in any case, because when L1 is enabled the L1
// store removes all the temporary entries when removeSegments is invoked. However, there is no reason to mess
// with the store if no segments are removed, so just exit early.
if (removedSegments.isEmpty())
return CompletableFutures.completedNull();
return persistenceManager.removeSegments(removedSegments)
.thenCompose(removed -> invalidateStaleEntries(removedSegments, removed));
}
private CompletionStage<Void> invalidateStaleEntries(IntSet removedSegments, Boolean removed) {
// If all stores were able to remove the given segments, we don't have to worry about invalidating entries
if (removed) {
return CompletableFutures.completedNull();
}
// All these segments have been removed from the data container, so we only care about private stores
AtomicLong removedEntriesCounter = new AtomicLong();
Predicate<Object> filter = key -> removedSegments.contains(getSegment(key));
Publisher<Object> publisher = persistenceManager.publishKeys(filter, PRIVATE);
return Flowable.fromPublisher(publisher)
.onErrorResumeNext(throwable -> {
PERSISTENCE.failedLoadingKeysFromCacheStore(throwable);
return Flowable.empty();
})
.buffer(configuration.clustering().stateTransfer().chunkSize())
.concatMapCompletable(keysToRemove -> {
removedEntriesCounter.addAndGet(keysToRemove.size());
return Completable.fromCompletionStage(invalidateBatch(keysToRemove));
})
.toCompletionStage(null)
.thenRun(() -> {
if (log.isTraceEnabled()) log.tracef("Removed %d keys, data container now has %d keys",
removedEntriesCounter.get(), dataContainer.sizeIncludingExpired());
});
}
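// Batching pattern used above, shown in isolation (illustrative, plain RxJava 3; the chunk
// size of 512 is a made-up value standing in for the configured state-transfer chunk size):
// Flowable.fromPublisher(keys)
//       .buffer(512)                      // group stale keys into fixed-size batches
//       .concatMapCompletable(batch ->    // invalidate one batch at a time, in order
//             Completable.fromCompletionStage(invalidateBatch(batch)))
//       .subscribe();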
protected CompletionStage<Void> invalidateBatch(Collection<Object> keysToRemove) {
InvalidateCommand invalidateCmd = commandsFactory.buildInvalidateCommand(INVALIDATE_FLAGS, keysToRemove.toArray());
InvocationContext ctx = icf.createNonTxInvocationContext();
ctx.setLockOwner(invalidateCmd.getKeyLockOwner());
return interceptorChain.invokeAsync(ctx, invalidateCmd)
.handle((ignored, throwable) -> {
if (throwable instanceof IllegalLifecycleStateException) {
// Ignore shutdown-related errors, because InvocationContextInterceptor starts
// rejecting commands before any component is stopped
} else if (throwable != null) {
log.failedToInvalidateKeys(throwable);
}
return null;
});
}
/**
* Check if any of the existing transfers should be restarted from a different source because the initial source
* is no longer a member.
*/
private void restartBrokenTransfers(CacheTopology cacheTopology, IntSet addedSegments) {
Set<Address> members = new HashSet<>(cacheTopology.getReadConsistentHash().getMembers());
synchronized (transferMapsLock) {
for (Iterator<Map.Entry<Address, List<InboundTransferTask>>> it =
transfersBySource.entrySet().iterator(); it.hasNext(); ) {
Map.Entry<Address, List<InboundTransferTask>> entry = it.next();
Address source = entry.getKey();
if (!members.contains(source)) {
if (log.isTraceEnabled()) {
log.tracef("Removing inbound transfers from source %s for cache %s", source, cacheName);
}
List<InboundTransferTask> inboundTransfers = entry.getValue();
it.remove();
for (InboundTransferTask inboundTransfer : inboundTransfers) {
// these segments will be restarted if they are still in new write CH
if (log.isTraceEnabled()) {
log.tracef("Removing inbound transfers from node %s for segments %s", source, inboundTransfer.getSegments());
}
IntSet unfinishedSegments = inboundTransfer.getUnfinishedSegments();
inboundTransfer.cancel();
addedSegments.addAll(unfinishedSegments);
transfersBySegment.keySet().removeAll(unfinishedSegments);
}
}
}
// exclude those that are already in progress from a valid source
addedSegments.removeAll(transfersBySegment.keySet());
}
}
private int getSegment(Object key) {
// here we can use any CH version because the routing table is not involved in computing the segment
return keyPartitioner.getSegment(key);
}
private InboundTransferTask addTransfer(Address source, IntSet segmentsFromSource) {
final InboundTransferTask inboundTransfer;
synchronized (transferMapsLock) {
if (log.isTraceEnabled()) {
log.tracef("Adding transfer from %s for segments %s", source, segmentsFromSource);
}
segmentsFromSource.removeAll(transfersBySegment.keySet()); // already in progress segments are excluded
if (segmentsFromSource.isEmpty()) {
if (log.isTraceEnabled()) {
log.tracef("All segments are already in progress, skipping");
}
return null;
}
inboundTransfer = new InboundTransferTask(segmentsFromSource, source, cacheTopology.getTopologyId(),
rpcManager, commandsFactory, timeout, cacheName, true);
addTransfer(inboundTransfer, segmentsFromSource);
}
stateRequestExecutor.executeAsync(() -> {
CompletionStage<Void> transferStarted = inboundTransfer.requestSegments();
return transferStarted.whenComplete((aVoid, throwable) -> onTaskCompletion(inboundTransfer));
});
return inboundTransfer;
}
@GuardedBy("transferMapsLock")
protected void addTransfer(InboundTransferTask inboundTransfer, IntSet segments) {
if (!running)
throw new IllegalLifecycleStateException("State consumer is not running for cache " + cacheName);
for (PrimitiveIterator.OfInt iter = segments.iterator(); iter.hasNext(); ) {
int segmentId = iter.nextInt();
transfersBySegment.computeIfAbsent(segmentId, s -> new ArrayList<>()).add(inboundTransfer);
}
transfersBySource.computeIfAbsent(inboundTransfer.getSource(), s -> new ArrayList<>()).add(inboundTransfer);
}
protected boolean removeTransfer(InboundTransferTask inboundTransfer) {
boolean found = false;
synchronized (transferMapsLock) {
if (log.isTraceEnabled()) log.tracef("Removing inbound transfers from node %s for segments %s",
inboundTransfer.getSegments(), inboundTransfer.getSource(), cacheName);
List<InboundTransferTask> transfers = transfersBySource.get(inboundTransfer.getSource());
if (transfers != null && (found = transfers.remove(inboundTransfer)) && transfers.isEmpty()) {
transfersBySource.remove(inboundTransfer.getSource());
}
// Box the segment as the map uses Integer as key
for (Integer segment : inboundTransfer.getSegments()) {
List<InboundTransferTask> innerTransfers = transfersBySegment.get(segment);
if (innerTransfers != null && innerTransfers.remove(inboundTransfer) && innerTransfers.isEmpty()) {
transfersBySegment.remove(segment);
}
}
}
return found;
}
protected void onTaskCompletion(final InboundTransferTask inboundTransfer) {
if (log.isTraceEnabled()) log.tracef("Inbound transfer finished: %s", inboundTransfer);
if (inboundTransfer.isCompletedSuccessfully()) {
removeTransfer(inboundTransfer);
notifyEndOfStateTransferIfNeeded();
}
}
public interface KeyInvalidationListener {
void beforeInvalidation(IntSet removedSegments, IntSet staleL1Segments);
}
}
| 64,175
| 48.480339
| 163
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/StateChunk.java
|
package org.infinispan.statetransfer;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.marshall.core.Ids;
/**
* Encapsulates a chunk of cache entries that belong to the same segment. This representation is suitable for sending
* to another cache during state transfer.
*
* @author anistor@redhat.com
* @since 5.2
*/
public class StateChunk {
/**
* The id of the segment for which we push cache entries.
*/
private final int segmentId;
/**
* The cache entries. They are all guaranteed to belong to the same segment: segmentId.
*/
private final Collection<InternalCacheEntry<?, ?>> cacheEntries;
/**
* Indicates to the receiver whether more chunks are to come for this segment.
*/
private final boolean isLastChunk;
public StateChunk(int segmentId, Collection<InternalCacheEntry<?, ?>> cacheEntries, boolean isLastChunk) {
this.segmentId = segmentId;
this.cacheEntries = cacheEntries;
this.isLastChunk = isLastChunk;
}
public int getSegmentId() {
return segmentId;
}
public Collection<InternalCacheEntry<?, ?>> getCacheEntries() {
return cacheEntries;
}
public boolean isLastChunk() {
return isLastChunk;
}
@Override
public String toString() {
return "StateChunk{" +
"segmentId=" + segmentId +
", cacheEntries=" + cacheEntries.size() +
", isLastChunk=" + isLastChunk +
'}';
}
public static class Externalizer extends AbstractExternalizer<StateChunk> {
@Override
public Integer getId() {
return Ids.STATE_CHUNK;
}
@Override
public Set<Class<? extends StateChunk>> getTypeClasses() {
return Collections.singleton(StateChunk.class);
}
@Override
public void writeObject(ObjectOutput output, StateChunk object) throws IOException {
output.writeInt(object.segmentId);
output.writeObject(object.cacheEntries);
output.writeBoolean(object.isLastChunk);
}
@Override
@SuppressWarnings("unchecked")
public StateChunk readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int segmentId = input.readInt();
Collection<InternalCacheEntry<?, ?>> cacheEntries = (Collection<InternalCacheEntry<?, ?>>) input.readObject();
boolean isLastChunk = input.readBoolean();
return new StateChunk(segmentId, cacheEntries, isLastChunk);
}
}
}
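// A minimal construction sketch (illustration only; the entry collection is assumed to hold
// entries that all map to the given segment):
// Collection<InternalCacheEntry<?, ?>> batch = ...;
// StateChunk chunk = new StateChunk(42, batch, /* isLastChunk */ true);
// assert chunk.getSegmentId() == 42 && chunk.isLastChunk();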
| 2,745
| 28.212766
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/TransactionInfo.java
|
package org.infinispan.statetransfer;
import static org.infinispan.commons.marshall.MarshallUtil.marshallCollection;
import static org.infinispan.commons.marshall.MarshallUtil.unmarshallCollection;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.marshall.core.Ids;
import org.infinispan.transaction.xa.GlobalTransaction;
/**
* A representation of a transaction that is suitable for transferring between a StateProvider and a StateConsumer
* running on different members of the same cache.
*
* @author anistor@redhat.com
* @since 5.2
*/
public class TransactionInfo {
private final GlobalTransaction globalTransaction;
private final List<WriteCommand> modifications;
private final Set<Object> lockedKeys;
private final int topologyId;
public TransactionInfo(GlobalTransaction globalTransaction, int topologyId, List<WriteCommand> modifications, Set<Object> lockedKeys) {
this.globalTransaction = globalTransaction;
this.topologyId = topologyId;
this.modifications = modifications;
this.lockedKeys = lockedKeys;
}
public GlobalTransaction getGlobalTransaction() {
return globalTransaction;
}
public List<WriteCommand> getModifications() {
return modifications;
}
public Set<Object> getLockedKeys() {
return lockedKeys;
}
public int getTopologyId() {
return topologyId;
}
@Override
public String toString() {
return "TransactionInfo{" +
"globalTransaction=" + globalTransaction +
", topologyId=" + topologyId +
", modifications=" + modifications +
", lockedKeys=" + lockedKeys +
'}';
}
public static class Externalizer extends AbstractExternalizer<TransactionInfo> {
@Override
public Integer getId() {
return Ids.TRANSACTION_INFO;
}
@Override
public Set<Class<? extends TransactionInfo>> getTypeClasses() {
return Collections.singleton(TransactionInfo.class);
}
@Override
public void writeObject(ObjectOutput output, TransactionInfo object) throws IOException {
output.writeObject(object.globalTransaction);
output.writeInt(object.topologyId);
marshallCollection(object.modifications, output);
marshallCollection(object.lockedKeys, output);
}
@Override
public TransactionInfo readObject(ObjectInput input) throws IOException, ClassNotFoundException {
GlobalTransaction globalTransaction = (GlobalTransaction) input.readObject();
int topologyId = input.readInt();
List<WriteCommand> modifications = unmarshallCollection(input, ArrayList::new);
Set<Object> lockedKeys = unmarshallCollection(input, HashSet::new);
return new TransactionInfo(globalTransaction, topologyId, modifications, lockedKeys);
}
}
}
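// Construction sketch (hypothetical values, illustration only): the provider captures, per
// in-flight transaction, the global id, the topology at which the data was collected, the
// modification list to replay and the keys whose backup locks must be re-acquired:
// TransactionInfo info = new TransactionInfo(gtx, topologyId, modifications, lockedKeys);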
| 3,167
| 30.68
| 138
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/CommitManager.java
|
package org.infinispan.statetransfer;
import static org.infinispan.commons.util.Util.toStr;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ReadCommittedEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Keeps track of the keys updated by normal operations and by state transfer. Since command processing happens
* concurrently with state transfer, the keys updated by normal commands need to be tracked in order to reject
* the updates from state transfer. It assumes that the keys from normal operations are more recent than the ones
* received by state transfer.
*
* @author Pedro Ruivo
* @since 7.0
*/
@Scope(Scopes.NAMED_CACHE)
public class CommitManager {
private static final Log log = LogFactory.getLog(CommitManager.class);
// Package private for testing only.
final Map<Integer, Map<Object, DiscardPolicy>> tracker = new ConcurrentHashMap<>();
@Inject InternalDataContainer dataContainer;
@Inject PersistenceManager persistenceManager;
@Inject TimeService timeService;
private volatile boolean trackStateTransfer;
private volatile boolean trackXSiteStateTransfer;
/**
* Starts tracking committed keys. All the keys committed from now on will be flagged with this flag. State transfer
* received after a key is tracked will be discarded for that key.
*
* @param track Flag to start tracking keys for local site state transfer or for remote site state transfer.
*/
public final void startTrack(Flag track) {
setTrack(track, true);
}
/**
* Stops tracking committed keys.
*
* @param track Flag to stop tracking keys for local site state transfer or for remote site state transfer.
*/
public final void stopTrack(Flag track) {
setTrack(track, false);
if (!trackStateTransfer && !trackXSiteStateTransfer) {
if (log.isTraceEnabled()) {
log.tracef("Tracking is disabled. Clear tracker: %s", tracker);
}
tracker.clear();
} else {
tracker.values().removeIf(entries -> {
entries.values().removeIf(policy -> policy.update(trackStateTransfer, trackXSiteStateTransfer));
return entries.isEmpty();
});
}
}
/**
* Stop tracking the entries for the given segment if state transfer tracking is enabled.
*
* @param flag      flag to verify if tracking is enabled.
* @param segmentId segment to stop tracking.
*/
public final void stopTrackFor(Flag flag, int segmentId) {
if (flag == Flag.PUT_FOR_STATE_TRANSFER && trackStateTransfer) {
// We only remove entries that are not related to cross-site state transfer. Different sites may have
// different configurations, thus a single entry may map to a different segment on each site.
tracker.computeIfPresent(segmentId, (k, entries) -> {
entries.values().removeIf(DiscardPolicy::stopForST);
return entries.isEmpty() ? null : entries;
});
}
}
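// Lifecycle sketch (illustration only): a state consumer brackets a rebalance with
// startTrack(Flag.PUT_FOR_STATE_TRANSFER) ... stopTrack(Flag.PUT_FOR_STATE_TRANSFER),
// calling stopTrackFor(Flag.PUT_FOR_STATE_TRANSFER, segmentId) as each segment finishes;
// any user write committed in between installs a DiscardPolicy that turns later
// state-transfer puts for the same key into no-ops.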
/**
* It tries to commit the cache entry. The entry is not committed if it originated from state transfer and another
* operation has already updated it.
* @param entry the entry to commit
* @param operation if {@code null}, it identifies this commit as originated from a normal operation. Otherwise, it
*                  identifies the state transfer (local or cross-site) that originated the commit.
* @param ctx the invocation context
*/
public final CompletionStage<Void> commit(final CacheEntry entry, final Flag operation, int segment,
boolean l1Only, InvocationContext ctx) {
if (log.isTraceEnabled()) {
log.tracef("Trying to commit. Key=%s. Operation Flag=%s, L1 write/invalidation=%s", toStr(entry.getKey()),
operation, l1Only);
}
if (l1Only || (operation == null && !trackStateTransfer && !trackXSiteStateTransfer)) {
//operation == null means that it is a normal put and tracking is not enabled!
//if it is an L1 invalidation, commit without tracking it.
if (log.isTraceEnabled()) {
log.tracef("Committing key=%s. It is an L1 invalidation or a normal put and no tracking is enabled!",
toStr(entry.getKey()));
}
return commitEntry(entry, segment, ctx);
}
if (isTrackDisabled(operation)) {
//this is a put for state transfer, but we are not tracking it. This means that the state transfer has ended
//or was cancelled due to a clear command.
if (log.isTraceEnabled()) {
log.tracef("Not committing key=%s. It is a state transfer key but no track is enabled!",
toStr(entry.getKey()));
}
return CompletableFutures.completedNull();
}
ByRef<CompletionStage<Void>> byRef = new ByRef<>(null);
Function<DiscardPolicy, DiscardPolicy> renewPolicy = discardPolicy -> {
if (discardPolicy != null && discardPolicy.ignore(operation)) {
if (log.isTraceEnabled()) {
log.tracef("Not committing key=%s. It was already overwritten! Discard policy=%s",
toStr(entry.getKey()), discardPolicy);
}
return discardPolicy;
}
byRef.set(commitEntry(entry, segment, ctx));
DiscardPolicy newDiscardPolicy = calculateDiscardPolicy(operation);
if (log.isTraceEnabled()) {
log.tracef("Committed key=%s. Old discard policy=%s. New discard policy=%s", toStr(entry.getKey()),
discardPolicy, newDiscardPolicy);
}
return newDiscardPolicy;
};
tracker.compute(segment, (key, entries) -> {
if (entries == null) {
DiscardPolicy newDiscardPolicy = renewPolicy.apply(null);
if (newDiscardPolicy != null) {
entries = new ConcurrentHashMap<>();
entries.put(entry.getKey(), newDiscardPolicy);
}
} else {
entries.compute(entry.getKey(), (e, discardPolicy) -> renewPolicy.apply(discardPolicy));
}
return entries;
});
CompletionStage<Void> stage = byRef.get();
if (stage != null) {
return stage;
}
return CompletableFutures.completedNull();
}
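// Ordering example for commit() (hypothetical key, illustration only): with state transfer
// tracking enabled, a user put for key K (operation == null) commits and records
// DiscardPolicy(discardST=true, ...); a later state-transfer put for K
// (operation == Flag.PUT_FOR_STATE_TRANSFER) then matches discardPolicy.ignore(operation)
// and is dropped, so the newer user value wins.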
private CompletionStage<Void> commitEntry(CacheEntry entry, int segment, InvocationContext ctx) {
if (entry instanceof ReadCommittedEntry) {
return ((ReadCommittedEntry) entry).commit(segment, dataContainer);
} else {
entry.commit(dataContainer);
}
return CompletableFutures.completedNull();
}
/**
* @return {@code true} if the flag is being tracked, {@code false} otherwise.
*/
public final boolean isTracking(Flag trackFlag) {
switch (trackFlag) {
case PUT_FOR_STATE_TRANSFER:
return trackStateTransfer;
case PUT_FOR_X_SITE_STATE_TRANSFER:
return trackXSiteStateTransfer;
}
return false;
}
/**
* @return {@code true} if no keys are tracked, {@code false} otherwise.
*/
public final boolean isEmpty() {
return tracker.isEmpty();
}
@Override
public String toString() {
return "CommitManager{" +
"tracker=" + tracker.size() + " key(s)" +
", trackStateTransfer=" + trackStateTransfer +
", trackXSiteStateTransfer=" + trackXSiteStateTransfer +
'}';
}
private void setTrack(Flag track, boolean value) {
if (log.isTraceEnabled()) {
log.tracef("Set track to %s = %s", track, value);
}
switch (track) {
case PUT_FOR_STATE_TRANSFER:
this.trackStateTransfer = value;
break;
case PUT_FOR_X_SITE_STATE_TRANSFER:
this.trackXSiteStateTransfer = value;
break;
}
}
private boolean isTrackDisabled(Flag track) {
return (track == Flag.PUT_FOR_STATE_TRANSFER && !trackStateTransfer) ||
(track == Flag.PUT_FOR_X_SITE_STATE_TRANSFER && !trackXSiteStateTransfer);
}
private DiscardPolicy calculateDiscardPolicy(Flag operation) {
boolean discardStateTransfer = trackStateTransfer && operation != Flag.PUT_FOR_STATE_TRANSFER;
boolean discardXSiteStateTransfer = trackXSiteStateTransfer && operation != Flag.PUT_FOR_X_SITE_STATE_TRANSFER;
if (!discardStateTransfer && !discardXSiteStateTransfer) {
return null;
}
return new DiscardPolicy(discardStateTransfer, discardXSiteStateTransfer);
}
private static class DiscardPolicy {
private boolean discardST;
private boolean discardXSiteST;
private DiscardPolicy(boolean discardST, boolean discardXSiteST) {
this.discardST = discardST;
this.discardXSiteST = discardXSiteST;
}
public synchronized final boolean ignore(Flag operation) {
return (discardST && operation == Flag.PUT_FOR_STATE_TRANSFER) ||
(discardXSiteST && operation == Flag.PUT_FOR_X_SITE_STATE_TRANSFER);
}
public synchronized boolean update(boolean discardST, boolean discardXSiteST) {
this.discardST = discardST;
this.discardXSiteST = discardXSiteST;
return !this.discardST && !this.discardXSiteST;
}
public boolean stopForST() {
return update(false, discardXSiteST);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DiscardPolicy that = (DiscardPolicy) o;
return discardST == that.discardST && discardXSiteST == that.discardXSiteST;
}
@Override
public int hashCode() {
int result = (discardST ? 1 : 0);
result = 31 * result + (discardXSiteST ? 1 : 0);
return result;
}
@Override
public String toString() {
return "DiscardPolicy{" +
"discardStateTransfer=" + discardST +
", discardXSiteStateTransfer=" + discardXSiteST +
'}';
}
}
}
| 10,782
| 37.373665
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/statetransfer/RebalanceType.java
|
package org.infinispan.statetransfer;
import java.util.Objects;
import org.infinispan.configuration.cache.CacheMode;
public enum RebalanceType {
/**
* Used by local and invalidation cache modes. No state transfer is happening.
*/
NONE,
/**
* Used by distributed and replicated caches. To guarantee consistent results and non-blocking reads,
* cache must undergo a series of 4 topology changes:
* STABLE → READ_OLD_WRITE_ALL → READ_ALL_WRITE_ALL → READ_NEW_WRITE_ALL → STABLE
*/
FOUR_PHASE;
public static RebalanceType from(CacheMode cacheMode) {
switch (Objects.requireNonNull(cacheMode)) {
case LOCAL:
case INVALIDATION_SYNC:
case INVALIDATION_ASYNC:
return NONE;
case REPL_SYNC:
case REPL_ASYNC:
case DIST_SYNC:
case DIST_ASYNC:
return FOUR_PHASE;
default:
throw new IllegalArgumentException();
}
}
}
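// Usage sketch (illustration only):
// assert RebalanceType.from(CacheMode.LOCAL) == RebalanceType.NONE;
// assert RebalanceType.from(CacheMode.DIST_SYNC) == RebalanceType.FOUR_PHASE;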
| 989
| 27.285714
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/BiFunctionMapper.java
|
package org.infinispan.cache.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.function.BiFunction;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* A BiFunction wrapper that uses the cache's underlying DataConversion objects to perform its operations.
*/
@Scope(Scopes.NAMED_CACHE)
public class BiFunctionMapper implements BiFunction {
private final DataConversion keyDataConversion;
private final DataConversion valueDataConversion;
private final BiFunction biFunction;
@Inject
public void injectDependencies(ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(keyDataConversion);
componentRegistry.wireDependencies(valueDataConversion);
}
public BiFunctionMapper(BiFunction remappingFunction,
DataConversion keyDataConversion,
DataConversion valueDataConversion) {
this.biFunction = remappingFunction;
this.keyDataConversion = keyDataConversion;
this.valueDataConversion = valueDataConversion;
}
public DataConversion getKeyDataConversion() {
return keyDataConversion;
}
public DataConversion getValueDataConversion() {
return valueDataConversion;
}
@Override
public Object apply(Object k, Object v) {
Object key = keyDataConversion.fromStorage(k);
Object value = valueDataConversion.fromStorage(v);
Object result = biFunction.apply(key, value);
return result != null ? valueDataConversion.toStorage(result) : null;
}
public static class Externalizer implements AdvancedExternalizer<BiFunctionMapper> {
@Override
public Set<Class<? extends BiFunctionMapper>> getTypeClasses() {
return Collections.singleton(BiFunctionMapper.class);
}
@Override
public Integer getId() {
return Ids.BI_FUNCTION_MAPPER;
}
@Override
public void writeObject(ObjectOutput output, BiFunctionMapper object) throws IOException {
output.writeObject(object.biFunction);
DataConversion.writeTo(output, object.keyDataConversion);
DataConversion.writeTo(output, object.valueDataConversion);
}
@Override
public BiFunctionMapper readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new BiFunctionMapper((BiFunction) input.readObject(),
DataConversion.readFrom(input), DataConversion.readFrom(input));
}
}
}
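// Conversion-flow sketch (hypothetical conversions and values, illustration only):
// storage-format arguments are unwrapped, the user function runs on application-format
// objects, and a non-null result is converted back to storage format:
// BiFunction<Object, Object, Object> concat = (k, v) -> k + ":" + v;
// BiFunctionMapper mapper = new BiFunctionMapper(concat, keyDataConversion, valueDataConversion);
// Object storageResult = mapper.apply(storageKey, storageValue); // a null result stays null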
| 2,871
| 32.788235
| 105
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/AbstractDelegatingAdvancedCache.java
|
package org.infinispan.cache.impl;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Function;
import javax.security.auth.Subject;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.XAResource;
import org.infinispan.AdvancedCache;
import org.infinispan.CacheSet;
import org.infinispan.LockedStream;
import org.infinispan.batch.BatchContainer;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.encoding.DataConversion;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.expiration.ExpirationManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.metadata.Metadata;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.security.AuthorizationManager;
import org.infinispan.stats.Stats;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.util.concurrent.locks.LockManager;
/**
* Similar to {@link org.infinispan.cache.impl.AbstractDelegatingCache}, but for {@link AdvancedCache}.
*
* @author Mircea.Markus@jboss.com
* @author Tristan Tarrant
* @see org.infinispan.cache.impl.AbstractDelegatingCache
*/
public abstract class AbstractDelegatingAdvancedCache<K, V> extends AbstractDelegatingCache<K, V> implements AdvancedCache<K, V> {
protected final AdvancedCache<K, V> cache;
protected AbstractDelegatingAdvancedCache(AdvancedCache<K, V> cache) {
super(cache);
this.cache = cache;
}
/**
* @deprecated Since 10.0, will be removed without a replacement
*/
@Deprecated
@Override
public AsyncInterceptorChain getAsyncInterceptorChain() {
return cache.getAsyncInterceptorChain();
}
@Override
public AdvancedCache<K, V> getAdvancedCache() {
      //We need to override the super implementation, which returns the decorated cache;
      //otherwise the current operation would break out of the selected ClassLoader.
return this;
}
@Override
public EvictionManager getEvictionManager() {
return cache.getEvictionManager();
}
@Override
public ExpirationManager<K, V> getExpirationManager() {
return cache.getExpirationManager();
}
@Override
public ComponentRegistry getComponentRegistry() {
return cache.getComponentRegistry();
}
@Override
public DistributionManager getDistributionManager() {
return cache.getDistributionManager();
}
@Override
public AuthorizationManager getAuthorizationManager() {
return cache.getAuthorizationManager();
}
@Override
public AdvancedCache<K, V> lockAs(Object lockOwner) {
AdvancedCache<K, V> lockCache = cache.lockAs(lockOwner);
if (lockCache != cache) {
return rewrap(lockCache);
} else {
return this;
}
}
@Override
public RpcManager getRpcManager() {
return cache.getRpcManager();
}
@Override
public BatchContainer getBatchContainer() {
return cache.getBatchContainer();
}
@Override
public DataContainer<K, V> getDataContainer() {
return cache.getDataContainer();
}
@Override
public TransactionManager getTransactionManager() {
return cache.getTransactionManager();
}
@Override
public LockManager getLockManager() {
return cache.getLockManager();
}
@Override
public XAResource getXAResource() {
return cache.getXAResource();
}
@Override
public AvailabilityMode getAvailability() {
return cache.getAvailability();
}
@Override
public void setAvailability(AvailabilityMode availabilityMode) {
cache.setAvailability(availabilityMode);
}
@ManagedAttribute(
description = "Returns the cache availability",
displayName = "Cache availability",
dataType = DataType.TRAIT,
writable = true
)
public String getCacheAvailability() {
return getAvailability().toString();
}
public void setCacheAvailability(String availabilityString) {
setAvailability(AvailabilityMode.valueOf(availabilityString));
}
@ManagedAttribute(
description = "Returns whether cache rebalancing is enabled",
displayName = "Cache rebalacing",
dataType = DataType.TRAIT,
writable = true
)
public boolean isRebalancingEnabled() {
LocalTopologyManager localTopologyManager = getComponentRegistry().getComponent(LocalTopologyManager.class);
if (localTopologyManager != null) {
try {
return localTopologyManager.isCacheRebalancingEnabled(getName());
} catch (Exception e) {
throw new CacheException(e);
}
} else {
return false;
}
}
public void setRebalancingEnabled(boolean enabled) {
LocalTopologyManager localTopologyManager = getComponentRegistry().getComponent(LocalTopologyManager.class);
if (localTopologyManager != null) {
try {
localTopologyManager.setCacheRebalancingEnabled(getName(), enabled);
} catch (Exception e) {
throw new CacheException(e);
}
}
}
@Override
public CompletionStage<Boolean> touch(Object key, boolean touchEvenIfExpired) {
return cache.touch(key, touchEvenIfExpired);
}
@Override
public CompletionStage<Boolean> touch(Object key, int segment, boolean touchEvenIfExpired) {
return cache.touch(key, segment, touchEvenIfExpired);
}
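   // The with*/no* variants below re-wrap this decorator around the new delegate (via rewrap)
   // so that decoration is preserved when flags, encodings or media types change.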
@Override
public AdvancedCache<K, V> withFlags(Flag flag) {
AdvancedCache<K, V> flagCache = cache.withFlags(flag);
if (flagCache != cache) {
return rewrap(flagCache);
} else {
return this;
}
}
@Override
public AdvancedCache<K, V> withFlags(Flag... flags) {
AdvancedCache<K, V> flagCache = cache.withFlags(flags);
if (flagCache != cache) {
return rewrap(flagCache);
} else {
return this;
}
}
@Override
public AdvancedCache<K, V> withFlags(Collection<Flag> flags) {
AdvancedCache<K, V> flagCache = cache.withFlags(flags);
if (flagCache != cache) {
return rewrap(flagCache);
} else {
return this;
}
}
@Override
public AdvancedCache<K, V> noFlags() {
AdvancedCache<K, V> flagCache = cache.noFlags();
if (flagCache != cache) {
return rewrap(flagCache);
} else {
return this;
}
}
@Override
public AdvancedCache<K, V> transform(Function<AdvancedCache<K, V>, ? extends AdvancedCache<K, V>> transformation) {
AdvancedCache<K, V> newDelegate = cache.transform(transformation);
AdvancedCache<K, V> newInstance = newDelegate != cache ? rewrap(newDelegate) : this;
return transformation.apply(newInstance);
}
@Override
public AdvancedCache<K, V> withSubject(Subject subject) {
AdvancedCache<K, V> newDelegate = cache.withSubject(subject);
if (newDelegate != cache) {
return rewrap(newDelegate);
} else {
return this;
}
}
@Override
public boolean lock(K... key) {
return cache.lock(key);
}
@Override
public boolean lock(Collection<? extends K> keys) {
return cache.lock(keys);
}
@Override
public Stats getStats() {
return cache.getStats();
}
@Override
public ClassLoader getClassLoader() {
return cache.getClassLoader();
}
@Override
public AdvancedCache<K, V> with(ClassLoader classLoader) {
return this;
}
@Override
public Map<K, V> getAll(Set<?> keys) {
return cache.getAll(keys);
}
@Override
public CacheEntry<K, V> getCacheEntry(Object key) {
return cache.getCacheEntry(key);
}
@Override
public CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key) {
return cache.getCacheEntryAsync(key);
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
return cache.getAllCacheEntries(keys);
}
@Override
public Map<K, V> getAndPutAll(Map<? extends K, ? extends V> map) {
return cache.getAndPutAll(map);
}
@Override
public java.util.Map<K, V> getGroup(String groupName) {
return cache.getGroup(groupName);
}
@Override
public void removeGroup(String groupName) {
cache.removeGroup(groupName);
}
@Override
public V put(K key, V value, Metadata metadata) {
return cache.put(key, value, metadata);
}
@Override
public V replace(K key, V value, Metadata metadata) {
return cache.replace(key, value, metadata);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, Metadata metadata) {
return cache.replaceAsync(key, value, metadata);
}
@Override
public CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(K key, V value, Metadata metadata) {
return cache.replaceAsyncEntry(key, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V value, Metadata metadata) {
return cache.replace(key, oldValue, value, metadata);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, Metadata metadata) {
return cache.replaceAsync(key, oldValue, newValue, metadata);
}
@Override
public V putIfAbsent(K key, V value, Metadata metadata) {
return cache.putIfAbsent(key, value, metadata);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, Metadata metadata) {
return cache.putIfAbsentAsync(key, value, metadata);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(K key, V value, Metadata metadata) {
return cache.putIfAbsentAsyncEntry(key, value, metadata);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, Metadata metadata) {
return cache.putAsync(key, value, metadata);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putAsyncEntry(K key, V value, Metadata metadata) {
return cache.putAsyncEntry(key, value, metadata);
}
@Override
public void putForExternalRead(K key, V value, Metadata metadata) {
cache.putForExternalRead(key, value, metadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.compute(key, remappingFunction, metadata);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeIfPresent(key, remappingFunction, metadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return cache.computeIfAbsent(key, mappingFunction, metadata);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.merge(key, value, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeAsync(key, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeIfPresentAsync(key, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return cache.computeIfAbsentAsync(key, mappingFunction, metadata);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.mergeAsync(key, value, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.mergeAsync(key, value, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.mergeAsync(key, value, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeAsync(key, remappingFunction);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeAsync(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeAsync(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
return cache.computeIfAbsentAsync(key, mappingFunction);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfAbsentAsync(key, mappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeIfAbsentAsync(key, mappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeIfPresentAsync(key, remappingFunction);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfPresentAsync(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeIfPresentAsync(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return cache.mergeAsync(key, value, remappingFunction);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, Metadata metadata) {
cache.putAll(map, metadata);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> map, Metadata metadata) {
return cache.putAllAsync(map, metadata);
}
@Override
public CacheSet<CacheEntry<K, V>> cacheEntrySet() {
return cache.cacheEntrySet();
}
@Override
public LockedStream<K, V> lockedStream() {
return cache.lockedStream();
}
@Override
public CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan) {
return cache.removeLifespanExpired(key, value, lifespan);
}
@Override
public CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value) {
return cache.removeMaxIdleExpired(key, value);
}
@Override
public AdvancedCache<?, ?> withEncoding(Class<? extends Encoder> encoder) {
AdvancedCache encoderCache = cache.withEncoding(encoder);
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return this;
}
}
@Override
public AdvancedCache withEncoding(Class<? extends Encoder> keyEncoder, Class<? extends Encoder> valueEncoder) {
AdvancedCache encoderCache = cache.withEncoding(keyEncoder, valueEncoder);
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return this;
}
}
@Override
public AdvancedCache<?, ?> withKeyEncoding(Class<? extends Encoder> encoder) {
AdvancedCache encoderCache = cache.withKeyEncoding(encoder);
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return this;
}
}
@Deprecated
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> wrapper) {
AdvancedCache<K, V> encoderCache = cache.withWrapping(wrapper);
if (encoderCache != cache) {
return this.rewrap(encoderCache);
} else {
return this;
}
}
@Override
public AdvancedCache<?, ?> withMediaType(String keyMediaType, String valueMediaType) {
AdvancedCache encoderCache = this.cache.withMediaType(keyMediaType, valueMediaType);
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return this;
}
}
@Override
public <K1, V1> AdvancedCache<K1, V1> withMediaType(MediaType keyMediaType, MediaType valueMediaType) {
AdvancedCache encoderCache = this.cache.withMediaType(keyMediaType, valueMediaType);
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return (AdvancedCache<K1, V1>) this;
}
}
@Override
public AdvancedCache<K, V> withStorageMediaType() {
AdvancedCache<K, V> encoderCache = this.cache.withStorageMediaType();
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return this;
}
}
@Deprecated
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> keyWrapper, Class<? extends Wrapper> valueWrapper) {
AdvancedCache<K, V> encoderCache = cache.withWrapping(keyWrapper, valueWrapper);
if (encoderCache != cache) {
return rewrap(encoderCache);
} else {
return this;
}
}
/**
* No generics because some methods return {@code AdvancedCache<?, ?>},
* and returning the proper type would require erasure anyway.
*/
public abstract AdvancedCache rewrap(AdvancedCache newDelegate);
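   // A minimal sketch of the rewrap contract (hypothetical subclass with no extra state):
   //   class AuditingCache<K, V> extends AbstractDelegatingAdvancedCache<K, V> {
   //      AuditingCache(AdvancedCache<K, V> delegate) { super(delegate); }
   //      @Override
   //      public AdvancedCache rewrap(AdvancedCache newDelegate) { return new AuditingCache<>(newDelegate); }
   //   }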
@Override
public DataConversion getKeyDataConversion() {
return cache.getKeyDataConversion();
}
@Override
public DataConversion getValueDataConversion() {
return cache.getValueDataConversion();
}
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
return cache.getAllAsync(keys);
}
@Override
public CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(Object key) {
return cache.removeAsyncEntry(key);
}
}
| 19,905
| 31.15832
| 206
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/CacheImpl.java
|
package org.infinispan.cache.impl;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.infinispan.context.Flag.FAIL_SILENTLY;
import static org.infinispan.context.Flag.FORCE_ASYNCHRONOUS;
import static org.infinispan.context.Flag.IGNORE_RETURN_VALUES;
import static org.infinispan.context.Flag.PUT_FOR_EXTERNAL_READ;
import static org.infinispan.context.Flag.ZERO_LOCK_ACQUISITION_TIMEOUT;
import static org.infinispan.context.InvocationContextFactory.UNBOUNDED;
import static org.infinispan.util.logging.Log.CONFIG;
import java.lang.annotation.Annotation;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Function;
import javax.security.auth.Subject;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.XAResource;
import org.infinispan.AdvancedCache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.CacheStream;
import org.infinispan.LockedStream;
import org.infinispan.batch.BatchContainer;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.functions.MergeFunction;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.read.SizeCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.ValueMatcher;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.commons.marshall.StreamingMarshaller;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.InfinispanCollections;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.configuration.format.PropertyFormatter;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.group.impl.GroupManager;
import org.infinispan.encoding.DataConversion;
import org.infinispan.encoding.impl.StorageConfigurationManager;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.eviction.EvictionStrategy;
import org.infinispan.expiration.ExpirationManager;
import org.infinispan.expiration.impl.InternalExpirationManager;
import org.infinispan.expiration.impl.TouchCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.functional.impl.Params;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.ListenerHolder;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.security.AuthorizationManager;
import org.infinispan.statetransfer.StateTransferManager;
import org.infinispan.stats.Stats;
import org.infinispan.stats.impl.StatsImpl;
import org.infinispan.stream.StreamMarshalling;
import org.infinispan.stream.impl.LockedStreamImpl;
import org.infinispan.stream.impl.TxLockedStreamImpl;
import org.infinispan.stream.impl.local.ValueCacheCollection;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.xa.TransactionXaAdapter;
import org.infinispan.transaction.xa.XaTransactionTable;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* @author Mircea.Markus@jboss.com
* @author Galder Zamarreño
* @author Sanne Grinovero
* @author <a href="http://gleamynode.net/">Trustin Lee</a>
* @since 4.0
*/
@Scope(Scopes.NAMED_CACHE)
@SurvivesRestarts
@MBean(objectName = CacheImpl.OBJECT_NAME, description = "Component that represents an individual cache instance.")
public class CacheImpl<K, V> implements AdvancedCache<K, V> {
private static final Log log = LogFactory.getLog(CacheImpl.class);
public static final String OBJECT_NAME = "Cache";
private static final long PFER_FLAGS = EnumUtil.bitSetOf(FAIL_SILENTLY, FORCE_ASYNCHRONOUS, ZERO_LOCK_ACQUISITION_TIMEOUT, PUT_FOR_EXTERNAL_READ, IGNORE_RETURN_VALUES);
@Inject protected InvocationContextFactory invocationContextFactory;
@Inject protected CommandsFactory commandsFactory;
@Inject protected AsyncInterceptorChain invoker;
@Inject protected Configuration config;
@Inject protected CacheNotifier<K,V> notifier;
@Inject protected CacheManagerNotifier cacheManagerNotifier;
@Inject protected BatchContainer batchContainer;
@Inject protected ComponentRegistry componentRegistry;
@Inject protected TransactionManager transactionManager;
@Inject protected RpcManager rpcManager;
@Inject @ComponentName(KnownComponentNames.INTERNAL_MARSHALLER)
protected StreamingMarshaller marshaller;
@Inject protected KeyPartitioner keyPartitioner;
@Inject EvictionManager<K,V> evictionManager;
@Inject InternalExpirationManager<K, V> expirationManager;
@Inject InternalDataContainer<K,V> dataContainer;
@Inject EmbeddedCacheManager cacheManager;
@Inject LockManager lockManager;
@Inject DistributionManager distributionManager;
@Inject TransactionTable txTable;
@Inject AuthorizationManager authorizationManager;
@Inject PartitionHandlingManager partitionHandlingManager;
@Inject GlobalConfiguration globalCfg;
@Inject LocalTopologyManager localTopologyManager;
@Inject StateTransferManager stateTransferManager;
@Inject InvocationHelper invocationHelper;
@Inject StorageConfigurationManager storageConfigurationManager;
   // TODO Remove after ISPN-11584 is fixed and the AdvancedCache methods are implemented in EncoderCache
@Inject ComponentRef<AdvancedCache> encoderCache;
@Inject GroupManager groupManager;
protected Metadata defaultMetadata;
private final String name;
private volatile boolean stopping = false;
private boolean transactional;
private boolean batchingEnabled;
private final ContextBuilder nonTxContextBuilder = this::nonTxContextBuilder;
private final ContextBuilder defaultBuilder = i -> invocationHelper.createInvocationContextWithImplicitTransaction(i, false);
public CacheImpl(String name) {
this.name = name;
}
   // This should rather be a @Start method, but CacheImpl may not be an actual component, only a delegate
   // of EncoderCache. ATM there's no way to invoke @Start methods, just wireDependencies
@Inject
public void preStart() {
      // We have to do this before start, since some components may start before the actual cache and they
      // need access to the default metadata for some operations
defaultMetadata = Configurations.newDefaultMetadata(config);
transactional = config.transaction().transactionMode().isTransactional();
batchingEnabled = config.invocationBatching().enabled();
}
private void assertKeyNotNull(Object key) {
requireNonNull(key, "Null keys are not supported!");
}
private void assertValueNotNull(Object value) {
requireNonNull(value, "Null values are not supported!");
}
void assertKeyValueNotNull(Object key, Object value) {
assertKeyNotNull(key);
assertValueNotNull(value);
}
private void assertFunctionNotNull(Object function) {
requireNonNull(function, "Null functions are not supported!");
}
// CacheSupport does not extend AdvancedCache, so it cannot really call up
// to the cache methods that take Metadata parameter. Since CacheSupport
// methods are declared final, the easiest is for CacheImpl to stop
// extending CacheSupport and implement the base methods directly.
@Override
public final V put(K key, V value) {
return put(key, value, defaultMetadata);
}
@Override
public final V put(K key, V value, long lifespan, TimeUnit unit) {
return put(key, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final V putIfAbsent(K key, V value, long lifespan, TimeUnit unit) {
return putIfAbsent(key, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit unit) {
putAll(map, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final V replace(K key, V value, long lifespan, TimeUnit unit) {
return replace(key, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit unit) {
return replace(key, oldValue, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final V putIfAbsent(K key, V value) {
return putIfAbsent(key, value, defaultMetadata);
}
@Override
public final boolean replace(K key, V oldValue, V newValue) {
return replace(key, oldValue, newValue, defaultMetadata);
}
@Override
public final V replace(K key, V value) {
return replace(key, value, defaultMetadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return compute(key, remappingFunction, false);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(defaultMetadata.maxIdle(), MILLISECONDS).build();
return computeInternal(key, remappingFunction, false, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit).build();
return computeInternal(key, remappingFunction, false, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return computeInternal(key, remappingFunction, false, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
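   // Usage sketch (hypothetical key and values, assuming a Cache<String, String>):
   //   Metadata meta = new EmbeddedMetadata.Builder().lifespan(10, TimeUnit.SECONDS).build();
   //   cache.compute("counter", (k, v) -> v == null ? "1" : String.valueOf(Integer.parseInt(v) + 1), meta);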
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return compute(key, remappingFunction, true);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit).build();
return computeInternal(key, remappingFunction, true, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit).build();
return computeInternal(key, remappingFunction, true, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return computeInternal(key, remappingFunction, true, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
private V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, boolean computeIfPresent) {
return computeInternal(key, remappingFunction, computeIfPresent, applyDefaultMetadata(defaultMetadata), addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
private V computeInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, boolean computeIfPresent, Metadata metadata, long flags) {
return computeInternal(key, remappingFunction, computeIfPresent, metadata, flags, defaultContextBuilderForWrite());
}
V computeInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, boolean computeIfPresent,
Metadata metadata, long flags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
assertFunctionNotNull(remappingFunction);
ComputeCommand command = commandsFactory.buildComputeCommand(key, remappingFunction, computeIfPresent,
keyPartitioner.getSegment(key), metadata, flags);
return invocationHelper.invoke(contextBuilder, command, 1);
}
@Override
public final V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
return computeIfAbsent(key, mappingFunction, defaultMetadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit).build();
return computeIfAbsent(key, mappingFunction, metadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit).build();
return computeIfAbsent(key, mappingFunction, metadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return computeIfAbsentInternal(key, mappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
V computeIfAbsentInternal(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata, long flags,
ContextBuilder contextBuilder) {
assertKeyNotNull(key);
assertFunctionNotNull(mappingFunction);
ComputeIfAbsentCommand command = commandsFactory.buildComputeIfAbsentCommand(key, mappingFunction,
keyPartitioner.getSegment(key), metadata, flags);
return invocationHelper.invoke(contextBuilder, command, 1);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return mergeInternal(key, value, remappingFunction, defaultMetadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(defaultMetadata.maxIdle(), MILLISECONDS).build();
return mergeInternal(key, value, remappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
return mergeInternal(key, value, remappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return mergeInternal(key, value, remappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
V mergeInternal(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata,
long flags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
assertValueNotNull(value);
assertFunctionNotNull(remappingFunction);
DataConversion keyDataConversion;
DataConversion valueDataConversion;
//TODO: Correctly propagate DataConversion objects https://issues.redhat.com/browse/ISPN-11584
if (remappingFunction instanceof BiFunctionMapper) {
BiFunctionMapper biFunctionMapper = (BiFunctionMapper) remappingFunction;
keyDataConversion = biFunctionMapper.getKeyDataConversion();
valueDataConversion = biFunctionMapper.getValueDataConversion();
} else {
keyDataConversion = encoderCache.running().getKeyDataConversion();
valueDataConversion = encoderCache.running().getValueDataConversion();
}
ReadWriteKeyCommand<K, V, V> command = commandsFactory.buildReadWriteKeyCommand(key,
new MergeFunction<>(value, remappingFunction, metadata), keyPartitioner.getSegment(key),
Params.fromFlagsBitSet(flags), keyDataConversion, valueDataConversion);
return invocationHelper.invoke(contextBuilder, command, 1);
}
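   // Usage sketch (hypothetical entries): if "k" currently maps to "a", the call below stores "ab";
   // a remapping function that returns null removes the entry instead.
   //   cache.merge("k", "b", (oldV, newV) -> oldV + newV);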
@Override
public final CompletableFuture<V> putAsync(K key, V value) {
return putAsync(key, value, defaultMetadata);
}
@Override
public final CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit unit) {
return putAsync(key, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data) {
return putAllAsync(data, defaultMetadata);
}
@Override
public final CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit unit) {
      return putAllAsync(data, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final CompletableFuture<V> putIfAbsentAsync(K key, V value) {
return putIfAbsentAsync(key, value, defaultMetadata);
}
@Override
public final CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit unit) {
return putIfAbsentAsync(key, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final CompletableFuture<V> replaceAsync(K key, V value) {
return replaceAsync(key, value, defaultMetadata);
}
@Override
public final CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit unit) {
return replaceAsync(key, value, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
return replaceAsync(key, oldValue, newValue, defaultMetadata);
}
@Override
public final CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit unit) {
return replaceAsync(key, oldValue, newValue, lifespan, unit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public final void putAll(Map<? extends K, ? extends V> m) {
putAll(m, defaultMetadata);
}
@Override
public final boolean remove(Object key, Object value) {
return remove(key, value, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final boolean remove(Object key, Object value, long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
RemoveCommand command = commandsFactory.buildRemoveCommand(key, value, keyPartitioner.getSegment(key), explicitFlags);
return invocationHelper.invoke(contextBuilder, command, 1);
}
@Override
public final int size() {
return size(EnumUtil.EMPTY_BIT_SET);
}
final int size(long explicitFlags) {
SizeCommand command = commandsFactory.buildSizeCommand(null, explicitFlags);
long size = invocationHelper.invoke(invocationContextFactory.createInvocationContext(false, UNBOUNDED), command);
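      // Clamp to Integer.MAX_VALUE because Map#size returns an int; use sizeAsync() for the exact long count.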
return size > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) size;
}
@Override
public CompletableFuture<Long> sizeAsync() {
return sizeAsync(EnumUtil.EMPTY_BIT_SET);
}
final CompletableFuture<Long> sizeAsync(long explicitFlags) {
SizeCommand command = commandsFactory.buildSizeCommand(null, explicitFlags);
return invocationHelper.invokeAsync(invocationContextFactory.createInvocationContext(false, UNBOUNDED), command);
}
@Override
public final boolean isEmpty() {
return isEmpty(EnumUtil.EMPTY_BIT_SET);
}
final boolean isEmpty(long explicitFlags) {
return entrySet(explicitFlags, null).stream().noneMatch(StreamMarshalling.alwaysTruePredicate());
}
@Override
public final boolean containsKey(Object key) {
return containsKey(key, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, 1));
}
final boolean containsKey(Object key, long explicitFlags, InvocationContext ctx) {
return get(key, explicitFlags, ctx) != null;
}
@Override
public final boolean containsValue(Object value) {
assertValueNotNull(value);
return values().stream().anyMatch(StreamMarshalling.equalityPredicate(value));
}
@Override
public final V get(Object key) {
return get(key, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, 1));
}
final V get(Object key, long explicitFlags, InvocationContext ctx) {
assertKeyNotNull(key);
GetKeyValueCommand command = commandsFactory.buildGetKeyValueCommand(key, keyPartitioner.getSegment(key), explicitFlags);
return invocationHelper.invoke(ctx, command);
}
final CacheEntry<K, V> getCacheEntry(Object key, long explicitFlags, InvocationContext ctx) {
assertKeyNotNull(key);
GetCacheEntryCommand command = commandsFactory.buildGetCacheEntryCommand(key, keyPartitioner.getSegment(key),
explicitFlags);
return invocationHelper.invoke(ctx, command);
}
@Override
public final CacheEntry<K, V> getCacheEntry(Object key) {
return getCacheEntry(key, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, 1));
}
@Override
public CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key) {
return getCacheEntryAsync(key, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, 1));
}
final CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key, long explicitFlags, InvocationContext ctx) {
assertKeyNotNull(key);
GetCacheEntryCommand command = commandsFactory.buildGetCacheEntryCommand(key, keyPartitioner.getSegment(key),
explicitFlags);
return invocationHelper.invokeAsync(ctx, command);
}
@Override
public Map<K, V> getAll(Set<?> keys) {
return getAll(keys, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, keys.size()));
}
final Map<K, V> getAll(Set<?> keys, long explicitFlags, InvocationContext ctx) {
GetAllCommand command = commandsFactory.buildGetAllCommand(keys, explicitFlags, false);
return dropNullEntries(invocationHelper.invoke(ctx, command));
}
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
return getAllAsync(keys, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, keys.size()));
}
final CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys, long explicitFlags, InvocationContext ctx) {
GetAllCommand command = commandsFactory.buildGetAllCommand(keys, explicitFlags, false);
return invocationHelper.<Map<K, V>>invokeAsync(ctx, command).thenApply(this::dropNullEntries);
}
private Map<K, V> dropNullEntries(Map<K, V> map) {
Iterator<Entry<K, V>> entryIterator = map.entrySet().iterator();
while (entryIterator.hasNext()) {
Entry<K, V> entry = entryIterator.next();
if (entry.getValue() == null) {
entryIterator.remove();
}
}
return map;
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
return getAllCacheEntries(keys, EnumUtil.EMPTY_BIT_SET,
invocationContextFactory.createInvocationContext(false, keys.size()));
}
public final Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys,
long explicitFlags, InvocationContext ctx) {
GetAllCommand command = commandsFactory.buildGetAllCommand(keys, explicitFlags, true);
Map<K, CacheEntry<K, V>> map = invocationHelper.invoke(ctx, command);
map.entrySet().removeIf(entry -> entry.getValue() == null);
return map;
}
@Override
public Map<K, V> getGroup(String groupName) {
return getGroup(groupName, EnumUtil.EMPTY_BIT_SET);
}
final Map<K, V> getGroup(String groupName, long explicitFlags) {
return Collections.unmodifiableMap(internalGetGroup(groupName, explicitFlags, invocationContextFactory.createInvocationContext(false, UNBOUNDED)));
}
private Map<K, V> internalGetGroup(String groupName, long explicitFlagsBitSet, InvocationContext ctx) {
if (groupManager == null) {
return Collections.emptyMap();
}
try (CacheStream<CacheEntry<K, V>> stream = cacheEntrySet(explicitFlagsBitSet, null).stream()) {
return groupManager.collect(stream, ctx, groupName);
}
}
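   // Usage sketch (assuming grouping is enabled and keys declare @Group or a Grouper is configured):
   //   Map<K, V> group = cache.getGroup("user-42"); // unmodifiable snapshot of the group's entries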
@Override
public void removeGroup(String groupName) {
removeGroup(groupName, EnumUtil.EMPTY_BIT_SET);
}
final void removeGroup(String groupName, long explicitFlags) {
if (!transactional) {
nonTransactionalRemoveGroup(groupName, explicitFlags);
} else {
transactionalRemoveGroup(groupName, explicitFlags);
}
}
private void transactionalRemoveGroup(String groupName, long explicitFlagsBitSet) {
final boolean onGoingTransaction = getOngoingTransaction(true) != null;
if (!onGoingTransaction) {
tryBegin();
}
try {
InvocationContext context = defaultContextBuilderForWrite().create(UNBOUNDED);
Map<K, V> keys = internalGetGroup(groupName, explicitFlagsBitSet, context);
long removeFlags = addIgnoreReturnValuesFlag(explicitFlagsBitSet);
for (K key : keys.keySet()) {
invocationHelper.invoke(context, createRemoveCommand(key, removeFlags, false));
}
if (!onGoingTransaction) {
tryCommit();
}
} catch (RuntimeException e) {
if (!onGoingTransaction) {
tryRollback();
}
throw e;
}
}
private void nonTransactionalRemoveGroup(String groupName, long explicitFlags) {
InvocationContext context = invocationContextFactory.createInvocationContext(false, UNBOUNDED);
Map<K, V> keys = internalGetGroup(groupName, explicitFlags, context);
long removeFlags = addIgnoreReturnValuesFlag(explicitFlags);
for (K key : keys.keySet()) {
         //a new context is needed for each remove since on non-owners the command is sent to the primary owner to be
         //executed. If the context is already populated, it throws a ClassCastException because wrapForRemove is
         //not invoked.
assertKeyNotNull(key);
invocationHelper.invoke(createRemoveCommand(key, removeFlags, false), 1);
}
}
@Override
public final V remove(Object key) {
return remove(key, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final V remove(Object key, long explicitFlags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
RemoveCommand command = createRemoveCommand(key, explicitFlags, false);
return invocationHelper.invoke(contextBuilder, command, 1);
}
private RemoveCommand createRemoveCommand(Object key, long explicitFlags, boolean returnEntry) {
long flags = addUnsafeFlags(explicitFlags);
return commandsFactory.buildRemoveCommand(key, null, keyPartitioner.getSegment(key), flags, returnEntry);
}
@Override
public CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan) {
return removeLifespanExpired(key, value, lifespan, EnumUtil.EMPTY_BIT_SET);
}
final CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan, long explicitFlags) {
RemoveExpiredCommand command = commandsFactory.buildRemoveExpiredCommand(key, value, keyPartitioner.getSegment(key),
lifespan, explicitFlags | FlagBitSets.SKIP_CACHE_LOAD | FlagBitSets.SKIP_XSITE_BACKUP);
return performVisitableNonTxCommand(command);
}
@Override
public CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value) {
return removeMaxIdleExpired(key, value, EnumUtil.EMPTY_BIT_SET);
}
final CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value, long explicitFlags) {
RemoveExpiredCommand command = commandsFactory.buildRemoveExpiredCommand(key, value, keyPartitioner.getSegment(key),
explicitFlags | FlagBitSets.SKIP_CACHE_LOAD);
return performVisitableNonTxCommand(command);
}
private CompletableFuture<Boolean> performVisitableNonTxCommand(VisitableCommand command) {
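      // Expiration removals always run in their own non-tx context: suspend any ongoing
      // transaction and resume it once the command has been dispatched.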
Transaction ongoingTransaction = null;
try {
ongoingTransaction = suspendOngoingTransactionIfExists();
return invocationHelper.invokeAsync(nonTxContextBuilder, command, 1);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Caught exception while doing removeExpired()", e);
return CompletableFuture.failedFuture(e);
} finally {
resumePreviousOngoingTransaction(ongoingTransaction,
"Had problems trying to resume a transaction after removeExpired()");
}
}
@Override
public AdvancedCache<K, V> withEncoding(Class<? extends Encoder> encoderClass) {
throw new UnsupportedOperationException("Encoding requires EncoderCache");
}
@Override
public AdvancedCache<?, ?> withKeyEncoding(Class<? extends Encoder> encoderClass) {
throw new UnsupportedOperationException("Encoding requires EncoderCache");
}
@Override
public AdvancedCache<K, V> withEncoding(Class<? extends Encoder> keyEncoderClass, Class<? extends Encoder> valueEncoderClass) {
throw new UnsupportedOperationException("Encoding requires EncoderCache");
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> wrapperClass) {
throw new UnsupportedOperationException("Wrapping requires EncoderCache");
}
@Override
public AdvancedCache<K, V> withMediaType(String keyMediaType, String valueMediaType) {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
public <K1, V1> AdvancedCache<K1, V1> withMediaType(MediaType keyMediaType, MediaType valueMediaType) {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
public AdvancedCache<K, V> withStorageMediaType() {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> keyWrapperClass, Class<? extends Wrapper> valueWrapperClass) {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
public DataConversion getKeyDataConversion() {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
public DataConversion getValueDataConversion() {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@ManagedOperation(
description = "Clears the cache",
displayName = "Clears the cache", name = "clear"
)
public final void clearOperation() {
clear(EnumUtil.EMPTY_BIT_SET);
}
@Override
public final void clear() {
clear(EnumUtil.EMPTY_BIT_SET);
}
final void clear(long explicitFlags) {
final Transaction tx = suspendOngoingTransactionIfExists();
try {
InvocationContext context = invocationContextFactory.createClearNonTxInvocationContext();
ClearCommand command = commandsFactory.buildClearCommand(explicitFlags);
invocationHelper.invoke(context, command);
} finally {
resumePreviousOngoingTransaction(tx, "Had problems trying to resume a transaction after clear()");
}
}
@Override
public CacheSet<K> keySet() {
return keySet(EnumUtil.EMPTY_BIT_SET, null);
}
CacheSet<K> keySet(long explicitFlags, Object lockOwner) {
return new CacheBackedKeySet(this, lockOwner, explicitFlags);
}
@Override
public CacheCollection<V> values() {
return values(EnumUtil.EMPTY_BIT_SET, null);
}
CacheCollection<V> values(long explicitFlags, Object lockOwner) {
return new ValueCacheCollection<>(this, cacheEntrySet(explicitFlags, lockOwner));
}
@Override
public CacheSet<CacheEntry<K, V>> cacheEntrySet() {
return cacheEntrySet(EnumUtil.EMPTY_BIT_SET, null);
}
@Override
public LockedStream<K, V> lockedStream() {
if (transactional) {
if (config.transaction().lockingMode() == LockingMode.OPTIMISTIC) {
throw new UnsupportedOperationException("Method lockedStream is not supported in OPTIMISTIC transactional caches!");
}
return new TxLockedStreamImpl<>(transactionManager, cacheEntrySet().stream(), config.locking().lockAcquisitionTimeout(), TimeUnit.MILLISECONDS);
}
return new LockedStreamImpl<>(cacheEntrySet().stream(), config.locking().lockAcquisitionTimeout(), TimeUnit.MILLISECONDS);
}
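   // Usage sketch (transform is a hypothetical helper): each entry stays locked while the
   // consumer runs, so read-modify-write is safe without an explicit transaction:
   //   cache.lockedStream().forEach((c, e) -> c.put(e.getKey(), transform(e.getValue())));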
CacheSet<CacheEntry<K, V>> cacheEntrySet(long explicitFlags, Object lockOwner) {
return new CacheBackedEntrySet(this, lockOwner, explicitFlags);
}
@Override
public CacheSet<Entry<K, V>> entrySet() {
return entrySet(EnumUtil.EMPTY_BIT_SET, null);
}
CacheSet<Map.Entry<K, V>> entrySet(long explicitFlags, Object lockOwner) {
return new CacheBackedEntrySet(this, lockOwner, explicitFlags);
}
@Override
public final void putForExternalRead(K key, V value) {
putForExternalRead(key, value, EnumUtil.EMPTY_BIT_SET);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit) {
putForExternalRead(key, value, lifespan, lifespanUnit, defaultMetadata.maxIdle(), MILLISECONDS);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
putForExternalRead(key, value, metadata);
}
@Override
public void putForExternalRead(K key, V value, Metadata metadata) {
Metadata merged = applyDefaultMetadata(metadata);
putForExternalRead(key, value, merged, EnumUtil.EMPTY_BIT_SET);
}
final void putForExternalRead(K key, V value, long explicitFlags) {
putForExternalRead(key, value, defaultMetadata, explicitFlags);
}
final void putForExternalRead(K key, V value, Metadata metadata, long explicitFlags) {
Transaction ongoingTransaction = null;
try {
ongoingTransaction = suspendOngoingTransactionIfExists();
// if the entry exists then this should be a no-op.
putIfAbsent(key, value, metadata, EnumUtil.mergeBitSets(PFER_FLAGS, explicitFlags), nonTxContextBuilder);
} catch (Exception e) {
if (log.isDebugEnabled()) log.debug("Caught exception while doing putForExternalRead()", e);
} finally {
resumePreviousOngoingTransaction(ongoingTransaction, "Had problems trying to resume a transaction after putForExternalRead()");
}
}
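   // Sketch: putForExternalRead is a best-effort, fail-silent putIfAbsent that runs outside any
   // caller transaction, e.g. cache.putForExternalRead(id, loadFromStore(id)); // hypothetical loader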
@Override
public final void evict(K key) {
evict(key, EnumUtil.EMPTY_BIT_SET);
}
final void evict(K key, long explicitFlags) {
assertKeyNotNull(key);
if (!config.memory().isEvictionEnabled() && config.memory().whenFull() != EvictionStrategy.MANUAL) {
log.evictionDisabled(name);
}
InvocationContext ctx = createSingleKeyNonTxInvocationContext();
EvictCommand command = commandsFactory.buildEvictCommand(key, keyPartitioner.getSegment(key), explicitFlags);
invocationHelper.invoke(ctx, command);
}
private InvocationContext createSingleKeyNonTxInvocationContext() {
return invocationContextFactory.createSingleKeyNonTxInvocationContext();
}
@Override
public Configuration getCacheConfiguration() {
return config;
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
return notifier.addListenerAsync(listener);
}
CompletionStage<Void> addListenerAsync(ListenerHolder listenerHolder) {
return notifier.addListenerAsync(listenerHolder, null, null, null);
}
<C> CompletionStage<Void> addListenerAsync(ListenerHolder listenerHolder, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter) {
return notifier.addListenerAsync(listenerHolder, filter, converter, null);
}
@Override
public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter) {
return notifier.addListenerAsync(listener, filter, converter);
}
@Override
public CompletionStage<Void> removeListenerAsync(Object listener) {
return notifier.removeListenerAsync(listener);
}
@Deprecated
@Override
public Set<Object> getListeners() {
return notifier.getListeners();
}
@Override
public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener,
CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
return notifier.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
return notifier.addStorageFormatFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
<C> CompletionStage<Void> addFilteredListenerAsync(ListenerHolder listener,
CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
return notifier.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
private InvocationContext nonTxContextBuilder(int keyCount) {
return transactional ?
invocationContextFactory.createSingleKeyNonTxInvocationContext() :
invocationContextFactory.createInvocationContext(true, keyCount);
}
@Override
public boolean lock(K... keys) {
assertKeyNotNull(keys);
return lock(Arrays.asList(keys), EnumUtil.EMPTY_BIT_SET);
}
@Override
public boolean lock(Collection<? extends K> keys) {
return lock(keys, EnumUtil.EMPTY_BIT_SET);
}
boolean lock(Collection<? extends K> keys, long flagsBitSet) {
if (!transactional)
throw new UnsupportedOperationException("Calling lock() on non-transactional caches is not allowed");
if (keys == null || keys.isEmpty()) {
throw new IllegalArgumentException("Cannot lock empty list of keys");
}
InvocationContext ctx = invocationContextFactory.createInvocationContext(true, UNBOUNDED);
LockControlCommand command = commandsFactory.buildLockControlCommand(keys, flagsBitSet);
if (ctx.getLockOwner() == null) {
ctx.setLockOwner(command.getKeyLockOwner());
}
return invocationHelper.invoke(ctx, command);
}
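   // Usage sketch (transactional cache with pessimistic locking assumed; tm = getTransactionManager()):
   //   tm.begin();
   //   cache.lock("a", "b");   // eagerly acquires both locks for this transaction
   //   cache.put("a", va);
   //   cache.put("b", vb);
   //   tm.commit();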
@Override
@ManagedOperation(
description = "Starts the cache.",
displayName = "Starts cache."
)
public void start() {
componentRegistry.start();
if (stateTransferManager != null) {
try {
stateTransferManager.waitForInitialStateTransferToComplete();
} catch (Throwable t) {
log.debugf("Stopping cache as exception encountered waiting for state transfer", t);
componentRegistry.stop();
throw t;
}
}
log.debugf("Started cache %s on %s", getName(), managerIdentifier());
}
@Override
@ManagedOperation(
description = "Stops the cache.",
displayName = "Stops cache."
)
public void stop() {
performImmediateShutdown();
}
@Override
@ManagedOperation(
description = "Shuts down the cache across the cluster",
displayName = "Clustered cache shutdown"
)
public void shutdown() {
log.debugf("Shutting down cache %s on %s", getName(), managerIdentifier());
synchronized (this) {
if (!stopping && componentRegistry.getStatus() == ComponentStatus.RUNNING) {
stopping = true;
requestClusterWideShutdown();
}
}
}
private void requestClusterWideShutdown() {
// If the cache is clustered, perform a cluster-wide shutdown, otherwise do it immediately
if (config.clustering().cacheMode().isClustered()) {
try {
localTopologyManager.cacheShutdown(name);
} catch (Exception e) {
throw new CacheException(e);
}
}
performImmediateShutdown();
}
private void performImmediateShutdown() {
log.debugf("Stopping cache %s on %s", getName(), managerIdentifier());
componentRegistry.stop();
}
/**
* @deprecated Since 10.0, will be removed without a replacement
*/
@Deprecated
@Override
public AsyncInterceptorChain getAsyncInterceptorChain() {
return invoker;
}
@Override
public EvictionManager<K, V> getEvictionManager() {
return evictionManager;
}
@Override
public ExpirationManager<K, V> getExpirationManager() {
return expirationManager;
}
@Override
public ComponentRegistry getComponentRegistry() {
return componentRegistry;
}
@Override
public DistributionManager getDistributionManager() {
return distributionManager;
}
@Override
public AuthorizationManager getAuthorizationManager() {
return authorizationManager;
}
@Override
public AdvancedCache<K, V> lockAs(Object lockOwner) {
return new DecoratedCache<>(this, requireNonNull(lockOwner, "lockOwner can't be null"), EnumUtil.EMPTY_BIT_SET);
}
@Override
public ComponentStatus getStatus() {
return componentRegistry.getStatus();
}
/**
    * Returns the String representation of the ComponentStatus enumeration, in order to avoid
    * ClassNotFoundException in JMX tools that don't have access to Infinispan classes.
*/
@ManagedAttribute(
description = "Returns the cache status",
displayName = "Cache status",
dataType = DataType.TRAIT
)
public String getCacheStatus() {
return getStatus().toString();
}
@Override
public AvailabilityMode getAvailability() {
return partitionHandlingManager.getAvailabilityMode();
}
@Override
public void setAvailability(AvailabilityMode availability) {
if (localTopologyManager != null) {
try {
localTopologyManager.setCacheAvailability(getName(), availability);
} catch (Exception e) {
throw new CacheException(e);
}
}
}
@ManagedAttribute(
description = "Returns the cache availability",
displayName = "Cache availability",
dataType = DataType.TRAIT,
writable = true
)
public String getCacheAvailability() {
return getAvailability().toString();
}
@ManagedAttribute(
description = "Returns whether cache rebalancing is enabled",
displayName = "Cache rebalacing",
dataType = DataType.TRAIT,
writable = true
)
public boolean isRebalancingEnabled() {
if (localTopologyManager != null) {
try {
return localTopologyManager.isCacheRebalancingEnabled(getName());
} catch (Exception e) {
throw new CacheException(e);
}
} else {
return false;
}
}
public void setRebalancingEnabled(boolean enabled) {
if (localTopologyManager != null) {
try {
localTopologyManager.setCacheRebalancingEnabled(getName(), enabled);
} catch (Exception e) {
throw new CacheException(e);
}
}
}
@Override
public boolean startBatch() {
if (!batchingEnabled) {
throw CONFIG.invocationBatchingNotEnabled();
}
return batchContainer.startBatch();
}
@Override
public void endBatch(boolean successful) {
if (!batchingEnabled) {
throw CONFIG.invocationBatchingNotEnabled();
}
batchContainer.endBatch(successful);
}
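   // Hedged usage sketch for invocation batching; it requires invocationBatching().enable()
   // in the cache configuration (the cache variable is illustrative):
   //
   //   cache.startBatch();
   //   boolean success = false;
   //   try {
   //      cache.put("a", "1");
   //      cache.put("b", "2");
   //      success = true;
   //   } finally {
   //      cache.endBatch(success);  // true applies the batch as one unit, false discards it
   //   }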
@Override
public String getName() {
return name;
}
/**
* Returns the cache name. If this is the default cache, it returns a more friendly name.
*/
@ManagedAttribute(
description = "Returns the cache name",
displayName = "Cache name",
dataType = DataType.TRAIT
)
public String getCacheName() {
return getName() + "(" + getCacheConfiguration().clustering().cacheMode().toString().toLowerCase() + ")";
}
/**
* Returns the version of Infinispan.
*/
@ManagedAttribute(
description = "Returns the version of Infinispan",
displayName = "Infinispan version",
dataType = DataType.TRAIT
)
@Override
public String getVersion() {
return Version.getVersion();
}
@Override
public String toString() {
return "Cache '" + name + "'@" + managerIdentifier();
}
private String managerIdentifier() {
if (rpcManager != null) {
return rpcManager.getAddress().toString();
} else if (globalCfg.transport().nodeName() != null){
return globalCfg.transport().nodeName();
} else {
return globalCfg.cacheManagerName();
}
}
@Override
public CompletionStage<Boolean> touch(Object key, boolean touchEvenIfExpired) {
return touch(key, -1, touchEvenIfExpired, EnumUtil.EMPTY_BIT_SET);
}
@Override
public CompletionStage<Boolean> touch(Object key, int segment, boolean touchEvenIfExpired) {
return touch(key, segment, touchEvenIfExpired, EnumUtil.EMPTY_BIT_SET);
}
public CompletionStage<Boolean> touch(Object key, int segment, boolean touchEvenIfExpired, long flagBitSet) {
if (segment < 0) {
segment = keyPartitioner.getSegment(key);
}
TouchCommand command = commandsFactory.buildTouchCommand(key, segment, touchEvenIfExpired, flagBitSet);
return performVisitableNonTxCommand(command);
}
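   // Hedged sketch: touch() refreshes an entry's access time (e.g. its max-idle timer)
   // without reading the value (the key below is illustrative):
   //
   //   cache.touch("session:abc", false)   // false: do not touch entries that already expired
   //        .thenAccept(touched -> log.tracef("touched=%s", touched));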
@Override
public BatchContainer getBatchContainer() {
return batchContainer;
}
@Override
public DataContainer<K, V> getDataContainer() {
return dataContainer;
}
@Override
public TransactionManager getTransactionManager() {
return transactionManager;
}
@Override
public LockManager getLockManager() {
return this.lockManager;
}
@Override
public EmbeddedCacheManager getCacheManager() {
return cacheManager;
}
@Override
public Stats getStats() {
return StatsImpl.create(config, invoker);
}
@Override
public XAResource getXAResource() {
return new TransactionXaAdapter((XaTransactionTable) txTable);
}
@Override
public final V put(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
return put(key, value, metadata);
}
final V put(K key, V value, Metadata metadata, long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
DataWriteCommand command = createPutCommand(key, value, metadata, explicitFlags, false);
return invocationHelper.invoke(contextBuilder, command, 1);
}
private PutKeyValueCommand createPutCommand(K key, V value, Metadata metadata, long explicitFlags, boolean returnEntry) {
long flags = addUnsafeFlags(explicitFlags);
Metadata merged = applyDefaultMetadata(metadata);
return commandsFactory.buildPutKeyValueCommand(key, value, keyPartitioner.getSegment(key), merged, flags, returnEntry);
}
private long addIgnoreReturnValuesFlag(long flagBitSet) {
return EnumUtil.mergeBitSets(flagBitSet, FlagBitSets.IGNORE_RETURN_VALUES);
}
private long addUnsafeFlags(long flagBitSet) {
return config.unsafe().unreliableReturnValues() ? addIgnoreReturnValuesFlag(flagBitSet) :
flagBitSet;
}
@Override
public final V putIfAbsent(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
return putIfAbsent(key, value, metadata, EnumUtil.EMPTY_BIT_SET);
}
private V putIfAbsent(K key, V value, Metadata metadata, long explicitFlags) {
return putIfAbsent(key, value, metadata, explicitFlags, defaultContextBuilderForWrite());
}
final V putIfAbsent(K key, V value, Metadata metadata, long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
DataWriteCommand command = createPutIfAbsentCommand(key, value, metadata, explicitFlags, false);
return invocationHelper.invoke(contextBuilder, command, 1);
}
private PutKeyValueCommand createPutIfAbsentCommand(K key, V value, Metadata metadata, long explicitFlags, boolean returnEntry) {
long flags = addUnsafeFlags(explicitFlags);
Metadata merged = applyDefaultMetadata(metadata);
PutKeyValueCommand command = commandsFactory.buildPutKeyValueCommand(key, value, keyPartitioner.getSegment(key),
merged, flags, returnEntry);
command.setPutIfAbsent(true);
command.setValueMatcher(ValueMatcher.MATCH_EXPECTED);
return command;
}
@Override
public final void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
putAll(map, metadata);
}
final void putAll(Map<? extends K, ? extends V> map, Metadata metadata, long explicitFlags, ContextBuilder contextBuilder) {
// Vanilla PutMapCommand returns previous values; add IGNORE_RETURN_VALUES as the API will drop the return value.
// Interceptors are free to clear this flag if appropriate (since interceptors are the only consumers of the
// return value).
explicitFlags = EnumUtil.mergeBitSets(explicitFlags, FlagBitSets.IGNORE_RETURN_VALUES);
PutMapCommand command = createPutAllCommand(map, metadata, explicitFlags);
invocationHelper.invoke(contextBuilder, command, map.size());
}
public final Map<K, V> getAndPutAll(Map<? extends K, ? extends V> map) {
return getAndPutAll(map, defaultMetadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final Map<K, V> getAndPutAll(Map<? extends K, ? extends V> map, Metadata metadata, long explicitFlags,
ContextBuilder contextBuilder) {
PutMapCommand command = createPutAllCommand(map, metadata, explicitFlags);
return dropNullEntries(invocationHelper.invoke(contextBuilder, command, map.size()));
}
private PutMapCommand createPutAllCommand(Map<? extends K, ? extends V> map, Metadata metadata, long explicitFlags) {
InfinispanCollections.assertNotNullEntries(map, "map");
Metadata merged = applyDefaultMetadata(metadata);
return commandsFactory.buildPutMapCommand(map, merged, explicitFlags);
}
@Override
public final V replace(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
return replace(key, value, metadata);
}
final V replace(K key, V value, Metadata metadata, long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
ReplaceCommand command = createReplaceCommand(key, value, metadata, explicitFlags, false);
return invocationHelper.invoke(contextBuilder, command, 1);
}
private ReplaceCommand createReplaceCommand(K key, V value, Metadata metadata, long explicitFlags, boolean returnEntry) {
long flags = addUnsafeFlags(explicitFlags);
Metadata merged = applyDefaultMetadata(metadata);
return commandsFactory.buildReplaceCommand(key, null, value, keyPartitioner.getSegment(key), merged, flags, returnEntry);
}
@Override
public final boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
return replace(key, oldValue, value, metadata);
}
final boolean replace(K key, V oldValue, V value, Metadata metadata, long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
assertValueNotNull(oldValue);
ReplaceCommand command = createReplaceConditionalCommand(key, oldValue, value, metadata, explicitFlags);
return invocationHelper.invoke(contextBuilder, command, 1);
}
private ReplaceCommand createReplaceConditionalCommand(K key, V oldValue, V value, Metadata metadata, long explicitFlags) {
Metadata merged = applyDefaultMetadata(metadata);
return commandsFactory.buildReplaceCommand(key, oldValue, value, keyPartitioner.getSegment(key), merged, explicitFlags);
}
@Override
public final CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return putAsync(key, value, metadata);
}
final CompletableFuture<V> putAsync(final K key, final V value, final Metadata metadata, final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
PutKeyValueCommand command = createPutCommand(key, value, metadata, explicitFlags, false);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
final CompletableFuture<CacheEntry<K, V>> putAsyncEntry(final K key, final V value, final Metadata metadata, final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
PutKeyValueCommand command = createPutCommand(key, value, metadata, explicitFlags, true);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public final CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return putAllAsync(data, metadata);
}
@Override
public final CompletableFuture<Void> putAllAsync(final Map<? extends K, ? extends V> data, final Metadata metadata) {
return putAllAsync(data, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<Void> putAllAsync(final Map<? extends K, ? extends V> data, final Metadata metadata,
long explicitFlags, ContextBuilder contextBuilder) {
explicitFlags = EnumUtil.mergeBitSets(explicitFlags, FlagBitSets.IGNORE_RETURN_VALUES);
PutMapCommand command = createPutAllCommand(data, metadata, explicitFlags);
return invocationHelper.invokeAsync(contextBuilder, command, data.size());
}
@Override
public final CompletableFuture<Void> clearAsync() {
return clearAsync(EnumUtil.EMPTY_BIT_SET);
}
final CompletableFuture<Void> clearAsync(final long explicitFlags) {
InvocationContext context = invocationContextFactory.createClearNonTxInvocationContext();
ClearCommand command = commandsFactory.buildClearCommand(explicitFlags);
return invocationHelper.invokeAsync(context, command).thenApply(nil -> null);
}
@Override
public final CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return putIfAbsentAsync(key, value, metadata);
}
@Override
public final CompletableFuture<V> putIfAbsentAsync(final K key, final V value, final Metadata metadata) {
return putIfAbsentAsync(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<V> putIfAbsentAsync(final K key, final V value, final Metadata metadata,
final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
PutKeyValueCommand command = createPutIfAbsentCommand(key, value, metadata, explicitFlags, false);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(K key, V value, Metadata metadata) {
return putIfAbsentAsyncEntry(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(final K key, final V value, final Metadata metadata,
final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
PutKeyValueCommand command = createPutIfAbsentCommand(key, value, metadata, explicitFlags, true);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public final CompletableFuture<V> removeAsync(Object key) {
return removeAsync(key, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<V> removeAsync(final Object key, final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
RemoveCommand command = createRemoveCommand(key, explicitFlags, false);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(Object key) {
return removeAsyncEntry(key, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(final Object key, final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
RemoveCommand command = createRemoveCommand(key, explicitFlags, true);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public final CompletableFuture<Boolean> removeAsync(Object key, Object value) {
return removeAsync(key, value, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<Boolean> removeAsync(final Object key, final Object value, final long explicitFlags,
ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
RemoveCommand command = commandsFactory.buildRemoveCommand(key, value, keyPartitioner.getSegment(key), explicitFlags);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public final CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return replaceAsync(key, value, metadata);
}
@Override
public final CompletableFuture<V> replaceAsync(final K key, final V value, final Metadata metadata) {
return replaceAsync(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<V> replaceAsync(final K key, final V value, final Metadata metadata,
final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
ReplaceCommand command = createReplaceCommand(key, value, metadata, explicitFlags, false);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(K key, V value, Metadata metadata) {
return replaceAsyncEntry(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(final K key, final V value, final Metadata metadata,
final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, value);
ReplaceCommand command = createReplaceCommand(key, value, metadata, explicitFlags, true);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public final CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return replaceAsync(key, oldValue, newValue, metadata);
}
@Override
public final CompletableFuture<Boolean> replaceAsync(final K key, final V oldValue, final V newValue, final Metadata metadata) {
return replaceAsync(key, oldValue, newValue, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
final CompletableFuture<Boolean> replaceAsync(final K key, final V oldValue, final V newValue,
final Metadata metadata, final long explicitFlags, ContextBuilder contextBuilder) {
assertKeyValueNotNull(key, newValue);
assertValueNotNull(oldValue);
ReplaceCommand command = createReplaceConditionalCommand(key, oldValue, newValue, metadata, explicitFlags);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public CompletableFuture<V> getAsync(K key) {
return getAsync(key, EnumUtil.EMPTY_BIT_SET, invocationContextFactory.createInvocationContext(false, 1));
}
CompletableFuture<V> getAsync(final K key, final long explicitFlags, InvocationContext ctx) {
assertKeyNotNull(key);
GetKeyValueCommand command = commandsFactory.buildGetKeyValueCommand(key, keyPartitioner.getSegment(key), explicitFlags);
return invocationHelper.invokeAsync(ctx, command);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return computeAsync(key, remappingFunction, false);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return computeAsyncInternal(key, remappingFunction, false, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(defaultMetadata.maxIdle(), MILLISECONDS).build();
return computeAsyncInternal(key, remappingFunction, false, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET), defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return computeAsyncInternal(key, remappingFunction, false, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET), defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return computeAsync(key, remappingFunction, true);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return computeAsyncInternal(key, remappingFunction, true, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
private CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, boolean computeIfPresent) {
return computeAsyncInternal(key, remappingFunction, computeIfPresent, applyDefaultMetadata(defaultMetadata), addUnsafeFlags(EnumUtil.EMPTY_BIT_SET));
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(defaultMetadata.maxIdle(), MILLISECONDS).build();
      return computeAsyncInternal(key, remappingFunction, true, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET), defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return computeAsyncInternal(key, remappingFunction, true, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET), defaultContextBuilderForWrite());
}
private CompletableFuture<V> computeAsyncInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, boolean computeIfPresent, Metadata metadata, long flags) {
return computeAsyncInternal(key, remappingFunction, computeIfPresent, metadata, flags, defaultContextBuilderForWrite());
}
CompletableFuture<V> computeAsyncInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, boolean computeIfPresent,
Metadata metadata, long flags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
assertFunctionNotNull(remappingFunction);
ComputeCommand command = commandsFactory.buildComputeCommand(key, remappingFunction, computeIfPresent,
keyPartitioner.getSegment(key), metadata, flags);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
return computeIfAbsentAsync(key, mappingFunction, defaultMetadata);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return computeIfAbsentAsyncInternal(key, mappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(defaultMetadata.maxIdle(), MILLISECONDS).build();
return computeIfAbsentAsyncInternal(key, mappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET), defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit).build();
return computeIfAbsentAsyncInternal(key, mappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET), defaultContextBuilderForWrite());
}
CompletableFuture<V> computeIfAbsentAsyncInternal(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata, long flags,
ContextBuilder contextBuilder) {
assertKeyNotNull(key);
assertFunctionNotNull(mappingFunction);
ComputeIfAbsentCommand command = commandsFactory.buildComputeIfAbsentCommand(key, mappingFunction,
keyPartitioner.getSegment(key), metadata, flags);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
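   // Hedged sketch: lazy population with computeIfAbsentAsync; the mapping function only runs
   // when the key has no value (loadUser is an illustrative helper, not part of this class):
   //
   //   cache.computeIfAbsentAsync("user:42", k -> loadUser(k))
   //        .thenAccept(user -> log.tracef("loaded %s", user));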
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return mergeInternalAsync(key, value, remappingFunction, defaultMetadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(defaultMetadata.maxIdle(), MILLISECONDS).build();
return mergeInternalAsync(key, value, remappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit idleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, idleTimeUnit).build();
return mergeInternalAsync(key, value, remappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return mergeInternalAsync(key, value, remappingFunction, metadata, addUnsafeFlags(EnumUtil.EMPTY_BIT_SET),
defaultContextBuilderForWrite());
}
CompletableFuture<V> mergeInternalAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata,
long flags, ContextBuilder contextBuilder) {
assertKeyNotNull(key);
assertValueNotNull(value);
assertFunctionNotNull(remappingFunction);
DataConversion keyDataConversion;
DataConversion valueDataConversion;
//TODO: Correctly propagate DataConversion objects https://issues.redhat.com/browse/ISPN-11584
if (remappingFunction instanceof BiFunctionMapper) {
BiFunctionMapper biFunctionMapper = (BiFunctionMapper) remappingFunction;
keyDataConversion = biFunctionMapper.getKeyDataConversion();
valueDataConversion = biFunctionMapper.getValueDataConversion();
} else {
keyDataConversion = encoderCache.running().getKeyDataConversion();
valueDataConversion = encoderCache.running().getValueDataConversion();
}
ReadWriteKeyCommand<K, V, V> command = commandsFactory.buildReadWriteKeyCommand(key,
new MergeFunction<>(value, remappingFunction, metadata), keyPartitioner.getSegment(key),
Params.fromFlagsBitSet(flags), keyDataConversion, valueDataConversion);
return invocationHelper.invokeAsync(contextBuilder, command, 1);
}
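   // Hedged sketch for mergeAsync: stores the value if the key is absent, otherwise applies
   // the remapping function to the old and new values (identifiers are illustrative):
   //
   //   cache.mergeAsync("log:today", "line\n", (oldV, newV) -> oldV + newV)
   //        .thenAccept(merged -> log.tracef("merged value: %s", merged));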
@Override
public AdvancedCache<K, V> getAdvancedCache() {
return this;
}
@Override
public RpcManager getRpcManager() {
return rpcManager;
}
@Override
public AdvancedCache<K, V> withFlags(Flag flag) {
return new DecoratedCache<>(this, EnumUtil.bitSetOf(flag));
}
@Override
public AdvancedCache<K, V> withFlags(final Flag... flags) {
if (flags == null || flags.length == 0)
return this;
else
return new DecoratedCache<>(this, EnumUtil.bitSetOf(flags));
}
@Override
public AdvancedCache<K, V> withFlags(Collection<Flag> flags) {
if (flags == null || flags.isEmpty())
return this;
else
return new DecoratedCache<>(this, EnumUtil.bitSetOf(flags));
}
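   // Hedged sketch: flags decorate a cache reference and apply to every invocation made
   // through that reference (the cache variable is illustrative):
   //
   //   cache.withFlags(Flag.SKIP_CACHE_LOAD, Flag.IGNORE_RETURN_VALUES)
   //        .put("key", "value");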
@Override
public AdvancedCache<K, V> noFlags() {
return this;
}
@Override
public AdvancedCache<K, V> transform(Function<AdvancedCache<K, V>, ? extends AdvancedCache<K, V>> transformation) {
return transformation.apply(this);
}
@Override
public AdvancedCache<K, V> withSubject(Subject subject) {
return this; // NO-OP
}
private Transaction getOngoingTransaction(boolean includeBatchTx) {
try {
Transaction transaction = null;
if (transactionManager != null) {
transaction = transactionManager.getTransaction();
if (includeBatchTx && transaction == null && batchingEnabled) {
transaction = batchContainer.getBatchTransaction();
}
}
return transaction;
} catch (SystemException e) {
throw new CacheException("Unable to get transaction", e);
}
}
private void tryBegin() {
if (transactionManager == null) {
return;
}
try {
transactionManager.begin();
final Transaction transaction = getOngoingTransaction(true);
if (log.isTraceEnabled()) {
log.tracef("Implicit transaction started! Transaction: %s", transaction);
}
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new CacheException("Unable to begin implicit transaction.", e);
}
}
private void tryRollback() {
try {
if (transactionManager != null) transactionManager.rollback();
} catch (Throwable t) {
         if (log.isTraceEnabled()) log.trace("Could not rollback", t); // best effort
}
}
private void tryCommit() {
if (transactionManager == null) {
return;
}
if (log.isTraceEnabled())
log.tracef("Committing transaction as it was implicit: %s", getOngoingTransaction(true));
try {
transactionManager.commit();
} catch (Throwable e) {
log.couldNotCompleteInjectedTransaction(e);
throw new CacheException("Could not commit implicit transaction", e);
}
}
@Override
public ClassLoader getClassLoader() {
return globalCfg.classLoader();
}
@Override
public AdvancedCache<K, V> with(ClassLoader classLoader) {
return this;
}
@Override
public V put(K key, V value, Metadata metadata) {
return put(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
@Override
public void putAll(Map<? extends K, ? extends V> map, Metadata metadata) {
putAll(map, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
private Metadata applyDefaultMetadata(Metadata metadata) {
if (metadata == null) {
return defaultMetadata;
}
Metadata.Builder builder = metadata.builder();
return builder != null ? builder.merge(defaultMetadata).build() : metadata;
}
@Override
public V replace(K key, V value, Metadata metadata) {
return replace(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
@Override
public boolean replace(K key, V oldValue, V value, Metadata metadata) {
return replace(key, oldValue, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
@Override
public V putIfAbsent(K key, V value, Metadata metadata) {
return putIfAbsent(key, value, metadata, EnumUtil.EMPTY_BIT_SET);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, Metadata metadata) {
return putAsync(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
@Override
public CompletableFuture<CacheEntry<K, V>> putAsyncEntry(K key, V value, Metadata metadata) {
return putAsyncEntry(key, value, metadata, EnumUtil.EMPTY_BIT_SET, defaultContextBuilderForWrite());
}
private Transaction suspendOngoingTransactionIfExists() {
final Transaction tx = getOngoingTransaction(false);
if (tx != null) {
try {
transactionManager.suspend();
} catch (SystemException e) {
throw new CacheException("Unable to suspend transaction.", e);
}
}
return tx;
}
private void resumePreviousOngoingTransaction(Transaction transaction, String failMessage) {
if (transaction != null) {
try {
transactionManager.resume(transaction);
} catch (Exception e) {
if (log.isDebugEnabled()) {
               log.debug(failMessage, e);
}
}
}
}
@ManagedAttribute(
description = "Returns the cache configuration in form of properties",
displayName = "Cache configuration properties",
dataType = DataType.TRAIT
)
public Properties getConfigurationAsProperties() {
return new PropertyFormatter().format(config);
}
/**
* @return The default {@link ContextBuilder} implementation for write operations.
*/
public ContextBuilder defaultContextBuilderForWrite() {
return defaultBuilder;
}
}
| 82,657
| 41.388718
| 203
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/AbstractCacheBackedSet.java
|
package org.infinispan.cache.impl;
import static org.infinispan.context.InvocationContextFactory.UNBOUNDED;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import java.lang.reflect.Array;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.Cache;
import org.infinispan.CacheSet;
import org.infinispan.CacheStream;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableSpliterator;
import org.infinispan.commons.util.Closeables;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.PublisherManagerFactory;
import org.infinispan.reactive.publisher.impl.ClusterPublisherManager;
import org.infinispan.stream.StreamMarshalling;
import org.infinispan.stream.impl.DistributedCacheStream;
import org.infinispan.transaction.impl.LocalTransaction;
/**
* Entry or key set backed by a cache.
*
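 * <p>A hedged usage sketch, assuming the set is obtained via {@code cache.entrySet()}
 * ({@code shouldEvict} is an illustrative predicate, not part of this API); iterator
 * removal writes through to the underlying cache:</p>
 * <pre>{@code
 * try (CloseableIterator<Map.Entry<K, V>> it = cache.entrySet().iterator()) {
 *    while (it.hasNext()) {
 *       if (shouldEvict(it.next())) {
 *          it.remove(); // removes the entry from the cache itself
 *       }
 *    }
 * }
 * }</pre>
 *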
* @since 13.0
*/
public abstract class AbstractCacheBackedSet<K, V, E> implements CacheSet<E> {
protected final CacheImpl<K, V> cache;
protected final Object lockOwner;
protected final long explicitFlags;
private final int batchSize;
private final ClusterPublisherManager<K, V> clusterPublisherManager;
private final ClusterPublisherManager<K, V> localPublisherManager;
private final Executor nonBlockingExecutor;
public AbstractCacheBackedSet(CacheImpl<K, V> cache, Object lockOwner, long explicitFlags) {
this.cache = cache;
this.lockOwner = lockOwner;
this.explicitFlags = explicitFlags;
batchSize = cache.config.clustering().stateTransfer().chunkSize();
clusterPublisherManager = cache.componentRegistry.getComponent(ClusterPublisherManager.class);
localPublisherManager = cache.componentRegistry.getComponent(ClusterPublisherManager.class,
PublisherManagerFactory.LOCAL_CLUSTER_PUBLISHER);
nonBlockingExecutor = cache.componentRegistry.getComponent(Executor.class, NON_BLOCKING_EXECUTOR);
}
@Override
public int size() {
return cache.size(explicitFlags);
}
@Override
public boolean isEmpty() {
return getStream(false).noneMatch(StreamMarshalling.alwaysTruePredicate());
}
@Override
public abstract boolean contains(Object o);
@Override
public CloseableIterator<E> iterator() {
CacheStream<E> stream = getStream(false);
Iterator<E> iterator = stream.iterator();
return new CloseableIterator<E>() {
private E last;
@Override
public void close() {
stream.close();
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public E next() {
last = iterator.next();
return wrapElement(last);
}
@Override
public void remove() {
Object key = extractKey(last);
cache.remove(key, explicitFlags, decoratedWriteContextBuilder());
}
};
}
@Override
public void forEach(Consumer<? super E> action) {
try (CacheStream<E> stream = getStream(false)) {
Iterator<E> iterator = stream.iterator();
iterator.forEachRemaining(action);
}
}
@Override
public Object[] toArray() {
return toArray(new Object[0]);
}
@Override
public <T> T[] toArray(T[] a) {
return stream().toArray(n -> (T[]) Array.newInstance(a.getClass().getComponentType(), n));
}
/**
* Adding new cache entries via a set is not allowed.
*
* <p>Please use {@link Cache#put(Object, Object)} etc.</p>
*/
@Override
public boolean add(E e) {
throw new UnsupportedOperationException();
}
@Override
public boolean remove(Object o) {
Object key = entryToKeyFunction() != null ? extractKey(o) : o;
V removedValue = cache.remove(key, explicitFlags, decoratedWriteContextBuilder());
return removedValue != null;
}
@Override
public boolean containsAll(Collection<?> c) {
for (Object o : c) {
if (!contains(o))
return false;
}
return true;
}
/**
* Adding new cache entries via a set is not allowed.
*
* <p>Please use {@link Cache#put(Object, Object)} etc.</p>
*/
@Override
public boolean addAll(Collection<? extends E> c) {
throw new UnsupportedOperationException();
}
@Override
public boolean removeAll(Collection<?> c) {
boolean modified = false;
for (Object o : c) {
modified |= remove(o);
}
return modified;
}
@Override
public boolean removeIf(Predicate<? super E> filter) {
Objects.requireNonNull(filter);
boolean removed = false;
try (CacheStream<E> stream = getStream(false)) {
Iterator<E> iterator = stream.iterator();
while (iterator.hasNext()) {
E next = iterator.next();
if (filter.test(next)) {
Object key = extractKey(next);
cache.remove(key, explicitFlags, decoratedWriteContextBuilder());
removed = true;
}
}
}
return removed;
}
@Override
public boolean retainAll(Collection<?> c) {
return removeIf(e -> !c.contains(e));
}
@Override
public void clear() {
cache.clear(explicitFlags);
}
@Override
public CloseableSpliterator<E> spliterator() {
CacheStream<E> stream = getStream(false);
return Closeables.spliterator(stream);
}
@Override
public CacheStream<E> stream() {
return getStream(false);
}
@Override
public CacheStream<E> parallelStream() {
return getStream(true);
}
@Override
public String toString() {
return this.getClass().getSimpleName() + "(" + cache + ')';
}
private CacheStream<E> getStream(boolean parallel) {
ClusterPublisherManager<K, V> publisherManager;
if (EnumUtil.containsAll(explicitFlags, FlagBitSets.CACHE_MODE_LOCAL)) {
publisherManager = localPublisherManager;
} else {
publisherManager = clusterPublisherManager;
}
InvocationContext ctx = cache.invocationContextFactory.createInvocationContext(false, UNBOUNDED);
if (ctx.isInTxScope()) {
// Register the cache transaction as a TM resource, so that it is cleaned up on commit
// The EntrySetCommand/KeySetCommand invocations use a new context, so they may not enlist
// E.g. when running from TxLockedStreamImpl.forEach
TxInvocationContext txCtx = (TxInvocationContext)ctx;
cache.txTable.enlist(txCtx.getTransaction(), (LocalTransaction) txCtx.getCacheTransaction());
}
if (lockOwner != null) {
ctx.setLockOwner(lockOwner);
}
CacheStream<E> cacheStream =
new DistributedCacheStream<>(cache.getCacheManager().getAddress(), parallel,
ctx, explicitFlags, batchSize, nonBlockingExecutor,
cache.componentRegistry, entryToKeyFunction(),
publisherManager);
return cacheStream.timeout(cache.config.clustering().remoteTimeout(), TimeUnit.MILLISECONDS);
}
protected ContextBuilder decoratedWriteContextBuilder() {
return lockOwner == null ? cache.defaultContextBuilderForWrite() : this::createContextWithLockOwner;
}
private InvocationContext createContextWithLockOwner(int numKeys) {
InvocationContext ctx = cache.defaultContextBuilderForWrite().create(numKeys);
ctx.setLockOwner(lockOwner);
return ctx;
}
protected abstract Function<Map.Entry<K, V>, ?> entryToKeyFunction();
/**
* Extract the key from a set element.
*/
protected abstract Object extractKey(Object e);
/**
 * Wrap the element if needed.
*/
protected abstract E wrapElement(E e);
}
| 8,359
| 30.428571
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/EncodingFunction.java
|
package org.infinispan.cache.impl;
import java.util.function.UnaryOperator;
import org.infinispan.commons.util.InjectiveFunction;
/**
* This is a marker interface to signal that this function may perform an encoding of the provided value. The returned
* value therefore will always be equivalent to the provided value, but may be in a slightly different form (whether
 * due to unwrapping, encoding or transcoding). This may allow certain optimizations knowing that the value is
* equivalent to what it was before.
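 * <p>A minimal illustrative sketch: the interface inherits a single abstract method
 * ({@code apply}), so a lambda can implement it; an identity function trivially satisfies
 * the "equivalent value" contract:</p>
 * <pre>{@code
 * EncodingFunction<byte[]> identity = b -> b; // returned value is equivalent to the input
 * }</pre>
 *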
* @author wburns
* @since 10.1
*/
public interface EncodingFunction<T> extends UnaryOperator<T>, InjectiveFunction<T, T> {
}
| 648
| 37.176471
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/EncoderValueMapper.java
|
package org.infinispan.cache.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
 * {@link java.util.function.Function} that uses a {@link DataConversion} to convert values from the configured storage format
* to the requested format.
*
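 * <p>Hedged usage sketch ({@code valueDataConversion} is an illustrative, already-wired
 * {@link DataConversion} instance):</p>
 * <pre>{@code
 * EncoderValueMapper<Object> mapper = new EncoderValueMapper<>(valueDataConversion);
 * Object userValue = mapper.apply(storageValue); // fromStorage conversion
 * }</pre>
 *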
* @since 9.1
*/
@Scope(Scopes.NONE)
public class EncoderValueMapper<V> implements EncodingFunction<V> {
private final DataConversion dataConversion;
public EncoderValueMapper(DataConversion dataConversion) {
this.dataConversion = dataConversion;
}
@Inject
public void injectDependencies(ComponentRegistry registry) {
registry.wireDependencies(dataConversion);
}
@Override
@SuppressWarnings("unchecked")
public V apply(V v) {
return (V) dataConversion.fromStorage(v);
}
public static class Externalizer implements AdvancedExternalizer<EncoderValueMapper> {
@Override
public Set<Class<? extends EncoderValueMapper>> getTypeClasses() {
return Collections.singleton(EncoderValueMapper.class);
}
@Override
public Integer getId() {
return Ids.ENCODER_VALUE_MAPPER;
}
@Override
public void writeObject(ObjectOutput output, EncoderValueMapper object) throws IOException {
DataConversion.writeTo(output, object.dataConversion);
}
@Override
@SuppressWarnings("unchecked")
public EncoderValueMapper readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new EncoderValueMapper(DataConversion.readFrom(input));
}
}
}
| 1,989
| 28.264706
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/SimpleCacheImpl.java
|
package org.infinispan.cache.impl;
import static org.infinispan.util.logging.Log.CONFIG;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import javax.security.auth.Subject;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.XAResource;
import org.infinispan.AdvancedCache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.CacheStream;
import org.infinispan.LockedStream;
import org.infinispan.batch.BatchContainer;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableSpliterator;
import org.infinispan.commons.util.Closeables;
import org.infinispan.commons.util.IteratorMapper;
import org.infinispan.commons.util.SpliteratorMapper;
import org.infinispan.commons.util.Util;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.format.PropertyFormatter;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.Flag;
import org.infinispan.context.impl.ImmutableContext;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.encoding.DataConversion;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.expiration.ExpirationManager;
import org.infinispan.expiration.impl.InternalExpirationManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.EmptyAsyncInterceptorChain;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.annotation.CacheEntriesEvicted;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryCreated;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryInvalidated;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryVisited;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.security.AuthorizationManager;
import org.infinispan.stats.Stats;
import org.infinispan.stream.impl.local.EntryStreamSupplier;
import org.infinispan.stream.impl.local.KeyStreamSupplier;
import org.infinispan.stream.impl.local.LocalCacheStream;
import org.infinispan.util.DataContainerRemoveIterator;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Simple local cache without interceptor stack.
* The cache still implements {@link AdvancedCache} since it is too troublesome to omit that.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
@MBean(objectName = CacheImpl.OBJECT_NAME, description = "Component that represents a simplified cache instance.")
@Scope(Scopes.NAMED_CACHE)
public class SimpleCacheImpl<K, V> implements AdvancedCache<K, V> {
   private static final Log log = LogFactory.getLog(SimpleCacheImpl.class);
   private static final String NULL_KEYS_NOT_SUPPORTED = "Null keys are not supported!";
   private static final String NULL_VALUES_NOT_SUPPORTED = "Null values are not supported!";
   private static final String NULL_FUNCTION_NOT_SUPPORTED = "Null functions are not supported!";
   private static final Class<? extends Annotation>[] FIRED_EVENTS = new Class[]{
CacheEntryCreated.class, CacheEntryRemoved.class, CacheEntryVisited.class,
CacheEntryModified.class, CacheEntriesEvicted.class, CacheEntryInvalidated.class,
CacheEntryExpired.class};
private final String name;
@Inject ComponentRegistry componentRegistry;
@Inject Configuration configuration;
@Inject EmbeddedCacheManager cacheManager;
@Inject InternalDataContainer<K, V> dataContainer;
@Inject CacheNotifier<K, V> cacheNotifier;
@Inject TimeService timeService;
@Inject KeyPartitioner keyPartitioner;
private Metadata defaultMetadata;
private boolean hasListeners = false;
public SimpleCacheImpl(String cacheName) {
this.name = cacheName;
}
@Override
@ManagedOperation(
description = "Starts the cache.",
displayName = "Starts cache."
)
public void start() {
this.defaultMetadata = new EmbeddedMetadata.Builder()
.lifespan(configuration.expiration().lifespan())
.maxIdle(configuration.expiration().maxIdle()).build();
componentRegistry.start();
}
@Override
@ManagedOperation(
description = "Stops the cache.",
displayName = "Stops cache."
)
public void stop() {
if (log.isDebugEnabled())
log.debugf("Stopping cache %s on %s", getName(), getCacheManager().getAddress());
dataContainer = null;
componentRegistry.stop();
}
@Override
public void putForExternalRead(K key, V value) {
ByRef.Boolean isCreatedRef = new ByRef.Boolean(false);
putForExternalReadInternal(key, value, defaultMetadata, isCreatedRef);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = createMetadata(lifespan, unit);
ByRef.Boolean isCreatedRef = new ByRef.Boolean(false);
putForExternalReadInternal(key, value, metadata, isCreatedRef);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = createMetadata(lifespan, lifespanUnit, maxIdle, maxIdleUnit);
ByRef.Boolean isCreatedRef = new ByRef.Boolean(false);
putForExternalReadInternal(key, value, metadata, isCreatedRef);
}
@Override
public void putForExternalRead(K key, V value, Metadata metadata) {
ByRef.Boolean isCreatedRef = new ByRef.Boolean(false);
putForExternalReadInternal(key, value, applyDefaultMetadata(metadata), isCreatedRef);
}
protected void putForExternalReadInternal(K key, V value, Metadata metadata, ByRef.Boolean isCreatedRef) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
boolean hasListeners = this.hasListeners;
getDataContainer().compute(key, (k, oldEntry, factory) -> {
         // the entry cannot be marked for removal in the data container, but compute() does not deal with expiration
if (isNull(oldEntry)) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(k, value, metadata, true, ImmutableContext.INSTANCE, null));
}
isCreatedRef.set(true);
return factory.create(k, value, metadata);
} else {
return oldEntry;
}
});
if (hasListeners && isCreatedRef.get()) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, value, metadata, false, ImmutableContext.INSTANCE, null));
}
}
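   // Hedged sketch: putForExternalRead is a create-only write, typically used to cache data
   // just read from an external system (database is an illustrative collaborator):
   //
   //   V fromDb = database.load(key);
   //   if (fromDb != null) {
   //      cache.putForExternalRead(key, fromDb); // no-op if the key is already present
   //   }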
@Override
public CompletableFuture<V> putAsync(K key, V value, Metadata metadata) {
return CompletableFuture.completedFuture(getAndPutInternal(key, value, applyDefaultMetadata(metadata)));
}
@Override
public CompletableFuture<CacheEntry<K, V>> putAsyncEntry(K key, V value, Metadata metadata) {
return CompletableFuture.completedFuture(getAndPutInternalEntry(key, value, applyDefaultMetadata(metadata)));
}
@Override
public Map<K, V> getAll(Set<?> keys) {
Map<K, V> map = new HashMap<>(keys.size());
AggregateCompletionStage<Void> aggregateCompletionStage = null;
if (hasListeners && cacheNotifier.hasListener(CacheEntryVisited.class)) {
aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
}
for (Object k : keys) {
Objects.requireNonNull(k, NULL_KEYS_NOT_SUPPORTED);
InternalCacheEntry<K, V> entry = getDataContainer().get(k);
if (entry != null) {
K key = entry.getKey();
V value = entry.getValue();
if (aggregateCompletionStage != null) {
// Notify each key in parallel, but each key must be notified of the post after the pre completes
aggregateCompletionStage.dependsOn(
cacheNotifier.notifyCacheEntryVisited(key, value, true, ImmutableContext.INSTANCE, null)
.thenCompose(ignore -> cacheNotifier.notifyCacheEntryVisited(key, value, false, ImmutableContext.INSTANCE, null)));
}
map.put(key, value);
}
}
if (aggregateCompletionStage != null) {
CompletionStages.join(aggregateCompletionStage.freeze());
}
return map;
}
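   // Hedged sketch: bulk read of several keys in one call (identifiers are illustrative):
   //
   //   Map<String, String> found = cache.getAll(Set.of("a", "b", "c"));
   //   found.forEach((k, v) -> log.tracef("%s -> %s", k, v));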
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
return CompletableFuture.completedFuture(getAll(keys));
}
@Override
public CacheEntry<K, V> getCacheEntry(Object k) {
InternalCacheEntry<K, V> entry = getDataContainer().get(k);
if (entry != null) {
K key = entry.getKey();
V value = entry.getValue();
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryVisited(key, value, true, ImmutableContext.INSTANCE, null));
CompletionStages.join(cacheNotifier.notifyCacheEntryVisited(key, value, false, ImmutableContext.INSTANCE, null));
}
}
return entry;
}
@Override
public CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key) {
return CompletableFuture.completedFuture(getCacheEntry(key));
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
Map<K, CacheEntry<K, V>> map = new HashMap<>(keys.size());
AggregateCompletionStage<Void> aggregateCompletionStage = null;
if (hasListeners && cacheNotifier.hasListener(CacheEntryVisited.class)) {
aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
}
for (Object key : keys) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
InternalCacheEntry<K, V> entry = getDataContainer().get(key);
if (entry != null) {
V value = entry.getValue();
if (aggregateCompletionStage != null) {
aggregateCompletionStage.dependsOn(cacheNotifier.notifyCacheEntryVisited((K) key, value, true, ImmutableContext.INSTANCE, null));
aggregateCompletionStage.dependsOn(cacheNotifier.notifyCacheEntryVisited((K) key, value, false, ImmutableContext.INSTANCE, null));
}
map.put(entry.getKey(), entry);
}
}
if (aggregateCompletionStage != null) {
CompletionStages.join(aggregateCompletionStage.freeze());
}
return map;
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return CompletableFuture.completedFuture(compute(key, remappingFunction, metadata));
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return CompletableFuture.completedFuture(computeIfPresent(key, remappingFunction, metadata));
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return CompletableFuture.completedFuture(computeIfAbsent(key, mappingFunction, metadata));
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return CompletableFuture.completedFuture(merge(key, value, remappingFunction, lifespan, lifespanUnit));
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return CompletableFuture.completedFuture(merge(key, value, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit));
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return CompletableFuture.completedFuture(merge(key, value, remappingFunction, metadata));
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return CompletableFuture.completedFuture(compute(key, remappingFunction));
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return CompletableFuture.completedFuture(compute(key, remappingFunction, lifespan, lifespanUnit));
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(compute(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
return CompletableFuture.completedFuture(computeIfAbsent(key, mappingFunction));
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
return CompletableFuture.completedFuture(computeIfAbsent(key, mappingFunction, lifespan, lifespanUnit));
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(computeIfAbsent(key, mappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return CompletableFuture.completedFuture(computeIfPresent(key, remappingFunction));
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return CompletableFuture.completedFuture(computeIfPresent(key, remappingFunction, lifespan, lifespanUnit));
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(computeIfPresent(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return CompletableFuture.completedFuture(merge(key, value, remappingFunction));
}
@Override
public Map<K, V> getGroup(String groupName) {
return Collections.emptyMap();
}
@Override
public void removeGroup(String groupName) {
}
@Override
public AvailabilityMode getAvailability() {
return AvailabilityMode.AVAILABLE;
}
@Override
public void setAvailability(AvailabilityMode availabilityMode) {
throw new UnsupportedOperationException();
}
@Override
public CompletionStage<Boolean> touch(Object key, boolean touchEvenIfExpired) {
return touch(key, -1, touchEvenIfExpired);
}
@Override
public CompletionStage<Boolean> touch(Object key, int segment, boolean touchEvenIfExpired) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
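      // A negative segment means the caller did not compute it, so derive it from the key partitioner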
if (segment < 0) {
segment = keyPartitioner.getSegment(key);
}
InternalCacheEntry<K, V> entry = dataContainer.peek(segment, key);
if (entry != null) {
long currentTime = timeService.wallClockTime();
if (touchEvenIfExpired || !entry.isExpired(currentTime)) {
return CompletableFutures.booleanStage(dataContainer.touch(segment, key, currentTime));
}
}
return CompletableFutures.completedFalse();
}
@Override
public void evict(K key) {
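      // Atomically remove the mapping, capturing the previous entry so eviction listeners can be notified afterwards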
ByRef<InternalCacheEntry<K, V>> oldEntryRef = new ByRef<>(null);
getDataContainer().compute(key, (k, oldEntry, factory) -> {
if (!isNull(oldEntry)) {
oldEntryRef.set(oldEntry);
}
return null;
});
InternalCacheEntry<K, V> oldEntry = oldEntryRef.get();
if (hasListeners && oldEntry != null) {
CompletionStages.join(cacheNotifier.notifyCacheEntriesEvicted(Collections.singleton(oldEntry), ImmutableContext.INSTANCE, null));
}
}
@Override
public Configuration getCacheConfiguration() {
return configuration;
}
@Override
public EmbeddedCacheManager getCacheManager() {
return cacheManager;
}
@Override
public AdvancedCache<K, V> getAdvancedCache() {
return this;
}
@Override
public ComponentStatus getStatus() {
return componentRegistry.getStatus();
}
@ManagedAttribute(
description = "Returns the cache status",
displayName = "Cache status",
dataType = DataType.TRAIT
)
public String getCacheStatus() {
return getStatus().toString();
}
protected boolean checkExpiration(InternalCacheEntry<K, V> entry, long now) {
if (entry.isExpired(now)) {
// we have to check the expiration under lock
return null == dataContainer.compute(entry.getKey(), (key, oldEntry, factory) -> {
            // re-check against the current mapping, not the snapshot taken before acquiring the lock
            if (oldEntry != null && oldEntry.isExpired(now)) {
CompletionStages.join(cacheNotifier.notifyCacheEntryExpired(key, oldEntry.getValue(),
oldEntry.getMetadata(), ImmutableContext.INSTANCE));
return null;
}
return oldEntry;
});
}
return false;
}
@Override
public int size() {
      // we have to iterate in order to provide a precise result in the presence of expired entries
long now = Long.MIN_VALUE;
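      // the wall clock is read lazily, only when the first expirable entry is encountered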
int size = 0;
DataContainer<K, V> dataContainer = getDataContainer();
for (InternalCacheEntry<K, V> entry : dataContainer) {
if (entry.canExpire()) {
if (now == Long.MIN_VALUE) now = timeService.wallClockTime();
if (!checkExpiration(entry, now)) {
++size;
if (size < 0) {
return Integer.MAX_VALUE;
}
}
} else {
++size;
if (size < 0) {
return Integer.MAX_VALUE;
}
}
}
return size;
}
@Override
public CompletableFuture<Long> sizeAsync() {
      // we have to iterate in order to provide a precise result in the presence of expired entries
long now = Long.MIN_VALUE;
long size = 0;
DataContainer<K, V> dataContainer = getDataContainer();
for (InternalCacheEntry<K, V> entry : dataContainer) {
if (entry.canExpire()) {
if (now == Long.MIN_VALUE) now = timeService.wallClockTime();
if (!checkExpiration(entry, now)) {
++size;
}
} else {
++size;
}
}
return CompletableFuture.completedFuture(size);
}
@Override
public boolean isEmpty() {
long now = Long.MIN_VALUE;
DataContainer<K, V> dataContainer = getDataContainer();
for (InternalCacheEntry<K, V> entry : dataContainer) {
if (entry.canExpire()) {
if (now == Long.MIN_VALUE) now = timeService.wallClockTime();
if (!checkExpiration(entry, now)) {
return false;
}
} else {
return false;
}
}
return true;
}
@Override
public boolean containsKey(Object key) {
return get(key) != null;
}
@Override
public boolean containsValue(Object value) {
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
for (InternalCacheEntry<K, V> ice : getDataContainer()) {
if (Objects.equals(ice.getValue(), value)) return true;
}
return false;
}
@Override
public V get(Object key) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
InternalCacheEntry<K, V> entry = getDataContainer().get(key);
if (entry == null) {
return null;
} else {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryVisited(entry.getKey(), entry.getValue(), true, ImmutableContext.INSTANCE, null));
CompletionStages.join(cacheNotifier.notifyCacheEntryVisited(entry.getKey(), entry.getValue(), false, ImmutableContext.INSTANCE, null));
}
return entry.getValue();
}
}
@Override
public CacheSet<K> keySet() {
return new KeySet();
}
@Override
public CacheCollection<V> values() {
return new Values();
}
@Override
public CacheSet<Entry<K, V>> entrySet() {
return new EntrySet();
}
@Override
public CacheSet<CacheEntry<K, V>> cacheEntrySet() {
return new CacheEntrySet();
}
@Override
public LockedStream<K, V> lockedStream() {
throw new UnsupportedOperationException("Simple cache doesn't support lock stream!");
}
@Override
public CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan) {
      InternalCacheEntry<K, V> entry = getDataContainer().get(key);
      if (entry != null) {
         checkExpiration(entry, timeService.wallClockTime());
      }
return CompletableFutures.completedTrue();
}
@Override
public CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value) {
      InternalCacheEntry<K, V> entry = getDataContainer().get(key);
      if (entry != null && checkExpiration(entry, timeService.wallClockTime())) {
return CompletableFutures.completedTrue();
}
return CompletableFutures.completedFalse();
}
@Override
public AdvancedCache<?, ?> withEncoding(Class<? extends Encoder> encoder) {
throw new UnsupportedOperationException();
}
@Override
public AdvancedCache<?, ?> withEncoding(Class<? extends Encoder> keyEncoder, Class<? extends Encoder> valueEncoder) {
throw new UnsupportedOperationException();
}
@Override
public AdvancedCache<Object, Object> withKeyEncoding(Class<? extends Encoder> encoder) {
throw new UnsupportedOperationException();
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> wrapper) {
throw new UnsupportedOperationException();
}
@Override
public AdvancedCache<?, ?> withMediaType(String keyMediaType, String valueMediaType) {
throw new UnsupportedOperationException();
}
@Override
public <K1, V1> AdvancedCache<K1, V1> withMediaType(MediaType keyMediaType, MediaType valueMediaType) {
throw new UnsupportedOperationException();
}
@Override
public AdvancedCache<K, V> withStorageMediaType() {
throw new UnsupportedOperationException();
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> keyWrapper, Class<? extends Wrapper> valueWrapper) {
throw new UnsupportedOperationException();
}
@Override
public DataConversion getKeyDataConversion() {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
public DataConversion getValueDataConversion() {
throw new UnsupportedOperationException("Conversion requires EncoderCache");
}
@Override
@ManagedOperation(
description = "Clears the cache",
displayName = "Clears the cache", name = "clear"
)
public void clear() {
DataContainer<K, V> dataContainer = getDataContainer();
boolean hasListeners = this.hasListeners;
ArrayList<InternalCacheEntry<K, V>> copyEntries;
if (hasListeners && cacheNotifier.hasListener(CacheEntryRemoved.class)) {
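         // Snapshot the entries while firing pre-removal events, so post events can be fired after the container is cleared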
copyEntries = new ArrayList<>(dataContainer.sizeIncludingExpired());
dataContainer.forEach(entry -> {
copyEntries.add(entry);
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(entry.getKey(), entry.getValue(), entry.getMetadata(), true, ImmutableContext.INSTANCE, null));
});
} else {
copyEntries = null;
}
dataContainer.clear();
if (copyEntries != null) {
for (InternalCacheEntry<K, V> entry : copyEntries) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(entry.getKey(), entry.getValue(), entry.getMetadata(), false, ImmutableContext.INSTANCE, null));
}
}
}
@Override
public String getName() {
return name;
}
@ManagedAttribute(
description = "Returns the cache name",
displayName = "Cache name",
dataType = DataType.TRAIT
)
public String getCacheName() {
return getName() + "(" + getCacheConfiguration().clustering().cacheMode().toString().toLowerCase() + ")";
}
@Override
@ManagedAttribute(
description = "Returns the version of Infinispan",
displayName = "Infinispan version",
dataType = DataType.TRAIT
)
public String getVersion() {
return Version.getVersion();
}
@ManagedAttribute(
description = "Returns the cache configuration in form of properties",
displayName = "Cache configuration properties",
dataType = DataType.TRAIT
)
public Properties getConfigurationAsProperties() {
return new PropertyFormatter().format(configuration);
}
@Override
public V put(K key, V value) {
return getAndPutInternal(key, value, defaultMetadata);
}
@Override
public V put(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = createMetadata(lifespan, unit);
return getAndPutInternal(key, value, metadata);
}
protected V getAndPutInternal(K key, V value, Metadata metadata) {
CacheEntry<K, V> oldEntry = getAndPutInternalEntry(key, value, metadata);
return oldEntry != null ? oldEntry.getValue() : null;
}
private CacheEntry<K, V> getAndPutInternalEntry(K key, V value, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
ByRef<CacheEntry<K, V>> oldEntryRef = new ByRef<>(null);
boolean hasListeners = this.hasListeners;
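      // Pre events (pre == true) fire inside the compute action while the entry lock is held;
      // the matching post events fire below, once the new mapping is visible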
getDataContainer().compute(key, (k, oldEntry, factory) -> {
if (isNull(oldEntry)) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, value, metadata, true, ImmutableContext.INSTANCE, null));
}
} else {
// Have to clone because the value can be updated.
oldEntryRef.set(oldEntry.clone());
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, value, metadata, oldEntry.getValue(), oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
}
if (oldEntry == null) {
return factory.create(k, value, metadata);
} else {
return factory.update(oldEntry, value, metadata);
}
});
CacheEntry<K, V> oldEntry = oldEntryRef.get();
if (hasListeners) {
V oldValue = oldEntry != null ? oldEntry.getValue() : null;
if (oldValue == null) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, value, metadata, false, ImmutableContext.INSTANCE, null));
} else {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, value, metadata, oldValue, oldEntry.getMetadata(), false, ImmutableContext.INSTANCE, null));
}
}
return oldEntry;
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = createMetadata(lifespan, unit);
return putIfAbsentInternal(key, value, metadata);
}
@Override
public V putIfAbsent(K key, V value, Metadata metadata) {
return putIfAbsentInternal(key, value, applyDefaultMetadata(metadata));
}
protected V putIfAbsentInternal(K key, V value, Metadata metadata) {
CacheEntry<K, V> entry = putIfAbsentInternalEntry(key, value, metadata);
return entry != null ? entry.getValue() : null;
}
private CacheEntry<K, V> putIfAbsentInternalEntry(K key, V value, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
ByRef<CacheEntry<K, V>> previousEntryRef = new ByRef<>(null);
boolean hasListeners = this.hasListeners;
getDataContainer().compute(key, (k, oldEntry, factory) -> {
if (isNull(oldEntry)) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, value, metadata, true, ImmutableContext.INSTANCE, null));
}
return factory.create(k, value, metadata);
} else {
// Have to clone because the value can be updated.
previousEntryRef.set(oldEntry.clone());
return oldEntry;
}
});
CacheEntry<K, V> previousEntry = previousEntryRef.get();
if (hasListeners && previousEntry == null) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, value, metadata, false, ImmutableContext.INSTANCE, null));
}
return previousEntry;
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit unit) {
putAllInternal(map, createMetadata(lifespan, unit));
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit unit) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
Metadata metadata = createMetadata(lifespan, unit);
return getAndReplaceInternal(key, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit unit) {
return replaceInternal(key, oldValue, value, createMetadata(lifespan, unit));
}
protected V getAndReplaceInternal(K key, V value, Metadata metadata) {
CacheEntry<K, V> oldEntry = getAndReplaceInternalEntry(key, value, metadata);
return oldEntry != null ? oldEntry.getValue() : null;
}
private CacheEntry<K, V> getAndReplaceInternalEntry(K key, V value, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
ByRef<CacheEntry<K, V>> ref = new ByRef<>(null);
boolean hasListeners = this.hasListeners;
getDataContainer().compute(key, (k, oldEntry, factory) -> {
if (!isNull(oldEntry)) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, value, metadata, oldEntry.getValue(), oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
// Have to clone because the value can be updated.
ref.set(oldEntry.clone());
return factory.update(oldEntry, value, metadata);
} else {
return oldEntry;
}
});
CacheEntry<K, V> oldRef = ref.get();
if (hasListeners && oldRef != null && oldRef.getValue() != null) {
V oldValue = oldRef.getValue();
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, value, metadata, oldValue, oldRef.getMetadata(), false, ImmutableContext.INSTANCE, null));
}
return oldRef;
}
@Override
public V put(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return getAndPutInternal(key, value, metadata);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return putIfAbsentInternal(key, value, metadata);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
putAllInternal(map, createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit));
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return getAndReplaceInternal(key, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return replaceInternal(key, oldValue, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V value, Metadata metadata) {
return replaceInternal(key, oldValue, value, applyDefaultMetadata(metadata));
}
protected boolean replaceInternal(K key, V oldValue, V value, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
Objects.requireNonNull(oldValue, NULL_VALUES_NOT_SUPPORTED);
ValueAndMetadata<V> oldRef = new ValueAndMetadata<>();
boolean hasListeners = this.hasListeners;
getDataContainer().compute(key, (k, oldEntry, factory) -> {
V prevValue = getValue(oldEntry);
if (Objects.equals(prevValue, oldValue)) {
oldRef.set(prevValue, oldEntry.getMetadata());
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, value, metadata, prevValue, oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
return factory.update(oldEntry, value, metadata);
} else {
return oldEntry;
}
});
if (oldRef.getValue() != null) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, value, metadata, oldRef.getValue(), oldRef.getMetadata(), false, ImmutableContext.INSTANCE, null));
}
return true;
} else {
return false;
}
}
@Override
public V remove(Object key) {
CacheEntry<K, V> oldEntry = removeEntry(key);
return oldEntry != null ? oldEntry.getValue() : null;
}
private CacheEntry<K, V> removeEntry(Object key) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
ByRef<InternalCacheEntry<K, V>> oldEntryRef = new ByRef<>(null);
boolean hasListeners = this.hasListeners;
getDataContainer().compute((K) key, (k, oldEntry, factory) -> {
if (!isNull(oldEntry)) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(oldEntry.getKey(), oldEntry.getValue(), oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
oldEntryRef.set(oldEntry);
}
return null;
});
InternalCacheEntry<K, V> oldEntry = oldEntryRef.get();
if (oldEntry != null && hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(oldEntry.getKey(), oldEntry.getValue(), oldEntry.getMetadata(), false, ImmutableContext.INSTANCE, null));
}
return oldEntry;
}
@Override
public void putAll(Map<? extends K, ? extends V> map) {
putAllInternal(map, defaultMetadata);
}
@Override
public CompletableFuture<V> putAsync(K key, V value) {
return CompletableFuture.completedFuture(put(key, value));
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit unit) {
return CompletableFuture.completedFuture(put(key, value, lifespan, unit));
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(put(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data) {
putAll(data);
return CompletableFutures.completedNull();
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit unit) {
putAll(data, lifespan, unit);
return CompletableFutures.completedNull();
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
putAll(data, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
return CompletableFutures.completedNull();
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> map, Metadata metadata) {
putAll(map, metadata);
return CompletableFutures.completedNull();
}
@Override
public CompletableFuture<Void> clearAsync() {
clear();
return CompletableFutures.completedNull();
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value) {
return CompletableFuture.completedFuture(putIfAbsent(key, value));
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit unit) {
return CompletableFuture.completedFuture(putIfAbsent(key, value, lifespan, unit));
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(putIfAbsent(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, Metadata metadata) {
return CompletableFuture.completedFuture(putIfAbsent(key, value, metadata));
}
@Override
public CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(K key, V value, Metadata metadata) {
return CompletableFuture.completedFuture(putIfAbsentInternalEntry(key, value, metadata));
}
@Override
public CompletableFuture<V> removeAsync(Object key) {
return CompletableFuture.completedFuture(remove(key));
}
@Override
public CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(Object key) {
return CompletableFuture.completedFuture(removeEntry(key));
}
@Override
public CompletableFuture<Boolean> removeAsync(Object key, Object value) {
return CompletableFuture.completedFuture(remove(key, value));
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value) {
return CompletableFuture.completedFuture(replace(key, value));
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit unit) {
return CompletableFuture.completedFuture(replace(key, value, lifespan, unit));
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(replace(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
return CompletableFuture.completedFuture(replace(key, oldValue, newValue));
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit unit) {
return CompletableFuture.completedFuture(replace(key, oldValue, newValue, lifespan, unit));
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return CompletableFuture.completedFuture(replace(key, oldValue, newValue, lifespan, lifespanUnit, maxIdle, maxIdleUnit));
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, Metadata metadata) {
return CompletableFuture.completedFuture(replace(key, value, metadata));
}
@Override
public CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(K key, V value, Metadata metadata) {
return CompletableFuture.completedFuture(getAndReplaceInternalEntry(key, value, metadata));
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, Metadata metadata) {
return CompletableFuture.completedFuture(replace(key, oldValue, newValue, metadata));
}
@Override
public CompletableFuture<V> getAsync(K key) {
return CompletableFuture.completedFuture(get(key));
}
@Override
public boolean startBatch() {
      // invocation batching is only supported by the full CacheImpl
throw CONFIG.invocationBatchingNotEnabled();
}
@Override
public void endBatch(boolean successful) {
      // invocation batching is only supported by the full CacheImpl
throw CONFIG.invocationBatchingNotEnabled();
}
@Override
public V putIfAbsent(K key, V value) {
return putIfAbsentInternal(key, value, defaultMetadata);
}
@Override
public boolean remove(Object key, Object value) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
ByRef<InternalCacheEntry<K, V>> oldEntryRef = new ByRef<>(null);
boolean hasListeners = this.hasListeners;
getDataContainer().compute((K) key, (k, oldEntry, factory) -> {
V oldValue = getValue(oldEntry);
if (Objects.equals(oldValue, value)) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(oldEntry.getKey(), oldValue, oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
oldEntryRef.set(oldEntry);
return null;
} else {
return oldEntry;
}
});
InternalCacheEntry<K, V> oldEntry = oldEntryRef.get();
if (oldEntry != null) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(oldEntry.getKey(), oldEntry.getValue(), oldEntry.getMetadata(), false, ImmutableContext.INSTANCE, null));
}
return true;
} else {
return false;
}
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
return replaceInternal(key, oldValue, newValue, defaultMetadata);
}
@Override
public V replace(K key, V value) {
return getAndReplaceInternal(key, value, defaultMetadata);
}
@Override
public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter) {
if (!hasListeners && canFire(listener)) {
hasListeners = true;
}
return cacheNotifier.addListenerAsync(listener, filter, converter);
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
if (!hasListeners && canFire(listener)) {
hasListeners = true;
}
return cacheNotifier.addListenerAsync(listener);
}
@Override
public CompletionStage<Void> removeListenerAsync(Object listener) {
return cacheNotifier.removeListenerAsync(listener);
}
@Deprecated
@Override
public Set<Object> getListeners() {
return cacheNotifier.getListeners();
}
@Override
public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener,
CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
if (!hasListeners && canFire(listener)) {
hasListeners = true;
}
return cacheNotifier.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
throw new UnsupportedOperationException();
}
private boolean canFire(Object listener) {
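      // A listener can fire only if at least one of its methods is annotated with an event type this cache emits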
for (Method m : listener.getClass().getMethods()) {
for (Class<? extends Annotation> annotation : FIRED_EVENTS) {
if (m.isAnnotationPresent(annotation)) {
return true;
}
}
}
return false;
}
private Metadata applyDefaultMetadata(Metadata metadata) {
Metadata.Builder builder = metadata.builder();
return builder != null ? builder.merge(defaultMetadata).build() : metadata;
}
private Metadata createMetadata(long lifespan, TimeUnit unit) {
return new EmbeddedMetadata.Builder().lifespan(lifespan, unit).maxIdle(configuration.expiration().maxIdle()).build();
}
private Metadata createMetadata(long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit)
.build();
}
@Override
public AdvancedCache<K, V> withFlags(Flag... flags) {
      // the flags are mostly ignored by the simple cache, so there is nothing to decorate
return this;
}
@Override
public AdvancedCache<K, V> withFlags(Collection<Flag> flags) {
return this;
}
@Override
public AdvancedCache<K, V> noFlags() {
return this;
}
@Override
public AdvancedCache<K, V> transform(Function<AdvancedCache<K, V>, ? extends AdvancedCache<K, V>> transformation) {
return transformation.apply(this);
}
@Override
public AdvancedCache<K, V> withSubject(Subject subject) {
return this; // NO-OP
}
/**
* @deprecated Since 10.0, will be removed without a replacement
*/
@Deprecated
@Override
public AsyncInterceptorChain getAsyncInterceptorChain() {
return EmptyAsyncInterceptorChain.INSTANCE;
}
@Override
public EvictionManager getEvictionManager() {
return getComponentRegistry().getComponent(EvictionManager.class);
}
@Override
public ExpirationManager<K, V> getExpirationManager() {
return getComponentRegistry().getComponent(InternalExpirationManager.class);
}
@Override
public ComponentRegistry getComponentRegistry() {
return componentRegistry;
}
@Override
public DistributionManager getDistributionManager() {
return getComponentRegistry().getComponent(DistributionManager.class);
}
@Override
public AuthorizationManager getAuthorizationManager() {
return getComponentRegistry().getComponent(AuthorizationManager.class);
}
@Override
public AdvancedCache<K, V> lockAs(Object lockOwner) {
throw new UnsupportedOperationException("lockAs method not supported with Simple Cache!");
}
@Override
public boolean lock(K... keys) {
throw CONTAINER.lockOperationsNotSupported();
}
@Override
public boolean lock(Collection<? extends K> keys) {
throw CONTAINER.lockOperationsNotSupported();
}
@Override
public RpcManager getRpcManager() {
return null;
}
@Override
public BatchContainer getBatchContainer() {
return null;
}
@Override
public DataContainer<K, V> getDataContainer() {
DataContainer<K, V> dataContainer = this.dataContainer;
if (dataContainer == null) {
ComponentStatus status = getStatus();
switch (status) {
case STOPPING:
throw CONTAINER.cacheIsStopping(name);
case TERMINATED:
case FAILED:
throw CONTAINER.cacheIsTerminated(name, status.toString());
default:
throw new IllegalStateException("Status: " + status);
}
}
return dataContainer;
}
@Override
public TransactionManager getTransactionManager() {
return null;
}
@Override
public LockManager getLockManager() {
return null;
}
@Override
public Stats getStats() {
return null;
}
@Override
public XAResource getXAResource() {
return null;
}
@Override
public ClassLoader getClassLoader() {
return null;
}
@Override
public AdvancedCache<K, V> with(ClassLoader classLoader) {
return this;
}
@Override
public V put(K key, V value, Metadata metadata) {
return getAndPutInternal(key, value, applyDefaultMetadata(metadata));
}
@Override
public void putAll(Map<? extends K, ? extends V> map, Metadata metadata) {
putAllInternal(map, applyDefaultMetadata(metadata));
}
protected void putAllInternal(Map<? extends K, ? extends V> map, Metadata metadata) {
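      // Validate every key and value up front so a null cannot leave the map partially applied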
for (Entry<? extends K, ? extends V> entry : map.entrySet()) {
Objects.requireNonNull(entry.getKey(), NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(entry.getValue(), NULL_VALUES_NOT_SUPPORTED);
}
for (Entry<? extends K, ? extends V> entry : map.entrySet()) {
getAndPutInternal(entry.getKey(), entry.getValue(), metadata);
}
}
@Override
public V replace(K key, V value, Metadata metadata) {
return getAndReplaceInternal(key, value, applyDefaultMetadata(metadata));
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
ByRef<V> newValueRef = new ByRef<>(null);
return computeIfAbsentInternal(key, mappingFunction, newValueRef, defaultMetadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
ByRef<V> newValueRef = new ByRef<>(null);
return computeIfAbsentInternal(key, mappingFunction, newValueRef, metadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
ByRef<V> newValueRef = new ByRef<>(null);
return computeIfAbsentInternal(key, mappingFunction, newValueRef, createMetadata(lifespan, lifespanUnit));
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
ByRef<V> newValueRef = new ByRef<>(null);
return computeIfAbsentInternal(key, mappingFunction, newValueRef, createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit));
}
protected V computeIfAbsentInternal(K key, Function<? super K, ? extends V> mappingFunction, ByRef<V> newValueRef) {
return computeIfAbsentInternal(key, mappingFunction, newValueRef, defaultMetadata);
}
private V computeIfAbsentInternal(K key, Function<? super K, ? extends V> mappingFunction, ByRef<V> newValueRef, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(mappingFunction, NULL_FUNCTION_NOT_SUPPORTED);
boolean hasListeners = this.hasListeners;
componentRegistry.wireDependencies(mappingFunction);
InternalCacheEntry<K, V> returnEntry = getDataContainer().compute(key, (k, oldEntry, factory) -> {
V oldValue = getValue(oldEntry);
if (oldValue == null) {
V newValue = mappingFunction.apply(k);
if (newValue == null) {
return null;
} else {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(k, newValue, metadata, true, ImmutableContext.INSTANCE, null));
}
newValueRef.set(newValue);
return factory.create(k, newValue, metadata);
}
} else {
return oldEntry;
}
});
V newValue = newValueRef.get();
if (hasListeners && newValue != null) {
         CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, newValue, metadata, false, ImmutableContext.INSTANCE, null));
}
return returnEntry == null ? null : returnEntry.getValue();
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
CacheEntryChange<K, V> ref = new CacheEntryChange<>();
return computeIfPresentInternal(key, remappingFunction, ref, defaultMetadata);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
CacheEntryChange<K, V> ref = new CacheEntryChange<>();
return computeIfPresentInternal(key, remappingFunction, ref, createMetadata(lifespan, lifespanUnit));
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
CacheEntryChange<K, V> ref = new CacheEntryChange<>();
return computeIfPresentInternal(key, remappingFunction, ref, createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit));
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
CacheEntryChange<K, V> ref = new CacheEntryChange<>();
return computeIfPresentInternal(key, remappingFunction, ref, metadata);
}
protected V computeIfPresentInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref) {
return computeIfPresentInternal(key, remappingFunction, ref, defaultMetadata);
}
private V computeIfPresentInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(remappingFunction, NULL_FUNCTION_NOT_SUPPORTED);
boolean hasListeners = this.hasListeners;
componentRegistry.wireDependencies(remappingFunction);
getDataContainer().compute(key, (k, oldEntry, factory) -> {
V oldValue = getValue(oldEntry);
if (oldValue != null) {
V newValue = remappingFunction.apply(k, oldValue);
if (newValue == null) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(k, oldValue, oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
ref.set(k, null, oldValue, oldEntry.getMetadata());
return null;
} else {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(k, newValue, metadata, oldValue, oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
ref.set(k, newValue, oldValue, oldEntry.getMetadata());
return factory.update(oldEntry, newValue, metadata);
}
} else {
return null;
}
});
      V newValue = ref.getNewValue();
      // ref is populated only when the compute action changed something; without this guard a miss on the key
      // would fire a removal event with a null key
      if (hasListeners && ref.getKey() != null) {
         if (newValue != null) {
            CompletionStages.join(cacheNotifier.notifyCacheEntryModified(ref.getKey(), newValue, metadata, ref.getOldValue(), ref.getOldMetadata(), false, ImmutableContext.INSTANCE, null));
         } else {
            CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(ref.getKey(), ref.getOldValue(), ref.getOldMetadata(), false, ImmutableContext.INSTANCE, null));
         }
      }
return newValue;
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
CacheEntryChange<K, V> ref = new CacheEntryChange<>();
return computeInternal(key, remappingFunction, ref, defaultMetadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return computeInternal(key, remappingFunction, new CacheEntryChange<>(), createMetadata(lifespan, lifespanUnit));
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return computeInternal(key, remappingFunction, new CacheEntryChange<>(), createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit));
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
CacheEntryChange<K, V> ref = new CacheEntryChange<>();
return computeInternal(key, remappingFunction, ref, metadata);
}
protected V computeInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref) {
return computeInternal(key, remappingFunction, ref, defaultMetadata);
}
private V computeInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(remappingFunction, NULL_FUNCTION_NOT_SUPPORTED);
boolean hasListeners = this.hasListeners;
componentRegistry.wireDependencies(remappingFunction);
getDataContainer().compute(key, (k, oldEntry, factory) -> {
V oldValue = getValue(oldEntry);
V newValue = remappingFunction.apply(k, oldValue);
return getUpdatedEntry(k, oldEntry, factory, oldValue, newValue, metadata, ref, hasListeners);
});
return notifyAndReturn(ref, hasListeners, metadata);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return mergeInternal(key, value, remappingFunction, new CacheEntryChange<>(), defaultMetadata);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return mergeInternal(key, value, remappingFunction, new CacheEntryChange<>(), createMetadata(lifespan, lifespanUnit));
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return mergeInternal(key, value, remappingFunction, new CacheEntryChange<>(), createMetadata(lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit));
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return mergeInternal(key, value, remappingFunction, new CacheEntryChange<>(), metadata);
}
protected V mergeInternal(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref, Metadata metadata) {
Objects.requireNonNull(key, NULL_KEYS_NOT_SUPPORTED);
Objects.requireNonNull(value, NULL_VALUES_NOT_SUPPORTED);
Objects.requireNonNull(remappingFunction, NULL_FUNCTION_NOT_SUPPORTED);
boolean hasListeners = this.hasListeners;
getDataContainer().compute(key, (k, oldEntry, factory) -> {
V oldValue = getValue(oldEntry);
V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value);
return getUpdatedEntry(k, oldEntry, factory, oldValue, newValue, metadata, ref, hasListeners);
});
return notifyAndReturn(ref, hasListeners, metadata);
}
private V notifyAndReturn(CacheEntryChange<K, V> ref, boolean hasListeners, Metadata metadata) {
K key = ref.getKey();
V newValue = ref.getNewValue();
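      // ref.getKey() is set only when the compute action recorded a change, so a miss fires no post events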
if (key != null) {
V oldValue = ref.getOldValue();
if (hasListeners) {
if (newValue == null) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(key, oldValue, ref.getOldMetadata(), false, ImmutableContext.INSTANCE, null));
} else if (oldValue == null) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(key, newValue, metadata, false, ImmutableContext.INSTANCE, null));
} else {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(key, newValue, metadata, oldValue, ref.getOldMetadata(), false, ImmutableContext.INSTANCE, null));
}
}
}
return newValue;
}
private InternalCacheEntry<K, V> getUpdatedEntry(K k, InternalCacheEntry<K, V> oldEntry, InternalEntryFactory factory, V oldValue, V newValue, Metadata metadata, CacheEntryChange<K, V> ref, boolean hasListeners) {
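      // Four outcomes: removal (newValue == null), creation (oldValue == null), no-op (values equal) or update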
if (newValue == null) {
if (oldValue != null) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryRemoved(k, oldValue, oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
ref.set(k, null, oldValue, oldEntry.getMetadata());
}
return null;
} else if (oldValue == null) {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryCreated(k, newValue, metadata, true, ImmutableContext.INSTANCE, null));
}
ref.set(k, newValue, null, null);
return factory.create(k, newValue, metadata);
} else if (Objects.equals(oldValue, newValue)) {
return oldEntry;
} else {
if (hasListeners) {
CompletionStages.join(cacheNotifier.notifyCacheEntryModified(k, newValue, metadata, oldValue, oldEntry.getMetadata(), true, ImmutableContext.INSTANCE, null));
}
ref.set(k, newValue, oldValue, oldEntry.getMetadata());
return factory.update(oldEntry, newValue, metadata);
}
}
   // The following helpers may only be called from within a dataContainer.compute() action:
   // an expired entry is treated as absent (its expiration event is fired) so the action replaces it under the lock
private boolean isNull(InternalCacheEntry<K, V> entry) {
if (entry == null) {
return true;
} else if (entry.canExpire()) {
if (entry.isExpired(timeService.wallClockTime())) {
if (cacheNotifier.hasListener(CacheEntryExpired.class)) {
CompletionStages.join(cacheNotifier.notifyCacheEntryExpired(entry.getKey(), entry.getValue(),
entry.getMetadata(), ImmutableContext.INSTANCE));
}
return true;
}
}
return false;
}
   // Also restricted to dataContainer.compute() actions, see isNull() above
private V getValue(InternalCacheEntry<K, V> entry) {
return isNull(entry) ? null : entry.getValue();
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
for (Iterator<InternalCacheEntry<K, V>> it = dataContainer.iterator(); it.hasNext(); ) {
InternalCacheEntry<K, V> ice = it.next();
action.accept(ice.getKey(), ice.getValue());
}
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
AggregateCompletionStage<Void> aggregateCompletionStage;
if (hasListeners && cacheNotifier.hasListener(CacheEntryModified.class)) {
aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
} else {
aggregateCompletionStage = null;
}
      for (Iterator<InternalCacheEntry<K, V>> it = dataContainer.iterator(); it.hasNext(); ) {
         InternalCacheEntry<K, V> ice = it.next();
         // A fresh CacheEntryChange per entry, so the post event below cannot observe a previous iteration's values
         CacheEntryChange<K, V> ref = new CacheEntryChange<>();
         getDataContainer().compute(ice.getKey(), (k, oldEntry, factory) -> {
            V oldValue = getValue(oldEntry);
            if (oldValue != null) {
               V newValue = function.apply(k, oldValue);
               Objects.requireNonNull(newValue, NULL_VALUES_NOT_SUPPORTED);
               if (aggregateCompletionStage != null) {
                  aggregateCompletionStage.dependsOn(cacheNotifier.notifyCacheEntryModified(k, newValue, defaultMetadata, oldValue, oldEntry.getMetadata(),
                        true, ImmutableContext.INSTANCE, null));
               }
               ref.set(k, newValue, oldValue, oldEntry.getMetadata());
               return factory.update(oldEntry, newValue, defaultMetadata);
            } else {
               return null;
            }
         });
         // Fire the post event only when this entry was actually updated
         if (aggregateCompletionStage != null && ref.getKey() != null) {
            aggregateCompletionStage.dependsOn(cacheNotifier.notifyCacheEntryModified(ref.getKey(), ref.getNewValue(), defaultMetadata, ref.getOldValue(),
                  ref.getOldMetadata(), false, ImmutableContext.INSTANCE, null));
         }
      }
if (aggregateCompletionStage != null) {
CompletionStages.join(aggregateCompletionStage.freeze());
}
}
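   /**
    * Holder for a previous value and its metadata, populated inside a compute action and
    * read once the action has completed.
    */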
protected static class ValueAndMetadata<V> {
private V value;
private Metadata metadata;
public void set(V value, Metadata metadata) {
this.value = value;
this.metadata = metadata;
}
public V getValue() {
return value;
}
public Metadata getMetadata() {
return metadata;
}
}
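   /**
    * Captures the outcome of a compute-style update so that post-notification events can be
    * fired after the data container lock is released. Illustrative flow (a sketch using only
    * this class's accessors, not a verbatim excerpt of a caller):
    * <pre>{@code
    * CacheEntryChange<K, V> ref = new CacheEntryChange<>();
    * dataContainer.compute(key, (k, oldEntry, factory) -> {
    *    // ... decide on newValue, then record the change:
    *    ref.set(k, newValue, oldValue, oldEntry.getMetadata());
    *    return factory.update(oldEntry, newValue, metadata);
    * });
    * // outside the lock: ref.getNewValue(), ref.getOldValue(), ... drive the post events
    * }</pre>
    */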
protected static class CacheEntryChange<K, V> {
private K key;
private V newValue;
private V oldValue;
private Metadata oldMetadata;
public void set(K key, V newValue, V oldValue, Metadata oldMetadata) {
this.key = key;
this.newValue = newValue;
this.oldValue = oldValue;
this.oldMetadata = oldMetadata;
}
public K getKey() {
return key;
}
public V getNewValue() {
return newValue;
}
public V getOldValue() {
return oldValue;
}
public Metadata getOldMetadata() {
return oldMetadata;
}
}
protected abstract class EntrySetBase<T extends Entry<K, V>> extends AbstractSet<T> implements CacheSet<T> {
private final DataContainer<K, V> delegate = getDataContainer();
@Override
public int size() {
return SimpleCacheImpl.this.size();
}
@Override
public boolean isEmpty() {
return SimpleCacheImpl.this.isEmpty();
}
@Override
public boolean contains(Object o) {
return delegate.get(o) != null;
}
@Override
public Object[] toArray() {
return StreamSupport.stream(delegate.spliterator(), false).toArray();
}
@Override
public boolean remove(Object o) {
if (o instanceof Entry) {
Entry<K, V> entry = (Entry<K, V>) o;
return SimpleCacheImpl.this.remove(entry.getKey(), entry.getValue());
}
return false;
}
@Override
public boolean retainAll(Collection<?> c) {
boolean changed = false;
for (InternalCacheEntry<K, V> entry : getDataContainer()) {
if (!c.contains(entry)) {
changed |= SimpleCacheImpl.this.remove(entry.getKey(), entry.getValue());
}
}
return changed;
}
@Override
public boolean removeAll(Collection<?> c) {
boolean changed = false;
for (Object o : c) {
if (o instanceof Entry) {
Entry<K, V> entry = (Entry<K, V>) o;
changed |= SimpleCacheImpl.this.remove(entry.getKey(), entry.getValue());
}
}
return changed;
}
@Override
public void clear() {
SimpleCacheImpl.this.clear();
}
}
protected class EntrySet extends EntrySetBase<Entry<K, V>> implements CacheSet<Entry<K, V>> {
@Override
public CloseableIterator<Entry<K, V>> iterator() {
return Closeables.iterator(new DataContainerRemoveIterator<>(SimpleCacheImpl.this));
}
@Override
public CloseableSpliterator<Entry<K, V>> spliterator() {
         // Cast through the raw type to convert the InternalCacheEntry spliterator into an Entry spliterator
return Closeables.spliterator((Spliterator) dataContainer.spliterator());
}
@Override
public boolean add(Entry<K, V> entry) {
throw new UnsupportedOperationException();
}
@Override
public boolean addAll(Collection<? extends Entry<K, V>> c) {
throw new UnsupportedOperationException();
}
@Override
public CacheStream<Entry<K, V>> stream() {
return cacheStreamCast(new LocalCacheStream<>(new EntryStreamSupplier<>(SimpleCacheImpl.this, null,
getStreamSupplier(false)), false, componentRegistry));
}
@Override
public CacheStream<Entry<K, V>> parallelStream() {
         return cacheStreamCast(new LocalCacheStream<>(new EntryStreamSupplier<>(SimpleCacheImpl.this, null,
               getStreamSupplier(true)), true, componentRegistry));
}
}
   // This cast helper works around generic invariance: CacheStream<CacheEntry<K, V>> is not a subtype of CacheStream<Entry<K, V>>
private static <K, V> CacheStream<Entry<K, V>> cacheStreamCast(CacheStream stream) {
return stream;
}
protected class CacheEntrySet extends EntrySetBase<CacheEntry<K, V>> implements CacheSet<CacheEntry<K, V>> {
@Override
public CloseableIterator<CacheEntry<K, V>> iterator() {
return Closeables.iterator(new DataContainerRemoveIterator<>(SimpleCacheImpl.this));
}
@Override
public CloseableSpliterator<CacheEntry<K, V>> spliterator() {
         // Cast through the raw type to convert the InternalCacheEntry spliterator into a CacheEntry spliterator
return Closeables.spliterator((Spliterator) dataContainer.spliterator());
}
@Override
public boolean add(CacheEntry<K, V> entry) {
throw new UnsupportedOperationException();
}
@Override
public boolean addAll(Collection<? extends CacheEntry<K, V>> c) {
throw new UnsupportedOperationException();
}
@Override
public CacheStream<CacheEntry<K, V>> stream() {
return new LocalCacheStream<>(new EntryStreamSupplier<>(SimpleCacheImpl.this, null, getStreamSupplier(false)),
false, componentRegistry);
}
@Override
public CacheStream<CacheEntry<K, V>> parallelStream() {
return new LocalCacheStream<>(new EntryStreamSupplier<>(SimpleCacheImpl.this, null, getStreamSupplier(true)),
true, componentRegistry);
}
}
protected class Values extends AbstractSet<V> implements CacheCollection<V> {
@Override
public boolean retainAll(Collection<?> c) {
Set<Object> retained = new HashSet<>(c.size());
retained.addAll(c);
boolean changed = false;
for (InternalCacheEntry<K, V> entry : getDataContainer()) {
if (!retained.contains(entry.getValue())) {
changed |= SimpleCacheImpl.this.remove(entry.getKey(), entry.getValue());
}
}
return changed;
}
@Override
public boolean removeAll(Collection<?> c) {
int removeSize = c.size();
if (removeSize == 0) {
return false;
} else if (removeSize == 1) {
return remove(c.iterator().next());
}
Set<Object> removed = new HashSet<>(removeSize);
removed.addAll(c);
boolean changed = false;
for (InternalCacheEntry<K, V> entry : getDataContainer()) {
if (removed.contains(entry.getValue())) {
changed |= SimpleCacheImpl.this.remove(entry.getKey(), entry.getValue());
}
}
return changed;
}
@Override
public boolean remove(Object o) {
for (InternalCacheEntry<K, V> entry : getDataContainer()) {
if (Objects.equals(entry.getValue(), o)) {
if (SimpleCacheImpl.this.remove(entry.getKey(), entry.getValue())) {
return true;
}
}
}
return false;
}
@Override
public void clear() {
SimpleCacheImpl.this.clear();
}
@Override
public CloseableIterator<V> iterator() {
return Closeables.iterator(new IteratorMapper<>(new DataContainerRemoveIterator<>(SimpleCacheImpl.this), Map.Entry::getValue));
}
@Override
public CloseableSpliterator<V> spliterator() {
return Closeables.spliterator(new SpliteratorMapper<>(getDataContainer().spliterator(), Map.Entry::getValue));
}
@Override
public int size() {
return SimpleCacheImpl.this.size();
}
@Override
public boolean isEmpty() {
return SimpleCacheImpl.this.isEmpty();
}
@Override
public CacheStream<V> stream() {
LocalCacheStream<CacheEntry<K, V>> lcs = new LocalCacheStream<>(new EntryStreamSupplier<>(SimpleCacheImpl.this,
null, getStreamSupplier(false)), false, componentRegistry);
return lcs.map(CacheEntry::getValue);
}
@Override
public CacheStream<V> parallelStream() {
         LocalCacheStream<CacheEntry<K, V>> lcs = new LocalCacheStream<>(new EntryStreamSupplier<>(SimpleCacheImpl.this,
               null, getStreamSupplier(true)), true, componentRegistry);
return lcs.map(CacheEntry::getValue);
}
}
protected Supplier<Stream<CacheEntry<K, V>>> getStreamSupplier(boolean parallel) {
      // Raw type because the InternalCacheEntry spliterator cannot be cast directly to a CacheEntry one.
      // Create a fresh spliterator per invocation so the supplier remains reusable.
      return () -> StreamSupport.stream((Spliterator) dataContainer.spliterator(), parallel);
}
protected class KeySet extends AbstractSet<K> implements CacheSet<K> {
@Override
public boolean retainAll(Collection<?> c) {
Set<Object> retained = new HashSet<>(c.size());
retained.addAll(c);
boolean changed = false;
for (InternalCacheEntry<K, V> entry : getDataContainer()) {
if (!retained.contains(entry.getKey())) {
changed |= SimpleCacheImpl.this.remove(entry.getKey()) != null;
}
}
return changed;
}
@Override
public boolean remove(Object o) {
return SimpleCacheImpl.this.remove(o) != null;
}
@Override
public boolean removeAll(Collection<?> c) {
boolean changed = false;
for (Object key : c) {
changed |= SimpleCacheImpl.this.remove(key) != null;
}
return changed;
}
@Override
public void clear() {
SimpleCacheImpl.this.clear();
}
@Override
public CloseableIterator<K> iterator() {
return Closeables.iterator(new IteratorMapper<>(new DataContainerRemoveIterator<>(SimpleCacheImpl.this), Map.Entry::getKey));
}
@Override
public CloseableSpliterator<K> spliterator() {
return new SpliteratorMapper<>(dataContainer.spliterator(), Map.Entry::getKey);
}
@Override
public int size() {
return SimpleCacheImpl.this.size();
}
@Override
public boolean isEmpty() {
return SimpleCacheImpl.this.isEmpty();
}
@Override
public CacheStream<K> stream() {
return new LocalCacheStream<>(new KeyStreamSupplier<>(SimpleCacheImpl.this, null, super::stream), false,
componentRegistry);
}
@Override
public CacheStream<K> parallelStream() {
return new LocalCacheStream<>(new KeyStreamSupplier<>(SimpleCacheImpl.this, null, super::stream), true,
componentRegistry);
}
}
@Override
public String toString() {
return "SimpleCache '" + getName() + "'@" + Util.hexIdHashCode(getCacheManager());
}
}
| 78,552
| 37.868382
| 247
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/CacheBackedEntrySet.java
|
package org.infinispan.cache.impl;
import java.util.Map;
import java.util.function.Function;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ForwardingCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.stream.StreamMarshalling;
/**
* Entry set backed by a cache.
*
* <p>Implements {@code CacheSet<CacheEntry<K, V>>} but it is also (mis-)used as a {@code CacheSet<Map.Entry<K, V>>}.
* This works because {@code add()} and {@code addAll()} are not implemented.</p>
*
* @since 13.0
*/
public class CacheBackedEntrySet<K, V> extends AbstractCacheBackedSet<K, V, CacheEntry<K, V>> {
public CacheBackedEntrySet(CacheImpl<K, V> cache, Object lockOwner, long explicitFlags) {
super(cache, lockOwner, explicitFlags);
}
@Override
public boolean contains(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?, ?> entry = (Map.Entry<?, ?>) o;
InvocationContext ctx = cache.invocationContextFactory.createInvocationContext(false, 1);
if (lockOwner != null) {
ctx.setLockOwner(lockOwner);
}
V cacheValue = cache.get(entry.getKey(), explicitFlags, ctx);
return cacheValue != null && cacheValue.equals(entry.getValue());
}
@Override
protected Function<Map.Entry<K, V>, ?> entryToKeyFunction() {
return StreamMarshalling.entryToKeyFunction();
}
@Override
protected Object extractKey(Object e) {
if (!(e instanceof Map.Entry))
return null;
return ((Map.Entry<?, ?>) e).getKey();
}
@Override
protected CacheEntry<K, V> wrapElement(CacheEntry<K, V> e) {
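      // Wrap the entry so that setValue() writes through to the cache rather than only mutating the local copy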
return new ForwardingCacheEntry<K, V>() {
@Override
protected CacheEntry<K, V> delegate() {
return e;
}
@Override
public V setValue(V value) {
cache.put(getKey(), value, cache.defaultMetadata, EnumUtil.EMPTY_BIT_SET,
decoratedWriteContextBuilder());
return super.setValue(value);
}
};
}
}
| 2,137
| 29.112676
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/EncoderCache.java
|
package org.infinispan.cache.impl;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.lang.annotation.Annotation;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.commons.util.InjectiveFunction;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ForwardingCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.ListenerHolder;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.util.WriteableCacheCollectionMapper;
import org.infinispan.util.WriteableCacheSetMapper;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.function.SerializableBiFunction;
import org.infinispan.util.function.SerializableFunction;
/**
 * Cache decoration that uses the {@link Encoder} and {@link Wrapper} to convert keys and values between their
 * storage format and their read/write format.
*
* @since 9.1
*/
@Scope(Scopes.NAMED_CACHE)
public class EncoderCache<K, V> extends AbstractDelegatingAdvancedCache<K, V> {
// InternalCacheFactory.buildEncodingCache doesn't have a component registry to pass to the constructor.
// We inject these after the component registry has been created,
// and every other caller of the constructor passes non-null values.
@Inject InternalEntryFactory entryFactory;
@Inject BasicComponentRegistry componentRegistry;
private final DataConversion keyDataConversion;
private final DataConversion valueDataConversion;
private final Function<V, V> decodedValueForRead = this::valueFromStorage;
public EncoderCache(AdvancedCache<K, V> cache, InternalEntryFactory entryFactory,
BasicComponentRegistry componentRegistry,
DataConversion keyDataConversion, DataConversion valueDataConversion) {
super(cache);
this.entryFactory = entryFactory;
this.componentRegistry = componentRegistry;
this.keyDataConversion = keyDataConversion;
this.valueDataConversion = valueDataConversion;
}
@Override
public AdvancedCache rewrap(AdvancedCache newDelegate) {
return new EncoderCache(newDelegate, entryFactory, componentRegistry, keyDataConversion, valueDataConversion);
}
private Set<?> encodeKeysForWrite(Set<?> keys) {
if (needsEncoding(keys)) {
return keys.stream().map(this::keyToStorage).collect(Collectors.toCollection(LinkedHashSet::new));
}
return keys;
}
private boolean needsEncoding(Collection<?> keys) {
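      // A key needs re-encoding only when its storage representation differs from the
      // supplied instance; otherwise callers can reuse the original collection without copying.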
return keys.stream().anyMatch(k -> !k.equals(keyToStorage(k)));
}
private Collection<? extends K> encodeKeysForWrite(Collection<? extends K> keys) {
if (needsEncoding(keys)) {
return keys.stream().map(this::keyToStorage).collect(Collectors.toCollection(ArrayList::new));
}
return keys;
}
public K keyToStorage(Object key) {
return (K) keyDataConversion.toStorage(key);
}
public V valueToStorage(Object value) {
return (V) valueDataConversion.toStorage(value);
}
public K keyFromStorage(Object key) {
return (K) keyDataConversion.fromStorage(key);
}
public V valueFromStorage(Object value) {
return (V) valueDataConversion.fromStorage(value);
}
@Inject
public void wireRealCache() {
componentRegistry.wireDependencies(keyDataConversion, false);
componentRegistry.wireDependencies(valueDataConversion, false);
componentRegistry.wireDependencies(cache, false);
}
private Map<K, V> encodeMapForWrite(Map<? extends K, ? extends V> map) {
Map<K, V> newMap = new HashMap<>(map.size());
map.forEach((k, v) -> newMap.put(keyToStorage(k), valueToStorage(v)));
return newMap;
}
private Map<K, V> decodeMapForRead(Map<? extends K, ? extends V> map) {
Map<K, V> newMap = new LinkedHashMap<>(map.size());
map.forEach((k, v) -> newMap.put(keyFromStorage(k), valueFromStorage(v)));
return newMap;
}
private CacheEntry<K, V> convertEntry(K newKey, V newValue, CacheEntry<K, V> entry) {
if (entry instanceof InternalCacheEntry) {
return entryFactory.create(newKey, newValue, (InternalCacheEntry) entry);
} else {
return entryFactory.create(newKey, newValue, entry.getMetadata().version(), entry.getCreated(),
entry.getLifespan(), entry.getLastUsed(), entry.getMaxIdle());
}
}
private BiFunction<? super K, ? super V, ? extends V> convertFunction(
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return (k, v) -> valueToStorage(remappingFunction.apply(keyFromStorage(k), valueFromStorage(v)));
}
private Map<K, CacheEntry<K, V>> decodeEntryMapForRead(Map<K, CacheEntry<K, V>> map) {
Map<K, CacheEntry<K, V>> entryMap = new HashMap<>(map.size());
map.values().forEach(v -> {
K originalKey = v.getKey();
K unwrappedKey = keyFromStorage(originalKey);
V originalValue = v.getValue();
V unwrappedValue = valueFromStorage(originalValue);
CacheEntry<K, V> entryToPut;
if (unwrappedKey != originalKey || unwrappedValue != originalValue) {
entryToPut = convertEntry(unwrappedKey, unwrappedValue, v);
} else {
entryToPut = v;
}
entryMap.put(unwrappedKey, entryToPut);
});
return entryMap;
}
@Override
public void putForExternalRead(K key, V value) {
cache.putForExternalRead(keyToStorage(key), valueToStorage(value));
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit unit) {
cache.putForExternalRead(keyToStorage(key), valueToStorage(value), lifespan, unit);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
cache.putForExternalRead(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public void evict(K key) {
cache.evict(keyToStorage(key));
}
@Override
public V put(K key, V value, long lifespan, TimeUnit unit) {
V ret = cache.put(keyToStorage(key), valueToStorage(value), lifespan, unit);
return valueFromStorage(ret);
}
@Override
public DataConversion getKeyDataConversion() {
return keyDataConversion;
}
@Override
public DataConversion getValueDataConversion() {
return valueDataConversion;
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit unit) {
V v = cache.putIfAbsent(keyToStorage(key), valueToStorage(value), lifespan, unit);
return valueFromStorage(v);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit unit) {
cache.putAll(encodeMapForWrite(map), lifespan, unit);
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit unit) {
V ret = cache.replace(keyToStorage(key), valueToStorage(value), lifespan, unit);
return valueFromStorage(ret);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit unit) {
return cache.replace(keyToStorage(key), valueToStorage(oldValue), valueToStorage(value), lifespan, unit);
}
@Override
public V put(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
V ret = cache.put(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(ret);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
V ret = cache.putIfAbsent(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(ret);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
cache.putAll(encodeMapForWrite(map), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
V ret = cache.replace(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(ret);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.replace(keyToStorage(key), valueToStorage(oldValue), valueToStorage(value), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
cache.replaceAll(convertFunction(function));
}
@Override
public CompletableFuture<V> putAsync(K key, V value) {
return cache.putAsync(keyToStorage(key), valueToStorage(value)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit unit) {
return cache.putAsync(keyToStorage(key), valueToStorage(value), lifespan, unit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.putAsync(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdle, maxIdleUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data) {
return cache.putAllAsync(encodeMapForWrite(data));
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit unit) {
return cache.putAllAsync(encodeMapForWrite(data), lifespan, unit);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.putAllAsync(encodeMapForWrite(data), lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> map, Metadata metadata) {
return cache.putAllAsync(encodeMapForWrite(map), metadata);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value) {
return cache.putIfAbsentAsync(keyToStorage(key), valueToStorage(value)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit unit) {
return cache.putIfAbsentAsync(keyToStorage(key), valueToStorage(value), lifespan, unit).thenApply(decodedValueForRead);
}
@Override
public boolean lock(K... keys) {
K[] encoded = (K[]) Arrays.stream(keys).map(this::keyToStorage).toArray();
return cache.lock(encoded);
}
@Override
public boolean lock(Collection<? extends K> keys) {
return cache.lock(encodeKeysForWrite(keys));
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.putIfAbsentAsync(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdle, maxIdleUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, Metadata metadata) {
return cache.putIfAbsentAsync(keyToStorage(key), valueToStorage(value), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(K key, V value, Metadata metadata) {
K keyToStorage = keyToStorage(key);
return cache.putIfAbsentAsyncEntry(keyToStorage, valueToStorage(value), metadata)
.thenApply(e -> unwrapCacheEntry(key, keyToStorage, e));
}
@Override
public CompletableFuture<V> removeAsync(Object key) {
return cache.removeAsync(keyToStorage(key)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<Boolean> removeAsync(Object key, Object value) {
return cache.removeAsync(keyToStorage(key), valueToStorage(value));
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(value)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit unit) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(value), lifespan, unit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, Metadata metadata) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(value), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(K key, V value, Metadata metadata) {
K keyToStorage = keyToStorage(key);
return cache.replaceAsyncEntry(keyToStorage, valueToStorage(value), metadata)
.thenApply(e -> unwrapCacheEntry(key, keyToStorage, e));
}
@Override
public Map<K, V> getAll(Set<?> keys) {
Map<K, V> ret = cache.getAll(encodeKeysForWrite(keys));
return decodeMapForRead(ret);
}
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
return cache.getAllAsync(encodeKeysForWrite(keys)).thenApply(this::decodeMapForRead);
}
@Override
public CacheEntry<K, V> getCacheEntry(Object key) {
K keyToStorage = keyToStorage(key);
CacheEntry<K, V> returned = cache.getCacheEntry(keyToStorage);
return unwrapCacheEntry(key, keyToStorage, returned);
}
@Override
public CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key) {
K keyToStorage = keyToStorage(key);
CompletableFuture<CacheEntry<K, V>> stage = cache.getCacheEntryAsync(keyToStorage);
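      // Fast path: when the returned future is already complete, unwrap synchronously
      // instead of allocating an extra thenApply stage.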
if (stage.isDone() && !stage.isCompletedExceptionally()) {
return CompletableFuture.completedFuture(unwrapCacheEntry(key, keyToStorage, stage.join()));
}
return stage.thenApply(returned -> unwrapCacheEntry(key, keyToStorage, returned));
}
private CacheEntry<K, V> unwrapCacheEntry(Object key, K keyToStorage, CacheEntry<K, V> returned) {
if (returned != null) {
V originalValue = returned.getValue();
V valueFromStorage = valueFromStorage(originalValue);
if (keyToStorage != key || valueFromStorage != originalValue) {
return convertEntry((K) key, valueFromStorage, returned);
}
}
return returned;
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(value), lifespan, lifespanUnit, maxIdle, maxIdleUnit)
.thenApply(decodedValueForRead);
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
Map<K, CacheEntry<K, V>> returned = cache.getAllCacheEntries(encodeKeysForWrite(keys));
return decodeEntryMapForRead(returned);
}
@Override
public Map<K, V> getGroup(String groupName) {
Map<K, V> ret = cache.getGroup(groupName);
return decodeMapForRead(ret);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(oldValue), valueToStorage(newValue));
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit unit) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(oldValue), valueToStorage(newValue), lifespan, unit);
}
@Override
public V put(K key, V value, Metadata metadata) {
V ret = cache.put(keyToStorage(key), valueToStorage(value), metadata);
return valueFromStorage(ret);
}
@Override
public V replace(K key, V value, Metadata metadata) {
V ret = cache.replace(keyToStorage(key), valueToStorage(value), metadata);
return valueFromStorage(ret);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(oldValue), valueToStorage(newValue), lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public boolean replace(K key, V oldValue, V value, Metadata metadata) {
return cache.replace(keyToStorage(key), valueToStorage(oldValue), valueToStorage(value), metadata);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, Metadata metadata) {
return cache.replaceAsync(keyToStorage(key), valueToStorage(oldValue), valueToStorage(newValue), metadata);
}
@Override
public V putIfAbsent(K key, V value, Metadata metadata) {
V ret = cache.putIfAbsent(keyToStorage(key), valueToStorage(value), metadata);
return valueFromStorage(ret);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, Metadata metadata) {
return cache.putAsync(keyToStorage(key), valueToStorage(value), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putAsyncEntry(K key, V value, Metadata metadata) {
K keyToStorage = keyToStorage(key);
return cache.putAsyncEntry(keyToStorage, valueToStorage(value), metadata)
.thenApply(e -> unwrapCacheEntry(key, keyToStorage, e));
}
@Override
public void putForExternalRead(K key, V value, Metadata metadata) {
cache.putForExternalRead(keyToStorage(key), valueToStorage(value), metadata);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, Metadata metadata) {
cache.putAll(encodeMapForWrite(map), metadata);
}
@Override
public CacheSet<CacheEntry<K, V>> cacheEntrySet() {
EncoderEntryMapper<K, V, CacheEntry<K, V>> cacheEntryMapper = EncoderEntryMapper.newCacheEntryMapper(
keyDataConversion, valueDataConversion, entryFactory);
return new WriteableCacheSetMapper<>(cache.cacheEntrySet(), cacheEntryMapper,
e -> new CacheEntryWrapper<>(cacheEntryMapper.apply(e), e), this::toCacheEntry, this::keyToStorage);
}
@Override
public CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan) {
return cache.removeLifespanExpired(keyToStorage(key), valueToStorage(value), lifespan);
}
@Override
public CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value) {
return cache.removeMaxIdleExpired(keyToStorage(key), valueToStorage(value));
}
@Override
public V putIfAbsent(K key, V value) {
V ret = cache.putIfAbsent(keyToStorage(key), valueToStorage(value));
return valueFromStorage(ret);
}
private void lookupEncoderWrapper() {
componentRegistry.wireDependencies(keyDataConversion, true);
componentRegistry.wireDependencies(valueDataConversion, true);
}
@Override
public AdvancedCache<K, V> withEncoding(Class<? extends Encoder> keyEncoderClass, Class<? extends Encoder> valueEncoderClass) {
checkSubclass(keyEncoderClass, Encoder.class);
checkSubclass(valueEncoderClass, Encoder.class);
DataConversion newKeyDataConversion = keyDataConversion.withEncoding(keyEncoderClass);
DataConversion newValueDataConversion = valueDataConversion.withEncoding(valueEncoderClass);
EncoderCache<K, V> encoderCache = new EncoderCache<>(cache, entryFactory, componentRegistry,
newKeyDataConversion, newValueDataConversion);
encoderCache.lookupEncoderWrapper();
return encoderCache;
}
@Override
public AdvancedCache<K, V> withEncoding(Class<? extends Encoder> encoderClass) {
checkSubclass(encoderClass, Encoder.class);
DataConversion newKeyDataConversion = keyDataConversion.withEncoding(encoderClass);
DataConversion newValueDataConversion = valueDataConversion.withEncoding(encoderClass);
EncoderCache<K, V> encoderCache = new EncoderCache<>(cache, entryFactory, componentRegistry,
newKeyDataConversion, newValueDataConversion);
encoderCache.lookupEncoderWrapper();
return encoderCache;
}
@Override
public AdvancedCache<K, V> withKeyEncoding(Class<? extends Encoder> encoderClass) {
checkSubclass(encoderClass, Encoder.class);
DataConversion newKeyDataConversion = keyDataConversion.withEncoding(encoderClass);
EncoderCache<K, V> encoderCache = new EncoderCache<>(cache, entryFactory, componentRegistry,
newKeyDataConversion, valueDataConversion);
encoderCache.lookupEncoderWrapper();
return encoderCache;
}
private void checkSubclass(Class<?> configured, Class<?> required) {
if (!required.isAssignableFrom(configured)) {
throw CONTAINER.invalidEncodingClass(configured, required);
}
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> keyWrapperClass, Class<? extends Wrapper> valueWrapperClass) {
checkSubclass(keyWrapperClass, Wrapper.class);
checkSubclass(valueWrapperClass, Wrapper.class);
DataConversion newKeyDataConversion = keyDataConversion.withWrapping(keyWrapperClass);
DataConversion newValueDataConversion = valueDataConversion.withWrapping(valueWrapperClass);
EncoderCache<K, V> encoderCache = new EncoderCache<>(cache, entryFactory, componentRegistry,
newKeyDataConversion, newValueDataConversion);
encoderCache.lookupEncoderWrapper();
return encoderCache;
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> wrapper) {
return withWrapping(wrapper, wrapper);
}
@Override
public AdvancedCache<K, V> withMediaType(String keyMediaType, String valueMediaType) {
MediaType kType = MediaType.fromString(keyMediaType);
MediaType vType = MediaType.fromString(valueMediaType);
return withMediaType(kType, vType);
}
@Override
public AdvancedCache<K, V> withMediaType(MediaType kType, MediaType vType) {
DataConversion newKeyDataConversion = keyDataConversion.withRequestMediaType(kType);
DataConversion newValueDataConversion = valueDataConversion.withRequestMediaType(vType);
EncoderCache<K, V> encoderCache = new EncoderCache<>(cache, entryFactory, componentRegistry,
newKeyDataConversion, newValueDataConversion);
encoderCache.lookupEncoderWrapper();
return encoderCache;
}
@Override
public AdvancedCache<K, V> withStorageMediaType() {
MediaType keyStorageMediaType = keyDataConversion.getStorageMediaType();
MediaType valueStorageMediaType = valueDataConversion.getStorageMediaType();
return withMediaType(keyStorageMediaType, valueStorageMediaType);
}
@Override
public boolean remove(Object key, Object value) {
return cache.remove(keyToStorage(key), valueToStorage(value));
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
return cache.replace(keyToStorage(key), valueToStorage(oldValue), valueToStorage(newValue));
}
@Override
public V replace(K key, V value) {
V ret = cache.replace(keyToStorage(key), valueToStorage(value));
return valueFromStorage(ret);
}
@Override
public boolean containsKey(Object key) {
return cache.containsKey(keyToStorage(key));
}
@Override
public boolean containsValue(Object value) {
return cache.containsValue(valueToStorage(value));
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
Object returned = cache.compute(keyToStorage(key), wrapBiFunction(remappingFunction));
return valueFromStorage(returned);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
Object returned = cache.compute(keyToStorage(key), wrapBiFunction(remappingFunction), metadata);
return valueFromStorage(returned);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Object returned = cache.compute(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit);
return valueFromStorage(returned);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Object returned = cache.compute(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(returned);
}
@Override
public V compute(K key, SerializableBiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
Object ret = cache.compute(keyToStorage(key), wrapBiFunction(remappingFunction), metadata);
return valueFromStorage(ret);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
Object returned = cache.computeIfPresent(keyToStorage(key), wrapBiFunction(remappingFunction));
return valueFromStorage(returned);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
Object returned = cache.computeIfPresent(keyToStorage(key), wrapBiFunction(remappingFunction), metadata);
return valueFromStorage(returned);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Object returned = cache.computeIfPresent(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit);
return valueFromStorage(returned);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Object returned = cache.computeIfPresent(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(returned);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
Object ret = cache.computeIfAbsent(keyToStorage(key), wrapFunction(mappingFunction));
return valueFromStorage(ret);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
Object ret = cache.computeIfAbsent(keyToStorage(key), wrapFunction(mappingFunction), metadata);
return valueFromStorage(ret);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
Object ret = cache.computeIfAbsent(keyToStorage(key), wrapFunction(mappingFunction), lifespan, lifespanUnit);
return valueFromStorage(ret);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Object ret = cache.computeIfAbsent(keyToStorage(key), wrapFunction(mappingFunction), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(ret);
}
@Override
public V computeIfAbsent(K key, SerializableFunction<? super K, ? extends V> mappingFunction, Metadata metadata) {
Object ret = cache.computeIfAbsent(keyToStorage(key), wrapFunction(mappingFunction), metadata);
return valueFromStorage(ret);
}
@Override
public V merge(K key, V value, SerializableBiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
Object ret = cache.merge(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), metadata);
return valueFromStorage(ret);
}
@Override
public CompletableFuture<V> computeAsync(K key, SerializableBiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeAsync(keyToStorage(key), wrapBiFunction(remappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, SerializableFunction<? super K, ? extends V> mappingFunction, Metadata metadata) {
return cache.computeIfAbsentAsync(keyToStorage(key), wrapFunction(mappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public V get(Object key) {
V v = cache.get(keyToStorage(key));
return valueFromStorage(v);
}
@Override
public V getOrDefault(Object key, V defaultValue) {
V returned = cache.getOrDefault(keyToStorage(key), defaultValue);
if (returned == defaultValue) {
return returned;
}
return valueFromStorage(returned);
}
@Override
public V put(K key, V value) {
V ret = cache.put(keyToStorage(key), valueToStorage(value));
if (ret == null) {
return null;
}
return valueFromStorage(ret);
}
@Override
public V remove(Object key) {
V ret = cache.remove(keyToStorage(key));
return valueFromStorage(ret);
}
@Override
public void putAll(Map<? extends K, ? extends V> t) {
cache.putAll(encodeMapForWrite(t));
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
Object returned = cache.merge(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction));
return valueFromStorage(returned);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
Object returned = cache.merge(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), metadata);
return valueFromStorage(returned);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
Object returned = cache.merge(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), lifespan, lifespanUnit);
return valueFromStorage(returned);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Object returned = cache.merge(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
return valueFromStorage(returned);
}
@Override
public V computeIfPresent(K key, SerializableBiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
Object returned = cache.computeIfPresent(keyToStorage(key), wrapBiFunction(remappingFunction), metadata);
return valueFromStorage(returned);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, SerializableBiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeIfPresentAsync(keyToStorage(key), wrapBiFunction(remappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeIfPresentAsync(keyToStorage(key), wrapBiFunction(remappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeIfPresentAsync(keyToStorage(key), wrapBiFunction(remappingFunction)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfPresentAsync(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeIfPresentAsync(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit, maxIdle, maxIdleUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, SerializableBiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.mergeAsync(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.mergeAsync(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), lifespan, lifespanUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.mergeAsync(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.mergeAsync(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return cache.mergeAsync(keyToStorage(key), valueToStorage(value), wrapBiFunction(remappingFunction)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cache.computeAsync(keyToStorage(key), wrapBiFunction(remappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return cache.computeIfAbsentAsync(keyToStorage(key), wrapFunction(mappingFunction), metadata).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeAsync(keyToStorage(key), wrapBiFunction(remappingFunction)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeAsync(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeAsync(keyToStorage(key), wrapBiFunction(remappingFunction), lifespan, lifespanUnit, maxIdle, maxIdleUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
return cache.computeIfAbsentAsync(keyToStorage(key), wrapFunction(mappingFunction)).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfAbsentAsync(keyToStorage(key), wrapFunction(mappingFunction), lifespan, lifespanUnit).thenApply(decodedValueForRead);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeIfAbsentAsync(keyToStorage(key), wrapFunction(mappingFunction), lifespan, lifespanUnit, maxIdle, maxIdleUnit).thenApply(decodedValueForRead);
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
cache.forEach((k, v) -> {
K newK = keyFromStorage(k);
V newV = valueFromStorage(v);
action.accept(newK, newV);
});
}
@Override
public CacheSet<K> keySet() {
InjectiveFunction<Object, K> keyToStorage = this::keyToStorage;
return new WriteableCacheSetMapper<>(cache.keySet(), new EncoderKeyMapper<>(keyDataConversion),
keyToStorage, keyToStorage);
}
@Override
public CacheSet<Map.Entry<K, V>> entrySet() {
EncoderEntryMapper<K, V, Map.Entry<K, V>> entryMapper = EncoderEntryMapper.newEntryMapper(keyDataConversion,
valueDataConversion, entryFactory);
return new WriteableCacheSetMapper<>(cache.entrySet(), entryMapper,
e -> new EntryWrapper<>(e, entryMapper.apply(e)), this::toEntry, this::keyToStorage);
}
@Override
public CompletionStage<Boolean> touch(Object key, boolean touchEvenIfExpired) {
return super.touch(keyToStorage(key), touchEvenIfExpired);
}
@Override
public CompletionStage<Boolean> touch(Object key, int segment, boolean touchEvenIfExpired) {
return super.touch(keyToStorage(key), segment, touchEvenIfExpired);
}
Map.Entry<K, V> toEntry(Object o) {
if (o instanceof Map.Entry) {
Map.Entry<K, V> entry = (Map.Entry<K, V>) o;
K key = entry.getKey();
K newKey = keyToStorage(key);
V value = entry.getValue();
V newValue = valueToStorage(value);
if (key != newKey || value != newValue) {
return new AbstractMap.SimpleEntry<>(newKey, newValue);
}
return entry;
}
return null;
}
CacheEntry<K, V> toCacheEntry(Object o) {
if (o instanceof Map.Entry) {
Map.Entry<K, V> entry = (Map.Entry<K, V>) o;
K key = entry.getKey();
K newKey = keyToStorage(key);
V value = entry.getValue();
V newValue = valueToStorage(value);
if (key != newKey || value != newValue) {
if (o instanceof CacheEntry) {
CacheEntry returned = (CacheEntry) o;
return convertEntry(newKey, newValue, returned);
} else {
return entryFactory.create(newKey, newValue, (Metadata) null);
}
}
if (entry instanceof CacheEntry) {
return (CacheEntry<K, V>) entry;
} else {
return entryFactory.create(key, value, (Metadata) null);
}
}
return null;
}
@Override
public CacheCollection<V> values() {
return new WriteableCacheCollectionMapper<>(cache.values(), new EncoderValueMapper<>(valueDataConversion),
this::valueToStorage, this::keyToStorage);
}
private class EntryWrapper<A, B> implements Entry<A, B> {
private final Entry<A, B> storageEntry;
private final Entry<A, B> entry;
EntryWrapper(Entry<A, B> storageEntry, Entry<A, B> entry) {
this.storageEntry = storageEntry;
this.entry = entry;
}
@Override
public A getKey() {
return entry.getKey();
}
@Override
public B getValue() {
return entry.getValue();
}
@Override
public B setValue(B value) {
storageEntry.setValue((B) valueToStorage(value));
return entry.setValue(value);
}
@Override
public String toString() {
return "EntryWrapper{" +
"key=" + entry.getKey() +
", value=" + entry.getValue() +
"}";
}
}
private class CacheEntryWrapper<A, B> extends ForwardingCacheEntry<A, B> {
private final CacheEntry<A, B> previousEntry;
private final CacheEntry<A, B> entry;
CacheEntryWrapper(CacheEntry<A, B> previousEntry, CacheEntry<A, B> entry) {
this.previousEntry = previousEntry;
this.entry = entry;
}
@Override
protected CacheEntry<A, B> delegate() {
return entry;
}
@Override
public B setValue(B value) {
previousEntry.setValue((B) valueToStorage(value));
return super.setValue(value);
}
}
@Override
public CompletableFuture<V> getAsync(K key) {
return cache.getAsync(keyToStorage(key)).thenApply(decodedValueForRead);
}
@Override
public void addListener(Object listener) {
CompletionStages.join(addListenerAsync(listener));
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
Cache unwrapped = unwrapCache(this.cache);
if (unwrapped instanceof CacheImpl) {
ListenerHolder listenerHolder = new ListenerHolder(listener, keyDataConversion, valueDataConversion, false);
return ((CacheImpl) unwrapped).addListenerAsync(listenerHolder);
} else {
return cache.addListenerAsync(listener);
}
}
@Override
public <C> void addListener(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter) {
CompletionStages.join(addListenerAsync(listener, filter, converter));
}
@Override
public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter) {
Cache unwrapped = unwrapCache(this.cache);
if (unwrapped instanceof CacheImpl) {
ListenerHolder listenerHolder = new ListenerHolder(listener, keyDataConversion, valueDataConversion, false);
return ((CacheImpl) unwrapped).addListenerAsync(listenerHolder, filter, converter);
} else {
         return cache.addListenerAsync(listener, filter, converter);
}
}
@Override
public <C> void addFilteredListener(Object listener,
CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
CompletionStages.join(addFilteredListenerAsync(listener, filter, converter, filterAnnotations));
}
@Override
public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener,
CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
Cache unwrapped = unwrapCache(this.cache);
if (unwrapped instanceof CacheImpl) {
ListenerHolder listenerHolder = new ListenerHolder(listener, keyDataConversion, valueDataConversion, false);
return ((CacheImpl) unwrapped).addFilteredListenerAsync(listenerHolder, filter, converter, filterAnnotations);
} else {
return cache.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
}
@Override
public <C> void addStorageFormatFilteredListener(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
CompletionStages.join(addStorageFormatFilteredListenerAsync(listener, filter, converter, filterAnnotations));
}
@Override
public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
Cache<?, ?> unwrapped = unwrapCache(this.cache);
if (unwrapped instanceof CacheImpl) {
ListenerHolder listenerHolder = new ListenerHolder(listener, keyDataConversion, valueDataConversion, true);
return ((CacheImpl) unwrapped).addFilteredListenerAsync(listenerHolder, filter, converter, filterAnnotations);
} else {
return cache.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
}
private BiFunctionMapper wrapBiFunction(BiFunction<?, ?, ?> biFunction) {
return biFunction == null ?
null :
new BiFunctionMapper(biFunction, keyDataConversion, valueDataConversion);
}
private FunctionMapper wrapFunction(Function<?, ?> function) {
return function == null ?
null :
new FunctionMapper(function, keyDataConversion, valueDataConversion);
}
@Override
public CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(Object key) {
K keyToStorage = keyToStorage(key);
return cache.removeAsyncEntry(keyToStorage).thenApply(e -> unwrapCacheEntry(key, keyToStorage, e));
}
}
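// Minimal sketch, not part of Infinispan: the boundary-conversion pattern EncoderCache
// applies above, reduced to a plain map. Every key and value is converted with
// "toStorage" on the way in and "fromStorage" on the way out; both Function fields are
// hypothetical stand-ins for the key/value DataConversion pair.
final class BoundaryConversionSketch<K, V> {
   private final Map<Object, Object> store = new HashMap<>();
   private final Function<Object, Object> toStorage;
   private final Function<Object, Object> fromStorage;

   BoundaryConversionSketch(Function<Object, Object> toStorage, Function<Object, Object> fromStorage) {
      this.toStorage = toStorage;
      this.fromStorage = fromStorage;
   }

   @SuppressWarnings("unchecked")
   V put(K key, V value) {
      Object previous = store.put(toStorage.apply(key), toStorage.apply(value));
      // Previous values come back in storage format and must be decoded for the caller.
      return previous == null ? null : (V) fromStorage.apply(previous);
   }

   @SuppressWarnings("unchecked")
   V get(Object key) {
      Object stored = store.get(toStorage.apply(key));
      return stored == null ? null : (V) fromStorage.apply(stored);
   }
}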
| 47,486
| 41.665768
| 247
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/CacheBackedKeySet.java
|
package org.infinispan.cache.impl;
import java.util.Map;
import java.util.function.Function;
import org.infinispan.context.InvocationContext;
/**
* Key set backed by a cache.
*
* @since 13.0
*/
public class CacheBackedKeySet<K, V> extends AbstractCacheBackedSet<K, V, K> {
public CacheBackedKeySet(CacheImpl<K, V> cache, Object lockOwner, long explicitFlags) {
super(cache, lockOwner, explicitFlags);
}
@Override
public boolean contains(Object o) {
InvocationContext ctx = cache.invocationContextFactory.createInvocationContext(false, 1);
return cache.containsKey(o, explicitFlags, ctx);
}
@Override
protected Function<Map.Entry<K, V>, ?> entryToKeyFunction() {
return null;
}
@Override
protected Object extractKey(Object e) {
return e;
}
@Override
protected K wrapElement(K e) {
return e;
}
}
| 892
| 21.897436
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/ContextBuilder.java
|
package org.infinispan.cache.impl;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
/**
* An interface to build {@link InvocationContext}.
*
* @author Pedro Ruivo
* @since 11.0
*/
@FunctionalInterface
public interface ContextBuilder {
/**
* Creates a new {@link InvocationContext}.
* <p>
    * The {@code keyCount} specifies the number of affected keys this context will handle. Use {@link
    * InvocationContextFactory#UNBOUNDED} to specify an unbounded number of keys.
    * <p>
    * Some implementations may ignore {@code keyCount}.
*
* @param keyCount The number of keys affected.
* @return An {@link InvocationContext} to use.
*/
InvocationContext create(int keyCount);
}
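// Illustrative sketch, not part of Infinispan: a ContextBuilder that simply delegates to an
// InvocationContextFactory. The "isWrite = true" argument below is an assumption made for
// the example; real callers choose it per operation.
final class FactoryBackedContextBuilder implements ContextBuilder {
   private final InvocationContextFactory factory;

   FactoryBackedContextBuilder(InvocationContextFactory factory) {
      this.factory = factory;
   }

   @Override
   public InvocationContext create(int keyCount) {
      // keyCount may be InvocationContextFactory.UNBOUNDED when the key count is unknown.
      return factory.createInvocationContext(true, keyCount);
   }
}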
| 775
| 26.714286
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/EncoderEntryMapper.java
|
package org.infinispan.cache.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
/**
 * {@link java.util.function.Function} that uses an encoder to convert entries from the configured storage format to
* the requested format.
*/
@Scope(Scopes.NAMED_CACHE)
public class EncoderEntryMapper<K, V, T extends Map.Entry<K, V>> implements EncodingFunction<T> {
@Inject
transient InternalEntryFactory entryFactory;
private final DataConversion keyDataConversion;
private final DataConversion valueDataConversion;
public static <K, V> EncoderEntryMapper<K, V, Map.Entry<K, V>> newEntryMapper(DataConversion keyDataConversion,
DataConversion valueDataConversion, InternalEntryFactory entryFactory) {
EncoderEntryMapper<K, V, Map.Entry<K, V>> mapper = new EncoderEntryMapper<>(keyDataConversion, valueDataConversion);
mapper.entryFactory = entryFactory;
return mapper;
}
public static <K, V> EncoderEntryMapper<K, V, CacheEntry<K, V>> newCacheEntryMapper(
DataConversion keyDataConversion, DataConversion valueDataConversion, InternalEntryFactory entryFactory) {
EncoderEntryMapper<K, V, CacheEntry<K, V>> mapper = new EncoderEntryMapper<>(keyDataConversion, valueDataConversion);
mapper.entryFactory = entryFactory;
return mapper;
}
private EncoderEntryMapper(DataConversion keyDataConversion, DataConversion valueDataConversion) {
this.keyDataConversion = keyDataConversion;
this.valueDataConversion = valueDataConversion;
}
@Inject
public void injectDependencies(ComponentRegistry registry) {
registry.wireDependencies(keyDataConversion);
registry.wireDependencies(valueDataConversion);
}
@Override
public T apply(T e) {
K key = e.getKey();
V value = e.getValue();
Object newKey = keyDataConversion.fromStorage(key);
Object newValue = valueDataConversion.fromStorage(value);
if (key != newKey || value != newValue) {
if (e instanceof CacheEntry) {
CacheEntry<K, V> ce = (CacheEntry<K, V>) e;
return (T) entryFactory.create(newKey, newValue, ce.getMetadata().version(), ce.getCreated(),
ce.getLifespan(), ce.getLastUsed(), ce.getMaxIdle());
} else {
return (T) entryFactory.create(newKey, newValue, (Metadata) null);
}
}
return e;
}
public static class Externalizer implements AdvancedExternalizer<EncoderEntryMapper> {
@Override
public Set<Class<? extends EncoderEntryMapper>> getTypeClasses() {
return Collections.singleton(EncoderEntryMapper.class);
}
@Override
public Integer getId() {
return Ids.ENCODER_ENTRY_MAPPER;
}
@Override
public void writeObject(ObjectOutput output, EncoderEntryMapper object) throws IOException {
DataConversion.writeTo(output, object.keyDataConversion);
DataConversion.writeTo(output, object.valueDataConversion);
}
@Override
@SuppressWarnings("unchecked")
public EncoderEntryMapper readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new EncoderEntryMapper(DataConversion.readFrom(input), DataConversion.readFrom(input));
}
}
}
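// Illustrative sketch, not part of Infinispan: the identity-preserving rule apply() uses
// above. When neither conversion changes the object reference, the original entry is
// returned untouched, avoiding a needless allocation. "decode" is a hypothetical
// stand-in for DataConversion.fromStorage.
final class IdentityPreservingSketch {
   private IdentityPreservingSketch() {
   }

   static Map.Entry<Object, Object> decodeEntry(Map.Entry<Object, Object> entry,
         java.util.function.Function<Object, Object> decode) {
      Object newKey = decode.apply(entry.getKey());
      Object newValue = decode.apply(entry.getValue());
      if (newKey == entry.getKey() && newValue == entry.getValue()) {
         return entry;
      }
      return new java.util.AbstractMap.SimpleImmutableEntry<>(newKey, newValue);
   }
}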
| 3,963
| 38.247525
| 153
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/FunctionMapper.java
|
package org.infinispan.cache.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.function.Function;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
@Scope(Scopes.NONE)
public class FunctionMapper implements Function {
private final DataConversion keyDataConversion;
private final DataConversion valueDataConversion;
private final Function function;
@Inject
public void injectDependencies(ComponentRegistry registry) {
registry.wireDependencies(keyDataConversion);
registry.wireDependencies(valueDataConversion);
}
public FunctionMapper(Function mappingFunction,
DataConversion keyDataConversion,
DataConversion valueDataConversion) {
this.function = mappingFunction;
this.keyDataConversion = keyDataConversion;
this.valueDataConversion = valueDataConversion;
}
@Override
public Object apply(Object k) {
Object key = keyDataConversion.fromStorage(k);
Object result = function.apply(key);
return result != null ? valueDataConversion.toStorage(result) : null;
}
public static class Externalizer implements AdvancedExternalizer<FunctionMapper> {
@Override
public Set<Class<? extends FunctionMapper>> getTypeClasses() {
return Collections.singleton(FunctionMapper.class);
}
@Override
public Integer getId() {
return Ids.FUNCTION_MAPPER;
}
@Override
public void writeObject(ObjectOutput output, FunctionMapper object) throws IOException {
output.writeObject(object.function);
DataConversion.writeTo(output, object.keyDataConversion);
DataConversion.writeTo(output, object.valueDataConversion);
}
@Override
public FunctionMapper readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new FunctionMapper((Function) input.readObject(),
DataConversion.readFrom(input), DataConversion.readFrom(input));
}
}
}
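// Generic sketch, not part of Infinispan: the adaptation FunctionMapper.apply() performs,
// written with explicit type parameters. The storage-format key is decoded, the user
// function runs on the decoded key, and a non-null result is encoded back to storage form.
final class FunctionAdapterSketch {
   private FunctionAdapterSketch() {
   }

   static <SK, UK, UV, SV> Function<SK, SV> adapt(Function<UK, UV> userFunction,
         Function<SK, UK> decodeKey, Function<UV, SV> encodeValue) {
      return storageKey -> {
         UV result = userFunction.apply(decodeKey.apply(storageKey));
         return result != null ? encodeValue.apply(result) : null;
      };
   }
}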
| 2,424
| 32.219178
| 102
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/DecoratedCache.java
|
package org.infinispan.cache.impl;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.lang.annotation.Annotation;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Function;
import org.infinispan.AdvancedCache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.LockedStream;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.stream.StreamMarshalling;
import org.infinispan.stream.impl.local.ValueCacheCollection;
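// Hypothetical usage sketch, not part of Infinispan: the pattern recommended by the
// DecoratedCache javadoc below. The decorated instance is created once and reused, so the
// Flag set is resolved a single time instead of on every call. "keys" is an assumed input.
final class DecoratedCacheUsageSketch {
   private DecoratedCacheUsageSketch() {
   }

   static void readAllSkippingLoader(AdvancedCache<String, String> cache, Iterable<String> keys) {
      AdvancedCache<String, String> skipLoad = cache.withFlags(Flag.SKIP_CACHE_LOAD);
      for (String key : keys) {
         skipLoad.get(key);
      }
   }
}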
/**
 * A decorator for a cache, built with a specific set of {@link Flag}s. This
 * set of {@link Flag}s will be applied to all cache invocations made via this decorator.
 * <p/>
 * In addition to cleaner and more readable code, this approach offers a performance benefit over repeated use of the
 * {@link AdvancedCache#withFlags(Flag...)} API, thanks to
 * internal optimizations that are possible when the {@link Flag} set is unchanging.
 * <p/>
 * Note that {@link DecoratedCache} must be the delegate closest to the actual Cache implementation. All other
 * delegates must delegate to this DecoratedCache.
* @author Manik Surtani
* @author Sanne Grinovero
* @author Tristan Tarrant
* @see AdvancedCache#withFlags(Flag...)
* @since 5.1
*/
public class DecoratedCache<K, V> extends AbstractDelegatingAdvancedCache<K, V> {
private final long flags;
private final Object lockOwner;
private final CacheImpl<K, V> cacheImplementation;
private final ContextBuilder contextBuilder = this::writeContext;
public DecoratedCache(CacheImpl<K, V> delegate, long flagsBitSet) {
this(delegate, null, flagsBitSet);
}
public DecoratedCache(CacheImpl<K, V> delegate, Object lockOwner, long newFlags) {
super(delegate);
this.flags = newFlags;
this.lockOwner = lockOwner;
this.cacheImplementation = delegate;
}
@Override
public AdvancedCache rewrap(AdvancedCache newDelegate) {
throw new UnsupportedOperationException("Decorated caches should not delegate wrapping operations");
}
@Override
public AdvancedCache<K, V> with(final ClassLoader classLoader) {
if (classLoader == null) throw new IllegalArgumentException("ClassLoader cannot be null!");
return this;
}
@Override
public AdvancedCache<K, V> withFlags(final Flag... flags) {
return withFlags(EnumUtil.bitSetOf(flags));
}
@Override
public AdvancedCache<K, V> withFlags(Collection<Flag> flags) {
return withFlags(EnumUtil.bitSetOf(flags));
}
@Override
public AdvancedCache<K, V> withFlags(Flag flag) {
return withFlags(EnumUtil.bitSetOf(flag));
}
private AdvancedCache<K, V> withFlags(long newFlags) {
if (EnumUtil.containsAll(this.flags, newFlags)) {
//we already have all specified flags
return this;
} else {
return new DecoratedCache<>(this.cacheImplementation, lockOwner, EnumUtil.mergeBitSets(this.flags, newFlags));
}
}
@Override
public AdvancedCache<K, V> noFlags() {
if (lockOwner == null) {
return this.cacheImplementation;
} else {
return new DecoratedCache<>(this.cacheImplementation, lockOwner, 0L);
}
}
@Override
public AdvancedCache<K, V> withEncoding(Class<? extends Encoder> encoderClass) {
throw new UnsupportedOperationException("Encoding requires EncoderCache");
}
@Override
public AdvancedCache<K, V> withEncoding(Class<? extends Encoder> keyEncoderClass, Class<? extends Encoder> valueEncoderClass) {
throw new UnsupportedOperationException("Encoding requires EncoderCache");
}
@Deprecated
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> wrapperClass) {
throw new UnsupportedOperationException("Wrapping requires EncoderCache");
}
@Deprecated
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> keyWrapperClass, Class<? extends Wrapper> valueWrapperClass) {
throw new UnsupportedOperationException("Wrapping requires EncoderCache");
}
@Override
public AdvancedCache<K, V> lockAs(Object lockOwner) {
Objects.requireNonNull(lockOwner);
if (lockOwner != this.lockOwner) {
return new DecoratedCache<>(cacheImplementation, lockOwner, flags);
}
return this;
}
public Object getLockOwner() {
return lockOwner;
}
@Override
public LockedStream<K, V> lockedStream() {
assertNoLockOwner("lockedStream");
return cache.lockedStream();
}
@Override
public ClassLoader getClassLoader() {
return cacheImplementation.getClassLoader();
}
@Override
public void stop() {
cacheImplementation.stop();
}
@Override
public boolean lock(K... keys) {
assertNoLockOwner("lock");
return cacheImplementation.lock(Arrays.asList(keys), flags);
}
@Override
public boolean lock(Collection<? extends K> keys) {
assertNoLockOwner("lock");
return cacheImplementation.lock(keys, flags);
}
@Override
public void putForExternalRead(K key, V value) {
putForExternalRead(key, value, cacheImplementation.defaultMetadata);
}
@Override
public void putForExternalRead(K key, V value, Metadata metadata) {
assertNoLockOwner("putForExternalRead");
cacheImplementation.putForExternalRead(key, value, metadata, flags);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
putForExternalRead(key, value, metadata);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit)
.build();
putForExternalRead(key, value, metadata);
}
@Override
public void evict(K key) {
cacheImplementation.evict(key, flags);
}
@Override
public V put(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return put(key, value, metadata);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return putIfAbsent(key, value, metadata);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
putAll(map, metadata);
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return replace(key, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return replace(key, oldValue, value, metadata);
}
@Override
public V put(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit)
.build();
return put(key, value, metadata);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit)
.build();
return putIfAbsent(key, value, metadata);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit)
.build();
putAll(map, metadata);
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit)
.build();
return replace(key, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdleTime, maxIdleTimeUnit)
.build();
return replace(key, oldValue, value, metadata);
}
@Override
public CompletableFuture<V> putAsync(K key, V value) {
return putAsync(key, value, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return putAsync(key, value, metadata);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit)
.build();
return putAsync(key, value, metadata);
}
private void assertNoLockOwner(String name) {
if (lockOwner != null) {
throw new IllegalStateException(name + " method cannot be used when a lock owner is configured");
}
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data) {
return putAllAsync(data, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return putAllAsync(data, metadata);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit)
.build();
return putAllAsync(data, metadata);
}
@Override
public CompletableFuture<Void> putAllAsync(final Map<? extends K, ? extends V> data, final Metadata metadata) {
return cacheImplementation.putAllAsync(data, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<Void> clearAsync() {
return cacheImplementation.clearAsync(flags);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value) {
return putIfAbsentAsync(key, value, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return putIfAbsentAsync(key, value, metadata);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit)
.build();
return putIfAbsentAsync(key, value, metadata);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(final K key, final V value, final Metadata metadata) {
return cacheImplementation.putIfAbsentAsync(key, value, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(K key, V value, Metadata metadata) {
return cacheImplementation.putIfAbsentAsyncEntry(key, value, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<V> removeAsync(Object key) {
return cacheImplementation.removeAsync(key, flags, contextBuilder);
}
@Override
public CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(Object key) {
return cacheImplementation.removeAsyncEntry(key, flags, contextBuilder);
}
@Override
public CompletableFuture<Boolean> removeAsync(Object key, Object value) {
return cacheImplementation.removeAsync(key, value, flags, contextBuilder);
}
@Override
public CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan) {
return cacheImplementation.removeLifespanExpired(key, value, lifespan, flags);
}
@Override
public CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value) {
return cacheImplementation.removeMaxIdleExpired(key, value, flags);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value) {
return replaceAsync(key, value, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return replaceAsync(key, value, metadata);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit)
.build();
return replaceAsync(key, value, metadata);
}
@Override
public CompletableFuture<V> replaceAsync(final K key, final V value, final Metadata metadata) {
return cacheImplementation.replaceAsync(key, value, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(K key, V value, Metadata metadata) {
return cacheImplementation.replaceAsyncEntry(key, value, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
return replaceAsync(key, oldValue, newValue, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit unit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, unit)
.maxIdle(cacheImplementation.defaultMetadata.maxIdle(), MILLISECONDS)
.build();
return replaceAsync(key, oldValue, newValue, metadata);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan, lifespanUnit)
.maxIdle(maxIdle, maxIdleUnit)
.build();
return replaceAsync(key, oldValue, newValue, metadata);
}
@Override
public CompletableFuture<Boolean> replaceAsync(final K key, final V oldValue, final V newValue,
final Metadata metadata) {
return cacheImplementation.replaceAsync(key, oldValue, newValue, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<V> getAsync(K key) {
return cacheImplementation.getAsync(key, flags, readContext(1));
}
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
return cacheImplementation.getAllAsync(keys, flags, readContext(keys.size()));
}
@Override
public int size() {
return cacheImplementation.size(flags);
}
@Override
public CompletableFuture<Long> sizeAsync() {
return cacheImplementation.sizeAsync(flags);
}
@Override
public boolean isEmpty() {
return cacheImplementation.isEmpty(flags);
}
@Override
public boolean containsKey(Object key) {
return cacheImplementation.containsKey(key, flags, readContext(1));
}
@Override
public boolean containsValue(Object value) {
Objects.requireNonNull(value);
return values().stream().anyMatch(StreamMarshalling.equalityPredicate(value));
}
@Override
public V get(Object key) {
return cacheImplementation.get(key, flags, readContext(1));
}
@Override
public Map<K, V> getAll(Set<?> keys) {
return cacheImplementation.getAll(keys, flags, readContext(keys.size()));
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
return cacheImplementation.getAllCacheEntries(keys, flags, readContext(keys.size()));
}
@Override
public V put(K key, V value) {
return put(key, value, cacheImplementation.defaultMetadata);
}
@Override
public V remove(Object key) {
return cacheImplementation.remove(key, flags, contextBuilder);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, Metadata metadata) {
cacheImplementation.putAll(map, metadata, flags, contextBuilder);
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
putAll(m, cacheImplementation.defaultMetadata);
}
@Override
public void clear() {
cacheImplementation.clear(flags);
}
@Override
public CacheSet<K> keySet() {
return cacheImplementation.keySet(flags, lockOwner);
}
@Override
public Map<K, V> getGroup(String groupName) {
return cacheImplementation.getGroup(groupName, flags);
}
@Override
public void removeGroup(String groupName) {
assertNoLockOwner("removeGroup");
cacheImplementation.removeGroup(groupName, flags);
}
@Override
public CacheCollection<V> values() {
return new ValueCacheCollection<>(this, cacheEntrySet());
}
@Override
public CacheSet<Entry<K, V>> entrySet() {
return cacheImplementation.entrySet(flags, lockOwner);
}
@Override
public CacheSet<CacheEntry<K, V>> cacheEntrySet() {
return cacheImplementation.cacheEntrySet(flags, lockOwner);
}
@Override
public V putIfAbsent(K key, V value) {
return putIfAbsent(key, value, cacheImplementation.defaultMetadata);
}
@Override
public boolean remove(Object key, Object value) {
return cacheImplementation.remove(key, value, flags, contextBuilder);
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
return replace(key, oldValue, newValue, cacheImplementation.defaultMetadata);
}
@Override
public V replace(K key, V value) {
return replace(key, value, cacheImplementation.defaultMetadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return compute(key, remappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return computeIfPresent(key, remappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
return computeIfAbsent(key, mappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return merge(key, value, remappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return computeAsync(key, remappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return computeIfPresentAsync(key, remappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
return computeIfAbsentAsync(key, mappingFunction, cacheImplementation.defaultMetadata);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return mergeAsync(key, value, remappingFunction, cacheImplementation.defaultMetadata);
}
   // Not exposed on interface
public long getFlagsBitSet() {
return flags;
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
return cacheImplementation.notifier.addListenerAsync(listener, null);
}
@Override
public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter) {
return cacheImplementation.notifier.addListenerAsync(listener, filter, converter);
}
@Override
public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
return cacheImplementation.notifier.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
return cacheImplementation.notifier.addStorageFormatFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public V put(K key, V value, Metadata metadata) {
return cacheImplementation.put(key, value, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, Metadata metadata) {
return cacheImplementation.putAsync(key, value, metadata, flags, contextBuilder);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putAsyncEntry(K key, V value, Metadata metadata) {
return cacheImplementation.putAsyncEntry(key, value, metadata, flags, contextBuilder);
}
@Override
public V putIfAbsent(K key, V value, Metadata metadata) {
return cacheImplementation.putIfAbsent(key, value, metadata, flags, contextBuilder);
}
@Override
public boolean replace(K key, V oldValue, V value, Metadata metadata) {
return cacheImplementation.replace(key, oldValue, value, metadata, flags, contextBuilder);
}
@Override
public V replace(K key, V value, Metadata metadata) {
return cacheImplementation.replace(key, value, metadata, flags, contextBuilder);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cacheImplementation.computeInternal(key, remappingFunction, false, metadata, flags, contextBuilder);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cacheImplementation.computeInternal(key, remappingFunction, true, metadata, flags, contextBuilder);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
return cacheImplementation.computeIfAbsentInternal(key, mappingFunction, metadata, flags, contextBuilder);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
return cacheImplementation.mergeInternal(key, value, remappingFunction, metadata, flags, contextBuilder);
}
@Override
public CacheEntry<K, V> getCacheEntry(Object key) {
return cacheImplementation.getCacheEntry(key, flags, readContext(1));
}
@Override
public CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key) {
return cacheImplementation.getCacheEntryAsync(key, flags, readContext(1));
}
protected InvocationContext readContext(int size) {
InvocationContext ctx = cacheImplementation.invocationContextFactory.createInvocationContext(false, size);
if (lockOwner != null) {
ctx.setLockOwner(lockOwner);
}
return ctx;
}
protected InvocationContext writeContext(int size) {
InvocationContext ctx = cacheImplementation.defaultContextBuilderForWrite().create(size);
if (lockOwner != null) {
ctx.setLockOwner(lockOwner);
}
return ctx;
}
}
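A minimal, self-contained sketch of how the flag decoration above is normally reached through the public API; the cache name and configuration are illustrative assumptions, not taken from this file:

import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.manager.DefaultCacheManager;

public class DecoratedCacheUsageSketch {
   public static void main(String[] args) throws Exception {
      try (DefaultCacheManager manager = new DefaultCacheManager()) {
         manager.defineConfiguration("demo", new ConfigurationBuilder().build());
         Cache<String, String> cache = manager.getCache("demo");
         // withFlags() returns a flag-decorated view; the underlying data is shared.
         AdvancedCache<String, String> quiet = cache.getAdvancedCache()
               .withFlags(Flag.IGNORE_RETURN_VALUES);
         quiet.put("k", "v");                 // flagged write, previous value not returned
         System.out.println(cache.get("k"));  // reads through the plain view: prints "v"
      }
   }
}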
| 26,880
| 34.046936
| 247
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/StatsCollectingCache.java
|
package org.infinispan.cache.impl;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Function;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.metadata.Metadata;
import org.infinispan.stats.Stats;
import org.infinispan.stats.impl.StatsCollector;
import org.infinispan.stats.impl.StatsImpl;
/**
 * Wraps an existing {@link AdvancedCache} to collect statistics.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
public class StatsCollectingCache<K, V> extends SimpleCacheImpl<K, V> {
@Inject StatsCollector statsCollector;
@Inject TimeService timeService;
public StatsCollectingCache(String cacheName) {
super(cacheName);
}
@Override
public AdvancedCache<K, V> withFlags(Flag... flags) {
return this;
}
@Override
public AdvancedCache<K, V> with(ClassLoader classLoader) {
return this;
}
@Override
public Stats getStats() {
return StatsImpl.create(statsCollector);
}
@Override
public V get(Object key) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V value = super.get(key);
if (statisticsEnabled) {
long end = timeService.time();
if (value == null) {
statsCollector.recordMisses(1, end - start);
} else {
statsCollector.recordHits(1, end - start);
}
}
return value;
}
@Override
public CacheEntry<K, V> getCacheEntry(Object k) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
CacheEntry<K, V> entry = super.getCacheEntry(k);
if (statisticsEnabled) {
long end = timeService.time();
if (entry == null) {
statsCollector.recordMisses(1, end - start);
} else {
statsCollector.recordHits(1, end - start);
}
}
return entry;
}
@Override
public Map<K, V> getAll(Set<?> keys) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
Map<K, V> map = super.getAll(keys);
if (statisticsEnabled) {
long end = timeService.time();
int requests = keys.size();
int hits = 0;
for (V value : map.values()) {
if (value != null) hits++;
}
int misses = requests - hits;
if (hits > 0) {
statsCollector.recordHits(hits, hits * (end - start) / requests);
}
if (misses > 0) {
statsCollector.recordMisses(misses, misses * (end - start) / requests);
}
}
return map;
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
Map<K, CacheEntry<K, V>> map = super.getAllCacheEntries(keys);
if (statisticsEnabled) {
long end = timeService.time();
int requests = keys.size();
int hits = 0;
for (CacheEntry<K, V> entry : map.values()) {
if (entry != null && entry.getValue() != null) hits++;
}
int misses = requests - hits;
if (hits > 0) {
statsCollector.recordHits(hits, hits * (end - start) / requests);
}
if (misses > 0) {
statsCollector.recordMisses(misses, misses * (end - start) / requests);
}
}
return map;
}
@Override
public Map<K, V> getAndPutAll(Map<? extends K, ? extends V> entries) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
Map<K, V> map = super.getAndPutAll(entries);
if (statisticsEnabled) {
long end = timeService.time();
statsCollector.recordStores(entries.size(), end - start);
}
return map;
}
@Override
public void evict(K key) {
super.evict(key);
statsCollector.recordEviction();
}
@Override
protected V getAndPutInternal(K key, V value, Metadata metadata) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.getAndPutInternal(key, value, metadata);
if (statisticsEnabled) {
long end = timeService.time();
statsCollector.recordStores(1, end - start);
}
return ret;
}
@Override
protected V getAndReplaceInternal(K key, V value, Metadata metadata) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.getAndReplaceInternal(key, value, metadata);
if (statisticsEnabled && ret != null) {
long end = timeService.time();
statsCollector.recordStores(1, end - start);
}
return ret;
}
@Override
protected void putForExternalReadInternal(K key, V value, Metadata metadata, ByRef.Boolean isCreatedRef) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
super.putForExternalReadInternal(key, value, metadata, isCreatedRef);
if (statisticsEnabled && isCreatedRef.get()) {
long end = timeService.time();
statsCollector.recordStores(1, end - start);
}
}
@Override
protected V putIfAbsentInternal(K key, V value, Metadata metadata) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.putIfAbsentInternal(key, value, metadata);
if (statisticsEnabled && ret == null) {
long end = timeService.time();
statsCollector.recordStores(1, end - start);
}
return ret;
}
@Override
public V remove(Object key) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.remove(key);
if (statisticsEnabled) {
long end = timeService.time();
if (ret != null) {
statsCollector.recordRemoveHits(1, end - start);
} else {
statsCollector.recordRemoveMisses(1);
}
}
return ret;
}
@Override
public boolean remove(Object key, Object value) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
boolean removed = super.remove(key, value);
if (statisticsEnabled) {
long end = timeService.time();
if (removed) {
statsCollector.recordRemoveHits(1, end - start);
} else {
statsCollector.recordRemoveMisses(1);
}
}
return removed;
}
@Override
protected boolean replaceInternal(K key, V oldValue, V value, Metadata metadata) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
boolean replaced = super.replaceInternal(key, oldValue, value, metadata);
if (statisticsEnabled && replaced) {
long end = timeService.time();
statsCollector.recordStores(1, end - start);
}
return replaced;
}
@Override
protected V computeIfAbsentInternal(K key, Function<? super K, ? extends V> mappingFunction, ByRef<V> newValueRef) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.computeIfAbsentInternal(key, mappingFunction, newValueRef);
if (statisticsEnabled) {
long end = timeService.time();
if (newValueRef.get() != null) {
statsCollector.recordStores(1, end - start);
}
}
return ret;
}
@Override
protected V computeIfPresentInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.computeIfPresentInternal(key, remappingFunction, ref);
if (statisticsEnabled) {
long end = timeService.time();
if (ref.getNewValue() != null) {
statsCollector.recordStores(1, end - start);
} else if (ref.getKey() != null) {
statsCollector.recordRemoveHits(1, end - start);
}
}
return ret;
}
@Override
protected V computeInternal(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.computeInternal(key, remappingFunction, ref);
if (statisticsEnabled) {
long end = timeService.time();
if (ref.getNewValue() != null) {
statsCollector.recordStores(1, end - start);
} else if (ref.getKey() != null) {
statsCollector.recordRemoveHits(1, end - start);
}
}
return ret;
}
@Override
protected V mergeInternal(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, CacheEntryChange<K, V> ref, Metadata metadata) {
boolean statisticsEnabled = statsCollector.getStatisticsEnabled();
long start = 0;
if (statisticsEnabled) {
start = timeService.time();
}
V ret = super.mergeInternal(key, value, remappingFunction, ref, metadata);
if (statisticsEnabled) {
long end = timeService.time();
if (ref.getNewValue() != null) {
statsCollector.recordStores(1, end - start);
} else if (ref.getKey() != null) {
statsCollector.recordRemoveHits(1, end - start);
}
}
return ret;
}
@Override
public String toString() {
return "StatsCollectingCache '" + getName() + "'";
}
}
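A self-contained sketch of the hit/miss timing pattern repeated in the methods above, with plain adders standing in for StatsCollector and System.nanoTime() for TimeService (both stand-ins are assumptions for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.LongAdder;

public class TimedReadSketch {
   private final Map<String, String> data = new HashMap<>();
   private final LongAdder hits = new LongAdder();
   private final LongAdder misses = new LongAdder();

   String get(String key) {
      long start = System.nanoTime();      // stands in for timeService.time()
      String value = data.get(key);
      long elapsed = System.nanoTime() - start;
      if (value == null) {
         misses.increment();               // stands in for recordMisses(1, elapsed)
      } else {
         hits.increment();                 // stands in for recordHits(1, elapsed)
      }
      return value;
   }

   public static void main(String[] args) {
      TimedReadSketch sketch = new TimedReadSketch();
      sketch.data.put("k", "v");
      sketch.get("k");                     // hit
      sketch.get("missing");               // miss
      System.out.println(sketch.hits.sum() + " hit(s), " + sketch.misses.sum() + " miss(es)");
   }
}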
| 11,064
| 30.704871
| 158
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/AbstractDelegatingCache.java
|
package org.infinispan.cache.impl;
import java.lang.annotation.Annotation;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.configuration.format.PropertyFormatter;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
/**
* This is a convenient base class for implementing a cache delegate. The only constructor takes a {@link Cache}
 * argument, to which each method call is delegated. One can extend this class and override the subset of methods it
 * is interested in. There is also a similar implementation for {@link org.infinispan.AdvancedCache}: {@link
 * org.infinispan.cache.impl.AbstractDelegatingAdvancedCache}.
*
* @author Mircea.Markus@jboss.com
* @see org.infinispan.cache.impl.AbstractDelegatingAdvancedCache
*/
@MBean(objectName = CacheImpl.OBJECT_NAME, description = "Component that represents an individual cache instance.")
public abstract class AbstractDelegatingCache<K, V> implements Cache<K, V> {
protected final Cache<K, V> cache;
public AbstractDelegatingCache(Cache<K, V> cache) {
this.cache = cache;
if (cache == null) throw new IllegalArgumentException("Delegate cache cannot be null!");
}
@Override
public void putForExternalRead(K key, V value) {
cache.putForExternalRead(key, value);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit unit) {
cache.putForExternalRead(key, value, lifespan, unit);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
cache.putForExternalRead(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public void evict(K key) {
cache.evict(key);
}
@Override
public org.infinispan.configuration.cache.Configuration getCacheConfiguration() {
return cache.getCacheConfiguration();
}
@Override
public boolean startBatch() {
return cache.startBatch();
}
@Override
public void endBatch(boolean successful) {
cache.endBatch(successful);
}
@Override
public String getName() {
return cache.getName();
}
@ManagedAttribute(
description = "Returns the cache name",
displayName = "Cache name",
dataType = DataType.TRAIT
)
public String getCacheName() {
return getName() + "(" + getCacheConfiguration().clustering().cacheMode().toString().toLowerCase() + ")";
}
@Override
@ManagedAttribute(
description = "Returns the version of Infinispan",
displayName = "Infinispan version",
dataType = DataType.TRAIT
)
public String getVersion() {
return cache.getVersion();
}
@Override
public EmbeddedCacheManager getCacheManager() {
return cache.getCacheManager();
}
@Override
public V put(K key, V value, long lifespan, TimeUnit unit) {
return cache.put(key, value, lifespan, unit);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit unit) {
return cache.putIfAbsent(key, value, lifespan, unit);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit unit) {
cache.putAll(map, lifespan, unit);
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit unit) {
return cache.replace(key, value, lifespan, unit);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit unit) {
return cache.replace(key, oldValue, value, lifespan, unit);
}
@Override
public V put(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.put(key, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.putIfAbsent(key, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
cache.putAll(map, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.replace(key, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.replace(key, oldValue, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
cache.replaceAll(function);
}
@Override
public CompletableFuture<V> putAsync(K key, V value) {
return cache.putAsync(key, value);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit unit) {
return cache.putAsync(key, value, lifespan, unit);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.putAsync(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data) {
return cache.putAllAsync(data);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit unit) {
return cache.putAllAsync(data, lifespan, unit);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.putAllAsync(data, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<Void> clearAsync() {
return cache.clearAsync();
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value) {
return cache.putIfAbsentAsync(key, value);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit unit) {
return cache.putIfAbsentAsync(key, value, lifespan, unit);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.putIfAbsentAsync(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> removeAsync(Object key) {
return cache.removeAsync(key);
}
@Override
public CompletableFuture<Boolean> removeAsync(Object key, Object value) {
return cache.removeAsync(key, value);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value) {
return cache.replaceAsync(key, value);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit unit) {
return cache.replaceAsync(key, value, lifespan, unit);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.replaceAsync(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
return cache.replaceAsync(key, oldValue, newValue);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit unit) {
return cache.replaceAsync(key, oldValue, newValue, lifespan, unit);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.replaceAsync(key, oldValue, newValue, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeAsync(key, remappingFunction);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeAsync(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeAsync(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
return cache.computeIfAbsentAsync(key, mappingFunction);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfAbsentAsync(key, mappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeIfAbsentAsync(key, mappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeIfPresentAsync(key, remappingFunction);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfPresentAsync(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.computeIfPresentAsync(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return cache.mergeAsync(key, value, remappingFunction);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.mergeAsync(key, value, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
return cache.mergeAsync(key, value, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public AdvancedCache<K, V> getAdvancedCache() {
return cache.getAdvancedCache();
}
@Override
public ComponentStatus getStatus() {
return cache.getStatus();
}
   /**
    * Returns the String representation of the ComponentStatus enumeration in order to avoid class-not-found
    * exceptions in JMX tools that don't have access to Infinispan classes.
    */
@ManagedAttribute(
description = "Returns the cache status",
displayName = "Cache status",
dataType = DataType.TRAIT
)
public String getCacheStatus() {
return getStatus().toString();
}
@Override
public V putIfAbsent(K key, V value) {
return cache.putIfAbsent(key, value);
}
@Override
public boolean remove(Object key, Object value) {
return cache.remove(key, value);
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
return cache.replace(key, oldValue, newValue);
}
@Override
public V replace(K key, V value) {
return cache.replace(key, value);
}
@Override
public int size() {
return cache.size();
}
@Override
public CompletableFuture<Long> sizeAsync() {
return cache.sizeAsync();
}
@Override
public boolean isEmpty() {
return cache.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return cache.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return cache.containsValue(value);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.compute(key, remappingFunction);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.compute(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.compute(key, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return cache.computeIfPresent(key, remappingFunction);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfPresent(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.computeIfPresent(key, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
return cache.computeIfAbsent(key, mappingFunction);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.computeIfAbsent(key, mappingFunction, lifespan, lifespanUnit);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.computeIfAbsent(key, mappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V get(Object key) {
return cache.get(key);
}
@Override
public V getOrDefault(Object key, V defaultValue) {
return cache.getOrDefault(key, defaultValue);
}
@Override
public V put(K key, V value) {
return cache.put(key, value);
}
@Override
public V remove(Object key) {
return cache.remove(key);
}
@Override
public void putAll(Map<? extends K, ? extends V> t) {
cache.putAll(t);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
return cache.merge(key, value, remappingFunction);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
return cache.merge(key, value, remappingFunction, lifespan, lifespanUnit);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
return cache.merge(key, value, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
cache.forEach(action);
}
@Override
@ManagedOperation(
description = "Clears the cache",
displayName = "Clears the cache", name = "clear"
)
public void clear() {
cache.clear();
}
@Override
public CacheSet<K> keySet() {
return cache.keySet();
}
@Override
public CacheSet<Entry<K, V>> entrySet() {
return cache.entrySet();
}
@Override
public CacheCollection<V> values() {
return cache.values();
}
@Override
@ManagedOperation(
description = "Starts the cache.",
displayName = "Starts cache."
)
public void start() {
cache.start();
}
@Override
@ManagedOperation(
description = "Stops the cache.",
displayName = "Stops cache."
)
public void stop() {
cache.stop();
}
@Override
@ManagedOperation(
description = "Shuts down the cache across the cluster",
displayName = "Clustered cache shutdown"
)
public void shutdown() {
cache.shutdown();
}
@Override
public void addListener(Object listener) {
cache.addListener(listener);
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
return cache.addListenerAsync(listener);
}
@Override
public <C> void addListener(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter) {
cache.addListener(listener, filter, converter);
}
@Override
public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter) {
return cache.addListenerAsync(listener, filter, converter);
}
@Override
public void removeListener(Object listener) {
cache.removeListener(listener);
}
@Override
public CompletionStage<Void> removeListenerAsync(Object listener) {
return cache.removeListenerAsync(listener);
}
@Deprecated
@Override
public Set<Object> getListeners() {
return cache.getListeners();
}
@Override
public <C> void addFilteredListener(Object listener,
CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
cache.addFilteredListener(listener, filter, converter, filterAnnotations);
}
@Override
public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener,
CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
return cache.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public <C> void addStorageFormatFilteredListener(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
cache.addStorageFormatFilteredListener(listener, filter, converter, filterAnnotations);
}
@Override
public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
return cache.addStorageFormatFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public CompletableFuture<V> getAsync(K key) {
return cache.getAsync(key);
}
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
return cache.getAllAsync(keys);
}
@ManagedAttribute(
description = "Returns the cache configuration in form of properties",
displayName = "Cache configuration properties",
dataType = DataType.TRAIT
)
public Properties getConfigurationAsProperties() {
return new PropertyFormatter().format(getCacheConfiguration());
}
@Override
public String toString() {
return cache.toString();
}
public Cache<K, V> getDelegate() {
return cache;
}
   /**
    * Fully unwraps a given cache, returning the base cache. Will unwrap all <b>AbstractDelegatingCache</b> wrappers.
    *
    * @param cache the cache to unwrap, possibly wrapped in one or more delegating layers
    * @param <K> the key type
    * @param <V> the value type
    * @return the innermost, non-delegating cache
    */
public static <K, V> Cache<K, V> unwrapCache(Cache<K, V> cache) {
if (cache instanceof AbstractDelegatingCache) {
return unwrapCache(((AbstractDelegatingCache<K, V>) cache).getDelegate());
}
return cache;
}
}
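A hedged sketch of the extension pattern the class javadoc describes: subclass, override only the methods of interest, and recover the base instance with unwrapCache(). The logging behaviour is an illustrative assumption:

import org.infinispan.Cache;
import org.infinispan.cache.impl.AbstractDelegatingCache;

public class LoggingCache<K, V> extends AbstractDelegatingCache<K, V> {
   public LoggingCache(Cache<K, V> cache) {
      super(cache);
   }

   @Override
   public V get(Object key) {
      System.out.println("get(" + key + ")");  // illustrative cross-cutting concern
      return super.get(key);
   }

   // Unwrapping strips any number of delegating layers back to the base cache:
   //   Cache<K, V> base = AbstractDelegatingCache.unwrapCache(new LoggingCache<>(someCache));
}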
| 21,719
| 33.585987
| 247
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/EncoderKeyMapper.java
|
package org.infinispan.cache.impl;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
 * {@link java.util.function.Function} that uses a key encoder to convert keys from the configured storage format to
* the requested format.
*
* @since 9.1
*/
@Scope(Scopes.NONE)
public class EncoderKeyMapper<K> implements EncodingFunction<K> {
private final DataConversion dataConversion;
public EncoderKeyMapper(DataConversion dataConversion) {
this.dataConversion = dataConversion;
}
@Inject
public void injectDependencies(ComponentRegistry registry) {
registry.wireDependencies(dataConversion);
}
@Override
@SuppressWarnings("unchecked")
public K apply(K k) {
return (K) dataConversion.fromStorage(k);
}
public static class Externalizer implements AdvancedExternalizer<EncoderKeyMapper> {
@Override
public Set<Class<? extends EncoderKeyMapper>> getTypeClasses() {
return Collections.singleton(EncoderKeyMapper.class);
}
@Override
public Integer getId() {
return Ids.ENCODER_KEY_MAPPER;
}
@Override
public void writeObject(ObjectOutput output, EncoderKeyMapper object) throws IOException {
DataConversion.writeTo(output, object.dataConversion);
}
@Override
@SuppressWarnings("unchecked")
public EncoderKeyMapper readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new EncoderKeyMapper(DataConversion.readFrom(input));
}
}
}
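For orientation, a hedged stand-in showing the role such a mapper plays when storage-format keys must be presented in the requested format; the prefix-stripping conversion below is purely an assumption, since real conversion goes through DataConversion.fromStorage():

import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

public class KeyMapperSketch {
   public static void main(String[] args) {
      // Stand-in for EncoderKeyMapper.apply(): storage format -> requested format.
      Function<String, String> fromStorage = stored -> stored.substring("stored:".length());
      List<String> requested = List.of("stored:k1", "stored:k2").stream()
            .map(fromStorage)
            .collect(Collectors.toList());
      System.out.println(requested); // [k1, k2]
   }
}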
| 1,965
| 28.787879
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/CacheConfigurationMBean.java
|
package org.infinispan.cache.impl;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
/**
 * Exposes runtime cache configuration attributes via JMX.
*
* @author Tristan Tarrant
* @since 8.1
*/
@Scope(Scopes.NAMED_CACHE)
@MBean(objectName = "Configuration", description = "Runtime cache configuration attributes")
public class CacheConfigurationMBean {
@Inject Configuration configuration;
@ManagedAttribute(description = "Gets the eviction size for the cache",
displayName = "Gets the eviction size for the cache",
writable = true)
public long getEvictionSize() {
return configuration.memory().size();
}
public void setEvictionSize(long newSize) {
configuration.memory().size(newSize);
}
}
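A hedged sketch of reading the writable attribute above through standard JMX; both the ObjectName and the attribute name are illustrative assumptions, since the real names depend on the JMX domain, cache name, and registration conventions:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class EvictionSizeJmxSketch {
   public static void main(String[] args) throws Exception {
      MBeanServer server = ManagementFactory.getPlatformMBeanServer();
      // Illustrative name only; adjust domain, cache, and manager to your deployment.
      ObjectName name = new ObjectName(
            "org.infinispan:type=Cache,name=\"demo(local)\",manager=\"DefaultCacheManager\",component=Configuration");
      System.out.println("evictionSize = " + server.getAttribute(name, "evictionSize"));
   }
}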
| 972
| 28.484848
| 92
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/cache/impl/InvocationHelper.java
|
package org.infinispan.cache.impl;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import jakarta.transaction.InvalidTransactionException;
import jakarta.transaction.Synchronization;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import org.infinispan.batch.BatchContainer;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.tx.AsyncSynchronization;
import org.infinispan.commons.tx.AsyncXaResource;
import org.infinispan.commons.tx.TransactionImpl;
import org.infinispan.commons.tx.TransactionResourceConverter;
import org.infinispan.commons.tx.XidImpl;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.TransactionConfiguration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.locks.RemoteLockCommand;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * It invokes the {@link VisitableCommand} through this cache's {@link AsyncInterceptorChain}.
 * <p>
 * It creates injected transactions and auto-commits them, if the cache is transactional.
*
* @author Pedro Ruivo
* @since 11.0
*/
@Scope(Scopes.NAMED_CACHE)
public class InvocationHelper implements TransactionResourceConverter {
private static final Log log = LogFactory.getLog(InvocationHelper.class);
@Inject protected AsyncInterceptorChain invoker;
@Inject protected InvocationContextFactory invocationContextFactory;
@Inject protected TransactionManager transactionManager;
@Inject protected Configuration config;
@Inject protected BatchContainer batchContainer;
@Inject protected BlockingManager blockingManager;
private static void checkLockOwner(InvocationContext context, VisitableCommand command) {
if (context.getLockOwner() == null && command instanceof RemoteLockCommand) {
context.setLockOwner(((RemoteLockCommand) command).getKeyLockOwner());
}
}
private static boolean isTxInjected(InvocationContext ctx) {
return ctx.isInTxScope() && ((TxInvocationContext<?>) ctx).isImplicitTransaction();
}
/**
* Same as {@link #invoke(ContextBuilder, VisitableCommand, int)} but using the default {@link ContextBuilder}.
*
* @param command The {@link VisitableCommand} to invoke.
* @param keyCount The number of keys affected by the {@code command}.
* @param <T> The return type.
* @return The invocation result.
*/
public <T> T invoke(VisitableCommand command, int keyCount) {
InvocationContext ctx = createInvocationContextWithImplicitTransaction(keyCount, false);
return invoke(ctx, command);
}
/**
* Same as {@link #invoke(InvocationContext, VisitableCommand)} but using {@code builder} to build the {@link
* InvocationContext} to use.
*
* @param builder The {@link ContextBuilder} to create the {@link InvocationContext} to use.
* @param command The {@link VisitableCommand} to invoke.
* @param keyCount The number of keys affected by the {@code command}.
* @param <T> The return type.
* @return The invocation result.
*/
public <T> T invoke(ContextBuilder builder, VisitableCommand command, int keyCount) {
InvocationContext ctx = builder.create(keyCount);
return invoke(ctx, command);
}
/**
* Invokes the {@code command} using {@code context}.
* <p>
* This method blocks until the {@code command} finishes. Use {@link #invokeAsync(InvocationContext,
* VisitableCommand)} for non-blocking.
*
* @param context The {@link InvocationContext} to use.
* @param command The {@link VisitableCommand} to invoke.
* @param <T> The return type.
* @return The invocation result.
*/
public <T> T invoke(InvocationContext context, VisitableCommand command) {
checkLockOwner(context, command);
return isTxInjected(context) ?
executeCommandWithInjectedTx(context, command) :
doInvoke(context, command);
}
/**
    * Same as {@link #invokeAsync(ContextBuilder, VisitableCommand, int)} but using the default {@link ContextBuilder}.
*
* @param command The {@link VisitableCommand} to invoke.
* @param keyCount The number of keys affected by the {@code command}.
* @param <T> The return type.
* @return A {@link CompletableFuture} with the result.
*/
public <T> CompletableFuture<T> invokeAsync(VisitableCommand command, int keyCount) {
InvocationContext ctx = createInvocationContextWithImplicitTransaction(keyCount, false);
return invokeAsync(ctx, command);
}
/**
    * Same as {@link #invokeAsync(InvocationContext, VisitableCommand)} but using the {@link InvocationContext} created by
* {@code builder}.
*
* @param builder The {@link ContextBuilder} to create the {@link InvocationContext} to use.
* @param command The {@link VisitableCommand} to invoke.
* @param keyCount The number of keys affected by the {@code command}.
* @param <T> The return type.
* @return A {@link CompletableFuture} with the result.
*/
public <T> CompletableFuture<T> invokeAsync(ContextBuilder builder, VisitableCommand command, int keyCount) {
InvocationContext ctx = builder.create(keyCount);
return invokeAsync(ctx, command);
}
/**
* Invokes the {@code command} using {@code context} and returns a {@link CompletableFuture}.
* <p>
* The {@link CompletableFuture} is completed with the return value of the invocation.
*
* @param context The {@link InvocationContext} to use.
* @param command The {@link VisitableCommand} to invoke.
* @param <T> The return type.
* @return A {@link CompletableFuture} with the result.
*/
public <T> CompletableFuture<T> invokeAsync(InvocationContext context, VisitableCommand command) {
checkLockOwner(context, command);
return isTxInjected(context) ?
executeCommandAsyncWithInjectedTx(context, command) :
doInvokeAsync(context, command);
}
/**
* Creates an invocation context with an implicit transaction if it is required. An implicit transaction is created
* if there is no current transaction and autoCommit is enabled.
*
* @param keyCount how many keys are expected to be changed
* @return the invocation context
*/
public InvocationContext createInvocationContextWithImplicitTransaction(int keyCount,
boolean forceCreateTransaction) {
boolean txInjected = false;
TransactionConfiguration txConfig = config.transaction();
if (txConfig.transactionMode().isTransactional()) {
Transaction transaction = getOngoingTransaction();
if (transaction == null && (forceCreateTransaction || txConfig.autoCommit())) {
transaction = tryBegin();
txInjected = true;
}
return invocationContextFactory.createInvocationContext(transaction, txInjected);
} else {
return invocationContextFactory.createInvocationContext(true, keyCount);
}
}
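   // Hedged flow illustration (comments only, not in the original source): on a
   // transactional cache with autoCommit enabled, a plain cache.put(k, v) runs
   // roughly as:
   //   InvocationContext ctx = createInvocationContextWithImplicitTransaction(1, false); // tryBegin()
   //   Object result = invoke(ctx, putCommand); // interceptor chain, tx marked "injected"
   //   // success -> tryCommit(); throwable -> tryRollback(), then rethrow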
@Override
public String toString() {
return "InvocationHelper{}";
}
private Transaction getOngoingTransaction() {
try {
Transaction transaction = null;
if (transactionManager != null) {
transaction = transactionManager.getTransaction();
if (transaction == null && config.invocationBatching().enabled()) {
transaction = batchContainer.getBatchTransaction();
}
}
return transaction;
} catch (SystemException e) {
throw new CacheException("Unable to get transaction", e);
}
}
private <T> T executeCommandWithInjectedTx(InvocationContext ctx, VisitableCommand command) {
final T result;
try {
result = doInvoke(ctx, command);
} catch (Throwable e) {
tryRollback();
throw e;
}
tryCommit();
return result;
}
private <T> CompletableFuture<T> executeCommandAsyncWithInjectedTx(InvocationContext ctx, VisitableCommand command) {
CompletableFuture<T> cf;
final Transaction implicitTransaction;
try {
// interceptors must not access thread-local transaction anyway
implicitTransaction = transactionManager.suspend();
assert implicitTransaction != null;
cf = doInvokeAsync(ctx, command);
} catch (SystemException e) {
return CompletableFuture.failedFuture(new CacheException("Cannot suspend implicit transaction", e));
} catch (Throwable e) {
tryRollback();
return CompletableFuture.failedFuture(e);
}
if (implicitTransaction instanceof TransactionImpl) {
return commitInjectedTransactionAsync(cf, (TransactionImpl) implicitTransaction);
} else {
return commitInjectTransaction(cf, implicitTransaction, ctx.getLockOwner());
}
}
private <T> CompletableFuture<T> commitInjectTransaction(CompletionStage<T> cf, Transaction transaction, Object traceId) {
return blockingManager.handleBlocking(cf, (result, throwable) -> {
if (throwable != null) {
try {
transactionManager.resume(transaction);
transactionManager.rollback();
} catch (SystemException | InvalidTransactionException e) {
log.trace("Could not rollback", e);
throwable.addSuppressed(e);
}
throw CompletableFutures.asCompletionException(throwable);
}
try {
transactionManager.resume(transaction);
transactionManager.commit();
} catch (Exception e) {
log.couldNotCompleteInjectedTransaction(e);
throw CompletableFutures.asCompletionException(e);
}
return result;
}, traceId).toCompletableFuture();
}
private <T> CompletableFuture<T> commitInjectedTransactionAsync(CompletionStage<T> cf, TransactionImpl transaction) {
return cf.handle((result, throwable) -> {
         if (throwable != null) {
            // Roll back the injected transaction, then rethrow the original failure
            // (mirroring commitInjectTransaction()) instead of swallowing it
            return transaction.rollbackAsync(InvocationHelper.this).<T>thenApply(__ -> {
               throw CompletableFutures.asCompletionException(throwable);
            });
         } else {
            return transaction.commitAsync(InvocationHelper.this).thenApply(__ -> result);
         }
})
.thenCompose(Function.identity())
.toCompletableFuture();
}
private Transaction tryBegin() {
if (transactionManager == null) {
return null;
}
try {
transactionManager.begin();
final Transaction transaction = getOngoingTransaction();
if (log.isTraceEnabled()) {
log.tracef("Implicit transaction started! Transaction: %s", transaction);
}
return transaction;
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new CacheException("Unable to begin implicit transaction.", e);
}
}
private void tryRollback() {
try {
if (transactionManager != null) {
transactionManager.rollback();
}
} catch (Throwable t) {
if (log.isTraceEnabled()) {
log.trace("Could not rollback", t);//best effort
}
}
}
private void tryCommit() {
if (transactionManager == null) {
return;
}
if (log.isTraceEnabled()) {
log.tracef("Committing transaction as it was implicit: %s", getOngoingTransaction());
}
try {
transactionManager.commit();
} catch (Throwable e) {
log.couldNotCompleteInjectedTransaction(e);
throw new CacheException("Could not commit implicit transaction", e);
}
}
private <T> CompletableFuture<T> doInvokeAsync(InvocationContext ctx, VisitableCommand command) {
//noinspection unchecked
return (CompletableFuture<T>) invoker.invokeAsync(ctx, command);
}
private <T> T doInvoke(InvocationContext ctx, VisitableCommand command) {
//noinspection unchecked
return (T) invoker.invoke(ctx, command);
}
@Override
public AsyncSynchronization convertSynchronization(Synchronization synchronization) {
return synchronization instanceof AsyncSynchronization ?
(AsyncSynchronization) synchronization :
new Sync(synchronization);
}
@Override
public AsyncXaResource convertXaResource(XAResource resource) {
return resource instanceof AsyncXaResource ?
(AsyncXaResource) resource :
new Xa(resource);
}
private class Sync implements AsyncSynchronization {
private final Synchronization synchronization;
private Sync(Synchronization synchronization) {
this.synchronization = synchronization;
}
@Override
public CompletionStage<Void> asyncBeforeCompletion() {
return blockingManager.runBlocking(synchronization::beforeCompletion, synchronization);
}
@Override
public CompletionStage<Void> asyncAfterCompletion(int status) {
return blockingManager.runBlocking(() -> synchronization.afterCompletion(status), synchronization);
}
}
private class Xa implements AsyncXaResource {
private final XAResource resource;
private Xa(XAResource resource) {
this.resource = resource;
}
@Override
public CompletionStage<Void> asyncEnd(XidImpl xid, int flags) {
return blockingManager.runBlocking(() -> {
try {
resource.end(xid, flags);
} catch (XAException e) {
throw CompletableFutures.asCompletionException(e);
}
}, resource);
}
@Override
public CompletionStage<Integer> asyncPrepare(XidImpl xid) {
return blockingManager.supplyBlocking(() -> {
try {
return resource.prepare(xid);
} catch (XAException e) {
throw CompletableFutures.asCompletionException(e);
}
}, resource);
}
@Override
public CompletionStage<Void> asyncCommit(XidImpl xid, boolean onePhase) {
return blockingManager.runBlocking(() -> {
try {
resource.commit(xid, onePhase);
} catch (XAException e) {
throw CompletableFutures.asCompletionException(e);
}
}, resource);
}
@Override
public CompletionStage<Void> asyncRollback(XidImpl xid) {
return blockingManager.runBlocking(() -> {
try {
resource.rollback(xid);
} catch (XAException e) {
throw CompletableFutures.asCompletionException(e);
}
}, resource);
}
}
}
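// Hedged usage sketch (illustrative addition, not part of the original file).
// 'helper' and 'command' are assumed to be supplied by the component registry and
// the commands factory respectively; this only exercises the API shown above.
class InvocationHelperUsageSketch {
   static Object writeWithImplicitTx(InvocationHelper helper, VisitableCommand command) {
      // Starts an implicit transaction when autoCommit is enabled; invoke() then
      // commits it on success or rolls it back on failure.
      InvocationContext ctx = helper.createInvocationContextWithImplicitTransaction(1, false);
      return helper.invoke(ctx, command);
   }
}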
| 15,570
| 36.793689
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/encoding/DataConversion.java
|
package org.infinispan.encoding;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Objects;
import java.util.Set;
import org.infinispan.commons.dataconversion.ByteArrayWrapper;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.EncoderIds;
import org.infinispan.commons.dataconversion.EncodingException;
import org.infinispan.commons.dataconversion.IdentityEncoder;
import org.infinispan.commons.dataconversion.IdentityWrapper;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Transcoder;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.commons.dataconversion.WrapperIds;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.Util;
import org.infinispan.encoding.impl.StorageConfigurationManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.marshall.core.EncoderRegistry;
/**
* Handle conversions for Keys or values.
*
* @since 9.2
*/
@Scope(Scopes.NONE)
public final class DataConversion {
/**
* @deprecated Since 11.0. To be removed in 14.0, with no replacement.
*/
@Deprecated
public static final DataConversion DEFAULT_KEY = new DataConversion(IdentityEncoder.INSTANCE, ByteArrayWrapper.INSTANCE, true);
/**
* @deprecated Since 11.0. To be removed in 14.0, with no replacement.
*/
@Deprecated
public static final DataConversion DEFAULT_VALUE = new DataConversion(IdentityEncoder.INSTANCE, ByteArrayWrapper.INSTANCE, false);
/**
* @deprecated Since 11.0. To be removed in 14.0. For internal use only.
*/
@Deprecated
public static final DataConversion IDENTITY_KEY = new DataConversion(IdentityEncoder.INSTANCE, IdentityWrapper.INSTANCE, true);
/**
* @deprecated Since 11.0. To be removed in 14.0. For internal use only.
*/
@Deprecated
public static final DataConversion IDENTITY_VALUE = new DataConversion(IdentityEncoder.INSTANCE, IdentityWrapper.INSTANCE, false);
// On the origin node the conversion is initialized with the encoder/wrapper classes, on remote nodes with the ids
private final transient Class<? extends Encoder> encoderClass;
// TODO Make final after removing overrideWrapper()
private transient Class<? extends Wrapper> wrapperClass;
private final short encoderId;
private final byte wrapperId;
private final MediaType requestMediaType;
private final boolean isKey;
private transient MediaType storageMediaType;
private transient Encoder encoder;
private transient Wrapper customWrapper;
private transient Transcoder transcoder;
private transient EncoderRegistry encoderRegistry;
private transient StorageConfigurationManager storageConfigurationManager;
private DataConversion(Class<? extends Encoder> encoderClass, Class<? extends Wrapper> wrapperClass,
MediaType requestMediaType, boolean isKey) {
this.encoderClass = encoderClass;
this.wrapperClass = wrapperClass;
this.requestMediaType = requestMediaType;
this.isKey = isKey;
this.encoderId = EncoderIds.NO_ENCODER;
this.wrapperId = WrapperIds.NO_WRAPPER;
}
/**
* Used for de-serialization
*/
private DataConversion(Short encoderId, Byte wrapperId, MediaType requestMediaType, boolean isKey) {
this.encoderId = encoderId;
this.wrapperId = wrapperId;
this.requestMediaType = requestMediaType;
this.isKey = isKey;
this.encoderClass = null;
this.wrapperClass = null;
}
private DataConversion(Encoder encoder, Wrapper wrapper, boolean isKey) {
this.encoder = encoder;
this.customWrapper = wrapper;
this.encoderClass = encoder.getClass();
this.wrapperClass = wrapper.getClass();
this.isKey = isKey;
this.storageMediaType = MediaType.APPLICATION_OBJECT;
this.requestMediaType = MediaType.APPLICATION_OBJECT;
encoderId = EncoderIds.NO_ENCODER;
wrapperId = WrapperIds.NO_WRAPPER;
}
public DataConversion withRequestMediaType(MediaType requestMediaType) {
if (Objects.equals(this.requestMediaType, requestMediaType)) return this;
return new DataConversion(this.encoderClass, this.wrapperClass, requestMediaType, this.isKey);
}
/**
* @deprecated Since 12.1, to be removed in a future version.
*/
@Deprecated
public DataConversion withEncoding(Class<? extends Encoder> encoderClass) {
if (Objects.equals(this.encoderClass, encoderClass)) return this;
return new DataConversion(encoderClass, this.wrapperClass, this.requestMediaType, this.isKey);
}
/**
* @deprecated Since 11.0. To be removed in 14.0, with no replacement.
*/
@Deprecated
public DataConversion withWrapping(Class<? extends Wrapper> wrapperClass) {
if (Objects.equals(this.wrapperClass, wrapperClass)) return this;
return new DataConversion(this.encoderClass, wrapperClass, this.requestMediaType, this.isKey);
}
/**
* @deprecated Since 11.0, will be removed with no replacement
*/
@Deprecated
public void overrideWrapper(Class<? extends Wrapper> newWrapper, ComponentRegistry cr) {
this.customWrapper = null;
this.wrapperClass = newWrapper;
cr.wireDependencies(this);
}
/**
* @deprecated Since 11.0, with no replacement.
*/
@Deprecated
public boolean isConversionSupported(MediaType mediaType) {
return storageMediaType == null || encoderRegistry.isConversionSupported(storageMediaType, mediaType);
}
/**
* @deprecated Since 11.0, with no replacement.
*/
@Deprecated
public Object convert(Object o, MediaType from, MediaType to) {
if (o == null) return null;
if (encoderRegistry == null) return o;
Transcoder transcoder = encoderRegistry.getTranscoder(from, to);
return transcoder.transcode(o, from, to);
}
/**
* @deprecated Since 11.0, with no replacement.
*/
@Deprecated
public Object convertToRequestFormat(Object o, MediaType contentType) {
if (o == null) return null;
if (requestMediaType == null) return fromStorage(o);
Transcoder transcoder = encoderRegistry.getTranscoder(contentType, requestMediaType);
return transcoder.transcode(o, contentType, requestMediaType);
}
@Inject
void injectDependencies(StorageConfigurationManager storageConfigurationManager, EncoderRegistry encoderRegistry) {
if (this.encoder != null && this.customWrapper != null) {
// This must be one of the static encoders, we can't inject any component in it
return;
}
this.storageMediaType = storageConfigurationManager.getStorageMediaType(isKey);
this.encoderRegistry = encoderRegistry;
this.storageConfigurationManager = storageConfigurationManager;
this.customWrapper = encoderRegistry.getWrapper(wrapperClass, wrapperId);
this.lookupEncoder();
this.lookupTranscoder();
}
private void lookupEncoder() {
boolean isEncodingEmpty = encoderClass == null && encoderId == EncoderIds.NO_ENCODER;
Class<? extends Encoder> actualEncoderClass = isEncodingEmpty ? IdentityEncoder.class : encoderClass;
this.encoder = encoderRegistry.getEncoder(actualEncoderClass, encoderId);
}
private void lookupTranscoder() {
boolean needsTranscoding = storageMediaType != null && requestMediaType != null && !requestMediaType.matchesAll() && !requestMediaType.equals(storageMediaType);
if (needsTranscoding) {
Transcoder directTranscoder = null;
if (encoder.getStorageFormat() != null) {
try {
directTranscoder = encoderRegistry.getTranscoder(requestMediaType, encoder.getStorageFormat());
} catch (EncodingException ignored) {
}
}
if (directTranscoder != null) {
if (encoder.getStorageFormat().equals(MediaType.APPLICATION_OBJECT)) {
encoder = IdentityEncoder.INSTANCE;
}
transcoder = directTranscoder;
} else {
transcoder = encoderRegistry.getTranscoder(requestMediaType, storageMediaType);
}
}
}
public Object fromStorage(Object stored) {
if (stored == null) return null;
Object fromStorage = encoder.fromStorage(getWrapper().unwrap(stored));
return transcoder == null ? fromStorage : transcoder.transcode(fromStorage, storageMediaType, requestMediaType);
}
public Object toStorage(Object toStore) {
if (toStore == null) return null;
toStore = transcoder == null ? toStore : transcoder.transcode(toStore, requestMediaType, storageMediaType);
return getWrapper().wrap(encoder.toStorage(toStore));
}
/**
* Convert the stored object in a format suitable to be indexed.
*/
public Object extractIndexable(Object stored) {
if (stored == null) return null;
// Keys are indexed as stored, without the wrapper
Wrapper wrapper = getWrapper();
if (isKey) return wrapper.unwrap(stored);
if (wrapper.isFilterable()) {
// If the value wrapper is indexable, return the already wrapped value or wrap it otherwise
return stored.getClass().equals(wrapperClass) ? stored : wrapper.wrap(stored);
}
// Otherwise convert to the request format
Object unencoded = encoder.fromStorage(wrapper.unwrap(stored));
return transcoder == null ? unencoded : transcoder.transcode(unencoded, storageMediaType, requestMediaType);
}
public MediaType getRequestMediaType() {
return requestMediaType;
}
public MediaType getStorageMediaType() {
return storageMediaType;
}
public Encoder getEncoder() {
return encoder;
}
/**
* @deprecated Since 11.0. To be removed in 14.0, with no replacement.
*/
@Deprecated
public Wrapper getWrapper() {
if (customWrapper != null)
return customWrapper;
return storageConfigurationManager.getWrapper(isKey);
}
public Class<? extends Encoder> getEncoderClass() {
return encoderClass;
}
/**
* @deprecated Since 11.0. To be removed in 14.0, with no replacement.
*/
@Deprecated
public Class<? extends Wrapper> getWrapperClass() {
return wrapperClass;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DataConversion that = (DataConversion) o;
return isKey == that.isKey &&
Objects.equals(encoder, that.encoder) &&
Objects.equals(customWrapper, that.customWrapper) &&
Objects.equals(transcoder, that.transcoder) &&
Objects.equals(requestMediaType, that.requestMediaType);
}
@Override
public String toString() {
return "DataConversion{" +
"encoderClass=" + encoderClass +
", wrapperClass=" + wrapperClass +
", requestMediaType=" + requestMediaType +
", storageMediaType=" + storageMediaType +
", encoderId=" + encoderId +
", wrapperId=" + wrapperId +
", encoder=" + encoder +
", wrapper=" + customWrapper +
", isKey=" + isKey +
", transcoder=" + transcoder +
'}';
}
@Override
public int hashCode() {
return Objects.hash(encoderClass, wrapperClass, requestMediaType, isKey);
}
/**
* @return A new instance with an {@link IdentityEncoder} and request type {@link MediaType#APPLICATION_OBJECT}.
* @since 11.0
*/
public static DataConversion newKeyDataConversion() {
return new DataConversion(IdentityEncoder.class, null, MediaType.APPLICATION_OBJECT, true);
}
/**
* @return A new instance with an {@link IdentityEncoder} and request type {@link MediaType#APPLICATION_OBJECT}.
* @since 11.0
*/
public static DataConversion newValueDataConversion() {
return new DataConversion(IdentityEncoder.class, null, MediaType.APPLICATION_OBJECT, false);
}
/**
* @deprecated Since 11.0. To be removed in 14.0. Replaced by {@link #newKeyDataConversion()}.
*/
@Deprecated
public static DataConversion newKeyDataConversion(Class<? extends Encoder> encoderClass,
Class<? extends Wrapper> wrapperClass) {
return new DataConversion(encoderClass, wrapperClass, MediaType.APPLICATION_OBJECT, true);
}
/**
* @deprecated Since 11.0. To be removed in 14.0. Replaced by {@link #newValueDataConversion()}.
*/
@Deprecated
public static DataConversion newValueDataConversion(Class<? extends Encoder> encoderClass,
Class<? extends Wrapper> wrapperClass) {
return new DataConversion(encoderClass, wrapperClass, MediaType.APPLICATION_OBJECT, false);
}
public static void writeTo(ObjectOutput output, DataConversion dataConversion) throws IOException {
byte flags = 0;
if (dataConversion.isKey) flags = (byte) (flags | 2);
output.writeByte(flags);
output.writeShort(dataConversion.encoder.id());
if (dataConversion.customWrapper != null) {
output.writeByte(dataConversion.customWrapper.id());
} else {
output.writeByte(WrapperIds.NO_WRAPPER);
}
output.writeObject(dataConversion.requestMediaType);
}
public static DataConversion readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
byte flags = input.readByte();
boolean isKey = ((flags & 2) == 2);
short encoderId = input.readShort();
byte wrapperId = input.readByte();
MediaType requestMediaType = (MediaType) input.readObject();
return new DataConversion(encoderId, wrapperId, requestMediaType, isKey);
}
public static class Externalizer extends AbstractExternalizer<DataConversion> {
@Override
public Set<Class<? extends DataConversion>> getTypeClasses() {
return Util.asSet(DataConversion.class);
}
@Override
public void writeObject(ObjectOutput output, DataConversion dataConversion) throws IOException {
writeTo(output, dataConversion);
}
@Override
public DataConversion readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return readFrom(input);
}
@Override
public Integer getId() {
return Ids.DATA_CONVERSION;
}
}
}
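// Hedged usage sketch (illustrative addition, not part of the original file).
// 'registry' is assumed to be the cache's ComponentRegistry; without the wiring
// step the encoder, wrapper and transcoder fields above stay uninitialized.
class DataConversionUsageSketch {
   static Object roundTrip(ComponentRegistry registry, Object jsonValue) {
      DataConversion conversion = DataConversion.newValueDataConversion()
            .withRequestMediaType(MediaType.APPLICATION_JSON);
      registry.wireDependencies(conversion);
      Object stored = conversion.toStorage(jsonValue); // request -> storage format
      return conversion.fromStorage(stored);           // storage -> request format
   }
}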
| 14,793
| 36.548223
| 166
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/encoding/ProtostreamTranscoder.java
|
package org.infinispan.encoding;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_JSON;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_OBJECT;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_OCTET_STREAM;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_PROTOSTREAM;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_UNKNOWN;
import static org.infinispan.commons.dataconversion.MediaType.TEXT_PLAIN;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;
import java.util.Optional;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.OneToManyTranscoder;
import org.infinispan.commons.dataconversion.StandardConversions;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.commons.util.Util;
import org.infinispan.marshall.protostream.impl.SerializationContextRegistry;
import org.infinispan.protostream.ImmutableSerializationContext;
import org.infinispan.protostream.ProtobufUtil;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * <p>
 * Performs conversions between <b>application/x-protostream</b> and common formats.
 * </p>
 *
 * <p>When converting to <b>application/x-protostream</b>, it will produce payloads
 * with {@link org.infinispan.protostream.WrappedMessage} by default, unless the param
 * <b>wrapped</b> is supplied in the destination {@link MediaType} with value <b>false</b>.</p>
 *
 * <p>
 * Converting back to <b>application/x-java-object</b> requires either a payload that is
 * a {@link org.infinispan.protostream.WrappedMessage} or an unwrapped payload plus the
 * type of the java object to convert to, specified using the <b>type</b> parameter in
 * the <b>application/x-java-object</b> {@link MediaType}.
 * </p>
*
* @since 10.0
*/
public class ProtostreamTranscoder extends OneToManyTranscoder {
public static final String WRAPPED_PARAM = "wrapped";
protected static final Log logger = LogFactory.getLog(ProtostreamTranscoder.class, Log.class);
private final SerializationContextRegistry ctxRegistry;
private final ClassLoader classLoader;
public ProtostreamTranscoder(SerializationContextRegistry ctxRegistry, ClassLoader classLoader) {
super(APPLICATION_PROTOSTREAM, APPLICATION_OCTET_STREAM, TEXT_PLAIN, APPLICATION_OBJECT, APPLICATION_JSON, APPLICATION_UNKNOWN);
this.ctxRegistry = ctxRegistry;
this.classLoader = classLoader;
}
@Override
public Object doTranscode(Object content, MediaType contentType, MediaType destinationType) {
try {
if (destinationType.match(MediaType.APPLICATION_PROTOSTREAM)) {
if (contentType.match(APPLICATION_JSON)) {
content = addTypeIfNeeded(content);
return fromJsonCascading(content);
}
if (contentType.match(APPLICATION_UNKNOWN) || contentType.match(APPLICATION_PROTOSTREAM)) {
return content;
}
if (contentType.match(TEXT_PLAIN)) {
content = StandardConversions.convertTextToObject(content, contentType);
}
return marshall(content, destinationType);
}
if (destinationType.match(MediaType.APPLICATION_OCTET_STREAM)) {
Object unmarshalled = content instanceof byte[] ? unmarshall((byte[]) content, contentType, destinationType) : content;
if (unmarshalled instanceof byte[]) {
return unmarshalled;
}
ImmutableSerializationContext ctx = getCtxForMarshalling(unmarshalled);
return StandardConversions.convertJavaToProtoStream(unmarshalled, MediaType.APPLICATION_OBJECT, ctx);
}
if (destinationType.match(MediaType.TEXT_PLAIN)) {
Object decoded = unmarshallCascading((byte[]) content);
if (decoded == null) return null;
return decoded.toString().getBytes(destinationType.getCharset());
}
if (destinationType.match(MediaType.APPLICATION_OBJECT)) {
return unmarshall((byte[]) content, contentType, destinationType);
}
if (destinationType.match(MediaType.APPLICATION_JSON)) {
String converted = toJsonCascading((byte[]) content);
String convertType = destinationType.getClassType();
return convertType == null ? StandardConversions.convertCharset(converted, contentType.getCharset(), destinationType.getCharset()) : converted;
}
if (destinationType.equals(APPLICATION_UNKNOWN)) {
//TODO: Remove wrapping of byte[] into WrappedByteArray from the Hot Rod Multimap operations.
if (content instanceof WrappedByteArray) return content;
ImmutableSerializationContext ctx = getCtxForMarshalling(content);
return StandardConversions.convertJavaToProtoStream(content, MediaType.APPLICATION_OBJECT, ctx);
}
throw logger.unsupportedContent(ProtostreamTranscoder.class.getSimpleName(), content);
} catch (InterruptedException | IOException e) {
throw logger.errorTranscoding(ProtostreamTranscoder.class.getSimpleName(), e);
}
}
private boolean isWrapped(MediaType mediaType) {
      Optional<String> wrappedParam = mediaType.getParameter(WRAPPED_PARAM);
return (!wrappedParam.isPresent() || !wrappedParam.get().equals("false"));
}
private byte[] marshall(Object decoded, MediaType destinationType) throws IOException {
ImmutableSerializationContext ctx = getCtxForMarshalling(decoded);
if (isWrapped(destinationType)) {
return ProtobufUtil.toWrappedByteArray(ctx, decoded);
}
return ProtobufUtil.toByteArray(ctx, decoded);
}
private Object unmarshall(byte[] bytes, MediaType contentType, MediaType destinationType) throws IOException {
if (isWrapped(contentType))
return unmarshallCascading(bytes);
String type = destinationType.getClassType();
if (type == null) throw logger.missingTypeForUnwrappedPayload();
Class<?> destination = Util.loadClass(type, classLoader);
ImmutableSerializationContext ctx = getCtxForMarshalling(destination);
return ProtobufUtil.fromByteArray(ctx, bytes, destination);
}
// Workaround until protostream provides support for cascading contexts IPROTO-139
private Object unmarshallCascading(byte[] bytes) throws IOException {
      // First try unmarshalling with the user context
try {
return ProtobufUtil.fromWrappedByteArray(ctxRegistry.getUserCtx(), bytes);
} catch (IllegalArgumentException e) {
logger.debugf("Unable to unmarshall bytes with user context, attempting global context");
try {
return ProtobufUtil.fromWrappedByteArray(ctxRegistry.getGlobalCtx(), bytes);
} catch (IllegalArgumentException iae) {
throw new MarshallingException(iae.getMessage());
}
}
}
// Workaround until protostream provides support for cascading contexts IPROTO-139
private byte[] fromJsonCascading(Object content) throws IOException {
try {
return fromJson(content, ctxRegistry.getUserCtx());
} catch (IllegalArgumentException e) {
String message = e.getMessage();
if (message != null && message.contains("Unknown type")) {
logger.debugf("Unable to process json with user context, attempting global context");
return fromJson(content, ctxRegistry.getGlobalCtx());
}
throw e;
}
}
private byte[] fromJson(Object content, ImmutableSerializationContext ctx) throws IOException {
Reader reader;
if (content instanceof byte[]) {
reader = new InputStreamReader(new ByteArrayInputStream((byte[]) content));
} else {
reader = new StringReader(content.toString());
}
return ProtobufUtil.fromCanonicalJSON(ctx, reader);
}
// Workaround until protostream provides support for cascading contexts IPROTO-139
private String toJsonCascading(byte[] bytes) throws IOException {
try {
return ProtobufUtil.toCanonicalJSON(ctxRegistry.getUserCtx(), bytes);
} catch (IllegalArgumentException e) {
logger.debugf("Unable to read bytes with user context, attempting global context");
return ProtobufUtil.toCanonicalJSON(ctxRegistry.getGlobalCtx(), bytes);
}
}
private ImmutableSerializationContext getCtxForMarshalling(Object o) {
Class<?> clazz = o instanceof Class<?> ? (Class<?>) o : o.getClass();
if (isWrappedMessageClass(clazz) || ctxRegistry.getUserCtx().canMarshall(clazz))
return ctxRegistry.getUserCtx();
if (ctxRegistry.getGlobalCtx().canMarshall(clazz))
return ctxRegistry.getGlobalCtx();
throw logger.marshallerMissingFromUserAndGlobalContext(o.getClass().getName());
}
private boolean isWrappedMessageClass(Class<?> c) {
return c.equals(String.class) ||
c.equals(Long.class) ||
c.equals(Integer.class) ||
c.equals(Double.class) ||
c.equals(Float.class) ||
c.equals(Boolean.class) ||
c.equals(byte[].class) ||
c.equals(Byte.class) ||
c.equals(Short.class) ||
c.equals(Character.class) ||
c.equals(java.util.Date.class) ||
c.equals(java.time.Instant.class);
}
private Object addTypeIfNeeded(Object content) {
String wrapped = "{ \"_type\":\"%s\", \"_value\":\"%s\"}";
if (content instanceof Integer || content instanceof Short) {
return String.format(wrapped, "int32", content);
}
if (content instanceof Long) {
return String.format(wrapped, "int64", content);
}
if (content instanceof Double) {
return String.format(wrapped, "double", content);
}
if (content instanceof Float) {
return String.format(wrapped, "float", content);
}
if (content instanceof Boolean) {
return String.format(wrapped, "bool", content);
}
if (content instanceof String && !(content.toString()).contains("_type")) {
return String.format(wrapped, "string", content);
}
return content;
}
}
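// Hedged usage sketch (illustrative addition, not part of the original file).
// 'ctxRegistry' and 'classLoader' are assumed to come from the running cache
// manager; transcode() is the public entry point inherited from the base class.
class ProtostreamTranscoderUsageSketch {
   static Object jsonRoundTrip(SerializationContextRegistry ctxRegistry, ClassLoader classLoader) {
      ProtostreamTranscoder transcoder = new ProtostreamTranscoder(ctxRegistry, classLoader);
      // A bare number is wrapped by addTypeIfNeeded() as {"_type":"int32","_value":"42"}
      Object proto = transcoder.transcode(42, APPLICATION_JSON, APPLICATION_PROTOSTREAM);
      return transcoder.transcode(proto, APPLICATION_PROTOSTREAM, APPLICATION_JSON);
   }
}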
| 10,521
| 44.747826
| 155
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/encoding/impl/StorageConfigurationManager.java
|
package org.infinispan.encoding.impl;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_UNKNOWN;
import org.infinispan.commons.dataconversion.ByteArrayWrapper;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.configuration.cache.ContentTypeConfiguration;
import org.infinispan.configuration.cache.EncodingConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Key/value storage information (storage media type and wrapping).
*
* @author Dan Berindei
* @since 11
*/
@Scope(Scopes.NAMED_CACHE)
public class StorageConfigurationManager {
private static final Log LOG = LogFactory.getLog(StorageConfigurationManager.class, Log.class);
private Wrapper keyWrapper;
private Wrapper valueWrapper;
private MediaType keyStorageMediaType;
private MediaType valueStorageMediaType;
public Wrapper getKeyWrapper() {
return keyWrapper;
}
public Wrapper getValueWrapper() {
return valueWrapper;
}
public Wrapper getWrapper(boolean isKey) {
return isKey ? keyWrapper : valueWrapper;
}
public void overrideWrapper(Wrapper keyWrapper, Wrapper valueWrapper) {
// TODO If the remote query module could override the wrapper at injection time,
// DataConversion wouldn't need to look up the wrapper every time
this.keyWrapper = keyWrapper;
this.valueWrapper = valueWrapper;
}
public MediaType getKeyStorageMediaType() {
return keyStorageMediaType;
}
public MediaType getValueStorageMediaType() {
return valueStorageMediaType;
}
public MediaType getStorageMediaType(boolean isKey) {
return isKey ? keyStorageMediaType : valueStorageMediaType;
}
public StorageConfigurationManager() {
keyWrapper = ByteArrayWrapper.INSTANCE;
valueWrapper = ByteArrayWrapper.INSTANCE;
}
@Inject
void injectDependencies(@ComponentName(KnownComponentNames.USER_MARSHALLER) Marshaller userMarshaller,
@ComponentName(KnownComponentNames.CACHE_NAME) String cacheName,
InternalCacheRegistry icr, GlobalConfiguration gcr, Configuration configuration) {
boolean internalCache = icr.isInternalCache(cacheName);
boolean embeddedMode = Configurations.isEmbeddedMode(gcr);
this.keyStorageMediaType = getStorageMediaType(configuration, embeddedMode, internalCache, userMarshaller,
true);
this.valueStorageMediaType = getStorageMediaType(configuration, embeddedMode, internalCache, userMarshaller,
false);
      if (keyStorageMediaType.equals(APPLICATION_UNKNOWN) || valueStorageMediaType.equals(APPLICATION_UNKNOWN)) {
LOG.unknownEncoding(cacheName);
}
}
private MediaType getStorageMediaType(Configuration configuration, boolean embeddedMode, boolean internalCache,
Marshaller userMarshaller, boolean isKey) {
EncodingConfiguration encodingConfiguration = configuration.encoding();
ContentTypeConfiguration contentTypeConfiguration = isKey ? encodingConfiguration.keyDataType() : encodingConfiguration.valueDataType();
MediaType mediaType = userMarshaller.mediaType();
// If explicitly configured, use the value provided
if (contentTypeConfiguration.isMediaTypeChanged()) {
return contentTypeConfiguration.mediaType();
}
// Indexed caches started by the server will assume application/protostream as storage media type
if (!embeddedMode && configuration.indexing().enabled() && contentTypeConfiguration.mediaType() == null) {
return MediaType.APPLICATION_PROTOSTREAM;
}
if (internalCache) return MediaType.APPLICATION_OBJECT;
if (embeddedMode) {
boolean canStoreReferences = configuration.memory().storage().canStoreReferences();
return canStoreReferences ? MediaType.APPLICATION_OBJECT : mediaType;
}
return APPLICATION_UNKNOWN;
}
/**
* @return true if the storage type allows queries (indexed or non-indexed).
*/
public boolean isQueryable() {
return valueStorageMediaType.match(MediaType.APPLICATION_PROTOSTREAM) ||
valueStorageMediaType.match(MediaType.APPLICATION_OBJECT);
}
}
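// Hedged usage sketch (illustrative addition, not part of the original file).
// 'scm' is assumed to be the NAMED_CACHE-scoped instance injected for a cache.
class StorageConfigurationManagerUsageSketch {
   static boolean describe(StorageConfigurationManager scm) {
      MediaType keyType = scm.getKeyStorageMediaType();
      MediaType valueType = scm.getStorageMediaType(false); // same as getValueStorageMediaType()
      return scm.isQueryable(); // true only for protostream or object value storage
   }
}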
| 4,990
| 40.247934
| 142
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/encoding/impl/TwoStepTranscoder.java
|
package org.infinispan.encoding.impl;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_OBJECT;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.commons.dataconversion.AbstractTranscoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Transcoder;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* <p>
* Performs conversions where there is no direct transcoder, but there are two transcoders available:
* <ul>
* <li>one from source media type to <b>application/x-java-object</b>
* <li>one from <b>application/x-java-object</b> to the destination media type
* </ul>
* </p>
*
* @since 11.0
*/
public class TwoStepTranscoder extends AbstractTranscoder {
private static final Log logger = LogFactory.getLog(TwoStepTranscoder.class, Log.class);
private final Transcoder transcoder1;
private final Transcoder transcoder2;
private final HashSet<MediaType> supportedMediaTypes;
public TwoStepTranscoder(Transcoder transcoder1, Transcoder transcoder2) {
this.transcoder1 = transcoder1;
this.transcoder2 = transcoder2;
supportedMediaTypes = new HashSet<>(this.transcoder1.getSupportedMediaTypes());
supportedMediaTypes.addAll(transcoder2.getSupportedMediaTypes());
}
@Override
public Object doTranscode(Object content, MediaType contentType, MediaType destinationType) {
if (transcoder1.supportsConversion(contentType, APPLICATION_OBJECT)
&& transcoder2.supportsConversion(APPLICATION_OBJECT, destinationType)) {
Object object = transcoder1.transcode(content, contentType, APPLICATION_OBJECT);
return transcoder2.transcode(object, APPLICATION_OBJECT, destinationType);
}
if (transcoder2.supportsConversion(contentType, APPLICATION_OBJECT)
&& transcoder1.supportsConversion(APPLICATION_OBJECT, destinationType)) {
Object object = transcoder2.transcode(content, contentType, APPLICATION_OBJECT);
return transcoder1.transcode(object, APPLICATION_OBJECT, destinationType);
}
throw logger.unsupportedContent(TwoStepTranscoder.class.getSimpleName(), content);
}
@Override
public Set<MediaType> getSupportedMediaTypes() {
return supportedMediaTypes;
}
@Override
public boolean supportsConversion(MediaType mediaType, MediaType other) {
return (transcoder1.supportsConversion(mediaType, APPLICATION_OBJECT) &&
transcoder2.supportsConversion(APPLICATION_OBJECT, other)) ||
(transcoder2.supportsConversion(mediaType, APPLICATION_OBJECT) &&
transcoder1.supportsConversion(APPLICATION_OBJECT, other));
}
}
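// Hedged usage sketch (illustrative addition, not part of the original file).
// 'jsonToObject' and 'objectToText' are hypothetical transcoders; each must support
// one leg of the pivot through application/x-java-object for the composition to work.
class TwoStepTranscoderUsageSketch {
   static Object viaObjectPivot(Transcoder jsonToObject, Transcoder objectToText, Object json) {
      Transcoder composed = new TwoStepTranscoder(jsonToObject, objectToText);
      if (!composed.supportsConversion(MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN)) {
         throw new IllegalStateException("no two-step path available");
      }
      return composed.transcode(json, MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN);
   }
}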
| 2,765
| 39.086957
| 101
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/encoding/impl/JavaSerializationTranscoder.java
|
package org.infinispan.encoding.impl;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.commons.dataconversion.TranscoderMarshallerAdapter;
import org.infinispan.commons.marshall.JavaSerializationMarshaller;
/**
* @since 9.2
*/
public class JavaSerializationTranscoder extends TranscoderMarshallerAdapter {
public JavaSerializationTranscoder(ClassAllowList classAllowList) {
super(new JavaSerializationMarshaller(classAllowList));
}
}
| 485
| 27.588235
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/EventLoggerViewListener.java
|
package org.infinispan.topology;
import static org.infinispan.util.logging.events.Messages.MESSAGES;
import java.util.List;
import java.util.function.Consumer;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachemanagerlistener.annotation.Merged;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogManager;
import org.infinispan.util.logging.events.EventLogger;
@Listener(sync = true)
public class EventLoggerViewListener {
private final EventLogManager manager;
private final Consumer<ViewChangedEvent> afterChange;
public EventLoggerViewListener(EventLogManager manager, Consumer<ViewChangedEvent> afterChange) {
this.manager = manager;
this.afterChange = afterChange;
}
public EventLoggerViewListener(EventLogManager manager) {
this(manager, ignore -> {});
}
@Merged
@ViewChanged
@SuppressWarnings("unused")
public void handleViewChange(ViewChangedEvent event) {
EventLogger eventLogger = manager.getEventLogger().scope(event.getLocalAddress());
logNodeJoined(eventLogger, event.getNewMembers(), event.getOldMembers());
logNodeLeft(eventLogger, event.getNewMembers(), event.getOldMembers());
afterChange.accept(event);
}
private void logNodeJoined(EventLogger logger, List<Address> newMembers, List<Address> oldMembers) {
newMembers.stream()
.filter(address -> !oldMembers.contains(address))
.forEach(address -> logger.info(EventLogCategory.CLUSTER, MESSAGES.nodeJoined(address)));
}
private void logNodeLeft(EventLogger logger, List<Address> newMembers, List<Address> oldMembers) {
oldMembers.stream()
.filter(address -> !newMembers.contains(address))
.forEach(address -> logger.info(EventLogCategory.CLUSTER, MESSAGES.nodeLeft(address)));
}
}
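// Hedged registration sketch (illustrative addition, not part of the original file).
// 'cacheManager' is assumed to be an org.infinispan.manager.EmbeddedCacheManager and
// 'manager' its EventLogManager component; addListener() comes from the Listenable contract.
class EventLoggerViewListenerUsageSketch {
   static void register(org.infinispan.manager.EmbeddedCacheManager cacheManager, EventLogManager manager) {
      EventLoggerViewListener listener = new EventLoggerViewListener(manager,
            event -> System.out.println("view size: " + event.getNewMembers().size()));
      cacheManager.addListener(listener); // @Merged/@ViewChanged callbacks fire on view changes
   }
}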
| 2,110
| 38.830189
| 103
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/LocalTopologyManager.java
|
package org.infinispan.topology;
import java.util.concurrent.CompletionStage;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.remoting.transport.Address;
/**
* Runs on every node and handles the communication with the {@link ClusterTopologyManager}.
*
* @author Dan Berindei
* @since 5.2
*/
@Scope(Scopes.GLOBAL)
public interface LocalTopologyManager {
/**
* Forwards the join request to the coordinator.
* @return The current consistent hash.
*/
CompletionStage<CacheTopology> join(String cacheName, CacheJoinInfo joinInfo, CacheTopologyHandler stm, PartitionHandlingManager phm) throws Exception;
/**
* Forwards the leave request to the coordinator.
*/
void leave(String cacheName, long timeout);
/**
* Confirm that the local cache {@code cacheName} has finished receiving the new data for topology
* {@code topologyId}.
*
* <p>The coordinator can change during the state transfer, so we make the rebalance RPC async
* and we send the response as a different command.
* @param cacheName the name of the cache
* @param topologyId the current topology id of the node at the time the rebalance is completed.
* @param rebalanceId the id of the current rebalance
* @param throwable {@code null} unless local rebalance ended because of an error.
*/
void confirmRebalancePhase(String cacheName, int topologyId, int rebalanceId, Throwable throwable);
/**
* Recovers the current topology information for all running caches and returns it to the coordinator.
*
* @param viewId The coordinator's view id
*/
// TODO Add a new class to hold the CacheJoinInfo and the CacheTopology
CompletionStage<ManagerStatusResponse> handleStatusRequest(int viewId);
/**
* Updates the current and/or pending consistent hash, without transferring any state.
*/
CompletionStage<Void> handleTopologyUpdate(String cacheName, CacheTopology cacheTopology,
AvailabilityMode availabilityMode, int viewId, Address sender);
/**
* Update the stable cache topology.
* <p>
* Mostly needed for backup, so that a new coordinator can recover the stable topology of the cluster.
*/
CompletionStage<Void> handleStableTopologyUpdate(String cacheName, CacheTopology cacheTopology, final Address sender,
int viewId);
/**
* Performs the state transfer.
*/
CompletionStage<Void> handleRebalance(String cacheName, CacheTopology cacheTopology, int viewId, Address sender);
/**
* @return the current topology for a cache.
*/
CacheTopology getCacheTopology(String cacheName);
/**
* @return the last stable topology for a cache.
*/
CacheTopology getStableCacheTopology(String cacheName);
/**
* Checks whether rebalancing is enabled for the entire cluster.
*/
boolean isRebalancingEnabled() throws Exception;
/**
* Checks whether rebalancing is enabled for the specified cache.
*/
boolean isCacheRebalancingEnabled(String cacheName) throws Exception;
/**
* Enable or disable rebalancing in the entire cluster.
*/
void setRebalancingEnabled(boolean enabled) throws Exception;
/**
* Enable or disable rebalancing for the specified cache.
*/
void setCacheRebalancingEnabled(String cacheName, boolean enabled) throws Exception;
/**
* Retrieve the rebalancing status for the specified cache
*/
RebalancingStatus getRebalancingStatus(String cacheName) throws Exception;
/**
* Retrieves the availability state of a cache.
*/
AvailabilityMode getCacheAvailability(String cacheName);
/**
* Updates the availability state of a cache (for the entire cluster).
*/
void setCacheAvailability(String cacheName, AvailabilityMode availabilityMode) throws Exception;
/**
* Returns the local UUID of this node. If global state persistence is enabled, this UUID will be saved and reused
* across restarts
*/
PersistentUUID getPersistentUUID();
/**
* Initiates a cluster-wide cache shutdown for the specified cache
*/
void cacheShutdown(String name);
/**
* Handles the local operations related to gracefully shutting down a cache
*/
CompletionStage<Void> handleCacheShutdown(String cacheName);
/**
* Returns a {@link CompletionStage} that completes when the cache with the name {@code cacheName} has a
* stable topology. Returns null if the cache does not exist.
*/
CompletionStage<Void> stableTopologyCompletion(String cacheName);
/**
* Asserts the cache with the given name has a stable topology installed.
*
    * @param cacheName the name of the cache to check
    * @throws MissingMembersException when the cache does not have a stable topology
*/
default void assertTopologyStable(String cacheName) { }
}
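// Hedged usage sketch (illustrative addition, not part of the original file).
// 'ltm' is assumed to be the GLOBAL-scoped component; the cache name is a placeholder.
class LocalTopologyManagerUsageSketch {
   static String describe(LocalTopologyManager ltm, String cacheName) throws Exception {
      RebalancingStatus status = ltm.getRebalancingStatus(cacheName);
      CacheTopology topology = ltm.getCacheTopology(cacheName);
      int topologyId = topology == null ? -1 : topology.getTopologyId();
      return status + " at topology " + topologyId;
   }
}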
| 5,127
| 33.884354
| 154
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/HeartBeatCommand.java
|
package org.infinispan.topology;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.infinispan.commands.ReplicableCommand;
/**
 * A heart-beat command used to ping members in {@link ClusterTopologyManagerImpl#confirmMembersAvailable()}.
*
* @author Pedro Ruivo
* @since 9.2
*/
public class HeartBeatCommand implements ReplicableCommand {
public static final byte COMMAND_ID = 30;
public static final HeartBeatCommand INSTANCE = new HeartBeatCommand();
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public void writeTo(ObjectOutput output){
//nothing to write
}
@Override
public void readFrom(ObjectInput input) {
//nothing to read
}
}
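// Hedged sketch (illustrative addition, not part of the original file): the command
// carries no payload, so only its id and the expectation of a response are observable.
class HeartBeatCommandUsageSketch {
   static boolean check() {
      HeartBeatCommand ping = HeartBeatCommand.INSTANCE;
      return ping.getCommandId() == HeartBeatCommand.COMMAND_ID && ping.isReturnValueExpected();
   }
}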
| 820
| 19.525
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/CacheJoinInfo.java
|
package org.infinispan.topology;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Optional;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.marshall.core.Ids;
/**
* This class contains the information that a cache needs to supply to the coordinator when starting up.
*
* @author Dan Berindei
* @since 5.2
*/
public class CacheJoinInfo {
// Global configuration
private final ConsistentHashFactory consistentHashFactory;
private final int numSegments;
private final int numOwners;
private final long timeout;
private final CacheMode cacheMode;
// Per-node configuration
private final float capacityFactor;
// Per-node state info
private final PersistentUUID persistentUUID;
private final Optional<Integer> persistentStateChecksum;
public CacheJoinInfo(ConsistentHashFactory consistentHashFactory, int numSegments, int numOwners, long timeout,
CacheMode cacheMode, float capacityFactor,
PersistentUUID persistentUUID, Optional<Integer> persistentStateChecksum) {
this.consistentHashFactory = consistentHashFactory;
this.numSegments = numSegments;
this.numOwners = numOwners;
this.timeout = timeout;
this.cacheMode = cacheMode;
this.capacityFactor = capacityFactor;
this.persistentUUID = persistentUUID;
this.persistentStateChecksum = persistentStateChecksum;
}
public ConsistentHashFactory getConsistentHashFactory() {
return consistentHashFactory;
}
public int getNumSegments() {
return numSegments;
}
public int getNumOwners() {
return numOwners;
}
public long getTimeout() {
return timeout;
}
public CacheMode getCacheMode() {
return cacheMode;
}
public float getCapacityFactor() {
return capacityFactor;
}
public PersistentUUID getPersistentUUID() {
return persistentUUID;
}
public Optional<Integer> getPersistentStateChecksum() {
return persistentStateChecksum;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + Float.floatToIntBits(capacityFactor);
result = prime * result + ((consistentHashFactory == null) ? 0 : consistentHashFactory.hashCode());
result = prime * result + cacheMode.hashCode();
result = prime * result + numOwners;
result = prime * result + numSegments;
result = prime * result + (int) (timeout ^ (timeout >>> 32));
result = prime * result + ((persistentUUID == null) ? 0 : persistentUUID.hashCode());
result = prime * result + ((persistentStateChecksum == null) ? 0 : persistentStateChecksum.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
CacheJoinInfo other = (CacheJoinInfo) obj;
if (Float.floatToIntBits(capacityFactor) != Float.floatToIntBits(other.capacityFactor))
return false;
if (consistentHashFactory == null) {
if (other.consistentHashFactory != null)
return false;
} else if (!consistentHashFactory.equals(other.consistentHashFactory))
return false;
if (cacheMode != other.cacheMode)
return false;
if (numOwners != other.numOwners)
return false;
if (numSegments != other.numSegments)
return false;
if (timeout != other.timeout)
return false;
if (persistentUUID == null) {
if (other.persistentUUID != null)
return false;
} else if (!persistentUUID.equals(other.persistentUUID))
return false;
if (persistentStateChecksum == null) {
if (other.persistentStateChecksum != null)
return false;
} else if (!persistentStateChecksum.equals(other.persistentStateChecksum))
return false;
return true;
}
@Override
public String toString() {
return "CacheJoinInfo{" +
"consistentHashFactory=" + consistentHashFactory +
", numSegments=" + numSegments +
", numOwners=" + numOwners +
", timeout=" + timeout +
", cacheMode=" + cacheMode +
", persistentUUID=" + persistentUUID +
", persistentStateChecksum=" + persistentStateChecksum +
'}';
}
public static class Externalizer extends AbstractExternalizer<CacheJoinInfo> {
@Override
public void writeObject(ObjectOutput output, CacheJoinInfo cacheJoinInfo) throws IOException {
output.writeObject(cacheJoinInfo.consistentHashFactory);
output.writeInt(cacheJoinInfo.numSegments);
output.writeInt(cacheJoinInfo.numOwners);
output.writeLong(cacheJoinInfo.timeout);
MarshallUtil.marshallEnum(cacheJoinInfo.cacheMode, output);
output.writeFloat(cacheJoinInfo.capacityFactor);
output.writeObject(cacheJoinInfo.persistentUUID);
output.writeObject(cacheJoinInfo.persistentStateChecksum);
}
@Override
public CacheJoinInfo readObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
ConsistentHashFactory consistentHashFactory = (ConsistentHashFactory) unmarshaller.readObject();
int numSegments = unmarshaller.readInt();
int numOwners = unmarshaller.readInt();
long timeout = unmarshaller.readLong();
CacheMode cacheMode = MarshallUtil.unmarshallEnum(unmarshaller, CacheMode::valueOf);
float capacityFactor = unmarshaller.readFloat();
PersistentUUID persistentUUID = (PersistentUUID) unmarshaller.readObject();
Optional<Integer> persistentStateChecksum = (Optional<Integer>) unmarshaller.readObject();
return new CacheJoinInfo(consistentHashFactory, numSegments, numOwners, timeout, cacheMode,
capacityFactor, persistentUUID, persistentStateChecksum);
}
@Override
public Integer getId() {
return Ids.CACHE_JOIN_INFO;
}
@Override
public Set<Class<? extends CacheJoinInfo>> getTypeClasses() {
return Collections.singleton(CacheJoinInfo.class);
}
}
}
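// Hedged construction sketch (illustrative addition, not part of the original file).
// All argument values are placeholders; real joins derive them from the cache
// configuration ('chFactory' and 'uuid' are assumed to exist in scope).
class CacheJoinInfoUsageSketch {
   static CacheJoinInfo build(ConsistentHashFactory chFactory, PersistentUUID uuid) {
      return new CacheJoinInfo(chFactory, 256, 2, 60_000L, CacheMode.DIST_SYNC,
            1.0f, uuid, Optional.empty());
   }
}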
| 6,565
| 34.491892
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/RebalancingStatus.java
|
package org.infinispan.topology;
/**
* RebalancingStatus.
*
* @author Tristan Tarrant
* @since 8.1
*/
public enum RebalancingStatus {
SUSPENDED,
PENDING,
IN_PROGRESS,
COMPLETE
}
| 196
| 12.133333
| 32
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/ManagerStatusResponse.java
|
package org.infinispan.topology;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.marshall.InstanceReusingAdvancedExternalizer;
import org.infinispan.marshall.core.Ids;
/**
* @author Dan Berindei
* @since 7.1
*/
public class ManagerStatusResponse implements Serializable {
private final Map<String, CacheStatusResponse> caches;
private final boolean rebalancingEnabled;
public ManagerStatusResponse(Map<String, CacheStatusResponse> caches, boolean rebalancingEnabled) {
this.rebalancingEnabled = rebalancingEnabled;
this.caches = caches;
}
public Map<String, CacheStatusResponse> getCaches() {
return caches;
}
public boolean isRebalancingEnabled() {
return rebalancingEnabled;
}
@Override
public String toString() {
return "ManagerStatusResponse{" +
"caches=" + caches +
", rebalancingEnabled=" + rebalancingEnabled +
'}';
}
public static class Externalizer extends InstanceReusingAdvancedExternalizer<ManagerStatusResponse> {
@Override
public void doWriteObject(ObjectOutput output, ManagerStatusResponse cacheStatusResponse) throws IOException {
output.writeObject(cacheStatusResponse.caches);
output.writeBoolean(cacheStatusResponse.rebalancingEnabled);
}
@Override
public ManagerStatusResponse doReadObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
Map<String, CacheStatusResponse> caches = (Map<String, CacheStatusResponse>) unmarshaller.readObject();
boolean rebalancingEnabled = unmarshaller.readBoolean();
return new ManagerStatusResponse(caches, rebalancingEnabled);
}
@Override
public Integer getId() {
return Ids.MANAGER_STATUS_RESPONSE;
}
@Override
public Set<Class<? extends ManagerStatusResponse>> getTypeClasses() {
return Collections.<Class<? extends ManagerStatusResponse>>singleton(ManagerStatusResponse.class);
}
}
}
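// Hedged construction sketch (illustrative addition, not part of the original file).
// An empty cache map is a placeholder; coordinators receive real per-cache responses.
class ManagerStatusResponseUsageSketch {
   static ManagerStatusResponse empty() {
      return new ManagerStatusResponse(java.util.Collections.emptyMap(), true);
   }
}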
| 2,186
| 31.161765
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/PersistentUUIDManager.java
|
package org.infinispan.topology;
import java.util.List;
import java.util.function.UnaryOperator;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.transport.Address;
/**
* PersistentUUIDManager maintains a mapping of {@link PersistentUUID}s present in the cluster
*
* @author Tristan Tarrant
* @since 9.0
*/
@Scope(Scopes.GLOBAL)
public interface PersistentUUIDManager {
/**
* Adds a mapping between an {@link Address} and a {@link PersistentUUID}
    * @param address the node's address in the current cluster view
    * @param persistentUUID the node's persistent UUID
*/
void addPersistentAddressMapping(Address address, PersistentUUID persistentUUID);
/**
* Retrieves the {@link PersistentUUID} of a node given its {@link Address}
* @param address the address to lookup
* @return the persistentuuid of the node, null if no mapping is present
*/
PersistentUUID getPersistentUuid(Address address);
/**
* Retrieves the {@link Address} of a node given its {@link PersistentUUID}
* @param persistentUUID the persistent uuid to lookup
* @return the address of the node, null if no mapping is present
*/
Address getAddress(PersistentUUID persistentUUID);
/**
* Removes any address mapping for the specified {@link PersistentUUID}
* @param persistentUUID the {@link PersistentUUID} for which to remove mappings
*/
void removePersistentAddressMapping(PersistentUUID persistentUUID);
/**
* Removes any address mapping for the specified {@link Address}
* @param address the {@link Address} for which to remove mappings
*/
void removePersistentAddressMapping(Address address);
/**
* Returns a list of {@link PersistentUUID}s for the supplied {@link Address}es
    * @param addresses the addresses to look up
    * @return the corresponding persistent UUIDs, in the same order as the input
*/
List<PersistentUUID> mapAddresses(List<Address> addresses);
/**
* Provides a remapping operator which translates addresses to persistentuuids
*/
UnaryOperator<Address> addressToPersistentUUID();
/**
* Provides a remapping operator which translates persistentuuids to addresses
*/
UnaryOperator<Address> persistentUUIDToAddress();
}
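// Hedged usage sketch (illustrative addition, not part of the original file).
// 'uuidManager' is assumed to be the global component and 'members' a current view;
// PersistentUUID implements Address, so the remapping operator stays typed as Address.
class PersistentUUIDManagerUsageSketch {
   static List<Address> remap(PersistentUUIDManager uuidManager, List<Address> members) {
      UnaryOperator<Address> toUuid = uuidManager.addressToPersistentUUID();
      return members.stream().map(toUuid).collect(java.util.stream.Collectors.toList());
   }
}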
| 2,175
| 31
| 94
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/MissingMembersException.java
|
package org.infinispan.topology;
import org.infinispan.commons.CacheException;
/**
* Thrown when members are missing after a cluster shutdown.
*
 * A cluster can miss members after a full cluster shutdown and restart, when some of the previous members have not joined again.
*
* @since 15.0
*/
public class MissingMembersException extends CacheException {
public MissingMembersException(String msg) {
super(msg);
}
}
| 418
| 23.647059
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/ClusterTopologyManagerFactory.java
|
package org.infinispan.topology;
import org.infinispan.factories.AbstractComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* Factory for ClusterTopologyManager implementations.
*
* @author Dan Berindei
* @since 5.2
*/
@Scope(Scopes.GLOBAL)
@DefaultFactoryFor(classes = ClusterTopologyManager.class)
public class ClusterTopologyManagerFactory extends AbstractComponentFactory implements AutoInstantiableFactory {
@Override
public Object construct(String componentName) {
if (globalConfiguration.transport().transport() == null)
return null;
return new ClusterTopologyManagerImpl();
}
}
| 809
| 29
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/CacheTopology.java
|
package org.infinispan.topology;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.infinispan.commons.marshall.InstanceReusingAdvancedExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* The status of a cache from a distribution/state transfer point of view.
* <p/>
* The pending CH can be {@code null} if we don't have a state transfer in progress.
* <p/>
* The {@code topologyId} is incremented every time the topology changes (e.g. a member leaves, state transfer
* starts or ends).
* The {@code rebalanceId} is not modified when the consistent hashes are updated without requiring state
* transfer (e.g. when a member leaves).
*
* @author Dan Berindei
* @since 5.2
*/
public class CacheTopology {
private static final Log log = LogFactory.getLog(CacheTopology.class);
private final int topologyId;
private final int rebalanceId;
private final boolean restoredFromState;
private final ConsistentHash currentCH;
private final ConsistentHash pendingCH;
private final ConsistentHash unionCH;
private final Phase phase;
private final List<Address> actualMembers;
// The persistent UUID of each actual member
private final List<PersistentUUID> persistentUUIDs;
public CacheTopology(int topologyId, int rebalanceId, ConsistentHash currentCH, ConsistentHash pendingCH,
Phase phase, List<Address> actualMembers, List<PersistentUUID> persistentUUIDs) {
this(topologyId, rebalanceId, currentCH, pendingCH, null, phase, actualMembers, persistentUUIDs);
}
public CacheTopology(int topologyId, int rebalanceId, boolean restoredTopology, ConsistentHash currentCH, ConsistentHash pendingCH,
Phase phase, List<Address> actualMembers, List<PersistentUUID> persistentUUIDs) {
this(topologyId, rebalanceId, restoredTopology, currentCH, pendingCH, null, phase, actualMembers, persistentUUIDs);
}
public CacheTopology(int topologyId, int rebalanceId, ConsistentHash currentCH, ConsistentHash pendingCH,
ConsistentHash unionCH, Phase phase, List<Address> actualMembers, List<PersistentUUID> persistentUUIDs) {
this(topologyId, rebalanceId, false, currentCH, pendingCH, unionCH, phase, actualMembers, persistentUUIDs);
}
public CacheTopology(int topologyId, int rebalanceId, boolean restoredTopology, ConsistentHash currentCH, ConsistentHash pendingCH,
ConsistentHash unionCH, Phase phase, List<Address> actualMembers, List<PersistentUUID> persistentUUIDs) {
if (pendingCH != null && !pendingCH.getMembers().containsAll(currentCH.getMembers())) {
throw new IllegalArgumentException("A cache topology's pending consistent hash must " +
"contain all the current consistent hash's members: currentCH=" + currentCH + ", pendingCH=" + pendingCH);
}
if (persistentUUIDs != null && persistentUUIDs.size() != actualMembers.size()) {
throw new IllegalArgumentException("There must be one persistent UUID for each actual member");
}
this.topologyId = topologyId;
this.rebalanceId = rebalanceId;
this.currentCH = currentCH;
this.pendingCH = pendingCH;
this.unionCH = unionCH;
this.phase = phase;
this.actualMembers = actualMembers;
this.persistentUUIDs = persistentUUIDs;
this.restoredFromState = restoredTopology;
}
public int getTopologyId() {
return topologyId;
}
/**
* The current consistent hash.
*/
public ConsistentHash getCurrentCH() {
return currentCH;
}
/**
* The future consistent hash. Should be {@code null} if there is no rebalance in progress.
*/
public ConsistentHash getPendingCH() {
return pendingCH;
}
/**
* The union of the current and future consistent hashes. Should be {@code null} if there is no rebalance in progress.
*/
public ConsistentHash getUnionCH() {
return unionCH;
}
/**
* The id of the latest started rebalance.
*/
public int getRebalanceId() {
return rebalanceId;
}
/**
* @return The nodes that are members in both consistent hashes (if {@code pendingCH != null},
* otherwise the members of the current CH).
* @see #getActualMembers()
*/
public List<Address> getMembers() {
if (pendingCH != null)
return pendingCH.getMembers();
else if (currentCH != null)
return currentCH.getMembers();
else
return Collections.emptyList();
}
/**
* @return The nodes that are active members of the cache. It should be equal to {@link #getMembers()} when the
* cache is available, and a strict subset if the cache is in degraded mode.
* @see org.infinispan.partitionhandling.AvailabilityMode
*/
public List<Address> getActualMembers() {
return actualMembers;
}
public List<PersistentUUID> getMembersPersistentUUIDs() {
return persistentUUIDs;
}
public boolean wasTopologyRestoredFromState() {
return restoredFromState;
}
/**
* Read operations should always go to the "current" owners.
*/
public ConsistentHash getReadConsistentHash() {
switch (phase) {
case CONFLICT_RESOLUTION:
case NO_REBALANCE:
assert pendingCH == null;
assert unionCH == null;
return currentCH;
case READ_OLD_WRITE_ALL:
assert pendingCH != null;
assert unionCH != null;
return currentCH;
case READ_ALL_WRITE_ALL:
assert pendingCH != null;
return unionCH;
case READ_NEW_WRITE_ALL:
assert unionCH != null;
return pendingCH;
default:
throw new IllegalStateException();
}
}
/**
* When there is a rebalance in progress, write operations should go to the union of the "current" and "future" owners.
*/
public ConsistentHash getWriteConsistentHash() {
switch (phase) {
case CONFLICT_RESOLUTION:
case NO_REBALANCE:
assert pendingCH == null;
assert unionCH == null;
return currentCH;
case READ_OLD_WRITE_ALL:
case READ_ALL_WRITE_ALL:
case READ_NEW_WRITE_ALL:
assert pendingCH != null;
assert unionCH != null;
return unionCH;
default:
throw new IllegalStateException();
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CacheTopology that = (CacheTopology) o;
if (topologyId != that.topologyId) return false;
if (rebalanceId != that.rebalanceId) return false;
if (phase != that.phase) return false;
if (currentCH != null ? !currentCH.equals(that.currentCH) : that.currentCH != null) return false;
if (pendingCH != null ? !pendingCH.equals(that.pendingCH) : that.pendingCH != null) return false;
if (unionCH != null ? !unionCH.equals(that.unionCH) : that.unionCH != null) return false;
if (actualMembers != null ? !actualMembers.equals(that.actualMembers) : that.actualMembers != null) return false;
return true;
}
@Override
public int hashCode() {
int result = topologyId;
result = 31 * result + rebalanceId;
result = 31 * result + (phase != null ? phase.hashCode() : 0);
result = 31 * result + (currentCH != null ? currentCH.hashCode() : 0);
result = 31 * result + (pendingCH != null ? pendingCH.hashCode() : 0);
result = 31 * result + (unionCH != null ? unionCH.hashCode() : 0);
result = 31 * result + (actualMembers != null ? actualMembers.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "CacheTopology{" +
"id=" + topologyId +
", phase=" + phase +
", rebalanceId=" + rebalanceId +
", currentCH=" + currentCH +
", pendingCH=" + pendingCH +
", unionCH=" + unionCH +
", actualMembers=" + actualMembers +
", persistentUUIDs=" + persistentUUIDs +
'}';
}
public final void logRoutingTableInformation(String cacheName) {
if (log.isTraceEnabled()) {
log.tracef("[%s] Current consistent hash's routing table: %s", cacheName, currentCH.getRoutingTableAsString());
if (pendingCH != null) log.tracef("[%s] Pending consistent hash's routing table: %s", cacheName, pendingCH.getRoutingTableAsString());
}
}
public Phase getPhase() {
return phase;
}
public static class Externalizer extends InstanceReusingAdvancedExternalizer<CacheTopology> {
@Override
public void doWriteObject(ObjectOutput output, CacheTopology cacheTopology) throws IOException {
output.writeInt(cacheTopology.topologyId);
output.writeInt(cacheTopology.rebalanceId);
output.writeBoolean(cacheTopology.restoredFromState);
output.writeObject(cacheTopology.currentCH);
output.writeObject(cacheTopology.pendingCH);
output.writeObject(cacheTopology.unionCH);
output.writeObject(cacheTopology.actualMembers);
output.writeObject(cacheTopology.persistentUUIDs);
MarshallUtil.marshallEnum(cacheTopology.phase, output);
}
@Override
public CacheTopology doReadObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
int topologyId = unmarshaller.readInt();
int rebalanceId = unmarshaller.readInt();
boolean restoredFromState = unmarshaller.readBoolean();
ConsistentHash currentCH = (ConsistentHash) unmarshaller.readObject();
ConsistentHash pendingCH = (ConsistentHash) unmarshaller.readObject();
ConsistentHash unionCH = (ConsistentHash) unmarshaller.readObject();
List<Address> actualMembers = (List<Address>) unmarshaller.readObject();
List<PersistentUUID> persistentUUIDs = (List<PersistentUUID>) unmarshaller.readObject();
Phase phase = MarshallUtil.unmarshallEnum(unmarshaller, Phase::valueOf);
return new CacheTopology(topologyId, rebalanceId, restoredFromState, currentCH, pendingCH, unionCH, phase, actualMembers, persistentUUIDs);
}
@Override
public Integer getId() {
return Ids.CACHE_TOPOLOGY;
}
@Override
public Set<Class<? extends CacheTopology>> getTypeClasses() {
return Collections.singleton(CacheTopology.class);
}
}
/**
* Phase of the rebalance process. Using four phases guarantees these properties:
*
* 1. T(x+1).writeCH contains all nodes from Tx.readCH (this is the requirement for ISPN-5021)
* 2. Tx.readCH and T(x+1).readCH has non-empty subset of nodes (that will allow no blocking for read commands
* and reading only entries node owns according to readCH)
*
* Old entries should be wiped out only after coming to the {@link #NO_REBALANCE} phase.
*/
public enum Phase {
/**
* Only currentCH should be set, this works as both readCH and writeCH
*/
NO_REBALANCE(false),
/**
* Interim phase between NO_REBALANCE and READ_OLD_WRITE_ALL:
* readCH is set locally from the node's previous topology, while writeCH contains all members after the merge
*/
CONFLICT_RESOLUTION(false),
/**
* Used during state transfer: readCH == currentCH, writeCH = unionCH
*/
READ_OLD_WRITE_ALL(true),
/**
* Used after state transfer completes: readCH == writeCH = unionCH
*/
READ_ALL_WRITE_ALL(false),
/**
* Intermediate state that prevents ISPN-5021: readCH == pendingCH, writeCH = unionCH
*/
READ_NEW_WRITE_ALL(false);
private static final Phase[] values = Phase.values();
private final boolean rebalance;
Phase(boolean rebalance) {
this.rebalance = rebalance;
}
public boolean isRebalance() {
return rebalance;
}
public static Phase valueOf(int ordinal) {
return values[ordinal];
}
}
}
| 12,582
| 36.673653
| 147
|
java
|
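A small sketch tying together the phase logic above: given a topology received from the coordinator, this hypothetical helper (not part of Infinispan) reports which consistent hashes a node would use for reads and writes in the current rebalance phase:

import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.topology.CacheTopology;

final class TopologyPhaseSketch {
   // The read hash changes across the four phases (currentCH -> unionCH -> pendingCH),
   // while writes always target the union whenever a rebalance is in progress.
   static String describe(CacheTopology topology) {
      ConsistentHash readCH = topology.getReadConsistentHash();
      ConsistentHash writeCH = topology.getWriteConsistentHash();
      return String.format("phase=%s topologyId=%d readCH=%s writeCH=%s",
            topology.getPhase(), topology.getTopologyId(), readCH, writeCH);
   }
}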
null |
infinispan-main/core/src/main/java/org/infinispan/topology/RebalanceConfirmationCollector.java
|
package org.infinispan.topology;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Collects the rebalance phase confirmations from the cache members and runs a completion callback once every member has confirmed.
*
* @author Dan Berindei
* @since 5.2
*/
class RebalanceConfirmationCollector {
private static final Log log = LogFactory.getLog(RebalanceConfirmationCollector.class);
private final String cacheName;
private final int topologyId;
private final Set<Address> confirmationsNeeded;
private final Runnable whenCompleted;
public RebalanceConfirmationCollector(String cacheName, int topologyId, Collection<Address> members, Runnable whenCompleted) {
this.cacheName = cacheName;
this.topologyId = topologyId;
this.confirmationsNeeded = new HashSet<Address>(members);
this.whenCompleted = whenCompleted;
log.tracef("Initialized topology confirmation collector %d@%s, initial list is %s",
topologyId, cacheName, confirmationsNeeded);
}
/**
* Confirms that {@code node} finished the current phase; runs the completion callback once every member has confirmed.
*/
public void confirmPhase(Address node, int receivedTopologyId) {
synchronized (this) {
if (topologyId > receivedTopologyId) {
log.tracef("Ignoring rebalance confirmation with old topology from %s " +
"for cache %s, expecting topology id %d but got %d", node, cacheName, topologyId, receivedTopologyId);
// A stale confirmation must not remove the node from the pending set
return;
}
boolean removed = confirmationsNeeded.remove(node);
if (!removed) {
log.tracef("Rebalance confirmation collector %d@%s ignored confirmation for %s, which is already confirmed",
topologyId, cacheName, node);
return;
}
log.tracef("Rebalance confirmation collector %d@%s received confirmation for %s, remaining list is %s",
topologyId, cacheName, node, confirmationsNeeded);
if (confirmationsNeeded.isEmpty()) {
whenCompleted.run();
}
}
}
/**
* Retains only the nodes still in the cluster; runs the completion callback if that empties the pending set.
*/
public void updateMembers(Collection<Address> newMembers) {
synchronized (this) {
// retainAll returns true only if the pending set actually changed
boolean modified = confirmationsNeeded.retainAll(newMembers);
log.tracef("Rebalance confirmation collector %d@%s members list updated, remaining list is %s",
topologyId, cacheName, confirmationsNeeded);
if (modified && confirmationsNeeded.isEmpty()) {
whenCompleted.run();
}
}
}
@Override
public String toString() {
synchronized (this) {
return "RebalanceConfirmationCollector{" +
"cacheName=" + cacheName +
", topologyId=" + topologyId +
", confirmationsNeeded=" + confirmationsNeeded +
'}';
}
}
}
| 2,925
| 33.423529
| 129
|
java
|
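A minimal sketch of the coordinator-side flow the collector supports; since the class is package-private this assumes code living in org.infinispan.topology, and the cache name and topology id are made up:

import java.util.List;
import org.infinispan.remoting.transport.Address;

final class ConfirmationSketch {
   // Create a collector for topology 7 of a hypothetical "users" cache, then feed
   // it one confirmation per member; the callback runs exactly once, when the
   // last pending member confirms.
   static void collectAll(List<Address> members) {
      RebalanceConfirmationCollector collector = new RebalanceConfirmationCollector(
            "users", 7, members, () -> System.out.println("phase confirmed by all members"));
      for (Address member : members) {
         collector.confirmPhase(member, 7);
      }
   }
}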
null |
infinispan-main/core/src/main/java/org/infinispan/topology/LocalTopologyManagerFactory.java
|
package org.infinispan.topology;
import org.infinispan.factories.AbstractComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* Factory for LocalTopologyManager implementations.
*
* @author Dan Berindei
* @since 5.2
*/
@Scope(Scopes.GLOBAL)
@DefaultFactoryFor(classes = LocalTopologyManager.class)
public class LocalTopologyManagerFactory extends AbstractComponentFactory implements AutoInstantiableFactory {
@Override
public Object construct(String componentName) {
if (globalConfiguration.transport().transport() == null)
return null;
return new LocalTopologyManagerImpl();
}
}
| 803
| 28.777778
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/CacheJoinException.java
|
package org.infinispan.topology;
import org.infinispan.commons.CacheException;
/**
* Thrown when a cache fails to join a cluster
*
* @author Tristan Tarrant
* @since 9.0
*/
public class CacheJoinException extends CacheException {
private static final long serialVersionUID = 4394453405294292800L;
public CacheJoinException() {
super();
}
public CacheJoinException(Throwable cause) {
super(cause);
}
public CacheJoinException(String msg) {
super(msg);
}
public CacheJoinException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 595
| 18.866667
| 69
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/CacheTopologyHandler.java
|
package org.infinispan.topology;
import java.util.concurrent.CompletionStage;
import org.infinispan.statetransfer.StateTransferManager;
/**
* The link between {@link LocalTopologyManager} and {@link StateTransferManager}.
*
* @author Dan Berindei
* @since 5.2
*/
public interface CacheTopologyHandler {
/**
* Invoked when the CH has to be immediately updated because of a leave or when the state transfer has completed
* and we have to install a permanent CH (pendingCH == null). A state transfer is not always required.
*/
CompletionStage<Void> updateConsistentHash(CacheTopology cacheTopology);
/**
* Invoked when state transfer has to be started.
*
* The caller will not consider the local rebalance done when this method returns. Instead, the handler
* will have to call {@link LocalTopologyManager#confirmRebalancePhase(String, int, int, Throwable)}
*/
CompletionStage<Void> rebalance(CacheTopology cacheTopology);
}
| 976
| 32.689655
| 115
|
java
|
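A minimal sketch of the contract described above: a handler that applies updates trivially and confirms each rebalance phase back to the LocalTopologyManager. The real implementation (the StateTransferManager) moves state before confirming; this no-op version only illustrates the call sequence:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.CacheTopologyHandler;
import org.infinispan.topology.LocalTopologyManager;

final class NoOpTopologyHandler implements CacheTopologyHandler {
   private final LocalTopologyManager topologyManager;
   private final String cacheName;

   NoOpTopologyHandler(LocalTopologyManager topologyManager, String cacheName) {
      this.topologyManager = topologyManager;
      this.cacheName = cacheName;
   }

   @Override
   public CompletionStage<Void> updateConsistentHash(CacheTopology cacheTopology) {
      // Nothing to transfer in this sketch; accept the new topology immediately.
      return CompletableFuture.completedFuture(null);
   }

   @Override
   public CompletionStage<Void> rebalance(CacheTopology cacheTopology) {
      // The caller does not consider the phase done until we confirm it.
      topologyManager.confirmRebalancePhase(cacheName, cacheTopology.getTopologyId(),
            cacheTopology.getRebalanceId(), null);
      return CompletableFuture.completedFuture(null);
   }
}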
null |
infinispan-main/core/src/main/java/org/infinispan/topology/PersistentUUID.java
|
package org.infinispan.topology;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.UUID;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.transport.Address;
/**
* PersistentUUID. A special {@link Address} UUID whose purpose is to remain unchanged across node
* restarts when using global state.
*
* @author Tristan Tarrant
* @since 8.1
*/
public class PersistentUUID implements Address {
final UUID uuid;
final int hashCode;
private PersistentUUID(UUID uuid) {
this.uuid = uuid;
this.hashCode = uuid.hashCode();
}
public PersistentUUID(long msb, long lsb) {
this(new UUID(msb, lsb));
}
public static PersistentUUID randomUUID() {
return new PersistentUUID(Util.threadLocalRandomUUID());
}
public static PersistentUUID fromString(String name) {
return new PersistentUUID(UUID.fromString(name));
}
public long getMostSignificantBits() {
return uuid.getMostSignificantBits();
}
public long getLeastSignificantBits() {
return uuid.getLeastSignificantBits();
}
@Override
public int compareTo(Address o) {
PersistentUUID other = (PersistentUUID) o;
return uuid.compareTo(other.uuid);
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public String toString() {
return uuid.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
PersistentUUID other = (PersistentUUID) obj;
if (uuid == null) {
if (other.uuid != null)
return false;
} else if (!uuid.equals(other.uuid))
return false;
return true;
}
public static class Externalizer extends AbstractExternalizer<PersistentUUID> {
@Override
public Set<Class<? extends PersistentUUID>> getTypeClasses() {
return Collections.<Class<? extends PersistentUUID>>singleton(PersistentUUID.class);
}
@Override
public void writeObject(ObjectOutput output, PersistentUUID uuid) throws IOException {
output.writeLong(uuid.getMostSignificantBits());
output.writeLong(uuid.getLeastSignificantBits());
}
@Override
public PersistentUUID readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new PersistentUUID(input.readLong(), input.readLong());
}
@Override
public Integer getId() {
return Ids.PERSISTENT_UUID;
}
}
}
| 2,819
| 24.87156
| 102
|
java
|
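Since the whole point of the class is surviving restarts, a quick runnable sketch of the persist-and-restore round trip through its string form:

import org.infinispan.topology.PersistentUUID;

final class PersistentUUIDSketch {
   static void roundTrip() {
      PersistentUUID original = PersistentUUID.randomUUID();
      // Persist as a plain string (e.g. a property in the global state)...
      String stored = original.toString();
      // ...and restore it unchanged after a node restart.
      PersistentUUID restored = PersistentUUID.fromString(stored);
      assert original.equals(restored) && original.hashCode() == restored.hashCode();
   }
}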
null |
infinispan-main/core/src/main/java/org/infinispan/topology/PersistentUUIDManagerImpl.java
|
package org.infinispan.topology;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.UnaryOperator;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Implementation of the {@link PersistentUUIDManager} interface
*
* @author Tristan Tarrant
* @since 9.0
*/
public class PersistentUUIDManagerImpl implements PersistentUUIDManager {
private static final Log log = LogFactory.getLog(PersistentUUIDManagerImpl.class);
private final ConcurrentMap<Address, PersistentUUID> address2uuid = new ConcurrentHashMap<>();
private final ConcurrentMap<PersistentUUID, Address> uuid2address = new ConcurrentHashMap<>();
@Override
public void addPersistentAddressMapping(Address address, PersistentUUID persistentUUID) {
address2uuid.put(address, persistentUUID);
uuid2address.put(persistentUUID, address);
}
@Override
public PersistentUUID getPersistentUuid(Address address) {
return address2uuid.get(address);
}
@Override
public Address getAddress(PersistentUUID persistentUUID) {
return uuid2address.get(persistentUUID);
}
@Override
public void removePersistentAddressMapping(PersistentUUID persistentUUID) {
if (persistentUUID == null) {
//A null would be invalid here, but letting it proceed would trigger an NPE
//which would hide the real issue.
return;
}
Address address = uuid2address.get(persistentUUID);
if (address != null) {
address2uuid.remove(address);
uuid2address.remove(persistentUUID);
}
}
@Override
public void removePersistentAddressMapping(Address address) {
PersistentUUID uuid = address2uuid.get(address);
if (uuid != null) {
uuid2address.remove(uuid);
address2uuid.remove(address);
}
}
@Override
public List<PersistentUUID> mapAddresses(List<Address> addresses) {
ArrayList<PersistentUUID> list = new ArrayList<>(addresses.size());
for (Address address : addresses) {
PersistentUUID persistentUUID = address2uuid.get(address);
if (persistentUUID == null) {
// This should never happen, but if it does, better log it here to avoid it being swallowed elsewhere
NullPointerException npe = new NullPointerException();
log.fatal("Cannot find mapping for address "+address, npe);
throw npe;
} else {
list.add(persistentUUID);
}
}
return list;
}
@Override
public UnaryOperator<Address> addressToPersistentUUID() {
return (address) -> address2uuid.get(address);
}
@Override
public UnaryOperator<Address> persistentUUIDToAddress() {
return (address) -> uuid2address.get(address);
}
}
| 2,953
| 31.822222
| 113
|
java
|
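A runnable sketch of the bidirectional mapping; conveniently, PersistentUUID itself implements Address, so one can stand in for a transport address here without a running cluster:

import java.util.Collections;
import org.infinispan.remoting.transport.Address;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.PersistentUUIDManager;
import org.infinispan.topology.PersistentUUIDManagerImpl;

final class MappingSketch {
   static void demo() {
      Address node = PersistentUUID.randomUUID();        // stand-in transport address
      PersistentUUID uuid = PersistentUUID.randomUUID(); // the stable identity
      PersistentUUIDManager manager = new PersistentUUIDManagerImpl();
      manager.addPersistentAddressMapping(node, uuid);
      // Both directions resolve, directly or through the remapping operators.
      assert uuid.equals(manager.getPersistentUuid(node));
      assert node.equals(manager.persistentUUIDToAddress().apply(uuid));
      assert Collections.singletonList(uuid).equals(manager.mapAddresses(Collections.singletonList(node)));
   }
}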
null |
infinispan-main/core/src/main/java/org/infinispan/topology/LocalTopologyManagerImpl.java
|
package org.infinispan.topology;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.infinispan.commons.util.concurrent.CompletableFutures.completedNull;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import static org.infinispan.factories.KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR;
import static org.infinispan.util.concurrent.CompletionStages.handleAndCompose;
import static org.infinispan.util.logging.Log.CLUSTER;
import static org.infinispan.util.logging.Log.CONFIG;
import static org.infinispan.util.logging.Log.CONTAINER;
import static org.infinispan.util.logging.events.Messages.MESSAGES;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.topology.CacheAvailabilityUpdateCommand;
import org.infinispan.commands.topology.CacheJoinCommand;
import org.infinispan.commands.topology.CacheLeaveCommand;
import org.infinispan.commands.topology.CacheShutdownRequestCommand;
import org.infinispan.commands.topology.RebalancePhaseConfirmCommand;
import org.infinispan.commands.topology.RebalancePolicyUpdateCommand;
import org.infinispan.commands.topology.RebalanceStatusRequestCommand;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.Version;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.globalstate.GlobalStateManager;
import org.infinispan.globalstate.GlobalStateProvider;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.globalstate.impl.GlobalStateManagerImpl;
import org.infinispan.globalstate.impl.ScopedPersistentStateImpl;
import org.infinispan.jmx.annotations.DataType;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.impl.VoidResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.util.concurrent.ActionSequencer;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogManager;
import org.infinispan.util.logging.events.EventLogger;
import net.jcip.annotations.GuardedBy;
/**
* The {@code LocalTopologyManager} implementation.
*
* @author Dan Berindei
* @since 5.2
*/
@MBean(objectName = "LocalTopologyManager", description = "Controls the cache membership and state transfer")
@Scope(Scopes.GLOBAL)
public class LocalTopologyManagerImpl implements LocalTopologyManager, GlobalStateProvider {
private static final Log log = LogFactory.getLog(LocalTopologyManagerImpl.class);
@Inject Transport transport;
@Inject
@ComponentName(NON_BLOCKING_EXECUTOR)
ExecutorService nonBlockingExecutor;
@Inject
BlockingManager blockingManager;
@Inject
@ComponentName(TIMEOUT_SCHEDULE_EXECUTOR)
ScheduledExecutorService timeoutExecutor;
@Inject GlobalComponentRegistry gcr;
@Inject TimeService timeService;
@Inject GlobalStateManager globalStateManager;
@Inject PersistentUUIDManager persistentUUIDManager;
@Inject EventLogManager eventLogManager;
@Inject CacheManagerNotifier cacheManagerNotifier;
// Not used directly, but we have to start the ClusterTopologyManager before sending the join request
@Inject ClusterTopologyManager clusterTopologyManager;
private TopologyManagementHelper helper;
private ActionSequencer actionSequencer;
private EventLogger eventLogger;
// We synchronize on the entire map while handling a status request, to make sure there are no concurrent topology
// updates from the old coordinator.
private final Map<String, LocalCacheStatus> runningCaches =
Collections.synchronizedMap(new HashMap<>());
private volatile boolean running;
@GuardedBy("runningCaches")
private int latestStatusResponseViewId;
private PersistentUUID persistentUUID;
private EventLoggerViewListener viewListener;
// This must be invoked before GlobalStateManagerImpl.start
@Start(priority = 0)
public void preStart() {
helper = new TopologyManagementHelper(gcr);
actionSequencer = new ActionSequencer(nonBlockingExecutor, true, timeService);
if (globalStateManager != null) {
globalStateManager.registerStateProvider(this);
}
}
// Arbitrary value, only need to start after the (optional) GlobalStateManager and JGroupsTransport
@Start(priority = 100)
public void start() {
if (log.isTraceEnabled()) {
log.tracef("Starting LocalTopologyManager on %s", transport.getAddress());
}
if (persistentUUID == null) {
persistentUUID = PersistentUUID.randomUUID();
globalStateManager.writeGlobalState();
}
persistentUUIDManager.addPersistentAddressMapping(transport.getAddress(), persistentUUID);
eventLogger = eventLogManager.getEventLogger()
.scope(transport.getAddress())
.context(this.getClass().getName());
viewListener = new EventLoggerViewListener(eventLogManager);
cacheManagerNotifier.addListener(viewListener);
synchronized (runningCaches) {
latestStatusResponseViewId = transport.getViewId();
}
running = true;
}
// Need to stop after ClusterTopologyManagerImpl and before the JGroupsTransport
@Stop(priority = 110)
public void stop() {
if (log.isTraceEnabled()) {
log.tracef("Stopping LocalTopologyManager on %s", transport.getAddress());
}
cacheManagerNotifier.removeListener(viewListener);
running = false;
}
@Override
public CompletionStage<CacheTopology> join(String cacheName, CacheJoinInfo joinInfo, CacheTopologyHandler stm,
PartitionHandlingManager phm) {
// Use the action sequencer for the initial join request
// This ensures that all topology updates from the coordinator will be delayed
// until the join and the GET_CACHE_LISTENERS request are done
return orderOnCache(cacheName, () -> {
log.debugf("Node %s joining cache %s", transport.getAddress(), cacheName);
LocalCacheStatus cacheStatus = new LocalCacheStatus(joinInfo, stm, phm, getNumberMembersFromState(cacheName, joinInfo));
LocalCacheStatus previousStatus = runningCaches.put(cacheName, cacheStatus);
if (previousStatus != null) {
throw new IllegalStateException("A cache can only join once");
}
long timeout = joinInfo.getTimeout();
long endTime = timeService.expectedEndTime(timeout, MILLISECONDS);
return sendJoinRequest(cacheName, joinInfo, timeout, endTime)
.thenCompose(joinResponse -> handleJoinResponse(cacheName, cacheStatus, joinResponse));
});
}
private CompletionStage<CacheTopology> join(String cacheName, LocalCacheStatus cacheStatus) {
return orderOnCache(cacheName, () -> {
if (runningCaches.get(cacheName) != cacheStatus) {
throw new IllegalStateException("Cache status changed while joining");
}
long timeout = cacheStatus.getJoinInfo().getTimeout();
long endTime = timeService.expectedEndTime(timeout, MILLISECONDS);
return sendJoinRequest(cacheName, cacheStatus.getJoinInfo(), timeout, endTime)
.thenCompose(joinResponse -> handleJoinResponse(cacheName, cacheStatus, joinResponse));
});
}
private CompletionStage<CacheStatusResponse> sendJoinRequest(String cacheName, CacheJoinInfo joinInfo, long timeout,
long endTime) {
int viewId = transport.getViewId();
ReplicableCommand command = new CacheJoinCommand(cacheName, transport.getAddress(), joinInfo, viewId);
return handleAndCompose(helper.executeOnCoordinator(transport, command, timeout), (response, throwable) -> {
int currentViewId = transport.getViewId();
if (viewId != currentViewId) {
log.tracef("Received new view %d before join response for cache %s, retrying", currentViewId, cacheName);
return sendJoinRequest(cacheName, joinInfo, timeout, endTime);
}
if (throwable == null) {
if (response != null) {
return CompletableFuture.completedFuture(((CacheStatusResponse) response));
} else {
log.debugf("Coordinator sent a null join response, retrying in view %d", viewId + 1);
return retryJoinInView(cacheName, joinInfo, timeout, endTime, viewId + 1);
}
}
Throwable t = CompletableFutures.extractException(throwable);
if (t instanceof SuspectException) {
// Either the coordinator is shutting down
// Or the JGroups stack includes FORK and the coordinator hasn't connected its ForkChannel yet.
log.debugf("Join request received CacheNotFoundResponse for cache %s, retrying", cacheName);
} else {
log.debugf(t, "Join request failed for cache %s", cacheName);
if (t instanceof TimeoutException) {
throw (TimeoutException) t;
}
throw (CacheJoinException) t.getCause();
}
// Can't use a value based on the state transfer timeout because cache org.infinispan.CONFIG
// uses the default timeout, which is too long for tests (4 minutes)
long delay = 100;
return CompletionStages.scheduleNonBlocking(
() -> sendJoinRequest(cacheName, joinInfo, timeout, endTime),
timeoutExecutor, delay, MILLISECONDS);
});
}
private CompletionStage<CacheStatusResponse> retryJoinInView(String cacheName, CacheJoinInfo joinInfo,
long timeout, long endTime, int viewId) {
return withView(viewId, timeout, MILLISECONDS)
.thenCompose(v -> sendJoinRequest(cacheName, joinInfo, timeout, endTime));
}
public CompletionStage<CacheTopology> handleJoinResponse(String cacheName, LocalCacheStatus cacheStatus,
CacheStatusResponse initialStatus) {
int viewId = transport.getViewId();
return doHandleTopologyUpdate(cacheName, initialStatus.getCacheTopology(), initialStatus.getAvailabilityMode(),
viewId, transport.getCoordinator(), cacheStatus)
.thenCompose(applied -> {
if (!applied) {
throw new IllegalStateException(
"We already had a newer topology by the time we received the join response");
}
LocalCacheStatus lcs = runningCaches.get(cacheName);
if (initialStatus.getStableTopology() == null && !transport.isCoordinator()) {
CONTAINER.recoverFromStateMissingMembers(cacheName, initialStatus.joinedMembers(), lcs.getStableMembersSize());
}
cacheStatus.setCurrentMembers(initialStatus.joinedMembers());
return doHandleStableTopologyUpdate(cacheName, initialStatus.getStableTopology(), viewId,
transport.getCoordinator(), cacheStatus);
})
.thenApply(ignored -> initialStatus.getCacheTopology());
}
private int getNumberMembersFromState(String cacheName, CacheJoinInfo joinInfo) {
Optional<ScopedPersistentState> optional = globalStateManager.readScopedState(cacheName);
return optional.map(state -> {
ConsistentHash ch = joinInfo.getConsistentHashFactory().fromPersistentState(state);
return ch.getMembers().size();
}).orElse(-1);
}
@Override
public void leave(String cacheName, long timeout) {
log.debugf("Node %s leaving cache %s", transport.getAddress(), cacheName);
runningCaches.remove(cacheName);
ReplicableCommand command = new CacheLeaveCommand(cacheName, transport.getAddress(), transport.getViewId());
try {
CompletionStages.join(helper.executeOnCoordinator(transport, command, timeout));
} catch (Exception e) {
log.debugf(e, "Error sending the leave request for cache %s to coordinator", cacheName);
}
}
@Override
public void confirmRebalancePhase(String cacheName, int topologyId, int rebalanceId, Throwable throwable) {
try {
// Note that if the coordinator changes again after we sent the command, we will get another
// query for the status of our running caches. So we don't need to retry if the command failed.
helper.executeOnCoordinatorAsync(transport,
new RebalancePhaseConfirmCommand(cacheName, transport.getAddress(), throwable, topologyId, transport.getViewId()));
} catch (Exception e) {
log.debugf(e, "Error sending the rebalance completed notification for cache %s to the coordinator",
cacheName);
}
}
// called by the coordinator
@Override
public CompletionStage<ManagerStatusResponse> handleStatusRequest(int viewId) {
// As long as we have an older view, we can still process topologies from the old coordinator
return withView(viewId, getGlobalTimeout(), MILLISECONDS).thenApply(ignored -> {
Map<String, CacheStatusResponse> caches = new HashMap<>();
synchronized (runningCaches) {
latestStatusResponseViewId = viewId;
for (Map.Entry<String, LocalCacheStatus> e : runningCaches.entrySet()) {
String cacheName = e.getKey();
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
// Ignore caches that haven't finished joining yet.
// They will either wait for recovery to finish (if started in the current view)
// or retry (if started in a previous view).
if (cacheStatus.getCurrentTopology() == null) {
// If the cache has a persistent state, it tries to join again.
// The coordinator has cleared the previous information about running caches,
// so we need to send a join again for the caches waiting for recovery in order to
// reconstruct them from the persistent state.
// This join only completes *after* the coordinator receives the state from all nodes.
if (cacheStatus.needRecovery()) {
final String name = cacheName;
join(name, cacheStatus)
.whenComplete((ignore, t) -> {
if (t != null) leave(name, getGlobalTimeout());
});
}
continue;
}
caches.put(e.getKey(), new CacheStatusResponse(cacheStatus.getJoinInfo(),
cacheStatus.getCurrentTopology(),
cacheStatus.getStableTopology(),
cacheStatus.getPartitionHandlingManager()
.getAvailabilityMode(),
cacheStatus.knownMembers()));
}
}
log.debugf("Sending cluster status response for view %d", viewId);
return new ManagerStatusResponse(caches, gcr.getClusterTopologyManager().isRebalancingEnabled());
});
}
@Override
public CompletionStage<Void> handleTopologyUpdate(final String cacheName, final CacheTopology cacheTopology,
final AvailabilityMode availabilityMode, final int viewId,
final Address sender) {
if (!running) {
log.tracef("Ignoring consistent hash update %s for cache %s, the local cache manager is not running",
cacheTopology.getTopologyId(), cacheName);
return CompletableFutures.completedNull();
}
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
if (cacheStatus == null) {
log.tracef("Ignoring consistent hash update %s for cache %s that doesn't exist locally",
cacheTopology.getTopologyId(), cacheName);
return CompletableFutures.completedNull();
}
return withView(viewId, cacheStatus.getJoinInfo().getTimeout(), MILLISECONDS)
.thenCompose(ignored -> orderOnCache(cacheName, () -> doHandleTopologyUpdate(cacheName, cacheTopology, availabilityMode, viewId, sender,
cacheStatus)))
.handle((ignored, throwable) -> {
if (throwable != null && !(throwable instanceof IllegalLifecycleStateException)) {
log.topologyUpdateError(cacheName, throwable);
}
return null;
});
}
/**
* Update the cache topology in the LocalCacheStatus and pass it to the CacheTopologyHandler.
*
* @return {@code true} if the topology was applied, {@code false} if it was ignored.
*/
private CompletionStage<Boolean> doHandleTopologyUpdate(String cacheName, CacheTopology cacheTopology,
AvailabilityMode availabilityMode, int viewId,
Address sender, LocalCacheStatus cacheStatus) {
CacheTopology existingTopology;
synchronized (cacheStatus) {
if (cacheTopology == null) {
// No topology yet: happens when a cache is being restarted from state.
// Still, return true because we don't want to re-send the join request.
return CompletableFutures.completedTrue();
}
// Register all persistent UUIDs locally
registerPersistentUUID(cacheTopology);
existingTopology = cacheStatus.getCurrentTopology();
if (existingTopology != null && cacheTopology.getTopologyId() <= existingTopology.getTopologyId()) {
log.debugf("Ignoring late consistent hash update for cache %s, current topology is %s: %s",
cacheName, existingTopology.getTopologyId(), cacheTopology);
return CompletableFutures.completedFalse();
}
if (!updateCacheTopology(cacheName, cacheTopology, viewId, sender, cacheStatus))
return CompletableFutures.completedFalse();
}
CacheTopologyHandler handler = cacheStatus.getHandler();
ConsistentHash currentCH = cacheTopology.getCurrentCH();
ConsistentHash pendingCH = cacheTopology.getPendingCH();
ConsistentHash unionCH;
if (pendingCH != null) {
ConsistentHashFactory chf = cacheStatus.getJoinInfo().getConsistentHashFactory();
switch (cacheTopology.getPhase()) {
case READ_NEW_WRITE_ALL:
// When removing members from topology, we have to make sure that the unionCH has
// owners from pendingCH (which is used as the readCH in this phase) before
// owners from currentCH, as primary owners must match in readCH and writeCH.
unionCH = chf.union(pendingCH, currentCH);
break;
default:
unionCH = chf.union(currentCH, pendingCH);
}
} else {
unionCH = null;
}
List<PersistentUUID> persistentUUIDs = persistentUUIDManager.mapAddresses(cacheTopology.getActualMembers());
CacheTopology unionTopology = new CacheTopology(cacheTopology.getTopologyId(), cacheTopology.getRebalanceId(),
cacheTopology.wasTopologyRestoredFromState(),
currentCH, pendingCH, unionCH, cacheTopology.getPhase(),
cacheTopology.getActualMembers(), persistentUUIDs);
boolean updateAvailabilityModeFirst = availabilityMode != AvailabilityMode.AVAILABLE;
CompletionStage<Void> stage =
resetLocalTopologyBeforeRebalance(cacheName, cacheTopology, existingTopology, handler);
stage = stage.thenCompose(ignored -> {
unionTopology.logRoutingTableInformation(cacheName);
if (updateAvailabilityModeFirst && availabilityMode != null) {
return cacheStatus.getPartitionHandlingManager().setAvailabilityMode(availabilityMode);
}
return CompletableFutures.completedNull();
});
stage = stage.thenCompose(ignored -> {
boolean startConflictResolution =
cacheTopology.getPhase() == CacheTopology.Phase.CONFLICT_RESOLUTION;
if (!startConflictResolution && unionCH != null &&
(existingTopology == null ||
existingTopology.getRebalanceId() != cacheTopology.getRebalanceId())) {
// This CH_UPDATE command was sent after a REBALANCE_START command, but arrived first.
// We will start the rebalance now and ignore the REBALANCE_START command when it arrives.
log.tracef("This topology update has a pending CH, starting the rebalance now");
return handler.rebalance(unionTopology);
} else {
return handler.updateConsistentHash(unionTopology);
}
});
if (!updateAvailabilityModeFirst) {
stage = stage.thenCompose(ignored -> cacheStatus.getPartitionHandlingManager().setAvailabilityMode(availabilityMode));
}
return stage.thenApply(ignored -> true);
}
private void registerPersistentUUID(CacheTopology cacheTopology) {
int count = cacheTopology.getActualMembers().size();
for (int i = 0; i < count; i++) {
persistentUUIDManager.addPersistentAddressMapping(
cacheTopology.getActualMembers().get(i),
cacheTopology.getMembersPersistentUUIDs().get(i)
);
}
}
private boolean updateCacheTopology(String cacheName, CacheTopology cacheTopology, int viewId,
Address sender, LocalCacheStatus cacheStatus) {
synchronized (runningCaches) {
if (!validateCommandViewId(cacheTopology, viewId, sender, cacheName))
return false;
log.debugf("Updating local topology for cache %s: %s", cacheName, cacheTopology);
cacheStatus.setCurrentTopology(cacheTopology);
return true;
}
}
/**
* Synchronization is required to prevent topology updates while preparing the status response.
*/
@GuardedBy("runningCaches")
private boolean validateCommandViewId(CacheTopology cacheTopology, int viewId, Address sender,
String cacheName) {
if (!sender.equals(transport.getCoordinator())) {
log.debugf("Ignoring topology %d for cache %s from old coordinator %s",
cacheTopology.getTopologyId(), cacheName, sender);
return false;
}
if (viewId < latestStatusResponseViewId) {
log.debugf(
"Ignoring topology %d for cache %s from view %d received after status request from view %d",
cacheTopology.getTopologyId(), cacheName, viewId, latestStatusResponseViewId);
return false;
}
return true;
}
private CompletionStage<Void> resetLocalTopologyBeforeRebalance(String cacheName, CacheTopology newCacheTopology,
CacheTopology oldCacheTopology, CacheTopologyHandler handler) {
// Cannot rely on the pending CH, because it is also used for conflict resolution
boolean newRebalance = newCacheTopology.getPhase() != CacheTopology.Phase.NO_REBALANCE &&
newCacheTopology.getPhase() != CacheTopology.Phase.CONFLICT_RESOLUTION;
if (newRebalance) {
// The initial topology doesn't need a reset because we are guaranteed not to be a member
if (oldCacheTopology == null)
return CompletableFutures.completedNull();
// We only need a reset if we missed a topology update
if (newCacheTopology.getTopologyId() <= oldCacheTopology.getTopologyId() + 1)
return CompletableFutures.completedNull();
// We have missed a topology update, and that topology might have removed some of our segments.
// If this rebalance adds those same segments, we need to remove the old data/inbound transfers first.
// This can happen when the coordinator changes, either because the old one left or because there was a merge,
// and the rebalance after merge arrives before the merged topology update.
if (newCacheTopology.getRebalanceId() != oldCacheTopology.getRebalanceId()) {
// The currentCH changed, we need to install a "reset" topology with the new currentCH first
registerPersistentUUID(newCacheTopology);
CacheTopology resetTopology = new CacheTopology(newCacheTopology.getTopologyId() - 1,
newCacheTopology.getRebalanceId() - 1,
newCacheTopology.wasTopologyRestoredFromState(),
newCacheTopology.getCurrentCH(), null,
CacheTopology.Phase.NO_REBALANCE,
newCacheTopology.getActualMembers(), persistentUUIDManager
.mapAddresses(
newCacheTopology
.getActualMembers()));
log.debugf("Installing fake cache topology %s for cache %s", resetTopology, cacheName);
return handler.updateConsistentHash(resetTopology);
}
}
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> handleStableTopologyUpdate(final String cacheName,
final CacheTopology newStableTopology,
final Address sender, final int viewId) {
final LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
if (cacheStatus != null) {
return orderOnCache(cacheName, () -> doHandleStableTopologyUpdate(cacheName, newStableTopology, viewId, sender, cacheStatus));
}
return completedNull();
}
private CompletionStage<Void> doHandleStableTopologyUpdate(String cacheName, CacheTopology newStableTopology,
int viewId,
Address sender, LocalCacheStatus cacheStatus) {
synchronized (runningCaches) {
if (!validateCommandViewId(newStableTopology, viewId, sender, cacheName))
return completedNull();
CacheTopology stableTopology = cacheStatus.getStableTopology();
if (stableTopology == null || stableTopology.getTopologyId() < newStableTopology.getTopologyId()) {
log.tracef("Updating stable topology for cache %s: %s", cacheName, newStableTopology);
cacheStatus.setStableTopology(newStableTopology);
if (newStableTopology != null && cacheStatus.getJoinInfo().getPersistentUUID() != null) {
// Don't use the current CH state for the next restart
deleteCHState(cacheName);
}
}
}
return completedNull();
}
@Override
public CompletionStage<Void> handleRebalance(final String cacheName, final CacheTopology cacheTopology,
final int viewId, final Address sender) {
if (!running) {
log.debugf("Ignoring rebalance request %s for cache %s, the local cache manager is not running",
cacheTopology.getTopologyId(), cacheName);
return CompletableFutures.completedNull();
}
final LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
if (cacheStatus == null) {
log.tracef("Ignoring rebalance %s for cache %s that doesn't exist locally",
cacheTopology.getTopologyId(), cacheName);
return CompletableFutures.completedNull();
}
eventLogger.context(cacheName)
.info(EventLogCategory.LIFECYCLE, MESSAGES.cacheRebalanceStart(cacheTopology.getMembers(), cacheTopology.getPhase(), cacheTopology.getTopologyId()));
return withView(viewId, cacheStatus.getJoinInfo().getTimeout(), MILLISECONDS)
.thenCompose(ignored -> orderOnCache(cacheName, () -> {
return doHandleRebalance(viewId, cacheStatus, cacheTopology, cacheName, sender);
}))
.handle((ignore, throwable) -> {
Collection<Address> members = cacheTopology.getMembers();
int topologyId = cacheTopology.getTopologyId();
if (throwable != null) {
Throwable t = CompletableFutures.extractException(throwable);
// Ignore errors when the cache is shutting down
if (!(t instanceof IllegalLifecycleStateException)) {
log.rebalanceStartError(cacheName, throwable);
eventLogger.context(cacheName)
.error(EventLogCategory.LIFECYCLE, MESSAGES.rebalanceFinishedWithFailure(members, topologyId, t));
}
} else {
eventLogger.context(cacheName)
.info(EventLogCategory.LIFECYCLE, MESSAGES.rebalanceFinished(members, topologyId));
}
return null;
});
}
private CompletionStage<Void> doHandleRebalance(int viewId, LocalCacheStatus cacheStatus,
CacheTopology cacheTopology,
String cacheName, Address sender) {
CacheTopology existingTopology;
synchronized (cacheStatus) {
existingTopology = cacheStatus.getCurrentTopology();
if (existingTopology != null && cacheTopology.getTopologyId() <= existingTopology.getTopologyId()) {
// Start rebalance commands are sent asynchronously to the entire cluster
// So it's possible to receive an old one on a joiner after the joiner has already become a member.
log.debugf("Ignoring old rebalance for cache %s, current topology is %s: %s", cacheName,
existingTopology.getTopologyId(), cacheTopology);
return CompletableFutures.completedNull();
}
if (!updateCacheTopology(cacheName, cacheTopology, viewId, sender, cacheStatus))
return CompletableFutures.completedNull();
}
CacheTopologyHandler handler = cacheStatus.getHandler();
ConsistentHash unionCH = cacheStatus.getJoinInfo().getConsistentHashFactory().union(
cacheTopology.getCurrentCH(), cacheTopology.getPendingCH());
CacheTopology newTopology = new CacheTopology(cacheTopology.getTopologyId(), cacheTopology.getRebalanceId(), cacheTopology.wasTopologyRestoredFromState(),
cacheTopology.getCurrentCH(), cacheTopology.getPendingCH(), unionCH,
cacheTopology.getPhase(),
cacheTopology.getActualMembers(),
cacheTopology.getMembersPersistentUUIDs());
CompletionStage<Void> stage =
resetLocalTopologyBeforeRebalance(cacheName, cacheTopology, existingTopology, handler);
return stage.thenCompose(ignored -> {
log.debugf("Starting local rebalance for cache %s, topology = %s", cacheName, cacheTopology);
cacheTopology.logRoutingTableInformation(cacheName);
return handler.rebalance(newTopology);
});
}
@Override
public CacheTopology getCacheTopology(String cacheName) {
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
return cacheStatus != null ? cacheStatus.getCurrentTopology() : null;
}
@Override
public CacheTopology getStableCacheTopology(String cacheName) {
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
return cacheStatus != null ? cacheStatus.getStableTopology() : null;
}
private CompletionStage<Void> withView(int viewId, long timeout, TimeUnit timeUnit) {
CompletableFuture<Void> viewFuture = transport.withView(viewId);
ScheduledFuture<Boolean> cancelTask = timeoutExecutor.schedule(
() -> viewFuture.completeExceptionally(CLUSTER.timeoutWaitingForView(viewId, transport.getViewId())),
timeout, timeUnit);
viewFuture.whenComplete((v, throwable) -> cancelTask.cancel(false));
return viewFuture;
}
@ManagedAttribute(description = "Rebalancing enabled", displayName = "Rebalancing enabled",
dataType = DataType.TRAIT, writable = true)
@Override
public boolean isRebalancingEnabled() {
return isCacheRebalancingEnabled(null);
}
@Override
public void setRebalancingEnabled(boolean enabled) {
setCacheRebalancingEnabled(null, enabled);
}
@Override
public boolean isCacheRebalancingEnabled(String cacheName) {
int viewId = transport.getViewId();
ReplicableCommand command = new RebalanceStatusRequestCommand(cacheName);
RebalancingStatus status = (RebalancingStatus) CompletionStages.join(
executeOnCoordinatorRetry(command, viewId, timeService.expectedEndTime(getGlobalTimeout(), MILLISECONDS)));
return status != RebalancingStatus.SUSPENDED;
}
public CompletionStage<Object> executeOnCoordinatorRetry(ReplicableCommand command, int viewId, long endNanos) {
long remainingMillis = timeService.remainingTime(endNanos, MILLISECONDS);
return CompletionStages.handleAndCompose(
helper.executeOnCoordinator(transport, command, remainingMillis),
(o, throwable) -> {
if (throwable == null) {
return CompletableFuture.completedFuture(o);
}
Throwable t = CompletableFutures.extractException(throwable);
if (t instanceof SuspectException) {
if (log.isTraceEnabled()) log.tracef("Coordinator left the cluster while querying rebalancing status, retrying");
int newViewId = Math.max(viewId + 1, transport.getViewId());
return executeOnCoordinatorRetry(command, newViewId, endNanos);
} else {
return CompletableFuture.failedFuture(t);
}
});
}
@Override
public void setCacheRebalancingEnabled(String cacheName, boolean enabled) {
if (cacheName != null) {
LocalCacheStatus lcs = runningCaches.get(cacheName);
if (lcs == null) {
log.debugf("Not changing rebalance for unknown cache %s", cacheName);
return;
}
if (!lcs.isTopologyRestored()) {
log.debugf("Not changing rebalance for cache '%s' without stable topology", cacheName);
return;
}
}
ReplicableCommand command = new RebalancePolicyUpdateCommand(cacheName, enabled);
CompletionStages.join(helper.executeOnClusterSync(transport, command, getGlobalTimeout(),
VoidResponseCollector.ignoreLeavers()));
}
@Override
public RebalancingStatus getRebalancingStatus(String cacheName) {
ReplicableCommand command = new RebalanceStatusRequestCommand(cacheName);
return (RebalancingStatus) CompletionStages.join(
executeOnCoordinatorRetry(command, transport.getViewId(), timeService.expectedEndTime(getGlobalTimeout(), MILLISECONDS)));
}
@ManagedAttribute(description = "Cluster availability", displayName = "Cluster availability",
dataType = DataType.TRAIT, writable = false)
public String getClusterAvailability() {
AvailabilityMode clusterAvailability = AvailabilityMode.AVAILABLE;
synchronized (runningCaches) {
for (LocalCacheStatus cacheStatus : runningCaches.values()) {
AvailabilityMode availabilityMode = cacheStatus.getPartitionHandlingManager().getAvailabilityMode();
clusterAvailability = clusterAvailability.min(availabilityMode);
}
}
return clusterAvailability.toString();
}
@Override
public AvailabilityMode getCacheAvailability(String cacheName) {
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
return cacheStatus.getPartitionHandlingManager().getAvailabilityMode();
}
@Override
public void setCacheAvailability(String cacheName, AvailabilityMode availabilityMode) {
ReplicableCommand command = new CacheAvailabilityUpdateCommand(cacheName, availabilityMode);
CompletionStages.join(helper.executeOnCoordinator(transport, command, getGlobalTimeout()));
}
@Override
public void cacheShutdown(String name) {
ReplicableCommand command = new CacheShutdownRequestCommand(name);
CompletionStages.join(helper.executeOnCoordinator(transport, command, getGlobalTimeout()));
}
@Override
public CompletionStage<Void> handleCacheShutdown(String cacheName) {
// The cache has shutdown, write the CH state
writeCHState(cacheName);
return completedNull();
}
@Override
public CompletionStage<Void> stableTopologyCompletion(String cacheName) {
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
if (cacheStatus == null) return null;
return cacheStatus.getStableTopologyCompletion().thenCompose(recovered -> {
// If the topology didn't need recovery or it was manually put to run.
if (!recovered) {
ComponentRegistry cr = gcr.getNamedComponentRegistry(cacheName);
PersistenceManager pm;
if (cr != null && (pm = cr.getComponent(PersistenceManager.class)) != null) {
Predicate<StoreConfiguration> predicate = PersistenceManager.AccessMode.PRIVATE;
// If the cache did not recover completely from state, we force a cleanup.
// Otherwise, it only cleans if it was configured.
if (!cacheStatus.needRecovery()) {
predicate = predicate.and(StoreConfiguration::purgeOnStartup);
}
return pm.clearAllStores(predicate);
}
}
return CompletableFutures.completedNull();
});
}
@Override
public void assertTopologyStable(String cacheName) {
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
if (cacheStatus != null && !cacheStatus.isTopologyRestored()) {
List<Address> members;
if ((members = clusterTopologyManager.currentJoiners(cacheName)) == null) {
members = cacheStatus.knownMembers();
}
throw log.recoverFromStateMissingMembers(cacheName, members, String.valueOf(cacheStatus.getStableMembersSize()));
}
}
private void writeCHState(String cacheName) {
ScopedPersistentState cacheState = new ScopedPersistentStateImpl(cacheName);
cacheState.setProperty(GlobalStateManagerImpl.VERSION, Version.getVersion());
cacheState.setProperty(GlobalStateManagerImpl.TIMESTAMP, timeService.instant().toString());
cacheState.setProperty(GlobalStateManagerImpl.VERSION_MAJOR, Version.getMajor());
LocalCacheStatus cacheStatus = runningCaches.get(cacheName);
ConsistentHash remappedCH = cacheStatus.getCurrentTopology().getCurrentCH()
.remapAddresses(persistentUUIDManager.addressToPersistentUUID());
remappedCH.toScopedState(cacheState);
globalStateManager.writeScopedState(cacheState);
if (log.isTraceEnabled()) log.tracef("Written CH state for cache %s, checksum=%s: %s", cacheName, cacheState.getChecksum(), remappedCH);
}
private void deleteCHState(String cacheName) {
globalStateManager.deleteScopedState(cacheName);
if (log.isTraceEnabled()) log.tracef("Removed CH state for cache %s", cacheName);
}
private int getGlobalTimeout() {
// TODO Rename setting to something like globalRpcTimeout
return (int) gcr.getGlobalConfiguration().transport().distributedSyncTimeout();
}
@Override
public void prepareForPersist(ScopedPersistentState state) {
if (persistentUUID != null) {
state.setProperty("uuid", persistentUUID.toString());
}
}
@Override
public void prepareForRestore(ScopedPersistentState state) {
if (!state.containsProperty("uuid")) {
throw CONFIG.invalidPersistentState(ScopedPersistentState.GLOBAL_SCOPE);
}
persistentUUID = PersistentUUID.fromString(state.getProperty("uuid"));
}
@Override
public PersistentUUID getPersistentUUID() {
return persistentUUID;
}
private <T> CompletionStage<T> orderOnCache(String cacheName, Callable<CompletionStage<T>> action) {
return actionSequencer.orderOnKey(cacheName, () -> {
log.tracef("Acquired cache status %s", cacheName);
return action.call().whenComplete((v, t) -> log.tracef("Released cache status %s", cacheName));
});
}
}
class LocalCacheStatus {
private final CacheJoinInfo joinInfo;
private final CacheTopologyHandler handler;
private final PartitionHandlingManager partitionHandlingManager;
private final CompletableFuture<Boolean> stable;
private final int stableMembersSize;
private volatile List<Address> knownMembers;
private volatile CacheTopology currentTopology;
private volatile CacheTopology stableTopology;
LocalCacheStatus(CacheJoinInfo joinInfo,
CacheTopologyHandler handler,
PartitionHandlingManager phm,
int stableMembersSize) {
this.joinInfo = joinInfo;
this.handler = handler;
this.partitionHandlingManager = phm;
this.stable = stableMembersSize > 0 ? new CompletableFuture<>() : CompletableFutures.completedFalse();
this.knownMembers = Collections.emptyList();
this.stableMembersSize = stableMembersSize;
}
public CacheJoinInfo getJoinInfo() {
return joinInfo;
}
public CacheTopologyHandler getHandler() {
return handler;
}
public PartitionHandlingManager getPartitionHandlingManager() {
return partitionHandlingManager;
}
CacheTopology getCurrentTopology() {
return currentTopology;
}
void setCurrentTopology(CacheTopology currentTopology) {
this.currentTopology = currentTopology;
}
CacheTopology getStableTopology() {
return stableTopology;
}
void setStableTopology(CacheTopology stableTopology) {
this.stableTopology = stableTopology;
partitionHandlingManager.onTopologyUpdate(currentTopology);
if (stableTopology != null) {
stable.complete(stableTopology.wasTopologyRestoredFromState());
}
}
List<Address> knownMembers() {
return Collections.unmodifiableList(knownMembers);
}
void setCurrentMembers(List<Address> members) {
knownMembers = members;
}
CompletionStage<Boolean> getStableTopologyCompletion() {
return stable;
}
boolean isTopologyRestored() {
return (stable.isDone() && stableTopology != null) || stableMembersSize < 0;
}
boolean needRecovery() {
return stableMembersSize > 0;
}
int getStableMembersSize() {
return stableMembersSize;
}
}
| 45,976
| 47.244491
| 161
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/ClusterTopologyManagerImpl.java
|
package org.infinispan.topology;
import static java.util.concurrent.CompletableFuture.runAsync;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import static org.infinispan.factories.KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR;
import static org.infinispan.util.concurrent.CompletionStages.join;
import static org.infinispan.util.logging.Log.CLUSTER;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.topology.CacheShutdownCommand;
import org.infinispan.commands.topology.CacheStatusRequestCommand;
import org.infinispan.commands.topology.RebalanceStartCommand;
import org.infinispan.commands.topology.RebalanceStatusRequestCommand;
import org.infinispan.commands.topology.TopologyUpdateCommand;
import org.infinispan.commands.topology.TopologyUpdateStableCommand;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.InfinispanCollections;
import org.infinispan.commons.util.ProcessorInfo;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.ConfigurationManager;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.executors.LimitedExecutor;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.globalstate.GlobalStateManager;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.partitionhandling.impl.AvailabilityStrategy;
import org.infinispan.partitionhandling.impl.LostDataCheck;
import org.infinispan.partitionhandling.impl.PreferAvailabilityStrategy;
import org.infinispan.partitionhandling.impl.PreferConsistencyStrategy;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollectors;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.ValidResponseCollector;
import org.infinispan.remoting.transport.impl.VoidResponseCollector;
import org.infinispan.statetransfer.RebalanceType;
import org.infinispan.util.concurrent.ActionSequencer;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.ConditionFuture;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.util.logging.events.EventLogManager;
import net.jcip.annotations.GuardedBy;
/**
* The {@code ClusterTopologyManager} implementation.
*
* @author Dan Berindei
* @author Pedro Ruivo
* @since 5.2
*/
@Scope(Scopes.GLOBAL)
public class ClusterTopologyManagerImpl implements ClusterTopologyManager {
public static final int INITIAL_CONNECTION_ATTEMPTS = 10;
public static final int CLUSTER_RECOVERY_ATTEMPTS = 10;
private static final Log log = LogFactory.getLog(ClusterTopologyManagerImpl.class);
private static final CompletableFuture<CacheStatusResponseCollector> SKIP_RECOVERY_FUTURE =
CompletableFuture.failedFuture(new IllegalStateException());
@Inject Transport transport;
@Inject GlobalConfiguration globalConfiguration;
@Inject ConfigurationManager configurationManager;
@Inject GlobalComponentRegistry gcr;
@Inject CacheManagerNotifier cacheManagerNotifier;
@Inject EmbeddedCacheManager cacheManager;
@Inject
@ComponentName(NON_BLOCKING_EXECUTOR)
ExecutorService nonBlockingExecutor;
@Inject
@ComponentName(TIMEOUT_SCHEDULE_EXECUTOR)
ScheduledExecutorService timeoutScheduledExecutor;
@Inject EventLogManager eventLogManager;
@Inject PersistentUUIDManager persistentUUIDManager;
@Inject TimeService timeService;
private TopologyManagementHelper helper;
private ConditionFuture<ClusterTopologyManagerImpl> joinViewFuture;
private ActionSequencer actionSequencer;
private final Lock updateLock = new ReentrantLock();
@GuardedBy("updateLock")
private int viewId = -1;
@GuardedBy("updateLock")
private ClusterManagerStatus clusterManagerStatus = ClusterManagerStatus.INITIALIZING;
@GuardedBy("updateLock")
private final ConcurrentMap<String, ClusterCacheStatus> cacheStatusMap = new ConcurrentHashMap<>();
private final AtomicInteger recoveryAttemptCount = new AtomicInteger();
// The global rebalancing status
private boolean globalRebalancingEnabled = true;
private EventLoggerViewListener viewListener;
@Start(priority = 100)
public void start() {
helper = new TopologyManagementHelper(gcr);
joinViewFuture = new ConditionFuture<>(timeoutScheduledExecutor);
actionSequencer = new ActionSequencer(nonBlockingExecutor, true, timeService);
viewListener = new EventLoggerViewListener(eventLogManager, e -> handleClusterView(e.isMergeView(), e.getViewId()));
cacheManagerNotifier.addListener(viewListener);
// The listener already missed the initial view
handleClusterView(false, transport.getViewId());
globalRebalancingEnabled = join(fetchRebalancingStatusFromCoordinator(INITIAL_CONNECTION_ATTEMPTS));
}
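   // Recursive retry: on TimeoutException the request is re-issued with one fewer
   // attempt; handle(...) yields a nested stage that thenCompose(Function.identity())
   // flattens back to a CompletionStage<Boolean>.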
private CompletionStage<Boolean> fetchRebalancingStatusFromCoordinator(int attempts) {
if (transport.isCoordinator()) {
return CompletableFutures.completedTrue();
}
ReplicableCommand command = new RebalanceStatusRequestCommand();
Address coordinator = transport.getCoordinator();
return helper.executeOnCoordinator(transport, command, getGlobalTimeout() / INITIAL_CONNECTION_ATTEMPTS)
.handle((rebalancingStatus, throwable) -> {
if (throwable == null)
return CompletableFuture.completedFuture(rebalancingStatus != RebalancingStatus.SUSPENDED);
if (attempts == 1 || !(throwable instanceof TimeoutException)) {
log.errorReadingRebalancingStatus(coordinator, throwable);
return CompletableFutures.completedTrue();
}
// Assume any timeout is because the coordinator doesn't have a CommandAwareRpcDispatcher yet
// (possible with ForkChannels or JGroupsChannelLookup and shouldConnect = false), and retry.
log.debug("Timed out waiting for rebalancing status from coordinator, trying again");
return fetchRebalancingStatusFromCoordinator(attempts - 1);
}).thenCompose(Function.identity());
}
@Stop(priority = 100)
public void stop() {
// Stop blocking cache topology commands.
acquireUpdateLock();
try {
clusterManagerStatus = ClusterManagerStatus.STOPPING;
joinViewFuture.stop();
} finally {
releaseUpdateLock();
}
cacheManagerNotifier.removeListener(viewListener);
}
// This method exists so that BlockHound can be instrumented to allow it to block,
// without also allowing the calls made while holding the lock to block.
// Do not move or rename it without updating that BlockHound reference.
private void acquireUpdateLock() {
updateLock.lock();
}
private void releaseUpdateLock() {
updateLock.unlock();
}
@Override
public ClusterManagerStatus getStatus() {
return clusterManagerStatus;
}
@Override
public List<Address> currentJoiners(String cacheName) {
if (!getStatus().isCoordinator()) return null;
ClusterCacheStatus status = cacheStatusMap.get(cacheName);
return status != null ? status.getExpectedMembers() : null;
}
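   // A join whose view id is newer than the locally installed view is parked on the
   // joinViewFuture condition and resumed once handleClusterView catches up, or fails
   // with coordinatorTimeoutWaitingForView after the join timeout.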
@Override
public CompletionStage<CacheStatusResponse> handleJoin(String cacheName, Address joiner, CacheJoinInfo joinInfo,
int joinerViewId) {
CompletionStage<Void> viewStage;
if (canHandleJoin(joinerViewId)) {
viewStage = CompletableFutures.completedNull();
} else {
if (log.isTraceEnabled()) {
log.tracef("Delaying join request from %s until view %s is installed (and cluster status is recovered)",
joiner, joinerViewId);
}
viewStage = joinViewFuture.newConditionStage(ctmi -> ctmi.canHandleJoin(joinerViewId),
() -> CLUSTER.coordinatorTimeoutWaitingForView(
joinerViewId, viewId, clusterManagerStatus),
joinInfo.getTimeout(), MILLISECONDS);
}
// After we have the right view, obtain the ClusterCacheStatus
return viewStage.thenCompose(v -> {
ClusterCacheStatus cacheStatus = prepareJoin(cacheName, joiner, joinInfo, joinerViewId);
if (cacheStatus == null) {
// We have a newer view
// Return null so that the joiner is forced to retry
return CompletableFutures.completedNull();
}
return cacheStatus.nodeCanJoinFuture(joinInfo)
.thenApply(ignored -> cacheStatus.doJoin(joiner, joinInfo));
});
}
private ClusterCacheStatus prepareJoin(String cacheName, Address joiner, CacheJoinInfo joinInfo,
int joinerViewId) {
acquireUpdateLock();
try {
if (!clusterManagerStatus.isRunning()) {
log.debugf("Ignoring join request from %s for cache %s, the local cache manager is shutting down",
joiner, cacheName);
throw new IllegalLifecycleStateException();
}
if (joinerViewId < viewId) {
log.debugf("Ignoring join request from %s for cache %s, joiner's view id is too old: %d",
joiner, cacheName, joinerViewId);
return null;
}
return initCacheStatusIfAbsent(cacheName, joinInfo.getCacheMode());
} finally {
releaseUpdateLock();
}
}
private boolean canHandleJoin(int joinerViewId) {
acquireUpdateLock();
try {
return joinerViewId <= viewId &&
clusterManagerStatus != ClusterManagerStatus.RECOVERING_CLUSTER &&
clusterManagerStatus != ClusterManagerStatus.INITIALIZING;
} finally {
releaseUpdateLock();
}
}
@Override
public CompletionStage<Void> handleLeave(String cacheName, Address leaver, int viewId) throws Exception {
if (!clusterManagerStatus.isRunning()) {
log.debugf("Ignoring leave request from %s for cache %s, the local cache manager is shutting down",
leaver, cacheName);
return CompletableFutures.completedNull();
}
ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
if (cacheStatus == null) {
// This can happen if we've just become coordinator
log.tracef("Ignoring leave request from %s for cache %s because it doesn't have a cache status entry",
leaver, cacheName);
return CompletableFutures.completedNull();
}
return cacheStatus.doLeave(leaver);
}
synchronized void removeCacheStatus(String cacheName) {
cacheStatusMap.remove(cacheName);
}
@Override
public CompletionStage<Void> handleRebalancePhaseConfirm(String cacheName, Address node, int topologyId,
Throwable throwable, int viewId) throws Exception {
if (throwable != null) {
// TODO We could try to update the pending CH such that nodes reporting errors are not considered to hold
// any state
// For now we are just logging the error and proceeding as if the rebalance was successful everywhere
log.rebalanceError(cacheName, node, topologyId, throwable);
}
ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
if (cacheStatus == null) {
log.debugf("Ignoring rebalance confirmation from %s " +
"for cache %s because it doesn't have a cache status entry", node, cacheName);
return CompletableFutures.completedNull();
}
cacheStatus.confirmRebalancePhase(node, topologyId);
return CompletableFutures.completedNull();
}
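   // Collects one ManagerStatusResponse per node during cluster recovery and indexes it
   // per cache; equal topologies and join infos are interned so that many nodes
   // reporting identical state share a single instance.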
private static class CacheStatusResponseCollector extends ValidResponseCollector<CacheStatusResponseCollector> {
private final Map<String, Map<Address, CacheStatusResponse>> responsesByCache = new HashMap<>();
private final List<Address> suspectedMembers = new ArrayList<>();
private final Map<CacheTopology, CacheTopology> seenTopologies = new HashMap<>();
private final Map<CacheJoinInfo, CacheJoinInfo> seenInfos = new HashMap<>();
private boolean rebalancingEnabled = true;
@Override
protected CacheStatusResponseCollector addValidResponse(Address sender, ValidResponse response) {
if (response.isSuccessful()) {
ManagerStatusResponse nodeStatus = (ManagerStatusResponse) response.getResponseValue();
rebalancingEnabled &= nodeStatus.isRebalancingEnabled();
for (Entry<String, CacheStatusResponse> entry : nodeStatus.getCaches().entrySet()) {
String cacheName = entry.getKey();
CacheStatusResponse csr = entry.getValue();
CacheTopology cacheTopology = intern(seenTopologies, csr.getCacheTopology());
CacheTopology stableTopology = intern(seenTopologies, csr.getStableTopology());
CacheJoinInfo info = intern(seenInfos, csr.getCacheJoinInfo());
Map<Address, CacheStatusResponse> cacheResponses =
responsesByCache.computeIfAbsent(cacheName, k -> new HashMap<>());
cacheResponses.put(sender, new CacheStatusResponse(info, cacheTopology, stableTopology,
csr.getAvailabilityMode(), csr.joinedMembers()));
}
}
return null;
}
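      // Behaviourally equivalent to internMap.computeIfAbsent(value, Function.identity()):
      // returns the canonical instance, inserting the argument if none was seen yet.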
private <T> T intern(Map<T, T> internMap, T value) {
T replacementValue = internMap.get(value);
if (replacementValue == null) {
internMap.put(value, value);
replacementValue = value;
}
return replacementValue;
}
@Override
protected CacheStatusResponseCollector addTargetNotFound(Address sender) {
suspectedMembers.add(sender);
return null;
}
@Override
protected CacheStatusResponseCollector addException(Address sender, Exception exception) {
throw ResponseCollectors.wrapRemoteException(sender, exception);
}
@Override
public CacheStatusResponseCollector finish() {
return this;
}
public Map<String, Map<Address, CacheStatusResponse>> getResponsesByCache() {
return responsesByCache;
}
public boolean getRebalancingEnabled() {
return rebalancingEnabled;
}
public List<Address> getSuspectedMembers() {
return suspectedMembers;
}
}
private void handleClusterView(boolean mergeView, int newViewId) {
orderOnManager(() -> {
try {
if (!updateClusterState(mergeView, newViewId))
return CompletableFutures.completedNull();
if (clusterManagerStatus == ClusterManagerStatus.RECOVERING_CLUSTER) {
return recoverClusterStatus(newViewId);
} else if (clusterManagerStatus == ClusterManagerStatus.COORDINATOR) {
// Unblock any joiners waiting for the view
joinViewFuture.updateAsync(this, nonBlockingExecutor);
// If we have recovered the cluster status, we rebalance the caches to include minor partitions
// If we processed a regular view, we prune members that left.
return updateCacheMembers(newViewId);
}
} catch (Throwable t) {
log.viewHandlingError(newViewId, t);
}
return CompletableFutures.completedNull();
});
}
private <T> CompletionStage<T> orderOnManager(Callable<CompletionStage<T>> action) {
return actionSequencer.orderOnKey(ClusterTopologyManagerImpl.class, action);
}
private CompletionStage<Void> orderOnCache(String cacheName, Runnable action) {
return actionSequencer.orderOnKey(cacheName, () -> {
action.run();
return CompletableFutures.completedNull();
});
}
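   // Recovery in a nutshell: broadcast CacheStatusRequestCommand for the new view,
   // merge each cache's responses on a bounded LimitedExecutor, flip the status to
   // COORDINATOR, then replay the view to every recovered ClusterCacheStatus.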
private CompletionStage<Void> recoverClusterStatus(int newViewId) {
// Clean up leftover cache status information from the last time we were coordinator.
// E.g. if the local node was coordinator, started a rebalance, and then lost coordinator
// status because of a merge, the existing cache statuses may have a rebalance in progress.
cacheStatusMap.clear();
recoveryAttemptCount.set(0);
return fetchClusterStatus(newViewId).thenCompose(responseCollector -> {
Map<String, Map<Address, CacheStatusResponse>> responsesByCache =
responseCollector.getResponsesByCache();
log.debugf("Cluster recovery found %d caches, members are %s", responsesByCache.size(),
transport.getMembers());
// Compute the new consistent hashes on separate threads
int maxThreads = ProcessorInfo.availableProcessors() / 2 + 1;
AggregateCompletionStage<Void> mergeStage = CompletionStages.aggregateCompletionStage();
LimitedExecutor cs = new LimitedExecutor("Merge-" + newViewId, nonBlockingExecutor, maxThreads);
for (final Entry<String, Map<Address, CacheStatusResponse>> e : responsesByCache.entrySet()) {
CacheJoinInfo joinInfo = e.getValue().values().iterator().next().getCacheJoinInfo();
ClusterCacheStatus cacheStatus = initCacheStatusIfAbsent(e.getKey(), joinInfo.getCacheMode());
mergeStage.dependsOn(runAsync(() -> cacheStatus.doMergePartitions(e.getValue()), cs));
}
return mergeStage.freeze().thenRun(() -> {
acquireUpdateLock();
try {
if (viewId != newViewId) {
log.debugf("View updated while we were recovering the cluster for view %d", newViewId);
return;
}
clusterManagerStatus = ClusterManagerStatus.COORDINATOR;
globalRebalancingEnabled = responseCollector.getRebalancingEnabled();
} finally {
releaseUpdateLock();
}
for (ClusterCacheStatus cacheStatus : cacheStatusMap.values()) {
orderOnCache(cacheStatus.getCacheName(), () -> {
try {
cacheStatus.doHandleClusterView(newViewId);
} catch (Throwable throwable) {
if (clusterManagerStatus.isRunning()) {
log.errorUpdatingMembersList(newViewId, throwable);
}
}
});
}
// Unblock any joiners waiting for the view
joinViewFuture.updateAsync(this, nonBlockingExecutor);
});
});
}
private boolean updateClusterState(boolean mergeView, int newViewId) {
acquireUpdateLock();
try {
if (newViewId < transport.getViewId()) {
log.tracef("Ignoring old cluster view notification: %s", newViewId);
return false;
}
boolean isCoordinator = transport.isCoordinator();
boolean becameCoordinator = isCoordinator && !clusterManagerStatus.isCoordinator();
if (log.isTraceEnabled()) {
log.tracef("Received new cluster view: %d, isCoordinator = %s, old status = %s", (Object) newViewId,
isCoordinator, clusterManagerStatus);
}
if (!isCoordinator) {
clusterManagerStatus = ClusterManagerStatus.REGULAR_MEMBER;
return false;
}
if (becameCoordinator || mergeView) {
clusterManagerStatus = ClusterManagerStatus.RECOVERING_CLUSTER;
}
// notify threads that might be waiting to join
viewId = newViewId;
} finally {
releaseUpdateLock();
}
return true;
}
private ClusterCacheStatus initCacheStatusIfAbsent(String cacheName, CacheMode cacheMode) {
return cacheStatusMap.computeIfAbsent(cacheName, (name) -> {
// We assume that any cache with partition handling configured is already defined on all the nodes
// (including the coordinator) before it starts on any node.
LostDataCheck lostDataCheck = ClusterTopologyManagerImpl::distLostDataCheck;
// TODO Partition handling config should be part of the join info
AvailabilityStrategy availabilityStrategy;
Configuration config = configurationManager.getConfiguration(cacheName, true);
PartitionHandling partitionHandling =
config != null ? config.clustering().partitionHandling().whenSplit() : null;
boolean resolveConflictsOnMerge = resolveConflictsOnMerge(config, cacheMode);
if (partitionHandling != null && partitionHandling != PartitionHandling.ALLOW_READ_WRITES) {
availabilityStrategy = new PreferConsistencyStrategy(eventLogManager, persistentUUIDManager, lostDataCheck);
} else {
availabilityStrategy = new PreferAvailabilityStrategy(eventLogManager, persistentUUIDManager,
lostDataCheck);
}
Optional<GlobalStateManager> globalStateManager = gcr.getOptionalComponent(GlobalStateManager.class);
Optional<ScopedPersistentState> persistedState =
globalStateManager.flatMap(gsm -> gsm.readScopedState(cacheName));
return new ClusterCacheStatus(cacheManager, gcr, cacheName, availabilityStrategy, RebalanceType.from(cacheMode),
this, transport,
persistentUUIDManager, eventLogManager, persistedState, resolveConflictsOnMerge);
});
}
private boolean resolveConflictsOnMerge(Configuration config, CacheMode cacheMode) {
if (config == null || cacheMode.isInvalidation())
return false;
return config.clustering().partitionHandling().resolveConflictsOnMerge();
}
void broadcastRebalanceStart(String cacheName, CacheTopology cacheTopology) {
ReplicableCommand command = new RebalanceStartCommand(cacheName, transport.getAddress(), cacheTopology, viewId);
helper.executeOnClusterAsync(transport, command);
}
private CompletionStage<CacheStatusResponseCollector> fetchClusterStatus(int newViewId) {
int attemptCount = recoveryAttemptCount.getAndIncrement();
if (log.isTraceEnabled())
log.tracef("Recovering cluster status for view %d, attempt %d", newViewId, attemptCount);
ReplicableCommand command = new CacheStatusRequestCommand(newViewId);
CacheStatusResponseCollector responseCollector = new CacheStatusResponseCollector();
int timeout = getGlobalTimeout() / CLUSTER_RECOVERY_ATTEMPTS;
CompletionStage<CacheStatusResponseCollector> remoteStage =
helper.executeOnClusterSync(transport, command, timeout, responseCollector);
return CompletionStages.handleAndCompose(remoteStage, (collector, throwable) -> {
if (newViewId < transport.getViewId()) {
if (log.isTraceEnabled())
log.tracef("Ignoring cluster state responses for view %d, we already have view %d",
newViewId, transport.getViewId());
return SKIP_RECOVERY_FUTURE;
} else if (throwable == null) {
if (log.isTraceEnabled())
log.tracef("Received valid cluster state responses for view %d", newViewId);
if (!collector.getSuspectedMembers().isEmpty()) {
// We got a CacheNotFoundResponse but the view is still the same, assume the JGroups stack
// includes FORK and the suspected node hasn't connected its ForkChannel yet.
// That means the node doesn't have any caches running yet, so we can ignore it.
log.debugf("Missing cache status responses from nodes %s", collector.getSuspectedMembers());
}
return CompletableFuture.completedFuture(collector);
}
Throwable t = CompletableFutures.extractException(throwable);
if (t instanceof IllegalLifecycleStateException) {
// Stop retrying, we are shutting down
return SKIP_RECOVERY_FUTURE;
}
// If we got a TimeoutException, assume JGroupsChannelLookup and shouldConnect == false,
// and the node that timed out hasn't installed its UpHandler yet.
// Retry at most CLUSTER_RECOVERY_ATTEMPTS times, then throw the timeout exception
log.failedToRecoverClusterState(t);
if (t instanceof TimeoutException && attemptCount < CLUSTER_RECOVERY_ATTEMPTS) {
return fetchClusterStatus(newViewId);
}
throw CompletableFutures.asCompletionException(t);
});
}
private CompletionStage<Void> updateCacheMembers(int viewId) {
// Confirm that view's members are all available first, so in a network split scenario
// we can enter degraded mode without starting a rebalance first
// We don't really need to run on the view handling executor because ClusterCacheStatus
// has its own synchronization
return confirmMembersAvailable().whenComplete((ignored, throwable) -> {
if (throwable == null) {
try {
int newViewId = transport.getViewId();
if (newViewId != viewId) {
log.debugf("Skipping cache members update for view %d, newer view received: %d", viewId, newViewId);
return;
}
for (ClusterCacheStatus cacheStatus : cacheStatusMap.values()) {
cacheStatus.doHandleClusterView(viewId);
}
} catch (Throwable t) {
throwable = t;
}
}
if (throwable != null && clusterManagerStatus.isRunning()) {
log.errorUpdatingMembersList(viewId, throwable);
}
});
}
private CompletionStage<Void> confirmMembersAvailable() {
try {
Set<Address> expectedMembers = new HashSet<>();
for (ClusterCacheStatus cacheStatus : cacheStatusMap.values()) {
expectedMembers.addAll(cacheStatus.getExpectedMembers());
}
expectedMembers.retainAll(transport.getMembers());
return transport.invokeCommandOnAll(expectedMembers, HeartBeatCommand.INSTANCE,
VoidResponseCollector.validOnly(),
DeliverOrder.NONE, getGlobalTimeout() / CLUSTER_RECOVERY_ATTEMPTS,
MILLISECONDS);
} catch (Exception e) {
return CompletableFuture.failedFuture(e);
}
}
private int getGlobalTimeout() {
// TODO Rename setting to something like globalRpcTimeout
return (int) globalConfiguration.transport().distributedSyncTimeout();
}
void broadcastTopologyUpdate(String cacheName, CacheTopology cacheTopology, AvailabilityMode availabilityMode) {
ReplicableCommand command = new TopologyUpdateCommand(cacheName, transport.getAddress(), cacheTopology,
availabilityMode, viewId);
helper.executeOnClusterAsync(transport, command);
}
void broadcastStableTopologyUpdate(String cacheName, CacheTopology cacheTopology) {
ReplicableCommand command = new TopologyUpdateStableCommand(cacheName, transport.getAddress(), cacheTopology, viewId);
helper.executeOnClusterAsync(transport, command);
}
@Override
public boolean isRebalancingEnabled() {
return globalRebalancingEnabled;
}
@Override
public boolean isRebalancingEnabled(String cacheName) {
if (cacheName == null) {
return isRebalancingEnabled();
} else {
ClusterCacheStatus s = cacheStatusMap.get(cacheName);
return s != null ? s.isRebalanceEnabled() : isRebalancingEnabled();
}
}
@Override
public CompletionStage<Void> setRebalancingEnabled(String cacheName, boolean enabled) {
if (cacheName == null) {
return setRebalancingEnabled(enabled);
} else {
ClusterCacheStatus clusterCacheStatus = cacheStatusMap.get(cacheName);
if (clusterCacheStatus != null) {
return clusterCacheStatus.setRebalanceEnabled(enabled);
} else {
log.debugf("Trying to enable rebalancing for inexistent cache %s", cacheName);
return CompletableFutures.completedNull();
}
}
}
@Override
public CompletionStage<Void> setRebalancingEnabled(boolean enabled) {
if (enabled) {
if (!globalRebalancingEnabled) {
CLUSTER.rebalancingEnabled();
}
} else {
if (globalRebalancingEnabled) {
CLUSTER.rebalancingSuspended();
}
}
globalRebalancingEnabled = enabled;
cacheStatusMap.values().forEach(ClusterCacheStatus::startQueuedRebalance);
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> forceRebalance(String cacheName) {
ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
if (cacheStatus != null) {
cacheStatus.forceRebalance();
}
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> forceAvailabilityMode(String cacheName, AvailabilityMode availabilityMode) {
ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
if (cacheStatus != null) {
return cacheStatus.forceAvailabilityMode(availabilityMode);
}
return CompletableFutures.completedNull();
}
@Override
public RebalancingStatus getRebalancingStatus(String cacheName) {
ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
if (cacheStatus != null) {
return cacheStatus.getRebalancingStatus();
} else {
return RebalancingStatus.PENDING;
}
}
public CompletionStage<Void> broadcastShutdownCache(String cacheName) {
ReplicableCommand command = new CacheShutdownCommand(cacheName);
return helper.executeOnClusterSync(transport, command, getGlobalTimeout(),
VoidResponseCollector.validOnly());
}
@Override
public void setInitialCacheTopologyId(String cacheName, int topologyId) {
// TODO Include cache mode in join info
Configuration configuration = configurationManager.getConfiguration(cacheName, true);
ClusterCacheStatus cacheStatus = initCacheStatusIfAbsent(cacheName, configuration.clustering().cacheMode());
cacheStatus.setInitialTopologyId(topologyId);
}
@Override
public CompletionStage<Void> handleShutdownRequest(String cacheName) throws Exception {
ClusterCacheStatus cacheStatus = cacheStatusMap.get(cacheName);
return cacheStatus.shutdownCache();
}
@Override
public boolean useCurrentTopologyAsStable(String cacheName, boolean force) {
ClusterCacheStatus status = cacheStatusMap.get(cacheName);
if (status == null) return false;
if (!status.setCurrentTopologyAsStable(force)) return false;
// We are sure this one is completed.
status.forceRebalance();
return true;
}
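   // A minimal example of the check below: with numSegments = 1 and
   // locateOwnersForSegment(0) = [A, B], newMembers = [C, D] loses the segment
   // (returns true) while newMembers = [B, C] keeps it (returns false).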
public static boolean distLostDataCheck(ConsistentHash stableCH, List<Address> newMembers) {
for (int i = 0; i < stableCH.getNumSegments(); i++) {
if (!InfinispanCollections.containsAny(newMembers, stableCH.locateOwnersForSegment(i)))
return true;
}
return false;
}
}
| 33,614
| 43.523179
| 124
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/TopologyManagementHelper.java
|
package org.infinispan.topology;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.infinispan.util.logging.Log.CLUSTER;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.GlobalRpcCommand;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.topology.AbstractCacheControlCommand;
import org.infinispan.commons.util.Util;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
public class TopologyManagementHelper {
private static final Log log = LogFactory.getLog(TopologyManagementHelper.class);
private final GlobalComponentRegistry gcr;
private final BasicComponentRegistry bcr;
public TopologyManagementHelper(GlobalComponentRegistry gcr) {
this.gcr = gcr;
this.bcr = gcr.getComponent(BasicComponentRegistry.class);
}
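   // The sync broadcast below deliberately defers finish(): remote responses flow
   // through a delegating collector, and only after the local node's own result is
   // appended (see addLocalResult) is the real collector's finish() invoked.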
public <T> CompletionStage<T> executeOnClusterSync(Transport transport, ReplicableCommand command,
int timeout, ResponseCollector<T> responseCollector) {
// First invoke the command remotely, but make sure we don't call finish() on the collector
ResponseCollector<Void> delegatingCollector = new DelegatingResponseCollector<>(responseCollector);
CompletionStage<Void> remoteFuture =
transport.invokeCommandOnAll(command, delegatingCollector, DeliverOrder.NONE, timeout, MILLISECONDS);
// Then invoke the command on the local node
CompletionStage<?> localFuture;
try {
if (log.isTraceEnabled())
log.tracef("Attempting to execute command on self: %s", command);
bcr.wireDependencies(command, true);
localFuture = invokeAsync(command);
} catch (Throwable throwable) {
localFuture = CompletableFuture.failedFuture(throwable);
}
return addLocalResult(responseCollector, remoteFuture, localFuture, transport.getAddress());
}
public void executeOnClusterAsync(Transport transport, ReplicableCommand command) {
// invoke remotely
try {
DeliverOrder deliverOrder = DeliverOrder.NONE;
transport.sendToAll(command, deliverOrder);
} catch (Exception e) {
throw Util.rewrapAsCacheException(e);
}
// invoke the command on the local node
try {
if (log.isTraceEnabled())
log.tracef("Attempting to execute command on self: %s", command);
bcr.wireDependencies(command, true);
invokeAsync(command);
} catch (Throwable throwable) {
// The command already logs any exception in invoke()
}
}
public CompletionStage<Object> executeOnCoordinator(Transport transport, ReplicableCommand command,
long timeoutMillis) {
CompletionStage<? extends Response> responseStage;
Address coordinator = transport.getCoordinator();
if (transport.getAddress().equals(coordinator)) {
try {
if (log.isTraceEnabled())
log.tracef("Attempting to execute command on self: %s", command);
bcr.wireDependencies(command, true);
responseStage = invokeAsync(command).thenApply(v -> makeResponse(v, null, transport.getAddress()));
} catch (Throwable t) {
throw CompletableFutures.asCompletionException(t);
}
} else {
// this node is not the coordinator
responseStage = transport.invokeCommand(coordinator, command, SingleResponseCollector.validOnly(),
DeliverOrder.NONE, timeoutMillis, TimeUnit.MILLISECONDS);
}
return responseStage.thenApply(response -> {
if (!(response instanceof SuccessfulResponse)) {
throw CLUSTER.unexpectedResponse(coordinator, response);
}
return ((SuccessfulResponse) response).getResponseValue();
});
}
public void executeOnCoordinatorAsync(Transport transport, AbstractCacheControlCommand command) throws Exception {
if (transport.isCoordinator()) {
if (log.isTraceEnabled())
log.tracef("Attempting to execute command on self: %s", command);
try {
// ignore the result
invokeAsync(command);
} catch (Throwable t) {
log.errorf(t, "Failed to execute ReplicableCommand %s on coordinator async: %s", command, t.getMessage());
}
} else {
Address coordinator = transport.getCoordinator();
// ignore the response
transport.sendTo(coordinator, command, DeliverOrder.NONE);
}
}
private <T> CompletionStage<T> addLocalResult(ResponseCollector<T> responseCollector,
CompletionStage<Void> remoteFuture,
CompletionStage<?> localFuture, Address localAddress) {
return remoteFuture.thenCompose(ignore -> localFuture.handle((v, t) -> {
Response localResponse = makeResponse(v, t, localAddress);
// No more responses are coming, so we don't need to synchronize
responseCollector.addResponse(localAddress, localResponse);
return responseCollector.finish();
}));
}
private Response makeResponse(Object v, Throwable t, Address localAddress) {
Response localResponse;
if (t != null) {
localResponse = new ExceptionResponse(
CLUSTER.remoteException(localAddress, CompletableFutures.extractException(t)));
} else {
if (v instanceof Response) {
localResponse = ((Response) v);
} else {
localResponse = SuccessfulResponse.create(v);
}
}
return localResponse;
}
private CompletionStage<?> invokeAsync(ReplicableCommand command) throws Throwable {
if (command instanceof GlobalRpcCommand)
return ((GlobalRpcCommand) command).invokeAsync(gcr);
return command.invokeAsync();
}
private static class DelegatingResponseCollector<T> implements ResponseCollector<Void> {
private final ResponseCollector<T> responseCollector;
public DelegatingResponseCollector(ResponseCollector<T> responseCollector) {
this.responseCollector = responseCollector;
}
@Override
public Void addResponse(Address sender, Response response) {
responseCollector.addResponse(sender, response);
return null;
}
@Override
public Void finish() {
return null;
}
}
}
| 7,258
| 40.48
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/ClusterCacheStatus.java
|
package org.infinispan.topology;
import static org.infinispan.util.logging.Log.CLUSTER;
import static org.infinispan.util.logging.events.Messages.MESSAGES;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.conflict.impl.InternalConflictManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.AvailabilityStrategy;
import org.infinispan.partitionhandling.impl.AvailabilityStrategyContext;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.statetransfer.RebalanceType;
import org.infinispan.util.concurrent.ConditionFuture;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogManager;
import org.infinispan.util.logging.events.EventLogger;
import net.jcip.annotations.GuardedBy;
/**
* Keeps track of a cache's status: members, current/pending consistent hashes, and rebalance status
*
* @author Dan Berindei
* @since 5.2
*/
public class ClusterCacheStatus implements AvailabilityStrategyContext {
// The HotRod client starts with topology 0, so we start with 1 to force an update
public static final int INITIAL_TOPOLOGY_ID = 1;
public static final int INITIAL_REBALANCE_ID = 1;
private static final Log log = LogFactory.getLog(ClusterCacheStatus.class);
private final EmbeddedCacheManager cacheManager;
private final GlobalComponentRegistry gcr;
private final String cacheName;
private final AvailabilityStrategy availabilityStrategy;
private final ClusterTopologyManagerImpl clusterTopologyManager;
private final PersistentUUIDManager persistentUUIDManager;
private EventLogger eventLogger;
private final boolean resolveConflictsOnMerge;
private final RebalanceType rebalanceType;
private Transport transport;
private int initialTopologyId = INITIAL_TOPOLOGY_ID;
// Minimal cache clustering configuration
private volatile CacheJoinInfo joinInfo;
// Cache members, some of which may not have received state yet
private volatile List<Address> expectedMembers;
// Capacity factors for all the members
private volatile Map<Address, Float> capacityFactors;
// Cache members that have not yet received state. Always included in the members list.
private volatile List<Address> joiners;
// Persistent state (if it exists)
private final Optional<ScopedPersistentState> persistentState;
// Cache topology. Its consistent hashes contain only members that did receive/are receiving state
// The members of both consistent hashes must be included in the members list.
private volatile CacheTopology currentTopology;
private volatile CacheTopology stableTopology;
private volatile AvailabilityMode availabilityMode = AvailabilityMode.AVAILABLE;
private volatile List<Address> queuedRebalanceMembers;
private volatile boolean rebalancingEnabled = true;
private volatile boolean rebalanceInProgress = false;
private volatile ConflictResolution conflictResolution;
private RebalanceConfirmationCollector rebalanceConfirmationCollector;
private ComponentStatus status;
private final ConditionFuture<ClusterCacheStatus> hasInitialTopologyFuture;
public ClusterCacheStatus(EmbeddedCacheManager cacheManager, GlobalComponentRegistry gcr, String cacheName,
AvailabilityStrategy availabilityStrategy,
RebalanceType rebalanceType, ClusterTopologyManagerImpl clusterTopologyManager,
Transport transport,
PersistentUUIDManager persistentUUIDManager, EventLogManager eventLogManager,
Optional<ScopedPersistentState> state, boolean resolveConflictsOnMerge) {
this.cacheManager = cacheManager;
this.gcr = gcr;
this.cacheName = cacheName;
this.availabilityStrategy = availabilityStrategy;
this.clusterTopologyManager = clusterTopologyManager;
this.transport = transport;
this.persistentState = state;
this.resolveConflictsOnMerge = resolveConflictsOnMerge;
this.rebalanceType = rebalanceType;
this.currentTopology = null;
this.stableTopology = null;
this.expectedMembers = Collections.emptyList();
this.capacityFactors = Collections.emptyMap();
this.joiners = Collections.emptyList();
this.persistentUUIDManager = persistentUUIDManager;
eventLogger = eventLogManager.getEventLogger().context(cacheName);
state.ifPresent(scopedPersistentState -> {
rebalancingEnabled = false;
availabilityMode = AvailabilityMode.DEGRADED_MODE;
});
status = ComponentStatus.INSTANTIATED;
hasInitialTopologyFuture = new ConditionFuture<>(clusterTopologyManager.timeoutScheduledExecutor);
if (log.isTraceEnabled()) {
log.tracef("Cache %s initialized. Persisted state? %s", cacheName, persistentState.isPresent());
}
}
@Override
public CacheJoinInfo getJoinInfo() {
return joinInfo;
}
@Override
public List<Address> getExpectedMembers() {
return expectedMembers;
}
@Override
public synchronized void queueRebalance(List<Address> newMembers) {
if (newMembers != null && !newMembers.isEmpty() && totalCapacityFactors() != 0f) {
log.debugf("Queueing rebalance for cache %s with members %s", cacheName, newMembers);
queuedRebalanceMembers = newMembers;
startQueuedRebalance();
}
}
@Override
public Map<Address, Float> getCapacityFactors() {
return capacityFactors;
}
@Override
public CacheTopology getCurrentTopology() {
return currentTopology;
}
@Override
public CacheTopology getStableTopology() {
return stableTopology;
}
@Override
public AvailabilityMode getAvailabilityMode() {
return availabilityMode;
}
@Override
public synchronized void updateAvailabilityMode(List<Address> actualMembers, AvailabilityMode newAvailabilityMode,
boolean cancelRebalance) {
AvailabilityMode oldAvailabilityMode = this.availabilityMode;
boolean modeChanged = setAvailabilityMode(newAvailabilityMode);
if (modeChanged || !actualMembers.equals(currentTopology.getActualMembers())) {
ConsistentHash newPendingCH = currentTopology.getPendingCH();
CacheTopology.Phase newPhase = currentTopology.getPhase();
if (cancelRebalance) {
newPendingCH = null;
newPhase = CacheTopology.Phase.NO_REBALANCE;
rebalanceConfirmationCollector = null;
}
CacheTopology newTopology = new CacheTopology(currentTopology.getTopologyId() + 1,
currentTopology.getRebalanceId(), currentTopology.getCurrentCH(), newPendingCH, newPhase, actualMembers, persistentUUIDManager.mapAddresses(actualMembers));
setCurrentTopology(newTopology);
CLUSTER.updatingAvailabilityMode(cacheName, oldAvailabilityMode, newAvailabilityMode, newTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheAvailabilityModeChange(
newAvailabilityMode, newTopology.getTopologyId()));
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, newAvailabilityMode);
}
}
@Override
public synchronized void updateTopologiesAfterMerge(CacheTopology currentTopology, CacheTopology stableTopology, AvailabilityMode availabilityMode) {
Log.CLUSTER.cacheRecoveredAfterMerge(cacheName, currentTopology, availabilityMode);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheRecoveredAfterMerge(
currentTopology.getMembers(), currentTopology.getTopologyId()));
this.currentTopology = currentTopology;
this.stableTopology = stableTopology;
this.availabilityMode = availabilityMode;
clusterTopologyManager.broadcastTopologyUpdate(cacheName, currentTopology, availabilityMode);
if (stableTopology != null) {
log.updatingStableTopology(cacheName, stableTopology);
clusterTopologyManager.broadcastStableTopologyUpdate(cacheName, stableTopology);
}
}
/**
* @return {@code true} if the joiner was not already a member, {@code false} otherwise
*/
@GuardedBy("this")
private boolean addMember(Address joiner, CacheJoinInfo joinInfo) {
if (expectedMembers.contains(joiner)) {
return false;
}
if (this.joinInfo == null) {
this.joinInfo = joinInfo;
}
HashMap<Address, Float> newCapacityFactors = new HashMap<>(capacityFactors);
newCapacityFactors.put(joiner, joinInfo.getCapacityFactor());
capacityFactors = Immutables.immutableMapWrap(newCapacityFactors);
expectedMembers = immutableAdd(expectedMembers, joiner);
persistentUUIDManager.addPersistentAddressMapping(joiner, joinInfo.getPersistentUUID());
joiners = immutableAdd(joiners, joiner);
if (log.isTraceEnabled())
log.tracef("Added joiner %s to cache %s with persistent uuid %s: members = %s, joiners = %s", joiner, cacheName,
joinInfo.getPersistentUUID(), expectedMembers, joiners);
return true;
}
/**
* Validate if the member is allowed to join
*/
@GuardedBy("this")
private void validateJoiner(Address joiner, CacheJoinInfo joinInfo) {
if (persistentState.isPresent()) {
if (!joinInfo.getPersistentStateChecksum().isPresent()) {
if (status == ComponentStatus.INSTANTIATED) {
throw CLUSTER.nodeWithoutPersistentStateJoiningCacheWithState(joiner, cacheName);
}
} else if (persistentState.get().getChecksum() != joinInfo.getPersistentStateChecksum().get()) {
throw CLUSTER.nodeWithIncompatibleStateJoiningCache(joiner, cacheName);
}
} else {
if (joinInfo.getPersistentStateChecksum().isPresent()) {
throw CLUSTER.nodeWithPersistentStateJoiningClusterWithoutState(joiner, cacheName);
}
}
}
/**
* @return {@code true} if the leaver was a member, {@code false} otherwise
*/
@GuardedBy("this")
private boolean removeMember(Address leaver) {
if (!expectedMembers.contains(leaver)) {
if (log.isTraceEnabled()) log.tracef("Trying to remove node %s from cache %s, but it is not a member: " +
"members = %s", leaver, cacheName, expectedMembers);
return false;
}
expectedMembers = immutableRemove(expectedMembers, leaver);
HashMap<Address, Float> newCapacityFactors = new HashMap<>(capacityFactors);
newCapacityFactors.remove(leaver);
capacityFactors = Immutables.immutableMapWrap(newCapacityFactors);
joiners = immutableRemove(joiners, leaver);
if (log.isTraceEnabled()) log.tracef("Removed node %s from cache %s: members = %s, joiners = %s", leaver,
cacheName, expectedMembers, joiners);
return true;
}
/**
* @return {@code true} if the members list has changed, {@code false} otherwise
*/
@GuardedBy("this")
private boolean retainMembers(List<Address> newClusterMembers) {
if (newClusterMembers.containsAll(expectedMembers)) {
if (log.isTraceEnabled()) log.tracef("Cluster members updated for cache %s, no abrupt leavers detected: " +
"cache members = %s. Existing members = %s", cacheName, newClusterMembers, expectedMembers);
return false;
}
expectedMembers = immutableRetainAll(expectedMembers, newClusterMembers);
joiners = immutableRetainAll(joiners, newClusterMembers);
if (log.isTraceEnabled()) log.tracef("Cluster members updated for cache %s: members = %s, joiners = %s", cacheName,
expectedMembers, joiners);
return true;
}
@GuardedBy("this")
private void setCurrentTopology(CacheTopology newTopology) {
this.currentTopology = newTopology;
// update the joiners list
if (newTopology != null) {
joiners = immutableRemoveAll(expectedMembers, newTopology.getCurrentCH().getMembers());
}
if (log.isTraceEnabled()) log.tracef("Cache %s topology updated: %s, members = %s, joiners = %s",
cacheName, currentTopology, expectedMembers, joiners);
if (newTopology != null) {
newTopology.logRoutingTableInformation(cacheName);
}
}
@GuardedBy("this")
private void setStableTopology(CacheTopology newTopology) {
this.stableTopology = newTopology;
if (log.isTraceEnabled()) log.tracef("Cache %s stable topology updated: members = %s, joiners = %s, topology = %s",
cacheName, expectedMembers, joiners, newTopology);
}
private boolean needConsistentHashUpdate() {
// The list of current members is always included in the list of pending members,
// so we only need to check one list.
// Also returns false if both CHs are null
return !expectedMembers.equals(currentTopology.getMembers());
}
private List<Address> pruneInvalidMembers(List<Address> possibleMembers) {
return immutableRetainAll(possibleMembers, expectedMembers);
}
public boolean isRebalanceInProgress() {
return rebalanceConfirmationCollector != null;
}
public RebalancingStatus getRebalancingStatus() {
if (!isRebalanceEnabled()) {
return RebalancingStatus.SUSPENDED;
} else if (rebalanceInProgress) {
return RebalancingStatus.IN_PROGRESS;
} else if (queuedRebalanceMembers != null) {
return RebalancingStatus.PENDING;
} else {
return RebalancingStatus.COMPLETE;
}
}
public synchronized void confirmRebalancePhase(Address member, int receivedTopologyId) throws Exception {
if (currentTopology == null) {
log.debugf("Ignoring rebalance confirmation from %s for cache %s because the cache has no members",
member, cacheName);
return;
}
if (receivedTopologyId < currentTopology.getTopologyId()) {
log.debugf("Ignoring rebalance confirmation from %s " +
"for cache %s because the topology id is old (%d, expected %d)",
member, cacheName, receivedTopologyId, currentTopology.getTopologyId());
return;
}
if (rebalanceConfirmationCollector == null) {
throw new CacheException(String.format("Received invalid rebalance confirmation from %s " +
"for cache %s, we don't have a rebalance in progress", member, cacheName));
}
CLUSTER.rebalancePhaseConfirmedOnNode(currentTopology.getPhase(), cacheName, member, receivedTopologyId);
rebalanceConfirmationCollector.confirmPhase(member, receivedTopologyId);
}
/**
* Should be called after the members list was updated in any other way ({@link #removeMember(Address)},
* {@link #retainMembers} etc.)
*/
@GuardedBy("this")
private void updateMembers() {
if (rebalanceConfirmationCollector != null) {
// We rely on the AvailabilityStrategy updating the current topology beforehand.
rebalanceConfirmationCollector.updateMembers(currentTopology.getMembers());
}
}
public synchronized void doHandleClusterView(int viewId) {
// TODO Clean up ClusterCacheStatus instances once they no longer have any members
if (currentTopology == null)
return;
List<Address> newClusterMembers = transport.getMembers();
int newViewId = transport.getViewId();
if (newViewId != viewId) {
log.debugf("Cache %s skipping members update for view %d, newer view received: %d",
cacheName, viewId, newViewId);
return;
}
if (log.isTraceEnabled()) log.tracef("Cache %s updating members for view %d: %s", cacheName, viewId, newClusterMembers);
boolean cacheMembersModified = retainMembers(newClusterMembers);
availabilityStrategy.onClusterViewChange(this, newClusterMembers);
if (cacheMembersModified) {
updateMembers();
}
}
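   // Four-phase rebalance, each step confirmed through a RebalanceConfirmationCollector:
   //   READ_OLD_WRITE_ALL -> READ_ALL_WRITE_ALL -> READ_NEW_WRITE_ALL -> NO_REBALANCE
   // endRebalance() installs the second phase; endReadAllPhase() and endReadNewPhase()
   // continue the chain until the pending CH is promoted to the current CH.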
@GuardedBy("this")
private void endRebalance() {
CacheTopology newTopology;
rebalanceInProgress = false;
CacheTopology currentTopology = getCurrentTopology();
if (currentTopology == null) {
log.tracef("Rebalance finished because there are no more members in cache %s", cacheName);
return;
}
assert currentTopology.getPhase().isRebalance();
int currentTopologyId = currentTopology.getTopologyId();
List<Address> members = currentTopology.getMembers();
switch (rebalanceType) {
case FOUR_PHASE:
newTopology = new CacheTopology(currentTopologyId + 1, currentTopology.getRebalanceId(),
currentTopology.getCurrentCH(), currentTopology.getPendingCH(),
CacheTopology.Phase.READ_ALL_WRITE_ALL, members,
persistentUUIDManager.mapAddresses(members));
break;
default:
throw new IllegalStateException();
}
setCurrentTopology(newTopology);
if (newTopology.getPhase() != CacheTopology.Phase.NO_REBALANCE) {
rebalanceConfirmationCollector = new RebalanceConfirmationCollector(cacheName, currentTopologyId + 1,
members, this::endReadAllPhase);
} else {
rebalanceConfirmationCollector = null;
}
availabilityStrategy.onRebalanceEnd(this);
CLUSTER.startingRebalancePhase(cacheName, newTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheRebalancePhaseChange(
newTopology.getPhase(), newTopology.getTopologyId()));
// TODO: to members only?
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, availabilityMode);
if (newTopology.getPhase() == CacheTopology.Phase.NO_REBALANCE) {
startQueuedRebalance();
}
}
@GuardedBy("this") // called from doHandleClusterView/doLeave/confirmRebalancePhase
private void endReadAllPhase() {
CacheTopology newTopology;
CacheTopology currentTopology = getCurrentTopology();
assert currentTopology != null; // can this happen?
assert currentTopology.getPhase() == CacheTopology.Phase.READ_ALL_WRITE_ALL;
List<Address> members = currentTopology.getMembers();
newTopology = new CacheTopology(currentTopology.getTopologyId() + 1, currentTopology.getRebalanceId(),
currentTopology.getCurrentCH(), currentTopology.getPendingCH(), CacheTopology.Phase.READ_NEW_WRITE_ALL, members,
persistentUUIDManager.mapAddresses(members));
setCurrentTopology(newTopology);
rebalanceConfirmationCollector = new RebalanceConfirmationCollector(cacheName, currentTopology.getTopologyId() + 1,
members, this::endReadNewPhase);
CLUSTER.startingRebalancePhase(cacheName, newTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheRebalancePhaseChange(
newTopology.getPhase(), newTopology.getTopologyId()));
// TODO: to members only?
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, availabilityMode);
}
@GuardedBy("this") // called from doHandleClusterView/doLeave/confirmRebalancePhase
private void endReadNewPhase() {
CacheTopology newTopology;
CacheTopology currentTopology = getCurrentTopology();
assert currentTopology != null;
assert currentTopology.getPhase() == CacheTopology.Phase.READ_NEW_WRITE_ALL;
List<Address> members = currentTopology.getMembers();
newTopology = new CacheTopology(currentTopology.getTopologyId() + 1, currentTopology.getRebalanceId(),
currentTopology.getPendingCH(), null, CacheTopology.Phase.NO_REBALANCE, members,
persistentUUIDManager.mapAddresses(members));
setCurrentTopology(newTopology);
rebalanceConfirmationCollector = null;
CLUSTER.finishedRebalance(cacheName, newTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.rebalanceFinished(
newTopology.getMembers(), newTopology.getTopologyId()));
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, availabilityMode);
startQueuedRebalance();
}
// TODO: newMembers isn't really used, pruneInvalidMembers uses expectedMembers
@Override
public synchronized void updateCurrentTopology(List<Address> newMembers) {
// The current topology might be null just after a joiner became the coordinator
if (currentTopology == null) {
createInitialCacheTopology();
}
ConsistentHashFactory<ConsistentHash> consistentHashFactory = getJoinInfo().getConsistentHashFactory();
int topologyId = currentTopology.getTopologyId();
int rebalanceId = currentTopology.getRebalanceId();
ConsistentHash currentCH = currentTopology.getCurrentCH();
ConsistentHash pendingCH = currentTopology.getPendingCH();
if (!needConsistentHashUpdate()) {
log.tracef("Cache %s members list was updated, but the cache topology doesn't need to change: %s",
cacheName, currentTopology);
return;
}
if (newMembers.isEmpty()) {
log.tracef("Cache %s no longer has any members, removing topology", cacheName);
setCurrentTopology(null);
setStableTopology(null);
rebalanceConfirmationCollector = null;
status = ComponentStatus.INSTANTIATED;
return;
}
if (totalCapacityFactors() == 0f) {
CLUSTER.debugf("All members have capacity factor 0, delaying topology update");
return;
}
List<Address> newCurrentMembers = pruneInvalidMembers(currentCH.getMembers());
ConsistentHash newCurrentCH, newPendingCH = null;
CacheTopology.Phase newPhase = CacheTopology.Phase.NO_REBALANCE;
List<Address> actualMembers;
if (newCurrentMembers.isEmpty()) {
// All the current members left, try to replace them with the joiners
log.tracef("All current members left, re-initializing status for cache %s", cacheName);
rebalanceConfirmationCollector = null;
newCurrentMembers = getExpectedMembers();
actualMembers = newCurrentMembers;
newCurrentCH = joinInfo.getConsistentHashFactory().create(joinInfo.getNumOwners(),
joinInfo.getNumSegments(), newCurrentMembers, getCapacityFactors());
} else {
// ReplicatedConsistentHashFactory allocates segments to all its members, so we can't add any members here
newCurrentCH = consistentHashFactory.updateMembers(currentCH, newCurrentMembers, getCapacityFactors());
actualMembers = newCurrentMembers;
if (pendingCH != null) {
newPhase = currentTopology.getPhase();
List<Address> newPendingMembers = pruneInvalidMembers(pendingCH.getMembers());
newPendingCH = consistentHashFactory.updateMembers(pendingCH, newPendingMembers, getCapacityFactors());
actualMembers = pruneInvalidMembers(newPendingMembers);
}
}
// Losing members during state transfer could lead to a state where we have more than two topologies
// concurrently in the cluster. We need to make sure that all the topologies are compatible (properties set
// in CacheTopology docs hold) - we just remove lost members.
CacheTopology newTopology = new CacheTopology(topologyId + 1, rebalanceId, newCurrentCH, newPendingCH,
newPhase, actualMembers, persistentUUIDManager.mapAddresses(actualMembers));
setCurrentTopology(newTopology);
if (rebalanceConfirmationCollector != null) {
// The node that will cancel the state transfer because of another topology update won't send topology confirm
log.debugf("Cancelling topology confirmation %s because of another topology update", rebalanceConfirmationCollector);
rebalanceConfirmationCollector = null;
}
CLUSTER.updatingTopology(cacheName, newTopology, availabilityMode);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheMembersUpdated(
actualMembers, newTopology.getTopologyId()));
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, availabilityMode);
}
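// Sums the capacity factors of all expected members; a total of 0 means no node is
// currently able to own segments, so topology updates are delayed.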
@GuardedBy("this")
private float totalCapacityFactors() {
float totalCapacityFactors = 0f;
for (Float factor : capacityFactors.values()) {
totalCapacityFactors += factor;
}
return totalCapacityFactors;
}
private boolean setAvailabilityMode(AvailabilityMode newAvailabilityMode) {
if (newAvailabilityMode == availabilityMode)
return false;
log.tracef("Cache %s availability changed: %s -> %s", cacheName, availabilityMode, newAvailabilityMode);
availabilityMode = newAvailabilityMode;
return true;
}
// Helpers for working with immutable lists
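// Each helper copies the source list, applies the change, and wraps the result, e.g.
// immutableAdd([A, B], C) yields an unmodifiable [A, B, C]; callers can therefore
// publish the returned lists without further defensive copying.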
private <T> List<T> immutableAdd(List<T> list, T element) {
List<T> result = new ArrayList<>(list);
result.add(element);
return Collections.unmodifiableList(result);
}
private <T> List<T> immutableRemove(List<T> list, T element) {
List<T> result = new ArrayList<>(list);
result.remove(element);
return Collections.unmodifiableList(result);
}
private <T> List<T> immutableRemoveAll(List<T> list, List<T> otherList) {
List<T> result = new ArrayList<>(list);
result.removeAll(otherList);
return Collections.unmodifiableList(result);
}
private <T> List<T> immutableRetainAll(List<T> list, List<T> otherList) {
List<T> result = new ArrayList<>(list);
result.retainAll(otherList);
return Collections.unmodifiableList(result);
}
@Override
public String toString() {
return "ClusterCacheStatus{" +
"cacheName='" + cacheName + '\'' +
", members=" + expectedMembers +
", joiners=" + joiners +
", currentTopology=" + currentTopology +
", rebalanceConfirmationCollector=" + rebalanceConfirmationCollector +
'}';
}
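// Invoked on the coordinator after a partition merge: collects the join info plus the
// current and stable topologies reported by each partition, then delegates the actual
// reconciliation to the configured availability strategy.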
public synchronized void doMergePartitions(Map<Address, CacheStatusResponse> statusResponses) {
try {
if (statusResponses.isEmpty()) {
throw new IllegalArgumentException("Should have at least one current topology");
}
HashMap<Address, CacheJoinInfo> joinInfos = new HashMap<>();
Set<CacheTopology> currentTopologies = new HashSet<>();
Set<CacheTopology> stableTopologies = new HashSet<>();
for (Map.Entry<Address, CacheStatusResponse> e : statusResponses.entrySet()) {
Address sender = e.getKey();
CacheStatusResponse response = e.getValue();
joinInfos.put(sender, response.getCacheJoinInfo());
if (response.getCacheTopology() != null) {
currentTopologies.add(response.getCacheTopology());
}
if (response.getStableTopology() != null) {
stableTopologies.add(response.getStableTopology());
}
}
log.debugf("Recovered %d partition(s) for cache %s: %s", currentTopologies.size(), cacheName, currentTopologies);
recoverMembers(joinInfos, currentTopologies, stableTopologies);
// TODO Should automatically detect when the coordinator has left and there is only one partition
// and continue any in-progress rebalance without resetting the cache topology.
availabilityStrategy.onPartitionMerge(this, statusResponses);
} catch (IllegalLifecycleStateException e) {
// Retrieving the conflict manager fails during shutdown, because internalGetCache checks the manager status
// Remote invocations also fail if the transport is stopped before recovery finishes
} catch (Exception e) {
log.failedToRecoverCacheState(cacheName, e);
}
}
@GuardedBy("this")
private void recoverMembers(Map<Address, CacheJoinInfo> joinInfos,
Collection<CacheTopology> currentTopologies, Collection<CacheTopology> stableTopologies) {
expectedMembers = Collections.emptyList();
// Try to preserve the member order at least for the first partition
// TODO First partition is random, it would be better to use the topology selected by the availability strategy
for (CacheTopology topology : stableTopologies) {
addMembers(topology.getMembers(), joinInfos);
}
for (CacheTopology topology : currentTopologies) {
addMembers(topology.getMembers(), joinInfos);
}
// Add the joiners that are not part of any topology at the end
for (Map.Entry<Address, CacheJoinInfo> e : joinInfos.entrySet()) {
if (!expectedMembers.contains(e.getKey())) {
addMember(e.getKey(), e.getValue());
}
}
}
@GuardedBy("this")
private void addMembers(Collection<Address> membersToAdd, Map<Address, CacheJoinInfo> joinInfos) {
for (Address member : membersToAdd) {
if (!expectedMembers.contains(member)) {
CacheJoinInfo joinInfo = joinInfos.get(member);
// Some members of the stable/current topology may not be members any more
if (joinInfo != null) {
addMember(member, joinInfo);
}
}
}
}
@Override
public String getCacheName() {
return cacheName;
}
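// Handles a join request: registers the member, installs the initial topology (or restores
// one from persistent state) when this is the first member, and returns the topology that
// was current before the joiner was added.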
public synchronized CacheStatusResponse doJoin(Address joiner, CacheJoinInfo joinInfo) {
validateJoiner(joiner, joinInfo);
boolean isFirstMember = getCurrentTopology() == null;
boolean memberJoined = addMember(joiner, joinInfo);
if (!memberJoined) {
if (log.isTraceEnabled()) log.tracef("Trying to add node %s to cache %s, but it is already a member: " +
"members = %s, joiners = %s", joiner, cacheName, expectedMembers, joiners);
return new CacheStatusResponse(null, currentTopology, stableTopology, availabilityMode, expectedMembers);
}
final List<Address> current = Collections.unmodifiableList(expectedMembers);
if (status == ComponentStatus.INSTANTIATED) {
if (persistentState.isPresent()) {
if (log.isTraceEnabled()) log.tracef("Node %s joining. Attempting to reform previous cluster", joiner);
if (restoreTopologyFromState()) {
return new CacheStatusResponse(null, currentTopology, stableTopology, availabilityMode, current);
}
} else {
if (isFirstMember) {
// This node was the first to join. We need to install the initial CH
CacheTopology initialTopology = createInitialCacheTopology();
// Change our status
status = ComponentStatus.RUNNING;
// Don't need to broadcast the initial CH update, just return the cache topology to the joiner
// But we do need to broadcast the initial topology as the stable topology
clusterTopologyManager.broadcastStableTopologyUpdate(cacheName, initialTopology);
// Allow nodes with zero capacity that were waiting to join,
// but do it on another thread to avoid reentrancy
hasInitialTopologyFuture.updateAsync(this, clusterTopologyManager.nonBlockingExecutor);
}
}
}
CacheTopology topologyBeforeRebalance = getCurrentTopology();
// Only trigger availability strategy if we have a topology installed
if (topologyBeforeRebalance != null)
availabilityStrategy.onJoin(this, joiner);
return new CacheStatusResponse(null, topologyBeforeRebalance, stableTopology, availabilityMode, current);
}
CompletionStage<Void> nodeCanJoinFuture(CacheJoinInfo joinInfo) {
if (joinInfo.getCapacityFactor() != 0f || getCurrentTopology() != null)
return CompletableFutures.completedNull();
// Creating the initial topology requires at least one node with a non-zero capacity factor
return hasInitialTopologyFuture.newConditionStage(ccs -> ccs.getCurrentTopology() != null,
() -> new TimeoutException("Timed out waiting for initial cache topology"),
joinInfo.getTimeout(), TimeUnit.MILLISECONDS);
}
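// Rebuilds the consistent hash persisted in the global state, remapping the stored
// persistent UUIDs onto current addresses. Returns null while some persisted members are
// still missing, and fails fast if extra members joined a cache restored from state.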
@GuardedBy("this")
protected CacheTopology restoreCacheTopology(ScopedPersistentState state) {
if (log.isTraceEnabled()) log.tracef("Attempting to restore CH for cache %s", cacheName);
ConsistentHash originalCH = joinInfo.getConsistentHashFactory().fromPersistentState(state);
ConsistentHash persistedCH = originalCH.remapAddresses(persistentUUIDManager.persistentUUIDToAddress());
if (persistedCH == null || !getExpectedMembers().containsAll(persistedCH.getMembers())) {
log.recoverFromStateMissingMembers(cacheName, expectedMembers, originalCH.getMembers().size());
return null;
}
if (getExpectedMembers().size() > persistedCH.getMembers().size()) {
List<Address> extraneousMembers = new ArrayList<>(getExpectedMembers());
extraneousMembers.removeAll(persistedCH.getMembers());
throw CLUSTER.extraneousMembersJoinRestoredCache(extraneousMembers, cacheName);
}
int topologyId = currentTopology == null ? initialTopologyId : currentTopology.getTopologyId() + 1;
CacheTopology initialTopology = new CacheTopology(topologyId, INITIAL_REBALANCE_ID, true, persistedCH, null,
CacheTopology.Phase.NO_REBALANCE, persistedCH.getMembers(), persistentUUIDManager.mapAddresses(persistedCH.getMembers()));
return cacheTopologyCreated(initialTopology);
}
@GuardedBy("this")
private CacheTopology cacheTopologyCreated(CacheTopology topology) {
setCurrentTopology(topology);
setStableTopology(topology);
rebalancingEnabled = true;
availabilityMode = AvailabilityMode.AVAILABLE;
return topology;
}
@GuardedBy("this")
private boolean restoreTopologyFromState() {
assert persistentState.isPresent() : "Persistent state not available";
CacheTopology topology = restoreCacheTopology(persistentState.get());
if (topology != null) {
restoreTopologyFromState(topology);
return true;
}
return false;
}
@GuardedBy("this")
private void restoreTopologyFromState(CacheTopology topology) {
// Change our status
status = ComponentStatus.RUNNING;
CLUSTER.updatingTopology(cacheName, topology, availabilityMode);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheMembersUpdated(
topology.getMembers(), topology.getTopologyId()));
clusterTopologyManager.broadcastTopologyUpdate(cacheName, topology, availabilityMode);
clusterTopologyManager.broadcastStableTopologyUpdate(cacheName, topology);
}
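// Manual override used during recovery from persistent state: only applies when no
// topology is installed yet. If too many members are missing it aborts (unless forced);
// when too many members are missing (forced) or the cache is replicated, a fresh CH is
// created instead of reusing the persisted segment mapping.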
public synchronized boolean setCurrentTopologyAsStable(boolean force) {
if (currentTopology != null) return false;
if (persistentState.isPresent()) {
List<Address> members = getExpectedMembers();
ConsistentHash pastConsistentHash = joinInfo.getConsistentHashFactory()
.fromPersistentState(persistentState.get());
int missing = pastConsistentHash.getMembers().size() - members.size();
int owners = joinInfo.getNumOwners();
if (!force && missing >= owners) {
throw log.missingTooManyMembers(cacheName, owners, missing, pastConsistentHash.getMembers().size());
}
boolean safelyRecovered = missing < owners;
boolean isReplicated = gcr.getCacheManager().getCacheConfiguration(cacheName).clustering().cacheMode().isReplicated();
ConsistentHash ch;
if (safelyRecovered && !isReplicated) {
// We reuse the previous topology, only changing it to reflect the current members.
// This is necessary to keep the same segments mapping as before.
// If another node joins, it will trigger rebalance, properly redistributing the segments.
ch = pastConsistentHash.remapAddressRemoveMissing(persistentUUIDManager.persistentUUIDToAddress());
} else {
// We don't have enough members to safely recover the previous topology, so we create a new one, as the
// node will clear the storage, so we don't need the same segments mapping.
ConsistentHashFactory<ConsistentHash> chf = joinInfo.getConsistentHashFactory();
ch = chf.create(joinInfo.getNumOwners(), joinInfo.getNumSegments(), members, getCapacityFactors());
}
CacheTopology topology = new CacheTopology(initialTopologyId, INITIAL_REBALANCE_ID, true, ch, null,
CacheTopology.Phase.NO_REBALANCE, members, persistentUUIDManager.mapAddresses(members));
restoreTopologyFromState(cacheTopologyCreated(topology));
return true;
}
return false;
}
@GuardedBy("this")
protected CacheTopology createInitialCacheTopology() {
log.tracef("Initializing status for cache %s", cacheName);
List<Address> initialMembers = getExpectedMembers();
ConsistentHash initialCH = joinInfo.getConsistentHashFactory().create(joinInfo.getNumOwners(),
joinInfo.getNumSegments(), initialMembers, getCapacityFactors());
CacheTopology initialTopology = new CacheTopology(initialTopologyId, INITIAL_REBALANCE_ID, initialCH, null,
CacheTopology.Phase.NO_REBALANCE, initialMembers, persistentUUIDManager.mapAddresses(initialMembers));
setCurrentTopology(initialTopology);
setStableTopology(initialTopology);
return initialTopology;
}
public synchronized CompletionStage<Void> doLeave(Address leaver) throws Exception {
boolean actualLeaver = removeMember(leaver);
if (!actualLeaver)
return CompletableFutures.completedNull();
if (expectedMembers.isEmpty())
clusterTopologyManager.removeCacheStatus(cacheName);
if (currentTopology == null)
return CompletableFutures.completedNull();
availabilityStrategy.onGracefulLeave(this, leaver);
updateMembers();
return CompletableFutures.completedNull();
}
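// Drains the queued rebalance, if any. Depending on how the balanced CH compares to the
// current one this either broadcasts a plain topology update, starts a new rebalance
// phase, or merely promotes the current topology to stable.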
public synchronized void startQueuedRebalance() {
// We cannot start rebalance until queued CR is complete
if (conflictResolution != null) {
log.tracef("Postponing rebalance for cache %s as conflict resolution is in progress", cacheName);
return;
}
// We don't have a queued rebalance.
if (queuedRebalanceMembers == null) {
// The previous topology was not restored. We do nothing, waiting for the members to get back in and install topology.
if (currentTopology == null && persistentState.isPresent()) {
log.debugf("Skipping rebalance for cache %s as the previous topology was not restored", cacheName);
return;
}
// We may need to broadcast a stable topology update
if (stableTopology == null || stableTopology.getTopologyId() < currentTopology.getTopologyId()) {
stableTopology = currentTopology;
log.updatingStableTopology(cacheName, stableTopology);
clusterTopologyManager.broadcastStableTopologyUpdate(cacheName, stableTopology);
}
return;
}
CacheTopology cacheTopology = getCurrentTopology();
if (!isRebalanceEnabled()) {
log.tracef("Postponing rebalance for cache %s, rebalancing is disabled", cacheName);
return;
}
if (rebalanceConfirmationCollector != null) {
log.tracef("Postponing rebalance for cache %s, there's already a topology change in progress: %s",
cacheName, rebalanceConfirmationCollector);
return;
}
if (queuedRebalanceMembers.isEmpty()) {
log.tracef("Ignoring request to rebalance cache %s, it doesn't have any member", cacheName);
return;
}
if (cacheTopology == null) {
createInitialCacheTopology();
return;
}
List<Address> newMembers = updateMembersPreservingOrder(cacheTopology.getMembers(), queuedRebalanceMembers);
queuedRebalanceMembers = null;
log.tracef("Rebalancing consistent hash for cache %s, members are %s", cacheName, newMembers);
int newTopologyId = cacheTopology.getTopologyId() + 1;
int newRebalanceId = cacheTopology.getRebalanceId() + 1;
ConsistentHash currentCH = cacheTopology.getCurrentCH();
if (currentCH == null) {
// There was one node in the cache before, and it left after the rebalance was triggered
// but before the rebalance actually started.
log.tracef("Ignoring request to rebalance cache %s, it doesn't have a consistent hash", cacheName);
return;
}
if (!expectedMembers.containsAll(newMembers)) {
newMembers.removeAll(expectedMembers);
log.tracef("Ignoring request to rebalance cache %s, we have new leavers: %s", cacheName, newMembers);
return;
}
ConsistentHashFactory chFactory = getJoinInfo().getConsistentHashFactory();
// This update will only add the joiners to the CH, we have already checked that we don't have leavers
ConsistentHash updatedMembersCH = chFactory.updateMembers(currentCH, newMembers, getCapacityFactors());
ConsistentHash balancedCH = chFactory.rebalance(updatedMembersCH);
boolean removeMembers = !expectedMembers.containsAll(currentCH.getMembers());
if (removeMembers) {
// Leavers should have been removed before starting a rebalance, but that might have failed
// e.g. if all the remaining members had capacity factor 0
Collection<Address> unwantedMembers = new LinkedList<>(currentCH.getMembers());
unwantedMembers.removeAll(expectedMembers);
CLUSTER.debugf("Removing unwanted members from the current consistent hash: %s", unwantedMembers);
currentCH = updatedMembersCH;
}
boolean updateTopology = false;
boolean rebalance = false;
boolean updateStableTopology = false;
if (rebalanceType == RebalanceType.NONE) {
updateTopology = true;
} else if (balancedCH.equals(currentCH)) {
if (log.isTraceEnabled()) log.tracef("The balanced CH is the same as the current CH, not rebalancing");
updateTopology = cacheTopology.getPendingCH() != null || removeMembers;
// After a cluster view change that leaves only 1 node, we don't need either a topology update or a rebalance
// but we must still update the stable topology
updateStableTopology =
cacheTopology.getPendingCH() == null &&
(stableTopology == null || cacheTopology.getTopologyId() != stableTopology.getTopologyId());
} else {
rebalance = true;
}
if (updateTopology) {
CacheTopology newTopology = new CacheTopology(newTopologyId, cacheTopology.getRebalanceId(), balancedCH, null,
CacheTopology.Phase.NO_REBALANCE, balancedCH.getMembers(), persistentUUIDManager.mapAddresses(balancedCH.getMembers()));
log.tracef("Updating cache %s topology without rebalance: %s", cacheName, newTopology);
setCurrentTopology(newTopology);
CLUSTER.updatingTopology(cacheName, newTopology, availabilityMode);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheMembersUpdated(
newTopology.getMembers(), newTopology.getTopologyId()));
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, getAvailabilityMode());
} else if (rebalance) {
CacheTopology.Phase newPhase;
if (Objects.requireNonNull(rebalanceType) == RebalanceType.FOUR_PHASE) {
newPhase = CacheTopology.Phase.READ_OLD_WRITE_ALL;
} else {
throw new IllegalStateException();
}
CacheTopology newTopology = new CacheTopology(newTopologyId, newRebalanceId, currentCH, balancedCH,
newPhase, balancedCH.getMembers(), persistentUUIDManager.mapAddresses(balancedCH.getMembers()));
log.tracef("Updating cache %s topology for rebalance: %s", cacheName, newTopology);
setCurrentTopology(newTopology);
rebalanceInProgress = true;
assert rebalanceConfirmationCollector == null;
rebalanceConfirmationCollector = new RebalanceConfirmationCollector(cacheName, newTopology.getTopologyId(),
newTopology.getMembers(), this::endRebalance);
CLUSTER.startingRebalancePhase(cacheName, newTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.cacheRebalanceStart(
newTopology.getMembers(), newTopology.getPhase(), newTopology.getTopologyId()));
clusterTopologyManager.broadcastRebalanceStart(cacheName, newTopology);
} else if (updateStableTopology) {
stableTopology = currentTopology;
clusterTopologyManager.broadcastStableTopologyUpdate(cacheName, stableTopology);
}
}
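// Keeps the relative order of the surviving old members and appends new joiners at the
// end. Example: oldMembers = [A, B, C], newMembers = [C, B, D] -> retainAll leaves [B, C],
// then D is appended, yielding [B, C, D].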
private static List<Address> updateMembersPreservingOrder(List<Address> oldMembers, List<Address> newMembers) {
List<Address> membersPreservingOrder = new ArrayList<>(oldMembers);
membersPreservingOrder.retainAll(newMembers);
for (Address a : newMembers) {
if (!membersPreservingOrder.contains(a)) {
membersPreservingOrder.add(a);
}
}
return membersPreservingOrder;
}
public boolean isRebalanceEnabled() {
return rebalancingEnabled && clusterTopologyManager.isRebalancingEnabled();
}
public synchronized CompletionStage<Void> setRebalanceEnabled(boolean enabled) {
rebalancingEnabled = enabled;
if (rebalancingEnabled) {
log.debugf("Rebalancing is now enabled for cache %s", cacheName);
startQueuedRebalance();
} else {
log.debugf("Rebalancing is now disabled for cache %s", cacheName);
}
return CompletableFutures.completedNull();
}
public void forceRebalance() {
queueRebalance(getCurrentTopology().getMembers());
}
public synchronized CompletionStage<Void> forceAvailabilityMode(AvailabilityMode newAvailabilityMode) {
if (currentTopology != null && newAvailabilityMode != availabilityMode) {
availabilityStrategy.onManualAvailabilityChange(this, newAvailabilityMode);
}
return CompletableFutures.completedNull();
}
public synchronized CompletionStage<Void> shutdownCache() throws Exception {
if (status == ComponentStatus.RUNNING) {
status = ComponentStatus.STOPPING;
clusterTopologyManager.setRebalancingEnabled(cacheName, false);
return clusterTopologyManager.broadcastShutdownCache(cacheName)
.thenRun(() -> status = ComponentStatus.TERMINATED);
}
return CompletableFutures.completedNull();
}
public synchronized void setInitialTopologyId(int initialTopologyId) {
this.initialTopologyId = initialTopologyId;
}
@Override
public boolean resolveConflictsOnMerge() {
// It doesn't make sense to resolve conflicts if we are not going to rebalance the cache as entries on "old" owners
// will not be deleted when no rebalance occurs.
return resolveConflictsOnMerge &&
cacheManager.getStatus().allowInvocations() &&
clusterTopologyManager.isRebalancingEnabled() &&
rebalancingEnabled;
}
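// The conflict hash is the union of every distinct CH seen across the merging partitions,
// further unioned with its own rebalanced form, so that all possible read owners of each
// segment are consulted before the post-merge rebalance.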
@Override
public ConsistentHash calculateConflictHash(ConsistentHash preferredHash, Set<ConsistentHash> distinctHashes,
List<Address> actualMembers) {
// If we are required to resolveConflicts, then we utilise a union of all distinct CHs. This is necessary
// to ensure that we read the entries associated with all possible read owners before the rebalance occurs
ConsistentHashFactory chf = getJoinInfo().getConsistentHashFactory();
ConsistentHash unionHash = distinctHashes.stream().reduce(preferredHash, chf::union);
unionHash = chf.union(unionHash, chf.rebalance(unionHash));
return chf.updateMembers(unionHash, actualMembers, capacityFactors);
}
@Override
public synchronized void queueConflictResolution(final CacheTopology conflictTopology, final Set<Address> preferredNodes) {
if (resolveConflictsOnMerge()) {
conflictResolution = new ConflictResolution();
CompletableFuture<Void> resolutionFuture = conflictResolution.queue(conflictTopology, preferredNodes);
resolutionFuture.thenRun(this::completeConflictResolution);
}
}
private synchronized void completeConflictResolution() {
if (log.isTraceEnabled()) log.tracef("Cache %s conflict resolution future complete", cacheName);
// CR is only queued by PreferConsistencyStrategy when, during a merge, it is determined that the new
// availability mode will be AVAILABLE, therefore if this method is called we know that the partition
// must be set to AVAILABLE
availabilityMode = AvailabilityMode.AVAILABLE;
// Install a NO_REBALANCE topology with pendingCh == null to signal conflict resolution has finished
CacheTopology conflictTopology = conflictResolution.topology;
CacheTopology newTopology = new CacheTopology(conflictTopology.getTopologyId() + 1, conflictTopology.getRebalanceId(), conflictTopology.getCurrentCH(),
null, CacheTopology.Phase.NO_REBALANCE, conflictTopology.getActualMembers(), persistentUUIDManager.mapAddresses(conflictTopology.getActualMembers()));
conflictResolution = null;
setCurrentTopology(newTopology);
clusterTopologyManager.broadcastTopologyUpdate(cacheName, newTopology, availabilityMode);
List<Address> actualMembers = conflictTopology.getActualMembers();
List<Address> newMembers = getExpectedMembers();
updateAvailabilityMode(actualMembers, availabilityMode, false);
// Update the topology to remove leavers - in case there is a rebalance in progress, or rebalancing is disabled
updateCurrentTopology(newMembers);
// Then queue a rebalance to include the joiners as well
queueRebalance(newMembers);
}
@Override
public synchronized boolean restartConflictResolution(List<Address> members) {
// If conflictResolution is null then no CR in progress
if (!resolveConflictsOnMerge() || conflictResolution == null)
return false;
// No need to reattempt CR if only one node remains, so cancel CR, cleanup and queue rebalance
if (members.size() == 1) {
log.debugf("Cache %s cancelling conflict resolution as only one cluster member: members=%s", cacheName, members);
conflictResolution.cancelCurrentAttempt();
conflictResolution = null;
return false;
}
// CR members are the same as newMembers, so no need to restart
if (!conflictResolution.restartRequired(members)) {
if (log.isTraceEnabled()) log.tracef("Cache %s not restarting conflict resolution, existing conflict topology contains all members (%s)", cacheName, members);
return false;
}
CacheTopology conflictTopology = conflictResolution.topology;
ConsistentHashFactory chf = getJoinInfo().getConsistentHashFactory();
ConsistentHash newHash = chf.updateMembers(conflictTopology.getCurrentCH(), members, capacityFactors);
conflictTopology = new CacheTopology(currentTopology.getTopologyId() + 1, currentTopology.getRebalanceId(),
newHash, null, CacheTopology.Phase.CONFLICT_RESOLUTION, members, persistentUUIDManager.mapAddresses(members));
currentTopology = conflictTopology;
log.debugf("Cache %s restarting conflict resolution with topology %s", cacheName, currentTopology);
clusterTopologyManager.broadcastTopologyUpdate(cacheName, conflictTopology, availabilityMode);
queueConflictResolution(conflictTopology, conflictResolution.preferredNodes);
return true;
}
private synchronized void cancelConflictResolutionPhase(CacheTopology resolutionTopology) {
if (conflictResolution != null) {
// If the queued conflict resolution topology is newer than the passed topology, then we know
// that a new ConflictResolution attempt has been queued and therefore we should let it proceed.
// This check is necessary as it is possible that the call to this method is blocked by
// a concurrent operation on ClusterCacheStatus that may invalidate the cancel request.
if (conflictResolution.topology.getTopologyId() > resolutionTopology.getTopologyId())
return;
completeConflictResolution();
}
}
private class ConflictResolution {
final CompletableFuture<Void> future = new CompletableFuture<>();
final AtomicBoolean cancelledLocally = new AtomicBoolean();
final InternalConflictManager<?, ?> manager;
volatile CacheTopology topology;
volatile Set<Address> preferredNodes;
ConflictResolution() {
ComponentRegistry componentRegistry = gcr.getNamedComponentRegistry(cacheName);
this.manager = componentRegistry.getComponent(InternalConflictManager.class);
}
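// Starts conflict resolution through the cache's InternalConflictManager. The future
// completes only on success; a failure is either a local cancellation, a restartable
// failure (SuspectException, retried on the next view), or an error that cancels the
// conflict resolution phase.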
synchronized CompletableFuture<Void> queue(CacheTopology topology, Set<Address> preferredNodes) {
this.topology = topology;
this.preferredNodes = preferredNodes;
log.debugf("Cache %s queueing conflict resolution with members %s", cacheName, topology.getMembers());
Log.CLUSTER.startingConflictResolution(cacheName, currentTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.conflictResolutionStarting(
currentTopology.getMembers(), currentTopology.getTopologyId()));
manager.resolveConflicts(topology, preferredNodes).whenComplete((ignored, t) -> {
if (t == null) {
Log.CLUSTER.finishedConflictResolution(cacheName, currentTopology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.conflictResolutionFinished(
topology.getMembers(), topology.getTopologyId()));
future.complete(null);
} else {
// TODO Add log event for cancel/restart
if (cancelledLocally.get()) {
// We have explicitly cancelled the request, therefore return and do nothing
Log.CLUSTER.cancelledConflictResolution(cacheName, topology);
eventLogger.info(EventLogCategory.CLUSTER, MESSAGES.conflictResolutionCancelled(
topology.getMembers(), topology.getTopologyId()));
cancelConflictResolutionPhase(topology);
} else if (t instanceof CompletionException) {
Throwable cause;
Throwable rootCause = t;
while ((cause = rootCause.getCause()) != null && (rootCause != cause)) {
rootCause = cause;
}
// TODO When CR fails because a node left the cluster, the new CR can start before we cancel the old one
Log.CLUSTER.failedConflictResolution(cacheName, topology, rootCause);
eventLogger.error(EventLogCategory.CLUSTER, MESSAGES.conflictResolutionFailed(
topology.getMembers(), topology.getTopologyId(), rootCause.getMessage()));
// If a node is suspected then we can't restart the CR until a new view is received, so we leave conflictResolution != null
// so that on a new view restartConflictResolution can return true
if (!(rootCause instanceof SuspectException)) {
cancelConflictResolutionPhase(topology);
}
}
}
});
return future;
}
synchronized void cancelCurrentAttempt() {
cancelledLocally.set(true);
manager.cancelConflictResolution();
}
synchronized boolean restartRequired(List<Address> newMembers) {
assert newMembers != null;
return !newMembers.equals(topology.getMembers());
}
}
}
| 57,649
| 45.908055
| 171
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/CacheStatusResponse.java
|
package org.infinispan.topology;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.marshall.core.Ids;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.transport.Address;
/**
* @author Dan Berindei
* @since 7.0
*/
public class CacheStatusResponse implements Serializable {
private final CacheJoinInfo cacheJoinInfo;
private final CacheTopology cacheTopology;
private final CacheTopology stableTopology;
private final AvailabilityMode availabilityMode;
private final List<Address> current;
public CacheStatusResponse(CacheJoinInfo cacheJoinInfo, CacheTopology cacheTopology, CacheTopology stableTopology,
AvailabilityMode availabilityMode, List<Address> current) {
this.cacheJoinInfo = cacheJoinInfo;
this.cacheTopology = cacheTopology;
this.stableTopology = stableTopology;
this.availabilityMode = availabilityMode;
this.current = current;
}
public CacheJoinInfo getCacheJoinInfo() {
return cacheJoinInfo;
}
public CacheTopology getCacheTopology() {
return cacheTopology;
}
/**
* @see org.infinispan.partitionhandling.impl.AvailabilityStrategyContext#getStableTopology()
*/
public CacheTopology getStableTopology() {
return stableTopology;
}
public AvailabilityMode getAvailabilityMode() {
return availabilityMode;
}
public List<Address> joinedMembers() {
return current;
}
@Override
public String toString() {
return "StatusResponse{" +
"cacheJoinInfo=" + cacheJoinInfo +
", cacheTopology=" + cacheTopology +
", stableTopology=" + stableTopology +
'}';
}
public static class Externalizer extends AbstractExternalizer<CacheStatusResponse> {
@Override
public void writeObject(ObjectOutput output, CacheStatusResponse cacheStatusResponse) throws IOException {
output.writeObject(cacheStatusResponse.cacheJoinInfo);
output.writeObject(cacheStatusResponse.cacheTopology);
output.writeObject(cacheStatusResponse.stableTopology);
output.writeObject(cacheStatusResponse.availabilityMode);
MarshallUtil.marshallCollection(cacheStatusResponse.current, output);
}
@Override
public CacheStatusResponse readObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
CacheJoinInfo cacheJoinInfo = (CacheJoinInfo) unmarshaller.readObject();
CacheTopology cacheTopology = (CacheTopology) unmarshaller.readObject();
CacheTopology stableTopology = (CacheTopology) unmarshaller.readObject();
AvailabilityMode availabilityMode = (AvailabilityMode) unmarshaller.readObject();
List<Address> current = MarshallUtil.unmarshallCollection(unmarshaller, ArrayList::new);
return new CacheStatusResponse(cacheJoinInfo, cacheTopology, stableTopology, availabilityMode, current);
}
@Override
public Integer getId() {
return Ids.CACHE_STATUS_RESPONSE;
}
@Override
public Set<Class<? extends CacheStatusResponse>> getTypeClasses() {
return Collections.<Class<? extends CacheStatusResponse>>singleton(CacheStatusResponse.class);
}
}
}
| 3,587
| 34.524752
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/topology/ClusterTopologyManager.java
|
package org.infinispan.topology;
import java.util.List;
import java.util.concurrent.CompletionStage;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.transport.Address;
/**
* Maintains the topology for all the caches in the cluster.
*
* @author Dan Berindei
* @since 5.2
*/
@Scope(Scopes.GLOBAL)
public interface ClusterTopologyManager {
enum ClusterManagerStatus {
INITIALIZING,
REGULAR_MEMBER,
COORDINATOR,
RECOVERING_CLUSTER,
STOPPING;
boolean isRunning() {
return this != STOPPING;
}
boolean isCoordinator() {
return this == COORDINATOR || this == RECOVERING_CLUSTER;
}
}
/**
* Returns the list of nodes that joined the cache with the given {@code cacheName} if the current
* node is the coordinator. If the node is not the coordinator, the method returns null.
*/
List<Address> currentJoiners(String cacheName);
/**
* Signals that a new member is joining the cache.
*
* The returned {@code CacheStatusResponse.cacheTopology} is the current cache topology before the node joined.
* If the node is the first to join the cache, the returned topology does include the joiner,
* and it is never {@code null}.
*/
CompletionStage<CacheStatusResponse> handleJoin(String cacheName, Address joiner, CacheJoinInfo joinInfo, int viewId) throws Exception;
/**
* Signals that a member is leaving the cache.
*/
CompletionStage<Void> handleLeave(String cacheName, Address leaver, int viewId) throws Exception;
/**
* Marks the rebalance as complete on the sender.
*/
CompletionStage<Void> handleRebalancePhaseConfirm(String cacheName, Address node, int topologyId, Throwable throwable, int viewId) throws Exception;
/**
 * Returns whether rebalancing is enabled or disabled for this container.
 */
boolean isRebalancingEnabled();
/**
 * Returns whether rebalancing is enabled or disabled for the specified cache.
 */
boolean isRebalancingEnabled(String cacheName);
/**
* Globally enables or disables whether automatic rebalancing should occur.
*/
CompletionStage<Void> setRebalancingEnabled(boolean enabled);
/**
* Enables or disables rebalancing for the specified cache
*/
CompletionStage<Void> setRebalancingEnabled(String cacheName, boolean enabled);
/**
* Retrieves the rebalancing status of a cache
*/
RebalancingStatus getRebalancingStatus(String cacheName);
CompletionStage<Void> forceRebalance(String cacheName);
CompletionStage<Void> forceAvailabilityMode(String cacheName, AvailabilityMode availabilityMode);
CompletionStage<Void> handleShutdownRequest(String cacheName) throws Exception;
boolean useCurrentTopologyAsStable(String cacheName, boolean force);
/**
* Sets the id of the initial topology in given cache. This is necessary when using entry versions
* that contain topology id; had we started with topology id 1, newer versions would not be recognized properly.
*/
void setInitialCacheTopologyId(String cacheName, int topologyId);
ClusterManagerStatus getStatus();
}
| 3,191
| 31.242424
| 151
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/package-info.java
|
/**
* Remote communication between cache instances.
*/
package org.infinispan.remoting;
| 90
| 17.2
| 48
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/RemoteException.java
|
package org.infinispan.remoting;
import org.infinispan.commons.CacheException;
/**
* Represents an application-level exception originating in a remote node.
*
* @author Galder Zamarreño
* @since 5.2
*/
public class RemoteException extends CacheException {
public RemoteException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 354
| 18.722222
| 74
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/RpcException.java
|
package org.infinispan.remoting;
import org.infinispan.commons.CacheException;
/**
* Thrown when an RPC problem occurred on the caller.
*
* @author (various)
* @since 4.0
*/
public class RpcException extends CacheException {
private static final long serialVersionUID = 33172388691879866L;
public RpcException() {
super();
}
public RpcException(Throwable cause) {
super(cause);
}
public RpcException(String msg) {
super(msg);
}
public RpcException(String msg, Throwable cause) {
super(msg, cause);
}
}
| 565
| 17.258065
| 67
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/CacheUnreachableException.java
|
package org.infinispan.remoting;
import org.infinispan.commons.CacheException;
import org.jgroups.UnreachableException;
/**
* Signals a backup site was unreachable.
*
* @author Pedro Ruivo
* @since 7.0
*/
public class CacheUnreachableException extends CacheException {
public CacheUnreachableException(String message) {
super(message);
}
public CacheUnreachableException(UnreachableException e) {
super(e.toString());
}
}
| 456
| 18.869565
| 63
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/LocalInvocation.java
|
package org.infinispan.remoting;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.ResponseGenerator;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* Simulates a remote invocation on the local node. This is needed because the transport does not redirect to itself the
* replicable commands.
*
* @author Pedro Ruivo
* @since 7.0
*/
public class LocalInvocation implements Callable<Response>, Function<Object, Response> {
private final ResponseGenerator responseGenerator;
private final CacheRpcCommand command;
private final ComponentRegistry componentRegistry;
private final CommandsFactory commandsFactory;
private final Address self;
private final BlockingManager blockingManager;
private LocalInvocation(ResponseGenerator responseGenerator, CacheRpcCommand command,
ComponentRegistry componentRegistry, Address self) {
this.responseGenerator = responseGenerator;
this.command = command;
this.componentRegistry = componentRegistry;
this.commandsFactory = componentRegistry.getCommandsFactory();
this.self = self;
this.blockingManager = componentRegistry.getComponent(BlockingManager.class);
}
@Override
public Response call() throws Exception {
return CompletableFutures.await(callAsync().toCompletableFuture());
}
public static LocalInvocation newInstanceFromCache(Cache<?, ?> cache, CacheRpcCommand command) {
return newInstance(cache.getAdvancedCache().getComponentRegistry(), command);
}
public static LocalInvocation newInstance(ComponentRegistry componentRegistry, CacheRpcCommand command) {
ResponseGenerator responseGenerator = componentRegistry.getResponseGenerator();
Address self = componentRegistry.getComponent(ClusteringDependentLogic.class).getAddress();
return new LocalInvocation(responseGenerator, command, componentRegistry, self);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
LocalInvocation that = (LocalInvocation) o;
return command.equals(that.command);
}
@Override
public int hashCode() {
return command.hashCode();
}
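// Commands that may block are dispatched through the BlockingManager so the calling
// thread (typically a non-blocking transport thread) is never blocked; all other
// commands are invoked inline.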
public CompletionStage<Response> callAsync() {
commandsFactory.initializeReplicableCommand(command, false);
command.setOrigin(self);
try {
CompletionStage<?> stage;
if (command.canBlock()) {
stage = blockingManager.supplyBlocking(() -> {
try {
return command.invokeAsync(componentRegistry);
} catch (Throwable t) {
throw CompletableFutures.asCompletionException(t);
}
}, command.getCommandId())
.thenCompose(Function.identity());
} else {
stage = command.invokeAsync(componentRegistry);
}
return stage.thenApply(this);
} catch (Throwable throwable) {
return CompletableFuture.failedFuture(throwable);
}
}
@Override
public Response apply(Object retVal) {
if (retVal instanceof Response) {
return (Response) retVal;
} else {
return responseGenerator.getResponse(command, retVal);
}
}
}
| 3,882
| 34.953704
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/UnsureResponse.java
|
package org.infinispan.remoting.responses;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.marshall.core.Ids;
/**
* An unsure response - used with Dist - essentially asks the caller to check the next response from the next node since
* the sender is in a state of flux (probably in the middle of rebalancing)
*
* @author Manik Surtani
* @since 4.0
*/
public class UnsureResponse extends ValidResponse {
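// Stateless, so a single shared instance suffices; the externalizer below writes no data
// and always reads back the same INSTANCE.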
public static final UnsureResponse INSTANCE = new UnsureResponse();
@Override
public boolean isSuccessful() {
return false;
}
@Override
public Object getResponseValue() {
throw new UnsupportedOperationException();
}
public static class Externalizer extends AbstractExternalizer<UnsureResponse> {
@Override
public void writeObject(ObjectOutput output, UnsureResponse subject) throws IOException {
}
@Override
public UnsureResponse readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return INSTANCE;
}
@Override
public Integer getId() {
return Ids.UNSURE_RESPONSE;
}
@Override
public Set<Class<? extends UnsureResponse>> getTypeClasses() {
return Util.<Class<? extends UnsureResponse>>asSet(UnsureResponse.class);
}
}
}
| 1,480
| 27.480769
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/ResponseGenerator.java
|
package org.infinispan.remoting.responses;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* A component that generates responses as is expected by different cache setups
*
* @author Manik Surtani
* @since 4.0
*/
@Scope(Scopes.NAMED_CACHE)
public interface ResponseGenerator {
Response getResponse(CacheRpcCommand command, Object returnValue);
}
| 460
| 26.117647
| 80
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/BiasRevocationResponse.java
|
package org.infinispan.remoting.responses;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.remoting.transport.Address;
public class BiasRevocationResponse extends SuccessfulResponse {
private final Address[] waitFor;
public BiasRevocationResponse(Object responseValue, Address[] waitFor) {
super(responseValue);
this.waitFor = waitFor;
}
public Address[] getWaitList() {
return waitFor;
}
public static class Externalizer implements AdvancedExternalizer<BiasRevocationResponse> {
@Override
public Set<Class<? extends BiasRevocationResponse>> getTypeClasses() {
return Util.asSet(BiasRevocationResponse.class);
}
@Override
public Integer getId() {
return Ids.BIAS_REVOCATION_RESPONSE;
}
@Override
public void writeObject(ObjectOutput output, BiasRevocationResponse object) throws IOException {
output.writeObject(object.getResponseValue());
MarshallUtil.marshallArray(object.waitFor, output);
}
@Override
public BiasRevocationResponse readObject(ObjectInput input) throws IOException, ClassNotFoundException {
Object value = input.readObject();
Address[] waitFor = MarshallUtil.unmarshallArray(input, Address[]::new);
return new BiasRevocationResponse(value, waitFor);
}
}
}
| 1,637
| 31.117647
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/package-info.java
|
/**
* Abstractions of the different response types allowed during RPC.
*/
package org.infinispan.remoting.responses;
| 119
| 23
| 67
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/SuccessfulResponse.java
|
package org.infinispan.remoting.responses;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.util.Util;
import org.infinispan.marshall.core.Ids;
/**
* A successful response
*
* @author Manik Surtani
* @since 4.0
*/
public class SuccessfulResponse<T> extends ValidResponse {
public static final SuccessfulResponse SUCCESSFUL_EMPTY_RESPONSE = new SuccessfulResponse<>(null);
private final T responseValue;
protected SuccessfulResponse(T responseValue) {
this.responseValue = responseValue;
}
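// A null value maps to the shared SUCCESSFUL_EMPTY_RESPONSE, avoiding a fresh allocation for
// the common "no return value" case: SuccessfulResponse.create(null) == SUCCESSFUL_EMPTY_RESPONSE.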
@SuppressWarnings("unchecked")
public static <T> SuccessfulResponse<T> create(T responseValue) {
return responseValue == null ? SUCCESSFUL_EMPTY_RESPONSE : new SuccessfulResponse<>(responseValue);
}
@Override
public boolean isSuccessful() {
return true;
}
public T getResponseValue() {
return responseValue;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SuccessfulResponse that = (SuccessfulResponse) o;
if (responseValue != null ? !responseValue.equals(that.responseValue) : that.responseValue != null) return false;
return true;
}
@Override
public int hashCode() {
return responseValue != null ? responseValue.hashCode() : 0;
}
@Override
public String toString() {
return "SuccessfulResponse(" + Util.toStr(responseValue) + ")";
}
public static class Externalizer extends AbstractExternalizer<SuccessfulResponse> {
@Override
public void writeObject(ObjectOutput output, SuccessfulResponse response) throws IOException {
output.writeObject(response.responseValue);
}
@Override
public SuccessfulResponse readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return create(input.readObject());
}
@Override
public Integer getId() {
return Ids.SUCCESSFUL_RESPONSE;
}
@Override
public Set<Class<? extends SuccessfulResponse>> getTypeClasses() {
return Util.<Class<? extends SuccessfulResponse>>asSet(SuccessfulResponse.class);
}
}
}
| 2,342
| 26.244186
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/DefaultResponseGenerator.java
|
package org.infinispan.remoting.responses;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* The default response generator for most cache modes
*
* @author Manik Surtani
* @author Dan Berindei
* @since 4.0
*/
public class DefaultResponseGenerator implements ResponseGenerator {
private static final Log log = LogFactory.getLog(DefaultResponseGenerator.class);
public Response getResponse(CacheRpcCommand command, Object returnValue) {
if (returnValue instanceof Response)
return (Response) returnValue;
if (command.isReturnValueExpected()) {
return command.isSuccessful() ? SuccessfulResponse.create(returnValue) : UnsuccessfulResponse.create(returnValue);
} else {
if (returnValue != null) {
if (log.isTraceEnabled()) log.tracef("Ignoring non-null response for command %s: %s", command, returnValue);
}
return null; // saves on serializing a response!
}
}
}
| 1,058
| 33.16129
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/InvalidResponse.java
|
package org.infinispan.remoting.responses;
/**
* An invalid response
*
* @author Manik Surtani
* @since 4.0
*/
public abstract class InvalidResponse implements Response {
@Override
public boolean isValid() {
return false;
}
@Override
public boolean isSuccessful() {
return false;
}
@Override
public String toString() {
return getClass().getSimpleName();
}
}
| 411
| 15.48
| 59
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/remoting/responses/ClusteredGetResponseValidityFilter.java
|
package org.infinispan.remoting.responses;
import java.util.Collection;
import java.util.HashSet;
import org.infinispan.remoting.rpc.ResponseFilter;
import org.infinispan.remoting.transport.Address;
/**
* A filter that tests the validity of {@link org.infinispan.commands.remote.ClusteredGetCommand}s.
*
* JGroups calls our handler while holding a lock, so we don't need any synchronization.
*
* @author Manik Surtani
* @since 4.0
*/
public class ClusteredGetResponseValidityFilter implements ResponseFilter {
private Collection<Address> targets;
private int acceptableResponses;
private int missingResponses;
public ClusteredGetResponseValidityFilter(Collection<Address> targets, Address self) {
this.targets = new HashSet<>(targets);
this.acceptableResponses = 0;
this.missingResponses = targets.size();
if (this.targets.contains(self)) {
this.missingResponses--;
}
}
@Override
public boolean isAcceptable(Response response, Address address) {
if (targets.contains(address)) {
missingResponses--;
if (response instanceof SuccessfulResponse || response instanceof ExceptionResponse) {
acceptableResponses++;
return true;
}
}
return false;
}
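// Keep waiting only while no acceptable response has arrived and some targets have not
// replied yet; the first successful or exception response stops the RPC.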
@Override
public boolean needMoreResponses() {
return acceptableResponses < 1 && missingResponses > 0;
}
}
| 1,418
| 27.38
| 99
|
java
|