| repo (string, 1–191 chars, may be null) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| null | infinispan-main/core/src/main/java/org/infinispan/security/actions/GetPersistenceManagerAction.java |
package org.infinispan.security.actions;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.manager.PersistenceManager;
public class GetPersistenceManagerAction extends AbstractEmbeddedCacheManagerAction<PersistenceManager> {
private final String cacheName;
public GetPersistenceManagerAction(EmbeddedCacheManager cacheManager, String cacheName) {
super(cacheManager);
this.cacheName = cacheName;
}
@Override
public PersistenceManager get() {
ComponentRegistry cr = cacheManager.getGlobalComponentRegistry().getNamedComponentRegistry(cacheName);
if (cr == null)
throw new IllegalLifecycleStateException();
return cr.getComponent(PersistenceManager.class);
}
}
| 871 | 32.538462 | 108 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/actions/AddLoggerListenerAsyncAction.java |
package org.infinispan.security.actions;
import java.util.concurrent.CompletionStage;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.util.logging.events.EventLogManager;
/**
* Add logger listener action.
*
* @since 14.0
*/
public class AddLoggerListenerAsyncAction extends AbstractEmbeddedCacheManagerAction<CompletionStage<Void>> {
private final Object listener;
public AddLoggerListenerAsyncAction(EmbeddedCacheManager cacheManager, Object listener) {
super(cacheManager);
this.listener = listener;
}
@Override
public CompletionStage<Void> get() {
return EventLogManager.getEventLogger(cacheManager).addListenerAsync(listener);
}
}
| 706 | 26.192308 | 109 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/actions/GetOrCreateCacheAction.java |
package org.infinispan.security.actions;
import java.util.function.Supplier;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.manager.EmbeddedCacheManager;
/**
* GetOrCreateCacheAction.
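* <p>
* Supplies the named cache, creating it from the given {@link Configuration} through the
* cache manager's administration API when it does not yet exist.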
*
* @since 15.0
*/
public class GetOrCreateCacheAction<A extends Cache<K, V>, K, V> implements Supplier<A> {
private final String cacheName;
private final EmbeddedCacheManager cacheManager;
private final Configuration configuration;
public GetOrCreateCacheAction(EmbeddedCacheManager cacheManager, String cacheName, Configuration configuration) {
this.cacheManager = cacheManager;
this.cacheName = cacheName;
this.configuration = configuration;
}
@SuppressWarnings("unchecked")
@Override
public A get() {
return (A) cacheManager.administration().getOrCreateCache(cacheName, configuration);
}
}
| 901 | 26.333333 | 116 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/actions/GetEmbeddedCacheManagerAction.java |
package org.infinispan.security.actions;
import org.infinispan.AdvancedCache;
import org.infinispan.manager.EmbeddedCacheManager;
/**
* GetEmbeddedCacheManagerAction.
*
* @author Pedro Ruivo
* @since 12.0
*/
public class GetEmbeddedCacheManagerAction extends AbstractAdvancedCacheAction<EmbeddedCacheManager> {
public GetEmbeddedCacheManagerAction(AdvancedCache<?, ?> cache) {
super(cache);
}
@Override
public EmbeddedCacheManager get() {
return cache.getCacheManager();
}
}
| 511 | 21.26087 | 102 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/actions/GetCacheEntryAction.java |
package org.infinispan.security.actions;
import org.infinispan.AdvancedCache;
import org.infinispan.container.entries.CacheEntry;
/**
* GetCacheEntryAction.
*
* @author Tristan Tarrant
* @since 7.2
*/
public class GetCacheEntryAction<K, V> extends AbstractAdvancedCacheAction<CacheEntry<K, V>> {
private final K key;
public GetCacheEntryAction(AdvancedCache<?, ?> cache, K key) {
super(cache);
this.key = key;
}
@Override
public CacheEntry<K, V> get() {
return ((AdvancedCache<K, V>) cache).getCacheEntry(key);
}
}
| 561 | 20.615385 | 94 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/actions/GetCacheConfigurationFromManagerAction.java |
package org.infinispan.security.actions;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.manager.EmbeddedCacheManager;
/**
* GetCacheConfigurationFromManagerAction.
*
* @author Dan Berindei
* @since 10.0
*/
public class GetCacheConfigurationFromManagerAction extends AbstractEmbeddedCacheManagerAction<Configuration> {
private final String name;
public GetCacheConfigurationFromManagerAction(EmbeddedCacheManager cacheManager, String name) {
super(cacheManager);
this.name = name;
}
@Override
public Configuration get() {
return cacheManager.getCacheConfiguration(name);
}
}
| 648 | 23.961538 | 111 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/IdentityRoleMapper.java |
package org.infinispan.security.mappers;
import java.security.Principal;
import java.util.Collections;
import java.util.Set;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.security.PrincipalRoleMapper;
/**
* IdentityRoleMapper. A very simple role mapper that returns the principal's name as the role name.
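* <p>
* For example, a principal named {@code "admin"} maps to the single role {@code "admin"}.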
*
* @author Tristan Tarrant
* @since 7.0
* @api.public
*/
@Scope(Scopes.GLOBAL)
public class IdentityRoleMapper implements PrincipalRoleMapper {
@Override
public Set<String> principalToRoles(Principal principal) {
return Collections.singleton(principal.getName());
}
@Override
public boolean equals(Object obj) {
return obj instanceof IdentityRoleMapper;
}
}
| 775 | 24.032258 | 101 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/NameRewriter.java |
package org.infinispan.security.mappers;
/**
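* Rewrites a principal name before role mapping (see {@link ClusterRoleMapper#nameRewriter(NameRewriter)}).
* A minimal sketch of a custom rewriter (hypothetical):
* <pre>{@code
* NameRewriter stripDomain = name -> {
*    int at = name.indexOf('@');
*    return at < 0 ? name : name.substring(0, at);
* };
* }</pre>
*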
* @since 15.0
**/
@FunctionalInterface
public interface NameRewriter {
NameRewriter IDENTITY_REWRITER = original -> original;
/**
* Rewrite a name.
*
* @param original the original name (must not be {@code null})
* @return the rewritten name, or {@code null} if the name is invalid
*/
String rewriteName(String original);
}
| 433 | 23.111111 | 72 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/ClusterRoleMapper.java |
package org.infinispan.security.mappers;
import java.security.Principal;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.context.Flag;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.security.MutablePrincipalRoleMapper;
import org.infinispan.security.actions.SecurityActions;
/**
* ClusterRoleMapper. Implements a {@link MutablePrincipalRoleMapper}, storing the principal-to-role
* mappings in a persistent, replicated internal cache named <tt>org.infinispan.ROLES</tt>.
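* <p>
* A minimal usage sketch (assuming an injected, started mapper; when the cache manager is
* clustered the mapping is replicated):
* <pre>{@code
* mapper.grant("admin", "alice");
* Set<String> roles = mapper.principalToRoles(() -> "alice"); // {"admin"}
* }</pre>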
*
* @author Tristan Tarrant
* @since 7.0
*/
@Scope(Scopes.GLOBAL)
public class ClusterRoleMapper implements MutablePrincipalRoleMapper {
@Inject
EmbeddedCacheManager cacheManager;
@Inject
InternalCacheRegistry internalCacheRegistry;
public static final String CLUSTER_ROLE_MAPPER_CACHE = "org.infinispan.ROLES";
private Cache<String, RoleSet> clusterRoleMap;
private Cache<String, RoleSet> clusterRoleReadMap;
private NameRewriter nameRewriter = NameRewriter.IDENTITY_REWRITER;
@Start
void start() {
initializeInternalCache();
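// Reads go through a local, loader-skipping view of the roles cache: principalToRoles()
// runs on every authorization check, so lookups must not hit the store or remote nodes.
// Writes (grant/deny) still use clusterRoleMap and are replicated normally.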
clusterRoleMap = cacheManager.getCache(CLUSTER_ROLE_MAPPER_CACHE);
clusterRoleReadMap = clusterRoleMap.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.CACHE_MODE_LOCAL);
}
@Override
public Set<String> principalToRoles(Principal principal) {
String name = nameRewriter.rewriteName(principal.getName());
if (clusterRoleReadMap == null) {
return Collections.singleton(name);
}
RoleSet roleSet = clusterRoleReadMap.get(name);
if (roleSet != null && !roleSet.roles.isEmpty()) {
return roleSet.roles;
} else {
return Collections.singleton(name);
}
}
private void initializeInternalCache() {
GlobalConfiguration globalConfiguration = SecurityActions.getCacheManagerConfiguration(cacheManager);
CacheMode cacheMode = globalConfiguration.isClustered() ? CacheMode.REPL_SYNC : CacheMode.LOCAL;
ConfigurationBuilder cfg = new ConfigurationBuilder();
cfg.clustering().cacheMode(cacheMode)
.stateTransfer().fetchInMemoryState(true).awaitInitialTransfer(false)
.security().authorization().disable();
internalCacheRegistry.registerInternalCache(CLUSTER_ROLE_MAPPER_CACHE, cfg.build(), EnumSet.of(InternalCacheRegistry.Flag.PERSISTENT));
}
@Override
public void grant(String roleName, String principalName) {
RoleSet roleSet = clusterRoleMap.computeIfAbsent(principalName, n -> new RoleSet());
roleSet.roles.add(roleName);
clusterRoleMap.put(principalName, roleSet);
}
@Override
public void deny(String roleName, String principalName) {
RoleSet roleSet = clusterRoleMap.computeIfAbsent(principalName, n -> new RoleSet());
roleSet.roles.remove(roleName);
clusterRoleMap.put(principalName, roleSet);
}
@Override
public Set<String> list(String principalName) {
RoleSet roleSet = clusterRoleReadMap.get(principalName);
if (roleSet != null) {
return Collections.unmodifiableSet(roleSet.roles);
} else {
return Collections.singleton(principalName);
}
}
@Override
public String listAll() {
StringBuilder sb = new StringBuilder();
for (RoleSet set : clusterRoleReadMap.values()) {
sb.append(set.roles.toString());
}
return sb.toString();
}
public void nameRewriter(NameRewriter nameRewriter) {
this.nameRewriter = nameRewriter;
}
public NameRewriter nameRewriter() {
return nameRewriter;
}
@ProtoTypeId(ProtoStreamTypeIds.ROLE_SET)
public static class RoleSet {
@ProtoField(number = 1, collectionImplementation = HashSet.class)
final Set<String> roles;
RoleSet() {
this(new HashSet<>());
}
@ProtoFactory
RoleSet(Set<String> roles) {
this.roles = roles;
}
public Set<String> getRoles() {
return roles;
}
}
}
| 4,757 | 33.729927 | 141 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/RegexNameRewriter.java |
package org.infinispan.security.mappers;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.wildfly.common.Assert;
/**
* A simple regular expression-based name rewriter.
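* <p>
* For example, {@code new RegexNameRewriter(Pattern.compile("@.*"), "", false)} rewrites
* {@code "alice@example.com"} to {@code "alice"}.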
*/
public final class RegexNameRewriter implements NameRewriter {
private final Pattern pattern;
private final String replacement;
private final boolean replaceAll;
/**
* Construct a new instance.
*
* @param pattern the substitution pattern (must not be {@code null})
* @param replacement the replacement string
* @param replaceAll {@code true} to replace all occurrences of the pattern; {@code false} to replace only the first occurrence
*/
public RegexNameRewriter(final Pattern pattern, final String replacement, final boolean replaceAll) {
this.pattern = Assert.checkNotNullParam("pattern", pattern);
this.replacement = replacement;
this.replaceAll = replaceAll;
}
/**
* Rewrite a name.
*
* @param original the original name
*
* @return the rewritten name, or {@code null} if {@code original} is {@code null}
*/
public String rewriteName(final String original) {
if (original == null) return null;
final Matcher matcher = pattern.matcher(original);
return replaceAll ? matcher.replaceAll(replacement) : matcher.replaceFirst(replacement);
}
/**
* Get the pattern.
*
* @return the pattern
*/
public Pattern getPattern() {
return pattern;
}
/**
* Get the replacement string.
*
* @return the replacement string
*/
public String getReplacement() {
return replacement;
}
/**
* Whether this rewriter replaces all occurrences of the pattern or just the first.
*/
public boolean isReplaceAll() {
return replaceAll;
}
}
| 1,848 | 26.597015 | 131 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/ClusterPermissionMapper.java |
package org.infinispan.security.mappers;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletionStage;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.context.Flag;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.security.MutableRolePermissionMapper;
import org.infinispan.security.Role;
import org.infinispan.security.actions.SecurityActions;
/**
* ClusterPermissionMapper. Implements a {@link MutableRolePermissionMapper}, storing the role-to-permission
* mappings in a persistent, replicated internal cache named <tt>org.infinispan.PERMISSIONS</tt>.
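* <p>
* A minimal usage sketch (assuming {@code role} is an existing {@link Role} definition):
* <pre>{@code
* mapper.addRole(role).toCompletableFuture().join(); // stored and, when clustered, replicated
* Role stored = mapper.getRole(role.getName());
* }</pre>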
*
* @author Tristan Tarrant
* @since 14.0
*/
@Scope(Scopes.GLOBAL)
public class ClusterPermissionMapper implements MutableRolePermissionMapper {
public static final String CLUSTER_PERMISSION_MAPPER_CACHE = "org.infinispan.PERMISSIONS";
@Inject
EmbeddedCacheManager cacheManager;
@Inject
InternalCacheRegistry internalCacheRegistry;
private Cache<String, Role> clusterPermissionMap;
private Cache<String, Role> clusterPermissionReadMap;
@Start
void start() {
initializeInternalCache();
clusterPermissionMap = cacheManager.getCache(CLUSTER_PERMISSION_MAPPER_CACHE);
clusterPermissionReadMap = clusterPermissionMap.getAdvancedCache().withFlags(Flag.SKIP_CACHE_LOAD, Flag.CACHE_MODE_LOCAL);
}
private void initializeInternalCache() {
GlobalConfiguration globalConfiguration = SecurityActions.getCacheManagerConfiguration(cacheManager);
CacheMode cacheMode = globalConfiguration.isClustered() ? CacheMode.REPL_SYNC : CacheMode.LOCAL;
ConfigurationBuilder cfg = new ConfigurationBuilder();
cfg.clustering().cacheMode(cacheMode)
.stateTransfer().fetchInMemoryState(true).awaitInitialTransfer(false)
.security().authorization().disable();
internalCacheRegistry.registerInternalCache(CLUSTER_PERMISSION_MAPPER_CACHE, cfg.build(), EnumSet.of(InternalCacheRegistry.Flag.PERSISTENT));
}
@Override
public CompletionStage<Void> addRole(Role role) {
return clusterPermissionMap.putAsync(role.getName(), role).thenApply(ignore -> null);
}
@Override
public CompletionStage<Boolean> removeRole(String name) {
return clusterPermissionMap.removeAsync(name).thenApply(Objects::nonNull);
}
@Override
public Map<String, Role> getAllRoles() {
return isActive() ? clusterPermissionReadMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) : Collections.emptyMap();
}
@Override
public Role getRole(String name) {
return isActive() ? clusterPermissionReadMap.get(name) : null;
}
@Override
public boolean hasRole(String name) {
return !isActive() || clusterPermissionReadMap.containsKey(name);
}
private boolean isActive() {
return clusterPermissionReadMap != null && cacheManager.getStatus().allowInvocations();
}
}
| 3,467 | 38.409091 | 162 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/CommonNameRoleMapper.java |
package org.infinispan.security.mappers;
import java.security.Principal;
import java.util.Collections;
import java.util.Set;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.security.PrincipalRoleMapper;
/**
* CommonNameRoleMapper. A simple mapper which extracts the Common Name (CN) from an
* LDAP-style Distinguished Name (DN) and returns it as the role.
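* <p>
* For example, the DN {@code "CN=admin,OU=users,DC=example,DC=com"} maps to the single
* role {@code "admin"}; a name without a leading {@code CN=} yields {@code null}.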
*
* @author Tristan Tarrant
* @since 7.0
* @api.public
*/
@Scope(Scopes.GLOBAL)
public class CommonNameRoleMapper implements PrincipalRoleMapper {
@Override
public Set<String> principalToRoles(Principal principal) {
String name = principal.getName();
if (name.regionMatches(true, 0, "CN=", 0, 3)) {
return Collections.singleton(name.split(",")[0].substring(3));
} else {
return null;
}
}
@Override
public boolean equals(Object obj) {
return obj instanceof CommonNameRoleMapper;
}
}
| 983 | 25.594595 | 84 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/mappers/CaseNameRewriter.java |
package org.infinispan.security.mappers;
import java.util.Locale;
/**
* A rewriter which can convert a name to uppercase or lowercase
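* <p>
* For example, {@code new CaseNameRewriter(true).rewriteName("alice")} returns {@code "ALICE"},
* while {@code new CaseNameRewriter(false).rewriteName("Alice")} returns {@code "alice"}.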
*/
public final class CaseNameRewriter implements NameRewriter {
private final boolean upperCase;
public CaseNameRewriter() {
this(true);
}
public CaseNameRewriter(boolean upperCase) {
this.upperCase = upperCase;
}
public String rewriteName(String original) {
if (original == null) {
return null;
} else {
return this.upperCase ? original.toUpperCase(Locale.ROOT) : original.toLowerCase(Locale.ROOT);
}
}
}
| 617 | 21.888889 | 103 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/impl/IdentityRoleMapper.java |
package org.infinispan.security.impl;
/**
* @author Tristan Tarrant <tristan@infinispan.org>
* @deprecated use {@link org.infinispan.security.mappers.IdentityRoleMapper} instead
**/
@Deprecated
public class IdentityRoleMapper extends org.infinispan.security.mappers.IdentityRoleMapper {
}
| 299 | 29 | 92 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/impl/ClusterRoleMapper.java |
package org.infinispan.security.impl;
/**
* @author Tristan Tarrant <tristan@infinispan.org>
* @deprecated use {@link org.infinispan.security.mappers.ClusterRoleMapper} instead. This class will be removed in Infinispan 14.0
**/
@Deprecated
public class ClusterRoleMapper extends org.infinispan.security.mappers.ClusterRoleMapper {
}
| 343 | 33.4 | 131 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/impl/AuditMessages.java |
package org.infinispan.security.impl;
import java.security.Principal;
import org.infinispan.security.AuditContext;
import org.infinispan.security.AuditResponse;
import org.infinispan.security.AuthorizationPermission;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
* AuditMessages. Messages used by the security audit.
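* <p>
* With the {@code "[%s] %s %s %s[%s]"} format below, a logged entry reads, for
* illustrative values, {@code [ALLOW] alice READ CACHE[secured]}.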
*
* @author Tristan Tarrant
* @since 7.0
*/
@MessageLogger(projectCode="")
public interface AuditMessages {
@LogMessage
@Message("[%s] %s %s %s[%s]")
void auditMessage(AuditResponse response, Principal principal, AuthorizationPermission permission, AuditContext context, String contextName);
}
| 733 | 28.36 | 144 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/impl/AuthorizationMapperContextImpl.java |
package org.infinispan.security.impl;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.PrincipalRoleMapperContext;
/**
* AuthorizationMapperContextImpl.
*
* @author Tristan Tarrant
* @since 7.0
*/
public class AuthorizationMapperContextImpl implements PrincipalRoleMapperContext {
private final EmbeddedCacheManager cacheManager;
public AuthorizationMapperContextImpl(EmbeddedCacheManager cacheManager) {
this.cacheManager = cacheManager;
}
@Override
public EmbeddedCacheManager getCacheManager() {
return cacheManager;
}
}
| 599 | 22.076923 | 83 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/impl/SubjectACL.java |
package org.infinispan.security.impl;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Set;
import org.infinispan.security.AuthorizationPermission;
/**
* SubjectACL.
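* Captures a subject's roles together with a permission bit mask; {@code matches(m)} is
* {@code true} when every bit of {@code m} is set in the mask (e.g. a mask of
* {@code 0b0110} matches {@code 0b0010} but not {@code 0b1000}).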
*
* @author Tristan Tarrant
* @since 8.1
*/
public class SubjectACL {
private final Set<String> roles;
private final int mask;
public SubjectACL(Set<String> roles, int mask) {
this.roles = Collections.unmodifiableSet(roles);
this.mask = mask;
}
public int getMask() {
return mask;
}
public Set<String> getRoles() {
return roles;
}
public boolean containsRole(String role) {
return roles.contains(role);
}
public boolean matches(int permissionMask) {
return (mask & permissionMask) == permissionMask;
}
@Override
public String toString() {
return "SubjectACL [roles=" + roles + ", mask=" + mask + "]";
}
public EnumSet<AuthorizationPermission> getPermissions() {
EnumSet<AuthorizationPermission> permissions = EnumSet.noneOf(AuthorizationPermission.class);
for(AuthorizationPermission permission : AuthorizationPermission.values()) {
if ((mask & permission.getMask()) != 0) {
permissions.add(permission);
}
}
return permissions;
}
}
| 1,284 | 22.363636 | 99 | java |
| null | infinispan-main/core/src/main/java/org/infinispan/security/impl/SecureCacheImpl.java |
package org.infinispan.security.impl;
import java.lang.annotation.Annotation;
import java.util.Collection;
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Function;
import javax.security.auth.Subject;
import javax.transaction.xa.XAResource;
import org.infinispan.AdvancedCache;
import org.infinispan.CacheCollection;
import org.infinispan.CacheSet;
import org.infinispan.LockedStream;
import org.infinispan.batch.BatchContainer;
import org.infinispan.commons.dataconversion.Encoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.Wrapper;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.encoding.DataConversion;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.expiration.ExpirationManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.filter.CacheEventConverter;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.security.AuthorizationManager;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.SecureCache;
import org.infinispan.stats.Stats;
import org.infinispan.util.concurrent.locks.LockManager;
import jakarta.transaction.TransactionManager;
/**
* SecureCacheImpl.
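* <p>
* A delegating {@link AdvancedCache} that checks the required {@link AuthorizationPermission}
* against the cache's {@link AuthorizationManager} before forwarding each operation to the
* delegate. A minimal usage sketch (hypothetical {@code cache} and {@code subject}):
* <pre>{@code
* AdvancedCache<String, String> secure = new SecureCacheImpl<>(cache).withSubject(subject);
* secure.put("k", "v"); // checked against the cache's write permission for the subject
* }</pre>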
*
* @author Tristan Tarrant
* @since 7.0
*/
public final class SecureCacheImpl<K, V> implements SecureCache<K, V> {
private final AuthorizationManager authzManager;
private final AdvancedCache<K, V> delegate;
private final Subject subject;
private final AuthorizationPermission writePermission;
public SecureCacheImpl(AdvancedCache<K, V> delegate) {
this(delegate, delegate.getAuthorizationManager(), null);
}
private SecureCacheImpl(AdvancedCache<K, V> delegate, AuthorizationManager authzManager, Subject subject) {
this.authzManager = authzManager;
this.delegate = delegate;
this.subject = subject;
this.writePermission = authzManager.getWritePermission();
}
public AdvancedCache<K, V> getDelegate() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate;
}
@Override
public AdvancedCache<K, V> withSubject(Subject subject) {
if (this.subject == null) {
return new SecureCacheImpl<>(delegate, authzManager, subject);
} else {
throw new IllegalArgumentException("Cannot set a Subject on a SecureCache more than once");
}
}
@Override
public boolean startBatch() {
authzManager.checkPermission(subject, writePermission);
return delegate.startBatch();
}
@Override
public <C> CompletionStage<Void> addListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter,
CacheEventConverter<? super K, ? super V, C> converter) {
authzManager.checkPermission(subject, AuthorizationPermission.LISTEN);
return delegate.addListenerAsync(listener, filter, converter);
}
@Override
public CompletionStage<Void> addListenerAsync(Object listener) {
authzManager.checkPermission(subject, AuthorizationPermission.LISTEN);
return delegate.addListenerAsync(listener);
}
@Override
public <C> CompletionStage<Void> addFilteredListenerAsync(Object listener,
CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter,
Set<Class<? extends Annotation>> filterAnnotations) {
authzManager.checkPermission(subject, AuthorizationPermission.LISTEN);
return delegate.addFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public <C> CompletionStage<Void> addStorageFormatFilteredListenerAsync(Object listener, CacheEventFilter<? super K, ? super V> filter, CacheEventConverter<? super K, ? super V, C> converter, Set<Class<? extends Annotation>> filterAnnotations) {
authzManager.checkPermission(subject, AuthorizationPermission.LISTEN);
return delegate.addStorageFormatFilteredListenerAsync(listener, filter, converter, filterAnnotations);
}
@Override
public void shutdown() {
authzManager.checkPermission(subject, AuthorizationPermission.LIFECYCLE);
delegate.shutdown();
}
@Override
public void start() {
authzManager.checkPermission(subject, AuthorizationPermission.LIFECYCLE);
delegate.start();
}
@Override
public void stop() {
authzManager.checkPermission(subject, AuthorizationPermission.LIFECYCLE);
delegate.stop();
}
@Override
public CompletableFuture<V> putAsync(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAsync(key, value);
}
@Override
public void endBatch(boolean successful) {
authzManager.checkPermission(subject, writePermission);
delegate.endBatch(successful);
}
@Override
public CompletionStage<Void> removeListenerAsync(Object listener) {
authzManager.checkPermission(subject, AuthorizationPermission.LISTEN);
return delegate.removeListenerAsync(listener);
}
@Deprecated
@Override
public Set<Object> getListeners() {
authzManager.checkPermission(subject, AuthorizationPermission.LISTEN);
return delegate.getListeners();
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAsync(key, value, lifespan, unit);
}
@Override
public AdvancedCache<K, V> withFlags(Flag... flags) {
return new SecureCacheImpl<>(delegate.withFlags(flags), authzManager, subject);
}
@Override
public AdvancedCache<K, V> withFlags(Collection<Flag> flags) {
return new SecureCacheImpl<>(delegate.withFlags(flags), authzManager, subject);
}
@Override
public AdvancedCache<K, V> noFlags() {
return new SecureCacheImpl<>(delegate.noFlags(), authzManager, subject);
}
@Override
public AdvancedCache<K, V> transform(Function<AdvancedCache<K, V>, ? extends AdvancedCache<K, V>> transformation) {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
AdvancedCache<K, V> newDelegate = delegate.transform(transformation);
AdvancedCache<K, V> newInstance = newDelegate != delegate ? new SecureCacheImpl<>(newDelegate, authzManager, subject) : this;
return transformation.apply(newInstance);
}
@Override
public V putIfAbsent(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsent(key, value);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle,
TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAsync(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public String getName() {
return delegate.getName();
}
@Override
public String getVersion() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getVersion();
}
@Override
public V put(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.put(key, value);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAllAsync(data);
}
@Override
public V put(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.put(key, value, lifespan, unit);
}
@Override
public boolean remove(Object key, Object value) {
authzManager.checkPermission(subject, writePermission);
return delegate.remove(key, value);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAllAsync(data, lifespan, unit);
}
/**
* @deprecated Since 10.0, will be removed without a replacement
*/
@Deprecated
@Override
public AsyncInterceptorChain getAsyncInterceptorChain() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getAsyncInterceptorChain();
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsent(key, value, lifespan, unit);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> data, long lifespan, TimeUnit lifespanUnit,
long maxIdle, TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAllAsync(data, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
delegate.putAll(map, lifespan, unit);
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, oldValue, newValue);
}
@Override
public CompletableFuture<Void> clearAsync() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_WRITE);
return delegate.clearAsync();
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, value, lifespan, unit);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsentAsync(key, value);
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, oldValue, value, lifespan, unit);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsentAsync(key, value, lifespan, unit);
}
@Override
public V replace(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, value);
}
@Override
public EvictionManager getEvictionManager() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getEvictionManager();
}
@Override
public ExpirationManager<K, V> getExpirationManager() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getExpirationManager();
}
@Override
public V put(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.put(key, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public void putForExternalRead(K key, V value) {
authzManager.checkPermission(subject, writePermission);
delegate.putForExternalRead(key, value);
}
@Override
public void putForExternalRead(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
delegate.putForExternalRead(key, value, metadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.compute(key, remappingFunction);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.merge(key, value, remappingFunction);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.merge(key, value, remappingFunction, lifespan, lifespanUnit);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.merge(key, value, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.merge(key, value, remappingFunction, metadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.compute(key, remappingFunction, metadata);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.compute(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.compute(key, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresent(key, remappingFunction);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresent(key, remappingFunction, metadata);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresent(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresent(key, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsent(key, mappingFunction);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsent(key, mappingFunction, metadata);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsent(key, mappingFunction, lifespan, lifespanUnit);
}
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsent(key, mappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeAsync(key, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresentAsync(key, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsentAsync(key, mappingFunction, metadata);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.mergeAsync(key, value, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.mergeAsync(key, value, remappingFunction, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.mergeAsync(key, value, remappingFunction, metadata);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeAsync(key, remappingFunction);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeAsync(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeAsync(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsentAsync(key, mappingFunction);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsentAsync(key, mappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeIfAbsentAsync(K key, Function<? super K, ? extends V> mappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfAbsentAsync(key, mappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresentAsync(key, remappingFunction);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresentAsync(key, remappingFunction, lifespan, lifespanUnit);
}
@Override
public CompletableFuture<V> computeIfPresentAsync(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.computeIfPresentAsync(key, remappingFunction, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CompletableFuture<V> mergeAsync(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
authzManager.checkPermission(subject, writePermission);
return delegate.mergeAsync(key, value, remappingFunction);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
delegate.putForExternalRead(key, value, lifespan, unit);
}
@Override
public void putForExternalRead(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle, TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
delegate.putForExternalRead(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public ComponentRegistry getComponentRegistry() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getComponentRegistry();
}
@Override
public DistributionManager getDistributionManager() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getDistributionManager();
}
@Override
public AuthorizationManager getAuthorizationManager() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return new AuthorizationManager() {
@Override
public void checkPermission(AuthorizationPermission permission) {
authzManager.checkPermission(subject, permission);
}
@Override
public void checkPermission(Subject subject, AuthorizationPermission permission) {
authzManager.checkPermission(subject, permission);
}
@Override
public void checkPermission(AuthorizationPermission permission, String role) {
authzManager.checkPermission(subject, permission, role);
}
@Override
public void checkPermission(Subject subject, AuthorizationPermission permission, String role) {
authzManager.checkPermission(subject, permission, role);
}
@Override
public EnumSet<AuthorizationPermission> getPermissions(Subject subject) {
return authzManager.getPermissions(subject);
}
@Override
public AuthorizationPermission getWritePermission() {
return authzManager.getWritePermission();
}
};
}
@Override
public AdvancedCache<K, V> lockAs(Object lockOwner) {
return new SecureCacheImpl<>(delegate.lockAs(lockOwner));
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle,
TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsentAsync(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public boolean isEmpty() {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.isEmpty();
}
@Override
public boolean lock(K... keys) {
authzManager.checkPermission(subject, writePermission);
return delegate.lock(keys);
}
@Override
public boolean containsKey(Object key) {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.containsKey(key);
}
@Override
public V putIfAbsent(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsent(key, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public boolean lock(Collection<? extends K> keys) {
authzManager.checkPermission(subject, writePermission);
return delegate.lock(keys);
}
@Override
public CompletableFuture<V> removeAsync(Object key) {
authzManager.checkPermission(subject, writePermission);
return delegate.removeAsync(key);
}
@Override
public CompletableFuture<CacheEntry<K, V>> removeAsyncEntry(Object key) {
authzManager.checkPermission(subject, writePermission);
return delegate.removeAsyncEntry(key);
}
@Override
public boolean containsValue(Object value) {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.containsValue(value);
}
@Override
public void putAll(Map<? extends K, ? extends V> map, long lifespan, TimeUnit lifespanUnit, long maxIdleTime,
TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
delegate.putAll(map, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public CompletableFuture<Boolean> removeAsync(Object key, Object value) {
authzManager.checkPermission(subject, writePermission);
return delegate.removeAsync(key, value);
}
@Override
public void evict(K key) {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
delegate.evict(key);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, value);
}
@Override
public RpcManager getRpcManager() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getRpcManager();
}
@Override
public V replace(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime, TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public V get(Object key) {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.get(key);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, value, lifespan, unit);
}
@Override
public BatchContainer getBatchContainer() {
authzManager.checkPermission(subject, writePermission);
return delegate.getBatchContainer();
}
@Override
public Configuration getCacheConfiguration() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getCacheConfiguration();
}
@Override
public EmbeddedCacheManager getCacheManager() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getCacheManager();
}
@Override
public AdvancedCache<K, V> getAdvancedCache() {
return this;
}
@Override
public ComponentStatus getStatus() {
return delegate.getStatus();
}
@Override
public AvailabilityMode getAvailability() {
return delegate.getAvailability();
}
@Override
public void setAvailability(AvailabilityMode availabilityMode) {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
delegate.setAvailability(availabilityMode);
}
@Override
public CacheSet<CacheEntry<K, V>> cacheEntrySet() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.cacheEntrySet();
}
@Override
public LockedStream<K, V> lockedStream() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_WRITE);
return delegate.lockedStream();
}
@Override
public CompletableFuture<Boolean> removeLifespanExpired(K key, V value, Long lifespan) {
authzManager.checkPermission(subject, writePermission);
return delegate.removeLifespanExpired(key, value, lifespan);
}
@Override
public CompletableFuture<Boolean> removeMaxIdleExpired(K key, V value) {
authzManager.checkPermission(subject, writePermission);
return delegate.removeMaxIdleExpired(key, value);
}
@Override
public AdvancedCache<?, ?> withEncoding(Class<? extends Encoder> encoderClass) {
return new SecureCacheImpl<>(delegate.withEncoding(encoderClass), authzManager, subject);
}
@Override
public AdvancedCache<?, ?> withKeyEncoding(Class<? extends Encoder> encoder) {
return new SecureCacheImpl<>(delegate.withKeyEncoding(encoder), authzManager, subject);
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> wrapperClass) {
return new SecureCacheImpl<>(delegate.withWrapping(wrapperClass), authzManager, subject);
}
@Override
public AdvancedCache<?, ?> withMediaType(String keyMediaType, String valueMediaType) {
return new SecureCacheImpl<>(delegate.withMediaType(keyMediaType, valueMediaType), authzManager, subject);
}
@Override
public <K1, V1> AdvancedCache<K1, V1> withMediaType(MediaType keyMediaType, MediaType valueMediaType) {
return new SecureCacheImpl<>(delegate.withMediaType(keyMediaType, valueMediaType), authzManager, subject);
}
@Override
public AdvancedCache<K, V> withStorageMediaType() {
return new SecureCacheImpl<>(delegate.withStorageMediaType(), authzManager, subject);
}
@Override
public AdvancedCache<?, ?> withEncoding(Class<? extends Encoder> keyEncoderClass,
Class<? extends Encoder> valueEncoderClass) {
return new SecureCacheImpl<>(delegate.withEncoding(keyEncoderClass, valueEncoderClass), authzManager, subject);
}
@Override
public AdvancedCache<K, V> withWrapping(Class<? extends Wrapper> keyWrapperClass,
Class<? extends Wrapper> valueWrapperClass) {
return new SecureCacheImpl<>(delegate.withWrapping(keyWrapperClass, valueWrapperClass), authzManager, subject);
}
@Override
public DataConversion getKeyDataConversion() {
return delegate.getKeyDataConversion();
}
@Override
public DataConversion getValueDataConversion() {
return delegate.getValueDataConversion();
}
@Override
public int size() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.size();
}
@Override
public CompletableFuture<Long> sizeAsync() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.sizeAsync();
}
@Override
public boolean replace(K key, V oldValue, V value, long lifespan, TimeUnit lifespanUnit, long maxIdleTime,
TimeUnit maxIdleTimeUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, oldValue, value, lifespan, lifespanUnit, maxIdleTime, maxIdleTimeUnit);
}
@Override
public DataContainer<K, V> getDataContainer() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getDataContainer();
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, long lifespan, TimeUnit lifespanUnit, long maxIdle,
TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, value, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public TransactionManager getTransactionManager() {
return delegate.getTransactionManager();
}
@Override
public CacheSet<K> keySet() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.keySet();
}
@Override
public V remove(Object key) {
authzManager.checkPermission(subject, writePermission);
return delegate.remove(key);
}
@Override
public Map<K, V> getAll(Set<?> keys) {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.getAll(keys);
}
@Override
public CompletableFuture<Map<K, V>> getAllAsync(Set<?> keys) {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.getAllAsync(keys);
}
@Override
public LockManager getLockManager() {
authzManager.checkPermission(subject, writePermission);
return delegate.getLockManager();
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, oldValue, newValue);
}
@Override
public Stats getStats() {
authzManager.checkPermission(subject, AuthorizationPermission.MONITOR);
return delegate.getStats();
}
@Override
public XAResource getXAResource() {
authzManager.checkPermission(subject, AuthorizationPermission.ADMIN);
return delegate.getXAResource();
}
@Override
public ClassLoader getClassLoader() {
return delegate.getClassLoader();
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit unit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, oldValue, newValue, lifespan, unit);
}
@Override
public CacheCollection<V> values() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.values();
}
@Override
public AdvancedCache<K, V> with(ClassLoader classLoader) {
return this;
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, long lifespan, TimeUnit lifespanUnit,
long maxIdle, TimeUnit maxIdleUnit) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, oldValue, newValue, lifespan, lifespanUnit, maxIdle, maxIdleUnit);
}
@Override
public CacheSet<Entry<K, V>> entrySet() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.entrySet();
}
@Override
public CompletableFuture<V> getAsync(K key) {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.getAsync(key);
}
@Override
public V put(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.put(key, value, metadata);
}
@Override
public void putAll(Map<? extends K, ? extends V> m, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
delegate.putAll(m, metadata);
}
@Override
public CompletableFuture<Void> putAllAsync(Map<? extends K, ? extends V> map, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAllAsync(map, metadata);
}
@Override
public void putAll(Map<? extends K, ? extends V> m) {
authzManager.checkPermission(subject, writePermission);
delegate.putAll(m);
}
@Override
public V replace(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, value, metadata);
}
@Override
public CompletableFuture<V> replaceAsync(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, value, metadata);
}
@Override
public CompletableFuture<CacheEntry<K, V>> replaceAsyncEntry(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsyncEntry(key, value, metadata);
}
@Override
public boolean replace(K key, V oldValue, V newValue, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.replace(key, oldValue, newValue, metadata);
}
@Override
public CompletableFuture<Boolean> replaceAsync(K key, V oldValue, V newValue, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.replaceAsync(key, oldValue, newValue, metadata);
}
@Override
public void clear() {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_WRITE);
delegate.clear();
}
@Override
public V putIfAbsent(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsent(key, value, metadata);
}
@Override
public CompletableFuture<V> putIfAbsentAsync(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsentAsync(key, value, metadata);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putIfAbsentAsyncEntry(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.putIfAbsentAsyncEntry(key, value, metadata);
}
@Override
public CompletableFuture<V> putAsync(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAsync(key, value, metadata);
}
@Override
public CompletableFuture<CacheEntry<K, V>> putAsyncEntry(K key, V value, Metadata metadata) {
authzManager.checkPermission(subject, writePermission);
return delegate.putAsyncEntry(key, value, metadata);
}
@Override
public CacheEntry<K, V> getCacheEntry(Object key) {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.getCacheEntry(key);
}
@Override
public CompletableFuture<CacheEntry<K, V>> getCacheEntryAsync(Object key) {
authzManager.checkPermission(subject, AuthorizationPermission.READ);
return delegate.getCacheEntryAsync(key);
}
@Override
public Map<K, CacheEntry<K, V>> getAllCacheEntries(Set<?> keys) {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.getAllCacheEntries(keys);
}
@Override
public Map<K, V> getAndPutAll(Map<? extends K, ? extends V> map) {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_WRITE);
return delegate.getAndPutAll(map);
}
@Override
public Map<K, V> getGroup(String groupName) {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_READ);
return delegate.getGroup(groupName);
}
@Override
public void removeGroup(String groupName) {
authzManager.checkPermission(subject, AuthorizationPermission.BULK_WRITE);
delegate.removeGroup(groupName);
}
@Override
public boolean equals(Object o) {
return delegate.equals(o);
}
@Override
public int hashCode() {
return delegate.hashCode();
}
@Override
public String toString() {
return "Secure " + delegate;
}
@Override
public CompletionStage<Boolean> touch(Object key, int segment, boolean touchEvenIfExpired) {
authzManager.checkPermission(subject, writePermission);
return delegate.touch(key, segment, touchEvenIfExpired);
}
@Override
public CompletionStage<Boolean> touch(Object key, boolean touchEvenIfExpired) {
authzManager.checkPermission(subject, writePermission);
return delegate.touch(key, touchEvenIfExpired);
}
}
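/*
 * Illustrative sketch, not part of the original source: every delegate method above follows the same
 * check-then-delegate pattern, so callers never perform explicit authorization. The class and method
 * names below are assumptions for demonstration only.
 */
class SecureCacheUsageSketch {
   static <K, V> V guardedGet(org.infinispan.AdvancedCache<K, V> secureCache, K key) {
      // The secure decorator checks READ permission for the current Subject internally
      // before delegating, so no explicit check is needed at the call site.
      return secureCache.get(key);
   }
}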
| 41,502
| 36.661525
| 247
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/SubjectAdapter.java
|
package org.infinispan.security.impl;
import java.security.Principal;
import java.util.Set;
import javax.security.auth.Subject;
import org.infinispan.protostream.annotations.ProtoAdapter;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoName;
/**
* @since 14.0
**/
@ProtoAdapter(Subject.class)
@ProtoName("Subject")
public class SubjectAdapter {
@ProtoFactory
Subject create(String[] principals) {
Subject subject = new Subject();
Set<Principal> p = subject.getPrincipals();
for (String principal : principals) {
p.add(new SimplePrincipal(principal));
}
return subject;
}
@ProtoField(1)
String[] getPrincipals(Subject subject) {
return subject.getPrincipals().stream().map(Principal::getName).toArray(String[]::new);
}
public static class SimplePrincipal implements Principal {
final String name;
public SimplePrincipal(String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
@Override
public String toString() {
return "SimplePrincipal{" +
"name='" + name + '\'' +
'}';
}
}
}
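/*
 * Illustrative sketch, not part of the original source: round-tripping a Subject through the adapter's
 * ProtoStream mapping. Only the principal names survive the trip; they are rebuilt as SimplePrincipal
 * instances. The helper name is an assumption for demonstration only.
 */
class SubjectAdapterSketch {
   static Subject roundTrip(Subject original) {
      SubjectAdapter adapter = new SubjectAdapter();
      // Marshalling extracts the principal names as a String array (field 1 of the adapter).
      String[] names = adapter.getPrincipals(original);
      // Unmarshalling reconstructs a Subject containing one SimplePrincipal per name.
      return adapter.create(names);
   }
}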
| 1,319
| 23
| 93
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/CreatePermissionConfigurationBuilder.java
|
package org.infinispan.security.impl;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractModuleConfigurationBuilder;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
/**
* @author Tristan Tarrant <tristan@infinispan.org>
* @since 12.1
**/
public class CreatePermissionConfigurationBuilder extends AbstractModuleConfigurationBuilder implements Builder<CreatePermissionConfiguration> {
public CreatePermissionConfigurationBuilder(ConfigurationBuilder builder) {
super(builder);
}
@Override
public AttributeSet attributes() {
return null;
}
@Override
public CreatePermissionConfiguration create() {
return new CreatePermissionConfiguration();
}
@Override
public Builder<?> read(CreatePermissionConfiguration template, Combine combine) {
return this;
}
@Override
public void validate(GlobalConfiguration globalConfig) {
}
}
| 1,155
| 29.421053
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/GlobalSecurityManagerFactory.java
|
package org.infinispan.security.impl;
import org.infinispan.factories.AbstractComponentFactory;
import org.infinispan.factories.AutoInstantiableFactory;
import org.infinispan.factories.annotations.DefaultFactoryFor;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.security.GlobalSecurityManager;
/**
* Factory for GlobalSecurityManager implementations
*
* @author Tristan Tarrant
* @since 8.1
*/
@Scope(Scopes.GLOBAL)
@DefaultFactoryFor(classes = GlobalSecurityManager.class)
public class GlobalSecurityManagerFactory extends AbstractComponentFactory implements AutoInstantiableFactory {
@Override
public Object construct(String componentName) {
if (globalConfiguration.security().authorization().enabled())
return new GlobalSecurityManagerImpl();
else
return null;
}
}
| 884
| 30.607143
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/GlobalSecurityManagerImpl.java
|
package org.infinispan.security.impl;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.manager.ClusterExecutor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.GlobalSecurityManager;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.util.logging.Log;
import com.github.benmanes.caffeine.cache.Caffeine;
/**
 * GlobalSecurityManagerImpl. Initializes and controls the global ACL cache.
*
* @author Tristan Tarrant
* @since 8.1
*/
@MBean(objectName = "GlobalSecurityManager", description = "Controls global ACL caches")
@Scope(Scopes.GLOBAL)
public class GlobalSecurityManagerImpl implements GlobalSecurityManager {
private boolean cacheEnabled;
private com.github.benmanes.caffeine.cache.Cache<CacheSubjectPair, SubjectACL> cache;
private EmbeddedCacheManager embeddedCacheManager;
@Inject
public void init(GlobalConfiguration globalConfiguration, EmbeddedCacheManager embeddedCacheManager, Authorizer authorizer) {
long timeout = globalConfiguration.security().securityCacheTimeout();
long size = globalConfiguration.security().securityCacheSize();
if (timeout > 0 && size > 0) {
cache = Caffeine.newBuilder().maximumSize(size).expireAfterWrite(timeout, TimeUnit.MILLISECONDS).build();
authorizer.setAclCache(cache.asMap());
cacheEnabled = true;
} else {
cacheEnabled = false;
}
this.embeddedCacheManager = embeddedCacheManager;
}
@Override
public Map<?, ?> globalACLCache() {
if (cacheEnabled) {
return cache.asMap();
} else {
return null;
}
}
@ManagedOperation(name = "Flush ACL Cache", displayName = "Flush ACL Cache", description = "Flushes the global ACL cache across the entire cluster")
@Override
public CompletionStage<Void> flushGlobalACLCache() {
if (cacheEnabled) {
ClusterExecutor executor = SecurityActions.getClusterExecutor(embeddedCacheManager);
return executor.submitConsumer(cm -> {
GlobalSecurityManager globalSecurityManager = SecurityActions.getGlobalComponentRegistry(cm).getComponent(GlobalSecurityManager.class);
globalSecurityManager.flushLocalACLCache();
return null;
}, (a, v, t) -> {
});
} else {
return CompletableFutures.completedNull();
}
}
@Override
public void flushLocalACLCache() {
if (cacheEnabled) {
globalACLCache().clear();
Log.CONTAINER.flushedACLCache();
}
}
}
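/*
 * Illustrative sketch, not part of the original source: the ACL cache above is only built when both
 * the timeout and the size are positive. This hypothetical helper mirrors that construction using the
 * Caffeine and TimeUnit imports already present in this file; the method name is an assumption.
 */
class AclCacheSketch {
   static com.github.benmanes.caffeine.cache.Cache<CacheSubjectPair, SubjectACL> build(long timeoutMillis, long size) {
      if (timeoutMillis <= 0 || size <= 0) {
         // Matches the init(...) logic above: a non-positive timeout or size disables caching.
         return null;
      }
      return Caffeine.newBuilder()
            .maximumSize(size)
            .expireAfterWrite(timeoutMillis, TimeUnit.MILLISECONDS)
            .build();
   }
}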
| 3,001
| 35.609756
| 151
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/CommonNameRoleMapper.java
|
package org.infinispan.security.impl;
/**
* @author Tristan Tarrant <tristan@infinispan.org>
* @deprecated use {@link org.infinispan.security.mappers.CommonNameRoleMapper} instead
**/
@Deprecated
public class CommonNameRoleMapper extends org.infinispan.security.mappers.CommonNameRoleMapper {
}
| 293
| 31.666667
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/CacheSubjectPair.java
|
package org.infinispan.security.impl;
import java.util.Objects;
import javax.security.auth.Subject;
/**
 * CacheSubjectPair. A composite key that pairs a {@link Subject} with a cache name and precomputes its hash code.
*
* @author Tristan Tarrant
* @since 8.1
*/
public final class CacheSubjectPair {
private final Subject subject;
private final String cacheName;
private final int hashCode;
CacheSubjectPair(Subject subject, String cacheName) {
this.subject = subject;
this.cacheName = cacheName;
this.hashCode = computeHashCode();
}
private int computeHashCode() {
return Objects.hash(subject, cacheName);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CacheSubjectPair that = (CacheSubjectPair) o;
return subject.equals(that.subject) && cacheName.equals(that.cacheName);
}
@Override
public int hashCode() {
return hashCode;
}
@Override
public String toString() {
return "CacheSubjectPair{" +
"cacheName=" + cacheName +
", subject=" + subject +
'}';
}
}
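/*
 * Illustrative sketch, not part of the original source: CacheSubjectPair precomputes its hash code
 * because it is used as a hot-path key into the ACL cache. The lookup shape below is hypothetical.
 */
class CacheSubjectPairSketch {
   static SubjectACL lookup(java.util.Map<CacheSubjectPair, SubjectACL> aclCache, Subject subject, String cacheName) {
      // Equal subject/cache-name pairs hash identically, so repeated permission checks
      // for the same caller hit the cache instead of recomputing the ACL.
      return aclCache.get(new CacheSubjectPair(subject, cacheName));
   }
}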
| 1,106
| 21.591837
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/Authorizer.java
|
package org.infinispan.security.impl;
import static org.infinispan.util.logging.Log.SECURITY;
import java.security.Principal;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.security.auth.Subject;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.AuthorizationConfiguration;
import org.infinispan.configuration.global.GlobalAuthorizationConfiguration;
import org.infinispan.configuration.global.GlobalSecurityConfiguration;
import org.infinispan.security.AuditContext;
import org.infinispan.security.AuditLogger;
import org.infinispan.security.AuditResponse;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.GroupPrincipal;
import org.infinispan.security.PrincipalRoleMapper;
import org.infinispan.security.Role;
import org.infinispan.security.Security;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Authorizer. Provides utility methods for computing access masks and verifying them against permissions.
*
* @author Tristan Tarrant
* @since 7.0
*/
public class Authorizer {
private static final Log log = LogFactory.getLog(Authorizer.class);
public static final SubjectACL SUPERUSER = new SubjectACL(Collections.emptySet(), AuthorizationPermission.ALL.getMask());
private final GlobalSecurityConfiguration globalConfiguration;
private final AuditLogger audit;
private final AuditContext context;
private final String name;
private Map<CacheSubjectPair, SubjectACL> aclCache;
public Authorizer(GlobalSecurityConfiguration globalConfiguration, AuditContext context, String name, Map<CacheSubjectPair, SubjectACL> aclCache) {
this.globalConfiguration = globalConfiguration;
this.audit = globalConfiguration.authorization().auditLogger();
this.context = context;
this.name = name;
this.aclCache = aclCache;
}
public void setAclCache(Map<CacheSubjectPair, SubjectACL> aclCache) {
this.aclCache = aclCache;
}
public void checkPermission(AuthorizationPermission perm) {
checkPermission(null, null, name, context, null, perm);
}
public void checkPermission(AuthorizationPermission perm, String role) {
checkPermission(null, null, name, context, role, perm);
}
public void checkPermission(AuthorizationConfiguration configuration, AuthorizationPermission perm) {
checkPermission(configuration, null, name, context, null, perm);
}
public void checkPermission(Subject subject, AuthorizationPermission perm) {
checkPermission(null, subject, name, context, null, perm);
}
public SubjectACL getACL(Subject subject) {
return getACL(subject, null);
}
public SubjectACL getACL(Subject subject, AuthorizationConfiguration configuration) {
if (globalConfiguration.authorization().enabled() && (configuration == null || configuration.enabled())) {
return computeSubjectACL(subject, configuration);
} else {
return SUPERUSER;
}
}
public void checkPermission(AuthorizationConfiguration configuration, Subject subject, AuthorizationPermission perm, String role) {
checkPermission(configuration, subject, null, context, role, perm);
}
public void checkPermission(Subject subject, AuthorizationPermission perm, AuditContext explicitContext) {
checkPermission(null, subject, null, explicitContext, null, perm);
}
public void checkPermission(Subject subject, AuthorizationPermission perm, String contextName, AuditContext auditContext) {
checkPermission(null, subject, contextName, auditContext, null, perm);
}
public void checkPermission(AuthorizationConfiguration configuration, Subject subject, String explicitName, AuditContext explicitContext, String role, AuthorizationPermission perm) {
if (globalConfiguration.authorization().enabled()) {
if (!Security.isPrivileged()) {
subject = subject != null ? subject : Security.getSubject();
if (checkSubjectPermissionAndRole(subject, configuration, perm, role)) {
audit.audit(subject, explicitContext, explicitName, perm, AuditResponse.ALLOW);
} else {
audit.audit(subject, explicitContext, explicitName, perm, AuditResponse.DENY);
throw SECURITY.unauthorizedAccess(Util.prettyPrintSubject(subject), perm.toString());
}
}
}
}
public EnumSet<AuthorizationPermission> getPermissions(AuthorizationConfiguration configuration, Subject subject) {
if (globalConfiguration.authorization().enabled()) {
return computeSubjectACL(subject, configuration).getPermissions();
} else {
return EnumSet.allOf(AuthorizationPermission.class);
}
}
private boolean checkSubjectPermissionAndRole(Subject subject, AuthorizationConfiguration configuration,
AuthorizationPermission requiredPermission, String requestedRole) {
if (subject != null) {
CacheSubjectPair csp = new CacheSubjectPair(subject, name);
SubjectACL subjectACL;
if (aclCache != null)
subjectACL = aclCache.computeIfAbsent(csp, s -> computeSubjectACL(subject, configuration));
else
subjectACL = computeSubjectACL(subject, configuration);
int permissionMask = requiredPermission.getMask();
boolean authorized = subjectACL.matches(permissionMask) && (requestedRole == null || subjectACL.containsRole(requestedRole));
if (log.isTraceEnabled()) {
log.tracef("Check subject '%s' with ACL '%s' has permission '%s' and role '%s' = %b", subject, subjectACL, requiredPermission, requestedRole, authorized);
}
return authorized;
} else {
return false;
}
}
private SubjectACL computeSubjectACL(Subject subject, AuthorizationConfiguration configuration) {
GlobalAuthorizationConfiguration authorization = globalConfiguration.authorization();
PrincipalRoleMapper roleMapper = authorization.principalRoleMapper();
Set<Principal> principals = subject.getPrincipals();
Set<String> allRoles = new HashSet<>(principals.size());
boolean groupOnlyMapping = authorization.groupOnlyMapping();
// Map all the Subject's principals to roles using the role mapper. There may be more than one role per principal
for (Principal principal : principals) {
if (groupOnlyMapping && !(principal instanceof GroupPrincipal)) {
continue;
}
Set<String> roleNames = roleMapper.principalToRoles(principal);
if (roleNames != null) {
allRoles.addAll(roleNames);
}
}
// Create a bitmask of the permissions this Subject has for the resource identified by the configuration
int subjectMask = 0;
// If this resource has not declared any roles, all the inheritable global roles will be checked
boolean implicit = configuration != null && configuration.roles().isEmpty();
for (String role : allRoles) {
if (configuration == null || implicit || configuration.roles().contains(role)) {
Role globalRole = authorization.getRole(role);
if (globalRole != null && (!implicit || globalRole.isInheritable())) {
subjectMask |= globalRole.getMask();
}
}
}
if (log.isTraceEnabled()) {
log.tracef("Subject '%s' has roles '%s' and permission mask %d", subject, allRoles, subjectMask);
}
return new SubjectACL(allRoles, subjectMask);
}
}
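/*
 * Illustrative sketch, not part of the original source: computeSubjectACL above reduces authorization
 * to bitmask arithmetic. This hypothetical method shows how an OR-combined role mask answers a
 * permission query in a single comparison.
 */
class PermissionMaskSketch {
   static boolean hasPermission(int subjectMask, AuthorizationPermission required) {
      // Authorized when every bit of the required permission is present in the subject's mask.
      return (subjectMask & required.getMask()) == required.getMask();
   }
}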
| 7,705
| 43.287356
| 185
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/AuthorizationManagerImpl.java
|
package org.infinispan.security.impl;
import java.util.EnumSet;
import java.util.Map;
import javax.security.auth.Subject;
import org.infinispan.configuration.cache.AuthorizationConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.security.AuditContext;
import org.infinispan.security.AuthorizationManager;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.GlobalSecurityManager;
/**
* AuthorizationManagerImpl. An implementation of the {@link AuthorizationManager} interface.
*
* @author Tristan Tarrant
* @since 7.0
*/
@Scope(Scopes.NAMED_CACHE)
public class AuthorizationManagerImpl implements AuthorizationManager {
private AuthorizationConfiguration configuration;
private Authorizer authorizer;
private AuthorizationPermission writePermission;
public AuthorizationManagerImpl() {
}
@Inject
public void init(@ComponentName(KnownComponentNames.CACHE_NAME) String cacheName,
GlobalConfiguration globalConfiguration, Configuration configuration,
GlobalSecurityManager globalSecurityManager) {
this.configuration = configuration.security().authorization();
Map<CacheSubjectPair, SubjectACL> globalACLCache = globalSecurityManager.globalACLCache();
this.authorizer = new Authorizer(globalConfiguration.security(), AuditContext.CACHE, cacheName, globalACLCache);
this.writePermission = configuration.module(CreatePermissionConfiguration.class) != null ?
AuthorizationPermission.CREATE : AuthorizationPermission.WRITE;
this.configuration.attributes().attribute(AuthorizationConfiguration.ROLES).addListener((roles, ignore) -> globalSecurityManager.flushGlobalACLCache());
}
@Override
public void checkPermission(AuthorizationPermission perm) {
authorizer.checkPermission(configuration, null, perm, null);
}
@Override
public void checkPermission(Subject subject, AuthorizationPermission perm) {
authorizer.checkPermission(configuration, subject, perm, null);
}
@Override
public void checkPermission(AuthorizationPermission perm, String role) {
authorizer.checkPermission(configuration, null, perm, role);
}
@Override
public void checkPermission(Subject subject, AuthorizationPermission perm, String role) {
authorizer.checkPermission(configuration, subject, perm, role);
}
@Override
public EnumSet<AuthorizationPermission> getPermissions(Subject subject) {
return authorizer.getPermissions(configuration, subject);
}
@Override
public AuthorizationPermission getWritePermission() {
return writePermission;
}
}
| 3,017
| 37.692308
| 158
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/CacheRoleImpl.java
|
package org.infinispan.security.impl;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.Role;
/**
* CacheRoleImpl.
*
* @author Tristan Tarrant
* @since 7.0
*/
@ProtoTypeId(ProtoStreamTypeIds.ROLE)
public class CacheRoleImpl implements Role {
@ProtoField(number = 1, required = true)
final String name;
@ProtoField(number = 2, required = true)
final boolean inheritable;
@ProtoField(number = 3, collectionImplementation = HashSet.class)
final Set<AuthorizationPermission> permissions;
private final int mask;
public CacheRoleImpl(String name, boolean inheritable, AuthorizationPermission... authorizationPermissions) {
this(name, inheritable, EnumSet.copyOf(Arrays.asList(authorizationPermissions)));
}
@ProtoFactory
public CacheRoleImpl(String name, boolean inheritable, Set<AuthorizationPermission> permissions) {
this.name = name;
this.permissions = Collections.unmodifiableSet(permissions);
int permMask = 0;
for (AuthorizationPermission permission : permissions) {
permMask |= permission.getMask();
}
this.mask = permMask;
this.inheritable = inheritable;
}
@Override
public String getName() {
return name;
}
@Override
public Collection<AuthorizationPermission> getPermissions() {
return permissions;
}
@Override
public int getMask() {
return mask;
}
@Override
public boolean isInheritable() {
return inheritable;
}
@Override
public String toString() {
return "CacheRoleImpl{" +
"name='" + name + '\'' +
", permissions=" + permissions +
", mask=" + mask +
", inheritable=" + inheritable +
'}';
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CacheRoleImpl cacheRole = (CacheRoleImpl) o;
return inheritable == cacheRole.inheritable && mask == cacheRole.mask && name.equals(cacheRole.name);
}
@Override
public int hashCode() {
return Objects.hash(name, inheritable, mask);
}
}
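/*
 * Illustrative sketch, not part of the original source: the constructor above folds every permission
 * into a single mask. This hypothetical factory builds a role whose mask combines the READ and WRITE
 * bits, so one comparison can test for both permissions at once.
 */
class CacheRoleSketch {
   static Role readerWriter() {
      // getMask() on the result equals READ.getMask() | WRITE.getMask().
      return new CacheRoleImpl("reader-writer", true,
            AuthorizationPermission.READ, AuthorizationPermission.WRITE);
   }
}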
| 2,613
| 27.107527
| 112
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/CreatePermissionConfiguration.java
|
package org.infinispan.security.impl;
import org.infinispan.commons.configuration.BuiltBy;
@BuiltBy(CreatePermissionConfigurationBuilder.class)
public class CreatePermissionConfiguration {
CreatePermissionConfiguration() {
}
}
| 236
| 20.545455
| 52
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/NullAuditLogger.java
|
package org.infinispan.security.impl;
/**
* @author Tristan Tarrant <tristan@infinispan.org>
* @deprecated use {@link org.infinispan.security.audit.NullAuditLogger} instead
**/
@Deprecated
public class NullAuditLogger extends org.infinispan.security.audit.NullAuditLogger {
}
| 286
| 27.7
| 84
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/security/impl/LoggingAuditLogger.java
|
package org.infinispan.security.impl;
/**
* @author Tristan Tarrant <tristan@infinispan.org>
* @deprecated use {@link org.infinispan.security.audit.LoggingAuditLogger} instead
**/
@Deprecated
public class LoggingAuditLogger extends org.infinispan.security.audit.LoggingAuditLogger {
}
| 295
| 28.6
| 90
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/RxJavaInterop.java
|
package org.infinispan.reactive;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.atomic.AtomicBoolean;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.processors.AsyncProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
/**
 * Static factory class that provides methods to obtain commonly used instances for interoperation between RxJava
 * and the standard JRE.
* @author wburns
* @since 10.0
*/
public class RxJavaInterop extends org.infinispan.commons.reactive.RxJavaInterop {
private RxJavaInterop() { }
protected final static Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
public static <R> Flowable<R> voidCompletionStageToFlowable(CompletionStage<Void> stage) {
if (CompletionStages.isCompletedSuccessfully(stage)) {
return Flowable.empty();
}
AsyncProcessor<R> ap = AsyncProcessor.create();
stage.whenComplete((value, t) -> {
if (t != null) {
ap.onError(t);
} else {
ap.onComplete();
}
});
return ap;
}
/**
    * Same as {@link #voidCompletionStageToFlowable(CompletionStage)}, except that a throwable raised by the stage
    * can optionally be suppressed when the returned Flowable's subscription has already been cancelled.
    * <p>
    * This method also allows only a single subscriber to the Flowable; any additional subscriber receives an
    * exception when subscribing to the returned Flowable.
    * {@link #voidCompletionStageToFlowable(CompletionStage)} can support any number of subscribers.
* @param stage stage to complete
* @param ignoreErrorIfCancelled whether to ignore an error if cancelled
* @param <R> stage type
* @return a Flowable that is completed when the stage is
*/
public static <R> Flowable<R> voidCompletionStageToFlowable(CompletionStage<Void> stage, boolean ignoreErrorIfCancelled) {
if (!ignoreErrorIfCancelled) {
return voidCompletionStageToFlowable(stage);
}
if (CompletionStages.isCompletedSuccessfully(stage)) {
return Flowable.empty();
}
AtomicBoolean cancelled = new AtomicBoolean();
UnicastProcessor<R> ap = UnicastProcessor.create(1, () -> cancelled.set(true));
stage.whenComplete((value, t) -> {
if (t != null) {
if (!cancelled.get()) {
ap.onError(t);
} else {
log.debug("Ignoring throwable as the UnicastProcessor is already completed", t);
}
} else {
ap.onComplete();
}
});
return ap;
}
}
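/*
 * Illustrative sketch, not part of the original source: adapting a void CompletionStage into a
 * Flowable that only signals completion. The stage here stands in for any asynchronous void result;
 * the helper name is an assumption for demonstration only.
 */
class RxJavaInteropSketch {
   static Flowable<Object> completionSignal(CompletionStage<Void> stage) {
      // The returned Flowable emits no items; subscribers observe only onComplete or onError,
      // mirroring the outcome of the stage.
      return RxJavaInterop.voidCompletionStageToFlowable(stage);
   }
}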
| 2,860
| 34.7625
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/FlowableCreate.java
|
/**
* Copyright (c) 2016-present, RxJava Contributors.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package org.infinispan.reactive;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import io.reactivex.rxjava3.core.BackpressureStrategy;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.FlowableEmitter;
import io.reactivex.rxjava3.core.FlowableOnSubscribe;
import io.reactivex.rxjava3.disposables.Disposable;
import io.reactivex.rxjava3.exceptions.Exceptions;
import io.reactivex.rxjava3.exceptions.MissingBackpressureException;
import io.reactivex.rxjava3.functions.Cancellable;
import io.reactivex.rxjava3.internal.disposables.CancellableDisposable;
import io.reactivex.rxjava3.internal.disposables.SequentialDisposable;
import io.reactivex.rxjava3.internal.subscriptions.SubscriptionHelper;
import io.reactivex.rxjava3.internal.util.AtomicThrowable;
import io.reactivex.rxjava3.internal.util.BackpressureHelper;
import io.reactivex.rxjava3.internal.util.ExceptionHelper;
import io.reactivex.rxjava3.operators.SimplePlainQueue;
import io.reactivex.rxjava3.operators.SpscLinkedArrayQueue;
import io.reactivex.rxjava3.plugins.RxJavaPlugins;
/**
* Copied from rxjava3 80c83a4e000f0d054ea88a3bd500d36c2c041b05
 * This has been modified so that the Flowable create callback is invoked per subscription request instead of
 * during the initial subscribe, which may not include a request of any size. The callback <b>must</b> provide
 * at least as many elements as were requested, or complete; otherwise the stream can stall, because the upstream
 * may not request any more elements and there is no one left to invoke the callback again.
 *
 * @param <T> the element type
*/
public final class FlowableCreate<T> extends Flowable<T> {
final FlowableOnSubscribe<T> source;
final BackpressureStrategy backpressure;
public FlowableCreate(FlowableOnSubscribe<T> source, BackpressureStrategy backpressure) {
this.source = source;
this.backpressure = backpressure;
}
@Override
public void subscribeActual(Subscriber<? super T> t) {
BaseEmitter<T> emitter;
switch (backpressure) {
case MISSING: {
emitter = new MissingEmitter<>(t, source);
break;
}
case ERROR: {
emitter = new ErrorAsyncEmitter<>(t, source);
break;
}
case DROP: {
emitter = new DropAsyncEmitter<>(t, source);
break;
}
case LATEST: {
emitter = new LatestAsyncEmitter<>(t, source);
break;
}
default: {
emitter = new BufferAsyncEmitter<>(t, source, bufferSize());
break;
}
}
t.onSubscribe(emitter);
}
/**
* Serializes calls to onNext, onError and onComplete.
*
* @param <T> the value type
*/
static final class SerializedEmitter<T>
extends AtomicInteger
implements FlowableEmitter<T> {
private static final long serialVersionUID = 4883307006032401862L;
final BaseEmitter<T> emitter;
final AtomicThrowable errors;
final SimplePlainQueue<T> queue;
volatile boolean done;
SerializedEmitter(BaseEmitter<T> emitter) {
this.emitter = emitter;
this.errors = new AtomicThrowable();
this.queue = new SpscLinkedArrayQueue<>(16);
}
@Override
public void onNext(T t) {
if (emitter.isCancelled() || done) {
return;
}
if (t == null) {
onError(ExceptionHelper.createNullPointerException("onNext called with a null value."));
return;
}
if (get() == 0 && compareAndSet(0, 1)) {
emitter.onNext(t);
if (decrementAndGet() == 0) {
return;
}
} else {
SimplePlainQueue<T> q = queue;
synchronized (q) {
q.offer(t);
}
if (getAndIncrement() != 0) {
return;
}
}
drainLoop();
}
@Override
public void onError(Throwable t) {
if (!tryOnError(t)) {
RxJavaPlugins.onError(t);
}
}
@Override
public boolean tryOnError(Throwable t) {
if (emitter.isCancelled() || done) {
return false;
}
if (t == null) {
t = ExceptionHelper.createNullPointerException("onError called with a null Throwable.");
}
if (errors.tryAddThrowable(t)) {
done = true;
drain();
return true;
}
return false;
}
@Override
public void onComplete() {
if (emitter.isCancelled() || done) {
return;
}
done = true;
drain();
}
void drain() {
if (getAndIncrement() == 0) {
drainLoop();
}
}
void drainLoop() {
BaseEmitter<T> e = emitter;
SimplePlainQueue<T> q = queue;
AtomicThrowable errors = this.errors;
int missed = 1;
for (; ; ) {
for (; ; ) {
if (e.isCancelled()) {
q.clear();
return;
}
if (errors.get() != null) {
q.clear();
errors.tryTerminateConsumer(e);
return;
}
boolean d = done;
T v = q.poll();
boolean empty = v == null;
if (d && empty) {
e.onComplete();
return;
}
if (empty) {
break;
}
e.onNext(v);
}
missed = addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
@Override
public void setDisposable(Disposable d) {
emitter.setDisposable(d);
}
@Override
public void setCancellable(Cancellable c) {
emitter.setCancellable(c);
}
@Override
public long requested() {
return emitter.requested();
}
@Override
public boolean isCancelled() {
return emitter.isCancelled();
}
@Override
public FlowableEmitter<T> serialize() {
return this;
}
@Override
public String toString() {
return emitter.toString();
}
}
abstract static class BaseEmitter<T>
extends AtomicLong
implements FlowableEmitter<T>, Subscription {
private static final long serialVersionUID = 7326289992464377023L;
final Subscriber<? super T> downstream;
final FlowableOnSubscribe<T> source;
final SequentialDisposable serial;
final AtomicInteger sip;
BaseEmitter(Subscriber<? super T> downstream, FlowableOnSubscribe<T> source) {
this.downstream = downstream;
this.source = source;
this.serial = new SequentialDisposable();
this.sip = new AtomicInteger();
}
@Override
public void onComplete() {
completeDownstream();
}
protected void completeDownstream() {
if (isCancelled()) {
return;
}
try {
downstream.onComplete();
} finally {
serial.dispose();
}
}
@Override
public final void onError(Throwable e) {
if (e == null) {
e = ExceptionHelper.createNullPointerException("onError called with a null Throwable.");
}
if (!signalError(e)) {
RxJavaPlugins.onError(e);
}
}
@Override
public final boolean tryOnError(Throwable e) {
if (e == null) {
e = ExceptionHelper.createNullPointerException("tryOnError called with a null Throwable.");
}
return signalError(e);
}
public boolean signalError(Throwable e) {
return errorDownstream(e);
}
protected boolean errorDownstream(Throwable e) {
if (isCancelled()) {
return false;
}
try {
downstream.onError(e);
} finally {
serial.dispose();
}
return true;
}
@Override
public final void cancel() {
serial.dispose();
onUnsubscribed();
}
public final void attemptSubscribe() {
if (sip.getAndIncrement() == 0) {
int missed = 1;
for (; ; ) {
// It is possible the last subscribe consumed all requests, but we haven't caught up to sip
// so double check we still have outstanding requests
if (get() > 0) {
try {
source.subscribe(this);
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
onError(ex);
}
}
missed = sip.addAndGet(-missed);
// missed will be 0 if there were no other "concurrent" subscribe calls
               // or, if the flowable was completed, it has been disposed and there is nothing more to do
if (missed == 0 || serial.isDisposed()) {
break;
}
}
}
}
void onUnsubscribed() {
// default is no-op
}
@Override
public final boolean isCancelled() {
return serial.isDisposed();
}
@Override
public final void request(long n) {
if (SubscriptionHelper.validate(n) && !serial.isDisposed()) {
BackpressureHelper.add(this, n);
attemptSubscribe();
onRequested();
}
}
void onRequested() {
// default is no-op
}
@Override
public final void setDisposable(Disposable d) {
serial.update(d);
}
@Override
public final void setCancellable(Cancellable c) {
setDisposable(new CancellableDisposable(c));
}
@Override
public final long requested() {
return get();
}
@Override
public final FlowableEmitter<T> serialize() {
return new SerializedEmitter<>(this);
}
@Override
public String toString() {
return String.format("%s{%s}", getClass().getSimpleName(), super.toString());
}
}
static final class MissingEmitter<T> extends BaseEmitter<T> {
private static final long serialVersionUID = 3776720187248809713L;
MissingEmitter(Subscriber<? super T> downstream, FlowableOnSubscribe<T> source) {
super(downstream, source);
}
@Override
public void onNext(T t) {
if (isCancelled()) {
return;
}
if (t != null) {
downstream.onNext(t);
} else {
onError(ExceptionHelper.createNullPointerException("onNext called with a null value."));
return;
}
for (; ; ) {
long r = get();
if (r == 0L || compareAndSet(r, r - 1)) {
return;
}
}
}
}
static abstract class NoOverflowBaseAsyncEmitter<T> extends BaseEmitter<T> {
private static final long serialVersionUID = 4127754106204442833L;
NoOverflowBaseAsyncEmitter(Subscriber<? super T> downstream, FlowableOnSubscribe<T> source) {
super(downstream, source);
}
@Override
public final void onNext(T t) {
if (isCancelled()) {
return;
}
if (t == null) {
onError(ExceptionHelper.createNullPointerException("onNext called with a null value."));
return;
}
if (get() != 0) {
downstream.onNext(t);
BackpressureHelper.produced(this, 1);
} else {
onOverflow();
}
}
abstract void onOverflow();
}
static final class DropAsyncEmitter<T> extends NoOverflowBaseAsyncEmitter<T> {
private static final long serialVersionUID = 8360058422307496563L;
DropAsyncEmitter(Subscriber<? super T> downstream, FlowableOnSubscribe<T> source) {
super(downstream, source);
}
@Override
void onOverflow() {
// nothing to do
}
}
static final class ErrorAsyncEmitter<T> extends NoOverflowBaseAsyncEmitter<T> {
private static final long serialVersionUID = 338953216916120960L;
ErrorAsyncEmitter(Subscriber<? super T> downstream, FlowableOnSubscribe<T> source) {
super(downstream, source);
}
@Override
void onOverflow() {
onError(new MissingBackpressureException("create: could not emit value due to lack of requests"));
}
}
static final class BufferAsyncEmitter<T> extends BaseEmitter<T> {
private static final long serialVersionUID = 2427151001689639875L;
final SpscLinkedArrayQueue<T> queue;
Throwable error;
volatile boolean done;
final AtomicInteger wip;
BufferAsyncEmitter(Subscriber<? super T> actual, FlowableOnSubscribe<T> source, int capacityHint) {
super(actual, source);
this.queue = new SpscLinkedArrayQueue<>(capacityHint);
this.wip = new AtomicInteger();
}
@Override
public void onNext(T t) {
if (done || isCancelled()) {
return;
}
if (t == null) {
onError(ExceptionHelper.createNullPointerException("onNext called with a null value."));
return;
}
queue.offer(t);
drain();
}
@Override
public boolean signalError(Throwable e) {
if (done || isCancelled()) {
return false;
}
error = e;
done = true;
drain();
return true;
}
@Override
public void onComplete() {
done = true;
drain();
}
@Override
void onRequested() {
drain();
}
@Override
void onUnsubscribed() {
if (wip.getAndIncrement() == 0) {
queue.clear();
}
}
void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
final Subscriber<? super T> a = downstream;
final SpscLinkedArrayQueue<T> q = queue;
for (; ; ) {
long r = get();
long e = 0L;
while (e != r) {
if (isCancelled()) {
q.clear();
return;
}
boolean d = done;
T o = q.poll();
boolean empty = o == null;
if (d && empty) {
Throwable ex = error;
if (ex != null) {
errorDownstream(ex);
} else {
completeDownstream();
}
return;
}
if (empty) {
break;
}
a.onNext(o);
e++;
}
if (e == r) {
if (isCancelled()) {
q.clear();
return;
}
boolean d = done;
boolean empty = q.isEmpty();
if (d && empty) {
Throwable ex = error;
if (ex != null) {
errorDownstream(ex);
} else {
completeDownstream();
}
return;
}
}
if (e != 0) {
BackpressureHelper.produced(this, e);
}
missed = wip.addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
}
static final class LatestAsyncEmitter<T> extends BaseEmitter<T> {
private static final long serialVersionUID = 4023437720691792495L;
final AtomicReference<T> queue;
Throwable error;
volatile boolean done;
final AtomicInteger wip;
LatestAsyncEmitter(Subscriber<? super T> downstream, FlowableOnSubscribe<T> source) {
super(downstream, source);
this.queue = new AtomicReference<>();
this.wip = new AtomicInteger();
}
@Override
public void onNext(T t) {
if (done || isCancelled()) {
return;
}
if (t == null) {
onError(ExceptionHelper.createNullPointerException("onNext called with a null value."));
return;
}
queue.set(t);
drain();
}
@Override
public boolean signalError(Throwable e) {
if (done || isCancelled()) {
return false;
}
error = e;
done = true;
drain();
return true;
}
@Override
public void onComplete() {
done = true;
drain();
}
@Override
void onRequested() {
drain();
}
@Override
void onUnsubscribed() {
if (wip.getAndIncrement() == 0) {
queue.lazySet(null);
}
}
void drain() {
if (wip.getAndIncrement() != 0) {
return;
}
int missed = 1;
final Subscriber<? super T> a = downstream;
final AtomicReference<T> q = queue;
for (; ; ) {
long r = get();
long e = 0L;
while (e != r) {
if (isCancelled()) {
q.lazySet(null);
return;
}
boolean d = done;
T o = q.getAndSet(null);
boolean empty = o == null;
if (d && empty) {
Throwable ex = error;
if (ex != null) {
errorDownstream(ex);
} else {
completeDownstream();
}
return;
}
if (empty) {
break;
}
a.onNext(o);
e++;
}
if (e == r) {
if (isCancelled()) {
q.lazySet(null);
return;
}
boolean d = done;
boolean empty = q.get() == null;
if (d && empty) {
Throwable ex = error;
if (ex != null) {
errorDownstream(ex);
} else {
completeDownstream();
}
return;
}
}
if (e != 0) {
BackpressureHelper.produced(this, e);
}
missed = wip.addAndGet(-missed);
if (missed == 0) {
break;
}
}
}
}
}
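/*
 * Illustrative sketch, not part of the original source: unlike the stock RxJava operator, this
 * variant re-invokes the FlowableOnSubscribe callback on each request batch, so the callback must
 * satisfy the outstanding demand or complete on every invocation, as the class javadoc requires.
 * The bounded counter below is a hypothetical source demonstrating that contract.
 */
final class FlowableCreateSketch {
   static Flowable<Long> boundedCounter(long max) {
      AtomicLong next = new AtomicLong();
      FlowableOnSubscribe<Long> source = emitter -> {
         // Emit exactly as much as is currently requested; requested() decreases as items
         // are produced, so this loop drains the outstanding demand and then returns.
         while (emitter.requested() > 0 && !emitter.isCancelled()) {
            long value = next.getAndIncrement();
            if (value >= max) {
               emitter.onComplete();
               return;
            }
            emitter.onNext(value);
         }
      };
      return new FlowableCreate<>(source, BackpressureStrategy.ERROR);
   }
}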
| 21,655
| 27.161248
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/PublisherReducers.java
|
package org.infinispan.reactive.publisher;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionStage;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collector;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.Util;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
/**
 * Static factory method class that provides various reducers and finalizers for use with a distributed Publisher. Note
 * that these functions are all serializable by Infinispan, assuming that any passed arguments are as well.
* @author wburns
* @since 10.0
*/
public class PublisherReducers {
private PublisherReducers() { }
public static Function<Publisher<Boolean>, CompletionStage<Boolean>> and() {
return AndFinalizer.INSTANCE;
}
public static <E> Function<Publisher<E>, CompletionStage<Boolean>> allMatch(Predicate<? super E> predicate) {
return new AllMatchReducer<>(predicate);
}
public static <E> Function<Publisher<E>, CompletionStage<Boolean>> anyMatch(Predicate<? super E> predicate) {
return new AnyMatchReducer<>(predicate);
}
public static <I, E> Function<Publisher<I>, CompletionStage<E>> collect(Supplier<E> supplier, BiConsumer<E, ? super I> consumer) {
return new CollectReducer<>(supplier, consumer);
}
public static <I, E> Function<Publisher<I>, CompletionStage<E>> collectorReducer(Collector<? super I, E, ?> collector) {
return new CollectorReducer<>(collector);
}
public static <E> Function<Publisher<E>, CompletionStage<E>> collectorFinalizer(Collector<?, E, ?> collector) {
return new CollectorFinalizer<>(collector);
}
public static <E> Function<Publisher<E>, CompletionStage<E>> accumulate(BiConsumer<E, E> biConsumer) {
return new CombinerFinalizer<>(biConsumer);
}
public static <E> Function<Publisher<E>, CompletionStage<E>> findFirst() {
return FindFirstReducerFinalizer.INSTANCE;
}
public static <E> Function<Publisher<E>, CompletionStage<E>> max(Comparator<? super E> comparator) {
return new MaxReducerFinalizer<>(comparator);
}
public static <E> Function<Publisher<E>, CompletionStage<E>> min(Comparator<? super E> comparator) {
return new MinReducerFinalizer<>(comparator);
}
public static <E> Function<Publisher<E>, CompletionStage<Boolean>> noneMatch(Predicate<? super E> predicate) {
return new NoneMatchReducer<>(predicate);
}
public static Function<Publisher<Boolean>, CompletionStage<Boolean>> or() {
return OrFinalizer.INSTANCE;
}
/**
    * Provides a reduction where the initial value must be an identity value that is not modified by the provided
    * biFunction. Modifying the identity value will cause unexpected results.
* <p>
* If the initial value needs to be modified, you should use {@link #reduceWith(Callable, BiFunction)} instead.
    * @param identity initial identity value to use (this value must not be modified by the provided biFunction)
* @param biFunction biFunction used to reduce the values into a single one
* @param <I> input type
* @param <E> output reduced type
* @return function that will map a publisher of the input type to a completion stage of the output type
*/
public static <I, E> Function<Publisher<I>, CompletionStage<E>> reduce(E identity,
BiFunction<E, ? super I, E> biFunction) {
return new ReduceWithIdentityReducer<>(identity, biFunction);
}
public static <I, E> Function<Publisher<I>, CompletionStage<E>> reduceWith(Callable<? extends E> initialSupplier,
BiFunction<E, ? super I, E> biFunction) {
return new ReduceWithInitialSupplierReducer<>(initialSupplier, biFunction);
}
public static <E> Function<Publisher<E>, CompletionStage<E>> reduce(BinaryOperator<E> operator) {
return new ReduceReducerFinalizer<>(operator);
}
public static Function<Publisher<?>, CompletionStage<Long>> count() {
return SumReducer.INSTANCE;
}
public static Function<Publisher<Long>, CompletionStage<Long>> add() {
return SumFinalizer.INSTANCE;
}
public static <I> Function<Publisher<I>, CompletionStage<Object[]>> toArrayReducer() {
return toArrayReducer(null);
}
public static <I extends E, E> Function<Publisher<I>, CompletionStage<E[]>> toArrayReducer(IntFunction<E[]> generator) {
return new ToArrayReducer<>(generator);
}
public static <E> Function<Publisher<E[]>, CompletionStage<E[]>> toArrayFinalizer() {
return toArrayFinalizer(null);
}
public static <E> Function<Publisher<E[]>, CompletionStage<E[]>> toArrayFinalizer(IntFunction<E[]> generator) {
return new ToArrayFinalizer<>(generator);
}
private static class AllMatchReducer<E> implements Function<Publisher<E>, CompletionStage<Boolean>> {
private final Predicate<? super E> predicate;
private AllMatchReducer(Predicate<? super E> predicate) {
this.predicate = predicate;
}
@Override
public CompletionStage<Boolean> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.all(predicate::test)
.toCompletionStage();
}
}
private static class AnyMatchReducer<E> implements Function<Publisher<E>, CompletionStage<Boolean>> {
private final Predicate<? super E> predicate;
private AnyMatchReducer(Predicate<? super E> predicate) {
this.predicate = predicate;
}
@Override
public CompletionStage<Boolean> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.any(predicate::test)
.toCompletionStage();
}
}
private static final class AndFinalizer implements Function<Publisher<Boolean>, CompletionStage<Boolean>> {
private static final AndFinalizer INSTANCE = new AndFinalizer();
@Override
public CompletionStage<Boolean> apply(Publisher<Boolean> booleanPublisher) {
return Flowable.fromPublisher(booleanPublisher)
.all(bool -> bool == Boolean.TRUE)
.toCompletionStage();
}
}
private static final class CollectorFinalizer<E, R> implements Function<Publisher<E>, CompletionStage<E>> {
private final Collector<?, E, ?> collector;
private CollectorFinalizer(Collector<?, E, ?> collector) {
this.collector = collector;
}
@Override
public CompletionStage<E> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.reduce(collector.combiner()::apply)
               // This ensures that at least the default value is provided - this shouldn't be required - but
               // the disconnect between the reducer and finalizer for a collector leaves this ambiguous
.switchIfEmpty(Single.fromCallable(collector.supplier()::get))
.toCompletionStage();
}
}
private static final class CollectReducer<I, E> implements Function<Publisher<I>, CompletionStage<E>> {
private final Supplier<E> supplier;
private final BiConsumer<E, ? super I> accumulator;
private CollectReducer(Supplier<E> supplier, BiConsumer<E, ? super I> accumulator) {
this.supplier = supplier;
this.accumulator = accumulator;
}
@Override
public CompletionStage<E> apply(Publisher<I> iPublisher) {
return Flowable.fromPublisher(iPublisher)
.collect(supplier::get, accumulator::accept)
.toCompletionStage();
}
}
private static final class CollectorReducer<I, E> implements Function<Publisher<I>, CompletionStage<E>> {
private final Collector<? super I, E, ?> collector;
private CollectorReducer(Collector<? super I, E, ?> collector) {
this.collector = collector;
}
@Override
public CompletionStage<E> apply(Publisher<I> iPublisher) {
return Flowable.fromPublisher(iPublisher)
.collect(collector.supplier()::get, collector.accumulator()::accept)
.toCompletionStage();
}
}
private static final class CombinerFinalizer<E> implements Function<Publisher<E>, CompletionStage<E>> {
private final BiConsumer<E, E> biConsumer;
private CombinerFinalizer(BiConsumer<E, E> biConsumer) {
this.biConsumer = biConsumer;
}
@Override
public CompletionStage<E> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.reduce((e1, e2) -> {
biConsumer.accept(e1, e2);
return e1;
})
.toCompletionStage(null);
}
}
private static final class FindFirstReducerFinalizer<E> implements Function<Publisher<E>, CompletionStage<E>> {
private static final FindFirstReducerFinalizer INSTANCE = new FindFirstReducerFinalizer();
@Override
public CompletionStage<E> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.firstElement()
.toCompletionStage(null);
}
}
private static class MaxReducerFinalizer<E> implements Function<Publisher<E>, CompletionStage<E>> {
private final Comparator<? super E> comparator;
private MaxReducerFinalizer(Comparator<? super E> comparator) {
this.comparator = comparator;
}
@Override
public CompletionStage<E> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.reduce((e1, e2) -> {
if (comparator.compare(e1, e2) > 0) {
return e1;
}
return e2;
})
.toCompletionStage(null);
}
}
private static class MinReducerFinalizer<E> implements Function<Publisher<E>, CompletionStage<E>> {
private final Comparator<? super E> comparator;
private MinReducerFinalizer(Comparator<? super E> comparator) {
this.comparator = comparator;
}
@Override
public CompletionStage<E> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.reduce((e1, e2) -> {
if (comparator.compare(e1, e2) > 0) {
return e2;
}
return e1;
})
.toCompletionStage(null);
}
}
private static class NoneMatchReducer<E> implements Function<Publisher<E>, CompletionStage<Boolean>> {
private final Predicate<? super E> predicate;
private NoneMatchReducer(Predicate<? super E> predicate) {
this.predicate = predicate;
}
@Override
public CompletionStage<Boolean> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.all(predicate.negate()::test)
.toCompletionStage();
}
}
private static final class OrFinalizer implements Function<Publisher<Boolean>, CompletionStage<Boolean>> {
private static final OrFinalizer INSTANCE = new OrFinalizer();
@Override
public CompletionStage<Boolean> apply(Publisher<Boolean> booleanPublisher) {
return Flowable.fromPublisher(booleanPublisher)
.any(bool -> bool == Boolean.TRUE)
.toCompletionStage();
}
}
private static class ReduceWithIdentityReducer<I, E> implements Function<Publisher<I>, CompletionStage<E>> {
private final E identity;
private final BiFunction<E, ? super I, E> biFunction;
private ReduceWithIdentityReducer(E identity, BiFunction<E, ? super I, E> biFunction) {
this.identity = identity;
this.biFunction = biFunction;
}
@Override
public CompletionStage<E> apply(Publisher<I> iPublisher) {
return Flowable.fromPublisher(iPublisher)
.reduce(identity, biFunction::apply)
.toCompletionStage();
}
}
private static class ReduceWithInitialSupplierReducer<I, E> implements Function<Publisher<I>, CompletionStage<E>> {
private final Callable<? extends E> initialSupplier;
private final BiFunction<E, ? super I, E> biFunction;
private ReduceWithInitialSupplierReducer(Callable<? extends E> initialSupplier, BiFunction<E, ? super I, E> biFunction) {
this.initialSupplier = initialSupplier;
this.biFunction = biFunction;
}
@Override
public CompletionStage<E> apply(Publisher<I> iPublisher) {
return Flowable.fromPublisher(iPublisher)
.reduceWith(initialSupplier::call, biFunction::apply)
.toCompletionStage();
}
}
private static class ReduceReducerFinalizer<E> implements Function<Publisher<E>, CompletionStage<E>> {
private final BinaryOperator<E> operator;
private ReduceReducerFinalizer(BinaryOperator<E> operator) {
this.operator = operator;
}
@Override
public CompletionStage<E> apply(Publisher<E> ePublisher) {
return Flowable.fromPublisher(ePublisher)
.reduce(operator::apply)
.toCompletionStage(null);
}
}
private static class SumReducer implements Function<Publisher<?>, CompletionStage<Long>> {
private static final SumReducer INSTANCE = new SumReducer();
@Override
public CompletionStage<Long> apply(Publisher<?> longPublisher) {
return Flowable.fromPublisher(longPublisher)
.count()
.toCompletionStage();
}
}
private static class SumFinalizer implements Function<Publisher<Long>, CompletionStage<Long>> {
private static final SumFinalizer INSTANCE = new SumFinalizer();
@Override
public CompletionStage<Long> apply(Publisher<Long> longPublisher) {
return Flowable.fromPublisher(longPublisher)
.reduce((long) 0, Long::sum)
.toCompletionStage();
}
}
private static class ToArrayReducer<I extends E, E> implements Function<Publisher<I>, CompletionStage<E[]>> {
private final IntFunction<E[]> generator;
private ToArrayReducer(IntFunction<E[]> generator) {
this.generator = generator;
}
@Override
public CompletionStage<E[]> apply(Publisher<I> ePublisher) {
Single<List<I>> listSingle = Flowable.fromPublisher(ePublisher).toList();
Single<E[]> arraySingle;
if (generator != null) {
arraySingle = listSingle.map(l -> {
E[] array = generator.apply(l.size());
int offset = 0;
for (E e : l) {
array[offset++] = e;
}
return array;
});
} else {
arraySingle = listSingle.map(l -> l.toArray((E[]) Util.EMPTY_OBJECT_ARRAY));
}
return arraySingle.toCompletionStage();
}
}
private static class ToArrayFinalizer<E> implements Function<Publisher<E[]>, CompletionStage<E[]>> {
private final IntFunction<E[]> generator;
private ToArrayFinalizer(IntFunction<E[]> generator) {
this.generator = generator;
}
@Override
public CompletionStage<E[]> apply(Publisher<E[]> publisher) {
Flowable<E[]> flowable = Flowable.fromPublisher(publisher);
Single<E[]> arraySingle;
if (generator != null) {
arraySingle = flowable.reduce((v1, v2) -> {
E[] array = generator.apply(v1.length + v2.length);
System.arraycopy(v1, 0, array, 0, v1.length);
System.arraycopy(v2, 0, array, v1.length, v2.length);
return array;
}).switchIfEmpty(Single.fromCallable(() -> generator.apply(0)));
} else {
arraySingle = flowable.reduce((v1, v2) -> {
E[] array = Arrays.copyOf(v1, v1.length + v2.length);
System.arraycopy(v2, 0, array, v1.length, v2.length);
return array;
}).switchIfEmpty(Single.just((E[]) Util.EMPTY_OBJECT_ARRAY));
}
return arraySingle.toCompletionStage();
}
}
public static final class PublisherReducersExternalizer implements AdvancedExternalizer<Object> {
enum ExternalizerId {
ALL_MATCH_REDUCER(AllMatchReducer.class),
ANY_MATCH_REDUCER(AnyMatchReducer.class),
AND_FINALIZER(AndFinalizer.class),
COLLECT_REDUCER(CollectReducer.class),
COLLECTOR_FINALIZER(CollectorFinalizer.class),
COLLECTOR_REDUCER(CollectorReducer.class),
COMBINER_FINALIZER(CombinerFinalizer.class),
FIND_FIRST_REDUCER_FINALIZER(FindFirstReducerFinalizer.class),
MAX_REDUCER_FINALIZER(MaxReducerFinalizer.class),
MIN_REDUCER_FINALIZER(MinReducerFinalizer.class),
NONE_MATCH_REDUCER(NoneMatchReducer.class),
OR_FINALIZER(OrFinalizer.class),
REDUCE_WITH_IDENTITY_REDUCER(ReduceWithIdentityReducer.class),
REDUCE_WITH_INITIAL_SUPPLIER_REDUCER(ReduceWithInitialSupplierReducer.class),
REDUCE_REDUCER_FINALIZER(ReduceReducerFinalizer.class),
SUM_REDUCER(SumReducer.class),
SUM_FINALIZER(SumFinalizer.class),
TO_ARRAY_FINALIZER(ToArrayFinalizer.class),
TO_ARRAY_REDUCER(ToArrayReducer.class),
;
private final Class<?> marshalledClass;
ExternalizerId(Class<?> marshalledClass) {
this.marshalledClass = marshalledClass;
}
}
private static final ExternalizerId[] VALUES = ExternalizerId.values();
private final Map<Class<?>, ExternalizerId> objects = new HashMap<>();
public PublisherReducersExternalizer() {
for (ExternalizerId id : ExternalizerId.values()) {
objects.put(id.marshalledClass, id);
}
}
@Override
public Set<Class<?>> getTypeClasses() {
return objects.keySet();
}
@Override
public Integer getId() {
return Ids.PUBLISHER_REDUCERS;
}
@Override
public void writeObject(ObjectOutput output, Object object) throws IOException {
ExternalizerId id = objects.get(object.getClass());
if (id == null) {
throw new IllegalArgumentException("Unsupported class " + object.getClass() + " was provided!");
}
output.writeByte(id.ordinal());
switch (id) {
case ALL_MATCH_REDUCER:
output.writeObject(((AllMatchReducer) object).predicate);
break;
case ANY_MATCH_REDUCER:
output.writeObject(((AnyMatchReducer) object).predicate);
break;
case COLLECT_REDUCER:
output.writeObject(((CollectReducer) object).supplier);
output.writeObject(((CollectReducer) object).accumulator);
break;
case COLLECTOR_FINALIZER:
output.writeObject(((CollectorFinalizer) object).collector);
break;
case COLLECTOR_REDUCER:
output.writeObject(((CollectorReducer) object).collector);
break;
case COMBINER_FINALIZER:
output.writeObject(((CombinerFinalizer) object).biConsumer);
break;
case MAX_REDUCER_FINALIZER:
output.writeObject(((MaxReducerFinalizer) object).comparator);
break;
case MIN_REDUCER_FINALIZER:
output.writeObject(((MinReducerFinalizer) object).comparator);
break;
case NONE_MATCH_REDUCER:
output.writeObject(((NoneMatchReducer) object).predicate);
break;
case REDUCE_WITH_IDENTITY_REDUCER:
output.writeObject(((ReduceWithIdentityReducer) object).identity);
output.writeObject(((ReduceWithIdentityReducer) object).biFunction);
break;
case REDUCE_WITH_INITIAL_SUPPLIER_REDUCER:
output.writeObject(((ReduceWithInitialSupplierReducer) object).initialSupplier);
output.writeObject(((ReduceWithInitialSupplierReducer) object).biFunction);
break;
case REDUCE_REDUCER_FINALIZER:
output.writeObject(((ReduceReducerFinalizer) object).operator);
break;
case TO_ARRAY_REDUCER:
output.writeObject(((ToArrayReducer) object).generator);
break;
case TO_ARRAY_FINALIZER:
output.writeObject(((ToArrayFinalizer) object).generator);
break;
}
}
@Override
public Object readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int number = input.readUnsignedByte();
ExternalizerId[] ids = VALUES;
if (number < 0 || number >= ids.length) {
throw new IllegalArgumentException("Found invalid number " + number);
}
ExternalizerId id = ids[number];
switch (id) {
case AND_FINALIZER:
return AndFinalizer.INSTANCE;
case ALL_MATCH_REDUCER:
return new AllMatchReducer((Predicate) input.readObject());
case ANY_MATCH_REDUCER:
return new AnyMatchReducer((Predicate) input.readObject());
case COLLECT_REDUCER:
return new CollectReducer((Supplier) input.readObject(), (BiConsumer) input.readObject());
case COLLECTOR_FINALIZER:
return new CollectorFinalizer((Collector) input.readObject());
case COLLECTOR_REDUCER:
return new CollectorReducer((Collector) input.readObject());
case COMBINER_FINALIZER:
return new CombinerFinalizer((BiConsumer) input.readObject());
case FIND_FIRST_REDUCER_FINALIZER:
return FindFirstReducerFinalizer.INSTANCE;
case MAX_REDUCER_FINALIZER:
return new MaxReducerFinalizer<>((Comparator) input.readObject());
case MIN_REDUCER_FINALIZER:
return new MinReducerFinalizer((Comparator) input.readObject());
case NONE_MATCH_REDUCER:
return new NoneMatchReducer((Predicate) input.readObject());
case OR_FINALIZER:
return OrFinalizer.INSTANCE;
case REDUCE_WITH_IDENTITY_REDUCER:
return new ReduceWithIdentityReducer(input.readObject(), (BiFunction) input.readObject());
case REDUCE_WITH_INITIAL_SUPPLIER_REDUCER:
return new ReduceWithInitialSupplierReducer<>((Callable) input.readObject(), (BiFunction) input.readObject());
case REDUCE_REDUCER_FINALIZER:
return new ReduceReducerFinalizer((BinaryOperator) input.readObject());
case SUM_REDUCER:
return SumReducer.INSTANCE;
case SUM_FINALIZER:
return SumFinalizer.INSTANCE;
case TO_ARRAY_REDUCER:
return new ToArrayReducer((IntFunction) input.readObject());
case TO_ARRAY_FINALIZER:
return new ToArrayFinalizer((IntFunction) input.readObject());
default:
throw new IllegalArgumentException("ExternalizerId not supported: " + id);
}
}
}
}
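/*
 * Illustrative sketch (not part of the original sources): round-tripping a reducer through
 * PublisherReducersExternalizer with plain java.io streams. In a real deployment Infinispan's
 * marshalling drives writeObject/readObject via Ids.PUBLISHER_REDUCERS; this only exercises the
 * ordinal-dispatch pattern directly.
 */
final class PublisherReducersRoundTripSketch {
   static Object roundTrip(Object reducer) throws java.io.IOException, ClassNotFoundException {
      PublisherReducers.PublisherReducersExternalizer externalizer = new PublisherReducers.PublisherReducersExternalizer();
      java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
      try (java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bytes)) {
         externalizer.writeObject(out, reducer); // writes the ordinal byte plus any per-type state
      }
      try (java.io.ObjectInputStream in = new java.io.ObjectInputStream(
            new java.io.ByteArrayInputStream(bytes.toByteArray()))) {
         return externalizer.readObject(in); // reads the ordinal back and reconstructs the reducer
      }
   }
}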
| 23,963
| 37.965854
| 133
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/PublisherTransformers.java
|
package org.infinispan.reactive.publisher;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.reactivestreams.Publisher;
/**
* Static factory method class to provide various transformers for use with distributed Publisher. Note
* that these functions are all serializable by Infinispan assuming that any passed arguments are as well.
* @author wburns
* @since 11.0
*/
public class PublisherTransformers {
private PublisherTransformers() { }
public static <I> Function<Publisher<I>, Publisher<I>> identity() {
return IdentityTransformer.INSTANCE;
}
private static final class IdentityTransformer<I> implements Function<Publisher<I>, Publisher<I>> {
private static final IdentityTransformer INSTANCE = new IdentityTransformer();
@Override
public Publisher<I> apply(Publisher<I> publisher) {
return publisher;
}
}
public static final class PublisherTransformersExternalizer implements AdvancedExternalizer<Object> {
enum ExternalizerId {
IDENTITY_TRANSFORMER(IdentityTransformer.class),
;
private final Class<?> marshalledClass;
ExternalizerId(Class<?> marshalledClass) {
this.marshalledClass = marshalledClass;
}
}
private static final ExternalizerId[] VALUES = ExternalizerId.values();
private final Map<Class<?>, ExternalizerId> objects = new HashMap<>();
public PublisherTransformersExternalizer() {
for (ExternalizerId id : ExternalizerId.values()) {
objects.put(id.marshalledClass, id);
}
}
@Override
public Set<Class<?>> getTypeClasses() {
return objects.keySet();
}
@Override
public Integer getId() {
return Ids.PUBLISHER_TRANSFORMERS;
}
@Override
public void writeObject(ObjectOutput output, Object object) throws IOException {
ExternalizerId id = objects.get(object.getClass());
if (id == null) {
throw new IllegalArgumentException("Unsupported class " + object.getClass() + " was provided!");
}
output.writeByte(id.ordinal());
// add switch case here when we have types that require state
}
@Override
public Object readObject(ObjectInput input) throws IOException, ClassNotFoundException {
int number = input.readUnsignedByte();
ExternalizerId[] ids = VALUES;
if (number < 0 || number >= ids.length) {
throw new IllegalArgumentException("Found invalid number " + number);
}
ExternalizerId id = ids[number];
switch (id) {
case IDENTITY_TRANSFORMER:
return IdentityTransformer.INSTANCE;
default:
throw new IllegalArgumentException("ExternalizerId not supported: " + id);
}
}
}
}
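/*
 * Illustrative sketch (not part of the original sources): identity() simply hands back the same
 * Publisher instance, so it composes as a no-op with any Function<Publisher<I>, Publisher<R>>
 * pipeline. The Flowable source below is a hypothetical stand-in for a cache-backed publisher.
 */
final class IdentityTransformerSketch {
   static void demo() {
      Function<Publisher<String>, Publisher<String>> transformer = PublisherTransformers.identity();
      Publisher<String> source = io.reactivex.rxjava3.core.Flowable.just("a", "b");
      // the identity transformer returns its argument unchanged
      assert transformer.apply(source) == source;
   }
}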
| 3,121
| 31.520833
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/ModifiedValueFunction.java
|
package org.infinispan.reactive.publisher.impl;
import java.util.function.Function;
/**
* This is a special interface that can mark a Function so that a user can know if the actual value will change
* when the function is applied or not. This way a function can communicate if it is only observing values or modifying
* them to the invoker. This can allow the invoker in some cases to optimize a given code path knowing that the values
* are unchanged.
* <p>
 * It should be noted that "changed" can mean different things in a given context. For example, if the underlying
 * implementation utilizes something similar to an {@link org.infinispan.cache.impl.EncodingFunction}, the result could
 * be a completely different object that still represents essentially the same value.
* @param <I> the input type
* @param <O> the output type
*/
public interface ModifiedValueFunction<I, O> extends Function<I, O> {
/**
* This method should return true when this function changes the actual values of the Publisher. This
* can be useful for some optimizations that may need to track produced values from the original.
* @return if the values in the publisher are changed
*/
boolean isModified();
}
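/*
 * Illustrative sketch (not part of the original sources): a ModifiedValueFunction that rewrites
 * values and therefore must report isModified() == true. A purely observing function (e.g. one
 * that only counts invocations) would return false, letting the invoker keep its optimizations.
 */
final class UpperCaseFunctionSketch implements ModifiedValueFunction<String, String> {
   @Override
   public String apply(String value) {
      return value.toUpperCase(); // produces a genuinely different value
   }
   @Override
   public boolean isModified() {
      return true; // values change, so callers must track the produced values
   }
}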
| 1,190
| 46.64
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/SegmentPublisherSupplier.java
|
package org.infinispan.reactive.publisher.impl;
import java.util.function.IntConsumer;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
/**
* A {@link Publisher} that also notifies in a thread safe manner when a segment has sent all values upstream. To more
* specifically detail the guarantee, the {@code accept} method of the provided {@link IntConsumer} will be invoked
* serially inline with {@code onNext}, {@code onError}, {@code onComplete} and will only be invoked after all values
 * from the given segment have already been notified via {@code onNext}. Note that there is no guarantee that the previous
 * value was from the given segment, only that all have been notified prior.
* <p>
 * If segment completion is not needed, use {@link #publisherWithoutSegments()} instead. This allows
 * implementors to optimize for the case when segment completion is not needed, as tracking completion may require
 * additional overhead.
* @param <R> value type
*/
public interface SegmentPublisherSupplier<R> {
/**
* Wrapper around an element returned that can either be a value or a segment completion. Note that the user
* should invoke {@link #isSegmentComplete()} or {@link #isValue()} to determine which type it is.
*
* @param <R> the value type if present
*/
interface Notification<R> {
/**
      * Whether this notification contains a value; if so, the value is always non-null
*
* @return true if a value is present
*/
boolean isValue();
/**
      * Whether this notification is for a completed segment
*
* @return true if a segment is complete
*/
boolean isSegmentComplete();
/**
* The value when present for this notification
*
* @return the value
* @throws IllegalStateException if this notification is segment complete
*/
default R value() {
throw new IllegalStateException("Notification does not contain a value, please check with isValue first!");
}
/**
      * The segment that maps to the value of this notification
*
* @return the segment
* @throws IllegalStateException if this notification is segment complete
*/
default int valueSegment() {
throw new IllegalStateException("Notification does not contain a value segment, please check with isValue first!");
}
/**
* The segment that was complete for this notification
*
* @return the segment
* @throws IllegalStateException if this notification contains a value
*/
default int completedSegment() {
throw new IllegalStateException("Notification does not contain a completed segment, please check with isSegmentComplete first!");
}
}
Publisher<R> publisherWithoutSegments();
Publisher<Notification<R>> publisherWithSegments();
}
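/*
 * Illustrative sketch (not part of the original sources): consuming publisherWithSegments() and
 * branching on the notification type, as the Notification contract above prescribes.
 */
final class SegmentNotificationSketch {
   static <R> void printAll(SegmentPublisherSupplier<R> supplier) {
      io.reactivex.rxjava3.core.Flowable.fromPublisher(supplier.publisherWithSegments())
            .subscribe(notification -> {
               if (notification.isValue()) {
                  System.out.println("value " + notification.value() + " from segment " + notification.valueSegment());
               } else {
                  System.out.println("segment " + notification.completedSegment() + " completed");
               }
            });
   }
}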
| 3,005
| 38.038961
| 138
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/LocalPublisherManagerImpl.java
|
package org.infinispan.reactive.publisher.impl;
import static org.infinispan.context.InvocationContextFactory.UNBOUNDED;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import java.lang.invoke.MethodHandles;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.function.Function;
import java.util.function.IntConsumer;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.CacheSet;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.cache.impl.InvocationHelper;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.read.EntrySetCommand;
import org.infinispan.commands.read.KeySetCommand;
import org.infinispan.commands.read.SizeCommand;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.ProcessorInfo;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.manager.PersistenceManager.StoreChangeListener;
import org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult;
import org.infinispan.reactive.publisher.impl.commands.reduction.SegmentPublisherResult;
import org.infinispan.stream.StreamMarshalling;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.FlowableConverter;
import io.reactivex.rxjava3.core.Maybe;
import io.reactivex.rxjava3.core.Scheduler;
import io.reactivex.rxjava3.core.Single;
import io.reactivex.rxjava3.functions.Predicate;
import io.reactivex.rxjava3.parallel.ParallelFlowable;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
import io.reactivex.rxjava3.schedulers.Schedulers;
/**
* LocalPublisherManager that publishes entries from the local node only. This class handles suspecting segments
* if they are lost while still processing them. The notification of the segments being lost is done by invoking
* the {@link #segmentsLost(IntSet)} method.
* @author wburns
* @since 10.0
*/
@Scope(Scopes.NAMED_CACHE)
public class LocalPublisherManagerImpl<K, V> implements LocalPublisherManager<K, V> {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
static final int PARALLEL_BATCH_SIZE = 1024;
@Inject ComponentRef<Cache<K, V>> cacheComponentRef;
@Inject DistributionManager distributionManager;
@Inject PersistenceManager persistenceManager;
@Inject Configuration configuration;
@Inject KeyPartitioner keyPartitioner;
@Inject ComponentRef<InvocationHelper> invocationHelper;
@Inject CommandsFactory commandsFactory;
@Inject InvocationContextFactory invocationContextFactory;
// This cache should only be used for retrieving entries via Cache#get
protected AdvancedCache<K, V> remoteCache;
// This cache should be used for iteration purposes or Cache#get that are local only
protected AdvancedCache<K, V> cache;
protected Scheduler nonBlockingScheduler;
protected int maxSegment;
protected final int cpuCount = ProcessorInfo.availableProcessors();
protected final Set<IntConsumer> changeListener = ConcurrentHashMap.newKeySet();
private final LocalEntryPublisherStrategy nonSegmentedPublisher = new NonSegmentedEntryPublisherStrategy();
private final LocalEntryPublisherStrategy segmentedPublisher = new SegmentedLocalPublisherStrategyLocal();
private volatile LocalEntryPublisherStrategy localPublisherStrategy;
private final StoreChangeListener storeChangeListener = pm -> updateStrategy(pm.usingSegmentedStore());
/**
* Injects the cache - unfortunately this cannot be in start. Tests will rewire certain components which will in
* turn reinject the cache, but they won't call the start method! If the latter is fixed we can add this to start
* method and add @Inject to the variable.
*/
@Inject
public void inject(@ComponentName(NON_BLOCKING_EXECUTOR) ExecutorService nonBlockingExecutor) {
this.nonBlockingScheduler = Schedulers.from(nonBlockingExecutor);
}
@Start
public void start() {
// We need to unwrap the cache as a local stream should only deal with BOXED values
// Any mappings will be provided by the originator node in their intermediate operation stack in the operation itself.
this.remoteCache = AbstractDelegatingCache.unwrapCache(cacheComponentRef.running()).getAdvancedCache();
// The iteration caches should only deal with local entries.
this.cache = remoteCache.withFlags(Flag.CACHE_MODE_LOCAL);
ClusteringConfiguration clusteringConfiguration = cache.getCacheConfiguration().clustering();
this.maxSegment = clusteringConfiguration.hash().numSegments();
updateStrategy(configuration.persistence().usingSegmentedStore());
persistenceManager.addStoreListener(storeChangeListener);
}
@Stop
public void stop() {
persistenceManager.removeStoreListener(storeChangeListener);
}
private void updateStrategy(boolean usingSegmentedStored) {
if (configuration.persistence().usingStores() && !usingSegmentedStored) {
localPublisherStrategy = nonSegmentedPublisher;
} else {
localPublisherStrategy = segmentedPublisher;
}
}
@Override
public <R> CompletionStage<PublisherResult<R>> keyReduction(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
if (keysToInclude != null) {
return handleSpecificKeys(parallelPublisher, keysToInclude, keysToExclude, explicitFlags, deliveryGuarantee,
collator, finalizer);
}
CacheSet<K> keySet = getKeySet(explicitFlags);
Function<K, K> toKeyFunction = Function.identity();
switch (deliveryGuarantee) {
case AT_MOST_ONCE:
CompletionStage<R> stage = atMostOnce(parallelPublisher, keySet, keysToExclude, toKeyFunction,
segments, collator, finalizer);
return stage.thenApply(ignoreSegmentsFunction());
case AT_LEAST_ONCE:
return atLeastOnce(parallelPublisher, keySet, keysToExclude, toKeyFunction, segments, collator, finalizer);
case EXACTLY_ONCE:
return exactlyOnce(parallelPublisher, keySet, keysToExclude, toKeyFunction, segments, collator, finalizer);
default:
throw new UnsupportedOperationException("Unsupported delivery guarantee: " + deliveryGuarantee);
}
}
@Override
public <R> CompletionStage<PublisherResult<R>> entryReduction(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
if (keysToInclude != null) {
return handleSpecificEntries(parallelPublisher, keysToInclude, keysToExclude, explicitFlags, deliveryGuarantee,
collator, finalizer);
}
CacheSet<CacheEntry<K, V>> entrySet = getEntrySet(explicitFlags);
// We have to cast to Function, since we can't cast our inner generic
Function<CacheEntry<K, V>, K> toKeyFunction = (Function) StreamMarshalling.entryToKeyFunction();
switch (deliveryGuarantee) {
case AT_MOST_ONCE:
CompletionStage<R> stage = atMostOnce(parallelPublisher, entrySet, keysToExclude, toKeyFunction,
segments, collator, finalizer);
return stage.thenApply(ignoreSegmentsFunction());
case AT_LEAST_ONCE:
return atLeastOnce(parallelPublisher, entrySet, keysToExclude, toKeyFunction, segments, collator, finalizer);
case EXACTLY_ONCE:
return exactlyOnce(parallelPublisher, entrySet, keysToExclude, toKeyFunction, segments, collator, finalizer);
default:
throw new UnsupportedOperationException("Unsupported delivery guarantee: " + deliveryGuarantee);
}
}
@Override
public <R> SegmentAwarePublisherSupplier<R> keyPublisher(IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends Publisher<R>> transformer) {
if (keysToInclude != null) {
AdvancedCache<K, V> cache = getCache(deliveryGuarantee, explicitFlags);
return specificKeyPublisher(segments, keysToInclude, keyFlowable -> keyFlowable.filter(cache::containsKey),
transformer);
}
return new SegmentAwarePublisherSupplierImpl<>(segments, getKeySet(explicitFlags), Function.identity(),
keysToExclude, deliveryGuarantee, transformer);
}
private Flowable<CacheEntry<K, V>> filterEntries(AdvancedCache<K, V> cacheToUse, Flowable<K> entryFlowable) {
return entryFlowable.concatMapMaybe(k -> {
CompletableFuture<CacheEntry<K, V>> future = cacheToUse.getCacheEntryAsync(k);
future = future.thenApply(entry -> {
if (entry == null) {
return NullCacheEntry.<K, V>getInstance();
} else if (entry instanceof MVCCEntry) {
// Scattered cache can return MVCCEntry instances
entry = new ImmortalCacheEntry(entry.getKey(), entry.getValue());
}
return entry;
});
return Maybe.fromCompletionStage(future);
}).filter(e -> e != NullCacheEntry.getInstance());
}
@Override
public <R> SegmentAwarePublisherSupplier<R> entryPublisher(IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends Publisher<R>> transformer) {
if (keysToInclude != null) {
AdvancedCache<K, V> cacheToUse = getCache(deliveryGuarantee, explicitFlags);
return specificKeyPublisher(segments, keysToInclude, entryFlowable ->
filterEntries(cacheToUse, entryFlowable)
, transformer);
}
return new SegmentAwarePublisherSupplierImpl<>(segments, getEntrySet(explicitFlags),
StreamMarshalling.entryToKeyFunction(), keysToExclude, deliveryGuarantee, transformer);
}
private <I, R> SegmentAwarePublisherSupplier<R> specificKeyPublisher(IntSet segments, Set<K> keysToInclude,
FlowableConverter<K, Flowable<I>> conversionFunction,
Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
return new BaseSegmentAwarePublisherSupplier<R>() {
@Override
public Publisher<R> publisherWithoutSegments() {
return Flowable.fromIterable(keysToInclude)
.to(conversionFunction)
.to(transformer::apply);
}
@Override
Flowable<NotificationWithLost<R>> flowableWithNotifications(boolean reuseNotifications) {
return Flowable.fromIterable(keysToInclude)
.groupBy(keyPartitioner::getSegment)
// We use concatMapEager instead of flatMap (groupBy needs either to prevent starvation) to ensure
// ordering guarantees defined in the LocalPublisherManager entryPublisher method.
// Due to eager subscription we cannot reuse notifications
.concatMapEager(group -> {
int segment = group.getKey();
// Shouldn't be possible to get a key that doesn't belong to the required segment - but just so
// we don't accidentally starve the groupBy
if (!segments.remove(segment)) {
throw new IllegalArgumentException("Key: " + blockingFirst(group) + " maps to segment: " + segment +
", which was not included in segments provided: " + segments);
}
return Flowable.fromPublisher(conversionFunction.apply(group)
.to(transformer::apply))
.map(r -> Notifications.value(r, segment))
.concatWith(Single.just(Notifications.segmentComplete(segment)));
}, segments.size(), Math.min(keysToInclude.size(), Flowable.bufferSize()))
.concatWith(Flowable.fromIterable(segments).map(Notifications::segmentComplete));
}
};
}
// This method is here for checkstyle, only reason we use this method is for throwing an exception, when we know
// there is a guaranteed first value, so it will never actually block.
@SuppressWarnings("checkstyle:forbiddenmethod")
static Object blockingFirst(Flowable<?> flowable) {
return flowable.blockingFirst();
}
private abstract static class BaseSegmentAwarePublisherSupplier<R> implements SegmentAwarePublisherSupplier<R> {
@Override
public Publisher<Notification<R>> publisherWithSegments() {
return flowableWithNotifications(false).filter(notification -> !notification.isLostSegment())
.map(n -> n);
}
@Override
public Publisher<NotificationWithLost<R>> publisherWithLostSegments(boolean reuseNotifications) {
return flowableWithNotifications(reuseNotifications);
}
abstract Flowable<NotificationWithLost<R>> flowableWithNotifications(boolean reuseNotifications);
}
private class SegmentAwarePublisherSupplierImpl<I, R> extends BaseSegmentAwarePublisherSupplier<R> {
private final IntSet segments;
private final CacheSet<I> cacheSet;
private final Predicate<? super I> predicate;
private final DeliveryGuarantee deliveryGuarantee;
private final Function<? super Publisher<I>, ? extends Publisher<R>> transformer;
private SegmentAwarePublisherSupplierImpl(IntSet segments, CacheSet<I> cacheSet,
Function<? super I, K> toKeyFunction, Set<K> keysToExclude,
DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
this.segments = segments;
this.cacheSet = cacheSet;
this.predicate = keysToExclude != null ? v -> !keysToExclude.contains(toKeyFunction.apply(v)) : null;
this.deliveryGuarantee = deliveryGuarantee;
this.transformer = transformer;
}
@Override
public Publisher<R> publisherWithoutSegments() {
Publisher<I> publisher = cacheSet.localPublisher(segments);
if (predicate != null) {
publisher = Flowable.fromPublisher(publisher)
.filter(predicate);
}
return transformer.apply(publisher);
}
Flowable<NotificationWithLost<R>> flowableWithNotifications(boolean reuseNotifications) {
switch (deliveryGuarantee) {
case AT_MOST_ONCE:
Notifications.NotificationBuilder<R> atMostBuilder = reuseNotifications ? Notifications.reuseBuilder() :
Notifications.newBuilder();
return Flowable.fromIterable(segments)
.concatMap(segment -> {
Flowable<I> flowable = Flowable.fromPublisher(cacheSet.localPublisher(segment));
if (predicate != null) {
flowable = flowable.filter(predicate);
}
return flowable.compose(transformer::apply)
.map(r -> atMostBuilder.value(r, segment))
.concatWith(Single.fromSupplier(() -> atMostBuilder.segmentComplete(segment)));
});
case AT_LEAST_ONCE:
case EXACTLY_ONCE:
// Need to use defer to have the shared variables between the various inner publishers but also
// isolate between multiple subscriptions
return Flowable.defer(() -> {
Notifications.NotificationBuilder<R> builder = reuseNotifications ? Notifications.reuseBuilder() :
Notifications.newBuilder();
IntSet concurrentSet = IntSets.concurrentCopyFrom(segments, maxSegment);
RemoveSegmentListener listener = new RemoveSegmentListener(concurrentSet);
changeListener.add(listener);
// Check topology before submitting
listener.verifyTopology(distributionManager.getCacheTopology());
return Flowable.fromIterable(segments).concatMap(segment -> {
if (!concurrentSet.contains(segment)) {
return Flowable.just(builder.segmentLost(segment));
}
Flowable<I> flowable = Flowable.fromPublisher(cacheSet.localPublisher(segment));
if (predicate != null) {
flowable = flowable.filter(predicate);
}
return flowable.compose(transformer::apply)
.map(r -> builder.value(r, segment))
.concatWith(Single.fromSupplier(() ->
concurrentSet.remove(segment) ?
builder.segmentComplete(segment) : builder.segmentLost(segment)
));
}).doFinally(() -> changeListener.remove(listener));
});
default:
throw new UnsupportedOperationException("Unsupported delivery guarantee: " + deliveryGuarantee);
}
}
}
@Override
public void segmentsLost(IntSet lostSegments) {
if (log.isTraceEnabled()) {
log.tracef("Notifying listeners of lost segments %s", lostSegments);
}
changeListener.forEach(lostSegments::forEach);
}
@Override
public CompletionStage<Long> sizePublisher(IntSet segments, long flags) {
SizeCommand command = commandsFactory.buildSizeCommand(
segments, EnumUtil.mergeBitSets(flags, FlagBitSets.CACHE_MODE_LOCAL));
InvocationContext ctx = invocationContextFactory.createInvocationContext(false, UNBOUNDED);
return CompletableFuture.completedFuture(invocationHelper.running().invoke(ctx, command));
}
private static Function<Object, PublisherResult<Object>> ignoreSegmentsFunction = value ->
new SegmentPublisherResult<>(IntSets.immutableEmptySet(), value);
static <R> Function<R, PublisherResult<R>> ignoreSegmentsFunction() {
return (Function) ignoreSegmentsFunction;
}
private <I, R> void handleParallelSegment(PrimitiveIterator.OfInt segmentIter, int initialSegment, CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
FlowableProcessor<R> processor, IntSet concurrentSegments, SegmentListener listener) {
try {
while (true) {
// The first run initialSegment will be 0 or greater. We use that segment and set it to -1 to notify
// our next run to try to steal a segment from the iterator to process until the iterator runs out
// of segments
int nextSegment;
if (initialSegment != -1) {
nextSegment = initialSegment;
initialSegment = -1;
} else {
nextSegment = getNextSegment(segmentIter);
if (nextSegment == -1) {
break;
}
}
Flowable<I> innerFlowable = Flowable.fromPublisher(set.localPublisher(nextSegment));
if (keysToExclude != null) {
innerFlowable = innerFlowable.filter(i -> !keysToExclude.contains(toKeyFunction.apply(i)));
}
CompletionStage<R> stage = collator.apply(innerFlowable);
// This will always be true if there isn't a store, however in most cases with a store this will
// be false as we have to wait until the store can publish all the entries (which is done asynchronously)
if (CompletionStages.isCompletedSuccessfully(stage)) {
// If we complete the iteration try to remove the segment - so it can't be suspected
concurrentSegments.remove(nextSegment);
// If we didn't lose the segment we can use its value, otherwise we just ignore it
if (!listener.segmentsLost.contains(nextSegment)) {
R result = CompletionStages.join(stage);
if (result != null) {
processor.onNext(result);
}
}
} else {
final FlowableProcessor<R> processorToUse = processor;
stage.whenComplete((value, t) -> {
if (t != null) {
processorToUse.onError(t);
} else {
// If we complete the iteration try to remove the segment - so it can't be suspected
concurrentSegments.remove(nextSegment);
// This segment was lost before we could complete our iteration - so we have to discard the result
if (!listener.segmentsLost.contains(nextSegment) && value != null) {
processor.onNext(value);
}
handleParallelSegment(segmentIter, -1, set, keysToExclude, toKeyFunction, collator,
processor, concurrentSegments, listener);
}
});
return;
}
}
processor.onComplete();
} catch (Throwable t) {
processor.onError(t);
}
}
/**
    * Retrieves the next int from the iterator in a thread-safe manner. This method
    * synchronizes on the iterator instance, so be sure not to use that object's monitor for anything else.
* If the iterator has been depleted this method will return -1 instead.
*
* @param segmentIter the iterator to retrieve the next segment from
* @return the next segment or -1 if there are none left
*/
private int getNextSegment(PrimitiveIterator.OfInt segmentIter) {
synchronized (segmentIter) {
if (segmentIter.hasNext()) {
return segmentIter.nextInt();
}
return -1;
}
}
private <I, R> CompletionStage<PublisherResult<R>> exactlyOnce(boolean parallelPublisher, CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
      // This has to be concurrent to allow different threads to update it (i.e. parallel) and to ensure
      // that a concurrent state transfer that loses a segment can observe which segments have completed
IntSet concurrentSegments = IntSets.concurrentCopyFrom(segments, maxSegment);
SegmentListener listener = new SegmentListener(concurrentSegments);
changeListener.add(listener);
listener.verifyTopology(distributionManager.getCacheTopology());
Flowable<R> resultFlowable;
if (parallelPublisher) {
resultFlowable = exactlyOnceParallel(set, keysToExclude, toKeyFunction, segments, collator,
listener, concurrentSegments);
} else {
resultFlowable = exactlyOnceSequential(set, keysToExclude, toKeyFunction, segments, collator,
listener, concurrentSegments);
}
return exactlyOnceHandleLostSegments(finalizer.apply(resultFlowable), listener);
}
protected <R> CompletionStage<PublisherResult<R>> exactlyOnceHandleLostSegments(CompletionStage<R> finalValue, SegmentListener listener) {
return localPublisherStrategy.exactlyOnceHandleLostSegments(finalValue, listener);
}
/**
* This method iteratively submits a task to operate on the cpu bound thread pool up to the number of cores - 1.
* The tasks perform a type of work stealing where they attempt to retrieve the next available segment and process
* them as fast as possible. It is entirely possible that a given task is never submitted due to the other tasks
* completing all the segments asynchronously. After the main thread has submitted all the tasks it will attempt
    * to steal a segment and run it if possible; if it succeeds, it will subsequently attempt to complete all remaining
* segments in the same fashion as the other threads. NOTE that this behavior is not normally how reactive
* streams are done as given operations are not normally performed until the returned Flowable is subscribed to, but
* for performance reasons this method eagerly publishes entries. This is because we do not have to context
* switch an additional thread and we know that it is subscribed to immediately after.
* <p>
    * The result for each segment is then published as a single value in the returned Flowable. Due
    * to the results being retrieved eagerly, it is entirely possible that results queue up if the Subscriber of the
    * Flowable is slow. But since results are reduced to a single value for each segment, this shouldn't become an issue.
* @param set CacheSet to retrieve the publisher for (non-nullable)
* @param keysToExclude whether given keys should be excluded from the processing (nullable)
* @param toKeyFunction function to convert an entry to a key to determine if it is excluded (must be non null if keysToExclude is)
* @param segments the segments to process results for (non-nullable)
* @param collator reducer to collate all the entries for a given segment into a single result (non-nullable)
    * @param listener listener that handles segments being lost and determines which results should be discarded (non-nullable)
    * @param concurrentSegments set of segments left to complete. An entry is removed when a segment is completed to
    *                           prevent a data rehash from causing a retry for the given segment
* @param <I> input type of the data
* @param <R> resulting value
* @return Flowable that publishes a result for each segment
*/
protected <I, R> Flowable<R> exactlyOnceParallel(CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
SegmentListener listener, IntSet concurrentSegments) {
return localPublisherStrategy.exactlyOnceParallel(set, keysToExclude, toKeyFunction, segments, collator, listener, concurrentSegments);
}
protected <I, R> Flowable<R> exactlyOnceSequential(CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
SegmentListener listener, IntSet concurrentSegments) {
return localPublisherStrategy.exactlyOnceSequential(set, keysToExclude, toKeyFunction, segments, collator, listener, concurrentSegments);
}
private AdvancedCache<K, V> getCache(DeliveryGuarantee deliveryGuarantee, long explicitFlags) {
AdvancedCache<K, V> cache = deliveryGuarantee == DeliveryGuarantee.AT_MOST_ONCE ? this.cache : remoteCache;
if (explicitFlags != EnumUtil.EMPTY_BIT_SET) {
return cache.withFlags(EnumUtil.enumSetOf(explicitFlags, Flag.class));
}
return cache;
}
private <R> CompletionStage<PublisherResult<R>> handleSpecificKeys(boolean parallelPublisher, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
AdvancedCache<K, V> cache = getCache(deliveryGuarantee, explicitFlags);
return handleSpecificObjects(parallelPublisher, keysToInclude, keysToExclude, keyFlowable ->
// Filter out all the keys that aren't in the cache
keyFlowable.concatMapMaybe(key ->
Maybe.fromCompletionStage(cache.containsKeyAsync(key)
.thenApply(contains -> contains ? key : null))
)
, collator, finalizer);
}
private <R> CompletionStage<PublisherResult<R>> handleSpecificEntries(boolean parallelPublisher, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
AdvancedCache<K, V> cache = getCache(deliveryGuarantee, explicitFlags);
return handleSpecificObjects(parallelPublisher, keysToInclude, keysToExclude, keyFlowable ->
keyFlowable.concatMapMaybe(k -> {
CompletableFuture<CacheEntry<K, V>> future = cache.getCacheEntryAsync(k);
future = future.thenApply(entry -> {
if (entry instanceof MVCCEntry) {
// Scattered cache can return MVCCEntry instances
entry = new ImmortalCacheEntry(entry.getKey(), entry.getValue());
}
return entry;
});
return Maybe.fromCompletionStage(future);
}).filter(e -> e != NullCacheEntry.getInstance())
, collator, finalizer);
}
private <I, R> CompletionStage<PublisherResult<R>> handleSpecificObjects(boolean parallelPublisher, Set<K> keysToInclude,
Set<K> keysToExclude, Function<? super Flowable<K>, ? extends Flowable<I>> keyTransformer,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
Flowable<K> keyFlowable = Flowable.fromIterable(keysToInclude);
if (keysToExclude != null) {
keyFlowable = keyFlowable.filter(k -> !keysToExclude.contains(k));
}
if (parallelPublisher) {
// We send 16 keys to each rail to be parallelized - if ParallelFlowable had a method like railCompose
// we could use it, but unfortunately it does not.
Flowable<R> stageFlowable = keyFlowable.window(16)
.flatMapMaybe(keys -> {
// Due to window abandonment (check RxJava3 docs) we must subscribe synchronously and then
// observe on the publisher for parallelism
CompletionStage<R> stage = keyTransformer.apply(keys.observeOn(nonBlockingScheduler))
.to(collator::apply);
return Maybe.fromCompletionStage(stage);
});
return finalizer.apply(stageFlowable).thenApply(ignoreSegmentsFunction());
} else {
return keyTransformer.apply(keyFlowable)
.to(collator::apply)
.thenApply(ignoreSegmentsFunction());
}
}
private <I, R> CompletionStage<R> parallelAtMostOnce(CacheSet<I> set, Set<K> keysToExclude,
Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
Flowable<R> stageFlowable = Flowable.fromIterable(segments)
.parallel(cpuCount)
.runOn(nonBlockingScheduler)
.concatMap(segment -> {
Flowable<I> innerFlowable = Flowable.fromPublisher(set.localPublisher(segment));
if (keysToExclude != null) {
innerFlowable = innerFlowable.filter(i -> !keysToExclude.contains(toKeyFunction.apply(i)));
}
// TODO Make the collator return a Flowable/Maybe
CompletionStage<R> stage = collator.apply(innerFlowable);
return Maybe.fromCompletionStage(stage).toFlowable();
})
.sequential();
return finalizer.apply(stageFlowable);
}
private <I, R> CompletionStage<R> atMostOnce(boolean parallel, CacheSet<I> set, Set<K> keysToExclude,
Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
if (parallel) {
return parallelAtMostOnce(set, keysToExclude, toKeyFunction, segments, collator, finalizer);
} else {
Flowable<I> flowable = Flowable.fromPublisher(set.localPublisher(segments));
if (keysToExclude != null) {
flowable = flowable.filter(i -> !keysToExclude.contains(toKeyFunction.apply(i)));
}
return collator.apply(flowable);
}
}
private <I, R> CompletionStage<PublisherResult<R>> atLeastOnce(boolean parallel, CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> collator,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
SegmentListener listener = new SegmentListener(segments);
changeListener.add(listener);
listener.verifyTopology(distributionManager.getCacheTopology());
CompletionStage<R> stage = atMostOnce(parallel, set, keysToExclude, toKeyFunction, segments, collator, finalizer);
return handleLostSegments(stage, listener);
}
protected <R> CompletionStage<PublisherResult<R>> handleLostSegments(CompletionStage<R> stage, SegmentListener segmentListener) {
return stage.thenApply(value -> {
IntSet lostSegments = segmentListener.segmentsLost;
if (lostSegments.isEmpty()) {
return LocalPublisherManagerImpl.<R>ignoreSegmentsFunction().apply(value);
} else {
return new SegmentPublisherResult<>(lostSegments, value);
}
}).whenComplete((u, t) -> changeListener.remove(segmentListener));
}
private CacheSet<K> getKeySet(long explicitFlags) {
KeySetCommand<?, ?> command = commandsFactory.buildKeySetCommand(explicitFlags);
InvocationContext ctx = invocationContextFactory.createInvocationContext(false, UNBOUNDED);
return invocationHelper.running().invoke(ctx, command);
}
private CacheSet<CacheEntry<K, V>> getEntrySet(long explicitFlags) {
EntrySetCommand<?,?> command = commandsFactory.buildEntrySetCommand(explicitFlags);
InvocationContext ctx = invocationContextFactory.createInvocationContext(false, UNBOUNDED);
return invocationHelper.running().invoke(ctx, command);
}
class RemoveSegmentListener implements IntConsumer {
private final IntSet segments;
RemoveSegmentListener(IntSet segments) {
this.segments = segments;
}
@Override
public void accept(int segment) {
if (segments.remove(segment)) {
if (log.isTraceEnabled()) {
log.tracef("Listener %s lost segment %d", this, segment);
}
}
}
void verifyTopology(LocalizedCacheTopology localizedCacheTopology) {
for (PrimitiveIterator.OfInt segmentIterator = segments.iterator(); segmentIterator.hasNext(); ) {
int segment = segmentIterator.nextInt();
if (!localizedCacheTopology.isSegmentReadOwner(segment)) {
if (log.isTraceEnabled()) {
log.tracef("Listener %s lost segment %d before invocation", this, segment);
}
segmentIterator.remove();
}
}
}
}
class SegmentListener implements IntConsumer {
protected final IntSet segments;
protected final IntSet segmentsLost;
SegmentListener(IntSet segments) {
this.segments = segments;
// This is a concurrent set for visibility and technically because state transfer could call this concurrently
this.segmentsLost = IntSets.concurrentSet(maxSegment);
}
@Override
public void accept(int segment) {
if (segments.contains(segment)) {
if (log.isTraceEnabled()) {
log.tracef("Listener %s lost segment %d", this, segment);
}
segmentsLost.set(segment);
}
}
void verifyTopology(LocalizedCacheTopology localizedCacheTopology) {
for (PrimitiveIterator.OfInt segmentIterator = segments.iterator(); segmentIterator.hasNext(); ) {
int segment = segmentIterator.nextInt();
if (!localizedCacheTopology.isSegmentReadOwner(segment)) {
segmentsLost.set(segment);
}
}
}
}
abstract class LocalEntryPublisherStrategy {
abstract <I, R> Flowable<R> exactlyOnceParallel(CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
SegmentListener listener, IntSet concurrentSegments);
abstract <I, R> Flowable<R> exactlyOnceSequential(CacheSet<I> set,
Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
SegmentListener listener, IntSet concurrentSegments);
abstract <R> CompletionStage<PublisherResult<R>> exactlyOnceHandleLostSegments(CompletionStage<R> finalValue, SegmentListener listener);
}
class NonSegmentedEntryPublisherStrategy extends LocalEntryPublisherStrategy {
@Override
<I, R> Flowable<R> exactlyOnceParallel(CacheSet<I> set, Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments, Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer, SegmentListener listener, IntSet concurrentSegments) {
Flowable<I> flowable = Flowable.fromPublisher(set.localPublisher(segments));
if (keysToExclude != null) {
flowable = flowable.filter(i -> !keysToExclude.contains(toKeyFunction.apply(i)));
}
return flowable.buffer(PARALLEL_BATCH_SIZE)
.parallel(cpuCount)
.runOn(nonBlockingScheduler)
.flatMap(buffer -> Flowable.fromCompletionStage(transformer.apply(Flowable.fromIterable(buffer))), false, cpuCount)
.sequential();
}
@Override
<I, R> Flowable<R> exactlyOnceSequential(CacheSet<I> set, Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments, Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer, SegmentListener listener, IntSet concurrentSegments) {
Flowable<I> flowable = Flowable.fromPublisher(set.localPublisher(segments));
if (keysToExclude != null) {
flowable = flowable.filter(i -> !keysToExclude.contains(toKeyFunction.apply(i)));
}
return Flowable.fromCompletionStage(transformer.apply(flowable));
}
@Override
<R> CompletionStage<PublisherResult<R>> exactlyOnceHandleLostSegments(CompletionStage<R> finalValue, SegmentListener listener) {
return finalValue.thenApply(value -> {
IntSet lostSegments = listener.segmentsLost;
if (lostSegments.isEmpty()) {
return LocalPublisherManagerImpl.<R>ignoreSegmentsFunction().apply(value);
} else {
            // We treat all segments as being lost if any of ours are lost
// NOTE: we never remove any segments from this set at all - so it will contain all requested segments
return new SegmentPublisherResult<R>(listener.segments, null);
}
}).whenComplete((u, t) -> changeListener.remove(listener));
}
}
class SegmentedLocalPublisherStrategyLocal extends LocalEntryPublisherStrategy {
@Override
public <I, R> Flowable<R> exactlyOnceParallel(CacheSet<I> set, Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments, Function<? super Publisher<I>, ? extends CompletionStage<R>> collator, SegmentListener listener, IntSet concurrentSegments) {
// The invoking thread will process entries so make sure we only have cpuCount number of tasks
int extraThreadCount = cpuCount - 1;
Flowable<R>[] processors = new Flowable[extraThreadCount + 1];
PrimitiveIterator.OfInt segmentIter = segments.iterator();
for (int i = 0; i < extraThreadCount; i++) {
int initialSegment = getNextSegment(segmentIter);
// If the iterator is already exhausted, don't submit to the remaining threads
if (initialSegment == -1) {
processors[i] = Flowable.empty();
continue;
}
// This is specifically a UnicastProcessor as it allows for queueing of elements before you have subscribed
// to the Flowable. It may be worth investigating using a PublishProcessor and eagerly subscribing to avoid
// the cost of queueing the results.
FlowableProcessor<R> processor = UnicastProcessor.create();
processors[i] = processor;
nonBlockingScheduler.scheduleDirect(() ->
handleParallelSegment(segmentIter, initialSegment, set, keysToExclude, toKeyFunction, collator, processor,
concurrentSegments, listener));
}
// After we have submitted all the tasks to other threads attempt to run the segments in our invoking thread
int initialSegment = getNextSegment(segmentIter);
if (initialSegment != -1) {
FlowableProcessor<R> processor = UnicastProcessor.create();
processors[extraThreadCount] = processor;
handleParallelSegment(segmentIter, initialSegment, set, keysToExclude, toKeyFunction, collator, processor,
concurrentSegments, listener);
} else {
processors[extraThreadCount] = Flowable.empty();
}
return ParallelFlowable.fromArray(processors).sequential();
}
@Override
public <I, R> Flowable<R> exactlyOnceSequential(CacheSet<I> set, Set<K> keysToExclude, Function<I, K> toKeyFunction, IntSet segments, Function<? super Publisher<I>, ? extends CompletionStage<R>> collator, SegmentListener listener, IntSet concurrentSegments) {
return Flowable.fromIterable(segments).concatMapMaybe(segment -> {
Flowable<I> innerFlowable = Flowable.fromPublisher(set.localPublisher(segment))
// If we complete the iteration try to remove the segment - so it can't be suspected
.doOnComplete(() -> concurrentSegments.remove(segment));
if (keysToExclude != null) {
innerFlowable = innerFlowable.filter(i -> !keysToExclude.contains(toKeyFunction.apply(i)));
}
CompletionStage<R> stage = collator.apply(innerFlowable);
// This will always be true unless there is a store
if (CompletionStages.isCompletedSuccessfully(stage)) {
if (listener.segmentsLost.contains(segment)) {
return Maybe.empty();
}
return Maybe.fromCompletionStage(stage);
}
return Maybe.fromCompletionStage(stage.thenCompose(value -> {
// This means the segment was lost in the middle of processing
if (listener.segmentsLost.contains(segment)) {
return CompletableFutures.completedNull();
}
return CompletableFuture.completedFuture(value);
}));
});
}
@Override
public <R> CompletionStage<PublisherResult<R>> exactlyOnceHandleLostSegments(CompletionStage<R> finalValue, SegmentListener listener) {
return handleLostSegments(finalValue, listener);
}
}
}
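/*
 * Illustrative sketch (not part of the original sources): the work-stealing shape behind
 * exactlyOnceParallel/handleParallelSegment above, reduced to plain threads. Workers repeatedly
 * steal the next segment from a shared iterator (synchronized exactly like getNextSegment) until
 * it is exhausted; the invoking thread participates as the last worker.
 */
final class WorkStealingSketch {
   private static int nextSegment(PrimitiveIterator.OfInt iter) {
      synchronized (iter) {
         return iter.hasNext() ? iter.nextInt() : -1;
      }
   }
   static void process(PrimitiveIterator.OfInt sharedSegments, int threadCount) {
      Runnable worker = () -> {
         int segment;
         while ((segment = nextSegment(sharedSegments)) != -1) {
            System.out.println(Thread.currentThread().getName() + " processing segment " + segment);
         }
      };
      // submit threadCount - 1 extra workers; some may never run a segment if others finish first
      for (int i = 0; i < threadCount - 1; i++) {
         new Thread(worker).start();
      }
      worker.run(); // the calling thread steals segments too
   }
}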
| 46,952
| 52.355682
| 265
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/LocalPublisherManager.java
|
package org.infinispan.reactive.publisher.impl;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult;
import org.reactivestreams.Publisher;
/**
 * Handles locally publishing entries from the cache. This manager will return results that contain suspected segments
* @param <K> The key type for the underlying cache
* @param <V> the value type for the underlying cache
* @author wburns
* @since 10.0
*/
public interface LocalPublisherManager<K, V> {
/**
* Same as {@link #entryReduction(boolean, IntSet, Set, Set, long, DeliveryGuarantee, Function, Function)}
* except that the source publisher provided to the <b>transformer</b> is made up of keys only.
* @param <R> return value type
* @return CompletionStage that contains the resulting value when complete
*/
<R> CompletionStage<PublisherResult<R>> keyReduction(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer);
/**
* Performs the given <b>transformer</b> and <b>finalizer</b> on data in the cache that is local, resulting in a
* single value. Depending on the <b>deliveryGuarantee</b> the <b>transformer</b> may be invoked <b>1..numSegments</b>
* times. It could be that the <b>transformer</b> is invoked for every segment and produces a result. All of these
* results are then fed into the <b>finalizer</b> to produce a final result. If publisher is parallel the <b>finalizer</b>
* will be invoked on each node to ensure there is only a single result per node.
* <p>
* The effects of the provided <b>deliveryGuarantee</b> are as follows:
* <table>
* <tr>
    * <th>Guarantee</th><th>Parallel</th><th>Behavior</th>
* </tr>
* <tr>
* <td>AT_MOST_ONCE</td> <td>TRUE</td><td>Each segment is a publisher passed to the transformer individually. Each result of the transformer is supplied to the finalizer. All segments are always complete, ignoring loss of data</td>
* </tr>
* <tr>
    * <td>AT_MOST_ONCE</td> <td>FALSE</td><td>A single publisher for all segments is created and passed to the transformer. That result is returned and the finalizer is never used. All segments are always complete, ignoring loss of data</td>
* </tr>
* <tr>
    * <td>AT_LEAST_ONCE</td> <td>TRUE</td><td>Same as AT_MOST_ONCE, but if a segment is lost in the middle it is returned as a suspected segment, while still returning all values</td>
    * </tr>
    * <tr>
    * <td>AT_LEAST_ONCE</td> <td>FALSE</td><td>Same as AT_MOST_ONCE, but if a segment is lost in the middle it is returned as a suspected segment, while still returning all values</td>
* </tr>
* <tr>
    * <td>EXACTLY_ONCE</td> <td>TRUE</td><td>Each segment is a publisher passed to the transformer individually. Each result is only accepted if the segment was owned for the entire duration of the Subscription.</td>
* </tr>
* <tr>
* <td>EXACTLY_ONCE</td> <td>FALSE</td><td>Same as EXACTLY_ONCE/TRUE, except the publishers are consumed one at a time.</td>
* </tr>
* </table>
*
* @param <R> return value type
* @param parallelPublisher Whether the publisher should be parallelized
* @param segments determines what entries should be evaluated by only using ones that map to the given segments (must not be null)
* @param keysToInclude set of keys that should only be used. May be null, in which case all provided entries for the given segments will be evaluated
* @param keysToExclude set of keys that should not be used. May be null, in which case all provided entries will be evaluated
* @param explicitFlags cache flags
* @param deliveryGuarantee delivery guarantee for given entries
* @param transformer reduces the given publisher of data eventually into a single value. Must not be null.
* @param finalizer reduces all of the single values produced by the transformer or this finalizer into one final value. May be null if not parallel
* @return CompletionStage that contains the resulting value when complete
*/
<R> CompletionStage<PublisherResult<R>> entryReduction(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer);
/**
* Same as {@link #entryPublisher( IntSet, Set, Set, long, DeliveryGuarantee, Function)}
* except that the source publisher provided to the <b>transformer</b> is made up of keys only.
* @param <R> return value type
* @return SegmentAwarePublisher that will publish the values when subscribed to along with segment completions and losses
*/
<R> SegmentAwarePublisherSupplier<R> keyPublisher(IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends Publisher<R>> transformer);
/**
* Performs the given <b>transformer</b> on data in the cache that is local, resulting in a stream of values of
* possibly varying size. The <b>transformer</b> will be invoked
* <b>numSegments</b> times. The table below shows the behavior for the various delivery guarantees.
* <p>
* The effects of the provided <b>deliveryGuarantee</b> are as follows:
* <table>
* <tr>
    * <th>Guarantee</th><th>Behavior</th>
    * </tr>
    * <tr>
    * <td>AT_MOST_ONCE</td> <td>For each segment a publisher is passed to the transformer sequentially. All segments are always complete, ignoring loss of data</td>
    * </tr>
    * <tr>
    * <td>AT_LEAST_ONCE</td> <td>Same as AT_MOST_ONCE, but if a segment is lost in the middle it is returned as a suspected segment, possibly dropping values in that segment.</td>
    * </tr>
    * <tr>
    * <td>EXACTLY_ONCE</td> <td>Same as AT_LEAST_ONCE except whenever a segment is lost the value(s) collected in the same response for that segment are always dropped.</td>
* </tr>
* </table>
* <p>
* The returned publisher supplier method {@link SegmentAwarePublisherSupplier#publisherWithLostSegments()} will
    * guarantee that all entries from a given segment are returned first, followed by a segment lost or completed notification.
    * This publisher will not intermingle entries from different segments.
* This guarantee should allow for callers to be able to optimize knowing this since segments can be completed
* quicker and fewer entries should have to be retained in memory.
* @param segments determines what entries should be evaluated by only using ones that map to the given segments (must not be null)
* @param keysToInclude set of keys that should only be used. May be null, in which case all provided entries for the given segments will be evaluated
* @param keysToExclude set of keys that should not be used. May be null, in which case all provided entries will be evaluated
* @param explicitFlags cache flags
* @param deliveryGuarantee delivery guarantee for given entries
* @param transformer transforms the values to another value (0 to many). Must not be null.
* @param <R> return value type
* @return SegmentAwarePublisher that will publish the values when subscribed to along with segment completions and losses
*/
<R> SegmentAwarePublisherSupplier<R> entryPublisher(IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends Publisher<R>> transformer);
/**
* Method to invoke when a set of segments are being removed from this node. This way operations can be aware
* of possible data loss while processing.
* @param lostSegments the segments that are being removed from this node
*/
void segmentsLost(IntSet lostSegments);
CompletionStage<Long> sizePublisher(IntSet segments, long flags);
}
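/*
 * Illustrative sketch (not part of the original sources): a hypothetical caller reducing all
 * local keys to a count via keyReduction. The collator counts keys per invocation and the
 * finalizer sums those counts, matching the transformer/finalizer contract documented above.
 */
final class KeyCountSketch {
   static <K> CompletionStage<PublisherResult<Long>> countLocalKeys(
         LocalPublisherManager<K, ?> manager, IntSet segments) {
      return manager.keyReduction(false, segments, null, null, 0L, DeliveryGuarantee.AT_LEAST_ONCE,
            // collator: count the keys published for the invoked segments
            keys -> io.reactivex.rxjava3.core.Flowable.fromPublisher(keys).count().toCompletionStage(),
            // finalizer: sum the per-invocation counts into one node-local total
            counts -> io.reactivex.rxjava3.core.Flowable.fromPublisher(counts)
                  .reduce(0L, Long::sum).toCompletionStage());
   }
}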
| 8,774
| 64
| 240
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/PublisherHandler.java
|
package org.infinispan.reactive.publisher.impl;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.function.Function;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
import org.infinispan.reactive.RxJavaInterop;
import org.infinispan.reactive.publisher.impl.commands.batch.InitialPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.batch.KeyPublisherResponse;
import org.infinispan.reactive.publisher.impl.commands.batch.PublisherResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscription;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.FlowableSubscriber;
import io.reactivex.rxjava3.core.Single;
import net.jcip.annotations.GuardedBy;
/**
* Handler for holding publisher results between requests of data
* @since 10.1
*/
@Scope(Scopes.NAMED_CACHE)
@Listener(observation = Listener.Observation.POST)
public class PublisherHandler {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final ConcurrentMap<Object, PublisherState> currentRequests = new ConcurrentHashMap<>();
@Inject CacheManagerNotifier managerNotifier;
@Inject @ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
ExecutorService nonBlockingExecutor;
@Inject LocalPublisherManager localPublisherManager;
@ViewChanged
public void viewChange(ViewChangedEvent event) {
List<Address> newMembers = event.getNewMembers();
Iterator<PublisherState> iter = currentRequests.values().iterator();
while (iter.hasNext()) {
PublisherState state = iter.next();
Address owner = state.getOrigin();
         // If the originating node is no longer a member then we have to close its publishers - null means the
         // local node, so that can't be suspected
if (owner != null && !newMembers.contains(owner)) {
log.tracef("View changed and no longer contains %s, closing %s publisher", owner, state.requestId);
state.cancel();
iter.remove();
}
}
}
@Start
public void start() {
managerNotifier.addListener(this);
}
@Stop
public void stop() {
// If our cache is stopped we should remove our listener, since this doesn't mean the cache manager is stopped
managerNotifier.removeListener(this);
}
/**
* Registers a publisher given the initial command arguments. The value returned will eventually contain the
* first batched response for the publisher of the given id.
* @param command the command with arguments to start a publisher with
* @param <I> input type
* @param <R> output type
    * @return future that contains, or will eventually contain, the first response
*/
public <I, R> CompletableFuture<PublisherResponse> register(InitialPublisherCommand<?, I, R> command) {
PublisherState publisherState;
String requestId = command.getRequestId();
if (command.isTrackKeys()) {
publisherState = new KeyPublisherState(requestId, command.getOrigin(), command.getBatchSize());
} else {
publisherState = new PublisherState(requestId, command.getOrigin(), command.getBatchSize());
}
PublisherState previousState;
if ((previousState = currentRequests.put(requestId, publisherState)) != null) {
if (!previousState.complete) {
currentRequests.remove(requestId);
throw new IllegalStateException("There was already a publisher registered for id " + requestId + " that wasn't complete!");
}
// We have a previous state that is already completed - this is most likely due to a failover and our node
// now owns another segment but the async thread hasn't yet cleaned up our state.
if (log.isTraceEnabled()) {
log.tracef("Closing prior state for %s to make room for a new request", requestId);
}
previousState.cancel();
}
publisherState.startProcessing(command);
return publisherState.results();
}
/**
* Retrieves the next response for the same request id that was configured on the command when invoking
* {@link #register(InitialPublisherCommand)}.
* @param requestId the unique request id to continue the response with
    * @return future that contains, or will eventually contain, the next response
*/
public CompletableFuture<PublisherResponse> getNext(String requestId) {
PublisherState publisherState = currentRequests.get(requestId);
if (publisherState == null) {
throw new IllegalStateException("Publisher for requestId " + requestId + " doesn't exist!");
}
return publisherState.results();
}
/**
* Returns how many publishers are currently open
* @return how many publishers are currently open
*/
public int openPublishers() {
return currentRequests.size();
}
/**
* Closes the publisher that maps to the given request id
* @param requestId unique identifier for the request
*/
public void closePublisher(String requestId) {
PublisherState state;
if ((state = currentRequests.remove(requestId)) != null) {
if (log.isTraceEnabled()) {
log.tracef("Closed publisher using requestId %s", requestId);
}
state.cancel();
}
}
/**
* Optionally closes the state if this state is still registered for the given requestId
* @param requestId unique identifier for the given request
* @param state state to cancel if it is still registered
*/
private void closePublisher(String requestId, PublisherState state) {
if (currentRequests.remove(requestId, state)) {
if (log.isTraceEnabled()) {
log.tracef("Closed publisher from completion using requestId %s", requestId);
}
state.cancel();
} else if (log.isTraceEnabled()) {
log.tracef("A concurrent request already closed the prior state for %s", requestId);
}
}
public static class SegmentResult {
private final int segment;
private final int entryCount;
public SegmentResult(int segment, int entryCount) {
this.segment = segment;
this.entryCount = entryCount;
}
public int getEntryCount() {
return entryCount;
}
public int getSegment() {
return segment;
}
@Override
public String toString() {
return "SegmentResult{" +
"segment=" + segment +
", entryCount=" + entryCount +
'}';
}
}
/**
* Actual subscriber that listens to the local publisher and stores state and prepares responses as they are ready.
* This subscriber works by initially requesting {@code batchSize + 1} entries when it is subscribed. The {@code +1}
* is done purposefully due to how segment completion is guaranteed to be notified just before the next value of
* a different segment is returned. This way a given batchSize will have a complete view of which segments were
* completed in it. Subsequent requests will only request {@code batchSize} since our outstanding request count
* is always 1 more.
* <p>
    * When a full batch has been collected or the publisher completes, we create a PublisherResponse that is either
    * passed to the waiting CompletableFuture or registered as a completed future for a pending request to receive.
* <p>
    * The state keeps track of all segments that have completed or been lost during the publisher response; these are
    * returned on the next response. The state also records where the last segment completed ({@code segmentStart}), so
    * our response can tell which values were not part of the completed segments. This also allows us to drop entries
    * from a segment that was just lost, which is preferable since otherwise the coordinator would have to resend this
    * value or retrieve the value a second time; dropping them reduces how often the keys need to be replicated.
* <p>
* This class relies heavily upon the fact that the reactive stream spec specifies that {@code onNext},
* {@code onError}, and {@code onComplete} are invoked in a thread safe manner as well as the {@code accept} method
* on the {@code IntConsumer} when a segment is completed or lost. This allows us to use a simple array with an offset
* that is used to collect the response.
*/
private class PublisherState implements FlowableSubscriber<SegmentAwarePublisherSupplier.NotificationWithLost<Object>>, Runnable {
final String requestId;
final Address origin;
final int batchSize;
// Stores future responses - Normally this only ever contains zero or one result. This can contain two in the
// case of having a single entry in the last result. Due to the nature of having to request one additional
// entry to see segment completion, this is the tradeoff
@GuardedBy("this")
private CompletableFuture<PublisherResponse> futureResponse = null;
Subscription upstream;
// The remainder of the values hold the values between results received - These do not need synchronization
// as the Subscriber contract guarantees these are invoked serially and has proper visibility
Object[] results;
List<SegmentResult> segmentResults;
int pos;
IntSet completedSegments;
IntSet lostSegments;
int currentSegment = -1;
int segmentEntries;
// Set to true when the last futureResponse has been set - meaning the next response will be the last
volatile boolean complete;
private PublisherState(String requestId, Address origin, int batchSize) {
this.requestId = requestId;
this.origin = origin;
this.batchSize = batchSize;
results = new Object[batchSize];
}
void startProcessing(InitialPublisherCommand command) {
SegmentAwarePublisherSupplier<Object> sap;
if (command.isEntryStream()) {
sap = localPublisherManager.entryPublisher(command.getSegments(), command.getKeys(), command.getExcludedKeys(),
command.getExplicitFlags(), command.getDeliveryGuarantee(), command.getTransformer());
} else {
sap = localPublisherManager.keyPublisher(command.getSegments(), command.getKeys(), command.getExcludedKeys(),
command.getExplicitFlags(), command.getDeliveryGuarantee(), command.getTransformer());
}
Flowable.fromPublisher(sap.publisherWithLostSegments(true))
.subscribe(this);
}
@Override
public void onSubscribe(Subscription s) {
if (upstream != null) {
throw new IllegalStateException("Subscription was already set!");
}
this.upstream = Objects.requireNonNull(s);
requestMore(s, batchSize);
}
protected void requestMore(Subscription subscription, int requestAmount) {
subscription.request(requestAmount);
}
@Override
public void onError(Throwable t) {
complete = true;
log.trace("Exception encountered while processing publisher", t);
synchronized (this) {
if (futureResponse == null) {
futureResponse = CompletableFuture.failedFuture(t);
} else {
futureResponse.completeExceptionally(t);
}
}
}
@Override
public void onComplete() {
prepareResponse(true);
if (log.isTraceEnabled()) {
log.tracef("Completed state for %s", requestId);
}
}
@Override
public void onNext(SegmentAwarePublisherSupplier.NotificationWithLost notification) {
if (!notification.isValue()) {
int segment;
if (notification.isSegmentComplete()) {
segment = notification.completedSegment();
if (segmentEntries > 0) {
addToSegmentResults(segment, segmentEntries);
}
segmentComplete(segment);
} else {
segment = notification.lostSegment();
segmentLost(segment);
}
// Need to request more data as our responses are based on entries and not segments
requestMore(upstream, 1);
return;
}
int segment = notification.valueSegment();
assert currentSegment == segment || currentSegment == -1;
currentSegment = segment;
segmentEntries++;
results[pos++] = notification.value();
// Means we just finished a batch
if (pos == results.length) {
prepareResponse(false);
}
}
public void segmentComplete(int segment) {
assert currentSegment == segment || currentSegment == -1;
if (log.isTraceEnabled()) {
log.tracef("Completing segment %s for %s", segment, requestId);
}
if (completedSegments == null) {
completedSegments = IntSets.mutableEmptySet();
}
completedSegments.set(segment);
segmentEntries = 0;
currentSegment = -1;
}
public void segmentLost(int segment) {
assert currentSegment == segment || currentSegment == -1;
if (log.isTraceEnabled()) {
log.tracef("Lost segment %s for %s", segment, requestId);
}
if (lostSegments == null) {
lostSegments = IntSets.mutableEmptySet();
}
lostSegments.set(segment);
// Just reset the pos back to the segment start - ignoring those entries
// This saves us from sending these entries back and then having to resend the key to the new owner
pos -= segmentEntries;
segmentEntries = 0;
currentSegment = -1;
}
public void cancel() {
Subscription subscription = upstream;
if (subscription != null) {
subscription.cancel();
}
}
void resetValues() {
this.results = new Object[batchSize];
this.segmentResults = null;
this.completedSegments = null;
this.lostSegments = null;
this.pos = 0;
this.currentSegment = -1;
this.segmentEntries = 0;
}
PublisherResponse generateResponse(boolean complete) {
return new PublisherResponse(results, completedSegments, lostSegments, pos, complete,
segmentResults == null ? Collections.emptyList() : segmentResults);
}
void prepareResponse(boolean complete) {
if (currentSegment != -1) {
addToSegmentResults(currentSegment, segmentEntries);
}
PublisherResponse response = generateResponse(complete);
if (log.isTraceEnabled()) {
log.tracef("Response ready %s with id %s for requestor %s", response, requestId, origin);
}
if (!complete) {
// Have to reset the values if we expect to send another response
resetValues();
}
this.complete = complete;
CompletableFuture<PublisherResponse> futureToComplete = null;
synchronized (this) {
if (futureResponse != null) {
if (futureResponse.isDone()) {
if (!futureResponse.isCompletedExceptionally()) {
throw new IllegalStateException("Response already completed with " + CompletionStages.join(futureResponse) +
" but we want to complete with " + response);
}
log.tracef("Response %s already completed with an exception, ignoring values", System.identityHashCode(futureResponse));
}
futureToComplete = futureResponse;
futureResponse = null;
} else {
futureResponse = CompletableFuture.completedFuture(response);
if (log.isTraceEnabled()) {
log.tracef("Eager response completed %d for request id %s", System.identityHashCode(futureResponse), requestId);
}
}
}
if (futureToComplete != null) {
if (log.isTraceEnabled()) {
log.tracef("Completing waiting future %d for request id %s", System.identityHashCode(futureToComplete), requestId);
}
// Complete this outside of synchronized block
futureToComplete.complete(response);
}
}
public Address getOrigin() {
return origin;
}
/**
    * Retrieves either the already completed result or registers a new future to be completed. This also pre-starts
* the next batch to be ready for the next request as it comes, which is submitted on the {@link #nonBlockingExecutor}.
* @return future that will contain the publisher response with the data
*/
CompletableFuture<PublisherResponse> results() {
boolean submitRequest = false;
CompletableFuture<PublisherResponse> currentFuture;
synchronized (this) {
if (futureResponse == null) {
currentFuture = new CompletableFuture<>();
currentFuture.thenRunAsync(this, nonBlockingExecutor);
futureResponse = currentFuture;
} else {
currentFuture = futureResponse;
futureResponse = null;
submitRequest = true;
}
}
if (submitRequest) {
// Handles closing publisher or requests next batch if not complete
            // Note this is not done in the synchronized block in case the executor runs the task in the calling thread
nonBlockingExecutor.execute(this);
}
if (log.isTraceEnabled()) {
log.tracef("Retrieved future %d for request id %s", System.identityHashCode(currentFuture), requestId);
}
return currentFuture;
}
void addToSegmentResults(int segment, int entryCount) {
if (segmentResults == null) {
segmentResults = new ArrayList<>();
}
segmentResults.add(new SegmentResult(segment, entryCount));
}
/**
* This will either request the next batch of values or completes the request. Note the completion has to be done
* after the last result is returned, thus it cannot be eagerly closed in most cases.
*/
@Override
public void run() {
if (log.isTraceEnabled()) {
log.tracef("Running handler for request id %s", requestId);
}
if (!complete) {
int requestAmount = batchSize;
if (log.isTraceEnabled()) {
log.tracef("Requesting %d additional entries for %s", requestAmount, requestId);
}
requestMore(upstream, requestAmount);
} else {
synchronized (this) {
if (futureResponse == null) {
closePublisher(requestId, this);
} else if (log.isTraceEnabled()) {
log.tracef("Skipping run as handler is complete, but still has some results for id %s", requestId);
}
}
}
}
}
/**
* Special PublisherState that listens also to what key generates a given set of values. This state is only used
* when keys must be tracked (EXACTLY_ONCE guarantee with map or flatMap)
* <p>
    * The general idea is the publisher will notify when a key or entry (referred to as just key from now on) is sent
    * down the pipeline, so we can view all the values that result from it. Thus we only send a result once we have
    * enough values (>= batchSize) and have also reached a new key. This means that we can
    * actually return more values than the batchSize when flatMap returns more than 1 value for a given key.
*/
class KeyPublisherState extends PublisherState {
Object[] extraValues;
int extraPos;
Object[] keys;
int keyPos;
int keyStartPosition;
private KeyPublisherState(String requestId, Address origin, int batchSize) {
super(requestId, origin, batchSize);
}
// Class to signal down that a given key was completed for key tracking purposes
class KeyCompleted<E> extends Notifications.ReuseNotificationBuilder<E> {
@Override
public String toString() {
return "KeyCompleted{" +
"key=" + value +
", segment=" + segment +
'}';
}
}
@Override
void startProcessing(InitialPublisherCommand command) {
SegmentAwarePublisherSupplier<Object> sap;
io.reactivex.rxjava3.functions.Function<Object, Object> toKeyFunction;
if (command.isEntryStream()) {
sap = localPublisherManager.entryPublisher(command.getSegments(), command.getKeys(), command.getExcludedKeys(),
command.getExplicitFlags(), DeliveryGuarantee.EXACTLY_ONCE, Function.identity());
toKeyFunction = (io.reactivex.rxjava3.functions.Function) RxJavaInterop.entryToKeyFunction();
} else {
sap = localPublisherManager.keyPublisher(command.getSegments(), command.getKeys(), command.getExcludedKeys(),
command.getExplicitFlags(), DeliveryGuarantee.EXACTLY_ONCE, Function.identity());
toKeyFunction = RxJavaInterop.identityFunction();
}
Function<Publisher<Object>, Publisher<Object>> functionToApply = command.getTransformer();
// We immediately consume the value, so we can reuse a builder for both to save on allocations
Notifications.NotificationBuilder<Object> builder = Notifications.reuseBuilder();
KeyCompleted<Object> keyBuilder = new KeyCompleted<>();
Flowable.fromPublisher(sap.publisherWithLostSegments())
.concatMap(notification -> {
if (!notification.isValue()) {
return Flowable.just(notification);
}
Object originalValue = notification.value();
Object key = toKeyFunction.apply(originalValue);
return Flowable.fromPublisher(functionToApply.apply(Flowable.just(originalValue)))
.map(v -> builder.value(v, notification.valueSegment()))
// Signal the end of the key - flatMap could have 0 or multiple entries
.concatWith(Single.just(keyBuilder.value(key, notification.valueSegment())));
})
.subscribe(this);
}
@Override
PublisherResponse generateResponse(boolean complete) {
return new KeyPublisherResponse(results, completedSegments, lostSegments, pos, complete,
segmentResults == null ? Collections.emptyList() : segmentResults, extraValues, extraPos, keys, keyPos);
}
@Override
public void onNext(SegmentAwarePublisherSupplier.NotificationWithLost notification) {
if (!notification.isValue()) {
super.onNext(notification);
return;
}
boolean requestMore = true;
if (notification instanceof KeyCompleted) {
            // If these aren't equal the key had some values mapped to it, so we need to retain the key
            // in case we can't finish this segment and the user needs to retry
if (keyStartPosition != pos) {
Object key = notification.value();
if (keys == null) {
// This is the largest the array can be
keys = new Object[batchSize];
}
keys[keyPos++] = key;
if (pos == results.length) {
prepareResponse(false);
// We don't request more if we completed a batch - we will request later after the result is returned
requestMore = false;
} else {
keyStartPosition = pos;
}
}
if (requestMore) {
requestMore(upstream, 1);
}
return;
}
int segment = notification.valueSegment();
assert currentSegment == segment || currentSegment == -1;
currentSegment = segment;
segmentEntries++;
Object value = notification.value();
if (pos == results.length) {
// Write any overflow into our buffer
if (extraValues == null) {
extraValues = new Object[8];
}
if (extraPos == extraValues.length) {
Object[] expandedArray = new Object[extraValues.length << 1];
System.arraycopy(extraValues, 0, expandedArray, 0, extraPos);
extraValues = expandedArray;
}
extraValues[extraPos++] = value;
// Need to keep requesting until we get to the end of the key
requestMore(upstream, 1);
} else {
results[pos++] = value;
// If we have filled up the array, we need to request until we hit end of key
if (pos == results.length) {
requestMore(upstream, 1);
}
}
}
@Override
void resetValues() {
super.resetValues();
keyResetValues();
}
void keyResetValues() {
extraValues = null;
extraPos = 0;
keys = null;
keyPos = 0;
keyStartPosition = 0;
}
@Override
public void segmentComplete(int segment) {
super.segmentComplete(segment);
keys = null;
keyPos = 0;
keyStartPosition = 0;
}
@Override
public void segmentLost(int segment) {
super.segmentLost(segment);
// We discard any extra values as they would all be in the same segment - the super method already discarded
// the non extra values
keyResetValues();
}
}
}
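// Illustrative sketch (not part of the repository): a stripped-down demonstration of the
// "batchSize + 1" request pattern described in the PublisherState Javadoc above. Requesting one
// item beyond the batch size keeps the marker that immediately follows the last value of a
// segment inside the same batch. The item values and the "END" completion marker are invented
// for the example.
import java.util.ArrayList;
import java.util.List;

import org.reactivestreams.Subscription;

import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.FlowableSubscriber;

class BatchPlusOneSketch {
   public static void main(String[] args) {
      int batchSize = 3;
      // "A".."C" belong to one segment; "END" stands in for that segment's completion notification
      Flowable.just("A", "B", "C", "END", "D", "E").subscribe(new FlowableSubscriber<String>() {
         final List<String> batch = new ArrayList<>();
         Subscription upstream;

         @Override
         public void onSubscribe(Subscription s) {
            upstream = s;
            s.request(batchSize + 1); // the +1 keeps one outstanding request for the marker
         }

         @Override
         public void onNext(String item) {
            batch.add(item);
            if (batch.size() == batchSize + 1) {
               System.out.println("batch with marker visible: " + batch);
               batch.clear();
               upstream.request(batchSize); // the outstanding count stays one ahead
            }
         }

         @Override
         public void onError(Throwable t) {
            t.printStackTrace();
         }

         @Override
         public void onComplete() {
            System.out.println("final batch: " + batch);
         }
      });
   }
}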
| 27,390
| 39.45938
| 141
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/DeliveryGuarantee.java
|
package org.infinispan.reactive.publisher.impl;
/**
* Enumeration defining the possible delivery guarantees for entries.
* @author wburns
* @since 10.0
*/
public enum DeliveryGuarantee {
/**
* The least strict guarantee that ensures that data is never read more than once, but may be missed. This guarantee
* is most performant as it never requires retrying data or returning extra data for the sake of consistency. However,
* under a stable topology this will return the same results as {@link #EXACTLY_ONCE}.
*/
AT_MOST_ONCE,
/**
    * The in-between guarantee that provides a view of all data, but may return duplicates during a topology change. This
* guarantee does not send identity values, but instead will retry an operation, most likely returning duplicates.
* However, under a stable topology this will return the same results as {@link #EXACTLY_ONCE}.
*/
AT_LEAST_ONCE,
/**
    * The strictest guarantee, ensuring that an entry is seen exactly once in the results. This is the most
    * expensive guarantee as it may require copying identity values (i.e. keys) to the originator to ensure that a
    * value is not returned more than once for a given key.
*/
EXACTLY_ONCE,
;
private static final DeliveryGuarantee[] CACHED_VALUES = DeliveryGuarantee.values();
public static DeliveryGuarantee valueOf(int index) {
return CACHED_VALUES[index];
}
}
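// Illustrative sketch (not part of the repository): the cached valueOf(int) above is typically
// paired with ordinal() so the guarantee can cross the wire as a single byte, without the array
// allocation that DeliveryGuarantee.values() would incur on every read. The stream types here
// are assumptions for the example.
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

class DeliveryGuaranteeWireSketch {
   static void write(ObjectOutput out, DeliveryGuarantee guarantee) throws IOException {
      out.writeByte(guarantee.ordinal());
   }

   static DeliveryGuarantee read(ObjectInput in) throws IOException {
      return DeliveryGuarantee.valueOf(in.readByte());
   }
}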
| 1,439
| 40.142857
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/PartitionAwareClusterPublisherManager.java
|
package org.infinispan.reactive.publisher.impl;
import static org.infinispan.util.logging.Log.CLUSTER;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.PartitionStatusChanged;
import org.infinispan.notifications.cachelistener.event.PartitionStatusChangedEvent;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* Cluster stream manager that also pays attention to partition status and properly closes iterators and throws
* exceptions when the availability mode changes.
*/
@Scope(Scopes.NAMED_CACHE)
public class PartitionAwareClusterPublisherManager<K, V> extends ClusterPublisherManagerImpl<K, V> {
volatile AvailabilityMode currentMode = AvailabilityMode.AVAILABLE;
protected final PartitionListener listener = new PartitionListener();
@Inject protected ComponentRef<Cache<?, ?>> cache;
private final Set<AtomicBoolean> pendingOperations = ConcurrentHashMap.newKeySet();
@Listener
private class PartitionListener {
@PartitionStatusChanged
public void onPartitionChange(PartitionStatusChangedEvent<K, ?> event) {
if (!event.isPre()) {
AvailabilityMode newMode = event.getAvailabilityMode();
currentMode = newMode;
if (newMode == AvailabilityMode.DEGRADED_MODE) {
pendingOperations.forEach(ab -> ab.set(true));
}
}
}
}
public void start() {
super.start();
cache.running().addListener(listener);
}
@Override
public <R> CompletionStage<R> keyReduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
checkPartitionStatus();
CompletionStage<R> original = super.keyReduction(parallelPublisher, segments, keysToInclude, ctx, explicitFlags,
deliveryGuarantee, transformer, finalizer);
return registerStage(original);
}
@Override
public <R> CompletionStage<R> entryReduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
checkPartitionStatus();
CompletionStage<R> original = super.entryReduction(parallelPublisher, segments, keysToInclude, ctx, explicitFlags,
deliveryGuarantee, transformer, finalizer);
return registerStage(original);
}
private <R> CompletionStage<R> registerStage(CompletionStage<R> original) {
AtomicBoolean ab = registerOperation();
return original.handle((value, t) -> {
pendingOperations.remove(ab);
if (ab.get()) {
// Ignore the original exception and throw an AvailabilityException instead
throw CLUSTER.partitionDegraded();
}
CompletableFutures.rethrowExceptionIfPresent(t);
return value;
});
}
private AtomicBoolean registerOperation() {
AtomicBoolean ab = new AtomicBoolean();
pendingOperations.add(ab);
      // Recheck after adding to the pending set to close the small window between the initial check and registration
if (isPartitionDegraded()) {
ab.set(true);
}
return ab;
}
@Override
public <R> SegmentPublisherSupplier<R> keyPublisher(IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<K>, ? extends Publisher<R>> transformer) {
checkPartitionStatus();
SegmentPublisherSupplier<R> original = super.keyPublisher(segments, keysToInclude, invocationContext,
explicitFlags, deliveryGuarantee, batchSize, transformer);
return registerPublisher(original);
}
@Override
public <R> SegmentPublisherSupplier<R> entryPublisher(IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<CacheEntry<K, V>>, ? extends Publisher<R>> transformer) {
checkPartitionStatus();
SegmentPublisherSupplier<R> original = super.entryPublisher(segments, keysToInclude, invocationContext,
explicitFlags, deliveryGuarantee, batchSize, transformer);
return registerPublisher(original);
}
private <R> SegmentPublisherSupplier<R> registerPublisher(SegmentPublisherSupplier<R> original) {
return new SegmentPublisherSupplier<R>() {
@Override
public Publisher<Notification<R>> publisherWithSegments() {
return handleEarlyTermination(SegmentPublisherSupplier::publisherWithSegments);
}
@Override
public Publisher<R> publisherWithoutSegments() {
return handleEarlyTermination(SegmentPublisherSupplier::publisherWithoutSegments);
}
private <S> Flowable<S> handleEarlyTermination(Function<SegmentPublisherSupplier<R>, Publisher<S>> function) {
AtomicBoolean ab = registerOperation();
return Flowable.fromPublisher(function.apply(original))
.doOnNext(s -> checkPendingOperation(ab))
.doOnComplete(() -> checkPendingOperation(ab))
.doFinally(() -> pendingOperations.remove(ab));
}
};
}
private void checkPendingOperation(AtomicBoolean ab) {
if (ab.get())
throw CLUSTER.partitionDegraded();
}
private void checkPartitionStatus() {
if (isPartitionDegraded()) {
throw CLUSTER.partitionDegraded();
}
}
private boolean isPartitionDegraded() {
return currentMode != AvailabilityMode.AVAILABLE;
}
}
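// Illustrative sketch (not part of the repository): the check/register/recheck pattern used by
// registerOperation() above, reduced to its essentials. Publishing the flag before re-reading the
// availability closes the window in which the mode flips to degraded between the initial check
// and the registration. All names below are invented for the example.
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;

class DegradeAwareTracker {
   private final Set<AtomicBoolean> pendingOperations = ConcurrentHashMap.newKeySet();
   private volatile boolean degraded;

   AtomicBoolean register() {
      AtomicBoolean flag = new AtomicBoolean();
      pendingOperations.add(flag);
      if (degraded) { // recheck after publishing the flag
         flag.set(true);
      }
      return flag;
   }

   void onDegraded() {
      degraded = true;
      pendingOperations.forEach(flag -> flag.set(true)); // fail everything in flight
   }

   void complete(AtomicBoolean flag) {
      pendingOperations.remove(flag);
      if (flag.get()) {
         throw new IllegalStateException("partition degraded while the operation was running");
      }
   }
}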
| 6,800
| 40.218182
| 120
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/LocalClusterPublisherManagerImpl.java
|
package org.infinispan.reactive.publisher.impl;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.reactive.RxJavaInterop;
import org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
@Scope(Scopes.NAMED_CACHE)
public class LocalClusterPublisherManagerImpl<K, V> implements ClusterPublisherManager<K, V> {
@Inject LocalPublisherManager<K, V> localPublisherManager;
@Inject Configuration cacheConfiguration;
@Inject KeyPartitioner keyPartitioner;
@Inject ComponentRegistry componentRegistry;
private int maxSegment;
@Start
public void start() {
if (Configurations.needSegments(cacheConfiguration)) {
maxSegment = cacheConfiguration.clustering().hash().numSegments();
} else {
maxSegment = 1;
}
}
static <K, V> Flowable<CacheEntry<K, V>> entryPublisherFromContext(InvocationContext ctx, IntSet segments,
KeyPartitioner keyPartitioner, Set<K> keysToInclude) {
Flowable<CacheEntry<K, V>> flowable = Flowable.fromPublisher(ctx.publisher());
if (segments == null && keysToInclude == null) {
return flowable;
}
return flowable.filter(entry -> (keysToInclude == null || keysToInclude.contains(entry.getKey()))
&& (segments == null || segments.contains(keyPartitioner.getSegment(entry.getKey()))));
}
static <K, V> Flowable<SegmentPublisherSupplier.Notification<CacheEntry<K, V>>> notificationPublisherFromContext(
InvocationContext ctx, IntSet segments, KeyPartitioner keyPartitioner, Set<K> keysToInclude) {
return Flowable.fromPublisher(ctx.<K, V>publisher())
.mapOptional(ce -> {
K key = ce.getKey();
if (keysToInclude == null || keysToInclude.contains(key)) {
int segment = keyPartitioner.getSegment(key);
if (segments == null || segments.contains(segment)) {
return Optional.of(Notifications.value(ce, segment));
}
}
return Optional.empty();
});
}
IntSet handleNullSegments(IntSet segments) {
return segments != null ? segments : IntSets.immutableRangeSet(maxSegment);
}
@Override
public <R> CompletionStage<R> keyReduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
if (transformer instanceof InjectableComponent) {
((InjectableComponent) transformer).inject(componentRegistry);
}
if (finalizer instanceof InjectableComponent) {
((InjectableComponent) finalizer).inject(componentRegistry);
}
if (invocationContext == null || invocationContext.lookedUpEntriesCount() == 0) {
return localPublisherManager.keyReduction(parallelPublisher, handleNullSegments(segments), keysToInclude, null,
explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer, finalizer).thenApply(PublisherResult::getResult);
}
CompletionStage<R> stage = localPublisherManager.keyReduction(parallelPublisher, handleNullSegments(segments), keysToInclude,
(Set<K>) invocationContext.getLookedUpEntries().keySet(), explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer, finalizer)
.thenApply(PublisherResult::getResult);
Flowable<K> entryFlowable = entryPublisherFromContext(invocationContext, segments, keyPartitioner, keysToInclude)
.map(RxJavaInterop.entryToKeyFunction());
return transformer.apply(entryFlowable)
.thenCombine(stage, Flowable::just)
.thenCompose(finalizer);
}
@Override
public <R> CompletionStage<R> entryReduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
if (transformer instanceof InjectableComponent) {
((InjectableComponent) transformer).inject(componentRegistry);
}
if (finalizer instanceof InjectableComponent) {
((InjectableComponent) finalizer).inject(componentRegistry);
}
if (invocationContext == null || invocationContext.lookedUpEntriesCount() == 0) {
return localPublisherManager.entryReduction(parallelPublisher, handleNullSegments(segments), keysToInclude, null,
explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer, finalizer).thenApply(PublisherResult::getResult);
}
CompletionStage<R> stage = localPublisherManager.entryReduction(parallelPublisher, handleNullSegments(segments), keysToInclude,
(Set<K>) invocationContext.getLookedUpEntries().keySet(), explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer, finalizer)
.thenApply(PublisherResult::getResult);
Flowable<CacheEntry<K, V>> entryFlowable = entryPublisherFromContext(invocationContext, segments, keyPartitioner,
keysToInclude);
return transformer.apply(entryFlowable)
.thenCombine(stage, Flowable::just)
.thenCompose(finalizer);
}
@Override
public <R> SegmentPublisherSupplier<R> keyPublisher(IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
int batchSize, Function<? super Publisher<K>, ? extends Publisher<R>> transformer) {
if (transformer instanceof InjectableComponent) {
((InjectableComponent) transformer).inject(componentRegistry);
}
if (invocationContext == null || invocationContext.lookedUpEntriesCount() == 0) {
return localPublisherManager.keyPublisher(handleNullSegments(segments),
keysToInclude, null, explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer);
}
Set<K> contextKeys = (Set<K>) invocationContext.getLookedUpEntries().keySet();
SegmentAwarePublisherSupplier<R> cachePublisher = localPublisherManager.keyPublisher(handleNullSegments(segments),
keysToInclude, contextKeys, explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer);
return new SegmentPublisherSupplier<R>() {
@Override
public Publisher<Notification<R>> publisherWithSegments() {
Flowable<Notification<CacheEntry<K, V>>> contextFlowable =
notificationPublisherFromContext(invocationContext, segments, keyPartitioner, keysToInclude);
return Flowable.concat(contextFlowable.concatMap(notification ->
Flowable.fromPublisher(transformer.apply(Flowable.just(notification.value().getKey())))
.map(r -> Notifications.value(r, notification.valueSegment()))),
cachePublisher.publisherWithSegments());
}
@Override
public Publisher<R> publisherWithoutSegments() {
Flowable<K> contextFlowable = entryPublisherFromContext(invocationContext, segments, keyPartitioner, keysToInclude)
.map(RxJavaInterop.entryToKeyFunction());
return Flowable.concat(transformer.apply(contextFlowable),
cachePublisher.publisherWithoutSegments());
}
};
}
@Override
public <R> SegmentPublisherSupplier<R> entryPublisher(IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<CacheEntry<K, V>>, ? extends Publisher<R>> transformer) {
if (transformer instanceof InjectableComponent) {
((InjectableComponent) transformer).inject(componentRegistry);
}
if (invocationContext == null || invocationContext.lookedUpEntriesCount() == 0) {
return localPublisherManager.entryPublisher(handleNullSegments(segments),
keysToInclude, null, explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer);
}
Set<K> contextKeys = (Set<K>) invocationContext.getLookedUpEntries().keySet();
SegmentAwarePublisherSupplier<R> cachePublisher = localPublisherManager.entryPublisher(handleNullSegments(segments),
keysToInclude, contextKeys, explicitFlags, DeliveryGuarantee.AT_MOST_ONCE, transformer);
return new SegmentPublisherSupplier<R>() {
@Override
public Publisher<Notification<R>> publisherWithSegments() {
Flowable<Notification<CacheEntry<K, V>>> entryFlowable = notificationPublisherFromContext(invocationContext, segments, keyPartitioner,
keysToInclude);
Flowable<Notification<R>> contextFlowable = entryFlowable
.concatMap(notification -> Flowable.fromPublisher(transformer.apply(Flowable.just(notification.value())))
.map(r -> Notifications.value(r, notification.valueSegment())));
return Flowable.concat(contextFlowable,
cachePublisher.publisherWithSegments());
}
@Override
public Publisher<R> publisherWithoutSegments() {
Flowable<CacheEntry<K, V>> entryFlowable = entryPublisherFromContext(invocationContext, segments, keyPartitioner,
keysToInclude);
return Flowable.concat(transformer.apply(entryFlowable),
cachePublisher.publisherWithoutSegments());
}
};
}
@Override
public CompletionStage<Long> sizePublisher(IntSet segments, InvocationContext ctx, long flags) {
return localPublisherManager.sizePublisher(segments, flags);
}
}
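// Illustrative sketch (not part of the repository): the context-overlay idea used by the
// publisher methods above. Entries looked up in the invocation context are emitted first and
// their keys are excluded from the underlying cache publisher, so an uncommitted transactional
// write shadows the committed value. The data below is invented for the example.
import java.util.Map;
import java.util.Set;

import io.reactivex.rxjava3.core.Flowable;

class ContextOverlaySketch {
   public static void main(String[] args) {
      Map<String, String> context = Map.of("k1", "txValue");            // uncommitted, from the context
      Map<String, String> cache = Map.of("k1", "oldValue", "k2", "v2"); // committed cache contents
      Set<String> contextKeys = context.keySet();

      Flowable<Map.Entry<String, String>> overlaid = Flowable.concat(
            Flowable.fromIterable(context.entrySet()),
            Flowable.fromIterable(cache.entrySet())
                  .filter(e -> !contextKeys.contains(e.getKey()))); // context keys are excluded

      overlaid.blockingForEach(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
      // prints k1 -> txValue (the context wins) and k2 -> v2
   }
}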
| 10,841
| 49.663551
| 146
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/ClusterPublisherManager.java
|
package org.infinispan.reactive.publisher.impl;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.reactivestreams.Publisher;
/**
* Manages distribution of various publisher operations that are ran locally and/or sent to remote nodes.
* @param <K> The key type for the underlying cache
* @param <V> the value type for the underlying cache
* @author wburns
* @since 10.0
*/
public interface ClusterPublisherManager<K, V> {
/**
* Same as {@link #entryReduction(boolean, IntSet, Set, InvocationContext, long, DeliveryGuarantee, Function, Function)}
* except that the source publisher provided to the <b>transformer</b> is made up of keys only.
* @param <R> return value type
* @return CompletionStage that contains the resulting value when complete
*/
<R> CompletionStage<R> keyReduction(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer);
/**
* Performs the given <b>transformer</b> and <b>finalizer</b> on data in the cache, resulting in a single value.
* Depending on the <b>deliveryGuarantee</b> the <b>transformer</b> may be invoked <b>1..numSegments</b> times. It
* could be that the <b>transformer</b> is invoked for every segment and produces a result. All of these results
* are then fed into the <b>finalizer</b> to produce a final result. If publisher is parallel the <b>finalizer</b>
* will be invoked on each node to ensure there is only a single result per node.
* <p>
* If the provided <b>transformer</b> internally uses a reduction with a default value, that value must be its identity value.
* This is the same as can be seen at {@link java.util.stream.Stream#reduce(Object, BinaryOperator)}.
* Then as long as the <b>finalizer</b> can handle the identity value it will be properly reduced.
* @param <R> return value type
* @param parallelPublisher Whether on each node the publisher should be parallelized remotely and locally
* @param segments determines what entries should be evaluated by only using ones that map to the given segments (if null assumes all segments)
* @param keysToInclude set of keys that should only be used (if null all entries for the given segments will be evaluated)
* @param invocationContext context of the invoking operation, context entries override the values in the cache (may be null)
* @param explicitFlags cache flags, which are passed to {@link org.infinispan.commands.read.KeySetCommand} or {@link org.infinispan.commands.read.EntrySetCommand}
* @param deliveryGuarantee delivery guarantee for given entries
* @param transformer reduces the given publisher of data eventually into a single value. Must not be null.
* @param finalizer reduces all of the single values produced by the transformer or this finalizer into one final value. Must not be null.
* @return CompletionStage that contains the resulting value when complete
*/
<R> CompletionStage<R> entryReduction(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer);
/**
* Same as {@link #entryPublisher(IntSet, Set, InvocationContext, long, DeliveryGuarantee, int, Function)}
* except that the source publisher provided to the <b>transformer</b> is made up of keys only.
* @param <R> return value type
* @return Publisher that when subscribed to will return the results and notify of segment completion if necessary
*/
<R> SegmentPublisherSupplier<R> keyPublisher(IntSet segments, Set<K> keysToInclude, InvocationContext invocationContext,
long explicitFlags, DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<K>, ? extends Publisher<R>> transformer);
/**
* Performs the given <b>transformer</b> on data in the cache, resulting in multiple values. If a single
* value is desired, the user should use {@link #entryReduction(boolean, IntSet, Set, InvocationContext, long, DeliveryGuarantee, Function, Function)}
* instead as it can optimize some things. Depending on the <b>deliveryGuarantee</b> the <b>transformer</b> may be
    * invoked <b>1..numSegments</b> times per node. Results are retrieved from a given node in batches of up to
    * {@code batchSize} values, and more are requested only as values are consumed.
* <p>
    * For example, when using RxJava with an intermediate operation such as
    * {@link io.reactivex.rxjava3.core.Flowable#switchIfEmpty(Publisher)}, elements are added whenever the given
    * Publisher is empty; since a segment may well contain no entries, the elements of the switched Publisher may be
    * added multiple times.
* <p>
* Methods that add elements to the returned Publisher are fine as long as they are tied to a specific entry, for
* example {@link io.reactivex.rxjava3.core.Flowable#flatMap(io.reactivex.rxjava3.functions.Function)} which can reproduce
* the same elements when provided the same input entry from the cache.
*
* @param segments determines what entries should be evaluated by only using ones that map to the given segments (if null assumes all segments)
* @param keysToInclude set of keys that should only be used (if null all entries for the given segments will be evaluated)
* @param invocationContext context of the invoking operation, context entries override the values in the cache (may be null)
* @param explicitFlags cache flags
* @param deliveryGuarantee delivery guarantee for given entries
* @param batchSize how many entries to be returned at a given time
* @param transformer transform the given stream of data into something else (requires non null)
* @param <R> return value type
* @return Publisher that when subscribed to will return the results and notify of segment completion if necessary
*/
<R> SegmentPublisherSupplier<R> entryPublisher(IntSet segments, Set<K> keysToInclude, InvocationContext invocationContext,
long explicitFlags, DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<CacheEntry<K, V>>, ? extends Publisher<R>> transformer);
CompletionStage<Long> sizePublisher(IntSet segments, InvocationContext ctx, long flags);
}
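// Illustrative sketch (not part of the repository): an entryReduction whose transformer reduces
// with an identity default value (0 for addition), as the contract above requires. The "manager"
// variable is an assumption; in a real deployment the transformer and finalizer must be
// marshallable (for example the functions in org.infinispan.reactive.publisher.PublisherReducers),
// since they may be sent to remote nodes.
import java.util.concurrent.CompletionStage;

import io.reactivex.rxjava3.core.Flowable;

class EntryReductionSketch {
   static CompletionStage<Long> countEntries(ClusterPublisherManager<String, String> manager) {
      return manager.entryReduction(
            false,                          // parallelPublisher
            null,                           // null segments means all segments
            null,                           // null keys means all entries
            null,                           // no invocation context
            0L,                             // no explicit flags
            DeliveryGuarantee.EXACTLY_ONCE,
            publisher -> Flowable.fromPublisher(publisher).count().toCompletionStage(),
            partials -> Flowable.fromPublisher(partials).reduce(0L, Long::sum).toCompletionStage());
   }
}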
| 6,974
| 68.75
| 166
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/SegmentAwarePublisherSupplier.java
|
package org.infinispan.reactive.publisher.impl;
import org.reactivestreams.Publisher;
/**
* This is the same as {@link SegmentPublisherSupplier} except that it also allows listening for when a segment is
* lost. The lost segment provides the same notification guarantees as the segment completion of the parent interface.
* <p>
* This interface is normally just for internal Infinispan usage as users shouldn't normally have to care about retrying.
* <p>
 * Implementors of this interface do not retry; they notify of lost segments instead, whereas implementors of
 * {@link SegmentPublisherSupplier} normally retry internally.
*
* @param <R> value type
*/
public interface SegmentAwarePublisherSupplier<R> extends SegmentPublisherSupplier<R> {
/**
    * Notification that can also contain lost segments. Note that lost segments are mutually exclusive with
* value and completed segments.
*
* @param <R> the value type if present
*/
interface NotificationWithLost<R> extends SegmentPublisherSupplier.Notification<R> {
/**
* Whether this notification is for a lost segment
*
* @return true if a segment was lost
*/
default boolean isLostSegment() {
return false;
}
/**
       * The segment that was lost for this notification
*
* @return the segment
* @throws IllegalStateException if this notification contains a value or has a completed segment
*/
default int lostSegment() {
throw new IllegalStateException("Notification does not contain a lost segment, please check with isLostSegment first!");
}
}
/**
* When this method is used the {@link DeliveryGuarantee} is ignored as the user isn't listening to completion or
* lost segments
*/
Publisher<R> publisherWithoutSegments();
/**
    * Same as {@link SegmentPublisherSupplier#publisherWithSegments()}, except that we can also notify a
* listener when a segment has been lost before publishing all its entries.
* <p>
* The provided {@link DeliveryGuarantee} when creating this <i>SegmentAwarePublisherSupplier</i> will control
* how a lost segment notification is raised {@link NotificationWithLost#isLostSegment()}.
* <h4>Summary of Delivery Guarantee Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Delivery Guarantee Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Delivery Guarantee</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link DeliveryGuarantee#AT_MOST_ONCE}</td>
* <td valign="top">A segment is lost only if this node is not the read owner when starting to read it.
 *              If the segment is no longer readable after the publisher has started, no more entries are returned
* and the segment is completed normally.</td>
* </tr>
* <tr>
* <td valign="top">{@link DeliveryGuarantee#AT_LEAST_ONCE}</td>
* <td valign="top">Same as {@link DeliveryGuarantee#EXACTLY_ONCE}.</td>
* </tr>
* <tr>
* <td valign="top">{@link DeliveryGuarantee#EXACTLY_ONCE}</td>
* <td valign="top">A segment is lost if at any point while reading entries from it,
* this node is no longer a read owner of the given segment.
* Therefore if the segment is complete, the publisher is guaranteed
* to include all values for the given segment.</td>
* </tr>
* </table>
* <p>
    * If the cache is LOCAL, only {@link DeliveryGuarantee#AT_MOST_ONCE} should be used, as there is no difference
    * between the guarantees and it is the most performant.
*/
default Publisher<NotificationWithLost<R>> publisherWithLostSegments() {
return publisherWithLostSegments(false);
}
/**
    * Same as {@link SegmentPublisherSupplier#publisherWithSegments()}, except that we can also notify a
    * listener when a segment has been lost before publishing all its entries.
* <p>
    * If the <b>reuseNotifications</b> parameter is true then the returned Notifications may be the same object carrying
    * different results. This means a consumer must not store the Notifications or process them asynchronously,
    * or else incorrect values may be observed. This parameter exists solely to save memory and improve performance when
    * it is known that the returned Publisher will be consumed synchronously, processing the values and segments immediately.
*
* @param reuseNotifications If the returned Publisher can reuse notification objects to save memory
*/
Publisher<NotificationWithLost<R>> publisherWithLostSegments(boolean reuseNotifications);
}
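// Illustrative sketch (not part of the repository): how an internal caller can use the
// lost-segment notifications above to drive retries. Segments reported lost are collected and
// can then be re-requested from their new owners; completed segments need no further work.
// The "supplier" parameter and process(...) are assumptions for the example.
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;

import io.reactivex.rxjava3.core.Flowable;

class LostSegmentRetrySketch {
   static IntSet drainOnce(SegmentAwarePublisherSupplier<Object> supplier) {
      IntSet lostSegments = IntSets.mutableEmptySet();
      Flowable.fromPublisher(supplier.publisherWithLostSegments())
            .blockingForEach(n -> {
               if (n.isValue()) {
                  process(n.value());
               } else if (n.isLostSegment()) {
                  lostSegments.set(n.lostSegment()); // re-request these from the new owners
               }
               // segment-complete notifications require no action in this sketch
            });
      return lostSegments;
   }

   static void process(Object value) {
      // consume the value
   }
}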
| 4,772
| 45.794118
| 129
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/ClusterPublisherManagerImpl.java
|
package org.infinispan.reactive.publisher.impl;
import java.lang.invoke.MethodHandles;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.ObjIntConsumer;
import java.util.function.Supplier;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.read.SizeCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.ClusteringConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.PersistenceConfiguration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.marshall.core.MarshallableFunctions;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.manager.PersistenceManager.StoreChangeListener;
import org.infinispan.reactive.RxJavaInterop;
import org.infinispan.reactive.publisher.PublisherReducers;
import org.infinispan.reactive.publisher.impl.commands.batch.CancelPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.batch.InitialPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.batch.NextPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.batch.PublisherResponse;
import org.infinispan.reactive.publisher.impl.commands.reduction.KeyPublisherResult;
import org.infinispan.reactive.publisher.impl.commands.reduction.PublisherResult;
import org.infinispan.reactive.publisher.impl.commands.reduction.ReductionPublisherRequestCommand;
import org.infinispan.reactive.publisher.impl.commands.reduction.SegmentPublisherResult;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ValidResponseCollector;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.remoting.transport.impl.VoidResponseCollector;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.statetransfer.StateTransferLock;
import org.infinispan.util.function.SerializableFunction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
/**
* ClusterPublisherManager that determines targets for the given segments and/or keys and then sends to local and
 * remote nodes in parallel, collecting results to be returned. This implementation prioritizes running as much as possible
 * on the local node, in some cases not even going remote if all keys or segments are available locally.
* @author wburns
* @since 10.0
*/
@Scope(Scopes.NAMED_CACHE)
public class ClusterPublisherManagerImpl<K, V> implements ClusterPublisherManager<K, V> {
protected final static Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
@Inject PublisherHandler publisherHandler;
@Inject LocalPublisherManager<K, V> localPublisherManager;
@Inject DistributionManager distributionManager;
@Inject StateTransferLock stateTransferLock;
@Inject RpcManager rpcManager;
@Inject CommandsFactory commandsFactory;
@Inject KeyPartitioner keyPartitioner;
@Inject Configuration cacheConfiguration;
@Inject ComponentRegistry componentRegistry;
@Inject PersistenceManager persistenceManager;
// Make sure we don't create one per invocation
private final KeyComposedType KEY_COMPOSED = new KeyComposedType<>();
private <R> KeyComposedType<R> keyComposedType() {
return KEY_COMPOSED;
}
// Make sure we don't create one per invocation
private final EntryComposedType ENTRY_COMPOSED = new EntryComposedType<>();
private <R> EntryComposedType<R> entryComposedType() {
return ENTRY_COMPOSED;
}
private final SizeComposedType SIZE_COMPOSED = new SizeComposedType();
private SizeComposedType sizeComposedType() {
return SIZE_COMPOSED;
}
private int maxSegment;
private boolean replicatedCache;
private volatile boolean writeBehindShared;
// If they have a shared store we can just read everything locally
private volatile boolean sharedStore;
private final StoreChangeListener storeChangeListener = pm -> {
writeBehindShared = pm.usingSharedAsyncStore();
sharedStore = pm.usingSharedStore();
};
protected RpcOptions rpcOptions;
@Start
public void start() {
maxSegment = cacheConfiguration.clustering().hash().numSegments();
replicatedCache = cacheConfiguration.clustering().cacheMode().isReplicated();
updateStoreInfo(cacheConfiguration.persistence());
persistenceManager.addStoreListener(storeChangeListener);
// Note we allow a little extra wiggle room for the timeout of the remote invocation by increasing it to 3 times
// the configured remote timeout. This is due to our responses requiring additional processing time (iteration,
// serialization and normally increased payloads)
rpcOptions = new RpcOptions(DeliverOrder.NONE, cacheConfiguration.clustering().remoteTimeout() * 3,
TimeUnit.MILLISECONDS);
cacheConfiguration.clustering()
.attributes().attribute(ClusteringConfiguration.REMOTE_TIMEOUT)
.addListener((a, ignored) -> {
rpcOptions = new RpcOptions(DeliverOrder.NONE, a.get() * 3, TimeUnit.MILLISECONDS);
});
}
@Stop
public void stop() {
persistenceManager.removeStoreListener(storeChangeListener);
}
@Override
public <R> CompletionStage<R> keyReduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return reduction(parallelPublisher, segments, keysToInclude, ctx, explicitFlags, deliveryGuarantee, keyComposedType(), transformer, finalizer);
}
@Override
public <R> CompletionStage<R> entryReduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return reduction(parallelPublisher, segments, keysToInclude, ctx, explicitFlags, deliveryGuarantee, entryComposedType(), transformer, finalizer);
}
private <I, R> CompletionStage<R> reduction(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude, InvocationContext ctx,
long explicitFlags, DeliveryGuarantee deliveryGuarantee, ComposedType<K, I, R> composedType,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
// Needs to be a serialized processor as we can write to it from different threads
FlowableProcessor<R> flowableProcessor = UnicastProcessor.<R>create().toSerialized();
// Apply the finalizer first (which subscribes) before emitting items, to avoid buffering in UnicastProcessor
CompletionStage<R> stage = finalizer.apply(flowableProcessor);
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizerToUse =
requiresFinalizer(parallelPublisher, keysToInclude, deliveryGuarantee) ? finalizer : null;
if (keysToInclude != null) {
startKeyPublisher(parallelPublisher, segments, keysToInclude, ctx, explicitFlags, deliveryGuarantee,
composedType, transformer, finalizerToUse, flowableProcessor);
} else {
startSegmentPublisher(parallelPublisher, segments, ctx, explicitFlags,
deliveryGuarantee, composedType, transformer, finalizerToUse, flowableProcessor);
}
return stage;
}
/**
* This method is used to determine if a finalizer is required to be sent remotely. For cases where it is not
* required, we avoid serializing it unnecessarily. For example, a parallel reduction must send the finalizer so
* remote nodes can consolidate their intermediate values before replying.
* @return whether finalizer is required
*/
private <R> boolean requiresFinalizer(boolean parallelPublisher, Set<K> keysToInclude,
DeliveryGuarantee deliveryGuarantee) {
// Parallel publisher has to use the finalizer to consolidate intermediate values on the remote nodes
return parallelPublisher ||
// Using segments with exactly once does one segment at a time and requires consolidation
keysToInclude == null && deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE;
}
private <I, R> void handleContextInvocation(IntSet segments, Set<K> keysToInclude, InvocationContext ctx, ComposedType<K, I, R> composedType,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
BiConsumer<PublisherResult<R>, Throwable> biConsumer) {
CompletionStage<PublisherResult<R>> localStage = composedType.contextInvocation(segments, keysToInclude, ctx,
transformer);
if (log.isTraceEnabled()) {
// Make sure the trace occurs before response is processed
localStage = localStage.whenComplete((results, t) -> {
if (t != null) {
log.tracef(t, "Received exception while processing context %s", ctx);
} else {
log.tracef("Result was: %s for context %s", results.getResult(), ctx);
}
});
}
// Finally report the result to the BiConsumer so it knows the result
localStage.whenComplete(biConsumer);
}
// Finalizer isn't required as the FlowableProcessor already has that configured upstream
private static <I, R> void handleNoTargets(Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
FlowableProcessor<R> flowableProcessor) {
// Need to do this in case the transformer or finalizer produces values even for an empty publisher, such as
// reduce with an identity value or switchIfEmpty etc.
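// e.g. an illustrative transformer such as
// p -> Flowable.fromPublisher(p).reduce(0L, Long::sum).toCompletionStage()
// still emits its identity value 0 even though no targets contributed any elements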
CompletionStage<R> transformedStage = transformer.apply(Flowable.empty());
transformedStage.whenComplete((value, t) -> {
if (t != null) {
flowableProcessor.onError(t);
} else {
if (value != null) {
flowableProcessor.onNext(value);
}
flowableProcessor.onComplete();
}
});
}
private <I, R> void startKeyPublisher(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude, InvocationContext ctx,
long explicitFlags, DeliveryGuarantee deliveryGuarantee, ComposedType<K, I, R> composedType,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer,
FlowableProcessor<R> flowableProcessor) {
LocalizedCacheTopology topology = distributionManager.getCacheTopology();
Address localAddress = topology.getLocalAddress();
// Determine the owner for each key, excluding keys already available in the invocation context
Map<Address, Set<K>> keyTargets = determineKeyTargets(topology, keysToInclude, localAddress, segments, ctx);
int keyTargetSize = keyTargets.size();
if (keyTargetSize == 0) {
handleNoTargets(transformer, flowableProcessor);
return;
}
AtomicInteger parallelCount;
boolean useContext = ctx != null && ctx.lookedUpEntriesCount() > 0;
if (useContext) {
parallelCount = new AtomicInteger(keyTargetSize + 1);
} else {
parallelCount = new AtomicInteger(keyTargetSize);
}
// This way we only have to allocate 1 per request chain
BiConsumer<PublisherResult<R>, Throwable> biConsumer = new KeyBiConsumer<>(flowableProcessor,
parallelCount, topology.getTopologyId(), parallelPublisher, explicitFlags, deliveryGuarantee,
composedType, transformer, finalizer);
Set<K> localKeys = keyTargets.remove(localAddress);
// If any targets left, they are all remote
if (!keyTargets.isEmpty()) {
// We submit the remote ones first since dispatching them does not block - it only sends the remote tasks
for (Map.Entry<Address, Set<K>> remoteTarget : keyTargets.entrySet()) {
Address remoteAddress = remoteTarget.getKey();
Set<K> remoteKeys = remoteTarget.getValue();
TopologyAffectedCommand command = composedType.remoteInvocation(parallelPublisher, null, remoteKeys,
null, explicitFlags, deliveryGuarantee, transformer, finalizer);
command.setTopologyId(topology.getTopologyId());
CompletionStage<PublisherResult<R>> stage = rpcManager.invokeCommand(remoteAddress, command,
new KeyPublisherResultCollector<>(remoteKeys), rpcManager.getSyncRpcOptions());
stage.whenComplete(biConsumer);
}
}
if (localKeys != null) {
DeliveryGuarantee guarantee = deliveryToUse(null, deliveryGuarantee);
CompletionStage<PublisherResult<R>> localStage = composedType.localInvocation(parallelPublisher, null,
localKeys, null, explicitFlags, guarantee, transformer, finalizer);
if (log.isTraceEnabled()) {
// Make sure the trace occurs before response is processed
localStage = localStage.whenComplete((results, t) -> {
if (t != null) {
log.tracef(t, "Received exception while processing keys %s from %s", localKeys, localAddress);
} else {
log.tracef("Result was: %s for keys %s from %s with %s suspected keys",
results.getResult(), localKeys, localAddress, results.getSuspectedKeys());
}
});
}
// Map to the same collector, so we can reuse the same BiConsumer
localStage.whenComplete(biConsumer);
}
if (useContext) {
handleContextInvocation(segments, keysToInclude, ctx, composedType, transformer, biConsumer);
}
}
private <I, R> void startSegmentPublisher(boolean parallelPublisher, IntSet segments,
InvocationContext ctx, long explicitFlags, DeliveryGuarantee deliveryGuarantee, ComposedType<K, I, R> composedType,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer,
FlowableProcessor<R> flowableProcessor) {
LocalizedCacheTopology topology = distributionManager.getCacheTopology();
Address localAddress = topology.getLocalAddress();
Map<Address, IntSet> targets = determineSegmentTargets(topology, segments, localAddress);
int targetSize = targets.size();
if (targetSize == 0) {
handleNoTargets(transformer, flowableProcessor);
return;
}
// used to determine the last parallel completion, to either complete or retry
AtomicInteger parallelCount;
boolean useContext = ctx != null && ctx.lookedUpEntriesCount() > 0;
Map<Address, Set<K>> keysToExcludeByAddress;
if (useContext) {
parallelCount = new AtomicInteger(targetSize + 1);
keysToExcludeByAddress = determineKeyTargets(topology, (Set<K>) ctx.getLookedUpEntries().keySet(), localAddress,
segments, null);
} else {
parallelCount = new AtomicInteger(targetSize);
keysToExcludeByAddress = Collections.emptyMap();
}
// This way we only have to allocate 1 per request chain
BiConsumer<PublisherResult<R>, Throwable> biConsumer = new SegmentSpecificConsumer<>(flowableProcessor,
parallelCount, topology.getTopologyId(), parallelPublisher, ctx, explicitFlags, deliveryGuarantee,
composedType, transformer, finalizer);
IntSet localSegments = targets.remove(localAddress);
// If any targets left, they are all remote
if (!targets.isEmpty()) {
// We submit the remote ones first since dispatching them does not block - it only sends the remote tasks
for (Map.Entry<Address, IntSet> remoteTarget : targets.entrySet()) {
Address remoteAddress = remoteTarget.getKey();
IntSet remoteSegments = remoteTarget.getValue();
TopologyAffectedCommand command = composedType.remoteInvocation(parallelPublisher, remoteSegments, null,
keysToExcludeByAddress.get(remoteAddress), explicitFlags, deliveryGuarantee, transformer, finalizer);
command.setTopologyId(topology.getTopologyId());
CompletionStage<PublisherResult<R>> stage = rpcManager.invokeCommand(remoteAddress, command,
new SegmentPublisherResultCollector<>(remoteSegments), rpcManager.getSyncRpcOptions());
stage.whenComplete(biConsumer);
}
}
if (localSegments != null) {
DeliveryGuarantee guarantee = deliveryToUse(null, deliveryGuarantee);
CompletionStage<PublisherResult<R>> localStage = composedType.localInvocation(parallelPublisher, localSegments,
null, keysToExcludeByAddress.get(localAddress), explicitFlags, guarantee, transformer, finalizer);
if (log.isTraceEnabled()) {
// Make sure the trace occurs before response is processed
localStage = localStage.whenComplete((results, t) -> {
if (t != null) {
log.tracef(t, "Received exception while processing segments %s from %s", localSegments, localAddress);
} else {
log.tracef("Result was: %s for segments %s from %s with %s suspected segments",
results.getResult(), localSegments, localAddress, results.getSuspectedSegments());
}
});
}
// Map to the same collector, so we can reuse the same BiConsumer
localStage.whenComplete(biConsumer);
}
if (useContext) {
handleContextInvocation(segments, null, ctx, composedType, transformer, biConsumer);
}
}
private class SegmentSpecificConsumer<I, R> implements BiConsumer<PublisherResult<R>, Throwable> {
private final FlowableProcessor<R> flowableProcessor;
private final AtomicInteger parallelCount;
private final IntSet segmentsToRetry = IntSets.concurrentSet(maxSegment);
private final int currentTopologyId;
private final boolean parallelPublisher;
private final InvocationContext ctx;
private final long explicitFlags;
private final DeliveryGuarantee deliveryGuarantee;
private final ComposedType<K, I, R> composedType;
private final Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer;
private final Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer;
SegmentSpecificConsumer(FlowableProcessor<R> flowableProcessor, AtomicInteger parallelCount,
int currentTopologyId, boolean parallelPublisher, InvocationContext ctx, long explicitFlags,
DeliveryGuarantee deliveryGuarantee, ComposedType<K, I, R> composedType,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
this.flowableProcessor = flowableProcessor;
this.parallelCount = parallelCount;
this.currentTopologyId = currentTopologyId;
this.parallelPublisher = parallelPublisher;
this.ctx = ctx;
this.explicitFlags = explicitFlags;
this.deliveryGuarantee = deliveryGuarantee;
this.composedType = composedType;
this.transformer = transformer;
this.finalizer = finalizer;
}
@Override
public void accept(PublisherResult<R> resultCollector, Throwable t) {
if (t != null) {
if (log.isTraceEnabled()) {
log.tracef(t, "General error encountered when executing publisher request command");
}
flowableProcessor.onError(t);
} else {
handleResult(resultCollector);
// We were the last one to complete if zero, so we have to complete or resubmit
if (parallelCount.decrementAndGet() == 0) {
onCompletion();
}
}
}
private void handleResult(PublisherResult<R> result) {
IntSet suspectedSegments = result.getSuspectedSegments();
if (suspectedSegments != null && !suspectedSegments.isEmpty()) {
segmentsToRetry.addAll(suspectedSegments);
}
R actualValue = result.getResult();
if (actualValue != null) {
flowableProcessor.onNext(actualValue);
}
}
private void onCompletion() {
if (segmentsToRetry.isEmpty()) {
flowableProcessor.onComplete();
} else {
int nextTopology = currentTopologyId + 1;
if (log.isTraceEnabled()) {
log.tracef("Retrying segments %s after %d is installed", segmentsToRetry, nextTopology);
}
// If we had an issue with segments, we need to wait until the next topology is installed to try again
stateTransferLock.topologyFuture(nextTopology).whenComplete((ign, innerT) -> {
if (innerT != null) {
if (log.isTraceEnabled()) {
log.tracef(innerT, "General error encountered when waiting on topology future for publisher request command");
}
flowableProcessor.onError(innerT);
} else {
// Restart with the missing segments
startSegmentPublisher(parallelPublisher, segmentsToRetry, ctx, explicitFlags, deliveryGuarantee,
composedType, transformer, finalizer, flowableProcessor);
}
});
}
}
}
private class KeyBiConsumer<I, R> implements BiConsumer<PublisherResult<R>, Throwable> {
private final FlowableProcessor<R> flowableProcessor;
private final AtomicInteger parallelCount;
private final Set<K> keysToRetry = ConcurrentHashMap.newKeySet();
private final int currentTopologyId;
private final boolean parallelPublisher;
private final long explicitFlags;
private final DeliveryGuarantee deliveryGuarantee;
private final ComposedType<K, I, R> composedType;
private final Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer;
private final Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer;
KeyBiConsumer(FlowableProcessor<R> flowableProcessor, AtomicInteger parallelCount, int currentTopologyId,
boolean parallelPublisher, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
ComposedType<K, I, R> composedType, Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
this.flowableProcessor = flowableProcessor;
this.parallelCount = parallelCount;
this.currentTopologyId = currentTopologyId;
this.parallelPublisher = parallelPublisher;
this.explicitFlags = explicitFlags;
this.deliveryGuarantee = deliveryGuarantee;
this.composedType = composedType;
this.transformer = transformer;
this.finalizer = finalizer;
}
@Override
public void accept(PublisherResult<R> resultCollector, Throwable t) {
if (t != null) {
if (log.isTraceEnabled()) {
log.tracef(t, "General error encountered when executing publisher request command");
}
flowableProcessor.onError(t);
} else {
handleResult(resultCollector);
// We were the last one to complete if zero, so we have to complete
if (parallelCount.decrementAndGet() == 0) {
onCompletion();
}
}
}
private void handleResult(PublisherResult<R> result) {
// DistributedCacheStream.reduce(accumulator) works with null as defaultItem when identity is not given
// -> So null is a valid actualValue and does not mean we have suspect keys
Set<?> suspectedKeys = result.getSuspectedKeys();
if (suspectedKeys != null && !suspectedKeys.isEmpty()) {
keysToRetry.addAll((Set) suspectedKeys);
}
R actualValue = result.getResult();
if (actualValue != null) {
flowableProcessor.onNext(actualValue);
}
}
private void onCompletion() {
if (keysToRetry.isEmpty()) {
flowableProcessor.onComplete();
} else {
int nextTopology = currentTopologyId + 1;
if (log.isTraceEnabled()) {
log.tracef("Retrying keys %s after %d is installed", keysToRetry, nextTopology);
}
// If we had an issue with keys, we need to wait until the next topology is installed to try again
stateTransferLock.topologyFuture(nextTopology).whenComplete((ign, innerT) -> {
if (innerT != null) {
if (log.isTraceEnabled()) {
log.tracef(innerT, "General error encountered when waiting on topology future for publisher request command");
}
flowableProcessor.onError(innerT);
} else {
// Restart with keys that were missing - note that segments and excluded keys are always null, as we
// already filtered those out in the first startKeyPublisher invocation
startKeyPublisher(parallelPublisher, null, keysToRetry, null, explicitFlags, deliveryGuarantee,
composedType, transformer, finalizer, flowableProcessor);
}
});
}
}
}
private class KeyPublisherResultCollector<R> extends ValidResponseCollector<PublisherResult<R>> {
private final Set<K> keys;
KeyPublisherResultCollector(Set<K> keys) {
this.keys = keys;
}
@Override
public PublisherResult<R> finish() {
throw new IllegalStateException("Should never be invoked!");
}
@Override
protected PublisherResult<R> addValidResponse(Address sender, ValidResponse response) {
PublisherResult<R> results = (PublisherResult<R>) response.getResponseValue();
if (log.isTraceEnabled()) {
log.tracef("Result was: %s for keys %s from %s", results.getResult(), keys, sender);
}
return results;
}
@Override
protected PublisherResult<R> addTargetNotFound(Address sender) {
if (log.isTraceEnabled()) {
log.tracef("Cache is no longer running for keys %s from %s - must retry", Util.toStr(keys), sender);
}
return new KeyPublisherResult<>(keys);
}
@Override
protected PublisherResult<R> addException(Address sender, Exception exception) {
if (log.isTraceEnabled()) {
log.tracef(exception, "Exception encountered while requesting keys %s from %s", Util.toStr(keys), sender);
}
// Throw the exception so it is propagated to caller
if (exception instanceof CacheException) {
throw (CacheException) exception;
}
throw new CacheException(exception);
}
}
private class SegmentPublisherResultCollector<R> extends ValidResponseCollector<PublisherResult<R>> {
private final IntSet targetSegments;
SegmentPublisherResultCollector(IntSet targetSegments) {
this.targetSegments = targetSegments;
}
@Override
public PublisherResult<R> finish() {
throw new IllegalStateException("Should never be invoked!");
}
@Override
protected PublisherResult<R> addValidResponse(Address sender, ValidResponse response) {
Object value = response.getResponseValue();
if (value instanceof PublisherResult) {
PublisherResult<R> results = (PublisherResult<R>) value;
if (log.isTraceEnabled()) {
log.tracef("Result was: %s for segments %s from %s with %s suspected segments", results.getResult(),
targetSegments, sender, results.getSuspectedSegments());
}
return results;
}
if (log.isTraceEnabled()) {
log.tracef("Result was: %s for segments %s from %s.", value, targetSegments, sender);
}
return new SegmentPublisherResult<>(null, (R) value);
}
@Override
protected PublisherResult<R> addTargetNotFound(Address sender) {
if (log.isTraceEnabled()) {
log.tracef("Cache is no longer running for segments %s from %s - must retry", targetSegments, sender);
}
return new SegmentPublisherResult<>(targetSegments, null);
}
@Override
protected PublisherResult<R> addException(Address sender, Exception exception) {
if (log.isTraceEnabled()) {
log.tracef(exception, "Exception encountered while requesting segments %s from %s", targetSegments, sender);
}
// Throw the exception so it is propagated to caller
if (exception instanceof CacheException) {
throw (CacheException) exception;
}
throw new CacheException(exception);
}
}
private Map<Address, IntSet> determineSegmentTargets(LocalizedCacheTopology topology, IntSet segments, Address localAddress) {
if ((sharedStore || replicatedCache) && !writeBehindShared) {
// A shared store without write behind will have all values available on all nodes, so just do local lookup
var map = new HashMap<Address, IntSet>();
map.put(localAddress, segments == null ? IntSets.immutableRangeSet(maxSegment) : segments);
return map;
}
Map<Address, IntSet> targets = new HashMap<>();
if (segments == null) {
for (int segment = 0; segment < maxSegment; ++segment) {
handleSegment(segment, topology, localAddress, targets);
}
} else {
for (PrimitiveIterator.OfInt iter = segments.iterator(); iter.hasNext(); ) {
int segment = iter.nextInt();
handleSegment(segment, topology, localAddress, targets);
}
}
if (log.isTraceEnabled()) {
log.tracef("Targets determined to be %s on topology " + topology.getTopologyId(), targets);
}
return targets;
}
private void handleSegment(int segment, LocalizedCacheTopology topology, Address localAddress,
Map<Address, IntSet> targets) {
DistributionInfo distributionInfo = topology.getSegmentDistribution(segment);
Address targetAddress = determineOwnerToReadFrom(distributionInfo, localAddress);
// A scattered cache can be in a state where a segment has no primary owner - thus we ignore those segments.
// The retry will wait for a new topology to try again
if (targetAddress != null) {
addToMap(targets, targetAddress, segment);
} else if (log.isTraceEnabled()) {
log.tracef("No owner was found for segment %s.", segment);
}
}
private void addToMap(Map<Address, IntSet> map, Address owner, int segment) {
map.computeIfAbsent(owner, ignore -> IntSets.mutableEmptySet(maxSegment)).set(segment);
}
private Address determineOwnerToReadFrom(DistributionInfo distributionInfo, Address localAddress) {
// Prioritize local node even if it is backup when we don't have a shared write behind store
if (!writeBehindShared && distributionInfo.isReadOwner()) {
return localAddress;
} else {
return distributionInfo.primary();
}
}
private Map<Address, Set<K>> determineKeyTargets(LocalizedCacheTopology topology, Set<K> keys, Address localAddress,
IntSet segments, InvocationContext ctx) {
Map<Address, Set<K>> filteredKeys = new HashMap<>();
for (K key : keys) {
if (ctx != null && ctx.lookupEntry(key) != null) {
continue;
}
DistributionInfo distributionInfo = topology.getDistribution(key);
if (segments != null && !segments.contains(distributionInfo.segmentId())) {
continue;
}
addToMap(filteredKeys, determineOwnerToReadFrom(distributionInfo, localAddress), key);
}
return filteredKeys;
}
private void addToMap(Map<Address, Set<K>> map, Address owner, K key) {
map.computeIfAbsent(owner, ignore -> new HashSet<>()).add(key);
}
interface ComposedType<K, I, R> {
CompletionStage<PublisherResult<R>> localInvocation(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer);
TopologyAffectedCommand remoteInvocation(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer);
CompletionStage<PublisherResult<R>> contextInvocation(IntSet segments, Set<K> keysToInclude, InvocationContext ctx,
Function<? super Publisher<I>, ? extends CompletionStage<R>> transformer);
boolean isEntry();
K toKey(I value);
I fromCacheEntry(CacheEntry entry);
}
private class KeyComposedType<R> implements ComposedType<K, K, R> {
@Override
public CompletionStage<PublisherResult<R>> localInvocation(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return localPublisherManager.keyReduction(parallelPublisher, segments, keysToInclude, keysToExclude,
explicitFlags, deliveryGuarantee, transformer, finalizer);
}
@Override
public ReductionPublisherRequestCommand<K> remoteInvocation(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return commandsFactory.buildKeyReductionPublisherCommand(parallelPublisher, deliveryGuarantee, segments, keysToInclude,
keysToExclude, explicitFlags, transformer, finalizer);
}
@Override
public CompletionStage<PublisherResult<R>> contextInvocation(IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer) {
Flowable<K> flowable = LocalClusterPublisherManagerImpl.entryPublisherFromContext(ctx, segments, keyPartitioner, keysToInclude)
.map(RxJavaInterop.entryToKeyFunction());
return transformer.apply(flowable)
.thenApply(LocalPublisherManagerImpl.ignoreSegmentsFunction());
}
@Override
public boolean isEntry() {
return false;
}
@Override
public K toKey(K value) {
return value;
}
@Override
public K fromCacheEntry(CacheEntry entry) {
return (K) entry.getKey();
}
}
private class EntryComposedType<R> implements ComposedType<K, CacheEntry<K, V>, R> {
@Override
public CompletionStage<PublisherResult<R>> localInvocation(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return localPublisherManager.entryReduction(parallelPublisher, segments, keysToInclude, keysToExclude,
explicitFlags, deliveryGuarantee, transformer, finalizer);
}
@Override
public ReductionPublisherRequestCommand<K> remoteInvocation(boolean parallelPublisher, IntSet segments, Set<K> keysToInclude,
Set<K> keysToExclude, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return commandsFactory.buildEntryReductionPublisherCommand(parallelPublisher, deliveryGuarantee, segments, keysToInclude,
keysToExclude, explicitFlags, transformer, finalizer);
}
@Override
public CompletionStage<PublisherResult<R>> contextInvocation(IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer) {
return transformer.apply(LocalClusterPublisherManagerImpl.entryPublisherFromContext(ctx, segments, keyPartitioner, keysToInclude))
.thenApply(LocalPublisherManagerImpl.ignoreSegmentsFunction());
}
@Override
public boolean isEntry() {
return true;
}
@Override
public K toKey(CacheEntry<K, V> value) {
return value.getKey();
}
@Override
public CacheEntry<K, V> fromCacheEntry(CacheEntry entry) {
return entry;
}
}
private class SizeComposedType implements ComposedType<K, Long, Long> {
@Override
public CompletionStage<PublisherResult<Long>> localInvocation(boolean parallelPublisher,
IntSet segments, Set<K> keysToInclude, Set<K> keysToExclude, long explicitFlags,
DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<Long>, ? extends CompletionStage<Long>> transformer,
Function<? super Publisher<Long>, ? extends CompletionStage<Long>> finalizer) {
return localPublisherManager.sizePublisher(segments, explicitFlags)
.thenApply(size -> new SegmentPublisherResult<>(null, size));
}
@Override
public SizeCommand remoteInvocation(boolean parallelPublisher, IntSet segments,
Set<K> keysToInclude, Set<K> keysToExclude,
long explicitFlags,
DeliveryGuarantee deliveryGuarantee,
Function<? super Publisher<Long>, ? extends CompletionStage<Long>> transformer,
Function<? super Publisher<Long>, ? extends CompletionStage<Long>> finalizer) {
return commandsFactory.buildSizeCommand(segments, EnumUtil.mergeBitSets(explicitFlags, FlagBitSets.CACHE_MODE_LOCAL));
}
@Override
public CompletionStage<PublisherResult<Long>> contextInvocation(IntSet segments, Set<K> keysToInclude,
InvocationContext ctx, Function<? super Publisher<Long>, ? extends CompletionStage<Long>> transformer) {
throw new IllegalStateException("Should never be invoked!");
}
@Override
public boolean isEntry() {
return false;
}
@Override
public K toKey(Long value) {
throw new IllegalStateException("Should never be invoked!");
}
@Override
public Long fromCacheEntry(CacheEntry entry) {
throw new IllegalStateException("Should never be invoked!");
}
}
private void updateStoreInfo(PersistenceConfiguration persistenceConfiguration) {
for (StoreConfiguration storeConfiguration : persistenceConfiguration.stores()) {
if (storeConfiguration.shared()) {
sharedStore = true;
if (storeConfiguration.async().enabled()) {
writeBehindShared = true;
}
}
}
}
@Override
public <R> SegmentPublisherSupplier<R> keyPublisher(IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags,
DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<K>, ? extends Publisher<R>> transformer) {
if (keysToInclude != null) {
return new KeyAwarePublisherImpl<>(keysToInclude, keyComposedType(), segments, invocationContext,
explicitFlags, deliveryGuarantee, batchSize, transformer);
}
return new SegmentAwarePublisherImpl<>(segments, keyComposedType(), invocationContext, explicitFlags,
deliveryGuarantee, batchSize, transformer);
}
@Override
public <R> SegmentPublisherSupplier<R> entryPublisher(IntSet segments, Set<K> keysToInclude,
InvocationContext invocationContext, long explicitFlags,
DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<CacheEntry<K, V>>, ?
extends Publisher<R>> transformer) {
if (keysToInclude != null) {
return new KeyAwarePublisherImpl<>(keysToInclude, entryComposedType(), segments, invocationContext,
explicitFlags, deliveryGuarantee, batchSize, transformer);
}
return new SegmentAwarePublisherImpl<>(segments, entryComposedType(), invocationContext, explicitFlags,
deliveryGuarantee, batchSize, transformer);
}
@Override
public CompletionStage<Long> sizePublisher(IntSet segments, InvocationContext ctx, long flags) {
return reduction(false, segments, null, ctx, flags, DeliveryGuarantee.EXACTLY_ONCE,
sizeComposedType(), null, PublisherReducers.add());
}
private static final AtomicInteger requestCounter = new AtomicInteger();
private static final Function<ValidResponse, PublisherResponse> responseHandler = vr -> {
if (vr instanceof SuccessfulResponse) {
return (PublisherResponse) vr.getResponseValue();
} else {
throw new IllegalArgumentException("Unsupported response received: " + vr);
}
};
// We only allow 4 concurrent inner publishers to subscribe at a given time (arbitrary to keep request count down
// but also provide adequate concurrent processing)
private static final int MAX_INNER_SUBSCRIBERS = 4;
/**
* This class handles whenever a new subscriber is registered. This class handles the retry mechanism and submission
* of requests to various nodes. All details regarding a specific subscriber should be stored in this class, such
* as the completed segments.
* @param <I> the type of the values produced by the underlying publisher (key or entry)
* @param <R> the type of the values returned after applying the transformer
*/
class SubscriberHandler<I, R> implements ObjIntConsumer<I> {
final AbstractSegmentAwarePublisher<I, R> publisher;
final String requestId;
final AtomicReferenceArray<Collection<K>> keysBySegment;
final IntSet segmentsToComplete;
// Only allow the first child publisher to use the context values - getAndSet ensures the context is not read again during a retry
final AtomicBoolean useContext = new AtomicBoolean(true);
// Topology id of the previous attempt, used so a retry waits until a newer topology is installed
volatile int currentTopology = -1;
SubscriberHandler(AbstractSegmentAwarePublisher<I, R> publisher, boolean withSegments) {
this.publisher = publisher;
this.requestId = rpcManager.getAddress() + "#" + requestCounter.incrementAndGet();
this.keysBySegment = publisher.deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE ?
new AtomicReferenceArray<>(maxSegment) : null;
this.segmentsToComplete = IntSets.concurrentCopyFrom(publisher.segments, maxSegment);
}
/**
* This method creates a Flowable that when published to will return the values while ensuring retries are
* performed to guarantee the configured delivery guarantees.
* <p>
* This method starts up to 4 concurrent inner subscriptions at the same time. These subscriptions request
* values from their target node for the given segments. If the local node is a target, it is given to the last
* subscription so that the remote requests are all sent first and the local container is processed concurrently
* with them.
* <p>
* Each inner subscriber publishes its returned values, and whether a segment has completed, back to us. When
* all subscribers have completed we check whether all segments have completed; if not, we restart the entire
* process with the segments that have not yet completed.
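* <p>
* Conceptually the flow looks like this (an illustrative pseudo-flow, not the literal implementation):
* <pre>{@code
* do {
*    targets = determineSegmentTargets(topology, segmentsToComplete, localAddress);
*    merge(up to MAX_INNER_SUBSCRIBERS inner subscriptions over targets); // values flow downstream
* } while (!segmentsToComplete.isEmpty()); // repeatUntil drives the retry on a newer topology
* }</pre>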
*/
private <E> Flowable<E> getValuesFlowable(BiFunction<InnerPublisherSubscription.InnerPublisherSubscriptionBuilder<K, I, R>, Map.Entry<Address, IntSet>, Publisher<E>> subToFlowableFunction) {
return Flowable.defer(() -> {
if (!componentRegistry.getStatus().allowInvocations() && !componentRegistry.getStatus().startingUp()) {
return Flowable.error(new IllegalLifecycleStateException());
}
LocalizedCacheTopology topology = distributionManager.getCacheTopology();
int previousTopology = currentTopology;
// Store the current topology in case if we have to retry
int currentTopology = topology.getTopologyId();
this.currentTopology = currentTopology;
Address localAddress = rpcManager.getAddress();
Map<Address, IntSet> targets = determineSegmentTargets(topology, segmentsToComplete, localAddress);
if ((previousTopology != -1 && previousTopology == currentTopology) || targets.isEmpty()) {
int nextTopology = currentTopology + 1;
if (log.isTraceEnabled()) {
log.tracef("Request id %s needs a new topology to retry segments %s. Current topology is %d, with targets %s",
requestId, segmentsToComplete, currentTopology, targets);
}
// When this is complete - the retry below will kick in again and we will have a new topology
return RxJavaInterop.voidCompletionStageToFlowable(stateTransferLock.topologyFuture(nextTopology), true);
}
IntSet localSegments = targets.remove(localAddress);
Iterator<Map.Entry<Address, IntSet>> iterator = targets.entrySet().iterator();
Supplier<Map.Entry<Address, IntSet>> targetSupplier = () -> {
synchronized (this) {
if (iterator.hasNext()) {
return iterator.next();
}
return null;
}
};
Map<Address, Set<K>> excludedKeys;
if (publisher.invocationContext == null) {
excludedKeys = Collections.emptyMap();
} else {
excludedKeys = determineKeyTargets(topology,
(Set<K>) publisher.invocationContext.getLookedUpEntries().keySet(), localAddress,
segmentsToComplete, null);
}
int concurrentPublishers = Math.min(MAX_INNER_SUBSCRIBERS, targets.size() + (localSegments != null ? 1 : 0));
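// Split the batch size evenly across the concurrent publishers, rounding up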
int targetBatchSize = (publisher.batchSize + concurrentPublishers - 1) / concurrentPublishers;
InnerPublisherSubscription.InnerPublisherSubscriptionBuilder<K, I, R> builder =
new InnerPublisherSubscription.InnerPublisherSubscriptionBuilder<>(this, targetBatchSize,
targetSupplier, excludedKeys, currentTopology);
Publisher<E>[] publisherArray = new Publisher[concurrentPublishers];
for (int i = 0; i < concurrentPublishers - 1; ++i) {
publisherArray[i] = subToFlowableFunction.apply(builder, null);
}
// Submit the local target last if necessary (otherwise it is a normal submission).
// This is done last as we want to send all the remote requests first and only process the local
// container concurrently with the remote requests
if (localSegments != null) {
publisherArray[concurrentPublishers - 1] = subToFlowableFunction.apply(builder, new AbstractMap.SimpleEntry<>(localAddress, localSegments));
} else {
publisherArray[concurrentPublishers - 1] = subToFlowableFunction.apply(builder, null);
}
return Flowable.mergeArray(publisherArray);
}).repeatUntil(() -> {
boolean complete = segmentsToComplete.isEmpty();
if (log.isTraceEnabled()) {
if (complete) {
log.tracef("All segments complete for %s", requestId);
} else {
log.tracef("Segments %s not completed - retrying", segmentsToComplete);
}
}
return complete;
});
}
public Flowable<R> start() {
return getValuesFlowable(InnerPublisherSubscription.InnerPublisherSubscriptionBuilder::createValuePublisher);
}
public Flowable<SegmentPublisherSupplier.Notification<R>> startWithSegments() {
return getValuesFlowable(InnerPublisherSubscription.InnerPublisherSubscriptionBuilder::createNotificationPublisher);
}
void completeSegment(int segment) {
segmentsToComplete.remove(segment);
if (keysBySegment != null) {
keysBySegment.set(segment, null);
}
}
CompletionStage<PublisherResponse> sendInitialCommand(Address target, IntSet segments, int batchSize,
Set<K> excludedKeys, int topologyId) {
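// With EXACTLY_ONCE, keys already delivered for these segments are excluded so a retry does not replay them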
if (keysBySegment != null) {
for (PrimitiveIterator.OfInt iter = segments.iterator(); iter.hasNext(); ) {
int segment = iter.nextInt();
Collection<K> keys = keysBySegment.get(segment);
if (keys != null) {
if (excludedKeys == null) {
excludedKeys = new HashSet<>();
}
excludedKeys.addAll(keys);
}
}
}
if (log.isTraceEnabled()) {
log.tracef("Request: %s is initiating publisher request with batch size %d from %s in segments %s", requestId, batchSize,
target, segments);
}
boolean local = target == rpcManager.getAddress();
InitialPublisherCommand cmd = publisher.buildInitialCommand(target, requestId, segments, excludedKeys, batchSize,
local && useContext.getAndSet(false));
if (cmd == null) {
return CompletableFuture.completedFuture(PublisherResponse.emptyResponse(segments, null));
}
// This means the target is local - so skip calling the rpcManager
if (local) {
try {
return (CompletableFuture) cmd.invokeAsync(componentRegistry);
} catch (Throwable throwable) {
return CompletableFuture.failedFuture(throwable);
}
}
cmd.setTopologyId(topologyId);
return rpcManager.invokeCommand(target, cmd, SingleResponseCollector.validOnly(), rpcOptions)
.thenApply(responseHandler);
}
CompletionStage<PublisherResponse> sendNextCommand(Address target, int topologyId) {
if (log.isTraceEnabled()) {
log.tracef("Request: %s is continuing publisher request from %s", requestId, target);
}
// Local command so just return the handler
if (target == rpcManager.getAddress()) {
return publisherHandler.getNext(requestId);
}
NextPublisherCommand cmd = publisher.buildNextCommand(requestId);
cmd.setTopologyId(topologyId);
return rpcManager.invokeCommand(target, cmd, SingleResponseCollector.validOnly(), rpcOptions)
.thenApply(responseHandler);
}
/**
* Handles logging the throwable and cancelling if necessary. Returns whether the publisher should continue processing.
* If <b>false</b> is returned, it is expected that the caller propagates the {@link Throwable} instance, normally
* via {@link Subscriber#onError(Throwable)}.
*/
boolean handleThrowable(Throwable t, Address target, IntSet segments) {
if (t instanceof SuspectException) {
if (log.isTraceEnabled()) {
log.tracef("Received suspect exception for id %s from node %s when requesting segments %s", requestId,
target, segments);
}
return true;
}
if (log.isTraceEnabled()) {
log.tracef(t, "Received exception for id %s from node %s when requesting segments %s", requestId, target,
segments);
}
// Cancel the command on the target node - the remaining publishers are cancelled when the error is propagated downstream
sendCancelCommand(target);
return false;
}
void sendCancelCommand(Address target) {
CancelPublisherCommand command = commandsFactory.buildCancelPublisherCommand(requestId);
CompletionStage<?> stage;
if (target == rpcManager.getAddress()) {
try {
stage = command.invokeAsync(componentRegistry);
} catch (Throwable throwable) {
stage = CompletableFuture.failedFuture(throwable);
}
} else {
stage = rpcManager.invokeCommand(target, command, VoidResponseCollector.ignoreLeavers(),
rpcOptions);
}
if (log.isTraceEnabled()) {
stage.exceptionally(t -> {
log.tracef("There was a problem cancelling publisher for id %s at address %s", requestId, target);
return null;
});
}
}
@Override
public void accept(I value, int segment) {
if (keysBySegment != null) {
Collection<K> keys = keysBySegment.get(segment);
if (keys == null) {
// It really is a Set, but we trust the response to contain unique keys. Also an ArrayList uses less memory
// per entry and resizes better than a Set, and we don't know how many entries we may receive
keys = new ArrayList<>();
keysBySegment.set(segment, keys);
}
K key;
// When tracking keys we always send back the key
if (publisher.shouldTrackKeys) {
key = (K) value;
} else {
// When keys are not tracked the value is the key or entry as-is, so we convert it to a key if necessary (entry)
key = publisher.composedType.toKey(value);
}
if (log.isTraceEnabled()) {
log.tracef("Saving key %s for segment %d for id %s", Util.toStr(key), segment, requestId);
}
keys.add(key);
}
}
}
/**
* Whether the response should contain the keys for the current non-completed segment. Note that we currently
* optimize the case where we know that we get back keys or entries without mapping to a new value. We only require
* key tracking when the delivery guarantee is EXACTLY_ONCE. In some cases we don't need to track keys even then:
* when the transformer is the identity function (detected by being the same instance as {@link MarshallableFunctions#identity()})
* or when the function implements the special interface {@link ModifiedValueFunction} and reports that it retains the original value.
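* <p>
* Illustrative outcomes (a sketch; the value-mapping transformer named here is hypothetical):
* <pre>{@code
* shouldTrackKeys(EXACTLY_ONCE, MarshallableFunctions.identity()); // false - published values are the keys
* shouldTrackKeys(EXACTLY_ONCE, someValueMappingFunction);         // true  - keys are not recoverable
* shouldTrackKeys(AT_LEAST_ONCE, anyTransformer);                  // false - only EXACTLY_ONCE tracks keys
* }</pre>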
* @param deliveryGuarantee guarantee of the data
* @param transformer provided transformer
* @return should keys for the current segment be returned in the response
*/
private static boolean shouldTrackKeys(DeliveryGuarantee deliveryGuarantee, Function<?, ?> transformer) {
if (deliveryGuarantee == DeliveryGuarantee.EXACTLY_ONCE) {
if (transformer == MarshallableFunctions.identity()) {
return false;
} else if (transformer instanceof ModifiedValueFunction) {
return ((ModifiedValueFunction<?, ?>) transformer).isModified();
}
return true;
}
// EXACTLY_ONCE is the only mode where keys are tracked
return false;
}
abstract class AbstractSegmentAwarePublisher<I, R> implements SegmentPublisherSupplier<R> {
final ComposedType<K, I, R> composedType;
final IntSet segments;
final InvocationContext invocationContext;
final long explicitFlags;
final DeliveryGuarantee deliveryGuarantee;
final int batchSize;
final Function<? super Publisher<I>, ? extends Publisher<R>> transformer;
final boolean shouldTrackKeys;
// Prevents the context from being applied for every segment - only the first
final AtomicBoolean usedContext = new AtomicBoolean();
private AbstractSegmentAwarePublisher(ComposedType<K, I, R> composedType, IntSet segments, InvocationContext invocationContext,
long explicitFlags, DeliveryGuarantee deliveryGuarantee, int batchSize,
Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
this.composedType = composedType;
this.segments = segments != null ? segments : IntSets.immutableRangeSet(maxSegment);
this.invocationContext = invocationContext;
this.explicitFlags = explicitFlags;
this.deliveryGuarantee = deliveryGuarantee;
this.batchSize = batchSize;
this.transformer = transformer;
this.shouldTrackKeys = shouldTrackKeys(deliveryGuarantee, transformer);
}
public Publisher<Notification<R>> publisherWithSegments() {
return new SubscriberHandler<I, R>(this, true).startWithSegments();
}
public Publisher<R> publisherWithoutSegments() {
return new SubscriberHandler<I, R>(this, false).start();
}
abstract InitialPublisherCommand buildInitialCommand(Address target, String requestId, IntSet segments,
Set<K> excludedKeys, int batchSize, boolean useContext);
NextPublisherCommand buildNextCommand(String requestId) {
return commandsFactory.buildNextPublisherCommand(requestId);
}
}
private class KeyAwarePublisherImpl<I, R> extends AbstractSegmentAwarePublisher<I, R> {
final Set<K> keysToInclude;
private KeyAwarePublisherImpl(Set<K> keysToInclude, ComposedType<K, I, R> composedType, IntSet segments,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
int batchSize, Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
super(composedType, segments, invocationContext, explicitFlags, deliveryGuarantee, batchSize, transformer);
this.keysToInclude = Objects.requireNonNull(keysToInclude);
}
Set<K> calculateKeysToUse(Set<K> keys, IntSet segments, Set<K> excludedKeys) {
Set<K> results = null;
for (K key : keys) {
if ((excludedKeys == null || !excludedKeys.contains(key)) &&
segments.contains(keyPartitioner.getSegment(key))) {
if (results == null) {
results = new HashSet<>();
}
results.add(key);
}
}
return results;
}
@Override
InitialPublisherCommand buildInitialCommand(Address target, String requestId, IntSet segments, Set<K> excludedKeys,
int batchSize, final boolean useContext) {
Set<K> keysToUse = calculateKeysToUse(keysToInclude, segments, excludedKeys);
if (keysToUse == null) {
return null;
}
Function<? super Publisher<I>, ? extends Publisher<R>> functionToUse;
int lookupEntryCount;
if (useContext && invocationContext != null && (lookupEntryCount = invocationContext.lookedUpEntriesCount()) > 0) {
// We have context values so we must prepend them to the publisher that is provided
functionToUse = (SerializableFunction<Publisher<I>, Publisher<R>>) publisher -> {
if (usedContext.getAndSet(true)) {
return transformer.apply(publisher);
}
List<I> contextValues = new ArrayList<>(lookupEntryCount);
invocationContext.forEachValue((key, entry) -> {
if (keysToInclude.contains(key)) {
contextValues.add(composedType.fromCacheEntry(entry));
}
});
return transformer.apply(Flowable.concat(Flowable.fromIterable(contextValues), publisher));
};
} else {
functionToUse = transformer;
}
DeliveryGuarantee guarantee = deliveryToUse(target, deliveryGuarantee);
return commandsFactory.buildInitialPublisherCommand(requestId, guarantee,
batchSize, segments, keysToUse, excludedKeys, explicitFlags, composedType.isEntry(), shouldTrackKeys,
functionToUse);
}
}
private class SegmentAwarePublisherImpl<I, R> extends AbstractSegmentAwarePublisher<I, R> {
private SegmentAwarePublisherImpl(IntSet segments, ComposedType<K, I, R> composedType,
InvocationContext invocationContext, long explicitFlags, DeliveryGuarantee deliveryGuarantee,
int batchSize, Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
super(composedType, segments, invocationContext, explicitFlags, deliveryGuarantee, batchSize, transformer);
}
@Override
InitialPublisherCommand buildInitialCommand(Address target, String requestId, IntSet segments, Set<K> excludedKeys,
int batchSize, boolean useContext) {
Function<? super Publisher<I>, ? extends Publisher<R>> functionToUse;
int lookupEntryCount;
if (useContext && invocationContext != null && (lookupEntryCount = invocationContext.lookedUpEntriesCount()) > 0) {
// We have context values so we must prepend them to the publisher that is provided
functionToUse = (SerializableFunction<Publisher<I>, Publisher<R>>) publisher -> {
if (usedContext.getAndSet(true)) {
return transformer.apply(publisher);
}
List<I> contextValues = new ArrayList<>(lookupEntryCount);
invocationContext.forEachValue((key, entry) ->
contextValues.add(composedType.fromCacheEntry(entry)));
return transformer.apply(Flowable.concat(Flowable.fromIterable(contextValues), publisher));
};
} else {
functionToUse = transformer;
}
DeliveryGuarantee guarantee = deliveryToUse(target, deliveryGuarantee);
return commandsFactory.buildInitialPublisherCommand(requestId, guarantee,
batchSize, segments, null, excludedKeys, explicitFlags, composedType.isEntry(), shouldTrackKeys, functionToUse);
}
}
private DeliveryGuarantee deliveryToUse(Address target, DeliveryGuarantee desiredGuarantee) {
// When the target is the local node and we have a shared store that doesn't have write behind we don't
// need any special delivery guarantee as our store will hold all entries
return target == null && ((sharedStore || replicatedCache) && !writeBehindShared) ? DeliveryGuarantee.AT_MOST_ONCE : desiredGuarantee;
}
}
| 66,756
| 47.269704
| 196
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/Notifications.java
|
package org.infinispan.reactive.publisher.impl;
import java.util.Objects;
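/**
* Static factory methods for {@link SegmentAwarePublisherSupplier.NotificationWithLost} instances: either a new
* immutable notification per call ({@link #newBuilder()}) or a single mutable instance that is reused across
* calls ({@link #reuseBuilder()}).
*/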
public class Notifications {
private Notifications() {
}
public interface NotificationBuilder<R> {
SegmentAwarePublisherSupplier.NotificationWithLost<R> value(R value, int segment);
SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentComplete(int segment);
SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentLost(int segment);
}
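/**
* Returns a builder that mutates and returns the same notification instance on every call, avoiding an
* allocation per element. This is only safe when each notification is fully consumed before the next one is
* built, as in this sketch (the consume method is hypothetical):
* <pre>{@code
* NotificationBuilder<String> builder = Notifications.reuseBuilder();
* consume(builder.value("a", 0));      // must be done with this notification...
* consume(builder.segmentComplete(0)); // ...before it is overwritten here
* }</pre>
*/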
public static <R> NotificationBuilder<R> reuseBuilder() {
return new ReuseNotificationBuilder<>();
}
public static <R> NotificationBuilder<R> newBuilder() {
return new NewBuilder<>();
}
private static class NewBuilder<R> implements NotificationBuilder<R> {
@Override
public SegmentAwarePublisherSupplier.NotificationWithLost<R> value(R value, int segment) {
return Notifications.value(value, segment);
}
@Override
public SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentComplete(int segment) {
return Notifications.segmentComplete(segment);
}
@Override
public SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentLost(int segment) {
return Notifications.segmentLost(segment);
}
}
static class ReuseNotificationBuilder<R> implements SegmentAwarePublisherSupplier.NotificationWithLost<R>, NotificationBuilder<R> {
// value != null => value and segment
// value == null && segment >= 0 => segment completed
// value == null && segment < 0 => -segment-1 lost
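// e.g. segmentComplete(5) stores segment = 5, while segmentLost(5) stores segment = -6
// and lostSegment() recovers it as -(-6) - 1 = 5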
R value;
int segment;
@Override
public SegmentAwarePublisherSupplier.NotificationWithLost<R> value(R value, int segment) {
if (segment < 0) {
throw new IllegalArgumentException("Segment must be 0 or greater");
}
this.value = value;
this.segment = segment;
return this;
}
@Override
public SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentComplete(int segment) {
this.value = null;
this.segment = segment;
return this;
}
@Override
public SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentLost(int segment) {
this.value = null;
this.segment = -segment - 1;
return this;
}
@Override
public boolean isLostSegment() {
return value == null && segment < 0;
}
@Override
public int lostSegment() {
if (!isLostSegment()) {
return SegmentAwarePublisherSupplier.NotificationWithLost.super.lostSegment();
}
return -segment - 1;
}
@Override
public boolean isValue() {
return value != null;
}
@Override
public boolean isSegmentComplete() {
return value == null && segment >= 0;
}
@Override
public R value() {
if (!isValue()) {
return SegmentAwarePublisherSupplier.NotificationWithLost.super.value();
}
return value;
}
@Override
public int valueSegment() {
if (!isValue()) {
return SegmentAwarePublisherSupplier.NotificationWithLost.super.valueSegment();
}
return segment;
}
@Override
public int completedSegment() {
if (!isSegmentComplete()) {
return SegmentAwarePublisherSupplier.NotificationWithLost.super.completedSegment();
}
return segment;
}
@Override
public String toString() {
return "ReuseNotificationBuilder{" +
(value != null ? "value=" + value : "") +
(value != null ? ", segment=" : (segment > 0 ? "completed segment=" : "lost segment")) + segment +
'}';
}
}
public static <R> SegmentAwarePublisherSupplier.NotificationWithLost<R> value(R value, int segment) {
return new ValueNotification<>(value, segment);
}
public static <R> SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentComplete(int segment) {
return new ValueNotification<>(segment, true);
}
public static <R> SegmentAwarePublisherSupplier.NotificationWithLost<R> segmentLost(int segment) {
return new ValueNotification<>(segment, false);
}
static class ValueNotification<R> implements SegmentAwarePublisherSupplier.NotificationWithLost<R> {
// value != null => value and segment
// value == null && segment >= 0 => segment completed
// value == null && segment < 0 => -segment-1 lost
protected final R value;
protected final int segment;
public ValueNotification(R value, int segment) {
this.value = value;
if (segment < 0) {
throw new IllegalArgumentException("Segment must be 0 or greater");
}
this.segment = segment;
}
public ValueNotification(int segment, boolean segmentComplete) {
this.value = null;
this.segment = segmentComplete ? segment : -segment - 1;
}
@Override
public boolean isLostSegment() {
return segment < 0;
}
@Override
public boolean isValue() {
return value != null;
}
@Override
public boolean isSegmentComplete() {
return value == null && segment >= 0;
}
@Override
public R value() {
if (value != null)
return value;
return SegmentAwarePublisherSupplier.NotificationWithLost.super.value();
}
@Override
public int valueSegment() {
if (value != null)
return segment;
return SegmentAwarePublisherSupplier.NotificationWithLost.super.valueSegment();
}
@Override
public int completedSegment() {
if (value == null && segment >= 0)
return segment;
return SegmentAwarePublisherSupplier.NotificationWithLost.super.completedSegment();
}
@Override
public int lostSegment() {
if (segment < 0)
return -segment - 1;
return SegmentAwarePublisherSupplier.NotificationWithLost.super.lostSegment();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ValueNotification<?> that = (ValueNotification<?>) o;
return segment == that.segment && Objects.equals(value, that.value);
}
@Override
public int hashCode() {
return segment * 31 + Objects.hashCode(value);
}
@Override
public String toString() {
return "ValueNotification{" +
(value != null ? "value=" + value : "") +
(value != null ? ", segment=" : (segment > 0 ? "completed segment=" : "lost segment")) + segment +
'}';
}
}
}
| 6,907
| 28.775862
| 134
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/InnerPublisherSubscription.java
|
package org.infinispan.reactive.publisher.impl;
import java.lang.invoke.MethodHandles;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.IntConsumer;
import java.util.function.Supplier;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.util.IntSet;
import org.infinispan.reactive.publisher.impl.commands.batch.KeyPublisherResponse;
import org.infinispan.reactive.publisher.impl.commands.batch.PublisherResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.functions.Action;
import io.reactivex.rxjava3.functions.LongConsumer;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
/**
* Handles the submission of requests and the handling of responses for an arbitrary number of address/segment
* combinations. Based upon upstream requests, this class sends a request to the target address until it has
* retrieved enough entries to satisfy the request threshold. When a given address can no longer return any
* entries, this subscription tries to process the next address/segment combination until it can no longer
* find any more address/segment targets.
*
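* Illustrative request lifecycle (a pseudo-flow, not the literal implementation):
* <pre>{@code
* accept(n) -> requestedAmount += n; send initial/next command to currentTarget if none pending
* response  -> emit values downstream; when a target is exhausted, currentTarget = supplier.get()
* run()     -> cancelled = true; send a cancel command for any pending target
* }</pre>
*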
* @param <K> the key type of the cache
* @param <I> the intermediate value type produced by the targets (key or entry)
* @param <R> the transformed value type
* @param <E> the element type emitted downstream (either R or a segment notification of R)
*/
public class InnerPublisherSubscription<K, I, R, E> implements LongConsumer, Action {
protected static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final InnerPublisherSubscriptionBuilder<K, I, R> builder;
private final FlowableProcessor<E> flowableProcessor;
private final AtomicLong requestedAmount = new AtomicLong();
// The current address and segments we are processing or null if another one should be acquired
private volatile Map.Entry<Address, IntSet> currentTarget;
// whether this subscription was cancelled by a caller (means we can stop processing)
private volatile boolean cancelled;
// whether the initial request was already sent or not (if so then a next command is used)
private volatile boolean alreadyCreated;
private InnerPublisherSubscription(InnerPublisherSubscriptionBuilder<K, I, R> builder,
FlowableProcessor<E> flowableProcessor, Map.Entry<Address, IntSet> firstTarget) {
this.builder = builder;
this.flowableProcessor = flowableProcessor;
this.currentTarget = firstTarget;
}
public static class InnerPublisherSubscriptionBuilder<K, I, R> {
private final ClusterPublisherManagerImpl<K, ?>.SubscriberHandler<I, R> parent;
private final int batchSize;
private final Supplier<Map.Entry<Address, IntSet>> supplier;
private final Map<Address, Set<K>> excludedKeys;
private final int topologyId;
public InnerPublisherSubscriptionBuilder(ClusterPublisherManagerImpl<K, ?>.SubscriberHandler<I, R> parent,
int batchSize, Supplier<Map.Entry<Address, IntSet>> supplier, Map<Address, Set<K>> excludedKeys,
int topologyId) {
this.parent = parent;
this.batchSize = batchSize;
this.supplier = supplier;
this.excludedKeys = excludedKeys;
this.topologyId = topologyId;
}
Publisher<R> createValuePublisher(Map.Entry<Address, IntSet> firstTarget) {
FlowableProcessor<R> unicastProcessor = UnicastProcessor.create(batchSize);
InnerPublisherSubscription<K, I, R, R> innerPublisherSubscription = new InnerPublisherSubscription<K, I, R, R>(this,
unicastProcessor, firstTarget) {
@Override
protected void doOnValue(R value, int segment) {
unicastProcessor.onNext(value);
}
};
return unicastProcessor.doOnLifecycle(RxJavaInterop.emptyConsumer(), innerPublisherSubscription,
innerPublisherSubscription);
}
Publisher<SegmentPublisherSupplier.Notification<R>> createNotificationPublisher(
Map.Entry<Address, IntSet> firstTarget) {
FlowableProcessor<SegmentPublisherSupplier.Notification<R>> unicastProcessor = UnicastProcessor.create(batchSize);
InnerPublisherSubscription<K, I, R, SegmentPublisherSupplier.Notification<R>> innerPublisherSubscription =
new InnerPublisherSubscription<K, I, R, SegmentPublisherSupplier.Notification<R>>(this, unicastProcessor, firstTarget) {
@Override
protected void doOnValue(R value, int segment) {
unicastProcessor.onNext(Notifications.value(value, segment));
}
@Override
protected void doOnSegmentComplete(int segment) {
unicastProcessor.onNext(Notifications.segmentComplete(segment));
}
};
return unicastProcessor.doOnLifecycle(RxJavaInterop.emptyConsumer(), innerPublisherSubscription,
innerPublisherSubscription);
}
}
/**
* This is invoked when the flowable is completed - need to close any pending publishers
*/
@Override
public void run() {
cancelled = true;
if (alreadyCreated) {
Map.Entry<Address, IntSet> target = currentTarget;
if (target != null) {
builder.parent.sendCancelCommand(target.getKey());
}
}
}
/**
* This method is invoked every time a new request is sent to the underlying publisher. We need to submit a request
* if there is not a pending one. Whenever requestedAmount is greater than 0, either a request must be submitted or
* one is already pending.
* @param count request count
*/
@Override
public void accept(long count) {
if (shouldSubmit(count)) {
if (checkCancelled()) {
return;
}
// Find which address and segments we still need to retrieve - when the supplier returns null that means
// we don't need to do anything else (normal termination state)
Map.Entry<Address, IntSet> target = currentTarget;
if (target == null) {
alreadyCreated = false;
target = builder.supplier.get();
if (target == null) {
if (log.isTraceEnabled()) {
log.tracef("Completing processor %s", flowableProcessor);
}
flowableProcessor.onComplete();
return;
} else {
currentTarget = target;
}
}
ClusterPublisherManagerImpl<K, ?>.SubscriberHandler<I, R> parent = builder.parent;
CompletionStage<PublisherResponse> stage;
Address address = target.getKey();
IntSet segments = target.getValue();
try {
if (alreadyCreated) {
stage = parent.sendNextCommand(address, builder.topologyId);
} else {
alreadyCreated = true;
stage = parent.sendInitialCommand(address, segments, builder.batchSize, builder.excludedKeys.remove(address), builder.topologyId);
}
} catch (Throwable t) {
handleThrowableInResponse(t, address, segments);
return;
}
stage.whenComplete((values, t) -> {
if (t != null) {
handleThrowableInResponse(CompletableFutures.extractException(t), address, segments);
return;
}
try {
if (log.isTraceEnabled()) {
// Note the size of the array may not be the amount of entries as it isn't resized (can contain nulls)
log.tracef("Received %s for id %s from %s", values, parent.requestId, address);
}
IntSet completedSegments = values.getCompletedSegments();
if (completedSegments != null) {
if (log.isTraceEnabled()) {
log.tracef("Completed segments %s for id %s from %s", completedSegments, parent.requestId, address);
}
completedSegments.forEach((IntConsumer) parent::completeSegment);
completedSegments.forEach((IntConsumer) segments::remove);
}
IntSet lostSegments = values.getLostSegments();
if (lostSegments != null) {
if (log.isTraceEnabled()) {
log.tracef("Lost segments %s for id %s from %s", completedSegments, parent.requestId, address);
}
lostSegments.forEach((IntConsumer) segments::remove);
}
boolean complete = values.isComplete();
if (complete) {
// Current address has returned all values it can - setting to null will force the next invocation
// of this method try the next target if available
currentTarget = null;
} else {
values.keysForNonCompletedSegments(parent);
}
R[] valueArray = (R[]) values.getResults();
if (values instanceof KeyPublisherResponse) {
KeyPublisherResponse kpr = (KeyPublisherResponse) values;
int extraSize = kpr.getExtraSize();
if (extraSize > 0) {
int arrayLength = valueArray.length;
Object[] newArray = new Object[arrayLength + extraSize];
System.arraycopy(valueArray, 0, newArray, 0, arrayLength);
System.arraycopy(kpr.getExtraObjects(), 0, newArray, arrayLength, extraSize);
valueArray = (R[]) newArray;
}
}
int pos = 0;
for (PublisherHandler.SegmentResult segmentResult : values.getSegmentResults()) {
if (checkCancelled()) {
return;
}
int segment = segmentResult.getSegment();
for (int i = 0; i < segmentResult.getEntryCount(); ++i) {
R value = valueArray[pos++];
doOnValue(value, segment);
}
if (completedSegments != null && completedSegments.remove(segment)) {
doOnSegmentComplete(segment);
}
}
// Any completed segments left were empty, just complete them together
if (completedSegments != null) {
completedSegments.forEach((IntConsumer) this::doOnSegmentComplete);
}
accept(-pos);
} catch (Throwable innerT) {
handleThrowableInResponse(innerT, address, segments);
}
});
}
}
/**
* Method invoked on each value providing the value and segment. This method is designed to be overridden by an
* extended class.
*
* @param value published value
* @param segment segment of the value
*/
protected void doOnValue(R value, int segment) {
}
/**
* Method invoked whenever a segment is completed. This method is designed to be overridden by an extended class.
*
* @param segment completed segment
*/
protected void doOnSegmentComplete(int segment) {
}
private boolean shouldSubmit(long count) {
while (true) {
long prev = requestedAmount.get();
long newValue = prev + count;
if (requestedAmount.compareAndSet(prev, newValue)) {
// This ensures that only a single submission can be done at one time
// It will only submit if there were none prior (prev <= 0) or if it is the current one (count <= 0).
return newValue > 0 && (prev <= 0 || count <= 0);
}
}
}
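// Worked example of the accounting above: a downstream request of 10 calls accept(10); prev == 0,
// so this thread submits. When the response delivers 4 values, accept(-4) is invoked: newValue == 6
// and count <= 0, so the same "owning" thread submits the next batch. A concurrent accept(5) while
// a request is outstanding only raises the counter (prev > 0 and count > 0) without submitting a
// second, overlapping request.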
// If this method is invoked, the current thread must not continue trying to do any additional processing
private void handleThrowableInResponse(Throwable t, Address address, IntSet segments) {
if (cancelled) {
// If we were cancelled just log the exception - it may not be an actual problem
log.tracef("Encountered exception after subscription was cancelled, this can most likely ignored, message is %s", t.getMessage());
} else if (builder.parent.handleThrowable(t, address, segments)) {
// We were told to continue processing - so ignore those segments and try the next target if possible
// Since we never invoked parent.completeSegment they may get retried
currentTarget = null;
// Try to retrieve entries from the next node if possible
accept(0);
} else {
flowableProcessor.onError(t);
}
}
// Returns whether this subscription has been cancelled
// This method doesn't have to be protected by the requestor accounting, but there is no reason for a caller that
// doesn't hold the requestor "lock" to invoke it
private boolean checkCancelled() {
if (cancelled) {
if (log.isTraceEnabled()) {
log.tracef("Subscription %s was cancelled, terminating early", this);
}
return true;
}
return false;
}
}
| 13,324
| 42.122977
| 145
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/reduction/ReductionPublisherRequestCommand.java
|
package org.infinispan.reactive.publisher.impl.commands.reduction;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.context.Flag;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.reactive.publisher.impl.DeliveryGuarantee;
import org.infinispan.reactive.publisher.impl.LocalPublisherManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.ByteString;
/**
* Stream request command that is sent to remote nodes to handle execution of remote intermediate and terminal operations.
* @param <K> the key type
*/
public class ReductionPublisherRequestCommand<K> extends BaseRpcCommand implements TopologyAffectedCommand {
public static final byte COMMAND_ID = 31;
private boolean parallelStream;
private DeliveryGuarantee deliveryGuarantee;
private IntSet segments;
private Set<K> keys;
private Set<K> excludedKeys;
private long explicitFlags;
private boolean entryStream;
private Function transformer;
private Function finalizer;
private int topologyId = -1;
@Override
public int getTopologyId() {
return topologyId;
}
@Override
public void setTopologyId(int topologyId) {
this.topologyId = topologyId;
}
// Only here for CommandIdUniquenessTest
private ReductionPublisherRequestCommand() { super(null); }
public ReductionPublisherRequestCommand(ByteString cacheName) {
super(cacheName);
}
public ReductionPublisherRequestCommand(ByteString cacheName, boolean parallelStream, DeliveryGuarantee deliveryGuarantee,
IntSet segments, Set<K> keys, Set<K> excludedKeys, long explicitFlags, boolean entryStream,
Function transformer, Function finalizer) {
super(cacheName);
this.parallelStream = parallelStream;
this.deliveryGuarantee = deliveryGuarantee;
this.segments = segments;
this.keys = keys;
this.excludedKeys = excludedKeys;
this.explicitFlags = explicitFlags;
this.entryStream = entryStream;
this.transformer = transformer;
this.finalizer = finalizer;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry componentRegistry) throws Throwable {
if (transformer instanceof InjectableComponent) {
((InjectableComponent) transformer).inject(componentRegistry);
}
if (finalizer instanceof InjectableComponent) {
((InjectableComponent) finalizer).inject(componentRegistry);
}
LocalPublisherManager lpm = componentRegistry.getLocalPublisherManager().running();
if (entryStream) {
return lpm.entryReduction(parallelStream, segments, keys, excludedKeys,
explicitFlags, deliveryGuarantee, transformer, finalizer);
} else {
return lpm.keyReduction(parallelStream, segments, keys, excludedKeys,
explicitFlags, deliveryGuarantee, transformer, finalizer);
}
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeObject(getOrigin());
output.writeBoolean(parallelStream);
MarshallUtil.marshallEnum(deliveryGuarantee, output);
output.writeObject(segments);
MarshallUtil.marshallCollection(keys, output);
MarshallUtil.marshallCollection(excludedKeys, output);
output.writeLong(explicitFlags);
output.writeBoolean(entryStream);
if (transformer == finalizer) {
output.writeBoolean(true);
} else {
output.writeBoolean(false);
output.writeObject(transformer);
}
output.writeObject(finalizer);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
setOrigin((Address) input.readObject());
parallelStream = input.readBoolean();
deliveryGuarantee = MarshallUtil.unmarshallEnum(input, DeliveryGuarantee::valueOf);
segments = (IntSet) input.readObject();
keys = MarshallUtil.unmarshallCollectionUnbounded(input, HashSet::new);
excludedKeys = MarshallUtil.unmarshallCollectionUnbounded(input, HashSet::new);
explicitFlags = input.readLong();
entryStream = input.readBoolean();
boolean same = input.readBoolean();
if (same) {
transformer = (Function) input.readObject();
finalizer = transformer;
} else {
transformer = (Function) input.readObject();
finalizer = (Function) input.readObject();
}
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public String toString() {
return "PublisherRequestCommand{" +
", flags=" + EnumUtil.prettyPrintBitSet(explicitFlags, Flag.class) +
", topologyId=" + topologyId +
", segments=" + segments +
", keys=" + Util.toStr(keys) +
", excludedKeys=" + Util.toStr(excludedKeys) +
", transformer= " + transformer +
", finalizer=" + finalizer +
'}';
}
}
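// Illustrative sketch (not part of the original file): a count-style reduction expressed as the
// transformer/finalizer pair this command carries. The generic signatures and names here are
// assumptions for illustration; the functions Infinispan actually ships are marshallable components.
//
//    Function<Publisher<Object>, CompletionStage<Long>> transformer =
//          publisher -> Flowable.fromPublisher(publisher).count().toCompletionStage();
//    Function<Publisher<Long>, CompletionStage<Long>> finalizer =
//          counts -> Flowable.fromPublisher(counts).reduce(0L, Long::sum).toCompletionStage();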
| 5,613
| 34.987179
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/reduction/PublisherResult.java
|
package org.infinispan.reactive.publisher.impl.commands.reduction;
import java.util.Set;
import org.infinispan.commons.util.IntSet;
/**
* A result from a publisher. It may or may not contain a result. Also depending on the type of publisher operation
* it may or may not contain either suspected segments or suspected keys, but never both.
* @author wburns
* @since 10.0
*/
public interface PublisherResult<R> {
IntSet getSuspectedSegments();
Set<?> getSuspectedKeys();
R getResult();
}
| 506
| 24.35
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/reduction/SegmentPublisherResult.java
|
package org.infinispan.reactive.publisher.impl.commands.reduction;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.IntSet;
/**
* A PublisherResult for an operation that was invoked with segments only
* @author wburns
* @since 10.0
*/
public class SegmentPublisherResult<R> implements PublisherResult<R> {
private final IntSet suspectedSegments;
private final R result;
public SegmentPublisherResult(IntSet suspectedSegments, R result) {
this.suspectedSegments = suspectedSegments;
this.result = result;
}
@Override
public IntSet getSuspectedSegments() {
return suspectedSegments;
}
@Override
public Set<?> getSuspectedKeys() {
return null;
}
@Override
public R getResult() {
return result;
}
@Override
public String toString() {
return "SegmentPublisherResult{" +
"result=" + result +
", suspectedSegments=" + suspectedSegments +
'}';
}
public static class Externalizer implements AdvancedExternalizer<SegmentPublisherResult> {
@Override
public Set<Class<? extends SegmentPublisherResult>> getTypeClasses() {
return Collections.singleton(SegmentPublisherResult.class);
}
@Override
public Integer getId() {
return Ids.SIMPLE_PUBLISHER_RESULT;
}
@Override
public void writeObject(ObjectOutput output, SegmentPublisherResult object) throws IOException {
output.writeObject(object.suspectedSegments);
output.writeObject(object.result);
}
@Override
public SegmentPublisherResult readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new SegmentPublisherResult<>((IntSet) input.readObject(), input.readObject());
}
}
}
| 2,017
| 26.27027
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/reduction/KeyPublisherResult.java
|
package org.infinispan.reactive.publisher.impl.commands.reduction;
import java.util.Set;
import org.infinispan.commons.util.IntSet;
/**
* A PublisherResult for an operation that was invoked with specific keys. Note that this response is only ever created on the
* originator node. This is because we can't have a partial response with key based publishers. Either all results
* are returned or the node crashes or has an exception.
* @author wburns
* @since 10.0
*/
public class KeyPublisherResult<K, R> implements PublisherResult<R> {
private final Set<K> suspectedKeys;
public KeyPublisherResult(Set<K> suspectedKeys) {
this.suspectedKeys = suspectedKeys;
}
@Override
public IntSet getSuspectedSegments() {
return null;
}
@Override
public Set<K> getSuspectedKeys() {
return suspectedKeys;
}
@Override
public R getResult() {
return null;
}
@Override
public String toString() {
return "KeyPublisherResult{" +
", suspectedKeys=" + suspectedKeys +
'}';
}
}
| 1,055
| 23.55814
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/batch/NextPublisherCommand.java
|
package org.infinispan.reactive.publisher.impl.commands.batch;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
import org.infinispan.util.ByteString;
public class NextPublisherCommand extends BaseRpcCommand implements TopologyAffectedCommand {
public static final byte COMMAND_ID = 25;
private String requestId;
private int topologyId = -1;
// Only here for CommandIdUniquenessTest
private NextPublisherCommand() { super(null); }
public NextPublisherCommand(ByteString cacheName) {
super(cacheName);
}
public NextPublisherCommand(ByteString cacheName, String requestId) {
super(cacheName);
this.requestId = requestId;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry componentRegistry) throws Throwable {
PublisherHandler publisherHandler = componentRegistry.getPublisherHandler().running();
return publisherHandler.getNext(requestId);
}
@Override
public int getTopologyId() {
return topologyId;
}
@Override
public void setTopologyId(int topologyId) {
this.topologyId = topologyId;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(requestId);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
requestId = input.readUTF();
}
}
| 1,837
| 26.029412
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/batch/KeyPublisherResponse.java
|
package org.infinispan.reactive.publisher.impl.commands.batch;
import java.util.List;
import java.util.function.ObjIntConsumer;
import org.infinispan.commons.util.IntSet;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
/**
* A Publisher Response that is used when key tracking is enabled. This is used when the EXACTLY_ONCE delivery
* guarantee is needed and a map (that isn't encoder based) or flat map operation is required.
* <p>
* The keys array will hold all of the original keys for the mapped/flatmapped values.
* <p>
* The extraObjects array will only be required when using flatMap based operation. This is required as some flat map
* operations may return more than one value. In this case it is possible to overflow the results array (sized based on
* batch size). However since we are tracking by key we must retain all values that map to a given key in the response.
*/
public class KeyPublisherResponse extends PublisherResponse {
final Object[] extraObjects;
final int extraSize;
final Object[] keys;
// Note that after being deserialized this is always equal to keys.length - locally this is how many entries
// are in the array
final int keySize;
public KeyPublisherResponse(Object[] results, IntSet completedSegments, IntSet lostSegments, int size,
boolean complete, List<PublisherHandler.SegmentResult> segmentResults, Object[] extraObjects, int extraSize,
Object[] keys, int keySize) {
super(results, completedSegments, lostSegments, size, complete, segmentResults);
this.extraObjects = extraObjects;
this.extraSize = extraSize;
this.keys = keys;
this.keySize = keySize;
}
public int getExtraSize() {
return extraSize;
}
public Object[] getExtraObjects() {
return extraObjects;
}
@Override
public void keysForNonCompletedSegments(ObjIntConsumer consumer) {
int size = segmentResults.size();
if (size == 0) {
return;
}
PublisherHandler.SegmentResult segmentResult = segmentResults.get(size - 1);
int segment = segmentResult.getSegment();
for (int i = 0; i < keySize; ++i) {
consumer.accept(keys[i], segment);
}
}
@Override
public String toString() {
return "KeyPublisherResponse{" +
"size=" + size +
", completedSegments=" + completedSegments +
", lostSegments=" + lostSegments +
", complete=" + complete +
", segmentResults=" + segmentResults +
", extraSize=" + extraSize +
", keySize=" + keySize +
'}';
}
}
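// Worked example (not part of the original file): with a batch size of 4 and one slot left, suppose
// the last key in segment 2 flat maps to [a, b, c]. "a" fills the final results slot, [b, c] are
// kept in extraObjects (extraSize == 2), and the originating key is recorded in keys so that all
// values mapping to that key stay together in the response.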
| 2,665
| 36.549296
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/batch/CancelPublisherCommand.java
|
package org.infinispan.reactive.publisher.impl.commands.batch;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
public class CancelPublisherCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 49;
private String requestId;
// Only here for CommandIdUniquenessTest
private CancelPublisherCommand() { super(null); }
public CancelPublisherCommand(ByteString cacheName) {
super(cacheName);
}
public CancelPublisherCommand(ByteString cacheName, String requestId) {
super(cacheName);
this.requestId = requestId;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry componentRegistry) throws Throwable {
PublisherHandler publisherHandler = componentRegistry.getPublisherHandler().running();
publisherHandler.closePublisher(requestId);
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(requestId);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
requestId = input.readUTF();
}
}
| 1,658
| 27.603448
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/batch/InitialPublisherCommand.java
|
package org.infinispan.reactive.publisher.impl.commands.batch;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.commands.functional.functions.InjectableComponent;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.reactive.publisher.impl.DeliveryGuarantee;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
import org.infinispan.util.ByteString;
import org.reactivestreams.Publisher;
public class InitialPublisherCommand<K, I, R> extends BaseRpcCommand implements TopologyAffectedCommand {
public static final byte COMMAND_ID = 18;
private String requestId;
private DeliveryGuarantee deliveryGuarantee;
private int batchSize;
private IntSet segments;
private Set<K> keys;
private Set<K> excludedKeys;
private long explicitFlags;
private boolean entryStream;
private boolean trackKeys;
private Function<? super Publisher<I>, ? extends Publisher<R>> transformer;
private int topologyId = -1;
// Only here for CommandIdUniquenessTest
private InitialPublisherCommand() { super(null); }
public InitialPublisherCommand(ByteString cacheName) {
super(cacheName);
}
public InitialPublisherCommand(ByteString cacheName, String requestId, DeliveryGuarantee deliveryGuarantee,
int batchSize, IntSet segments, Set<K> keys, Set<K> excludedKeys, long explicitFlags, boolean entryStream,
boolean trackKeys, Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
super(cacheName);
this.requestId = requestId;
this.deliveryGuarantee = deliveryGuarantee;
this.batchSize = batchSize;
this.segments = segments;
this.keys = keys;
this.excludedKeys = excludedKeys;
this.explicitFlags = explicitFlags;
this.entryStream = entryStream;
this.trackKeys = trackKeys;
this.transformer = transformer;
}
public String getRequestId() {
return requestId;
}
public DeliveryGuarantee getDeliveryGuarantee() {
return deliveryGuarantee;
}
public int getBatchSize() {
return batchSize;
}
public IntSet getSegments() {
return segments;
}
public Set<K> getKeys() {
return keys;
}
public Set<K> getExcludedKeys() {
return excludedKeys;
}
public long getExplicitFlags() {
return explicitFlags;
}
public boolean isEntryStream() {
return entryStream;
}
public boolean isTrackKeys() {
return trackKeys;
}
public Function<? super Publisher<I>, ? extends Publisher<R>> getTransformer() {
return transformer;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry componentRegistry) throws Throwable {
if (transformer instanceof InjectableComponent) {
((InjectableComponent) transformer).inject(componentRegistry);
}
PublisherHandler publisherHandler = componentRegistry.getPublisherHandler().running();
return publisherHandler.register(this);
}
@Override
public int getTopologyId() {
return topologyId;
}
@Override
public void setTopologyId(int topologyId) {
this.topologyId = topologyId;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(requestId);
MarshallUtil.marshallEnum(deliveryGuarantee, output);
UnsignedNumeric.writeUnsignedInt(output, batchSize);
output.writeObject(segments);
MarshallUtil.marshallCollection(keys, output);
MarshallUtil.marshallCollection(excludedKeys, output);
// Maybe put the booleans into a single byte - only saves 2 bytes though
output.writeLong(explicitFlags);
output.writeBoolean(entryStream);
output.writeBoolean(trackKeys);
output.writeObject(transformer);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
requestId = input.readUTF();
deliveryGuarantee = MarshallUtil.unmarshallEnum(input, DeliveryGuarantee::valueOf);
batchSize = UnsignedNumeric.readUnsignedInt(input);
segments = (IntSet) input.readObject();
keys = MarshallUtil.unmarshallCollectionUnbounded(input, HashSet::new);
excludedKeys = MarshallUtil.unmarshallCollectionUnbounded(input, HashSet::new);
explicitFlags = input.readLong();
entryStream = input.readBoolean();
trackKeys = input.readBoolean();
transformer = (Function) input.readObject();
}
}
| 5,060
| 30.63125
| 115
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/batch/PublisherResponse.java
|
package org.infinispan.reactive.publisher.impl.commands.batch;
import java.util.Collections;
import java.util.List;
import java.util.function.ObjIntConsumer;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
/**
* The response for a cache publisher request to a given node. It contains an array of results along with how many
* entries it holds, which segments were completed or lost during processing, whether the operation has sent all
* values (complete), and per-segment result counts that identify which elements belong to which segment. Note that
* the results will never contain values for a segment that was lost in the same response.
*/
public class PublisherResponse {
final Object[] results;
// The completed segments after this request - This may be null
final IntSet completedSegments;
// The segments that were lost mid processing - This may be null
final IntSet lostSegments;
// How many elements are in the results
// Note that after being deserialized this is always equal to results.length - locally this is how many entries
// are in the array
final int size;
final boolean complete;
final List<PublisherHandler.SegmentResult> segmentResults;
public PublisherResponse(Object[] results, IntSet completedSegments, IntSet lostSegments, int size, boolean complete,
List<PublisherHandler.SegmentResult> segmentResults) {
this.results = results;
this.completedSegments = completedSegments;
this.lostSegments = lostSegments;
this.size = size;
this.complete = complete;
this.segmentResults = segmentResults;
}
public static PublisherResponse emptyResponse(IntSet completedSegments, IntSet lostSegments) {
return new PublisherResponse(Util.EMPTY_OBJECT_ARRAY, completedSegments, lostSegments, 0, true, Collections.emptyList());
}
public Object[] getResults() {
return results;
}
public IntSet getCompletedSegments() {
return completedSegments;
}
public IntSet getLostSegments() {
return lostSegments;
}
public int getSize() {
return size;
}
public boolean isComplete() {
return complete;
}
public List<PublisherHandler.SegmentResult> getSegmentResults() {
return segmentResults;
}
public void keysForNonCompletedSegments(ObjIntConsumer consumer) {
int segmentResultSize = segmentResults.size();
if (segmentResultSize == 0) {
return;
}
// The last segment result holds the entries that weren't completed
PublisherHandler.SegmentResult segmentResult = segmentResults.get(segmentResultSize - 1);
int segment = segmentResult.getSegment();
for (int i = segmentResult.getEntryCount(); i > 0; --i) {
consumer.accept(results[size - i], segment);
}
}
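// Worked example of the method above: with segmentResults [(segment=1, count=3), (segment=4, count=2)]
// and size == 5, only the last segment (4) may be incomplete, so the consumer receives results[3]
// and results[4], each paired with segment 4.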
@Override
public String toString() {
return "PublisherResponse{" +
"size=" + size +
", completedSegments=" + completedSegments +
", lostSegments=" + lostSegments +
", complete=" + complete +
", segmentResults=" + segmentResults +
'}';
}
}
| 3,234
| 34.163043
| 127
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/reactive/publisher/impl/commands/batch/PublisherResponseExternalizer.java
|
package org.infinispan.reactive.publisher.impl.commands.batch;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.infinispan.commons.io.UnsignedNumeric;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.Ids;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.reactive.publisher.impl.PublisherHandler;
public class PublisherResponseExternalizer extends AbstractExternalizer<PublisherResponse> {
@Override
public Integer getId() {
return Ids.PUBLISHER_RESPONSE;
}
@Override
public Set<Class<? extends PublisherResponse>> getTypeClasses() {
return Util.asSet(PublisherResponse.class, KeyPublisherResponse.class);
}
@Override
public void writeObject(ObjectOutput output, PublisherResponse object) throws IOException {
output.writeObject(object.completedSegments);
output.writeObject(object.lostSegments);
output.writeBoolean(object.complete);
UnsignedNumeric.writeUnsignedInt(output, object.segmentResults.size());
for (PublisherHandler.SegmentResult result : object.segmentResults) {
UnsignedNumeric.writeUnsignedInt(output, result.getSegment());
UnsignedNumeric.writeUnsignedInt(output, result.getEntryCount());
}
if (object instanceof KeyPublisherResponse) {
KeyPublisherResponse keyResponse = (KeyPublisherResponse) object;
// Just send the combined count of both arrays - the read side handles both the same way,
// reading the extra objects back as if they were appended to the regular results
UnsignedNumeric.writeUnsignedInt(output, keyResponse.size + keyResponse.extraSize);
for (int i = 0; i < keyResponse.size; ++i) {
output.writeObject(keyResponse.results[i]);
}
for (int i = 0; i < keyResponse.extraSize; ++i) {
output.writeObject(keyResponse.extraObjects[i]);
}
output.writeBoolean(true);
UnsignedNumeric.writeUnsignedInt(output, keyResponse.keySize);
for (int i = 0; i < keyResponse.keySize; ++i) {
output.writeObject(keyResponse.keys[i]);
}
} else {
UnsignedNumeric.writeUnsignedInt(output, object.size);
for (int i = 0; i < object.size; ++i) {
output.writeObject(object.results[i]);
}
output.writeBoolean(false);
}
}
@Override
public PublisherResponse readObject(ObjectInput input) throws IOException, ClassNotFoundException {
IntSet completedSegments = (IntSet) input.readObject();
IntSet lostSegments = (IntSet) input.readObject();
boolean complete = input.readBoolean();
int segmentResultSize = UnsignedNumeric.readUnsignedInt(input);
List<PublisherHandler.SegmentResult> segmentResults = new ArrayList<>(segmentResultSize);
for (int i = 0; i < segmentResultSize; ++i) {
int segment = UnsignedNumeric.readUnsignedInt(input);
int entryCount = UnsignedNumeric.readUnsignedInt(input);
segmentResults.add(new PublisherHandler.SegmentResult(segment, entryCount));
}
int size = UnsignedNumeric.readUnsignedInt(input);
Object[] values = new Object[size];
for (int i = 0; i < size; ++i) {
values[i] = input.readObject();
}
boolean keyResponse = input.readBoolean();
if (keyResponse) {
int keySize = UnsignedNumeric.readUnsignedInt(input);
Object[] keys = new Object[keySize];
for (int i = 0; i < keySize; ++i) {
keys[i] = input.readObject();
}
// All of the extra objects were just smashed into our values - so there is no separate extraObjects array
return new KeyPublisherResponse(values, completedSegments, lostSegments, size, complete, segmentResults, null, 0, keys, keySize);
} else {
return new PublisherResponse(values, completedSegments, lostSegments, size, complete, segmentResults);
}
}
}
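// Wire layout sketch, derived from the two methods above: completedSegments, lostSegments, the
// complete flag, the segmentResult count followed by (segment, entryCount) pairs, a value count
// followed by the values (for key responses this count covers results plus extraObjects), then a
// boolean discriminator and, for key responses only, the key count and the keys themselves.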
| 4,147
| 39.271845
| 138
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/HealthStatus.java
|
package org.infinispan.health;
/**
* General Health status.
*
* @author Sebastian Łaskawiec
* @since 9.0
*/
public enum HealthStatus {
/**
* Given entity is unhealthy.
*
* <p>
* An unhealthy status means that a cache is in {@link org.infinispan.partitionhandling.AvailabilityMode#DEGRADED_MODE}.
* Please keep in mind that in the future additional rules might be added to reflect the unhealthy status of a cache.
* </p>
*/
DEGRADED,
/**
* Given entity is healthy.
*/
HEALTHY,
/**
* The given entity is still initializing.
*
* <p>This can happen when the entity has not yet had time to completely initialize or when it is recovering after a cluster shutdown.</p>
*/
INITIALIZING,
/**
* Given entity is healthy but a rebalance is in progress.
*/
HEALTHY_REBALANCING,
/**
* The cache did not start due to an error.
*/
FAILED
}
| 938
| 21.357143
| 143
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/ClusterHealth.java
|
package org.infinispan.health;
import java.util.List;
import org.infinispan.commons.dataconversion.internal.JsonSerialization;
/**
* Cluster health information.
*/
public interface ClusterHealth extends JsonSerialization {
/**
* Returns total cluster health.
*/
HealthStatus getHealthStatus();
/**
* Returns the name of the cluster.
*/
String getClusterName();
/**
* Returns the number of nodes in the cluster.
*/
int getNumberOfNodes();
/**
* Returns node names.
*/
List<String> getNodeNames();
}
| 563
| 16.625
| 72
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/HostInfo.java
|
package org.infinispan.health;
/**
* Information about the host.
*
* @author Sebastian Łaskawiec
* @since 9.0
*/
public interface HostInfo {
/**
* Returns the number of CPUs installed in the host.
*/
int getNumberOfCpus();
/**
* Gets total memory in KB.
*/
long getTotalMemoryKb();
/**
* Gets free memory in KB.
*/
long getFreeMemoryInKb();
}
| 406
| 14.653846
| 56
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/CacheHealth.java
|
package org.infinispan.health;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.dataconversion.internal.JsonSerialization;
/**
* Cache health information.
*
* @author Sebastian Łaskawiec
* @since 9.0
*/
public interface CacheHealth extends JsonSerialization {
/**
* Returns Cache name.
*/
String getCacheName();
/**
* Returns Cache health status.
*/
HealthStatus getStatus();
default Json toJson() {
return Json.object().set("status", getStatus()).set("cache_name", getCacheName());
}
}
| 578
| 19.678571
| 88
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/Health.java
|
package org.infinispan.health;
import java.util.List;
import java.util.Set;
/**
* An entry point for checking health status.
*
* @author Sebastian Łaskawiec
* @since 9.0
*/
public interface Health {
/**
* Returns Cluster health.
*/
ClusterHealth getClusterHealth();
/**
* Returns per cache health.
*/
List<CacheHealth> getCacheHealth();
/**
* Returns per cache health for the provided cache names.
*
* @param cacheNames the names of the caches to check
*/
List<CacheHealth> getCacheHealth(Set<String> cacheNames);
/**
* Gets basic information about the host.
*/
HostInfo getHostInfo();
}
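// Illustrative sketch (not part of the original file): typical use of this entry point, assuming
// the Health instance is obtained from an EmbeddedCacheManager via getHealth() and a cache named
// "users" exists.
//
//    Health health = cacheManager.getHealth();
//    HealthStatus cluster = health.getClusterHealth().getHealthStatus();
//    health.getCacheHealth(Set.of("users"))
//          .forEach(ch -> System.out.println(ch.getCacheName() + " -> " + ch.getStatus()));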
| 648
| 17.027778
| 61
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/jmx/HealthJMXExposer.java
|
package org.infinispan.health.jmx;
/**
* A Contract for exposing Health API over the JMX.
*
* @author Sebastian Łaskawiec
* @since 9.0
*/
public interface HealthJMXExposer {
/**
* JMX Object name.
*/
String OBJECT_NAME = "CacheContainerHealth";
/**
* Returns the total amount of CPUs for the JVM.
*/
int getNumberOfCpus();
/**
* Returns the amount of total memory (KB) in the host.
*/
long getTotalMemoryKb();
/**
* Returns the amount of free memory (KB) in the host.
*/
long getFreeMemoryKb();
/**
* Returns cluster health status.
*/
String getClusterHealth();
/**
* Returns cluster name.
*/
String getClusterName();
/**
* Returns total nodes in the cluster.
*/
int getNumberOfNodes();
/**
* Returns per Cache statuses.
*/
String[] getCacheHealth();
}
| 911
| 16.882353
| 59
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/impl/HealthImpl.java
|
package org.infinispan.health.impl;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.infinispan.commons.CacheException;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.health.CacheHealth;
import org.infinispan.health.ClusterHealth;
import org.infinispan.health.Health;
import org.infinispan.health.HostInfo;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.security.actions.SecurityActions;
public class HealthImpl implements Health {
private final EmbeddedCacheManager embeddedCacheManager;
private final InternalCacheRegistry internalCacheRegistry;
private final HostInfo hostInfoImpl = new HostInfoImpl();
public HealthImpl(EmbeddedCacheManager embeddedCacheManager, InternalCacheRegistry internalCacheRegistry) {
this.embeddedCacheManager = embeddedCacheManager;
this.internalCacheRegistry = internalCacheRegistry;
}
@Override
public ClusterHealth getClusterHealth() {
return new ClusterHealthImpl(embeddedCacheManager, internalCacheRegistry);
}
@Override
public List<CacheHealth> getCacheHealth() {
return embeddedCacheManager.getCacheNames().stream().map(this::getHealth).collect(Collectors.toList());
}
private CacheHealth getHealth(String cacheName) {
try {
GlobalComponentRegistry gcr = SecurityActions.getGlobalComponentRegistry(embeddedCacheManager);
ComponentRegistry cr = gcr.getNamedComponentRegistry(cacheName);
if (cr == null)
return new InvalidCacheHealth(cacheName);
return new CacheHealthImpl(cr);
} catch (CacheException cacheException) {
return new InvalidCacheHealth(cacheName);
}
}
@Override
public List<CacheHealth> getCacheHealth(Set<String> cacheNames) {
return cacheNames.stream().map(this::getHealth).collect(Collectors.toList());
}
@Override
public HostInfo getHostInfo() {
return hostInfoImpl;
}
}
| 2,111
| 33.064516
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/impl/HostInfoImpl.java
|
package org.infinispan.health.impl;
import org.infinispan.commons.util.ProcessorInfo;
import org.infinispan.health.HostInfo;
class HostInfoImpl implements HostInfo {
@Override
public int getNumberOfCpus() {
return ProcessorInfo.availableProcessors();
}
@Override
public long getTotalMemoryKb() {
return Runtime.getRuntime().totalMemory() / 1024;
}
@Override
public long getFreeMemoryInKb() {
return Runtime.getRuntime().freeMemory() / 1024;
}
}
| 510
| 21.217391
| 57
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/impl/CacheHealthImpl.java
|
package org.infinispan.health.impl;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.health.CacheHealth;
import org.infinispan.health.HealthStatus;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
public class CacheHealthImpl implements CacheHealth {
private final ComponentRegistry cr;
public CacheHealthImpl(ComponentRegistry cr) {
this.cr = cr;
}
@Override
public String getCacheName() {
return cr.getCacheName();
}
@Override
public HealthStatus getStatus() {
if (cr.getStatus() == ComponentStatus.INITIALIZING) return HealthStatus.INITIALIZING;
PartitionHandlingManager partitionHandlingManager = cr.getComponent(PartitionHandlingManager.class);
if (!isComponentHealthy() || partitionHandlingManager.getAvailabilityMode() == AvailabilityMode.DEGRADED_MODE) {
return HealthStatus.DEGRADED;
}
DistributionManager distributionManager = cr.getDistributionManager();
if (distributionManager != null && distributionManager.isRehashInProgress()) {
return HealthStatus.HEALTHY_REBALANCING;
}
return HealthStatus.HEALTHY;
}
private boolean isComponentHealthy() {
switch (cr.getStatus()) {
case INSTANTIATED:
case RUNNING:
return true;
default:
return false;
}
}
}
| 1,551
| 30.04
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/impl/ClusterHealthImpl.java
|
package org.infinispan.health.impl;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.health.CacheHealth;
import org.infinispan.health.ClusterHealth;
import org.infinispan.health.HealthStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.security.actions.SecurityActions;
class ClusterHealthImpl implements ClusterHealth {
private final EmbeddedCacheManager cacheManager;
private final InternalCacheRegistry internalCacheRegistry;
private final GlobalComponentRegistry gcr;
ClusterHealthImpl(EmbeddedCacheManager cacheManager, InternalCacheRegistry internalCacheRegistry) {
this.cacheManager = cacheManager;
this.internalCacheRegistry = internalCacheRegistry;
gcr = SecurityActions.getGlobalComponentRegistry(cacheManager);
}
@Override
public HealthStatus getHealthStatus() {
return Stream.concat(cacheManager.getCacheNames().stream(), internalCacheRegistry.getInternalCacheNames().stream())
.map(this::getCacheHealth)
.filter(Objects::nonNull)
.map(CacheHealth::getStatus)
.filter(h -> !h.equals(HealthStatus.HEALTHY))
.findFirst().orElse(HealthStatus.HEALTHY);
}
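// e.g. statuses encountered as [HEALTHY, HEALTHY_REBALANCING, DEGRADED] report HEALTHY_REBALANCING:
// the first non-HEALTHY status found wins, and HEALTHY is only returned when every cache
// (internal caches included) is healthy.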
private CacheHealth getCacheHealth(String cacheName) {
try {
if (!cacheManager.cacheExists(cacheName))
return null;
ComponentRegistry cr = gcr.getNamedComponentRegistry(cacheName);
if (cr == null)
return new InvalidCacheHealth(cacheName);
return new CacheHealthImpl(cr);
} catch (CacheException cacheException) {
return new InvalidCacheHealth(cacheName);
}
}
@Override
public String getClusterName() {
return cacheManager.getClusterName();
}
@Override
public int getNumberOfNodes() {
return Optional.ofNullable(cacheManager.getMembers()).orElse(Collections.emptyList())
.size();
}
@Override
public List<String> getNodeNames() {
return Optional.ofNullable(cacheManager.getMembers()).orElse(Collections.emptyList())
.stream()
.map(Object::toString)
.collect(Collectors.toList());
}
@Override
public Json toJson() {
return Json.object()
.set("cluster_name", getClusterName())
.set("health_status", getHealthStatus())
.set("number_of_nodes", getNumberOfNodes())
.set("node_names", Json.make(getNodeNames()));
}
}
| 2,882
| 32.137931
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/impl/InvalidCacheHealth.java
|
package org.infinispan.health.impl;
import org.infinispan.health.CacheHealth;
import org.infinispan.health.HealthStatus;
/**
* @since 12.0
*/
final class InvalidCacheHealth implements CacheHealth {
private final String name;
public InvalidCacheHealth(String name) {
this.name = name;
}
@Override
public String getCacheName() {
return name;
}
@Override
public HealthStatus getStatus() {
return HealthStatus.FAILED;
}
}
| 470
| 17.115385
| 55
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/health/impl/jmx/HealthJMXExposerImpl.java
|
package org.infinispan.health.impl.jmx;
import java.util.List;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.health.CacheHealth;
import org.infinispan.health.Health;
import org.infinispan.health.jmx.HealthJMXExposer;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedAttribute;
import org.infinispan.jmx.annotations.Units;
/**
* A JMX exposer (or adapter) for Health API.
*
* @author Sebastian Łaskawiec
* @since 9.0
*/
@MBean(objectName = HealthJMXExposer.OBJECT_NAME, description = "Health Check API")
@Scope(Scopes.GLOBAL)
public class HealthJMXExposerImpl implements HealthJMXExposer {
private final Health health;
public HealthJMXExposerImpl(Health health) {
this.health = health;
}
@ManagedAttribute(displayName = "Number of CPUs in the host", description = "Number of CPUs in the host")
@Override
public int getNumberOfCpus() {
return health.getHostInfo().getNumberOfCpus();
}
@ManagedAttribute(displayName = "The amount of total memory (KB) in the host", description = "The amount of total memory (KB) in the host", units = Units.KILO_BYTES)
@Override
public long getTotalMemoryKb() {
return health.getHostInfo().getTotalMemoryKb();
}
@ManagedAttribute(displayName = "The amount of free memory (KB) in the host", description = "The amount of free memory (KB) in the host", units = Units.KILO_BYTES)
@Override
public long getFreeMemoryKb() {
return health.getHostInfo().getFreeMemoryInKb();
}
@ManagedAttribute(displayName = "Cluster health status", description = "Cluster health status")
@Override
public String getClusterHealth() {
return health.getClusterHealth().getHealthStatus().toString();
}
@ManagedAttribute(displayName = "Cluster name", description = "Cluster name")
@Override
public String getClusterName() {
return health.getClusterHealth().getClusterName();
}
@ManagedAttribute(displayName = "Total nodes in the cluster", description = "Total nodes in the cluster")
@Override
public int getNumberOfNodes() {
return health.getClusterHealth().getNumberOfNodes();
}
@ManagedAttribute(displayName = "Per Cache statuses", description = "Per Cache statuses")
@Override
public String[] getCacheHealth() {
List<CacheHealth> cacheHealths = health.getCacheHealth();
String[] returnValues = new String[cacheHealths.size() * 2];
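// Flattened as alternating name/status pairs, e.g. ["users", "HEALTHY", "sessions", "DEGRADED"]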
for (int i = 0; i < cacheHealths.size(); ++i) {
returnValues[i * 2] = cacheHealths.get(i).getCacheName();
returnValues[i * 2 + 1] = cacheHealths.get(i).getStatus().toString();
}
return returnValues;
}
}
| 2,810
| 35.038462
| 169
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/package-info.java
|
/**
* Package that contains the interface describing the underlying API for storage in Infinispan
*
* @api.public
*/
package org.infinispan.container;
| 156
| 21.428571
| 95
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/DataContainer.java
|
package org.infinispan.container;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.eviction.impl.ActivationManager;
import org.infinispan.eviction.impl.PassivationManager;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
/**
* The main internal data structure which stores entries. Care should be taken when using this directly as entries
* could be stored in a different way than they were given to a {@link org.infinispan.Cache}. If you wish to convert
* entries to the stored format, you should use the provided {@link org.infinispan.encoding.DataConversion} such as
* <pre>
* cache.getAdvancedCache().getKeyDataConversion().toStorage(key);
* </pre>
* when dealing with keys or the following when dealing with values
* <pre>
* cache.getAdvancedCache().getValueDataConversion().toStorage(value);
* </pre>
* You can also convert from storage to the user provided type by using the
* {@link org.infinispan.encoding.DataConversion#fromStorage(Object)} method on any value returned from the DataContainer
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @author Vladimir Blagojevic
* @since 4.0
*/
@Scope(Scopes.NAMED_CACHE)
public interface DataContainer<K, V> extends Iterable<InternalCacheEntry<K, V>> {
/**
* Retrieves a cached entry
*
* @param k key under which entry is stored
* @return entry, if it exists and has not expired, or null if not
* @deprecated since 10.1 - Please use {@link #peek(Object)} instead.
*/
@Deprecated
InternalCacheEntry<K, V> get(Object k);
/**
* Retrieves a cache entry in the same way as {@link #get(Object)}} except that it does not update or reorder any of
* the internal constructs. I.e., expiration does not happen, and in the case of the LRU container, the entry is not
* moved to the end of the chain.
* <p/>
* This method should be used instead of {@link #get(Object)}} when called while iterating through the data container
* using methods like {@link #iterator()} to avoid changing the underlying collection's order.
*
* @param k key under which entry is stored
* @return entry, if it exists, or null if not
*/
InternalCacheEntry<K, V> peek(Object k);
/**
* Puts an entry in the cache along with metadata adding information such as the lifespan of the entry, its maximum
* idle time, version information, etc.
* <p/>
* The {@code key} must be activated by invoking {@link ActivationManager#activateAsync(Object, int)}.
*
* @param k key under which to store entry
* @param v value to store
* @param metadata metadata of the entry
*/
void put(K k, V v, Metadata metadata);
/**
* Tests whether an entry exists in the container
*
* @param k key to test
* @return true if entry exists and has not expired; false otherwise
*/
boolean containsKey(Object k);
/**
* Removes an entry from the cache
* <p/>
* The {@code key} must be activated by invoking {@link ActivationManager#activateAsync(Object, int)}
*
* @param k key to remove
* @return entry removed, or null if it didn't exist or had expired
*/
InternalCacheEntry<K, V> remove(Object k);
/**
* @return count of the number of entries in the container excluding expired entries
* @implSpec
* Default method invokes the {@link #iterator()} method and just counts entries.
*/
default int size() {
int size = 0;
// We have to loop through to make sure to remove expired entries
for (InternalCacheEntry<K, V> ignore : this) {
if (++size == Integer.MAX_VALUE) return Integer.MAX_VALUE;
}
return size;
}
/**
*
* @return count of the number of entries in the container including expired entries
*/
int sizeIncludingExpired();
/**
* Removes all entries in the container
*/
void clear();
/**
* Atomically, it removes the key from {@code DataContainer} and passivates it to persistence.
* <p/>
* The passivation must be done by invoking the method {@link PassivationManager#passivateAsync(InternalCacheEntry)}.
*
* @param key The key to evict.
*/
void evict(K key);
/**
* Computes the new value for the key.
* <p/>
* See {@link org.infinispan.container.DataContainer.ComputeAction#compute(Object,
* org.infinispan.container.entries.InternalCacheEntry, InternalEntryFactory)}.
* <p/>
* The {@code key} must be activated by invoking {@link ActivationManager#activateAsync(Object, int)}.
* <p>
* Note the entry provided to {@link org.infinispan.container.DataContainer.ComputeAction} may be expired as these
* entries are not filtered as many other methods do.
* @param key The key.
* @param action The action that will compute the new value.
* @return The {@link org.infinispan.container.entries.InternalCacheEntry} associated to the key.
*/
InternalCacheEntry<K, V> compute(K key, ComputeAction<K, V> action);
/**
* {@inheritDoc}
* <p>This iterator only returns entries that are not expired, however it will not remove them while doing so.</p>
* @return iterator that doesn't produce expired entries
*/
@Override
Iterator<InternalCacheEntry<K, V>> iterator();
/**
* {@inheritDoc}
* <p>This spliterator only returns entries that are not expired, however it will not remove them while doing so.</p>
* @return spliterator that doesn't produce expired entries
*/
@Override
default Spliterator<InternalCacheEntry<K, V>> spliterator() {
return Spliterators.spliterator(iterator(), sizeIncludingExpired(),
Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
/**
* Same as {@link DataContainer#iterator()} except that it also returns expired entries.
* @return iterator that returns all entries including expired ones
*/
Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired();
/**
* Same as {@link DataContainer#spliterator()} except that it also returns expired entries.
* @return spliterator that returns all entries including expired ones
*/
default Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired() {
return Spliterators.spliterator(iteratorIncludingExpired(), sizeIncludingExpired(),
Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
interface ComputeAction<K, V> {
/**
* Computes the new value for the key.
*
* @return The new {@code InternalCacheEntry} for the key, {@code null} if the entry is to be removed, or {@code
* oldEntry} if the entry is not to be changed (i.e. no entries are added, removed or touched).
*/
InternalCacheEntry<K, V> compute(K key, InternalCacheEntry<K, V> oldEntry, InternalEntryFactory factory);
}
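// Illustrative sketch (not part of the original file): a compute action that inserts a default
// value when the key is absent and otherwise leaves the entry untouched. "defaultValue" and
// "metadata" are assumed to be in scope.
//
//    container.compute(key, (k, oldEntry, factory) ->
//          oldEntry == null ? factory.create(k, defaultValue, metadata) : oldEntry);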
/**
* Resizes the capacity of the underlying container. This is only supported if the container is bounded.
* An {@link UnsupportedOperationException} is thrown otherwise.
*
* @param newSize the new size
*/
default void resize(long newSize) {
throw new UnsupportedOperationException();
}
/**
* Returns the capacity of the underlying container. This is only supported if the container is bounded. An {@link UnsupportedOperationException} is thrown
* otherwise.
*
    * @return the capacity of the underlying container
*/
default long capacity() {
throw new UnsupportedOperationException();
}
/**
    * Returns the current eviction size. This is only supported if the container is bounded; an
    * {@link UnsupportedOperationException} is thrown otherwise. This value will always be lower than the value returned
    * from {@link DataContainer#capacity()}.
    * @return the current eviction size
*/
default long evictionSize() {
throw new UnsupportedOperationException();
}
}
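A minimal sketch of the ComputeAction contract documented above, assuming infinispan-core is on the classpath; the class RemoveIfEquals is hypothetical and not part of Infinispan. Returning null removes the entry, while returning oldEntry leaves the container unchanged.
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalEntryFactory; // package location assumed
public class RemoveIfEquals<K, V> implements DataContainer.ComputeAction<K, V> {
   private final V sentinel;
   public RemoveIfEquals(V sentinel) {
      this.sentinel = sentinel;
   }
   @Override
   public InternalCacheEntry<K, V> compute(K key, InternalCacheEntry<K, V> oldEntry,
                                           InternalEntryFactory factory) {
      if (oldEntry != null && sentinel.equals(oldEntry.getValue())) {
         return null; // null removes the entry from the container
      }
      return oldEntry; // returning oldEntry means "no change"
   }
}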
| 8,222
| 36.894009
| 158
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/NumericVersionGenerator.java
|
package org.infinispan.container.versioning;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
/**
* Generates unique numeric versions for both local and clustered environments.
* When used on clustered caches, node information is used to guarantee versions
* are unique cluster-wide.
*
 * If the cache is configured to be local, the version generated is based
 * on an atomic counter. By contrast, if the cache is clustered, the
 * generated version is composed of:
* [view id (2 bytes)][rank (2 bytes)][version counter (4 bytes)], where rank
* refers to the position of this node within the view.
*
* @author Galder Zamarreño
* @since 5.3
*/
@Scope(Scopes.NAMED_CACHE)
public class NumericVersionGenerator implements VersionGenerator {
   // TODO: Possibly seed version counter on capped System.currentTimeMillis, to avoid issues with clients holding on to versions between restarts
final AtomicInteger versionCounter = new AtomicInteger();
private static final NumericVersion NON_EXISTING = new NumericVersion(0);
@Inject Configuration configuration;
@Inject RankCalculator rankCalculator;
private boolean isClustered;
@Start
public void start() {
isClustered = configuration.clustering().cacheMode().isClustered();
}
@Override
public IncrementableEntryVersion generateNew() {
long counter = versionCounter.incrementAndGet();
return createNumericVersion(counter);
}
private IncrementableEntryVersion createNumericVersion(long counter) {
// Version counter occupies the least significant 4 bytes of the version
return isClustered
? new NumericVersion(rankCalculator.getVersionPrefix() | counter)
: new NumericVersion(counter);
}
@Override
public IncrementableEntryVersion increment(IncrementableEntryVersion initialVersion) {
if (initialVersion instanceof NumericVersion) {
NumericVersion old = (NumericVersion) initialVersion;
long counter = old.getVersion() + 1;
return createNumericVersion(counter);
}
throw CONTAINER.unexpectedInitialVersion(initialVersion.getClass().getName());
}
@Override
public IncrementableEntryVersion nonExistingVersion() {
return NON_EXISTING;
}
void resetCounter() {
versionCounter.set(0);
}
}
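The clustered version layout described above can be reproduced with plain JDK arithmetic. The following demo is illustrative only (it is not Infinispan API) and the view id, rank, and counter values are invented.
public class VersionLayoutDemo {
   public static void main(String[] args) {
      long viewId = 3, rank = 2, counter = 41;
      // [view id (2 bytes)][rank (2 bytes)][version counter (4 bytes)]
      long prefix = (viewId << 48) | (rank << 32); // the part RankCalculator computes
      long version = prefix | counter;             // the part createNumericVersion combines
      System.out.printf("version=0x%016x%n", version); // prints version=0x0003000200000029
   }
}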
| 2,642
| 33.324675
| 146
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/IncrementableEntryVersion.java
|
package org.infinispan.container.versioning;
/**
* An interface indicating that a version of this type can be incremented.
*
* @author Manik Surtani
* @since 5.1
*/
public interface IncrementableEntryVersion extends EntryVersion {
}
| 239
| 20.818182
| 74
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/EntryVersion.java
|
package org.infinispan.container.versioning;
/**
* A version is used to compare entries against one another. Versions do not guarantee contiguity, but do guarantee
 * to be comparable. However, this comparability is not the same as the JDK's {@link Comparable} interface. It is
* richer in that {@link Comparable} doesn't differentiate between instances that are the same versus instances that
* are equal-but-different.
*
* @author Manik Surtani
* @since 5.1
*/
public interface EntryVersion {
/**
* Compares the given version against the current instance.
* @param other the other version to compare against
* @return a InequalVersionComparisonResult instance
*/
InequalVersionComparisonResult compareTo(EntryVersion other);
}
| 762
| 35.333333
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/SimpleClusteredVersionGenerator.java
|
package org.infinispan.container.versioning;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.annotation.TopologyChanged;
import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent;
/**
* A version generator implementation for SimpleClusteredVersions
*
* @author Manik Surtani
* @since 5.1
*/
@Scope(Scopes.NAMED_CACHE)
public class SimpleClusteredVersionGenerator implements VersionGenerator {
// The current cache topology ID is recorded and used as a part of the version generated, and as such used as the
// most significant part of a version comparison. If a version is generated based on an old cache topology and another is
// generated based on a newer topology, the one based on the newer topology wins regardless of the version's counter.
// See SimpleClusteredVersion for more details.
private volatile int topologyId = -1;
private static final SimpleClusteredVersion NON_EXISTING = new SimpleClusteredVersion(0, 0);
@Inject CacheNotifier<?, ?> cacheNotifier;
@Start(priority = 11)
public void start() {
cacheNotifier.addListener(new TopologyIdUpdater());
}
@Override
public IncrementableEntryVersion generateNew() {
if (topologyId == -1) {
throw new IllegalStateException("Topology id not set yet");
}
return new SimpleClusteredVersion(topologyId, 1);
}
@Override
public IncrementableEntryVersion increment(IncrementableEntryVersion initialVersion) {
if (initialVersion instanceof SimpleClusteredVersion) {
SimpleClusteredVersion old = (SimpleClusteredVersion) initialVersion;
return new SimpleClusteredVersion(topologyId, old.getVersion() + 1);
} else {
throw new IllegalArgumentException("I only know how to deal with SimpleClusteredVersions, not " + initialVersion.getClass().getName());
}
}
@Override
public IncrementableEntryVersion nonExistingVersion() {
return NON_EXISTING;
}
@Listener
public class TopologyIdUpdater {
@TopologyChanged
public void onTopologyChange(TopologyChangedEvent<?, ?> tce) {
topologyId = tce.getNewTopologyId();
}
}
}
| 2,480
| 36.029851
| 144
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/VersionGenerator.java
|
package org.infinispan.container.versioning;
/**
* Generates versions
*
* @author Manik Surtani
* @since 5.1
*/
public interface VersionGenerator {
/**
* Generates a new entry version
* @return a new entry version
*/
IncrementableEntryVersion generateNew();
IncrementableEntryVersion increment(IncrementableEntryVersion initialVersion);
IncrementableEntryVersion nonExistingVersion();
}
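A hedged usage sketch of this contract, exercising the local-mode NumericVersionGenerator shown earlier; start() is deliberately not called, so the generator stays non-clustered.
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.container.versioning.InequalVersionComparisonResult;
import org.infinispan.container.versioning.NumericVersionGenerator;
import org.infinispan.container.versioning.VersionGenerator;
public class VersionGeneratorDemo {
   public static void main(String[] args) {
      VersionGenerator gen = new NumericVersionGenerator();
      IncrementableEntryVersion v1 = gen.generateNew(); // counter becomes 1
      IncrementableEntryVersion v2 = gen.increment(v1); // derives 2; v1 is untouched
      assert v1.compareTo(v2) == InequalVersionComparisonResult.BEFORE; // run with -ea
   }
}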
| 419
| 20
| 81
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/InequalVersionComparisonResult.java
|
package org.infinispan.container.versioning;
/**
* Versions can be compared to each other to result in one version being before, after or at the same time as another
* version. This is different from the JDK's {@link Comparable} interface, which is much more simplistic in that it
* doesn't differentiate between something that is the same versus equal-but-different.
*
* @author Manik Surtani
* @since 5.1
*/
public enum InequalVersionComparisonResult {
/**
* Denotes a version that was created temporally <i>before</i> another version.
*/
BEFORE,
/**
* Denotes a version that was created temporally <i>after</i> another version.
*/
AFTER,
/**
* Denotes that the two versions being compared are equal.
*/
EQUAL,
/**
* Denotes a version that was created at the same time as another version, but is not equal. This is only really
* useful when using a partition-aware versioning scheme, such as vector or Lamport clocks.
*/
CONFLICTING
}
| 1,007
| 32.6
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/RankCalculator.java
|
package org.infinispan.container.versioning;
import java.lang.invoke.MethodHandles;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachemanagerlistener.CacheManagerNotifier;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Computes the version prefix to be used by {@link NumericVersionGenerator} in clustered caches.
*
* @since 14.0
*/
@Scope(Scopes.GLOBAL)
@Listener
public class RankCalculator {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
final AtomicLong versionPrefix = new AtomicLong();
@Inject CacheManagerNotifier cacheManagerNotifier;
@Inject Transport transport;
@Start
void start() {
if (transport != null) {
cacheManagerNotifier.addListener(this);
updateRank(transport.getAddress(), transport.getMembers(), transport.getViewId());
}
}
@Stop
void stop() {
if (transport != null) {
cacheManagerNotifier.removeListener(this);
}
}
@ViewChanged
public void updateRank(ViewChangedEvent e) {
long rank = updateRank(e.getLocalAddress(), e.getNewMembers(), e.getViewId());
if (log.isTraceEnabled())
log.tracef("Calculated rank based on view %s and result was %d", e, rank);
}
public long getVersionPrefix() {
return versionPrefix.get();
}
private long updateRank(Address address, List<Address> members, long viewId) {
long rank = members.indexOf(address) + 1;
// Version is composed of: <view id (2 bytes)><rank (2 bytes)><version counter (4 bytes)>
// View id and rank form the prefix which is updated on a view change.
long newVersionPrefix = (viewId << 48) | (rank << 32);
versionPrefix.set(newVersionPrefix);
return versionPrefix.get();
}
}
| 2,431
| 32.777778
| 96
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/SimpleClusteredVersion.java
|
package org.infinispan.container.versioning;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.marshall.core.Ids;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import net.jcip.annotations.Immutable;
/**
* A simple versioning scheme that is cluster-aware
*
* @author Manik Surtani
* @since 5.1
*/
@Immutable
@ProtoTypeId(ProtoStreamTypeIds.SIMPLE_CLUSTERED_VERSION)
public class SimpleClusteredVersion implements IncrementableEntryVersion {
/**
* The cache topology id in which it was first created.
*/
private final int topologyId;
private final long version;
@ProtoFactory
public SimpleClusteredVersion(int topologyId, long version) {
this.version = version;
this.topologyId = topologyId;
}
@ProtoField(number = 1, defaultValue = "-1")
public int getTopologyId() {
return topologyId;
}
@ProtoField(number = 2, defaultValue = "-1")
public long getVersion() {
return version;
}
@Override
public InequalVersionComparisonResult compareTo(EntryVersion other) {
if (other instanceof SimpleClusteredVersion) {
SimpleClusteredVersion otherVersion = (SimpleClusteredVersion) other;
if (topologyId > otherVersion.topologyId)
return InequalVersionComparisonResult.AFTER;
if (topologyId < otherVersion.topologyId)
return InequalVersionComparisonResult.BEFORE;
if (version > otherVersion.version)
return InequalVersionComparisonResult.AFTER;
if (version < otherVersion.version)
return InequalVersionComparisonResult.BEFORE;
return InequalVersionComparisonResult.EQUAL;
} else {
throw new IllegalArgumentException("I only know how to deal with SimpleClusteredVersions, not " + other.getClass().getName());
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SimpleClusteredVersion that = (SimpleClusteredVersion) o;
return topologyId == that.topologyId &&
version == that.version;
}
@Override
public int hashCode() {
return Objects.hash(topologyId, version);
}
@Override
public String toString() {
return "SimpleClusteredVersion{" +
"topologyId=" + topologyId +
", version=" + version +
'}';
}
public static class Externalizer extends AbstractExternalizer<SimpleClusteredVersion> {
@Override
public void writeObject(ObjectOutput output, SimpleClusteredVersion ch) throws IOException {
output.writeInt(ch.topologyId);
output.writeLong(ch.version);
}
@Override
public SimpleClusteredVersion readObject(ObjectInput unmarshaller) throws IOException, ClassNotFoundException {
int topologyId = unmarshaller.readInt();
long version = unmarshaller.readLong();
return new SimpleClusteredVersion(topologyId, version);
}
@Override
public Integer getId() {
return Ids.SIMPLE_CLUSTERED_VERSION;
}
@Override
public Set<Class<? extends SimpleClusteredVersion>> getTypeClasses() {
return Collections.singleton(SimpleClusteredVersion.class);
}
}
}
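An illustrative check (not a test from the codebase) that the topology id dominates the counter in the compareTo implementation above.
import org.infinispan.container.versioning.InequalVersionComparisonResult;
import org.infinispan.container.versioning.SimpleClusteredVersion;
public class SimpleClusteredVersionDemo {
   public static void main(String[] args) {
      SimpleClusteredVersion a = new SimpleClusteredVersion(1, 100);
      SimpleClusteredVersion b = new SimpleClusteredVersion(2, 1);
      // A version created under a newer topology wins regardless of its counter.
      assert a.compareTo(b) == InequalVersionComparisonResult.BEFORE; // run with -ea
   }
}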
| 3,670
| 29.591667
| 135
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/NumericVersion.java
|
package org.infinispan.container.versioning;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.marshall.core.Ids;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
/**
* Numeric version
*
* @author Galder Zamarreño
* @since 5.3
*/
@ProtoTypeId(ProtoStreamTypeIds.NUMERIC_VERSION)
public class NumericVersion implements IncrementableEntryVersion {
private final long version;
@ProtoFactory
public NumericVersion(long version) {
this.version = version;
}
@ProtoField(number = 1, defaultValue = "-1")
public long getVersion() {
return version;
}
@Override
public InequalVersionComparisonResult compareTo(EntryVersion other) {
if (other instanceof NumericVersion) {
NumericVersion otherVersion = (NumericVersion) other;
if (version < otherVersion.version)
return InequalVersionComparisonResult.BEFORE;
else if (version > otherVersion.version)
return InequalVersionComparisonResult.AFTER;
else
return InequalVersionComparisonResult.EQUAL;
}
throw new IllegalArgumentException(
"Unable to compare other types: " + other.getClass().getName());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
NumericVersion that = (NumericVersion) o;
return version == that.version;
}
@Override
public int hashCode() {
return (int) (version ^ (version >>> 32));
}
@Override
public String toString() {
return "NumericVersion{" +
"version=" + version +
'}';
}
public static class Externalizer extends AbstractExternalizer<NumericVersion> {
@Override
public Set<Class<? extends NumericVersion>> getTypeClasses() {
return Collections.singleton(NumericVersion.class);
}
@Override
public void writeObject(ObjectOutput output, NumericVersion object) throws IOException {
output.writeLong(object.version);
}
@Override
public NumericVersion readObject(ObjectInput input) throws IOException {
return new NumericVersion(input.readLong());
}
@Override
public Integer getId() {
return Ids.NUMERIC_VERSION;
}
}
}
| 2,684
| 26.397959
| 94
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/IracEntryVersion.java
|
package org.infinispan.container.versioning.irac;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.BiConsumer;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.container.versioning.InequalVersionComparisonResult;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.XSiteNamedCache;
/**
* An entry version for the IRAC algorithm (async cross site replication).
* <p>
 * It is represented as a vector clock where each site keeps its version.
* <p>
 * The site version is composed of a pair (topology id, version).
*
* @author Pedro Ruivo
* @see TopologyIracVersion
* @since 11.0
*/
@ProtoTypeId(ProtoStreamTypeIds.IRAC_VERSION)
public class IracEntryVersion {
private final MapEntry[] vectorClock;
private IracEntryVersion(MapEntry[] vectorClock) {
this.vectorClock = vectorClock;
}
public static IracEntryVersion newVersion(ByteString site, TopologyIracVersion version) {
return new IracEntryVersion(new MapEntry[] {new MapEntry(site, version)});
}
@ProtoFactory
static IracEntryVersion protoFactory(List<MapEntry> entries) {
MapEntry[] vc = entries.toArray(new MapEntry[entries.size()]);
Arrays.sort(vc);
return new IracEntryVersion(vc);
}
@ProtoField(number = 1, collectionImplementation = ArrayList.class)
List<MapEntry> entries() {
return Arrays.asList(vectorClock);
}
/**
* Iterates over all entries of this version as pairs (site name, site version).
*
* @param consumer The {@link BiConsumer}.
*/
public void forEach(BiConsumer<ByteString, TopologyIracVersion> consumer) {
for (MapEntry entry : vectorClock) {
consumer.accept(entry.site, entry.version);
}
}
/**
* Compares this instance with another {@link IracEntryVersion} instance.
* @param other The other {@link IracEntryVersion} instance.
* @return A {@link InequalVersionComparisonResult} instance with the compare result.
*/
public InequalVersionComparisonResult compareTo(IracEntryVersion other) {
VectorClockComparator comparator = new VectorClockComparator(Math.max(vectorClock.length, other.vectorClock.length));
forEach(comparator::setOurs);
other.forEach(comparator::setTheirs);
Merger merger = Merger.NONE;
for (VersionCompare v : comparator.values()) {
merger = merger.accept(v);
}
return merger.result();
}
public IracEntryVersion merge(IracEntryVersion other) {
if (other == null) {
return this;
}
TreeMap<ByteString, TopologyIracVersion> copy = toTreeMap(vectorClock);
for (MapEntry entry : other.vectorClock) {
copy.merge(entry.site, entry.version, TopologyIracVersion::max);
}
return new IracEntryVersion(toMapEntryArray(copy));
}
public TopologyIracVersion getVersion(ByteString siteName) {
int index = Arrays.binarySearch(vectorClock, searchKey(siteName));
return index >= 0 ? vectorClock[index].version : null;
}
public int getTopology(ByteString siteName) {
TopologyIracVersion version = getVersion(siteName);
return version == null ? 0 : version.getTopologyId();
}
public IracEntryVersion increment(ByteString siteName, int topologyId) {
TreeMap<ByteString, TopologyIracVersion> map = toTreeMap(vectorClock);
TopologyIracVersion existing = map.get(siteName);
if (existing == null) {
map.put(siteName, TopologyIracVersion.newVersion(topologyId));
} else {
map.put(siteName, existing.increment(topologyId));
}
return new IracEntryVersion(toMapEntryArray(map));
}
@Override
public String toString() {
List<String> entries = new LinkedList<>();
forEach((site, version) -> entries.add(site + "=" + version));
return "(" + String.join(", ", entries) + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
IracEntryVersion other = (IracEntryVersion) o;
return Arrays.equals(vectorClock, other.vectorClock);
}
@Override
public int hashCode() {
return Arrays.hashCode(vectorClock);
}
private static MapEntry[] toMapEntryArray(TreeMap<ByteString, TopologyIracVersion> map) {
int length = map.size();
MapEntry[] entries = new MapEntry[length];
int index = 0;
for (Map.Entry<ByteString, TopologyIracVersion> e : map.entrySet()) {
entries[index++] = new MapEntry(e.getKey(), e.getValue());
}
return entries;
}
private static TreeMap<ByteString, TopologyIracVersion> toTreeMap(MapEntry[] entries) {
TreeMap<ByteString, TopologyIracVersion> copy = new TreeMap<>();
for (MapEntry entry : entries) {
copy.put(entry.site, entry.version);
}
return copy;
}
private static MapEntry searchKey(ByteString site) {
return new MapEntry(site, null);
}
private enum Merger {
NONE {
@Override
Merger accept(VersionCompare versions) {
int compare = versions.ours.compareTo(versions.theirs);
if (compare < 0) {
return OLD;
} else if (compare > 0) {
return NEW;
}
return EQUALS;
}
@Override
InequalVersionComparisonResult result() {
throw new IllegalStateException();
}
},
OLD {
@Override
Merger accept(VersionCompare versions) {
int compare = versions.ours.compareTo(versions.theirs);
if (compare < 0) {
return OLD;
} else if (compare > 0) {
return CONFLICT;
}
return OLD_OR_EQUALS;
}
@Override
InequalVersionComparisonResult result() {
return InequalVersionComparisonResult.BEFORE;
}
},
OLD_OR_EQUALS {
@Override
Merger accept(VersionCompare versions) {
int compare = versions.ours.compareTo(versions.theirs);
return compare <= 0 ? OLD_OR_EQUALS : CONFLICT;
}
@Override
InequalVersionComparisonResult result() {
return InequalVersionComparisonResult.BEFORE;
}
},
NEW {
@Override
Merger accept(VersionCompare versions) {
int compare = versions.ours.compareTo(versions.theirs);
if (compare > 0) {
return NEW;
} else if (compare < 0) {
return CONFLICT;
}
return NEW_OR_EQUALS;
}
@Override
InequalVersionComparisonResult result() {
return InequalVersionComparisonResult.AFTER;
}
},
NEW_OR_EQUALS {
@Override
Merger accept(VersionCompare versions) {
int compare = versions.ours.compareTo(versions.theirs);
return compare < 0 ? CONFLICT : NEW_OR_EQUALS;
}
@Override
InequalVersionComparisonResult result() {
return InequalVersionComparisonResult.AFTER;
}
},
EQUALS {
@Override
Merger accept(VersionCompare versions) {
int compare = versions.ours.compareTo(versions.theirs);
if (compare < 0) {
return OLD_OR_EQUALS;
} else if (compare > 0) {
return NEW_OR_EQUALS;
}
return EQUALS;
}
@Override
InequalVersionComparisonResult result() {
return InequalVersionComparisonResult.EQUAL;
}
},
CONFLICT {
@Override
Merger accept(VersionCompare versions) {
//no-op
return CONFLICT;
}
@Override
InequalVersionComparisonResult result() {
return InequalVersionComparisonResult.CONFLICTING;
}
};
abstract Merger accept(VersionCompare versions);
abstract InequalVersionComparisonResult result();
}
@ProtoTypeId(ProtoStreamTypeIds.IRAC_VERSION_ENTRY)
public static class MapEntry implements Comparable<MapEntry> {
final ByteString site;
@ProtoField(2)
final TopologyIracVersion version;
@ProtoFactory
MapEntry(String site, TopologyIracVersion version) {
this(XSiteNamedCache.cachedByteString(site), version);
}
MapEntry(ByteString site, TopologyIracVersion version) {
this.site = site;
this.version = version;
}
@ProtoField(1)
public String getSite() {
return site.toString();
}
@Override
public String toString() {
return "MapEntry{" +
"site='" + site + '\'' +
", version=" + version +
'}';
}
@Override
public int compareTo(MapEntry o) {
return site.compareTo(o.site);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MapEntry entry = (MapEntry) o;
return site.equals(entry.site) && version.equals(entry.version);
}
@Override
public int hashCode() {
int result = site.hashCode();
result = 31 * result + version.hashCode();
return result;
}
}
private static class VersionCompare {
TopologyIracVersion ours;
TopologyIracVersion theirs;
@Override
public String toString() {
return "VersionCompare{" +
"ours=" + ours +
", theirs=" + theirs +
'}';
}
}
private static class VectorClockComparator {
private final Map<ByteString, VersionCompare> vectorClock;
VectorClockComparator(int capacity) {
vectorClock = new HashMap<>(capacity);
}
@Override
public String toString() {
return "VectorClock{" +
"vectorClock=" + vectorClock +
'}';
}
void setOurs(ByteString site, TopologyIracVersion version) {
VersionCompare v = vectorClock.get(site);
if (v == null) {
v = new VersionCompare();
vectorClock.put(site, v);
}
v.ours = version;
if (v.theirs == null) {
v.theirs = TopologyIracVersion.NO_VERSION;
}
}
void setTheirs(ByteString site, TopologyIracVersion version) {
VersionCompare v = vectorClock.get(site);
if (v == null) {
v = new VersionCompare();
vectorClock.put(site, v);
}
v.theirs = version;
if (v.ours == null) {
v.ours = TopologyIracVersion.NO_VERSION;
}
}
Collection<VersionCompare> values() {
return vectorClock.values();
}
}
}
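A hedged sketch of how the vector clock detects concurrent cross-site updates; it assumes ByteString.fromString exists with this signature, and the site names are invented.
import org.infinispan.container.versioning.InequalVersionComparisonResult;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.container.versioning.irac.TopologyIracVersion;
import org.infinispan.util.ByteString;
public class IracConflictDemo {
   public static void main(String[] args) {
      ByteString nyc = ByteString.fromString("NYC");
      ByteString lon = ByteString.fromString("LON");
      IracEntryVersion base = IracEntryVersion.newVersion(nyc, TopologyIracVersion.newVersion(1));
      IracEntryVersion v1 = base.increment(nyc, 1); // site NYC updates the key
      IracEntryVersion v2 = base.increment(lon, 1); // site LON updates it concurrently
      // Neither vector clock dominates the other, so the merge logic reports a conflict.
      assert v1.compareTo(v2) == InequalVersionComparisonResult.CONFLICTING; // run with -ea
   }
}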
| 11,368
| 28.453368
| 123
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/DefaultIracTombstoneManager.java
|
package org.infinispan.container.versioning.irac;
import static org.infinispan.commons.util.concurrent.CompletableFutures.completedNull;
import static org.infinispan.remoting.transport.impl.VoidResponseCollector.ignoreLeavers;
import static org.infinispan.remoting.transport.impl.VoidResponseCollector.validOnly;
import java.lang.invoke.MethodHandles;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.irac.IracTombstoneCleanupCommand;
import org.infinispan.commands.irac.IracTombstonePrimaryCheckCommand;
import org.infinispan.commands.irac.IracTombstoneRemoteSiteCheckCommand;
import org.infinispan.commands.irac.IracTombstoneStateResponseCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.BackupConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.XSiteStateTransferConfiguration;
import org.infinispan.distribution.DistributionInfo;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.reactive.RxJavaInterop;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.irac.IracExecutor;
import org.infinispan.xsite.irac.IracManager;
import org.infinispan.xsite.irac.IracXSiteBackup;
import org.infinispan.xsite.status.SiteState;
import org.infinispan.xsite.status.TakeOfflineManager;
import io.reactivex.rxjava3.annotations.NonNull;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.CompletableObserver;
import io.reactivex.rxjava3.core.CompletableSource;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.disposables.Disposable;
import io.reactivex.rxjava3.functions.Predicate;
import net.jcip.annotations.GuardedBy;
/**
* A default implementation for {@link IracTombstoneManager}.
* <p>
 * This class is responsible for keeping track of the tombstones for the IRAC algorithm. Tombstones are used when a key is
* removed but its metadata is necessary to detect possible conflicts in this and remote sites. When all sites have
* updated the key, the tombstone can be removed.
* <p>
* Tombstones are removed periodically in the background.
*
* @since 14.0
*/
@Scope(Scopes.NAMED_CACHE)
public class DefaultIracTombstoneManager implements IracTombstoneManager {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private static final int ACTION_COUNT = Action.values().length;
private static final BiFunction<Set<Address>, List<Address>, Set<Address>> ADD_ALL_TO_SET = (set, list) -> {
set.addAll(list);
return set;
};
private static final BinaryOperator<Set<Address>> MERGE_SETS = (set, set2) -> {
set.addAll(set2);
return set;
};
private static final BiConsumer<Void, Throwable> TRACE_ROUND_COMPLETED = (__, throwable) -> {
if (throwable != null) {
log.trace("[IRAC] Tombstone cleanup round failed!", throwable);
} else {
log.trace("[IRAC] Tombstone cleanup round finished!");
}
};
@Inject DistributionManager distributionManager;
@Inject RpcManager rpcManager;
@Inject CommandsFactory commandsFactory;
@Inject TakeOfflineManager takeOfflineManager;
@Inject ComponentRef<IracManager> iracManager;
@ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
@Inject ScheduledExecutorService scheduledExecutorService;
@Inject BlockingManager blockingManager;
private final Map<Object, IracTombstoneInfo> tombstoneMap;
private final IracExecutor iracExecutor;
private final Collection<IracXSiteBackup> asyncBackups;
private final Scheduler scheduler;
private volatile boolean stopped = true;
private final int batchSize;
private final int segmentCount;
public DefaultIracTombstoneManager(Configuration config, Collection<IracXSiteBackup> backups) {
iracExecutor = new IracExecutor(this::performCleanup);
asyncBackups = backups;
tombstoneMap = new ConcurrentHashMap<>(config.sites().tombstoneMapSize());
scheduler = new Scheduler(config.sites().tombstoneMapSize(), config.sites().maxTombstoneCleanupDelay());
batchSize = config.sites().asyncBackupsStream()
.map(BackupConfiguration::stateTransfer)
.map(XSiteStateTransferConfiguration::chunkSize)
.reduce(1, Integer::max);
segmentCount = config.clustering().hash().numSegments();
}
@Start
public void start() {
iracExecutor.setExecutor(blockingManager.asExecutor(commandsFactory.getCacheName() + "-tombstone-cleanup"));
stopped = false;
scheduler.disabled = false;
scheduler.scheduleWithCurrentDelay();
}
@Stop
public void stop() {
stopped = true;
stopCleanupTask();
// drop everything
tombstoneMap.clear();
}
// for testing purposes only!
public void stopCleanupTask() {
scheduler.disable();
}
public void storeTombstone(int segment, Object key, IracMetadata metadata) {
IracTombstoneInfo tombstone = new IracTombstoneInfo(key, segment, metadata);
tombstoneMap.put(key, tombstone);
if (log.isTraceEnabled()) {
log.tracef("[IRAC] Tombstone stored: %s", tombstone);
}
}
@Override
public void storeTombstoneIfAbsent(IracTombstoneInfo tombstone) {
if (tombstone == null) {
return;
}
boolean added = tombstoneMap.putIfAbsent(tombstone.getKey(), tombstone) == null;
if (log.isTraceEnabled()) {
log.tracef("[IRAC] Tombstone stored? %s. %s", added, tombstone);
}
}
@Override
public IracMetadata getTombstone(Object key) {
IracTombstoneInfo tombstone = tombstoneMap.get(key);
return tombstone == null ? null : tombstone.getMetadata();
}
@Override
public void removeTombstone(IracTombstoneInfo tombstone) {
if (tombstone == null) {
return;
}
boolean removed = tombstoneMap.remove(tombstone.getKey(), tombstone);
if (log.isTraceEnabled()) {
log.tracef("[IRAC] Tombstone removed? %s. %s", removed, tombstone);
}
}
@Override
public void removeTombstone(Object key) {
IracTombstoneInfo tombstone = tombstoneMap.remove(key);
if (tombstone != null && log.isTraceEnabled()) {
log.tracef("[IRAC] Tombstone removed %s", tombstone);
}
}
@Override
public boolean isEmpty() {
return tombstoneMap.isEmpty();
}
@Override
public int size() {
return tombstoneMap.size();
}
@Override
public boolean isTaskRunning() {
return scheduler.running;
}
@Override
public long getCurrentDelayMillis() {
return scheduler.currentDelayMillis;
}
@Override
public void sendStateTo(Address requestor, IntSet segments) {
StateTransferHelper helper = new StateTransferHelper(requestor, segments);
Flowable.fromIterable(tombstoneMap.values())
.filter(helper)
.buffer(batchSize)
.concatMapCompletableDelayError(helper)
.subscribe(helper);
}
@Override
public void checkStaleTombstone(Collection<? extends IracTombstoneInfo> tombstones) {
boolean trace = log.isTraceEnabled();
if (trace) {
log.tracef("[IRAC] Checking for stale tombstones from backup owner. %s", tombstones);
}
LocalizedCacheTopology topology = distributionManager.getCacheTopology();
IntSet segments = IntSets.mutableEmptySet(segmentCount);
IracTombstoneCleanupCommand cmd = commandsFactory.buildIracTombstoneCleanupCommand(tombstones.size());
for (IracTombstoneInfo tombstone : tombstones) {
IracTombstoneInfo data = tombstoneMap.get(tombstone.getKey());
if (!topology.getSegmentDistribution(tombstone.getSegment()).isPrimary() || (tombstone.equals(data))) {
// not a primary owner or the data is the same (i.e. it is valid)
continue;
}
segments.add(tombstone.getSegment());
cmd.add(tombstone);
}
if (cmd.isEmpty()) {
if (trace) {
log.trace("[IRAC] Nothing to send.");
}
return;
}
int membersSize = distributionManager.getCacheTopology().getMembers().size();
Collection<Address> owners = segments.intStream()
.mapToObj(segment -> getSegmentDistribution(segment).writeOwners())
.reduce(new HashSet<>(membersSize), ADD_ALL_TO_SET, MERGE_SETS);
if (trace) {
log.tracef("[IRAC] Cleaning up %d tombstones: %s", cmd.getTombstonesToRemove().size(), cmd.getTombstonesToRemove());
}
rpcManager.sendToMany(owners, cmd, DeliverOrder.NONE);
}
// Testing purposes
public void startCleanupTombstone() {
iracExecutor.run();
}
// Testing purposes
public void runCleanupAndWait() {
performCleanup().toCompletableFuture().join();
}
// Testing purposes
public boolean contains(IracTombstoneInfo tombstone) {
return tombstone.equals(tombstoneMap.get(tombstone.getKey()));
}
private CompletionStage<Void> performCleanup() {
if (stopped) {
return CompletableFutures.completedNull();
}
boolean trace = log.isTraceEnabled();
if (trace) {
log.trace("[IRAC] Starting tombstone cleanup round.");
}
scheduler.onTaskStarted(tombstoneMap.size());
CompletionStage<Void> stage = Flowable.fromIterable(tombstoneMap.values())
.groupBy(this::classifyTombstone)
// We are using flatMap to allow for actions to be done in parallel, but the requests in each action
// must be performed sequentially
.flatMap(group -> {
switch (group.getKey()) {
case REMOVE_TOMBSTONE:
return removeAllTombstones(group);
case NOTIFY_PRIMARY_OWNER:
return notifyPrimaryOwner(group);
case CHECK_REMOTE_SITE:
return checkRemoteSite(group);
case KEEP_TOMBSTONE:
default:
return Flowable.empty();
}
}, true, ACTION_COUNT, ACTION_COUNT)
.lastStage(null);
if (trace) {
stage = stage.whenComplete(TRACE_ROUND_COMPLETED);
}
return stage.whenComplete(scheduler);
}
private DistributionInfo getSegmentDistribution(int segment) {
return distributionManager.getCacheTopology().getSegmentDistribution(segment);
}
private Flowable<Void> removeAllTombstones(Flowable<IracTombstoneInfo> flowable) {
return flowable.concatMapDelayError(tombstone -> {
try {
removeTombstone(tombstone);
return Flowable.empty();
} catch (Throwable t) {
return Flowable.error(t);
}
});
}
private Flowable<Void> notifyPrimaryOwner(Flowable<IracTombstoneInfo> flowable) {
return flowable.groupBy(IracTombstoneInfo::getSegment)
            // With groupBy we need to subscribe to all segments eagerly, but we only want to process sequentially,
// so we are using concatMapEager
.concatMapEagerDelayError(segment -> segment.buffer(batchSize).concatMapDelayError(
tombstones -> new PrimaryOwnerCheckTask(segment.getKey(), tombstones).check()),
true, segmentCount, segmentCount);
}
private Flowable<Void> checkRemoteSite(Flowable<IracTombstoneInfo> flowable) {
return flowable.buffer(batchSize)
.concatMapDelayError(tombstoneMap -> new CleanupTask(tombstoneMap).check());
}
private Action classifyTombstone(IracTombstoneInfo tombstone) {
DistributionInfo info = getSegmentDistribution(tombstone.getSegment());
if (!info.isWriteOwner() && !info.isReadOwner()) {
// not an owner, remove tombstone from local map
return Action.REMOVE_TOMBSTONE;
} else if (!info.isPrimary()) {
         // backup owner, notify primary owner to check if the tombstone can be cleaned up
return iracManager.running().containsKey(tombstone.getKey()) ? Action.KEEP_TOMBSTONE : Action.NOTIFY_PRIMARY_OWNER;
} else {
// primary owner, check all remote sites.
return iracManager.running().containsKey(tombstone.getKey()) ? Action.KEEP_TOMBSTONE : Action.CHECK_REMOTE_SITE;
}
}
private final class CleanupTask implements Function<Void, CompletionStage<Void>>, Runnable {
private final Collection<IracTombstoneInfo> tombstoneToCheck;
private final IntSet tombstoneToKeep;
private final int id;
private volatile boolean failedToCheck;
private CleanupTask(Collection<IracTombstoneInfo> tombstoneToCheck) {
this.tombstoneToCheck = tombstoneToCheck;
tombstoneToKeep = IntSets.concurrentSet(tombstoneToCheck.size());
failedToCheck = false;
id = tombstoneToCheck.hashCode();
}
Flowable<Void> check() {
if (log.isTraceEnabled()) {
log.tracef("[cleanup-task-%d] Running cleanup task with %s tombstones to check", id, tombstoneToCheck.size());
}
if (tombstoneToCheck.isEmpty()) {
return Flowable.empty();
}
List<Object> keys = tombstoneToCheck.stream()
.map(IracTombstoneInfo::getKey)
.collect(Collectors.toList());
IracTombstoneRemoteSiteCheckCommand cmd = commandsFactory.buildIracTombstoneRemoteSiteCheckCommand(keys);
      // if one of the sites returns true (i.e. the key is in the updateKeys map), then do not remove it
AggregateCompletionStage<Void> stage = CompletionStages.aggregateCompletionStage();
for (IracXSiteBackup backup : asyncBackups) {
if (takeOfflineManager.getSiteState(backup.getSiteName()) == SiteState.OFFLINE) {
continue; // backup is offline
}
// we don't need the tombstone to query the remote site
stage.dependsOn(rpcManager.invokeXSite(backup, cmd).thenAccept(this::mergeIntSet));
}
// in case of exception, keep the tombstone
return RxJavaInterop.voidCompletionStageToFlowable(blockingManager.thenComposeBlocking(
stage.freeze().exceptionally(this::onException), this, "tombstone-response"));
}
private void mergeIntSet(IntSet rsp) {
if (log.isTraceEnabled()) {
log.tracef("[cleanup-task-%d] Received response: %s", id, rsp);
}
tombstoneToKeep.addAll(rsp);
}
private Void onException(Throwable ignored) {
if (log.isTraceEnabled()) {
log.tracef(ignored, "[cleanup-task-%d] Received exception", id);
}
failedToCheck = true;
return null;
}
@Override
public CompletionStage<Void> apply(Void aVoid) {
IracTombstoneCleanupCommand cmd = commandsFactory.buildIracTombstoneCleanupCommand(tombstoneToCheck.size());
forEachTombstoneToRemove(cmd::add);
if (log.isTraceEnabled()) {
log.tracef("[cleanup-task-%d] Removing %d tombstones.", id, cmd.getTombstonesToRemove().size());
}
if (cmd.isEmpty()) {
// nothing to remove
return completedNull();
}
int membersSize = distributionManager.getCacheTopology().getMembers().size();
Collection<Address> owners = tombstoneToCheck.stream()
.mapToInt(IracTombstoneInfo::getSegment)
.distinct()
.mapToObj(segment -> getSegmentDistribution(segment).writeOwners())
.reduce(new HashSet<>(membersSize), ADD_ALL_TO_SET, MERGE_SETS);
         // send cleanup to all write owners
return rpcManager.invokeCommand(owners, cmd, validOnly(), rpcManager.getSyncRpcOptions()).thenRun(this);
}
@Override
public void run() {
forEachTombstoneToRemove(DefaultIracTombstoneManager.this::removeTombstone);
}
void forEachTombstoneToRemove(Consumer<IracTombstoneInfo> consumer) {
if (failedToCheck) {
return;
}
int index = 0;
for (IracTombstoneInfo tombstone : tombstoneToCheck) {
if (tombstoneToKeep.contains(index++)) {
continue;
}
consumer.accept(tombstone);
}
}
}
private class StateTransferHelper implements Predicate<IracTombstoneInfo>,
io.reactivex.rxjava3.functions.Function<Collection<IracTombstoneInfo>, CompletableSource>,
CompletableObserver {
private final Address requestor;
private final IntSet segments;
private StateTransferHelper(Address requestor, IntSet segments) {
this.requestor = requestor;
this.segments = segments;
}
@Override
public boolean test(IracTombstoneInfo tombstone) {
return segments.contains(tombstone.getSegment());
}
@Override
public CompletableSource apply(Collection<IracTombstoneInfo> state) {
RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
IracTombstoneStateResponseCommand cmd = commandsFactory.buildIracTombstoneStateResponseCommand(state);
CompletionStage<Void> rsp = rpcManager.invokeCommand(requestor, cmd, ignoreLeavers(), rpcOptions);
return Completable.fromCompletionStage(rsp);
}
@Override
public void onSubscribe(@NonNull Disposable d) {
//no-op
}
@Override
public void onComplete() {
if (log.isDebugEnabled()) {
log.debugf("Tombstones transferred to %s for segments %s", requestor, segments);
}
}
@Override
public void onError(@NonNull Throwable e) {
log.failedToTransferTombstones(requestor, segments, e);
}
}
private final class Scheduler implements BiConsumer<Void, Throwable> {
final int targetSize;
final long maxDelayMillis;
int preCleanupSize;
int previousPostCleanupSize;
long currentDelayMillis;
volatile boolean running;
volatile boolean disabled;
@GuardedBy("this")
ScheduledFuture<?> future;
private Scheduler(int targetSize, long maxDelayMillis) {
this.targetSize = targetSize;
this.maxDelayMillis = maxDelayMillis;
currentDelayMillis = maxDelayMillis / 2;
}
void onTaskStarted(int size) {
running = true;
preCleanupSize = size;
}
void onTaskCompleted(int postCleanupSize) {
if (postCleanupSize >= targetSize) {
// The tombstones map is already at or above the target size, start a new cleanup round immediately
// Keep the delay >= 1 to simplify the tombstoneCreationRate calculation
currentDelayMillis = 1;
} else {
// Estimate how long it would take for the tombstones map to reach the target size
double tombstoneCreationRate = (preCleanupSize - previousPostCleanupSize) * 1.0 / currentDelayMillis;
double estimationMillis;
if (tombstoneCreationRate <= 0) {
// The tombstone map will never reach the target size, use the maximum delay
estimationMillis = maxDelayMillis;
} else {
// Ensure that 1 <= estimation <= maxDelayMillis
estimationMillis = Math.min((targetSize - postCleanupSize) / tombstoneCreationRate + 1, maxDelayMillis);
}
// Use a geometric average between the current estimation and the previous one
// to dampen the changes as the rate changes from one interval to the next
// (especially when the interval duration is very short)
currentDelayMillis = Math.round(Math.sqrt(currentDelayMillis * estimationMillis));
}
previousPostCleanupSize = postCleanupSize;
scheduleWithCurrentDelay();
}
synchronized void scheduleWithCurrentDelay() {
running = false;
if (stopped || disabled) {
return;
}
if (future != null) {
future.cancel(true);
}
future = scheduledExecutorService.schedule(iracExecutor, currentDelayMillis, TimeUnit.MILLISECONDS);
}
synchronized void disable() {
disabled = true;
if (future != null) {
future.cancel(true);
future = null;
}
}
@Override
public void accept(Void unused, Throwable throwable) {
// invoked after the cleanup round
onTaskCompleted(tombstoneMap.size());
}
}
private class PrimaryOwnerCheckTask {
private final int segment;
private final Collection<IracTombstoneInfo> tombstones;
private PrimaryOwnerCheckTask(int segment, Collection<IracTombstoneInfo> tombstones) {
this.segment = segment;
this.tombstones = tombstones;
assert consistencyCheck();
}
Flowable<Void> check() {
if (tombstones.isEmpty()) {
return Flowable.empty();
}
IracTombstonePrimaryCheckCommand cmd = commandsFactory.buildIracTombstonePrimaryCheckCommand(tombstones);
RpcOptions rpcOptions = rpcManager.getSyncRpcOptions();
CompletionStage<Void> rsp = rpcManager.invokeCommand(getSegmentDistribution(segment).primary(), cmd, ignoreLeavers(), rpcOptions);
return RxJavaInterop.voidCompletionStageToFlowable(rsp);
}
private boolean consistencyCheck() {
return tombstones.stream().allMatch(tombstoneInfo -> tombstoneInfo.getSegment() == segment);
}
}
private enum Action {
KEEP_TOMBSTONE,
REMOVE_TOMBSTONE,
CHECK_REMOTE_SITE,
NOTIFY_PRIMARY_OWNER
}
}
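The adaptive delay computed by the Scheduler above can be followed with plain arithmetic. This standalone demo replicates the onTaskCompleted logic under invented sizes and delays.
public class TombstoneDelayDemo {
   public static void main(String[] args) {
      long maxDelayMillis = 30_000;
      long currentDelayMillis = 15_000;
      int targetSize = 512_000;
      int preCleanupSize = 4_000, previousPostCleanupSize = 1_000, postCleanupSize = 2_000;
      // Estimated tombstone creation rate over the last interval (entries per ms).
      double rate = (preCleanupSize - previousPostCleanupSize) * 1.0 / currentDelayMillis;
      double estimationMillis = rate <= 0
            ? maxDelayMillis // the map would never reach the target size
            : Math.min((targetSize - postCleanupSize) / rate + 1, maxDelayMillis);
      // The geometric average dampens swings between consecutive rounds.
      long nextDelay = Math.round(Math.sqrt(currentDelayMillis * estimationMillis));
      System.out.println("next cleanup in " + nextDelay + " ms"); // ~21213 ms here
   }
}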
| 23,427
| 37.852405
| 139
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/NoOpIracTombstoneManager.java
|
package org.infinispan.container.versioning.irac;
import java.util.Collection;
import org.infinispan.commons.util.IntSet;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.remoting.transport.Address;
/**
* No-op implementation for {@link IracTombstoneManager}.
* <p>
* It is used when IRAC is not enabled.
*
* @since 14.0
*/
public final class NoOpIracTombstoneManager implements IracTombstoneManager {
private static final NoOpIracTombstoneManager INSTANCE = new NoOpIracTombstoneManager();
private NoOpIracTombstoneManager() {
}
public static NoOpIracTombstoneManager getInstance() {
return INSTANCE;
}
@Override
public void storeTombstone(int segment, Object key, IracMetadata metadata) {
//no-op
}
@Override
public void storeTombstoneIfAbsent(IracTombstoneInfo tombstone) {
//no-op
}
@Override
public void removeTombstone(IracTombstoneInfo tombstone) {
//no-op
}
@Override
public void removeTombstone(Object key) {
//no-op
}
@Override
public IracMetadata getTombstone(Object key) {
return null;
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public int size() {
return 0;
}
@Override
public boolean isTaskRunning() {
return false;
}
@Override
public long getCurrentDelayMillis() {
return 0;
}
@Override
public void sendStateTo(Address requestor, IntSet segments) {
//no-op
}
@Override
public void checkStaleTombstone(Collection<? extends IracTombstoneInfo> tombstones) {
//no-op
}
}
| 1,635
| 18.95122
| 91
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/IracVersionGenerator.java
|
package org.infinispan.container.versioning.irac;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.topology.CacheTopology;
/**
* A version generator for the IRAC protocol.
* <p>
 * The version is segment-based, and each new version is ordered after the previous one.
*
* @author Pedro Ruivo
* @since 11.0
*/
public interface IracVersionGenerator extends Lifecycle {
/**
* Generates a new {@link IracMetadata} for a given {@code segment}.
* <p>
* The {@link IracEntryVersion} created is always higher than the previous one for the same {@code segment}.
*
* @param segment The segment.
* @return The {@link IracMetadata} created.
*/
IracMetadata generateNewMetadata(int segment);
/**
* Generate a new {@link IracMetadata} for a given {@code segment}.
* <p>
    * The {@link IracEntryVersion} created will be the same as the previous one for the same {@code segment}. If no
    * version existed previously, an initial version is created.
*
* @param segment The segment.
* @return The {@link IracMetadata} created.
*/
IracMetadata generateMetadataWithCurrentVersion(int segment);
/**
* Same as {@link #generateNewMetadata(int)} but it makes sure the new version is higher than {@code versionSeen}.
*
* @param segment The segment.
* @param versionSeen The {@link IracEntryVersion} seen before. Can be {@code null}.
* @return The {@link IracMetadata} created.
*/
IracMetadata generateNewMetadata(int segment, IracEntryVersion versionSeen);
/**
* Updates the version for the {@code segment} with a new {@code remoteVersion} seen.
* <p>
    * This method should merge the internally stored current version and the {@code remoteVersion} to produce an
    * {@link IracEntryVersion} higher than both.
*
* @param segment The segment.
* @param remoteVersion The remote {@link IracEntryVersion} received.
*/
void updateVersion(int segment, IracEntryVersion remoteVersion);
/**
* Invoked when a topology change occurs in the cluster.
*
* @param newTopology The new {@link CacheTopology}
*/
void onTopologyChange(CacheTopology newTopology);
}
| 2,276
| 34.030769
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/TopologyIracVersion.java
|
package org.infinispan.container.versioning.irac;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.infinispan.protostream.descriptors.Type;
/**
* The version stored per {@link CacheEntry} for IRAC.
* <p>
 * It is composed of a topology id and a version. The topology id is incremented when the topology changes, and the
 * version on each update.
*
* @author Pedro Ruivo
* @since 11.0
*/
@ProtoTypeId(ProtoStreamTypeIds.IRAC_SITE_VERSION)
public class TopologyIracVersion implements Comparable<TopologyIracVersion> {
public static final TopologyIracVersion NO_VERSION = new TopologyIracVersion(0, 0);
private static final Pattern PARSE_PATTERN = Pattern.compile("\\((\\d+):(\\d+)\\)");
private final int topologyId;
private final long version;
private TopologyIracVersion(int topologyId, long version) {
this.topologyId = topologyId;
this.version = version;
}
@ProtoFactory
public static TopologyIracVersion create(int topologyId, long version) {
return topologyId == 0 && version == 0 ? NO_VERSION : new TopologyIracVersion(topologyId, version);
}
public static TopologyIracVersion newVersion(int currentTopologyId) {
return new TopologyIracVersion(currentTopologyId, 1);
}
public static TopologyIracVersion max(TopologyIracVersion v1, TopologyIracVersion v2) {
return v1.compareTo(v2) < 0 ? v2 : v1;
}
public static TopologyIracVersion fromString(String s) {
Matcher m = PARSE_PATTERN.matcher(s);
if (!m.find()) {
return null;
}
int topology = Integer.parseInt(m.group(1));
long version = Long.parseLong(m.group(2));
return new TopologyIracVersion(topology, version);
}
@ProtoField(number = 1, type = Type.UINT32, defaultValue = "0")
public int getTopologyId() {
return topologyId;
}
@ProtoField(number = 2, type = Type.UINT64, defaultValue = "0")
public long getVersion() {
return version;
}
public TopologyIracVersion increment(int currentTopologyId) {
return currentTopologyId > topologyId ?
TopologyIracVersion.newVersion(currentTopologyId) :
new TopologyIracVersion(topologyId, version + 1);
}
@Override
public int compareTo(TopologyIracVersion other) {
int topologyCompare = Integer.compare(topologyId, other.topologyId);
return topologyCompare == 0 ? Long.compare(version, other.version) : topologyCompare;
}
@Override
public String toString() {
return '(' + Integer.toString(topologyId) + ':' + version + ')';
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
TopologyIracVersion that = (TopologyIracVersion) o;
return this.topologyId == that.topologyId &&
this.version == that.version;
}
@Override
public int hashCode() {
int result = topologyId;
result = 31 * result + (int) (version ^ (version >>> 32));
return result;
}
}
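An illustrative round-trip through the API above; the comments show the (topology:version) pairs produced at each step.
import org.infinispan.container.versioning.irac.TopologyIracVersion;
public class TopologyIracVersionDemo {
   public static void main(String[] args) {
      TopologyIracVersion v = TopologyIracVersion.newVersion(5); // (5:1)
      TopologyIracVersion same = v.increment(5);                 // same topology: (5:2)
      TopologyIracVersion later = v.increment(6);                // newer topology restarts at (6:1)
      assert same.compareTo(later) < 0;                          // topology id is compared first
      assert later.equals(TopologyIracVersion.fromString("(6:1)")); // run with -ea
   }
}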
| 3,388
| 30.672897
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/IracTombstoneManager.java
|
package org.infinispan.container.versioning.irac;
import java.util.Collection;
import org.infinispan.commands.irac.IracTombstoneCleanupCommand;
import org.infinispan.commons.util.IntSet;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.remoting.transport.Address;
/**
* Stores and manages tombstones for removed keys.
* <p>
 * It manages the tombstones for the IRAC protocol. Tombstones are used when a key is removed but the version/metadata is
 * required to perform conflict or duplicate detection.
* <p>
 * Tombstones are removed when they are no longer required by any site or their value is updated (with a non-null value).
*
* @since 14.0
*/
public interface IracTombstoneManager {
/**
* Stores a tombstone for a removed key.
* <p>
    * It overwrites any previous tombstone associated with the {@code key}.
*
    * @param segment The key's segment.
    * @param key The key.
* @param metadata The {@link IracMetadata}.
*/
void storeTombstone(int segment, Object key, IracMetadata metadata);
/**
* Same as {@link #storeTombstone(int, Object, IracMetadata)} but it doesn't overwrite an existing tombstone.
*
    * @param segment The key's segment.
    * @param key The key.
* @param metadata The {@link IracMetadata}.
*/
default void storeTombstoneIfAbsent(int segment, Object key, IracMetadata metadata) {
if (metadata == null) {
return;
}
storeTombstoneIfAbsent(new IracTombstoneInfo(key, segment, metadata));
}
/**
* Same as {@link #storeTombstoneIfAbsent(int, Object, IracMetadata)} but with a {@link IracTombstoneInfo} instance.
*
* @param tombstone The tombstone to store.
*/
void storeTombstoneIfAbsent(IracTombstoneInfo tombstone);
/**
* Removes the tombstone if it matches.
*
* @param tombstone The {@link IracTombstoneInfo}.
*/
void removeTombstone(IracTombstoneInfo tombstone);
/**
* Removes the tombstone for {@code key}.
*
* @param key The key.
*/
void removeTombstone(Object key);
/**
    * Returns the tombstone associated with the {@code key} or {@code null} if it doesn't exist.
*
* @param key The key.
* @return The tombstone.
*/
IracMetadata getTombstone(Object key);
/**
* @return {@code true} if no tombstones are stored.
*/
boolean isEmpty();
/**
* @return the number of tombstones stored.
*/
int size();
/**
* @return {@code true} if the cleanup task is currently running.
*/
boolean isTaskRunning();
/**
* @return The current delay between cleanup task in milliseconds.
*/
long getCurrentDelayMillis();
/**
    * Sends the tombstones belonging to the segments in {@code segments} to the {@code requestor}.
    * <p>
    * The sending is done asynchronously; this method does not wait for it to complete.
*
* @param requestor The requestor {@link Address}.
* @param segments The segments requested.
*/
void sendStateTo(Address requestor, IntSet segments);
/**
    * It receives a {@link Collection} of {@link IracTombstoneInfo} and sends {@link IracTombstoneCleanupCommand} for
    * the tombstones that are no longer valid.
*
* @param tombstones The {@link IracTombstoneInfo} collection.
*/
void checkStaleTombstone(Collection<? extends IracTombstoneInfo> tombstones);
}
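A tiny sketch of the null-metadata guard in the default storeTombstoneIfAbsent(int, Object, IracMetadata) above, using the no-op implementation that follows; purely illustrative.
import org.infinispan.container.versioning.irac.IracTombstoneManager;
import org.infinispan.container.versioning.irac.NoOpIracTombstoneManager;
public class TombstoneGuardDemo {
   public static void main(String[] args) {
      IracTombstoneManager mgr = NoOpIracTombstoneManager.getInstance();
      mgr.storeTombstoneIfAbsent(0, "key", null); // silently ignored: metadata is null
      assert mgr.getTombstone("key") == null; // run with -ea
   }
}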
| 3,397
| 28.807018
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/IracTombstoneInfo.java
|
package org.infinispan.container.versioning.irac;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Objects;
import org.infinispan.commons.util.Util;
import org.infinispan.metadata.impl.IracMetadata;
/**
* A data class to store the tombstone information for a key.
*
* @since 14.0
*/
public class IracTombstoneInfo {
private final Object key;
private final int segment;
private final IracMetadata metadata;
public IracTombstoneInfo(Object key, int segment, IracMetadata metadata) {
this.key = Objects.requireNonNull(key);
this.segment = segment;
this.metadata = Objects.requireNonNull(metadata);
}
public Object getKey() {
return key;
}
public int getSegment() {
return segment;
}
public IracMetadata getMetadata() {
return metadata;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IracTombstoneInfo that = (IracTombstoneInfo) o;
if (segment != that.segment) return false;
if (!key.equals(that.key)) return false;
return metadata.equals(that.metadata);
}
@Override
public int hashCode() {
int result = key.hashCode();
result = 31 * result + segment;
result = 31 * result + metadata.hashCode();
return result;
}
@Override
public String toString() {
return "IracTombstoneInfo{" +
"key=" + Util.toStr(key) +
", segment=" + segment +
", metadata=" + metadata +
'}';
}
public static void writeTo(ObjectOutput output, IracTombstoneInfo tombstone) throws IOException {
if (tombstone == null) {
output.writeObject(null);
return;
}
output.writeObject(tombstone.key);
output.writeInt(tombstone.segment);
IracMetadata.writeTo(output, tombstone.metadata);
}
public static IracTombstoneInfo readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
Object key = input.readObject();
return key == null ? null : new IracTombstoneInfo(key, input.readInt(), IracMetadata.readFrom(input));
}
}
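// Minimal round-trip sketch (not part of the sources) for the writeTo/readFrom helpers above,
// using plain JDK object streams (ObjectOutputStream implements ObjectOutput). It assumes the
// key is Serializable and is not how Infinispan actually marshals these objects.
class IracTombstoneInfoRoundTripSketch {
   static IracTombstoneInfo roundTrip(IracTombstoneInfo original) throws IOException, ClassNotFoundException {
      java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
      try (java.io.ObjectOutputStream out = new java.io.ObjectOutputStream(bytes)) {
         IracTombstoneInfo.writeTo(out, original); // writes a null marker when original is null
      }
      try (java.io.ObjectInputStream in =
               new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bytes.toByteArray()))) {
         return IracTombstoneInfo.readFrom(in);    // reads back null or an equal instance
      }
   }
}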
| 2,229
| 25.547619
| 108
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/DefaultIracVersionGenerator.java
|
package org.infinispan.container.versioning.irac;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.irac.IracUpdateVersionCommand;
import org.infinispan.commons.util.Version;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.globalstate.GlobalStateManager;
import org.infinispan.globalstate.ScopedPersistentState;
import org.infinispan.globalstate.impl.GlobalStateManagerImpl;
import org.infinispan.globalstate.impl.ScopedPersistentStateImpl;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.topology.CacheTopology;
import org.infinispan.util.ByteString;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.XSiteNamedCache;
/**
* Default implementation of {@link IracVersionGenerator}.
*
* @author Pedro Ruivo
* @since 11.0
*/
@Scope(Scopes.NAMED_CACHE)
public class DefaultIracVersionGenerator implements IracVersionGenerator {
private static final Log log = LogFactory.getLog(DefaultIracVersionGenerator.class);
private static final Pattern PROPERTY_PATTERN = Pattern.compile("(\\d+)_(.*)$");
private static final AtomicIntegerFieldUpdater<DefaultIracVersionGenerator> TOPOLOGY_UPDATED = AtomicIntegerFieldUpdater
.newUpdater(DefaultIracVersionGenerator.class, "topologyId");
private final Map<Integer, IracEntryVersion> segmentVersion;
private final BiFunction<Integer, IracEntryVersion, IracEntryVersion> incrementAndGet = this::incrementAndGet;
private final Function<Integer, IracEntryVersion> createFunction = segment -> newVersion();
@Inject RpcManager rpcManager;
@Inject GlobalStateManager globalStateManager;
@Inject CommandsFactory commandsFactory;
private ByteString localSite;
private volatile int topologyId = 1;
public DefaultIracVersionGenerator(int numberOfSegments) {
segmentVersion = new ConcurrentHashMap<>(numberOfSegments);
}
@Start
@Override
public void start() {
rpcManager.getTransport().checkCrossSiteAvailable();
localSite = XSiteNamedCache.cachedByteString(rpcManager.getTransport().localSiteName());
globalStateManager.readScopedState(scope()).ifPresent(this::loadState);
}
@Stop
@Override
public void stop() {
globalStateManager.writeScopedState(writeState());
}
@Override
public IracMetadata generateNewMetadata(int segment) {
return new IracMetadata(localSite, segmentVersion.compute(segment, incrementAndGet));
}
@Override
public IracMetadata generateMetadataWithCurrentVersion(int segment) {
return new IracMetadata(localSite, segmentVersion.computeIfAbsent(segment, createFunction));
}
@Override
public IracMetadata generateNewMetadata(int segment, IracEntryVersion versionSeen) {
if (versionSeen == null) {
return generateNewMetadata(segment);
}
int vTopology = versionSeen.getTopology(localSite);
if (vTopology > topologyId) {
updateTopology(vTopology);
}
IracEntryVersion version = segmentVersion.compute(segment, (s, currentVersion) ->
currentVersion == null ?
versionSeen.increment(localSite, topologyId) :
currentVersion.merge(versionSeen).increment(localSite, topologyId));
return new IracMetadata(localSite, version);
}
@Override
public void updateVersion(int segment, IracEntryVersion remoteVersion) {
if (remoteVersion == null) {
return;
}
segmentVersion.merge(segment, remoteVersion, IracEntryVersion::merge);
updateTopology(remoteVersion.getTopology(localSite));
}
@Override
public void onTopologyChange(CacheTopology newTopology) {
TOPOLOGY_UPDATED.incrementAndGet(this);
if (newTopology.getPhase().isRebalance()) {
IracUpdateVersionCommand cmd = commandsFactory.buildIracUpdateVersionCommand(peek());
rpcManager.sendToAll(cmd, DeliverOrder.NONE);
}
}
public Map<Integer, IracEntryVersion> peek() {
      // make a copy; onTopologyChange() uses this method and the copy avoids marshalling problems
return new HashMap<>(segmentVersion);
}
private void updateTopology(int newTopology) {
int currentTopology = topologyId;
while (newTopology > currentTopology && !TOPOLOGY_UPDATED.compareAndSet(this, currentTopology, newTopology)) {
currentTopology = topologyId;
}
}
private IracEntryVersion newVersion() {
return IracEntryVersion.newVersion(localSite, TopologyIracVersion.newVersion(topologyId));
}
private IracEntryVersion incrementAndGet(int segment, IracEntryVersion currentVersion) {
return currentVersion == null ? newVersion() : currentVersion.increment(localSite, topologyId);
}
private String scope() {
return "___irac_version_" + commandsFactory.getCacheName();
}
private void loadState(ScopedPersistentState state) {
assert Version.getVersion().equals(state.getProperty(GlobalStateManagerImpl.VERSION));
state.forEach((segmentAndSite, versionString) -> {
Matcher result = PROPERTY_PATTERN.matcher(segmentAndSite);
if (!result.find()) {
//other data, @version and so on
return;
}
int segment = Integer.parseInt(result.group(1));
String site = result.group(2);
TopologyIracVersion v = TopologyIracVersion.fromString(versionString);
if (v == null) {
return;
}
IracEntryVersion partialVersion = IracEntryVersion.newVersion(XSiteNamedCache.cachedByteString(site), v);
segmentVersion.compute(segment, (seg, version) -> version == null ? partialVersion : version.merge(partialVersion));
});
if (log.isTraceEnabled()) {
log.tracef("Read state (%s entries): %s", segmentVersion.size(), segmentVersion);
}
}
private ScopedPersistentState writeState() {
if (log.isTraceEnabled()) {
log.tracef("Write state (%s entries): %s", segmentVersion.size(), segmentVersion);
}
ScopedPersistentStateImpl state = new ScopedPersistentStateImpl(scope());
state.setProperty(GlobalStateManagerImpl.VERSION, Version.getVersion());
segmentVersion.forEach((segment, version) -> {
String prefix = segment + "_";
version.forEach((site, v) -> state.setProperty(prefix + site, v.toString()));
});
return state;
}
}
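// Illustrative sketch (not part of the sources) of the scoped-state property format that
// loadState()/writeState() above exchange: each property key is "<segment>_<siteName>" and
// PROPERTY_PATTERN splits it back apart. "17_NYC" is a made-up example value.
class IracStateKeyFormatSketch {
   public static void main(String[] args) {
      Matcher m = Pattern.compile("(\\d+)_(.*)$").matcher("17_NYC");
      if (m.find()) {
         int segment = Integer.parseInt(m.group(1)); // -> 17
         String site = m.group(2);                   // -> "NYC"
         System.out.printf("segment=%d site=%s%n", segment, site);
      }
   }
}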
| 7,053
| 38.629213
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/versioning/irac/NoOpIracVersionGenerator.java
|
package org.infinispan.container.versioning.irac;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.topology.CacheTopology;
/**
* A no-op implementation of {@link IracVersionGenerator} for caches without asynchronous remote site backups.
*
* @author Pedro Ruivo
* @since 11.0
*/
@Scope(Scopes.NAMED_CACHE)
public class NoOpIracVersionGenerator implements IracVersionGenerator {
private static final NoOpIracVersionGenerator INSTANCE = new NoOpIracVersionGenerator();
private NoOpIracVersionGenerator() {
}
public static NoOpIracVersionGenerator getInstance() {
return INSTANCE;
}
@Override
public IracMetadata generateNewMetadata(int segment) {
throw new IllegalStateException(); //if we don't have IRAC enabled, this shouldn't be invoked.
}
@Override
public IracMetadata generateMetadataWithCurrentVersion(int segment) {
throw new IllegalStateException(); //if we don't have IRAC enabled, this shouldn't be invoked.
}
@Override
public IracMetadata generateNewMetadata(int segment, IracEntryVersion versionSeen) {
throw new IllegalStateException(); //if we don't have IRAC enabled, this shouldn't be invoked.
}
@Override
public void updateVersion(int segment, IracEntryVersion remoteVersion) {
//no-op
}
@Override
public void onTopologyChange(CacheTopology newTopology) {
//no-op
}
@Override
public void start() {
//no-op
}
@Override
public void stop() {
//no-op
}
}
| 1,625
| 25.655738
| 110
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/PeekableTouchableContainerMap.java
|
package org.infinispan.container.impl;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.infinispan.commons.util.AbstractDelegatingConcurrentMap;
import org.infinispan.container.entries.InternalCacheEntry;
public class PeekableTouchableContainerMap<K, V> extends AbstractDelegatingConcurrentMap<K, InternalCacheEntry<K, V>>
implements PeekableTouchableMap<K, V> {
private final ConcurrentMap<K, InternalCacheEntry<K, V>> map;
public PeekableTouchableContainerMap() {
this(new ConcurrentHashMap<>());
}
public PeekableTouchableContainerMap(ConcurrentMap<K, InternalCacheEntry<K, V>> map) {
this.map = map;
}
@Override
protected ConcurrentMap<K, InternalCacheEntry<K, V>> delegate() {
return map;
}
@Override
public InternalCacheEntry<K, V> peek(Object key) {
return delegate().get(key);
}
@Override
public boolean touchKey(Object key, long currentTimeMillis) {
InternalCacheEntry<K, V> ice = peek(key);
if (ice != null) {
ice.touch(currentTimeMillis);
return true;
}
return false;
}
@Override
public void touchAll(long currentTimeMillis) {
for (InternalCacheEntry<K, V> ice : map.values()) {
ice.touch(currentTimeMillis);
}
}
}
| 1,331
| 26.75
| 117
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/package-info.java
|
/**
* Data containers which store cache entries. This package contains different implementations of
* containers based on their performance and ordering characteristics, as well as the entries
* that live in the containers.
* <p />
* This package also contains the factory for creating entries, and is typically used by the {@link LockingInterceptor}
 * to wrap an entry and put it in a thread's {@link InvocationContext}.
*/
package org.infinispan.container.impl;
| 471
| 46.2
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/InternalEntryFactoryImpl.java
|
package org.infinispan.container.impl;
import java.util.Map;
import org.infinispan.commons.time.TimeService;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.ImmortalCacheValue;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.entries.L1InternalCacheEntry;
import org.infinispan.container.entries.MortalCacheEntry;
import org.infinispan.container.entries.MortalCacheValue;
import org.infinispan.container.entries.TransientCacheEntry;
import org.infinispan.container.entries.TransientCacheValue;
import org.infinispan.container.entries.TransientMortalCacheEntry;
import org.infinispan.container.entries.TransientMortalCacheValue;
import org.infinispan.container.entries.metadata.L1MetadataInternalCacheEntry;
import org.infinispan.container.entries.metadata.MetadataImmortalCacheEntry;
import org.infinispan.container.entries.metadata.MetadataImmortalCacheValue;
import org.infinispan.container.entries.metadata.MetadataMortalCacheEntry;
import org.infinispan.container.entries.metadata.MetadataMortalCacheValue;
import org.infinispan.container.entries.metadata.MetadataTransientCacheEntry;
import org.infinispan.container.entries.metadata.MetadataTransientCacheValue;
import org.infinispan.container.entries.metadata.MetadataTransientMortalCacheEntry;
import org.infinispan.container.entries.metadata.MetadataTransientMortalCacheValue;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.container.versioning.IncrementableEntryVersion;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* An implementation that generates non-versioned entries
*
* @author Manik Surtani
* @since 5.1
*/
@Scope(Scopes.NAMED_CACHE)
public class InternalEntryFactoryImpl implements InternalEntryFactory {
@Inject TimeService timeService;
@Override
public InternalCacheEntry create(Object key, Object value, Metadata metadata) {
long lifespan = metadata != null ? metadata.lifespan() : -1;
long maxIdle = metadata != null ? metadata.maxIdle() : -1;
if (!isStoreMetadata(metadata, null)) {
if (lifespan < 0 && maxIdle < 0) return new ImmortalCacheEntry(key, value);
if (lifespan > -1 && maxIdle < 0) return new MortalCacheEntry(key, value, lifespan, timeService.wallClockTime());
if (lifespan < 0 && maxIdle > -1) return new TransientCacheEntry(key, value, maxIdle, timeService.wallClockTime());
return new TransientMortalCacheEntry(key, value, maxIdle, lifespan, timeService.wallClockTime());
} else {
if (lifespan < 0 && maxIdle < 0) return new MetadataImmortalCacheEntry(key, value, metadata);
if (lifespan > -1 && maxIdle < 0) return new MetadataMortalCacheEntry(key, value, metadata, timeService.wallClockTime());
if (lifespan < 0 && maxIdle > -1) return new MetadataTransientCacheEntry(key, value, metadata, timeService.wallClockTime());
return new MetadataTransientMortalCacheEntry(key, value, metadata, timeService.wallClockTime());
}
}
@Override
public InternalCacheEntry create(CacheEntry cacheEntry) {
// -1 signals the timestamps should be ignored
if (cacheEntry.getCreated() == -1 && cacheEntry.getLastUsed() == -1) {
return create(cacheEntry.getKey(), cacheEntry.getValue(),
cacheEntry.getMetadata(), cacheEntry.getLifespan(), cacheEntry.getMaxIdle());
} else {
return create(cacheEntry.getKey(), cacheEntry.getValue(), cacheEntry.getMetadata(),
cacheEntry.getCreated(), cacheEntry.getLifespan(),
cacheEntry.getLastUsed(), cacheEntry.getMaxIdle());
}
}
@Override
public InternalCacheEntry create(Object key, Object value, InternalCacheEntry cacheEntry) {
return create(key, value, cacheEntry.getMetadata(), cacheEntry.getCreated(),
cacheEntry.getLifespan(), cacheEntry.getLastUsed(), cacheEntry.getMaxIdle());
}
@Override
public InternalCacheEntry create(Object key, Object value, EntryVersion version, long created, long lifespan, long lastUsed, long maxIdle) {
if (version == null) {
if (lifespan < 0 && maxIdle < 0) return new ImmortalCacheEntry(key, value);
if (lifespan > -1 && maxIdle < 0) return new MortalCacheEntry(key, value, lifespan, created);
if (lifespan < 0 && maxIdle > -1) return new TransientCacheEntry(key, value, maxIdle, lastUsed);
return new TransientMortalCacheEntry(key, value, maxIdle, lifespan, lastUsed, created);
} else {
         // If no metadata is passed, assume embedded metadata
Metadata metadata = new EmbeddedMetadata.Builder()
.lifespan(lifespan).maxIdle(maxIdle).version(version).build();
if (lifespan < 0 && maxIdle < 0) return new MetadataImmortalCacheEntry(key, value, metadata);
if (lifespan > -1 && maxIdle < 0) return new MetadataMortalCacheEntry(key, value, metadata, created);
if (lifespan < 0 && maxIdle > -1) return new MetadataTransientCacheEntry(key, value, metadata, lastUsed);
return new MetadataTransientMortalCacheEntry(key, value, metadata, lastUsed, created);
}
}
@Override
public InternalCacheEntry create(Object key, Object value, Metadata metadata, long created, long lifespan, long lastUsed, long maxIdle) {
if (!isStoreMetadata(metadata, null)) {
if (lifespan < 0 && maxIdle < 0) return new ImmortalCacheEntry(key, value);
if (lifespan > -1 && maxIdle < 0) return new MortalCacheEntry(key, value, lifespan, created);
if (lifespan < 0 && maxIdle > -1) return new TransientCacheEntry(key, value, maxIdle, lastUsed);
return new TransientMortalCacheEntry(key, value, maxIdle, lifespan, lastUsed, created);
} else {
// Metadata to store, take lifespan and maxIdle settings from it
long metaLifespan = metadata.lifespan();
long metaMaxIdle = metadata.maxIdle();
if (metaLifespan < 0 && metaMaxIdle < 0) return new MetadataImmortalCacheEntry(key, value, metadata);
if (metaLifespan > -1 && metaMaxIdle < 0) return new MetadataMortalCacheEntry(key, value, metadata, created);
if (metaLifespan < 0 && metaMaxIdle > -1) return new MetadataTransientCacheEntry(key, value, metadata, lastUsed);
return new MetadataTransientMortalCacheEntry(key, value, metadata, lastUsed, created);
}
}
@Override
public InternalCacheValue createValue(CacheEntry cacheEntry) {
Metadata metadata = cacheEntry.getMetadata();
long lifespan = cacheEntry.getLifespan();
long maxIdle = cacheEntry.getMaxIdle();
if (!isStoreMetadata(metadata, null)) {
if (lifespan < 0 && maxIdle < 0) return new ImmortalCacheValue(cacheEntry.getValue());
if (lifespan > -1 && maxIdle < 0)
return new MortalCacheValue(cacheEntry.getValue(), cacheEntry.getCreated(), lifespan);
if (lifespan < 0 && maxIdle > -1)
return new TransientCacheValue(cacheEntry.getValue(), maxIdle, cacheEntry.getLastUsed());
return new TransientMortalCacheValue(cacheEntry.getValue(), cacheEntry.getCreated(), lifespan, maxIdle,
cacheEntry.getLastUsed());
} else {
if (lifespan < 0 && maxIdle < 0) return new MetadataImmortalCacheValue(cacheEntry.getValue(),
cacheEntry.getMetadata());
if (lifespan > -1 && maxIdle < 0)
return new MetadataMortalCacheValue(cacheEntry.getValue(), cacheEntry.getMetadata(),
cacheEntry.getCreated());
if (lifespan < 0 && maxIdle > -1)
return new MetadataTransientCacheValue(cacheEntry.getValue(), cacheEntry.getMetadata(),
cacheEntry.getLastUsed());
return new MetadataTransientMortalCacheValue(cacheEntry.getValue(), cacheEntry.getMetadata(),
cacheEntry.getCreated(), cacheEntry.getLastUsed());
}
}
@Override
// TODO: Do we need this???
public InternalCacheEntry create(Object key, Object value, Metadata metadata, long lifespan, long maxIdle) {
if (!isStoreMetadata(metadata, null)) {
if (lifespan < 0 && maxIdle < 0) return new ImmortalCacheEntry(key, value);
if (lifespan > -1 && maxIdle < 0) return new MortalCacheEntry(key, value, lifespan, timeService.wallClockTime());
if (lifespan < 0 && maxIdle > -1) return new TransientCacheEntry(key, value, maxIdle, timeService.wallClockTime());
return new TransientMortalCacheEntry(key, value, maxIdle, lifespan, timeService.wallClockTime());
} else {
// Metadata to store, take lifespan and maxIdle settings from it
long metaLifespan = metadata.lifespan();
long metaMaxIdle = metadata.maxIdle();
if (metaLifespan < 0 && metaMaxIdle < 0) return new MetadataImmortalCacheEntry(key, value, metadata);
if (metaLifespan > -1 && metaMaxIdle < 0) return new MetadataMortalCacheEntry(key, value, metadata, timeService.wallClockTime());
if (metaLifespan < 0 && metaMaxIdle > -1) return new MetadataTransientCacheEntry(key, value, metadata, timeService.wallClockTime());
return new MetadataTransientMortalCacheEntry(key, value, metadata, timeService.wallClockTime());
}
}
@Override
public InternalCacheEntry update(InternalCacheEntry ice, Metadata metadata) {
if (!isStoreMetadata(metadata, ice))
return updateMetadataUnawareEntry(ice, metadata.lifespan(), metadata.maxIdle());
else
return updateMetadataAwareEntry(ice, metadata);
}
@Override
public InternalCacheEntry update(InternalCacheEntry cacheEntry, Object value, Metadata metadata) {
// Update value and metadata atomically. Any attempt to get a copy of
// the cache entry should also acquire the same lock, to avoid returning
// partially applied cache entry updates
synchronized (cacheEntry) {
boolean reincarnate = metadata == null || metadata.updateCreationTimestamp();
cacheEntry.setValue(value);
InternalCacheEntry original = cacheEntry;
cacheEntry = update(cacheEntry, metadata);
// we have the same instance. So we need to reincarnate, if mortal.
if (reincarnate && cacheEntry.getLifespan() > 0 && original == cacheEntry) {
cacheEntry.reincarnate(timeService.wallClockTime());
}
return cacheEntry;
}
}
@Override
public CacheEntry copy(CacheEntry cacheEntry) {
synchronized (cacheEntry) {
return cacheEntry.clone();
}
}
@Override
public <K, V> InternalCacheEntry createL1(K key, V value, Metadata metadata) {
if (!isStoreMetadata(metadata, null)) {
return new L1InternalCacheEntry(key, value, metadata.lifespan(), timeService.wallClockTime());
} else {
return new L1MetadataInternalCacheEntry(key, value, metadata, timeService.wallClockTime());
}
}
@Override
public <K, V> InternalCacheValue<V> getValueFromCtx(K key, InvocationContext ctx) {
CacheEntry<K, V> entry = ctx.lookupEntry(key);
if (entry instanceof InternalCacheEntry) {
return ((InternalCacheEntry<K, V>) entry).toInternalCacheValue();
} else if (entry != null) {
InternalCacheValue<V> cv = create(entry).toInternalCacheValue();
PrivateMetadata metadata = entry.getInternalMetadata();
if (ctx.isInTxScope()) {
Map<Object, IncrementableEntryVersion> updatedVersions = ((TxInvocationContext<?>) ctx)
.getCacheTransaction().getUpdatedEntryVersions();
if (updatedVersions != null) {
IncrementableEntryVersion version = updatedVersions.get(entry.getKey());
if (version != null) {
metadata = PrivateMetadata.getBuilder(metadata).entryVersion(version).build();
}
}
}
cv.setInternalMetadata(metadata);
return cv;
} else {
return null;
}
}
private InternalCacheEntry updateMetadataUnawareEntry(InternalCacheEntry ice, long lifespan, long maxIdle) {
if (lifespan < 0) {
if (maxIdle < 0) {
// Need extra check because MetadataImmortalCacheEntry extends ImmortalCacheEntry
if (ice instanceof ImmortalCacheEntry && !(ice instanceof MetadataImmortalCacheEntry)) {
return ice;
} else {
return new ImmortalCacheEntry(ice.getKey(), ice.getValue());
}
} else {
if (ice instanceof TransientCacheEntry) {
((TransientCacheEntry) ice).setMaxIdle(maxIdle);
return ice;
} else {
return new TransientCacheEntry(ice.getKey(), ice.getValue(), maxIdle, timeService.wallClockTime());
}
}
} else {
if (maxIdle < 0) {
if (ice instanceof MortalCacheEntry) {
((MortalCacheEntry) ice).setLifespan(lifespan);
return ice;
} else {
return new MortalCacheEntry(ice.getKey(), ice.getValue(), lifespan, timeService.wallClockTime());
}
} else {
if (ice instanceof TransientMortalCacheEntry) {
TransientMortalCacheEntry transientMortalEntry = (TransientMortalCacheEntry) ice;
transientMortalEntry.setLifespan(lifespan);
transientMortalEntry.setMaxIdle(maxIdle);
return ice;
} else {
long ctm = timeService.wallClockTime();
return new TransientMortalCacheEntry(ice.getKey(), ice.getValue(), maxIdle, lifespan, ctm, ctm);
}
}
}
}
private InternalCacheEntry updateMetadataAwareEntry(InternalCacheEntry ice, Metadata metadata) {
long lifespan = metadata.lifespan();
long maxIdle = metadata.maxIdle();
if (lifespan < 0) {
if (maxIdle < 0) {
if (ice instanceof MetadataImmortalCacheEntry) {
ice.setMetadata(metadata);
return ice;
} else {
return new MetadataImmortalCacheEntry(ice.getKey(), ice.getValue(), metadata);
}
} else {
if (ice instanceof MetadataTransientCacheEntry) {
ice.setMetadata(metadata);
return ice;
} else {
return new MetadataTransientCacheEntry(ice.getKey(), ice.getValue(), metadata,
timeService.wallClockTime());
}
}
} else {
if (maxIdle < 0) {
if (ice instanceof MetadataMortalCacheEntry) {
ice.setMetadata(metadata);
return ice;
} else {
return new MetadataMortalCacheEntry(ice.getKey(), ice.getValue(), metadata, timeService.wallClockTime());
}
} else {
if (ice instanceof MetadataTransientMortalCacheEntry) {
ice.setMetadata(metadata);
return ice;
} else {
long ctm = timeService.wallClockTime();
return new MetadataTransientMortalCacheEntry(ice.getKey(), ice.getValue(), metadata, ctm, ctm);
}
}
}
}
/**
* Indicates whether the entire metadata object needs to be stored or not.
*
* This check is done to avoid keeping the entire metadata object around
* when only lifespan or maxIdle time is stored. If more information
 * needs to be stored (e.g. a version), or the metadata object is not the
* embedded one, keep the entire metadata object around.
*
* @return true if the entire metadata object needs to be stored, otherwise
* simply store lifespan and/or maxIdle in existing cache entries
*/
public static boolean isStoreMetadata(Metadata metadata, InternalCacheEntry ice) {
return metadata != null
&& (metadata.version() != null
|| !(metadata instanceof EmbeddedMetadata));
}
}
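// Hedged illustration (not part of the sources) of the selection rules implemented by
// create(key, value, metadata) above: a negative lifespan/maxIdle means "never expires",
// and plain EmbeddedMetadata without a version avoids storing the metadata object itself.
class EntryTypeSelectionSketch {
   static void demo(InternalEntryFactory factory) {
      Metadata immortal = new EmbeddedMetadata.Builder().build();                      // -> ImmortalCacheEntry
      Metadata mortal = new EmbeddedMetadata.Builder().lifespan(60_000).build();       // -> MortalCacheEntry
      Metadata transientMeta = new EmbeddedMetadata.Builder().maxIdle(30_000).build(); // -> TransientCacheEntry
      InternalCacheEntry<String, String> entry = factory.create("key", "value", mortal);
   }
}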
| 16,796
| 49.746224
| 143
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/KeyValueMetadataSizeCalculator.java
|
package org.infinispan.container.impl;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* Size calculator that takes into account not only key and value but also metadata.
* @author wburns
* @since 9.0
*/
@FunctionalInterface
public interface KeyValueMetadataSizeCalculator<K, V> {
/**
* Method used to calculate how much memory in size the key, value and metadata use.
* @param key The key for this entry to be used in size calculation
* @param value The value for this entry to be used in size calculation
* @param metadata The metadata for this entry to be used in size calculation
    * @return The approximate size in memory that the key, value and metadata use.
*/
default long calculateSize(K key, V value, Metadata metadata) {
return calculateSize(key, value, metadata, null);
}
/**
* Method used to calculate how much memory in size the key, value and metadata use.
*
* @param key The key for this entry to be used in size calculation
* @param value The value for this entry to be used in size calculation
* @param metadata The metadata for this entry to be used in size calculation
* @param internalMetadata The internal metadata for this entry to be used in size calculation
    * @return The approximate size in memory that the key, value and metadata use.
*/
long calculateSize(K key, V value, Metadata metadata, PrivateMetadata internalMetadata);
}
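// Because the interface above is a @FunctionalInterface, a calculator can be supplied as a
// lambda. This naive sketch (the constants are made up) charges a fixed overhead per entry
// plus a rough estimate for the key and value payloads; metadata is ignored for simplicity.
class NaiveSizeCalculatorSketch {
   static final KeyValueMetadataSizeCalculator<String, byte[]> CALCULATOR =
         (key, value, metadata, internalMetadata) -> 64L + 2L * key.length() + value.length;
}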
| 1,515
| 41.111111
| 97
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/InternalEntryFactory.java
|
package org.infinispan.container.impl;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.versioning.EntryVersion;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
/**
* A factory for {@link InternalCacheEntry} and {@link InternalCacheValue} instances.
*
* @author Manik Surtani
* @since 5.1
*/
@Scope(Scopes.NAMED_CACHE)
public interface InternalEntryFactory {
/**
* Creates a new {@link InternalCacheEntry} instance based on the key, value, version and timestamp/lifespan
* information reflected in the {@link CacheEntry} instance passed in.
* @param cacheEntry cache entry to copy
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry
*/
<K, V> InternalCacheEntry<K, V> create(CacheEntry<K, V> cacheEntry);
/**
* Creates a new {@link InternalCacheEntry} instance based on the version and timestamp/lifespan
* information reflected in the {@link CacheEntry} instance passed in. Key and value are both passed in
* explicitly.
* @param key key to use
* @param value value to use
* @param cacheEntry cache entry to retrieve version and timestamp/lifespan information from
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry
*/
<K, V> InternalCacheEntry<K, V> create(K key, V value, InternalCacheEntry<?, ?> cacheEntry);
/**
* Creates a new {@link InternalCacheEntry} instance
* @param key key to use
* @param value value to use
* @param metadata metadata for entry
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry
*/
<K, V> InternalCacheEntry<K, V> create(K key, V value, Metadata metadata);
/**
* Creates a new {@link InternalCacheEntry} instance
* @param key key to use
* @param value value to use
* @param metadata metadata for entry
* @param lifespan lifespan to use
* @param maxIdle maxIdle to use
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry
*/
<K, V> InternalCacheEntry<K, V> create(K key, V value, Metadata metadata, long lifespan, long maxIdle);
/**
* Creates a new {@link InternalCacheEntry} instance
* @param key key to use
* @param value value to use
* @param metadata metadata for entry
* @param created creation timestamp to use
* @param lifespan lifespan to use
* @param lastUsed lastUsed timestamp to use
* @param maxIdle maxIdle to use
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry
*/
<K, V> InternalCacheEntry<K, V> create(K key, V value, Metadata metadata, long created, long lifespan, long lastUsed, long maxIdle);
/**
* Creates a new {@link InternalCacheEntry} instance
* @param key key to use
* @param value value to use
* @param version version to use
* @param created creation timestamp to use
* @param lifespan lifespan to use
* @param lastUsed lastUsed timestamp to use
* @param maxIdle maxIdle to use
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry
*/
// To be deprecated, once metadata object can be retrieved remotely...
<K, V> InternalCacheEntry<K, V> create(K key, V value, EntryVersion version, long created, long lifespan, long lastUsed, long maxIdle);
/**
* TODO: Adjust javadoc
*
* Updates an existing {@link InternalCacheEntry} with new metadata. This may result in a new
* {@link InternalCacheEntry} instance being created, as a different {@link InternalCacheEntry} implementation
* may be more appropriate to suit the new metadata values. As such, one should consider the {@link InternalCacheEntry}
* passed in as a parameter as passed by value and not by reference.
*
* @param cacheEntry original internal cache entry
* @param metadata new metadata
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry instance
*/
<K, V> InternalCacheEntry<K, V> update(InternalCacheEntry<K, V> cacheEntry, Metadata metadata);
/**
* Similar to {@link #update(org.infinispan.container.entries.InternalCacheEntry, org.infinispan.metadata.Metadata)}
* but it also updates the {@link org.infinispan.container.entries.InternalCacheEntry} value.
* <p/>
* If the same internal cache entry is returned and if it is a mortal cache entry, the returned instance needs to be
* reincarnated.
*
* @param cacheEntry original internal cache entry
* @param value new value
* @param metadata new metadata
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return a new InternalCacheEntry instance or the existing original
*/
<K, V> InternalCacheEntry<K, V> update(InternalCacheEntry<K, V> cacheEntry, V value, Metadata metadata);
/**
* Creates an {@link InternalCacheValue} based on the {@link InternalCacheEntry} passed in.
*
* @param cacheEntry to use to generate a {@link InternalCacheValue}
* @param <V> The value type
* @return an {@link InternalCacheValue}
*/
<V> InternalCacheValue<V> createValue(CacheEntry<?, V> cacheEntry);
/**
    * Creates a copy of this cache entry and serializes the copy process with {@link #update(org.infinispan.container.entries.InternalCacheEntry, org.infinispan.metadata.Metadata)}.
    * This is required so that readers of the entry get a consistent snapshot of the value read.
* @param <K> The key type for the entry
* @param <V> The value type for the entry
*/
<K, V> CacheEntry<K, V> copy(CacheEntry<K, V> cacheEntry);
/**
* Creates a L1 entry.
*
* @param <K> The key type for the entry
* @param <V> The value type for the entry
    * @param key key to use
    * @param value value to use
    * @return a new {@link org.infinispan.container.entries.InternalCacheEntry}
    */
<K, V> InternalCacheEntry<K, V> createL1(K key, V value, Metadata metadata);
/**
* Retrieve an {@link InternalCacheValue} from the provided {@link InvocationContext} if an {@link InternalCacheEntry}
* exists, otherwise create {@link InternalCacheEntry} from the context's {@link CacheEntry} and return its value.
* <p>
* If the entry is not in the context a <b>null</b> value is returned
* @param key the key of the entry to be retrieved
* @param ctx the invocation context from which the value should be retrieved
* @param <K> The key type for the entry
* @param <V> The value type for the entry
* @return an {@link InternalCacheValue}
*/
<K, V> InternalCacheValue<V> getValueFromCtx(K key, InvocationContext ctx);
}
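// Sketch (not part of the sources) of the "pass by value" caveat documented on update(...):
// the factory may return a different InternalCacheEntry implementation to suit the new
// metadata, so callers must always use the returned instance, never the argument.
class UpdateSemanticsSketch {
   static <K, V> InternalCacheEntry<K, V> applyMetadata(InternalEntryFactory factory,
                                                        InternalCacheEntry<K, V> entry,
                                                        Metadata newMetadata) {
      return factory.update(entry, newMetadata); // 'entry' must be treated as stale after this
   }
}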
| 7,259
| 41.45614
| 198
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/BoundedSegmentedDataContainer.java
|
package org.infinispan.container.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.IntConsumer;
import java.util.stream.Collectors;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.ConcatIterator;
import org.infinispan.commons.util.EntrySizeCalculator;
import org.infinispan.commons.util.FlattenSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.container.entries.CacheEntrySizeCalculator;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.PrimitiveEntrySizeCalculator;
import org.infinispan.eviction.EvictionType;
import org.infinispan.marshall.core.WrappedByteArraySizeCalculator;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.Policy;
/**
 * Bounded implementation of a segmented data container. Bulk operations (iterator/spliterator) that are given
 * segments use the segment maps directly so that only the given segments are read (the non-segmented variants just
 * read from the bounded container).
 * <p>
 * Note this implementation supports both holding temporary non-owned segments (e.g. for L1) and not doing so. It only
 * utilizes heap-based maps (i.e. ConcurrentHashMap) internally.
*
* @author wburns
* @since 9.3
*/
public class BoundedSegmentedDataContainer<K, V> extends DefaultSegmentedDataContainer<K, V> {
protected final Cache<K, InternalCacheEntry<K, V>> evictionCache;
protected final PeekableTouchableMap<K, V> entries;
public BoundedSegmentedDataContainer(int numSegments, long thresholdSize, EvictionType thresholdPolicy) {
super(PeekableTouchableContainerMap::new, numSegments);
Caffeine<K, InternalCacheEntry<K, V>> caffeine = caffeineBuilder();
switch (thresholdPolicy) {
case MEMORY:
CacheEntrySizeCalculator<K, V> calc = new CacheEntrySizeCalculator<>(new WrappedByteArraySizeCalculator<>(
new PrimitiveEntrySizeCalculator()));
caffeine.weigher((k, v) -> (int) calc.calculateSize(k, v)).maximumWeight(thresholdSize);
break;
case COUNT:
caffeine.maximumSize(thresholdSize);
break;
default:
throw new UnsupportedOperationException("Policy not supported: " + thresholdPolicy);
}
DefaultEvictionListener evictionListener = new DefaultEvictionListener() {
@Override
void onEntryChosenForEviction(K key, InternalCacheEntry<K, V> value) {
super.onEntryChosenForEviction(key, value);
computeEntryRemoved(key, value);
}
};
evictionCache = applyListener(caffeine, evictionListener).build();
entries = new PeekableTouchableCaffeineMap<>(evictionCache);
}
public BoundedSegmentedDataContainer(int numSegments, long thresholdSize,
EntrySizeCalculator<? super K, ? super InternalCacheEntry<K, V>> sizeCalculator) {
super(PeekableTouchableContainerMap::new, numSegments);
DefaultEvictionListener evictionListener = new DefaultEvictionListener();
evictionCache = applyListener(Caffeine.newBuilder()
.weigher((K k, InternalCacheEntry<K, V> v) -> (int) sizeCalculator.calculateSize(k, v))
.maximumWeight(thresholdSize), evictionListener)
.build();
entries = new PeekableTouchableCaffeineMap<>(evictionCache);
}
@Override
protected void computeEntryWritten(K key, InternalCacheEntry<K, V> value) {
computeEntryWritten(getSegmentForKey(key), key, value);
}
protected void computeEntryWritten(int segment, K key, InternalCacheEntry<K, V> value) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = BoundedSegmentedDataContainer.super.getMapForSegment(segment);
if (map != null) {
map.put(key, value);
}
}
@Override
protected void computeEntryRemoved(K key, InternalCacheEntry<K, V> value) {
computeEntryRemoved(getSegmentForKey(key), key, value);
}
protected void computeEntryRemoved(int segment, K key, InternalCacheEntry<K, V> value) {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = BoundedSegmentedDataContainer.super.getMapForSegment(segment);
if (map != null) {
map.remove(key, value);
}
}
@Override
protected void putEntryInMap(PeekableTouchableMap<K, V> map, int segment, K key, InternalCacheEntry<K, V> ice) {
map.compute(key, (k, __) -> {
computeEntryWritten(segment, k, ice);
return ice;
});
}
@Override
protected InternalCacheEntry<K, V> removeEntryInMap(PeekableTouchableMap<K, V> map, int segment, Object key) {
ByRef<InternalCacheEntry<K, V>> ref = new ByRef<>(null);
map.computeIfPresent((K) key, (k, prev) -> {
computeEntryRemoved(segment, k, prev);
ref.set(prev);
return null;
});
return ref.get();
}
@Override
public PeekableTouchableMap<K, V> getMapForSegment(int segment) {
// All writes and other ops go directly to the caffeine cache
return entries;
}
@Override
public InternalCacheEntry<K, V> peek(Object k) {
return peek(-1, k);
}
@Override
public void clear() {
entries.clear();
for (int i = 0; i < maps.length(); ++i) {
clearMapIfPresent(i);
}
}
@Override
public void clear(IntSet segments) {
clear(segments, false);
segments.forEach((IntConsumer) this::clearMapIfPresent);
}
private void clearMapIfPresent(int segment) {
ConcurrentMap<?, ?> map = maps.get(segment);
if (map != null) {
map.clear();
}
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired() {
return entries.values().iterator();
}
@Override
public Iterator<InternalCacheEntry<K, V>> iteratorIncludingExpired(IntSet segments) {
// We could explore a streaming approach here to not have to allocate an additional ArrayList
List<Collection<InternalCacheEntry<K, V>>> valueIterables = new ArrayList<>(segments.size() + 1);
PrimitiveIterator.OfInt iter = segments.iterator();
boolean includeOthers = false;
while (iter.hasNext()) {
int segment = iter.nextInt();
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(segment);
if (map != null) {
valueIterables.add(map.values());
} else {
includeOthers = true;
}
}
if (includeOthers) {
valueIterables.add(entries.values().stream()
.filter(e -> segments.contains(getSegmentForKey(e.getKey())))
.collect(Collectors.toSet()));
}
return new ConcatIterator<>(valueIterables);
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired() {
return entries.values().spliterator();
}
@Override
public Spliterator<InternalCacheEntry<K, V>> spliteratorIncludingExpired(IntSet segments) {
// Copy the ints into an array to parallelize them
int[] segmentArray = segments.toIntArray();
AtomicBoolean usedOthers = new AtomicBoolean(false);
return new FlattenSpliterator<>(i -> {
ConcurrentMap<K, InternalCacheEntry<K, V>> map = maps.get(segmentArray[i]);
if (map == null) {
if (!usedOthers.getAndSet(true)) {
return entries.values().stream()
.filter(e -> segments.contains(getSegmentForKey(e.getKey())))
.collect(Collectors.toSet());
}
return Collections.emptyList();
}
return map.values();
}, segmentArray.length, Spliterator.CONCURRENT | Spliterator.NONNULL | Spliterator.DISTINCT);
}
@Override
public int sizeIncludingExpired() {
return entries.size();
}
/**
    * Clears entries out of the Caffeine map by invoking remove on its iterator. This can either keep all keys that match the
* provided segments when keepSegments is <code>true</code> or it will remove only the provided segments when
* keepSegments is <code>false</code>.
* @param segments the segments to either remove or keep
* @param keepSegments whether segments are kept or removed
*/
private void clear(IntSet segments, boolean keepSegments) {
for (Iterator<K> keyIterator = entries.keySet().iterator(); keyIterator.hasNext(); ) {
K key = keyIterator.next();
int keySegment = getSegmentForKey(key);
if (keepSegments != segments.contains(keySegment)) {
keyIterator.remove();
}
}
}
@Override
public void removeSegments(IntSet segments) {
// Call super remove segments so the maps are removed more efficiently
super.removeSegments(segments);
// Finally remove the entries from bounded cache
clear(segments, false);
}
private Policy.Eviction<K, InternalCacheEntry<K, V>> eviction() {
if (evictionCache != null) {
Optional<Policy.Eviction<K, InternalCacheEntry<K, V>>> eviction = evictionCache.policy().eviction();
if (eviction.isPresent()) {
return eviction.get();
}
}
throw new UnsupportedOperationException();
}
@Override
public long capacity() {
Policy.Eviction<K, InternalCacheEntry<K, V>> evict = eviction();
return evict.getMaximum();
}
@Override
public void resize(long newSize) {
Policy.Eviction<K, InternalCacheEntry<K, V>> evict = eviction();
evict.setMaximum(newSize);
}
@Override
public long evictionSize() {
Policy.Eviction<K, InternalCacheEntry<K, V>> evict = eviction();
return evict.weightedSize().orElse(entries.size());
}
@Override
public void cleanUp() {
evictionCache.cleanUp();
}
}
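// Standalone sketch (not part of the sources) of the two bounding modes the constructors
// above wire up on the Caffeine builder: COUNT bounds by entry count via maximumSize(),
// MEMORY bounds by a weigher plus maximumWeight(). The String types and the length-based
// weigher are stand-ins for the real entry size calculators.
class BoundedModeSketch {
   static Cache<String, String> countBounded(long maxEntries) {
      return Caffeine.newBuilder()
            .maximumSize(maxEntries)   // EvictionType.COUNT path
            .build();
   }
   static Cache<String, String> memoryBounded(long maxBytes) {
      return Caffeine.newBuilder()
            .weigher((String k, String v) -> k.length() + v.length()) // stand-in size estimate
            .maximumWeight(maxBytes)   // EvictionType.MEMORY path
            .build();
   }
}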
| 10,114
| 35.516245
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/PeekableTouchableCaffeineMap.java
|
package org.infinispan.container.impl;
import java.util.concurrent.ConcurrentMap;
import org.infinispan.commons.util.AbstractDelegatingConcurrentMap;
import org.infinispan.container.entries.InternalCacheEntry;
import com.github.benmanes.caffeine.cache.Cache;
public class PeekableTouchableCaffeineMap<K, V> extends AbstractDelegatingConcurrentMap<K, InternalCacheEntry<K, V>>
implements PeekableTouchableMap<K, V> {
private final Cache<K, InternalCacheEntry<K, V>> caffeineCache;
private final ConcurrentMap<K, InternalCacheEntry<K, V>> map;
public PeekableTouchableCaffeineMap(Cache<K, InternalCacheEntry<K, V>> cache) {
this.caffeineCache = cache;
this.map = cache.asMap();
}
@Override
protected ConcurrentMap<K, InternalCacheEntry<K, V>> delegate() {
return map;
}
@Override
public InternalCacheEntry<K, V> peek(Object key) {
return caffeineCache.policy().getIfPresentQuietly((K) key);
}
@Override
public boolean touchKey(Object key, long currentTimeMillis) {
InternalCacheEntry<K, V> ice = peek(key);
if (ice != null) {
ice.touch(currentTimeMillis);
return true;
}
return false;
}
@Override
public void touchAll(long currentTimeMillis) {
for (InternalCacheEntry<K, V> ice : map.values()) {
ice.touch(currentTimeMillis);
}
}
}
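// Sketch (not part of the sources) contrasting the quiet read used by peek() above with a
// normal Caffeine read: getIfPresentQuietly leaves recency/frequency bookkeeping untouched,
// while getIfPresent records the access and can influence eviction decisions.
class QuietReadSketch {
   static String demo(Cache<String, String> cache) {
      String quiet = cache.policy().getIfPresentQuietly("k"); // no eviction bookkeeping
      String loud = cache.getIfPresent("k");                  // records the access
      return quiet != null ? quiet : loud;
   }
}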
| 1,380
| 28.382979
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/PeekableTouchableMap.java
|
package org.infinispan.container.impl;
import java.util.concurrent.ConcurrentMap;
import org.infinispan.container.entries.InternalCacheEntry;
public interface PeekableTouchableMap<K, V> extends ConcurrentMap<K, InternalCacheEntry<K, V>> {
/**
    * Peeks at a value for the given key. Note that this does not update any expiration or
* eviction information when this is performed on the map, unlike the get method.
* @param key The key to find the value for
* @return The value mapping to this key
*/
InternalCacheEntry<K, V> peek(Object key);
/**
    * Touches the entry for the given key in this map. This method will update any recency timestamps for both
    * expiration and eviction as needed.
* @param key key to touch
* @param currentTimeMillis the recency timestamp to set
* @return whether the entry was touched or not
*/
boolean touchKey(Object key, long currentTimeMillis);
/**
    * Touches all entries in the map, setting the recency timestamps for both expiration and eviction appropriately.
* @param currentTimeMillis the recency timestamp to set
*/
void touchAll(long currentTimeMillis);
/**
* Same as {@link #put(Object, Object)} except that the map is not required to return a value. This can be useful
* when retrieving a previous value may incur additional costs.
* <p>
* @implSpec The default implementation is equivalent to, for this
* {@code map}:
* <pre> {@code
* map.put(key, value);
    * }</pre>
* @param key key to insert for the value
* @param value the value to insert into this map
*/
default void putNoReturn(K key, InternalCacheEntry<K, V> value) {
put(key, value);
}
}
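// Minimal contract sketch (not part of the sources): peek() must not refresh recency data,
// while touchKey() explicitly does. The concrete map and entry instances are assumed to be
// created elsewhere; timestamps would normally come from the container's TimeService.
class PeekableTouchableMapContractSketch {
   static <K, V> boolean demo(PeekableTouchableMap<K, V> map, K key, InternalCacheEntry<K, V> entry) {
      map.putNoReturn(key, entry);                    // insert without materializing a previous value
      InternalCacheEntry<K, V> quiet = map.peek(key); // read without touching recency information
      return map.touchKey(key, System.currentTimeMillis()); // explicit recency refresh
   }
}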
| 1,720
| 34.854167
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/AbstractInternalDataContainer.java
|
package org.infinispan.container.impl;
import static org.infinispan.commons.util.Util.toStr;
import java.lang.invoke.MethodHandles;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Spliterator;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.function.Predicate;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.AbstractIterator;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.FilterSpliterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.eviction.EvictionManager;
import org.infinispan.eviction.impl.PassivationManager;
import org.infinispan.expiration.impl.InternalExpirationManager;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.L1Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.DataOperationOrderer;
import org.infinispan.util.concurrent.DataOperationOrderer.Operation;
import org.infinispan.util.concurrent.WithinThreadExecutor;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.RemovalCause;
import com.github.benmanes.caffeine.cache.RemovalListener;
/**
 * Abstract class implementation of a segmented data container. Methods that don't provide a segment delegate to
 * {@link #getSegmentForKey(Object)}, and implementors provide which map to look into for a given segment via
 * {@link #getMapForSegment(int)}.
* @author wburns
* @since 9.3
*/
@Scope(Scopes.NAMED_CACHE)
public abstract class AbstractInternalDataContainer<K, V> implements InternalDataContainer<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
@Inject protected TimeService timeService;
@Inject protected EvictionManager<K, V> evictionManager;
@Inject protected InternalExpirationManager<K, V> expirationManager;
@Inject protected InternalEntryFactory entryFactory;
@Inject protected ComponentRef<PassivationManager> passivator;
@Inject protected Configuration configuration;
@Inject protected KeyPartitioner keyPartitioner;
@Inject protected DataOperationOrderer orderer;
protected final List<Consumer<Iterable<InternalCacheEntry<K, V>>>> listeners = new CopyOnWriteArrayList<>();
/**
    * A counter that keeps track of how many entries that can expire ({@link InternalCacheEntry#canExpire()}) are
    * currently in the container. All operations that insert or remove the container's expirable entries must update
    * this value.
*/
private final AtomicLong expirable = new AtomicLong();
protected abstract PeekableTouchableMap<K, V> getMapForSegment(int segment);
protected abstract int getSegmentForKey(Object key);
@Override
public InternalCacheEntry<K, V> get(int segment, Object k) {
PeekableTouchableMap<K, V> map = getMapForSegment(segment);
InternalCacheEntry<K, V> e = map != null ? map.get(k) : null;
if (e != null && e.canExpire()) {
long currentTimeMillis = timeService.wallClockTime();
if (e.isExpired(currentTimeMillis) &&
expirationManager.entryExpiredInMemory(e, currentTimeMillis, false).join() == Boolean.TRUE) {
e = null;
} else {
e.touch(currentTimeMillis);
}
}
return e;
}
@Override
public InternalCacheEntry<K, V> get(Object k) {
return get(getSegmentForKey(k), k);
}
@Override
public InternalCacheEntry<K, V> peek(int segment, Object k) {
PeekableTouchableMap<K, V> entries = getMapForSegment(segment);
if (entries != null) {
return entries.peek(k);
}
return null;
}
@Override
public InternalCacheEntry<K, V> peek(Object k) {
return peek(getSegmentForKey(k), k);
}
@Override
public boolean touch(int segment, Object k, long currentTimeMillis) {
PeekableTouchableMap<K, V> entries = getMapForSegment(segment);
if (entries != null) {
return entries.touchKey(k, currentTimeMillis);
}
return false;
}
@Override
public void put(int segment, K k, V v, Metadata metadata, PrivateMetadata internalMetadata, long createdTimestamp, long lastUseTimestamp) {
PeekableTouchableMap<K, V> entries = getMapForSegment(segment);
if (entries != null) {
boolean l1Entry = false;
if (metadata instanceof L1Metadata) {
metadata = ((L1Metadata) metadata).metadata();
l1Entry = true;
}
InternalCacheEntry<K, V> e = entries.get(k);
if (log.isTraceEnabled()) {
log.tracef("Creating new ICE for writing. Existing=%s, metadata=%s, new value=%s", e, metadata, toStr(v));
}
final InternalCacheEntry<K, V> copy;
if (l1Entry) {
copy = entryFactory.createL1(k, v, metadata);
} else if (e != null) {
copy = entryFactory.update(e, v, metadata);
} else {
// this is a brand-new entry
// -1 signals the timestamps should be ignored
if (createdTimestamp == -1 && lastUseTimestamp == -1) {
copy = entryFactory.create(k, v, metadata);
} else {
copy = entryFactory.create(k, v, metadata, createdTimestamp, metadata.lifespan(),
lastUseTimestamp, metadata.maxIdle());
}
}
copy.setInternalMetadata(internalMetadata);
if (log.isTraceEnabled())
log.tracef("Store %s=%s in container", k, copy);
if (e != null) entryUpdated(copy, e);
else entryAdded(copy);
putEntryInMap(entries, segment, k, copy);
} else {
log.tracef("Insertion attempted for key: %s but there was no map created for it at segment: %d", k, segment);
}
}
@Override
public void put(K k, V v, Metadata metadata) {
put(getSegmentForKey(k), k, v, metadata, null, -1, -1);
}
@Override
public boolean containsKey(int segment, Object k) {
InternalCacheEntry<K, V> ice = peek(segment, k);
if (ice != null && ice.canExpire()) {
long currentTimeMillis = timeService.wallClockTime();
if (ice.isExpired(currentTimeMillis)) {
if (expirationManager.entryExpiredInMemory(ice, currentTimeMillis, false).join() == Boolean.TRUE) {
ice = null;
}
}
}
return ice != null;
}
@Override
public boolean containsKey(Object k) {
return containsKey(getSegmentForKey(k), k);
}
@Override
public InternalCacheEntry<K, V> remove(int segment, Object k) {
PeekableTouchableMap<K, V> entries = getMapForSegment(segment);
if (entries != null) {
InternalCacheEntry<K, V> e = removeEntryInMap(entries, segment, k);
if (log.isTraceEnabled()) {
log.tracef("Removed %s=%s from container", k, e);
}
if (e == null) {
return null;
}
if (e.canExpire()) {
entryRemoved(e);
if (e.isExpired(timeService.wallClockTime())) {
return null;
}
}
return e;
}
return null;
}
@Override
public InternalCacheEntry<K, V> remove(Object k) {
return remove(getSegmentForKey(k), k);
}
@Override
public CompletionStage<Void> evict(int segment, K key) {
PeekableTouchableMap<K, V> entries = getMapForSegment(segment);
if (entries == null) {
return CompletableFutures.completedNull();
}
ByRef<CompletionStage<Void>> evictionStageRef = new ByRef<>(CompletableFutures.completedNull());
entries.computeIfPresent(key, (o, entry) -> {
// Note this is non blocking but we are invoking it in the ConcurrentMap locked section - so we have to
// return the value somehow
// - we don't need an orderer as it is handled in OrderedClusteringDependentLogic
// - we don't need eviction manager either as it is handled in NotifyHelper
evictionStageRef.set(handleEviction(entry, null, passivator.running(), null, this, null));
computeEntryRemoved(o, entry);
entryRemoved(entry);
return null;
});
return evictionStageRef.get();
}
@Override
public void evict(K key) {
CompletionStages.join(evict(getSegmentForKey(key), key));
}
@Override
public InternalCacheEntry<K, V> compute(int segment, K key, DataContainer.ComputeAction<K, V> action) {
PeekableTouchableMap<K, V> entries = getMapForSegment(segment);
return entries != null ? entries.compute(key, (k, oldEntry) -> {
InternalCacheEntry<K, V> newEntry = action.compute(k, oldEntry, entryFactory);
if (newEntry == oldEntry) {
return oldEntry;
} else if (newEntry == null) {
computeEntryRemoved(k, oldEntry);
entryRemoved(oldEntry);
return null;
}
computeEntryWritten(k, newEntry);
entryAdded(newEntry);
if (log.isTraceEnabled())
log.tracef("Store %s in container", newEntry);
return newEntry;
}) : null;
}
@Override
public InternalCacheEntry<K, V> compute(K key, DataContainer.ComputeAction<K, V> action) {
return compute(getSegmentForKey(key), key, action);
}
@Override
public void clear(IntSet segments) {
segments.forEach((int segment) -> {
Map<K, InternalCacheEntry<K, V>> map = getMapForSegment(segment);
if (map != null) {
segmentRemoved(map);
map.clear();
}
});
}
/**
* This method is invoked every time an entry is written inside a compute block
* @param key key passed to compute method
* @param value the new value
*/
protected void computeEntryWritten(K key, InternalCacheEntry<K, V> value) {
// Do nothing by default
}
/**
* This method is invoked every time an entry is removed inside a compute block
*
* @param key key passed to compute method
* @param value the old value
*/
protected void computeEntryRemoved(K key, InternalCacheEntry<K, V> value) {
// Do nothing by default
}
protected void putEntryInMap(PeekableTouchableMap<K, V> map, int segment, K key, InternalCacheEntry<K, V> ice) {
map.putNoReturn(key, ice);
}
protected InternalCacheEntry<K, V> removeEntryInMap(PeekableTouchableMap<K, V> map, int segment, Object key) {
return map.remove(key);
}
@Override
public void addRemovalListener(Consumer<Iterable<InternalCacheEntry<K, V>>> listener) {
listeners.add(listener);
}
@Override
public void removeRemovalListener(Object listener) {
listeners.remove(listener);
}
@Override
public boolean hasExpirable() {
return expirable.get() > 0;
}
protected final void entryAdded(InternalCacheEntry<K, V> ice) {
if (ice.canExpire()) {
expirable.incrementAndGet();
}
}
protected final void entryUpdated(InternalCacheEntry<K, V> curr, InternalCacheEntry<K, V> prev) {
byte combination = 0b00;
if (curr.canExpire()) combination |= 0b01;
if (prev.canExpire()) combination |= 0b10;
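      // Illustrative truth table for the bits assembled above (prev = high bit, curr = low bit):
      //   prev curr -> combination  action
      //   no   no   -> 0b00         none
      //   no   yes  -> 0b01         expirable++
      //   yes  no   -> 0b10         expirable--
      //   yes  yes  -> 0b11         none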
// If both do not expire or if both do expire, then we do nothing.
switch (combination) {
// Previous could not expire, but current can.
case 0b01:
expirable.incrementAndGet();
break;
// Previous could expire, but current can not.
case 0b10:
expirable.decrementAndGet();
break;
default: break;
}
}
protected final void entryRemoved(InternalCacheEntry<K, V> ice) {
if (ice.canExpire()) {
expirable.decrementAndGet();
}
}
protected final void segmentRemoved(Map<K, InternalCacheEntry<K, V>> segment) {
long expirableInSegment = segment.values().stream().filter(InternalCacheEntry::canExpire).count();
expirable.addAndGet(-expirableInSegment);
}
protected class EntryIterator extends AbstractIterator<InternalCacheEntry<K, V>> {
private final Iterator<InternalCacheEntry<K, V>> it;
public EntryIterator(Iterator<InternalCacheEntry<K, V>> it) {
this.it = it;
}
      @Override
      protected InternalCacheEntry<K, V> getNext() {
boolean initializedTime = false;
long now = 0;
while (it.hasNext()) {
InternalCacheEntry<K, V> entry = it.next();
if (!entry.canExpire()) {
if (log.isTraceEnabled()) {
log.tracef("Return next entry %s", entry);
}
return entry;
} else {
if (!initializedTime) {
now = timeService.wallClockTime();
initializedTime = true;
}
if (!entry.isExpired(now)) {
if (log.isTraceEnabled()) {
log.tracef("Return next entry %s", entry);
}
return entry;
} else if (log.isTraceEnabled()) {
log.tracef("%s is expired", entry);
}
}
}
if (log.isTraceEnabled()) {
log.tracef("Return next null");
}
return null;
}
}
protected Caffeine<K, InternalCacheEntry<K, V>> applyListener(Caffeine<K, InternalCacheEntry<K, V>> caffeine,
DefaultEvictionListener listener) {
return caffeine.executor(new WithinThreadExecutor()).evictionListener((key, value, cause) -> {
if (cause == RemovalCause.SIZE) {
listener.onEntryChosenForEviction(key, value);
}
}).removalListener(listener);
}
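   // Hypothetical wiring sketch showing how a bounded subclass might combine the two helpers
   // above; the maxEntries variable and the build() target are assumptions for illustration only.
   //
   //   Cache<K, InternalCacheEntry<K, V>> cache = applyListener(
   //         AbstractInternalDataContainer.<K, InternalCacheEntry<K, V>>caffeineBuilder()
   //               .maximumSize(maxEntries),
   //         new DefaultEvictionListener()).build();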
static <K, V> Caffeine<K, V> caffeineBuilder() {
//noinspection unchecked
return (Caffeine<K, V>) Caffeine.newBuilder();
}
/**
* Performs the eviction logic, except it doesn't actually remove the entry from the data container.
*
* It will acquire the orderer for the key if necessary (not null), passivate the entry, and notify the listeners,
    * all in a non-blocking fashion.
* The caller MUST hold the data container key lock.
*
* If the orderer is null, it means a concurrent write/remove is impossible, so we always passivate
* and notify the listeners.
*
* If the orderer is non-null and the self delay is null, when the orderer stage completes
* we know both the eviction operation removed the entry from the data container and the other operation
* removed/updated/inserted the entry, but we don't know the order.
* We don't care about the order for removals, we always skip passivation.
* We don't care about the order for activations/other evictions (READ) either, we always perform passivation.
* For writes we want to passivate only if the entry is no longer in the data container, i.e. the eviction
* removed the entry last.
*
* If the self delay is non-null, we may also acquire the orderer before the eviction operation removes the entry.
* We have to wait for the delay to complete before passivating the entry, but the scenarios are the same.
*
* It doesn't make sense to have a null orderer and a non-null self delay.
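    * <p>A minimal caller sketch for the simplest case, mirroring {@code evict(int, K)} above: ordering is
    * already guaranteed and the entry is removed under the data container key lock, so both {@code orderer}
    * and {@code selfDelay} are {@code null}:</p>
    * <pre>{@code
    * entries.computeIfPresent(key, (k, entry) -> {
    *    stageRef.set(handleEviction(entry, null, passivator, null, container, null));
    *    return null; // removal happens while still holding the key lock
    * });
    * }</pre>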
* @param entry evicted entry
* @param orderer used to guarantee ordering between other operations. May be null when an operation is already ordered
* @param passivator Passivates the entry to the store if necessary
* @param evictionManager Handles additional eviction logic. May be null if eviction is also not required
* @param dataContainer container to check if the key has already been removed
* @param selfDelay if null, the entry was already removed;
* if non-null, completes after the eviction finishes removing the entry
* @param <K> key type of the entry
* @param <V> value type of the entry
    * @return stage that, when complete, indicates that all of the eviction logic has finished
*/
public static <K, V> CompletionStage<Void> handleEviction(InternalCacheEntry<K, V> entry, DataOperationOrderer orderer,
PassivationManager passivator, EvictionManager<K, V> evictionManager, DataContainer<K, V> dataContainer,
CompletionStage<Void> selfDelay) {
K key = entry.getKey();
CompletableFuture<Operation> future = new CompletableFuture<>();
CompletionStage<Operation> ordererStage = orderer != null ? orderer.orderOn(key, future) : null;
if (ordererStage != null) {
if (log.isTraceEnabled()) {
log.tracef("Encountered concurrent operation during eviction of %s", key);
}
return ordererStage.thenCompose(operation -> {
if (log.isTraceEnabled()) {
log.tracef("Concurrent operation during eviction of %s was %s", key, operation);
}
switch (operation) {
case REMOVE:
return skipPassivation(orderer, key, future, operation);
case WRITE:
if (dataContainer.containsKey(key)) {
if (selfDelay != null) {
if (log.isTraceEnabled()) {
log.tracef("Delaying check for %s verify if passivation should occur as there was a" +
" concurrent write", key);
}
return selfDelay.thenCompose(ignore -> {
// Recheck the data container after eviction has completed
if (dataContainer.containsKey(key)) {
return skipPassivation(orderer, key, future, operation);
} else {
return handleNotificationAndOrderer(key, entry, passivator.passivateAsync(entry), orderer, evictionManager, future);
}
});
}
return skipPassivation(orderer, key, future, operation);
}
//falls through
default:
CompletionStage<Void> passivatedStage = passivator.passivateAsync(entry);
// This is a concurrent regular read - in which case we passivate just as normal
return handleNotificationAndOrderer(key, entry, passivatedStage, orderer, evictionManager, future);
}
});
}
return handleNotificationAndOrderer(key, entry, passivator.passivateAsync(entry), orderer, evictionManager, future);
}
private static CompletionStage<Void> skipPassivation(DataOperationOrderer orderer, Object key,
CompletableFuture<Operation> future, Operation op) {
if (log.isTraceEnabled()) {
log.tracef("Skipping passivation for key %s due to %s", key, op);
}
orderer.completeOperation(key, future, Operation.READ);
return CompletableFutures.completedNull();
}
private static <K, V> CompletionStage<Void> handleNotificationAndOrderer(K key, InternalCacheEntry<K, V> value,
CompletionStage<Void> stage, DataOperationOrderer orderer, EvictionManager<K, V> evictionManager,
CompletableFuture<Operation> future) {
if (evictionManager != null) {
stage = stage.thenCompose(ignore -> evictionManager.onEntryEviction(Collections.singletonMap(key, value)));
}
if (orderer != null) {
return stage.whenComplete((ignore, ignoreT) -> orderer.completeOperation(key, future, Operation.READ));
}
return stage;
}
class DefaultEvictionListener implements RemovalListener<K, InternalCacheEntry<K, V>> {
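      // Handshake between the two callbacks below: onEntryChosenForEviction runs while Caffeine
      // still holds the key lock and registers a future here; onRemoval completes that future only
      // after the entry has actually left the map, letting handleEviction (via its selfDelay
      // parameter) recheck the container before deciding whether passivation is still required.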
Map<Object, CompletableFuture<Void>> ensureEvictionDone = new ConcurrentHashMap<>();
void onEntryChosenForEviction(K key, InternalCacheEntry<K, V> value) {
// Schedule an eviction to happen after the key lock is released
CompletableFuture<Void> future = new CompletableFuture<>();
ensureEvictionDone.put(key, future);
handleEviction(value, orderer, passivator.running(), evictionManager, AbstractInternalDataContainer.this,
future);
}
      // It is very important that this method is invoked AFTER the entry has been evicted, outside of the
      // lock. This way we can properly detect whether the entry was updated concurrently with an eviction.
@Override
public void onRemoval(K key, InternalCacheEntry<K, V> value, RemovalCause cause) {
if (cause == RemovalCause.SIZE) {
CompletableFuture<Void> future = ensureEvictionDone.remove(key);
if (future != null) {
future.complete(null);
}
}
}
}
/**
* Returns a new spliterator that will not return entries that have expired.
* @param spliterator the spliterator to filter expired entries out of
* @return new spliterator with expired entries filtered
*/
protected Spliterator<InternalCacheEntry<K, V>> filterExpiredEntries(Spliterator<InternalCacheEntry<K, V>> spliterator) {
// This way we only read the wall clock time at the beginning
long accessTime = timeService.wallClockTime();
return new FilterSpliterator<>(spliterator, expiredIterationPredicate(accessTime));
}
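   // Illustrative sketch (the stream wiring is an assumption, not taken from this class):
   //   StreamSupport.stream(filterExpiredEntries(map.values().spliterator()), false)
   //         .forEach(entry -> process(entry));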
/**
    * Returns a predicate that returns {@code false} when an entry is expired. The predicate assumes it is
    * invoked from an iteration process.
* @param accessTime the access time to base expiration off of
* @return predicate that returns true if an entry is not expired
*/
protected Predicate<InternalCacheEntry<K, V>> expiredIterationPredicate(long accessTime) {
return e -> !e.isExpired(accessTime);
}
}
| 22726
| 39.511586
| 146
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/container/impl/EntryFactory.java
|
package org.infinispan.container.impl;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.impl.EntryWrappingInterceptor;
import org.infinispan.interceptors.locking.ClusteringDependentLogic;
import org.infinispan.topology.CacheTopology;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* A factory for constructing {@link org.infinispan.container.entries.MVCCEntry} instances for use in the {@link org.infinispan.context.InvocationContext}.
* Implementations of this interface would typically wrap an internal {@link org.infinispan.container.entries.CacheEntry}
* with an {@link org.infinispan.container.entries.MVCCEntry}.
*
* <h3>Expected wrapping patterns</h3>
*
* {@link EntryWrappingInterceptor} checks {@link CacheTopology#getReadConsistentHash()} and if this node is an owner
* of the key, it loads the entry from {@link DataContainer}. On the way back through interceptor stack, the entry is
* committed from {@link EntryWrappingInterceptor} through {@link ClusteringDependentLogic} which checks
* {@link CacheTopology#getWriteConsistentHash()}.
* <p>
* Entry being wrapped is a prerequisite for the command to run and therefore commit the entry, but it's not up to
 * {@link EntryWrappingInterceptor} to make sure the entry is always wrapped - all that the interceptors below can
 * expect is <em>(key is in readCH) => (entry is wrapped)</em>. The entry may be wrapped by EWI or other interceptors
 * later,
* e.g. (but not limited to) when:
* <ul>
* <li>entry is in L1
* <li>entry is fetched from remote node
* <li>the cache is transactional and command should be executed on origin (but it does not need previous value
* - it is then wrapped as null entry)
* </ul>
* It is the distribution interceptor that enforces that (entry is read/written by command) => (entry is wrapped),
* by fetching the remote value, limiting the set of keys in given command (narrowing it) or not executing the command
* locally at all.
* <p>
* If the entry should be read locally but it's not found in DC, the entry will be wrapped by
* {@link EntryWrappingInterceptor} (either as {@link NullCacheEntry} for reads or other appropriate type for writes).
 * Such an entry returns <code>false</code> on {@link CacheEntry#skipLookup()} as its value is uncertain (subsequent
 * interceptors can retrieve the new value from the cache store or a remote node and call
 * {@link EntryFactory#wrapExternalEntry} to update the context).
* <p>
 * With repeatable reads, the value that is in the context must not be overwritten by a value from outside the
 * transaction (only commands in this transaction can change the context entry). That's why
 * {@link EntryWrappingInterceptor} calls {@link CacheEntry#setSkipLookup} from the return handler for every command.
* <p>
* When a command is retried and repeatable reads are not used, the entry is removed from the context completely
* and wrapped again by {@link EntryWrappingInterceptor}. When repeatable reads are in use,
* {@link org.infinispan.container.entries.RepeatableReadEntry} entry keeps the value before the command was executed
* and the context is reset to this value.
* <p>
 * This summarizes the expected behaviour of interceptors loading from the persistence layer (a sketch follows
 * the list):
* <ul>
* <li>entry == null: don't load the entry because this node is not a read owner
* <li>entry.skipLookup == false: attempt to load the entry
* <li>entry.skipLookup == true: don't load the entry because it was already published
* </ul>
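 * <p>A hedged sketch of how a persistence-layer interceptor might apply these rules; the
 * {@code loadFromStore} and {@code asyncValue} helper names are hypothetical:</p>
 * <pre>{@code
 * CacheEntry entry = ctx.lookupEntry(key);
 * if (entry == null || entry.skipLookup()) {
 *    return invokeNext(ctx, command); // not a read owner, or the value was already published
 * }
 * return asyncValue(loadFromStore(key).thenAccept(loaded ->
 *       entryFactory.wrapExternalEntry(ctx, key, loaded, isRead, isWrite)));
 * }</pre>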
* Distribution interceptor should behave as follows:
* <ul>
* <li>entry == null: If this is a write command, check writeCH and if this node is
* <ul>
* <li>primary owner: that should not happen as command.topologyId is outdated (the topology is checked
* before executing the command and {@link org.infinispan.statetransfer.OutdatedTopologyException} is thrown)
* <li>backup owner and {@link VisitableCommand#loadType()} is {@link org.infinispan.commands.VisitableCommand.LoadType#OWNER OWNER}:
* retrieve the value from remote node
* <li>backup owner that does not need previous value: wrap null
* <li>non-owner: don't execute the command (or narrow the set of keys in it)
* </ul>
* If this is a read-only command:
* <ul>
* <li>If this is the origin, fetch the entry from remote node
* <li>If this is not the origin, the command must have different topologyId and we retry
* </ul>
* <li>entry != null: don't do any remote retrieval because the value is known
* </ul>
* <p>
* In local mode, the data can be always read and written, so there is no risk that a command won't have the entry
* wrapped.
*
* @author Manik Surtani (<a href="mailto:manik@jboss.org">manik@jboss.org</a>)
* @author Galder Zamarreño
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
public interface EntryFactory {
/**
* Use to synchronize multiple {@link #wrapEntryForReading(InvocationContext, Object, int, boolean, boolean, CompletionStage)}
* or {@link #wrapEntryForWriting(InvocationContext, Object, int, boolean, boolean, CompletionStage)} calls.
*
* <p>The basic pattern is:</p>
*
* <pre>{@code
* CompletableFuture<Void> initialStage = new CompletableFuture<>();
* CompletionStage<Void> currentStage = initialStage;
* for (Object key : ...) {
* currentStage = entryFactory.wrapEntryForWriting(..., currentStage);
* }
* return asyncInvokeNext(ctx, command, expirationCheckDelay(currentStage, initialStage));
* }</pre>
*
 * <p>The effect of the {@code expirationCheckDelay(currentStage, initialStage)} call is equivalent to completing the
* {@code initialStage} and returning {@code currentStage}, but it optimizes the common case where
* {@code currentStage} and {@code initialStage} are the same.</p>
*/
static CompletionStage<Void> expirationCheckDelay(CompletionStage<Void> currentStage, CompletableFuture<Void> initialStage) {
if (currentStage == initialStage) {
// No expiration to process asynchronously, don't bother completing the initial stage
return CompletableFutures.completedNull();
}
// Allow the expiration check to modify the invocation context
initialStage.complete(null);
return currentStage;
}
/**
* Wraps an entry for reading. Usually this is just a raw {@link CacheEntry} but certain combinations of isolation
* levels and the presence of an ongoing JTA transaction may force this to be a proper, wrapped MVCCEntry. The entry
* is also typically placed in the invocation context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param segment segment for the key
* @param isOwner true if this node is current owner in readCH (or we ignore CH)
* @param hasLock true if the invoker already has the lock for this key
* @param previousStage if wrapping can't be performed synchronously, only access the invocation context
* from another thread after this stage is complete
* @return stage that when complete the value should be in the context
*/
CompletionStage<Void> wrapEntryForReading(InvocationContext ctx, Object key, int segment, boolean isOwner,
boolean hasLock, CompletionStage<Void> previousStage);
/**
* Insert an entry that exists in the data container into the context.
*
* Doesn't do anything if the key was already wrapped.
*
* <p>
* The returned stage will always be complete if <b>isOwner</b> is false.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param segment segment for the key
* @param isOwner true if this node is current owner in readCH (or we ignore CH)
* @param isRead true if this operation is expected to read the value of the entry
* @param previousStage if wrapping can't be performed synchronously, only access the invocation context
* from another thread after this stage is complete
* @since 8.1
*/
CompletionStage<Void> wrapEntryForWriting(InvocationContext ctx, Object key, int segment, boolean isOwner, boolean isRead, CompletionStage<Void> previousStage);
/**
    * Insert an entry that exists in the data container into the context, even if it is expired.
    *
    * Doesn't do anything if the key was already wrapped.
* @param ctx current invocation context
* @param key key to look up and wrap
* @param segment segment for the key
* @param isOwner is the local node a read owner?
*/
void wrapEntryForWritingSkipExpiration(InvocationContext ctx, Object key, int segment, boolean isOwner);
/**
* Insert an external entry (e.g. loaded from a cache loader or from a remote node) into the context.
*
* @param ctx current invocation context
* @param key key to look up and wrap
* @param externalEntry the value to be inserted into context
* @param isRead true if this operation is expected to read the value of the entry
* @param isWrite if this is executed within a write command
* @since 8.1
*/
void wrapExternalEntry(InvocationContext ctx, Object key, CacheEntry externalEntry, boolean isRead, boolean isWrite);
}
| 9645
| 52.588889
| 163
|
java
|