| repo (string, 1–191 chars, may be null) | file (string, 23–351 chars) | code (string, 0–5.32M chars) | file_length (int64, 0–5.32M) | avg_line_length (float64, 0–2.9k) | max_line_length (int64, 0–288k) | extension_type (1 class) |
|---|---|---|---|---|---|---|
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferCancelSendCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.statetransfer.XSiteStateProvider;
/**
* Cancel sending XSite state.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferCancelSendCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 105;
private String siteName;
// For CommandIdUniquenessTest only
public XSiteStateTransferCancelSendCommand() {
super(null);
}
public XSiteStateTransferCancelSendCommand(ByteString cacheName) {
this(cacheName, null);
}
public XSiteStateTransferCancelSendCommand(ByteString cacheName, String siteName) {
super(cacheName);
this.siteName = siteName;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
invokeLocal(registry.getXSiteStateTransferManager().running().getStateProvider());
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
}
@Override
public String toString() {
return "XSiteStateTransferCancelSendCommand{" +
"siteName='" + siteName + '\'' +
", cacheName=" + cacheName +
'}';
}
public void invokeLocal(XSiteStateProvider provider) {
provider.cancelStateTransfer(siteName);
}
}
| 2,006 | 24.730769 | 88 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteAmendOfflineStatusCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.status.TakeOfflineManager;
/**
* Amend a site's offline status.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteAmendOfflineStatusCommand extends BaseRpcCommand {
public static final int COMMAND_ID = 103;
private String siteName;
private Integer afterFailures;
private Long minTimeToWait;
// For CommandIdUniquenessTest only
public XSiteAmendOfflineStatusCommand() {
this(null);
}
public XSiteAmendOfflineStatusCommand(ByteString cacheName) {
this(cacheName, null, null, null);
}
public XSiteAmendOfflineStatusCommand(ByteString cacheName, String siteName, Integer afterFailures, Long minTimeToWait) {
super(cacheName);
this.siteName = siteName;
this.afterFailures = afterFailures;
this.minTimeToWait = minTimeToWait;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
TakeOfflineManager takeOfflineManager = registry.getTakeOfflineManager().running();
takeOfflineManager.amendConfiguration(siteName, afterFailures, minTimeToWait);
return CompletableFutures.completedNull();
}
@Override
public final boolean isReturnValueExpected() {
return false;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
output.writeObject(afterFailures);
output.writeObject(minTimeToWait);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
afterFailures = (Integer) input.readObject();
minTimeToWait = (Long) input.readObject();
}
@Override
public String toString() {
return "XSiteAmendOfflineStatusCommand{" +
"siteName='" + siteName + '\'' +
", afterFailures=" + afterFailures +
", minTimeToWait=" + minTimeToWait +
'}';
}
}
| 2,417 | 27.785714 | 124 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteSetStateTransferModeCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.configuration.cache.XSiteStateTransferMode;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* A {@link CacheRpcCommand} that sets the {@link XSiteStateTransferMode} cluster-wide.
*
* @author Pedro Ruivo
* @since 12.1
*/
public class XSiteSetStateTransferModeCommand extends BaseRpcCommand {
public static final int COMMAND_ID = 36;
private String site;
private XSiteStateTransferMode mode;
@SuppressWarnings("unused") // for CommandIdUniquenessTest
public XSiteSetStateTransferModeCommand() {
super(null);
}
public XSiteSetStateTransferModeCommand(ByteString cacheName) {
super(cacheName);
}
public XSiteSetStateTransferModeCommand(ByteString cacheName, String site, XSiteStateTransferMode mode) {
super(cacheName);
this.site = site;
this.mode = mode;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(site);
MarshallUtil.marshallEnum(mode, output);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
site = input.readUTF();
mode = MarshallUtil.unmarshallEnum(input, XSiteStateTransferMode::valueOf);
}
@Override
public CompletionStage<Void> invokeAsync(ComponentRegistry registry) throws Throwable {
registry.getXSiteStateTransferManager().running().setAutomaticStateTransfer(site, mode);
return CompletableFutures.completedNull();
}
}
| 2,086 | 27.986111 | 108 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteTakeOfflineCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.status.TakeOfflineManager;
/**
* Take a site offline.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteTakeOfflineCommand extends BaseRpcCommand {
public static final int COMMAND_ID = 101;
private String siteName;
// For CommandIdUniquenessTest only
public XSiteTakeOfflineCommand() {
this(null);
}
public XSiteTakeOfflineCommand(ByteString cacheName) {
this(cacheName, null);
}
public XSiteTakeOfflineCommand(ByteString cacheName, String siteName) {
super(cacheName);
this.siteName = siteName;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
TakeOfflineManager takeOfflineManager = registry.getTakeOfflineManager().running();
return CompletableFuture.completedFuture(takeOfflineManager.takeSiteOffline(siteName));
}
@Override
public final boolean isReturnValueExpected() {
return true;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
}
@Override
public String toString() {
return "XSiteTakeOfflineCommand{" +
"siteName='" + siteName + '\'' +
'}';
}
}
| 1,828 | 24.054795 | 93 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferClearStatusCommand.java |
package org.infinispan.xsite.commands;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.statetransfer.XSiteStateTransferManager;
/**
* Clear XSite state transfer status.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferClearStatusCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 111;
// For CommandIdUniquenessTest only
public XSiteStateTransferClearStatusCommand() {
super(null);
}
public XSiteStateTransferClearStatusCommand(ByteString cacheName) {
super(cacheName);
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
invokeLocal(registry.getXSiteStateTransferManager().running());
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public String toString() {
return "XSiteStateTransferClearStatusCommand{" +
"cacheName=" + cacheName +
'}';
}
public void invokeLocal(XSiteStateTransferManager stateTransferManager) {
stateTransferManager.clearStatus();
}
}
| 1,447 | 24.403509 | 76 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferFinishReceiveCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.BackupReceiver;
import org.infinispan.xsite.XSiteReplicateCommand;
import org.infinispan.xsite.statetransfer.XSiteStateConsumer;
/**
* Finish receiving XSite state.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferFinishReceiveCommand extends XSiteReplicateCommand<Void> {
public static final byte COMMAND_ID = 107;
private String siteName;
// For CommandIdUniquenessTest only
public XSiteStateTransferFinishReceiveCommand() {
super(COMMAND_ID, null);
}
public XSiteStateTransferFinishReceiveCommand(ByteString cacheName) {
this(cacheName, null);
}
public XSiteStateTransferFinishReceiveCommand(ByteString cacheName, String siteName) {
super(COMMAND_ID, cacheName);
this.siteName = siteName;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
invokeLocal(registry.getXSiteStateTransferManager().running().getStateConsumer());
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> performInLocalSite(BackupReceiver receiver, boolean preserveOrder) {
assert !preserveOrder;
return receiver.handleEndReceivingStateTransfer(this);
}
public void setSiteName(String siteName) {
this.siteName = siteName;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
MarshallUtil.marshallString(siteName, output);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = MarshallUtil.unmarshallString(input);
}
public static XSiteStateTransferFinishReceiveCommand copyForCache(XSiteStateTransferFinishReceiveCommand command, ByteString cacheName) {
if (!command.cacheName.equals(cacheName))
return new XSiteStateTransferFinishReceiveCommand(cacheName, command.originSite);
command.siteName = command.originSite;
return command;
}
@Override
public String toString() {
return "XSiteStateTransferFinishReceiveCommand{" +
"siteName='" + siteName + '\'' +
", cacheName=" + cacheName +
'}';
}
public void invokeLocal(XSiteStateConsumer consumer) {
consumer.endStateTransfer(siteName);
}
}
| 2,681 | 29.477273 | 140 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferFinishSendCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.statetransfer.XSiteStateTransferManager;
/**
* Finish sending XSite state.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferFinishSendCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 108;
private String siteName;
private boolean statusOk;
// For CommandIdUniquenessTest only
public XSiteStateTransferFinishSendCommand() {
super(null);
}
public XSiteStateTransferFinishSendCommand(ByteString cacheName) {
this(cacheName, null, false);
}
public XSiteStateTransferFinishSendCommand(ByteString cacheName, String siteName, boolean statusOk) {
super(cacheName);
this.siteName = siteName;
this.statusOk = statusOk;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
XSiteStateTransferManager stateTransferManager = registry.getXSiteStateTransferManager().running();
stateTransferManager.notifyStatePushFinished(siteName, getOrigin(), statusOk);
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
output.writeBoolean(statusOk);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
statusOk = input.readBoolean();
}
@Override
public String toString() {
return "XSiteStateTransferFinishSendCommand{" +
"siteName='" + siteName + '\'' +
", statusOk=" + statusOk +
", cacheName=" + cacheName +
'}';
}
}
| 2,222 | 26.7875 | 105 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteBringOnlineCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.status.TakeOfflineManager;
/**
* Bring a site online.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteBringOnlineCommand extends BaseRpcCommand {
public static final int COMMAND_ID = 102;
private String siteName;
// For CommandIdUniquenessTest only
public XSiteBringOnlineCommand() {
this(null);
}
public XSiteBringOnlineCommand(ByteString cacheName) {
this(cacheName, null);
}
public XSiteBringOnlineCommand(ByteString cacheName, String siteName) {
super(cacheName);
this.siteName = siteName;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
TakeOfflineManager takeOfflineManager = registry.getTakeOfflineManager().running();
return CompletableFuture.completedFuture(takeOfflineManager.bringSiteOnline(siteName));
}
@Override
public final boolean isReturnValueExpected() {
return true;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
}
@Override
public String toString() {
return "XSiteBringOnlineCommand{" +
"siteName='" + siteName + '\'' +
'}';
}
}
| 1,828 | 24.054795 | 93 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferStartSendCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.statetransfer.XSiteStateProvider;
/**
* Start sending XSite state.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferStartSendCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 104;
private String siteName;
private int topologyId;
// For CommandIdUniquenessTest only
public XSiteStateTransferStartSendCommand() {
super(null);
}
public XSiteStateTransferStartSendCommand(ByteString cacheName) {
this(cacheName, null, -1);
}
public XSiteStateTransferStartSendCommand(ByteString cacheName, String siteName, int topologyId) {
super(cacheName);
this.siteName = siteName;
this.topologyId = topologyId;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
invokeLocal(registry.getXSiteStateTransferManager().running().getStateProvider());
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
output.writeInt(topologyId);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
topologyId = input.readInt();
}
@Override
public String toString() {
return "XSiteStateTransferStartSendCommand{" +
"siteName='" + siteName + '\'' +
", topologyId=" + topologyId +
", cacheName=" + cacheName +
'}';
}
public void invokeLocal(XSiteStateProvider provider) {
provider.startStateTransfer(siteName, getOrigin(), topologyId);
}
}
| 2,218 | 25.73494 | 101 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferRestartSendingCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.statetransfer.XSiteStateProvider;
/**
* Restart sending XSite state.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferRestartSendingCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 110;
private String siteName;
private int topologyId;
// For CommandIdUniquenessTest only
public XSiteStateTransferRestartSendingCommand() {
super(null);
}
public XSiteStateTransferRestartSendingCommand(ByteString cacheName) {
this(cacheName, null, -1);
}
public XSiteStateTransferRestartSendingCommand(ByteString cacheName, String siteName, int topologyId) {
super(cacheName);
this.siteName = siteName;
this.topologyId = topologyId;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
invokeLocal(registry.getXSiteStateTransferManager().running().getStateProvider());
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(siteName);
output.writeInt(topologyId);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = input.readUTF();
topologyId = input.readInt();
}
@Override
public String toString() {
return "XSiteStateTransferRestartSendingCommand{" +
"siteName='" + siteName + '\'' +
", topologyId=" + topologyId +
", cacheName=" + cacheName +
'}';
}
public void invokeLocal(XSiteStateProvider provider) {
provider.cancelStateTransfer(siteName);
provider.startStateTransfer(siteName, getOrigin(), topologyId);
}
}
| 2,294 | 26.321429 | 106 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStatusCommand.java |
package org.infinispan.xsite.commands;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.BackupSender;
import org.infinispan.xsite.status.TakeOfflineManager;
/**
* Return the status of a {@link BackupSender}.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStatusCommand extends BaseRpcCommand {
public static final int COMMAND_ID = 100;
// For CommandIdUniquenessTest only
public XSiteStatusCommand() {
this(null);
}
public XSiteStatusCommand(ByteString cacheName) {
super(cacheName);
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
TakeOfflineManager takeOfflineManager = registry.getTakeOfflineManager().running();
return CompletableFuture.completedFuture(takeOfflineManager.status());
}
@Override
public final boolean isReturnValueExpected() {
return true;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public String toString() {
return "XSiteStatusCommand{}";
}
}
| 1,281 | 23.653846 | 89 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferStatusRequestCommand.java |
package org.infinispan.xsite.commands;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.statetransfer.XSiteStateTransferManager;
/**
* Get XSite state transfer status.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferStatusRequestCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 109;
// For CommandIdUniquenessTest only
public XSiteStateTransferStatusRequestCommand() {
super(null);
}
public XSiteStateTransferStatusRequestCommand(ByteString cacheName) {
super(cacheName);
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
XSiteStateTransferManager stateTransferManager = registry.getXSiteStateTransferManager().running();
return CompletableFuture.completedFuture(stateTransferManager.getStatus());
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public String toString() {
return "XSiteStateTransferStatusRequestCommand{" +
"cacheName=" + cacheName +
'}';
}
}
| 1,377 | 25 | 105 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteAutoTransferStatusCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.configuration.cache.XSiteStateTransferMode;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.response.AutoStateTransferResponse;
import org.infinispan.xsite.status.SiteState;
/**
* A {@link CacheRpcCommand} to check the remote site status and state transfer mode in the local cluster.
*
* @author Pedro Ruivo
* @since 12.1
*/
public class XSiteAutoTransferStatusCommand extends BaseRpcCommand {
public static final int COMMAND_ID = 35;
private String site;
@SuppressWarnings("unused") //for CommandIdUniquenessTest
public XSiteAutoTransferStatusCommand() {
super(null);
}
public XSiteAutoTransferStatusCommand(ByteString cacheName) {
super(cacheName);
}
public XSiteAutoTransferStatusCommand(ByteString cacheName, String site) {
super(cacheName);
this.site = site;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeUTF(site);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
site = input.readUTF();
}
@Override
public CompletionStage<AutoStateTransferResponse> invokeAsync(ComponentRegistry registry) throws Throwable {
boolean offline = registry.getTakeOfflineManager().running().getSiteState(site) == SiteState.OFFLINE;
XSiteStateTransferMode mode = registry.getXSiteStateTransferManager().running().stateTransferMode(site);
return CompletableFuture.completedFuture(new AutoStateTransferResponse(offline, mode));
}
@Override
public String toString() {
return "XSiteAutoTransferStatusCommand{" +
"cacheName=" + cacheName +
", site='" + site + '\'' +
'}';
}
}
| 2,294 | 28.423077 | 111 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteViewNotificationCommand.java |
package org.infinispan.xsite.commands;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.GlobalRpcCommand;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.GlobalXSiteAdminOperations;
/**
* A {@link GlobalRpcCommand} which notifies that new remote sites are online.
*
* @author Pedro Ruivo
* @since 12.1
*/
public class XSiteViewNotificationCommand implements GlobalRpcCommand {
public static final int COMMAND_ID = 34;
private Collection<String> sitesUp;
public XSiteViewNotificationCommand() {
this(Collections.emptyList());
}
public XSiteViewNotificationCommand(Collection<String> sitesUp) {
this.sitesUp = sitesUp;
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public boolean isReturnValueExpected() {
return false;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
MarshallUtil.marshallCollection(sitesUp, output, DataOutput::writeUTF);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
sitesUp = MarshallUtil.unmarshallCollection(input, ArrayList::new, DataInput::readUTF);
}
@Override
public CompletionStage<?> invokeAsync(GlobalComponentRegistry globalComponentRegistry) throws Throwable {
globalComponentRegistry.getComponent(GlobalXSiteAdminOperations.class).onSitesUp(sitesUp);
return CompletableFutures.completedNull();
}
}
| 1,862 | 27.661538 | 108 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/commands/XSiteStateTransferStartReceiveCommand.java |
package org.infinispan.xsite.commands;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.xsite.BackupReceiver;
import org.infinispan.xsite.XSiteReplicateCommand;
import org.infinispan.xsite.statetransfer.XSiteStateConsumer;
import org.infinispan.xsite.statetransfer.XSiteStateTransferManager;
/**
* Start receiving XSite state.
*
* @author Ryan Emerson
* @since 11.0
*/
public class XSiteStateTransferStartReceiveCommand extends XSiteReplicateCommand<Void> {
public static final byte COMMAND_ID = 106;
private String siteName;
// For CommandIdUniquenessTest only
@SuppressWarnings("unused")
public XSiteStateTransferStartReceiveCommand() {
super(COMMAND_ID, null);
}
public XSiteStateTransferStartReceiveCommand(ByteString cacheName) {
this(cacheName, null);
}
private XSiteStateTransferStartReceiveCommand(ByteString cacheName, String siteName) {
super(COMMAND_ID, cacheName);
this.siteName = siteName;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) {
XSiteStateTransferManager stateTransferManager = registry.getXSiteStateTransferManager().running();
XSiteStateConsumer consumer = stateTransferManager.getStateConsumer();
consumer.startStateTransfer(siteName);
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> performInLocalSite(BackupReceiver receiver, boolean preserveOrder) {
assert !preserveOrder;
return receiver.handleStartReceivingStateTransfer(this);
}
public void setSiteName(String siteName) {
this.siteName = siteName;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
MarshallUtil.marshallString(siteName, output);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
siteName = MarshallUtil.unmarshallString(input);
}
public static XSiteStateTransferStartReceiveCommand copyForCache(XSiteStateTransferStartReceiveCommand command, ByteString cacheName) {
if (!command.cacheName.equals(cacheName))
return new XSiteStateTransferStartReceiveCommand(cacheName, command.originSite);
command.siteName = command.originSite;
return command;
}
@Override
public String toString() {
return "XSiteStateTransferStartReceiveCommand{" +
"siteName='" + siteName + '\'' +
", cacheName=" + cacheName +
'}';
}
}
| 2,807 | 30.909091 | 138 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/response/AutoStateTransferResponse.java |
package org.infinispan.xsite.response;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.configuration.cache.XSiteStateTransferMode;
import org.infinispan.marshall.core.Ids;
import org.infinispan.remoting.responses.Response;
import org.infinispan.xsite.commands.XSiteAutoTransferStatusCommand;
/**
* A {@link Response} implementation for command {@link XSiteAutoTransferStatusCommand}.
*
* @author Pedro Ruivo
* @since 12.1
*/
public class AutoStateTransferResponse implements Response {
public static final AbstractExternalizer<AutoStateTransferResponse> EXTERNALIZER = new Externalizer();
private static final XSiteStateTransferMode[] CACHED_VALUES = XSiteStateTransferMode.values();
private final boolean isOffline;
private final XSiteStateTransferMode stateTransferMode;
public AutoStateTransferResponse(boolean isOffline, XSiteStateTransferMode stateTransferMode) {
this.isOffline = isOffline;
this.stateTransferMode = stateTransferMode;
}
private static XSiteStateTransferMode valueOf(int ordinal) {
return CACHED_VALUES[ordinal];
}
@Override
public boolean isSuccessful() {
return true;
}
@Override
public boolean isValid() {
return true;
}
public boolean isOffline() {
return isOffline;
}
public XSiteStateTransferMode stateTransferMode() {
return stateTransferMode;
}
public boolean canDoAutomaticStateTransfer() {
return isOffline && stateTransferMode == XSiteStateTransferMode.AUTO;
}
private static class Externalizer extends AbstractExternalizer<AutoStateTransferResponse> {
@Override
public void writeObject(ObjectOutput output, AutoStateTransferResponse response) throws IOException {
output.writeBoolean(response.isOffline);
MarshallUtil.marshallEnum(response.stateTransferMode, output);
}
@Override
public AutoStateTransferResponse readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new AutoStateTransferResponse(input.readBoolean(),
MarshallUtil.unmarshallEnum(input, AutoStateTransferResponse::valueOf));
}
@Override
public Integer getId() {
return Ids.XSITE_AUTO_TRANSFER_RESPONSE;
}
@Override
public Set<Class<? extends AutoStateTransferResponse>> getTypeClasses() {
return Collections.singleton(AutoStateTransferResponse.class);
}
}
}
| 2,677 | 30.880952 | 113 | java |
null | infinispan-main/core/src/main/java/org/infinispan/xsite/response/AutoStateTransferResponseCollector.java |
package org.infinispan.xsite.response;
import org.infinispan.configuration.cache.XSiteStateTransferMode;
import org.infinispan.remoting.RpcException;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.ResponseCollectors;
/**
* A {@link ResponseCollector} that merges {@link AutoStateTransferResponse}.
*
* @author Pedro Ruivo
* @since 12.1
*/
public class AutoStateTransferResponseCollector implements ResponseCollector<AutoStateTransferResponse> {
private boolean isOffline;
private XSiteStateTransferMode stateTransferMode;
public AutoStateTransferResponseCollector(boolean isOffline, XSiteStateTransferMode stateTransferMode) {
this.isOffline = isOffline;
this.stateTransferMode = stateTransferMode;
}
@Override
public synchronized AutoStateTransferResponse finish() {
return new AutoStateTransferResponse(isOffline, stateTransferMode);
}
@Override
public final AutoStateTransferResponse addResponse(Address sender, Response response) {
if (response instanceof AutoStateTransferResponse) {
merge((AutoStateTransferResponse) response);
} else if (response instanceof ExceptionResponse) {
throw ResponseCollectors.wrapRemoteException(sender, ((ExceptionResponse) response).getException());
} else if (!(response instanceof CacheNotFoundResponse)) {
throw ResponseCollectors.wrapRemoteException(sender, new RpcException("Unknown response type: " + response));
}
return null;
}
public synchronized void merge(AutoStateTransferResponse response) {
//if one node is in offline mode, then the remote site is offline
isOffline = (isOffline || response.isOffline());
//if one node has auto state transfer disabled, then the full cluster has auto state transfer disabled.
if (response.stateTransferMode() == XSiteStateTransferMode.MANUAL) {
stateTransferMode = XSiteStateTransferMode.MANUAL;
}
}
}
| 2,229 | 39.545455 | 118 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/package-info.java |
/**
* This package contains the {@link org.infinispan.affinity.KeyAffinityService} interfaces, which allow user code to determine the mapping of keys onto cluster nodes.
*
* @api.public
*/
package org.infinispan.affinity;
| 212 | 29.428571 | 152 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/KeyGenerator.java |
package org.infinispan.affinity;
/**
* Used by {@link org.infinispan.affinity.KeyAffinityService} to generate the affinity keys.
* It allows keys to be generated in a particular format.
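* <p>
* A minimal implementation sketch (the UUID-based generator is illustrative; see
* {@link org.infinispan.affinity.impl.RndKeyGenerator} for the random variant in this codebase):
* <pre><code>
* public class UuidKeyGenerator implements KeyGenerator<String> {
*    public String getKey() {
*       // each call yields a fresh, effectively unique key
*       return java.util.UUID.randomUUID().toString();
*    }
* }
* </code></pre>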
*
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
public interface KeyGenerator<K> {
K getKey();
}
| 335 | 24.846154 | 118 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/KeyAffinityServiceFactory.java |
package org.infinispan.affinity;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.Executor;
import org.infinispan.Cache;
import org.infinispan.affinity.impl.KeyAffinityServiceImpl;
import org.infinispan.remoting.transport.Address;
/**
* Factory for {@link org.infinispan.affinity.KeyAffinityService}.
* Services built by this factory have the following characteristics:
* <ul>
* <li>they run asynchronously on a thread that can be plugged in through an {@link org.infinispan.commons.executors.ExecutorFactory}</li>
* <li>for key generation, the {@link org.infinispan.distribution.ch.ConsistentHash} function of a distributed cache is used; the service does not make sense for replicated caches</li>
* <li>for each cluster member (identified by an {@link org.infinispan.remoting.transport.Address}), a fixed number of keys is generated</li>
* </ul>
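* <p>
* A minimal usage sketch (a sketch only: the cache, executor and buffer size are illustrative,
* and the caller remains responsible for shutting the executor down):
* <pre><code>
* Cache<Object, String> cache = getDistributedCache();
* Executor ex = Executors.newSingleThreadExecutor();
* KeyAffinityService<Object> service =
*       KeyAffinityServiceFactory.newKeyAffinityService(cache, ex, new RndKeyGenerator(), 100);
* Object key = service.getKeyForAddress(address);
* </code></pre>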
*
* @see org.infinispan.affinity.KeyAffinityService
*
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
public class KeyAffinityServiceFactory {
/**
* Creates an {@link org.infinispan.affinity.KeyAffinityService} instance that generates keys mapped to all addresses
* in the cluster. Changes in topology are also handled: when a new node joins, the service automatically starts
* generating keys for it.
*
* @param cache the distributed cache for which this service runs
* @param ex used for running the async key generation process. On service shutdown the executor is not
* stopped; it is the user's responsibility to manage its lifecycle.
* @param keyGenerator allows one to control what the generated keys look like.
* @param keyBufferSize the number of generated keys per {@link org.infinispan.remoting.transport.Address}.
* @param start whether to start the service or not
* @return an {@link org.infinispan.affinity.KeyAffinityService} implementation.
* @throws IllegalStateException if the supplied cache is not DIST.
*/
public static <K, V> KeyAffinityService<K> newKeyAffinityService(Cache<K, V> cache, Executor ex, KeyGenerator<K> keyGenerator, int keyBufferSize, boolean start) {
return new KeyAffinityServiceImpl<>(ex, cache, keyGenerator, keyBufferSize, null, start);
}
/**
* Same as {@link #newKeyAffinityService(org.infinispan.Cache, java.util.concurrent.Executor, KeyGenerator, int,
* boolean)} with start == true;
*/
public static <K, V> KeyAffinityService<K> newKeyAffinityService(Cache<K, V> cache, Executor ex, KeyGenerator<K> keyGenerator, int keyBufferSize) {
return newKeyAffinityService(cache, ex, keyGenerator, keyBufferSize, true);
}
/**
* Creates a service that only generates keys for the addresses specified in the filter.
*
* @param filter the set of addresses for which to generate keys
*/
public static <K, V> KeyAffinityService<K> newKeyAffinityService(Cache<K, V> cache, Collection<Address> filter, KeyGenerator<K> keyGenerator, Executor ex, int keyBufferSize, boolean start) {
return new KeyAffinityServiceImpl<>(ex, cache, keyGenerator, keyBufferSize, filter, start);
}
/**
* Same as {@link #newKeyAffinityService(org.infinispan.Cache, java.util.Collection, KeyGenerator,
* java.util.concurrent.Executor, int, boolean)} with start == true.
*/
public static <K, V> KeyAffinityService<K> newKeyAffinityService(Cache<K, V> cache, Collection<Address> filter, KeyGenerator<K> keyGenerator, Executor ex, int keyBufferSize) {
return newKeyAffinityService(cache, filter, keyGenerator, ex, keyBufferSize, true);
}
/**
* Creates a service that only generates keys for the local address.
*/
public static <K, V> KeyAffinityService<K> newLocalKeyAffinityService(Cache<K, V> cache, KeyGenerator<K> keyGenerator, Executor ex, int keyBufferSize, boolean start) {
Address localAddress = cache.getAdvancedCache().getRpcManager().getTransport().getAddress();
Collection<Address> forAddresses = Collections.singletonList(localAddress);
return newKeyAffinityService(cache, forAddresses, keyGenerator, ex, keyBufferSize, start);
}
/**
* Same as {@link #newLocalKeyAffinityService(org.infinispan.Cache, KeyGenerator, java.util.concurrent.Executor, int, boolean)} with start == true.
*/
public static <K, V> KeyAffinityService<K> newLocalKeyAffinityService(Cache<K, V> cache, KeyGenerator<K> keyGenerator, Executor ex, int keyBufferSize) {
return newLocalKeyAffinityService(cache, keyGenerator, ex, keyBufferSize, true);
}
}
| 4,614 | 52.662791 | 193 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/ListenerRegistration.java |
package org.infinispan.affinity;
import org.infinispan.affinity.impl.KeyAffinityServiceImpl;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.TopologyChanged;
import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent;
import org.infinispan.notifications.cachemanagerlistener.annotation.CacheStopped;
import org.infinispan.notifications.cachemanagerlistener.event.CacheStoppedEvent;
/**
* Used for registering various cache notifications.
*
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
@Listener(sync = true)
public class ListenerRegistration {
private final KeyAffinityServiceImpl<?> keyAffinityService;
public ListenerRegistration(KeyAffinityServiceImpl<?> keyAffinityService) {
this.keyAffinityService = keyAffinityService;
}
@TopologyChanged
public void handleViewChange(TopologyChangedEvent<?, ?> tce) {
if (!tce.isPre()) keyAffinityService.handleViewChange(tce);
}
@CacheStopped
public void handleCacheStopped(CacheStoppedEvent cse) {
keyAffinityService.handleCacheStopped(cse);
}
}
| 1,120 | 31.970588 | 81 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/KeyAffinityService.java |
package org.infinispan.affinity;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.remoting.transport.Address;
/**
* Defines a service that generates keys to be mapped to specific nodes in a distributed (vs. replicated) cluster.
* The service is instantiated through one of the factory methods from {@link org.infinispan.affinity.KeyAffinityServiceFactory}.
* <p/>
* Sample usage:
* <p/>
* <pre><code>
* Cache<String, Long> cache = getDistributedCache();
* KeyAffinityService<String> service = KeyAffinityServiceFactory.newKeyAffinityService(cache, executor, keyGenerator, 100);
* ...
* String sessionId = sessionObject.getId();
* String newCollocatedSession = service.getCollocatedKey(sessionId);
*
* //this will reside on the same node in the cluster
* cache.put(newCollocatedSession, someInfo);
* </code></pre>
* <p/>
* Uniqueness: the service does not guarantee that the generated keys are unique. It relies on an
* {@link org.infinispan.affinity.KeyGenerator} for obtaining and distributing the generated keys. If key uniqueness is
* needed, it should be enforced in the generator.
* <p/>
* The service might also drop keys generated through the {@link org.infinispan.affinity.KeyGenerator}.
*
* @see org.infinispan.affinity.KeyAffinityServiceFactory
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
public interface KeyAffinityService<K> extends Lifecycle {
/**
* Returns a key that will be mapped to the cluster node identified by address.
* @param address identifying the cluster node.
* @return a key object
* @throws IllegalStateException if the service has not been started or has been shut down
*/
K getKeyForAddress(Address address);
/**
* Returns a key that will be mapped to the same node as the supplied key.
* @param otherKey the key for which we need a collocation
* @return a key object
* @throws IllegalStateException if the service has not been started or has been shut down
*/
K getCollocatedKey(K otherKey);
/**
* Checks whether or not the service is started.
*/
boolean isStarted();
}
| 2,142 | 37.267857 | 137 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/impl/KeyAffinityServiceImpl.java |
package org.infinispan.affinity.impl;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.StampedLock;
import org.infinispan.Cache;
import org.infinispan.affinity.KeyAffinityService;
import org.infinispan.affinity.KeyGenerator;
import org.infinispan.affinity.ListenerRegistration;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent;
import org.infinispan.notifications.cachemanagerlistener.event.CacheStoppedEvent;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.concurrent.ReclosableLatch;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
import net.jcip.annotations.ThreadSafe;
/**
* Implementation of KeyAffinityService.
*
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
@ThreadSafe
public class KeyAffinityServiceImpl<K> implements KeyAffinityService<K> {
// TODO During state transfer, we should try to assign keys to a node only if they are owners in both CHs
public final static float THRESHOLD = 0.5f;
//interval between key/queue poll
private static final int POLL_INTERVAL_MILLIS = 50;
private static final Log log = LogFactory.getLog(KeyAffinityServiceImpl.class);
private final Set<Address> filter;
@GuardedBy("maxNumberInvariant")
private final Map<Address, BlockingQueue<K>> address2key = new ConcurrentHashMap<>();
private final Executor executor;
private final Cache<? extends K, ?> cache;
private final KeyGenerator<? extends K> keyGenerator;
private final int bufferSize;
private final AtomicInteger maxNumberOfKeys = new AtomicInteger(); //(nr. of addresses) * bufferSize;
final AtomicInteger existingKeyCount = new AtomicInteger();
private volatile boolean started;
/**
* Guards and makes sure the following invariant holds: maxNumberOfKeys == address2key.keySet().size() * bufferSize
*/
private final StampedLock maxNumberInvariant = new StampedLock();
/**
* Used for coordinating between the KeyGeneratorWorker and consumers.
*/
private final ReclosableLatch keyProducerStartLatch = new ReclosableLatch();
private volatile KeyGeneratorWorker keyGenWorker;
private volatile ListenerRegistration listenerRegistration;
public KeyAffinityServiceImpl(Executor executor, Cache<? extends K, ?> cache, KeyGenerator<? extends K> keyGenerator,
int bufferSize, Collection<Address> filter, boolean start) {
this.executor = executor;
this.cache = cache;
this.keyGenerator = keyGenerator;
this.bufferSize = bufferSize;
if (filter != null) {
this.filter = ConcurrentHashMap.newKeySet();
for (Address address : filter) {
this.filter.add(address);
}
} else {
this.filter = null;
}
if (start)
start();
}
@Override
public K getCollocatedKey(K otherKey) {
Address address = getAddressForKey(otherKey);
return getKeyForAddress(address);
}
@Override
public K getKeyForAddress(Address address) {
if (!started) {
throw new IllegalStateException("You have to start the service first!");
}
if (address == null)
throw new NullPointerException("Null address not supported!");
BlockingQueue<K> queue = null;
try {
K result = null;
while (result == null && !keyGenWorker.isStopped()) {
// obtain the read lock inside the loop, otherwise a topology change will never be able
// to obtain the write lock
long stamp = maxNumberInvariant.readLock();
try {
queue = address2key.get(address);
if (queue == null)
throw new IllegalStateException("Address " + address + " is no longer in the cluster");
// first try to take an element without waiting
result = queue.poll();
if (result == null) {
// there are no elements in the queue, make sure the producer is started
keyProducerStartLatch.open();
// our address might have been removed from the consistent hash
if (!isNodeInConsistentHash(address))
throw new IllegalStateException("Address " + address + " is no longer in the cluster");
}
} finally {
maxNumberInvariant.unlockRead(stamp);
}
if (result == null) {
// Now wait for a new key. If there's a topology change, poll() will time out and we'll retry
// on the new queue.
try {
result = queue.poll(POLL_INTERVAL_MILLIS, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
// Ignore and restore interruption status
Thread.currentThread().interrupt();
}
}
}
existingKeyCount.decrementAndGet();
log.tracef("Returning key %s for address %s", result, address);
return result;
} finally {
if (queue != null && queue.size() < bufferSize * THRESHOLD + 1) {
keyProducerStartLatch.open();
}
}
}
@Override
public void start() {
if (started) {
log.debug("Service already started, ignoring call to start!");
return;
}
List<Address> existingNodes = getExistingNodes();
long stamp = maxNumberInvariant.writeLock();
try {
addQueuesForAddresses(existingNodes);
resetNumberOfKeys();
} finally {
maxNumberInvariant.unlockWrite(stamp);
}
keyGenWorker = new KeyGeneratorWorker();
executor.execute(keyGenWorker);
listenerRegistration = new ListenerRegistration(this);
cache.getCacheManager().addListener(listenerRegistration);
cache.addListener(listenerRegistration);
keyProducerStartLatch.open();
started = true;
}
@Override
public void stop() {
if (!started) {
log.debug("Ignoring call to stop as service is not started.");
return;
}
started = false;
EmbeddedCacheManager cacheManager = cache.getCacheManager();
if (cacheManager.getListeners().contains(listenerRegistration)) {
cacheManager.removeListener(listenerRegistration);
} else {
throw new IllegalStateException("Listener must have been registered!");
}
//most likely the listeners collection is shared between CacheManager and the Cache
if (cache.getListeners().contains(listenerRegistration)) {
cache.removeListener(listenerRegistration);
}
keyGenWorker.stop();
}
public void handleViewChange(TopologyChangedEvent<?, ?> vce) {
log.tracef("TopologyChangedEvent received: %s", vce);
long stamp = maxNumberInvariant.writeLock();
try {
address2key.clear(); //we need to drop everything as key-mapping data is stale due to view change
addQueuesForAddresses(vce.getConsistentHashAtEnd().getMembers());
resetNumberOfKeys();
keyProducerStartLatch.open();
} finally {
maxNumberInvariant.unlockWrite(stamp);
}
}
public boolean isKeyGeneratorThreadAlive() {
return !keyGenWorker.isStopped();
}
public void handleCacheStopped(CacheStoppedEvent cse) {
if (this.cache.getName().equals(cse.getCacheName())) {
log.tracef("Cache stopped, stopping the service: %s", cse);
stop();
}
}
private class KeyGeneratorWorker implements Runnable {
private volatile boolean isActive;
private volatile boolean isStopped = false;
@Override
public void run() {
try {
while (!isStopped) {
keyProducerStartLatch.await(10, TimeUnit.SECONDS);
if (!isStopped) {
isActive = true;
log.trace("KeyGeneratorWorker marked as ACTIVE");
generateKeys();
isActive = false;
log.trace("KeyGeneratorWorker marked as INACTIVE");
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
finally {
log.debugf("Shutting down KeyAffinity service for key set: %s", filter);
}
}
public boolean isStopped() {
return isStopped;
}
private void generateKeys() {
long stamp = maxNumberInvariant.readLock();
try {
// if there's a topology change, some queues will stop receiving keys
// so we want to establish an upper bound on how many extra keys to generate
// in order to fill all the queues
int maxMisses = maxNumberOfKeys.get();
int missCount = 0;
while (existingKeyCount.get() < maxNumberOfKeys.get() && missCount < maxMisses) {
K key = keyGenerator.getKey();
Address addressForKey = getAddressForKey(key);
boolean added = false;
if (interestedInAddress(addressForKey)) {
added = tryAddKey(addressForKey, key);
}
if (!added) missCount++;
}
// if we had too many misses, just release the lock and try again
if (missCount < maxMisses) {
keyProducerStartLatch.close();
}
} finally {
maxNumberInvariant.unlockRead(stamp);
}
}
@GuardedBy("maxNumberInvariant")
private boolean tryAddKey(Address address, K key) {
BlockingQueue<K> queue = address2key.get(address);
// on node stop the distribution manager might still return the dead server for a while after we have already removed its queue
if (queue == null)
return false;
boolean added = queue.offer(key);
if (added) {
existingKeyCount.incrementAndGet();
log.tracef("Added key %s for address %s", key, address);
}
return added;
}
public boolean isActive() {
return isActive;
}
public void stop() {
isStopped = true;
keyProducerStartLatch.open();
}
}
/**
* Important: this *MUST* be called with WL on {@link #address2key}.
*/
@GuardedBy("maxNumberInvariant")
private void resetNumberOfKeys() {
maxNumberOfKeys.set(address2key.keySet().size() * bufferSize);
existingKeyCount.set(0);
if (log.isTraceEnabled()) {
log.tracef("resetNumberOfKeys ends with: maxNumberOfKeys=%s, existingKeyCount=%s",
maxNumberOfKeys.get(), existingKeyCount.get());
}
}
/**
* Important: this *MUST* be called with WL on {@link #address2key}.
*/
@GuardedBy("maxNumberInvariant")
private void addQueuesForAddresses(Collection<Address> addresses) {
for (Address address : addresses) {
if (interestedInAddress(address)) {
address2key.put(address, new ArrayBlockingQueue<>(bufferSize));
} else {
log.tracef("Skipping address: %s", address);
}
}
}
private boolean interestedInAddress(Address address) {
return filter == null || filter.contains(address);
}
private List<Address> getExistingNodes() {
return cache.getAdvancedCache().getRpcManager().getTransport().getMembers();
}
private Address getAddressForKey(Object key) {
DistributionManager distributionManager = getDistributionManager();
return distributionManager.getCacheTopology().getDistribution(key).primary();
}
private boolean isNodeInConsistentHash(Address address) {
DistributionManager distributionManager = getDistributionManager();
ConsistentHash hash = distributionManager.getWriteConsistentHash();
return hash.getMembers().contains(address);
}
private DistributionManager getDistributionManager() {
DistributionManager distributionManager = cache.getAdvancedCache().getDistributionManager();
if (distributionManager == null) {
throw new IllegalStateException("Null distribution manager. Is this an distributed(v.s. replicated) cache?");
}
return distributionManager;
}
public Map<Address, BlockingQueue<K>> getAddress2KeysMapping() {
return Collections.unmodifiableMap(address2key);
}
public int getMaxNumberOfKeys() {
return maxNumberOfKeys.intValue();
}
public boolean isKeyGeneratorThreadActive() {
return keyGenWorker.isActive();
}
@Override
public boolean isStarted() {
return started;
}
}
| 13,293 | 34.450667 | 136 | java |
null | infinispan-main/core/src/main/java/org/infinispan/affinity/impl/RndKeyGenerator.java |
package org.infinispan.affinity.impl;
import java.util.Random;
import org.infinispan.affinity.KeyGenerator;
/**
* Key provider that relies on {@link java.util.Random}'s distribution to generate keys.
* It doesn't offer any guarantee that the keys are unique.
*
* @author Mircea.Markus@jboss.com
* @since 4.1
*/
public class RndKeyGenerator implements KeyGenerator<Object> {
public static final Random rnd = new Random();
@Override
public Object getKey() {
return rnd.nextLong();
}
}
| 512 | 21.304348 | 88 | java |
null | infinispan-main/core/src/main/java/org/infinispan/lifecycle/package-info.java |
/**
* Module and component lifecycle.
*
* @api.public
*/
package org.infinispan.lifecycle;
| 95 | 12.714286 | 34 | java |
null | infinispan-main/core/src/main/java/org/infinispan/lifecycle/ComponentStatus.java |
package org.infinispan.lifecycle;
/**
* Different states a component may be in.
*
* @author Manik Surtani
* @see org.infinispan.commons.api.Lifecycle
* @since 4.0
*/
public enum ComponentStatus {
/**
* Object has been instantiated, but start() has not been called.
*/
INSTANTIATED,
/**
* The <code>start()</code> method has been called but not yet completed.
*/
INITIALIZING,
/**
* The <code>start()</code> method has been completed and the component is running.
*/
RUNNING,
/**
* The <code>stop()</code> method has been called but has not yet completed.
*/
STOPPING,
/**
* The <code>stop()</code> method has completed and the component has terminated.
*/
TERMINATED,
/**
* The component is in a failed state due to a problem with one of the other lifecycle transition phases.
*/
FAILED;
public boolean needToDestroyFailedCache() {
return this == ComponentStatus.FAILED;
}
public boolean startAllowed() {
return this == ComponentStatus.INSTANTIATED;
}
public boolean needToInitializeBeforeStart() {
return this == ComponentStatus.TERMINATED;
}
public boolean stopAllowed() {
switch (this) {
case INSTANTIATED:
case TERMINATED:
case STOPPING:
return false;
default:
return true;
}
}
public boolean allowInvocations() {
return this == ComponentStatus.RUNNING;
}
public boolean startingUp() {
return this == ComponentStatus.INITIALIZING || this == ComponentStatus.INSTANTIATED;
}
public boolean isTerminated() {
return this == ComponentStatus.TERMINATED;
}
public boolean isStopping() {
return this == ComponentStatus.STOPPING;
}
}
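// Illustrative sketch (not part of the original file): how a component might use
// ComponentStatus to guard invocations and lifecycle transitions. The enclosing
// component class and its "status" field are hypothetical.
//
//    private volatile ComponentStatus status = ComponentStatus.INSTANTIATED;
//
//    void invoke() {
//       if (!status.allowInvocations())
//          throw new IllegalStateException("Cache is " + status + ", not RUNNING");
//       // ... perform the operation ...
//    }
//
//    void start() {
//       if (status.startAllowed()) {
//          status = ComponentStatus.INITIALIZING;
//          // ... wire components, then ...
//          status = ComponentStatus.RUNNING;
//       }
//    }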
| 1,796 | 21.185185 | 108 | java |
null | infinispan-main/core/src/main/java/org/infinispan/lifecycle/ModuleLifecycle.java |
package org.infinispan.lifecycle;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
/**
* ModuleLifecycle is an API hook for delegating lifecycle events to Infinispan sub-modules.
* <p>
 * For example, the 'query' module needs to register an interceptor with the Cache if the Cache has querying enabled.
* <p>
* To use this hook, you would need to implement this interface and annotate it with
* {@link org.infinispan.factories.annotations.InfinispanModule}.
* <p>
 * Modules that also have their own configuration (see {@link org.infinispan.configuration}) can access their
 * configuration beans via {@link Configuration#module(Class)}.
*
* @author Manik Surtani
* @since 4.0
*/
public interface ModuleLifecycle {
default void cacheManagerStarting(GlobalComponentRegistry gcr, GlobalConfiguration globalConfiguration) {}
default void cacheManagerStarted(GlobalComponentRegistry gcr) {}
default void cacheManagerStopping(GlobalComponentRegistry gcr) {}
default void cacheManagerStopped(GlobalComponentRegistry gcr) {}
default void cacheStarting(ComponentRegistry cr, Configuration configuration, String cacheName) {}
default void cacheStarted(ComponentRegistry cr, String cacheName) {}
default void cacheStopping(ComponentRegistry cr, String cacheName) {}
default void cacheStopped(ComponentRegistry cr, String cacheName) {}
}
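// Illustrative sketch (not part of the original file): a minimal module hook. The
// module name is hypothetical, and the @InfinispanModule annotation is the one
// referenced in the Javadoc above; real modules must also be discoverable as a
// ServiceLoader service.
//
//    @InfinispanModule(name = "example-module")
//    public final class ExampleModuleLifecycle implements ModuleLifecycle {
//       @Override
//       public void cacheStarting(ComponentRegistry cr, Configuration configuration, String cacheName) {
//          // inspect the cache configuration and register extra components here
//       }
//    }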
| 1,560 | 38.025 | 120 | java |
null | infinispan-main/core/src/main/java/org/infinispan/upgrade/RollingUpgradeManager.java |
package org.infinispan.upgrade;
import static org.infinispan.util.logging.Log.CONTAINER;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ServiceFinder;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.SurvivesRestarts;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.jmx.annotations.Parameter;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* RollingUpgradeManager handles the synchronization of data between Infinispan
* clusters when performing rolling upgrades.
*
* @author Manik Surtani
* @author Tristan Tarrant
* @since 5.2
*/
@MBean(objectName = "RollingUpgradeManager", description = "Handles the migration of data when upgrading between versions.")
@Scope(value = Scopes.NAMED_CACHE)
@SurvivesRestarts
public class RollingUpgradeManager {
private static final Log log = LogFactory.getLog(RollingUpgradeManager.class);
private final ConcurrentMap<String, TargetMigrator> targetMigrators = new ConcurrentHashMap<>(2);
@Inject Cache<Object, Object> cache;
@Inject TimeService timeService;
@Inject GlobalConfiguration globalConfiguration;
@Start
public void start() {
ClassLoader cl = globalConfiguration.classLoader();
for (TargetMigrator m : ServiceFinder.load(TargetMigrator.class, cl)) {
targetMigrators.put(m.getName(), m);
}
}
@ManagedOperation(
description = "Synchronizes data from source clusters to target clusters with the specified migrator.",
displayName = "Synchronizes data from source clusters to target clusters with the specified migrator."
)
public long synchronizeData(@Parameter(name="migratorName", description="Specifies the name of the migrator to use. Set hotrod as the value unless using custom migrators.") String migratorName) {
TargetMigrator migrator = getMigrator(migratorName);
long start = timeService.time();
long count = migrator.synchronizeData(cache);
log.entriesMigrated(count, cache.getName(), Util.prettyPrintTime(timeService.timeDuration(start, TimeUnit.MILLISECONDS)));
return count;
}
@ManagedOperation(
description = "Synchronizes data from source clusters to target clusters with the specified migrator.",
displayName = "Synchronizes data from source clusters to target clusters with the specified migrator."
)
public long synchronizeData(@Parameter(name = "migratorName", description = "Specifies the name of the migrator to use. Set hotrod as the value unless using custom migrators.") String migratorName,
@Parameter(name = "readBatch", description = "Specifies how many entries to read at a time from source clusters. Default is 10000.") int readBatch,
@Parameter(name = "threads", description = "Specifies the number of threads to use per node when writing data to target clusters. Defaults to number of available processors.") int threads) {
TargetMigrator migrator = getMigrator(migratorName);
long start = timeService.time();
long count = migrator.synchronizeData(cache, readBatch, threads);
log.entriesMigrated(count, cache.getName(), Util.prettyPrintTime(timeService.timeDuration(start, TimeUnit.MILLISECONDS)));
return count;
}
@ManagedOperation(
description = "Disconnects target clusters from source clusters.",
displayName = "Disconnects target clusters from source clusters."
)
public void disconnectSource(@Parameter(name = "migratorName", description = "Specifies the name of the migrator to use. Set hotrod as the value unless using custom migrators.") String migratorName) {
TargetMigrator migrator = getMigrator(migratorName);
migrator.disconnectSource(cache);
}
@ManagedOperation(
description = "Connects target clusters to source clusters.",
displayName = "Connects target clusters from source clusters."
)
public void connectSource(@Parameter(name = "migratorName", description = "Specifies the name of the migrator to use. Set hotrod as the value unless using custom migrators.") String migratorName,
@Parameter(name = "configuration", description = "Specifies the configuration of the remote store to add, in JSON format.") StoreConfiguration configuration) {
TargetMigrator migrator = getMigrator(migratorName);
migrator.connectSource(cache, configuration);
}
@ManagedOperation(
description = "Checks if the target cluster is connected to the source cluster.",
displayName = "Checks if the target cluster is connected to the source cluster."
)
public boolean isConnected(@Parameter(name = "migratorName", description = "Specifies the name of the migrator to use. Set hotrod as the value unless using custom migrators.") String migratorName) {
TargetMigrator migrator = getMigrator(migratorName);
return migrator.isConnected(cache);
}
private TargetMigrator getMigrator(String name) {
TargetMigrator targetMigrator = targetMigrators.get(name);
if (targetMigrator == null) {
throw CONTAINER.unknownMigrator(name);
}
return targetMigrator;
}
}
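// Illustrative sketch (not part of the original file): driving a rolling upgrade
// through the JMX operations above with the standard javax.management API. The
// ObjectName below is an assumption - the actual name depends on the configured
// JMX domain, cache manager name, cache name and cache mode.
//
//    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
//    ObjectName name = new ObjectName(
//          "org.infinispan:type=Cache,name=\"myCache(dist_sync)\",manager=\"DefaultCacheManager\",component=RollingUpgradeManager");
//    // 1. migrate all entries from the source cluster with the Hot Rod migrator
//    server.invoke(name, "synchronizeData",
//          new Object[] {"hotrod"}, new String[] {String.class.getName()});
//    // 2. once migration completes, detach the target from the source cluster
//    server.invoke(name, "disconnectSource",
//          new Object[] {"hotrod"}, new String[] {String.class.getName()});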
| 5,836 | 49.756522 | 221 | java |
null | infinispan-main/core/src/main/java/org/infinispan/upgrade/TargetMigrator.java |
package org.infinispan.upgrade;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.StoreConfiguration;
/**
* Performs migration operations on the target server or cluster of servers
*
* @author Tristan Tarrant
* @since 5.2
*/
public interface TargetMigrator {
/**
* Returns the name of this migrator
*/
String getName();
/**
* Performs the synchronization of data between source and target
*/
long synchronizeData(Cache<Object, Object> cache) throws CacheException;
/**
* Performs the synchronization of data between source and target
*/
long synchronizeData(Cache<Object, Object> cache, int readBatch, int threads) throws CacheException;
/**
* Disconnects the target from the source. This operation is the last step that must be performed after a rolling upgrade.
*/
void disconnectSource(Cache<Object, Object> cache) throws CacheException;
/**
* Connects the target cluster to the source cluster through a Remote Store.
*
* @param cache The cache to add the store to
* @param configuration The configuration of the store
*/
void connectSource(Cache<Object, Object> cache, StoreConfiguration configuration);
/**
* Check if a cluster is connected to other for doing rolling upgrades
    * @param cache The cache to check
* @return true if the cache has a remote store pointing to another cluster ready to do rolling upgrades.
*/
boolean isConnected(Cache<Object, Object> cache);
}
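// Illustrative sketch (not part of the original file): a no-op custom migrator.
// RollingUpgradeManager discovers implementations with ServiceFinder, so a real
// implementation must also be listed in
// META-INF/services/org.infinispan.upgrade.TargetMigrator. All names are hypothetical.
//
//    public final class NoopTargetMigrator implements TargetMigrator {
//       @Override public String getName() { return "noop"; }
//       @Override public long synchronizeData(Cache<Object, Object> cache) { return 0L; }
//       @Override public long synchronizeData(Cache<Object, Object> cache, int readBatch, int threads) { return 0L; }
//       @Override public void disconnectSource(Cache<Object, Object> cache) { }
//       @Override public void connectSource(Cache<Object, Object> cache, StoreConfiguration configuration) { }
//       @Override public boolean isConnected(Cache<Object, Object> cache) { return false; }
//    }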
| 1,554 | 31.395833 | 125 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/package-info.java |
/**
* Persistence API.
*
* @api.public
*/
package org.infinispan.persistence;
| 82 | 10.857143 | 35 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/InitializationContextImpl.java |
package org.infinispan.persistence;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import org.infinispan.Cache;
import org.infinispan.commons.io.ByteBufferFactory;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.NonBlockingManager;
import org.infinispan.util.concurrent.WithinThreadExecutor;
/**
* @author Mircea Markus
* @since 6.0
*/
public class InitializationContextImpl implements InitializationContext {
private final StoreConfiguration configuration;
private final Cache cache;
private final KeyPartitioner keyPartitioner;
private final PersistenceMarshaller marshaller;
private final TimeService timeService;
private final ByteBufferFactory byteBufferFactory;
private final MarshallableEntryFactory marshallableEntryFactory;
private final Executor nonBlockingExecutor;
private final GlobalConfiguration globalConfiguration;
private final BlockingManager blockingManager;
private final NonBlockingManager nonBlockingManager;
public InitializationContextImpl(StoreConfiguration configuration, Cache cache, KeyPartitioner keyPartitioner,
PersistenceMarshaller marshaller, TimeService timeService,
ByteBufferFactory byteBufferFactory, MarshallableEntryFactory marshallableEntryFactory,
Executor nonBlockingExecutor, GlobalConfiguration globalConfiguration,
BlockingManager blockingManager, NonBlockingManager nonBlockingManager) {
this.configuration = configuration;
this.cache = cache;
this.keyPartitioner = keyPartitioner;
this.marshaller = marshaller;
this.timeService = timeService;
this.byteBufferFactory = byteBufferFactory;
this.marshallableEntryFactory = marshallableEntryFactory;
this.nonBlockingExecutor = nonBlockingExecutor;
this.globalConfiguration = globalConfiguration;
this.blockingManager = blockingManager;
this.nonBlockingManager = nonBlockingManager;
}
@Override
public StoreConfiguration getConfiguration() {
return configuration;
}
@Override
public Cache getCache() {
return cache;
}
@Override
public KeyPartitioner getKeyPartitioner() {
return keyPartitioner;
}
@Override
public TimeService getTimeService() {
return timeService;
}
@Override
public ByteBufferFactory getByteBufferFactory() {
return byteBufferFactory;
}
@Override
public <K,V> MarshallableEntryFactory<K,V> getMarshallableEntryFactory() {
//noinspection unchecked
return marshallableEntryFactory;
}
@Deprecated
@Override
public ExecutorService getExecutor() {
return new WithinThreadExecutor();
}
@Override
public Executor getNonBlockingExecutor() {
return nonBlockingExecutor;
}
@Override
public BlockingManager getBlockingManager() {
return blockingManager;
}
@Override
public NonBlockingManager getNonBlockingManager() {
return nonBlockingManager;
}
@Override
public PersistenceMarshaller getPersistenceMarshaller() {
return marshaller;
}
@Override
public GlobalConfiguration getGlobalConfiguration() {
return globalConfiguration;
}
}
| 3,790 | 31.127119 | 123 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/PersistenceUtil.java |
package org.infinispan.persistence;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.IntFunction;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalStateConfiguration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.InvocationContext;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.AdvancedCacheLoader;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.SegmentedAdvancedLoadWriteStore;
import org.infinispan.util.logging.Log;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
import io.reactivex.rxjava3.schedulers.Schedulers;
/**
* @author Mircea Markus
* @since 6.0
*/
public class PersistenceUtil {
private static final int SEGMENT_NOT_PROVIDED = -1;
public static <K, V> int count(AdvancedCacheLoader<K, V> acl, Predicate<? super K> filter) {
// This can't be null
Long result = singleToValue(Flowable.fromPublisher(acl.publishKeys(filter)).count());
if (result > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
}
return result.intValue();
}
/**
* Counts how many entries are present in the segmented store. Only the segments provided will have entries counted.
* @param salws segmented store containing entries
* @param segments segments to count entries from
* @return count of entries that are in the provided segments
*/
public static int count(SegmentedAdvancedLoadWriteStore<?, ?> salws, IntSet segments) {
Long result = singleToValue(Flowable.fromPublisher(salws.publishKeys(segments, null)).count());
if (result > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;
}
return result.intValue();
}
// This method is blocking - but only invoked by tests or user code
@SuppressWarnings("checkstyle:ForbiddenMethod")
private static <E> E singleToValue(Single<E> single) {
return single.blockingGet();
}
// This method is blocking - but only invoked by tests or user code
@SuppressWarnings("checkstyle:ForbiddenMethod")
public static <K, V> Set<K> toKeySet(NonBlockingStore<K, V> nonBlockingStore, IntSet segments,
Predicate<? super K> filter) {
return Flowable.fromPublisher(nonBlockingStore.publishKeys(segments, filter))
.collect(Collectors.toSet())
.blockingGet();
}
public static <K, V> Set<K> toKeySet(AdvancedCacheLoader<K, V> acl, Predicate<? super K> filter) {
if (acl == null)
return Collections.emptySet();
return singleToValue(Flowable.fromPublisher(acl.publishKeys(filter))
.collectInto(new HashSet<>(), Set::add));
}
public static <K, V> Set<InternalCacheEntry<K, V>> toEntrySet(AdvancedCacheLoader<K, V> acl, Predicate<? super K> filter, final InternalEntryFactory ief) {
if (acl == null)
return Collections.emptySet();
return singleToValue(Flowable.fromPublisher(acl.entryPublisher(filter, true, true))
.map(me -> ief.create(me.getKey(), me.getValue(), me.getMetadata()))
.collectInto(new HashSet<>(), Set::add));
}
/**
* @deprecated since 9.4 This method references PersistenceManager, which isn't a public class
*/
@Deprecated
public static <K, V> InternalCacheEntry<K,V> loadAndStoreInDataContainer(DataContainer<K, V> dataContainer,
final PersistenceManager persistenceManager, K key, final InvocationContext ctx, final TimeService timeService,
final AtomicReference<Boolean> isLoaded) {
return org.infinispan.persistence.internal.PersistenceUtil.loadAndStoreInDataContainer(dataContainer,
persistenceManager, key, ctx, timeService, isLoaded);
}
/**
* @deprecated since 9.4 This method references PersistenceManager, which isn't a public class
*/
@Deprecated
public static <K, V> InternalCacheEntry<K,V> loadAndStoreInDataContainer(DataContainer<K, V> dataContainer, int segment,
final PersistenceManager persistenceManager, K key, final InvocationContext ctx, final TimeService timeService,
final AtomicReference<Boolean> isLoaded) {
return org.infinispan.persistence.internal.PersistenceUtil.loadAndStoreInDataContainer(dataContainer, segment,
persistenceManager, key, ctx, timeService, isLoaded);
}
/**
* @deprecated since 9.4 This method references PersistenceManager, which isn't a public class
*/
@Deprecated
public static <K, V> InternalCacheEntry<K,V> loadAndComputeInDataContainer(DataContainer<K, V> dataContainer,
int segment, final PersistenceManager persistenceManager, K key, final InvocationContext ctx,
final TimeService timeService, DataContainer.ComputeAction<K, V> action) {
return org.infinispan.persistence.internal.PersistenceUtil.loadAndComputeInDataContainer(dataContainer, segment,
persistenceManager, key, ctx, timeService, action);
}
/**
* @deprecated since 9.4 This method references PersistenceManager, which isn't a public class
*/
@Deprecated
public static <K, V> MarshallableEntry<K, V> loadAndCheckExpiration(PersistenceManager persistenceManager, Object key,
InvocationContext context, TimeService timeService) {
return org.infinispan.persistence.internal.PersistenceUtil.loadAndCheckExpiration(persistenceManager, key,
SEGMENT_NOT_PROVIDED, context);
}
public static <K, V> InternalCacheEntry<K, V> convert(MarshallableEntry<K, V> loaded, InternalEntryFactory factory) {
return org.infinispan.persistence.internal.PersistenceUtil.convert(loaded, factory);
}
/**
* Will create a publisher that parallelizes each publisher returned from the <b>publisherFunction</b> by executing
* them on the executor as needed.
* <p>
* Note that returned publisher will be publishing entries from the invocation of the executor. Thus any subscription
* will not block the thread it was invoked on, unless explicitly configured to do so.
* @param segments segments to parallelize across
    * @param executor the executor to execute parallelized operations on
* @param publisherFunction function that creates a different publisher for each segment
* @param <R> the returned value
    * @return a publisher that combines the publishers produced for each of the given segments
*/
public static <R> Publisher<R> parallelizePublisher(IntSet segments, Executor executor,
IntFunction<Publisher<R>> publisherFunction) {
return org.infinispan.persistence.internal.PersistenceUtil.parallelizePublisher(segments, Schedulers.from(executor),
publisherFunction);
}
/**
    * Replaces unwanted characters in cache names so they can be used as filenames
*/
public static String sanitizedCacheName(String cacheName) {
return cacheName.replaceAll("[^a-zA-Z0-9-_\\.]", "_");
}
public static Path getQualifiedLocation(GlobalConfiguration globalConfiguration, String location, String cacheName, String qualifier) {
Path persistentLocation = getLocation(globalConfiguration, location);
return persistentLocation.resolve(Paths.get(sanitizedCacheName(cacheName), qualifier));
}
public static Path getLocation(GlobalConfiguration globalConfiguration, String location) {
GlobalStateConfiguration globalState = globalConfiguration.globalState();
Path persistentLocation = Paths.get(globalState.persistentLocation());
if (location == null) {
if (!globalState.enabled()) {
// Should never be reached as store builders should ensure that the locations are not null during validation.
throw PERSISTENCE.storeLocationRequired();
}
return persistentLocation;
}
Path path = Paths.get(location);
if (!globalState.enabled()) {
return path;
}
if (path.isAbsolute()) {
// Ensure that the path lives under the global persistent location
if (path.startsWith(persistentLocation)) {
return path;
} else {
throw PERSISTENCE.forbiddenStoreLocation(path, persistentLocation);
}
}
return persistentLocation.resolve(path);
}
public static void validateGlobalStateStoreLocation(GlobalConfiguration globalConfiguration, String storeType, Attribute<?>... attributes) {
if (!globalConfiguration.globalState().enabled()) {
for (Attribute<?> attr : attributes)
if (attr.isNull())
throw Log.CONFIG.storeLocationRequired(storeType, attr.name());
}
}
}
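// Illustrative sketch (not part of the original file): how a file-based store might
// resolve its data directory with the helpers above. "globalConfiguration" and
// "location" are hypothetical variables, typically taken from InitializationContext
// and the store's own configuration.
//
//    // "my/Cache" becomes "my_Cache", which is safe to use as a directory name
//    String dirName = PersistenceUtil.sanitizedCacheName(cache.getName());
//    // resolves <location>/<sanitized-cache-name>/data, validated against the
//    // global persistent location when global state is enabled
//    Path dataDir = PersistenceUtil.getQualifiedLocation(globalConfiguration, location, cache.getName(), "data");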
| 9,483 | 44.161905 | 158 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/keymappers/DefaultTwoWayKey2StringMapper.java |
package org.infinispan.persistence.keymappers;
import java.util.Base64;
import java.util.UUID;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Default implementation for {@link TwoWayKey2StringMapper} that knows how to handle all primitive
* wrapper keys and Strings.
*
* @author Mircea.Markus@jboss.com
* @author Tristan Tarrant
*
* @since 4.1
*/
public class DefaultTwoWayKey2StringMapper implements TwoWayKey2StringMapper {
private static final Log log = LogFactory.getLog(DefaultTwoWayKey2StringMapper.class);
private static final char NON_STRING_PREFIX = '\uFEFF';
private static final char SHORT_IDENTIFIER = '1';
private static final char BYTE_IDENTIFIER = '2';
private static final char LONG_IDENTIFIER = '3';
private static final char INTEGER_IDENTIFIER = '4';
private static final char DOUBLE_IDENTIFIER = '5';
private static final char FLOAT_IDENTIFIER = '6';
private static final char BOOLEAN_IDENTIFIER = '7';
private static final char BYTEARRAYKEY_IDENTIFIER = '8';
private static final char NATIVE_BYTEARRAYKEY_IDENTIFIER = '9';
private static final char UUID_IDENTIFIER = 'a';
@Override
public String getStringMapping(Object key) {
char identifier;
if (key.getClass().equals(String.class)) {
return key.toString();
} else if (key.getClass().equals(Short.class)) {
identifier = SHORT_IDENTIFIER;
} else if (key.getClass().equals(Byte.class)) {
identifier = BYTE_IDENTIFIER;
} else if (key.getClass().equals(Long.class)) {
identifier = LONG_IDENTIFIER;
} else if (key.getClass().equals(Integer.class)) {
identifier = INTEGER_IDENTIFIER;
} else if (key.getClass().equals(Double.class)) {
identifier = DOUBLE_IDENTIFIER;
} else if (key.getClass().equals(Float.class)) {
identifier = FLOAT_IDENTIFIER;
} else if (key.getClass().equals(Boolean.class)) {
identifier = BOOLEAN_IDENTIFIER;
} else if (key.getClass().equals(WrappedByteArray.class)) {
return generateString(BYTEARRAYKEY_IDENTIFIER, Base64.getEncoder().encodeToString(((WrappedByteArray)key).getBytes()));
} else if (key.getClass().equals(byte[].class)) {
return generateString(NATIVE_BYTEARRAYKEY_IDENTIFIER, Base64.getEncoder().encodeToString((byte[]) key));
} else if (key.getClass().equals(UUID.class)) {
identifier = UUID_IDENTIFIER;
} else {
throw new IllegalArgumentException("Unsupported key type: " + key.getClass().getName());
}
return generateString(identifier, key.toString());
}
@Override
public Object getKeyMapping(String key) {
      // mapped non-String keys are always prefix + identifier + value, i.e. at least two chars long
      if (key.length() > 1 && key.charAt(0) == NON_STRING_PREFIX) {
char type = key.charAt(1);
String value = key.substring(2);
switch (type) {
case SHORT_IDENTIFIER:
return Short.parseShort(value);
case BYTE_IDENTIFIER:
return Byte.parseByte(value);
case LONG_IDENTIFIER:
return Long.parseLong(value);
case INTEGER_IDENTIFIER:
return Integer.parseInt(value);
case DOUBLE_IDENTIFIER:
return Double.parseDouble(value);
case FLOAT_IDENTIFIER:
return Float.parseFloat(value);
case BOOLEAN_IDENTIFIER:
return Boolean.parseBoolean(value);
case BYTEARRAYKEY_IDENTIFIER:
byte[] bytes = Base64.getDecoder().decode(value);
return new WrappedByteArray(bytes);
case NATIVE_BYTEARRAYKEY_IDENTIFIER:
return Base64.getDecoder().decode(value);
case UUID_IDENTIFIER:
return UUID.fromString(value);
default:
throw new IllegalArgumentException("Unsupported type code: " + type);
}
} else {
return key;
}
}
@Override
public boolean isSupportedType(Class<?> keyType) {
      return isPrimitive(keyType) || keyType == WrappedByteArray.class || keyType == UUID.class;
}
private String generateString(char identifier, String s) {
return String.valueOf(NON_STRING_PREFIX) + String.valueOf(identifier) + s;
}
private static boolean isPrimitive(Class<?> key) {
return key == String.class || key == Short.class || key == Byte.class || key == Long.class || key == Integer.class
|| key == Double.class || key == Float.class || key == Boolean.class || key == byte[].class;
}
}
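// Illustrative sketch (not part of the original file): the two-way contract in action.
//
//    TwoWayKey2StringMapper mapper = new DefaultTwoWayKey2StringMapper();
//    String s = mapper.getStringMapping(42L);      // '\uFEFF' + '3' + "42"
//    assert Long.valueOf(42L).equals(mapper.getKeyMapping(s));
//    // plain Strings pass through untouched, so they never carry the prefix
//    assert "hello".equals(mapper.getKeyMapping(mapper.getStringMapping("hello")));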
| 4,646 | 39.763158 | 128 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/keymappers/WrappedByteArrayOrPrimitiveMapper.java |
package org.infinispan.persistence.keymappers;
import java.util.Base64;
import org.infinispan.commons.marshall.StreamingMarshaller;
import org.infinispan.commons.marshall.WrappedByteArray;
/**
* This class is an implementation for {@link TwoWayKey2StringMapper} that supports both primitives
* and {@link org.infinispan.commons.marshall.WrappedByteArray}s. It extends {@link DefaultTwoWayKey2StringMapper}
* to achieve this.
*
* @author Justin Hayes
* @since 5.2
*/
public class WrappedByteArrayOrPrimitiveMapper extends DefaultTwoWayKey2StringMapper implements MarshallingTwoWayKey2StringMapper {
@Override
public void setMarshaller(StreamingMarshaller marshaller) {
      // TODO The marshaller is not used, so we could implement TwoWayKey2StringMapper instead of MarshallingTwoWayKey2StringMapper
}
@Override
public String getStringMapping(Object key) {
if (super.isSupportedType(key.getClass())) {
// Use our parent
return super.getStringMapping(key);
} else {
         // Do it ourselves
try {
WrappedByteArray mv = (WrappedByteArray) key;
String serializedObj = serializeObj(mv);
return serializedObj;
} catch (Exception ex) {
throw new IllegalArgumentException("Exception occurred serializing key.", ex);
}
}
}
@Override
public Object getKeyMapping(String key) {
if (super.isSupportedType(key.getClass())) {
// Use our parent
return super.getKeyMapping(key);
} else {
         // Do it ourselves
try {
Object obj = deserializeObj(key);
WrappedByteArray mv = (WrappedByteArray) obj;
return mv;
} catch (Exception ex) {
throw new IllegalArgumentException("Exception occurred deserializing key.", ex);
}
}
}
/**
    * Serializes the wrapped bytes as a Base64 string.
    *
    * @param mv the wrapped byte array to encode
    * @return the Base64-encoded form of the wrapped bytes
    * @throws Exception if encoding fails
*/
private String serializeObj(WrappedByteArray mv) throws Exception {
return Base64.getEncoder().encodeToString(mv.getBytes());
}
/**
    * Deserializes a Base64 string back into a {@link WrappedByteArray}.
    *
    * @param key the Base64-encoded key
    * @return the decoded {@link WrappedByteArray}
    * @throws Exception if decoding fails
*/
private WrappedByteArray deserializeObj(String key) throws Exception {
byte[] data = Base64.getDecoder().decode(key);
return new WrappedByteArray(data);
}
@Override
public boolean isSupportedType(Class<?> keyType) {
return keyType.equals(WrappedByteArray.class) || super.isSupportedType(keyType);
}
}
| 2,607 | 29.682353 | 134 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/keymappers/MarshallingTwoWayKey2StringMapper.java |
package org.infinispan.persistence.keymappers;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.marshall.StreamingMarshaller;
/**
*
* MarshallingTwoWayKey2StringMapper.
*
* @author Tristan Tarrant
* @since 5.2
*/
public interface MarshallingTwoWayKey2StringMapper extends TwoWayKey2StringMapper {
/**
* @deprecated since 10.0, use {@link #setMarshaller(Marshaller)} instead
*/
@Deprecated
default void setMarshaller(StreamingMarshaller marshaller) {
// no-op
}
default void setMarshaller(Marshaller marshaller) {
// no-op
}
}
| 606 | 21.481481 | 83 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/keymappers/Key2StringMapper.java |
package org.infinispan.persistence.keymappers;
/**
* Defines the logic of mapping a key object to a String. This is required by certain cache stores, in order
* to map each key to a String which the underlying store is capable of handling. It should generate a unique String
* based on the supplied key.
*
* @author Mircea.Markus@jboss.com
* @author Manik Surtani
*/
public interface Key2StringMapper {
/**
* Do we support this key type?
* @param keyType type to test
* @return true if the type is supported, false otherwise.
*/
boolean isSupportedType(Class<?> keyType);
/**
    * Must return a unique String for the supplied key.
* @param key key to map to a String
* @return String representation of the key
*/
String getStringMapping(Object key);
}
| 804 | 28.814815 | 116 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/keymappers/UnsupportedKeyTypeException.java |
package org.infinispan.persistence.keymappers;
import org.infinispan.persistence.spi.PersistenceException;
/**
* Exception thrown by certain cache stores when one tries to persist an entry with an unsupported key type.
*
* @author Mircea.Markus@jboss.com
*/
public class UnsupportedKeyTypeException extends PersistenceException {
/** The serialVersionUID */
private static final long serialVersionUID = 1442739860198872706L;
public UnsupportedKeyTypeException(Object key) {
this("Unsupported key type: '" + key.getClass().getName() + "' on key: " + key);
}
public UnsupportedKeyTypeException(String message) {
super(message);
}
public UnsupportedKeyTypeException(String message, Throwable cause) {
super(message, cause);
}
public UnsupportedKeyTypeException(Throwable cause) {
super(cause);
}
}
| 860 | 26.774194 | 108 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/keymappers/TwoWayKey2StringMapper.java |
package org.infinispan.persistence.keymappers;
/**
* Extends {@link Key2StringMapper} and allows a bidirectional transformation between keys and Strings. Note that the
* object instance created by {@link #getKeyMapping(String)} is guaranteed to be <i>equal</i> to the original object
* used to generate the String, but not necessarily the same object reference.
* <p />
* The following condition should be satisfied by implementations of this interface:
* <code>
* assert key.equals(mapper.getKeyMapping(mapper.getStringMapping(key)));
* </code>
*
* @author Mircea.Markus@jboss.com
* @author Manik Surtani
* @since 4.1
*/
public interface TwoWayKey2StringMapper extends Key2StringMapper {
/**
* Maps a String back to its original key
* @param stringKey string representation of a key
* @return an object instance that is <i>equal</i> to the original object used to create the key mapping.
*/
Object getKeyMapping(String stringKey);
}
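// Illustrative sketch (not part of the original file): a custom bidirectional mapper
// for a single application key type. "AccountKey" is a hypothetical class with a
// numeric id and an equals() based on that id.
//
//    public final class AccountKeyMapper implements TwoWayKey2StringMapper {
//       @Override
//       public boolean isSupportedType(Class<?> keyType) {
//          return keyType == AccountKey.class;
//       }
//       @Override
//       public String getStringMapping(Object key) {
//          return Long.toString(((AccountKey) key).id());
//       }
//       @Override
//       public Object getKeyMapping(String stringKey) {
//          // returns an equal instance, not necessarily the same reference
//          return new AccountKey(Long.parseLong(stringKey));
//       }
//    }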
| 973 | 37.96 | 118 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/async/PutModification.java |
package org.infinispan.persistence.async;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.persistence.spi.MarshallableEntry;
class PutModification implements Modification {
private final int segment;
private final MarshallableEntry entry;
PutModification(int segment, MarshallableEntry entry) {
this.segment = segment;
this.entry = entry;
}
@Override
public <K, V> void apply(AsyncNonBlockingStore<K, V> store) {
store.putModification(AsyncNonBlockingStore.wrapKeyIfNeeded(entry.getKey()), this);
}
@Override
public int getSegment() {
return segment;
}
@SuppressWarnings("unchecked")
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> asStage() {
return CompletableFuture.completedFuture(entry);
}
@SuppressWarnings("unchecked")
public <K, V> MarshallableEntry<K, V> getEntry() {
return entry;
}
@Override
public String toString() {
return "PutModification{" +
"segment=" + segment +
", key=" + entry.getKey() +
'}';
}
}
| 1,148 | 23.978261 | 89 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/async/Modification.java |
package org.infinispan.persistence.async;
import java.util.concurrent.CompletionStage;
import org.infinispan.persistence.spi.MarshallableEntry;
interface Modification {
/**
* Applies the modification to the provided async store.
* <p>
* This method is not thread safe, callers must ensure that it is not invoked on multiple threads in parallel.
* @param store the store to apply the modification to
* @param <K> key type
* @param <V> value type
*/
<K, V> void apply(AsyncNonBlockingStore<K, V> store);
/**
* Returns the segment that maps to this modification. Some modifications may not map to a given
* segment and may throw an {@link UnsupportedOperationException}.
* @return the segment that maps to the modification
*/
int getSegment();
/**
* Returns this modification as a stage that is already complete.
* @param <K> key type
* @param <V> value type
* @return a stage that represents the modification
*/
<K, V> CompletionStage<MarshallableEntry<K, V>> asStage();
}
| 1,058 | 31.090909 | 113 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/async/AsyncNonBlockingStore.java |
package org.infinispan.persistence.async;
import java.lang.invoke.MethodHandles;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.PersistenceConfiguration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.support.DelegatingNonBlockingStore;
import org.infinispan.persistence.support.SegmentPublisherWrapper;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.flowables.ConnectableFlowable;
import io.reactivex.rxjava3.functions.Function;
import net.jcip.annotations.GuardedBy;
/**
* A delegating NonBlockingStore implementation that batches write operations and runs the resulting batches on the
 * delegate store in a non-overlapping manner, that is, only a single batch will be running at a time.
* <p>
* Whenever a write operation is performed it will also attempt to start a batch write immediately to the delegate store.
 * Any concurrent writes during this time may be included in the batch. Any additional writes are enqueued until
 * the batch completes, at which point the pending batch, if there is one, is submitted automatically. Write operations
 * to the same key in the same batch are coalesced, with only the last write being written to the underlying store.
 * If the number of enqueued pending write operations becomes equal to or larger than the modification queue size, any
 * subsequent write is still added to the queue, but the returned stage does not complete until the current batch
 * completes, providing backpressure that slows down writers.
* <p>
* Read operations may be resolved by this store immediately if the given key is still being updated in the
 * delegate store or if it is enqueued for the next batch. If the key is in neither, the underlying store is queried
 * to acquire it.
* @author wburns
* @since 11.0
* @param <K> key type for the store
* @param <V> value type for the store
*/
public class AsyncNonBlockingStore<K, V> extends DelegatingNonBlockingStore<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final NonBlockingStore<K, V> actual;
private Executor nonBlockingExecutor;
private int segmentCount;
private int modificationQueueSize;
private PersistenceConfiguration persistenceConfiguration;
private AsyncStoreConfiguration asyncConfiguration;
// "Non blocking" scheduler used for the purpose of delaying retry batch operations on failures
private ScheduledExecutorService scheduler;
// This variable will be non null if there is a pending batch being sent to the underlying store
// If a request causes the modification queue to overflow it will receive a stage back that is only complete
// when this future is completed (aka. previous replication has completed)
@GuardedBy("this")
private CompletableFuture<Void> batchFuture;
// This variable will be non null if the underlying store has been found to be not available
// Note that the async store will still be available as long as the queue size (ie. modificationMap.size) is not
// greater than the configured modificationQueueSize
@GuardedBy("this")
private CompletableFuture<Void> delegateAvailableFuture;
// Any pending modifications will be enqueued in this map
@GuardedBy("this")
private Map<Object, Modification> pendingModifications = new HashMap<>();
// If there is a pending clear this will be true
@GuardedBy("this")
private boolean hasPendingClear;
// The next two variables are held temporarily until a replication of the values is complete. We need to retain
// these values until we are sure the entries are actually in the store - note these variables are only written to
// via reference (thus the map is safe to read outside of this lock, but the reference must be read in synchronized)
// This map contains all the modifications currently being replicated to the delegating store
@GuardedBy("this")
private Map<Object, Modification> replicatingModifications = Collections.emptyMap();
   // True if there is an outstanding clear that is being run on the delegating store
@GuardedBy("this")
private boolean isReplicatingClear;
private volatile boolean stopped = true;
public AsyncNonBlockingStore(NonBlockingStore<K, V> actual) {
this.actual = actual;
}
@Override
public Set<Characteristic> characteristics() {
// Here solely for documentation purposes. This store supports only the characteristics of
// the underlying store
return super.characteristics();
}
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
Configuration cacheConfiguration = ctx.getCache().getCacheConfiguration();
persistenceConfiguration = cacheConfiguration.persistence();
scheduler = SecurityActions.getGlobalComponentRegistry(ctx.getCache().getCacheManager()).getComponent(
ScheduledExecutorService.class, KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR);
assert scheduler != null;
StoreConfiguration storeConfiguration = ctx.getConfiguration();
segmentCount = storeConfiguration.segmented() ? cacheConfiguration.clustering().hash().numSegments() : 1;
asyncConfiguration = storeConfiguration.async();
modificationQueueSize = asyncConfiguration.modificationQueueSize();
nonBlockingExecutor = ctx.getNonBlockingExecutor();
stopped = false;
return actual.start(ctx);
}
@Override
public CompletionStage<Void> stop() {
CompletionStage<Void> asyncStage;
if (log.isTraceEnabled()) {
log.tracef("Stopping async store containing store %s", actual);
}
asyncStage = awaitQuiescence();
return asyncStage.thenCompose(ignore -> {
if (log.isTraceEnabled()) {
log.tracef("Stopping store %s from async store", actual);
}
stopped = true;
return actual.stop();
});
}
/**
    * Returns a stage that, when complete, signals that this store has submitted and completed all pending modifications
*/
private CompletionStage<Void> awaitQuiescence() {
CompletionStage<Void> stage;
synchronized (this) {
stage = batchFuture;
}
if (stage == null) {
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Must wait until prior batch completes for %s", actual);
}
return stage.thenCompose(ignore -> awaitQuiescence());
}
void putModification(Object key, Modification modification) {
pendingModifications.put(key, modification);
}
void putClearModification() {
pendingModifications.clear();
hasPendingClear = true;
}
/**
* This method submits a batch of modifications to the underlying store and completes {@code batchFuture}
* when the modifications are done.
*
* If there are any pending modifications at that time, it automatically submits a new batch,
* otherwise it sets {@code batchFuture} to null.
*
* Callers must atomically check that {@code batchFuture} is null and set it to a non-null value,
* to ensure that only one batch is being processed at any time.
*/
private void submitTask() {
Map<Object, Modification> newMap = new HashMap<>();
if (log.isTraceEnabled()) {
log.tracef("Starting new batch with id %s", System.identityHashCode(newMap));
}
boolean ourClearToReplicate;
Map<Object, Modification> ourModificationsToReplicate;
synchronized (this) {
         // isReplicatingClear would be true, or replicatingModifications non-empty, if an update were currently pending
// But we should only allow one at a time
assert replicatingModifications.isEmpty() && !isReplicatingClear;
replicatingModifications = pendingModifications;
ourModificationsToReplicate = pendingModifications;
pendingModifications = newMap;
isReplicatingClear = hasPendingClear;
ourClearToReplicate = hasPendingClear;
hasPendingClear = false;
}
CompletionStage<Void> asyncBatchStage;
if (ourClearToReplicate) {
if (log.isTraceEnabled()) {
log.tracef("Sending clear to underlying store for id %s", System.identityHashCode(ourModificationsToReplicate));
}
asyncBatchStage = retry(actual::clear, persistenceConfiguration.connectionAttempts()).whenComplete((ignore, t) -> {
synchronized (this) {
isReplicatingClear = false;
}
});
} else {
asyncBatchStage = CompletableFutures.completedNull();
}
if (!ourModificationsToReplicate.isEmpty()) {
asyncBatchStage = asyncBatchStage.thenCompose(ignore -> {
if (log.isTraceEnabled()) {
log.tracef("Sending batch of %d write/remove operations to underlying store with id %s", ourModificationsToReplicate.size(),
System.identityHashCode(ourModificationsToReplicate));
}
return retry(() -> replicateModifications(ourModificationsToReplicate), persistenceConfiguration.connectionAttempts())
.whenComplete((ignore2, t) -> {
synchronized (this) {
replicatingModifications = Collections.emptyMap();
}
});
});
}
asyncBatchStage.whenComplete((ignore, t) -> {
if (log.isTraceEnabled()) {
log.tracef("Async operations completed for id %s", System.identityHashCode(ourModificationsToReplicate));
}
boolean submitNewBatch;
CompletableFuture<Void> future;
synchronized (this) {
submitNewBatch = !pendingModifications.isEmpty() || hasPendingClear;
future = batchFuture;
batchFuture = submitNewBatch ? new CompletableFuture<>() : null;
}
if (t != null) {
future.completeExceptionally(t);
} else {
future.complete(null);
}
if (submitNewBatch) {
if (log.isTraceEnabled()) {
log.trace("Submitting new batch after completion of prior");
}
submitTask();
}
});
}
/**
    * Attempts to run the given supplier, checking whether the supplied stage contains an error. It reruns the supplier
    * until a supplied stage completes without an exception or it has encountered {@code retries} exceptions. In the
    * latter case it completes the returned stage with the last throwable encountered.
* <p>
* The supplier is only invoked on the delegating store if it is actually available and will wait for it to
* become so if necessary.
* @param operationSupplier supplies the stage to test if a throwable was encountered
* @param retries how many attempts to make before giving up and propagating the exception
* @return a stage that is completed when the underlying supplied stage completed normally or has encountered a
* throwable retries times
*/
private CompletionStage<Void> retry(Supplier<CompletionStage<Void>> operationSupplier, int retries) {
return CompletionStages.handleAndCompose(getAvailabilityDelayStage().thenCompose(ignore -> operationSupplier.get()), (ignore, throwable) -> {
if (throwable != null) {
if (retries > 0) {
int waitTime = persistenceConfiguration.availabilityInterval();
log.debugf(throwable,"Failed to process async operation - retrying with delay of %d ms", waitTime);
if (waitTime > 0) {
RunnableCompletionStage rcs = new RunnableCompletionStage(() -> retry(operationSupplier,retries - 1));
scheduler.schedule(rcs, waitTime, TimeUnit.MILLISECONDS);
return rcs;
}
return retry(operationSupplier,retries - 1);
} else {
log.debug("Failed to process async operation - no more retries", throwable);
return CompletableFuture.failedFuture(throwable);
}
}
return CompletableFutures.completedNull();
});
}
private static class RunnableCompletionStage extends CompletableFuture<Void> implements Runnable {
private final Supplier<CompletionStage<Void>> supplier;
private RunnableCompletionStage(Supplier<CompletionStage<Void>> supplier) {
this.supplier = supplier;
}
@Override
public void run() {
supplier.get().whenComplete((ignore, throwable) -> {
if (throwable != null) {
completeExceptionally(throwable);
} else {
complete(null);
}
});
}
}
private CompletionStage<Void> replicateModifications(Map<Object, Modification> modifications) {
// Use a connected flowable, so we don't have to iterate over the modifications twice
ConnectableFlowable<Modification> connectableModifications = Flowable.fromIterable(modifications.values())
.publish();
// The method below may subscribe to the Flowable on a different thread, thus we must auto connect after both are
// subscribed to (e.g. NonBlockingStoreAdapter subscribes on a blocking thread)
Flowable<Modification> modificationFlowable = connectableModifications.autoConnect(2);
return actual.batch(segmentCount,
modificationFlowable.ofType(RemoveModification.class)
.groupBy(Modification::getSegment, RemoveModification::getKey)
.map(SegmentPublisherWrapper::wrap),
modificationFlowable.ofType(PutModification.class)
.groupBy(Modification::getSegment, PutModification::<K, V>getEntry)
.map(SegmentPublisherWrapper::wrap));
}
private CompletionStage<Void> getAvailabilityDelayStage() {
if (asyncConfiguration.failSilently()) {
return CompletableFutures.completedNull();
}
CompletableFuture<Void> availabilityFuture;
synchronized (this) {
availabilityFuture = delegateAvailableFuture;
}
return availabilityFuture == null ? CompletableFutures.completedNull() : availabilityFuture;
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
return Flowable.defer(() -> {
assertNotStopped();
if (log.isTraceEnabled()) {
log.tracef("Publisher subscribed to retrieve entries for segments %s", segments);
}
return abstractPublish(segments, filter, PutModification::getEntry, MarshallableEntry::getKey,
(innerSegments, predicate) -> actual.publishEntries(innerSegments, predicate, includeValues));
});
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
return Flowable.defer(() -> {
assertNotStopped();
if (log.isTraceEnabled()) {
log.tracef("Publisher subscribed to retrieve keys for segments %s", segments);
}
return abstractPublish(segments, filter, putModification -> putModification.<K, Object>getEntry().getKey(),
RxJavaInterop.identityFunction(), actual::publishKeys);
});
}
private <E> Publisher<E> abstractPublish(IntSet segments, Predicate<? super K> filter, Function<PutModification, E> putFunction,
Function<E, K> toKeyFunction, BiFunction<IntSet, Predicate<K>, Publisher<E>> publisherFunction) {
Map.Entry<Boolean, Map<Object, Modification>> entryModifications = flattenModificationMaps();
Map<Object, Modification> modificationCopy = entryModifications.getValue();
Flowable<E> modPublisher = Flowable.fromIterable(modificationCopy.values())
.ofType(PutModification.class)
.filter(modification -> segments.contains(modification.getSegment()))
.map(putFunction);
if (filter != null) {
modPublisher = modPublisher.filter(e -> filter.test(toKeyFunction.apply(e)));
}
// We had a clear so skip actually asking the store
if (entryModifications.getKey()) {
if (log.isTraceEnabled()) {
log.trace("Only utilizing pending modifications as clear a was found");
}
return modPublisher;
}
Predicate<K> combinedPredicate = k -> !modificationCopy.containsKey(k);
if (filter != null) {
combinedPredicate = combinedPredicate.and(filter);
}
return modPublisher.concatWith(publisherFunction.apply(segments, combinedPredicate));
}
private Map.Entry<Boolean, Map<Object, Modification>> flattenModificationMaps() {
Map<Object, Modification> modificationCopy;
Map<Object, Modification> modificationsToReplicate;
boolean clearToReplicate;
synchronized (this) {
modificationCopy = new HashMap<>(pendingModifications);
if (hasPendingClear) {
return new AbstractMap.SimpleImmutableEntry<>(Boolean.TRUE, modificationCopy);
}
modificationsToReplicate = this.replicatingModifications;
clearToReplicate = this.isReplicatingClear;
}
modificationCopy.putAll(modificationsToReplicate);
return new AbstractMap.SimpleImmutableEntry<>(clearToReplicate, modificationCopy);
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
assertNotStopped();
CompletionStage<MarshallableEntry<K, V>> pendingStage = getStageFromPending(key);
if (pendingStage != null) {
return pendingStage;
}
return actual.load(segment, key);
}
private CompletionStage<MarshallableEntry<K, V>> getStageFromPending(Object key) {
Object wrappedKey = wrapKeyIfNeeded(key);
Map<Object, Modification> modificationsToReplicate;
boolean clearToReplicate;
synchronized (this) {
         // Note that writes to this map are done only in a synchronized block, so we have to do the same for reads
Modification modification = pendingModifications.get(wrappedKey);
if (modification != null) {
if (log.isTraceEnabled()) {
log.tracef("Found entry was pending write in async store: %s", modification);
}
return modification.asStage();
}
if (hasPendingClear) {
if (log.isTraceEnabled()) {
log.trace("There is a pending clear from async store, returning null");
}
return CompletableFutures.completedNull();
}
// This map is never written to so just reading reference in synchronized block is sufficient
modificationsToReplicate = this.replicatingModifications;
clearToReplicate = this.isReplicatingClear;
}
Modification modification = modificationsToReplicate.get(wrappedKey);
if (modification != null) {
if (log.isTraceEnabled()) {
log.tracef("Found entry was replicating write in async store: %s", modification);
}
return modification.asStage();
} else if (clearToReplicate) {
if (log.isTraceEnabled()) {
log.trace("There is a clear being replicated from async store, returning null");
}
return CompletableFutures.completedNull();
}
return null;
}
@Override
public CompletionStage<Void> batch(int publisherCount, Publisher<SegmentedPublisher<Object>> removePublisher,
Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
assertNotStopped();
Completable removeCompletable = Flowable.fromPublisher(removePublisher)
.flatMapCompletable(sp -> Flowable.fromPublisher(sp)
.concatMapCompletable(key -> Completable.fromCompletionStage(submitModification(new RemoveModification(sp.getSegment(), key))), publisherCount));
Completable modifyCompletable = Flowable.fromPublisher(writePublisher)
.flatMapCompletable(sp -> Flowable.fromPublisher(sp)
.concatMapCompletable(me -> Completable.fromCompletionStage(submitModification(new PutModification(sp.getSegment(), me))), publisherCount));
return removeCompletable.mergeWith(modifyCompletable)
.toCompletionStage(null);
}
CompletionStage<Void> submitModification(Modification modification) {
boolean isTraceEnabled = log.isTraceEnabled();
boolean startNewBatch;
CompletionStage<Void> submitStage;
synchronized (this) {
int previousBatchId;
if (isTraceEnabled) {
previousBatchId = System.identityHashCode(replicatingModifications);
int currentBatchId = System.identityHashCode(pendingModifications);
log.tracef("Adding modification %s to batch %s", modification, currentBatchId);
} else {
previousBatchId = 0;
}
modification.apply(this);
startNewBatch = batchFuture == null;
if (startNewBatch) {
batchFuture = new CompletableFuture<>();
}
int queueSize = pendingModifications.size() + replicatingModifications.size();
submitStage = queueSize > modificationQueueSize ? batchFuture : null;
if (submitStage != null && isTraceEnabled) {
log.tracef("Too many modifications queued (%d), operation must wait until previous batch %d completes",
queueSize, previousBatchId);
}
}
if (startNewBatch) {
submitTask();
}
return submitStage == null ? CompletableFutures.completedNull() :
submitStage.thenApplyAsync(CompletableFutures.toNullFunction(), nonBlockingExecutor);
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
assertNotStopped();
return submitModification(new PutModification(segment, entry));
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
assertNotStopped();
// Return null to signal that we don't know if the key exists in the store
// Use erasure to avoid calling thenApply
return (CompletionStage)submitModification(new RemoveModification(segment, key));
}
@Override
public CompletionStage<Void> clear() {
assertNotStopped();
submitModification(ClearModification.INSTANCE);
return CompletableFutures.completedNull();
}
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
// We assume our modifications aren't expired - so just call actual store
return Flowable.defer(() -> {
assertNotStopped();
return actual.purgeExpired();
});
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
assertNotStopped();
return actual.addSegments(segments);
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
assertNotStopped();
synchronized (this) {
pendingModifications.values().removeIf(modification -> segments.contains(modification.getSegment()));
}
return actual.removeSegments(segments);
}
@Override
public CompletionStage<Long> size(IntSet segments) {
assertNotStopped();
      // TODO: technically this is wrong, but the old version did this - is it okay?
return actual.size(segments);
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
assertNotStopped();
return actual.approximateSize(segments);
}
@Override
public CompletionStage<Boolean> isAvailable() {
if (stopped) {
return CompletableFutures.completedFalse();
}
if (asyncConfiguration.failSilently())
return CompletableFutures.completedTrue();
CompletionStage<Boolean> superAvailableStage = super.isAvailable();
return superAvailableStage.thenApply(delegateAvailable -> {
if (delegateAvailable) {
CompletableFuture<Void> delegateFuture;
synchronized (this) {
delegateFuture = delegateAvailableFuture;
delegateAvailableFuture = null;
}
if (delegateFuture != null) {
log.debugf("Underlying delegate %s is now available", actual);
delegateFuture.complete(null);
}
return true;
}
boolean delegateUnavailable;
boolean isReplicating;
int queueSize;
synchronized (this) {
isReplicating = !replicatingModifications.isEmpty() || isReplicatingClear;
queueSize = pendingModifications.size();
if (delegateUnavailable = delegateAvailableFuture == null) {
delegateAvailableFuture = new CompletableFuture<>();
}
}
if (delegateUnavailable) {
log.debugf("Underlying delegate %s is now unavailable!", actual);
}
return queueSize < modificationQueueSize || !isReplicating;
});
}
@Override
public NonBlockingStore<K, V> delegate() {
return actual;
}
private void assertNotStopped() throws CacheException {
if (stopped)
throw new IllegalLifecycleStateException("AsyncCacheWriter stopped; no longer accepting more entries.");
}
/**
    * Wraps the provided key if necessary so that equals works properly
* @param key the key to wrap
* @return the wrapped object (if required) or the object itself
*/
static Object wrapKeyIfNeeded(Object key) {
if (key instanceof byte[]) {
return new WrappedByteArray((byte[]) key);
}
return key;
}
}
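/**
 * Illustrative sketch, not part of the original class: arrays use identity-based
 * equals/hashCode, so two byte[] keys with equal contents would be treated as
 * distinct keys in the pending-modification map. Wrapping restores value semantics.
 */
class WrapKeyExample {
   static boolean sameKey(byte[] a, byte[] b) {
      Object ka = AsyncNonBlockingStore.wrapKeyIfNeeded(a);
      Object kb = AsyncNonBlockingStore.wrapKeyIfNeeded(b);
      // true whenever the two arrays hold equal contents
      return ka.equals(kb) && ka.hashCode() == kb.hashCode();
   }
}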
| 27,234
| 42.093354
| 163
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/async/RemoveModification.java
|
package org.infinispan.persistence.async;
import java.util.concurrent.CompletionStage;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.commons.util.concurrent.CompletableFutures;
class RemoveModification implements Modification {
private final int segment;
private final Object key;
RemoveModification(int segment, Object key) {
this.segment = segment;
this.key = key;
}
@Override
public <K, V> void apply(AsyncNonBlockingStore<K, V> store) {
store.putModification(AsyncNonBlockingStore.wrapKeyIfNeeded(key), this);
}
@Override
public int getSegment() {
return segment;
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> asStage() {
return CompletableFutures.completedNull();
}
public Object getKey() {
return key;
}
@Override
public String toString() {
return "RemoveModification{" +
"segment=" + segment +
", key=" + key +
'}';
}
}
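/**
 * Illustrative sketch, not part of the original class: modifications use double
 * dispatch via apply(store) so the async store never switches on concrete types.
 * The map below stands in for the store's pending-modification queue and is an
 * assumption of this example; a later modification for the same wrapped key
 * simply coalesces over an earlier one.
 */
class RemoveModificationUsage {
   static Modification coalesce(java.util.Map<Object, Modification> pending, RemoveModification rm) {
      // Later modifications win; the return value is any overwritten pending modification.
      return pending.put(AsyncNonBlockingStore.wrapKeyIfNeeded(rm.getKey()), rm);
   }
}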
| 1,022
| 22.25
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/async/ClearModification.java
|
package org.infinispan.persistence.async;
import java.util.concurrent.CompletionStage;
import org.infinispan.persistence.spi.MarshallableEntry;
class ClearModification implements Modification {
private ClearModification() {
}
public static final ClearModification INSTANCE = new ClearModification();
@Override
public <K, V> void apply(AsyncNonBlockingStore<K, V> store) {
store.putClearModification();
}
@Override
public int getSegment() {
throw new UnsupportedOperationException("This should never be invoked");
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> asStage() {
throw new UnsupportedOperationException("This should never be invoked");
}
@Override
public String toString() {
return "ClearModification{}";
}
}
| 815
| 23.727273
| 78
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/AdvancedCacheExpirationWriter.java
|
package org.infinispan.persistence.spi;
import java.util.concurrent.Executor;
import org.infinispan.commons.util.Experimental;
/**
* Defines functionality for advanced expiration techniques. Note this interface allows for providing not just the key
* when an entry is expired. This is important so that proper cluster wide expiration can be performed.
* @since 8.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@Experimental
@Deprecated
public interface AdvancedCacheExpirationWriter<K, V> extends AdvancedCacheWriter<K, V> {
/**
* Using a thread from the pool, removes all the expired data from the persistent storage. For each removed entry,
* the supplied listener is invoked. This should be preferred to
* {@link AdvancedCacheWriter#purge(Executor, PurgeListener)} since it allows for value and metadata to be provided
* which provides more accurate expiration when coordination is required.
*
* @param executor the executor to invoke the given command on
* @param listener the listener that is notified for each expired entry
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
void purge(Executor executor, ExpirationPurgeListener<K, V> listener);
/**
* Callback to be notified when an entry is removed by the {@link #purge(Executor, ExpirationPurgeListener)} method.
* Note this interface adds a new method to the purge listener. A cache store may want to support both a
* key-only and a key/metadata expiration callback for performance reasons.
*/
interface ExpirationPurgeListener<K, V> extends PurgeListener<K> {
/**
* If possible, {@link AdvancedCacheExpirationWriter} implementors should invoke this method for every
* entry that is purged from the store. One of the side effects of not implementing this method is that listeners
* do not receive {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired} for the
* entries that are removed from the persistent store directly.
*/
void marshalledEntryPurged(MarshallableEntry<K, V> entry);
}
/**
* This method is never called. Implementers of {@link AdvancedCacheExpirationWriter} must instead
* implement {@link #purge(Executor, ExpirationPurgeListener)}.
*/
@Override
default void purge(Executor threadPool, PurgeListener<? super K> listener) {
throw new UnsupportedOperationException();
}
}
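/**
 * Illustrative sketch, not part of the SPI: a listener counting purged entries.
 * Implementing marshalledEntryPurged gives stores the full entry so accurate
 * CacheEntryExpired events can be fired; all names here are example-only.
 */
class CountingPurgeListener<K, V> implements AdvancedCacheExpirationWriter.ExpirationPurgeListener<K, V> {
   private final java.util.concurrent.atomic.AtomicLong purged = new java.util.concurrent.atomic.AtomicLong();

   @Override
   public void entryPurged(K key) {
      purged.incrementAndGet(); // key-only notification, no value or metadata available
   }

   @Override
   public void marshalledEntryPurged(MarshallableEntry<K, V> entry) {
      purged.incrementAndGet(); // full entry available, enabling accurate expiration events
   }

   long count() {
      return purged.get();
   }
}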
| 2,514
| 46.45283
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/package-info.java
|
/**
* The Persistence SPI.
*
* @api.public
*/
package org.infinispan.persistence.spi;
| 90
| 12
| 39
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/MarshallableEntryFactory.java
|
package org.infinispan.persistence.spi;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
* Factory for {@link MarshallableEntry}.
*
* @author Ryan Emerson
* @since 10.0
*/
public interface MarshallableEntryFactory<K,V> {
/**
* {@code metadataBytes} defaults to null
* {@code created} defaults to -1
* {@code lastUsed} defaults to -1
*
* @see #create(ByteBuffer, ByteBuffer, ByteBuffer, ByteBuffer, long, long)
*/
MarshallableEntry<K,V> create(ByteBuffer key, ByteBuffer valueBytes);
/**
* Creates a {@link MarshallableEntry} using already marshalled objects as arguments
*
* @param key {@link ByteBuffer} of serialized key object
* @param valueBytes {@link ByteBuffer} of serialized value object
* @param metadataBytes {@link ByteBuffer} of serialized metadata object
* @param internalMetadataBytes {@link ByteBuffer} of serialized internal metadata object
* @param created timestamp of when the entry was created, -1 means this value is ignored
* @param lastUsed timestamp of last time entry was accessed in memory
* @return {@link MarshallableEntry} instance that lazily handles unmarshalling of keys, values and metadata via the
* {@link MarshallableEntry#getKey()}, {@link MarshallableEntry#getValue()} and {@link
* MarshallableEntry#getMetadata()} methods.
*/
MarshallableEntry<K,V> create(ByteBuffer key, ByteBuffer valueBytes, ByteBuffer metadataBytes,
ByteBuffer internalMetadataBytes, long created, long lastUsed);
/**
* Creates a {@link MarshallableEntry} using a object key and already marshalled value/metadata as arguments
*
* @param key entry key
* @param valueBytes {@link ByteBuffer} of serialized value object
* @param metadataBytes {@link ByteBuffer} of serialized metadata object
* @param internalMetadataBytes {@link ByteBuffer} of serialized internal metadata object
* @param created timestamp of when the entry was created, -1 means this value is ignored
* @param lastUsed timestamp of last time entry was accessed in memory
* @return {@link MarshallableEntry} instance that lazily handles unmarshalling of values and metadata via the {@link
* MarshallableEntry#getKey()}, {@link MarshallableEntry#getValue()} and {@link MarshallableEntry#getMetadata()}
* methods.
*/
MarshallableEntry<K, V> create(Object key, ByteBuffer valueBytes, ByteBuffer metadataBytes,
ByteBuffer internalMetadataBytes, long created, long lastUsed);
/**
* {@code value} defaults to null
*
* @see #create(Object, Object)
*/
MarshallableEntry<K,V> create(Object key);
/**
* {@code metadata} defaults to null
* {@code created} defaults to -1
* {@code lastUsed} defaults to -1
*
* @see #create(Object, Object, Metadata, PrivateMetadata, long, long)
*/
MarshallableEntry<K,V> create(Object key, Object value);
/**
* Creates a {@link MarshallableEntry} using non-marshalled POJOs as arguments
*
* @param key entry key
* @param value entry value
* @param metadata entry metadata
* @param internalMetadata entry internal metadata
* @param created timestamp of when the entry was created, -1 means this value is ignored
* @param lastUsed timestamp of last time entry was accessed in memory
* @return {@link MarshallableEntry} instance that lazily handles serialization of keys, values and metadata via the
* {@link MarshallableEntry#getKeyBytes()}, {@link MarshallableEntry#getValueBytes()} and {@link
* MarshallableEntry#getMetadataBytes()} methods.
*/
MarshallableEntry<K, V> create(Object key, Object value, Metadata metadata, PrivateMetadata internalMetadata,
long created, long lastUsed);
/**
* Creates a {@link MarshallableEntry} instance from a {@code key} and an {@link InternalCacheValue}.
*
* @param key the entry key.
* @param v the {@link InternalCacheValue}.
*/
default MarshallableEntry<K, V> create(Object key, InternalCacheValue<V> v) {
return create(key, v.getValue(), v.getMetadata(), v.getInternalMetadata(), v.getCreated(), v.getLastUsed());
}
/**
* Creates a {@link MarshallableEntry} instance from an {@link InternalCacheEntry}.
*
* @param e the {@link InternalCacheEntry}.
*/
default MarshallableEntry<K, V> create(InternalCacheEntry<K, V> e) {
return create(e.getKey(), e.getValue(), e.getMetadata(), e.getInternalMetadata(), e.getCreated(),
e.getLastUsed());
}
/**
* Creates a {@link MarshallableEntry} using a Key {@link MarshalledValue}.
*
* @param key entry key
* @param value a {@link MarshalledValue} whose values are used to populate {@link MarshallableEntry#getValueBytes()},
* {@link MarshallableEntry#getMetadataBytes()}, {@link MarshallableEntry#created()} and {@link
* MarshallableEntry#lastUsed()} fields.
* @return {@link MarshallableEntry} instance that lazily handles unmarshalling of keys, values and metadata via the
* {@link MarshallableEntry#getKey()}, {@link MarshallableEntry#getValue()} and {@link
* MarshallableEntry#getMetadata()} methods.
* @throws NullPointerException if the provided {@link MarshalledValue} is null.
*/
MarshallableEntry<K, V> create(Object key, MarshalledValue value);
/**
* Clone the provided MarshallableEntry if needed to apply lifespan expiration. If the entry already has lifespan
* applied this method will do nothing, returning the same MarshallableEntry back.
*
* @param me the entry to clone if applicable
* @param creationTime the creation time to apply for lifespan
* @param lifespan the duration for which the entry will expire after the creationTime
* @return a new entry if lifespan was applied otherwise the same entry provided
*/
MarshallableEntry<K, V> cloneWithExpiration(MarshallableEntry<K, V> me, long creationTime, long lifespan);
/**
* @return a cached empty {@link MarshallableEntry} instance.
*/
MarshallableEntry<K, V> getEmpty();
}
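/**
 * Illustrative sketch, not part of the SPI: typical factory use inside a store
 * implementation. The field wiring is an assumption of this example; real stores
 * obtain the factory from InitializationContext#getMarshallableEntryFactory()
 * during start().
 */
class MarshallableEntryFactoryUsage<K, V> {
   private MarshallableEntryFactory<K, V> factory; // injected in start(), sketch only

   MarshallableEntry<K, V> pojoEntry(K key, V value) {
      // metadata defaults to null, created/lastUsed default to -1
      return factory.create(key, value);
   }

   MarshallableEntry<K, V> withLifespan(MarshallableEntry<K, V> me, long now, long lifespanMillis) {
      // Returns the same instance when lifespan metadata is already applied
      return factory.cloneWithExpiration(me, now, lifespanMillis);
   }
}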
| 6,531
| 46.333333
| 121
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/ExternalStore.java
|
package org.infinispan.persistence.spi;
import net.jcip.annotations.ThreadSafe;
/**
* Basic interface for interacting with an external store in a read-write mode.
*
* @author Mircea Markus
* @since 6.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface ExternalStore<K, V> extends CacheLoader<K, V>, CacheWriter<K, V> {
@Override
default boolean isAvailable() {
return CacheWriter.super.isAvailable();
}
/**
* Method to be used to destroy and clean up any resources associated with this store. This is normally only
* useful for non-shared stores.
* <p>
* This method will ensure the store is stopped and properly cleans up all resources for it.
* @implSpec Default implementation just invokes {@link #stop()}
*/
default void destroy() { stop(); }
}
| 859
| 28.655172
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/AdvancedCacheWriter.java
|
package org.infinispan.persistence.spi;
import java.util.concurrent.Executor;
import net.jcip.annotations.ThreadSafe;
/**
* Defines advanced functionality for persisting data to an external storage.
*
* @author Mircea Markus
* @since 6.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface AdvancedCacheWriter<K, V> extends CacheWriter<K, V> {
/**
* Removes all the data from the storage.
*
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
void clear();
/**
* Using a thread from the pool, removes all the expired data from the persistent storage. For each removed entry,
* the supplied listener is invoked.
* <p>
* When this method returns all entries will be purged and no tasks will be running due to this loader in the
* provided executor. If however an exception is thrown there could be tasks still pending or running in the
* executor.
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
void purge(Executor threadPool, PurgeListener<? super K> listener);
/**
* Callback to be notified when an entry is removed by the {@link #purge(java.util.concurrent.Executor,
* org.infinispan.persistence.spi.AdvancedCacheWriter.PurgeListener)} method.
*/
interface PurgeListener<K> {
/**
* Optional. If possible, {@link AdvancedCacheWriter} implementors should invoke this method for every entry that
* is purged from the store. One of the side effects of not implementing this method is that listeners do not
* receive {@link org.infinispan.notifications.cachelistener.annotation.CacheEntryExpired} for the entries that
* are removed from the persistent store directly.
*/
void entryPurged(K key);
}
}
| 1,894
| 36.156863
| 119
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/PersistenceException.java
|
package org.infinispan.persistence.spi;
import org.infinispan.commons.CacheException;
/**
* An exception thrown by a {@link CacheLoader} or a {@link CacheWriter} implementation if there are problems
* reading from a loader.
*
* @author Manik Surtani
* @since 4.0
*/
public class PersistenceException extends CacheException {
private static final long serialVersionUID = -7640401612614646818L;
public PersistenceException() {
}
public PersistenceException(String message) {
super(message);
}
public PersistenceException(String message, Throwable cause) {
super(message, cause);
}
public PersistenceException(Throwable cause) {
super(cause);
}
}
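/**
 * Illustrative sketch, not part of the SPI: the store javadocs recommend wrapping
 * any caught Throwable in a PersistenceException and completing the stage
 * exceptionally. This helper is example-only.
 */
class PersistenceExceptionUsage {
   static java.util.concurrent.CompletionStage<Void> failStage(Throwable cause) {
      java.util.concurrent.CompletableFuture<Void> cf = new java.util.concurrent.CompletableFuture<>();
      cf.completeExceptionally(new PersistenceException("Error communicating with the external store", cause));
      return cf;
   }
}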
| 702
| 21.677419
| 109
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/StoreUnavailableException.java
|
package org.infinispan.persistence.spi;
/**
* An exception thrown by the {@link org.infinispan.persistence.manager.PersistenceManager} if one or more
* stores are unavailable when a cache operation is attempted.
*
* @author Ryan Emerson
* @since 9.3
*/
public class StoreUnavailableException extends PersistenceException {
public StoreUnavailableException() {
}
public StoreUnavailableException(String message) {
super(message);
}
public StoreUnavailableException(String message, Throwable cause) {
super(message, cause);
}
public StoreUnavailableException(Throwable cause) {
super(cause);
}
}
| 647
| 23
| 106
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/FlagAffectedStore.java
|
package org.infinispan.persistence.spi;
/**
* Implemented by stores that can skip writes based on certain flags present in the invocation.
* @since 9.0
*/
public interface FlagAffectedStore<K, V> extends ExternalStore<K, V> {
boolean shouldWrite(long commandFlags);
}
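/**
 * Illustrative sketch, not part of the SPI: testing a flag bit inside shouldWrite.
 * The mask constant below is hypothetical and exists only for this example; real
 * implementations would test bits such as those defined in
 * org.infinispan.context.impl.FlagBitSets.
 */
class FlagCheckExample {
   static final long EXAMPLE_SKIP_MASK = 1L << 5; // hypothetical bit, for illustration only

   static boolean shouldWrite(long commandFlags) {
      // Skip the write whenever the (hypothetical) skip bit is present
      return (commandFlags & EXAMPLE_SKIP_MASK) == 0;
   }
}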
| 277
| 22.166667
| 95
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/MarshalledValue.java
|
package org.infinispan.persistence.spi;
import org.infinispan.commons.io.ByteBuffer;
/**
* A marshallable object containing serialized representations of cache values and metadata, that can be used to store
* values, metadata and timestamps as a single entity.
*
* @author Ryan Emerson
* @since 10.0
*/
public interface MarshalledValue {
ByteBuffer getValueBytes();
ByteBuffer getMetadataBytes();
ByteBuffer getInternalMetadataBytes();
long getCreated();
long getLastUsed();
}
| 505
| 20.083333
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/CacheWriter.java
|
package org.infinispan.persistence.spi;
import java.util.concurrent.CompletionStage;
import org.infinispan.commons.api.Lifecycle;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
/**
* Allows persisting data to an external storage, as opposed to the {@link CacheLoader}.
*
* @author Mircea Markus
* @since 6.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface CacheWriter<K, V> extends Lifecycle {
/**
* Used to initialize a cache writer. Typically invoked by the {@link org.infinispan.persistence.manager.PersistenceManager}
* when setting up cache writers.
*
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
void init(InitializationContext ctx);
/**
* Persists the entry to the storage.
*
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
* @see MarshallableEntry
*/
void write(MarshallableEntry<? extends K, ? extends V> entry);
/**
* @return true if the entry existed in the persistent store and it was deleted.
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
boolean delete(Object key);
/**
* Persist all provided entries to the store in chunks, with the size of each chunk determined by the store
* implementation. If chunking is not supported by the underlying store, then entries are written to the store
* individually via {@link #write(MarshallableEntry)}.
*
* @param publisher a {@link Publisher} of {@link MarshallableEntry} instances
* @throws NullPointerException if the publisher is null.
*/
default CompletionStage<Void> bulkUpdate(Publisher<MarshallableEntry<? extends K, ? extends V>> publisher) {
return Flowable.fromPublisher(publisher)
.doOnNext(this::write)
.ignoreElements()
.toCompletionStage(null);
}
/**
* Remove all provided keys from the store in a single batch operation. If this is not supported by the
* underlying store, then keys are removed from the store individually via {@link #delete(Object)}.
*
* @param keys an Iterable of entry Keys to be removed from the store.
* @throws NullPointerException if keys is null.
*/
default void deleteBatch(Iterable<Object> keys) {
keys.forEach(this::delete);
}
/**
* @return true if the writer can be connected to, otherwise false
*/
default boolean isAvailable() {
return true;
}
}
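/**
 * Illustrative sketch, not part of the SPI: driving the default bulkUpdate with a
 * Flowable built from an in-memory collection. The helper and its names are
 * example-only; the explicit type witness satisfies the wildcard signature.
 */
class CacheWriterBulkUpdateUsage {
   static <K, V> CompletionStage<Void> writeAll(CacheWriter<K, V> writer,
                                                Iterable<MarshallableEntry<K, V>> entries) {
      // The default implementation falls back to write(entry) for each element
      return writer.bulkUpdate(Flowable.<MarshallableEntry<? extends K, ? extends V>>fromIterable(entries));
   }
}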
| 2,663
| 33.597403
| 128
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/LocalOnlyCacheLoader.java
|
package org.infinispan.persistence.spi;
/**
* Marker interface for cache loaders that should only load values on the originating nodes.
* An example of a loader that uses this interface is {@link org.infinispan.persistence.cluster.ClusterLoader}.
*
* @author Dan Berindei
* @since 7.0
* @deprecated since 11.0. To be removed in 14.0 (ISPN-11864) with no direct replacement.
*/
@Deprecated
public interface LocalOnlyCacheLoader {
}
| 437
| 30.285714
| 111
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/AdvancedLoadWriteStore.java
|
package org.infinispan.persistence.spi;
import net.jcip.annotations.ThreadSafe;
/**
* Advanced interface for interacting with an external store in a read-write mode.
*
* @author Mircea Markus
* @since 6.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface AdvancedLoadWriteStore<K,V> extends ExternalStore<K,V>, AdvancedCacheLoader<K,V>, AdvancedCacheWriter<K,V> {
}
| 430
| 25.9375
| 125
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/SegmentedAdvancedLoadWriteStore.java
|
package org.infinispan.persistence.spi;
import java.util.concurrent.Executor;
import java.util.function.Predicate;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.reactivestreams.Publisher;
import net.jcip.annotations.ThreadSafe;
/**
* An interface implementing both {@link AdvancedCacheWriter} and {@link AdvancedCacheLoader} as well as overrides
* of those methods that can be optimized when a segment is already known for the key or a subset of segments are to
* be used instead of the entire store.
* <p>
* Various methods on this interface may be invoked irrespective of whether the store is configured as segmented,
* that is whether {@link StoreConfiguration#segmented()} is true in the configuration. Each method is documented
* as to whether this can occur or not.
* @author wburns
* @since 9.4
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface SegmentedAdvancedLoadWriteStore<K, V> extends AdvancedLoadWriteStore<K, V>, AdvancedCacheExpirationWriter<K, V> {
// CacheLoader methods
/**
* Fetches an entry from the storage given a segment to optimize this lookup based on. If a {@link MarshallableEntry}
* needs to be created here, {@link InitializationContext#getMarshallableEntryFactory()} and {@link
* InitializationContext#getByteBufferFactory()} should be used.
* <p>
* The provided segment may be used for performance purposes, however it is acceptable to ignore this argument.
* <p>
* This method may be invoked irrespective of whether the store is {@link StoreConfiguration#segmented()}.
*
* @param segment the segment that the key maps to
* @param key the key of the entry to fetch
* @return the entry, or null if the entry does not exist
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
MarshallableEntry<K, V> get(int segment, Object key);
/**
* Returns true if the storage contains an entry associated with the given key in the given segment
* <p>
* The provided segment may be used for performance purposes, however it is acceptable to ignore this argument.
* <p>
* This method may be invoked irrespective of whether the store is {@link StoreConfiguration#segmented()}.
* @param segment the segment that the key maps to
* @param key the key to see if exists
* @return true if the key is present in this loader with a given segment
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
boolean contains(int segment, Object key);
// CacheWriter methods
/**
* Persists the entry to the storage with the given segment to optimize future lookups.
* <p>
* The provided segment may be used for performance purposes, however it is acceptable to ignore this argument.
* <p>
* This method may be invoked irrespective of whether the store is {@link StoreConfiguration#segmented()}.
*
* @param segment the segment to persist this entry to
* @param entry the entry to write to the store
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
* @see MarshallableEntry
* @implSpec The default implementation falls back to invoking {@link #write(MarshallableEntry)}.
*/
void write(int segment, MarshallableEntry<? extends K, ? extends V> entry);
/**
* Removes the entry for the provided key which is in the given segment. This method then returns whether the
* entry was removed or not.
* <p>
* The provided segment may be used for performance purposes, however it is acceptable to ignore this argument.
* <p>
* This method may be invoked irrespective of whether the store is {@link StoreConfiguration#segmented()}.
* @param segment the segment that this key maps to
* @param key the key of the entry to remove
* @return true if the entry existed in the persistent store and it was deleted.
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
boolean delete(int segment, Object key);
// AdvancedCacheLoader methods
/**
* Returns the number of elements in the store that map to the given segments that aren't expired.
* <p>
* The segments here <b>must</b> be adhered to and the size must not count any entries that don't belong to
* the provided segments.
* <p>
* This method is not invoked when the store is not configured to be {@link StoreConfiguration#segmented()}.
* @param segments the segments which should have their entries counted. Always non null.
* @return the count of entries in the given segments
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
int size(IntSet segments);
/**
* Publishes all the keys that map to the given segments from this store. The given publisher can be used by as many
* {@link org.reactivestreams.Subscriber}s as desired. Keys are not retrieved until a given Subscriber requests
* them from the {@link org.reactivestreams.Subscription}.
* <p>
* Stores will return only non-expired keys
* <p>
* The segments here <b>must</b> be adhered to and the keys published must not include any that don't belong to
* the provided segments.
* <p>
* This method is not invoked when the store is not configured to be {@link StoreConfiguration#segmented()}.
* @param segments the segments that the keys must map to. Always non null.
* @param filter a filter
* @return a publisher that will provide the keys from the store
*/
Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter);
/**
* Publishes all entries from this store. The given publisher can be used by as many {@link
* org.reactivestreams.Subscriber}s as desired. Entries are not retrieved until a given Subscriber requests them from
* the {@link org.reactivestreams.Subscription}.
* <p>
* If <b>fetchMetadata</b> is true this store must guarantee to not return any expired entries.
* <p>
* The segments here <b>must</b> be adhered to and the entries published must not include any that don't belong to
* the provided segments.
* <p>
* This method is not invoked when the store is not configured to be {@link StoreConfiguration#segmented()}.
*
* @param segments the segments that the keys of the entries must map to. Always non null.
* @param filter a filter on the keys of the entries that if passed will allow the given entry to be returned
* from the publisher
* @param fetchValue whether the value should be included in the marshalled entry
* @param fetchMetadata whether the metadata should be included in the marshalled entry
* @return a publisher that will provide the entries from the store that map to the given segments
*/
Publisher<MarshallableEntry<K, V>> entryPublisher(IntSet segments, Predicate<? super K> filter, boolean fetchValue, boolean fetchMetadata);
// AdvancedCacheWriter methods
/**
* Removes all the data that maps to the given segments from the storage.
* <p>
* This method must only remove entries that map to the provided segments.
* <p>
* This method may be invoked irrespective of whether the configuration is {@link StoreConfiguration#segmented()}.
* @param segments data mapping to these segments are removed. Always non null.
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
void clear(IntSet segments);
/**
* Invoked when a node becomes an owner of the given segments. Note this method is only invoked for non shared
* store implementations.
* <p>
* This method may be invoked irrespective of whether the configuration is {@link StoreConfiguration#segmented()}.
* @param segments segments to associate with this store
* @implSpec This method does nothing by default
*/
default void addSegments(IntSet segments) { }
/**
* Invoked when a node loses ownership of a segment. The provided segments are the ones this node no longer owns.
* Note this method is only invoked for non shared store implementations.
* <p>
* This method may be invoked irrespective of whether the configuration is {@link StoreConfiguration#segmented()}.
* @param segments segments that should no longer be associated with this store
* @implSpec This method does nothing by default
*/
default void removeSegments(IntSet segments) { }
/**
* {@inheritDoc}
*/
@Override
default void purge(Executor executor, ExpirationPurgeListener<K, V> listener) {
purge(executor, (PurgeListener<K>) listener);
}
}
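/**
 * Illustrative sketch, not part of the SPI: the per-segment layout this interface
 * enables. Keeping one container per segment lets clear(IntSet) and size(IntSet)
 * touch only the relevant buckets. All names are example-only.
 */
class PerSegmentLayoutSketch<K, V> {
   private final java.util.concurrent.ConcurrentMap<Integer, java.util.Map<Object, MarshallableEntry<K, V>>> segments =
         new java.util.concurrent.ConcurrentHashMap<>();

   void write(int segment, MarshallableEntry<K, V> entry) {
      segments.computeIfAbsent(segment, s -> new java.util.concurrent.ConcurrentHashMap<>())
              .put(entry.getKey(), entry);
   }

   void clear(IntSet segmentsToClear) {
      // Dropping a whole bucket removes every entry mapped to that segment
      for (java.util.PrimitiveIterator.OfInt it = segmentsToClear.iterator(); it.hasNext(); ) {
         segments.remove(it.nextInt());
      }
   }
}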
| 9,039
| 47.864865
| 142
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/NonBlockingStore.java
|
package org.infinispan.persistence.spi;
import java.util.EnumSet;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.function.Predicate;
import java.util.function.Supplier;
import jakarta.transaction.Transaction;
import org.infinispan.Cache;
import org.infinispan.commons.util.Experimental;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
/**
* The contract for defining how caches interface with external sources of data, such as databases or filesystems.
* As the name implies, any method in this class must <b>never</b> block the invoking thread.
* <p>
* The first method invoked on this store is {@link #start(InitializationContext)}, which starts the store.
* Once the returned stage has completed, the store is assumed to be in working state and ready to handle operations.
* Infinispan guarantees the visibility of variables written during the start method, so you do not need to
* synchronize them manually unless they are mutated in the normal operations of the store itself.
* <p>
* After the store starts, Infinispan uses the {@link #characteristics()} method to query the characteristics of
* the store. It is highly recommended that this method never change the values it returns after the
* store starts because characteristics might not be cached. For more information, see {@link Characteristic}
* and its various values.
* <p>
* By default, this interface has only a few required methods. If you implement any of the optional methods,
* ensure that you advertise the appropriate characteristic for that method so that Infinispan invokes it.
* If Infinispan is instructed that a
* characteristic is available and the method is not overridden, an {@link UnsupportedOperationException} will be
* thrown when trying to invoke the appropriate method. Each {@link Characteristic} defines what methods map to which
* characteristic.
* <p>
* Although recommended, segmentation support in store implementations is optional. Segment parameters are provided
* for all methods where segment information is required, for example {@link #load(int, Object)} and
* {@link #publishEntries(IntSet, Predicate, boolean)}. If your store implementation does not support segmentation,
* you can ignore these parameters. However, you should note that segmented stores allow Infinispan caches to more
* efficiently perform bulk operations such as {@code Cache.size()} or {@code Cache.entrySet().stream()}. Segmentation
* also decreases the duration of state transfers when a store with {@link Characteristic#BULK_READ} is present,
* as well as the time required to remove data by segments. To indicate that a store implementation supports segmentation,
* the {@link Characteristic#SEGMENTABLE} characteristic must be returned by the {@link #characteristics()} method. Store
* implementations can determine if stores are configured to be segmented if {@link StoreConfiguration#segmented()} is
* enabled, which is available from the {@code InitializationContext}.
* <p>
* Store implementations might need to interact with blocking APIs to perform their required operations. However, the invoking
* thread must never be blocked, so Infinispan provides a {@link org.infinispan.util.concurrent.BlockingManager} utility class
* that handles blocking operations to ensure that they do not leak into the internal system. {@code BlockingManager} does this
* by running any blocking operations on blocking threads, while any stages continue on non-blocking threads.
* <p>
* This utility class provides different methods that range from equivalents for commonly used methods, such as
* {@link java.util.concurrent.CompletableFuture#supplyAsync(Supplier, Executor)}, to a wrapper around a {@link Publisher} that
* ensures it is subscribed and observed on the correct threads. To obtain a {@code BlockingManager}, invoke the
* {@link InitializationContext#getBlockingManager()} method on the provided context in the start method.
* <p>
* Implementations of this store must be thread safe if concurrent operations are performed on it. The one exception
* is that {@link #start(InitializationContext)} and {@link #stop()} are not invoked concurrently with other operations.
* <p>
* Note that this interface is Experimental and its methods may change slightly over time until it has matured.
* @author William Burns
* @since 11.0
* @param <K> key type
* @param <V> value type
*/
@Experimental
public interface NonBlockingStore<K, V> {
/**
* Enumeration defining the various characteristics of the underlying store to communicate what features it may
* or may not support.
*/
enum Characteristic {
/**
* If this store can be shared across multiple Infinispan nodes; for example, an external system such as
* a database. This characteristic allows validation of the store configuration.
*/
SHAREABLE,
/**
* If this store supports only being read from. Write-based operations are never invoked on this store.
* No optional methods map to this characteristic. The {@link #write(int, MarshallableEntry)},
* {@link #delete(int, Object)}, and {@link #batch(int, Publisher, Publisher)} methods
* are not invoked on stores with this characteristic.
*/
READ_ONLY,
/**
* If this store supports only being written to. Read-based operations are never invoked on this store.
* No optional methods map to this characteristic. The {@link #load(int, Object)} and
* {@link #containsKey(int, Object)} methods are not invoked on stores with this characteristic.
*/
WRITE_ONLY,
/**
* If this store supports bulk read operations. If a store does not have this characteristic, operations such
* as {@link Cache#size()} and {@code Cache.entrySet().stream()} do not use this store.
* <p>
* Stores that have this characteristic must override the {@link #publishKeys(IntSet, Predicate)},
* {@link #publishEntries(IntSet, Predicate, boolean)} and {@link #size(IntSet)} methods.
* <p>
* This characteristic is ignored if the store also contains {@link #WRITE_ONLY}.
*/
BULK_READ,
/**
* If this store supports being invoked in a transactional context with prepare and commit or rollback phases.
* Stores of this type can participate in the actual transaction, if present.
* <p>
* Stores that have this characteristic must override the
* {@link #prepareWithModifications(Transaction, int, Publisher, Publisher)} , {@link #commit(Transaction)} and
* {@link #rollback(Transaction)} methods.
* <p>
* This characteristic is ignored if the store also contains {@link #READ_ONLY}.
*/
TRANSACTIONAL,
/**
* If this store supports segmentation. All methods in this SPI take as an argument a way to map a given
* entry to a segment. A segment in Infinispan is an int that acts as a bucket for many keys. Many store
* implementations may be able to store and load entries in a more performant way if they segment their data
* accordingly.
* <p>
* If this store is not segmentable then invokers of this SPI are not required to calculate these segments before
* invoking these methods, and thus these methods may be invoked with an arbitrary or invalid segment value. Refer to
* each method to determine its effect when this store is not segmented.
* <p>
* Note that you can also configure stores at runtime to be segmented or not. If the runtime configuration of this
* store is non-segmented, it is equivalent to the store not having the SEGMENTABLE characteristic, which might cause
* parameters to be null or invalid segment numbers. Store implementation can block this configuration
* by throwing an exception in the {@link #start(InitializationContext)} method.
* <p>
* While it is possible that a SEGMENTABLE store can be configured as non-segmented, a store that is not
* SEGMENTABLE can never then later be configured as segmented.
* <p>
* Stores that have this characteristic must override the {@link #addSegments(IntSet)} and
* {@link #removeSegments(IntSet)} methods. However, if a store is {@link #SHAREABLE} and is configured to be shared
* via configuration these methods are not invoked.
*/
SEGMENTABLE,
/**
* If this store uses expiration metadata so that it never returns expired entries
* via any methods such as {@link #load(int, Object)}, {@link #publishKeys(IntSet, Predicate)} or
* {@link #publishEntries(IntSet, Predicate, boolean)}. Stores should use the provided
* {@link org.infinispan.commons.time.TimeService} in the {@code InitializationContext} to determine if entries are
* expired.
* <p>
* The information about an entry and its expiration is included in the {@link org.infinispan.metadata.Metadata},
* accessible from the {@link MarshallableEntry} that is provided.
* <p>
* Stores that have this characteristic must override the {@link #purgeExpired()} method.
*/
EXPIRATION
}
/**
* Shortcut to return -1L when the size or approximate size is unavailable.
*/
CompletableFuture<Long> SIZE_UNAVAILABLE_FUTURE = CompletableFuture.completedFuture(-1L);
/**
* The first method to invoke so that the store can be configured and additional steps, such as connecting through
* a socket or opening file descriptors, are performed.
* <p>
* The provided {@link InitializationContext} contains many helpful objects, including the configuration of the
* cache and store, concurrency utilities such as {@link org.infinispan.util.concurrent.BlockingManager} or
* an executor reserved for non-blocking operations only {@link InitializationContext#getNonBlockingExecutor()}.
* <p>
* This method is guaranteed not to be invoked concurrently with other operations. This means other methods are
* not invoked on this store until after the returned Stage completes.
* <p>
* It is expected that an implementation should be able to "restart" by invoking {@code start} a second time if
* {@link #stop()} has been invoked and allowed for its stage to complete.
* @param ctx initialization context used to initialize this store.
* @return a stage that, when complete, indicates that this store has started successfully.
*/
CompletionStage<Void> start(InitializationContext ctx);
/**
* This method is invoked when the cache is being shutdown. It is expected that all resources related to the
* store are freed when the returned stage is complete.
* <p>
* This method is guaranteed not to be invoked concurrently with other operations. This means other methods are
* not invoked on this store until after the returned Stage completes.
* <p>
* It is expected that an implementation should be able to "restart" by invoking {@link #start(InitializationContext)}
* a second time if {@code stop} has been invoked and allowed for its stage to complete.
* @return a stage that, when complete, indicates that this store has stopped.
*/
CompletionStage<Void> stop();
/**
* This method is to be invoked when the store should clean up all underlying data and storage of said data. For
* example, a database store would remove the underlying table(s) that it is using and a file-based store would
* remove all of the various files or directories it may have created.
* @implSpec
* The default implementation invokes the {@link #stop()} method returning the stage it returned.
* @return a stage that, when complete, indicates that this store is stopped and all data and storage for it are also
* cleaned up
*/
default CompletionStage<Void> destroy() {
return stop();
}
/**
* Returns a set of characteristics for this store and its elements. This method may be invoked multiple times
* to determine which methods of the store can be used and how the data in the store can be handled.
* <p>
* Refer to {@link Characteristic} and its values for descriptions of each characteristic for stores.
* @implSpec
* The default implementation returns an empty set.
* @return the set of characteristics that this store supports.
*/
default Set<Characteristic> characteristics() {
return EnumSet.noneOf(Characteristic.class);
}
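/**
 * Illustrative sketch, not part of the SPI: a store advertising bulk reads and
 * segmentation support. Returning a characteristic without overriding the methods
 * mapped to it leads to UnsupportedOperationException at runtime; the helper
 * below is example-only.
 */
class CharacteristicsSketch {
   static Set<Characteristic> bulkSegmentable() {
      return EnumSet.of(Characteristic.BULK_READ, Characteristic.SEGMENTABLE);
   }
}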
/**
* Returns a stage that, when complete, returns a boolean indicating whether the current store can be accessed for
* requests. This can be useful for store implementations that rely on an external source, such as a remote database,
* that may become unreachable. This can reduce sending requests to a store that is not available, as subsequent cache
* requests will result in a {@link StoreUnavailableException} being thrown until the store becomes available again.
* <p>
* Store availability is polled periodically to update the status of stores if their availability changes. This method
* is not invoked concurrently with itself. In other words, this method is not invoked until after the previous stage
* has completed. However, this method is invoked concurrently with other operations, except for
* {@link #start(InitializationContext)} and {@link #stop()}.
* <p>
* If a store is configured to be {@link StoreConfiguration#async()} and the store becomes unavailable, then it is
* possible for the cache operations to be accepted in the interim period between the loss of availability and the
* modification-queue becoming full. This allows for this store to be unavailable for short periods of time without a
* {@link StoreUnavailableException} being thrown; however if the store does not become available before the queue
* fills, then a {@link StoreUnavailableException} is thrown.
* @implSpec
* The default implementation returns a completed stage with the value {@code Boolean.TRUE}.
* @return stage that, when complete, indicates if the store is available.
*/
default CompletionStage<Boolean> isAvailable() {
return CompletableFutures.completedTrue();
}
/**
* Returns a stage that will contain the value loaded from the store. If a {@link MarshallableEntry} needs to be
* created here, {@link InitializationContext#getMarshallableEntryFactory()} and
* {@link InitializationContext#getByteBufferFactory()} should be used.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#WRITE_ONLY}</td>
* <td valign="top">This method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#EXPIRATION}</td>
* <td valign="top">When set this method must not return expired entries.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When this is not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
* the {@code segment} parameter may be ignored.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param segment the segment for the given key if segmentation is enabled, otherwise 0.
* @param key key of the entry to load.
* @return a stage that, when complete, contains the store value or null if not present.
*/
CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key);
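/**
 * Illustrative sketch, not part of the SPI: adapting a blocking lookup to the
 * non-blocking {@code load} contract. The backing map and field wiring stand in
 * for a real store's internals and are assumptions of this example;
 * {@code BlockingManager#supplyBlocking} runs the supplier on a blocking thread
 * and resumes on a non-blocking one.
 */
class BlockingLoadSketch<K1, V1> {
   private final java.util.concurrent.ConcurrentMap<Object, MarshallableEntry<K1, V1>> backing =
         new java.util.concurrent.ConcurrentHashMap<>();
   private org.infinispan.util.concurrent.BlockingManager blockingManager; // from InitializationContext, sketch only

   CompletionStage<MarshallableEntry<K1, V1>> load(int segment, Object key) {
      // Never block the caller: hop to a blocking thread for the potentially slow read
      return blockingManager.supplyBlocking(() -> backing.get(key), "example-load");
   }
}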
/**
* Returns a stage that will contain whether the value can be found in the store.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#WRITE_ONLY}</td>
* <td valign="top">This method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#EXPIRATION}</td>
* <td valign="top">When set this method must not return true if the entry was expired.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When this is not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
* the {@code segment} parameter may be ignored.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* <p>
* @implSpec
* A default implementation is provided that does the following:
* <pre>{@code
* return load(segment, key)
* .thenApply(Objects::nonNull);}
* </pre>
* @param segment the segment for the given key if segmentation is enabled, otherwise 0.
* @param key key of the entry to check.
* @return a stage that, when complete, contains a boolean stating if the value is contained in the store.
*/
default CompletionStage<Boolean> containsKey(int segment, Object key) {
return load(segment, key)
.thenApply(Objects::nonNull);
}
/**
* Writes the entry to the store for the given segment returning a stage that completes normally when it is finished.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#READ_ONLY}</td>
* <td valign="top">This method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#EXPIRATION}</td>
* <td valign="top">When set, this method must store the expiration metadata.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When set and segmentation is not disabled in the
* {@link StoreConfiguration#segmented() configuration},
* this method must ensure the segment is stored with the entry.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param segment the segment for the given key if segmentation is enabled, otherwise 0.
* @param entry the entry to persist to the store.
* @return a stage that when complete indicates that the store has written the value.
*/
CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry);
/**
* Removes the entry for given key and segment from the store
* and optionally report if the entry was actually removed or not.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#READ_ONLY}</td>
* <td valign="top">This method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When this is not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
* the {@code segment} parameter may be ignored.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param segment the segment for the given key if segmentation is enabled, otherwise 0.
* @param key key of the entry to delete from the store.
* @return a stage that completes with {@code TRUE} if the key existed in the store,
* {@code FALSE} if the key did not exist in the store,
* or {@code null} if the store does not report this information.
*/
CompletionStage<Boolean> delete(int segment, Object key);
/**
* Invoked when a node becomes an owner of the given segments. Some store implementations may require initializing
* additional resources when a new segment is required. For example a store could store entries in a different file
* per segment.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SHAREABLE}</td>
* <td valign="top">If the store has this characteristic and is configured to be {@link StoreConfiguration#shared()},
* this method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">This method is invoked only if the store has this characteristic and is configured to be
* {@link StoreConfiguration#segmented() segmented}.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param segments the segments to add.
* @return a stage that, when complete, indicates that the segments have been added.
*/
default CompletionStage<Void> addSegments(IntSet segments) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.SEGMENTABLE + ", but it does not implement addSegments");
}
/**
* Invoked when a node loses ownership of the given segments. A store must then remove any entries that map to the
* given segments and can remove any resources related to the given segments. For example, a database store can
* delete rows of the given segment or a file-based store can delete files related to the given segments.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SHAREABLE}</td>
* <td valign="top">If the store has this characteristic and is configured to be
* {@link StoreConfiguration#shared() shared}, this method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">This method is invoked only if the store has this characteristic and is configured to be
* {@link StoreConfiguration#segmented() segmented}.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param segments the segments to remove.
* @return a stage that, when complete, indicates that the segments have been removed.
*/
default CompletionStage<Void> removeSegments(IntSet segments) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.SEGMENTABLE + ", but it does not implement removeSegments");
}
/**
* Clears all entries from the store.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#READ_ONLY}</td>
* <td valign="top">This method will never be invoked.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @return a stage that, when complete, indicates that the store has been cleared.
*/
CompletionStage<Void> clear();
/**
* Writes and removes the entries provided by the Publishers into the store. Both are provided in the same method
* so that a batch may be performed as a single atomic operation if desired, although it is up to the store to
* manage its batching. If needed a store may generate batches of a configured size by using the
* {@link StoreConfiguration#maxBatchSize()} setting.
* <p>
* Each of the {@code Publisher}s may publish up to {@code publisherCount} inner publishers, where each inner
* publisher is separated by the segment each entry maps to. Failure to request at least {@code publisherCount}
* publishers from the Publisher may cause a deadlock. Many reactive tools have methods such as {@code flatMap}
* that take an argument of how many concurrent subscriptions they manage, which is perfectly matched with this
* argument.
* <p>
* WARNING: For performance reasons neither Publisher will emit any {@link SegmentedPublisher}s until both write
* and remove Publishers are subscribed to. These Publishers should also be only subscribed once.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#READ_ONLY}</td>
* <td valign="top">This method will never be invoked.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">If not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
* the {@code publisherCount} parameter has a value of 1,
* which means there will only be one {@code SegmentedPublisher} to subscribe to.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* <p>
* @implSpec
* The default implementation subscribes to both Publishers but requests values from the write publisher first, invoking
* {@link #write(int, MarshallableEntry)} for each of the entries in a non-overlapping, sequential fashion. Once all
* of the writes are complete, it does the same for the remove key Publisher but invokes {@link #delete(int, Object)}
* for each key.
* @param publisherCount the maximum number of {@code SegmentPublisher}s either publisher will publish
* @param removePublisher publishes what keys should be removed from the store
* @param writePublisher publishes the entries to write to the store
* @return a stage that when complete signals that the store has written the values
*/
default CompletionStage<Void> batch(int publisherCount, Publisher<SegmentedPublisher<Object>> removePublisher,
Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
Flowable<Void> entriesWritten = Flowable.fromPublisher(writePublisher)
.concatMapEager(sp ->
Flowable.fromPublisher(sp)
.concatMapCompletable(me -> Completable.fromCompletionStage(write(sp.getSegment(), me)))
.toFlowable()
, publisherCount, publisherCount);
Flowable<Void> removedKeys = Flowable.fromPublisher(removePublisher)
.concatMapEager(sp ->
Flowable.fromPublisher(sp)
.concatMapCompletable(key -> Completable.fromCompletionStage(delete(sp.getSegment(), key)))
.toFlowable()
, publisherCount, publisherCount);
// Note that removed is done after write has completed, but is subscribed eagerly. This makes sure there is only
// one pending write or remove.
return Flowable.concatArrayEager(entriesWritten, removedKeys)
.lastStage(null);
}
/**
* Returns the amount of entries that map to the given segments in the store.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#BULK_READ}</td>
* <td valign="top">This method is only invoked if the store has this characteristic.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When this is not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
* the {@code segments} parameter may be ignored.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param segments the segments for which the entries are counted.
* @return a stage that, when complete, contains the count of how many entries are present for the given segments.
*/
default CompletionStage<Long> size(IntSet segments) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.BULK_READ + ", but it does not implement size");
}
/**
* Returns an estimation of the amount of entries that map to the given segments in the store. This is similar to
* {@link #size(IntSet)} except that it is not strict about the returned size. For instance, this method might ignore
* if an entry is expired or if the store has some underlying optimizations to eventually have a consistent size.
* <p>
 * Implementations should be O(1).
* If a size approximation cannot be returned without iterating over all the entries in the store,
* the implementation should return {@code -1L}.
* </p>
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#BULK_READ}</td>
* <td valign="top">This method is only invoked if the store has this characteristic.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When the store does not have this characteristic or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
 * the {@code segments} parameter is always {@code IntSets.immutableRangeSet(numSegments)}.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* <p>
* @implSpec
* The default implementation always returns {@code -1}.
* @param segments the segments for which the entries are counted.
* @return a stage that, when complete, contains the approximate count of the entries in the given segments,
* or {@code -1L} if an approximate count cannot be provided.
*/
default CompletionStage<Long> approximateSize(IntSet segments) {
return SIZE_UNAVAILABLE_FUTURE;
}
/**
* Publishes entries from this store that are in one of the provided segments and also pass the provided filter.
 * The returned publisher must support being subscribed to any number of times. That is, subsequent invocations of
* {@link Publisher#subscribe(Subscriber)} should provide independent views of the underlying entries to the Subscribers.
* Entries should not be retrieved until a given Subscriber requests them via the
* {@link org.reactivestreams.Subscription#request(long)} method.
* <p>
* Subscribing to the returned {@link Publisher} should not block the invoking thread. It is the responsibility of
 * the store implementation to ensure this occurs. If, however, the store must block to perform an operation, it
 * is recommended to wrap the returned Publisher with the
 * {@link org.infinispan.util.concurrent.BlockingManager#blockingPublisher(Publisher)} method, which handles
 * subscription and observation on the blocking and non-blocking executors, respectively.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#BULK_READ}</td>
* <td valign="top">This method is only invoked if the store has this characteristic.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#EXPIRATION}</td>
* <td valign="top">When set the returned publisher must not return expired entries.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When this is not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
 * the {@code segments} parameter may be ignored.</td>
* </tr>
* </table>
 * @param segments a set of segments to filter entries by. This will always be non-null.
 * @param filter a filter to filter the keys by. If this is null then no additional filtering should be done after segments.
 * @param includeValues whether the value should be included in the returned entries.
 * @return a publisher that provides the entries from the store.
*/
default Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.BULK_READ + ", but it does not implement entryPublisher");
}
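   /*
    * Illustrative sketch (not part of this SPI): a store backed by blocking I/O can satisfy the
    * non-blocking subscription requirement by delegating to BlockingManager as recommended above.
    * readAllBlocking(segments) is a hypothetical helper returning an Iterable of entries.
    *
    *   return blockingManager.blockingPublisher(
    *         Flowable.defer(() -> Flowable.fromIterable(readAllBlocking(segments)))
    *               .filter(me -> filter == null || filter.test(me.getKey())));
    */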
/**
* Publishes keys from this store that are in one of the provided segments and also pass the provided filter.
 * The returned publisher must support being subscribed to any number of times. That is, subsequent invocations of
* {@link Publisher#subscribe(Subscriber)} should provide independent views of the underlying keys to the Subscribers.
* Keys should not be retrieved until a given Subscriber requests them via the
* {@link org.reactivestreams.Subscription#request(long)} method.
* <p>
* Subscribing to the returned {@link Publisher} should not block the invoking thread. It is the responsibility of
 * the store implementation to ensure this occurs. If, however, the store must block to perform an operation, it
 * is recommended to wrap the returned Publisher with the
 * {@link org.infinispan.util.concurrent.BlockingManager#blockingPublisher(Publisher)} method, which handles
 * subscription and observation on the blocking and non-blocking executors, respectively.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#BULK_READ}</td>
* <td valign="top">This method is only invoked if the store has this characteristic.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#EXPIRATION}</td>
* <td valign="top">When set the returned publisher must not return expired keys.</td>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#SEGMENTABLE}</td>
* <td valign="top">When this is not set or segmentation is disabled in the
* {@link StoreConfiguration#segmented() configuration},
 * the {@code segments} parameter may be ignored.</td>
* </tr>
* </table>
* <p>
* @implSpec
* A default implementation is provided that invokes {@link #publishEntries(IntSet, Predicate, boolean)} and
* maps the {@link MarshallableEntry} to its key.
* @param segments a set of segments to filter keys by. This will always be non-null.
* @param filter a filter to filter the keys by. If this is null then no additional filtering should be done after segments.
* @return a publisher that provides the keys from the store.
*/
default Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
return Flowable.fromPublisher(publishEntries(segments, filter, false))
.map(MarshallableEntry::getKey);
}
/**
* Returns a Publisher that, after it is subscribed to, removes any expired entries from the store and publishes
* them to the returned Publisher.
* <p>
* When the Publisher is subscribed to, it is expected to do point-in-time expiration and should
* not return a Publisher that has infinite entries or never completes.
* <p>
* Subscribing to the returned {@link Publisher} should not block the invoking thread. It is the responsibility of
 * the store implementation to ensure this occurs. If, however, the store must block to perform an operation, it
 * is recommended to wrap the returned Publisher with the
 * {@link org.infinispan.util.concurrent.BlockingManager#blockingPublisher(Publisher)} method, which handles
 * subscription and observation on the blocking and non-blocking executors, respectively.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#EXPIRATION}</td>
* <td valign="top">This method is only invoked if the store has this characteristic.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @return a Publisher that publishes the entries that are expired at the time of subscription.
*/
default Publisher<MarshallableEntry<K, V>> purgeExpired() {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.EXPIRATION + ", but it does not implement purgeExpired");
}
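   /*
    * Illustrative sketch (not part of this SPI): point-in-time expiration as a deferred blocking scan
    * that removes expired entries as they are emitted. scanAllBlocking() and removeBlocking(...) are
    * hypothetical helpers.
    *
    *   return blockingManager.blockingPublisher(Flowable.defer(() -> {
    *      long now = timeService.wallClockTime();
    *      return Flowable.fromIterable(scanAllBlocking())
    *            .filter(me -> me.isExpired(now))
    *            .doOnNext(me -> removeBlocking(me.getKey()));
    *   }));
    */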
/**
 * Write remove and put modifications to the store in the prepare phase. They should not be persisted until the
 * same transaction is committed via {@link #commit(Transaction)}, and they are discarded if the transaction is rolled back via
 * {@link #rollback(Transaction)}.
* <p>
* Each of the {@code Publisher}s may publish up to {@code publisherCount} publishers where each
* publisher is separated by the segment each entry maps to. Failure to request at least {@code publisherCount} publishers from the Publisher may cause a
* deadlock. Many reactive tools have methods such as {@code flatMap} that take an argument of how many concurrent
* subscriptions it manages, which is perfectly matched with this argument.
* <p>
* WARNING: For performance reasons neither Publisher will emit any {@link SegmentedPublisher}s until both write
* and remove Publishers are subscribed to. These Publishers should also be only subscribed once.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#TRANSACTIONAL}</td>
* <td valign="top">This method is invoked only if the store has this characteristic.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param transaction the current transactional context.
* @param publisherCount the maximum number of {@code SegmentPublisher}s either publisher will publish
* @param removePublisher publishes what keys should be removed from the store
* @param writePublisher publishes the entries to write to the store
* @return a stage that when complete signals that the store has written the values
*/
default CompletionStage<Void> prepareWithModifications(Transaction transaction, int publisherCount,
Publisher<SegmentedPublisher<Object>> removePublisher,
Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.TRANSACTIONAL + ", but it does not implement prepareWithModifications");
}
/**
* Commit changes in the provided transaction to the underlying store.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#TRANSACTIONAL}</td>
* <td valign="top">This method is invoked only if the store has this characteristic.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param transaction the current transactional context.
* @return a stage that, when completed, indicates that the transaction was committed.
*/
default CompletionStage<Void> commit(Transaction transaction) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.TRANSACTIONAL + ", but it does not implement commit");
}
/**
* Roll back the changes from the provided transaction to the underlying store.
* <p>
* <h4>Summary of Characteristics Effects</h4>
* <table border="1" cellpadding="1" cellspacing="1" summary="Summary of Characteristics Effects">
* <tr>
* <th bgcolor="#CCCCFF" align="left">Characteristic</th>
* <th bgcolor="#CCCCFF" align="left">Effect</th>
* </tr>
* <tr>
* <td valign="top">{@link Characteristic#TRANSACTIONAL}</td>
* <td valign="top">This method is invoked only if the store has this characteristic.</td>
* </tr>
* </table>
* <p>
* If a problem is encountered, it is recommended to wrap any created/caught Throwable in a
* {@link PersistenceException} and the stage be completed exceptionally.
* @param transaction the current transactional context.
* @return a stage that, when completed, indicates that the transaction was rolled back.
*/
default CompletionStage<Void> rollback(Transaction transaction) {
throw new UnsupportedOperationException("Store characteristic included " + Characteristic.TRANSACTIONAL + ", but it does not implement rollback");
}
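   /*
    * Illustrative sketch (not part of this SPI): transactional stores commonly buffer the prepared
    * modifications per transaction and only touch the underlying storage on commit. The pendingWrites
    * map and the flush helper are hypothetical.
    *
    *   private final Map<Transaction, List<MarshallableEntry<K, V>>> pendingWrites = new ConcurrentHashMap<>();
    *
    *   // prepareWithModifications: drain both publishers into pendingWrites.get(transaction)
    *   // commit:                   flush(pendingWrites.remove(transaction)) to the storage
    *   // rollback:                 pendingWrites.remove(transaction)
    */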
/**
 * Some stores may not want to perform operations based on whether a command has certain flags. This method is
 * currently only used for single write operations. It may be removed at any time as it is experimental; it is
 * not recommended for end users to implement it.
* @implSpec
* The default implementation returns false.
* @param commandFlags the flags attributed to the command when performing the operation.
* @return whether the operation should occur.
*/
@Experimental
default boolean ignoreCommandWithFlags(long commandFlags) {
return false;
}
/**
 * A Publisher that provides a stream of values and the segment to which those values map.
* @param <Type> type of values in this Publisher.
*/
interface SegmentedPublisher<Type> extends Publisher<Type> {
/**
* Returns the segment for all keys in the publisher.
* @return segment the data the publisher provides maps to.
*/
int getSegment();
}
}
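// Illustrative sketch (not part of the repository): a minimal in-memory NonBlockingStore. This is a
// sketch under assumptions — characteristics, segmentation and expiration handling are omitted, and
// only the single-key operations referenced above are shown. All names below are hypothetical.
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
public class InMemoryNonBlockingStore<K, V> implements NonBlockingStore<K, V> {
   // All operations complete immediately, so no blocking offload is needed here.
   private final Map<Object, MarshallableEntry<K, V>> data = new ConcurrentHashMap<>();
   @Override
   public CompletionStage<Void> start(InitializationContext ctx) {
      return CompletableFutures.completedNull();
   }
   @Override
   public CompletionStage<Void> stop() {
      return CompletableFutures.completedNull();
   }
   @Override
   public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
      return CompletableFuture.completedFuture(data.get(key));
   }
   @Override
   @SuppressWarnings("unchecked")
   public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
      data.put(entry.getKey(), (MarshallableEntry<K, V>) entry);
      return CompletableFutures.completedNull();
   }
   @Override
   public CompletionStage<Boolean> delete(int segment, Object key) {
      return CompletableFuture.completedFuture(data.remove(key) != null);
   }
}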
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/CacheLoader.java
package org.infinispan.persistence.spi;
import org.infinispan.commons.api.Lifecycle;
import net.jcip.annotations.ThreadSafe;
/**
* Defines the logic for loading data from an external storage. The writing of data is optional and coordinated through
* a {@link CacheWriter}.
*
* @author Mircea Markus
* @since 6.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface CacheLoader<K, V> extends Lifecycle {
/**
* Used to initialize a cache loader. Typically invoked by the {@link org.infinispan.persistence.manager.PersistenceManager}
* when setting up cache loaders.
*
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
void init(InitializationContext ctx);
/**
 * Fetches an entry from the storage. If a {@link MarshallableEntry} needs to be created here, {@link
 * InitializationContext#getMarshallableEntryFactory()} and {@link
 * InitializationContext#getByteBufferFactory()} should be used.
*
* @return the entry, or null if the entry does not exist
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
MarshallableEntry<K, V> loadEntry(Object key);
/**
* Returns true if the storage contains an entry associated with the given key.
*
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
boolean contains(Object key);
/**
 * @return true if the store can be connected to, otherwise false
*/
default boolean isAvailable() {
return true;
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/AdvancedCacheLoader.java
package org.infinispan.persistence.spi;
import java.util.function.Predicate;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import net.jcip.annotations.ThreadSafe;
/**
 * A specialised extension of the {@link CacheLoader} interface that allows parallel iteration over the
 * existing entries.
*
* @author Mircea Markus
* @since 6.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@ThreadSafe
@Deprecated
public interface AdvancedCacheLoader<K, V> extends CacheLoader<K, V> {
/**
* Returns the number of elements in the store.
*
* @throws PersistenceException in case of an error, e.g. communicating with the external storage
*/
int size();
/**
* Publishes all the keys from this store. The given publisher can be used by as many
* {@link org.reactivestreams.Subscriber}s as desired. Keys are not retrieved until a given Subscriber requests
* them from the {@link org.reactivestreams.Subscription}.
* <p>
 * Stores will only return non-expired keys
* @param filter a filter - null is treated as allowing all entries
* @return a publisher that will provide the keys from the store
*/
default Publisher<K> publishKeys(Predicate<? super K> filter) {
return Flowable.fromPublisher(entryPublisher(filter, false, true)).map(MarshallableEntry::getKey);
}
/**
* Publishes all entries from this store. The given publisher can be used by as many
* {@link org.reactivestreams.Subscriber}s as desired. Entries are not retrieved until a given Subscriber requests
* them from the {@link org.reactivestreams.Subscription}.
* <p>
* If <b>fetchMetadata</b> is true this store must guarantee to not return any expired entries.
* @param filter a filter - null is treated as allowing all entries
* @param fetchValue whether or not to fetch the value from the persistent store. E.g. if the iteration is
* intended only over the key set, no point fetching the values from the persistent store as
* well
* @param fetchMetadata whether or not to fetch the metadata from the persistent store. E.g. if the iteration is
 *                      intended only over the key set, then no point fetching the metadata from the persistent store
* as well
* @return a publisher that will provide the entries from the store
*/
Publisher<MarshallableEntry<K, V>> entryPublisher(Predicate<? super K> filter, boolean fetchValue,
boolean fetchMetadata);
}
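/*
 * Usage sketch (illustrative): the publisher-based API composes directly with RxJava, e.g. counting
 * the non-expired entries held by a loader. 'loader' is any AdvancedCacheLoader implementation.
 *
 *   long count = Flowable.fromPublisher(loader.entryPublisher(null, false, true))
 *         .count()
 *         .blockingGet();
 */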
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/InitializationContext.java
package org.infinispan.persistence.spi;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import org.infinispan.Cache;
import org.infinispan.commons.io.ByteBufferFactory;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.NonBlockingManager;
import net.jcip.annotations.ThreadSafe;
/**
* Aggregates the initialisation state needed by either a {@link CacheLoader} or a {@link CacheWriter}.
*
* @author Mircea Markus
* @since 6.0
*/
@ThreadSafe
public interface InitializationContext {
<T extends StoreConfiguration> T getConfiguration();
Cache getCache();
/**
* The configured partitioner that can be used to determine which segment a given key belongs to. This is useful
 * when a store is segmented (i.e. implements {@link SegmentedAdvancedLoadWriteStore}).
* @return partitioner that can provide what segment a key maps to
*/
KeyPartitioner getKeyPartitioner();
TimeService getTimeService();
/**
* To be used for building {@link org.infinispan.commons.io.ByteBuffer} objects.
*/
ByteBufferFactory getByteBufferFactory();
/**
* Returns the preferred executor to be used by stores if needed. Stores normally shouldn't need this unless they
* *must* perform some blocking code asynchronously.
* @return the executor to be used with stores
 * @deprecated since 11.0 - Please use {@link #getBlockingManager()} or {@link #getNonBlockingExecutor()} instead
*/
@Deprecated
ExecutorService getExecutor();
/**
* Returns an executor for non-blocking tasks. Users must guarantee that the tasks they submit to this executor
* do not block the thread in which the executor runs. Doing so can cause Infinispan to handle operations
* more slowly, reducing performance, because threads are limited to the number of cores and are used extensively.
* @return an executor that can submit non-blocking tasks.
*/
Executor getNonBlockingExecutor();
/**
* Returns a manager that is designed to execute tasks that might block. This manager ensures that only the blocking
* code is run on a blocking thread and any stage continues on a non-blocking thread.
* @return a manager that should be used to execute blocking operations.
*/
BlockingManager getBlockingManager();
/**
 * Returns a manager that is designed to help with non-blocking operations.
 * @return a manager that can be used to help with offloading non-blocking work.
*/
NonBlockingManager getNonBlockingManager();
/**
* Should be used to build all {@link MarshallableEntry} objects.
*/
<K,V> MarshallableEntryFactory<K,V> getMarshallableEntryFactory();
/**
* Returns the persistence marshaller which should be used to marshall/unmarshall all stored bytes.
*/
PersistenceMarshaller getPersistenceMarshaller();
/**
* Returns the global configuration
*/
GlobalConfiguration getGlobalConfiguration();
}
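/*
 * Usage sketch (illustrative): a store typically captures what it needs from the context during
 * initialization. The field names here are hypothetical.
 *
 *   public void init(InitializationContext ctx) {
 *      this.entryFactory = ctx.getMarshallableEntryFactory();
 *      this.blockingManager = ctx.getBlockingManager();
 *      StoreConfiguration config = ctx.getConfiguration();
 *      this.segmented = config.segmented();
 *   }
 */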
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/TransactionalCacheWriter.java
package org.infinispan.persistence.spi;
import jakarta.transaction.Transaction;
import org.infinispan.persistence.support.BatchModification;
/**
* Defines the functionality of a transactional store. This interface allows the implementing store to participate in the
 * 2PC protocol of a cache's transaction. This enables the cache transaction to be rolled back if an exception occurs whilst
 * writing key changes to the underlying store, or for the writes to the underlying store to be rolled back if the exception
* occurs in-memory.
*
* As this writer is part of the 2PC, all writes to the underlying store should only be executed by the originator of a
* transaction in normal operation. In the event that the originator crashes between the prepare and commit/rollback phase
* it is expected that the underlying store's transaction will eventually timeout and rollback. In the event that the originator
* crashes and transaction recovery is enabled, then forcing commit will result in the replaying of said Tx's (prepare/commit) to
* the underlying store.
*
* @author Ryan Emerson
* @since 9.0
* @deprecated since 11.0 replaced by {@link NonBlockingStore}
*/
@Deprecated
public interface TransactionalCacheWriter<K, V> extends AdvancedCacheWriter<K, V> {
/**
* Write modifications to the store in the prepare phase, as this is the only way we know the FINAL values of the entries.
 * This is required to handle scenarios where an object's value is changed after the put command has been executed, but
* before the commit is called on the Tx.
*
* @param transaction the current transactional context.
* @param batchModification an object containing the write/remove operations required for this transaction.
* @throws PersistenceException if an error occurs when communicating/performing writes on the underlying store.
*/
void prepareWithModifications(Transaction transaction, BatchModification batchModification) throws PersistenceException;
/**
* Commit the provided transaction's changes to the underlying store.
*
* @param transaction the current transactional context.
*/
void commit(Transaction transaction);
/**
* Rollback the provided transaction's changes to the underlying store.
*
* @param transaction the current transactional context.
*/
void rollback(Transaction transaction);
}
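/*
 * Usage sketch (illustrative): the 2PC interaction with a transactional writer, driven by the
 * transaction originator.
 *
 *   writer.prepareWithModifications(tx, batchModification); // prepare phase
 *   writer.commit(tx);                                      // on success
 *   // writer.rollback(tx);                                 // on failure
 */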
infinispan-main/core/src/main/java/org/infinispan/persistence/spi/MarshallableEntry.java
package org.infinispan.persistence.spi;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
/**
 * Defines an externally persisted entry. External stores that keep the data in serialised form should return a
 * MarshalledEntry that contains the data in binary form (ByteBuffer) and unmarshalls it lazily when
 * getKey/Value/Metadata are invoked. This approach avoids unnecessary object (de)serialization, e.g.
* when the entries are fetched from the external store for the sole purpose of being sent over the wire to
* another requestor node.
*
* @author Ryan Emerson
* @since 10.0
*/
public interface MarshallableEntry<K, V> {
/**
* Returns the key in serialized format.
*/
ByteBuffer getKeyBytes();
/**
 * Returns the value in serialized format.
*/
ByteBuffer getValueBytes();
/**
* @return null if there's no metadata associated with the object (e.g. expiry info, version..)
*/
ByteBuffer getMetadataBytes();
/**
* @return {@code null} if there is no internal metadata associated with the object.
*/
ByteBuffer getInternalMetadataBytes();
/**
* Returns the same key as {@link #getKeyBytes()}, but unmarshalled.
*/
K getKey();
/**
 * Returns the same value as {@link #getValueBytes()}, but unmarshalled.
*/
V getValue();
/**
* @return might be null if there's no metadata associated with the object (e.g. expiry info, version..).
*/
Metadata getMetadata();
/**
* @return {@code null} if there is no internal metadata associated with the object.
*/
PrivateMetadata getInternalMetadata();
long created();
long lastUsed();
boolean isExpired(long now);
long expiryTime();
MarshalledValue getMarshalledValue();
}
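/*
 * Usage sketch (illustrative): stores should consult isExpired with the current wall-clock time
 * before returning an entry, so expired values never leave the store. readFromStorage is hypothetical.
 *
 *   MarshallableEntry<K, V> entry = readFromStorage(key);
 *   return entry == null || entry.isExpired(timeService.wallClockTime()) ? null : entry;
 */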
infinispan-main/core/src/main/java/org/infinispan/persistence/manager/package-info.java
/**
* Implementations of the {@link org.infinispan.persistence.manager.PersistenceManager} interface, which define the logic
 * of how Infinispan interacts with external stores.
*
* @author Ryan Emerson
* @api.private
*/
package org.infinispan.persistence.manager;
infinispan-main/core/src/main/java/org/infinispan/persistence/manager/PersistenceManager.java
package org.infinispan.persistence.manager;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.BiPredicate;
import java.util.function.Predicate;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.persistence.spi.AdvancedCacheLoader;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.transaction.impl.AbstractCacheTransaction;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* Defines the logic for interacting with the chain of external storage.
*
* @author Manik Surtani
* @author Mircea Markus
* @since 4.0
*/
public interface PersistenceManager extends Lifecycle {
boolean isEnabled();
/**
* Returns whether the manager is enabled and has at least one store
*/
boolean hasWriter();
boolean hasStore(Predicate<StoreConfiguration> test);
/**
* Loads the data from the external store into memory during cache startup.
*/
Flowable<MarshallableEntry<Object, Object>> preloadPublisher();
/**
* Marks the given storage as disabled.
*/
CompletionStage<Void> disableStore(String storeType);
/**
* Adds a new store to the cache.
*
* @param storeConfiguration the configuration for the store
    * @throws org.infinispan.commons.CacheException if the cache was not empty
*/
CompletionStage<Void> addStore(StoreConfiguration storeConfiguration);
/**
* Add a {@link StoreChangeListener} to be notified when a store is added or removed dynamically.
*/
void addStoreListener(StoreChangeListener listener);
/**
    * Remove a registered {@link StoreChangeListener}
*/
void removeStoreListener(StoreChangeListener listener);
interface StoreChangeListener {
/**
* Notifies when a store was added or removed dynamically.
*
       * This method is always invoked with exclusive access with respect to any other method in {@link PersistenceManager}.
* Implementations must only ensure visibility or atomicity of their own variables and operations.
*/
void storeChanged(PersistenceStatus persistenceStatus);
}
<T> Set<T> getStores(Class<T> storeClass);
Collection<String> getStoresAsString();
/**
* Removes the expired entries from all the existing storage.
*/
CompletionStage<Void> purgeExpired();
/**
    * Invokes {@link org.infinispan.persistence.spi.AdvancedCacheWriter#clear()} on all the stores that allow it.
*/
CompletionStage<Void> clearAllStores(Predicate<? super StoreConfiguration> predicate);
CompletionStage<Boolean> deleteFromAllStores(Object key, int segment, Predicate<? super StoreConfiguration> predicate);
/**
* See {@link #publishEntries(Predicate, boolean, boolean, Predicate)}
*/
default <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(boolean fetchValue, boolean fetchMetadata) {
return publishEntries(null, fetchValue, fetchMetadata, AccessMode.BOTH);
}
/**
* Returns a publisher that will publish all entries stored by the underlying cache store. Only the first
    * cache store that implements {@link AdvancedCacheLoader} will be used. The predicate is applied by the underlying
    * loader in a best-effort attempt to improve performance.
* <p>
    * The caller can tell the store to also fetch the value or metadata. In some cases this can improve performance. If
    * metadata is not fetched, the publisher may include expired entries.
* @param filter filter so that only entries whose key matches are returned
* @param fetchValue whether to fetch value or not
* @param fetchMetadata whether to fetch metadata or not
* @param predicate whether a store can be used by publish entries
* @param <K> key type
* @param <V> value type
* @return publisher that will publish entries
*/
<K, V> Publisher<MarshallableEntry<K, V>> publishEntries(Predicate<? super K> filter, boolean fetchValue,
boolean fetchMetadata, Predicate<? super StoreConfiguration> predicate);
/**
* Returns a publisher that will publish entries that map to the provided segments. It will attempt to find the
* first segmented store if one is available. If not it will fall back to the first non segmented store and
* filter out entries that don't map to the provided segment.
* @param segments only entries that map to these segments are processed
* @param filter filter so that only entries whose key matches are returned
* @param fetchValue whether to fetch value or not
* @param fetchMetadata whether to fetch metadata or not
* @param predicate whether a store can be used by publish entries
* @param <K> key type
* @param <V> value type
* @return publisher that will publish entries belonging to the given segments
*/
<K, V> Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean fetchValue,
boolean fetchMetadata, Predicate<? super StoreConfiguration> predicate);
/**
* Returns a publisher that will publish all keys stored by the underlying cache store. Only the first cache store
    * that implements {@link AdvancedCacheLoader} will be used. The predicate is applied by the underlying
    * loader in a best-effort attempt to improve performance.
* <p>
* This method should be preferred over {@link #publishEntries(Predicate, boolean, boolean, Predicate)} when only
* keys are desired as many stores can do this in a significantly more performant way.
* <p>
* This publisher will never return a key which belongs to an expired entry
* @param filter filter so that only keys which match are returned
* @param predicate access mode to choose what type of loader to use
* @param <K> key type
* @return publisher that will publish keys
*/
<K> Publisher<K> publishKeys(Predicate<? super K> filter, Predicate<? super StoreConfiguration> predicate);
/**
* Returns a publisher that will publish keys that map to the provided segments. It will attempt to find the
* first segmented store if one is available. If not it will fall back to the first non segmented store and
* filter out entries that don't map to the provided segment.
* <p>
* This method should be preferred over {@link #publishEntries(IntSet, Predicate, boolean, boolean, Predicate)}
* when only keys are desired as many stores can do this in a significantly more performant way.
* <p>
* This publisher will never return a key which belongs to an expired entry
* @param segments only keys that map to these segments are processed
* @param filter filter so that only keys which match are returned
* @param predicate access mode to choose what type of loader to use
* @param <K> key type
* @return publisher that will publish keys belonging to the given segments
*/
<K> Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter, Predicate<? super StoreConfiguration> predicate);
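   /*
    * Usage sketch (illustrative): the key publishers compose directly with RxJava, e.g. streaming the
    * keys held by shared stores only.
    *
    *   Flowable.fromPublisher(persistenceManager.publishKeys(null, AccessMode.SHARED))
    *         .subscribe(key -> log.tracef("loaded key %s", key));
    */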
/**
    * Loads an entry from the persistence store for the given key. The returned value may be null. A returned entry
    * is guaranteed not to be expired at the time it is returned.
* @param key key to read the entry from
* @param localInvocation whether this invocation is a local invocation. Some loaders may be ignored if it is not local
* @param includeStores if a loader that is also a store can be loaded from
* @return entry that maps to the key
*/
<K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, boolean localInvocation, boolean includeStores);
/**
* Same as {@link #loadFromAllStores(Object, boolean, boolean)} except that the segment of the key is also
* provided to avoid having to calculate the segment.
* @param key key to read the entry from
* @param segment segment the key maps to
* @param localInvocation whether this invocation is a local invocation. Some loaders may be ignored if it is not local
* @param includeStores if a loader that is also a store can be loaded from
* @return entry that maps to the key
* @implSpec default implementation invokes {@link #loadFromAllStores(Object, boolean, boolean)} ignoring the segment
*/
default <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, int segment, boolean localInvocation, boolean includeStores) {
return loadFromAllStores(key, localInvocation, includeStores);
}
/**
* Returns an approximate count of how many entries are persisted in the given segments.
*
* If no store can handle the request for the given mode, a value of <b>-1</b> is returned instead.
*
* @param predicate whether a loader can be used
* @param segments the segments to include
* @return size or -1 if approximate size couldn't be computed
*/
CompletionStage<Long> approximateSize(Predicate<? super StoreConfiguration> predicate, IntSet segments);
default CompletionStage<Long> size() {
return size(AccessMode.BOTH);
}
default CompletionStage<Long> size(IntSet segments) {
return size(AccessMode.BOTH, segments);
}
/**
* Returns the count of how many entries are persisted in the given segments. If no store can handle the request
* for the given mode a value of <b>-1</b> is returned instead.
*
* @param predicate whether a loader can be used
* @param segments segments to check
* @return size or -1 if size couldn't be computed
*/
CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate, IntSet segments);
/**
* Returns the count of how many entries are persisted. If no store can handle the request for the given mode a
* value of <b>-1</b> is returned instead.
* @param predicate whether a loader can be used
* @return size or -1 if size couldn't be computed
*/
CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate);
enum AccessMode implements Predicate<StoreConfiguration> {
/**
* The operation is performed in all {@link org.infinispan.persistence.spi.CacheWriter} or {@link
* org.infinispan.persistence.spi.CacheLoader}
*/
BOTH {
@Override
public boolean test(StoreConfiguration configuration) {
return true;
}
},
/**
* The operation is performed only in shared configured {@link org.infinispan.persistence.spi.CacheWriter} or
* {@link org.infinispan.persistence.spi.CacheLoader}
*/
SHARED {
@Override
public boolean test(StoreConfiguration configuration) {
return configuration.shared();
}
},
/**
* The operation is performed only in non-shared {@link org.infinispan.persistence.spi.CacheWriter} or {@link
* org.infinispan.persistence.spi.CacheLoader}
*/
PRIVATE {
@Override
public boolean test(StoreConfiguration configuration) {
return !configuration.shared();
}
},
/**
* The operation is performed only in a {@link org.infinispan.persistence.spi.CacheWriter} or {@link
* org.infinispan.persistence.spi.CacheLoader} that has async write behind.
*/
ASYNC {
@Override
public boolean test(StoreConfiguration configuration) {
return configuration.async().enabled();
}
},
/**
* The operation is performed only in a {@link org.infinispan.persistence.spi.CacheWriter} or {@link
* org.infinispan.persistence.spi.CacheLoader} that doesn't have async write behind.
*/
NOT_ASYNC {
@Override
public boolean test(StoreConfiguration configuration) {
return !configuration.async().enabled();
}
},
}
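   /*
    * Usage sketch (illustrative): the AccessMode constants are plain Predicates over
    * StoreConfiguration, so they plug directly into the predicate-based methods above.
    *
    *   persistenceManager.clearAllStores(AccessMode.PRIVATE); // clear only non-shared stores
    */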
void setClearOnStop(boolean clearOnStop);
/**
* Write to all stores that are not transactional. A store is considered transactional if all of the following are true:
*
* <p><ul>
* <li>The store implements {@link org.infinispan.persistence.spi.TransactionalCacheWriter}</li>
* <li>The store is configured to be transactional</li>
    * <li>The cache's TransactionMode == TRANSACTIONAL</li>
* </ul></p>
*
* @param marshalledEntry the entry to be written to all non-tx stores.
* @param segment the segment the entry maps to
* @param predicate should we write to a given store
*/
default CompletionStage<Void> writeToAllNonTxStores(MarshallableEntry marshalledEntry, int segment, Predicate<? super StoreConfiguration> predicate) {
return writeToAllNonTxStores(marshalledEntry, segment, predicate, 0);
}
/**
* @see #writeToAllNonTxStores(MarshallableEntry, int, Predicate)
*
* @param flags Flags used during command invocation
*/
CompletionStage<Void> writeToAllNonTxStores(MarshallableEntry marshalledEntry, int segment, Predicate<? super StoreConfiguration> predicate, long flags);
/**
* Perform the prepare phase of 2PC on all Tx stores.
*
* @param txInvocationContext the tx invocation containing the modifications
* @param predicate should we prepare on a given store
* @throws PersistenceException if an error is encountered at any of the underlying stores.
*/
CompletionStage<Void> prepareAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) throws PersistenceException;
/**
* Perform the commit operation for the provided transaction on all Tx stores.
*
* @param txInvocationContext the transactional context to be committed.
* @param predicate should we commit each store
*/
CompletionStage<Void> commitAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate);
/**
* Perform the rollback operation for the provided transaction on all Tx stores.
*
    * @param txInvocationContext the transactional context to be rolled back.
* @param predicate should we rollback each store
*/
CompletionStage<Void> rollbackAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate);
/**
* Writes the values modified from a put map command to the stores.
* @param putMapCommand the put map command to write values from
* @param ctx context to lookup entries
* @param commandKeyPredicate predicate to control if a key/command combination should be accepted
* @return a stage of how many writes were performed
*/
CompletionStage<Long> writeMapCommand(PutMapCommand putMapCommand, InvocationContext ctx,
BiPredicate<? super PutMapCommand, Object> commandKeyPredicate);
/**
* Writes a batch for the given modifications in the transactional context
* @param invocationContext transactional context
* @param commandKeyPredicate predicate to control if a key/command combination should be accepted
* @return a stage of how many writes were performed
*/
CompletionStage<Long> performBatch(TxInvocationContext<AbstractCacheTransaction> invocationContext,
BiPredicate<? super WriteCommand, Object> commandKeyPredicate);
/**
* Writes the entries to the stores that pass the given predicate
* @param iterable entries to write
* @param predicate predicate to test for a store
* @param <K> key type
* @param <V> value type
* @return a stage that when complete the values were written
*/
<K, V> CompletionStage<Void> writeEntries(Iterable<MarshallableEntry<K, V>> iterable,
Predicate<? super StoreConfiguration> predicate);
/**
* @return true if all configured stores are available and ready for read/write operations.
*/
boolean isAvailable();
/**
    * Notifies any underlying segmented stores that the provided segments are owned by this cache, so they can
    * start/configure any underlying resources required to handle requests for entries on the given segments.
* <p>
* This only affects stores that are not shared as shared stores have to keep all segments running at all times
* <p>
    * This method returns true if all stores were able to handle the added segments. That is, either there are no
    * stores or all of the configured stores are segmented. Note that configured loaders do not affect the return
    * value.
* @param segments segments this cache owns
* @return false if a configured store couldn't configure newly added segments
*/
default CompletionStage<Boolean> addSegments(IntSet segments) {
return CompletableFutures.completedTrue();
}
/**
    * Notifies any underlying segmented stores that the given segments are no longer owned by this cache, allowing
    * them to remove the segments and release any resources related to them.
* <p>
* This only affects stores that are not shared as shared stores have to keep all segments running at all times
* <p>
    * This method returns true if all stores were able to handle the removed segments. That is, either there are no
    * stores or all of the configured stores are segmented. Note that configured loaders do not affect the return
    * value.
* @param segments segments this cache no longer owns
* @return false if a configured store couldn't remove configured segments
*/
default CompletionStage<Boolean> removeSegments(IntSet segments) {
return CompletableFutures.completedTrue();
}
/**
* @return true if no {@link org.infinispan.persistence.spi.CacheWriter} instances have been configured.
*/
boolean isReadOnly();
}
infinispan-main/core/src/main/java/org/infinispan/persistence/manager/PersistenceStatus.java
package org.infinispan.persistence.manager;
/**
* @since 13.0
*/
public class PersistenceStatus {
private final boolean isEnabled;
private final boolean usingSegmentedStore;
private final boolean usingAsyncStore;
private final boolean usingSharedStore;
private final boolean usingSharedAsyncStore;
private final boolean usingReadOnly;
private final boolean usingTransactionalStore;
public PersistenceStatus(boolean isEnabled, boolean usingSegmentedStore, boolean usingAsyncStore,
boolean usingSharedStore, boolean usingSharedAsyncStore, boolean usingReadOnly, boolean usingTransactionalStore) {
this.isEnabled = isEnabled;
this.usingSegmentedStore = usingSegmentedStore;
this.usingAsyncStore = usingAsyncStore;
this.usingSharedStore = usingSharedStore;
this.usingSharedAsyncStore = usingSharedAsyncStore;
this.usingReadOnly = usingReadOnly;
this.usingTransactionalStore = usingTransactionalStore;
}
public boolean isEnabled() {
return isEnabled;
}
public boolean usingSegmentedStore() {
return usingSegmentedStore;
}
public boolean usingAsyncStore() {
return usingAsyncStore;
}
public boolean usingSharedStore() {
return usingSharedStore;
}
public boolean usingSharedAsyncStore() {
return usingSharedAsyncStore;
}
public boolean usingReadOnly() {
return usingReadOnly;
}
public boolean usingTransactionalStore() {
return usingTransactionalStore;
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/manager/PreloadManager.java
package org.infinispan.persistence.manager;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.CompletionStage;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.AdvancedCache;
import org.infinispan.cache.impl.InvocationHelper;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.InvocationContextFactory;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.LocalTxInvocationContext;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.metadata.impl.InternalMetadataImpl;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.transaction.impl.FakeJTATransaction;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.TransactionCoordinator;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
/**
* Separate the preload into its own component
*/
@Scope(Scopes.NAMED_CACHE)
public class PreloadManager {
public static final long PRELOAD_FLAGS = FlagBitSets.CACHE_MODE_LOCAL |
FlagBitSets.SKIP_OWNERSHIP_CHECK |
FlagBitSets.IGNORE_RETURN_VALUES |
FlagBitSets.SKIP_CACHE_STORE |
FlagBitSets.SKIP_LOCKING |
FlagBitSets.SKIP_XSITE_BACKUP |
FlagBitSets.IRAC_STATE;
public static final long PRELOAD_WITHOUT_INDEXING_FLAGS =
EnumUtil.mergeBitSets(PRELOAD_FLAGS, FlagBitSets.SKIP_INDEXING);
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
@Inject Configuration configuration;
@Inject protected PersistenceManager persistenceManager;
@Inject TimeService timeService;
@Inject protected ComponentRef<AdvancedCache<?, ?>> cache;
@Inject CommandsFactory commandsFactory;
@Inject KeyPartitioner keyPartitioner;
@Inject InvocationContextFactory invocationContextFactory;
@Inject InvocationHelper invocationHelper;
@Inject TransactionCoordinator transactionCoordinator;
@Inject TransactionManager transactionManager;
@Inject TransactionTable transactionTable;
private volatile boolean fullyPreloaded;
@Start
public void start() {
fullyPreloaded = false;
CompletionStages.join(doPreload());
}
private CompletionStage<Void> doPreload() {
Publisher<MarshallableEntry<Object, Object>> publisher = persistenceManager.preloadPublisher();
long start = timeService.time();
final long maxEntries = getMaxEntries();
final long flags = getFlagsForStateInsertion();
AdvancedCache<?,?> tmpCache = this.cache.wired().withStorageMediaType();
DataConversion keyDataConversion = tmpCache.getKeyDataConversion();
DataConversion valueDataConversion = tmpCache.getValueDataConversion();
Transaction outerTransaction = suspendIfNeeded();
try {
return Flowable.fromPublisher(publisher)
.take(maxEntries)
.concatMapSingle(me -> preloadEntry(flags, me, keyDataConversion, valueDataConversion))
.count()
.toCompletionStage()
.thenAccept(insertAmount -> {
this.fullyPreloaded = insertAmount < maxEntries;
log.debugf("Preloaded %d keys in %s", insertAmount,
Util.prettyPrintTime(timeService.timeDuration(start, MILLISECONDS)));
});
} finally {
resumeIfNeeded(outerTransaction);
}
}
private Single<?> preloadEntry(long flags, MarshallableEntry<Object, Object> me, DataConversion keyDataConversion, DataConversion valueDataConversion) {
// CallInterceptor will preserve the timestamps if the metadata is an InternalMetadataImpl instance
InternalMetadataImpl metadata = new InternalMetadataImpl(me.getMetadata(), me.created(), me.lastUsed());
// TODO If the storage media type is application/x-protostream, this will convert to POJOs and back
Object key = keyDataConversion.toStorage(me.getKey());
Object value = valueDataConversion.toStorage(me.getValue());
PutKeyValueCommand cmd = commandsFactory.buildPutKeyValueCommand(key, value, keyPartitioner.getSegment(key),
metadata, flags);
cmd.setInternalMetadata(me.getInternalMetadata());
CompletionStage<?> stage;
if (configuration.transaction().transactionMode().isTransactional()) {
try {
Transaction transaction = new FakeJTATransaction();
InvocationContext ctx = invocationContextFactory.createInvocationContext(transaction, false);
LocalTransaction localTransaction = ((LocalTxInvocationContext) ctx).getCacheTransaction();
stage = CompletionStages.handleAndCompose(invocationHelper.invokeAsync(ctx, cmd),
(__, t) -> completeTransaction(key, localTransaction, t))
.whenComplete((__, t) -> transactionTable.removeLocalTransaction(localTransaction));
} catch (Exception e) {
throw log.problemPreloadingKey(key, e);
}
} else {
stage = invocationHelper.invokeAsync(cmd, 1);
}
// The return value doesn't matter, but it cannot be null
return Completable.fromCompletionStage(stage).toSingleDefault(me);
}
private CompletionStage<?> completeTransaction(Object key, LocalTransaction localTransaction, Throwable t) {
if (t != null) {
return transactionCoordinator.rollback(localTransaction)
.whenComplete((__1, t1) -> {
throw log.problemPreloadingKey(key, t);
});
}
return transactionCoordinator.commit(localTransaction, true);
}
private void resumeIfNeeded(Transaction transaction) {
if (configuration.transaction().transactionMode().isTransactional() && transactionManager != null &&
transaction != null) {
try {
transactionManager.resume(transaction);
} catch (Exception e) {
throw new PersistenceException(e);
}
}
}
private Transaction suspendIfNeeded() {
if (configuration.transaction().transactionMode().isTransactional() && transactionManager != null) {
try {
return transactionManager.suspend();
} catch (Exception e) {
throw new PersistenceException(e);
}
}
return null;
}
private long getMaxEntries() {
long maxCount;
if (configuration.memory().isEvictionEnabled() && (maxCount = configuration.memory().maxCount()) > 0) {
return maxCount;
}
return Long.MAX_VALUE;
}
   private long getFlagsForStateInsertion() {
      boolean hasSharedStore = persistenceManager.hasStore(StoreConfiguration::shared);
      // Indexing during preload is only needed when the entries may have been written by another node
      // (i.e. a shared store is present) and the index is volatile; otherwise indexing is skipped.
      if (!hasSharedStore || !configuration.indexing().isVolatile()) {
return PRELOAD_WITHOUT_INDEXING_FLAGS;
} else {
return PRELOAD_FLAGS;
}
}
/**
    * @return true if all entries from the store have been inserted into the cache. If persistence/preload
    * is disabled or the eviction limit was reached while preloading, returns false.
*/
public boolean isFullyPreloaded() {
return fullyPreloaded;
}
}
infinispan-main/core/src/main/java/org/infinispan/persistence/manager/PersistenceManagerImpl.java
package org.infinispan.persistence.manager;
import static java.util.Collections.singletonList;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.infinispan.util.logging.Log.CONFIG;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.StampedLock;
import java.util.function.BiPredicate;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.write.DataWriteCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.io.ByteBufferFactory;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.entries.MVCCEntry;
import org.infinispan.container.impl.InternalEntryFactory;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.FlagBitSets;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.expiration.impl.InternalExpirationManager;
import org.infinispan.factories.InterceptorChainFactory;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.impl.CacheLoaderInterceptor;
import org.infinispan.interceptors.impl.CacheWriterInterceptor;
import org.infinispan.interceptors.impl.TransactionalStoreInterceptor;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.persistence.InitializationContextImpl;
import org.infinispan.persistence.async.AsyncNonBlockingStore;
import org.infinispan.persistence.internal.PersistenceUtil;
import org.infinispan.persistence.spi.LocalOnlyCacheLoader;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.NonBlockingStore.Characteristic;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.persistence.spi.StoreUnavailableException;
import org.infinispan.persistence.support.DelegatingNonBlockingStore;
import org.infinispan.persistence.support.NonBlockingStoreAdapter;
import org.infinispan.persistence.support.SegmentPublisherWrapper;
import org.infinispan.persistence.support.SingleSegmentPublisher;
import org.infinispan.transaction.impl.AbstractCacheTransaction;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.NonBlockingManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Maybe;
import io.reactivex.rxjava3.core.Single;
import io.reactivex.rxjava3.functions.Function;
import net.jcip.annotations.GuardedBy;
@Scope(Scopes.NAMED_CACHE)
public class PersistenceManagerImpl implements PersistenceManager {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
@Inject Configuration configuration;
@Inject GlobalConfiguration globalConfiguration;
@Inject ComponentRef<AdvancedCache<Object, Object>> cache;
@Inject KeyPartitioner keyPartitioner;
@Inject TimeService timeService;
@Inject @ComponentName(KnownComponentNames.PERSISTENCE_MARSHALLER)
PersistenceMarshaller persistenceMarshaller;
@Inject ByteBufferFactory byteBufferFactory;
@Inject CacheNotifier<Object, Object> cacheNotifier;
@Inject InternalEntryFactory internalEntryFactory;
@Inject MarshallableEntryFactory<?, ?> marshallableEntryFactory;
@ComponentName(KnownComponentNames.NON_BLOCKING_EXECUTOR)
@Inject Executor nonBlockingExecutor;
@Inject BlockingManager blockingManager;
@Inject NonBlockingManager nonBlockingManager;
@Inject ComponentRef<InternalExpirationManager<Object, Object>> expirationManager;
@Inject DistributionManager distributionManager;
@Inject InterceptorChainFactory interceptorChainFactory;
   // We use a StampedLock since we may need to release a lock on a different thread than the one that acquired it
private final StampedLock lock = new StampedLock();
// making it volatile as it might change after @Start, so it needs the visibility.
private volatile boolean enabled;
private volatile boolean clearOnStop;
private volatile AutoCloseable availabilityTask;
private volatile String unavailableExceptionMessage;
// Writes to an invalidation cache skip the shared check
private boolean isInvalidationCache;
private boolean allSegmentedOrShared;
private int segmentCount;
@GuardedBy("lock")
private List<StoreStatus> stores = null;
private final List<StoreChangeListener> listeners = new CopyOnWriteArrayList<>();
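   /**
    * Returns the first store whose status matches the given predicate, or {@code null} if none does.
    * An optimistic read is attempted first; only if the stamp was invalidated by a concurrent write
    * (a store being added or removed) does this fall back to acquiring the full read lock.
    */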
private <K, V> NonBlockingStore<K, V> getStore(Predicate<StoreStatus> predicate) {
// We almost always will be doing reads, so optimistic should be faster
// Writes are only done during startup, shutdown and if removing a store
long stamp = lock.tryOptimisticRead();
NonBlockingStore<K, V> store = getStoreLocked(predicate);
if (!lock.validate(stamp)) {
stamp = acquireReadLock();
try {
store = getStoreLocked(predicate);
} finally {
releaseReadLock(stamp);
}
}
return store;
}
@GuardedBy("lock#readLock")
private <K, V> NonBlockingStore<K, V> getStoreLocked(Predicate<StoreStatus> predicate) {
if (stores == null) {
return null;
}
for (StoreStatus storeStatus : stores) {
if (predicate.test(storeStatus)) {
return storeStatus.store();
}
}
return null;
}
@GuardedBy("lock#readLock")
private StoreStatus getStoreStatusLocked(Predicate<? super StoreStatus> predicate) {
for (StoreStatus storeStatus : stores) {
if (predicate.test(storeStatus)) {
return storeStatus;
}
}
return null;
}
@Override
@Start
public void start() {
enabled = configuration.persistence().usingStores();
segmentCount = configuration.clustering().hash().numSegments();
isInvalidationCache = configuration.clustering().cacheMode().isInvalidation();
if (!enabled)
return;
// Blocks here waiting for stores and availability task to start if needed
Completable.using(this::acquireWriteLock,
__ -> startManagerAndStores(configuration.persistence().stores()),
this::releaseWriteLock)
.blockingAwait();
}
@GuardedBy("lock#writeLock")
private Completable startManagerAndStores(Collection<StoreConfiguration> storeConfigurations) {
if (storeConfigurations.isEmpty()) {
throw new IllegalArgumentException("Store configurations require at least one configuration");
}
enabled = true;
if (stores == null) {
stores = new ArrayList<>(storeConfigurations.size());
}
Completable storeStartup = startStoresOnly(storeConfigurations);
long interval = configuration.persistence().availabilityInterval();
if (interval > 0 && availabilityTask == null) {
storeStartup = storeStartup.doOnComplete(() ->
availabilityTask = nonBlockingManager.scheduleWithFixedDelay(this::pollStoreAvailability, interval,
interval, MILLISECONDS, t -> !(t instanceof Error)));
}
return storeStartup.doOnComplete(() -> {
boolean hasMaxIdle = configuration.expiration().maxIdle() > 0;
boolean hasLifespan = configuration.expiration().lifespan() > 0;
if (hasLifespan || hasMaxIdle) {
            stores.forEach(status -> {
// If a store is not writeable, then expiration works fine as it only expires in memory, thus refreshing
// the value that can be read from the store
if (status.hasCharacteristic(Characteristic.READ_ONLY)) {
return;
}
if (hasMaxIdle) {
               // Max idle is not currently supported with stores; it somewhat works with passivation though
if (!configuration.persistence().passivation()) {
throw CONFIG.maxIdleNotAllowedWithoutPassivation();
}
CONFIG.maxIdleNotTestedWithPassivation();
}
if (!status.hasCharacteristic(Characteristic.EXPIRATION)) {
throw CONFIG.expirationNotAllowedWhenStoreDoesNotSupport(status.store.getClass().getName());
}
});
}
allSegmentedOrShared = allStoresSegmentedOrShared();
});
}
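   /**
    * Starts the given store configurations one at a time, in configured order, wrapping each store
    * in an {@link AsyncNonBlockingStore} when async is enabled and clearing it on startup when the
    * configuration requires a purge.
    */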
private Completable startStoresOnly(Iterable<StoreConfiguration> storeConfigurations) {
return Flowable.fromIterable(storeConfigurations)
            // We have to start stores in configured order to ensure the stores list retains that order
.concatMapSingle(storeConfiguration -> {
NonBlockingStore<?, ?> actualStore = PersistenceUtil.storeFromConfiguration(storeConfiguration);
NonBlockingStore<?, ?> nonBlockingStore;
if (storeConfiguration.async().enabled()) {
nonBlockingStore = new AsyncNonBlockingStore<>(actualStore);
} else {
nonBlockingStore = actualStore;
}
InitializationContextImpl ctx =
new InitializationContextImpl(storeConfiguration, cache.wired(), keyPartitioner, persistenceMarshaller,
timeService, byteBufferFactory, marshallableEntryFactory, nonBlockingExecutor,
globalConfiguration, blockingManager, nonBlockingManager);
CompletionStage<Void> stage = nonBlockingStore.start(ctx).whenComplete((ignore, t) -> {
// On exception, just put a status with only the store - this way we can still invoke stop on it later
if (t != null) {
stores.add(new StoreStatus(nonBlockingStore, null, null));
}
});
return Completable.fromCompletionStage(stage)
.toSingle(() -> new StoreStatus(nonBlockingStore, storeConfiguration,
updateCharacteristics(nonBlockingStore, nonBlockingStore.characteristics(), storeConfiguration)));
})
            // This relies upon the visibility guarantees of reactive streams for publishing the added statuses
.doOnNext(stores::add)
.delay(status -> {
// Caches that need state transfer will clear the store *after* the stable
// topology is restored, if needed.
if (!configuration.clustering().cacheMode().needsStateTransfer() && status.config.purgeOnStartup()) {
return Flowable.fromCompletable(Completable.fromCompletionStage(status.store.clear()));
}
return Flowable.empty();
}).ignoreElements();
}
@GuardedBy("lock")
   private boolean allStoresSegmentedOrShared() {
      // All stores are segmented or shared exactly when no store lacks both SEGMENTABLE and SHAREABLE
      return getStoreLocked(storeStatus -> !storeStatus.hasCharacteristic(Characteristic.SEGMENTABLE) &&
            !storeStatus.hasCharacteristic(Characteristic.SHAREABLE)) == null;
   }
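   /**
    * Reconciles the characteristics a store declares with its configuration: read-only/write-only
    * settings add the corresponding characteristic (failing on contradictions), while disabling
    * segmentation, transactions or sharing in the configuration removes the matching characteristic.
    * Configuring a capability the store does not declare is a configuration error.
    */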
private Set<Characteristic> updateCharacteristics(NonBlockingStore<?, ?> store, Set<Characteristic> characteristics,
StoreConfiguration storeConfiguration) {
if (storeConfiguration.ignoreModifications()) {
if (characteristics.contains(Characteristic.WRITE_ONLY)) {
throw log.storeConfiguredHasBothReadAndWriteOnly(store.getClass().getName(), Characteristic.WRITE_ONLY,
Characteristic.READ_ONLY);
}
characteristics.add(Characteristic.READ_ONLY);
characteristics.remove(Characteristic.TRANSACTIONAL);
}
if (storeConfiguration.writeOnly()) {
if (characteristics.contains(Characteristic.READ_ONLY)) {
throw log.storeConfiguredHasBothReadAndWriteOnly(store.getClass().getName(), Characteristic.READ_ONLY,
Characteristic.WRITE_ONLY);
}
characteristics.add(Characteristic.WRITE_ONLY);
characteristics.remove(Characteristic.BULK_READ);
}
if (storeConfiguration.segmented()) {
if (!characteristics.contains(Characteristic.SEGMENTABLE)) {
throw log.storeConfiguredSegmentedButCharacteristicNotPresent(store.getClass().getName());
}
} else {
characteristics.remove(Characteristic.SEGMENTABLE);
}
if (storeConfiguration.transactional()) {
if (!characteristics.contains(Characteristic.TRANSACTIONAL)) {
throw log.storeConfiguredTransactionalButCharacteristicNotPresent(store.getClass().getName());
}
} else {
characteristics.remove(Characteristic.TRANSACTIONAL);
}
if (storeConfiguration.shared()) {
if (!characteristics.contains(Characteristic.SHAREABLE)) {
throw log.storeConfiguredSharedButCharacteristicNotPresent(store.getClass().getName());
}
} else {
characteristics.remove(Characteristic.SHAREABLE);
}
return characteristics;
}
/**
* Polls the availability of all configured stores.
* <p>
* If a store is found to be unavailable all future requests to this manager will throw a
* {@link StoreUnavailableException}, until the stores are all available again.
* <p>
* Note that this method should not be called until the previous invocation's stage completed.
* {@link NonBlockingManager#scheduleWithFixedDelay(java.util.function.Supplier, long, long, java.util.concurrent.TimeUnit)}
* guarantees that.
*
* @return stage that completes when all store availability checks are done
*/
protected CompletionStage<Void> pollStoreAvailability() {
if (log.isTraceEnabled()) {
log.trace("Polling Store availability");
}
AtomicReference<NonBlockingStore<?, ?>> firstUnavailableStore = new AtomicReference<>();
long stamp = acquireReadLock();
boolean release = true;
try {
AggregateCompletionStage<Void> stageBuilder = CompletionStages.aggregateCompletionStage();
for (StoreStatus storeStatus : stores) {
CompletionStage<Boolean> availableStage;
try {
availableStage = storeStatus.store.isAvailable();
} catch (Throwable t) {
log.storeIsAvailableCheckThrewException(t, storeStatus.store.getClass().getName());
availableStage = CompletableFutures.booleanStage(false);
}
availableStage = availableStage.exceptionally(throwable -> {
log.storeIsAvailableCompletedExceptionally(throwable, storeStatus.store.getClass().getName());
return false;
});
stageBuilder.dependsOn(availableStage.thenCompose(isAvailable -> {
storeStatus.availability = isAvailable;
if (!isAvailable) {
// Update persistence availability as soon as we know one store is unavailable
firstUnavailableStore.compareAndSet(null, storeStatus.store);
return updatePersistenceAvailability(storeStatus.store);
}
return CompletableFutures.completedNull();
}));
}
CompletionStage<Void> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return updatePersistenceAvailability(firstUnavailableStore.get());
} else {
release = false;
return stage.thenCompose(__ -> updatePersistenceAvailability(firstUnavailableStore.get()))
.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
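   /**
    * Transitions the manager between available and unavailable based on the given store, firing the
    * persistence availability changed notification only when the availability actually flips.
    */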
private CompletionStage<Void> updatePersistenceAvailability(NonBlockingStore<?, ?> unavailableStore) {
// No locking needed: there is only one availability check task running at any given time
if (unavailableStore != null) {
if (unavailableExceptionMessage == null) {
log.persistenceUnavailable(unavailableStore.getClass().getName());
unavailableExceptionMessage = "Store " + unavailableStore + " is unavailable";
return cacheNotifier.notifyPersistenceAvailabilityChanged(false);
}
} else {
// All stores are available
if (unavailableExceptionMessage != null) {
log.persistenceAvailable();
unavailableExceptionMessage = null;
return cacheNotifier.notifyPersistenceAvailabilityChanged(true);
}
}
return CompletableFutures.completedNull();
}
@Override
@Stop
public void stop() {
AggregateCompletionStage<Void> allStage = CompletionStages.aggregateCompletionStage();
long stamp = acquireWriteLock();
try {
stopAvailabilityTask();
if (stores == null)
return;
for (StoreStatus storeStatus : stores) {
NonBlockingStore<Object, Object> store = storeStatus.store();
CompletionStage<Void> storeStage;
if (clearOnStop && !storeStatus.hasCharacteristic(Characteristic.READ_ONLY)) {
// Clear the persistent store before stopping
storeStage = store.clear().thenCompose(__ -> store.stop());
} else {
storeStage = store.stop();
}
allStage.dependsOn(storeStage);
}
stores = null;
} finally {
releaseWriteLock(stamp);
}
// Wait until it completes
CompletionStages.join(allStage.freeze());
}
private void stopAvailabilityTask() {
AutoCloseable taskToClose = availabilityTask;
if (taskToClose != null) {
try {
taskToClose.close();
} catch (Exception e) {
log.warn("There was a problem stopping availability task", e);
}
}
}
@Override
public boolean isEnabled() {
return enabled;
}
@Override
public boolean isReadOnly() {
return getStore(storeStatus -> !storeStatus.hasCharacteristic(Characteristic.READ_ONLY)) == null;
}
@Override
public boolean hasWriter() {
return getStore(storeStatus -> !storeStatus.hasCharacteristic(Characteristic.READ_ONLY)) != null;
}
@Override
public boolean hasStore(Predicate<StoreConfiguration> test) {
return getStore(storeStatus -> test.test(storeStatus.config)) != null;
}
@Override
public Flowable<MarshallableEntry<Object, Object>> preloadPublisher() {
long stamp = acquireReadLock();
NonBlockingStore<Object, Object> nonBlockingStore = getStoreLocked(status -> status.config.preload());
if (nonBlockingStore == null) {
releaseReadLock(stamp);
return Flowable.empty();
}
Publisher<MarshallableEntry<Object, Object>> publisher = nonBlockingStore.publishEntries(
IntSets.immutableRangeSet(segmentCount), null, true);
return Flowable.fromPublisher(publisher)
.doFinally(() -> releaseReadLock(stamp));
}
@Override
public void addStoreListener(StoreChangeListener listener) {
listeners.add(listener);
}
@Override
public void removeStoreListener(StoreChangeListener listener) {
listeners.remove(listener);
}
@Override
public CompletionStage<Void> addStore(StoreConfiguration storeConfiguration) {
return Single.fromCompletionStage(cache.wired().sizeAsync())
.doOnSuccess(l -> {
if (l > 0) throw log.cannotAddStore(cache.wired().getName());
})
.concatMapCompletable(v -> Completable.using(this::acquireWriteLock, lock ->
startManagerAndStores(singletonList(storeConfiguration))
.doOnComplete(() -> {
AsyncInterceptorChain chain = cache.wired().getAsyncInterceptorChain();
interceptorChainFactory.addPersistenceInterceptors(chain, configuration, singletonList(storeConfiguration));
listeners.forEach(l -> l.storeChanged(createStatus()));
})
, this::releaseWriteLock))
.toCompletionStage(null);
}
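   /**
    * Builds a snapshot of the current store topology (shared, segmented, async, read-only,
    * transactional) that is handed to {@link StoreChangeListener}s.
    */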
private PersistenceStatus createStatus() {
boolean usingSharedStore = false;
boolean usingSharedAsync = false;
boolean usingSegments = false;
boolean usingAsync = false;
boolean usingReadOnly = false;
boolean usingTransactionalStore = false;
for (StoreStatus storeStatus : stores) {
if (storeStatus.config.shared()) {
usingSharedStore = true;
}
if (storeStatus.config.async().enabled()) {
usingSharedAsync |= storeStatus.config.shared();
usingAsync = true;
}
if (storeStatus.config.segmented()) {
usingSegments = true;
}
if (storeStatus.config.ignoreModifications()) {
usingReadOnly = true;
}
if (storeStatus.config.transactional()) {
usingTransactionalStore = true;
}
}
return new PersistenceStatus(enabled, usingSegments, usingAsync, usingSharedStore, usingSharedAsync,
usingReadOnly, usingTransactionalStore);
}
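   /**
    * Stops and removes any store whose class name matches {@code storeType}, including stores
    * wrapped in an adapter. When the last store is removed, persistence is disabled, the
    * availability task is stopped and the persistence interceptors are removed from the chain.
    */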
@Override
public CompletionStage<Void> disableStore(String storeType) {
boolean stillHasAStore = false;
AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
long stamp = lock.writeLock();
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedNull();
}
boolean allAvailable = true;
Iterator<StoreStatus> statusIterator = stores.iterator();
while (statusIterator.hasNext()) {
StoreStatus status = statusIterator.next();
NonBlockingStore<?, ?> nonBlockingStore = unwrapStore(status.store());
if (nonBlockingStore.getClass().getName().equals(storeType) || containedInAdapter(nonBlockingStore, storeType)) {
statusIterator.remove();
aggregateCompletionStage.dependsOn(nonBlockingStore.stop()
.whenComplete((v, t) -> {
if (t != null) {
log.warn("There was an error stopping the store", t);
}
}));
} else {
stillHasAStore = true;
allAvailable = allAvailable && status.availability;
}
}
if (!stillHasAStore) {
unavailableExceptionMessage = null;
enabled = false;
stopAvailabilityTask();
} else if (allAvailable) {
unavailableExceptionMessage = null;
}
allSegmentedOrShared = allStoresSegmentedOrShared();
listeners.forEach(l -> l.storeChanged(createStatus()));
if (!stillHasAStore) {
AsyncInterceptorChain chain = cache.wired().getAsyncInterceptorChain();
AsyncInterceptor loaderInterceptor = chain.findInterceptorExtending(CacheLoaderInterceptor.class);
if (loaderInterceptor == null) {
PERSISTENCE.persistenceWithoutCacheLoaderInterceptor();
} else {
chain.removeInterceptor(loaderInterceptor.getClass());
}
AsyncInterceptor writerInterceptor = chain.findInterceptorExtending(CacheWriterInterceptor.class);
if (writerInterceptor == null) {
writerInterceptor = chain.findInterceptorWithClass(TransactionalStoreInterceptor.class);
if (writerInterceptor == null) {
PERSISTENCE.persistenceWithoutCacheWriteInterceptor();
} else {
chain.removeInterceptor(writerInterceptor.getClass());
}
} else {
chain.removeInterceptor(writerInterceptor.getClass());
}
}
return aggregateCompletionStage.freeze();
} finally {
lock.unlockWrite(stamp);
}
}
private <K, V> NonBlockingStore<K, V> unwrapStore(NonBlockingStore<K, V> store) {
if (store instanceof DelegatingNonBlockingStore) {
return ((DelegatingNonBlockingStore<K, V>) store).delegate();
}
return store;
}
private Object unwrapOldSPI(NonBlockingStore<?, ?> store) {
if (store instanceof NonBlockingStoreAdapter) {
return ((NonBlockingStoreAdapter<?, ?>) store).getActualStore();
}
return store;
}
private boolean containedInAdapter(NonBlockingStore<?, ?> nonBlockingStore, String adaptedClassName) {
return nonBlockingStore instanceof NonBlockingStoreAdapter &&
((NonBlockingStoreAdapter<?, ?>) nonBlockingStore).getActualStore().getClass().getName().equals(adaptedClassName);
}
@Override
public <T> Set<T> getStores(Class<T> storeClass) {
long stamp = acquireReadLock();
try {
if (!checkStoreAvailability()) {
return Collections.emptySet();
}
return stores.stream()
.map(StoreStatus::store)
.map(this::unwrapStore)
.map(this::unwrapOldSPI)
.filter(storeClass::isInstance)
.map(storeClass::cast)
.collect(Collectors.toCollection(HashSet::new));
} finally {
releaseReadLock(stamp);
}
}
@Override
public Collection<String> getStoresAsString() {
long stamp = acquireReadLock();
try {
if (!checkStoreAvailability()) {
return Collections.emptyList();
}
return stores.stream()
.map(StoreStatus::store)
.map(this::unwrapStore)
.map(this::unwrapOldSPI)
.map(c -> c.getClass().getName())
.collect(Collectors.toCollection(ArrayList::new));
} finally {
releaseReadLock(stamp);
}
}
@Override
public CompletionStage<Void> purgeExpired() {
long stamp = acquireReadLock();
try {
if (!checkStoreAvailability()) {
releaseReadLock(stamp);
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Purging entries from stores on cache %s", cache.getName());
}
AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
for (StoreStatus storeStatus : stores) {
if (storeStatus.hasCharacteristic(Characteristic.EXPIRATION)) {
Flowable<MarshallableEntry<Object, Object>> flowable = Flowable.fromPublisher(storeStatus.store().purgeExpired());
Completable completable = flowable.concatMapCompletable(me -> Completable.fromCompletionStage(
expirationManager.running().handleInStoreExpirationInternal(me)));
aggregateCompletionStage.dependsOn(completable.toCompletionStage(null));
}
}
return aggregateCompletionStage.freeze()
.whenComplete((v, t) -> releaseReadLock(stamp));
} catch (Throwable t) {
releaseReadLock(stamp);
throw t;
}
}
@Override
public CompletionStage<Void> clearAllStores(Predicate<? super StoreConfiguration> predicate) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Clearing all stores");
}
// Let the clear work in parallel across the stores
AggregateCompletionStage<Void> stageBuilder = CompletionStages.aggregateCompletionStage();
for (StoreStatus storeStatus : stores) {
if (!storeStatus.hasCharacteristic(Characteristic.READ_ONLY)
&& predicate.test(storeStatus.config)) {
stageBuilder.dependsOn(storeStatus.store.clear());
}
}
CompletionStage<Void> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
@Override
public CompletionStage<Boolean> deleteFromAllStores(Object key, int segment, Predicate<? super StoreConfiguration> predicate) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedFalse();
}
if (log.isTraceEnabled()) {
log.tracef("Deleting entry for key %s from stores", key);
}
if (stores.isEmpty())
return CompletableFutures.completedFalse();
// Let the write work in parallel across the stores
AtomicBoolean removedAny = new AtomicBoolean();
AggregateCompletionStage<AtomicBoolean> stageBuilder = CompletionStages.aggregateCompletionStage(removedAny);
for (StoreStatus storeStatus : stores) {
if (!storeStatus.hasCharacteristic(Characteristic.READ_ONLY)
&& predicate.test(storeStatus.config)) {
stageBuilder.dependsOn(storeStatus.store.delete(segment, key)
.thenAccept(removed -> {
// If a store doesn't say, pretend it was removed
if (removed == null || removed) {
removedAny.set(true);
}
}));
}
}
CompletionStage<AtomicBoolean> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return CompletableFutures.booleanStage(removedAny.get());
} else {
release = false;
return stage.handle((removed, throwable) -> {
releaseReadLock(stamp);
if (throwable != null) {
throw CompletableFutures.asCompletionException(throwable);
}
return removed.get();
});
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(boolean fetchValue, boolean fetchMetadata) {
return publishEntries(k -> true, fetchValue, fetchMetadata, k -> true);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(Predicate<? super K> filter, boolean fetchValue,
boolean fetchMetadata, Predicate<? super StoreConfiguration> predicate) {
return publishEntries(IntSets.immutableRangeSet(segmentCount), filter, fetchValue, fetchMetadata, predicate);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter,
boolean fetchValue, boolean fetchMetadata, Predicate<? super StoreConfiguration> predicate) {
return Flowable.using(this::acquireReadLock,
ignore -> {
if (!checkStoreAvailability()) {
return Flowable.empty();
}
if (log.isTraceEnabled()) {
log.tracef("Publishing entries for segments %s", segments);
}
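            // Note: only the first store with BULK_READ that passes the predicate is consulted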
for (StoreStatus storeStatus : stores) {
Set<Characteristic> characteristics = storeStatus.characteristics;
if (characteristics.contains(Characteristic.BULK_READ) && predicate.test(storeStatus.config)) {
Predicate<? super K> filterToUse;
if (!characteristics.contains(Characteristic.SEGMENTABLE) &&
!segments.containsAll(IntSets.immutableRangeSet(segmentCount))) {
filterToUse = PersistenceUtil.combinePredicate(segments, keyPartitioner, filter);
} else {
filterToUse = filter;
}
return storeStatus.<K, V>store().publishEntries(segments, filterToUse, fetchValue);
}
}
return Flowable.empty();
},
this::releaseReadLock);
}
@Override
public <K> Publisher<K> publishKeys(Predicate<? super K> filter, Predicate<? super StoreConfiguration> predicate) {
return publishKeys(IntSets.immutableRangeSet(segmentCount), filter, predicate);
}
@Override
public <K> Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter, Predicate<? super StoreConfiguration> predicate) {
return Flowable.using(this::acquireReadLock,
ignore -> {
if (!checkStoreAvailability()) {
return Flowable.empty();
}
if (log.isTraceEnabled()) {
log.tracef("Publishing keys for segments %s", segments);
}
for (StoreStatus storeStatus : stores) {
Set<Characteristic> characteristics = storeStatus.characteristics;
if (characteristics.contains(Characteristic.BULK_READ) &&
predicate.test(storeStatus.config)) {
Predicate<? super K> filterToUse;
if (!characteristics.contains(Characteristic.SEGMENTABLE) &&
!segments.containsAll(IntSets.immutableRangeSet(segmentCount))) {
filterToUse =
PersistenceUtil.combinePredicate(segments, keyPartitioner, filter);
} else {
filterToUse = filter;
}
return storeStatus.<K, Object>store().publishKeys(segments, filterToUse);
}
}
return Flowable.empty();
},
this::releaseReadLock);
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, boolean localInvocation,
boolean includeStores) {
return loadFromAllStores(key, keyPartitioner.getSegment(key), localInvocation, includeStores);
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, int segment,
boolean localInvocation, boolean includeStores) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Loading entry for key %s with segment %d", key, segment);
}
Iterator<StoreStatus> iterator = stores.iterator();
CompletionStage<MarshallableEntry<K, V>> stage =
loadFromStoresIterator(key, segment, iterator, localInvocation, includeStores);
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
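   /**
    * Sequentially tries each remaining store in the iterator until one returns an entry, composing
    * asynchronously so the next store is only consulted after the previous load misses.
    */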
private <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromStoresIterator(Object key, int segment,
Iterator<StoreStatus> iterator,
boolean localInvocation,
boolean includeStores) {
while (iterator.hasNext()) {
StoreStatus storeStatus = iterator.next();
NonBlockingStore<K, V> store = storeStatus.store();
if (!allowLoad(storeStatus, localInvocation, includeStores)) {
continue;
}
CompletionStage<MarshallableEntry<K, V>> loadStage = store.load(segmentOrZero(storeStatus, segment), key);
return loadStage.thenCompose(e -> {
if (e != null) {
               // For read-only stores we apply lifespan expiration to the entry so it can be re-read later;
               // max idle is only allowed when the store has passivation, so it can't be read-only
if (storeStatus.hasCharacteristic(Characteristic.READ_ONLY) && configuration.expiration().lifespan() > 0) {
e = marshallableEntryFactory.cloneWithExpiration((MarshallableEntry) e, timeService.wallClockTime(),
configuration.expiration().lifespan());
}
return CompletableFuture.completedFuture(e);
} else {
return loadFromStoresIterator(key, segment, iterator, localInvocation, includeStores);
}
});
}
return CompletableFutures.completedNull();
}
private boolean allowLoad(StoreStatus storeStatus, boolean localInvocation, boolean includeStores) {
return !storeStatus.hasCharacteristic(Characteristic.WRITE_ONLY) &&
(localInvocation || !isLocalOnlyLoader(storeStatus.store)) &&
(includeStores || storeStatus.hasCharacteristic(Characteristic.READ_ONLY) ||
storeStatus.config.ignoreModifications());
}
private boolean isLocalOnlyLoader(NonBlockingStore<?, ?> store) {
if (store instanceof LocalOnlyCacheLoader) return true;
NonBlockingStore<?, ?> unwrappedStore;
if (store instanceof DelegatingNonBlockingStore) {
unwrappedStore = ((DelegatingNonBlockingStore<?, ?>) store).delegate();
} else {
unwrappedStore = store;
}
if (unwrappedStore instanceof LocalOnlyCacheLoader) {
return true;
}
if (unwrappedStore instanceof NonBlockingStoreAdapter) {
return ((NonBlockingStoreAdapter<?, ?>) unwrappedStore).getActualStore() instanceof LocalOnlyCacheLoader;
}
return false;
}
@Override
public CompletionStage<Long> approximateSize(Predicate<? super StoreConfiguration> predicate, IntSet segments) {
if (!isEnabled()) {
return NonBlockingStore.SIZE_UNAVAILABLE_FUTURE;
}
long stamp = acquireReadLock();
try {
if (!isAvailable()) {
releaseReadLock(stamp);
return NonBlockingStore.SIZE_UNAVAILABLE_FUTURE;
}
if (stores == null) {
throw new IllegalLifecycleStateException();
}
// Ignore stores without BULK_READ, they don't implement approximateSize()
StoreStatus firstStoreStatus = getStoreStatusLocked(storeStatus ->
storeStatus.hasCharacteristic(Characteristic.BULK_READ) &&
predicate.test(storeStatus.config));
if (firstStoreStatus == null) {
releaseReadLock(stamp);
return NonBlockingStore.SIZE_UNAVAILABLE_FUTURE;
}
if (log.isTraceEnabled()) {
log.tracef("Obtaining approximate size from store %s", firstStoreStatus.store);
}
CompletionStage<Long> stage;
if (firstStoreStatus.hasCharacteristic(Characteristic.SEGMENTABLE)) {
stage = firstStoreStatus.store.approximateSize(segments);
} else {
stage = firstStoreStatus.store.approximateSize(IntSets.immutableRangeSet(segmentCount))
.thenApply(size -> {
// Counting only the keys in the given segments would be expensive,
// so we compute an estimate assuming that each segment has a similar number of entries
LocalizedCacheTopology cacheTopology = distributionManager.getCacheTopology();
int storeSegments = firstStoreStatus.hasCharacteristic(Characteristic.SHAREABLE) ?
segmentCount : cacheTopology.getLocalWriteSegmentsCount();
return storeSegments > 0 ? size * segments.size() / storeSegments : size;
});
}
return stage.whenComplete((ignore, ignoreT) -> releaseReadLock(stamp));
} catch (Throwable t) {
releaseReadLock(stamp);
throw t;
}
}
@Override
public CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate, IntSet segments) {
long stamp = acquireReadLock();
try {
checkStoreAvailability();
if (log.isTraceEnabled()) {
log.tracef("Obtaining size from stores");
}
NonBlockingStore<?, ?> nonBlockingStore = getStoreLocked(storeStatus -> storeStatus.hasCharacteristic(
Characteristic.BULK_READ) && predicate.test(storeStatus.config));
if (nonBlockingStore == null) {
releaseReadLock(stamp);
return NonBlockingStore.SIZE_UNAVAILABLE_FUTURE;
}
if (segments == null) {
segments = IntSets.immutableRangeSet(segmentCount);
}
return nonBlockingStore.size(segments)
.whenComplete((ignore, ignoreT) -> releaseReadLock(stamp));
} catch (Throwable t) {
releaseReadLock(stamp);
throw t;
}
}
@Override
public CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate) {
return size(predicate, null);
}
@Override
public void setClearOnStop(boolean clearOnStop) {
this.clearOnStop = clearOnStop;
}
@Override
public CompletionStage<Void> writeToAllNonTxStores(MarshallableEntry marshalledEntry, int segment,
Predicate<? super StoreConfiguration> predicate, long flags) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Writing entry %s for with segment: %d", marshalledEntry, segment);
}
// Let the write work in parallel across the stores
AggregateCompletionStage<Void> stageBuilder = CompletionStages.aggregateCompletionStage();
for (StoreStatus storeStatus : stores) {
if (shouldWrite(storeStatus, predicate, flags)) {
stageBuilder.dependsOn(storeStatus.store.write(segment, marshalledEntry));
}
}
CompletionStage<Void> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
private int segmentOrZero(StoreStatus storeStatus, int segment) {
return storeStatus.hasCharacteristic(Characteristic.SEGMENTABLE) ? segment : 0;
}
private boolean shouldWrite(StoreStatus storeStatus, Predicate<? super StoreConfiguration> userPredicate) {
return !storeStatus.hasCharacteristic(Characteristic.READ_ONLY)
&& userPredicate.test(storeStatus.config);
}
private boolean shouldWrite(StoreStatus storeStatus, Predicate<? super StoreConfiguration> userPredicate, long flags) {
return shouldWrite(storeStatus, userPredicate)
&& !storeStatus.store.ignoreCommandWithFlags(flags);
}
@Override
public CompletionStage<Void> prepareAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) throws PersistenceException {
Flowable<MVCCEntry<Object, Object>> mvccEntryFlowable = toMvccEntryFlowable(txInvocationContext, null);
return batchOperation(mvccEntryFlowable, txInvocationContext, (stores, segmentCount, removeFlowable,
putFlowable) -> stores.prepareWithModifications(txInvocationContext.getTransaction(), segmentCount, removeFlowable, putFlowable))
.thenApply(CompletableFutures.toNullFunction());
}
@Override
public CompletionStage<Void> commitAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Committing transaction %s to stores", txInvocationContext);
}
// Let the commit work in parallel across the stores
AggregateCompletionStage<Void> stageBuilder = CompletionStages.aggregateCompletionStage();
for (StoreStatus storeStatus : stores) {
if (shouldPerformTransactionOperation(storeStatus, predicate)) {
stageBuilder.dependsOn(storeStatus.store.commit(txInvocationContext.getTransaction()));
}
}
CompletionStage<Void> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
@Override
public CompletionStage<Void> rollbackAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedNull();
}
if (log.isTraceEnabled()) {
log.tracef("Rolling back transaction %s for stores", txInvocationContext);
}
// Let the rollback work in parallel across the stores
AggregateCompletionStage<Void> stageBuilder = CompletionStages.aggregateCompletionStage();
for (StoreStatus storeStatus : stores) {
if (shouldPerformTransactionOperation(storeStatus, predicate)) {
stageBuilder.dependsOn(storeStatus.store.rollback(txInvocationContext.getTransaction()));
}
}
CompletionStage<Void> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
private boolean shouldPerformTransactionOperation(StoreStatus storeStatus, Predicate<? super StoreConfiguration> predicate) {
return storeStatus.hasCharacteristic(Characteristic.TRANSACTIONAL)
&& predicate.test(storeStatus.config);
}
@Override
public <K, V> CompletionStage<Void> writeEntries(Iterable<MarshallableEntry<K, V>> iterable,
Predicate<? super StoreConfiguration> predicate) {
return Completable.using(
this::acquireReadLock,
ignore -> {
if (!checkStoreAvailability()) {
return Completable.complete();
}
if (log.isTraceEnabled()) {
log.trace("Writing entries to stores");
}
return Flowable.fromIterable(stores)
.filter(storeStatus -> shouldWrite(storeStatus, predicate) &&
!storeStatus.hasCharacteristic(Characteristic.TRANSACTIONAL))
// Let the write work in parallel across the stores
.flatMapCompletable(storeStatus -> {
boolean segmented = storeStatus.hasCharacteristic(Characteristic.SEGMENTABLE);
Flowable<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> flowable;
if (segmented) {
flowable = Flowable.fromIterable(iterable)
.groupBy(groupingFunction(MarshallableEntry::getKey))
.map(SegmentPublisherWrapper::wrap);
} else {
flowable = Flowable.just(SingleSegmentPublisher.singleSegment(Flowable.fromIterable(iterable)));
}
return Completable.fromCompletionStage(storeStatus.<K, V>store().batch(segmentCount(segmented),
Flowable.empty(), flowable));
});
},
this::releaseReadLock
).toCompletionStage(null);
}
@Override
public CompletionStage<Long> writeMapCommand(PutMapCommand putMapCommand, InvocationContext ctx,
BiPredicate<? super PutMapCommand, Object> commandKeyPredicate) {
Flowable<MVCCEntry<Object, Object>> mvccEntryFlowable = entriesFromCommand(putMapCommand, ctx, commandKeyPredicate);
return batchOperation(mvccEntryFlowable, ctx, NonBlockingStore::batch);
}
@Override
public CompletionStage<Long> performBatch(TxInvocationContext<AbstractCacheTransaction> ctx,
BiPredicate<? super WriteCommand, Object> commandKeyPredicate) {
Flowable<MVCCEntry<Object, Object>> mvccEntryFlowable = toMvccEntryFlowable(ctx, commandKeyPredicate);
return batchOperation(mvccEntryFlowable, ctx, NonBlockingStore::batch);
}
/**
* Takes all the modified entries in the flowable and writes or removes them from the stores in a single batch
* operation per store.
* <p>
* The {@link HandleFlowables} is provided for the sole reason of allowing reuse of this method by different callers.
* @param mvccEntryFlowable flowable containing modified entries
* @param ctx the context with modifications
* @param flowableHandler callback handler that actually should subscribe to the underlying store
* @param <K> key type
* @param <V> value type
* @return a stage that when complete will contain how many write operations were done
*/
private <K, V> CompletionStage<Long> batchOperation(Flowable<MVCCEntry<K, V>> mvccEntryFlowable, InvocationContext ctx,
HandleFlowables<K, V> flowableHandler) {
return Single.using(
this::acquireReadLock,
ignore -> {
if (!checkStoreAvailability()) {
return Single.just(0L);
}
if (log.isTraceEnabled()) {
log.trace("Writing batch to stores");
}
return Flowable.fromIterable(stores)
.filter(storeStatus -> !storeStatus.hasCharacteristic(Characteristic.READ_ONLY))
.flatMapSingle(storeStatus -> {
Flowable<MVCCEntry<K, V>> flowableToUse;
boolean shared = storeStatus.config.shared();
if (shared) {
if (log.isTraceEnabled()) {
log.tracef("Store %s is shared, checking skip shared stores and ignoring entries not" +
" primarily owned by this node", storeStatus.store);
}
flowableToUse = mvccEntryFlowable.filter(mvccEntry -> !mvccEntry.isSkipSharedStore());
} else {
flowableToUse = mvccEntryFlowable;
}
boolean segmented = storeStatus.config.segmented();
// Now we have to split this stores' flowable into two (one for remove and one for put)
flowableToUse = flowableToUse.publish().autoConnect(2);
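                     // publish().autoConnect(2) subscribes to the upstream exactly once, and only after
                     // both the remove and write flowables below have subscribed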
Flowable<NonBlockingStore.SegmentedPublisher<Object>> removeFlowable = createRemoveFlowable(
flowableToUse, shared, segmented, storeStatus);
ByRef.Long writeCount = new ByRef.Long(0);
Flowable<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> writeFlowable =
createWriteFlowable(flowableToUse, ctx, shared, segmented, writeCount, storeStatus);
CompletionStage<Void> storeBatchStage = flowableHandler.handleFlowables(storeStatus.store(),
segmentCount(segmented), removeFlowable, writeFlowable);
return Single.fromCompletionStage(storeBatchStage
.thenApply(ignore2 -> writeCount.get()));
// Only take the last element for the count - ensures all stores are completed
}).last(0L);
},
this::releaseReadLock
).toCompletionStage();
}
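   /**
    * Builds the per-segment publishers of keys to remove for a single store, grouping removed keys
    * by segment when the store is segmented and, for shared stores, only emitting keys this node
    * is the primary owner of.
    */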
private <K, V> Flowable<NonBlockingStore.SegmentedPublisher<Object>> createRemoveFlowable(
Flowable<MVCCEntry<K, V>> flowableToUse, boolean shared, boolean segmented, StoreStatus storeStatus) {
Flowable<K> keyRemoveFlowable = flowableToUse
.filter(MVCCEntry::isRemoved)
.map(MVCCEntry::getKey);
Flowable<NonBlockingStore.SegmentedPublisher<Object>> flowable;
if (segmented) {
flowable = keyRemoveFlowable
.groupBy(keyPartitioner::getSegment)
.map(SegmentPublisherWrapper::wrap);
flowable = filterSharedSegments(flowable, null, shared);
} else {
if (shared && !isInvalidationCache) {
keyRemoveFlowable = keyRemoveFlowable.filter(k ->
distributionManager.getCacheTopology().getDistribution(k).isPrimary());
}
flowable = Flowable.just(SingleSegmentPublisher.singleSegment(keyRemoveFlowable));
}
if (log.isTraceEnabled()) {
flowable = flowable.doOnSubscribe(sub ->
log.tracef("Store %s has subscribed to remove batch", storeStatus.store));
flowable = flowable.map(sp -> {
int segment = sp.getSegment();
return SingleSegmentPublisher.singleSegment(segment, Flowable.fromPublisher(sp)
.doOnNext(keyToRemove -> log.tracef("Emitting key %s for removal from segment %s",
keyToRemove, segment)));
});
}
return flowable;
}
private <K, V> Flowable<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> createWriteFlowable(
Flowable<MVCCEntry<K, V>> flowableToUse, InvocationContext ctx, boolean shared, boolean segmented,
ByRef.Long writeCount, StoreStatus storeStatus) {
Flowable<MarshallableEntry<K, V>> entryWriteFlowable = flowableToUse
.filter(mvccEntry -> !mvccEntry.isRemoved())
.map(mvcEntry -> {
K key = mvcEntry.getKey();
InternalCacheValue<V> sv = internalEntryFactory.getValueFromCtx(key, ctx);
//noinspection unchecked
return (MarshallableEntry<K, V>) marshallableEntryFactory.create(key, (InternalCacheValue) sv);
});
Flowable<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> flowable;
if (segmented) {
// Note the writeCount includes entries that aren't written due to being shared
// at this point
entryWriteFlowable = entryWriteFlowable.doOnNext(obj -> writeCount.inc());
flowable = entryWriteFlowable
.groupBy(me -> keyPartitioner.getSegment(me.getKey()))
.map(SegmentPublisherWrapper::wrap);
// The writeCount will be decremented for each grouping of values ignored
flowable = filterSharedSegments(flowable, writeCount, shared);
} else {
if (shared && !isInvalidationCache) {
entryWriteFlowable = entryWriteFlowable.filter(me ->
distributionManager.getCacheTopology().getDistribution(me.getKey()).isPrimary());
}
entryWriteFlowable = entryWriteFlowable.doOnNext(obj -> writeCount.inc());
flowable = Flowable.just(SingleSegmentPublisher.singleSegment(entryWriteFlowable));
}
if (log.isTraceEnabled()) {
flowable = flowable.doOnSubscribe(sub ->
log.tracef("Store %s has subscribed to write batch", storeStatus.store));
flowable = flowable.map(sp -> {
int segment = sp.getSegment();
return SingleSegmentPublisher.singleSegment(segment, Flowable.fromPublisher(sp)
.doOnNext(me -> log.tracef("Emitting entry %s for write to segment %s",
me.getKey(), segment)));
});
}
return flowable;
}
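   /**
    * For shared stores, replaces publishers of segments this node is not the primary owner of with
    * empty publishers (still subscribing upstream), decrementing the write count, when tracked, for
    * the skipped entries. Non-shared stores and invalidation caches pass through untouched.
    */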
private <I> Flowable<NonBlockingStore.SegmentedPublisher<I>> filterSharedSegments(
Flowable<NonBlockingStore.SegmentedPublisher<I>> flowable, ByRef.Long writeCount, boolean shared) {
if (!shared || isInvalidationCache) {
return flowable;
}
return flowable.map(sp -> {
if (distributionManager.getCacheTopology().getSegmentDistribution(sp.getSegment()).isPrimary()) {
return sp;
}
Flowable<I> emptyFlowable = Flowable.fromPublisher(sp);
if (writeCount != null) {
emptyFlowable = emptyFlowable.doOnNext(ignore -> writeCount.dec())
.ignoreElements()
.toFlowable();
} else {
emptyFlowable = emptyFlowable.take(0);
}
         // Unfortunately we still need to subscribe to the publisher even though we don't want
         // the store to use its values. Thus we just return an empty SegmentPublisher.
return SingleSegmentPublisher.singleSegment(sp.getSegment(), emptyFlowable);
});
}
/**
* Creates a Flowable of MVCCEntry(s) that were modified due to the commands in the transactional context
* @param ctx the transactional context
* @param commandKeyPredicate predicate to test if a key/command combination should be written
* @param <K> key type
* @param <V> value type
* @return a Flowable containing MVCCEntry(s) for the modifications in the tx context
*/
private <K, V> Flowable<MVCCEntry<K, V>> toMvccEntryFlowable(TxInvocationContext<AbstractCacheTransaction> ctx,
BiPredicate<? super WriteCommand, Object> commandKeyPredicate) {
return Flowable.fromIterable(ctx.getCacheTransaction().getAllModifications())
.filter(writeCommand -> !writeCommand.hasAnyFlag(FlagBitSets.SKIP_CACHE_STORE | FlagBitSets.ROLLING_UPGRADE))
.concatMap(writeCommand -> entriesFromCommand(writeCommand, ctx, commandKeyPredicate));
}
private <K, V, WCT extends WriteCommand> Flowable<MVCCEntry<K, V>> entriesFromCommand(WCT writeCommand, InvocationContext ctx,
BiPredicate<? super WCT, Object> commandKeyPredicate) {
if (writeCommand instanceof DataWriteCommand) {
Object key = ((DataWriteCommand) writeCommand).getKey();
MVCCEntry<K, V> entry = acquireKeyFromContext(ctx, writeCommand, key, commandKeyPredicate);
return entry != null ? Flowable.just(entry) : Flowable.empty();
} else {
if (writeCommand instanceof InvalidateCommand) {
return Flowable.empty();
}
// Assume multiple key command
return Flowable.fromIterable(writeCommand.getAffectedKeys())
.concatMapMaybe(key -> {
MVCCEntry<K, V> entry = acquireKeyFromContext(ctx, writeCommand, key, commandKeyPredicate);
                  // We use an empty Maybe to symbolize a miss, which concatMapMaybe simply skips
return entry != null ? Maybe.just(entry) : Maybe.empty();
});
}
}
private <K, V, WCT extends WriteCommand> MVCCEntry<K, V> acquireKeyFromContext(InvocationContext ctx, WCT command, Object key,
BiPredicate<? super WCT, Object> commandKeyPredicate) {
if (commandKeyPredicate == null || commandKeyPredicate.test(command, key)) {
//noinspection unchecked
MVCCEntry<K, V> entry = (MVCCEntry<K, V>) ctx.lookupEntry(key);
if (entry.isChanged()) {
return entry;
}
}
return null;
}
/**
* Here just to create a lambda for method reuse of
* {@link #batchOperation(Flowable, InvocationContext, HandleFlowables)}
*/
interface HandleFlowables<K, V> {
CompletionStage<Void> handleFlowables(NonBlockingStore<K, V> store, int publisherCount,
Flowable<NonBlockingStore.SegmentedPublisher<Object>> removeFlowable,
Flowable<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> putFlowable);
}
/**
* Provides a function that groups entries by their segments (via keyPartitioner).
*/
private <E> Function<E, Integer> groupingFunction(Function<E, Object> toKeyFunction) {
return value -> keyPartitioner.getSegment(toKeyFunction.apply(value));
}
   /**
    * Returns how many segments a store has to handle, depending on whether it is segmented.
    * @param segmented whether the store is segmented
    * @return the number of segments the store must handle: all cache segments if segmented, otherwise 1
    */
private int segmentCount(boolean segmented) {
return segmented ? segmentCount : 1;
}
@Override
public boolean isAvailable() {
return unavailableExceptionMessage == null;
}
@Override
public CompletionStage<Boolean> addSegments(IntSet segments) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedFalse();
}
if (log.isTraceEnabled()) {
log.tracef("Adding segments %s to stores", segments);
}
// Let the add work in parallel across the stores
AggregateCompletionStage<Boolean> stageBuilder = CompletionStages.aggregateCompletionStage(allSegmentedOrShared);
for (StoreStatus storeStatus : stores) {
if (shouldInvokeSegmentMethods(storeStatus)) {
stageBuilder.dependsOn(storeStatus.store.addSegments(segments));
}
}
CompletionStage<Boolean> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
@Override
public CompletionStage<Boolean> removeSegments(IntSet segments) {
long stamp = acquireReadLock();
boolean release = true;
try {
if (!checkStoreAvailability()) {
return CompletableFutures.completedFalse();
}
if (log.isTraceEnabled()) {
log.tracef("Removing segments %s from stores", segments);
}
// Let the add work in parallel across the stores
AggregateCompletionStage<Boolean> stageBuilder = CompletionStages.aggregateCompletionStage(allSegmentedOrShared);
for (StoreStatus storeStatus : stores) {
if (shouldInvokeSegmentMethods(storeStatus)) {
stageBuilder.dependsOn(storeStatus.store.removeSegments(segments));
}
}
CompletionStage<Boolean> stage = stageBuilder.freeze();
if (CompletionStages.isCompletedSuccessfully(stage)) {
return stage;
} else {
release = false;
return stage.whenComplete((e, throwable) -> releaseReadLock(stamp));
}
} finally {
if (release) {
releaseReadLock(stamp);
}
}
}
private static boolean shouldInvokeSegmentMethods(StoreStatus storeStatus) {
return storeStatus.hasCharacteristic(Characteristic.SEGMENTABLE) &&
!storeStatus.hasCharacteristic(Characteristic.SHAREABLE);
}
public <K, V> List<NonBlockingStore<K, V>> getAllStores(Predicate<Set<Characteristic>> predicate) {
long stamp = acquireReadLock();
try {
if (!checkStoreAvailability()) {
return Collections.emptyList();
}
return stores.stream()
.filter(storeStatus -> predicate.test(storeStatus.characteristics))
.map(StoreStatus::<K, V>store)
.collect(Collectors.toCollection(ArrayList::new));
} finally {
releaseReadLock(stamp);
}
}
/**
* Method must be here for augmentation to tell blockhound this method is okay to block
*/
private long acquireReadLock() {
return lock.readLock();
}
/**
* Method must be here for augmentation to tell blockhound this method is okay to block
*/
private long acquireWriteLock() {
return lock.writeLock();
}
/**
* Opposite of acquireReadLock here for symmetry
*/
private void releaseReadLock(long stamp) {
lock.unlockRead(stamp);
}
/**
* Opposite of acquireWriteLock here for symmetry
*/
private void releaseWriteLock(long stamp) {
lock.unlockWrite(stamp);
}
private boolean checkStoreAvailability() {
if (!enabled) return false;
String message = unavailableExceptionMessage;
if (message != null) {
throw new StoreUnavailableException(message);
}
// Stores will be null if this is not started or was stopped and not restarted.
if (stores == null) {
throw new IllegalLifecycleStateException();
}
return true;
}
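   /**
    * Pairs a running store with its configuration, its effective characteristics and the result of
    * the most recent availability check.
    */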
static class StoreStatus {
final NonBlockingStore<?, ?> store;
final StoreConfiguration config;
final Set<Characteristic> characteristics;
      // This variable is protected by PersistenceManagerImpl#lock and also by the fact that the
      // availability check can only be run one at a time
boolean availability = true;
StoreStatus(NonBlockingStore<?, ?> store, StoreConfiguration config, Set<Characteristic> characteristics) {
this.store = store;
this.config = config;
this.characteristics = characteristics;
}
<K, V> NonBlockingStore<K, V> store() {
return (NonBlockingStore) store;
}
private boolean hasCharacteristic(Characteristic characteristic) {
return characteristics.contains(characteristic);
}
}
boolean anyLocksHeld() {
return lock.isReadLocked() || lock.isWriteLocked();
}
}
| 69,562 | 42.750314 | 141 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/manager/PassivationPersistenceManager.java |
package org.infinispan.persistence.manager;
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Predicate;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.support.DelegatingPersistenceManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
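/**
 * {@link PersistenceManager} decorator used with passivation: while an entry is being passivated it
 * is kept in a temporary map so concurrent loads still see it, and the bulk publish methods
 * concatenate that map with the store contents while filtering out duplicate keys.
 */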
public class PassivationPersistenceManager extends DelegatingPersistenceManager {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final ConcurrentMap<Object, MarshallableEntry> map = new ConcurrentHashMap<>();
public PassivationPersistenceManager(PersistenceManager persistenceManager) {
super(persistenceManager);
}
public CompletionStage<Void> passivate(MarshallableEntry marshallableEntry, int segment) {
Object key = marshallableEntry.getKey();
if (log.isTraceEnabled()) {
log.tracef("Storing entry temporarily during passivation for key %s", key);
}
map.put(key, marshallableEntry);
return writeToAllNonTxStores(marshallableEntry, segment, AccessMode.PRIVATE)
.whenComplete((ignore, ignoreT) -> {
map.remove(key);
if (log.isTraceEnabled()) {
log.tracef("Removed temporary entry during passivation for key %s", key);
}
});
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, int segment,
boolean localInvocation, boolean includeStores) {
MarshallableEntry entry = map.get(key);
if (entry != null) {
if (log.isTraceEnabled()) {
log.tracef("Retrieved entry for key %s from temporary passivation map", key);
}
return CompletableFuture.completedFuture(entry);
}
return super.loadFromAllStores(key, segment, localInvocation, includeStores);
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, boolean localInvocation,
boolean includeStores) {
MarshallableEntry entry = map.get(key);
if (entry != null) {
if (log.isTraceEnabled()) {
log.tracef("Retrieved entry for key %s from temporary passivation map", key);
}
return CompletableFuture.completedFuture(entry);
}
return super.loadFromAllStores(key, localInvocation, includeStores);
}
@Override
public <K> Publisher<K> publishKeys(Predicate<? super K> filter, Predicate<? super StoreConfiguration> predicate) {
if (map.isEmpty()) {
return super.publishKeys(filter, predicate);
}
Set<K> keys = (Set<K>) new HashSet<>(map.keySet());
Predicate<K> filterToUse = key -> !keys.contains(key);
Flowable<K> mapFlowable = Flowable.fromIterable(keys);
if (filter != null) {
filterToUse = filterToUse.and(filter);
mapFlowable = mapFlowable.filter(filter::test);
}
return Flowable.concat(
mapFlowable,
super.publishKeys(filterToUse, predicate)
);
}
@Override
public <K> Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter,
Predicate<? super StoreConfiguration> predicate) {
if (map.isEmpty()) {
return super.publishKeys(segments, filter, predicate);
}
Set<K> keys = (Set<K>) new HashSet<>(map.keySet());
Predicate<K> filterToUse = key -> !keys.contains(key);
Flowable<K> mapFlowable = Flowable.fromIterable(keys);
if (filter != null) {
filterToUse = filterToUse.and(filter);
mapFlowable = mapFlowable.filter(filter::test);
}
return Flowable.concat(
mapFlowable,
super.publishKeys(segments, filterToUse, predicate)
);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(boolean fetchValue, boolean fetchMetadata) {
if (map.isEmpty()) {
return super.publishEntries(fetchValue, fetchMetadata);
}
Map<Object, MarshallableEntry<K, V>> entries = new HashMap(map);
Predicate<K> filterToUse = key -> !entries.containsKey(key);
return Flowable.concat(
Flowable.fromIterable(entries.values()),
super.publishEntries(filterToUse, fetchValue, fetchMetadata, AccessMode.BOTH)
);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(Predicate<? super K> filter, boolean fetchValue,
boolean fetchMetadata,
Predicate<? super StoreConfiguration> predicate) {
if (map.isEmpty()) {
return super.publishEntries(filter, fetchValue, fetchMetadata, predicate);
}
Map<Object, MarshallableEntry<K, V>> entries = new HashMap(map);
Predicate<K> filterToUse = key -> !entries.containsKey(key);
Flowable<MarshallableEntry<K, V>> mapFlowable = Flowable.fromIterable(entries.values());
if (filter != null) {
filterToUse = filterToUse.and(filter);
mapFlowable = mapFlowable.filter(entry -> filter.test(entry.getKey()));
}
return Flowable.concat(
mapFlowable,
super.publishEntries(filterToUse, fetchValue, fetchMetadata, predicate)
);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter,
boolean fetchValue, boolean fetchMetadata,
Predicate<? super StoreConfiguration> predicate) {
if (map.isEmpty()) {
return super.publishEntries(segments, filter, fetchValue, fetchMetadata, predicate);
}
Map<Object, MarshallableEntry<K, V>> entries = new HashMap(map);
Predicate<K> filterToUse = key -> !entries.containsKey(key);
Flowable<MarshallableEntry<K, V>> mapFlowable = Flowable.fromIterable(entries.values());
if (filter != null) {
filterToUse = filterToUse.and(filter);
mapFlowable = mapFlowable.filter(entry -> filter.test(entry.getKey()));
}
return Flowable.concat(
mapFlowable,
super.publishEntries(segments, filterToUse, fetchValue, fetchMetadata, predicate)
);
}
@Override
public CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate) {
if (map.isEmpty()) {
return super.size(predicate);
}
      // With entries pending passivation the optimized size can't be used; iteration is required instead
return NonBlockingStore.SIZE_UNAVAILABLE_FUTURE;
}
public int pendingPassivations() {
return map.size();
}
}
| 7,483 | 40.120879 | 125 | java |
null | infinispan-main/core/src/main/java/org/infinispan/persistence/util/EntryLoader.java |
package org.infinispan.persistence.util;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.FlagAffectedCommand;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.ImmutableContext;
/**
* Interface that describes methods used for loading entries from the underlying
* {@link org.infinispan.persistence.manager.PersistenceManager} and store those entries into the
* {@link org.infinispan.container.DataContainer} if necessary.
* @param <K> key type
* @param <V> value type
* @since 10.0
* @author wburns
*/
public interface EntryLoader<K, V> {
/**
    * Load the entry from the underlying store if present, store it in the data container,
    * and return the entry in the CompletionStage
* @param ctx context that generated this request
* @param key key to load from the store
* @param segment segment of the key to load
* @param cmd the command that generated this load request
* @return stage that when complete contains the loaded entry. If the entry is non null the entry is also
* written into the underlying data container
* @since 10.0
*/
CompletionStage<InternalCacheEntry<K, V>> loadAndStoreInDataContainer(InvocationContext ctx, Object key,
int segment, FlagAffectedCommand cmd);
/**
    * Load the entry from the underlying store if present, store it in the data container,
    * and return the entry in the CompletionStage.
*
* @param key key to load from the store
* @param segment segment of the key to load
* @return stage that when complete contains the loaded entry. If the entry is non null the entry is also written
* into the underlying data container
* @since 10.0
*/
default CompletionStage<InternalCacheEntry<K, V>> loadAndStoreInDataContainer(K key, int segment) {
return loadAndStoreInDataContainer(ImmutableContext.INSTANCE, key, segment, null);
}
}
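// A minimal usage sketch (hypothetical caller; entryLoader and keyPartitioner are assumed
// to be injected components, not part of this interface):
//
//    int segment = keyPartitioner.getSegment(key);
//    entryLoader.loadAndStoreInDataContainer(key, segment)
//          .thenAccept(ice -> {
//             if (ice != null) {
//                // hit: the entry has also been written to the DataContainer
//             }
//          });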
| 1,942
| 41.23913
| 116
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/file/package-info.java
|
/**
* Simple filesystem-based {@link org.infinispan.persistence.spi.CacheWriter} implementation.
*
* @api.public
*/
package org.infinispan.persistence.file;
| 161
| 22.142857
| 93
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/file/SingleFileStore.java
|
package org.infinispan.persistence.file;
import static io.reactivex.rxjava3.core.Flowable.defer;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.time.LocalDate;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.PrimitiveIterator;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.locks.StampedLock;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.io.ByteBufferFactory;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.AbstractSegmentedStoreConfiguration;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.SingleFileStoreConfiguration;
import org.infinispan.configuration.cache.TransactionConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.container.versioning.SimpleClusteredVersion;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.container.versioning.irac.TopologyIracVersion;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.IracMetadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.PersistenceUtil;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.util.KeyValuePair;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.XSiteNamedCache;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.processors.UnicastProcessor;
import net.jcip.annotations.GuardedBy;
/**
* A filesystem-based implementation of a {@link org.infinispan.persistence.spi.NonBlockingStore}.
 * This file store stores cache values in a single file <tt>&lt;location&gt;/&lt;cache name&gt;.dat</tt>;
 * keys and file positions are kept in memory.
* <p/>
* Note: this CacheStore implementation keeps keys and file positions in memory!
* The current implementation needs about 100 bytes per cache entry, plus the
* memory for the key objects.
* <p/>
* So, the space taken by this cache store is both the space in the file
* itself plus the in-memory index with the keys and their file positions.
* With this in mind and to avoid the cache store leading to
* OutOfMemoryExceptions, you can optionally configure the maximum number
* of entries to maintain in this cache store, which affects both the size
 * of the file and the size of the in-memory index. However, setting this
 * maximum limit causes older entries in the cache store to be eliminated;
 * hence, it only makes sense to configure a maximum limit if Infinispan
 * is used as a cache where losing data in the cache store is acceptable
 * because the data can be recomputed or re-queried from the original data
 * source.
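 * <p/>
 * For example, a bounded store can be configured programmatically (an illustrative
 * sketch; the location shown is an arbitrary example):
 * <pre>
 * ConfigurationBuilder builder = new ConfigurationBuilder();
 * builder.persistence().addSingleFileStore()
 *        .location("/data/stores")
 *        .maxEntries(5000);
 * </pre>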
* <p/>
* This class is fully thread safe, yet allows for concurrent load / store
* of individual cache entries.
*
* @author Karsten Blees
* @author Mircea Markus
* @since 6.0
*/
@ConfiguredBy(SingleFileStoreConfiguration.class)
public class SingleFileStore<K, V> implements NonBlockingStore<K, V> {
private static final Log log = LogFactory.getLog(SingleFileStore.class);
public static final byte[] MAGIC_BEFORE_11 = new byte[]{'F', 'C', 'S', '1'}; //<11
public static final byte[] MAGIC_11_0 = new byte[]{'F', 'C', 'S', '2'};
public static final byte[] MAGIC_12_0 = new byte[]{'F', 'C', 'S', '3'};
public static final byte[] MAGIC_12_1 = new byte[]{'F', 'C', 'S', '4'};
public static final byte[] MAGIC_LATEST = MAGIC_12_1;
private static final byte[] ZERO_INT = {0, 0, 0, 0};
private static final int KEYLEN_POS = 4;
/*
* 4 bytes - entry size
* 4 bytes - key length
* 4 bytes - value length
* 4 bytes - metadata length
* 8 bytes - expiration time
*/
public static final int KEY_POS_BEFORE_11 = 4 + 4 + 4 + 4 + 8;
/*
* 4 bytes - entry size
* 4 bytes - key length
* 4 bytes - value length
* 4 bytes - metadata length
* 4 bytes - internal metadata length
* 8 bytes - expiration time
*/
public static final int KEY_POS_11_0 = 4 + 4 + 4 + 4 + 4 + 8;
public static final int KEY_POS_LATEST = KEY_POS_11_0;
// bytes required by created and lastUsed timestamps
private static final int TIMESTAMP_BYTES = 8 + 8;
private static final int SMALLEST_ENTRY_SIZE = 128;
private SingleFileStoreConfiguration configuration;
protected InitializationContext ctx;
private FileChannel channel;
@GuardedBy("resizeLock")
private Map<K, FileEntry>[] entries;
private SortedSet<FileEntry> freeList;
private long filePos;
private File file;
private float fragmentationFactor = .75f;
// Prevent clear() from truncating the file after a write() allocated the entry but before it wrote the data
private final StampedLock resizeLock = new StampedLock();
private TimeService timeService;
private MarshallableEntryFactory<K, V> entryFactory;
private KeyPartitioner keyPartitioner;
private BlockingManager blockingManager;
private boolean segmented;
private int actualNumSegments;
private int maxEntries;
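   // For example, getStoreFile("/data/stores", "users") resolves to "/data/stores/users.dat"
   // (the paths here are illustrative only)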
public static File getStoreFile(String directoryPath, String cacheName) {
return new File(new File(directoryPath), cacheName + ".dat");
}
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
this.ctx = ctx;
this.configuration = ctx.getConfiguration();
this.timeService = ctx.getTimeService();
this.entryFactory = ctx.getMarshallableEntryFactory();
this.blockingManager = ctx.getBlockingManager();
keyPartitioner = ctx.getKeyPartitioner();
maxEntries = configuration.maxEntries();
segmented = configuration.segmented();
if (segmented) {
actualNumSegments = ctx.getCache().getCacheConfiguration().clustering().hash().numSegments();
} else {
actualNumSegments = 1;
}
entries = new Map[actualNumSegments];
freeList = Collections.synchronizedSortedSet(new TreeSet<>());
// Not really blocking because no other thread can access the lock during start
blockingAddSegments(IntSets.immutableRangeSet(actualNumSegments));
return blockingManager.runBlocking(this::blockingStart, "sfs-start");
}
private void blockingStart() {
boolean readOnly = configuration.ignoreModifications();
assert !(configuration.purgeOnStartup() && readOnly) : "Store can't be configured with both purge and ignore modifications";
try {
Path resolvedPath = PersistenceUtil.getLocation(ctx.getGlobalConfiguration(), configuration.location());
file = getStoreFile(resolvedPath.toString(), cacheName());
boolean hasSingleFile = file.exists();
if (hasSingleFile) {
channel = new RandomAccessFile(file, "rw").getChannel();
byte[] magicHeader = validateExistingFile(channel, file.getAbsolutePath());
if (magicHeader != null) {
migrateNonSegmented(magicHeader);
} else {
rebuildIndex();
processFreeEntries();
}
} else if (hasAnyComposedSegmentedFiles()) {
migrateFromComposedSegmentedLoadWriteStore();
} else {
// No existing files
if (!readOnly) {
File dir = file.getParentFile();
if (!(dir.mkdirs() || dir.exists())) {
throw PERSISTENCE.directoryCannotBeCreated(dir.getAbsolutePath());
}
channel = createNewFile(file);
}
}
// Initialize the fragmentation factor
fragmentationFactor = configuration.fragmentationFactor();
} catch (PersistenceException e) {
throw e;
} catch (Throwable t) {
throw new PersistenceException(t);
}
}
private boolean hasAnyComposedSegmentedFiles() {
int numSegments = ctx.getCache().getCacheConfiguration().clustering().hash().numSegments();
for (int segment = 0; segment < numSegments; segment++) {
File segmentFile = getComposedSegmentFile(segment);
if (segmentFile.exists())
return true;
}
return false;
}
/**
* @return The magic bytes from the header or {@code null} if migration is not needed.
*/
private byte[] validateExistingFile(FileChannel fileChannel, String filePath) throws Exception {
// check file format and read persistent state if enabled for the cache
byte[] headerMagic = new byte[MAGIC_LATEST.length];
int headerBytes = fileChannel.read(ByteBuffer.wrap(headerMagic), 0);
if (headerBytes == MAGIC_LATEST.length) {
if (!Arrays.equals(MAGIC_LATEST, headerMagic))
return headerMagic;
} else {
         // Store file has fewer than MAGIC_LATEST.length bytes
throw PERSISTENCE.invalidSingleFileStoreData(filePath);
}
return null;
}
private void migrateNonSegmented(byte[] magicHeader) throws Exception {
PERSISTENCE.startMigratingPersistenceData(cacheName());
File newFile = new File(file.getParentFile(), cacheName() + "_new.dat");
try {
if (newFile.exists()) {
if (log.isTraceEnabled()) log.tracef("Overwriting temporary migration file %s", newFile);
// Delete file to overwrite permissions as well
newFile.delete();
}
try (FileChannel newChannel = createNewFile(newFile)) {
copyEntriesFromOldFile(magicHeader, newChannel, channel, file.toString());
}
//close old file
channel.close();
//replace old file with the new file
Files.move(newFile.toPath(), file.toPath(), StandardCopyOption.REPLACE_EXISTING);
//reopen the file
channel = new RandomAccessFile(file, "rw").getChannel();
PERSISTENCE.persistedDataSuccessfulMigrated(cacheName());
} catch (IOException e) {
throw PERSISTENCE.persistedDataMigrationFailed(cacheName(), e);
}
}
private void copyEntriesFromOldFile(byte[] magicHeader, FileChannel destChannel, FileChannel sourceChannel,
String sourcePath) throws Exception {
if (magicHeader == null) {
// The segment file has the 12.1 magic header
copyEntriesFromV12_0(destChannel, sourceChannel, sourcePath);
} else if (Arrays.equals(MAGIC_12_0, magicHeader)) {
if (ctx.getGlobalConfiguration().serialization().marshaller() == null) {
// ISPN-13128 Upgrading to 12.0 with the default marshaller might have corrupted the file data
copyCorruptDataV12_0(destChannel, sourceChannel, sourcePath);
} else {
// Data is not corrupt
copyEntriesFromV12_0(destChannel, sourceChannel, sourcePath);
}
} else if (Arrays.equals(MAGIC_11_0, magicHeader)) {
copyEntriesFromV11(destChannel, sourceChannel);
} else if (Arrays.equals(MAGIC_BEFORE_11, magicHeader)) {
throw PERSISTENCE.persistedDataMigrationUnsupportedVersion("< 11");
} else {
throw PERSISTENCE.invalidSingleFileStoreData(file.getAbsolutePath());
}
}
private FileChannel createNewFile(File newFile) throws IOException {
FileChannel newChannel = new RandomAccessFile(newFile, "rw").getChannel();
try {
// Write Magic
newChannel.truncate(0);
newChannel.write(ByteBuffer.wrap(MAGIC_LATEST), 0);
filePos = MAGIC_LATEST.length;
return newChannel;
} catch (Throwable t) {
newChannel.close();
throw t;
}
}
@Override
public CompletionStage<Void> stop() {
return ctx.getBlockingManager().runBlocking(this::blockingStop, "sfs-stop");
}
private void blockingStop() {
if (log.isTraceEnabled() && channel != null) {
// Must compute the size before acquiring resizeLock
Long size = CompletionStages.join(approximateSize(IntSets.immutableRangeSet(actualNumSegments)));
log.tracef("Stopping store %s, size = %d, file size = %d", cacheName(), size, filePos);
}
long stamp = resizeLock.writeLock();
try {
if (channel != null) {
// reset state
channel.close();
channel = null;
entries = null;
freeList = null;
}
} catch (Exception e) {
throw new PersistenceException(e);
} finally {
resizeLock.unlockWrite(stamp);
}
}
@Override
public Set<Characteristic> characteristics() {
return EnumSet.of(Characteristic.BULK_READ, Characteristic.EXPIRATION, Characteristic.SEGMENTABLE);
}
@Override
public CompletionStage<Boolean> isAvailable() {
return CompletableFutures.booleanStage(file.exists());
}
/**
* Rebuilds the in-memory index from file.
*/
private void rebuildIndex() throws Exception {
filePos = MAGIC_LATEST.length;
ByteBuffer buf = ByteBuffer.allocate(KEY_POS_LATEST);
for (; ; ) {
// read FileEntry fields from file (size, keyLen etc.)
buf = readChannel(buf, filePos, KEY_POS_LATEST, channel);
// return if end of file is reached
if (buf.remaining() > 0)
break;
buf.flip();
// initialize FileEntry from buffer
FileEntry fe = new FileEntry(filePos, buf);
// sanity check
if (fe.size < KEY_POS_LATEST + fe.keyLen + fe.dataLen + fe.metadataLen + fe.internalMetadataLen) {
throw PERSISTENCE.errorReadingFileStore(file.getPath(), filePos);
}
// update file pointer
filePos += fe.size;
// check if the entry is used or free
if (fe.keyLen > 0) {
// load the key from file
buf = readChannel(buf, fe.offset + KEY_POS_LATEST, fe.keyLen, channel);
// deserialize key and add to entries map
            // The unchecked cast below is safe as long as the marshaller returns the configured key type
K key = (K) ctx.getPersistenceMarshaller().objectFromByteBuffer(buf.array(), 0, fe.keyLen);
// We start by owning all the segments
Map<K, FileEntry> segmentEntries = getSegmentEntries(getSegment(key));
segmentEntries.put(key, fe);
} else {
// add to free list
freeList.add(fe);
}
}
}
private int getSegment(Object key) {
return segmented ? keyPartitioner.getSegment(key) : 0;
}
// Initialise missing internal metadata state for corrupt data
private PrivateMetadata generateMissingInternalMetadata() {
// Optimistic Transactions
AdvancedCache<?, ?> cache = ctx.getCache().getAdvancedCache();
Configuration config = cache.getCacheConfiguration();
TransactionConfiguration txConfig = config.transaction();
PrivateMetadata.Builder builder = new PrivateMetadata.Builder();
if (txConfig.transactionMode() == TransactionMode.TRANSACTIONAL && txConfig.lockingMode() == LockingMode.OPTIMISTIC) {
builder.entryVersion(new SimpleClusteredVersion(1, 1));
}
// Async XSite
if (config.sites().hasAsyncEnabledBackups()) {
String siteName = cache.getRpcManager().getTransport().localSiteName();
IracEntryVersion version = IracEntryVersion.newVersion(XSiteNamedCache.cachedByteString(siteName), TopologyIracVersion.newVersion(1));
builder.iracMetadata(new IracMetadata(siteName, version));
}
return builder.build();
}
private void migrateFromComposedSegmentedLoadWriteStore() throws IOException {
PERSISTENCE.startMigratingPersistenceData(cacheName());
File newFile = new File(file.getParentFile(), cacheName() + "_new.dat");
try {
if (newFile.exists()) {
if (log.isTraceEnabled()) log.tracef("Overwriting temporary migration file %s", newFile);
// Delete file to overwrite permissions as well
newFile.delete();
}
try (FileChannel newChannel = createNewFile(newFile)) {
copyEntriesFromOldSegmentFiles(newChannel);
}
// Move the new file to the final name
Files.move(newFile.toPath(), file.toPath(), StandardCopyOption.REPLACE_EXISTING);
// Reopen the file
channel = new RandomAccessFile(file, "rw").getChannel();
// Remove the composed segment files
removeComposedSegmentedLoadWriteStoreFiles();
PERSISTENCE.persistedDataSuccessfulMigrated(cacheName());
} catch (PersistenceException e) {
throw e;
} catch (Exception e) {
throw PERSISTENCE.persistedDataMigrationFailed(cacheName(), e);
}
}
private void copyEntriesFromOldSegmentFiles(FileChannel newChannel) throws Exception {
int numSegments = ctx.getCache().getCacheConfiguration().clustering().hash().numSegments();
for (int segment = 0; segment < numSegments; segment++) {
File segmentFile = getComposedSegmentFile(segment);
if (segmentFile.exists()) {
try (FileChannel segmentChannel = new RandomAccessFile(segmentFile, "rw").getChannel()) {
byte[] magic = validateExistingFile(segmentChannel, segmentFile.toString());
copyEntriesFromOldFile(magic, newChannel, segmentChannel, segmentFile.toString());
}
}
}
}
private void removeComposedSegmentedLoadWriteStoreFiles() {
Path rootLocation = PersistenceUtil.getLocation(ctx.getGlobalConfiguration(), configuration.location());
if (log.isTraceEnabled()) log.tracef("Removing old ComposedSegmentedLoadWriteStore files from %s", rootLocation);
int numSegments = ctx.getCache().getCacheConfiguration().clustering().hash().numSegments();
for (int segment = 0; segment < numSegments; segment++) {
File segmentFile = getComposedSegmentFile(segment);
if (segmentFile.exists()) {
segmentFile.delete();
File parentDir = segmentFile.getParentFile();
if (parentDir != null && parentDir.isDirectory() && parentDir.list().length == 0) {
// The segment directory is empty, remove it
parentDir.delete();
}
}
}
}
private File getComposedSegmentFile(int segment) {
return segmentFileLocation(ctx.getGlobalConfiguration(), configuration.location(), cacheName(), segment);
}
private static File segmentFileLocation(GlobalConfiguration globalConfiguration, String location,
String cacheName, int segment) {
Path rootPath = PersistenceUtil.getLocation(globalConfiguration, location);
String segmentPath = AbstractSegmentedStoreConfiguration.fileLocationTransform(rootPath.toString(), segment);
return getStoreFile(segmentPath, cacheName);
}
private void copyEntriesFromV12_0(FileChannel destChannel, FileChannel sourceChannel, String sourcePath) throws Exception {
try {
long currentTs = timeService.wallClockTime();
ByteBuffer buf = ByteBuffer.allocate(KEY_POS_LATEST);
ByteBuffer bodyBuf = ByteBuffer.allocate(KEY_POS_LATEST);
long oldFilePos = MAGIC_12_0.length;
while (true) {
// read FileEntry fields from file (size, keyLen etc.)
buf = readChannel(buf, oldFilePos, KEY_POS_11_0, sourceChannel);
if (buf.remaining() > 0)
break;
buf.flip();
// initialize FileEntry from buffer
FileEntry oldFe = new FileEntry(oldFilePos, buf);
// sanity check
if (oldFe.size < KEY_POS_11_0 + oldFe.keyLen + oldFe.dataLen + oldFe.metadataLen + oldFe.internalMetadataLen) {
throw PERSISTENCE.errorReadingFileStore(file.getPath(), oldFilePos);
}
//update old file pos to the next entry
oldFilePos += oldFe.size;
// check if the entry is used or free
// if it is free, it is ignored.
if (oldFe.keyLen < 1)
continue;
// The entry has already expired, so avoid writing to the new file
if (oldFe.expiryTime > 0 && oldFe.expiryTime < currentTs)
continue;
// Read the body of the entry, skipping the fixed header
bodyBuf = allocate(bodyBuf, oldFe.size - KEY_POS_LATEST);
readChannel(bodyBuf, oldFe.offset + KEY_POS_LATEST, oldFe.size - KEY_POS_LATEST, sourceChannel);
K key = (K) ctx.getPersistenceMarshaller().objectFromByteBuffer(bodyBuf.array(), 0, oldFe.keyLen);
// Update the entry with the destination filePos
FileEntry newFe = new FileEntry(this.filePos, oldFe.size, oldFe.keyLen, oldFe.dataLen, oldFe.metadataLen, oldFe.internalMetadataLen, oldFe.expiryTime);
// Put the updated entry in the entries map so we don't need to rebuild the index later
Map<K, FileEntry> segmentEntries = getSegmentEntries(getSegment(key));
segmentEntries.put(key, newFe);
buf.flip();
destChannel.write(buf, this.filePos);
bodyBuf.flip();
destChannel.write(bodyBuf, this.filePos + KEY_POS_LATEST);
this.filePos += newFe.size;
if (log.isTraceEnabled())
log.tracef("Recovered entry %s at %d:%d", key, newFe.size, newFe.offset, newFe.size);
}
} catch (IOException e) {
throw PERSISTENCE.persistedDataMigrationFailed(cacheName(), e);
}
}
private String cacheName() {
return ctx.getCache().getName();
}
private void copyCorruptDataV12_0(FileChannel destChannel, FileChannel sourceChannel, String sourcePath) {
PERSISTENCE.startRecoveringCorruptPersistenceData(sourcePath);
      // Sanity threshold: the day before the release of Infinispan 10.0.0.Final,
      // the first release from which SFS data can be migrated on startup (ISPN 10 -> 11 -> 12)
long sanityEpoch = LocalDate.of(2019, 10, 26)
.atStartOfDay()
.atZone(ZoneId.systemDefault())
.toInstant()
.toEpochMilli();
long currentTs = timeService.wallClockTime();
int entriesRecovered = 0;
ByteBuffer buf = ByteBuffer.allocate(KEY_POS_LATEST);
ByRef<ByteBuffer> bufRef = ByRef.create(buf);
try {
long fileSize = sourceChannel.size();
long oldFilePos = MAGIC_12_0.length;
while (true) {
buf = readChannel(buf, oldFilePos, KEY_POS_LATEST, sourceChannel);
// EOF reached
if (buf.remaining() > 0)
break;
buf.flip();
FileEntry fe = new FileEntry(oldFilePos, buf);
// Semantic check to find valid FileEntry
// fe.keyLen = 0 is valid, however it means we should skip the entry
if (fe.size <= 0 || fe.expiryTime < -1 ||
fe.keyLen <= 0 || fe.keyLen > fe.size ||
fe.dataLen <= 0 || fe.dataLen > fe.size ||
fe.metadataLen < 0 || fe.metadataLen > fe.size ||
fe.internalMetadataLen < 0 || fe.internalMetadataLen > fe.size) {
// Check failed, try to read FileEntry from the next byte
oldFilePos++;
continue;
}
// Extra check to prevent buffers being created that exceed the remaining number of bytes in the file
long estimateSizeExcludingInternal = fe.keyLen;
estimateSizeExcludingInternal += fe.dataLen;
estimateSizeExcludingInternal += fe.metadataLen;
if (estimateSizeExcludingInternal > fileSize - oldFilePos) {
oldFilePos++;
continue;
}
K key;
V value;
Metadata metadata = null;
ByRef.Long offset = new ByRef.Long(oldFilePos + KEY_POS_LATEST);
bufRef.set(buf);
try {
// Read old entry content and then write
key = unmarshallObject(bufRef, offset, fe.keyLen, sourceChannel);
value = unmarshallObject(bufRef, offset, fe.dataLen, sourceChannel);
int metaLen = fe.metadataLen > 0 ? fe.metadataLen - TIMESTAMP_BYTES : 0;
if (metaLen > 0)
metadata = unmarshallObject(bufRef, offset, metaLen, sourceChannel);
// Entries successfully unmarshalled so it's safe to increment oldFilePos to FileEntry+offset so brute-force can resume on next iteration
oldFilePos = offset.get();
            } catch (Throwable t) {
               // Must have been a false-positive FileEntry. Increment oldFilePos by 1 byte and retry
oldFilePos++;
continue;
} finally {
buf = bufRef.get();
}
long created = -1;
long lastUsed = -1;
if (fe.metadataLen > 0 && fe.expiryTime > 0) {
buf = readChannelUpdateOffset(buf, offset, TIMESTAMP_BYTES, sourceChannel);
buf.flip();
// Try to read timestamps. If corrupt data then this could be nonsense
created = buf.getLong();
lastUsed = buf.getLong();
// If the Timestamps are in the future or < sanityEpoch, then we're migrating corrupt data so set the value to current wallClockTime
if (created != -1 && (created > currentTs || created < sanityEpoch)) {
long lifespan = metadata.lifespan();
created = lifespan > 0 ? fe.expiryTime - lifespan : currentTs;
}
if (lastUsed != -1 && (lastUsed > currentTs || lastUsed < sanityEpoch)) {
long maxIdle = metadata.maxIdle();
lastUsed = maxIdle > 0 ? fe.expiryTime - maxIdle : currentTs;
}
oldFilePos = offset.get();
}
PrivateMetadata internalMeta = null;
if (fe.internalMetadataLen > 0) {
try {
bufRef.set(buf);
internalMeta = unmarshallObject(bufRef, offset, fe.internalMetadataLen, sourceChannel);
oldFilePos = offset.get();
} catch (Throwable t) {
// Will fail if data is corrupt as PrivateMetadata doesn't exist
internalMeta = generateMissingInternalMetadata();
} finally {
buf = bufRef.get();
}
}
// Last expiration check before writing as expiryTime is considered good now
// This check is required as write below doesn't verify expiration or not and
// just creates a new expiryTime.
if (fe.expiryTime > 0 && fe.expiryTime < currentTs) {
continue;
}
MarshallableEntry<? extends K, ? extends V> me = (MarshallableEntry<? extends K, ? extends V>) ctx.getMarshallableEntryFactory().create(key, value, metadata, internalMeta, created, lastUsed);
write(getSegment(key), me, destChannel);
entriesRecovered++;
}
if (log.isTraceEnabled()) log.tracef("Recovered %d entries", entriesRecovered);
} catch (IOException e) {
throw PERSISTENCE.corruptDataMigrationFailed(cacheName(), e);
}
}
private void copyEntriesFromV11(FileChannel destChannel, FileChannel sourceChannel) {
long oldFilePos = MAGIC_11_0.length;
// Only update the key/value/meta bytes if the default marshaller is configured
boolean wrapperMissing = ctx.getGlobalConfiguration().serialization().marshaller() == null;
try {
long currentTs = timeService.wallClockTime();
ByteBuffer buf = ByteBuffer.allocate(KEY_POS_LATEST);
ByRef<ByteBuffer> bufRef = ByRef.create(buf);
for (; ; ) {
// read FileEntry fields from file (size, keyLen etc.)
buf = readChannel(buf, oldFilePos, KEY_POS_11_0, sourceChannel);
if (buf.remaining() > 0)
break;
buf.flip();
// initialize FileEntry from buffer
FileEntry oldFe = new FileEntry(oldFilePos, buf);
// sanity check
if (oldFe.size < KEY_POS_11_0 + oldFe.keyLen + oldFe.dataLen + oldFe.metadataLen + oldFe.internalMetadataLen) {
throw PERSISTENCE.errorReadingFileStore(file.getPath(), oldFilePos);
}
//update old file pos to the next entry
oldFilePos += oldFe.size;
// check if the entry is used or free
// if it is free, it is ignored.
if (oldFe.keyLen < 1)
continue;
// The entry has already expired, so avoid writing to the new file
if (oldFe.expiryTime > 0 && oldFe.expiryTime < currentTs)
continue;
ByRef.Long offset = new ByRef.Long(oldFe.offset + KEY_POS_11_0);
long created = -1;
long lastUsed = -1;
bufRef.set(buf);
K key = unmarshallObject(bufRef, offset, oldFe.keyLen, wrapperMissing, sourceChannel);
V value = unmarshallObject(bufRef, offset, oldFe.dataLen, wrapperMissing, sourceChannel);
Metadata metadata = null;
if (oldFe.metadataLen > 0) {
metadata = unmarshallObject(bufRef, offset, oldFe.metadataLen - TIMESTAMP_BYTES, wrapperMissing, sourceChannel);
if (oldFe.expiryTime > 0) {
buf = bufRef.get();
buf = readChannelUpdateOffset(buf, offset, TIMESTAMP_BYTES, sourceChannel);
buf.flip();
created = buf.getLong();
lastUsed = buf.getLong();
bufRef.set(buf);
}
}
PrivateMetadata internalMeta = null;
if (oldFe.internalMetadataLen > 0) {
internalMeta = unmarshallObject(bufRef, offset, oldFe.internalMetadataLen, wrapperMissing, sourceChannel);
buf = bufRef.get();
}
MarshallableEntry<? extends K, ? extends V> me = (MarshallableEntry<? extends K, ? extends V>) ctx.getMarshallableEntryFactory()
.create(key, value, metadata, internalMeta, created, lastUsed);
write(getSegment(key), me, destChannel);
}
} catch (IOException | ClassNotFoundException e) {
throw PERSISTENCE.persistedDataMigrationFailed(cacheName(), e);
}
}
private <T> T unmarshallObject(ByRef<ByteBuffer> buf, ByRef.Long offset, int length, FileChannel sourceChannel) throws ClassNotFoundException, IOException {
return unmarshallObject(buf, offset, length, false, sourceChannel);
}
@SuppressWarnings("unchecked")
private <T> T unmarshallObject(ByRef<ByteBuffer> bufRef, ByRef.Long offset, int length, boolean legacyWrapperMissing,
FileChannel sourceChannel) throws ClassNotFoundException, IOException {
ByteBuffer buf = bufRef.get();
buf = readChannelUpdateOffset(buf, offset, length, sourceChannel);
byte[] bytes = buf.array();
bufRef.set(buf);
PersistenceMarshaller persistenceMarshaller = ctx.getPersistenceMarshaller();
if (legacyWrapperMissing) {
// Read using raw user marshaller without MarshallUserObject wrapping
Marshaller marshaller = persistenceMarshaller.getUserMarshaller();
try {
return (T) marshaller.objectFromByteBuffer(bytes, 0, length);
} catch (IllegalArgumentException e) {
// For internal cache key/values and custom metadata we need to use the persistence marshaller
return (T) persistenceMarshaller.objectFromByteBuffer(bytes, 0, length);
}
}
      return (T) persistenceMarshaller.objectFromByteBuffer(bytes, 0, length);
}
private ByteBuffer readChannelUpdateOffset(ByteBuffer buf, ByRef.Long offset, int length, FileChannel sourceChannel) throws IOException {
return readChannel(buf, offset.getAndAdd(length), length, sourceChannel);
}
private ByteBuffer readChannel(ByteBuffer buf, long offset, int length, FileChannel channel) throws IOException {
buf = allocate(buf, length);
channel.read(buf, offset);
return buf;
}
private ByteBuffer allocate(ByteBuffer buf, int length) {
buf.flip();
if (buf.capacity() < length) {
buf = ByteBuffer.allocate(length);
}
buf.clear().limit(length);
return buf;
}
/**
    * The base class implementation calls {@link #load(int, Object)} for this; we can do better
    * because we keep all keys in memory.
*/
@Override
public CompletionStage<Boolean> containsKey(int segment, Object key) {
// Avoid switching threads if there is nothing to load
long stamp = resizeLock.tryReadLock();
if (stamp != 0) {
// Acquires the FileEntry lock and releases the read lock
FileEntry fe = getFileEntryWithReadLock(segment, key, stamp, false);
return CompletableFutures.booleanStage(fe != null);
}
// Someone is holding the write lock
return blockingManager.supplyBlocking(() -> blockingContainsKey(segment, key), "sfs-containsKey");
}
private boolean blockingContainsKey(int segment, Object key) {
long stamp = resizeLock.readLock();
FileEntry fe = getFileEntryWithReadLock(segment, key, stamp, false);
return fe != null;
}
/**
* Allocates the requested space in the file.
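    * <p/>
    * The free list is sorted by size, so {@code freeList.tailSet(new FileEntry(0, len))}
    * only visits candidates at least {@code len} bytes long; the first unlocked candidate
    * is reused (and split when it is significantly larger), otherwise the entry is
    * appended at the current end of the file.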
*
* @param len requested space
* @return allocated file position and length as FileEntry object
*/
@GuardedBy("resizeLock.readLock()")
private FileEntry allocate(int len) {
synchronized (freeList) {
// lookup a free entry of sufficient size
SortedSet<FileEntry> candidates = freeList.tailSet(new FileEntry(0, len));
for (Iterator<FileEntry> it = candidates.iterator(); it.hasNext(); ) {
FileEntry free = it.next();
// ignore entries that are still in use by concurrent readers
if (free.isLocked())
continue;
// There's no race condition risk between locking the entry on
// loading and checking whether it's locked (or store allocation),
// because for the entry to be lockable, it needs to be in the
// entries collection, in which case it's not in the free list.
// The only way an entry can be found in the free list is if it's
// been removed, and to remove it, lock on "entries" needs to be
// acquired, which is also a pre-requisite for loading data.
// found one, remove from freeList
it.remove();
return allocateExistingEntry(free, len);
}
// no appropriate free section available, append at end of file
FileEntry fe = new FileEntry(filePos, len);
filePos += len;
if (log.isTraceEnabled()) log.tracef("New entry allocated at %d:%d, %d free entries, file size is %d", fe.offset, fe.size, freeList.size(), filePos);
return fe;
}
}
private FileEntry allocateExistingEntry(FileEntry free, int len) {
int remainder = free.size - len;
      // If the free entry is sufficiently larger than the requested size, split it
if ((remainder >= SMALLEST_ENTRY_SIZE) && (len <= (free.size * fragmentationFactor))) {
try {
// Add remainder of the space as a fileEntry
FileEntry newFreeEntry = new FileEntry(free.offset + len, remainder);
addNewFreeEntry(newFreeEntry);
FileEntry newEntry = new FileEntry(free.offset, len);
if (log.isTraceEnabled()) log.tracef("Split entry at %d:%d, allocated %d:%d, free %d:%d, %d free entries",
free.offset, free.size, newEntry.offset, newEntry.size, newFreeEntry.offset, newFreeEntry.size,
freeList.size());
return newEntry;
} catch (IOException e) {
throw new PersistenceException("Cannot add new free entry", e);
}
}
if (log.isTraceEnabled()) log.tracef("Existing free entry allocated at %d:%d, %d free entries", free.offset, free.size, freeList.size());
return free;
}
/**
* Writes a new free entry to the file and also adds it to the free list
*/
private void addNewFreeEntry(FileEntry fe) throws IOException {
ByteBuffer buf = ByteBuffer.allocate(KEY_POS_LATEST);
buf.putInt(fe.size);
buf.putInt(0);
buf.putInt(0);
buf.putInt(0);
buf.putInt(0);
buf.putLong(-1);
buf.flip();
channel.write(buf, fe.offset);
freeList.add(fe);
}
/**
* Frees the space of the specified file entry (for reuse by allocate).
* <p/>
* Note: Caller must hold the {@code resizeLock} in shared mode.
*/
private void free(FileEntry fe) throws IOException {
if (fe != null) {
// Wait for any reader to finish
fe.waitUnlocked();
// Invalidate entry on disk (by setting keyLen field to 0)
// No need to wait for readers to unlock here, the FileEntry instance is not modified,
// and allocate() won't return an entry as long as it has a reader.
channel.write(ByteBuffer.wrap(ZERO_INT), fe.offset + KEYLEN_POS);
if (!freeList.add(fe)) {
throw new IllegalStateException(String.format("Trying to free an entry that was not allocated: %s", fe));
}
if (log.isTraceEnabled()) log.tracef("Deleted entry at %d:%d, there are now %d free entries", fe.offset, fe.size, freeList.size());
}
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> marshalledEntry) {
return blockingManager.runBlocking(() -> blockingWrite(segment, marshalledEntry), "sfs-write");
}
private void blockingWrite(int segment, MarshallableEntry<? extends K, ? extends V> marshalledEntry) {
write(segment, marshalledEntry, channel);
}
private void write(int segment, MarshallableEntry<? extends K, ? extends V> marshalledEntry, FileChannel channel) {
// serialize cache value
org.infinispan.commons.io.ByteBuffer key = marshalledEntry.getKeyBytes();
org.infinispan.commons.io.ByteBuffer data = marshalledEntry.getValueBytes();
org.infinispan.commons.io.ByteBuffer metadata = marshalledEntry.getMetadataBytes();
org.infinispan.commons.io.ByteBuffer internalMetadata = marshalledEntry.getInternalMetadataBytes();
// allocate file entry and store in cache file
int metadataLength = metadata == null ? 0 : metadata.getLength() + TIMESTAMP_BYTES;
int internalMetadataLength = internalMetadata == null ? 0 : internalMetadata.getLength();
int len = KEY_POS_LATEST + key.getLength() + data.getLength() + metadataLength + internalMetadataLength;
long stamp = resizeLock.readLock();
try {
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries == null) {
// We don't own the segment
return;
}
FileEntry newEntry = allocate(len);
newEntry = new FileEntry(newEntry.offset, newEntry.size, key.getLength(), data.getLength(), metadataLength, internalMetadataLength, marshalledEntry.expiryTime());
ByteBuffer buf = ByteBuffer.allocate(len);
newEntry.writeToBuf(buf);
buf.put(key.getBuf(), key.getOffset(), key.getLength());
buf.put(data.getBuf(), data.getOffset(), data.getLength());
if (metadata != null) {
buf.put(metadata.getBuf(), metadata.getOffset(), metadata.getLength());
// Only write created & lastUsed if expiryTime is set
if (newEntry.expiryTime > 0) {
buf.putLong(marshalledEntry.created());
buf.putLong(marshalledEntry.lastUsed());
}
}
if (internalMetadata != null) {
buf.put(internalMetadata.getBuf(), internalMetadata.getOffset(), internalMetadata.getLength());
}
buf.flip();
channel.write(buf, newEntry.offset);
if (log.isTraceEnabled())
log.tracef("Wrote entry %s:%d at %d:%d", marshalledEntry.getKey(), len, newEntry.offset, newEntry.size);
// add the new entry to in-memory index
FileEntry oldEntry = segmentEntries.put(marshalledEntry.getKey(), newEntry);
// if we added an entry, check if we need to evict something
if (oldEntry == null)
oldEntry = evict();
// in case we replaced or evicted an entry, add to freeList
free(oldEntry);
} catch (Exception e) {
throw new PersistenceException(e);
} finally {
resizeLock.unlockRead(stamp);
}
}
/**
* Try to evict an entry if the capacity of the cache store is reached.
*
* @return FileEntry to evict, or null (if unbounded or capacity is not yet reached)
*/
@GuardedBy("resizeLock#readLock")
private FileEntry evict() {
if (maxEntries > 0) {
// When eviction is enabled, segmentation is disabled
Map<K, FileEntry> segment0Entries = getSegmentEntries(0);
synchronized (segment0Entries) {
if (segment0Entries.size() > maxEntries) {
Iterator<FileEntry> it = segment0Entries.values().iterator();
FileEntry fe = it.next();
it.remove();
return fe;
}
}
}
return null;
}
@Override
public CompletionStage<Void> clear() {
return blockingManager.runBlocking(this::blockingClear, "sfs-clear");
}
private void blockingClear() {
long stamp = resizeLock.writeLock();
try {
// Wait until all readers are done reading all file entries
// First, used entries
for (Map<K, FileEntry> segmentEntries : entries) {
if (segmentEntries == null)
continue;
synchronized (segmentEntries) {
for (FileEntry fe : segmentEntries.values())
fe.waitUnlocked();
segmentEntries.clear();
}
}
// Then free entries that others might still be reading
synchronized (freeList) {
for (FileEntry fe : freeList)
fe.waitUnlocked();
// clear in-memory state
freeList.clear();
}
// All readers are done, reset file
if (log.isTraceEnabled()) log.tracef("Truncating file, current size is %d", filePos);
channel.truncate(4);
channel.write(ByteBuffer.wrap(MAGIC_LATEST), 0);
filePos = MAGIC_LATEST.length;
} catch (Exception e) {
throw new PersistenceException(e);
} finally {
resizeLock.unlockWrite(stamp);
}
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
long stamp = resizeLock.tryReadLock();
if (stamp != 0) {
FileEntry fe = deleteWithReadLock(segment, key);
if (fe == null) {
resizeLock.unlockRead(stamp);
return CompletableFutures.completedFalse();
}
return blockingManager.supplyBlocking(() -> deleteInFile(stamp, fe), "sfs-delete");
}
return blockingManager.supplyBlocking(() -> blockingDelete(segment, key), "sfs-delete");
}
private boolean blockingDelete(int segment, Object key) {
long stamp = resizeLock.readLock();
FileEntry fe = deleteWithReadLock(segment, key);
return deleteInFile(stamp, fe);
}
/**
* Mark the entry as deleted on disk and release the read lock.
*/
private boolean deleteInFile(long stamp, FileEntry fe) {
try {
free(fe);
return fe != null;
} catch (Exception e) {
throw new PersistenceException(e);
} finally {
resizeLock.unlockRead(stamp);
}
}
private FileEntry deleteWithReadLock(int segment, Object key) {
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries == null) {
// We don't own the segment
return null;
}
return segmentEntries.remove(key);
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
// Avoid switching threads if there is nothing to load
long stamp = resizeLock.tryReadLock();
if (stamp != 0) {
// Acquires the FileEntry lock and releases the read lock
FileEntry fe = getFileEntryWithReadLock(segment, key, stamp, true);
if (fe == null) {
return CompletableFutures.completedNull();
}
// Perform the actual read holding only the FileEntry lock
return blockingManager.supplyBlocking(() -> readFromDisk(fe, key, true, true), "sfs-load");
}
// Someone is holding the write lock
return blockingManager.supplyBlocking(() -> blockingLoad(segment, key, true, true), "sfs-load");
}
private MarshallableEntry<K, V> blockingLoad(int segment, Object key, boolean loadValue, boolean loadMetadata) {
long stamp = resizeLock.readLock();
FileEntry fe = getFileEntryWithReadLock(segment, key, stamp, true);
if (fe == null)
return null;
// Perform the actual read holding only the FileEntry lock
return readFromDisk(fe, key, loadValue, loadMetadata);
}
/**
* Get the file entry from the segment map and release the read lock
*/
private FileEntry getFileEntryWithReadLock(int segment, Object key, long stamp, boolean lockFileEntry) {
final FileEntry fe;
try {
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries == null)
return null;
synchronized (segmentEntries) {
// lookup FileEntry of the key
fe = segmentEntries.get(key);
if (fe == null)
return null;
            // Entries are removed due to expiration from {@link SingleFileStore#purgeExpired}
if (fe.isExpired(timeService.wallClockTime())) {
return null;
} else if (lockFileEntry) {
// lock entry for reading before releasing entries monitor
fe.lock();
}
}
} finally {
resizeLock.unlockRead(stamp);
}
return fe;
}
private MarshallableEntry<K, V> readFromDisk(FileEntry fe, Object key, boolean loadValue, boolean loadMetadata) {
org.infinispan.commons.io.ByteBuffer valueBb = null;
// If we only require the key, then no need to read disk
if (!loadValue && !loadMetadata) {
try {
return entryFactory.create(key);
} finally {
fe.unlock();
}
}
final byte[] data;
try {
// load serialized data from disk
data = new byte[fe.keyLen + fe.dataLen + (loadMetadata ? fe.metadataLen + fe.internalMetadataLen : 0)];
// The entry lock will prevent clear() from truncating the file at this point
channel.read(ByteBuffer.wrap(data), fe.offset + KEY_POS_LATEST);
} catch (Exception e) {
throw new PersistenceException(e);
} finally {
// No need to keep the lock for deserialization.
// FileEntry is immutable, so its members can't be changed by another thread.
fe.unlock();
}
if (log.isTraceEnabled()) log.tracef("Read entry %s at %d:%d", key, fe.offset, fe.actualSize());
ByteBufferFactory factory = ctx.getByteBufferFactory();
org.infinispan.commons.io.ByteBuffer keyBb = factory.newByteBuffer(data, 0, fe.keyLen);
if (loadValue) {
valueBb = factory.newByteBuffer(data, fe.keyLen, fe.dataLen);
}
if (loadMetadata) {
long created = -1;
long lastUsed = -1;
org.infinispan.commons.io.ByteBuffer metadataBb = null;
org.infinispan.commons.io.ByteBuffer internalMetadataBb = null;
int offset = fe.keyLen + fe.dataLen;
if (fe.metadataLen > 0) {
int metaLength = fe.metadataLen - TIMESTAMP_BYTES;
metadataBb = factory.newByteBuffer(data, offset, metaLength);
offset += metaLength;
ByteBuffer buffer = ByteBuffer.wrap(data, offset, TIMESTAMP_BYTES);
if (fe.expiryTime > 0) {
offset += TIMESTAMP_BYTES;
created = buffer.getLong();
lastUsed = buffer.getLong();
}
}
if (fe.internalMetadataLen > 0) {
internalMetadataBb = factory.newByteBuffer(data, offset, fe.internalMetadataLen);
}
return entryFactory.create(keyBb, valueBb, metadataBb, internalMetadataBb, created, lastUsed);
}
return entryFactory.create(keyBb, valueBb);
}
/**
* @return The entries of a segment, or {@code null} if the segment is not owned
*/
@GuardedBy("resizeLock")
private Map<K, FileEntry> getSegmentEntries(int segment) {
if (!segmented) {
return entries[0];
}
// Segmented
if (actualNumSegments <= segment) {
throw new IndexOutOfBoundsException();
}
return entries[segment];
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
if (!segmented) {
return publishSegmentKeys(k -> keyMatches(segments, filter, k), 0);
}
return Flowable.fromIterable(segments)
.concatMap(segment -> publishSegmentKeys(filter, segment));
}
private Publisher<K> publishSegmentKeys(Predicate<? super K> filter, int segment) {
long stamp = resizeLock.tryReadLock();
if (stamp != 0) {
return publishSegmentKeysWithReadLock(filter, segment, stamp);
} else {
return blockingManager.blockingPublisher(defer(() -> {
long stamp1 = resizeLock.readLock();
return publishSegmentKeysWithReadLock(filter, segment, stamp1);
}));
}
}
private boolean keyMatches(IntSet segments, Predicate<? super K> filter, K k) {
return segments.contains(keyPartitioner.getSegment(k)) && (filter == null || filter.test(k));
}
private Flowable<K> publishSegmentKeysWithReadLock(Predicate<? super K> filter, int segment, long stamp) {
try {
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries == null) {
return Flowable.empty();
}
List<K> keys;
long now = ctx.getTimeService().wallClockTime();
synchronized (segmentEntries) {
keys = new ArrayList<>(segmentEntries.size());
for (Map.Entry<K, FileEntry> e : segmentEntries.entrySet()) {
K key = e.getKey();
if (e.getValue().isExpired(now))
continue;
if (filter != null && !filter.test(key))
continue;
keys.add(key);
}
}
return Flowable.fromIterable(keys);
} finally {
resizeLock.unlockRead(stamp);
}
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter,
boolean includeValues) {
if (!segmented) {
return publishSegmentEntries(0, k -> keyMatches(segments, filter, k), includeValues);
}
return Flowable.fromIterable(segments)
.concatMap(segment -> publishSegmentEntries(segment, filter, includeValues));
}
private Publisher<MarshallableEntry<K, V>> publishSegmentEntries(int segment, Predicate<? super K> filter,
boolean includeValues) {
long stamp = resizeLock.tryReadLock();
if (stamp != 0 && getSegmentEntries(segment) == null) {
resizeLock.unlockRead(stamp);
return Flowable.empty();
}
      return blockingManager.blockingPublisher(defer(
            () -> blockingPublishSegmentEntries(segment, filter, includeValues, stamp)));
}
private Flowable<MarshallableEntry<K, V>> blockingPublishSegmentEntries(int segment, Predicate<? super K> filter,
boolean includeValues, long stamp) {
List<KeyValuePair<K, FileEntry>> keysToLoad;
long now = ctx.getTimeService().wallClockTime();
if (stamp == 0) {
// tryReadLock() did not acquire the lock
stamp = resizeLock.readLock();
}
try {
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries == null) {
return Flowable.empty();
}
synchronized (segmentEntries) {
// This way the sorting of entries is lazily done on each invocation of the publisher
keysToLoad = new ArrayList<>(segmentEntries.size());
for (Map.Entry<K, FileEntry> e : segmentEntries.entrySet()) {
if (e.getValue().isExpired(now))
continue;
if (filter != null && !filter.test(e.getKey()))
continue;
keysToLoad.add(new KeyValuePair<>(e.getKey(), e.getValue()));
}
}
} finally {
resizeLock.unlockRead(stamp);
}
keysToLoad.sort(Comparator.comparingLong(o -> o.getValue().offset));
return Flowable.fromIterable(keysToLoad).map(kvp -> {
MarshallableEntry<K, V> entry = blockingLoad(segment, kvp.getKey(), includeValues, true);
if (entry == null) {
// Rxjava2 doesn't allow nulls
entry = entryFactory.getEmpty();
}
return entry;
}).filter(me -> me != entryFactory.getEmpty());
}
/**
    * Optimizes disk usage by truncating free space at the end of the file and merging
    * adjacent free entries.
*/
private void processFreeEntries() {
long stamp = resizeLock.readLock();
try {
synchronized (freeList) {
// Get a reverse sorted list of free entries based on file offset (bigger entries will be ahead of smaller entries)
// This helps to work backwards with free entries at end of the file
List<FileEntry> l = new ArrayList<>(freeList);
l.sort(Comparator.comparingLong(fe -> -fe.offset));
truncateFile(l);
mergeFreeEntries(l);
}
} finally {
resizeLock.unlockRead(stamp);
}
}
/**
* Removes free entries towards the end of the file and truncates the file.
*/
private void truncateFile(List<FileEntry> entries) {
long startTime = 0;
if (log.isTraceEnabled()) startTime = timeService.wallClockTime();
int reclaimedSpace = 0;
int removedEntries = 0;
long truncateOffset = -1;
for (ListIterator<FileEntry> it = entries.listIterator(); it.hasNext(); ) {
FileEntry fe = it.next();
         // While there are free entries at the end of the file, remove them
         // and contract the file to release disk space.
if (!fe.isLocked() && ((fe.offset + fe.size) == filePos)) {
truncateOffset = fe.offset;
filePos = fe.offset;
freeList.remove(fe);
// Removing the entry would require moving all the elements, which is expensive
it.set(null);
reclaimedSpace += fe.size;
removedEntries++;
} else {
break;
}
}
if (truncateOffset > 0) {
try {
channel.truncate(truncateOffset);
} catch (IOException e) {
throw new PersistenceException("Error while truncating file", e);
}
}
if (log.isTraceEnabled()) {
log.tracef("Removed entries: %d, Reclaimed Space: %d, Free Entries %d", removedEntries, reclaimedSpace, freeList.size());
log.tracef("Time taken for truncateFile: %d (ms)", timeService.wallClockTime() - startTime);
}
}
/**
* Coalesces adjacent free entries to create larger free entries (so that the probability of finding a free entry during allocation increases)
*/
private void mergeFreeEntries(List<FileEntry> entries) {
long startTime = 0;
if (log.isTraceEnabled()) startTime = timeService.wallClockTime();
FileEntry lastEntry = null;
FileEntry newEntry = null;
int mergeCounter = 0;
for (FileEntry fe : entries) {
// truncateFile sets entries to null instead of removing them
if (fe == null || fe.isLocked())
continue;
// Merge any holes created (consecutive free entries) in the file
if ((lastEntry != null) && (lastEntry.offset == (fe.offset + fe.size))) {
if (newEntry == null) {
newEntry = new FileEntry(fe.offset, fe.size + lastEntry.size);
freeList.remove(lastEntry);
mergeCounter++;
} else {
newEntry = new FileEntry(fe.offset, fe.size + newEntry.size);
}
freeList.remove(fe);
mergeCounter++;
} else {
if (newEntry != null) {
mergeAndLogEntry(newEntry, mergeCounter);
newEntry = null;
mergeCounter = 0;
}
}
lastEntry = fe;
}
if (newEntry != null)
mergeAndLogEntry(newEntry, mergeCounter);
      if (log.isTraceEnabled()) log.tracef("Total time taken for mergeFreeEntries: %d (ms)", timeService.wallClockTime() - startTime);
}
private void mergeAndLogEntry(FileEntry entry, int mergeCounter) {
try {
addNewFreeEntry(entry);
if (log.isTraceEnabled()) log.tracef("Merged %d entries at %d:%d, %d free entries", mergeCounter, entry.offset, entry.size, freeList.size());
} catch (IOException e) {
throw new PersistenceException("Could not add new merged entry", e);
}
}
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
UnicastProcessor<MarshallableEntry<K, V>> processor = UnicastProcessor.create();
blockingManager.runBlocking(() -> blockingPurgeExpired(processor), "sfs-purgeExpired");
return processor;
}
private void blockingPurgeExpired(UnicastProcessor<MarshallableEntry<K, V>> processor) {
try {
long now = timeService.wallClockTime();
for (int segment = 0; segment < actualNumSegments; segment++) {
List<KeyValuePair<Object, FileEntry>> entriesToPurge;
long stamp = resizeLock.readLock();
try {
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries == null)
continue;
entriesToPurge = collectExpiredEntries(now, segmentEntries);
} finally {
resizeLock.unlockRead(stamp);
}
purgeExpiredEntries(now, processor, entriesToPurge);
}
// Disk space optimizations
processFreeEntries();
} catch (Throwable t) {
processor.onError(t);
} finally {
processor.onComplete();
}
}
private void purgeExpiredEntries(long now, UnicastProcessor<MarshallableEntry<K, V>> processor,
List<KeyValuePair<Object, FileEntry>> entriesToPurge) {
entriesToPurge.sort(Comparator.comparingLong(kvp -> kvp.getValue().offset));
for (ListIterator<KeyValuePair<Object, FileEntry>> it = entriesToPurge.listIterator(); it.hasNext(); ) {
KeyValuePair<Object, FileEntry> next = it.next();
FileEntry fe = next.getValue();
if (fe.isExpired(now)) {
it.set(null);
            // Safe to read and unlock: the entry was locked in collectExpiredEntries()
MarshallableEntry<K, V> entry = readFromDisk(fe, next.getKey(), true, true);
processor.onNext(entry);
try {
free(fe);
} catch (Exception e) {
throw new PersistenceException(e);
}
}
}
}
@GuardedBy("resizeLock")
private List<KeyValuePair<Object, FileEntry>> collectExpiredEntries(long now, Map<K, FileEntry> segmentEntries) {
List<KeyValuePair<Object, FileEntry>> entriesToPurge = new ArrayList<>();
synchronized (segmentEntries) {
for (Iterator<Map.Entry<K, FileEntry>> it = segmentEntries.entrySet().iterator(); it.hasNext(); ) {
Map.Entry<K, FileEntry> next = it.next();
FileEntry fe = next.getValue();
if (fe.isExpired(now)) {
it.remove();
// We don't have to worry about other operations freeing the entry while we are reading it,
// but we have to lock because readFromDisk() unlocks
fe.lock();
entriesToPurge.add(new KeyValuePair<>(next.getKey(), fe));
}
}
}
return entriesToPurge;
}
@Override
public CompletionStage<Long> size(IntSet segments) {
return Flowable.fromPublisher(publishKeys(segments, null)).count().toCompletionStage();
}
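   // Note: approximateSize() below is cheaper than size(): it sums the in-memory segment
   // map sizes directly, so it may still count entries that expired but were not purged yet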
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
return blockingManager.supplyBlocking(() -> blockingApproximateSize(segments), "sfs-approximateSize");
}
private long blockingApproximateSize(IntSet segments) {
long size = 0;
long stamp = resizeLock.readLock();
try {
if (!segmented) {
return getSegmentEntries(0).size();
}
for (PrimitiveIterator.OfInt iterator = segments.iterator(); iterator.hasNext(); ) {
int segment = iterator.next();
Map<K, FileEntry> segmentEntries = getSegmentEntries(segment);
if (segmentEntries != null) {
size += segmentEntries.size();
}
}
} finally {
resizeLock.unlockRead(stamp);
}
return size;
}
Map<K, FileEntry> getEntries() {
long stamp = resizeLock.readLock();
try {
return Arrays.stream(entries)
.flatMap(segmentEntries -> segmentEntries != null ?
segmentEntries.entrySet().stream() :
Stream.empty())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
} finally {
resizeLock.unlockRead(stamp);
}
}
SortedSet<FileEntry> getFreeList() {
return freeList;
}
long getFileSize() {
return filePos;
}
public SingleFileStoreConfiguration getConfiguration() {
return configuration;
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
if (!segmented) {
throw new UnsupportedOperationException();
}
return blockingManager.runBlocking(() -> blockingAddSegments(segments), "sfs-addSegments");
}
private void blockingAddSegments(IntSet segments) {
long stamp = resizeLock.writeLock();
try {
for (int segment : segments) {
if (entries[segment] != null)
continue;
// Only use LinkedHashMap (LRU) for entries when cache store is bounded
Map<K, FileEntry> entryMap = configuration.maxEntries() > 0 ?
new LinkedHashMap<>(16, 0.75f, true) :
new HashMap<>();
entries[segment] = Collections.synchronizedMap(entryMap);
}
} finally {
resizeLock.unlockWrite(stamp);
}
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
if (!segmented) {
throw new UnsupportedOperationException();
}
return blockingManager.runBlocking(() -> blockingRemoveSegments(segments), "sfs-removeSegments");
}
private void blockingRemoveSegments(IntSet segments) {
List<Map<K, FileEntry>> removedSegments = new ArrayList<>(segments.size());
long stamp = resizeLock.writeLock();
try {
for (int segment : segments) {
if (entries[segment] == null)
continue;
removedSegments.add(entries[segment]);
entries[segment] = null;
}
} finally {
resizeLock.unlockWrite(stamp);
}
try {
for (Map<K, FileEntry> removedSegment : removedSegments) {
for (FileEntry fileEntry : removedSegment.values()) {
free(fileEntry);
}
}
} catch (IOException e) {
throw new PersistenceException(e);
}
// Disk space optimizations
processFreeEntries();
}
/**
* Helper class to represent an entry in the cache file.
* <p/>
* The format of a FileEntry on disk is as follows:
* <ul>
    *    <li>4 bytes: {@link #size}</li>
    *    <li>4 bytes: {@link #keyLen}, 0 if the block is unused</li>
    *    <li>4 bytes: {@link #dataLen}</li>
    *    <li>4 bytes: {@link #metadataLen}</li>
    *    <li>4 bytes: {@link #internalMetadataLen}</li>
    *    <li>8 bytes: {@link #expiryTime}</li>
    *    <li>{@link #keyLen} bytes: serialized key</li>
    *    <li>{@link #dataLen} bytes: serialized data</li>
    *    <li>{@link #metadataLen} bytes: serialized metadata</li>
    *    <li>{@link #internalMetadataLen} bytes: serialized internal metadata</li>
* </ul>
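    * <p>
    * For illustration (a sketch, assuming {@code KEY_POS_LATEST} equals the 28-byte header above, i.e. five ints
    * plus one long): an entry with an 8-byte key, 100 bytes of data, 16 bytes of metadata and no internal metadata
    * occupies {@code 28 + 8 + 100 + 16 = 152} bytes, which is what {@link #actualSize()} returns; {@link #size}
    * may be larger when the entry reuses a bigger block from the free list.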
*/
private static class FileEntry implements Comparable<FileEntry> {
/**
* File offset of this block.
*/
final long offset;
/**
* Total size of this block.
*/
final int size;
/**
* Size of serialized key.
*/
final int keyLen;
/**
* Size of serialized data.
*/
final int dataLen;
/**
* Size of serialized metadata.
*/
final int metadataLen;
/**
* Size of serialized internal metadata.
*/
final int internalMetadataLen;
/**
* Time stamp when the entry will expire (i.e. will be collected by purge).
*/
final long expiryTime;
/**
* Number of current readers.
*/
transient int readers = 0;
FileEntry(long offset, ByteBuffer buf) {
this.offset = offset;
this.size = buf.getInt();
this.keyLen = buf.getInt();
this.dataLen = buf.getInt();
this.metadataLen = buf.getInt();
this.internalMetadataLen = buf.getInt();
this.expiryTime = buf.getLong();
}
FileEntry(long offset, int size) {
this(offset, size, 0, 0, 0, 0, -1);
}
FileEntry(long offset, int size, int keyLen, int dataLen, int metadataLen, int internalMetadataLen, long expiryTime) {
this.offset = offset;
this.size = size;
this.keyLen = keyLen;
this.dataLen = dataLen;
this.metadataLen = metadataLen;
this.internalMetadataLen = internalMetadataLen;
this.expiryTime = expiryTime;
}
synchronized boolean isLocked() {
return readers > 0;
}
synchronized void lock() {
readers++;
}
synchronized void unlock() {
readers--;
if (readers == 0)
notifyAll();
}
synchronized void waitUnlocked() {
while (readers > 0) {
try {
wait();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
boolean isExpired(long now) {
return expiryTime > 0 && expiryTime < now;
}
int actualSize() {
return KEY_POS_LATEST + keyLen + dataLen + metadataLen + internalMetadataLen;
}
void writeToBuf(ByteBuffer buf) {
buf.putInt(size);
buf.putInt(keyLen);
buf.putInt(dataLen);
buf.putInt(metadataLen);
buf.putInt(internalMetadataLen);
buf.putLong(expiryTime);
}
@Override
public int compareTo(FileEntry fe) {
// We compare the size first, as the entries in the free list must be sorted by size
int diff = size - fe.size;
if (diff != 0) return diff;
return Long.compare(offset, fe.offset);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
FileEntry fileEntry = (FileEntry) o;
if (offset != fileEntry.offset) return false;
if (size != fileEntry.size) return false;
return true;
}
@Override
public int hashCode() {
int result = (int) (offset ^ (offset >>> 32));
result = 31 * result + size;
return result;
}
@Override
public String toString() {
return "FileEntry@" +
offset +
"{size=" + size +
", actual=" + actualSize() +
'}';
}
}
}
| 70,754
| 37.855025
| 203
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/DelegatingNonBlockingStore.java
|
package org.infinispan.persistence.support;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.Predicate;
import jakarta.transaction.Transaction;
import org.infinispan.commons.util.IntSet;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.reactivestreams.Publisher;
public abstract class DelegatingNonBlockingStore<K, V> implements NonBlockingStore<K, V> {
public abstract NonBlockingStore<K, V> delegate();
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
return delegate().start(ctx);
}
@Override
public CompletionStage<Void> stop() {
return delegate().stop();
}
@Override
public Set<Characteristic> characteristics() {
return delegate().characteristics();
}
@Override
public CompletionStage<Boolean> isAvailable() {
return delegate().isAvailable();
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
return delegate().load(segment, key);
}
@Override
public CompletionStage<Boolean> containsKey(int segment, Object key) {
return delegate().containsKey(segment, key);
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
return delegate().write(segment, entry);
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
return delegate().delete(segment, key);
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
return delegate().addSegments(segments);
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
return delegate().removeSegments(segments);
}
@Override
public CompletionStage<Void> clear() {
return delegate().clear();
}
@Override
public CompletionStage<Void> batch(int publisherCount,
Publisher<NonBlockingStore.SegmentedPublisher<Object>> removePublisher,
Publisher<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
return delegate().batch(publisherCount, removePublisher, writePublisher);
}
@Override
public CompletionStage<Long> size(IntSet segments) {
return delegate().size(segments);
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
return delegate().approximateSize(segments);
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
return delegate().publishEntries(segments, filter, includeValues);
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
return delegate().publishKeys(segments, filter);
}
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
return delegate().purgeExpired();
}
@Override
public CompletionStage<Void> prepareWithModifications(Transaction transaction, int publisherCount,
Publisher<SegmentedPublisher<Object>> removePublisher, Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
return delegate().prepareWithModifications(transaction, publisherCount, removePublisher, writePublisher);
}
@Override
public CompletionStage<Void> commit(Transaction transaction) {
return delegate().commit(transaction);
}
@Override
public CompletionStage<Void> rollback(Transaction transaction) {
return delegate().rollback(transaction);
}
@Override
public boolean ignoreCommandWithFlags(long commandFlags) {
return delegate().ignoreCommandWithFlags(commandFlags);
}
}
| 3,848
| 29.547619
| 136
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/DelegatingCacheLoader.java
|
package org.infinispan.persistence.support;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.CacheLoader;
import org.infinispan.persistence.spi.InitializationContext;
/**
* @author Mircea Markus
* @since 6.0
*/
public abstract class DelegatingCacheLoader<K, V> implements CacheLoader<K, V> {
protected CacheLoader actual;
protected InitializationContext ctx;
@Override
public void init(InitializationContext ctx) {
this.ctx = ctx;
      // The delegates only propagate init if the underlying object is a delegate as well.
      // We do this to ensure init is only invoked once on the actual store instance.
if (actual instanceof DelegatingCacheLoader)
actual.init(ctx);
}
@Override
public void start() {
if (actual instanceof DelegatingCacheLoader)
actual.start();
}
@Override
public void stop() {
if (actual instanceof DelegatingCacheLoader)
actual.stop();
}
protected DelegatingCacheLoader(CacheLoader actual) {
this.actual = actual;
}
@Override
public boolean contains(Object key) {
return actual != null && actual.contains(key);
}
@Override
public MarshallableEntry<K, V> loadEntry(Object key) {
return actual != null ? actual.loadEntry(key) : null;
}
public CacheLoader undelegate() {
CacheLoader cl = this;
do {
cl = ((DelegatingCacheLoader) cl).actual;
} while (cl instanceof DelegatingCacheLoader);
return cl;
}
}
| 1,559
| 25.440678
| 97
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/SegmentPublisherWrapper.java
|
package org.infinispan.persistence.support;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.reactivestreams.Subscriber;
import io.reactivex.rxjava3.flowables.GroupedFlowable;
public class SegmentPublisherWrapper<Type> implements NonBlockingStore.SegmentedPublisher<Type> {
private final GroupedFlowable<Integer, ? extends Type> groupedFlowable;
private SegmentPublisherWrapper(GroupedFlowable<Integer, ? extends Type> groupedFlowable) {
this.groupedFlowable = groupedFlowable;
}
public static <Type> SegmentPublisherWrapper<Type> wrap(GroupedFlowable<Integer, ? extends Type> groupedFlowable) {
return new SegmentPublisherWrapper<>(groupedFlowable);
}
@Override
public int getSegment() {
return groupedFlowable.getKey();
}
@Override
public void subscribe(Subscriber<? super Type> s) {
groupedFlowable.subscribe(s);
}
}
| 902
| 30.137931
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/AbstractSegmentedAdvancedLoadWriteStore.java
|
package org.infinispan.persistence.support;
import java.util.function.ToIntFunction;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.SegmentedAdvancedLoadWriteStore;
/**
 * Abstract segmented loader/writer that implements all the single-key, non-segmented methods by invoking the
 * segmented equivalent, passing in the segment returned by {@link #getKeyMapper()}. These methods are all
 * declared final to make sure the end user does not implement the incorrect method. All other methods must be
 * implemented as normal.
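 * <p>
 * A minimal sketch of the resulting delegation ({@code store} being any concrete subclass of this class):
 * <pre>{@code
 * // loadEntry("k") routes to store.get(getKeyMapper().applyAsInt("k"), "k")
 * MarshallableEntry<String, String> entry = store.loadEntry("k");
 * }</pre>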
* @author wburns
* @since 9.4
*/
public abstract class AbstractSegmentedAdvancedLoadWriteStore<K, V> implements SegmentedAdvancedLoadWriteStore<K, V> {
protected abstract ToIntFunction<Object> getKeyMapper();
@Override
public final MarshallableEntry<K, V> loadEntry(Object key) {
return get(getKeyMapper().applyAsInt(key), key);
}
@Override
public final boolean contains(Object key) {
return contains(getKeyMapper().applyAsInt(key), key);
}
@Override
public final void write(MarshallableEntry<? extends K, ? extends V> entry) {
write(getKeyMapper().applyAsInt(entry.getKey()), entry);
}
@Override
public final boolean delete(Object key) {
return delete(getKeyMapper().applyAsInt(key), key);
}
}
| 1,357
| 33.820513
| 118
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/ComposedSegmentedLoadWriteStore.java
|
package org.infinispan.persistence.support;
import java.util.PrimitiveIterator;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.ObjIntConsumer;
import java.util.function.Predicate;
import java.util.function.ToIntFunction;
import org.infinispan.Cache;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.AbstractSegmentedStoreConfiguration;
import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.persistence.InitializationContextImpl;
import org.infinispan.persistence.internal.PersistenceUtil;
import org.infinispan.persistence.spi.AdvancedCacheExpirationWriter;
import org.infinispan.persistence.spi.AdvancedLoadWriteStore;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.util.concurrent.CompletionStages;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Segmented store that creates a separate inner store for each segment. This is used by stores that are not
 * internally segmented but have a configuration that extends {@link AbstractSegmentedStoreConfiguration}.
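 * <p>
 * Every non-segmented operation is first mapped to a segment via the injected {@link KeyPartitioner} and then
 * dispatched to the inner store for that segment, e.g. {@code loadEntry(k)} effectively becomes
 * {@code stores.get(keyPartitioner.getSegment(k)).loadEntry(k)}.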
* @author wburns
* @since 9.4
*/
public class ComposedSegmentedLoadWriteStore<K, V, T extends AbstractSegmentedStoreConfiguration> extends AbstractSegmentedAdvancedLoadWriteStore<K, V> {
private final AbstractSegmentedStoreConfiguration<T> configuration;
Cache<K, V> cache;
KeyPartitioner keyPartitioner;
InitializationContext ctx;
boolean shouldStopSegments;
AtomicReferenceArray<AdvancedLoadWriteStore<K, V>> stores;
public ComposedSegmentedLoadWriteStore(AbstractSegmentedStoreConfiguration<T> configuration) {
this.configuration = configuration;
}
@Override
public ToIntFunction<Object> getKeyMapper() {
return keyPartitioner;
}
@Override
public MarshallableEntry<K, V> get(int segment, Object key) {
AdvancedLoadWriteStore<K, V> store = stores.get(segment);
if (store != null) {
return store.loadEntry(key);
}
return null;
}
@Override
public boolean contains(int segment, Object key) {
AdvancedLoadWriteStore<K, V> store = stores.get(segment);
return store != null && store.contains(key);
}
@Override
public void write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
AdvancedLoadWriteStore<K, V> store = stores.get(segment);
if (store != null) {
store.write(entry);
}
}
@Override
public boolean delete(int segment, Object key) {
AdvancedLoadWriteStore<K, V> store = stores.get(segment);
return store != null && store.delete(key);
}
@Override
public int size(IntSet segments) {
int size = 0;
PrimitiveIterator.OfInt segmentIterator = segments.iterator();
while (segmentIterator.hasNext()) {
int segment = segmentIterator.nextInt();
AdvancedLoadWriteStore<K, V> store = stores.get(segment);
if (store != null) {
size += store.size();
if (size < 0) {
return Integer.MAX_VALUE;
}
}
}
return size;
}
@Override
public int size() {
int size = 0;
for (int i = 0; i < stores.length(); ++i) {
AdvancedLoadWriteStore<K, V> store = stores.get(i);
if (store != null) {
size += store.size();
if (size < 0) {
return Integer.MAX_VALUE;
}
}
}
return size;
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
IntFunction<Publisher<K>> publisherFunction = i -> {
AdvancedLoadWriteStore<K, V> alws = stores.get(i);
if (alws != null) {
return alws.publishKeys(filter);
}
return Flowable.empty();
};
if (segments.size() == 1) {
return publisherFunction.apply(segments.iterator().nextInt());
}
return Flowable.fromStream(segments.intStream().mapToObj(publisherFunction))
.concatMap(RxJavaInterop.identityFunction());
}
@Override
public Publisher<K> publishKeys(Predicate<? super K> filter) {
return publishKeys(IntSets.immutableRangeSet(stores.length()), filter);
}
@Override
public Publisher<MarshallableEntry<K, V>> entryPublisher(IntSet segments, Predicate<? super K> filter, boolean fetchValue, boolean fetchMetadata) {
IntFunction<Publisher<MarshallableEntry<K, V>>> publisherFunction = i -> {
AdvancedLoadWriteStore<K, V> alws = stores.get(i);
if (alws != null) {
return alws.entryPublisher(filter, fetchValue, fetchMetadata);
}
return Flowable.empty();
};
if (segments.size() == 1) {
return publisherFunction.apply(segments.iterator().nextInt());
}
return Flowable.fromStream(segments.intStream().mapToObj(publisherFunction))
.concatMap(RxJavaInterop.identityFunction());
}
@Override
public Publisher<MarshallableEntry<K, V>> entryPublisher(Predicate<? super K> filter, boolean fetchValue, boolean fetchMetadata) {
return entryPublisher(IntSets.immutableRangeSet(stores.length()), filter, fetchValue, fetchMetadata);
}
@Override
public void clear() {
for (int i = 0; i < stores.length(); ++i) {
AdvancedLoadWriteStore<K, V> alws = stores.get(i);
if (alws != null) {
alws.clear();
}
}
}
@Override
public void purge(Executor executor, ExpirationPurgeListener<K, V> listener) {
for (int i = 0; i < stores.length(); ++i) {
AdvancedLoadWriteStore<K, V> alws = stores.get(i);
if (alws instanceof AdvancedCacheExpirationWriter) {
((AdvancedCacheExpirationWriter) alws).purge(executor, listener);
} else if (alws != null) {
alws.purge(executor, listener);
}
}
}
@Override
public void clear(IntSet segments) {
for (PrimitiveIterator.OfInt segmentIterator = segments.iterator(); segmentIterator.hasNext(); ) {
AdvancedLoadWriteStore<K, V> alws = stores.get(segmentIterator.nextInt());
if (alws != null) {
alws.clear();
}
}
}
@Override
public void deleteBatch(Iterable<Object> keys) {
int maxBatchSize = configuration.maxBatchSize();
CompletionStage<Void> stage = Flowable.fromIterable(keys)
// Separate out batches by segment
.groupBy(keyPartitioner::getSegment)
.flatMap(groupedFlowable ->
groupedFlowable
.buffer(maxBatchSize)
.doOnNext(batch -> stores.get(groupedFlowable.getKey()).deleteBatch(batch))
, stores.length())
.ignoreElements()
.toCompletionStage(null);
CompletionStages.join(stage);
}
@Override
public CompletionStage<Void> bulkUpdate(Publisher<MarshallableEntry<? extends K, ? extends V>> publisher) {
int maxBatchSize = configuration.maxBatchSize();
return Flowable.fromPublisher(publisher)
.groupBy(me -> keyPartitioner.getSegment(me.getKey()))
.flatMapCompletable(groupedFlowable ->
groupedFlowable
.buffer(maxBatchSize)
.flatMapCompletable(batch -> {
CompletionStage<Void> stage = stores.get(groupedFlowable.getKey()).bulkUpdate(Flowable.fromIterable(batch));
return Completable.fromCompletionStage(stage);
// Make sure to set the parallelism level to how many groups will be created
}), false, stores.length())
.toCompletionStage(null);
}
@Override
public void init(InitializationContext ctx) {
this.ctx = ctx;
cache = ctx.getCache();
}
@Override
public void start() {
ComponentRegistry componentRegistry = cache.getAdvancedCache().getComponentRegistry();
HashConfiguration hashConfiguration = cache.getCacheConfiguration().clustering().hash();
keyPartitioner = componentRegistry.getComponent(KeyPartitioner.class);
stores = new AtomicReferenceArray<>(hashConfiguration.numSegments());
// Local (invalidation) and replicated we just instantiate all the maps immediately
// Distributed needs them all only at beginning for preload of data - rehash event will remove others
for (int i = 0; i < stores.length(); ++i) {
startNewStoreForSegment(i);
}
// Distributed is the only mode that allows for dynamic addition/removal of maps as others own all segments
// in some fashion - others will clear instead when segment ownership is lost
shouldStopSegments = cache.getCacheConfiguration().clustering().cacheMode().isDistributed();
}
private void startNewStoreForSegment(int segment) {
if (stores.get(segment) == null) {
T storeConfiguration = configuration.newConfigurationFrom(segment, ctx);
AdvancedLoadWriteStore<K, V> newStore = PersistenceUtil.createStoreInstance(storeConfiguration);
newStore.init(new InitializationContextImpl(storeConfiguration, cache, keyPartitioner, ctx.getPersistenceMarshaller(), ctx.getTimeService(),
ctx.getByteBufferFactory(), ctx.getMarshallableEntryFactory(), ctx.getNonBlockingExecutor(), ctx.getGlobalConfiguration(),
ctx.getBlockingManager(), ctx.getNonBlockingManager()));
newStore.start();
stores.set(segment, newStore);
}
}
private void stopStoreForSegment(int segment) {
AdvancedLoadWriteStore<K, V> store = stores.getAndSet(segment, null);
if (store != null) {
store.stop();
}
}
private void destroyStore(int segment) {
AdvancedLoadWriteStore<K, V> store = stores.getAndSet(segment, null);
if (store != null) {
store.destroy();
}
}
@Override
public void stop() {
for (int i = 0; i < stores.length(); ++i) {
stopStoreForSegment(i);
}
}
@Override
public void destroy() {
for (int i = 0; i < stores.length(); ++i) {
destroyStore(i);
}
}
@Override
public void addSegments(IntSet segments) {
segments.forEach((IntConsumer) this::startNewStoreForSegment);
}
@Override
public void removeSegments(IntSet segments) {
if (shouldStopSegments) {
for (PrimitiveIterator.OfInt segmentIterator = segments.iterator(); segmentIterator.hasNext(); ) {
destroyStore(segmentIterator.nextInt());
}
} else {
clear(segments);
}
}
/**
 * Allows the user to directly invoke method(s) on the underlying stores. The segment that each
 * store maps to is provided as the second argument to the consumer.
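 * <p>
 * For example ({@code composedStore} being any instance of this class):
 * <pre>{@code
 * composedStore.forEach((store, segment) -> System.out.println(segment + " -> " + store));
 * }</pre>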
* @param consumer callback for every store that is currently installed
*/
public void forEach(ObjIntConsumer<? super AdvancedLoadWriteStore> consumer) {
for (int i = 0; i < stores.length(); ++i) {
AdvancedLoadWriteStore store = stores.get(i);
if (store != null) {
consumer.accept(store, i);
}
}
}
}
| 11,737
| 35.91195
| 153
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/DelegatingPersistenceManager.java
|
package org.infinispan.persistence.support;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.BiPredicate;
import java.util.function.Predicate;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.transaction.impl.AbstractCacheTransaction;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
@Scope(Scopes.NAMED_CACHE)
public class DelegatingPersistenceManager implements PersistenceManager, Lifecycle {
protected final PersistenceManager persistenceManager;
public DelegatingPersistenceManager(PersistenceManager persistenceManager) {
this.persistenceManager = persistenceManager;
}
@Inject
   void inject(ComponentRegistry componentRegistry) {
componentRegistry.wireDependencies(persistenceManager, false);
}
@Start
@Override
public void start() {
persistenceManager.start();
}
@Stop
@Override
public void stop() {
persistenceManager.stop();
}
public PersistenceManager getActual() {
return persistenceManager;
}
@Override
public boolean isEnabled() {
return persistenceManager.isEnabled();
}
@Override
public boolean hasWriter() {
return persistenceManager.hasWriter();
}
@Override
public boolean hasStore(Predicate<StoreConfiguration> test) {
return persistenceManager.hasStore(test);
}
@Override
public Flowable<MarshallableEntry<Object, Object>> preloadPublisher() {
return persistenceManager.preloadPublisher();
}
@Override
public CompletionStage<Void> disableStore(String storeType) {
return persistenceManager.disableStore(storeType);
}
@Override
public CompletionStage<Void> addStore(StoreConfiguration persistenceConfiguration) {
return persistenceManager.addStore(persistenceConfiguration);
}
@Override
public void addStoreListener(StoreChangeListener listener) {
persistenceManager.addStoreListener(listener);
}
@Override
public void removeStoreListener(StoreChangeListener listener) {
persistenceManager.removeStoreListener(listener);
}
@Override
public <T> Set<T> getStores(Class<T> storeClass) {
return persistenceManager.getStores(storeClass);
}
@Override
public Collection<String> getStoresAsString() {
return persistenceManager.getStoresAsString();
}
@Override
public CompletionStage<Void> purgeExpired() {
return persistenceManager.purgeExpired();
}
@Override
public CompletionStage<Void> clearAllStores(Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.clearAllStores(predicate);
}
@Override
public CompletionStage<Boolean> deleteFromAllStores(Object key, int segment, Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.deleteFromAllStores(key, segment, predicate);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(Predicate<? super K> filter, boolean fetchValue,
boolean fetchMetadata, Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.publishEntries(filter, fetchValue, fetchMetadata, predicate);
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter,
boolean fetchValue, boolean fetchMetadata,
Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.publishEntries(segments, filter, fetchValue, fetchMetadata, predicate);
}
@Override
public <K> Publisher<K> publishKeys(Predicate<? super K> filter, Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.publishKeys(filter, predicate);
}
@Override
public <K> Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter,
Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.publishKeys(segments, filter, predicate);
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, boolean localInvocation,
boolean includeStores) {
return persistenceManager.loadFromAllStores(key, localInvocation, includeStores);
}
@Override
public CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.size(predicate);
}
@Override
public void setClearOnStop(boolean clearOnStop) {
persistenceManager.setClearOnStop(clearOnStop);
}
@Override
public CompletionStage<Void> writeToAllNonTxStores(MarshallableEntry marshalledEntry, int segment,
Predicate<? super StoreConfiguration> predicate, long flags) {
return persistenceManager.writeToAllNonTxStores(marshalledEntry, segment, predicate, flags);
}
@Override
public CompletionStage<Void> prepareAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) throws PersistenceException {
return persistenceManager.prepareAllTxStores(txInvocationContext, predicate);
}
@Override
public CompletionStage<Void> commitAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.commitAllTxStores(txInvocationContext, predicate);
}
@Override
public CompletionStage<Void> rollbackAllTxStores(TxInvocationContext<AbstractCacheTransaction> txInvocationContext,
Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.rollbackAllTxStores(txInvocationContext, predicate);
}
@Override
public CompletionStage<Long> writeMapCommand(PutMapCommand putMapCommand, InvocationContext ctx,
BiPredicate<? super PutMapCommand, Object> commandKeyPredicate) {
return persistenceManager.writeMapCommand(putMapCommand, ctx, commandKeyPredicate);
}
@Override
public <K, V> CompletionStage<Void> writeEntries(Iterable<MarshallableEntry<K, V>> iterable,
Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.writeEntries(iterable, predicate);
}
@Override
public CompletionStage<Long> performBatch(TxInvocationContext<AbstractCacheTransaction> invocationContext,
BiPredicate<? super WriteCommand, Object> commandKeyPredicate) {
return persistenceManager.performBatch(invocationContext, commandKeyPredicate);
}
@Override
public boolean isAvailable() {
return persistenceManager.isAvailable();
}
@Override
public boolean isReadOnly() {
return persistenceManager.isReadOnly();
}
@Override
public <K, V> Publisher<MarshallableEntry<K, V>> publishEntries(boolean fetchValue, boolean fetchMetadata) {
return persistenceManager.publishEntries(fetchValue, fetchMetadata);
}
@Override
public <K, V> CompletionStage<MarshallableEntry<K, V>> loadFromAllStores(Object key, int segment,
boolean localInvocation,
boolean includeStores) {
return persistenceManager.loadFromAllStores(key, segment, localInvocation, includeStores);
}
@Override
public CompletionStage<Long> approximateSize(Predicate<? super StoreConfiguration> predicate, IntSet segments) {
return persistenceManager.approximateSize(predicate, segments);
}
@Override
public CompletionStage<Long> size() {
return persistenceManager.size();
}
@Override
public CompletionStage<Long> size(Predicate<? super StoreConfiguration> predicate, IntSet segments) {
return persistenceManager.size(predicate, segments);
}
@Override
public CompletionStage<Void> writeToAllNonTxStores(MarshallableEntry marshalledEntry, int segment,
Predicate<? super StoreConfiguration> predicate) {
return persistenceManager.writeToAllNonTxStores(marshalledEntry, segment, predicate);
}
@Override
public CompletionStage<Boolean> addSegments(IntSet segments) {
return persistenceManager.addSegments(segments);
}
@Override
public CompletionStage<Boolean> removeSegments(IntSet segments) {
return persistenceManager.removeSegments(segments);
}
}
| 9,587
| 35.876923
| 140
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/NonBlockingStoreAdapter.java
|
package org.infinispan.persistence.support;
import java.lang.invoke.MethodHandles;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import jakarta.transaction.Transaction;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.persistence.Store;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.util.IntSet;
import org.infinispan.persistence.spi.AdvancedCacheExpirationWriter;
import org.infinispan.persistence.spi.AdvancedCacheLoader;
import org.infinispan.persistence.spi.AdvancedCacheWriter;
import org.infinispan.persistence.spi.CacheLoader;
import org.infinispan.persistence.spi.CacheWriter;
import org.infinispan.persistence.spi.ExternalStore;
import org.infinispan.persistence.spi.FlagAffectedStore;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.SegmentedAdvancedLoadWriteStore;
import org.infinispan.persistence.spi.TransactionalCacheWriter;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Single;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
public class NonBlockingStoreAdapter<K, V> implements NonBlockingStore<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final AtomicInteger id = new AtomicInteger();
private final Lifecycle oldStoreImpl;
private final Set<Characteristic> characteristics;
private BlockingManager blockingManager;
private MarshallableEntryFactory<K, V> marshallableEntryFactory;
public NonBlockingStoreAdapter(Lifecycle oldStoreImpl) {
this.oldStoreImpl = oldStoreImpl;
this.characteristics = determineCharacteristics(oldStoreImpl);
}
public Lifecycle getActualStore() {
return oldStoreImpl;
}
private String nextTraceId(String operationName) {
return log.isTraceEnabled() ? "StoreAdapter-" + operationName + "-" + id.getAndIncrement() : null;
}
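   // Maps the legacy SPI hierarchy onto the new Characteristic set: segmented stores get SEGMENTABLE,
   // EXPIRATION and BULK_READ outright; otherwise a missing CacheLoader implies WRITE_ONLY, a missing
   // CacheWriter implies READ_ONLY, and the Advanced* interfaces add BULK_READ / EXPIRATION.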
   private static Set<Characteristic> determineCharacteristics(Object storeImpl) {
EnumSet<Characteristic> characteristics;
if (storeImpl instanceof SegmentedAdvancedLoadWriteStore) {
characteristics = EnumSet.of(Characteristic.SEGMENTABLE, Characteristic.EXPIRATION,
Characteristic.BULK_READ);
} else {
characteristics = EnumSet.noneOf(Characteristic.class);
if (storeImpl instanceof AdvancedCacheLoader) {
characteristics.add(Characteristic.BULK_READ);
} else if (!(storeImpl instanceof CacheLoader)) {
characteristics.add(Characteristic.WRITE_ONLY);
}
if (storeImpl instanceof AdvancedCacheWriter) {
characteristics.add(Characteristic.EXPIRATION);
} else if (!(storeImpl instanceof CacheWriter)) {
characteristics.add(Characteristic.READ_ONLY);
}
}
Store storeAnnotation = storeImpl.getClass().getAnnotation(Store.class);
if (storeAnnotation != null && storeAnnotation.shared()) {
characteristics.add(Characteristic.SHAREABLE);
}
      // Transactional is a special interface that can apply to both segmented and non-segmented stores
if (storeImpl instanceof TransactionalCacheWriter) {
characteristics.add(Characteristic.TRANSACTIONAL);
}
return characteristics;
}
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
blockingManager = ctx.getBlockingManager();
marshallableEntryFactory = ctx.getMarshallableEntryFactory();
return blockingManager.runBlocking(() -> {
if (isReadOnly()) {
loader().init(ctx);
} else {
writer().init(ctx);
}
oldStoreImpl.start();
}, nextTraceId("start"));
}
@Override
public CompletionStage<Void> stop() {
return blockingManager.runBlocking(oldStoreImpl::stop, nextTraceId("stop"));
}
@Override
public CompletionStage<Void> destroy() {
return blockingManager.runBlocking(() -> {
if (oldStoreImpl instanceof ExternalStore) {
((ExternalStore<?, ?>) oldStoreImpl).destroy();
} else {
oldStoreImpl.stop();
}
}, nextTraceId("destroy"));
}
@Override
public Set<Characteristic> characteristics() {
return characteristics;
}
@Override
public CompletionStage<Long> size(IntSet segments) {
return blockingManager.supplyBlocking(() ->
isSegmented() ? segmentedStore().size(segments) : advancedLoader().size(), nextTraceId("size"))
.thenApply(Integer::longValue);
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
// Old SPI didn't support approximations
return SIZE_UNAVAILABLE_FUTURE;
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
Publisher<MarshallableEntry<K, V>> publisher;
if (isSegmented()) {
publisher = segmentedStore().entryPublisher(segments, filter, includeValues, true);
} else {
publisher = advancedLoader().entryPublisher(filter, includeValues, true);
}
// Despite this being a publisher, we assume the subscription is blocking as the SPI never enforced this
// We do however assume the creation of the Publisher is not blocking... maybe we should?
return blockingManager.blockingPublisher(publisher);
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
Publisher<K> publisher;
if (isSegmented()) {
publisher = segmentedStore().publishKeys(segments, filter);
} else {
publisher = advancedLoader().publishKeys(filter);
}
// Despite this being a publisher, we assume the subscription is blocking as the SPI never enforced this
// We do however assume the creation of the Publisher is not blocking... maybe we should?
return blockingManager.blockingPublisher(publisher);
}
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
return Flowable.defer(() -> {
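         // Bridge the legacy callback-based purge listener onto a Flowable: each purged entry is pushed
         // into the processor, and completion or failure of the blocking purge terminates the stream.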
FlowableProcessor<MarshallableEntry<K, V>> flowableProcessor = UnicastProcessor.create();
AdvancedCacheExpirationWriter.ExpirationPurgeListener<K, V> expirationPurgeListener = new AdvancedCacheExpirationWriter.ExpirationPurgeListener<K, V>() {
@Override
public void marshalledEntryPurged(MarshallableEntry<K, V> entry) {
flowableProcessor.onNext(entry);
}
@Override
public void entryPurged(K key) {
flowableProcessor.onNext(marshallableEntryFactory.create(key));
}
};
CompletionStage<Void> purgeStage;
AdvancedCacheWriter<K, V> advancedCacheWriter = advancedWriter();
if (advancedCacheWriter instanceof AdvancedCacheExpirationWriter) {
purgeStage = blockingManager.runBlocking(() -> ((AdvancedCacheExpirationWriter<K, V>) advancedCacheWriter)
.purge(Runnable::run, expirationPurgeListener), nextTraceId("purgeExpired"));
} else {
purgeStage = blockingManager.runBlocking(() -> advancedCacheWriter
.purge(Runnable::run, expirationPurgeListener), nextTraceId("purgeExpired"));
}
purgeStage.whenComplete((ignore, t) -> {
if (t != null) {
flowableProcessor.onError(t);
} else {
flowableProcessor.onComplete();
}
});
return flowableProcessor;
});
}
@Override
public CompletionStage<Boolean> isAvailable() {
return blockingManager.supplyBlocking(() ->
isReadOnly() ? loader().isAvailable() : writer().isAvailable(), nextTraceId("isAvailable"));
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
return blockingManager.supplyBlocking(() ->
isSegmented() ? segmentedStore().get(segment, key) : loader().loadEntry(key), nextTraceId("load"));
}
@Override
public CompletionStage<Boolean> containsKey(int segment, Object key) {
return blockingManager.supplyBlocking(() ->
isSegmented() ? segmentedStore().contains(segment, key) : loader().contains(key), nextTraceId("containsKey"));
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
return blockingManager.runBlocking(() -> {
if (isSegmented()) {
segmentedStore().write(segment, entry);
} else {
writer().write(entry);
}
}, nextTraceId("write"));
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
return blockingManager.supplyBlocking(() ->
isSegmented() ? segmentedStore().delete(segment, key) : writer().delete(key), nextTraceId("delete"));
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
return blockingManager.runBlocking(() -> segmentedStore().addSegments(segments), nextTraceId("addSegments"));
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
return blockingManager.runBlocking(() -> segmentedStore().removeSegments(segments), nextTraceId("removeSegments"));
}
@Override
public CompletionStage<Void> clear() {
// Technically clear is defined on AdvancedCacheWriter - but there is no equivalent characteristic for that
// so we have to double check the implementation
if (oldStoreImpl instanceof AdvancedCacheWriter) {
return blockingManager.runBlocking(advancedWriter()::clear, nextTraceId("clear"));
}
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> batch(int publisherCount, Publisher<NonBlockingStore.SegmentedPublisher<Object>> removePublisher,
Publisher<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
Flowable<Object> objectFlowable = Flowable.fromPublisher(removePublisher)
.flatMap(RxJavaInterop.identityFunction(), false, publisherCount);
Flowable<MarshallableEntry<? extends K, ? extends V>> meFlowable = Flowable.fromPublisher(writePublisher)
.flatMap(RxJavaInterop.identityFunction(), false, publisherCount);
return blockingManager.supplyBlocking(() -> {
Single<Set<Object>> objectSingle = objectFlowable.collect(Collectors.toSet());
objectSingle.subscribe(writer()::deleteBatch);
// While bulkUpdate appears to be non blocking - there was no mandate that the operation actually be so.
// Thus we run it on a blocking thread just in case
return writer().bulkUpdate(meFlowable);
}, nextTraceId("batch-update"))
.thenCompose(Function.identity());
}
@Override
public CompletionStage<Void> prepareWithModifications(Transaction transaction, int publisherCount,
Publisher<SegmentedPublisher<Object>> removePublisher, Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
Set<Object> affectedKeys = new HashSet<>();
BatchModification oldBatchModification = new BatchModification(affectedKeys);
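      // Drain both publishers into the legacy BatchModification; for a given key, later writes/removes
      // override earlier ones (see BatchModification), matching the old transactional SPI semantics.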
Flowable.fromPublisher(removePublisher)
.subscribe(sp ->
Flowable.fromPublisher(sp)
.subscribe(key -> {
affectedKeys.add(key);
oldBatchModification.removeEntry(key);
})
);
Flowable.fromPublisher(writePublisher)
.subscribe(sp ->
Flowable.fromPublisher(sp)
.subscribe(me -> {
Object key = me.getKey();
affectedKeys.add(key);
//noinspection unchecked
oldBatchModification.addMarshalledEntry(key, (MarshallableEntry<Object, Object>) me);
})
);
return blockingManager.runBlocking(
() -> transactionalStore().prepareWithModifications(transaction, oldBatchModification), nextTraceId("prepareWithModifications"));
}
@Override
public CompletionStage<Void> commit(Transaction transaction) {
return blockingManager.runBlocking(
() -> transactionalStore().commit(transaction), nextTraceId("commit"));
}
@Override
public CompletionStage<Void> rollback(Transaction transaction) {
return blockingManager.runBlocking(
() -> transactionalStore().rollback(transaction), nextTraceId("rollback"));
}
@Override
public boolean ignoreCommandWithFlags(long commandFlags) {
if (oldStoreImpl instanceof FlagAffectedStore) {
return !((FlagAffectedStore) oldStoreImpl).shouldWrite(commandFlags);
}
return false;
}
boolean isSegmented() {
return characteristics.contains(Characteristic.SEGMENTABLE);
}
boolean isReadOnly() {
return characteristics.contains(Characteristic.READ_ONLY);
}
public TransactionalCacheWriter<K, V> transactionalStore() {
return (TransactionalCacheWriter<K, V>) oldStoreImpl;
}
public SegmentedAdvancedLoadWriteStore<K, V> segmentedStore() {
return (SegmentedAdvancedLoadWriteStore<K, V>) oldStoreImpl;
}
public AdvancedCacheLoader<K, V> advancedLoader() {
return (AdvancedCacheLoader<K, V>) oldStoreImpl;
}
public AdvancedCacheWriter<K, V> advancedWriter() {
return (AdvancedCacheWriter<K, V>) oldStoreImpl;
}
public CacheLoader<K, V> loader() {
return (CacheLoader<K, V>) oldStoreImpl;
}
public CacheWriter<K, V> writer() {
return (CacheWriter<K, V>) oldStoreImpl;
}
}
| 14,598
| 39.328729
| 162
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/SingleSegmentPublisher.java
|
package org.infinispan.persistence.support;
import java.util.Objects;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
public class SingleSegmentPublisher<E> implements NonBlockingStore.SegmentedPublisher<E> {
private final int segment;
private final Publisher<? extends E> publisher;
public static <E> NonBlockingStore.SegmentedPublisher<E> singleSegment(int segment, Publisher<? extends E> publisher) {
return new SingleSegmentPublisher<>(segment, publisher);
}
public static <E> NonBlockingStore.SegmentedPublisher<E> singleSegment(Publisher<? extends E> publisher) {
return new SingleSegmentPublisher<>(0, publisher);
}
private SingleSegmentPublisher(int segment, Publisher<? extends E> publisher) {
this.segment = segment;
this.publisher = Objects.requireNonNull(publisher);
}
@Override
public int getSegment() {
return segment;
}
@Override
public void subscribe(Subscriber<? super E> s) {
publisher.subscribe(s);
}
}
| 1,093
| 29.388889
| 122
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/DelegatingInitializationContext.java
|
package org.infinispan.persistence.support;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import org.infinispan.Cache;
import org.infinispan.commons.io.ByteBufferFactory;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.NonBlockingManager;
public abstract class DelegatingInitializationContext implements InitializationContext {
public abstract InitializationContext delegate();
@Override
public <T extends StoreConfiguration> T getConfiguration() {
return delegate().getConfiguration();
}
@Override
public Cache getCache() {
return delegate().getCache();
}
@Override
public KeyPartitioner getKeyPartitioner() {
return delegate().getKeyPartitioner();
}
@Override
public TimeService getTimeService() {
return delegate().getTimeService();
}
@Override
public ByteBufferFactory getByteBufferFactory() {
return delegate().getByteBufferFactory();
}
@Override
public ExecutorService getExecutor() {
return delegate().getExecutor();
}
@Override
public Executor getNonBlockingExecutor() {
return delegate().getNonBlockingExecutor();
}
@Override
public BlockingManager getBlockingManager() {
return delegate().getBlockingManager();
}
@Override
public NonBlockingManager getNonBlockingManager() {
return delegate().getNonBlockingManager();
}
@Override
public <K, V> MarshallableEntryFactory<K, V> getMarshallableEntryFactory() {
return delegate().getMarshallableEntryFactory();
}
@Override
public PersistenceMarshaller getPersistenceMarshaller() {
return delegate().getPersistenceMarshaller();
}
@Override
public GlobalConfiguration getGlobalConfiguration() {
return delegate().getGlobalConfiguration();
}
}
| 2,295
| 27.345679
| 88
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/BatchModification.java
|
package org.infinispan.persistence.support;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.infinispan.persistence.spi.MarshallableEntry;
/**
 * A simple wrapper class, necessary for transactional stores, which allows MarshallableEntries and Object keys to be
 * passed to a store implementation in order. This class also collapses repeated operations on the same key to prevent
 * redundant operations on the underlying store. For example, a tx {put(1, "Test"); remove(1);} is simply written
 * to the store as {remove(1);}.
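 * <p>
 * A minimal sketch of that behaviour ({@code entryFor} stands in for any code that produces a
 * {@code MarshallableEntry}; it is not part of this class):
 * <pre>{@code
 * BatchModification mods = new BatchModification(new HashSet<>());
 * mods.addMarshalledEntry(1, entryFor(1, "Test"));
 * mods.removeEntry(1);
 * // mods.getMarshallableEntries() is now empty; mods.getKeysToRemove() contains only 1
 * }</pre>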
*
* @author Ryan Emerson
*/
public class BatchModification {
private final Map<Object, MarshallableEntry<Object, Object>> marshalledEntries = new HashMap<>();
private final Set<Object> keysToRemove = new HashSet<>();
private final Set<Object> affectedKeys;
public BatchModification(Set<Object> affectedKeys) {
this.affectedKeys = affectedKeys;
}
public void addMarshalledEntry(Object key, MarshallableEntry<Object, Object> marshalledEntry) {
keysToRemove.remove(key);
marshalledEntries.put(key, marshalledEntry);
}
public void removeEntry(Object key) {
marshalledEntries.remove(key);
keysToRemove.add(key);
}
public Set<Object> getAffectedKeys() {
return affectedKeys;
}
public Set<Object> getKeysToRemove() {
return keysToRemove;
}
public Collection<MarshallableEntry<Object, Object>> getMarshallableEntries() {
return marshalledEntries.values();
}
}
| 1,583
| 30.68
| 122
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/support/DelegatingCacheWriter.java
|
package org.infinispan.persistence.support;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.CacheWriter;
import org.infinispan.persistence.spi.InitializationContext;
/**
* @author Mircea Markus
* @since 6.0
*/
public abstract class DelegatingCacheWriter<K, V> implements CacheWriter<K, V> {
protected final CacheWriter<K, V> actual;
protected InitializationContext ctx;
public DelegatingCacheWriter(CacheWriter<K, V> actual) {
this.actual = actual;
}
@Override
public void init(InitializationContext ctx) {
this.ctx = ctx;
      // The delegates only propagate init if the underlying object is a delegate as well.
      // We do this to ensure init is only invoked once on the actual store instance.
if (actual instanceof DelegatingCacheWriter)
actual.init(ctx);
}
@Override
public void start() {
if (actual instanceof DelegatingCacheWriter)
actual.start();
}
@Override
public void stop() {
if (actual instanceof DelegatingCacheWriter)
actual.stop();
}
@Override
public void write(MarshallableEntry<? extends K, ? extends V> entry) {
actual.write(entry);
}
@Override
public boolean delete(Object key) {
return actual.delete(key);
}
public CacheWriter undelegate() {
CacheWriter cl = this;
do {
cl = ((DelegatingCacheWriter) cl).actual;
} while (cl instanceof DelegatingCacheWriter);
return cl;
}
}
| 1,507
| 24.133333
| 90
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/modifications/package-info.java
|
/**
 * Modifications that are used to encapsulate cache operations for application to a
 * {@link org.infinispan.persistence.spi.CacheWriter}.
*/
package org.infinispan.persistence.modifications;
| 162
| 31.6
| 103
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/modifications/Modification.java
|
package org.infinispan.persistence.modifications;
/**
* An interface that defines a {@link org.infinispan.persistence.spi.CacheWriter} modification
*
* @author Manik Surtani
* @since 4.0
*/
public interface Modification {
enum Type {
STORE, REMOVE, CLEAR, LIST
}
Type getType();
}
| 303
| 18
| 94
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/modifications/Store.java
|
package org.infinispan.persistence.modifications;
import org.infinispan.persistence.spi.MarshallableEntry;
/**
* Modification representing {@link org.infinispan.persistence.spi.CacheWriter#write(MarshallableEntry)}.
*
* @author Manik Surtani
* @since 4.0
*/
public class Store implements Modification {
final Object key;
final MarshallableEntry storedEntry;
public Store(Object key, MarshallableEntry storedValue) {
this.key = key;
this.storedEntry = storedValue;
}
@Override
public Type getType() {
return Type.STORE;
}
public MarshallableEntry getStoredValue() {
return storedEntry;
}
public Object getKey() {
return key;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Store)) return false;
Store store = (Store) o;
if (key != null ? !key.equals(store.key) : store.key != null) return false;
if (storedEntry != null ? !storedEntry.equals(store.storedEntry) : store.storedEntry != null) return false;
return true;
}
@Override
public int hashCode() {
int result = key != null ? key.hashCode() : 0;
result = 31 * result + (storedEntry != null ? storedEntry.hashCode() : 0);
return result;
}
@Override
public String toString() {
return "Store{" +
"key=" + key +
", storedEntry=" + storedEntry +
'}';
}
}
| 1,452
| 22.435484
| 113
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/modifications/ModificationsList.java
|
package org.infinispan.persistence.modifications;
import java.util.List;
/**
 * ModificationsList contains a {@code List<? extends Modification>}
*
* @author Sanne Grinovero
* @since 4.1
*/
public class ModificationsList implements Modification {
private final List<? extends Modification> list;
public ModificationsList(List<? extends Modification> list) {
this.list = list;
}
@Override
public Type getType() {
return Modification.Type.LIST;
}
public List<? extends Modification> getList() {
return list;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((list == null) ? 0 : list.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ModificationsList other = (ModificationsList) obj;
if (list == null) {
if (other.list != null)
return false;
} else if (!list.equals(other.list))
return false;
return true;
}
@Override
public String toString() {
return "ModificationsList: [" + list + "]";
}
}
| 1,272
| 20.576271
| 71
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/modifications/Remove.java
|
package org.infinispan.persistence.modifications;
/**
 * Represents a {@link org.infinispan.persistence.spi.CacheWriter#delete(Object)} modification
*
* @author Manik Surtani
* @since 4.0
*/
public class Remove implements Modification {
final Object key;
public Remove(Object key) {
this.key = key;
}
@Override
public Type getType() {
return Type.REMOVE;
}
public Object getKey() {
return key;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Remove remove = (Remove) o;
if (key != null ? !key.equals(remove.key) : remove.key != null) return false;
return true;
}
@Override
public int hashCode() {
return key != null ? key.hashCode() : 0;
}
@Override
public String toString() {
return "Remove{" +
"key=" + key +
'}';
}
}
| 965
| 18.32
| 104
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/modifications/Clear.java
|
package org.infinispan.persistence.modifications;
public class Clear implements Modification {
@Override
public Type getType() {
return Type.CLEAR;
}
}
| 168
| 17.777778
| 49
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/cluster/ClusterLoader.java
|
package org.infinispan.persistence.cluster;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.util.EnumUtil;
import org.infinispan.configuration.cache.ClusterLoaderConfiguration;
import org.infinispan.configuration.cache.Configurations;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.context.Flag;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.manager.PersistenceManager.StoreChangeListener;
import org.infinispan.persistence.manager.PersistenceStatus;
import org.infinispan.persistence.spi.CacheLoader;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.LocalOnlyCacheLoader;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
/**
 * Cache loader that consults other members in the cluster for values. A <code>remoteCallTimeout</code> property is
 * required: a <code>long</code> that specifies how long, in milliseconds, to wait for results before returning null.
*
* @author Mircea.Markus@jboss.com
* @deprecated since 11.0. To be removed in 14.0 ISPN-11864 with no direct replacement.
*/
@ConfiguredBy(ClusterLoaderConfiguration.class)
@Deprecated
public class ClusterLoader implements CacheLoader, LocalOnlyCacheLoader, StoreChangeListener {
private RpcManager rpcManager;
private AdvancedCache<?, ?> cache;
private CommandsFactory commandsFactory;
private KeyPartitioner keyPartitioner;
private PersistenceManager persistenceManager;
private volatile boolean needsSegments;
private InitializationContext ctx;
@Override
public void init(InitializationContext ctx) {
this.ctx = ctx;
cache = ctx.getCache().getAdvancedCache();
commandsFactory = cache.getComponentRegistry().getCommandsFactory();
rpcManager = cache.getRpcManager();
keyPartitioner = cache.getComponentRegistry().getComponent(KeyPartitioner.class);
persistenceManager = cache.getComponentRegistry().getComponent(PersistenceManager.class);
needsSegments = Configurations.needSegments(cache.getCacheConfiguration());
}
@Override
public MarshallableEntry loadEntry(Object key) throws PersistenceException {
if (!isCacheReady()) return null;
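      // Build a remote GET that skips the ownership check so that any member holding the value can answer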
ClusteredGetCommand clusteredGetCommand = commandsFactory.buildClusteredGetCommand(key,
needsSegments ? keyPartitioner.getSegment(key) : null,
EnumUtil.bitSetOf(Flag.SKIP_OWNERSHIP_CHECK));
Collection<Response> responses;
try {
clusteredGetCommand.setTopologyId(rpcManager.getTopologyId());
CompletionStage<Map<Address, Response>> getAll = rpcManager.invokeCommandOnAll(clusteredGetCommand,
MapResponseCollector.ignoreLeavers(), rpcManager.getSyncRpcOptions());
responses = rpcManager.blocking(getAll).values();
} catch (Exception e) {
PERSISTENCE.errorDoingRemoteCall(e);
throw new PersistenceException(e);
}
if (responses.isEmpty()) return null;
Response response;
if (responses.size() > 1) {
// Remove duplicates before deciding if multiple responses were received
Set<Response> setResponses = new HashSet<>(responses);
if (setResponses.size() > 1)
throw new PersistenceException(String.format(
"Responses contains more than 1 element and these elements are not equal, so can't decide which one to use: %s",
setResponses));
response = setResponses.iterator().next();
} else {
response = responses.iterator().next();
}
if (response.isSuccessful() && response instanceof SuccessfulResponse) {
InternalCacheValue value = (InternalCacheValue) ((SuccessfulResponse) response).getResponseValue();
return value == null ? null :
ctx.getMarshallableEntryFactory().create(key, value.getValue());
}
PERSISTENCE.unknownResponsesFromRemoteCache(responses);
throw new PersistenceException("Unknown responses");
}
@Override
public boolean contains(Object key) {
return loadEntry(key) != null;
}
@Override
public void start() {
persistenceManager.addStoreListener(this);
}
@Override
public void storeChanged(PersistenceStatus status) {
synchronized (this) {
needsSegments = needsSegments || status.usingSegmentedStore();
}
}
@Override
public void stop() {
persistenceManager.removeStoreListener(this);
}
   /**
    * Checks whether the cache has fully started. If not, remote calls should not be made: the channel may not have
    * properly started, blocks due to state transfers may be in progress, etc.
    *
    * @return true if the cache is in its RUNNING state.
    */
protected boolean isCacheReady() {
return cache.getStatus() == ComponentStatus.RUNNING;
}
}
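// Illustrative configuration sketch (not part of the original file). The builder calls
// below follow the standard persistence configuration pattern; the timeout value is an
// arbitrary assumption:
//
//    ConfigurationBuilder builder = new ConfigurationBuilder();
//    builder.persistence()
//           .addClusterLoader()       // deprecated, like this loader itself
//           .remoteCallTimeout(500);  // milliseconds to wait for remote responses
//    Configuration configuration = builder.build();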
| 5,727
| 39.055944
| 130
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/cluster/package-info.java
|
/**
 * A {@link org.infinispan.persistence.spi.CacheLoader} (not a {@link org.infinispan.persistence.spi.CacheWriter}) that polls other nodes in the cluster for state. Useful if state
 * transfer on startup is disabled: this {@link org.infinispan.persistence.spi.CacheLoader} implementation allows state to be loaded lazily from
 * remote nodes, on demand and on a per-entry basis.
*
* @api.public
*/
package org.infinispan.persistence.cluster;
| 446
| 48.666667
| 178
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/Compactor.java
|
package org.infinispan.persistence.sifs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.io.ByteBufferImpl;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.Util;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.reactive.RxJavaInterop;
import org.infinispan.util.concurrent.AggregateCompletionStage;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.NonBlockingManager;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Scheduler;
import io.reactivex.rxjava3.functions.Consumer;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
import io.reactivex.rxjava3.schedulers.Schedulers;
/**
 * Component that tracks log file usage. As soon as enough entries in a file have been overwritten that the file
 * becomes cluttered with stale records, the remaining valid records are moved to another file and the old ones
 * are dropped. Expired records are moved as tombstones without values (records of entry removal).
*
* @author Radim Vansa <rvansa@redhat.com>
*/
class Compactor implements Consumer<Object> {
private static final Log log = LogFactory.getLog(Compactor.class, Log.class);
private final NonBlockingManager nonBlockingManager;
private final ConcurrentMap<Integer, Stats> fileStats = new ConcurrentHashMap<>();
private final FileProvider fileProvider;
private final TemporaryTable temporaryTable;
private final Marshaller marshaller;
private final TimeService timeService;
private final KeyPartitioner keyPartitioner;
private final int maxFileSize;
private final double compactionThreshold;
private final Executor blockingExecutor;
private FlowableProcessor<Object> processor;
private Index index;
   // as processing a single scheduled compaction takes a long time, we don't use the queue to signal clears
private final AtomicBoolean clearSignal = new AtomicBoolean();
private volatile boolean terminateSignal = false;
// variable used to denote running (not null but not complete) and stopped (not null but complete)
// This variable is never to be null
private volatile CompletableFuture<?> stopped = CompletableFutures.completedNull();
private CompletableFuture<Void> paused = CompletableFutures.completedNull();
// Special object used solely for the purpose of resuming the compactor after compacting a file and waiting for
// all indices to be updated
private static final Object RESUME_PILL = new Object();
// This buffer is used by the compactor thread to avoid allocating buffers per entry written that are smaller
// than the header size
private final java.nio.ByteBuffer REUSED_BUFFER = java.nio.ByteBuffer.allocate(EntryHeader.HEADER_SIZE_11_0);
FileProvider.Log logFile = null;
long nextExpirationTime = -1;
int currentOffset = 0;
public Compactor(NonBlockingManager nonBlockingManager, FileProvider fileProvider, TemporaryTable temporaryTable,
Marshaller marshaller, TimeService timeService, KeyPartitioner keyPartitioner, int maxFileSize,
double compactionThreshold, Executor blockingExecutor) {
this.nonBlockingManager = nonBlockingManager;
this.fileProvider = fileProvider;
this.temporaryTable = temporaryTable;
this.marshaller = marshaller;
this.timeService = timeService;
this.keyPartitioner = keyPartitioner;
this.maxFileSize = maxFileSize;
this.compactionThreshold = compactionThreshold;
this.blockingExecutor = blockingExecutor;
}
public void setIndex(Index index) {
this.index = index;
}
public void releaseStats(int file) {
fileStats.remove(file);
}
public void free(int file, int size) {
// entries expired from compacted file are reported with file = -1
if (file < 0) return;
recordFreeSpace(getStats(file, -1, -1), file, size);
}
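   // Illustrative caller sketch (assumed usage, not original code): whenever an entry
   // occupying some bytes in a file is overwritten or removed, the owning component
   // reports the freed space so the file can eventually cross the compaction threshold:
   //
   //    compactor.free(file, entryRecord.getHeader().totalLength());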
public void completeFile(int file, int currentSize, long nextExpirationTime) {
completeFile(file, currentSize, nextExpirationTime, true);
}
public void completeFile(int file, int currentSize, long nextExpirationTime, boolean canSchedule) {
Stats stats = getStats(file, currentSize, nextExpirationTime);
stats.setCompleted();
// It is possible this was a logFile that was compacted
if (canSchedule && stats.readyToBeScheduled(compactionThreshold, stats.getFree())) {
schedule(file, stats);
}
}
ConcurrentMap<Integer, Stats> getFileStats() {
return fileStats;
}
boolean addFreeFile(int file, int expectedSize, int freeSize, long expirationTime) {
return addFreeFile(file, expectedSize, freeSize, expirationTime, true);
}
boolean addFreeFile(int file, int expectedSize, int freeSize, long expirationTime, boolean canScheduleCompaction) {
int fileSize = (int) fileProvider.getFileSize(file);
if (fileSize != expectedSize) {
log.tracef("Unable to add file %s as it its size %s does not match expected %s, index may be dirty", file, fileSize, expectedSize);
return false;
}
Stats stats = new Stats(fileSize, freeSize, expirationTime);
if (fileStats.putIfAbsent(file, stats) != null) {
log.tracef("Unable to add file %s as it is already present, index may be dirty", file);
return false;
}
log.tracef("Added new file %s to compactor manually with total size %s and free size %s", file, fileSize, freeSize);
stats.setCompleted();
if (canScheduleCompaction && stats.readyToBeScheduled(compactionThreshold, freeSize)) {
schedule(file, stats);
}
return true;
}
public void start() {
stopped = new CompletableFuture<>();
processor = UnicastProcessor.create().toSerialized();
Scheduler scheduler = Schedulers.from(blockingExecutor);
processor.observeOn(scheduler)
.delay(obj -> {
// These types are special and should allow processing always
if (obj == RESUME_PILL || obj instanceof CompletableFuture) {
return Flowable.empty();
}
return RxJavaInterop.voidCompletionStageToFlowable(paused);
})
.subscribe(this, error -> {
log.compactorEncounteredException(error, -1);
stopped.completeExceptionally(error);
}, () -> stopped.complete(null));
fileStats.forEach((file, stats) -> {
if (stats.readyToBeScheduled(compactionThreshold, stats.getFree())) {
schedule(file, stats);
}
});
}
public interface CompactionExpirationSubscriber {
void onEntryPosition(EntryPosition entryPosition) throws IOException;
void onEntryEntryRecord(EntryRecord entryRecord) throws IOException;
void onComplete();
void onError(Throwable t);
}
public void performExpirationCompaction(CompactionExpirationSubscriber subscriber) {
processor.onNext(subscriber);
}
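   // Illustrative sketch (hypothetical subscriber, not part of this class) of how a
   // caller might consume expiration events from performExpirationCompaction:
   //
   //    compactor.performExpirationCompaction(new CompactionExpirationSubscriber() {
   //       @Override
   //       public void onEntryPosition(EntryPosition entryPosition) { /* expired entry found via temporary table */ }
   //       @Override
   //       public void onEntryEntryRecord(EntryRecord entryRecord) { /* expired entry found via the index */ }
   //       @Override
   //       public void onComplete() { /* all files scanned */ }
   //       @Override
   //       public void onError(Throwable t) { /* propagate failure */ }
   //    });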
// Present for testing only - note is still asynchronous if underlying executor is
CompletionStage<Void> forceCompactionForAllNonLogFiles() {
AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
for (Map.Entry<Integer, Stats> stats : fileStats.entrySet()) {
int fileId = stats.getKey();
if (!fileProvider.isLogFile(fileId) && !stats.getValue().markedForDeletion && stats.getValue().setScheduled()) {
CompactionRequest compactionRequest = new CompactionRequest(fileId);
processor.onNext(compactionRequest);
aggregateCompletionStage.dependsOn(compactionRequest);
}
}
return aggregateCompletionStage.freeze();
}
// Present for testing only - so test can see what files are currently known to compactor
Set<Integer> getFiles() {
return fileStats.keySet();
}
private Stats getStats(int file, int currentSize, long expirationTime) {
Stats stats = fileStats.get(file);
if (stats == null) {
int fileSize = currentSize < 0 ? (int) fileProvider.getFileSize(file) : currentSize;
stats = new Stats(fileSize, 0, expirationTime);
Stats other = fileStats.putIfAbsent(file, stats);
if (other != null) {
if (fileSize > other.getTotal()) {
other.setTotal(fileSize);
}
return other;
}
}
if (stats.getTotal() < 0) {
int fileSize = currentSize < 0 ? (int) fileProvider.getFileSize(file) : currentSize;
if (fileSize >= 0) {
stats.setTotal(fileSize);
}
stats.setNextExpirationTime(ExpiryHelper.mostRecentExpirationTime(stats.nextExpirationTime, expirationTime));
}
return stats;
}
private void recordFreeSpace(Stats stats, int file, int size) {
if (stats.addFree(size, compactionThreshold)) {
schedule(file, stats);
}
}
private void schedule(int file, Stats stats) {
assert stats.isScheduled();
if (!terminateSignal) {
log.debugf("Scheduling file %d for compaction: %d/%d free", file, stats.free.get(), stats.total);
CompactionRequest request = new CompactionRequest(file);
processor.onNext(request);
request.whenComplete((__, t) -> {
if (t != null) {
log.compactorEncounteredException(t, file);
// Poor attempt to allow compactor to continue operating - file will never be compacted again
fileStats.remove(file);
}
});
}
}
/**
* Immediately sends a request to pause the compactor. The returned stage will complete when the
* compactor is actually paused. To resume the compactor the {@link #resumeAfterClear()} method
* must be invoked or else the compactor will not process new requests.
*
* @return a stage that when complete the compactor is paused
*/
public CompletionStage<Void> clearAndPause() {
if (clearSignal.getAndSet(true)) {
throw new IllegalStateException("Clear signal was already set for compactor, clear cannot be invoked " +
"concurrently with another!");
}
ClearFuture clearFuture = new ClearFuture();
      // Register the callback before submitting to the processor, so the stats are cleared in the compactor's blocking thread
clearFuture.whenComplete((ignore, t) -> fileStats.clear());
processor.onNext(clearFuture);
return clearFuture;
}
private static class ClearFuture extends CompletableFuture<Void> {
@Override
public String toString() {
return "ClearFuture{}";
}
}
public void resumeAfterClear() {
// This completion will push all the other tasks that have been delayed in this method call
if (!clearSignal.getAndSet(false)) {
throw new IllegalStateException("Resume of compactor invoked without first clear and pausing!");
}
}
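   // Illustrative protocol sketch (assumed caller code): a clear must pause the
   // compactor, perform its work, and then explicitly resume it:
   //
   //    compactor.clearAndPause().thenRun(() -> {
   //       // delete or truncate the data files here (caller-specific work)
   //       compactor.resumeAfterClear();
   //    });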
private void resumeAfterPause() {
processor.onNext(RESUME_PILL);
}
public void stopOperations() {
// This will short circuit any compactor call, so it can only process the entry it may be on currently
terminateSignal = true;
processor.onComplete();
// The stopped CompletableFuture is completed in onComplete or onError callback for the processor, so this will
// return after all compaction calls are completed
stopped.join();
if (logFile != null) {
Util.close(logFile);
// Complete the file, this file should not be compacted
completeFile(logFile.fileId, currentOffset, nextExpirationTime, false);
logFile = null;
}
}
private static class CompactionRequest extends CompletableFuture<Void> {
private final int fileId;
private CompactionRequest(int fileId) {
this.fileId = fileId;
}
@Override
public String toString() {
return "CompactionRequest{" +
"fileId=" + fileId +
'}';
}
}
void handleIgnoredElement(Object o) {
if (o instanceof CompactionExpirationSubscriber) {
// We assume the subscriber handles blocking properly
((CompactionExpirationSubscriber) o).onComplete();
} else if (o instanceof CompletableFuture) {
nonBlockingManager.complete((CompletableFuture<?>) o, null);
}
}
@Override
public void accept(Object o) throws Throwable {
if (terminateSignal) {
log.tracef("Compactor already terminated, ignoring request " + o);
// Just ignore if terminated
handleIgnoredElement(o);
return;
}
if (o == RESUME_PILL) {
log.tracef("Resuming compactor");
// This completion will push all the other tasks that have been delayed in this method call
// Note this must be completed in the context of the compactor thread
paused.complete(null);
return;
}
// Note that this accept is only invoked from a single thread at a time so we don't have to worry about
// any other threads decrementing clear signal. However, another thread can increment, that is okay for us
if (clearSignal.get()) {
// We ignore any entries since it was last cleared
if (o instanceof ClearFuture) {
log.tracef("Compactor ignoring all future compactions until resumed");
if (logFile != null) {
logFile.close();
logFile = null;
nextExpirationTime = -1;
}
nonBlockingManager.complete((CompletableFuture<?>) o, null);
} else {
log.tracef("Ignoring compaction request for %s as compactor is being cleared", o);
handleIgnoredElement(o);
}
return;
}
if (o instanceof CompactionExpirationSubscriber) {
CompactionExpirationSubscriber subscriber = (CompactionExpirationSubscriber) o;
try {
            // We have to copy the file ids into their own collection because the iteration can sometimes pick up
            // the compactor's own files, causing extra unneeded churn
Set<Integer> currentFiles = new HashSet<>();
try (CloseableIterator<Integer> iter = fileProvider.getFileIterator()) {
while (iter.hasNext()) {
currentFiles.add(iter.next());
}
}
for (int fileId : currentFiles) {
boolean isLogFile = fileProvider.isLogFile(fileId);
if (isLogFile) {
// Force log file to be in the stats
free(fileId, 0);
}
Stats stats = fileStats.get(fileId);
long currentTimeMilliseconds = timeService.wallClockTime();
if (stats != null) {
// Don't check for expired entries in any files that are marked for deletion or don't have entries
// that can expire yet
// Note that log files do not set the expiration time, so it is always -1 in that case, but we still
// want to check just in case some files are expired there.
// Note that we when compacting an expired entry from the log file we first write to the compacted
// file and then notify the subscriber. Assuming the subscriber then invokes remove expired it
// will actually cause two writes for the same expired entry. This is required though in case if
// the entry is not removed from the listener as we don't want to keep returning the same entry
// to the listener that it has expired.
if (stats.markedForDeletion() || (!isLogFile && stats.nextExpirationTime == -1) || stats.nextExpirationTime > currentTimeMilliseconds) {
log.tracef("Skipping expiration for file %d since it is marked for deletion: %s or its expiration time %s is not yet",
(Object) fileId, stats.markedForDeletion(), stats.nextExpirationTime);
continue;
}
// Make sure we don't start another compaction for this file while performing expiration
if (stats.setScheduled()) {
compactSingleFile(fileId, isLogFile, subscriber, currentTimeMilliseconds);
if (isLogFile) {
// Unschedule the compaction for log file as we can't remove it
stats.scheduled.set(false);
}
}
} else {
log.tracef("Skipping expiration for file %d as it is not included in fileStats", fileId);
}
}
subscriber.onComplete();
} catch (Throwable t) {
subscriber.onError(t);
}
return;
}
CompactionRequest request = (CompactionRequest) o;
try {
// Any other type submitted has to be a positive integer
Stats stats = fileStats.get(request.fileId);
         // Double check that the file wasn't removed: null stats mean the file was previously removed.
         // Also make sure the file wasn't already marked for deletion but not yet deleted
if (stats != null && !stats.markedForDeletion()) {
compactSingleFile(request.fileId, false, null, timeService.wallClockTime());
}
request.complete(null);
} catch (Throwable t) {
log.trace("Completing compaction for file: " + request.fileId + " due to exception!", t);
request.completeExceptionally(t);
}
}
/**
    * Compacts a single file into the current log file. This method has two modes of operation based on whether the
    * file is a log file or not. If it is a log file, non-expired entries are ignored and only expired entries are
    * "updated" to be deleted in the new log file, with the expiration listener notified. If it is not a log file,
    * all entries are moved to the new log file and the current file is deleted afterwards. If an expired entry is
    * found while compacting a non-log file, the expiration listener is notified and the entry is not moved; however,
    * if no expiration listener is provided, the expired entry is moved to the new file and remains expired.
    * @param scheduledFile the file identifier to compact
    * @param isLogFile whether the provided file is a log file, in which case we only notify about and compact
    *                  expired entries (others are ignored)
* @param subscriber the subscriber that is notified of various entries being expired
* @throws IOException thrown if there was an issue with reading or writing to a file
* @throws ClassNotFoundException thrown if there is an issue deserializing the key for an entry
*/
private void compactSingleFile(int scheduledFile, boolean isLogFile, CompactionExpirationSubscriber subscriber,
long currentTimeMilliseconds) throws IOException, ClassNotFoundException {
assert scheduledFile >= 0;
if (subscriber == null) {
log.tracef("Compacting file %d isLogFile %b", scheduledFile, Boolean.valueOf(isLogFile));
} else {
log.tracef("Removing expired entries from file %d isLogFile %b", scheduledFile, Boolean.valueOf(isLogFile));
}
int scheduledOffset = 0;
// Store expired entries to remove after we update the index
List<EntryPosition> expiredTemp = subscriber != null ? new ArrayList<>() : null;
List<EntryRecord> expiredIndex = subscriber != null ? new ArrayList<>() : null;
FileProvider.Handle handle = fileProvider.getFile(scheduledFile);
if (handle == null) {
throw new IllegalStateException("Compactor should not get deleted file for compaction!");
}
try {
AggregateCompletionStage<Void> aggregateCompletionStage = CompletionStages.aggregateCompletionStage();
while (!clearSignal.get() && !terminateSignal) {
EntryHeader header = EntryRecord.readEntryHeader(handle, scheduledOffset);
if (header == null) {
break;
}
byte[] serializedKey = EntryRecord.readKey(handle, header, scheduledOffset);
if (serializedKey == null) {
throw new IllegalStateException("End of file reached when reading key on "
+ handle.getFileId() + ":" + scheduledOffset);
}
Object key = marshaller.objectFromByteBuffer(serializedKey);
int segment = keyPartitioner.getSegment(key);
int valueLength = header.valueLength();
int indexedOffset = valueLength > 0 ? scheduledOffset : ~scheduledOffset;
// Whether to drop the entire index (this cannot be true if truncate is false)
// We drop all entries by default unless it is a log file as we can't drop any of those since we may
// try to compact a log file multiple times, note modifications to drop variable below should only be to set
// it to false
int prevFile = -1;
int prevOffset = -1;
boolean drop = !isLogFile;
// Whether to truncate the value
boolean truncate = false;
EntryPosition entry = temporaryTable.get(segment, key);
if (entry != null) {
synchronized (entry) {
if (log.isTraceEnabled()) {
log.tracef("Key for %d:%d was found in temporary table on %d:%d",
scheduledFile, scheduledOffset, entry.file, entry.offset);
}
if (entry.file == scheduledFile && entry.offset == indexedOffset) {
long entryExpiryTime = header.expiryTime();
// It's quite unlikely that we would compact a record that is not indexed yet,
// but let's handle that
if (entryExpiryTime >= 0 && entryExpiryTime <= currentTimeMilliseconds) {
// We can only truncate expired entries if this was compacted with purge expire and this entry
// isn't a removed marker
if (expiredTemp != null && entry.offset >= 0) {
truncate = true;
expiredTemp.add(entry);
}
} else if (isLogFile) {
// Non expired entry in a log file, just skip it
scheduledOffset += header.totalLength();
continue;
}
} else if (entry.file == scheduledFile && entry.offset == ~scheduledOffset) {
// The temporary table doesn't know how many entries we have for a key, so we shouldn't truncate
// or drop
log.tracef("Key for %d:%d ignored as it was expired but was in temporary table");
scheduledOffset += header.totalLength();
continue;
} else {
truncate = true;
}
}
               // When we have found the entry in the temporary table, it's possible that the delete operation
               // (that was recorded in the temporary table) will arrive at the index after DROPPED - in that case
               // we could remove the entry and the delete would not find it
drop = false;
} else {
log.tracef("Loading from index for key %s", key);
EntryInfo info = index.getInfo(key, segment, serializedKey);
Objects.requireNonNull(info, "No index info found for key: " + key);
if (info.numRecords <= 0) {
throw new IllegalArgumentException("Number of records " + info.numRecords + " for index of key " + key + " should be more than zero!");
}
if (info.file == scheduledFile && info.offset == scheduledOffset) {
assert header.valueLength() > 0;
long entryExpiryTime = header.expiryTime();
// live record with data
if (entryExpiryTime >= 0 && entryExpiryTime <= currentTimeMilliseconds) {
// We can only truncate expired entries if this was compacted with purge expire
if (expiredIndex != null) {
EntryRecord record = index.getRecordEvenIfExpired(key, segment, serializedKey);
truncate = true;
expiredIndex.add(record);
// If there are more entries we cannot drop the index as we need a tombstone
if (info.numRecords > 1) {
drop = false;
}
} else {
// We can't drop an expired entry without notifying, so we write it to the new compacted file
drop = false;
}
} else if (isLogFile) {
// Non expired entry in a log file, just skip it
scheduledOffset += header.totalLength();
continue;
} else {
drop = false;
}
if (log.isTraceEnabled()) {
log.tracef("Is key %s at %d:%d expired? %s, numRecords? %d", key, scheduledFile, scheduledOffset, truncate, info.numRecords);
}
} else if (isLogFile) {
// If entry doesn't match the index we can't touch it when it is a log file
scheduledOffset += header.totalLength();
continue;
} else if (info.file == scheduledFile && info.offset == ~scheduledOffset && info.numRecords > 1) {
// The entry was expired, but we have other records so we can't drop this one or else the index will rebuild incorrectly
drop = false;
} else if (log.isTraceEnabled()) {
log.tracef("Key %s for %d:%d was found in index on %d:%d, %d record => drop", key,
scheduledFile, scheduledOffset, info.file, info.offset, info.numRecords);
}
prevFile = info.file;
prevOffset = info.offset;
}
if (drop) {
if (log.isTraceEnabled()) {
log.tracef("Drop index for key %s, file %d:%d (%s)", key, scheduledFile, scheduledOffset,
header.valueLength() > 0 ? "record" : "tombstone");
}
index.handleRequest(IndexRequest.dropped(segment, key, ByteBufferImpl.create(serializedKey), prevFile, prevOffset, scheduledFile, scheduledOffset));
} else {
if (logFile == null || currentOffset + header.totalLength() > maxFileSize) {
if (logFile != null) {
logFile.close();
completeFile(logFile.fileId, currentOffset, nextExpirationTime);
nextExpirationTime = -1;
}
currentOffset = 0;
logFile = fileProvider.getFileForLog();
log.debugf("Compacting to %d", (Object) logFile.fileId);
}
byte[] serializedValue = null;
EntryMetadata metadata = null;
byte[] serializedInternalMetadata = null;
int entryOffset;
int writtenLength;
if (header.valueLength() > 0 && !truncate) {
if (header.metadataLength() > 0) {
metadata = EntryRecord.readMetadata(handle, header, scheduledOffset);
}
serializedValue = EntryRecord.readValue(handle, header, scheduledOffset);
if (header.internalMetadataLength() > 0) {
serializedInternalMetadata = EntryRecord.readInternalMetadata(handle, header, scheduledOffset);
}
entryOffset = currentOffset;
writtenLength = header.totalLength();
// Update the next expiration time only for entries that are not removed
nextExpirationTime = ExpiryHelper.mostRecentExpirationTime(nextExpirationTime, header.expiryTime());
} else {
entryOffset = ~currentOffset;
writtenLength = header.getHeaderLength() + header.keyLength();
}
EntryRecord.writeEntry(logFile.fileChannel, REUSED_BUFFER, serializedKey, metadata, serializedValue, serializedInternalMetadata, header.seqId(), header.expiryTime());
TemporaryTable.LockedEntry lockedEntry = temporaryTable.replaceOrLock(segment, key, logFile.fileId, entryOffset, scheduledFile, indexedOffset);
if (lockedEntry == null) {
if (log.isTraceEnabled()) {
log.trace("Found entry in temporary table");
}
} else {
boolean update = false;
try {
EntryInfo info = index.getInfo(key, segment, serializedKey);
if (info == null) {
throw new IllegalStateException(String.format(
"%s was not found in index but it was not in temporary table and there's entry on %d:%d", key, scheduledFile, indexedOffset));
} else {
update = info.file == scheduledFile && info.offset == indexedOffset;
}
if (log.isTraceEnabled()) {
log.tracef("In index the key is on %d:%d (%s)", info.file, info.offset, String.valueOf(update));
}
} finally {
if (update) {
temporaryTable.updateAndUnlock(lockedEntry, logFile.fileId, entryOffset);
} else {
temporaryTable.removeAndUnlock(lockedEntry, segment, key);
}
}
}
if (log.isTraceEnabled()) {
log.tracef("Update %d:%d -> %d:%d | %d,%d", scheduledFile, indexedOffset,
logFile.fileId, entryOffset, logFile.fileChannel.position(), logFile.fileChannel.size());
}
IndexRequest indexRequest;
ByteBuffer keyBuffer = ByteBufferImpl.create(serializedKey);
if (isLogFile) {
// When it is a log file we are still keeping the original entry, we are just updating it to say
// it was expired
indexRequest = IndexRequest.update(segment, key, keyBuffer, logFile.fileId, entryOffset, writtenLength);
} else {
// entryFile cannot be used as we have to report the file due to free space statistics
indexRequest = IndexRequest.moved(segment, key, keyBuffer, logFile.fileId, entryOffset, writtenLength,
scheduledFile, indexedOffset);
}
aggregateCompletionStage.dependsOn(index.handleRequest(indexRequest));
currentOffset += writtenLength;
}
scheduledOffset += header.totalLength();
}
if (!clearSignal.get()) {
         // We delay the next operation until all prior moves are done. A move can trigger another
         // compaction before the index has been fully updated. Thus we block any other compaction events
         // until all entries have been moved for this file
CompletionStage<Void> aggregate = aggregateCompletionStage.freeze();
if (!CompletionStages.isCompletedSuccessfully(aggregate)) {
paused = new CompletableFuture<>();
               // We resume after completion. Note that we must complete the paused future inside the compactor
// execution pipeline otherwise we can invoke compactor operations in the wrong thread
aggregate.whenComplete((ignore, t) -> {
resumeAfterPause();
if (t != null) {
log.error("There was a problem moving indexes for compactor with file " + logFile.fileId, t);
}
});
}
}
} finally {
handle.close();
}
if (subscriber != null) {
for (EntryPosition entryPosition : expiredTemp) {
subscriber.onEntryPosition(entryPosition);
}
for (EntryRecord entryRecord : expiredIndex) {
subscriber.onEntryEntryRecord(entryRecord);
}
}
if (isLogFile) {
log.tracef("Finished expiring entries in log file %d, leaving file as is", scheduledFile);
} else if (!terminateSignal && !clearSignal.get()) {
// The deletion must be executed only after the index is fully updated.
log.tracef("Finished compacting %d, scheduling delete", scheduledFile);
// Mark the file for deletion so expiration won't check it
Stats stats = fileStats.get(scheduledFile);
if (stats != null) {
stats.markForDeletion();
}
index.deleteFileAsync(scheduledFile);
} else {
log.tracef("Not doing anything to compacted file %d as either the terminate clear signal were set", scheduledFile);
}
}
static class Stats {
private final AtomicInteger free;
private volatile int total;
private volatile long nextExpirationTime;
      /* A file is not 'completed' while it is still being loaded.
         Files created by the log appender/compactor are completed as soon as they are closed.
         A file cannot be scheduled for compaction until it is completed. */
private volatile boolean completed = false;
private final AtomicBoolean scheduled = new AtomicBoolean();
private boolean markedForDeletion = false;
private Stats(int total, int free, long nextExpirationTime) {
this.free = new AtomicInteger(free);
this.total = total;
this.nextExpirationTime = nextExpirationTime;
}
public int getTotal() {
return total;
}
public void setTotal(int total) {
this.total = total;
}
public boolean addFree(int size, double compactionThreshold) {
int free = this.free.addAndGet(size);
return readyToBeScheduled(compactionThreshold, free);
}
public int getFree() {
return free.get();
}
public long getNextExpirationTime() {
return nextExpirationTime;
}
public void setNextExpirationTime(long nextExpirationTime) {
this.nextExpirationTime = nextExpirationTime;
}
public boolean readyToBeScheduled(double compactionThreshold, int free) {
int total = this.total;
// Note setScheduled must be last as it changes state
return completed && total >= 0 && free >= total * compactionThreshold && setScheduled();
}
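      // Worked example (illustrative): with total = 1_000 bytes and a
      // compactionThreshold of 0.5, the file becomes eligible once free >= 500;
      // setScheduled() then claims it exactly once even under concurrent callers.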
public boolean isScheduled() {
return scheduled.get();
}
public boolean setScheduled() {
return !scheduled.getAndSet(true);
}
public boolean isCompleted() {
return completed;
}
public void setCompleted() {
this.completed = true;
}
public void markForDeletion() {
this.markedForDeletion = true;
}
public boolean markedForDeletion() {
return this.markedForDeletion;
}
@Override
public String toString() {
return "Stats{" +
"free=" + free +
", total=" + total +
", nextExpirationTime=" + nextExpirationTime +
", completed=" + completed +
", scheduled=" + scheduled +
", markedForDeletion=" + markedForDeletion +
'}';
}
}
}
| 37,116
| 45.338327
| 181
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/EntryPosition.java
|
package org.infinispan.persistence.sifs;
/**
* File-offset pair
*
* @author Radim Vansa <rvansa@redhat.com>
*/
class EntryPosition {
public final int file;
public final int offset;
public EntryPosition(int file, int offset) {
this.file = file;
this.offset = offset;
}
public boolean equals(long file, int offset) {
return this.file == file && this.offset == offset;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
      if (!(o instanceof EntryPosition)) return false;
      EntryPosition entryPosition = (EntryPosition) o;
      return file == entryPosition.file && offset == entryPosition.offset;
}
@Override
public int hashCode() {
int result = file;
result = 31 * result + offset;
return result;
}
@Override
public String toString() {
return String.format("[%d:%d]", file, offset);
}
}
| 993
| 20.608696
| 67
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/package-info.java
|
/**
* Soft Index {@link org.infinispan.persistence.spi.AdvancedLoadWriteStore}.
*
* @api.public
*/
package org.infinispan.persistence.sifs;
| 144
| 19.714286
| 76
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/Log.java
|
package org.infinispan.persistence.sifs;
import java.io.IOException;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.persistence.spi.PersistenceException;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.Logger;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
* This module reserves range 29001 - 29500
*/
@MessageLogger(projectCode = "ISPN")
public interface Log extends BasicLogger {
@Message(value = "Max size of index node (%d) is limited to 32767 bytes.", id = 29001)
CacheConfigurationException maxNodeSizeLimitedToShort(int maxNodeSize);
@Message(value = "Min size of index node (%d) must be less or equal to max size (%d).", id = 29002)
CacheConfigurationException minNodeSizeMustBeLessOrEqualToMax(int minNodeSize, int maxNodeSize);
@Message(value = "Calculation of size has been interrupted.", id = 29003)
PersistenceException sizeCalculationInterrupted(@Cause InterruptedException e);
// @LogMessage(level = Logger.Level.ERROR)
// @Message(value = "Failed processing task for key %s", id = 29004)
// void failedProcessingTask(Object key, @Cause Exception e);
// @LogMessage(level = Logger.Level.ERROR)
// @Message(value = "Iteration was interrupted.", id = 29005)
// void iterationInterrupted(@Cause InterruptedException e);
@LogMessage(level = Logger.Level.WARN)
@Message(value = "Cannot truncate index", id = 29006)
void cannotTruncateIndex(@Cause IOException e);
@LogMessage(level = Logger.Level.ERROR)
@Message(value = "Unexpected error in index updater thread.", id = 29007)
void errorInIndexUpdater(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(value = "Failed to close the index file.", id = 29008)
void failedToCloseIndex(@Cause IOException e);
@LogMessage(level = Logger.Level.ERROR)
@Message(value = "Unexpected error in data compactor.", id = 29009)
void compactorFailed(@Cause Throwable e);
@LogMessage(level = Logger.Level.ERROR)
@Message(value = "Cannot close/delete data file %d.", id = 290010)
void cannotCloseDeleteFile(int fileId, @Cause IOException e);
@LogMessage(level = Logger.Level.ERROR)
@Message(value = "Cannot close data file.", id = 29011)
void cannotCloseFile(@Cause IOException e);
@Message(value = "Compaction threshold (%f) should be between 0 (exclusively) and 1 (inclusively).", id = 29012)
CacheConfigurationException invalidCompactionThreshold(double value);
@Message(value = "Cannot open index on %s", id = 29013)
PersistenceException cannotOpenIndex(String location, @Cause IOException e);
@Message(value = "Interrupted while stopping the store", id = 29014)
PersistenceException interruptedWhileStopping(@Cause InterruptedException e);
@Message(value = "Interrupted while pausing the index for clear.", id = 29015)
PersistenceException interruptedWhileClearing(@Cause InterruptedException e);
@Message(value = "Cannot clear/reopen index.", id = 29016)
PersistenceException cannotClearIndex(@Cause IOException e);
@Message(value = "Cannot clear data directory.", id = 29017)
PersistenceException cannotClearData(@Cause IOException e);
@Message(value = "The serialized form of key %s is too long (%d); with maxNodeSize=%d bytes you can use only keys serialized to at most %d bytes.", id = 29018)
PersistenceException keyIsTooLong(Object key, int keyLength, int maxNodeSize, int maxKeyLength);
@Message(value = "Cannot load key %s from index.", id = 29019)
PersistenceException cannotLoadKeyFromIndex(Object key, @Cause Exception e);
@Message(value = "Index looks corrupt.", id = 29020)
PersistenceException indexLooksCorrupt(@Cause Exception e);
@LogMessage(level = Logger.Level.ERROR)
@Message(value = "File id %s encountered an exception while compacting, file may be orphaned", id = 29021)
void compactorEncounteredException(@Cause Throwable t, int fileId);
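   // Illustrative sketch (hypothetical message, not part of the interface): new
   // messages follow the same pattern and must stay within this module's reserved
   // id range 29001 - 29500, e.g.:
   //
   //    @LogMessage(level = Logger.Level.WARN)
   //    @Message(value = "Example warning for file %d.", id = 29100)
   //    void exampleWarning(int fileId);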
}
| 4,102
| 43.597826
| 162
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/LogAppender.java
|
package org.infinispan.persistence.sifs;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.util.Util;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.util.concurrent.NonBlockingManager;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.functions.Consumer;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
import io.reactivex.rxjava3.schedulers.Schedulers;
public class LogAppender implements Consumer<LogAppender.WriteOperation> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass(), Log.class);
private final NonBlockingManager nonBlockingManager;
private final Index index;
private final TemporaryTable temporaryTable;
private final Compactor compactor;
private final FileProvider fileProvider;
private final boolean syncWrites;
private final int maxFileSize;
// Used to keep track of how many log requests have been submitted. This way if the blocking thread has consumed
// the same number of log requests it can immediately flush.
private final AtomicInteger submittedCount = new AtomicInteger();
// This variable is null unless sync writes are enabled. When sync writes are enabled this list holds
// all the log requests that should be completed when the disk is ensured to be flushed
private final List<Consumer<LogAppender>> toSyncLogRequests;
// This buffer is used by the log appender thread to avoid allocating buffers per entry written that are smaller
// than the header size
private final java.nio.ByteBuffer REUSED_BUFFER = java.nio.ByteBuffer.allocate(EntryHeader.HEADER_SIZE_11_0);
// These variables are only ever read from the provided executor and rxjava guarantees visibility
// to them, so they don't need to be volatile or synchronized
private int currentOffset = 0;
private long seqId = 0;
private int receivedCount = 0;
private List<LogRequest> delayedLogRequests;
private FileProvider.Log logFile;
private long nextExpirationTime = -1;
// This is volatile as it can be read from different threads when submitting
private volatile FlowableProcessor<LogRequest> requestProcessor;
// This is only accessed by the requestProcessor thread
private FlowableProcessor<WriteOperation> writeProcessor;
// This is only accessed by the writeProcessor thread
private FlowableProcessor<Consumer<LogAppender>> completionProcessor;
public LogAppender(NonBlockingManager nonBlockingManager, Index index,
TemporaryTable temporaryTable, Compactor compactor,
FileProvider fileProvider, boolean syncWrites, int maxFileSize) {
this.nonBlockingManager = nonBlockingManager;
this.index = index;
this.temporaryTable = temporaryTable;
this.compactor = compactor;
this.fileProvider = fileProvider;
this.syncWrites = syncWrites;
this.maxFileSize = maxFileSize;
this.toSyncLogRequests = syncWrites ? new ArrayList<>() : null;
}
public synchronized void start(Executor executor) {
assert requestProcessor == null;
writeProcessor = UnicastProcessor.create();
writeProcessor.observeOn(Schedulers.from(executor))
.subscribe(this, e -> log.warn("Exception encountered while performing write log request ", e));
completionProcessor = UnicastProcessor.create();
completionProcessor.observeOn(nonBlockingManager.asScheduler())
            .subscribe(this::complete, e -> log.warn("Exception encountered while completing write log request", e));
// Need to be serialized in case if we receive requests from concurrent threads
requestProcessor = UnicastProcessor.<LogRequest>create().toSerialized();
requestProcessor.subscribe(this::callerAccept,
e -> log.warn("Exception encountered while handling log request for log appender", e), () -> {
writeProcessor.onComplete();
writeProcessor = null;
completionProcessor.onComplete();
completionProcessor = null;
if (logFile != null) {
Util.close(logFile);
// add the current appended file - note this method will fail if it is already present, which will
// happen if there are some free entries
compactor.addFreeFile(logFile.fileId, (int) fileProvider.getFileSize(logFile.fileId), 0, nextExpirationTime);
logFile = null;
}
});
}
public synchronized void stop() {
assert requestProcessor != null;
requestProcessor.onComplete();
requestProcessor = null;
}
void handleRequestCompletion(LogRequest request) {
int offset = request.getSerializedValue() == null ? ~request.getFileOffset() : request.getFileOffset();
temporaryTable.set(request.getSement(), request.getKey(), request.getFile(), offset);
IndexRequest indexRequest = IndexRequest.update(request.getSement(), request.getKey(), request.getSerializedKey(),
request.getFile(), offset, request.length());
request.setIndexRequest(indexRequest);
index.handleRequest(indexRequest);
completeRequest(request);
}
static class WriteOperation implements Consumer<LogAppender> {
private final LogRequest logRequest;
private final java.nio.ByteBuffer serializedKey;
private final java.nio.ByteBuffer serializedMetadata;
private final java.nio.ByteBuffer serializedValue;
private final java.nio.ByteBuffer serializedInternalMetadata;
private WriteOperation(LogRequest logRequest, java.nio.ByteBuffer serializedKey,
java.nio.ByteBuffer serializedMetadata, java.nio.ByteBuffer serializedValue,
java.nio.ByteBuffer serializedInternalMetadata) {
this.logRequest = logRequest;
this.serializedKey = serializedKey;
this.serializedMetadata = serializedMetadata;
this.serializedValue = serializedValue;
this.serializedInternalMetadata = serializedInternalMetadata;
}
static WriteOperation fromLogRequest(LogRequest logRequest) {
return new WriteOperation(logRequest, fromISPNByteBuffer(logRequest.getSerializedKey()),
fromISPNByteBuffer(logRequest.getSerializedMetadata()),
fromISPNByteBuffer(logRequest.getSerializedValue()),
fromISPNByteBuffer(logRequest.getSerializedInternalMetadata()));
}
static java.nio.ByteBuffer fromISPNByteBuffer(ByteBuffer byteBuffer) {
if (byteBuffer == null) {
return null;
}
return java.nio.ByteBuffer.wrap(byteBuffer.getBuf(), byteBuffer.getOffset(), byteBuffer.getLength());
}
// This method isn't required, here solely to avoid allocating an additional Consumer for the non sync case
@Override
public void accept(LogAppender appender) throws Throwable {
appender.handleRequestCompletion(logRequest);
}
}
/**
* Clears all the log entries returning a stage when the completion is done. Note that after the clear is complete
* this appender will also be paused. To resume it callers must ensure they invoke {@link #resume()} to restart
* the appender
* @return a stage that when complete the log will be cleared and this appender is paused
*/
public CompletionStage<Void> clearAndPause() {
LogRequest clearRequest = LogRequest.clearRequest();
requestProcessor.onNext(clearRequest);
return clearRequest;
}
public CompletionStage<Void> pause() {
LogRequest pauseRequest = LogRequest.pauseRequest();
requestProcessor.onNext(pauseRequest);
return pauseRequest;
}
public CompletionStage<Void> resume() {
LogRequest resumeRequest = LogRequest.resumeRequest();
requestProcessor.onNext(resumeRequest);
return resumeRequest;
}
public <K, V> CompletionStage<Void> storeRequest(int segment, MarshallableEntry<K, V> entry) {
LogRequest storeRequest = LogRequest.storeRequest(segment, entry);
requestProcessor.onNext(storeRequest);
return storeRequest;
}
public CompletionStage<Boolean> deleteRequest(int segment, Object key, ByteBuffer serializedKey) {
LogRequest deleteRequest = LogRequest.deleteRequest(segment, key, serializedKey);
requestProcessor.onNext(deleteRequest);
return deleteRequest.thenCompose(v -> cast(deleteRequest.getIndexRequest()));
}
private static <I> CompletionStage<I> cast(CompletionStage stage) {
return (CompletionStage<I>) stage;
}
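   // Illustrative usage sketch (caller code assumed, not part of this class):
   //
   //    appender.storeRequest(segment, entry)
   //            .thenRun(() -> System.out.println("entry appended and indexed"));
   //    appender.deleteRequest(segment, key, serializedKey)
   //            .thenAccept(removed -> System.out.println("previous entry existed: " + removed));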
/**
* This method is invoked for every request sent via {@link #storeRequest(int, MarshallableEntry)},
    * {@link #deleteRequest(int, Object, ByteBuffer)} and {@link #clearAndPause()}. Note that this method is only
    * invoked by one thread at a time, with visibility guarantees provided by rxjava.
* @param request the log request
*/
private void callerAccept(LogRequest request) {
if (request.isPause()) {
delayedLogRequests = new ArrayList<>();
// This request is created in the same thread - so there can be no dependents
request.complete(null);
return;
} else if (request.isResume()) {
delayedLogRequests.forEach(this::sendToWriteProcessor);
delayedLogRequests = null;
// This request is created in the same thread - so there can be no dependents
request.complete(null);
return;
} else if (request.isClear()) {
assert delayedLogRequests == null;
delayedLogRequests = new ArrayList<>();
} else if (delayedLogRequests != null) {
// We were paused - so enqueue the request for later
delayedLogRequests.add(request);
return;
}
sendToWriteProcessor(request);
}
private void sendToWriteProcessor(LogRequest request) {
// Write requests must be synced - so keep track of count to compare later
if (syncWrites && request.getKey() != null) {
submittedCount.incrementAndGet();
}
writeProcessor.onNext(WriteOperation.fromLogRequest(request));
}
@Override
public void accept(WriteOperation writeOperation) {
LogRequest actualRequest = writeOperation.logRequest;
try {
if (logFile == null) {
logFile = fileProvider.getFileForLog();
log.tracef("Appending records to %s", logFile.fileId);
}
if (actualRequest.isClear()) {
logFile.close();
completePendingLogRequests();
nextExpirationTime = -1;
currentOffset = 0;
logFile = null;
completeRequest(actualRequest);
return;
}
int actualLength = actualRequest.length();
if (currentOffset != 0 && currentOffset + actualLength > maxFileSize) {
// switch to next file
logFile.close();
completePendingLogRequests();
final int fileId = logFile.fileId;
final int offset = currentOffset;
final long exp = nextExpirationTime;
// Have to schedule the compaction after all other log appender operations are complete and register their
            // index updates. Then we can do a sync index call to ensure the compactor is run after all updates are done
completionProcessor.onNext(la -> la.index.ensureRunOnLast(() -> compactor.completeFile(fileId, offset, exp)));
logFile = fileProvider.getFileForLog();
nextExpirationTime = -1;
currentOffset = 0;
log.tracef("Appending records to %s", logFile.fileId);
}
long seqId = nextSeqId();
log.tracef("Appending record to %s:%s", logFile.fileId, currentOffset);
nextExpirationTime = ExpiryHelper.mostRecentExpirationTime(nextExpirationTime, actualRequest.getExpiration());
EntryRecord.writeEntry(logFile.fileChannel, REUSED_BUFFER, writeOperation.serializedKey,
writeOperation.serializedMetadata, writeOperation.serializedInternalMetadata,
writeOperation.serializedValue, seqId, actualRequest.getExpiration(), actualRequest.getCreated(),
actualRequest.getLastUsed());
actualRequest.setFile(logFile.fileId);
actualRequest.setFileOffset(currentOffset);
if (!syncWrites) {
completionProcessor.onNext(writeOperation);
} else {
// This cannot be null when sync writes is true
toSyncLogRequests.add(la -> la.handleRequestCompletion(actualRequest));
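            // Flush once this thread has caught up with every submitted request, or once the
            // pending batch reaches a fixed cap (1000) to bound latency and memory usage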
if (submittedCount.get() == ++receivedCount || toSyncLogRequests.size() == 1000) {
logFile.fileChannel.force(false);
completePendingLogRequests();
}
}
currentOffset += actualLength;
} catch (Exception e) {
log.debugf("Exception encountered while processing log request %s", actualRequest);
actualRequest.completeExceptionally(e);
}
}
public void complete(Consumer<LogAppender> consumer) throws Throwable {
consumer.accept(this);
}
/**
* Must only be invoked by {@link #accept(WriteOperation)} method.
*/
private void completePendingLogRequests() {
if (toSyncLogRequests != null) {
for (Iterator<Consumer<LogAppender>> iter = toSyncLogRequests.iterator(); iter.hasNext(); ) {
Consumer<LogAppender> consumer = iter.next();
iter.remove();
completionProcessor.onNext(consumer);
}
}
}
public void setSeqId(long seqId) {
this.seqId = seqId;
}
private long nextSeqId() {
return seqId++;
}
private void completeRequest(CompletableFuture<Void> future) {
nonBlockingManager.complete(future, null);
}
}
| 14,333
| 42.436364
| 127
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/IndexRequest.java
|
package org.infinispan.persistence.sifs;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.util.Util;
/**
 * Request for a change to be persisted in the index, or for an operation to be executed by the index updater thread.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
class IndexRequest extends CompletableFuture<Object> {
public enum Type {
UPDATE,
MOVED,
DROPPED,
FOUND_OLD,
CLEAR,
SYNC_REQUEST,
}
private final Type type;
private final int segment;
private final Object key;
// the file and offset are duplicate to those in TemporaryTable because we have to match the CAS requests
private final int file;
private final int offset;
private final int prevFile;
private final int prevOffset;
private final ByteBuffer serializedKey;
private final int size;
private IndexRequest(Type type, int segment, Object key, ByteBuffer serializedKey, int file, int offset, int size, int prevFile, int prevOffset) {
this.type = type;
this.segment = segment;
this.key = key;
this.file = file;
this.offset = offset;
this.prevFile = prevFile;
this.prevOffset = prevOffset;
this.serializedKey = serializedKey;
this.size = size;
}
public static IndexRequest update(int segment, Object key, ByteBuffer serializedKey, int file, int offset, int size) {
return new IndexRequest(Type.UPDATE, segment, Objects.requireNonNull(key), serializedKey, file, offset, size, -1, -1);
}
public static IndexRequest moved(int segment, Object key, ByteBuffer serializedKey, int file, int offset, int size, int prevFile, int prevOffset) {
return new IndexRequest(Type.MOVED, segment, Objects.requireNonNull(key), serializedKey, file, offset, size, prevFile, prevOffset);
}
public static IndexRequest dropped(int segment, Object key, ByteBuffer serializedKey, int file, int offset, int prevFile, int prevOffset) {
return new IndexRequest(Type.DROPPED, segment, Objects.requireNonNull(key), serializedKey, file, offset, -1, prevFile, prevOffset);
}
public static IndexRequest foundOld(int segment, Object key, ByteBuffer serializedKey, int file, int offset, int size) {
return new IndexRequest(Type.FOUND_OLD, segment, Objects.requireNonNull(key), serializedKey, file, offset, size, -1, -1);
}
public static IndexRequest clearRequest() {
return new IndexRequest(Type.CLEAR, -1, null, null, -1, -1, -1, -1, -1);
}
/**
    * Allows for an index request that will be run in the index thread. This can be useful to run something after all
    * pending index updates have been applied.
    * @param runnable what will be run in the index thread after all pending index updates are applied
* @return the request
*/
public static IndexRequest syncRequest(Runnable runnable) {
return new IndexRequest(Type.SYNC_REQUEST, -1, runnable, null, -1, -1, -1, -1, -1);
}
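   // Illustrative usage (assumed caller): schedule work to run only after every
   // pending index update submitted so far has been applied:
   //
   //    index.handleRequest(IndexRequest.syncRequest(
   //          () -> log.trace("All prior index updates have been applied")));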
public Type getType() {
return type;
}
public int getSegment() {
return segment;
}
public Object getKey() {
return key;
}
public long getPrevFile() {
return prevFile;
}
public int getPrevOffset() {
return prevOffset;
}
public ByteBuffer getSerializedKey() {
return serializedKey;
}
public int getFile() {
return file;
}
public int getOffset() {
return offset;
}
public int getSize() {
return size;
}
@Override
public String toString() {
return "IndexRequest{" +
"key=" + Util.toStr(key) +
", serializedKey=" + serializedKey +
", cacheSegment=" + segment +
", file=" + file +
", offset=" + offset +
", prevFile=" + prevFile +
", prevOffset=" + prevOffset +
", size=" + size +
", type=" + type +
'}';
}
}
| 3,999
| 30.25
| 150
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/EntryMetadata.java
|
package org.infinispan.persistence.sifs;
import org.infinispan.commons.io.ByteBuffer;
/**
* Object to hold metadata bytes and timestamps.
*
* @author Ryan Emerson
* @since 10.0
*/
public class EntryMetadata {
static final int TIMESTAMP_BYTES = 8 + 8;
private final byte[] metadataBytes;
private final long created;
private final long lastUsed;
public EntryMetadata(byte[] metadataBytes, long created, long lastUsed) {
this.metadataBytes = metadataBytes;
this.created = created;
this.lastUsed = lastUsed;
}
public byte[] getBytes() {
return metadataBytes;
}
public long getCreated() {
return created;
}
public long getLastUsed() {
return lastUsed;
}
public int length() {
return metadataBytes.length + TIMESTAMP_BYTES;
}
static short size(ByteBuffer buffer) {
return (short) (buffer == null ? 0 : buffer.getLength() + TIMESTAMP_BYTES);
}
static short size(java.nio.ByteBuffer buffer) {
return (short) (buffer == null ? 0 : buffer.remaining() + TIMESTAMP_BYTES);
}
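   // Worked example (illustrative): TIMESTAMP_BYTES is 16 (two 8-byte longs for the
   // created and lastUsed timestamps), so a 10-byte metadata buffer reports size 26.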
}
| 1,090
| 21.265306
| 81
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/TemporaryTable.java
|
package org.infinispan.persistence.sifs;
import java.util.AbstractMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.IntConsumer;
import org.infinispan.commons.util.IntSet;
import org.infinispan.util.logging.LogFactory;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Table holding the entry positions in the log before they are persisted to the index.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
public class TemporaryTable {
private static final Log log = LogFactory.getLog(TemporaryTable.class, Log.class);
private final AtomicReferenceArray<ConcurrentMap<Object, Entry>> table;
public TemporaryTable(int numSegments) {
table = new AtomicReferenceArray<>(numSegments);
}
public int getSegmentMax() {
return table.length();
}
public void addSegments(IntSet segments) {
segments.forEach((IntConsumer) segment -> table.compareAndSet(segment, null, new ConcurrentHashMap<>()));
}
public void removeSegments(IntSet segments) {
segments.forEach((IntConsumer) segment -> table.set(segment, null));
}
public boolean set(int segment, Object key, int file, int offset) {
ConcurrentMap<Object, Entry> map = table.get(segment);
if (map == null) {
return false;
}
for (; ; ) {
Entry entry = map.putIfAbsent(key, new Entry(file, offset, false));
if (entry != null) {
synchronized (entry) {
if (entry.isRemoved()) {
continue;
} else if (entry.isLocked()) {
try {
if (log.isTraceEnabled()) {
log.tracef("Waiting for lock on %s", key);
}
entry.wait();
continue;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IllegalStateException("Unexpected interruption!", e);
}
}
entry.update(file, offset);
break;
}
} else {
break;
}
}
return true;
}
public LockedEntry replaceOrLock(int segment, Object key, int file, int offset, int prevFile, int prevOffset) {
ConcurrentMap<Object, Entry> map = table.get(segment);
if (map == null) {
return null;
}
for (;;) {
Entry lockedEntry = new Entry(-1, -1, true);
Entry entry = map.putIfAbsent(key, lockedEntry);
if (entry != null) {
synchronized (entry) {
if (entry.isRemoved()) {
continue;
}
if (entry.isLocked()) {
throw new IllegalStateException("Unexpected double locking");
}
if (entry.getFile() == prevFile && entry.getOffset() == prevOffset) {
entry.update(file, offset);
}
return null;
}
} else {
return lockedEntry;
}
}
}
public void updateAndUnlock(LockedEntry lockedEntry, int file, int offset) {
Entry entry = (Entry) lockedEntry;
synchronized (entry) {
entry.file = file;
entry.offset = offset;
entry.locked = false;
entry.notifyAll();
}
}
public void removeAndUnlock(LockedEntry lockedEntry, int segment, Object key) {
Entry entry = (Entry) lockedEntry;
synchronized (entry) {
ConcurrentMap<Object, Entry> map = table.get(segment);
if (map != null) {
map.remove(key);
}
entry.setRemoved(true);
entry.notifyAll();
}
}
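   // --- Illustrative sketch (editor's addition) of the compactor-style protocol ---
   // replaceOrLock() either CASes the stored position from (prevFile, prevOffset) to
   // the new one, or, when the key is absent, returns a locked placeholder that must
   // later be updated or removed to release any waiting readers/writers.
   static void exampleCompactorMove(TemporaryTable table, int segment, Object key) {
      // Hypothetical positions: the entry moves from file 3:8192 to file 4:0.
      LockedEntry locked = table.replaceOrLock(segment, key, 4, 0, 3, 8192);
      if (locked != null) {
         // Key was absent from the table; publish the new position and unlock.
         table.updateAndUnlock(locked, 4, 0);
      }
   }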
public EntryPosition get(int segment, Object key) {
ConcurrentMap<Object, Entry> map = table.get(segment);
if (map == null) {
return null;
}
Entry entry = map.get(key);
if (entry == null) {
return null;
}
synchronized (entry) {
// when the entry is locked, it means that it was not in the table before
// and it's protected against writes, but its value is not up-to-date
if (entry.isLocked()) {
return null;
}
return new EntryPosition(entry.getFile(), entry.getOffset());
}
}
public void clear() {
for (int i = 0; i < table.length(); ++i) {
ConcurrentMap<Object, Entry> map = table.get(i);
if (map != null) {
map.clear();
}
}
}
public void removeConditionally(int segment, Object key, int file, int offset) {
ConcurrentMap<Object, Entry> map = table.get(segment);
if (map == null) {
return;
}
Entry tempEntry = map.get(key);
if (tempEntry != null) {
synchronized (tempEntry) {
if (tempEntry.isLocked()) {
return;
}
if (tempEntry.getFile() == file && tempEntry.getOffset() == offset) {
map.remove(key, tempEntry);
tempEntry.setRemoved(true);
}
}
}
}
private static class Entry extends LockedEntry {
private int file;
private int offset;
private boolean locked;
private boolean removed = false;
Entry(int file, int offset, boolean locked) {
this.file = file;
this.offset = offset;
this.locked = locked;
}
public int getFile() {
return file;
}
public int getOffset() {
return offset;
}
public void update(int currentFile, int currentOffset) {
this.file = currentFile;
this.offset = currentOffset;
}
public boolean isRemoved() {
return removed;
}
public void setRemoved(boolean removed) {
this.removed = removed;
}
public boolean isLocked() {
return locked;
}
}
public abstract static class LockedEntry {
private LockedEntry() {}
}
   Flowable<Map.Entry<Object, EntryPosition>> publish(IntSet segments) {
return Flowable.fromIterable(segments)
.flatMap(segment -> {
ConcurrentMap<Object, Entry> map = table.get(segment);
if (map == null) {
return Flowable.empty();
}
return Flowable.fromIterable(map.entrySet())
.filter(entry -> !entry.getValue().isLocked())
.map(entry -> new AbstractMap.SimpleImmutableEntry<>(entry.getKey(),
new EntryPosition(entry.getValue().getFile(), entry.getValue().getOffset())));
});
}
}
| 6,832
| 29.368889
| 114
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/EntryRecord.java
|
package org.infinispan.persistence.sifs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.infinispan.commons.util.Util;
/**
 * Helper for reading/writing entries to a file.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
public class EntryRecord {
private final EntryHeader header;
private final byte[] key;
private byte[] value;
private EntryMetadata meta;
private byte[] internalMetadata;
EntryRecord(EntryHeader header, byte[] key) {
this.header = header;
this.key = key;
}
public EntryHeader getHeader() {
return header;
}
public byte[] getKey() {
return key;
}
public byte[] getMetadata() {
return meta == null ? null : meta.getBytes();
}
public byte[] getInternalMetadata() {
return internalMetadata;
}
public byte[] getValue() {
return value;
}
public long getCreated() {
return meta == null ? -1 : meta.getCreated();
}
public long getLastUsed() {
return meta == null ? -1 : meta.getLastUsed();
}
@Override
public String toString() {
return "EntryRecord{" +
"header=" + header +
", key=" + Util.printArray(key) +
", value=" + Util.printArray(value) +
", meta=" + meta +
", internalMetadata=" + Util.printArray(internalMetadata) +
'}';
}
public EntryRecord loadMetadataAndValue(FileProvider.Handle handle, int offset, boolean saveValue) throws IOException {
loadMetadata(handle, offset);
byte[] readValue = null;
if (value == null) {
readValue = readValue(handle, header, offset);
if (saveValue) {
value = readValue;
}
}
if (internalMetadata == null && header.internalMetadataLength() > 0) {
internalMetadata = readInternalMetadata(handle, header, offset);
}
if (value == null) {
assert !saveValue;
assert readValue != null;
EntryRecord copyRecord = new EntryRecord(header, key);
copyRecord.meta = meta;
copyRecord.internalMetadata = internalMetadata;
copyRecord.value = readValue;
return copyRecord;
}
return this;
}
public EntryRecord loadMetadata(FileProvider.Handle handle, int offset) throws IOException {
if (meta == null && header.metadataLength() > 0) {
meta = readMetadata(handle, header, offset);
}
return this;
}
public static EntryHeader readEntryHeader(FileProvider.Handle handle, long offset) throws IOException {
ByteBuffer header = ByteBuffer.allocate(EntryHeader.HEADER_SIZE_11_0);
if (read(handle, header, offset, EntryHeader.HEADER_SIZE_11_0) < 0) {
return null;
}
header.flip();
try {
return new EntryHeader(header);
} catch (IllegalStateException e) {
throw new IllegalStateException("Error reading from " + handle.getFileId() + ":" + offset, e);
}
}
public static EntryHeader read10_1EntryHeader(FileProvider.Handle handle, long offset) throws IOException {
ByteBuffer header = ByteBuffer.allocate(EntryHeader.HEADER_SIZE_10_1);
if (read(handle, header, offset, EntryHeader.HEADER_SIZE_10_1) < 0) {
return null;
}
header.flip();
try {
return new EntryHeader(header, true);
} catch (IllegalStateException e) {
throw new IllegalStateException("Error reading from " + handle.getFileId() + ":" + offset, e);
}
}
public static byte[] readKey(FileProvider.Handle handle, EntryHeader header, long offset) throws IOException {
byte[] key = new byte[header.keyLength()];
if (read(handle, ByteBuffer.wrap(key), offset + header.getHeaderLength(), header.keyLength()) < 0) {
return null;
}
return key;
}
public static EntryMetadata readMetadata(FileProvider.Handle handle, EntryHeader header, long offset) throws IOException {
assert header.metadataLength() > 0;
offset += header.getHeaderLength() + header.keyLength();
int metaLength = header.metadataLength() - EntryMetadata.TIMESTAMP_BYTES;
assert metaLength > 0;
byte[] metadata = new byte[metaLength];
if (read(handle, ByteBuffer.wrap(metadata), offset, metaLength) < 0) {
throw new IllegalStateException("End of file reached when reading metadata on "
+ handle.getFileId() + ":" + offset + ": " + header);
}
offset += metaLength;
ByteBuffer buffer = ByteBuffer.allocate(EntryMetadata.TIMESTAMP_BYTES);
if (read(handle, buffer, offset, EntryMetadata.TIMESTAMP_BYTES) < 0) {
throw new IllegalStateException("End of file reached when reading timestamps on "
+ handle.getFileId() + ":" + offset + ": " + header);
}
buffer.flip();
return new EntryMetadata(metadata, buffer.getLong(), buffer.getLong());
}
public static byte[] readInternalMetadata(FileProvider.Handle handle, EntryHeader header, long offset) throws IOException {
final int length = header.internalMetadataLength();
assert length > 0;
offset += header.getHeaderLength() + header.keyLength() + header.metadataLength() + header.valueLength();
byte[] metadata = new byte[length];
if (read(handle, ByteBuffer.wrap(metadata), offset, length) < 0) {
throw new IllegalStateException("End of file reached when reading internal metadata on "
+ handle.getFileId() + ":" + offset + ": " + header);
}
return metadata;
}
public static byte[] readValue(FileProvider.Handle handle, EntryHeader header, long offset) throws IOException {
assert header.valueLength() > 0;
byte[] value = new byte[header.valueLength()];
if (read(handle, ByteBuffer.wrap(value), offset + header.getHeaderLength() + header.keyLength() + header.metadataLength(), header.valueLength()) < 0) {
throw new IllegalStateException("End of file reached when reading metadata on "
+ handle.getFileId() + ":" + offset + ": " + header);
}
return value;
}
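   // Editor's note: the offsets used by the readers above imply this on-disk layout:
   //   [header][key][metadata bytes][created+lastUsed timestamps][value][internal metadata]
   // where header.metadataLength() covers both the metadata bytes and the two
   // timestamp longs (EntryMetadata.TIMESTAMP_BYTES).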
private static int read(FileProvider.Handle handle, ByteBuffer buffer, long position, int length) throws IOException {
int read = 0;
do {
int newRead = handle.read(buffer, position + read);
if (newRead < 0) {
return -1;
}
read += newRead;
} while (read < length);
return read;
}
public static void writeEntry(FileChannel fileChannel, ByteBuffer reusedBuffer, byte[] serializedKey, EntryMetadata metadata, byte[] serializedValue,
byte[] serializedInternalMetadata, long seqId, long expiration) throws IOException {
assert reusedBuffer.limit() == EntryHeader.HEADER_SIZE_11_0;
assert reusedBuffer.position() == 0;
EntryHeader.writeHeader(reusedBuffer, (short) serializedKey.length, metadata == null ? 0 : (short) metadata.length(),
serializedValue == null ? 0 : serializedValue.length,
serializedInternalMetadata == null ? 0 : (short) serializedInternalMetadata.length,
seqId, expiration);
reusedBuffer.flip();
write(fileChannel, reusedBuffer);
reusedBuffer.position(0);
write(fileChannel, ByteBuffer.wrap(serializedKey));
if (metadata != null) {
write(fileChannel, ByteBuffer.wrap(metadata.getBytes()));
writeTimestamps(fileChannel, reusedBuffer, metadata.getCreated(), metadata.getLastUsed());
}
if (serializedValue != null) {
write(fileChannel, ByteBuffer.wrap(serializedValue));
}
if (serializedInternalMetadata != null) {
write(fileChannel, ByteBuffer.wrap(serializedInternalMetadata));
}
}
static void writeEntry(FileChannel fileChannel, ByteBuffer reusedBuffer, ByteBuffer serializedKey,
ByteBuffer serializedMetadata,
ByteBuffer serializedInternalMetadata,
ByteBuffer serializedValue,
long seqId, long expiration, long created, long lastUsed) throws IOException {
assert reusedBuffer.limit() == EntryHeader.HEADER_SIZE_11_0;
assert reusedBuffer.position() == 0;
EntryHeader.writeHeader(reusedBuffer, (short) serializedKey.remaining(), EntryMetadata.size(serializedMetadata),
serializedValue == null ? 0 : serializedValue.remaining(),
serializedInternalMetadata == null ? 0 : (short) serializedInternalMetadata.remaining(),
seqId, expiration);
reusedBuffer.flip();
write(fileChannel, reusedBuffer);
reusedBuffer.position(0);
write(fileChannel, serializedKey);
if (serializedMetadata != null) {
write(fileChannel, serializedMetadata);
writeTimestamps(fileChannel, reusedBuffer, created, lastUsed);
}
if (serializedValue != null) {
write(fileChannel, serializedValue);
}
if (serializedInternalMetadata != null) {
write(fileChannel, serializedInternalMetadata);
}
}
private static void writeTimestamps(FileChannel fileChannel, ByteBuffer reusedBuffer, long created, long lastUsed) throws IOException {
assert reusedBuffer.position() == 0;
int previousLimit = reusedBuffer.limit();
assert previousLimit >= EntryMetadata.TIMESTAMP_BYTES;
reusedBuffer.putLong(created);
reusedBuffer.putLong(lastUsed);
reusedBuffer.flip();
write(fileChannel, reusedBuffer);
// Reset the buffer to what it was before
reusedBuffer.position(0);
reusedBuffer.limit(previousLimit);
}
private static void write(FileChannel fileChannel, ByteBuffer buffer) throws IOException {
while (buffer.hasRemaining()) fileChannel.write(buffer);
}
}
| 9,936
| 37.968627
| 157
|
java
|
null |
infinispan-main/core/src/main/java/org/infinispan/persistence/sifs/NonBlockingSoftIndexFileStore.java
|
package org.infinispan.persistence.sifs;
import static org.infinispan.persistence.PersistenceUtil.getQualifiedLocation;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Path;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.io.ByteBufferFactory;
import org.infinispan.commons.io.ByteBufferImpl;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.AbstractIterator;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.Util;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.container.entries.ExpiryHelper;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.sifs.configuration.SoftIndexFileStoreConfiguration;
import org.infinispan.persistence.sifs.configuration.SoftIndexFileStoreConfigurationBuilder;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.concurrent.ActionSequencer;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Maybe;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
/**
* Local file-based cache store, optimized for write-through use with strong consistency guarantees
* (ability to flush disk operations before returning from the store call).
*
* DESIGN:
* There are three threads operating in the cache-store:
* - LogAppender: Requests to store entries are passed to the LogAppender thread
* via a queue, then the requestor threads wait until LogAppender notifies
* them about the successful store. LogAppender serializes the writes
* into append-only file, writes the offset into TemporaryTable
* and enqueues request to update index into UpdateQueue.
* The append-only files have a limited size; when a file is full,
* a new file is started.
* - IndexUpdater: Reads the UpdateQueue, applies the operation to the B-tree-like
* Index structure (exact description below) and then removes
* the entry from TemporaryTable. When an existing entry in the Index is overwritten,
* the current entry offset is retrieved and IndexUpdater increases
* the unused space statistics in FileStats.
* - Compactor: When a limit of unused space in some file is reached (according
* to FileStats), the Compactor starts reading this file sequentially,
* querying TemporaryTable or Index for the current entry position
* and copying the unchanged entries into another file. For the entries
* that are still valid in the original file, a compare-and-set
* (file-offset based) request is enqueued into UpdateQueue - therefore
* this operation cannot interfere with concurrent writes overwriting
* the entry. Multiple files can be merged into single file during
* compaction.
*
* Structures:
* - TemporaryTable: keeps the records about current entry location until this is
* applied to the Index. Each read request goes to the TemporaryTable,
* if the key is not found here, Index is queried.
* - UpdateQueue: bounded queue (to prevent the TemporaryTable from growing too much) of either
* forced writes (used for regular stores) or compare-and-set writes
* (used by Compactor).
* - FileStats: simple (Concurrent)HashTable with actual file size and amount of unused
* space for each file.
* - Index: B+-tree of IndexNodes. The tree is dropped and built anew if the process
* crashes, so it does not need to flush disk operations. On disk it is kept as a single random-access file, with the free-block list stored in memory.
*
* As IndexUpdater may easily become a bottleneck under heavy load, the IndexUpdater thread,
* UpdateQueue and tree of IndexNodes may be multiplied several times - the Index is divided
* into Segments. Each segment owns keys according to the hashCode() of the key.
*
* The number of entries in an IndexNode is limited by the size it occupies on disk. This size is
* limited by the configurable nodeSize (4096 bytes by default); only when the node
* contains a single (overly long) pivot can it be longer. A key_prefix common to all keys
* in the IndexNode is stored in order to reduce space requirements. For implementation
* reasons the keys are limited to 32kB - this requirement may be circumvented later.
*
* The pivots are not whole keys - a pivot is the shortest part of the key that is greater than all
* left children (but less than or equal to all right children) - let us call this key_part.
* The key_parts are sorted in the IndexNode, naturally. On disk it has this format:
*
* key_prefix_length(2 bytes), key_prefix, num_parts(2 bytes),
* ( key_part_length (2 bytes), key_part, left_child_index_node_offset (8 bytes))+,
* right_child_index_node_offset (8 bytes)
*
* In memory, for every child a SoftReference<IndexNode> is held. When this reference
* is empty (but the offset in the file is set), any reader may load the reference using
* the double-checked locking pattern (synchronized on the reference itself). The node is never
* loaded by multiple threads in parallel, and loading may even block other threads trying to
* read this node.
*
* For each node in memory a RW-lock is held. When the IndexUpdater thread updates
* the Index (modifying some IndexNodes), it prepares a copy of these nodes (already
* stored into the index file). Then it locks only the uppermost node for writing, overwrites
* the references to the new data and unlocks this node. After that the changed nodes are
* traversed from top down, write-locked, and their old record in the index file is released.
* Reader threads crawl the tree from top down, locking the parent node (for reading),
* locking child node and unlocking parent node.
*
* @author Radim Vansa <rvansa@redhat.com>
*/
@ConfiguredBy(SoftIndexFileStoreConfigurationBuilder.class)
public class NonBlockingSoftIndexFileStore<K, V> implements NonBlockingStore<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass(), Log.class);
public static final String PREFIX_10_1 = "";
public static final String PREFIX_11_0 = "ispn.";
public static final String PREFIX_12_0 = "ispn12.";
public static final String PREFIX_LATEST = PREFIX_12_0;
private SoftIndexFileStoreConfiguration configuration;
private TemporaryTable temporaryTable;
private FileProvider fileProvider;
private LogAppender logAppender;
private Index index;
private Compactor compactor;
private Marshaller marshaller;
private ByteBufferFactory byteBufferFactory;
private MarshallableEntryFactory<K, V> marshallableEntryFactory;
private TimeService timeService;
private int maxKeyLength;
private BlockingManager blockingManager;
private ActionSequencer sizeAndClearSequencer;
private KeyPartitioner keyPartitioner;
private InitializationContext ctx;
@Override
public Set<Characteristic> characteristics() {
return EnumSet.of(Characteristic.BULK_READ, Characteristic.SEGMENTABLE, Characteristic.EXPIRATION);
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
temporaryTable.addSegments(segments);
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
temporaryTable.removeSegments(segments);
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
this.ctx = ctx;
keyPartitioner = ctx.getKeyPartitioner();
blockingManager = ctx.getBlockingManager();
// TODO: I don't think we need to use blocking executor here
sizeAndClearSequencer = new ActionSequencer(blockingManager.asExecutor("SIFS-sizeOrClear"),
false, timeService);
configuration = ctx.getConfiguration();
marshaller = ctx.getPersistenceMarshaller();
marshallableEntryFactory = ctx.getMarshallableEntryFactory();
byteBufferFactory = ctx.getByteBufferFactory();
timeService = ctx.getTimeService();
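      // A key must fit into a single index node together with the node's bookkeeping
      // data, hence the reserved space is subtracted (see the key-length limit in the
      // class-level design notes).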
maxKeyLength = configuration.maxNodeSize() - IndexNode.RESERVED_SPACE;
Configuration cacheConfig = ctx.getCache().getCacheConfiguration();
int cacheSegments = cacheConfig.clustering().hash().numSegments();
temporaryTable = new TemporaryTable(cacheSegments);
      temporaryTable.addSegments(IntSets.immutableRangeSet(cacheSegments));
fileProvider = new FileProvider(getDataLocation(), configuration.openFilesLimit(), PREFIX_LATEST,
configuration.maxFileSize());
compactor = new Compactor(ctx.getNonBlockingManager(), fileProvider, temporaryTable, marshaller, timeService,
keyPartitioner, configuration.maxFileSize(), configuration.compactionThreshold(),
blockingManager.asExecutor("sifs-compactor"));
try {
index = new Index(ctx.getNonBlockingManager(), fileProvider, getIndexLocation(), configuration.indexSegments(),
cacheSegments, configuration.minNodeSize(), configuration.maxNodeSize(), temporaryTable, compactor,
timeService);
} catch (IOException e) {
throw log.cannotOpenIndex(configuration.indexLocation(), e);
}
compactor.setIndex(index);
logAppender = new LogAppender(ctx.getNonBlockingManager(), index, temporaryTable, compactor, fileProvider,
configuration.syncWrites(), configuration.maxFileSize());
logAppender.start(blockingManager.asExecutor("sifs-log-processor"));
startIndex();
final AtomicLong maxSeqId = new AtomicLong(0);
return blockingManager.runBlocking(() -> {
boolean migrateData = false;
// we don't destroy the data on startup
// get the old files
FileProvider oldFileProvider = new FileProvider(getDataLocation(), configuration.openFilesLimit(), PREFIX_10_1,
configuration.maxFileSize());
if (oldFileProvider.hasFiles()) {
throw PERSISTENCE.persistedDataMigrationUnsupportedVersion("< 11");
}
oldFileProvider = new FileProvider(getDataLocation(), configuration.openFilesLimit(), PREFIX_11_0,
configuration.maxFileSize());
if (oldFileProvider.hasFiles()) {
try {
index.reset();
} catch (IOException e) {
throw PERSISTENCE.issueEncounteredResettingIndex(ctx.getCache().getName(), e);
}
migrateFromOldFormat(oldFileProvider);
migrateData = true;
} else if (index.load()) {
log.debug("Not building the index - loaded from persisted state");
try {
maxSeqId.set(index.getMaxSeqId());
} catch (IOException e) {
log.debug("Failed to load index. Rebuilding it.");
buildIndex(maxSeqId);
}
} else {
log.debug("Building the index");
try {
index.reset();
} catch (IOException e) {
throw PERSISTENCE.issueEncounteredResettingIndex(ctx.getCache().getName(), e);
}
buildIndex(maxSeqId);
}
if (!migrateData) {
logAppender.setSeqId(maxSeqId.get() + 1);
}
// Compactor may have to write to the index, so it can't be started until after Index has been fully started
compactor.start();
}, "soft-index-start");
}
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
return Flowable.defer(() -> {
log.tracef("Purging expired contents from soft index file store");
UnicastProcessor<MarshallableEntry<K, V>> unicastProcessor = UnicastProcessor.create();
         // Compactor is run asynchronously
compactor.performExpirationCompaction(new CompactorSubscriber(unicastProcessor));
return unicastProcessor;
});
}
private class CompactorSubscriber implements Compactor.CompactionExpirationSubscriber {
private final FlowableProcessor<MarshallableEntry<K, V>> processor;
private CompactorSubscriber(FlowableProcessor<MarshallableEntry<K, V>> processor) {
this.processor = processor;
}
@Override
public void onEntryPosition(EntryPosition entryPosition) throws IOException {
MarshallableEntry<K, V> entry = readValueFromFileOffset(null, entryPosition, true);
assert entry != null : "EntryPosition didn't return a value: " + entryPosition;
processor.onNext(entry);
}
@Override
public void onEntryEntryRecord(EntryRecord entryRecord) {
MarshallableEntry<K, V> entry = entryFromRecord(entryRecord);
processor.onNext(entry);
}
@Override
public void onComplete() {
processor.onComplete();
}
@Override
public void onError(Throwable t) {
processor.onError(t);
}
}
private void migrateFromOldFormat(FileProvider oldFileProvider) {
String cacheName = ctx.getCache().getName();
PERSISTENCE.startMigratingPersistenceData(cacheName);
try {
CompletionStages.join(index.clear());
} catch (CompletionException e) {
throw PERSISTENCE.persistedDataMigrationFailed(cacheName, e.getCause());
}
// Only update the key/value/meta bytes if the default marshaller is configured
boolean transformationRequired = ctx.getGlobalConfiguration().serialization().marshaller() == null;
      try (CloseableIterator<Integer> it = oldFileProvider.getFileIterator()) {
while (it.hasNext()) {
int fileId = it.next();
try (FileProvider.Handle handle = oldFileProvider.getFile(fileId)) {
int offset = 0;
while (true) {
EntryHeader header = EntryRecord.readEntryHeader(handle, offset);
if (header == null) {
//end of file. go to next one
break;
}
MarshallableEntry<K, V> entry = readEntry(handle, header, offset, null, true,
(key, value, meta, internalMeta, created, lastUsed) -> {
if (!transformationRequired) {
return marshallableEntryFactory.create(key, value, meta, internalMeta, created, lastUsed);
}
try {
Object k = unmarshallLegacy(key, false);
Object v = unmarshallLegacy(value, false);
Metadata m = unmarshallLegacy(meta, true);
PrivateMetadata im = internalMeta == null ? null : (PrivateMetadata) ctx.getPersistenceMarshaller().objectFromByteBuffer(internalMeta.getBuf());
return marshallableEntryFactory.create(k, v, m, im, created, lastUsed);
} catch (ClassNotFoundException | IOException e) {
throw new MarshallingException(e);
}
}, false);
int segment = keyPartitioner.getSegment(entry.getKey());
                     // The value bytes are null if the entry expired or was removed (a tombstone); in both cases we issue a delete below.
//noinspection ConstantConditions (entry is not null!)
if (entry.getValueBytes() != null) {
// using the storeQueue (instead of binary copy) to avoid building the index later
CompletionStages.join(logAppender.storeRequest(segment, entry));
} else {
// delete the entry. The file is append only so we can have a put() and later a remove() for the same key
CompletionStages.join(logAppender.deleteRequest(segment, entry.getKey(), entry.getKeyBytes()));
}
offset += header.totalLength();
}
}
// file is read. can be removed.
oldFileProvider.deleteFile(fileId);
}
PERSISTENCE.persistedDataSuccessfulMigrated(cacheName);
} catch (IOException e) {
throw PERSISTENCE.persistedDataMigrationFailed(cacheName, e);
}
}
private <T> T unmarshallLegacy(ByteBuffer buf, boolean allowInternal) throws ClassNotFoundException, IOException {
if (buf == null)
return null;
// Read using raw user marshaller without MarshallUserObject wrapping
Marshaller marshaller = ctx.getPersistenceMarshaller().getUserMarshaller();
try {
return (T) marshaller.objectFromByteBuffer(buf.getBuf(), buf.getOffset(), buf.getLength());
} catch (IllegalArgumentException e) {
// For metadata we need to attempt to read with user-marshaller first in case custom metadata used, otherwise use the persistence marshaller
if (allowInternal) {
return (T) ctx.getPersistenceMarshaller().objectFromByteBuffer(buf.getBuf(), buf.getOffset(), buf.getLength());
}
throw e;
}
}
private void buildIndex(final AtomicLong maxSeqId) {
Flowable<Integer> filePublisher = filePublisher();
CompletionStage<Void> stage = filePublisher.flatMap(outerFile -> {
ByRef.Long nextExpirationTime = new ByRef.Long(-1);
return handleFilePublisher(outerFile, false, false,
(file, offset, size, serializedKey, entryMetadata, serializedValue, serializedInternalMetadata, seqId, expiration) -> {
long prevSeqId;
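                  // Lock-free maximum: retry the CAS until maxSeqId holds at least seqId.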
while (seqId > (prevSeqId = maxSeqId.get()) && !maxSeqId.compareAndSet(prevSeqId, seqId)) {
}
Object key = marshaller.objectFromByteBuffer(serializedKey);
if (log.isTraceEnabled()) {
log.tracef("Loaded %d:%d (seqId %d, expiration %d)", file, offset, seqId, expiration);
}
// Make sure to keep track of the lowest expiration that isn't -1
nextExpirationTime.set(ExpiryHelper.mostRecentExpirationTime(nextExpirationTime.get(), expiration));
int segment = keyPartitioner.getSegment(key);
// We may check the seqId safely as we are the only thread writing to index
if (isSeqIdOld(seqId, segment, key, serializedKey)) {
index.handleRequest(IndexRequest.foundOld(segment, key, ByteBufferImpl.create(serializedKey), file, offset, size));
return null;
}
if (temporaryTable.set(segment, key, file, offset)) {
index.handleRequest(IndexRequest.update(segment, key, ByteBufferImpl.create(serializedKey), file, offset, size));
}
return null;
}).doOnComplete(() -> compactor.completeFile(outerFile, -1, nextExpirationTime.get(), false));
}).ignoreElements().toCompletionStage(null);
CompletionStages.join(stage);
}
private Path getDataLocation() {
return getQualifiedLocation(ctx.getGlobalConfiguration(), configuration.dataLocation(), ctx.getCache().getName(), "data");
}
protected Path getIndexLocation() {
return getQualifiedLocation(ctx.getGlobalConfiguration(), configuration.indexLocation(), ctx.getCache().getName(), "index");
}
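   /**
    * Returns {@code true} when a record with a higher sequence id already exists for the key,
    * i.e. the given {@code seqId} is stale. The temporary table is consulted first, with the
    * index as a fallback; the loop retries when the file is deleted concurrently with the lookup.
    */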
protected boolean isSeqIdOld(long seqId, int segment, Object key, byte[] serializedKey) throws IOException {
      for (;;) {
EntryPosition entry = temporaryTable.get(segment, key);
if (entry == null) {
entry = index.getInfo(key, segment, serializedKey);
}
if (entry == null) {
if (log.isTraceEnabled()) {
log.tracef("Did not found position for %s", key);
}
return false;
} else {
FileProvider.Handle handle = fileProvider.getFile(entry.file);
if (handle == null) {
// the file was deleted after we've looked up temporary table/index
continue;
}
try {
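               // A negative offset marks a tombstone/expired record; bitwise NOT
               // recovers the real file offset so the header can still be read.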
int entryOffset = entry.offset < 0 ? ~entry.offset : entry.offset;
EntryHeader header = EntryRecord.readEntryHeader(handle, entryOffset);
if (header == null) {
throw new IOException("Cannot read " + entry.file + ":" + entryOffset);
}
if (log.isTraceEnabled()) {
log.tracef("SeqId on %d:%d for key %s is %d", entry.file, entry.offset, key, header.seqId());
}
return seqId < header.seqId();
} finally {
handle.close();
}
}
}
}
protected void startIndex() {
// this call is extracted for better testability
index.start(blockingManager.asExecutor("sifs-index"));
}
@Override
public CompletionStage<Void> stop() {
return blockingManager.runBlocking(() -> {
try {
logAppender.stop();
compactor.stopOperations();
compactor = null;
CompletionStages.join(index.stop());
index = null;
fileProvider.stop();
fileProvider = null;
temporaryTable = null;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw log.interruptedWhileStopping(e);
}
}, "soft-index-stop");
}
@Override
public CompletionStage<Boolean> isAvailable() {
// TODO: does this block?
return CompletableFuture.completedFuture(getDataLocation().toFile().exists() &&
getIndexLocation().toFile().exists());
}
@Override
public CompletionStage<Void> clear() {
return sizeAndClearSequencer.orderOnKey(this, () -> {
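         // Ordering matters: pause the appender (no new writes), then the compactor,
         // then wipe the index, the data files and the in-memory table before resuming.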
CompletionStage<Void> chainedStage = logAppender.clearAndPause();
chainedStage = chainedStage.thenCompose(ignore -> compactor.clearAndPause());
chainedStage = chainedStage.thenCompose(ignore -> index.clear());
return blockingManager.thenRunBlocking(chainedStage, () -> {
try {
fileProvider.clear();
} catch (IOException e) {
throw log.cannotClearData(e);
}
temporaryTable.clear();
compactor.resumeAfterClear();
}, "soft-index-file-clear")
.thenCompose(v -> logAppender.resume());
}
);
}
@Override
public CompletionStage<Long> size(IntSet segments) {
return sizeAndClearSequencer.orderOnKey(this, () ->
logAppender.pause()
.thenCompose(v -> {
// Since this is invoked with the logAppender paused it is an exact size
long size = index.approximateSize(segments);
return logAppender.resume().thenApply(ignore -> size);
})
);
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
// Approximation doesn't pause the appender so the index can be slightly out of sync
return CompletableFuture.completedFuture(index.approximateSize(segments));
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
int keyLength = entry.getKeyBytes().getLength();
if (keyLength > maxKeyLength) {
return CompletableFuture.failedFuture(log.keyIsTooLong(entry.getKey(), keyLength, configuration.maxNodeSize(), maxKeyLength));
}
try {
log.tracef("Writing entry for key %s for segment %d", entry.getKey(), segment);
return logAppender.storeRequest(segment, entry);
} catch (Exception e) {
return CompletableFuture.failedFuture(new PersistenceException(e));
}
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
try {
log.tracef("Deleting key %s for segment %d", key, segment);
return logAppender.deleteRequest(segment, key, marshaller.objectToBuffer(key));
} catch (Exception e) {
return CompletableFuture.failedFuture(new PersistenceException(e));
}
}
@Override
public CompletionStage<Boolean> containsKey(int segment, Object key) {
try {
for (;;) {
// TODO: consider storing expiration timestamp in temporary table
EntryPosition entry = temporaryTable.get(segment, key);
if (entry != null) {
if (entry.offset < 0) {
return CompletableFutures.completedFalse();
}
FileProvider.Handle handle = fileProvider.getFile(entry.file);
if (handle != null) {
return blockingManager.supplyBlocking(() -> {
try {
try {
EntryHeader header = EntryRecord.readEntryHeader(handle, entry.offset);
if (header == null) {
throw new IllegalStateException("Error reading from " + entry.file + ":" + entry.offset + " | " + handle.getFileSize());
}
return header.expiryTime() < 0 || header.expiryTime() > timeService.wallClockTime();
} finally {
handle.close();
}
} catch (IOException e) {
throw new CacheException(e);
}
}, "soft-index-containsKey");
}
} else {
EntryPosition position = index.getPosition(key, segment, marshaller.objectToBuffer(key));
return CompletableFutures.booleanStage(position != null);
}
}
} catch (Exception e) {
return CompletableFuture.failedFuture(log.cannotLoadKeyFromIndex(key, e));
}
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
return blockingManager.supplyBlocking(() -> {
log.tracef("Loading key %s for segment %d", key, segment);
try {
for (;;) {
EntryPosition entry = temporaryTable.get(segment, key);
if (entry != null) {
if (entry.offset < 0) {
log.tracef("Entry for key=%s found in temporary table on %d:%d but it is a tombstone", key, entry.file, entry.offset);
return null;
}
MarshallableEntry<K, V> marshallableEntry = readValueFromFileOffset(key, entry);
if (marshallableEntry != null) {
return marshallableEntry;
}
} else {
EntryRecord record = index.getRecord(key, segment, marshaller.objectToBuffer(key));
if (record == null) {
log.tracef("Entry for key=%s not found in index, returning null", key);
return null;
}
return entryFromRecord(record);
}
}
} catch (Exception e) {
throw log.cannotLoadKeyFromIndex(key, e);
}
}, "soft-index-load");
}
private MarshallableEntry<K, V> entryFromRecord(EntryRecord record) {
return marshallableEntryFactory.create(toBuffer(record.getKey()), toBuffer(record.getValue()),
toBuffer(record.getMetadata()), toBuffer(record.getInternalMetadata()), record.getCreated(), record.getLastUsed());
}
private MarshallableEntry<K, V> readValueFromFileOffset(Object key, EntryPosition entry) throws IOException {
return readValueFromFileOffset(key, entry, false);
}
private MarshallableEntry<K, V> readValueFromFileOffset(Object key, EntryPosition entry, boolean includeExpired) throws IOException {
FileProvider.Handle handle = fileProvider.getFile(entry.file);
if (handle != null) {
try {
EntryHeader header = EntryRecord.readEntryHeader(handle, entry.offset);
if (header == null) {
throw new IllegalStateException("Error reading from " + entry.file + ":" + entry.offset + " | " + handle.getFileSize());
}
return readEntry(handle, header, entry.offset, key, false,
(serializedKey, value, meta, internalMeta, created, lastUsed) ->
marshallableEntryFactory.create(serializedKey, value, meta, internalMeta, created, lastUsed),
includeExpired);
} finally {
handle.close();
}
}
return null;
}
private MarshallableEntry<K, V> readEntry(FileProvider.Handle handle, EntryHeader header, int offset,
Object key, boolean nonNull, EntryCreator<K, V> entryCreator, boolean includeExpired)
throws IOException {
if (!includeExpired && header.expiryTime() > 0 && header.expiryTime() <= timeService.wallClockTime()) {
if (log.isTraceEnabled()) {
log.tracef("Entry for key=%s found in temporary table on %d:%d but it is expired", key, handle.getFileId(), offset);
}
return nonNull ?
entryCreator.create(readAndCheckKey(handle, header, offset), null, null, null, -1, -1) :
null;
}
ByteBuffer serializedKey = readAndCheckKey(handle, header, offset);
if (header.valueLength() <= 0) {
if (log.isTraceEnabled()) {
log.tracef("Entry for key=%s found in temporary table on %d:%d but it is a tombstone in log", key, handle.getFileId(), offset);
}
return nonNull ? entryCreator.create(serializedKey, null, null, null, -1, -1) : null;
}
if (log.isTraceEnabled()) {
log.tracef("Entry for key=%s found in temporary table on %d:%d and loaded", key, handle.getFileId(), offset);
}
ByteBuffer value = toBuffer(EntryRecord.readValue(handle, header, offset));
ByteBuffer serializedMetadata;
long created;
long lastUsed;
if (header.metadataLength() > 0) {
EntryMetadata metadata = EntryRecord.readMetadata(handle, header, offset);
serializedMetadata = toBuffer(metadata.getBytes());
created = metadata.getCreated();
lastUsed = metadata.getLastUsed();
} else {
serializedMetadata = null;
created = -1;
lastUsed = -1;
}
ByteBuffer internalMetadata = header.internalMetadataLength() > 0 ?
toBuffer(EntryRecord.readInternalMetadata(handle, header, offset)) :
null;
return entryCreator.create(serializedKey, value, serializedMetadata, internalMetadata, created, lastUsed);
}
public interface EntryCreator<K,V> {
MarshallableEntry<K, V> create(ByteBuffer key, ByteBuffer value, ByteBuffer metadata,
ByteBuffer internalMetadata, long created, long lastUsed) throws IOException;
}
private ByteBuffer readAndCheckKey(FileProvider.Handle handle, EntryHeader header, int offset) throws IOException {
ByteBuffer serializedKey = toBuffer(EntryRecord.readKey(handle, header, offset));
if (serializedKey == null) {
throw new IllegalStateException("Error reading key from " + handle.getFileId() + ":" + offset);
}
return serializedKey;
}
private ByteBuffer toBuffer(byte[] array) {
return array == null ? null : byteBufferFactory.newByteBuffer(array, 0, array.length);
}
private interface EntryFunctor<R> {
R apply(int file, int offset, int size, byte[] serializedKey, EntryMetadata metadata, byte[] serializedValue, byte[] serializedInternalMetadata, long seqId, long expiration) throws Exception;
}
private Flowable<Integer> filePublisher() {
return Flowable.using(fileProvider::getFileIterator, it -> Flowable.fromIterable(() -> it),
            // This close happens after the last file is returned from the iterator, but before it is processed.
// TODO: Is this okay or can compaction etc affect this?
CloseableIterator::close);
}
private <R> Flowable<R> handleFilePublisher(int file, boolean fetchValue, boolean fetchMetadata,
EntryFunctor<R> functor) {
return Flowable.using(() -> {
log.tracef("Loading entries from file %d", file);
return Optional.ofNullable(fileProvider.getFile(file));
},
optHandle -> {
if (!optHandle.isPresent()) {
log.tracef("File %d was deleted during iteration", file);
return Flowable.empty();
}
FileProvider.Handle handle = optHandle.get();
AtomicInteger offset = new AtomicInteger();
return Flowable.fromIterable(() -> new HandleIterator<>(offset, handle, fetchMetadata, fetchValue,
functor, file));
},
optHandle -> {
if (optHandle.isPresent()) {
optHandle.get().close();
}
}
);
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
// TODO: do this more efficiently later
return Flowable.fromPublisher(publishEntries(segments, filter, false))
.map(MarshallableEntry::getKey);
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
return blockingManager.blockingPublisher(Flowable.defer(() -> {
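         // The temporary table holds the newest, not-yet-indexed positions; remember its
         // keys so stale index entries for the same keys are filtered out further below.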
Set<Object> seenKeys = new HashSet<>();
Flowable<Map.Entry<Object, EntryPosition>> tableFlowable = temporaryTable.publish(segments)
.doOnNext(entry -> seenKeys.add(entry.getKey()));
if (filter != null) {
tableFlowable = tableFlowable.filter(entry -> filter.test((K) entry.getKey()));
}
Flowable<MarshallableEntry<K, V>> entryFlowable = tableFlowable.flatMapMaybe(entry -> {
EntryPosition position = entry.getValue();
if (position.offset < 0) {
return Maybe.empty();
}
MarshallableEntry<K, V> marshallableEntry = readValueFromFileOffset(entry.getKey(), position);
if (marshallableEntry == null) {
               // Using the key partitioner here isn't ideal; however, this case should rarely happen
return Maybe.fromCompletionStage(load(keyPartitioner.getSegment(entry.getKey()), entry.getKey()));
}
return Maybe.just(marshallableEntry);
});
Flowable<MarshallableEntry<K, V>> indexFlowable = index.publish(segments, includeValues)
.mapOptional(er -> {
if (er.getHeader().valueLength() == 0) {
return Optional.empty();
}
final K key = (K) marshaller.objectFromByteBuffer(er.getKey());
if ((filter != null && !filter.test(key)) || seenKeys.contains(key)) {
return Optional.empty();
}
return Optional.of(marshallableEntryFactory.create(key, byteBufferFactory.newByteBuffer(er.getValue()),
byteBufferFactory.newByteBuffer(er.getMetadata()),
byteBufferFactory.newByteBuffer(er.getInternalMetadata()),
er.getCreated(), er.getLastUsed()));
});
return Flowable.concat(entryFlowable, indexFlowable);
}));
}
private class HandleIterator<R> extends AbstractIterator<R> {
private final AtomicInteger offset;
private final FileProvider.Handle handle;
private final boolean fetchMetadata;
private final boolean fetchValue;
private final EntryFunctor<R> functor;
private final int file;
public HandleIterator(AtomicInteger offset, FileProvider.Handle handle, boolean fetchMetadata, boolean fetchValue,
EntryFunctor<R> functor, int file) {
this.offset = offset;
this.handle = handle;
this.fetchMetadata = fetchMetadata;
this.fetchValue = fetchValue;
this.functor = functor;
this.file = file;
}
@Override
protected R getNext() {
R next = null;
int innerOffset = offset.get();
try {
while (next == null) {
EntryHeader header = EntryRecord.readEntryHeader(handle, innerOffset);
if (header == null) {
return null; // end of file;
}
try {
byte[] serializedKey = EntryRecord.readKey(handle, header, innerOffset);
if (serializedKey == null) {
continue; // we have read the file concurrently with writing there
}
EntryMetadata meta = null;
if (fetchMetadata && header.metadataLength() > 0) {
meta = EntryRecord.readMetadata(handle, header, innerOffset);
}
byte[] serializedValue = null;
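                  // Tombstones and already-expired entries are reported with a negated
                  // offset so downstream consumers can distinguish them.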
int offsetOrNegation = innerOffset;
if (header.valueLength() > 0) {
if (header.expiryTime() >= 0 && header.expiryTime() <= timeService.wallClockTime()) {
offsetOrNegation = ~innerOffset;
} else if (fetchValue) {
serializedValue = EntryRecord.readValue(handle, header, innerOffset);
} else {
serializedValue = Util.EMPTY_BYTE_ARRAY;
}
} else {
offsetOrNegation = ~innerOffset;
}
byte[] serializedInternalMetadata = null;
if (fetchMetadata && header.internalMetadataLength() > 0) {
serializedInternalMetadata = EntryRecord.readInternalMetadata(handle, header, innerOffset);
}
next = functor.apply(file, offsetOrNegation, header.totalLength(), serializedKey, meta,
serializedValue, serializedInternalMetadata, header.seqId(), header.expiryTime());
} finally {
innerOffset = offset.addAndGet(header.totalLength());
}
}
return next;
} catch (Exception e) {
throw new PersistenceException(e);
}
}
}
}
| 40,433
| 46.40211
| 197
|
java
|