// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/JdbcStringBasedStoreConfigurationSerializer.java
package org.infinispan.persistence.jdbc.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.serializing.ConfigurationSerializer;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationSerializer;
import org.infinispan.persistence.jdbc.common.configuration.Attribute;
import org.infinispan.persistence.jdbc.common.configuration.Element;
/**
* JdbcStringBasedStoreConfigurationSerializer.
*
* @author Tristan Tarrant
* @since 9.0
*/
public class JdbcStringBasedStoreConfigurationSerializer extends AbstractJdbcStoreConfigurationSerializer implements ConfigurationSerializer<JdbcStringBasedStoreConfiguration> {
@Override
public void serialize(ConfigurationWriter writer, JdbcStringBasedStoreConfiguration configuration) {
writer.writeStartElement(Element.STRING_KEYED_JDBC_STORE);
writer.writeDefaultNamespace(JdbcStoreConfigurationParser.NAMESPACE + Version.getMajorMinor());
writeJdbcStoreAttributes(writer, configuration);
writeCommonStoreSubAttributes(writer, configuration);
writeJDBCStoreTable(writer, Element.STRING_KEYED_TABLE, configuration.table());
writeJDBCStoreConnection(writer, configuration);
writeCommonStoreElements(writer, configuration);
writer.writeEndElement();
}
protected void writeJDBCStoreTable(ConfigurationWriter writer, Element element, TableManipulationConfiguration configuration) {
AttributeSet attributes = configuration.attributes();
writer.writeStartElement(element);
attributes.write(writer, TableManipulationConfiguration.TABLE_NAME_PREFIX, Attribute.PREFIX);
attributes.write(writer, TableManipulationConfiguration.BATCH_SIZE, Attribute.BATCH_SIZE);
attributes.write(writer, TableManipulationConfiguration.FETCH_SIZE, Attribute.FETCH_SIZE);
attributes.write(writer, TableManipulationConfiguration.CREATE_ON_START, Attribute.CREATE_ON_START);
attributes.write(writer, TableManipulationConfiguration.DROP_ON_EXIT, Attribute.DROP_ON_EXIT);
writeJDBCStoreColumn(writer, Element.ID_COLUMN, configuration.idColumnConfiguration().attributes(), IdColumnConfiguration.ID_COLUMN_NAME, IdColumnConfiguration.ID_COLUMN_TYPE);
writeJDBCStoreColumn(writer, Element.DATA_COLUMN, configuration.dataColumnConfiguration().attributes(), DataColumnConfiguration.DATA_COLUMN_NAME, DataColumnConfiguration.DATA_COLUMN_TYPE);
writeJDBCStoreColumn(writer, Element.TIMESTAMP_COLUMN, configuration.timeStampColumnConfiguration().attributes(), TimestampColumnConfiguration.TIMESTAMP_COLUMN_NAME, TimestampColumnConfiguration.TIMESTAMP_COLUMN_TYPE);
writeJDBCStoreColumn(writer, Element.SEGMENT_COLUMN, configuration.segmentColumnConfiguration().attributes(), SegmentColumnConfiguration.SEGMENT_COLUMN_NAME, SegmentColumnConfiguration.SEGMENT_COLUMN_TYPE);
writer.writeEndElement();
}
private void writeJDBCStoreColumn(ConfigurationWriter writer, Element element, AttributeSet attributes, AttributeDefinition<?> columnName,
AttributeDefinition<?> columnType) {
writer.writeStartElement(element);
attributes.write(writer, columnName, Attribute.NAME);
attributes.write(writer, columnType, Attribute.TYPE);
writer.writeEndElement();
}
}
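For reference, a sketch of the XML shape this serializer emits. Element and attribute names follow the Element and Attribute constants used above; the namespace version suffix and the concrete column names/types are illustrative assumptions, not values taken from the file:

<string-keyed-jdbc-store xmlns="urn:infinispan:config:store:jdbc:15.0">
   <string-keyed-table prefix="ISPN_STRING_TABLE" create-on-start="true" drop-on-exit="false">
      <id-column name="ID_COLUMN" type="VARCHAR(255)"/>
      <data-column name="DATA_COLUMN" type="BLOB"/>
      <timestamp-column name="TIMESTAMP_COLUMN" type="BIGINT"/>
      <segment-column name="SEGMENT_COLUMN" type="INT"/>
   </string-keyed-table>
   <!-- connection factory and common store elements are written after the table -->
</string-keyed-jdbc-store>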

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/SegmentColumnConfigurationBuilder.java
package org.infinispan.persistence.jdbc.configuration;
import static org.infinispan.persistence.jdbc.configuration.SegmentColumnConfiguration.SEGMENT_COLUMN_NAME;
import static org.infinispan.persistence.jdbc.configuration.SegmentColumnConfiguration.SEGMENT_COLUMN_TYPE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationBuilder;
public class SegmentColumnConfigurationBuilder implements Builder<SegmentColumnConfiguration> {
private final AttributeSet attributes;
private final AbstractJdbcStoreConfigurationBuilder abstractJdbcStoreConfigurationBuilder;
SegmentColumnConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder abstractJdbcStoreConfigurationBuilder) {
this.abstractJdbcStoreConfigurationBuilder = abstractJdbcStoreConfigurationBuilder;
attributes = SegmentColumnConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public SegmentColumnConfigurationBuilder columnName(String columnName) {
attributes.attribute(SEGMENT_COLUMN_NAME).set(columnName);
return this;
}
public SegmentColumnConfigurationBuilder columnType(String columnType) {
attributes.attribute(SEGMENT_COLUMN_TYPE).set(columnType);
return this;
}
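// The segment column is only validated when the store is configured as segmented; otherwise its name and type may remain unset.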
@Override
public void validate() {
Boolean segmented = abstractJdbcStoreConfigurationBuilder.attributes().attribute(AbstractStoreConfiguration.SEGMENTED).get();
if (segmented != null && segmented) {
TableManipulationConfigurationBuilder.validateIfSet(attributes, SEGMENT_COLUMN_NAME, SEGMENT_COLUMN_TYPE);
}
}
@Override
public SegmentColumnConfiguration create() {
return new SegmentColumnConfiguration(attributes.protect());
}
@Override
public Builder<?> read(SegmentColumnConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/TableManipulationConfigurationBuilder.java
package org.infinispan.persistence.jdbc.configuration;
import static org.infinispan.persistence.jdbc.common.logging.Log.CONFIG;
import static org.infinispan.persistence.jdbc.configuration.TableManipulationConfiguration.BATCH_SIZE;
import static org.infinispan.persistence.jdbc.configuration.TableManipulationConfiguration.CREATE_ON_START;
import static org.infinispan.persistence.jdbc.configuration.TableManipulationConfiguration.DROP_ON_EXIT;
import static org.infinispan.persistence.jdbc.configuration.TableManipulationConfiguration.FETCH_SIZE;
import static org.infinispan.persistence.jdbc.configuration.TableManipulationConfiguration.TABLE_NAME_PREFIX;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.Self;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationChildBuilder;
/**
* TableManipulationConfigurationBuilder.
*
* @author Tristan Tarrant
* @since 5.2
*/
public abstract class TableManipulationConfigurationBuilder<B extends AbstractJdbcStoreConfigurationBuilder<?, B>, S extends TableManipulationConfigurationBuilder<B, S>>
extends AbstractJdbcStoreConfigurationChildBuilder<B>
implements Builder<TableManipulationConfiguration>, Self<S> {
final AttributeSet attributes;
private final DataColumnConfigurationBuilder dataColumn = new DataColumnConfigurationBuilder();
private final IdColumnConfigurationBuilder idColumn = new IdColumnConfigurationBuilder();
private final TimestampColumnConfigurationBuilder timeStampColumn = new TimestampColumnConfigurationBuilder();
private final SegmentColumnConfigurationBuilder segmentColumn;
TableManipulationConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder<?, B> builder) {
super(builder);
attributes = TableManipulationConfiguration.attributeSet();
segmentColumn = new SegmentColumnConfigurationBuilder(builder);
}
@Override
public AttributeSet attributes() {
return attributes;
}
/**
* @deprecated Please use {@link org.infinispan.configuration.cache.AbstractStoreConfigurationBuilder#maxBatchSize(int)} instead.
*/
@Deprecated
public S batchSize(int batchSize) {
attributes.attribute(BATCH_SIZE).set(batchSize);
maxBatchSize(batchSize);
return self();
}
/**
* The fetch size used for database queries, applied via {@link java.sql.ResultSet#setFetchSize(int)}. This parameter
* is optional; if not specified, it defaults to {@link org.infinispan.persistence.jdbc.impl.table.TableManager#DEFAULT_FETCH_SIZE}.
*/
public S fetchSize(int fetchSize) {
attributes.attribute(FETCH_SIZE).set(fetchSize);
return self();
}
/**
* Sets the prefix for the name of the table where the data will be stored. "_<cache name>" will
* be appended to this prefix in order to enforce unique table names for each cache.
*/
public S tableNamePrefix(String tableNamePrefix) {
attributes.attribute(TABLE_NAME_PREFIX).set(tableNamePrefix);
return self();
}
String tableNamePrefix() {
return attributes.attribute(TABLE_NAME_PREFIX).get();
}
/**
* Determines whether database tables should be created by the store on startup
*/
public S createOnStart(boolean createOnStart) {
attributes.attribute(CREATE_ON_START).set(createOnStart);
return self();
}
/**
* Determines whether database tables should be dropped by the store on shutdown
*/
public S dropOnExit(boolean dropOnExit) {
attributes.attribute(DROP_ON_EXIT).set(dropOnExit);
return self();
}
/**
* The name of the database column used to store the keys
*/
public S idColumnName(String idColumnName) {
idColumn.idColumnName(idColumnName);
return self();
}
/**
* The type of the database column used to store the keys
*/
public S idColumnType(String idColumnType) {
idColumn.idColumnType(idColumnType);
return self();
}
/**
* The name of the database column used to store the entries
*/
public S dataColumnName(String dataColumnName) {
dataColumn.dataColumnName(dataColumnName);
return self();
}
/**
* The type of the database column used to store the entries
*/
public S dataColumnType(String dataColumnType) {
dataColumn.dataColumnType(dataColumnType);
return self();
}
/**
* The name of the database column used to store the timestamps
*/
public S timestampColumnName(String timestampColumnName) {
timeStampColumn.dataColumnName(timestampColumnName);
return self();
}
/**
* The type of the database column used to store the timestamps
*/
public S timestampColumnType(String timestampColumnType) {
timeStampColumn.dataColumnType(timestampColumnType);
return self();
}
/**
* The name of the database column used to store the segments
*/
public S segmentColumnName(String segmentColumnName) {
segmentColumn.columnName(segmentColumnName);
return self();
}
/**
* The type of the database column used to store the segments
*/
public S segmentColumnType(String segmentColumnType) {
segmentColumn.columnType(segmentColumnType);
return self();
}
@Override
public void validate() {
validateIfSet(attributes, TABLE_NAME_PREFIX);
idColumn.validate();
dataColumn.validate();
timeStampColumn.validate();
segmentColumn.validate();
}
static void validateIfSet(AttributeSet attributes, AttributeDefinition<?>... definitions) {
for(AttributeDefinition<?> definition : definitions) {
String value = (String) attributes.attribute(definition).get();
if(value == null || value.isEmpty()) {
throw CONFIG.tableManipulationAttributeNotSet(attributes.getName(), definition.name());
}
}
}
@Override
public void validate(GlobalConfiguration globalConfig) {
}
@Override
public TableManipulationConfiguration create() {
return new TableManipulationConfiguration(attributes.protect(), idColumn.create(), dataColumn.create(), timeStampColumn.create(), segmentColumn.create());
}
@Override
public Builder<?> read(TableManipulationConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
idColumn.read(template.idColumnConfiguration(), combine);
dataColumn.read(template.dataColumnConfiguration(), combine);
timeStampColumn.read(template.timeStampColumnConfiguration(), combine);
segmentColumn.read(template.segmentColumnConfiguration(), combine);
return this;
}
@Override
public String toString() {
return "TableManipulationConfigurationBuilder [attributes=" + attributes + "]";
}
}
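A brief illustration of how these table options are typically set through the string-keyed store builder. This is a minimal sketch: the example class name, the H2 connection settings, and the column names/types are illustrative assumptions, not values from the file above.

import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfigurationBuilder;

public class JdbcStringStoreConfigExample {
   public static Configuration build() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.persistence()
         .addStore(JdbcStringBasedStoreConfigurationBuilder.class)
            .table()
               // "_<cache name>" is appended to this prefix to keep table names unique per cache
               .tableNamePrefix("ISPN_STRING_TABLE")
               .createOnStart(true)
               .dropOnExit(false)
               .fetchSize(100)
               .idColumnName("ID_COLUMN").idColumnType("VARCHAR(255)")
               .dataColumnName("DATA_COLUMN").dataColumnType("BLOB")
               .timestampColumnName("TIMESTAMP_COLUMN").timestampColumnType("BIGINT")
               .segmentColumnName("SEGMENT_COLUMN").segmentColumnType("INT")
            .connectionPool()
               .connectionUrl("jdbc:h2:mem:example;DB_CLOSE_DELAY=-1")
               .driverClass("org.h2.Driver")
               .username("sa");
      return builder.build();
   }
}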

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/IdColumnConfiguration.java
package org.infinispan.persistence.jdbc.configuration;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public class IdColumnConfiguration {
public static final AttributeDefinition<String> ID_COLUMN_NAME = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.NAME, null, String.class).immutable().build();
public static final AttributeDefinition<String> ID_COLUMN_TYPE = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.TYPE, null, String.class).immutable().build();
static AttributeSet attributeSet() {
return new AttributeSet(IdColumnConfiguration.class, ID_COLUMN_NAME, ID_COLUMN_TYPE);
}
private final Attribute<String> idColumnName;
private final Attribute<String> idColumnType;
private final AttributeSet attributes;
public IdColumnConfiguration(AttributeSet attributes) {
this.attributes = attributes.checkProtection();
idColumnName = attributes.attribute(ID_COLUMN_NAME);
idColumnType = attributes.attribute(ID_COLUMN_TYPE);
}
public String idColumnName() {
return idColumnName.get();
}
public String idColumnType() {
return idColumnType.get();
}
public AttributeSet attributes() {
return attributes;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
IdColumnConfiguration that = (IdColumnConfiguration) o;
return attributes.equals(that.attributes);
}
@Override
public int hashCode() {
return attributes.hashCode();
}
@Override
public String toString() {
return "IdColumnConfiguration{" +
"attributes=" + attributes +
'}';
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/JdbcStringBasedStoreConfiguration.java
package org.infinispan.persistence.jdbc.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.ConfigurationFor;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.serializing.SerializedWith;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.Element;
import org.infinispan.persistence.jdbc.stringbased.JdbcStringBasedStore;
import org.infinispan.persistence.keymappers.DefaultTwoWayKey2StringMapper;
@BuiltBy(JdbcStringBasedStoreConfigurationBuilder.class)
@ConfigurationFor(JdbcStringBasedStore.class)
@SerializedWith(JdbcStringBasedStoreConfigurationSerializer.class)
public class JdbcStringBasedStoreConfiguration extends AbstractJdbcStoreConfiguration<JdbcStringBasedStoreConfiguration> {
static final AttributeDefinition<String> KEY2STRING_MAPPER = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.KEY_TO_STRING_MAPPER, DefaultTwoWayKey2StringMapper.class.getName()).immutable().build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(JdbcStringBasedStoreConfiguration.class, AbstractJdbcStoreConfiguration.attributeDefinitionSet(), KEY2STRING_MAPPER);
}
private final Attribute<String> key2StringMapper;
private final TableManipulationConfiguration table;
public JdbcStringBasedStoreConfiguration(AttributeSet attributes, AsyncStoreConfiguration async,
ConnectionFactoryConfiguration connectionFactory, TableManipulationConfiguration table) {
super(Element.STRING_KEYED_JDBC_STORE, attributes, async, connectionFactory);
this.table = table;
key2StringMapper = attributes.attribute(KEY2STRING_MAPPER);
}
public String key2StringMapper() {
return key2StringMapper.get();
}
public TableManipulationConfiguration table() {
return table;
}
@Override
public String toString() {
return "JdbcStringBasedStoreConfiguration [table=" + table + ", attributes=" + attributes +
", connectionFactory=" + connectionFactory() + ", async=" + async() + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + ((table == null) ? 0 : table.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (!super.equals(obj))
return false;
if (getClass() != obj.getClass())
return false;
JdbcStringBasedStoreConfiguration other = (JdbcStringBasedStoreConfiguration) obj;
if (table == null) {
return other.table == null;
} else return table.equals(other.table);
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/TimestampColumnConfigurationBuilder.java
package org.infinispan.persistence.jdbc.configuration;
import static org.infinispan.persistence.jdbc.configuration.TimestampColumnConfiguration.TIMESTAMP_COLUMN_NAME;
import static org.infinispan.persistence.jdbc.configuration.TimestampColumnConfiguration.TIMESTAMP_COLUMN_TYPE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public class TimestampColumnConfigurationBuilder implements Builder<TimestampColumnConfiguration> {
private final AttributeSet attributes;
TimestampColumnConfigurationBuilder() {
attributes = TimestampColumnConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public TimestampColumnConfigurationBuilder dataColumnName(String dataColumnName) {
attributes.attribute(TIMESTAMP_COLUMN_NAME).set(dataColumnName);
return this;
}
public TimestampColumnConfigurationBuilder dataColumnType(String dataColumnType) {
attributes.attribute(TIMESTAMP_COLUMN_TYPE).set(dataColumnType);
return this;
}
@Override
public void validate() {
TableManipulationConfigurationBuilder.validateIfSet(attributes, TIMESTAMP_COLUMN_NAME, TIMESTAMP_COLUMN_TYPE);
}
@Override
public TimestampColumnConfiguration create() {
return new TimestampColumnConfiguration(attributes.protect());
}
@Override
public Builder<?> read(TimestampColumnConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/IdColumnConfigurationBuilder.java
package org.infinispan.persistence.jdbc.configuration;
import static org.infinispan.persistence.jdbc.configuration.IdColumnConfiguration.ID_COLUMN_NAME;
import static org.infinispan.persistence.jdbc.configuration.IdColumnConfiguration.ID_COLUMN_TYPE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public class IdColumnConfigurationBuilder implements Builder<IdColumnConfiguration> {
private final AttributeSet attributes;
IdColumnConfigurationBuilder() {
attributes = IdColumnConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public IdColumnConfigurationBuilder idColumnName(String idColumnName) {
attributes.attribute(ID_COLUMN_NAME).set(idColumnName);
return this;
}
public IdColumnConfigurationBuilder idColumnType(String idColumnType) {
attributes.attribute(ID_COLUMN_TYPE).set(idColumnType);
return this;
}
@Override
public void validate() {
TableManipulationConfigurationBuilder.validateIfSet(attributes, ID_COLUMN_NAME, ID_COLUMN_TYPE);
}
@Override
public IdColumnConfiguration create() {
return new IdColumnConfiguration(attributes.protect());
}
@Override
public Builder<?> read(IdColumnConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/SegmentColumnConfiguration.java
package org.infinispan.persistence.jdbc.configuration;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public class SegmentColumnConfiguration {
public static final AttributeDefinition<String> SEGMENT_COLUMN_NAME = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.NAME, null, String.class).immutable().build();
public static final AttributeDefinition<String> SEGMENT_COLUMN_TYPE = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.TYPE, null, String.class).immutable().build();
static AttributeSet attributeSet() {
return new AttributeSet(SegmentColumnConfiguration.class, SEGMENT_COLUMN_NAME, SEGMENT_COLUMN_TYPE);
}
private final Attribute<String> segmentColumnName;
private final Attribute<String> segmentColumnType;
private final AttributeSet attributes;
public SegmentColumnConfiguration(AttributeSet attributes) {
this.attributes = attributes;
segmentColumnName = attributes.attribute(SEGMENT_COLUMN_NAME);
segmentColumnType = attributes.attribute(SEGMENT_COLUMN_TYPE);
}
public AttributeSet attributes() {
return attributes;
}
public String segmentColumnName() {
return segmentColumnName.get();
}
public String segmentColumnType() {
return segmentColumnType.get();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SegmentColumnConfiguration that = (SegmentColumnConfiguration) o;
return attributes.equals(that.attributes);
}
@Override
public int hashCode() {
return attributes.hashCode();
}
@Override
public String toString() {
return "SegmentColumnConfiguration{" +
"attributes=" + attributes +
'}';
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/configuration/DataColumnConfigurationBuilder.java
package org.infinispan.persistence.jdbc.configuration;
import static org.infinispan.persistence.jdbc.configuration.DataColumnConfiguration.DATA_COLUMN_NAME;
import static org.infinispan.persistence.jdbc.configuration.DataColumnConfiguration.DATA_COLUMN_TYPE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public class DataColumnConfigurationBuilder implements Builder<DataColumnConfiguration> {
private final AttributeSet attributes;
DataColumnConfigurationBuilder() {
attributes = DataColumnConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public DataColumnConfigurationBuilder dataColumnName(String dataColumnName) {
attributes.attribute(DATA_COLUMN_NAME).set(dataColumnName);
return this;
}
public DataColumnConfigurationBuilder dataColumnType(String dataColumnType) {
attributes.attribute(DATA_COLUMN_TYPE).set(dataColumnType);
return this;
}
@Override
public void validate() {
TableManipulationConfigurationBuilder.validateIfSet(attributes, DATA_COLUMN_NAME, DATA_COLUMN_TYPE);
}
@Override
public DataColumnConfiguration create() {
return new DataColumnConfiguration(attributes.protect());
}
@Override
public Builder<?> read(DataColumnConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/stringbased/package-info.java
/**
 * JDBC cache store implementation that maps keys to strings. If you can guarantee that your application
 * will only use strings as keys, this implementation will perform better than binary or mixed
 * implementations.
*
* @api.public
*/
package org.infinispan.persistence.jdbc.stringbased;

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/stringbased/JdbcStringBasedStore.java
package org.infinispan.persistence.jdbc.stringbased;
import static org.infinispan.persistence.jdbc.common.JdbcUtil.marshall;
import static org.infinispan.persistence.jdbc.common.JdbcUtil.unmarshall;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.stream.Collectors;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.Util;
import org.infinispan.commons.util.Version;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.BaseJdbcStore;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.jdbc.impl.table.TableManager;
import org.infinispan.persistence.jdbc.impl.table.TableManagerFactory;
import org.infinispan.persistence.keymappers.Key2StringMapper;
import org.infinispan.persistence.keymappers.TwoWayKey2StringMapper;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.MarshalledValue;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
/**
* {@link org.infinispan.persistence.spi.AdvancedCacheLoader} implementation that stores the entries in a database.
 * This cache store stores each entry within its own row in the table, which provides finer-grained granularity for
 * all operations and better performance. In order to be able to store non-string keys, it relies on a {@link
 * org.infinispan.persistence.keymappers.Key2StringMapper}.
 * <p/>
 * Note that only the keys are stored as strings; the values are still saved as binary data. Using a character
 * data type for the value column will result in unmarshalling errors.
 * <p/>
 * The actual storage table is defined through the {@link org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration} configuration.
 * The table can be created and dropped on-the-fly at deployment time. For more details consult the javadoc of {@link
 * org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration}.
 * <p/>
 * <b>Preload</b>. In order to support preload functionality, the store needs to read the string keys from the database and transform them
 * into the corresponding key objects. {@link org.infinispan.persistence.keymappers.Key2StringMapper} only supports
 * key-to-string transformation (one way); in order to be able to use preload, one needs to specify a
 * {@link org.infinispan.persistence.keymappers.TwoWayKey2StringMapper}, which extends {@link org.infinispan.persistence.keymappers.Key2StringMapper} and
 * allows bidirectional transformation (a minimal mapper sketch follows this class).
 * <p/>
 * <b>Rehashing</b>. When a node leaves or joins, Infinispan moves persistent state around as part of the rehashing process.
 * For this it needs access to the underlying key objects, so if distribution is used, the mapper needs to be a
 * {@link org.infinispan.persistence.keymappers.TwoWayKey2StringMapper}, otherwise the cache won't start (the same constraint as with preloading).
*
* @author Mircea.Markus@jboss.com
* @see org.infinispan.persistence.keymappers.Key2StringMapper
* @see org.infinispan.persistence.keymappers.DefaultTwoWayKey2StringMapper
*/
@ConfiguredBy(JdbcStringBasedStoreConfiguration.class)
public class JdbcStringBasedStore<K, V> extends BaseJdbcStore<K, V, JdbcStringBasedStoreConfiguration> {
private static final Log log = LogFactory.getLog(JdbcStringBasedStore.class, Log.class);
private JdbcStringBasedStoreConfiguration configuration;
private Key2StringMapper key2StringMapper;
private MarshallableEntryFactory<K, V> marshalledEntryFactory;
private PersistenceMarshaller marshaller;
private TimeService timeService;
private KeyPartitioner keyPartitioner;
// All possible segments, when the store is shared, or the segments owned by this node, when the store is private
private IntSet sizeSegments;
@Override
public Set<Characteristic> characteristics() {
return EnumSet.of(Characteristic.BULK_READ, Characteristic.EXPIRATION, Characteristic.SEGMENTABLE,
Characteristic.TRANSACTIONAL, Characteristic.SHAREABLE);
}
@Override
protected TableOperations<K, V> createTableOperations(InitializationContext ctx, JdbcStringBasedStoreConfiguration configuration) {
this.configuration = ctx.getConfiguration();
this.marshalledEntryFactory = ctx.getMarshallableEntryFactory();
this.marshaller = ctx.getPersistenceMarshaller();
this.timeService = ctx.getTimeService();
this.keyPartitioner = configuration.segmented() ? ctx.getKeyPartitioner() : null;
int numSegments = ctx.getCache().getCacheConfiguration().clustering().hash().numSegments();
if (configuration.shared()) {
this.sizeSegments = IntSets.immutableRangeSet(numSegments);
} else {
this.sizeSegments = IntSets.concurrentSet(numSegments);
this.sizeSegments.addAll(IntSets.immutableRangeSet(numSegments));
}
String cacheName = ctx.getCache().getName();
TableManager<K, V> tableManager = TableManagerFactory.getManager(ctx, connectionFactory, configuration, cacheName);
tableManager.start();
if (!configuration.table().createOnStart()) {
Connection connection = null;
try {
connection = connectionFactory.getConnection();
// If meta exists, then ensure that the stored configuration is compatible with the current settings
if (tableManager.metaTableExists(connection)) {
TableManager.Metadata meta = tableManager.getMetadata(connection);
if (meta != null) {
int storedSegments = meta.getSegments();
if (!configuration.segmented()) {
// ISPN-13135 number of segments was previously written incorrectly, so don't validate number for older versions
String versionStr = Version.decodeVersion(meta.getVersion());
List<Integer> versionParts = Arrays.stream(versionStr.split("\\.")).map(Integer::parseInt).collect(Collectors.toList());
// Ignore check if version < 12.1.5. Meta table only created since 12.0.0
if ((versionParts.get(0) > 12 || versionParts.get(2) > 4) && storedSegments != -1)
throw log.existingStoreNoSegmentation();
}
int configuredSegments = numSegments;
if (configuration.segmented() && storedSegments != configuredSegments)
throw log.existingStoreSegmentMismatch(storedSegments, configuredSegments);
}
tableManager.updateMetaTable(connection);
} else {
// The meta table does not exist, therefore we must be reading from an 11.x store. Migrate the old data
org.infinispan.util.logging.Log.PERSISTENCE.startMigratingPersistenceData(cacheName);
try {
migrateFromV11(ctx, tableManager);
} catch (SQLException e) {
throw org.infinispan.util.logging.Log.PERSISTENCE.persistedDataMigrationFailed(cacheName, e);
}
tableManager.createMetaTable(connection);
org.infinispan.util.logging.Log.PERSISTENCE.persistedDataSuccessfulMigrated(cacheName);
}
} finally {
connectionFactory.releaseConnection(connection);
}
}
try {
Object mapper = Util.loadClassStrict(configuration.key2StringMapper(),
ctx.getGlobalConfiguration().classLoader()).newInstance();
if (mapper instanceof Key2StringMapper) key2StringMapper = (Key2StringMapper) mapper;
} catch (Exception e) {
log.errorf("Trying to instantiate %s, however it failed due to %s", configuration.key2StringMapper(),
e.getClass().getName());
throw new IllegalStateException("This should not happen.", e);
}
if (log.isTraceEnabled()) {
log.tracef("Using key2StringMapper: %s", key2StringMapper.getClass().getName());
}
if (configuration.preload()) {
enforceTwoWayMapper("preload");
}
if (ctx.getCache().getCacheConfiguration() != null && ctx.getCache().getCacheConfiguration().clustering().cacheMode().isDistributed()) {
enforceTwoWayMapper("distribution/rehashing");
}
return tableManager;
}
public TableManager<K, V> getTableManager() {
return (TableManager<K, V>) tableOperations;
}
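/**
 * Rewrites rows created by an 11.x store so that values match the current persistence-marshaller layout.
 * Non-expired rows are re-marshalled and updated in batches of maxBatchSize; nothing is done when a custom
 * user marshaller is configured, as those rows need no update.
 */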
private void migrateFromV11(InitializationContext ctx, TableManager<K, V> tableManager) throws SQLException {
// If a custom user marshaller was previously used, no need to update rows
if (ctx.getGlobalConfiguration().serialization().marshaller() != null)
return;
Connection conn = null;
PreparedStatement ps = null;
ResultSet rs = null;
try {
conn = connectionFactory.getConnection();
conn.setAutoCommit(false);
String sql = tableManager.getLoadNonExpiredAllRowsSql();
ps = conn.prepareStatement(sql);
ps.setLong(1, timeService.wallClockTime());
rs = ps.executeQuery();
Marshaller userMarshaller = marshaller.getUserMarshaller();
try (PreparedStatement upsertBatch = conn.prepareStatement(tableManager.getUpdateRowSql())) {
int batchSize = 0;
while (rs.next()) {
batchSize++;
InputStream inputStream = rs.getBinaryStream(1);
String keyStr = rs.getString(2);
long timestamp = rs.getLong(3);
int segment = keyPartitioner == null ? -1 : rs.getInt(4);
MarshalledValue mv = unmarshall(inputStream, marshaller);
V value = unmarshall(mv.getValueBytes(), userMarshaller);
Metadata meta;
try {
meta = unmarshall(mv.getMetadataBytes(), userMarshaller);
} catch (IllegalArgumentException e) {
// Metadata is read with the user marshaller first in case custom metadata was used; on failure, fall back to the persistence marshaller
meta = unmarshall(mv.getMetadataBytes(), marshaller);
}
PrivateMetadata internalMeta = unmarshall(mv.getInternalMetadataBytes(), marshaller);
MarshallableEntry<K, V> entry = marshalledEntryFactory.create(null, value, meta, internalMeta, mv.getCreated(), mv.getLastUsed());
ByteBuffer byteBuffer = marshall(entry.getMarshalledValue(), marshaller);
tableManager.prepareUpdateStatement(upsertBatch, keyStr, timestamp, segment, byteBuffer);
upsertBatch.addBatch();
if (batchSize == configuration.maxBatchSize()) {
batchSize = 0;
upsertBatch.executeBatch();
upsertBatch.clearBatch();
}
}
if (batchSize != 0)
upsertBatch.executeBatch();
conn.commit();
}
} finally {
JdbcUtil.safeClose(rs);
JdbcUtil.safeClose(ps);
connectionFactory.releaseConnection(conn);
}
}
@Override
protected void extraStopSteps() {
try {
TableManager<K, V> tableManager = getTableManager();
if (tableManager != null) {
tableManager.stop();
tableOperations = null;
}
} catch (Throwable t) {
log.debug("Exception while stopping", t);
}
}
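/**
 * Estimates the size of the requested segments by scaling the store-wide count. For example, if 64 of the 256
 * tracked segments are requested, this returns totalSize * 64 / 256.
 */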
@Override
public CompletionStage<Long> size(IntSet segments) {
return super.size(segments).thenApply(totalSize -> {
// Temporary workaround until we add a new query to compute the size of a set of segments
IntSet matchingSegments = IntSets.mutableCopyFrom(segments);
matchingSegments.retainAll(this.sizeSegments);
int totalSegments = sizeSegments.size();
return totalSize * matchingSegments.size() / totalSegments;
});
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
return size(segments);
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
this.sizeSegments.addAll(segments);
return CompletableFutures.completedNull();
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
this.sizeSegments.removeAll(segments);
return CompletableFutures.completedNull();
}
public ConnectionFactory getConnectionFactory() {
return connectionFactory;
}
class PossibleExpirationNotification {
private final String key;
private final MarshalledValue is;
PossibleExpirationNotification(String key, MarshalledValue is) {
this.key = key;
this.is = is;
}
}
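/**
 * Selects rows whose expiration timestamp has passed, deletes them in batches of maxBatchSize and, when a
 * TwoWayKey2StringMapper is configured, re-creates and emits each purged entry so expiration events can be
 * raised. All JDBC work runs on the blocking manager; the returned publisher completes after the batch commits.
 */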
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
return Flowable.defer(() -> {
UnicastProcessor<MarshallableEntry<K, V>> unicastProcessor = UnicastProcessor.create();
blockingManager.runBlocking(() -> {
TableManager<K, V> tableManager = getTableManager();
Connection conn = null;
PreparedStatement ps = null;
ResultSet rs = null;
try {
String sql = tableManager.getSelectOnlyExpiredRowsSql();
conn = connectionFactory.getConnection();
conn.setAutoCommit(false);
ps = conn.prepareStatement(sql);
ps.setLong(1, timeService.wallClockTime());
rs = ps.executeQuery();
int batchSize = configuration.maxBatchSize();
List<PossibleExpirationNotification> list;
if (key2StringMapper instanceof TwoWayKey2StringMapper) {
list = new ArrayList<>(batchSize);
} else {
list = null;
PERSISTENCE.twoWayKey2StringMapperIsMissing(TwoWayKey2StringMapper.class.getSimpleName());
}
long purgedAmount = 0;
try (PreparedStatement batchDelete = conn.prepareStatement(tableManager.getDeleteRowWithExpirationSql())) {
long possibleAmount = 0;
while (rs.next()) {
String keyStr = rs.getString(2);
batchDelete.setString(1, keyStr);
long expiryTime = rs.getLong(3);
batchDelete.setLong(2, expiryTime);
batchDelete.addBatch();
if (list != null) {
InputStream inputStream = rs.getBinaryStream(1);
MarshalledValue value = unmarshall(inputStream, marshaller);
list.add(new PossibleExpirationNotification(keyStr, value));
}
if (++possibleAmount == batchSize) {
purgedAmount += runBatchAndNotify(list, batchDelete, unicastProcessor);
possibleAmount = 0;
}
}
if (list == null || !list.isEmpty()) {
purgedAmount += runBatchAndNotify(list, batchDelete, unicastProcessor);
}
if (log.isTraceEnabled()) {
log.tracef("Successfully purged %d rows.", purgedAmount);
}
conn.commit();
unicastProcessor.onComplete();
}
} catch (SQLException e) {
log.failedClearingJdbcCacheStore(e);
try {
conn.rollback();
} catch (SQLException ex) {
log.sqlFailureTxRollback(ex);
}
unicastProcessor.onError(e);
} finally {
JdbcUtil.safeClose(rs);
JdbcUtil.safeClose(ps);
connectionFactory.releaseConnection(conn);
}
}, "jdbcstringstore-purge");
return unicastProcessor;
});
}
private long runBatchAndNotify(List<PossibleExpirationNotification> possible, PreparedStatement batchDelete,
FlowableProcessor<MarshallableEntry<K, V>> flowable) throws SQLException {
long purgeAmount = 0;
int[] results = batchDelete.executeBatch();
if (possible != null) {
for (int i = 0; i < results.length; ++i) {
PossibleExpirationNotification notification = possible.get(i);
if (results[i] != Statement.EXECUTE_FAILED) {
Object key = ((TwoWayKey2StringMapper) key2StringMapper).getKeyMapping(notification.key);
flowable.onNext(marshalledEntryFactory.create(key, notification.is));
purgeAmount++;
} else {
log.tracef("Unable to remove expired entry for key %s, most likely concurrent update", notification.key);
}
}
possible.clear();
} else {
purgeAmount += results.length;
}
return purgeAmount;
}
private void enforceTwoWayMapper(String where) throws PersistenceException {
if (!(key2StringMapper instanceof TwoWayKey2StringMapper)) {
PERSISTENCE.invalidKey2StringMapper(where, key2StringMapper.getClass().getName());
throw new PersistenceException(String.format("Invalid key to string mapper : %s", key2StringMapper.getClass().getName()));
}
}
}
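Preload and distribution both require a TwoWayKey2StringMapper (see enforceTwoWayMapper above). A minimal sketch of such a mapper for a hypothetical integer-keyed cache; the class name and key type are illustrative assumptions:

public class IntegerTwoWayKey2StringMapper implements org.infinispan.persistence.keymappers.TwoWayKey2StringMapper {
   @Override
   public boolean isSupportedType(Class<?> keyType) {
      return keyType == Integer.class;
   }
   @Override
   public String getStringMapping(Object key) {
      // key -> string: the one-way direction every Key2StringMapper provides
      return key.toString();
   }
   @Override
   public Object getKeyMapping(String stringKey) {
      // string -> key: the inverse direction that preload and rehashing need
      return Integer.parseInt(stringKey);
   }
}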

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/PersistenceContextInitializer.java
package org.infinispan.persistence.jdbc.impl;
import org.infinispan.persistence.jdbc.impl.table.AbstractTableManager;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
/**
* Interface used to initialise a {@link org.infinispan.protostream.SerializationContext} using the specified Pojos,
* Marshaller implementations and provided .proto schemas.
*
* @author Ryan Emerson
* @since 12.0
*/
@AutoProtoSchemaBuilder(
includeClasses = AbstractTableManager.MetadataImpl.class,
schemaFileName = "persistence.jdbc.proto",
schemaFilePath = "proto/generated",
schemaPackageName = "org.infinispan.persistence.jdbc",
service = false
)
public interface PersistenceContextInitializer extends SerializationContextInitializer {
}

// File: infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/AbstractTableManager.java
package org.infinispan.persistence.jdbc.impl.table;
import static org.infinispan.persistence.jdbc.common.JdbcUtil.marshall;
import static org.infinispan.persistence.jdbc.common.JdbcUtil.unmarshall;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Objects;
import java.util.PrimitiveIterator;
import java.util.function.Predicate;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.commons.util.Version;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.common.sql.BaseTableOperations;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.jdbc.configuration.TableManipulationConfiguration;
import org.infinispan.persistence.jdbc.impl.PersistenceContextInitializerImpl;
import org.infinispan.persistence.keymappers.Key2StringMapper;
import org.infinispan.persistence.keymappers.TwoWayKey2StringMapper;
import org.infinispan.persistence.keymappers.UnsupportedKeyTypeException;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.MarshalledValue;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
/**
* @author Ryan Emerson
*/
public abstract class AbstractTableManager<K, V> extends BaseTableOperations<K, V> implements TableManager<K, V> {
private static final String DEFAULT_IDENTIFIER_QUOTE_STRING = "\"";
private static final String META_TABLE_SUFFIX = "_META";
private static final String META_TABLE_DATA_COLUMN = "data";
private final Log log;
protected final InitializationContext ctx;
protected final ConnectionFactory connectionFactory;
protected final JdbcStringBasedStoreConfiguration jdbcConfig;
protected final TableManipulationConfiguration config;
protected final PersistenceMarshaller marshaller;
protected final MarshallableEntryFactory<K, V> marshallableEntryFactory;
protected final String timestampIndexExt = "timestamp_index";
protected final String segmentIndexExt = "segment_index";
protected final String identifierQuoteString;
protected final DbMetaData dbMetadata;
protected final TableName dataTableName;
protected final TableName metaTableName;
protected MetadataImpl metadata;
protected Key2StringMapper key2StringMapper;
// The field order is important because some of the SQL fragments are reused
private final String insertRowSql;
private final String updateRowSql;
private final String upsertRowSql;
private final String selectRowSql;
private final String selectIdRowSql;
private final String deleteRowSql;
private final String getDeleteRowWithExpirationSql;
private final String loadAllRowsSql;
private final String countRowsSql;
private final String loadAllNonExpiredRowsSql;
private final String deleteAllRows;
private final String selectExpiredRowsSql;
AbstractTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration jdbcConfig,
DbMetaData dbMetadata, String cacheName, Log log) {
this(ctx, connectionFactory, jdbcConfig, dbMetadata, cacheName, DEFAULT_IDENTIFIER_QUOTE_STRING, log);
}
AbstractTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration jdbcConfig,
DbMetaData dbMetadata, String cacheName, String identifierQuoteString, Log log) {
super(jdbcConfig);
// cacheName is required
if (cacheName == null || cacheName.trim().isEmpty())
throw new PersistenceException("cacheName needed in order to create table");
this.ctx = ctx;
this.connectionFactory = connectionFactory;
this.jdbcConfig = jdbcConfig;
this.config = jdbcConfig.table();
this.dbMetadata = dbMetadata;
this.dataTableName = new TableName(identifierQuoteString, config.tableNamePrefix(), cacheName);
this.metaTableName = new TableName(identifierQuoteString, config.tableNamePrefix(), cacheName + META_TABLE_SUFFIX);
this.identifierQuoteString = identifierQuoteString;
this.log = log;
// init row sql
this.insertRowSql = initInsertRowSql();
this.updateRowSql = initUpdateRowSql();
this.upsertRowSql = initUpsertRowSql();
this.selectRowSql = initSelectRowSql();
this.selectIdRowSql = initSelectIdRowSql();
this.deleteRowSql = initDeleteRowSql();
this.getDeleteRowWithExpirationSql = initDeleteRowWithExpirationSql();
this.loadAllRowsSql = initLoadAllRowsSql();
this.countRowsSql = initCountNonExpiredRowsSql();
this.loadAllNonExpiredRowsSql = initLoadNonExpiredAllRowsSql();
this.deleteAllRows = initDeleteAllRowsSql();
this.selectExpiredRowsSql = initSelectOnlyExpiredRowsSql();
// ISPN-14108 only initialize fields from the InitializationContext when it is not null. Required for StoreMigrator
if (ctx != null) {
this.marshaller = ctx.getPersistenceMarshaller();
this.marshallableEntryFactory = ctx.getMarshallableEntryFactory();
this.marshaller.register(new PersistenceContextInitializerImpl());
} else {
this.marshaller = null;
this.marshallableEntryFactory = null;
}
}
@Override
public void start() throws PersistenceException {
if (config.createOnStart()) {
Connection conn = null;
try {
conn = connectionFactory.getConnection();
if (!tableExists(conn, metaTableName)) {
createMetaTable(conn);
}
if (!tableExists(conn, dataTableName)) {
createDataTable(conn);
}
createIndex(conn, timestampIndexExt, config.timestampColumnName());
if (!dbMetadata.isSegmentedDisabled()) {
createIndex(conn, segmentIndexExt, config.segmentColumnName());
}
} finally {
connectionFactory.releaseConnection(conn);
}
}
JdbcStringBasedStoreConfiguration configuration = ctx.getConfiguration();
try {
Object mapper = Util.loadClassStrict(configuration.key2StringMapper(),
ctx.getGlobalConfiguration().classLoader()).newInstance();
if (mapper instanceof Key2StringMapper) key2StringMapper = (Key2StringMapper) mapper;
} catch (Exception e) {
log.errorf("Trying to instantiate %s, however it failed due to %s", configuration.key2StringMapper(),
e.getClass().getName());
throw new IllegalStateException("This should not happen.", e);
}
}
@Override
public void stop() throws PersistenceException {
if (config.dropOnExit()) {
Connection conn = null;
try {
conn = connectionFactory.getConnection();
dropTables(conn);
} finally {
connectionFactory.releaseConnection(conn);
}
}
}
@Override
public boolean tableExists(Connection connection, TableName tableName) {
Objects.requireNonNull(tableName, "table name is mandatory");
ResultSet rs = null;
try {
// We need to make sure that (even if the user has extended permissions) only the tables in the current schema are checked.
// The schema is explicitly set to the current user's schema so that only tables belonging to the current user are requested.
DatabaseMetaData metaData = connection.getMetaData();
String schemaPattern = tableName.getSchema();
rs = metaData.getTables(null, schemaPattern, tableName.getName(), new String[]{"TABLE"});
return rs.next();
} catch (SQLException e) {
if (log.isTraceEnabled())
log.tracef(e, "SQLException occurs while checking the table %s", tableName);
return false;
} finally {
JdbcUtil.safeClose(rs);
}
}
@Override
public void createMetaTable(Connection conn) throws PersistenceException {
// Store using internal names for columns and store as binary using the provided dataColumnType so no additional configuration is required
String sql = String.format("CREATE TABLE %1$s (%2$s %3$s NOT NULL)", metaTableName, META_TABLE_DATA_COLUMN, config.dataColumnType());
executeUpdateSql(conn, sql);
updateMetaTable(conn);
}
@Override
public void updateMetaTable(Connection conn) throws PersistenceException {
String clearTable = "DELETE FROM " + metaTableName;
executeUpdateSql(conn, clearTable);
short version = Version.getVersionShort();
int segments = ctx.getConfiguration().segmented() ? ctx.getCache().getCacheConfiguration().clustering().hash().numSegments() : -1;
this.metadata = new MetadataImpl(version, segments);
ByteBuffer buffer = marshall(metadata, ctx.getPersistenceMarshaller());
String sql = String.format("INSERT INTO %s (%s) VALUES (?)", metaTableName, META_TABLE_DATA_COLUMN);
PreparedStatement ps = null;
try {
ps = conn.prepareStatement(sql);
ps.setBinaryStream(1, new ByteArrayInputStream(buffer.getBuf(), buffer.getOffset(), buffer.getLength()));
ps.executeUpdate();
} catch (SQLException e) {
PERSISTENCE.errorCreatingTable(sql, e);
throw new PersistenceException(e);
} finally {
JdbcUtil.safeClose(ps);
}
}
@Override
public Metadata getMetadata(Connection connection) throws PersistenceException {
if (metadata == null) {
ResultSet rs = null;
try {
String sql = String.format("SELECT %s FROM %s", META_TABLE_DATA_COLUMN, metaTableName.toString());
rs = connection.createStatement().executeQuery(sql);
if (!rs.next()) {
log.sqlMetadataNotPresent(metaTableName.toString());
return null;
}
this.metadata = unmarshall(rs.getBinaryStream(1), ctx.getPersistenceMarshaller());
} catch (SQLException e) {
PERSISTENCE.sqlFailureMetaRetrieval(e);
throw new PersistenceException(e);
} finally {
JdbcUtil.safeClose(rs);
}
}
return metadata;
}
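/**
 * Example of the DDL generated for a segmented store, assuming illustrative column names:
 * CREATE TABLE "ISPN_STRING_TABLE_mycache" (ID_COLUMN VARCHAR(255) NOT NULL, DATA_COLUMN BLOB NOT NULL,
 * TIMESTAMP_COLUMN BIGINT NOT NULL, SEGMENT_COLUMN INT NOT NULL, PRIMARY KEY (ID_COLUMN))
 */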
@Override
public void createDataTable(Connection conn) throws PersistenceException {
String ddl;
if (dbMetadata.isSegmentedDisabled()) {
ddl = String.format("CREATE TABLE %1$s (%2$s %3$s NOT NULL, %4$s %5$s NOT NULL, %6$s %7$s NOT NULL, PRIMARY KEY (%2$s))",
dataTableName, config.idColumnName(), config.idColumnType(), config.dataColumnName(),
config.dataColumnType(), config.timestampColumnName(), config.timestampColumnType());
} else {
ddl = String.format("CREATE TABLE %1$s (%2$s %3$s NOT NULL, %4$s %5$s NOT NULL, %6$s %7$s NOT NULL, %8$s %9$s NOT NULL, PRIMARY KEY (%2$s))",
dataTableName, config.idColumnName(), config.idColumnType(), config.dataColumnName(),
config.dataColumnType(), config.timestampColumnName(), config.timestampColumnType(),
config.segmentColumnName(), config.segmentColumnType());
}
if (log.isTraceEnabled()) {
log.tracef("Creating table with following DDL: '%s'.", ddl);
}
executeUpdateSql(conn, ddl);
}
private void createIndex(Connection conn, String indexExt, String columnName) throws PersistenceException {
if (dbMetadata.isIndexingDisabled()) return;
boolean indexExists = indexExists(getIndexName(dbMetadata.getMaxTableNameLength(), false, indexExt), conn);
if (!indexExists) {
String ddl = String.format("CREATE INDEX %s ON %s (%s)", getIndexName(dbMetadata.getMaxTableNameLength(), true, indexExt), dataTableName, columnName);
if (log.isTraceEnabled()) {
log.tracef("Adding index with following DDL: '%s'.", ddl);
}
executeUpdateSql(conn, ddl);
}
}
protected boolean indexExists(String indexName, Connection conn) throws PersistenceException {
ResultSet rs = null;
try {
DatabaseMetaData meta = conn.getMetaData();
rs = meta.getIndexInfo(null, dataTableName.getSchema(), dataTableName.getName(), false, false);
while (rs.next()) {
if (indexName.equalsIgnoreCase(rs.getString("INDEX_NAME"))) {
return true;
}
}
} catch (SQLException e) {
throw new PersistenceException(e);
} finally {
JdbcUtil.safeClose(rs);
}
return false;
}
public void executeUpdateSql(Connection conn, String sql) throws PersistenceException {
Statement statement = null;
try {
statement = conn.createStatement();
statement.executeUpdate(sql);
} catch (SQLException e) {
PERSISTENCE.errorCreatingTable(sql, e);
throw new PersistenceException(e);
} finally {
JdbcUtil.safeClose(statement);
}
}
@Override
public void dropDataTable(Connection conn) throws PersistenceException {
dropIndex(conn, timestampIndexExt);
dropIndex(conn, segmentIndexExt);
dropTable(conn, dataTableName);
}
@Override
public void dropMetaTable(Connection conn) throws PersistenceException {
dropTable(conn, metaTableName);
}
private void dropTable(Connection conn, TableName tableName) throws PersistenceException {
String clearTable = "DELETE FROM " + tableName;
executeUpdateSql(conn, clearTable);
String dropTableDdl = "DROP TABLE " + tableName;
if (log.isTraceEnabled()) {
log.tracef("Dropping table with following DDL '%s'", dropTableDdl);
}
executeUpdateSql(conn, dropTableDdl);
}
protected void dropIndex(Connection conn, String indexName) throws PersistenceException {
if (!indexExists(getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName), conn)) return;
String dropIndexDdl = getDropTimestampSql(indexName);
if (log.isTraceEnabled()) {
log.tracef("Dropping timestamp index with '%s'", dropIndexDdl);
}
executeUpdateSql(conn, dropIndexDdl);
}
protected String getDropTimestampSql(String indexName) {
return String.format("DROP INDEX %s ON %s", getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName), dataTableName);
}
@Override
public int getFetchSize() {
return config.fetchSize();
}
public int getBatchSize() {
return config.batchSize();
}
@Override
public boolean isUpsertSupported() {
return !dbMetadata.isUpsertDisabled();
}
@Override
public String getIdentifierQuoteString() {
return identifierQuoteString;
}
@Override
public TableName getDataTableName() {
return dataTableName;
}
@Override
public TableName getMetaTableName() {
return metaTableName;
}
public String getIndexName(int maxTableNameLength, boolean withIdentifier, String indexExt) {
String plainTableName = dataTableName.toString().replace(identifierQuoteString, "");
String indexName = plainTableName + "_" + indexExt;
if (withIdentifier) {
return identifierQuoteString + indexName + identifierQuoteString;
}
return indexName;
}
protected String initInsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("INSERT INTO %s (%s,%s,%s) VALUES (?,?,?)", dataTableName,
config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
} else {
return String.format("INSERT INTO %s (%s,%s,%s,%s) VALUES (?,?,?,?)", dataTableName,
config.dataColumnName(), config.timestampColumnName(), config.idColumnName(), config.segmentColumnName());
}
}
public String getInsertRowSql() {
return insertRowSql;
}
protected String initUpdateRowSql() {
return String.format("UPDATE %s SET %s = ? , %s = ? WHERE %s = ?", dataTableName,
config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
}
@Override
public String getUpdateRowSql() {
return updateRowSql;
}
protected String initSelectRowSql() {
return String.format("SELECT %s, %s FROM %s WHERE %s = ?",
config.dataColumnName(), config.idColumnName(), dataTableName, config.idColumnName());
}
@Override
public String getSelectRowSql() {
return selectRowSql;
}
protected String initSelectIdRowSql() {
return String.format("SELECT %s FROM %s WHERE %s = ?", config.idColumnName(), dataTableName, config.idColumnName());
}
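   // A negative timestamp is the "never expires" sentinel, so non-expired rows are those whose
   // timestamp is below zero or beyond the supplied wall-clock time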
protected String initCountNonExpiredRowsSql() {
return "SELECT COUNT(*) FROM " + dataTableName +
" WHERE " + config.timestampColumnName() + " < 0 OR " + config.timestampColumnName() + " > ?";
}
protected String initDeleteRowSql() {
return String.format("DELETE FROM %s WHERE %s = ?", dataTableName, config.idColumnName());
}
protected String initDeleteRowWithExpirationSql() {
return String.format("DELETE FROM %s WHERE %s = ? AND %s = ?", dataTableName, config.idColumnName(), config.timestampColumnName());
}
@Override
public String getDeleteRowSql() {
return deleteRowSql;
}
@Override
public String getDeleteRowWithExpirationSql() {
return getDeleteRowWithExpirationSql;
}
protected String initLoadNonExpiredAllRowsSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("SELECT %1$s, %2$s, %3$s FROM %4$s WHERE %3$s > ? OR %3$s < 0",
config.dataColumnName(), config.idColumnName(),
config.timestampColumnName(), dataTableName);
} else {
return String.format("SELECT %1$s, %2$s, %3$s, %4$s FROM %5$s WHERE %3$s > ? OR %3$s < 0",
config.dataColumnName(), config.idColumnName(),
config.timestampColumnName(), config.segmentColumnName(), dataTableName);
}
}
@Override
public String getLoadNonExpiredAllRowsSql() {
return loadAllNonExpiredRowsSql;
}
public String getLoadNonExpiredRowsSqlForSegments(int numSegments) {
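      // Illustrative output for numSegments = 3 (table and column names are placeholders):
      //    SELECT D_COL, ID_COL FROM "ISPN_myCache" WHERE (TS_COL > ? OR TS_COL < 0) AND SEG_COL IN (?,?,?)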
StringBuilder stringBuilder = new StringBuilder("SELECT ");
stringBuilder.append(config.dataColumnName());
stringBuilder.append(", ");
stringBuilder.append(config.idColumnName());
stringBuilder.append(" FROM ");
stringBuilder.append(dataTableName);
      // Note the timestamp OR clause is wrapped in parentheses so it binds before the AND on the segment column
stringBuilder.append(" WHERE (");
stringBuilder.append(config.timestampColumnName());
stringBuilder.append(" > ? OR ");
stringBuilder.append(config.timestampColumnName());
stringBuilder.append(" < 0) AND ");
stringBuilder.append(config.segmentColumnName());
stringBuilder.append(" IN (?");
for (int i = 1; i < numSegments; ++i) {
stringBuilder.append(",?");
}
stringBuilder.append(")");
return stringBuilder.toString();
}
protected String initLoadAllRowsSql() {
return String.format("SELECT %s, %s FROM %s", config.dataColumnName(), config.idColumnName(), dataTableName);
}
@Override
public String getLoadAllRowsSql() {
return loadAllRowsSql;
}
protected String initDeleteAllRowsSql() {
return "DELETE FROM " + dataTableName;
}
protected String initSelectOnlyExpiredRowsSql() {
return String.format("SELECT %1$s, %2$s, %3$s FROM %4$s WHERE %3$s < ? AND %3$s > 0", config.dataColumnName(),
config.idColumnName(), config.timestampColumnName(), dataTableName);
}
@Override
public String getSelectOnlyExpiredRowsSql() {
return selectExpiredRowsSql;
}
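   // Generic ANSI-style MERGE upsert. Illustrative output of the non-segmented branch
   // (placeholder names D_COL/TS_COL/ID_COL):
   //    MERGE INTO "ISPN_myCache" USING (VALUES (?, ?, ?)) AS tmp (D_COL, TS_COL, ID_COL)
   //    ON (ID_COL = tmp.ID_COL)
   //    WHEN MATCHED THEN UPDATE SET D_COL = tmp.D_COL, TS_COL = tmp.TS_COL
   //    WHEN NOT MATCHED THEN INSERT (D_COL, TS_COL, ID_COL) VALUES (tmp.D_COL, tmp.TS_COL, tmp.ID_COL)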
   protected String initUpsertRowSql() {
      if (dbMetadata.isSegmentedDisabled()) {
         return String.format("MERGE INTO %1$s " +
                     "USING (VALUES (?, ?, ?)) AS tmp (%2$s, %3$s, %4$s) " +
                     "ON (%4$s = tmp.%4$s) " +
                     "WHEN MATCHED THEN UPDATE SET %2$s = tmp.%2$s, %3$s = tmp.%3$s " +
                     "WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s)",
               dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
      } else {
         return String.format("MERGE INTO %1$s " +
                     "USING (VALUES (?, ?, ?, ?)) AS tmp (%2$s, %3$s, %4$s, %5$s) " +
                     "ON (%4$s = tmp.%4$s) " +
                     "WHEN MATCHED THEN UPDATE SET %2$s = tmp.%2$s, %3$s = tmp.%3$s, %5$s = tmp.%5$s " +
                     "WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s, %5$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s, tmp.%5$s)",
               dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName(), config.segmentColumnName());
      }
   }
@Override
public boolean isStringEncodingRequired() {
return false;
}
@Override
public String encodeString(String string) {
return string;
}
@Override
public void prepareUpdateStatement(PreparedStatement ps, String key, long timestamp, int segment, ByteBuffer byteBuffer) throws SQLException {
ps.setBinaryStream(1, new ByteArrayInputStream(byteBuffer.getBuf(), byteBuffer.getOffset(), byteBuffer.getLength()), byteBuffer.getLength());
ps.setLong(2, timestamp);
ps.setString(3, key);
}
@Override
protected void preparePublishStatement(PreparedStatement ps, IntSet segments) throws SQLException {
int offset = 1;
ps.setLong(offset, ctx.getTimeService().wallClockTime());
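      // The remaining positional parameters fill the segment IN (?,?,...) clause generated by
      // getLoadNonExpiredRowsSqlForSegments; the first parameter is the wall-clock time used by the
      // non-expired predicate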
if (!dbMetadata.isSegmentedDisabled() && segments != null) {
for (PrimitiveIterator.OfInt segIter = segments.iterator(); segIter.hasNext(); ) {
ps.setInt(++offset, segIter.nextInt());
}
}
}
@ProtoTypeId(ProtoStreamTypeIds.JDBC_PERSISTED_METADATA)
public static class MetadataImpl implements Metadata {
final short version;
final int segments;
@ProtoFactory
public MetadataImpl(short version, int segments) {
this.version = version;
this.segments = segments;
}
@Override
@ProtoField(number = 1, defaultValue = "-1")
public short getVersion() {
return version;
}
@Override
@ProtoField(number = 2, defaultValue = "-1")
public int getSegments() {
return segments;
}
@Override
public String toString() {
return "MetadataImpl{" +
"version=" + version +
", segments=" + segments +
'}';
}
}
protected String key2Str(Object key) throws PersistenceException {
if (!key2StringMapper.isSupportedType(key.getClass())) {
throw new UnsupportedKeyTypeException(key);
}
String keyStr = key2StringMapper.getStringMapping(key);
return isStringEncodingRequired() ? encodeString(keyStr) : keyStr;
}
@Override
public String getSelectAllSql(IntSet segments) {
if (!dbMetadata.isSegmentedDisabled() && segments != null) {
return getLoadNonExpiredRowsSqlForSegments(segments.size());
} else {
return getLoadNonExpiredAllRowsSql();
}
}
@Override
protected void prepareKeyStatement(PreparedStatement ps, Object key) throws SQLException {
String lockingKey = key2Str(key);
ps.setString(1, lockingKey);
}
@Override
protected MarshallableEntry<K, V> entryFromResultSet(ResultSet rs, Object keyIfPresent, boolean fetchValue,
Predicate<? super K> keyPredicate) throws SQLException {
MarshallableEntry<K, V> entry = null;
K key = (K) keyIfPresent;
if (key == null) {
String keyStr = rs.getString(2);
key = (K) ((TwoWayKey2StringMapper) key2StringMapper).getKeyMapping(keyStr);
}
if (keyPredicate == null || keyPredicate.test(key)) {
InputStream inputStream = rs.getBinaryStream(1);
MarshalledValue value = unmarshall(inputStream, marshaller);
entry = marshallableEntryFactory.create(key,
fetchValue ? value.getValueBytes() : null,
value.getMetadataBytes(),
value.getInternalMetadataBytes(),
value.getCreated(),
value.getLastUsed());
if (entry.getMetadata() != null && entry.isExpired(ctx.getTimeService().wallClockTime())) {
return null;
}
}
return entry;
}
@Override
public String getDeleteAllSql() {
return deleteAllRows;
}
@Override
public String getUpsertRowSql() {
return upsertRowSql;
}
@Override
public String getSizeSql() {
return countRowsSql;
}
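   // Fallback for dialects where atomic upserts are disabled: probe with a SELECT by key, then issue
   // either an UPDATE or an INSERT. The two statements are not atomic, so concurrent writers can race
   // between them.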
@Override
public void upsertEntry(Connection connection, int segment, MarshallableEntry<? extends K, ? extends V> entry)
throws SQLException {
if (!dbMetadata.isUpsertDisabled()) {
super.upsertEntry(connection, segment, entry);
return;
}
String keyStr = key2Str(entry.getKey());
String sql = selectIdRowSql;
if (log.isTraceEnabled()) {
log.tracef("Running legacy upsert sql '%s'. Key string is '%s'", sql, keyStr);
}
PreparedStatement ps = null;
try {
ps = connection.prepareStatement(sql);
ps.setQueryTimeout(configuration.readQueryTimeout());
ps.setString(1, keyStr);
ResultSet rs = ps.executeQuery();
boolean update = rs.next();
if (update) {
sql = updateRowSql;
} else {
sql = insertRowSql;
}
JdbcUtil.safeClose(rs);
JdbcUtil.safeClose(ps);
if (log.isTraceEnabled()) {
log.tracef("Running sql '%s'. Key string is '%s'", sql, keyStr);
}
ps = connection.prepareStatement(sql);
ps.setQueryTimeout(configuration.writeQueryTimeout());
prepareValueStatement(ps, segment, entry);
ps.executeUpdate();
} finally {
JdbcUtil.safeClose(ps);
}
}
@Override
protected final void prepareValueStatement(PreparedStatement ps, int segment, MarshallableEntry<? extends K, ? extends V> entry) throws SQLException {
prepareValueStatement(ps, segment, key2Str(entry.getKey()), marshall(entry.getMarshalledValue(), marshaller), entry.expiryTime());
}
protected void prepareValueStatement(PreparedStatement ps, int segment, String keyStr, ByteBuffer valueBytes, long expiryTime) throws SQLException {
ps.setBinaryStream(1, new ByteArrayInputStream(valueBytes.getBuf(), valueBytes.getOffset(),
valueBytes.getLength()), valueBytes.getLength());
ps.setLong(2, expiryTime);
ps.setString(3, keyStr);
if (!dbMetadata.isSegmentedDisabled()) {
ps.setInt(4, segment);
}
}
@Override
protected void prepareSizeStatement(PreparedStatement ps) throws SQLException {
ps.setLong(1, ctx.getTimeService().wallClockTime());
}
}
| 27,914 | 37.87883 | 159 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/H2TableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.sql.Connection;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class H2TableManager extends AbstractTableManager {
private static final Log log = LogFactory.getLog(H2TableManager.class, Log.class);
H2TableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
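   // H2 upsert uses the native MERGE ... KEY(...) form; illustrative output (placeholder names):
   //    MERGE INTO "ISPN_myCache" (D_COL, TS_COL, ID_COL) KEY(ID_COL) VALUES(?, ?, ?)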
@Override
protected String initUpsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("MERGE INTO %1$s (%2$s, %3$s, %4$s) KEY(%4$s) VALUES(?, ?, ?)", dataTableName,
config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
} else {
return String.format("MERGE INTO %1$s (%2$s, %3$s, %4$s, %5$s) KEY(%4$s) VALUES(?, ?, ?, ?)", dataTableName,
config.dataColumnName(), config.timestampColumnName(), config.idColumnName(), config.segmentColumnName());
}
}
@Override
protected void dropIndex(Connection conn, String indexName) throws PersistenceException {
String dropIndexDdl = String.format("DROP INDEX IF EXISTS %s", getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName));
executeUpdateSql(conn, dropIndexDdl);
}
}
| 1,762 | 43.075 | 164 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/OracleTableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.io.ByteArrayInputStream;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Objects;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class OracleTableManager extends AbstractTableManager {
private static final Log log = LogFactory.getLog(OracleTableManager.class, Log.class);
private static final String TIMESTAMP_INDEX_PREFIX = "IDX";
private static final String SEGMENT_INDEX_PREFIX = "SDX";
private final int dbVersion;
OracleTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
dbVersion = dbMetadata.getMajorVersion() * 100 + dbMetadata.getMinorVersion();
}
@Override
public boolean tableExists(Connection connection, TableName tableName) {
Objects.requireNonNull(tableName, "table name is mandatory");
ResultSet rs = null;
try {
DatabaseMetaData metaData = connection.getMetaData();
String schemaPattern = tableName.getSchema() == null ? metaData.getUserName() : tableName.getSchema();
rs = metaData.getTables(null, schemaPattern, tableName.getName(), new String[]{"TABLE"});
return rs.next();
} catch (SQLException e) {
if (log.isTraceEnabled())
log.tracef(e, "SQLException occurs while checking the table %s", tableName);
return false;
} finally {
JdbcUtil.safeClose(rs);
}
}
@Override
protected boolean indexExists(String indexName, Connection conn) throws PersistenceException {
ResultSet rs = null;
try {
DatabaseMetaData metaData = conn.getMetaData();
String schemaPattern = dataTableName.getSchema() == null ? metaData.getUserName() : dataTableName.getSchema();
rs = metaData.getIndexInfo(null,
String.format("%1$s%2$s%1$s", identifierQuoteString, schemaPattern),
String.format("%1$s%2$s%1$s", identifierQuoteString, dataTableName.getName()),
false, false);
while (rs.next()) {
String index = rs.getString("INDEX_NAME");
if (indexName.equalsIgnoreCase(index)) {
return true;
}
}
} catch (SQLException e) {
throw new PersistenceException(e);
} finally {
JdbcUtil.safeClose(rs);
}
return false;
}
@Override
public String getIndexName(int maxTableNameLength, boolean withIdentifier, String indexExt) {
if (indexExt.equals(timestampIndexExt)) {
         // Timestamp index names on Oracle historically began with IDX; keep using that prefix for backwards compatibility
indexExt = TIMESTAMP_INDEX_PREFIX;
}
String plainTableName = dataTableName.getName();
      /* Oracle 12.1 and below only supports index names up to 30 characters long.
         If cache names are longer than 15 characters and share a common prefix, the truncated
         timestamp and segment index names could collide, so a distinct segment prefix is used. */
if (dbVersion <= 1201 && indexExt.equals(segmentIndexExt) && plainTableName.length() + indexExt.length() + 1 > maxTableNameLength) {
indexExt = SEGMENT_INDEX_PREFIX;
}
int maxNameSize = maxTableNameLength - indexExt.length() - 1;
String truncatedName = plainTableName.length() > maxNameSize ? plainTableName.substring(0, maxNameSize) : plainTableName;
String indexName = indexExt + "_" + truncatedName;
if (withIdentifier) {
return identifierQuoteString + indexName + identifierQuoteString;
}
return indexName;
}
   @Override
   protected String getDropTimestampSql(String indexName) {
      // Oracle's DROP INDEX syntax does not take an ON <table> clause
      return String.format("DROP INDEX %s", getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName));
   }
@Override
protected String initInsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("INSERT INTO %s (%s,%s,%s) VALUES (?,?,?)", dataTableName,
config.idColumnName(), config.timestampColumnName(), config.dataColumnName());
} else {
return String.format("INSERT INTO %s (%s,%s,%s,%s) VALUES (?,?,?,?)", dataTableName,
config.idColumnName(), config.timestampColumnName(), config.dataColumnName(), config.segmentColumnName());
}
}
@Override
protected String initUpdateRowSql() {
return String.format("UPDATE %s SET %s = ? , %s = ? WHERE %s = ?", dataTableName,
config.timestampColumnName(), config.dataColumnName(), config.idColumnName());
}
@Override
public String initUpsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("MERGE INTO %1$s t " +
"USING (SELECT ? %2$s, ? %3$s, ? %4$s from dual) tmp ON (t.%2$s = tmp.%2$s) " +
"WHEN MATCHED THEN UPDATE SET t.%3$s = tmp.%3$s, t.%4$s = tmp.%4$s " +
"WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s)",
dataTableName, config.idColumnName(), config.timestampColumnName(), config.dataColumnName());
} else {
return String.format("MERGE INTO %1$s t " +
"USING (SELECT ? %2$s, ? %3$s, ? %4$s, ? %5$s from dual) tmp ON (t.%2$s = tmp.%2$s) " +
"WHEN MATCHED THEN UPDATE SET t.%3$s = tmp.%3$s, t.%4$s = tmp.%4$s " +
"WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s, %5$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s, tmp.%5$s)",
dataTableName, config.idColumnName(), config.timestampColumnName(), config.dataColumnName(),
config.segmentColumnName());
}
}
@Override
protected void prepareValueStatement(PreparedStatement ps, int segment, String keyStr, ByteBuffer valueBytes, long expiryTime) throws SQLException {
ps.setString(1, keyStr);
ps.setLong(2, expiryTime);
// We must use BLOB here to avoid ORA-01461 caused by implicit casts on dual
ps.setBlob(3, new ByteArrayInputStream(valueBytes.getBuf(), valueBytes.getOffset(), valueBytes.getLength()), valueBytes.getLength());
if (!dbMetadata.isSegmentedDisabled()) {
ps.setInt(4, segment);
}
}
@Override
public void prepareUpdateStatement(PreparedStatement ps, String key, long timestamp, int segment, ByteBuffer byteBuffer) throws SQLException {
ps.setLong(1, timestamp);
ps.setBinaryStream(2, new ByteArrayInputStream(byteBuffer.getBuf(), byteBuffer.getOffset(), byteBuffer.getLength()), byteBuffer.getLength());
ps.setString(3, key);
      // initUpdateRowSql has only three placeholders (timestamp, data, id), so no segment parameter is bound
}
}
| 7,344 | 44.90625 | 168 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/PostgresTableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.sql.Connection;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class PostgresTableManager extends AbstractTableManager {
private static final Log log = LogFactory.getLog(PostgresTableManager.class, Log.class);
PostgresTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
@Override
protected void dropIndex(Connection conn, String indexName) throws PersistenceException {
String dropIndexDdl = String.format("DROP INDEX IF EXISTS %s", getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName));
executeUpdateSql(conn, dropIndexDdl);
}
@Override
public String initUpdateRowSql() {
return String.format("UPDATE %s SET %s = ? , %s = ? WHERE %s = cast(? as %s)",
dataTableName, config.dataColumnName(), config.timestampColumnName(),
config.idColumnName(), config.idColumnType());
}
@Override
public String initSelectRowSql() {
return String.format("SELECT %s, %s FROM %s WHERE %s = cast(? as %s)",
config.dataColumnName(), config.idColumnName(), dataTableName,
config.idColumnName(), config.idColumnType());
}
@Override
public String initSelectIdRowSql() {
return String.format("SELECT %s FROM %s WHERE %s = cast(? as %s)",
config.idColumnName(), dataTableName, config.idColumnName(),
config.idColumnType());
}
@Override
public String initDeleteRowSql() {
return String.format("DELETE FROM %s WHERE %s = cast(? as %s)",
dataTableName, config.idColumnName(), config.idColumnType());
}
@Override
public boolean isUpsertSupported() {
// ON CONFLICT added in Postgres 9.5
return super.isUpsertSupported() && (dbMetadata.getMajorVersion() >= 10 ||
(dbMetadata.getMajorVersion() == 9 && dbMetadata.getMinorVersion() >= 5));
}
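   // Illustrative output (placeholder names):
   //    INSERT INTO "ISPN_users" (D_COL,TS_COL,ID_COL) VALUES (?,?,?)
   //       ON CONFLICT (ID_COL) DO UPDATE SET D_COL = EXCLUDED.D_COL, TS_COL = EXCLUDED.TS_COL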
@Override
public String initUpsertRowSql() {
return String.format("%1$s ON CONFLICT (%2$s) DO UPDATE SET %3$s = EXCLUDED.%3$s, %4$s = EXCLUDED.%4$s",
getInsertRowSql(), config.idColumnName(), config.dataColumnName(),
config.timestampColumnName());
}
}
| 2,817 | 39.257143 | 170 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/TableManagerFactory.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
import java.util.Arrays;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.CacheException;
import org.infinispan.persistence.jdbc.common.DatabaseType;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
public class TableManagerFactory {
private static final Log log = LogFactory.getLog(TableManagerFactory.class, Log.class);
public static final String UPSERT_DISABLED = "infinispan.jdbc.upsert.disabled";
public static final String INDEXING_DISABLED = "infinispan.jdbc.indexing.disabled";
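   // Usage sketch (illustrative; the builder variable is hypothetical): both switches are plain store
   // properties, e.g.
   //    jdbcStoreBuilder.addProperty(TableManagerFactory.UPSERT_DISABLED, "true");
   //    jdbcStoreBuilder.addProperty(TableManagerFactory.INDEXING_DISABLED, "true");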
public static <K, V> TableManager<K, V> getManager(InitializationContext ctx, ConnectionFactory connectionFactory,
JdbcStringBasedStoreConfiguration config, String cacheName) {
DbMetaData metaData = getDbMetaData(connectionFactory, config);
return getManager(metaData, ctx, connectionFactory, config, cacheName);
}
public static <K, V> TableManager<K, V> getManager(DbMetaData metaData, InitializationContext ctx,
ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, String cacheName) {
switch (metaData.getType()) {
case DB2:
case DB2_390:
return new DB2TableManager(ctx, connectionFactory, config, metaData, cacheName);
case H2:
return new H2TableManager(ctx, connectionFactory, config, metaData, cacheName);
case MARIA_DB:
case MYSQL:
return new MyTableOperations(ctx, connectionFactory, config, metaData, cacheName);
case ORACLE:
return new OracleTableManager(ctx, connectionFactory, config, metaData, cacheName);
case POSTGRES:
return new PostgresTableManager(ctx, connectionFactory, config, metaData, cacheName);
case SQLITE:
return new SQLiteTableManager(ctx, connectionFactory, config, metaData, cacheName);
case SYBASE:
return new SybaseTableManager(ctx, connectionFactory, config, metaData, cacheName);
case SQL_SERVER:
return new TableOperations(ctx, connectionFactory, config, metaData, cacheName);
default:
return new GenericTableManager(ctx, connectionFactory, config, metaData, cacheName);
}
}
private static DbMetaData getDbMetaData(ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config) {
return getDbMetaData(connectionFactory, config, !config.segmented());
}
public static DbMetaData getDbMetaData(ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config,
boolean segmentedDisabled) {
DatabaseType databaseType = config.dialect();
int majorVersion;
int minorVersion;
int maxTableName;
Connection connection = null;
try {
         // Try to retrieve the major and minor versions together; if they aren't available there is no point continuing anyway
connection = connectionFactory.getConnection();
DatabaseMetaData metaData = connection.getMetaData();
majorVersion = metaData.getDatabaseMajorVersion();
minorVersion = metaData.getDatabaseMinorVersion();
maxTableName = metaData.getMaxTableNameLength();
String version = majorVersion + "." + minorVersion;
if (log.isDebugEnabled()) {
log.debugf("Database version reported as '%s'.", version);
}
if (databaseType == null) {
databaseType = determineDatabaseType(metaData);
if (databaseType == null) {
throw new CacheConfigurationException("Unable to detect database dialect from JDBC driver name or connection metadata. Please provide this manually using the 'dialect' property in your configuration. Supported database dialect strings are " + Arrays.toString(DatabaseType.values()));
}
log.debugf("Guessing database dialect as '%s'. If this is incorrect, please specify the correct " +
"dialect using the 'dialect' attribute in your configuration. Supported database dialect strings are %s",
databaseType, Arrays.toString(DatabaseType.values()));
}
return new DbMetaData(databaseType, majorVersion, minorVersion, maxTableName,
isPropertyDisabled(config, UPSERT_DISABLED), isPropertyDisabled(config, INDEXING_DISABLED), segmentedDisabled);
} catch (SQLException e) {
throw new CacheException(e);
} finally {
connectionFactory.releaseConnection(connection);
}
}
private static DatabaseType determineDatabaseType(DatabaseMetaData metaData) throws SQLException {
String dbProduct = metaData.getDatabaseProductName();
DatabaseType databaseType = DatabaseType.guessDialect(dbProduct);
if (databaseType != null) {
return databaseType;
}
String dbDriver = metaData.getDriverName();
log.debugf("Unable to detect database dialect using produce name %s. Attempting to guess on driver name %s.", dbProduct, dbDriver);
return DatabaseType.guessDialect(dbDriver);
}
private static boolean isPropertyDisabled(AbstractJdbcStoreConfiguration config, String propertyName) {
String property = config.properties().getProperty(propertyName);
return property != null && Boolean.parseBoolean(property);
}
}
| 5,876 | 47.570248 | 300 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/DB2TableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.io.ByteArrayInputStream;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Objects;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class DB2TableManager<K, V> extends AbstractTableManager<K, V> {
private static final Log log = LogFactory.getLog(DB2TableManager.class, Log.class);
DB2TableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config,
DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
@Override
protected String initInsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("INSERT INTO %s (%s,%s,%s) VALUES (?,?,?)", dataTableName,
config.idColumnName(), config.timestampColumnName(), config.dataColumnName());
} else {
return String.format("INSERT INTO %s (%s,%s,%s,%s) VALUES (?,?,?,?)", dataTableName,
config.idColumnName(), config.timestampColumnName(), config.dataColumnName(), config.segmentColumnName());
}
}
@Override
protected String initUpsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("MERGE INTO %1$s AS t " +
"USING (SELECT * FROM TABLE (VALUES (?,?,?))) AS tmp(%4$s, %3$s, %2$s) " +
"ON t.%4$s = tmp.%4$s " +
"WHEN MATCHED THEN UPDATE SET (t.%2$s, t.%3$s) = (tmp.%2$s, tmp.%3$s) " +
"WHEN NOT MATCHED THEN INSERT (t.%4$s, t.%3$s, t.%2$s) VALUES (tmp.%4$s, tmp.%3$s, tmp.%2$s)",
dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
} else {
return String.format("MERGE INTO %1$s AS t " +
"USING (SELECT * FROM TABLE (VALUES (?,?,?,?))) AS tmp(%4$s, %3$s, %2$s, %5$s) " +
"ON t.%4$s = tmp.%4$s " +
"WHEN MATCHED THEN UPDATE SET (t.%2$s, t.%3$s, t.%5$s) = (tmp.%2$s, tmp.%3$s, tmp.%5$s) " +
"WHEN NOT MATCHED THEN INSERT (t.%4$s, t.%3$s, t.%2$s, t.%5$s) VALUES (tmp.%4$s, tmp.%3$s, tmp.%2$s, tmp.%5$s)",
dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName(), config.segmentColumnName());
}
}
@Override
protected void prepareValueStatement(PreparedStatement ps, int segment, String keyStr, ByteBuffer valueBytes, long expiryTime) throws SQLException {
ps.setString(1, keyStr);
ps.setLong(2, expiryTime);
ps.setBinaryStream(3, new ByteArrayInputStream(valueBytes.getBuf(), valueBytes.getOffset(), valueBytes.getLength()), valueBytes.getLength());
if (!dbMetadata.isSegmentedDisabled()) {
ps.setInt(4, segment);
}
}
@Override
protected String getDropTimestampSql(String indexName) {
return String.format("DROP INDEX %s", getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName));
}
@Override
public boolean tableExists(Connection connection, TableName tableName) throws PersistenceException {
Objects.requireNonNull(tableName, "table name is mandatory");
ResultSet rs = null;
try {
         // Make sure that only tables in the current schema are checked, even if the user has extended
         // permissions: explicitly resolve the schema to the current user's so only their tables match
DatabaseMetaData metaData = connection.getMetaData();
String schemaPattern = tableName.getSchema();
if (schemaPattern == null) {
schemaPattern = getCurrentSchema(connection);
}
rs = metaData.getTables(null, schemaPattern, tableName.getName(), new String[]{"TABLE"});
return rs.next();
} catch (SQLException e) {
if (log.isTraceEnabled())
log.tracef(e, "SQLException occurs while checking the table %s", tableName);
return false;
} finally {
JdbcUtil.safeClose(rs);
}
}
private String getCurrentSchema(Connection connection) {
try (Statement statement = connection.createStatement()) {
try (ResultSet rs = statement.executeQuery("VALUES CURRENT SCHEMA")) {
if (rs.next()) {
return rs.getString(1);
} else {
return null;
}
}
} catch (SQLException e) {
log.debug("Couldn't obtain the current schema, no schema will be specified during table existence check.", e);
return null;
}
}
}
| 5,273 | 44.465517 | 151 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/GenericTableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class GenericTableManager extends AbstractTableManager {
private static final Log log = LogFactory.getLog(GenericTableManager.class, Log.class);
GenericTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
}
| 822 | 40.15 | 169 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/TableOperations.java |
package org.infinispan.persistence.jdbc.impl.table;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
* @since 9.0
*/
class TableOperations extends AbstractTableManager {
   private static final Log log = LogFactory.getLog(TableOperations.class, Log.class);
TableOperations(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
@Override
public String initUpsertRowSql() {
// As SQL Server does not handle a merge atomically, we must acquire the table lock here otherwise it's possible
// for deadlocks to occur.
if (dbMetadata.isSegmentedDisabled()) {
return String.format("MERGE %1$s WITH (TABLOCK) " +
"USING (VALUES (?, ?, ?)) AS tmp (%2$s, %3$s, %4$s) " +
"ON (%1$s.%4$s = tmp.%4$s) " +
"WHEN MATCHED THEN UPDATE SET %2$s = tmp.%2$s, %3$s = tmp.%3$s " +
"WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s);",
dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
} else {
return String.format("MERGE %1$s WITH (TABLOCK) " +
"USING (VALUES (?, ?, ?, ?)) AS tmp (%2$s, %3$s, %4$s, %5$s) " +
"ON (%1$s.%4$s = tmp.%4$s) " +
"WHEN MATCHED THEN UPDATE SET %2$s = tmp.%2$s, %3$s = tmp.%3$s " +
"WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s, %5$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s, %5$s);",
dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName(),
config.segmentColumnName());
}
}
@Override
protected String initSelectOnlyExpiredRowsSql() {
String loadAll = String.format("%s WITH (UPDLOCK)", getLoadAllRowsSql());
return String.format("%1$s WHERE %2$s < ? AND %2$s > 0", loadAll, config.timestampColumnName());
}
@Override
public boolean isStringEncodingRequired() {
return dbMetadata.getMajorVersion() <= 13;
}
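   // Strips a leading byte order mark (U+FEFF), which strings read back from older SQL Server
   // versions may carry, so that keys written and re-read compare equal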
@Override
public String encodeString(String string) {
      char[] srcChars = string.toCharArray();
      if (srcChars.length > 0 && srcChars[0] == '\uFEFF') {
         // Drop the leading BOM character
         return string.substring(1);
      }
      return string;
}
}
| 2,896 | 43.569231 | 165 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/SQLiteTableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class SQLiteTableManager extends AbstractTableManager {
private static final Log log = LogFactory.getLog(SQLiteTableManager.class, Log.class);
SQLiteTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
@Override
public boolean isUpsertSupported() {
      // OR/ON CONFLICT introduced in 3.8.11; with only major/minor versions available, conservatively require 3.9+
return super.isUpsertSupported() && (dbMetadata.getMajorVersion() >= 4 ||
(dbMetadata.getMajorVersion() >= 3 && dbMetadata.getMinorVersion() >= 9));
}
@Override
public String initUpsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("INSERT OR REPLACE INTO %s (%s, %s, %s) VALUES (?, ?, ?)",
dataTableName, config.dataColumnName(), config.timestampColumnName(),
config.idColumnName());
} else {
return String.format("INSERT OR REPLACE INTO %s (%s, %s, %s, %s) VALUES (?, ?, ?, ?)",
dataTableName, config.dataColumnName(), config.timestampColumnName(),
config.idColumnName(), config.segmentColumnName());
}
}
}
| 1,714 | 41.875 | 168 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/DbMetaData.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.util.Objects;
import org.infinispan.persistence.jdbc.common.DatabaseType;
/**
* @author Ryan Emerson
*/
public class DbMetaData {
private final DatabaseType type;
private final int majorVersion;
private final int minorVersion;
private final int maxTableNameLength;
private final boolean upsertDisabled;
private final boolean indexingDisabled;
private final boolean segmentedDisabled;
public DbMetaData(DatabaseType type, Integer majorVersion, Integer minorVersion, int maxTableNameLength, boolean upsertDisabled,
boolean indexingDisabled, boolean segmentedDisabled) {
this.type = Objects.requireNonNull(type);
this.majorVersion = majorVersion == null ? -1 : majorVersion;
this.minorVersion = minorVersion == null ? -1 : minorVersion;
this.maxTableNameLength = maxTableNameLength;
this.upsertDisabled = upsertDisabled;
this.indexingDisabled = indexingDisabled;
this.segmentedDisabled = segmentedDisabled;
}
public DatabaseType getType() {
return type;
}
public int getMajorVersion() {
return majorVersion;
}
public int getMinorVersion() {
return minorVersion;
}
public boolean isUpsertDisabled() {
return upsertDisabled;
}
public boolean isIndexingDisabled() {
return indexingDisabled;
}
public boolean isSegmentedDisabled() {
return segmentedDisabled;
}
public int getMaxTableNameLength() {
return maxTableNameLength;
}
}
| 1,561 | 25.931034 | 131 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/TableName.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.io.Serializable;
/**
* Value object for table name operations.
*/
public class TableName implements Serializable {
private String identifierQuote;
private String schema;
private String tableName;
public TableName(String identifierQuote, String tableNamePrefix, String cacheName){
if(identifierQuote == null){
throw new IllegalArgumentException("identifierQuote must not be null");
}
if(tableNamePrefix == null){
throw new IllegalArgumentException("tableNamePrefix must not be null");
}
if(cacheName == null){
throw new IllegalArgumentException("cacheName must not be null");
}
this.identifierQuote = identifierQuote;
normalize(tableNamePrefix, cacheName);
}
public String getIdentifierQuote() {
return identifierQuote;
}
public void setIdentifierQuote(String identifierQuote) {
this.identifierQuote = identifierQuote;
}
public String getSchema(){
return schema;
}
public String getName(){
return tableName;
}
/**
*
    * @return the fully qualified table name (schema and table name), quoted with the identifier quote string.
*/
@Override
public String toString() {
if(schema != null){
return identifierQuote + schema + identifierQuote + "." + identifierQuote + tableName + identifierQuote;
} else {
return identifierQuote + tableName + identifierQuote;
}
}
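   // Examples: ("\"", "ISPN", "users") -> no schema, table ISPN_users;
   //           ("\"", "myschema.ISPN", "users") -> schema myschema, table ISPN_users;
   //           non-alphanumeric cache name characters are replaced, e.g. "my.cache" -> my_cache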
private void normalize(String tableNamePrefix, String cacheName){
cacheName = cacheName.replaceAll("[^\\p{Alnum}]", "_");
String tableName = (tableNamePrefix + "_" + cacheName);
      // split the table name to extract the optional schema prefix
String[] tableNameParts = tableName.split("\\.", 2);
if(tableNameParts.length != 1){
this.schema = tableNameParts[0];
this.tableName = tableNameParts[1];
} else {
this.schema = null;
this.tableName = tableNameParts[0];
}
if(schema != null && schema.isEmpty()){
throw new IllegalArgumentException("Schema inside table name prefix must not be empty.");
}
}
}
| 2,196 | 27.532468 | 113 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/MyTableOperations.java |
package org.infinispan.persistence.jdbc.impl.table;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class MyTableOperations extends AbstractTableManager {
private static final Log log = LogFactory.getLog(MyTableOperations.class, Log.class);
MyTableOperations(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, "`", log);
}
@Override
public int getFetchSize() {
return Integer.MIN_VALUE;
}
@Override
public String initUpsertRowSql() {
// Assumes that config.idColumnName is the primary key
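      // Illustrative output of the non-segmented branch (placeholder names, backtick-quoted table):
      //    INSERT INTO `ISPN_myCache` (D_COL,TS_COL,ID_COL) VALUES (?,?,?)
      //       ON DUPLICATE KEY UPDATE D_COL = VALUES(D_COL), TS_COL = VALUES(TS_COL)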
if (dbMetadata.isSegmentedDisabled()) {
return String.format("%1$s ON DUPLICATE KEY UPDATE %2$s = VALUES(%2$s), %3$s = VALUES(%3$s)", getInsertRowSql(),
config.dataColumnName(), config.timestampColumnName());
} else {
return String.format("%1$s ON DUPLICATE KEY UPDATE %2$s = VALUES(%2$s), %3$s = VALUES(%3$s), %4$s = VALUES(%4$s)", getInsertRowSql(),
config.dataColumnName(), config.timestampColumnName(), config.segmentColumnName());
}
}
}
| 1,527 | 41.444444 | 167 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/SybaseTableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfiguration;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* @author Ryan Emerson
*/
class SybaseTableManager extends AbstractTableManager {
private static final Log log = LogFactory.getLog(SybaseTableManager.class, Log.class);
SybaseTableManager(InitializationContext ctx, ConnectionFactory connectionFactory, JdbcStringBasedStoreConfiguration config, DbMetaData metaData, String cacheName) {
super(ctx, connectionFactory, config, metaData, cacheName, log);
}
@Override
protected String getDropTimestampSql(String indexName) {
return String.format("DROP INDEX %s.%s", dataTableName, getIndexName(dbMetadata.getMaxTableNameLength(), true, indexName));
}
@Override
protected String initUpdateRowSql() {
return String.format("UPDATE %s SET %s = ? , %s = ? WHERE %s = convert(%s,?)",
dataTableName, config.dataColumnName(), config.timestampColumnName(),
config.idColumnName(), config.idColumnType());
}
@Override
protected String initSelectRowSql() {
return String.format("SELECT %s, %s FROM %s WHERE %s = convert(%s,?)",
config.idColumnName(), config.dataColumnName(), dataTableName,
config.idColumnName(), config.idColumnType());
}
@Override
protected String initSelectIdRowSql() {
return String.format("SELECT %s FROM %s WHERE %s = convert(%s,?)",
config.idColumnName(), dataTableName, config.idColumnName(), config.idColumnType());
}
@Override
protected String initDeleteRowSql() {
return String.format("DELETE FROM %s WHERE %s = convert(%s,?)",
dataTableName, config.idColumnName(), config.idColumnType());
}
@Override
protected String initUpsertRowSql() {
if (dbMetadata.isSegmentedDisabled()) {
return String.format("MERGE INTO %1$s AS t " +
"USING (SELECT ? %2$s, ? %3$s, ? %4$s) AS tmp " +
"ON (t.%4$s = tmp.%4$s) " +
"WHEN MATCHED THEN UPDATE SET t.%2$s = tmp.%2$s, t.%3$s = tmp.%3$s " +
"WHEN NOT MATCHED THEN INSERT VALUES (tmp.%4$s, tmp.%2$s, tmp.%3$s)",
dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName());
} else {
return String.format("MERGE INTO %1$s AS t " +
"USING (SELECT ? %2$s, ? %3$s, ? %4$s, ? %5$s) AS tmp " +
"ON (t.%4$s = tmp.%4$s) " +
"WHEN MATCHED THEN UPDATE SET t.%2$s = tmp.%2$s, t.%3$s = tmp.%3$s " +
"WHEN NOT MATCHED THEN INSERT VALUES (tmp.%4$s, tmp.%2$s, tmp.%3$s, tmp.%5$s)",
dataTableName, config.dataColumnName(), config.timestampColumnName(), config.idColumnName(),
config.segmentColumnName());
}
}
@Override
protected boolean indexExists(String indexName, Connection conn) throws PersistenceException {
ResultSet rs = null;
try {
DatabaseMetaData meta = conn.getMetaData();
rs = meta.getIndexInfo(null, dataTableName.getSchema(), dataTableName.getName(), false, false);
while (rs.next()) {
String index = rs.getString("INDEX_NAME");
if (index != null && indexName.equalsIgnoreCase(index.replaceAll("\"", ""))) {
return true;
}
}
} catch (SQLException e) {
throw new PersistenceException(e);
} finally {
JdbcUtil.safeClose(rs);
}
return false;
}
}
| 4,158 | 41.438776 | 168 | java |
null | infinispan-main/persistence/jdbc/src/main/java/org/infinispan/persistence/jdbc/impl/table/TableManager.java |
package org.infinispan.persistence.jdbc.impl.table;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.spi.PersistenceException;
/**
* @author Ryan Emerson
*/
public interface TableManager<K, V> extends TableOperations<K, V> {
int DEFAULT_FETCH_SIZE = 100;
void start() throws PersistenceException;
void stop() throws PersistenceException;
boolean tableExists(Connection connection, TableName tableName);
default boolean metaTableExists(Connection conn) {
return tableExists(conn, getMetaTableName());
}
void createDataTable(Connection conn) throws PersistenceException;
void dropDataTable(Connection conn) throws PersistenceException;
void createMetaTable(Connection conn) throws PersistenceException;
void dropMetaTable(Connection conn) throws PersistenceException;
/**
* Write the latest metadata to the meta table.
*/
void updateMetaTable(Connection conn) throws PersistenceException;
Metadata getMetadata(Connection conn) throws PersistenceException;
default void dropTables(Connection conn) throws PersistenceException {
dropDataTable(conn);
dropMetaTable(conn);
}
int getFetchSize();
String getDeleteRowWithExpirationSql();
String getDeleteAllSql();
String getLoadNonExpiredAllRowsSql();
String getUpdateRowSql();
String getSelectRowSql();
String getLoadAllRowsSql();
String getSelectOnlyExpiredRowsSql();
boolean isUpsertSupported();
TableName getDataTableName();
TableName getMetaTableName();
String getIdentifierQuoteString();
boolean isStringEncodingRequired();
String encodeString(String stringToEncode);
void prepareUpdateStatement(PreparedStatement ps, String key, long timestamp, int segment, ByteBuffer byteBuffer) throws SQLException;
interface Metadata {
short getVersion();
int getSegments();
}
}
| 2,068 | 23.630952 | 137 | java |
null | infinispan-main/persistence/jdbc-common/src/test/java/org/infinispan/persistence/jdbc/common/UnitTestDatabaseManager.java |
package org.infinispan.persistence.jdbc.common;
import static org.testng.Assert.assertEquals;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.connectionfactory.PooledConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.connectionfactory.SimpleConnectionFactory;
/**
 * Class that ensures safe concurrent access to the in-memory database used by tests.
*
* @author Mircea.Markus@jboss.com
* @author Navin Surtani (<a href="mailto:nsurtani@redhat.com">nsurtani@redhat.com</a>)
* @author Tristan Tarrant
*/
public class UnitTestDatabaseManager {
   private static final AtomicInteger userIndex = new AtomicInteger(0);
private static final String DB_TYPE = System.getProperty("infinispan.test.jdbc.db", "H2");
private static final String H2_DRIVER = org.h2.Driver.class.getName();
private static final String NON_EXISTENT_DRIVER = "non.existent.Driver";
private static final DatabaseType dt;
static {
String driver = "";
try {
if (DB_TYPE.equalsIgnoreCase("mysql")) {
driver = com.mysql.jdbc.Driver.class.getName();
dt = DatabaseType.MYSQL;
} else {
driver = H2_DRIVER;
dt = DatabaseType.H2;
}
try {
Class.forName(driver);
} catch (ClassNotFoundException e) {
driver = H2_DRIVER;
Class.forName(H2_DRIVER);
}
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
public static ConnectionFactoryConfigurationBuilder<?> configureUniqueConnectionFactory(AbstractJdbcStoreConfigurationBuilder<?, ?> store) {
switch (dt) {
case H2:
return store
.connectionPool()
.driverClass(org.h2.Driver.class)
.connectionUrl(String.format("jdbc:h2:mem:%s;DB_CLOSE_DELAY=-1", extractTestName() + userIndex.incrementAndGet()))
.username("sa");
case MYSQL:
return store
.simpleConnection()
.driverClass(com.mysql.jdbc.Driver.class)
.connectionUrl("jdbc:mysql://localhost/infinispan?user=ispn&password=ispn")
.username("ispn")
.password("ispn");
default:
throw new RuntimeException("Cannot configure connection for database type "+dt);
}
}
public static ConnectionFactoryConfigurationBuilder<?> configureSimpleConnectionFactory(AbstractJdbcStoreConfigurationBuilder<?, ?> store) {
return store
.simpleConnection()
.driverClass(org.h2.Driver.class)
.connectionUrl(String.format("jdbc:h2:mem:%s;DB_CLOSE_DELAY=-1", extractTestName() + userIndex.incrementAndGet()))
.username("sa");
}
   public static ConnectionFactoryConfigurationBuilder<?> configureBrokenConnectionFactory(AbstractJdbcStoreConfigurationBuilder<?, ?> storeBuilder) {
return storeBuilder.connectionPool()
.driverClass(NON_EXISTENT_DRIVER);
}
private static String extractTestName() {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
if (stack.length == 0)
return null;
for (int i = stack.length - 1; i > 0; i--) {
StackTraceElement e = stack[i];
String className = e.getClassName();
if (className.indexOf("org.infinispan") != -1)
return className.replace('.', '_') + "_" + e.getMethodName();
}
return null;
}
public static void setDialect(AbstractJdbcStoreConfigurationBuilder builder) {
builder.dialect(dt);
}
public static void verifyConnectionLeaks(ConnectionFactory connectionFactory) {
if (connectionFactory instanceof PooledConnectionFactory) {
PooledConnectionFactory pcf = (PooledConnectionFactory) connectionFactory;
try {
Thread.sleep(500); // C3P0 needs a little delay before reporting the correct number of connections. Bah!
assertEquals(pcf.getActiveConnections(), 0);
} catch (Exception e) {
throw new RuntimeException(e);
}
} else if (connectionFactory instanceof SimpleConnectionFactory) {
SimpleConnectionFactory scf = (SimpleConnectionFactory) connectionFactory;
assertEquals(scf.getConnectionCount(), 0);
}
}
}
| 4,646 | 39.060345 | 143 | java |
null | infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/package-info.java |
/**
* This package contains a {@link org.infinispan.persistence.spi.AdvancedLoadWriteStore} implementation based on
* a JDBC database connection.
*
* @api.public
*/
package org.infinispan.persistence.jdbc.common;
| 218 | 26.375 | 112 | java |
null | infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/DatabaseType.java |
package org.infinispan.persistence.jdbc.common;
/**
* Supported database dialects for the JDBC cache stores
*
* @author Manik Surtani
* @since 4.1
*/
public enum DatabaseType {
ACCESS,
DB2,
DB2_390,
DERBY,
FIREBIRD,
H2,
HSQL,
INFORMIX,
INTERBASE,
MARIA_DB,
MYSQL,
ORACLE,
ORACLE_XE,
POSTGRES,
SQLITE,
SQL_SERVER,
SYBASE;
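   // e.g. guessDialect("PostgreSQL") == POSTGRES, guessDialect("MySQL Connector/J") == MYSQL,
   // guessDialect("Unknown DB") == null, in which case callers must supply an explicit dialect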
public static DatabaseType guessDialect(String name) {
DatabaseType type = null;
if (name == null)
return null;
name = name.toLowerCase();
if (name.contains("mysql")) {
type = DatabaseType.MYSQL;
} else if (name.contains("mariadb")) {
type = DatabaseType.MARIA_DB;
      // Postgres Plus / EnterpriseDB URLs look like jdbc:edb://localhost:5444/edb
} else if (name.contains("postgres") || name.contains("edb")) {
type = DatabaseType.POSTGRES;
} else if (name.contains("derby")) {
type = DatabaseType.DERBY;
} else if (name.contains("hsql") || name.contains("hypersonic")) {
type = DatabaseType.HSQL;
} else if (name.contains("h2")) {
type = DatabaseType.H2;
} else if (name.contains("sqlite")) {
type = DatabaseType.SQLITE;
} else if (name.contains("db2")) {
type = DatabaseType.DB2;
} else if (name.contains("informix")) {
type = DatabaseType.INFORMIX;
} else if (name.contains("interbase")) {
type = DatabaseType.INTERBASE;
} else if (name.contains("firebird")) {
type = DatabaseType.FIREBIRD;
} else if (name.contains("sqlserver") || name.contains("microsoft")) {
type = DatabaseType.SQL_SERVER;
} else if (name.contains("access")) {
type = DatabaseType.ACCESS;
} else if (name.contains("oracle")) {
type = DatabaseType.ORACLE;
} else if (name.contains("adaptive")) {
type = DatabaseType.SYBASE;
}
return type;
}
}
| 1,948 | 27.246377 | 76 | java |
null | infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/TableOperations.java |
package org.infinispan.persistence.jdbc.common;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.commons.util.IntSet;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* @author William Burns
*/
public interface TableOperations<K, V> {
MarshallableEntry<K, V> loadEntry(Connection connection, int segment, Object key) throws SQLException;
default Flowable<K> publishKeys(Supplier<Connection> connectionSupplier, Consumer<Connection> connectionCloser,
IntSet segments, Predicate<? super K> filter) {
return publishEntries(connectionSupplier, connectionCloser, segments, filter, false)
.map(MarshallableEntry::getKey);
}
Flowable<MarshallableEntry<K, V>> publishEntries(Supplier<Connection> connectionSupplier,
Consumer<Connection> connectionCloser, IntSet segments, Predicate<? super K> filter, boolean fetchValue);
boolean deleteEntry(Connection connection, int segment, Object key) throws SQLException;
void deleteAllRows(Connection connection) throws SQLException;
void upsertEntry(Connection connection, int segment, MarshallableEntry<? extends K, ? extends V> entry) throws SQLException;
long size(Connection connection) throws SQLException;
void batchUpdates(Connection connection, int writePublisherCount, Publisher<Object> removePublisher,
Publisher<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) throws SQLException;
}
| 1,731 | 40.238095 | 127 | java |
null | infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/SqlManager.java |
package org.infinispan.persistence.jdbc.common;
import java.util.List;
import org.infinispan.persistence.jdbc.common.impl.table.DB2SqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.GenericSqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.H2SqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.MySQLSqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.OracleSqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.PostgresqlSqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.SQLLiteSqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.SQLServerSqlManager;
import org.infinispan.persistence.jdbc.common.impl.table.SybaseSqlManager;
public interface SqlManager {
String getSelectStatement(List<String> keyColumns, List<String> allColumns);
String getSelectAllStatement(List<String> allColumns);
String getDeleteStatement(List<String> keyColumns);
String getDeleteAllStatement();
String getUpsertStatement(List<String> keyColumns, List<String> allColumns);
String getSizeCommand();
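   // Usage sketch (illustrative table and column names):
   //    SqlManager sql = SqlManager.fromDatabaseType(DatabaseType.POSTGRES, "ISPN_users");
   //    String upsert = sql.getUpsertStatement(java.util.List.of("id"), java.util.List.of("id", "data", "ts"));
   // Generated statements use positional '?' parameters unless namedParameters is true, in which case
   // named placeholders are emitted instead (exact syntax depends on the dialect implementation)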
static SqlManager fromDatabaseType(DatabaseType databaseType, String tableName) {
return fromDatabaseType(databaseType, tableName, false);
}
static SqlManager fromDatabaseType(DatabaseType databaseType, String tableName, boolean namedParameters) {
switch (databaseType) {
case DB2:
case DB2_390:
return new DB2SqlManager(tableName, namedParameters);
case H2:
return new H2SqlManager(tableName, namedParameters);
case MARIA_DB:
case MYSQL:
return new MySQLSqlManager(tableName, namedParameters);
case ORACLE:
return new OracleSqlManager(tableName, namedParameters);
case POSTGRES:
return new PostgresqlSqlManager(tableName, namedParameters);
case SQLITE:
return new SQLLiteSqlManager(tableName, namedParameters);
case SYBASE:
return new SybaseSqlManager(tableName, namedParameters);
case SQL_SERVER:
return new SQLServerSqlManager(tableName, namedParameters);
default:
return new GenericSqlManager(tableName, namedParameters);
}
}
}
| 2,292 | 39.22807 | 109 | java |
null | infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/JdbcUtil.java |
package org.infinispan.persistence.jdbc.common;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.io.IOException;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.commons.marshall.StreamAwareMarshaller;
/**
* Contains common methods used by JDBC CacheStores.
*
* @author Mircea.Markus@jboss.com
*/
public class JdbcUtil {
public static void safeClose(Statement ps) {
if (ps != null) {
try {
ps.close();
} catch (SQLException e) {
PERSISTENCE.sqlFailureUnexpected(e);
}
}
}
public static void safeClose(Connection connection) {
if (connection != null) {
try {
connection.close();
} catch (SQLException e) {
PERSISTENCE.sqlFailureClosingConnection(connection, e);
}
}
}
public static void safeClose(ResultSet rs) {
if (rs != null) {
try {
rs.close();
} catch (SQLException e) {
PERSISTENCE.sqlFailureUnexpected(e);
}
}
}
public static ByteBuffer marshall(Object obj, Marshaller marshaller) {
try {
return marshaller.objectToBuffer(obj);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new MarshallingException(e);
} catch (IOException e) {
PERSISTENCE.errorMarshallingObject(e, obj);
throw new MarshallingException("I/O failure while marshalling object: " + obj, e);
}
}
@SuppressWarnings("unchecked")
public static <T> T unmarshall(InputStream inputStream, StreamAwareMarshaller marshaller) {
try {
return (T) marshaller.readObject(inputStream);
} catch (IOException e) {
PERSISTENCE.ioErrorUnmarshalling(e);
throw new MarshallingException("I/O error while unmarshalling from stream", e);
} catch (ClassNotFoundException e) {
PERSISTENCE.unexpectedClassNotFoundException(e);
throw new MarshallingException(e);
}
}
@SuppressWarnings("unchecked")
public static <T> T unmarshall(ByteBuffer buf, Marshaller marshaller) {
try {
return (T) marshaller.objectFromByteBuffer(buf.getBuf(), buf.getOffset(), buf.getLength());
} catch (IOException e) {
throw new MarshallingException("I/O error while unmarshalling", e);
} catch (ClassNotFoundException e) {
PERSISTENCE.unexpectedClassNotFoundException(e);
throw new MarshallingException(e);
}
}
}
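// Editor's note: a round-trip sketch for the helpers above, not part of the
// original file. Any Marshaller implementation works; failures surface as
// unchecked MarshallingException rather than checked IOException.
final class JdbcUtilUsageSketch {
   static Object roundTrip(Object value, Marshaller marshaller) {
      ByteBuffer buf = JdbcUtil.marshall(value, marshaller);   // serialize
      return JdbcUtil.unmarshall(buf, marshaller);             // deserialize the same buffer window
   }
}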
| 2,816
| 30.3
| 100
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/package-info.java
|
/**
* Common configuration for JDBC cache stores.
*
* @api.public
*/
package org.infinispan.persistence.jdbc.common.configuration;
| 135
| 18.428571
| 61
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/ManagedConnectionFactoryConfiguration.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.connectionfactory.ManagedConnectionFactory;
/**
* ManagedConnectionFactoryConfiguration.
*
* @author Tristan Tarrant
* @since 5.2
*/
@BuiltBy(ManagedConnectionFactoryConfigurationBuilder.class)
public class ManagedConnectionFactoryConfiguration implements ConnectionFactoryConfiguration {
public static final AttributeDefinition<String> JNDI_URL = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.JNDI_URL, null, String.class).immutable().build();
public static AttributeSet attributeSet() {
return new AttributeSet(ManagedConnectionFactoryConfiguration.class, JNDI_URL);
}
private final Attribute<String> jndiUrl;
private final AttributeSet attributes;
ManagedConnectionFactoryConfiguration(AttributeSet attributes) {
this.attributes = attributes.checkProtection();
this.jndiUrl = attributes.attribute(JNDI_URL);
}
public String jndiUrl() {
return jndiUrl.get();
}
@Override
public Class<? extends ConnectionFactory> connectionFactoryClass() {
return ManagedConnectionFactory.class;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ManagedConnectionFactoryConfiguration that = (ManagedConnectionFactoryConfiguration) o;
return attributes != null ? attributes.equals(that.attributes) : that.attributes == null;
}
@Override
public String toString() {
return "ManagedConnectionFactoryConfiguration [" +
"attributes=" + attributes +
']';
}
@Override
public int hashCode() {
return attributes != null ? attributes.hashCode() : 0;
}
public AttributeSet attributes() {
return attributes;
}
}
| 2,256
| 32.191176
| 203
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/Element.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import java.util.HashMap;
import java.util.Map;
/**
* An enumeration of all the recognized XML element local names for the JDBC cache stores
*
* @author Tristan Tarrant
* @since 5.2
*/
public enum Element {
// must be first
UNKNOWN(null),
STRING_KEYED_JDBC_STORE("string-keyed-jdbc-store"),
BINARY_KEYED_JDBC_STORE("binary-keyed-jdbc-store"),
MIXED_KEYED_JDBC_STORE("mixed-keyed-jdbc-store"),
TABLE_JDBC_STORE("table-jdbc-store"),
QUERY_JDBC_STORE("query-jdbc-store"),
CONNECTION_POOL("connection-pool"),
DATA_SOURCE("data-source"),
SIMPLE_CONNECTION("simple-connection"),
STRING_KEYED_TABLE("string-keyed-table"),
DATA_COLUMN("data-column"),
ID_COLUMN("id-column"),
TIMESTAMP_COLUMN("timestamp-column"),
SEGMENT_COLUMN("segment-column"),
SCHEMA("schema"),
QUERIES("queries"),
SELECT_SINGLE("select-single"),
SELECT_ALL("select-all"),
DELETE_SINGLE("delete-single"),
DELETE_ALL("delete-all"),
UPSERT("upsert"),
SIZE("size"),
;
private final String name;
Element(final String name) {
this.name = name;
}
/**
* Get the local name of this element.
*
* @return the local name
*/
public String getLocalName() {
return name;
}
private static final Map<String, Element> MAP;
static {
final Map<String, Element> map = new HashMap<>(8);
for (Element element : values()) {
final String name = element.getLocalName();
if (name != null) {
map.put(name, element);
}
}
MAP = map;
}
public static Element forName(final String localName) {
final Element element = MAP.get(localName);
return element == null ? UNKNOWN : element;
}
@Override
public String toString() {
return name;
}
}
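// Editor's note: not part of the original file. forName(..) deliberately falls
// back to UNKNOWN instead of throwing, so parsers can report unexpected elements
// themselves.
final class ElementLookupSketch {
   static void demo() {
      assert Element.forName("connection-pool") == Element.CONNECTION_POOL;
      assert Element.forName("no-such-element") == Element.UNKNOWN; // graceful fallback
   }
}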
| 1,875
| 21.333333
| 89
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/PooledConnectionFactoryConfiguration.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.connectionfactory.PooledConnectionFactory;
@BuiltBy(PooledConnectionFactoryConfigurationBuilder.class)
public class PooledConnectionFactoryConfiguration extends AbstractUnmanagedConnectionFactoryConfiguration {
public static final AttributeDefinition<String> PROPERTY_FILE = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.PROPERTIES_FILE, null, String.class).immutable().build();
public static AttributeSet attributeSet() {
return new AttributeSet(PooledConnectionFactoryConfiguration.class, AbstractUnmanagedConnectionFactoryConfiguration.attributeSet(), PROPERTY_FILE);
}
private final Attribute<String> propertyFile;
protected PooledConnectionFactoryConfiguration(AttributeSet attributes) {
super(attributes);
this.propertyFile = attributes.attribute(PROPERTY_FILE);
}
public String propertyFile() {
return propertyFile.get();
}
@Override
public Class<? extends ConnectionFactory> connectionFactoryClass() {
return PooledConnectionFactory.class;
}
public AttributeSet attributes() {
return attributes;
}
}
| 1,597
| 39.974359
| 215
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/AbstractJdbcStoreConfiguration.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.persistence.jdbc.common.DatabaseType;
public abstract class AbstractJdbcStoreConfiguration<T extends AbstractJdbcStoreConfiguration<T>> extends AbstractStoreConfiguration<T> {
static final AttributeDefinition<DatabaseType> DIALECT = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.DIALECT, null, DatabaseType.class).immutable().build();
static final AttributeDefinition<Integer> READ_QUERY_TIMEOUT = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.READ_QUERY_TIMEOUT, 0, Integer.class).build();
static final AttributeDefinition<Integer> WRITE_QUERY_TIMEOUT = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.WRITE_QUERY_TIMEOUT, 0, Integer.class).build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(AbstractJdbcStoreConfiguration.class, AbstractStoreConfiguration.attributeDefinitionSet(),
DIALECT, READ_QUERY_TIMEOUT, WRITE_QUERY_TIMEOUT);
}
private final Attribute<DatabaseType> dialect;
private final Attribute<Integer> readQueryTimeout;
private final Attribute<Integer> writeQueryTimeout;
private final ConnectionFactoryConfiguration connectionFactory;
protected AbstractJdbcStoreConfiguration(Enum<?> element, AttributeSet attributes, AsyncStoreConfiguration async, ConnectionFactoryConfiguration connectionFactory) {
super(element, attributes, async);
this.connectionFactory = connectionFactory;
dialect = attributes.attribute(DIALECT);
readQueryTimeout = attributes.attribute(READ_QUERY_TIMEOUT);
writeQueryTimeout = attributes.attribute(WRITE_QUERY_TIMEOUT);
}
public ConnectionFactoryConfiguration connectionFactory() {
return connectionFactory;
}
/**
* @return always returns false
* @deprecated Since 13.0 with no replacement
*/
@Deprecated
public boolean manageConnectionFactory() {
return false;
}
public DatabaseType dialect() {
return dialect.get();
}
/**
* @deprecated since 14.0, always returns <b>null</b>
*/
@Deprecated
public Integer dbMajorVersion() {
return null;
}
/**
* @deprecated since 14.0, always returns <b>null</b>
*/
@Deprecated
public Integer dbMinorVersion() {
return null;
}
public Integer readQueryTimeout() {
return readQueryTimeout.get();
}
public Integer writeQueryTimeout() {
return writeQueryTimeout.get();
}
}
| 2,939
| 39.833333
| 206
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/AbstractJdbcStoreConfigurationSerializer.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import static org.infinispan.configuration.serializing.SerializeUtils.writeOptional;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.configuration.serializing.AbstractStoreSerializer;
/**
* AbstractJdbcStoreConfigurationSerializer.
*
* @author Tristan Tarrant
* @since 9.0
*/
public abstract class AbstractJdbcStoreConfigurationSerializer extends AbstractStoreSerializer {
protected void writeJdbcStoreAttributes(ConfigurationWriter writer, AbstractJdbcStoreConfiguration<?> configuration) {
configuration.attributes().write(writer);
}
private void writeJDBCStoreConnection(ConfigurationWriter writer, SimpleConnectionFactoryConfiguration configuration) {
writer.writeStartElement(Element.SIMPLE_CONNECTION);
writeOptional(writer, Attribute.CONNECTION_URL, configuration.connectionUrl());
writeOptional(writer, Attribute.DRIVER_CLASS, configuration.driverClass());
writeOptional(writer, Attribute.USERNAME, configuration.username());
writeOptional(writer, Attribute.PASSWORD, configuration.password());
writer.writeEndElement();
}
private void writeJDBCStoreConnection(ConfigurationWriter writer, PooledConnectionFactoryConfiguration configuration) {
writer.writeStartElement(Element.CONNECTION_POOL);
writeOptional(writer, Attribute.CONNECTION_URL, configuration.connectionUrl());
writeOptional(writer, Attribute.DRIVER_CLASS, configuration.driverClass());
writeOptional(writer, Attribute.USERNAME, configuration.username());
writeOptional(writer, Attribute.PASSWORD, configuration.password());
writer.writeEndElement();
}
private void writeJDBCStoreConnection(ConfigurationWriter writer, ManagedConnectionFactoryConfiguration configuration) {
writer.writeStartElement(Element.DATA_SOURCE);
writer.writeAttribute(Attribute.JNDI_URL, configuration.jndiUrl());
writer.writeEndElement();
}
protected void writeJDBCStoreConnection(ConfigurationWriter writer, AbstractJdbcStoreConfiguration<?> configuration) {
ConnectionFactoryConfiguration cfc = configuration.connectionFactory();
if (cfc instanceof SimpleConnectionFactoryConfiguration) {
writeJDBCStoreConnection(writer, (SimpleConnectionFactoryConfiguration) cfc);
} else if (cfc instanceof PooledConnectionFactoryConfiguration) {
writeJDBCStoreConnection(writer, (PooledConnectionFactoryConfiguration) cfc);
} else if (cfc instanceof ManagedConnectionFactoryConfiguration) {
writeJDBCStoreConnection(writer, (ManagedConnectionFactoryConfiguration) cfc);
}
}
}
| 2,705
| 49.111111
| 123
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/AbstractJdbcStoreConfigurationParser.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import static org.infinispan.util.logging.Log.CONFIG;
import org.infinispan.commons.configuration.io.ConfigurationReader;
import org.infinispan.configuration.parsing.ConfigurationParser;
import org.infinispan.configuration.parsing.ParseUtils;
import org.infinispan.persistence.jdbc.common.DatabaseType;
public abstract class AbstractJdbcStoreConfigurationParser implements ConfigurationParser {
protected boolean handleCommonAttributes(ConfigurationReader reader,
AbstractJdbcStoreConfigurationBuilder<?, ?> builder, Attribute attribute, String value) {
switch (attribute) {
case DIALECT:
builder.dialect(DatabaseType.valueOf(value));
break;
case DB_MAJOR_VERSION:
case DB_MINOR_VERSION:
CONFIG.configDeprecated(attribute);
break;
case READ_QUERY_TIMEOUT:
builder.readQueryTimeout(Integer.parseInt(value));
break;
case WRITE_QUERY_TIMEOUT:
builder.writeQueryTimeout(Integer.parseInt(value));
break;
default:
return false;
}
return true;
}
protected boolean handleCommonElement(AbstractJdbcStoreConfigurationBuilder<?, ?> builder, Element element,
ConfigurationReader reader) {
switch (element) {
case CONNECTION_POOL: {
parseConnectionPoolAttributes(reader, builder.connectionPool());
break;
}
case DATA_SOURCE: {
parseDataSourceAttributes(reader, builder.dataSource());
break;
}
case SIMPLE_CONNECTION: {
parseSimpleConnectionAttributes(reader, builder.simpleConnection());
break;
}
default:
return false;
}
return true;
}
protected void parseDataSourceAttributes(ConfigurationReader reader,
ManagedConnectionFactoryConfigurationBuilder<?> builder) {
String jndiUrl = ParseUtils.requireSingleAttribute(reader, Attribute.JNDI_URL.getLocalName());
builder.jndiUrl(jndiUrl);
ParseUtils.requireNoContent(reader);
}
protected void parseConnectionPoolAttributes(ConfigurationReader reader,
PooledConnectionFactoryConfigurationBuilder<?> builder) {
for (int i = 0; i < reader.getAttributeCount(); i++) {
ParseUtils.requireNoNamespaceAttribute(reader, i);
String value = reader.getAttributeValue(i);
Attribute attribute = Attribute.forName(reader.getAttributeName(i));
switch (attribute) {
case PROPERTIES_FILE: {
builder.propertyFile(value);
break;
}
case CONNECTION_URL: {
builder.connectionUrl(value);
break;
}
case DRIVER_CLASS: {
builder.driverClass(value);
break;
}
case PASSWORD: {
builder.password(value);
break;
}
case USERNAME: {
builder.username(value);
break;
}
default: {
throw ParseUtils.unexpectedAttribute(reader, i);
}
}
}
ParseUtils.requireNoContent(reader);
}
protected void parseSimpleConnectionAttributes(ConfigurationReader reader,
SimpleConnectionFactoryConfigurationBuilder<?> builder) {
for (int i = 0; i < reader.getAttributeCount(); i++) {
ParseUtils.requireNoNamespaceAttribute(reader, i);
String value = reader.getAttributeValue(i);
Attribute attribute = Attribute.forName(reader.getAttributeName(i));
switch (attribute) {
case CONNECTION_URL: {
builder.connectionUrl(value);
break;
}
case DRIVER_CLASS: {
builder.driverClass(value);
break;
}
case PASSWORD: {
builder.password(value);
break;
}
case USERNAME: {
builder.username(value);
break;
}
default: {
throw ParseUtils.unexpectedAttribute(reader, i);
}
}
}
ParseUtils.requireNoContent(reader);
}
}
| 4,371
| 33.425197
| 110
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/JdbcStoreConfigurationChildBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.configuration.cache.LoaderConfigurationChildBuilder;
/**
* JdbcStoreConfigurationChildBuilder.
*
* @author Tristan Tarrant
* @since 5.2
*/
public interface JdbcStoreConfigurationChildBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>> extends LoaderConfigurationChildBuilder<S> {
/**
* Configures a connection pool to be used by this JDBC Cache Store to handle connections to the database
*/
PooledConnectionFactoryConfigurationBuilder<S> connectionPool();
/**
* Configures a DataSource to be used by this JDBC Cache Store to handle connections to the database
*/
ManagedConnectionFactoryConfigurationBuilder<S> dataSource();
/**
* Configures this JDBC Cache Store to use a single connection to the database
*/
SimpleConnectionFactoryConfigurationBuilder<S> simpleConnection();
/**
* Use the specified ConnectionFactory to handle connection to the database
*/
<C extends ConnectionFactoryConfigurationBuilder<?>> C connectionFactory(Class<C> klass);
/**
* Use the specified {@link ConnectionFactoryConfigurationBuilder} to configure connections to
* the database
*/
<C extends ConnectionFactoryConfigurationBuilder<?>> C connectionFactory(C builder);
}
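// Editor's note: a hedged sketch, not part of the original file, showing how the
// child-builder methods above are usually reached from a cache configuration.
// JdbcStringBasedStoreConfigurationBuilder is the string-keyed store builder from
// the jdbc module; the URL and credentials below are placeholders.
final class ChildBuilderUsageSketch {
   static void configure(org.infinispan.configuration.cache.ConfigurationBuilder cb) {
      cb.persistence()
        .addStore(org.infinispan.persistence.jdbc.configuration.JdbcStringBasedStoreConfigurationBuilder.class)
        .connectionPool()                         // JdbcStoreConfigurationChildBuilder method
           .connectionUrl("jdbc:h2:mem:example")
           .driverClass("org.h2.Driver")
           .username("sa")
           .password("");
   }
}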
| 1,335
| 32.4
| 151
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/AbstractJdbcStoreConfigurationBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.lang.reflect.Constructor;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.ConfigurationUtils;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.persistence.jdbc.common.DatabaseType;
public abstract class AbstractJdbcStoreConfigurationBuilder<T extends AbstractJdbcStoreConfiguration, S extends AbstractJdbcStoreConfigurationBuilder<T, S>> extends
AbstractStoreConfigurationBuilder<T, S> implements JdbcStoreConfigurationChildBuilder<S> {
protected ConnectionFactoryConfigurationBuilder<ConnectionFactoryConfiguration> connectionFactory;
public AbstractJdbcStoreConfigurationBuilder(PersistenceConfigurationBuilder builder, AttributeSet attributes) {
super(builder, attributes);
}
@Override
public PooledConnectionFactoryConfigurationBuilder<S> connectionPool() {
return connectionFactory(PooledConnectionFactoryConfigurationBuilder.class);
}
@Override
public ManagedConnectionFactoryConfigurationBuilder<S> dataSource() {
return connectionFactory(ManagedConnectionFactoryConfigurationBuilder.class);
}
@Override
public SimpleConnectionFactoryConfigurationBuilder<S> simpleConnection() {
return connectionFactory(SimpleConnectionFactoryConfigurationBuilder.class);
}
/**
* Use the specified ConnectionFactory to handle connection to the database
*/
@Override
public <C extends ConnectionFactoryConfigurationBuilder<?>> C connectionFactory(Class<C> klass) {
if (connectionFactory != null) {
throw new IllegalStateException("A ConnectionFactory has already been configured for this store");
}
try {
Constructor<C> constructor = klass.getDeclaredConstructor(AbstractJdbcStoreConfigurationBuilder.class);
C builder = constructor.newInstance(this);
this.connectionFactory = (ConnectionFactoryConfigurationBuilder<ConnectionFactoryConfiguration>) builder;
return builder;
} catch (Exception e) {
throw new CacheConfigurationException("Could not instantiate loader configuration builder '" + klass.getName() + "'", e);
}
}
/**
* Use the specified {@link ConnectionFactoryConfigurationBuilder} to configure connections to
* the database
*/
@Override
public <C extends ConnectionFactoryConfigurationBuilder<?>> C connectionFactory(C builder) {
if (connectionFactory != null) {
throw new IllegalStateException("A ConnectionFactory has already been configured for this store");
}
this.connectionFactory = (ConnectionFactoryConfigurationBuilder<ConnectionFactoryConfiguration>) builder;
return builder;
}
public ConnectionFactoryConfigurationBuilder<ConnectionFactoryConfiguration> getConnectionFactory() {
return connectionFactory;
}
/**
* @param manageConnectionFactory ignored
* @return this
* @deprecated Deprecated since 13.0 with no replacement
*/
@Deprecated
public S manageConnectionFactory(boolean manageConnectionFactory) {
return self();
}
public S dialect(DatabaseType databaseType) {
attributes.attribute(AbstractJdbcStoreConfiguration.DIALECT).set(databaseType);
return self();
}
/**
* @deprecated since 14.0 is ignored
*/
@Deprecated
public S dbMajorVersion(Integer majorVersion) {
return self();
}
/**
* @deprecated since 14.0 is ignored
*/
@Deprecated
public S dbMinorVersion(Integer minorVersion) {
return self();
}
public S readQueryTimeout(Integer queryTimeout) {
attributes.attribute(AbstractJdbcStoreConfiguration.READ_QUERY_TIMEOUT).set(queryTimeout);
return self();
}
public S writeQueryTimeout(Integer queryTimeout) {
attributes.attribute(AbstractJdbcStoreConfiguration.WRITE_QUERY_TIMEOUT).set(queryTimeout);
return self();
}
@Override
public void validate() {
super.validate();
if (connectionFactory == null) {
throw PERSISTENCE.missingConnectionFactory();
}
connectionFactory.validate();
}
@Override
public void validate(GlobalConfiguration globalConfig) {
// Can't validate global config with connection factory
}
@Override
public Builder<?> read(T template, Combine combine) {
super.read(template, combine);
Class<? extends ConnectionFactoryConfigurationBuilder<?>> cfb = (Class<? extends ConnectionFactoryConfigurationBuilder<?>>) ConfigurationUtils.builderFor(template
.connectionFactory());
connectionFactory(cfb);
connectionFactory.read(template.connectionFactory(), combine);
return this;
}
}
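// Editor's note: not part of the original file. The dialect is normally
// auto-detected from JDBC metadata (see Log id 8066 later in this document), but
// it can be pinned explicitly, together with the per-statement query timeouts
// that the store applies via PreparedStatement.setQueryTimeout (seconds).
final class JdbcStoreBuilderTuningSketch {
   static void tune(AbstractJdbcStoreConfigurationBuilder<?, ?> store) {
      store.dialect(DatabaseType.MARIA_DB)
           .readQueryTimeout(5)
           .writeQueryTimeout(10);
   }
}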
| 5,179
| 35.737589
| 168
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/PooledConnectionFactoryConfigurationBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import static org.infinispan.persistence.jdbc.common.configuration.AbstractUnmanagedConnectionFactoryConfiguration.CONNECTION_URL;
import static org.infinispan.persistence.jdbc.common.configuration.AbstractUnmanagedConnectionFactoryConfiguration.DRIVER_CLASS;
import static org.infinispan.persistence.jdbc.common.configuration.AbstractUnmanagedConnectionFactoryConfiguration.PASSWORD;
import static org.infinispan.persistence.jdbc.common.configuration.AbstractUnmanagedConnectionFactoryConfiguration.USERNAME;
import static org.infinispan.persistence.jdbc.common.configuration.PooledConnectionFactoryConfiguration.PROPERTY_FILE;
import java.sql.Driver;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.global.GlobalConfiguration;
/**
* PooledConnectionFactoryConfigurationBuilder.
*
* @author Tristan Tarrant
* @since 5.2
*/
public class PooledConnectionFactoryConfigurationBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>> extends AbstractJdbcStoreConfigurationChildBuilder<S>
implements ConnectionFactoryConfigurationBuilder<PooledConnectionFactoryConfiguration> {
private final AttributeSet attributes;
protected PooledConnectionFactoryConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder<?, S> builder) {
super(builder);
attributes = PooledConnectionFactoryConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public PooledConnectionFactoryConfigurationBuilder<S> propertyFile(String propertyFile) {
attributes.attribute(PROPERTY_FILE).set(propertyFile);
return this;
}
public PooledConnectionFactoryConfigurationBuilder<S> connectionUrl(String connectionUrl) {
attributes.attribute(CONNECTION_URL).set(connectionUrl);
return this;
}
public PooledConnectionFactoryConfigurationBuilder<S> driverClass(Class<? extends Driver> driverClass) {
attributes.attribute(DRIVER_CLASS).set(driverClass.getName());
return this;
}
public PooledConnectionFactoryConfigurationBuilder<S> driverClass(String driverClass) {
attributes.attribute(DRIVER_CLASS).set(driverClass);
return this;
}
public PooledConnectionFactoryConfigurationBuilder<S> username(String username) {
attributes.attribute(USERNAME).set(username);
return this;
}
public PooledConnectionFactoryConfigurationBuilder<S> password(String password) {
attributes.attribute(PASSWORD).set(password);
return this;
}
@Override
public void validate() {
// If a propertyFile is specified, then no exceptions are thrown for an incorrect config until the pool is created
String propertyFile = attributes.attribute(PROPERTY_FILE).get();
String connectionUrl = attributes.attribute(CONNECTION_URL).get();
if (propertyFile == null && connectionUrl == null) {
throw new CacheConfigurationException("Missing connectionUrl parameter");
}
}
@Override
public void validate(GlobalConfiguration globalConfig) {
}
@Override
public PooledConnectionFactoryConfiguration create() {
return new PooledConnectionFactoryConfiguration(attributes.protect());
}
@Override
public PooledConnectionFactoryConfigurationBuilder<S> read(PooledConnectionFactoryConfiguration template, Combine combine) {
attributes.read(template.attributes, combine);
return this;
}
}
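// Editor's note: not part of the original file. validate() above accepts either a
// connection URL or a properties file, so a pool can be driven entirely by an
// external properties file; the path below is illustrative, and misconfiguration
// only surfaces when the pool is actually created.
final class PooledFactoryConfigSketch {
   static void configureFromFile(PooledConnectionFactoryConfigurationBuilder<?> pool) {
      pool.propertyFile("conf/pool.properties"); // no connectionUrl needed in this case
   }
}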
| 3,642
| 38.172043
| 165
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/ConnectionFactoryConfiguration.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
/**
* ConnectionFactoryConfiguration.
*
* @author Tristan Tarrant
* @since 5.2
*/
public interface ConnectionFactoryConfiguration {
Class<? extends ConnectionFactory> connectionFactoryClass();
}
| 350
| 24.071429
| 82
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/ManagedConnectionFactoryConfigurationBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import static org.infinispan.persistence.jdbc.common.configuration.ManagedConnectionFactoryConfiguration.JNDI_URL;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.global.GlobalConfiguration;
/**
* ManagedConnectionFactoryConfigurationBuilder.
*
* @author Tristan Tarrant
* @since 5.2
*/
public class ManagedConnectionFactoryConfigurationBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>> extends AbstractJdbcStoreConfigurationChildBuilder<S>
implements ConnectionFactoryConfigurationBuilder<ManagedConnectionFactoryConfiguration> {
private final AttributeSet attributes;
public ManagedConnectionFactoryConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder<?, S> builder) {
super(builder);
attributes = ManagedConnectionFactoryConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public void jndiUrl(String jndiUrl) {
attributes.attribute(JNDI_URL).set(jndiUrl);
}
@Override
public void validate() {
String jndiUrl = attributes.attribute(JNDI_URL).get();
if (jndiUrl == null) {
throw new CacheConfigurationException("The jndiUrl has not been specified");
}
}
@Override
public void validate(GlobalConfiguration globalConfig) {
}
@Override
public ManagedConnectionFactoryConfiguration create() {
return new ManagedConnectionFactoryConfiguration(attributes.protect());
}
@Override
public ManagedConnectionFactoryConfigurationBuilder<S> read(ManagedConnectionFactoryConfiguration template, Combine combine) {
this.attributes.read(template.attributes(), combine);
return this;
}
}
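// Editor's note: not part of the original file. The managed factory only carries a
// JNDI name; pooling and credentials belong to the container. The name below is a
// placeholder, and validate() rejects a missing jndiUrl.
final class ManagedFactoryConfigSketch {
   static void configure(ManagedConnectionFactoryConfigurationBuilder<?> ds) {
      ds.jndiUrl("java:jboss/datasources/ExampleDS");
   }
}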
| 1,919
| 32.103448
| 166
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/AbstractUnmanagedConnectionFactoryConfiguration.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
public abstract class AbstractUnmanagedConnectionFactoryConfiguration implements ConnectionFactoryConfiguration {
public static final AttributeDefinition<String> USERNAME = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.USERNAME, null, String.class).immutable().build();
public static final AttributeDefinition<String> PASSWORD = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.PASSWORD, null, String.class).immutable().build();
public static final AttributeDefinition<String> DRIVER_CLASS = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.DRIVER_CLASS, null, String.class).immutable().build();
public static final AttributeDefinition<String> CONNECTION_URL = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.CONNECTION_URL, null, String.class).immutable().build();
private final Attribute<String> connectionUrl;
private final Attribute<String> driverClass;
private final Attribute<String> username;
private final Attribute<String> password;
protected AttributeSet attributes;
public static AttributeSet attributeSet() {
return new AttributeSet(AbstractUnmanagedConnectionFactoryConfiguration.class, USERNAME, PASSWORD, DRIVER_CLASS, CONNECTION_URL);
}
public AbstractUnmanagedConnectionFactoryConfiguration(AttributeSet attributes) {
this.attributes = attributes.checkProtection();
this.connectionUrl = attributes.attribute(CONNECTION_URL);
this.driverClass = attributes.attribute(DRIVER_CLASS);
this.username = attributes.attribute(USERNAME);
this.password = attributes.attribute(PASSWORD);
}
public String connectionUrl() {
return connectionUrl.get();
}
public String driverClass() {
return driverClass.get();
}
public String username() {
return username.get();
}
public String password() {
return password.get();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AbstractUnmanagedConnectionFactoryConfiguration that = (AbstractUnmanagedConnectionFactoryConfiguration) o;
return attributes != null ? attributes.equals(that.attributes) : that.attributes == null;
}
@Override
public int hashCode() {
return attributes != null ? attributes.hashCode() : 0;
}
}
| 2,774
| 42.359375
| 215
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/AbstractJdbcStoreConfigurationChildBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.configuration.cache.AbstractStoreConfigurationChildBuilder;
/**
* AbstractJdbcStoreConfigurationChildBuilder.
*
* @author Tristan Tarrant
* @since 5.2
*/
public abstract class AbstractJdbcStoreConfigurationChildBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>>
extends AbstractStoreConfigurationChildBuilder<S> implements JdbcStoreConfigurationChildBuilder<S> {
private final AbstractJdbcStoreConfigurationBuilder<?, S> builder;
protected AbstractJdbcStoreConfigurationChildBuilder(AbstractJdbcStoreConfigurationBuilder<?, S> builder) {
super(builder);
this.builder = builder;
}
@Override
public PooledConnectionFactoryConfigurationBuilder<S> connectionPool() {
return builder.connectionPool();
}
@Override
public ManagedConnectionFactoryConfigurationBuilder<S> dataSource() {
return builder.dataSource();
}
@Override
public SimpleConnectionFactoryConfigurationBuilder<S> simpleConnection() {
return builder.simpleConnection();
}
@Override
public <C extends ConnectionFactoryConfigurationBuilder<?>> C connectionFactory(Class<C> klass) {
return builder.connectionFactory(klass);
}
@Override
public <C extends ConnectionFactoryConfigurationBuilder<?>> C connectionFactory(C factoryBuilder) {
return builder.connectionFactory(factoryBuilder);
}
}
| 1,454
| 29.3125
| 119
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/ConnectionFactoryConfigurationBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import org.infinispan.commons.configuration.Builder;
public interface ConnectionFactoryConfigurationBuilder<T extends ConnectionFactoryConfiguration> extends Builder<T> {
}
| 237
| 33
| 117
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/Attribute.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import java.util.HashMap;
import java.util.Map;
/**
* Enumerates the attributes used by the JDBC cache stores configuration
*
* @author Tristan Tarrant
* @since 5.2
*/
public enum Attribute {
// must be first
UNKNOWN(null),
BATCH_SIZE("batch-size"),
CONNECTION_URL("connection-url"),
CREATE_ON_START("create-on-start"),
DIALECT("dialect"),
DB_MAJOR_VERSION("db-major-version"),
DB_MINOR_VERSION("db-minor-version"),
DELETE_ALL("delete-all"),
DELETE_SINGLE("delete-single"),
JNDI_URL("jndi-url"),
DRIVER_CLASS("driver"),
DROP_ON_EXIT("drop-on-exit"),
EMBEDDED_KEY("embedded-key"),
FETCH_SIZE("fetch-size"),
FILE_NAME("file-name"),
KEY_COLUMNS("key-columns"),
KEY_MESSAGE_NAME("key-message-name"),
KEY_TO_STRING_MAPPER("key-to-string-mapper"),
MESSAGE_NAME("message-name"),
NAME("name"),
PACKAGE("package"),
PASSIVATION("passivation"),
PASSWORD("password"),
PREFIX("prefix"),
PROPERTIES_FILE("properties-file"),
READ_QUERY_TIMEOUT("read-query-timeout"),
SELECT_ALL("select-all"),
SELECT_SINGLE("select-single"),
SIZE("size"),
TABLE_NAME("table-name"),
TYPE("type"),
UPSERT("upsert"),
USERNAME("username"),
WRITE_QUERY_TIMEOUT("write-query-timeout"),
;
private final String name;
Attribute(final String name) {
this.name = name;
}
/**
* Get the local name of this element.
*
* @return the local name
*/
public String getLocalName() {
return name;
}
private static final Map<String, Attribute> attributes;
static {
Map<String, Attribute> map = new HashMap<>();
for (Attribute attribute : values()) {
final String name = attribute.getLocalName();
if (name != null) {
map.put(name, attribute);
}
}
attributes = map;
}
public static Attribute forName(final String localName) {
final Attribute attribute = attributes.get(localName);
return attribute == null ? UNKNOWN : attribute;
}
@Override
public String toString() {
return name;
}
}
| 2,173
| 23.426966
| 72
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/SimpleConnectionFactoryConfiguration.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import java.util.Objects;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.connectionfactory.SimpleConnectionFactory;
/**
* SimpleConnectionFactoryConfiguration.
*
* @author Tristan Tarrant
* @since 5.2
*/
@BuiltBy(SimpleConnectionFactoryConfigurationBuilder.class)
public class SimpleConnectionFactoryConfiguration extends AbstractUnmanagedConnectionFactoryConfiguration {
SimpleConnectionFactoryConfiguration(AttributeSet attributes) {
super(attributes);
}
public AttributeSet attributes() {
return attributes;
}
@Override
public Class<? extends ConnectionFactory> connectionFactoryClass() {
return SimpleConnectionFactory.class;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
SimpleConnectionFactoryConfiguration that = (SimpleConnectionFactoryConfiguration) o;
return Objects.equals(attributes, that.attributes);
}
@Override
public int hashCode() {
return attributes != null ? attributes.hashCode() : 0;
}
@Override
public String toString() {
return "SimpleConnectionFactoryConfiguration [" + "attributes=" + attributes + "]";
}
}
| 1,508
| 28.019231
| 107
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/configuration/SimpleConnectionFactoryConfigurationBuilder.java
|
package org.infinispan.persistence.jdbc.common.configuration;
import static org.infinispan.persistence.jdbc.common.configuration.SimpleConnectionFactoryConfiguration.CONNECTION_URL;
import static org.infinispan.persistence.jdbc.common.configuration.SimpleConnectionFactoryConfiguration.DRIVER_CLASS;
import static org.infinispan.persistence.jdbc.common.configuration.SimpleConnectionFactoryConfiguration.PASSWORD;
import static org.infinispan.persistence.jdbc.common.configuration.SimpleConnectionFactoryConfiguration.USERNAME;
import java.sql.Driver;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.global.GlobalConfiguration;
/**
* SimpleConnectionFactoryBuilder.
*
* @author Tristan Tarrant
* @since 5.2
*/
public class SimpleConnectionFactoryConfigurationBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>> extends AbstractJdbcStoreConfigurationChildBuilder<S>
implements ConnectionFactoryConfigurationBuilder<SimpleConnectionFactoryConfiguration> {
private final AttributeSet attributes;
public SimpleConnectionFactoryConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder<?, S> builder) {
super(builder);
attributes = SimpleConnectionFactoryConfiguration.attributeSet();
}
@Override
public AttributeSet attributes() {
return attributes;
}
public SimpleConnectionFactoryConfigurationBuilder<S> connectionUrl(String connectionUrl) {
attributes.attribute(CONNECTION_URL).set(connectionUrl);
return this;
}
public SimpleConnectionFactoryConfigurationBuilder<S> driverClass(Class<? extends Driver> driverClass) {
attributes.attribute(DRIVER_CLASS).set(driverClass.getName());
return this;
}
public SimpleConnectionFactoryConfigurationBuilder<S> driverClass(String driverClass) {
attributes.attribute(DRIVER_CLASS).set(driverClass);
return this;
}
public SimpleConnectionFactoryConfigurationBuilder<S> username(String username) {
attributes.attribute(USERNAME).set(username);
return this;
}
public SimpleConnectionFactoryConfigurationBuilder<S> password(String password) {
attributes.attribute(PASSWORD).set(password);
return this;
}
@Override
public void validate() {
String connectionUrl = attributes.attribute(CONNECTION_URL).get();
if (connectionUrl == null) {
throw new CacheConfigurationException("A connectionUrl has not been specified");
}
}
@Override
public void validate(GlobalConfiguration globalConfig) {
}
@Override
public SimpleConnectionFactoryConfiguration create() {
return new SimpleConnectionFactoryConfiguration(attributes.protect());
}
@Override
public SimpleConnectionFactoryConfigurationBuilder<S> read(SimpleConnectionFactoryConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}
| 3,077
| 35.211765
| 165
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/logging/Log.java
|
package org.infinispan.persistence.jdbc.common.logging;
import static org.jboss.logging.Logger.Level.ERROR;
import static org.jboss.logging.Logger.Level.WARN;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import javax.naming.NamingException;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.persistence.spi.PersistenceException;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.Logger;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
* Log abstraction for the JDBC cache store. For this module, message ids
* ranging from 8001 to 9000 inclusive have been reserved.
*
* @author Galder Zamarreño
* @since 5.0
*/
@MessageLogger(projectCode = "ISPN")
public interface Log extends BasicLogger {
Log CONFIG = Logger.getMessageLogger(Log.class, org.infinispan.util.logging.Log.LOG_ROOT + "CONFIG");
Log PERSISTENCE = Logger.getMessageLogger(Log.class, org.infinispan.util.logging.Log.LOG_ROOT + "PERSISTENCE");
@LogMessage(level = ERROR)
@Message(value = "Exception while marshalling object: %s", id = 65)
void errorMarshallingObject(@Cause Throwable ioe, Object obj);
@LogMessage(level = ERROR)
@Message(value = "Failed clearing cache store", id = 8001)
void failedClearingJdbcCacheStore(@Cause Exception e);
// @LogMessage(level = ERROR)
// @Message(value = "I/O failure while integrating state into store", id = 8002)
// void ioErrorIntegratingState(@Cause IOException e);
@LogMessage(level = ERROR)
@Message(value = "SQL failure while integrating state into store", id = 8003)
void sqlFailureIntegratingState(@Cause SQLException e);
// @LogMessage(level = ERROR)
// @Message(value = "Class not found while integrating state into store", id = 8004)
// void classNotFoundIntegratingState(@Cause ClassNotFoundException e);
//
// @LogMessage(level = ERROR)
// @Message(value = "I/O Error while storing string keys to database", id = 8005)
// void ioErrorStoringKeys(@Cause IOException e);
//
// @LogMessage(level = ERROR)
// @Message(value = "SQL Error while storing string keys to database", id = 8006)
// void sqlFailureStoringKeys(@Cause SQLException e);
//
// @LogMessage(level = ERROR)
// @Message(value = "SQL error while fetching all StoredEntries", id = 8007)
// void sqlFailureFetchingAllStoredEntries(@Cause SQLException e);
//
// @LogMessage(level = ERROR)
// @Message(value = "I/O failure while marshalling bucket: %s", id = 8008)
// void errorMarshallingBucket(@Cause IOException ioe, Object bucket);
@LogMessage(level = ERROR)
@Message(value = "I/O error while unmarshalling from stream", id = 8009)
void ioErrorUnmarshalling(@Cause IOException e);
@LogMessage(level = ERROR)
@Message(value = "*UNEXPECTED* ClassNotFoundException.", id = 8010)
void unexpectedClassNotFoundException(@Cause ClassNotFoundException e);
@LogMessage(level = ERROR)
@Message(value = "Error while creating table; used DDL statement: '%s'", id = 8011)
void errorCreatingTable(String sql, @Cause SQLException e);
// @LogMessage(level = ERROR)
// @Message(value = "Sql failure while loading key: %s", id = 8014)
// void sqlFailureLoadingKey(String keyHashCode, @Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "Could not find a connection in jndi under the name '%s'", id = 8015)
void connectionInJndiNotFound(String dataSourceName);
@LogMessage(level = ERROR)
@Message(value = "Could not lookup connection with datasource %s", id = 8016)
void namingExceptionLookingUpConnection(String dataSourceName, @Cause NamingException e);
@LogMessage(level = WARN)
@Message(value = "Failed to close naming context.", id = 8017)
void failedClosingNamingCtx(@Cause NamingException e);
@LogMessage(level = ERROR)
@Message(value = "Sql failure retrieving connection from datasource", id = 8018)
void sqlFailureRetrievingConnection(@Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "Issues while closing connection %s", id = 8019)
void sqlFailureClosingConnection(Connection conn, @Cause SQLException e);
// @LogMessage(level = ERROR)
// @Message(value = "Error while instatianting JDBC driver: '%s'", id = 8020)
// void errorInstantiatingJdbcDriver(String driverClass, @Cause Exception e);
// @LogMessage(level = WARN)
// @Message(value = "Could not destroy C3P0 connection pool: %s", id = 8021)
// void couldNotDestroyC3p0ConnectionPool(String pooledDataSource, @Cause SQLException e);
@LogMessage(level = WARN)
@Message(value = "Unexpected sql failure", id = 8022)
void sqlFailureUnexpected(@Cause SQLException e);
@LogMessage(level = WARN)
@Message(value = "Failure while closing the connection to the database", id = 8023)
void failureClosingConnection(@Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "Error while storing string key to database; key: '%s'", id = 8024)
void sqlFailureStoringKey(Object lockingKey, @Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "Error while removing string keys from database", id = 8025)
void sqlFailureRemovingKeys(@Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "In order for JdbcStringBasedStore to support %s, " +
"the Key2StringMapper needs to implement TwoWayKey2StringMapper. " +
"You should either make %s implement TwoWayKey2StringMapper or disable the sql. " +
"See [https://jira.jboss.org/browse/ISPN-579] for more details.", id = 8026)
void invalidKey2StringMapper(String where, String className);
@LogMessage(level = ERROR)
@Message(value = "SQL error while fetching stored entry with key: %s, lockingKey: %s", id = 8027)
void sqlFailureReadingKey(Object key, Object lockingKey, @Cause SQLException e);
@Message(value = "Attribute '%2$s' has not been set on '%1$s'", id = 8028)
CacheConfigurationException tableManipulationAttributeNotSet(String groupName, String name);
@Message(value = "A ConnectionFactory has not been specified for this store", id = 8029)
CacheConfigurationException missingConnectionFactory();
// @Message(value = "Cannot specify a ConnectionFactory and manageConnectionFactory at the same time", id = 8030)
// CacheConfigurationException unmanagedConnectionFactory();
@LogMessage(level = ERROR)
@Message(value = "Error committing JDBC transaction", id = 8031)
void sqlFailureTxCommit(@Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "Error during rollback of JDBC transaction", id = 8032)
void sqlFailureTxRollback(@Cause SQLException e);
@Message(value = "Exception encountered when preparing JDBC store Tx", id = 8033)
PersistenceException prepareTxFailure(@Cause Throwable e);
// @LogMessage(level = ERROR)
// @Message(value = "Error when creating Hikari connection pool", id = 8034)
// void errorCreatingHikariCP(@Cause Exception e);
// @LogMessage(level = ERROR)
// @Message(value = "Error loading HikariCP properties file, only the properties set in %s will be loaded", id = 8035)
// void errorLoadingHikariCPProperties(String name);
@LogMessage(level = WARN)
@Message(value = "Unable to notify the PurgeListener of expired cache entries as the configured key2StringMapper " +
"does not implement %s", id = 8036)
void twoWayKey2StringMapperIsMissing(String className);
@Message(value = "Error while writing entries in batch to the database:", id = 8037)
PersistenceException sqlFailureWritingBatch(@Cause Throwable e);
@Message(value = "Error whilst removing keys in batch from the database. Keys: %s", id = 8038)
PersistenceException sqlFailureDeletingBatch(Iterable<Object> keys, @Cause Exception e);
@Message(value = "The existing store was created without segmentation enabled", id = 8039)
CacheConfigurationException existingStoreNoSegmentation();
@Message(value = "The existing store was created with %d segments configured, but the cache is configured with %d", id = 8040)
CacheConfigurationException existingStoreSegmentMismatch(int existing, int cache);
@LogMessage(level = ERROR)
@Message(value = "Error retrieving JDBC metadata", id = 8041)
void sqlFailureMetaRetrieval(@Cause SQLException e);
@LogMessage(level = ERROR)
@Message(value = "SQL failure while retrieving size", id = 8042)
void sqlFailureSize(@Cause SQLException e);
@Message(value = "Primary key has multiple columns but no key message schema defined, which is required when there is more than one key column", id = 8043)
CacheConfigurationException primaryKeyMultipleColumnWithoutSchema();
@Message(value = "Multiple non key columns but no value message schema defined, which is required when there is more than one value column", id = 8044)
CacheConfigurationException valueMultipleColumnWithoutSchema();
@Message(value = "Primary key %s was not found in the key schema %s", id = 8045)
CacheConfigurationException keyNotInSchema(String primaryKeyName, String schemaName);
@Message(value = "Additional value columns %s found that were not part of the schema, make sure the columns returned match the value schema %s", id = 8046)
CacheConfigurationException valueNotInSchema(List<String> columnNames, String schemaName);
@Message(value = "Schema not found for : %s", id = 8047)
CacheConfigurationException schemaNotFound(String schemaName);
@Message(value = "Key cannot be embedded when the value schema %s is an enum", id = 8048)
CacheConfigurationException keyCannotEmbedWithEnum(String schemaName);
@Message(value = "Repeated fields are not supported, found %s in schema %s", id = 8049)
CacheConfigurationException repeatedFieldsNotSupported(String fieldName, String schemaName);
@Message(value = "Duplicate name %s found for nested schema: %s", id = 8050)
CacheConfigurationException duplicateFieldInSchema(String fieldName, String schemaName);
@Message(value = "Schema contained a field %s that is required but wasn't found in the query for schema %s", id = 8051)
CacheConfigurationException requiredSchemaFieldNotPresent(String fieldName, String schemaName);
@Message(value = "Primary key %s was found in the value schema %s but embedded key was not true", id = 8052)
CacheConfigurationException primaryKeyPresentButNotEmbedded(String fieldName, String schemaName);
@Message(value = "Delete and select queries do not have matching arguments. Delete was %s and select was %s", id = 8053)
CacheConfigurationException deleteAndSelectQueryMismatchArguments(List<String> deleteParams, List<String> selectParams);
@Message(value = "Named parameter %s in upsert statement [%s] is not available in columns from selectAll statement [%s]", id = 8054)
CacheConfigurationException deleteAndSelectQueryMismatchArguments(String paramName, String upsertStatement,
String selectStatement);
@Message(value = "No parameters are allowed for select all statement %s", id = 8055)
CacheConfigurationException selectAllCannotHaveParameters(String selectAllStatement);
@Message(value = "No parameters are allowed for delete all statement %s", id = 8056)
CacheConfigurationException deleteAllCannotHaveParameters(String selectAllStatement);
@Message(value = "No parameters are allowed for sizer statement %s", id = 8057)
CacheConfigurationException sizeCannotHaveParameters(String selectAllStatement);
@Message(value = "Not all key columns %s were returned from select all statement %s", id = 8058)
CacheConfigurationException keyColumnsNotReturnedFromSelectAll(String keyColumns, String selectAllStatement);
@Message(value = "Select parameter %s is not returned from select all statement %s, select statement is %s", id = 8059)
CacheConfigurationException namedParamNotReturnedFromSelect(String paramName, String selectAllStatement, String selectStatement);
@Message(value = "Non-terminated named parameter declaration at position %d in statement: %s", id = 8060)
CacheConfigurationException nonTerminatedNamedParamInSql(int position, String sqlStatement);
@Message(value = "Invalid character %s at position %d in statement: %s", id = 8061)
CacheConfigurationException invalidCharacterInSql(char character, int position, String sqlStatement);
@Message(value = "Unnamed parameters are not allowed, found one at %d in statement %s", id = 8062)
CacheConfigurationException unnamedParametersNotAllowed(int position, String sqlStatement);
@Message(value = "Provided table name %s is not in form of (<SCHEMA>.)<TABLE-NAME> where SCHEMA is optional", id = 8063)
CacheConfigurationException tableNotInCorrectFormat(String tableName);
@Message(value = "No primary keys found for table %s, check case sensitivity", id = 8064)
CacheConfigurationException noPrimaryKeysFoundForTable(String tableName);
@Message(value = "No column found that wasn't a primary key for table: %s", id = 8065)
CacheConfigurationException noValueColumnForTable(String tableName);
@Message(value = "Unable to detect database dialect from JDBC driver name or connection metadata. Please provide this manually using the 'dialect' property in your configuration. Supported database dialect strings are %s", id = 8066)
CacheConfigurationException unableToDetectDialect(String supportedDialects);
@Message(value = "The size, select and select all attributes must be set for a query store", id = 8067)
CacheConfigurationException requiredStatementsForQueryStoreLoader();
@Message(value = "The delete, delete all and upsert attributes must be set for a query store that allows writes", id = 8068)
CacheConfigurationException requiredStatementsForQueryStoreWriter();
@Message(value = "Key columns are required for QueryStore", id = 8069)
CacheConfigurationException keyColumnsRequired();
@Message(value = "Message name must not be null if embedded key is true", id = 8070)
CacheConfigurationException messageNameRequiredIfEmbeddedKey();
@Message(value = "Table name must be non null", id = 8071)
CacheConfigurationException tableNameMissing();
@LogMessage(level = WARN)
@Message(value = "There was no JDBC metadata present in table %s, unable to confirm if segments are properly configured! Segments are assumed to be properly configured.", id = 8072)
void sqlMetadataNotPresent(String tableName);
}
| 14,659
| 50.258741
| 238
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/sql/BaseTableOperations.java
|
package org.infinispan.persistence.jdbc.common.sql;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.lang.invoke.MethodHandles;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.concurrent.CompletionStage;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.AbstractIterator;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Completable;
import io.reactivex.rxjava3.core.Flowable;
public abstract class BaseTableOperations<K, V> implements TableOperations<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass(), Log.class);
protected final AbstractJdbcStoreConfiguration<?> configuration;
public BaseTableOperations(AbstractJdbcStoreConfiguration<?> configuration) {
this.configuration = configuration;
}
public abstract String getSelectRowSql();
public abstract String getSelectAllSql(IntSet segments);
public abstract String getDeleteRowSql();
public abstract String getDeleteAllSql();
public abstract String getUpsertRowSql();
public abstract String getSizeSql();
protected abstract MarshallableEntry<K, V> entryFromResultSet(ResultSet rs, Object keyIfProvided, boolean fetchValue,
Predicate<? super K> keyPredicate) throws SQLException;
protected abstract void prepareKeyStatement(PreparedStatement ps, Object key) throws SQLException;
protected abstract void prepareValueStatement(PreparedStatement ps, int segment, MarshallableEntry<? extends K, ? extends V> entry) throws SQLException;
protected void prepareSizeStatement(PreparedStatement ps) throws SQLException {
// Do nothing by default
}
protected void preparePublishStatement(PreparedStatement ps, IntSet segments) throws SQLException {
// Do nothing by default
}
@Override
public MarshallableEntry<K, V> loadEntry(Connection connection, int segment, Object key) throws SQLException {
PreparedStatement ps = null;
ResultSet rs = null;
try {
String selectSql = getSelectRowSql();
if (log.isTraceEnabled()) {
log.tracef("Running select row sql '%s'", selectSql);
}
ps = connection.prepareStatement(selectSql);
ps.setQueryTimeout(configuration.readQueryTimeout());
prepareKeyStatement(ps, key);
rs = ps.executeQuery();
if (rs.next()) {
return entryFromResultSet(rs, key, true, null);
}
} finally {
JdbcUtil.safeClose(rs);
JdbcUtil.safeClose(ps);
}
return null;
}
@Override
public boolean deleteEntry(Connection connection, int segment, Object key) throws SQLException {
PreparedStatement ps = null;
try {
String deleteSql = getDeleteRowSql();
if (log.isTraceEnabled()) {
log.tracef("Running delete row sql '%s'", deleteSql);
}
ps = connection.prepareStatement(deleteSql);
ps.setQueryTimeout(configuration.writeQueryTimeout());
prepareKeyStatement(ps, key);
return ps.executeUpdate() == 1;
} finally {
JdbcUtil.safeClose(ps);
}
}
@Override
public void deleteAllRows(Connection connection) throws SQLException {
Statement statement = null;
try {
String deleteAllSql = getDeleteAllSql();
if (log.isTraceEnabled()) {
log.tracef("Running delete all sql '%s'", deleteAllSql);
}
statement = connection.createStatement();
int result = statement.executeUpdate(deleteAllSql);
if (log.isTraceEnabled()) {
log.tracef("Successfully removed %d rows.", result);
}
} finally {
JdbcUtil.safeClose(statement);
}
}
@Override
public void upsertEntry(Connection connection, int segment, MarshallableEntry<? extends K, ? extends V> entry) throws SQLException {
PreparedStatement ps = null;
try {
String upsertSql = getUpsertRowSql();
if (log.isTraceEnabled()) {
log.tracef("Running upsert row sql '%s'", upsertSql);
}
ps = connection.prepareStatement(upsertSql);
ps.setQueryTimeout(configuration.writeQueryTimeout());
prepareValueStatement(ps, segment, entry);
ps.executeUpdate();
} finally {
JdbcUtil.safeClose(ps);
}
}
@Override
public long size(Connection connection) throws SQLException {
PreparedStatement ps = null;
ResultSet rs = null;
try {
String sizeSql = getSizeSql();
if (log.isTraceEnabled()) {
log.tracef("Running count sql '%s'", sizeSql);
}
ps = connection.prepareStatement(sizeSql);
prepareSizeStatement(ps);
rs = ps.executeQuery();
rs.next();
// Read as a long: COUNT(*) may exceed Integer.MAX_VALUE on very large tables
return rs.getLong(1);
} finally {
JdbcUtil.safeClose(rs);
JdbcUtil.safeClose(ps);
}
}
// This method invokes some blocking methods, but it is itself only ever invoked from a blocking thread
@SuppressWarnings("checkstyle:ForbiddenMethod")
@Override
public void batchUpdates(Connection connection, int writePublisherCount, Publisher<Object> removePublisher,
Publisher<NonBlockingStore.SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) throws SQLException {
String upsertSql = getUpsertRowSql();
String deleteSql = getDeleteRowSql();
if (log.isTraceEnabled()) {
log.tracef("Running batch upsert sql '%s'", upsertSql);
log.tracef("Running batch delete sql '%s'", deleteSql);
}
try (PreparedStatement upsertBatch = connection.prepareStatement(upsertSql);
PreparedStatement deleteBatch = connection.prepareStatement(deleteSql)) {
// Note this one is non blocking as we need to subscribe to both Publishers before anything is processed.
CompletionStage<Void> removeStage = Flowable.fromPublisher(removePublisher)
.doOnNext(key -> {
prepareKeyStatement(deleteBatch, key);
deleteBatch.addBatch();
}).lastElement()
.doAfterSuccess(ignore -> deleteBatch.executeBatch())
.flatMapCompletable(ignore -> Completable.complete())
.toCompletionStage(null);
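// ByRef holders capture the outcome of the blocking subscribe below: the last written entry (if any) and any failure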
ByRef<Throwable> throwableRef = new ByRef<>(null);
ByRef<Object> hadValue = new ByRef<>(null);
Flowable.fromPublisher(writePublisher)
.concatMapEager(sp ->
Flowable.fromPublisher(sp)
.doOnNext(me -> {
prepareValueStatement(upsertBatch, sp.getSegment(), me);
upsertBatch.addBatch();
}), writePublisherCount, writePublisherCount
).lastElement()
.blockingSubscribe(hadValue::set, throwableRef::set);
if (hadValue.get() != null) {
upsertBatch.executeBatch();
}
Throwable t = throwableRef.get();
if (t != null) {
if (t instanceof SQLException) {
throw (SQLException) t;
}
throw Util.rewrapAsCacheException(t);
}
CompletionStages.join(removeStage);
}
}
@Override
public Flowable<MarshallableEntry<K, V>> publishEntries(Supplier<Connection> connectionSupplier,
Consumer<Connection> connectionCloser, IntSet segments, Predicate<? super K> filter, boolean fetchValue) {
return Flowable.using(() -> {
String selectSql = getSelectAllSql(segments);
if (log.isTraceEnabled()) {
log.tracef("Running select all sql '%s'", selectSql);
}
return new FlowableConnection(connectionSupplier.get(), connectionCloser, selectSql);
}, fc -> {
PreparedStatement ps = fc.statement;
preparePublishStatement(ps, segments);
ps.setFetchSize(configuration.maxBatchSize());
ResultSet rs = ps.executeQuery();
return Flowable.fromIterable(() -> new ResultSetEntryIterator(rs, filter, fetchValue))
.doFinally(() -> JdbcUtil.safeClose(rs));
}, FlowableConnection::close, /* Not eager so result set is closed first */false);
}
protected static class FlowableConnection {
protected final boolean autoCommit;
protected final Connection connection;
protected final Consumer<Connection> connectionCloser;
protected final PreparedStatement statement;
public FlowableConnection(Connection connection, Consumer<Connection> connectionCloser, String sql) throws SQLException {
this.connection = connection;
this.connectionCloser = connectionCloser;
this.autoCommit = connection.getAutoCommit();
this.statement = connection.prepareStatement(sql);
// Some JDBC drivers require auto commit disabled to do paging, however before calling setAutoCommit(false)
// we must ensure that we're not running in a managed transaction by ensuring that getAutoCommit is true.
// Without this check an exception would be thrown when calling setAutoCommit(false) during a managed transaction.
if (autoCommit)
connection.setAutoCommit(false);
}
public boolean isAutoCommit() {
return autoCommit;
}
public Connection getConnection() {
return connection;
}
public Consumer<Connection> getConnectionCloser() {
return connectionCloser;
}
public PreparedStatement getStatement() {
return statement;
}
public void close() {
JdbcUtil.safeClose(statement);
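// If we disabled auto-commit in the constructor (i.e. it was originally enabled), roll back the
// read-only paging transaction before handing the connection back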
if (autoCommit) {
try {
connection.rollback();
} catch (SQLException e) {
PERSISTENCE.sqlFailureTxRollback(e);
}
}
connectionCloser.accept(connection);
}
}
protected class ResultSetEntryIterator extends AbstractIterator<MarshallableEntry<K, V>> {
private final ResultSet rs;
private final Predicate<? super K> filter;
private final boolean fetchValue;
public ResultSetEntryIterator(ResultSet rs, Predicate<? super K> filter, boolean fetchValue) {
this.rs = rs;
this.filter = filter;
this.fetchValue = fetchValue;
}
@Override
protected MarshallableEntry<K, V> getNext() {
try {
while (rs.next()) {
MarshallableEntry<K, V> entry = entryFromResultSet(rs, null, fetchValue, filter);
if (entry != null) {
return entry;
}
}
} catch (SQLException e) {
throw new CacheException(e);
}
return null;
}
}
}
| 11,690
| 36.712903
| 155
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/BaseJdbcStore.java
|
package org.infinispan.persistence.jdbc.common.impl;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Predicate;
import jakarta.transaction.Transaction;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.internal.functions.Functions;
public abstract class BaseJdbcStore<K, V, C extends AbstractJdbcStoreConfiguration> implements NonBlockingStore<K, V> {
protected static final Log log = LogFactory.getLog(BaseJdbcStore.class, Log.class);
protected ConnectionFactory connectionFactory;
protected BlockingManager blockingManager;
protected C config;
protected TableOperations<K, V> tableOperations;
protected final Map<Transaction, Connection> transactionConnectionMap = new ConcurrentHashMap<>();
@Override
public Set<Characteristic> characteristics() {
return EnumSet.of(Characteristic.BULK_READ, Characteristic.TRANSACTIONAL, Characteristic.SHAREABLE);
}
Object keyIdentifier(Object key) {
return key;
}
/**
* Extension point to allow for initializing and creating a table operations object. All variables in the {@link
* BaseJdbcStore} will be initialized except for {@link #tableOperations} when this is invoked.
*
* @param ctx store context
* @param config configuration of the store
* @return the table operations to use for future calls
* @throws SQLException if any database exception occurs during creation
*/
protected abstract TableOperations<K, V> createTableOperations(InitializationContext ctx, C config) throws SQLException;
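// A minimal sketch of an override (hypothetical subclass; the MyTableOperations name is illustrative):
//   @Override
//   protected TableOperations<K, V> createTableOperations(InitializationContext ctx, C config) {
//      return new MyTableOperations(config); // resolve dialect/table metadata from ctx and config here
//   }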
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
this.config = ctx.getConfiguration();
blockingManager = ctx.getBlockingManager();
return blockingManager.runBlocking(() -> {
try {
ConnectionFactory factory = ConnectionFactory.getConnectionFactory(config.connectionFactory().connectionFactoryClass());
factory.start(config.connectionFactory(), factory.getClass().getClassLoader());
this.connectionFactory = factory;
tableOperations = createTableOperations(ctx, config);
} catch (SQLException e) {
throw new PersistenceException(e);
}
}, "jdbcstore-start");
}
/**
* Method to extend to add additional steps when the store is shutting down. This is invoked before the {@link
* #connectionFactory} is shut down and should not do so.
*/
protected void extraStopSteps() {
}
@Override
public CompletionStage<Void> stop() {
return blockingManager.runBlocking(() -> {
extraStopSteps();
try {
log.tracef("Stopping connection factory: %s", connectionFactory);
if (connectionFactory != null) {
connectionFactory.stop();
}
} catch (Throwable t) {
log.debug("Exception while stopping", t);
}
}, "jdbcstore-stop");
}
@Override
public CompletionStage<Boolean> isAvailable() {
return blockingManager.supplyBlocking(() -> {
if (connectionFactory == null)
return false;
Connection connection = null;
try {
connection = connectionFactory.getConnection();
return connection != null && connection.isValid(10);
} catch (Throwable t) {
log.debugf(t, "Exception thrown when checking DB availability");
throw CompletableFutures.asCompletionException(t);
} finally {
connectionFactory.releaseConnection(connection);
}
}, "jdbcstore-available");
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
return blockingManager.supplyBlocking(() -> {
Connection conn = null;
try {
conn = connectionFactory.getConnection();
return tableOperations.loadEntry(conn, segment, key);
} catch (SQLException e) {
Object keyIdentifier = keyIdentifier(key);
PERSISTENCE.sqlFailureReadingKey(key, keyIdentifier, e);
throw new PersistenceException(String.format(
"SQL error while fetching stored entry with key: %s, lockingKey: %s",
key, keyIdentifier), e);
} finally {
connectionFactory.releaseConnection(conn);
}
}, "jdbcstore-load");
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
return blockingManager.runBlocking(() -> {
Connection connection = null;
try {
connection = connectionFactory.getConnection();
tableOperations.upsertEntry(connection, segment, entry);
} catch (SQLException ex) {
PERSISTENCE.sqlFailureStoringKey(entry.getKey(), ex);
throw new PersistenceException(String.format("Error while storing string key to database; key: '%s'", entry.getKey()), ex);
} finally {
connectionFactory.releaseConnection(connection);
}
}, "jdbcstore-write");
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
return blockingManager.supplyBlocking(() -> {
Connection connection = null;
try {
connection = connectionFactory.getConnection();
return tableOperations.deleteEntry(connection, segment, key);
} catch (SQLException ex) {
PERSISTENCE.sqlFailureRemovingKeys(ex);
throw new PersistenceException(String.format("Error while removing key %s from database", key), ex);
} finally {
connectionFactory.releaseConnection(connection);
}
}, "jdbcstore-delete");
}
@Override
public CompletionStage<Void> clear() {
return blockingManager.runBlocking(() -> {
Connection connection = null;
try {
connection = connectionFactory.getConnection();
tableOperations.deleteAllRows(connection);
} catch (SQLException ex) {
PERSISTENCE.failedClearingJdbcCacheStore(ex);
throw new PersistenceException("Failed clearing cache store", ex);
} finally {
connectionFactory.releaseConnection(connection);
}
}, "jdbcstore-delete");
}
@Override
public CompletionStage<Void> batch(int publisherCount,
Publisher<SegmentedPublisher<Object>> removePublisher,
Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
return blockingManager.runBlocking(() -> {
Connection connection = null;
try {
connection = connectionFactory.getConnection();
tableOperations.batchUpdates(connection, publisherCount, Flowable.fromPublisher(removePublisher)
.concatMapEager(Functions.identity(), publisherCount, publisherCount), writePublisher);
} catch (SQLException e) {
throw PERSISTENCE.sqlFailureWritingBatch(e);
} finally {
connectionFactory.releaseConnection(connection);
}
}, "jdbcstore-batch");
}
@Override
public CompletionStage<Void> prepareWithModifications(Transaction tx, int publisherCount,
Publisher<SegmentedPublisher<Object>> removePublisher,
Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
return blockingManager.runBlocking(() -> {
try {
Connection connection = getTxConnection(tx);
connection.setAutoCommit(false);
tableOperations.batchUpdates(connection, publisherCount, Flowable.fromPublisher(removePublisher)
.concatMapEager(Functions.identity(), publisherCount, publisherCount), writePublisher);
// We do not call connection.close() in the event of an exception, as close() on active Tx behaviour is implementation
// dependent. See https://docs.oracle.com/javase/8/docs/api/java/sql/Connection.html#close--
} catch (SQLException e) {
throw PERSISTENCE.prepareTxFailure(e);
}
}, "jdbcstore-prepare");
}
@Override
public CompletionStage<Void> commit(Transaction tx) {
return blockingManager.runBlocking(() -> {
Connection connection;
try {
connection = getTxConnection(tx);
connection.commit();
} catch (SQLException e) {
PERSISTENCE.sqlFailureTxCommit(e);
throw new PersistenceException(String.format("Error during commit of JDBC transaction (%s)", tx), e);
} finally {
destroyTxConnection(tx);
}
}, "jdbcstore-commit");
}
@Override
public CompletionStage<Void> rollback(Transaction tx) {
return blockingManager.runBlocking(() -> {
Connection connection;
try {
connection = getTxConnection(tx);
connection.rollback();
} catch (SQLException e) {
PERSISTENCE.sqlFailureTxRollback(e);
throw new PersistenceException(String.format("Error during rollback of JDBC transaction (%s)", tx), e);
} finally {
destroyTxConnection(tx);
}
}, "jdbcstore-rollback");
}
protected Connection getTxConnection(Transaction tx) {
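// Lazily associates a connection with the transaction so that prepareWithModifications, commit and rollback all reuse the same connection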
Connection connection = transactionConnectionMap.get(tx);
if (connection == null) {
connection = connectionFactory.getConnection();
transactionConnectionMap.put(tx, connection);
}
return connection;
}
protected void destroyTxConnection(Transaction tx) {
Connection connection = transactionConnectionMap.remove(tx);
if (connection != null)
connectionFactory.releaseConnection(connection);
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
return blockingManager.blockingPublisher(tableOperations.publishEntries(connectionFactory::getConnection,
connectionFactory::releaseConnection, segments, filter, includeValues));
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
return blockingManager.blockingPublisher(tableOperations.publishKeys(connectionFactory::getConnection,
connectionFactory::releaseConnection, segments, filter));
}
@Override
public CompletionStage<Long> size(IntSet segments) {
return blockingManager.supplyBlocking(() -> {
Connection conn = null;
try {
conn = connectionFactory.getConnection();
return tableOperations.size(conn);
} catch (SQLException e) {
PERSISTENCE.sqlFailureSize(e);
throw new PersistenceException("SQL failure while retrieving size", e);
} finally {
connectionFactory.releaseConnection(conn);
}
}, "jdbcstore-size");
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
return size(segments);
}
}
| 12,146
| 38.957237
| 135
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/GenericSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
import org.infinispan.persistence.jdbc.common.SqlManager;
public class GenericSqlManager implements SqlManager {
protected final String tableName;
protected final boolean namedParameters;
public GenericSqlManager(String tableName, boolean namedParameters) {
this.tableName = tableName;
this.namedParameters = namedParameters;
}
String parameterName(String columnName) {
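// e.g. ":id" when named parameters are in use, otherwise the positional "?" placeholder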
return namedParameters ? ":" + columnName : "?";
}
@Override
public String getSelectStatement(List<String> keyColumns, List<String> allColumns) {
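// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   SELECT id, datum FROM tbl WHERE id = ?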
StringBuilder select = new StringBuilder("SELECT ");
appendStrings(select, allColumns, Function.identity(), ", ");
select.append(" FROM ").append(tableName);
select.append(" WHERE ");
appendStrings(select, keyColumns, key -> key + " = " + parameterName(key), " AND ");
return select.toString();
}
@Override
public String getSelectAllStatement(List<String> allColumns) {
StringBuilder selectAll = new StringBuilder("SELECT ");
appendStrings(selectAll, allColumns, Function.identity(), ", ");
selectAll.append(" FROM ").append(tableName);
return selectAll.toString();
}
@Override
public String getDeleteStatement(List<String> keyColumns) {
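// Illustrative output, assuming key column "id" on table "tbl": DELETE FROM tbl WHERE id = ?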
StringBuilder delete = new StringBuilder("DELETE FROM ");
delete.append(tableName);
delete.append(" WHERE ");
appendStrings(delete, keyColumns, key -> key + " = " + parameterName(key), " AND ");
return delete.toString();
}
@Override
public String getDeleteAllStatement() {
return "DELETE FROM " + tableName;
}
protected Iterable<String> valueIterable(List<String> keyColumns, List<String> allColumns) {
return () -> allColumns.stream().filter(all -> !keyColumns.contains(all)).iterator();
}
protected void appendStrings(StringBuilder sb, Iterable<String> strings, Function<String, String> valueConversion,
String separator) {
boolean isFirst = true;
for (String columnName : strings) {
if (!isFirst) {
sb.append(separator);
}
sb.append(valueConversion.apply(columnName));
isFirst = false;
}
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
// "MERGE INTO %1$s " +
// "USING (VALUES (?, ?, ?)) AS tmp (%2$s, %3$s, %4$s) " +
// "ON (%2$s = tmp.%2$s) " +
// "WHEN MATCHED THEN UPDATE SET %3$s = tmp.%3$s, %4$s = tmp.%4$s " +
// "WHEN NOT MATCHED THEN INSERT (%2$s, %3$s, %4$s) VALUES (tmp.%2$s, tmp.%3$s, tmp.%4$s)"
StringBuilder upsert = new StringBuilder("MERGE INTO ").append(tableName);
upsert.append(" USING (VALUES (");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append(")) AS tmp (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") ON (");
appendStrings(upsert, keyColumns, key -> key + " = tmp." + key, ", ");
upsert.append(") WHEN MATCHED THEN UPDATE SET ");
appendStrings(upsert, valueIterable(keyColumns, allColumns), value -> value + " = tmp." + value, ", ");
upsert.append(" WHEN NOT MATCHED THEN INSERT (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, all -> "tmp." + all, ", ");
upsert.append(')');
return upsert.toString();
}
@Override
public String getSizeCommand() {
return "SELECT COUNT(*) FROM " + tableName;
}
}
| 3,733
| 36.717172
| 117
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/SQLLiteSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class SQLLiteSqlManager extends GenericSqlManager {
public SQLLiteSqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
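// Illustrative output, assuming columns "id, datum" on table "tbl":
//   INSERT OR REPLACE INTO tbl (id, datum) VALUES (?, ?)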
StringBuilder upsert = new StringBuilder("INSERT OR REPLACE INTO ").append(tableName);
upsert.append(" (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append(')');
return upsert.toString();
}
}
| 756
| 33.409091
| 92
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/OracleSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class OracleSqlManager extends GenericSqlManager {
public OracleSqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
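// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   MERGE INTO tbl t USING (SELECT ? id, ? datum from dual) tmp ON (t.id = tmp.id)
//   WHEN MATCHED THEN UPDATE SET t.datum = tmp.datum
//   WHEN NOT MATCHED THEN INSERT (id, datum) VALUES (tmp.id, tmp.datum)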
StringBuilder upsert = new StringBuilder("MERGE INTO ").append(tableName);
upsert.append(" t USING (SELECT ");
appendStrings(upsert, allColumns, all -> parameterName(all) + " " + all, ", ");
upsert.append(" from dual) tmp ON (");
appendStrings(upsert, keyColumns, key -> "t." + key + " = tmp." + key, ", ");
upsert.append(") WHEN MATCHED THEN UPDATE SET ");
appendStrings(upsert, valueIterable(keyColumns, allColumns), key -> "t." + key + " = tmp." + key, ", ");
upsert.append(" WHEN NOT MATCHED THEN INSERT (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, all -> "tmp." + all, ", ");
upsert.append(')');
return upsert.toString();
}
}
| 1,195
| 41.714286
| 110
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/SybaseSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
public class SybaseSqlManager extends GenericSqlManager {
public SybaseSqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
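// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   MERGE INTO tbl AS t USING (SELECT ? id, ? datum) AS tmp ON (t.id = tmp.id)
//   WHEN MATCHED THEN UPDATE SET t.datum = tmp.datum
//   WHEN NOT MATCHED THEN INSERT VALUES (tmp.id, tmp.datum)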
StringBuilder upsert = new StringBuilder("MERGE INTO ").append(tableName);
upsert.append(" AS t USING (SELECT ");
appendStrings(upsert, allColumns, all -> parameterName(all) + " " + all, ", ");
upsert.append(") AS tmp ON (");
appendStrings(upsert, keyColumns, key -> "t." + key + " = tmp." + key, ", ");
upsert.append(") WHEN MATCHED THEN UPDATE SET ");
appendStrings(upsert, valueIterable(keyColumns, allColumns), value -> "t." + value + " = tmp." + value, ", ");
upsert.append(" WHEN NOT MATCHED THEN INSERT VALUES (");
appendStrings(upsert, allColumns, all -> "tmp." + all, ", ");
upsert.append(')');
return upsert.toString();
}
}
| 1,065
| 41.64
| 116
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/DB2SqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class DB2SqlManager extends GenericSqlManager {
public DB2SqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
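// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   MERGE INTO tbl AS t USING (SELECT * FROM TABLE (VALUES(?, ?))) AS tmp(id, datum) ON t.id = tmp.id
//   WHEN MATCHED THEN UPDATE SET (t.datum) = (tmp.datum)
//   WHEN NOT MATCHED THEN INSERT (t.id, t.datum) VALUES (tmp.id, tmp.datum)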
StringBuilder upsert = new StringBuilder("MERGE INTO ").append(tableName);
upsert.append(" AS t USING (SELECT * FROM TABLE (VALUES(");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append("))) AS tmp(");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") ON ");
appendStrings(upsert, allColumns, all -> "t." + all + " = tmp." + all, " AND ");
upsert.append(" WHEN MATCHED THEN UPDATE SET (");
appendStrings(upsert, valueIterable(keyColumns, allColumns), v -> "t." + v, ", ");
upsert.append(") = (");
appendStrings(upsert, valueIterable(keyColumns, allColumns), v -> "tmp." + v, ", ");
upsert.append(") WHEN NOT MATCHED THEN INSERT (");
appendStrings(upsert, allColumns, all -> "t." + all, ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, all -> "tmp." + all, ", ");
upsert.append(')');
return upsert.toString();
}
}
| 1,385
| 42.3125
| 90
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/MySQLSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class MySQLSqlManager extends GenericSqlManager {
public MySQLSqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
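// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   INSERT INTO tbl (id, datum) VALUES (?, ?) ON DUPLICATE KEY UPDATE datum = VALUES(datum)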
StringBuilder upsert = new StringBuilder("INSERT INTO ").append(tableName);
upsert.append(" (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append(") ON DUPLICATE KEY UPDATE ");
appendStrings(upsert, valueIterable(keyColumns, allColumns), value -> value + " = VALUES(" + value + ")", ", ");
return upsert.toString();
}
}
| 885
| 37.521739
| 118
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/PostgresqlSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class PostgresqlSqlManager extends GenericSqlManager {
public PostgresqlSqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
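// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   INSERT INTO tbl (id, datum) VALUES (?, ?) ON CONFLICT (id) DO UPDATE SET datum = EXCLUDED.datum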
StringBuilder upsert = new StringBuilder("INSERT INTO ").append(tableName);
upsert.append(" (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append(") ON CONFLICT (");
appendStrings(upsert, keyColumns, Function.identity(), ", ");
upsert.append(") DO UPDATE SET ");
appendStrings(upsert, valueIterable(keyColumns, allColumns), value -> value + " = EXCLUDED." + value, ", ");
return upsert.toString();
}
}
| 989
| 38.6
| 114
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/H2SqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class H2SqlManager extends GenericSqlManager {
public H2SqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
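// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   MERGE INTO tbl (id, datum) KEY(id) VALUES(?, ?)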
StringBuilder upsert = new StringBuilder("MERGE INTO ").append(tableName);
upsert.append(" (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") KEY(");
appendStrings(upsert, keyColumns, Function.identity(), ", ");
upsert.append(") VALUES(");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append(")");
return upsert.toString();
}
}
| 832
| 33.708333
| 87
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/table/SQLServerSqlManager.java
|
package org.infinispan.persistence.jdbc.common.impl.table;
import java.util.List;
import java.util.function.Function;
public class SQLServerSqlManager extends GenericSqlManager {
public SQLServerSqlManager(String tableName, boolean namedParameters) {
super(tableName, namedParameters);
}
@Override
public String getUpsertStatement(List<String> keyColumns, List<String> allColumns) {
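// Illustrative output, assuming key column "id" and columns "id, datum" on table "tbl":
//   MERGE tbl WITH (TABLOCK) USING (VALUES (?, ?)) AS tmp (id, datum) ON (tbl.id = tmp.id)
//   WHEN MATCHED THEN UPDATE SET datum = tmp.datum
//   WHEN NOT MATCHED THEN INSERT (id, datum) VALUES (tmp.id, tmp.datum);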
StringBuilder upsert = new StringBuilder("MERGE ").append(tableName);
upsert.append(" WITH (TABLOCK) USING (VALUES (");
appendStrings(upsert, allColumns, this::parameterName, ", ");
upsert.append(")) AS tmp (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") ON (");
appendStrings(upsert, keyColumns, key -> tableName + "." + key + " = tmp." + key, ", ");
upsert.append(") WHEN MATCHED THEN UPDATE SET ");
appendStrings(upsert, valueIterable(keyColumns, allColumns), value -> value + " = tmp." + value, ", ");
upsert.append(" WHEN NOT MATCHED THEN INSERT (");
appendStrings(upsert, allColumns, Function.identity(), ", ");
upsert.append(") VALUES (");
appendStrings(upsert, allColumns, all -> "tmp." + all, ", ");
upsert.append(");");
return upsert.toString();
}
}
| 1,293
| 42.133333
| 109
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/connectionfactory/package-info.java
|
/**
* Connection factories for use with the JDBC Cache Store. Simple connection factories delegate
* to a data source if used within a Java EE environment; otherwise Agroal-based connection
* pooling is used outside of a Java EE environment.
*/
package org.infinispan.persistence.jdbc.common.impl.connectionfactory;
| 305
| 42.714286
| 96
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/connectionfactory/PooledConnectionFactory.java
|
package org.infinispan.persistence.jdbc.common.impl.connectionfactory;
import java.sql.Connection;
import java.sql.SQLException;
import java.time.Duration;
import org.infinispan.commons.util.Util;
import org.infinispan.persistence.jdbc.common.JdbcUtil;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.PooledConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
import io.agroal.api.AgroalDataSource;
import io.agroal.api.configuration.AgroalConnectionFactoryConfiguration;
import io.agroal.api.configuration.supplier.AgroalDataSourceConfigurationSupplier;
import io.agroal.api.configuration.supplier.AgroalPropertiesReader;
import io.agroal.api.security.NamePrincipal;
import io.agroal.api.security.SimplePassword;
/**
* Pooled connection factory based upon Agroal (https://agroal.github.io).
*
* @author Mircea.Markus@jboss.com
* @author Tristan Tarrant
* @author Ryan Emerson
*/
public class PooledConnectionFactory extends ConnectionFactory {
private static final String PROPERTIES_PREFIX = "org.infinispan.agroal.";
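// External Agroal property files are read through this prefix; e.g. a line such as
// "org.infinispan.agroal.maxSize=10" would configure the pool's maximum size
// (key names follow Agroal's AgroalPropertiesReader; the value here is illustrative)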
private static final Log log = LogFactory.getLog(PooledConnectionFactory.class, Log.class);
private AgroalDataSource dataSource;
@Override
public void start(ConnectionFactoryConfiguration config, ClassLoader classLoader) throws PersistenceException {
PooledConnectionFactoryConfiguration poolConfig;
if (config instanceof PooledConnectionFactoryConfiguration) {
poolConfig = (PooledConnectionFactoryConfiguration) config;
} else {
throw new PersistenceException("ConnectionFactoryConfiguration passed in must be an instance of " +
"PooledConnectionFactoryConfiguration");
}
try {
String propsFile = poolConfig.propertyFile();
if (propsFile != null) {
dataSource = AgroalDataSource.from(new AgroalPropertiesReader(PROPERTIES_PREFIX).readProperties(propsFile));
} else {
Class<?> driverClass = Util.loadClass(poolConfig.driverClass(), null);
// Default Agroal configuration with metrics disabled
String password = poolConfig.password() != null ? poolConfig.password() : "";
AgroalDataSourceConfigurationSupplier configuration = new AgroalDataSourceConfigurationSupplier()
.connectionPoolConfiguration(cp -> cp
.maxSize(10)
.acquisitionTimeout(Duration.ofSeconds(30))
.connectionFactoryConfiguration(cf -> cf
.jdbcUrl(poolConfig.connectionUrl())
.connectionProviderClass(driverClass)
.jdbcTransactionIsolation(AgroalConnectionFactoryConfiguration.TransactionIsolation.UNDEFINED)
.principal(new NamePrincipal(poolConfig.username()))
.credential(new SimplePassword(password))
));
dataSource = AgroalDataSource.from(configuration);
}
} catch (Exception e) {
throw new PersistenceException("Failed to create a AgroalDataSource", e);
}
}
@Override
public void stop() {
if (dataSource != null) {
dataSource.close();
if (log.isTraceEnabled()) log.trace("Successfully stopped PooledConnectionFactory.");
}
}
@Override
public Connection getConnection() throws PersistenceException {
try {
Connection connection = dataSource.getConnection();
log(connection, true);
return connection;
} catch (SQLException e) {
throw new PersistenceException("Failed obtaining connection from PooledDataSource", e);
}
}
@Override
public void releaseConnection(Connection conn) {
log(conn, false);
JdbcUtil.safeClose(conn);
}
public int getMaxPoolSize() {
return dataSource.getConfiguration().connectionPoolConfiguration().maxSize();
}
public long getActiveConnections() {
return dataSource.getMetrics().activeCount();
}
private void log(Connection connection, boolean checkout) {
if (log.isTraceEnabled()) {
String operation = checkout ? "checkout" : "release";
log.tracef("Connection %s (active=%d): %s", operation, getActiveConnections(), connection);
}
}
}
| 4,658
| 39.163793
| 124
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/connectionfactory/SimpleConnectionFactory.java
|
package org.infinispan.persistence.jdbc.common.impl.connectionfactory;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.infinispan.commons.util.Util;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.SimpleConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* Connection factory implementation that creates a database connection on a per-invocation basis. Not recommended in
* production; {@link PooledConnectionFactory} or {@link ManagedConnectionFactory} should be used instead.
*
* @author Mircea.Markus@jboss.com
*/
public class SimpleConnectionFactory extends ConnectionFactory {
private static final Log log = LogFactory.getLog(SimpleConnectionFactory.class, Log.class);
private String connectionUrl;
private String userName;
private String password;
// Volatile for visibility only; the ++/-- updates below are not atomic, so this count is approximate under concurrency
private volatile int connectionCount = 0;
@Override
public void start(ConnectionFactoryConfiguration config, ClassLoader classLoader) throws PersistenceException {
SimpleConnectionFactoryConfiguration factoryConfiguration;
if (config instanceof SimpleConnectionFactoryConfiguration) {
factoryConfiguration = (SimpleConnectionFactoryConfiguration) config;
} else {
throw new PersistenceException("ConnectionFactoryConfiguration has to be an instance of " +
"SimpleConnectionFactoryConfiguration.");
}
loadDriver(factoryConfiguration.driverClass(), classLoader);
this.connectionUrl = factoryConfiguration.connectionUrl();
this.userName = factoryConfiguration.username();
this.password = factoryConfiguration.password();
if (log.isTraceEnabled()) {
log.tracef("Starting connection %s", this);
}
}
@Override
public void stop() {
//do nothing
}
@Override
public Connection getConnection() throws PersistenceException {
try {
Connection connection = DriverManager.getConnection(connectionUrl, userName, password);
if (connection == null)
throw new PersistenceException("Received null connection from the DriverManager!");
connectionCount++;
return connection;
} catch (SQLException e) {
throw new PersistenceException("Could not obtain a new connection", e);
}
}
@Override
public void releaseConnection(Connection conn) {
try {
if (conn != null) {
conn.close();
connectionCount--;
}
} catch (SQLException e) {
PERSISTENCE.failureClosingConnection(e);
}
}
private void loadDriver(String driverClass, ClassLoader classLoader) throws PersistenceException {
if (log.isTraceEnabled()) log.tracef("Attempting to load driver %s", driverClass);
Util.getInstance(driverClass, classLoader);
}
public String getConnectionUrl() {
return connectionUrl;
}
public String getUserName() {
return userName;
}
public String getPassword() {
return password;
}
public int getConnectionCount() {
return connectionCount;
}
@Override
public String toString() {
return "SimpleConnectionFactory{" +
"connectionUrl='" + connectionUrl + '\'' +
", userName='" + userName + '\'' +
"} " + super.toString();
}
}
| 3,749
| 32.482143
| 119
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/impl/connectionfactory/ManagedConnectionFactory.java
|
package org.infinispan.persistence.jdbc.common.impl.connectionfactory;
import static org.infinispan.persistence.jdbc.common.logging.Log.PERSISTENCE;
import java.sql.Connection;
import java.sql.SQLException;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import javax.sql.DataSource;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.configuration.ManagedConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.util.logging.LogFactory;
/**
* Connection factory that can be used in managed environments, such as application servers. It knows how to look into
* the JNDI tree at a certain (configurable) location and delegates connection management to the DataSource. In order
* to enable it, set the following two properties on any JDBC cache store:
* <pre>
* <property name="connectionFactoryClass" value="ManagedConnectionFactory"/>
* <property name="datasourceJndiLocation" value="java:/ManagedConnectionFactoryTest/DS"/>
* </pre>
*
* @author Mircea.Markus@jboss.com
*/
public class ManagedConnectionFactory extends ConnectionFactory {
private static final Log log = LogFactory.getLog(ManagedConnectionFactory.class, Log.class);
private ManagedConnectionFactoryConfiguration managedConfiguration;
private volatile DataSource dataSource;
@Override
public void start(ConnectionFactoryConfiguration factoryConfiguration, ClassLoader classLoader) throws PersistenceException {
if (factoryConfiguration instanceof ManagedConnectionFactoryConfiguration) {
managedConfiguration = (ManagedConnectionFactoryConfiguration) factoryConfiguration;
} else {
throw new PersistenceException("FactoryConfiguration has to be an instance of " +
"ManagedConnectionFactoryConfiguration");
}
}
private void initDataSource() {
InitialContext ctx = null;
String datasourceName = managedConfiguration.jndiUrl();
try {
ctx = new InitialContext();
dataSource = (DataSource) ctx.lookup(datasourceName);
if (log.isTraceEnabled()) {
log.tracef("Datasource lookup for %s succeeded: %b", datasourceName, dataSource);
}
if (dataSource == null) {
PERSISTENCE.connectionInJndiNotFound(datasourceName);
throw new PersistenceException(String.format("Could not find a connection in jndi under the name '%s'", datasourceName));
}
} catch (NamingException e) {
PERSISTENCE.namingExceptionLookingUpConnection(datasourceName, e);
throw new PersistenceException(e);
} finally {
if (ctx != null) {
try {
ctx.close();
} catch (NamingException e) {
PERSISTENCE.failedClosingNamingCtx(e);
}
}
}
}
@Override
public void stop() {
}
@Override
public Connection getConnection() throws PersistenceException {
if (dataSource == null)
initDataSource();
Connection connection;
try {
connection = dataSource.getConnection();
} catch (SQLException e) {
PERSISTENCE.sqlFailureRetrievingConnection(e);
throw new PersistenceException("This might be related to https://jira.jboss.org/browse/ISPN-604", e);
}
if (log.isTraceEnabled()) {
log.tracef("Connection checked out: %s", connection);
}
return connection;
}
@Override
public void releaseConnection(Connection conn) {
try {
if (conn != null) // Could be null if getConnection failed
conn.close();
} catch (SQLException e) {
PERSISTENCE.sqlFailureClosingConnection(conn, e);
}
}
}
| 4,001
| 36.055556
| 133
|
java
|
null |
infinispan-main/persistence/jdbc-common/src/main/java/org/infinispan/persistence/jdbc/common/connectionfactory/ConnectionFactory.java
|
package org.infinispan.persistence.jdbc.common.connectionfactory;
import java.sql.Connection;
import org.infinispan.commons.util.Util;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.spi.PersistenceException;
/**
* Defines the functionality a connection factory should implement.
*
* @author Mircea.Markus@jboss.com
*/
public abstract class ConnectionFactory {
/**
* Constructs a {@link ConnectionFactory} based on the supplied class
* name.
*/
public static ConnectionFactory getConnectionFactory(Class<? extends ConnectionFactory> connectionFactoryClass) throws PersistenceException {
return Util.getInstance(connectionFactoryClass);
}
/**
* Starts the connection factory. A pooled factory might create connections here.
*/
public abstract void start(ConnectionFactoryConfiguration factoryConfiguration, ClassLoader classLoader) throws
PersistenceException;
/**
* Closes the connection factory, including all allocated connections etc.
*/
public abstract void stop();
/**
* Fetches a connection from the factory.
*/
public abstract Connection getConnection() throws PersistenceException;
/**
* Destroys a connection. Important: null might be passed in as a valid argument.
*/
public abstract void releaseConnection(Connection conn);
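// Typical lifecycle, as a minimal sketch ('config' and 'classLoader' are assumed to be in scope):
//   ConnectionFactory factory = ConnectionFactory.getConnectionFactory(PooledConnectionFactory.class);
//   factory.start(config, classLoader);
//   Connection conn = factory.getConnection();
//   try { /* use conn */ } finally { factory.releaseConnection(conn); }
//   factory.stop();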
}
| 1,522
| 33.613636
| 144
|
java
|
null |
infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/RocksDBStoreFunctionalTest.java
|
package org.infinispan.persistence.rocksdb;
import static org.testng.AssertJUnit.assertNotNull;
import java.io.File;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.persistence.BaseStoreFunctionalTest;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "persistence.rocksdb.RocksDBStoreFunctionalTest")
public class RocksDBStoreFunctionalTest extends BaseStoreFunctionalTest {
protected String tmpDirectory = CommonsTestingUtil.tmpDirectory(this.getClass());
@AfterClass(alwaysRun = true)
protected void clearTempDir() {
Util.recursiveFileRemove(tmpDirectory);
}
RocksDBStoreConfigurationBuilder createStoreBuilder(PersistenceConfigurationBuilder loaders) {
new File(tmpDirectory).mkdirs();
return loaders.addStore(RocksDBStoreConfigurationBuilder.class).location(tmpDirectory + "/data").expiredLocation(tmpDirectory + "/expiry").clearThreshold(2);
}
@Override
protected PersistenceConfigurationBuilder createCacheStoreConfig(PersistenceConfigurationBuilder persistence,
String cacheName, boolean preload) {
createStoreBuilder(persistence)
.preload(preload);
return persistence;
}
public void testUnknownProperties() {
ConfigurationBuilder cb = getDefaultCacheConfiguration();
new File(tmpDirectory).mkdirs();
RocksDBStoreConfigurationBuilder storeConfigurationBuilder = createStoreBuilder(cb.persistence());
storeConfigurationBuilder.addProperty(RocksDBStore.DATABASE_PROPERTY_NAME_WITH_SUFFIX + "unknown", "some_value");
Configuration c = cb.build();
String cacheName = "rocksdb-unknown-properties";
TestingUtil.defineConfiguration(cacheManager, cacheName, c);
try {
cacheManager.getCache(cacheName);
} catch (Throwable t) {
Throwable cause;
while ((cause = t.getCause()) != null) {
t = cause;
}
Exceptions.assertException(CacheConfigurationException.class, ".*unknown\\ property$", t);
}
// Stop the cache manager early, otherwise cleanup won't work properly
TestingUtil.killCacheManagers(cacheManager);
}
public void testKnownProperties() {
ConfigurationBuilder cb = getDefaultCacheConfiguration();
new File(tmpDirectory).mkdirs();
RocksDBStoreConfigurationBuilder storeConfigurationBuilder = createStoreBuilder(cb.persistence());
String dbOptionName = "max_background_compactions";
String dbOptionValue = "2";
storeConfigurationBuilder.addProperty(RocksDBStore.DATABASE_PROPERTY_NAME_WITH_SUFFIX + dbOptionName, dbOptionValue);
String columnFamilyOptionName = "write_buffer_size";
String columnFamilyOptionValue = "96MB";
storeConfigurationBuilder.addProperty(RocksDBStore.COLUMN_FAMILY_PROPERTY_NAME_WITH_SUFFIX + columnFamilyOptionName, columnFamilyOptionValue);
Configuration c = cb.build();
String cacheName = "rocksdb-known-properties";
TestingUtil.defineConfiguration(cacheManager, cacheName, c);
// No easy way to ascertain whether the options are set; however, if the cache starts up it must have applied them,
// since otherwise this would fail like the unknown-properties test above
assertNotNull(cacheManager.getCache(cacheName));
}
}
| 3,818
| 43.406977
| 163
|
java
|
null |
infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/IracRocksDBStoreTest.java
|
package org.infinispan.persistence.rocksdb;
import java.io.File;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.IdentityKeyValueWrapper;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.infinispan.xsite.irac.persistence.BaseIracPersistenceTest;
import org.testng.annotations.Test;
/**
* Tests if the IRAC metadata is properly stored and retrieved from a {@link RocksDBStore}.
*
* @author Pedro Ruivo
* @since 11.0
*/
@Test(groups = "functional", testName = "persistence.rocksdb.IracRocksDBStoreTest")
public class IracRocksDBStoreTest extends BaseIracPersistenceTest<String> {
public IracRocksDBStoreTest() {
super(IdentityKeyValueWrapper.instance());
}
@Override
protected void configure(ConfigurationBuilder builder) {
builder.persistence().addStore(RocksDBStoreConfigurationBuilder.class)
.location(tmpDirectory + File.separator + "data")
.expiredLocation(tmpDirectory + File.separator + "expiry");
}
}
| 1,070
| 33.548387
| 91
|
java
|
null |
infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/RocksDBMultiCacheStoreFunctionalTest.java
|
package org.infinispan.persistence.rocksdb;
import java.io.File;
import java.nio.file.Paths;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.persistence.MultiStoresFunctionalTest;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "persistence.rocksdb.RocksDBMultiCacheStoreFunctionalTest")
public class RocksDBMultiCacheStoreFunctionalTest extends MultiStoresFunctionalTest<RocksDBStoreConfigurationBuilder> {
private File tmpDir = new File(CommonsTestingUtil.tmpDirectory(this.getClass()));
@BeforeMethod
protected void cleanDataFiles() {
if (tmpDir.exists()) {
Util.recursiveFileRemove(tmpDir);
}
}
@Override
protected RocksDBStoreConfigurationBuilder buildCacheStoreConfig(PersistenceConfigurationBuilder p, String discriminator) {
RocksDBStoreConfigurationBuilder store = p.addStore(RocksDBStoreConfigurationBuilder.class);
store.location(Paths.get(tmpDir.getAbsolutePath(), "rocksdb", "data" + discriminator).toString());
store.expiredLocation(Paths.get(tmpDir.getAbsolutePath(), "rocksdb", "expired-data-" + discriminator).toString());
return store;
}
}
| 1,431
| 41.117647
| 126
|
java
|
null |
infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/RocksDBStoreCompatibilityTest.java
|
package org.infinispan.persistence.rocksdb;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.AbstractPersistenceCompatibilityTest;
import org.infinispan.persistence.IdentityKeyValueWrapper;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.infinispan.test.data.Value;
import org.testng.annotations.Test;
/**
* Tests if {@link RocksDBStore} can migrate data from Infinispan 10.1.x.
*
* @author Pedro Ruivo
* @since 11.0
*/
@Test(groups = "functional", testName = "persistence.rocksdb.RocksDBStoreCompatibilityTest")
public class RocksDBStoreCompatibilityTest extends AbstractPersistenceCompatibilityTest<Value> {
private static final Map<Version, String> data = new HashMap<>(2);
static {
data.put(Version._10_1, "10_1_x_rocksdb_data");
data.put(Version._11_0, "11_0_x_rocksdb_data");
}
public RocksDBStoreCompatibilityTest() {
super(IdentityKeyValueWrapper.instance());
}
// The rocksdb store should still be able to migrate data from the 10.x stream
@Test
public void testReadWriteFrom101() throws Exception {
setParameters(Version._10_1, false, false);
doTestReadWrite();
}
@Test
public void testReadWriteFrom11() throws Exception {
setParameters(Version._11_0, false, false);
doTestReadWrite();
}
protected void beforeStartCache() throws Exception {
String root = data.get(oldVersion);
copyFiles(root, "data");
copyFiles(root, "expired");
}
private void copyFiles(String root, String qualifier) throws IOException {
Path destLocation = getStoreLocation(combinePath(tmpDirectory, qualifier), qualifier);
Files.createDirectories(destLocation);
String resource = root + "/" + qualifier;
File[] files = new File(Thread.currentThread().getContextClassLoader().getResource(resource).getPath()).listFiles();
for (File f : files) {
String filename = f.getName();
String destFile = filename;
if (filename.endsWith("_log")) {
destFile = filename.replace("_log", ".log");
}
copyFile(combinePath(resource, filename), destLocation, destFile);
}
}
@Override
protected String cacheName() {
return "rocksdb-cache-store";
}
@Override
protected void configurePersistence(ConfigurationBuilder builder, boolean generatingData) {
builder.persistence().addStore(RocksDBStoreConfigurationBuilder.class)
.segmented(oldSegmented)
.location(combinePath(tmpDirectory, "data"))
.expiredLocation(combinePath(tmpDirectory, "expired"));
}
}
| 2,846
| 32.494118
| 122
|
java
|
null |
infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/RocksDBParallelIterationTest.java
|
package org.infinispan.persistence.rocksdb;
import java.io.File;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.ParallelIterationTest;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.testng.annotations.Test;
@Test (groups = {"functional", "smoke"}, testName = "persistence.rocksdb.RocksDBParallelIterationTest")
public class RocksDBParallelIterationTest extends ParallelIterationTest {
private String tmpDirectory;
@Override
protected void configurePersistence(ConfigurationBuilder cb) {
tmpDirectory = CommonsTestingUtil.tmpDirectory(this.getClass());
new File(tmpDirectory).mkdirs();
cb.persistence()
.addStore(RocksDBStoreConfigurationBuilder.class)
.location(tmpDirectory + "/data")
.expiredLocation(tmpDirectory + "/expiry")
.clearThreshold(2);
}
@Override
protected void teardown() {
Util.recursiveFileRemove(tmpDirectory);
super.teardown();
}
}
| 1,158
| 32.114286
| 103
|
java
|
null |
infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/RocksDBStoreTest.java
|
package org.infinispan.persistence.rocksdb;
import static java.util.Collections.singletonList;
import static org.infinispan.commons.util.IntSets.immutableSet;
import static org.infinispan.util.concurrent.CompletionStages.join;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.commons.io.ByteBufferImpl;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.marshall.persistence.impl.MarshalledEntryUtil;
import org.infinispan.persistence.BaseNonBlockingStoreTest;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.test.fwk.TestInternalCacheEntryFactory;
import org.testng.AssertJUnit;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Factory;
import org.testng.annotations.Test;
import io.reactivex.rxjava3.core.Flowable;
@Test(groups = "unit", testName = "persistence.rocksdb.RocksDBStoreTest")
public class RocksDBStoreTest extends BaseNonBlockingStoreTest {
private String tmpDirectory = CommonsTestingUtil.tmpDirectory(this.getClass());
private boolean segmented;
public static final String KEY_1 = "key1";
public static final String KEY_2 = "key2";
@AfterClass(alwaysRun = true)
protected void clearTempDir() {
Util.recursiveFileRemove(tmpDirectory);
}
public RocksDBStoreTest segmented(boolean segmented) {
this.segmented = segmented;
return this;
}
@Factory
public Object[] factory() {
return new Object[] {
new RocksDBStoreTest().segmented(false),
new RocksDBStoreTest().segmented(true),
};
}
@Override
protected String parameters() {
return "[" + segmented + "]";
}
@Override
protected Configuration buildConfig(ConfigurationBuilder cb) {
createCacheStoreConfig(cb.persistence());
return cb.build();
}
protected RocksDBStoreConfigurationBuilder createCacheStoreConfig(PersistenceConfigurationBuilder lcb) {
RocksDBStoreConfigurationBuilder cfg = lcb.addStore(RocksDBStoreConfigurationBuilder.class);
cfg.segmented(segmented);
cfg.location(tmpDirectory);
cfg.expiredLocation(tmpDirectory);
cfg.clearThreshold(2);
return cfg;
}
@Override
protected NonBlockingStore createStore() {
clearTempDir();
return new RocksDBStore();
}
@Test(groups = "stress")
public void testConcurrentWrite() throws InterruptedException {
final int THREADS = 8;
final AtomicBoolean run = new AtomicBoolean(true);
final AtomicInteger written = new AtomicInteger();
final CountDownLatch started = new CountDownLatch(THREADS);
final CountDownLatch finished = new CountDownLatch(THREADS);
for (int i = 0; i < THREADS; ++i) {
final int thread = i;
fork(() -> {
try {
started.countDown();
int i1 = 0;
while (run.get()) {
InternalCacheEntry entry = TestInternalCacheEntryFactory.create("k" + i1, "v" + i1);
MarshallableEntry me = MarshalledEntryUtil.create(entry, getMarshaller());
try {
store.write(me);
++i1;
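// CAS loop: record that this thread completed at least one write by setting bit
// 'thread' in 'written'; a lost race leaves the bit unset for now, but it is
// simply retried on the next iteration of the outer loop.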
int prev;
do {
prev = written.get();
if ((prev & (1 << thread)) != 0) break;
} while (written.compareAndSet(prev, prev | (1 << thread)));
} catch (PersistenceException e) {
// when the store is stopped, exceptions are thrown
}
}
} catch (Exception e) {
log.error("Failed", e);
throw new RuntimeException(e);
} finally {
finished.countDown();
}
});
}
if (finished.await(1, TimeUnit.SECONDS)) {
fail("Test shouldn't have finished yet");
}
run.set(false);
if (!finished.await(30, TimeUnit.SECONDS)) {
fail("Test should have finished!");
}
assertEquals("pre", (1 << THREADS) - 1, written.get());
}
/**
* Test to make sure that when segments are added or removed that there are no issues
*/
public void testSegmentsRemovedAndAdded() {
int segment1 = keyPartitioner.getSegment(KEY_1);
MarshallableEntry me1 = marshallableEntryFactory.create(KEY_1, "value1");
store.write(me1);
assertTrue(join(store.containsKey(segment1, KEY_1)));
int segment2 = keyPartitioner.getSegment(KEY_2);
AssertJUnit.assertTrue(segment1 != segment2);
MarshallableEntry me2 = marshallableEntryFactory.create(KEY_2, "value2");
store.write(me2);
assertTrue(join(store.containsKey(segment2, KEY_2)));
assertEquals(Arrays.asList(KEY_1, KEY_2), listKeys(null));
store.removeSegments(immutableSet(segment1));
assertEquals(0, (long) join(store.size(immutableSet(segment1))));
assertFalse(join(store.containsKey(segment1, KEY_1)));
assertEmpty(immutableSet(segment1));
assertTrue(join(store.containsKey(segment2, KEY_2)));
assertEquals(1, (long) join(store.size(immutableSet(segment2))));
assertEquals(singletonList(KEY_2), listKeys(null));
// Now add the segment back
join(store.addSegments(immutableSet(segment1)));
store.write(me1);
assertTrue(store.contains(KEY_1));
assertEquals(Arrays.asList(KEY_1, KEY_2), listKeys(null));
}
public void testClear() {
MarshallableEntry me1 = marshallableEntryFactory.create(KEY_1, "value");
store.write(1, me1);
assertTrue(join(store.containsKey(1, KEY_1)));
// clear() uses RocksDB's DeleteRange call internally
// Create a fake key that is after the end key of this DeleteRange call
// A custom marshaller could in theory create the same kind of key, but this is simpler
// because we don't need to unmarshal the key
int keySize = 10000;
byte[] keyBytes = new byte[keySize];
Arrays.fill(keyBytes, (byte) 0xff);
byte[] valueBytes = new byte[0];
MarshallableEntry me2 = marshallableEntryFactory.create(ByteBufferImpl.create(keyBytes), ByteBufferImpl.create(valueBytes));
store.write(1, me2);
// Because key2 cannot be unmarshalled, we cannot confirm the write with contains(key2) or even with size()
store.clear();
assertFalse(join(store.containsKey(1, KEY_1)));
assertEmpty(null);
}
private void assertEmpty(IntSet segments) {
assertEquals(0, (long) join(store.size(segments)));
assertEquals(Collections.emptyList(), listKeys(segments));
}
private List<Object> listKeys(IntSet segments) {
return Flowable.fromPublisher(store.publishEntries(segments, null, true))
.map(MarshallableEntry::getKey)
.toSortedList()
.blockingGet();
}
}
| 7,887 | 35.688372 | 130 | java |
null | infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/config/ConfigurationTest.java |
package org.infinispan.persistence.rocksdb.config;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.net.URL;
import org.infinispan.Cache;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ParserRegistry;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfiguration;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationBuilder;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "persistence.rocksdb.configuration.ConfigurationTest")
public class ConfigurationTest extends AbstractInfinispanTest {
private String tmpDirectory = CommonsTestingUtil.tmpDirectory(this.getClass());
private String tmpDataDirectory = tmpDirectory + "/data";
private String tmpExpiredDirectory = tmpDirectory + "/expired";
@AfterClass(alwaysRun = true)
protected void clearTempDir() {
Util.recursiveFileRemove(tmpDirectory);
}
public void testConfigBuilder() {
GlobalConfiguration globalConfig = new GlobalConfigurationBuilder()
.transport().defaultTransport()
.globalState().persistentLocation(tmpDirectory)
.build();
Configuration cacheConfig = new ConfigurationBuilder().persistence().addStore(RocksDBStoreConfigurationBuilder.class).location(tmpDataDirectory)
.expiredLocation(tmpExpiredDirectory).build();
StoreConfiguration cacheLoaderConfig = cacheConfig.persistence().stores().get(0);
assertTrue(cacheLoaderConfig instanceof RocksDBStoreConfiguration);
RocksDBStoreConfiguration rocksdbConfig = (RocksDBStoreConfiguration) cacheLoaderConfig;
assertEquals(tmpDataDirectory, rocksdbConfig.location());
assertEquals(tmpExpiredDirectory, rocksdbConfig.expiredLocation());
EmbeddedCacheManager cacheManager = new DefaultCacheManager(globalConfig);
cacheManager.defineConfiguration("testCache", cacheConfig);
cacheManager.start();
Cache<String, String> cache = cacheManager.getCache("testCache");
cache.put("hello", "there");
cache.stop();
cacheManager.stop();
}
public void testXmlConfig() throws IOException {
URL config = ConfigurationTest.class.getResource("/configs/all/rocksdb-config.xml");
ConfigurationBuilderHolder configHolder = new ParserRegistry().parse(config);
// check persistence attributes
Configuration cacheConfig = configHolder.getNamedConfigurationBuilders().get("testCache").build();
assertFalse(cacheConfig.persistence().passivation());
assertEquals(cacheConfig.persistence().stores().size(), 1);
// check generic store attributes
StoreConfiguration cacheLoaderConfig = cacheConfig.persistence().stores().get(0);
assertFalse(cacheLoaderConfig.shared());
assertTrue(cacheLoaderConfig.preload());
assertTrue(cacheLoaderConfig instanceof RocksDBStoreConfiguration);
// check RocksDB store attributes
RocksDBStoreConfiguration rocksdbConfig = (RocksDBStoreConfiguration) cacheLoaderConfig;
assertEquals("/tmp/rocksdb/52/data", rocksdbConfig.location());
assertEquals("/tmp/rocksdb/52/expired", rocksdbConfig.expiredLocation());
}
}
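The testXmlConfig method above only inspects the parsed configuration model; the same declarative file can also bootstrap a running manager directly. A minimal sketch, assuming the configuration is reachable as a file path and reusing the "testCache" name from the test:
// Sketch only: boot a cache manager straight from the declarative RocksDB configuration.
static void startFromXml() throws IOException {
try (EmbeddedCacheManager cm = new DefaultCacheManager("configs/all/rocksdb-config.xml")) {
Cache<String, String> cache = cm.getCache("testCache");
cache.put("hello", "there");
}
}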
| 3,989 | 44.862069 | 150 | java |
null | infinispan-main/persistence/rocksdb/src/test/java/org/infinispan/persistence/rocksdb/config/ConfigurationSerializerTest.java |
package org.infinispan.persistence.rocksdb.config;
import static org.testng.Assert.assertEquals;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.serializer.AbstractConfigurationSerializerTest;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfiguration;
import org.testng.annotations.Test;
@Test(testName = "persistence.rocksdb.configuration.ConfigurationSerializerTest", groups="functional")
public class ConfigurationSerializerTest extends AbstractConfigurationSerializerTest {
@Override
protected void compareStoreConfiguration(String name, StoreConfiguration beforeStore, StoreConfiguration afterStore) {
super.compareStoreConfiguration(name, beforeStore, afterStore);
RocksDBStoreConfiguration before = (RocksDBStoreConfiguration) beforeStore;
RocksDBStoreConfiguration after = (RocksDBStoreConfiguration) afterStore;
assertEquals(before.attributes(), after.attributes());
assertEquals(before.expiration().attributes(), after.expiration().attributes());
}
}
| 1,077 | 50.333333 | 121 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/package-info.java |
/**
 * RocksDB-based {@link org.infinispan.persistence.spi.NonBlockingStore} implementation.
*
* @api.public
*/
package org.infinispan.persistence.rocksdb;
| 150 | 20.571429 | 79 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/PersistenceContextInitializer.java |
package org.infinispan.persistence.rocksdb;
import org.infinispan.marshall.persistence.impl.PersistenceMarshallerImpl;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
/**
* Interface used to initialise the {@link PersistenceMarshallerImpl}'s {@link org.infinispan.protostream.SerializationContext}
* using the specified Pojos, Marshaller implementations and provided .proto schemas.
*
* @author Ryan Emerson
* @since 10.0
*/
@AutoProtoSchemaBuilder(
includeClasses = {
RocksDBStore.ExpiryBucket.class,
RocksDBStore.MetadataImpl.class
},
schemaFileName = "persistence.rocksdb.proto",
schemaFilePath = "proto/generated",
schemaPackageName = "org.infinispan.persistence.rocksdb",
service = false
)
interface PersistenceContextInitializer extends SerializationContextInitializer {
}
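At build time the ProtoStream annotation processor turns this interface into a concrete PersistenceContextInitializerImpl, which RocksDBStore.start() registers with the persistence marshaller. A minimal sketch of how such a generated initializer populates a ProtoStream context, assuming only the standard SerializationContextInitializer contract:
// Sketch only: manually populating a ProtoStream context with the generated initializer.
static org.infinispan.protostream.SerializationContext newRocksDbSerializationContext() {
org.infinispan.protostream.SerializationContext serCtx =
org.infinispan.protostream.ProtobufUtil.newSerializationContext();
SerializationContextInitializer sci = new PersistenceContextInitializerImpl();
sci.registerSchema(serCtx);      // adds persistence.rocksdb.proto to the context
sci.registerMarshallers(serCtx); // adds marshallers for ExpiryBucket and MetadataImpl
return serCtx;
}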
| 936 | 35.038462 | 127 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/RocksDBStore.java |
package org.infinispan.persistence.rocksdb;
import static org.infinispan.util.logging.Log.PERSISTENCE;
import java.io.File;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Function;
import java.util.function.Predicate;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.marshall.ProtoStreamTypeIds;
import org.infinispan.commons.reactive.RxJavaInterop;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.AbstractIterator;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.Util;
import org.infinispan.commons.util.Version;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.marshall.persistence.impl.MarshallableEntryImpl;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.persistence.internal.PersistenceUtil;
import org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfiguration;
import org.infinispan.persistence.rocksdb.logging.Log;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.spi.MarshalledValue;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.protostream.annotations.ProtoFactory;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.protostream.annotations.ProtoTypeId;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.LogFactory;
import org.reactivestreams.Publisher;
import org.rocksdb.BuiltinComparator;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.Options;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.core.Maybe;
import io.reactivex.rxjava3.processors.FlowableProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
@ConfiguredBy(RocksDBStoreConfiguration.class)
public class RocksDBStore<K, V> implements NonBlockingStore<K, V> {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass(), Log.class);
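// Range bounds used by clearColumnFamily(): deleteRange removes [BEGIN_KEY, END_KEY), and a
// follow-up iteration catches any pathological keys that sort after END_KEY.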
private static final byte[] BEGIN_KEY = createAndFillArray(1, (byte) 0x00);
private static final byte[] END_KEY = createAndFillArray(128, (byte) 0xff);
static final String DATABASE_PROPERTY_NAME_WITH_SUFFIX = "database.";
static final String COLUMN_FAMILY_PROPERTY_NAME_WITH_SUFFIX = "data.";
static final byte[] META_COLUMN_FAMILY = "meta-cf".getBytes();
static final byte[] META_COLUMN_FAMILY_KEY = "metadata".getBytes();
protected RocksDBStoreConfiguration configuration;
private RocksDB db;
private RocksDB expiredDb;
private InitializationContext ctx;
private TimeService timeService;
private WriteOptions dataWriteOptions;
private RocksDBHandler handler;
private Properties databaseProperties;
private Properties columnFamilyProperties;
private Marshaller marshaller;
private KeyPartitioner keyPartitioner;
private MarshallableEntryFactory<K, V> entryFactory;
private BlockingManager blockingManager;
@Override
public CompletionStage<Void> start(InitializationContext ctx) {
this.configuration = ctx.getConfiguration();
this.ctx = ctx;
this.timeService = ctx.getTimeService();
this.marshaller = ctx.getPersistenceMarshaller();
this.entryFactory = ctx.getMarshallableEntryFactory();
this.blockingManager = ctx.getBlockingManager();
this.keyPartitioner = ctx.getKeyPartitioner();
ctx.getPersistenceMarshaller().register(new PersistenceContextInitializerImpl());
// Has to be done before we open the database, so we can pass the properties
Properties allProperties = configuration.properties();
for (Map.Entry<Object, Object> entry : allProperties.entrySet()) {
String key = entry.getKey().toString();
if (key.startsWith(DATABASE_PROPERTY_NAME_WITH_SUFFIX)) {
if (databaseProperties == null) {
databaseProperties = new Properties();
}
databaseProperties.setProperty(key.substring(DATABASE_PROPERTY_NAME_WITH_SUFFIX.length()), entry.getValue().toString());
} else if (key.startsWith(COLUMN_FAMILY_PROPERTY_NAME_WITH_SUFFIX)) {
if (columnFamilyProperties == null) {
columnFamilyProperties = new Properties();
}
columnFamilyProperties.setProperty(key.substring(COLUMN_FAMILY_PROPERTY_NAME_WITH_SUFFIX.length()), entry.getValue().toString());
}
}
return blockingManager.runBlocking(() -> {
try {
initDefaultHandler();
MetadataImpl existingMeta = handler.loadMetadata();
if (existingMeta == null && !configuration.purgeOnStartup()) {
String cacheName = ctx.getCache().getName();
// Metadata does not exist, therefore we must be reading from a pre-12.x store. Migrate the old data
PERSISTENCE.startMigratingPersistenceData(cacheName);
migrateFromV11();
PERSISTENCE.persistedDataSuccessfulMigrated(cacheName);
}
// Update the metadata entry to use the current Infinispan version
handler.writeMetadata();
} catch (Exception e) {
throw new CacheConfigurationException("Unable to open database", e);
}
}, "rocksdb-open");
}
private void initDefaultHandler() throws RocksDBException {
this.handler = createHandler(getLocation(), getExpirationLocation());
this.db = handler.db;
this.expiredDb = handler.expiredDb;
}
private RocksDBHandler createHandler(Path data, Path expired) throws RocksDBException {
AdvancedCache<?, ?> cache = ctx.getCache().getAdvancedCache();
if (configuration.segmented()) {
return new SegmentedRocksDBHandler(data, expired, cache.getCacheConfiguration().clustering().hash().numSegments());
}
return new NonSegmentedRocksDBHandler(data, expired, keyPartitioner);
}
private void migrateFromV11() throws IOException, RocksDBException {
IntSet segments;
if (configuration.segmented()) {
int numSegments = ctx.getCache().getCacheConfiguration().clustering().hash().numSegments();
segments = IntSets.immutableRangeSet(numSegments);
} else {
segments = null;
}
// If no entries exist in the store, then nothing to migrate
if (CompletionStages.join(handler.size(segments)) == 0)
return;
Path newDbLocation = getQualifiedLocation("new_data");
Path newExpiredDbLocation = getQualifiedLocation("new_expired");
try {
// Create new DB and open handle
RocksDBHandler migrationHandler = createHandler(newDbLocation, newExpiredDbLocation);
Function<RocksIterator, Flowable<MarshallableEntry<K, V>>> function =
it -> Flowable.fromIterable(() -> new RocksLegacyEntryIterator(it));
// Iterate and convert entries from old handle
Publisher<MarshallableEntry<K, V>> publisher = configuration.segmented() ?
((SegmentedRocksDBHandler) handler).handleIteratorFunction(function, segments) :
handler.publish(-1, function);
WriteBatch batch = new WriteBatch();
Set<MarshallableEntry<K, V>> expirableEntries = new HashSet<>();
Flowable.fromPublisher(publisher)
.subscribe(e -> {
ColumnFamilyHandle handle = migrationHandler.getHandle(keyPartitioner.getSegment(e.getKey()));
batch.put(handle, e.getKeyBytes().copy().getBuf(), marshall(e.getMarshalledValue()));
if (e.expiryTime() > -1)
expirableEntries.add(e);
});
// Only flush the batch when it has entries, and always release it afterwards
if (batch.count() > 0)
migrationHandler.db.write(dataWriteOptions(), batch);
batch.close();
for (MarshallableEntry<K, V> e : expirableEntries)
addNewExpiry(migrationHandler.expiredDb, e);
// Close original and new handler
handler.close();
migrationHandler.close();
// Copy new db to original location
Path dataLocation = getLocation();
Path expirationLocation = getExpirationLocation();
Util.recursiveFileRemove(dataLocation);
Util.recursiveFileRemove(expirationLocation);
Files.move(newDbLocation, dataLocation, StandardCopyOption.REPLACE_EXISTING);
Files.move(newExpiredDbLocation, expirationLocation, StandardCopyOption.REPLACE_EXISTING);
// Open db handle to new db at original location
initDefaultHandler();
} finally {
// In the event of a failure, always remove the new dbs
Util.recursiveFileRemove(newDbLocation);
Util.recursiveFileRemove(newExpiredDbLocation);
}
}
private Path getQualifiedLocation(String qualifier) {
return org.infinispan.persistence.PersistenceUtil.getQualifiedLocation(ctx.getGlobalConfiguration(), configuration.location(), ctx.getCache().getName(), qualifier);
}
private Path getLocation() {
return getQualifiedLocation("data");
}
private Path getExpirationLocation() {
return getQualifiedLocation("expired");
}
private WriteOptions dataWriteOptions() {
if (dataWriteOptions == null)
dataWriteOptions = new WriteOptions().setDisableWAL(false);
return dataWriteOptions;
}
protected DBOptions dataDbOptions() {
DBOptions dbOptions;
if (databaseProperties != null) {
dbOptions = DBOptions.getDBOptionsFromProps(databaseProperties);
if (dbOptions == null) {
throw log.rocksDBUnknownPropertiesSupplied(databaseProperties.toString());
}
} else {
dbOptions = new DBOptions();
}
return dbOptions
.setCreateIfMissing(true)
// We have to create missing column families on open.
// Otherwise when we start we won't know what column families this database had if any - thus
// we must specify all of them and later remove them.
.setCreateMissingColumnFamilies(true);
}
protected Options expiredDbOptions() {
return new Options()
.setCreateIfMissing(true)
// Make sure keys are sorted by bytes - we use this sorting to remove entries that have expired most recently
.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);
}
/**
* Creates database if it doesn't exist.
*/
protected static RocksDB openDatabase(Path location, Options options) throws RocksDBException {
File dir = location.toFile();
dir.mkdirs();
return RocksDB.open(options, location.toString());
}
@Override
public CompletionStage<Void> stop() {
return blockingManager.runBlocking(() -> {
// it could be null if an issue occurs during the initialization
if (handler != null) {
handler.close();
}
}, "rocksdb-stop");
}
@Override
public Set<Characteristic> characteristics() {
return EnumSet.of(Characteristic.BULK_READ, Characteristic.EXPIRATION, Characteristic.SEGMENTABLE);
}
@Override
public CompletionStage<Boolean> isAvailable() {
return blockingManager.supplyBlocking(() -> getLocation().toFile().exists() && getExpirationLocation().toFile().exists(),
"rocksdb-available");
}
@Override
public CompletionStage<Void> clear() {
return handler.clear();
}
@Override
public CompletionStage<Long> size(IntSet segments) {
return handler.size(segments);
}
@Override
public CompletionStage<Long> approximateSize(IntSet segments) {
return handler.approximateSize(segments);
}
@Override
public CompletionStage<Boolean> containsKey(int segment, Object key) {
// This might be able to use RocksDB#keyMayExist - but API is a bit flaky
return load(segment, key)
.thenApply(Objects::nonNull);
}
@Override
public Publisher<K> publishKeys(IntSet segments, Predicate<? super K> filter) {
return Flowable.fromPublisher(handler.publishEntries(segments, filter, false))
.map(MarshallableEntry::getKey);
}
@Override
public Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean includeValues) {
return handler.publishEntries(segments, filter, includeValues);
}
@Override
public CompletionStage<Boolean> delete(int segment, Object key) {
return handler.delete(segment, key);
}
@Override
public CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> entry) {
return handler.write(segment, entry);
}
@Override
public CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
return handler.load(segment, key);
}
@Override
public CompletionStage<Void> batch(int publisherCount, Publisher<SegmentedPublisher<Object>> removePublisher,
Publisher<SegmentedPublisher<MarshallableEntry<K, V>>> writePublisher) {
WriteBatch batch = new WriteBatch();
Set<MarshallableEntry<K, V>> expirableEntries = new HashSet<>();
Flowable.fromPublisher(removePublisher)
.subscribe(sp -> {
ColumnFamilyHandle handle = handler.getHandle(sp.getSegment());
Flowable.fromPublisher(sp)
.subscribe(removed -> batch.delete(handle, marshall(removed)));
});
Flowable.fromPublisher(writePublisher)
.subscribe(sp -> {
ColumnFamilyHandle handle = handler.getHandle(sp.getSegment());
Flowable.fromPublisher(sp)
.subscribe(me -> {
batch.put(handle, marshall(me.getKey()), marshall(me.getMarshalledValue()));
if (me.expiryTime() > -1) {
expirableEntries.add(me);
}
});
});
if (batch.count() <= 0) {
batch.close();
return CompletableFutures.completedNull();
}
return blockingManager.runBlocking(() -> {
try {
db.write(dataWriteOptions(), batch);
for (MarshallableEntry<K, V> me : expirableEntries) {
addNewExpiry(expiredDb, me);
}
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}, "rocksdb-batch").whenComplete((ignore, t) -> batch.close());
}
@Override
public Publisher<MarshallableEntry<K, V>> purgeExpired() {
Publisher<List<MarshallableEntry<K, V>>> purgedBatches = blockingManager.blockingPublisher(Flowable.defer(() -> {
// We check expiration based on time of subscription only
long now = timeService.wallClockTime();
return actualPurgeExpired(now)
// We return a buffer of expired entries emitted to the non blocking thread
// This prevents waking up the non blocking thread for every entry as they will most likely be
// consumed much faster than emission (since each emission performs a get and remove)
.buffer(16);
}));
return Flowable.fromPublisher(purgedBatches)
.concatMap(Flowable::fromIterable);
}
private Flowable<MarshallableEntry<K, V>> actualPurgeExpired(long now) {
// The following flowable is responsible for emitting entries that have expired from expiredDb and removing the
// given entries
Flowable<byte[]> expiredFlowable = Flowable.using(() -> {
ReadOptions readOptions = new ReadOptions().setFillCache(false);
return new AbstractMap.SimpleImmutableEntry<>(readOptions, expiredDb.newIterator(readOptions));
}, entry -> {
if (entry.getValue() == null) {
return Flowable.empty();
}
RocksIterator iterator = entry.getValue();
iterator.seekToFirst();
return Flowable.fromIterable(() ->
new AbstractIterator<byte[]>() {
@Override
protected byte[] getNext() {
if (!iterator.isValid()) {
return null;
}
byte[] keyBytes = iterator.key();
Long time = unmarshall(keyBytes);
if (time > now)
return null;
try {
expiredDb.delete(keyBytes);
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
byte[] value = iterator.value();
iterator.next();
return value;
}
});
}, entry -> {
entry.getKey().close();
RocksIterator rocksIterator = entry.getValue();
if (rocksIterator != null) {
rocksIterator.close();
}
});
Flowable<MarshallableEntry<K, V>> expiredEntryFlowable = expiredFlowable.flatMap(expiredBytes -> {
Object bucketKey = unmarshall(expiredBytes);
if (bucketKey instanceof ExpiryBucket) {
return Flowable.fromIterable(((ExpiryBucket) bucketKey).entries)
.flatMapMaybe(marshalledKey -> {
ColumnFamilyHandle columnFamilyHandle = handler.getHandleForMarshalledKey(marshalledKey);
MarshalledValue mv = handlePossiblyExpiredKey(columnFamilyHandle, marshalledKey, now);
return mv == null ? Maybe.empty() : Maybe.just(entryFactory.create(unmarshall(marshalledKey), mv));
});
} else {
// The bucketKey is an actual key
ColumnFamilyHandle columnFamilyHandle = handler.getHandle(bucketKey);
MarshalledValue mv = handlePossiblyExpiredKey(columnFamilyHandle, marshall(bucketKey), now);
return mv == null ? Flowable.empty() : Flowable.just(entryFactory.create(bucketKey, mv));
}
});
if (log.isTraceEnabled()) {
// Note this tracing only works properly for one subscriber
FlowableProcessor<MarshallableEntry<K, V>> mirrorEntries = UnicastProcessor.create();
expiredEntryFlowable = expiredEntryFlowable
.doOnEach(mirrorEntries)
.doOnSubscribe(subscription -> log.tracef("Purging entries from RocksDBStore"));
mirrorEntries.count()
.subscribe(count -> log.tracef("Purged %d entries from RocksDBStore", count));
}
return expiredEntryFlowable;
}
private MarshalledValue handlePossiblyExpiredKey(ColumnFamilyHandle columnFamilyHandle, byte[] marshalledKey,
long now) throws RocksDBException {
byte[] valueBytes = db.get(columnFamilyHandle, marshalledKey);
if (valueBytes == null) {
return null;
}
MarshalledValue mv = unmarshall(valueBytes);
if (mv != null) {
// TODO race condition: the entry could be updated between the get and delete!
Metadata metadata = unmarshall(MarshallUtil.toByteArray(mv.getMetadataBytes()));
if (MarshallableEntryImpl.isExpired(metadata, now, mv.getCreated(), mv.getLastUsed())) {
// somewhat inefficient to FIND then REMOVE... but required if the value is updated
db.delete(columnFamilyHandle, marshalledKey);
return mv;
}
}
return null;
}
@Override
public CompletionStage<Void> addSegments(IntSet segments) {
return handler.addSegments(segments);
}
@Override
public CompletionStage<Void> removeSegments(IntSet segments) {
return handler.removeSegments(segments);
}
private byte[] marshall(Object entry) {
try {
return marshaller.objectToByteBuffer(entry);
} catch (IOException e) {
throw new PersistenceException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new PersistenceException(e);
}
}
private <E> E unmarshall(byte[] bytes, Marshaller marshaller) {
if (bytes == null)
return null;
try {
//noinspection unchecked
return (E) marshaller.objectFromByteBuffer(bytes);
} catch (IOException | ClassNotFoundException e) {
throw new PersistenceException(e);
}
}
private <E> E unmarshall(byte[] bytes) {
return unmarshall(bytes, this.marshaller);
}
private MarshallableEntry<K, V> unmarshallEntry(Object key, byte[] valueBytes) {
MarshalledValue value = unmarshall(valueBytes);
if (value == null) return null;
return entryFactory.create(key, value.getValueBytes(), value.getMetadataBytes(), value.getInternalMetadataBytes(),
value.getCreated(), value.getLastUsed());
}
private void addNewExpiry(RocksDB expiredDb, MarshallableEntry<? extends K, ? extends V> entry) throws RocksDBException {
long expiry = entry.expiryTime();
long maxIdle = entry.getMetadata().maxIdle();
if (maxIdle > 0) {
// Coding getExpiryTime() for transient entries has the risk of being a moving target
// which could lead to unexpected results, hence, InternalCacheEntry calls are required
expiry = maxIdle + ctx.getTimeService().wallClockTime();
}
byte[] keyBytes = entry.getKeyBytes().copy().getBuf();
putExpireDbData(expiredDb, new ExpiryEntry(expiry, keyBytes));
}
@ProtoTypeId(ProtoStreamTypeIds.ROCKSDB_EXPIRY_BUCKET)
static final class ExpiryBucket {
@ProtoField(number = 1, collectionImplementation = ArrayList.class)
List<byte[]> entries;
ExpiryBucket(){}
ExpiryBucket(byte[] existingKey, byte[] newKey) {
entries = new ArrayList<>(2);
entries.add(existingKey);
entries.add(newKey);
}
}
@ProtoTypeId(ProtoStreamTypeIds.ROCKSDB_PERSISTED_METADATA)
static final class MetadataImpl {
@ProtoField(number = 1, defaultValue = "-1")
short version;
@ProtoFactory
MetadataImpl(short version) {
this.version = version;
}
}
private static final class ExpiryEntry {
final long expiry;
final byte[] keyBytes;
ExpiryEntry(long expiry, byte[] keyBytes) {
this.expiry = expiry;
this.keyBytes = keyBytes;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ExpiryEntry that = (ExpiryEntry) o;
return expiry == that.expiry &&
Arrays.equals(keyBytes, that.keyBytes);
}
@Override
public int hashCode() {
int result = Objects.hash(expiry);
result = 31 * result + Arrays.hashCode(keyBytes);
return result;
}
}
private class RocksLegacyEntryIterator extends AbstractIterator<MarshallableEntry<K, V>> {
private final RocksIterator it;
private final long now;
private final PersistenceMarshaller pm;
private final Marshaller userMarshaller;
RocksLegacyEntryIterator(RocksIterator it) {
this.it = it;
this.now = timeService.wallClockTime();
this.pm = ctx.getPersistenceMarshaller();
this.userMarshaller = pm.getUserMarshaller();
}
@Override
protected MarshallableEntry<K, V> getNext() {
MarshallableEntry<K, V> entry = null;
while (entry == null && it.isValid()) {
K key = unmarshall(it.key(), userMarshaller);
MarshalledValue mv = unmarshall(it.value(), pm);
V value = unmarshall(mv.getValueBytes().getBuf(), userMarshaller);
Metadata meta;
try {
meta = unmarshall(mv.getMetadataBytes().getBuf(), userMarshaller);
} catch (IllegalArgumentException e) {
// For metadata we need to attempt to read with user-marshaller first in case custom metadata used, otherwise use the persistence marshaller
meta = unmarshall(mv.getMetadataBytes().getBuf(), pm);
}
PrivateMetadata internalMeta = unmarshall(mv.getInternalMetadataBytes().copy().getBuf(), userMarshaller);
MarshallableEntry<K, V> me = entryFactory.create(key, value, meta, internalMeta, mv.getCreated(), mv.getLastUsed());
if (me != null && !me.isExpired(now)) {
entry = me;
}
it.next();
}
return entry;
}
}
private class RocksEntryIterator extends AbstractIterator<MarshallableEntry<K, V>> {
private final RocksIterator it;
private final Predicate<? super K> filter;
private final long now;
RocksEntryIterator(RocksIterator it, Predicate<? super K> filter, long now) {
this.it = it;
this.filter = filter;
this.now = now;
}
@Override
protected MarshallableEntry<K, V> getNext() {
MarshallableEntry<K, V> entry = null;
while (entry == null && it.isValid()) {
K key = unmarshall(it.key());
if (filter == null || filter.test(key)) {
MarshallableEntry<K, V> me = unmarshallEntry(key, it.value());
if (me != null && !me.isExpired(now)) {
entry = me;
}
}
it.next();
}
return entry;
}
}
private abstract class RocksDBHandler {
protected RocksDB db;
protected RocksDB expiredDb;
protected ColumnFamilyHandle metaColumnFamilyHandle;
abstract RocksDB open(Path location, DBOptions options) throws RocksDBException;
abstract void close();
abstract ColumnFamilyHandle getHandle(int segment);
abstract ColumnFamilyHandle getHandle(Object key);
abstract ColumnFamilyHandle getHandleForMarshalledKey(byte[] marshalledKey);
void writeMetadata() throws RocksDBException {
MetadataImpl metadata = new MetadataImpl(Version.getVersionShort());
db.put(metaColumnFamilyHandle, META_COLUMN_FAMILY_KEY, marshall(metadata));
}
MetadataImpl loadMetadata() throws RocksDBException {
return unmarshall(db.get(metaColumnFamilyHandle, META_COLUMN_FAMILY_KEY));
}
ColumnFamilyDescriptor newDescriptor(byte[] name) {
ColumnFamilyOptions columnFamilyOptions;
if (columnFamilyProperties != null) {
columnFamilyOptions = ColumnFamilyOptions.getColumnFamilyOptionsFromProps(columnFamilyProperties);
if (columnFamilyOptions == null) {
throw log.rocksDBUnknownPropertiesSupplied(columnFamilyProperties.toString());
}
} else {
columnFamilyOptions = new ColumnFamilyOptions();
}
if (configuration.attributes().attribute(RocksDBStoreConfiguration.COMPRESSION_TYPE).isModified()) {
columnFamilyOptions.setCompressionType(configuration.compressionType().getValue());
}
return new ColumnFamilyDescriptor(name, columnFamilyOptions);
}
CompletionStage<MarshallableEntry<K, V>> load(int segment, Object key) {
ColumnFamilyHandle handle = getHandle(segment);
if (handle == null) {
log.trace("Ignoring load as handle is not currently configured");
return CompletableFutures.completedNull();
}
try {
CompletionStage<byte[]> entryByteStage = blockingManager.supplyBlocking(() -> {
try {
return db.get(handle, marshall(key));
} catch (RocksDBException e) {
throw new CompletionException(e);
}
}, "rocksdb-load");
return entryByteStage.thenApply(entryBytes -> {
MarshallableEntry<K, V> me = unmarshallEntry(key, entryBytes);
if (me == null || me.isExpired(timeService.wallClockTime())) {
return null;
}
return me;
});
} catch (Exception e) {
throw new PersistenceException(e);
}
}
CompletionStage<Void> write(int segment, MarshallableEntry<? extends K, ? extends V> me) {
ColumnFamilyHandle handle = getHandle(segment);
if (handle == null) {
log.trace("Ignoring write as handle is not currently configured");
return CompletableFutures.completedNull();
}
try {
byte[] marshalledKey = MarshallUtil.toByteArray(me.getKeyBytes());
byte[] marshalledValue = marshall(me.getMarshalledValue());
return blockingManager.runBlocking(() -> {
try {
db.put(handle, marshalledKey, marshalledValue);
if (me.expiryTime() > -1) {
addNewExpiry(expiredDb, me);
}
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}, "rocksdb-write");
} catch (Exception e) {
throw new PersistenceException(e);
}
}
CompletionStage<Boolean> delete(int segment, Object key) {
try {
byte[] keyBytes = marshall(key);
ColumnFamilyHandle handle = getHandle(segment);
return blockingManager.supplyBlocking(() -> {
try {
db.delete(handle, keyBytes);
return null;
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}, "rocksdb-delete");
} catch (Exception e) {
throw new PersistenceException(e);
}
}
abstract CompletionStage<Void> clear();
abstract Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter,
boolean fetchValue);
CompletionStage<Long> size(IntSet segments) {
return Flowable.fromPublisher(publishKeys(segments, null))
.count().toCompletionStage();
}
abstract CompletionStage<Long> approximateSize(IntSet segments);
<P> Publisher<P> publish(int segment, Function<RocksIterator, Flowable<P>> function) {
ReadOptions readOptions = new ReadOptions().setFillCache(false);
return blockingManager.blockingPublisher(Flowable.using(() -> wrapIterator(db, readOptions, segment), iterator -> {
if (iterator == null) {
return Flowable.empty();
}
iterator.seekToFirst();
return function.apply(iterator);
}, iterator -> {
if (iterator != null) {
iterator.close();
}
readOptions.close();
}));
}
abstract RocksIterator wrapIterator(RocksDB db, ReadOptions readOptions, int segment);
abstract CompletionStage<Void> addSegments(IntSet segments);
abstract CompletionStage<Void> removeSegments(IntSet segments);
}
private final class NonSegmentedRocksDBHandler extends RocksDBHandler {
private final KeyPartitioner keyPartitioner;
private ColumnFamilyHandle defaultColumnFamilyHandle;
private NonSegmentedRocksDBHandler(Path data, Path expired, KeyPartitioner keyPartitioner) throws RocksDBException {
this.db = open(data, dataDbOptions());
this.expiredDb = openDatabase(expired, expiredDbOptions());
this.keyPartitioner = keyPartitioner;
}
@Override
ColumnFamilyHandle getHandle(int segment) {
return defaultColumnFamilyHandle;
}
@Override
ColumnFamilyHandle getHandle(Object key) {
return defaultColumnFamilyHandle;
}
@Override
ColumnFamilyHandle getHandleForMarshalledKey(byte[] marshalledKey) {
return defaultColumnFamilyHandle;
}
@Override
RocksDB open(Path location, DBOptions options) throws RocksDBException {
File dir = location.toFile();
dir.mkdirs();
List<ColumnFamilyDescriptor> descriptors = new ArrayList<>(2);
List<ColumnFamilyHandle> handles = new ArrayList<>(2);
descriptors.add(newDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
descriptors.add(newDescriptor(META_COLUMN_FAMILY));
RocksDB rocksDB = RocksDB.open(options, location.toString(), descriptors, handles);
defaultColumnFamilyHandle = handles.get(0);
metaColumnFamilyHandle = handles.get(1);
return rocksDB;
}
@Override
CompletionStage<Void> clear() {
return clear(null);
}
CompletionStage<Void> clear(IntSet segments) {
return blockingManager.runBlocking(() -> {
if (segments == null) {
clearColumnFamily(defaultColumnFamilyHandle);
} else {
try (ReadOptions readOptions = new ReadOptions().setFillCache(false)) {
try (RocksIterator it = db.newIterator(defaultColumnFamilyHandle, readOptions)) {
for (it.seekToFirst(); it.isValid(); it.next()) {
byte[] keyBytes = it.key();
Object key = unmarshall(keyBytes);
int segment = keyPartitioner.getSegment(key);
if (segments.contains(segment)) {
db.delete(defaultColumnFamilyHandle, keyBytes);
}
}
}
} catch (Exception e) {
throw new PersistenceException(e);
}
}
}, "rocksdb-clear");
}
@Override
void close() {
defaultColumnFamilyHandle.close();
db.close();
expiredDb.close();
}
protected RocksIterator wrapIterator(RocksDB db, ReadOptions readOptions, int segment) {
return db.newIterator(defaultColumnFamilyHandle, readOptions);
}
@Override
Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean fetchValue) {
Predicate<? super K> combinedFilter = PersistenceUtil.combinePredicate(segments, keyPartitioner, filter);
return publish(-1, it -> Flowable.fromIterable(() -> {
// Make sure this is taken when the iterator is created
long now = timeService.wallClockTime();
return new RocksEntryIterator(it, combinedFilter, now);
}));
}
@Override
CompletionStage<Void> addSegments(IntSet segments) {
// Do nothing
return CompletableFutures.completedNull();
}
@Override
CompletionStage<Void> removeSegments(IntSet segments) {
// Unfortunately we have to clear all entries that map to each entry, which requires a full iteration and
// segment check on every entry
return clear(segments);
}
@Override
CompletionStage<Long> approximateSize(IntSet segments) {
return blockingManager.supplyBlocking(() -> {
try {
return Long.parseLong(db.getProperty(defaultColumnFamilyHandle, "rocksdb.estimate-num-keys"));
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}, "rocksdb-approximateSize");
}
}
private class SegmentedRocksDBHandler extends RocksDBHandler {
private final AtomicReferenceArray<ColumnFamilyHandle> handles;
private SegmentedRocksDBHandler(Path data, Path expired, int segmentCount) throws RocksDBException {
this.handles = new AtomicReferenceArray<>(segmentCount);
this.db = open(data, dataDbOptions());
this.expiredDb = openDatabase(expired, expiredDbOptions());
}
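// Encodes a segment id as a 4-byte big-endian array, used as that segment's column family name.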
byte[] byteArrayFromInt(int val) {
return new byte[] {
(byte) (val >>> 24),
(byte) (val >>> 16),
(byte) (val >>> 8),
(byte) (val)
};
}
@Override
ColumnFamilyHandle getHandle(int segment) {
return handles.get(segment);
}
@Override
ColumnFamilyHandle getHandle(Object key) {
return handles.get(keyPartitioner.getSegment(key));
}
@Override
ColumnFamilyHandle getHandleForMarshalledKey(byte[] marshalledKey) {
return getHandle(unmarshall(marshalledKey));
}
@Override
RocksDB open(Path location, DBOptions options) throws RocksDBException {
File dir = location.toFile();
dir.mkdirs();
int segmentCount = handles.length();
List<ColumnFamilyDescriptor> descriptors = new ArrayList<>(segmentCount + 2);
List<ColumnFamilyHandle> outHandles = new ArrayList<>(segmentCount + 2);
// You have to open the default column family
descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions()));
// Create the meta column family
descriptors.add(new ColumnFamilyDescriptor(META_COLUMN_FAMILY, new ColumnFamilyOptions()));
for (int i = 0; i < segmentCount; ++i) {
descriptors.add(newDescriptor(byteArrayFromInt(i)));
}
RocksDB rocksDB = RocksDB.open(options, location.toString(), descriptors, outHandles);
metaColumnFamilyHandle = outHandles.get(1);
for (int i = 0; i < segmentCount; ++i) {
handles.set(i, outHandles.get(i + 2));
}
return rocksDB;
}
@Override
CompletionStage<Void> clear() {
return blockingManager.runBlocking(() -> {
for (int i = 0; i < handles.length(); ++i) {
clearForSegment(i);
}
}, "rocksdb-clear");
}
/**
* Clear out the entries for a segment
* @param segment the segment to clear out
*/
private void clearForSegment(int segment) {
ColumnFamilyHandle handle = handles.get(segment);
RocksDBStore.this.clearColumnFamily(handle);
}
@Override
void close() {
for (int i = 0; i < handles.length(); ++i) {
ColumnFamilyHandle handle = handles.getAndSet(i, null);
if (handle != null) {
handle.close();
}
}
db.close();
expiredDb.close();
}
@Override
Publisher<MarshallableEntry<K, V>> publishEntries(IntSet segments, Predicate<? super K> filter, boolean fetchValue) {
Function<RocksIterator, Flowable<MarshallableEntry<K, V>>> function = it -> Flowable.fromIterable(() -> {
long now = timeService.wallClockTime();
return new RocksEntryIterator(it, filter, now);
});
return handleIteratorFunction(function, segments);
}
<R> Publisher<R> handleIteratorFunction(Function<RocksIterator, Flowable<R>> function, IntSet segments) {
// Short circuit if only a single segment - assumed to be invoked from persistence thread
if (segments != null && segments.size() == 1) {
return publish(segments.iterator().nextInt(), function);
}
IntSet segmentsToUse = segments == null ? IntSets.immutableRangeSet(handles.length()) : segments;
return Flowable.fromStream(segmentsToUse.intStream().mapToObj(i -> publish(i, function)))
.concatMap(RxJavaInterop.identityFunction());
}
@Override
RocksIterator wrapIterator(RocksDB db, ReadOptions readOptions, int segment) {
ColumnFamilyHandle handle = handles.get(segment);
if (handle != null) {
return db.newIterator(handle, readOptions);
}
return null;
}
@Override
CompletionStage<Void> addSegments(IntSet segments) {
Flowable<Integer> segmentFlowable = Flowable.fromIterable(segments)
.filter(segment -> handles.get(segment) == null);
return blockingManager.subscribeBlockingConsumer(segmentFlowable, segment -> {
if (log.isTraceEnabled()) {
log.tracef("Creating column family for segment %d", segment);
}
byte[] cfName = byteArrayFromInt(segment);
try {
ColumnFamilyHandle handle = db.createColumnFamily(newDescriptor(cfName));
handles.set(segment, handle);
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}, "testng-addSegments");
}
@Override
CompletionStage<Void> removeSegments(IntSet segments) {
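// 'this' acts as a non-null sentinel for segments whose column family is already gone;
// the ofType() filter below drops it, leaving only real handles to remove.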
Flowable<ColumnFamilyHandle> handleFlowable = Flowable.fromIterable(segments)
.map(segment -> {
ColumnFamilyHandle cf = handles.getAndSet(segment, null);
return cf != null ? cf : this;
}).ofType(ColumnFamilyHandle.class);
return blockingManager.subscribeBlockingConsumer(handleFlowable, handle -> {
if (log.isTraceEnabled()) {
log.tracef("Dropping column family %s", handle);
}
try {
db.dropColumnFamily(handle);
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
handle.close();
}, "testng-removeSegments");
}
@Override
CompletionStage<Long> approximateSize(IntSet segments) {
return blockingManager.supplyBlocking(() -> {
long size = 0;
for (int segment : segments) {
ColumnFamilyHandle handle = getHandle(segment);
try {
size += Long.parseLong(db.getProperty(handle, "rocksdb.estimate-num-keys"));
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}
return size;
}, "rocksdb-approximateSize");
}
}
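// The expired db is a byte-ordered index from marshalled expiry timestamp to the marshalled
// key, or to an ExpiryBucket when several keys share a timestamp; purgeExpired() walks it in
// BYTEWISE_COMPARATOR order and stops at the first timestamp beyond 'now'.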
private void putExpireDbData(RocksDB expiredDb, ExpiryEntry entry) throws RocksDBException {
final byte[] expiryBytes = marshall(entry.expiry);
final byte[] existingBytes = expiredDb.get(expiryBytes);
if (existingBytes != null) {
// in the case of collision make the value a List ...
final Object existing = unmarshall(existingBytes);
if (existing instanceof ExpiryBucket) {
((ExpiryBucket) existing).entries.add(entry.keyBytes);
expiredDb.put(expiryBytes, marshall(existing));
} else {
ExpiryBucket bucket = new ExpiryBucket(existingBytes, entry.keyBytes);
expiredDb.put(expiryBytes, marshall(bucket));
}
} else {
expiredDb.put(expiryBytes, entry.keyBytes);
}
}
/*
* Instead of iterating over every entry with a RocksIterator, delete the whole range between
* the smallest and largest possible keys (BEGIN_KEY/END_KEY) in a single call
*/
private void clearColumnFamily(ColumnFamilyHandle handle) {
try {
// when the data under a segment was removed, the handle will be null
if (handle != null) {
db.deleteRange(handle, BEGIN_KEY, END_KEY);
// We don't control the keys, the marshaller does.
// In theory it is possible that a custom marshaller would generate a key of 10k 0xff bytes
// That key would be after END_KEY, so the deleteRange call wouldn't remove it
// If there are remaining keys, remove.
try (ReadOptions iteratorOptions = new ReadOptions().setFillCache(false)) {
try (RocksIterator it = db.newIterator(handle, iteratorOptions)) {
for (it.seekToFirst(); it.isValid(); it.next()) {
db.delete(handle, it.key());
}
}
}
}
} catch (RocksDBException e) {
throw new PersistenceException(e);
}
}
private static byte[] createAndFillArray(int length, byte value) {
byte[] array = new byte[length];
Arrays.fill(array, value);
return array;
}
}
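The start() method above splits store properties on the "database." and "data." prefixes and hands them to DBOptions.getDBOptionsFromProps and ColumnFamilyOptions.getColumnFamilyOptionsFromProps respectively. A minimal sketch of passing such tuning through the configuration builder; the directory paths and option values are illustrative, and the option names must be valid RocksDB option keys:
// Sketch only: forwarding RocksDB tuning options via prefixed store properties.
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.persistence()
.addStore(RocksDBStoreConfigurationBuilder.class)
.location("/var/lib/ispn/rocksdb/data")
.expiredLocation("/var/lib/ispn/rocksdb/expired")
.addProperty("database.max_background_jobs", "4")    // prefix stripped -> DBOptions
.addProperty("data.write_buffer_size", "33554432");  // prefix stripped -> ColumnFamilyOptions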
| 45,609 | 38.35289 | 170 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/RocksDBStoreConfigurationBuilder.java |
package org.infinispan.persistence.rocksdb.configuration;
import static org.infinispan.persistence.rocksdb.configuration.RocksDBExpirationConfiguration.EXPIRED_LOCATION;
import static org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfiguration.COMPRESSION_TYPE;
import static org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfiguration.LOCATION;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.persistence.PersistenceUtil;
/**
* @author <a href="mailto:rtsang@redhat.com">Ray Tsang</a>
*/
public class RocksDBStoreConfigurationBuilder extends AbstractStoreConfigurationBuilder<RocksDBStoreConfiguration, RocksDBStoreConfigurationBuilder> {
protected RocksDBExpirationConfigurationBuilder expiration = new RocksDBExpirationConfigurationBuilder();
public RocksDBStoreConfigurationBuilder(PersistenceConfigurationBuilder builder) {
this(builder, RocksDBStoreConfiguration.attributeDefinitionSet());
}
public RocksDBStoreConfigurationBuilder(PersistenceConfigurationBuilder builder, AttributeSet attributeSet) {
super(builder, attributeSet);
}
public RocksDBStoreConfigurationBuilder location(String location) {
attributes.attribute(LOCATION).set(location);
return self();
}
public RocksDBStoreConfigurationBuilder expiredLocation(String expiredLocation) {
expiration.expiredLocation(expiredLocation);
return self();
}
@Deprecated
public RocksDBStoreConfigurationBuilder blockSize(int blockSize) {
return self();
}
@Deprecated
public RocksDBStoreConfigurationBuilder cacheSize(long cacheSize) {
return self();
}
/**
* @deprecated Since 10.1, there is no more queue in {@link org.infinispan.persistence.rocksdb.RocksDBStore}
*/
@Deprecated
public RocksDBStoreConfigurationBuilder expiryQueueSize(int expiryQueueSize) {
expiration.expiryQueueSize(expiryQueueSize);
return self();
}
/**
* @deprecated Since 12.0, no longer used. Will be removed in 15.0
*/
@Deprecated
public RocksDBStoreConfigurationBuilder clearThreshold(int clearThreshold) {
return self();
}
public RocksDBStoreConfigurationBuilder compressionType(CompressionType compressionType) {
attributes.attribute(COMPRESSION_TYPE).set(compressionType);
return self();
}
@Override
public void validate() {
// how do you validate required attributes?
super.validate();
expiration.validate();
}
@Override
public void validate(GlobalConfiguration globalConfig) {
PersistenceUtil.validateGlobalStateStoreLocation(globalConfig, RocksDBStoreConfiguration.class.getSimpleName(),
attributes.attribute(LOCATION),
expiration.attributes().attribute(EXPIRED_LOCATION));
super.validate(globalConfig);
}
@Override
public RocksDBStoreConfiguration create() {
return new RocksDBStoreConfiguration(attributes.protect(), async.create(), expiration.create());
}
@Override
public Builder<?> read(RocksDBStoreConfiguration template, Combine combine) {
super.read(template, combine);
expiration.read(template.expiration(), combine);
return self();
}
@Override
public RocksDBStoreConfigurationBuilder self() {
return this;
}
}
| 3,672 | 33.650943 | 150 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/package-info.java |
/**
* Configuration for {@link org.infinispan.persistence.rocksdb.RocksDBStore}.
*
* @api.public
*/
package org.infinispan.persistence.rocksdb.configuration;
| 162 | 22.285714 | 77 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/RocksDBExpirationConfigurationBuilder.java |
package org.infinispan.persistence.rocksdb.configuration;
import static org.infinispan.persistence.rocksdb.configuration.RocksDBExpirationConfiguration.EXPIRED_LOCATION;
import static org.infinispan.persistence.rocksdb.configuration.RocksDBExpirationConfiguration.EXPIRY_QUEUE_SIZE;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
/**
 * @since 10.0
*/
public class RocksDBExpirationConfigurationBuilder implements Builder<RocksDBExpirationConfiguration> {
private final AttributeSet attributes;
RocksDBExpirationConfigurationBuilder() {
attributes = RocksDBExpirationConfiguration.attributeDefinitionSet();
}
public AttributeSet attributes() {
return attributes;
}
public RocksDBExpirationConfigurationBuilder expiredLocation(String expiredLocation) {
attributes.attribute(EXPIRED_LOCATION).set(expiredLocation);
return this;
}
/**
* @deprecated Since 10.1, there is no more queue in {@link org.infinispan.persistence.rocksdb.RocksDBStore}
*/
@Deprecated
RocksDBExpirationConfigurationBuilder expiryQueueSize(int expiryQueueSize) {
attributes.attribute(EXPIRY_QUEUE_SIZE).set(expiryQueueSize);
return this;
}
@Override
public RocksDBExpirationConfiguration create() {
return new RocksDBExpirationConfiguration(attributes.protect());
}
@Override
public Builder<?> read(RocksDBExpirationConfiguration template, Combine combine) {
attributes.read(template.attributes(), combine);
return this;
}
}
| 1,646 | 31.294118 | 112 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/Element.java |
package org.infinispan.persistence.rocksdb.configuration;
import java.util.HashMap;
import java.util.Map;
/**
* An enumeration of all the recognized XML element local names for the RocksDB cache store
*
* @author <a href="mailto:rtsang@redhat.com">Ray Tsang</a>
*/
public enum Element {
// must be first
UNKNOWN(null),
COMPRESSION("compression"),
EXPIRATION("expiration"),
ROCKSDB_STORE("rocksdb-store"),
;
private final String name;
Element(final String name) {
this.name = name;
}
/**
* Get the local name of this element.
*
* @return the local name
*/
public String getLocalName() {
return name;
}
private static final Map<String, Element> MAP;
static {
final Map<String, Element> map = new HashMap<>(values().length);
for (Element element : values()) {
final String name = element.getLocalName();
if (name != null) {
map.put(name, element);
}
}
MAP = map;
}
public static Element forName(final String localName) {
final Element element = MAP.get(localName);
return element == null ? UNKNOWN : element;
}
@Override
public String toString() {
return name;
}
}
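The static map gives forName a constant-time, null-safe reverse lookup; unrecognized names intentionally resolve to UNKNOWN rather than null. For example:
// Sketch only: reverse lookup behaviour of the element names.
assert Element.forName("compression") == Element.COMPRESSION;
assert Element.forName("not-an-element") == Element.UNKNOWN;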
| 1,245 | 20.482759 | 91 | java |
null | infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/RocksDBStoreConfiguration.java |
package org.infinispan.persistence.rocksdb.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.ConfigurationFor;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.serializing.SerializedWith;
import org.infinispan.persistence.rocksdb.RocksDBStore;
/**
*
* @author <a href="mailto:rtsang@redhat.com">Ray Tsang</a>
*
*/
@ConfigurationFor(RocksDBStore.class)
@BuiltBy(RocksDBStoreConfigurationBuilder.class)
@SerializedWith(RocksDBStoreConfigurationSerializer.class)
public class RocksDBStoreConfiguration extends AbstractStoreConfiguration<RocksDBStoreConfiguration> {
final static AttributeDefinition<String> LOCATION = AttributeDefinition.builder(org.infinispan.persistence.rocksdb.configuration.Attribute.PATH, null, String.class).immutable().build();
public final static AttributeDefinition<CompressionType> COMPRESSION_TYPE = AttributeDefinition.builder(org.infinispan.persistence.rocksdb.configuration.Attribute.COMPRESSION_TYPE, CompressionType.NONE).immutable().autoPersist(false).build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(RocksDBStoreConfiguration.class, AbstractStoreConfiguration.attributeDefinitionSet(), LOCATION, COMPRESSION_TYPE);
}
private final RocksDBExpirationConfiguration expiration;
public RocksDBStoreConfiguration(AttributeSet attributes, AsyncStoreConfiguration async, RocksDBExpirationConfiguration expiration) {
super(Element.ROCKSDB_STORE, attributes, async);
this.expiration = expiration;
}
public RocksDBExpirationConfiguration expiration() {
return expiration;
}
public String location() {
return attributes.attribute(LOCATION).get();
}
public String expiredLocation() {
return expiration.expiredLocation();
}
public CompressionType compressionType() {
return attributes.attribute(COMPRESSION_TYPE).get();
}
/**
* @deprecated Since 10.1, there is no longer a queue in {@link org.infinispan.persistence.rocksdb.RocksDBStore}
*/
@Deprecated
public int expiryQueueSize() {
return expiration.expiryQueueSize();
}
}
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/RocksDBExpirationConfiguration.java
package org.infinispan.persistence.rocksdb.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.attributes.ConfigurationElement;
/**
* @since 10.0
*/
public class RocksDBExpirationConfiguration extends ConfigurationElement<RocksDBExpirationConfiguration> {
final static AttributeDefinition<String> EXPIRED_LOCATION = AttributeDefinition.builder(org.infinispan.persistence.rocksdb.configuration.Attribute.PATH, null, String.class).immutable().autoPersist(false).build();
final static AttributeDefinition<Integer> EXPIRY_QUEUE_SIZE = AttributeDefinition.builder(org.infinispan.persistence.rocksdb.configuration.Attribute.EXPIRY_QUEUE_SIZE, 10000).immutable().autoPersist(false).build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(RocksDBExpirationConfiguration.class, EXPIRED_LOCATION, EXPIRY_QUEUE_SIZE);
}
RocksDBExpirationConfiguration(AttributeSet attributes) {
super(Element.EXPIRATION, attributes);
}
public String expiredLocation() {
return attributes.attribute(EXPIRED_LOCATION).get();
}
/**
* @deprecated Since 10.1, there is no longer a queue in {@link org.infinispan.persistence.rocksdb.RocksDBStore}
*/
@Deprecated
int expiryQueueSize() {
return attributes.attribute(EXPIRY_QUEUE_SIZE).get();
}
}
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/RocksDBStoreConfigurationParser.java
package org.infinispan.persistence.rocksdb.configuration;
import static org.infinispan.configuration.parsing.ParseUtils.ignoreAttribute;
import static org.infinispan.persistence.rocksdb.configuration.RocksDBStoreConfigurationParser.NAMESPACE;
import org.infinispan.commons.configuration.io.ConfigurationReader;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ConfigurationParser;
import org.infinispan.configuration.parsing.Namespace;
import org.infinispan.configuration.parsing.ParseUtils;
import org.infinispan.configuration.parsing.Parser;
import org.kohsuke.MetaInfServices;
/**
* RocksDB XML Parser
* @author Tristan Tarrant
* @since 9.0
*/
@MetaInfServices
@Namespace(root = "rocksdb-store")
@Namespace(uri = NAMESPACE + "*", root = "rocksdb-store", since = "9.0")
public class RocksDBStoreConfigurationParser implements ConfigurationParser {
static final String NAMESPACE = Parser.NAMESPACE + "store:rocksdb:";
public RocksDBStoreConfigurationParser() {
}
@Override
public void readElement(ConfigurationReader reader, ConfigurationBuilderHolder holder) {
ConfigurationBuilder builder = holder.getCurrentConfigurationBuilder();
Element element = Element.forName(reader.getLocalName());
switch (element) {
case ROCKSDB_STORE: {
parseRocksDBCacheStore(reader, builder.persistence().addStore(RocksDBStoreConfigurationBuilder.class));
break;
}
default: {
throw ParseUtils.unexpectedElement(reader);
}
}
}
private void parseRocksDBCacheStore(ConfigurationReader reader, RocksDBStoreConfigurationBuilder builder) {
String path = null;
String relativeTo = null;
for (int i = 0; i < reader.getAttributeCount(); i++) {
ParseUtils.requireNoNamespaceAttribute(reader, i);
String value = reader.getAttributeValue(i);
String attrName = reader.getAttributeName(i);
Attribute attribute = Attribute.forName(attrName);
switch (attribute) {
case PATH: {
path = value;
break;
}
case RELATIVE_TO: {
relativeTo = ParseUtils.requireAttributeProperty(reader, i);
break;
}
case CLEAR_THRESHOLD: {
if (!reader.getSchema().since(12, 0)) {
ignoreAttribute(reader, i);
break;
} else {
throw ParseUtils.attributeRemoved(reader, i);
}
}
case BLOCK_SIZE: {
builder.blockSize(Integer.parseInt(value));
break;
}
case CACHE_SIZE: {
builder.cacheSize(Long.parseLong(value));
break;
}
default: {
Parser.parseStoreAttribute(reader, i, builder);
}
}
}
path = ParseUtils.resolvePath(path, relativeTo);
if (path != null) {
builder.location(path);
}
while (reader.inTag()) {
Element element = Element.forName(reader.getLocalName());
switch (element) {
case EXPIRATION: {
this.parseExpiry(reader, builder);
break;
}
case COMPRESSION: {
this.parseCompression(reader, builder);
break;
}
default: {
Parser.parseStoreElement(reader, builder);
}
}
}
}
private void parseExpiry(ConfigurationReader reader, RocksDBStoreConfigurationBuilder builder) {
for (int i = 0; i < reader.getAttributeCount(); i++) {
String value = reader.getAttributeValue(i);
Attribute attribute = Attribute.forName(reader.getAttributeName(i));
switch (attribute) {
case PATH: {
builder.expiredLocation(value);
break;
}
case QUEUE_SIZE: {
if (!reader.getSchema().since(12, 0)) {
ignoreAttribute(reader, i);
break;
} else {
throw ParseUtils.attributeRemoved(reader, i);
}
}
default:
throw ParseUtils.unexpectedAttribute(reader, i);
}
}
ParseUtils.requireNoContent(reader);
}
private void parseCompression(ConfigurationReader reader, RocksDBStoreConfigurationBuilder builder) {
for (int i = 0; i < reader.getAttributeCount(); i++) {
String value = reader.getAttributeValue(i);
Attribute attribute = Attribute.forName(reader.getAttributeName(i));
switch (attribute) {
case TYPE: {
builder.compressionType(CompressionType.valueOf(value));
break;
}
default:
throw ParseUtils.unexpectedAttribute(reader, i);
}
}
ParseUtils.requireNoContent(reader);
}
@Override
public Namespace[] getNamespaces() {
return ParseUtils.getNamespaceAnnotations(getClass());
}
}
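For reference, a hedged sketch of the equivalent programmatic configuration; the builder method names (location, expiredLocation, compressionType, blockSize, cacheSize) are taken from the parser calls above, while the paths and values are assumptions.
// Sketch: programmatic equivalent of the XML this parser consumes.
ConfigurationBuilder cb = new ConfigurationBuilder();
cb.persistence()
   .addStore(RocksDBStoreConfigurationBuilder.class)
   .location("/tmp/rocksdb/data")             // path attribute (hypothetical value)
   .expiredLocation("/tmp/rocksdb/expired")   // <expiration path="..."/> (hypothetical value)
   .compressionType(CompressionType.SNAPPY)   // <compression type="..."/>
   .blockSize(4096)                           // block-size attribute
   .cacheSize(1_048_576L);                    // cache-size attribute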
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/Attribute.java
package org.infinispan.persistence.rocksdb.configuration;
import java.util.HashMap;
import java.util.Map;
/**
* Enumerates the attributes used by the RocksDB cache store configuration
*
* @author <a href="mailto:rtsang@redhat.com">Ray Tsang</a>
*/
public enum Attribute {
// must be first
UNKNOWN(null),
BLOCK_SIZE("block-size"),
CACHE_SIZE("cache-size"),
CLEAR_THRESHOLD("clear-threshold"),
COMPRESSION_TYPE("compressionType"),
EXPIRED_LOCATION("expiredLocation"),
EXPIRY_QUEUE_SIZE("expiryQueueSize"),
IMPLEMENTATION_TYPE("implementationType"),
LOCATION("location"),
PATH("path"),
RELATIVE_TO("relative-to"),
QUEUE_SIZE("queue-size"),
TYPE("type"),
;
private final String name;
Attribute(final String name) {
this.name = name;
}
/**
* Get the local name of this attribute.
*
* @return the local name
*/
public String getLocalName() {
return name;
}
private static final Map<String, Attribute> attributes;
static {
Map<String, Attribute> map = new HashMap<>();
for (Attribute attribute : values()) {
final String name = attribute.getLocalName();
if (name != null) {
map.put(name, attribute);
}
}
attributes = map;
}
public static Attribute forName(final String localName) {
final Attribute attribute = attributes.get(localName);
return attribute == null ? UNKNOWN : attribute;
}
@Override
public String toString() {
return name;
}
}
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/CompressionType.java
package org.infinispan.persistence.rocksdb.configuration;
public enum CompressionType {
NONE(org.rocksdb.CompressionType.NO_COMPRESSION),
SNAPPY(org.rocksdb.CompressionType.SNAPPY_COMPRESSION),
ZLIB(org.rocksdb.CompressionType.ZLIB_COMPRESSION),
BZLIB2(org.rocksdb.CompressionType.BZLIB2_COMPRESSION),
LZ4(org.rocksdb.CompressionType.LZ4_COMPRESSION),
LZ4HC(org.rocksdb.CompressionType.LZ4HC_COMPRESSION),
XPRESS(org.rocksdb.CompressionType.XPRESS_COMPRESSION),
ZSTD(org.rocksdb.CompressionType.ZSTD_COMPRESSION);
private final org.rocksdb.CompressionType value;
CompressionType(org.rocksdb.CompressionType value) {
this.value = value;
}
public org.rocksdb.CompressionType getValue() {
return value;
}
}
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/configuration/RocksDBStoreConfigurationSerializer.java
package org.infinispan.persistence.rocksdb.configuration;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.serializing.AbstractStoreSerializer;
import org.infinispan.configuration.serializing.ConfigurationSerializer;
/**
* RocksDBStoreConfigurationSerializer.
*
* @author Tristan Tarrant
* @since 9.0
*/
public class RocksDBStoreConfigurationSerializer extends AbstractStoreSerializer implements ConfigurationSerializer<RocksDBStoreConfiguration> {
@Override
public void serialize(ConfigurationWriter writer, RocksDBStoreConfiguration configuration) {
AttributeSet attributes = configuration.attributes();
writer.writeStartElement(Element.ROCKSDB_STORE);
writer.writeDefaultNamespace(RocksDBStoreConfigurationParser.NAMESPACE + Version.getMajorMinor());
configuration.attributes().write(writer);
writeCommonStoreSubAttributes(writer, configuration);
if (attributes.attribute(RocksDBStoreConfiguration.COMPRESSION_TYPE).isModified()) {
writer.writeStartElement(Element.COMPRESSION);
attributes.write(writer, RocksDBStoreConfiguration.COMPRESSION_TYPE, Attribute.TYPE);
writer.writeEndElement();
}
RocksDBExpirationConfiguration expiration = configuration.expiration();
AttributeSet expirationAttrs = expiration.attributes();
if (expirationAttrs.attribute(RocksDBExpirationConfiguration.EXPIRED_LOCATION).isModified() || expirationAttrs.attribute(RocksDBExpirationConfiguration.EXPIRY_QUEUE_SIZE).isModified()) {
writer.writeStartElement(Element.EXPIRATION);
expirationAttrs.write(writer, RocksDBExpirationConfiguration.EXPIRED_LOCATION, Attribute.PATH);
expirationAttrs.write(writer, RocksDBExpirationConfiguration.EXPIRY_QUEUE_SIZE, Attribute.QUEUE_SIZE);
writer.writeEndElement();
}
writeCommonStoreElements(writer, configuration);
writer.writeEndElement();
}
}
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/logging/Log.java
package org.infinispan.persistence.rocksdb.logging;
import org.infinispan.commons.CacheConfigurationException;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
* Log abstraction for the RocksDB cache store. For this module, message ids ranging from 23001 to
* 24000 inclusive have been reserved.
*/
@MessageLogger(projectCode = "ISPN")
public interface Log extends BasicLogger {
// @LogMessage(level = ERROR)
// @Message(value = "Error executing parallel store task", id = 252)
// void errorExecutingParallelStoreTask(@Cause Throwable cause);
// @LogMessage(level = INFO)
// @Message(value = "Ignoring XML attribute %s, please remove from configuration file", id = 293)
// void ignoreXmlAttribute(Object attribute);
@Message(value = "RocksDB properties %s, contains an unknown property", id = 294)
CacheConfigurationException rocksDBUnknownPropertiesSupplied(String properties);
}
infinispan-main/persistence/rocksdb/src/main/java/org/infinispan/persistence/rocksdb/internal/RocksDBBlockHoundIntegration.java
package org.infinispan.persistence.rocksdb.internal;
import org.kohsuke.MetaInfServices;
import org.rocksdb.RocksDB;
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;
@MetaInfServices
public class RocksDBBlockHoundIntegration implements BlockHoundIntegration {
@Override
public void applyTo(BlockHound.Builder builder) {
try {
Class.forName("org.rocksdb.RocksDB");
builder.markAsBlocking(RocksDB.class, "get", "(Lorg/rocksdb/ColumnFamilyHandle;[B)[B");
builder.markAsBlocking(RocksDB.class, "put", "(Lorg/rocksdb/ColumnFamilyHandle;[B[B)V");
builder.markAsBlocking(RocksDB.class, "delete", "(Lorg/rocksdb/ColumnFamilyHandle;[B)V");
builder.markAsBlocking(RocksDB.class, "write", "(Lorg/rocksdb/WriteOptions;Lorg/rocksdb/WriteBatch;)V");
builder.markAsBlocking(RocksDB.class, "close", "()V");
builder.markAsBlocking(RocksDB.class, "open", "(Lorg/rocksdb/DBOptions;Ljava/lang/String;Ljava/util/List;Ljava/util/List;)Lorg/rocksdb/RocksDB;");
} catch (ClassNotFoundException e) {
// RocksDB is not on the classpath; skip registering the blocking-method hints
}
}
}
infinispan-main/query-dsl/src/test/java/org/infinispan/query/dsl/impl/CreationTest.java
package org.infinispan.query.dsl.impl;
import org.infinispan.query.dsl.FilterConditionContext;
import org.infinispan.query.dsl.Query;
import org.infinispan.query.dsl.QueryFactory;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* @author anistor@redhat.com
* @since 7.0
*/
public class CreationTest {
@Rule
public ExpectedException expectedException = ExpectedException.none();
@Test
public void testWithDifferentFactory1() {
QueryFactory qf1 = new DummyQueryFactory();
QueryFactory qf2 = new DummyQueryFactory();
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("ISPN014809: The given condition was created by another factory");
qf1.from("MyDummyType")
.not(qf2.having("attr1").eq("1")); // exception expected
}
@Test
public void testWithDifferentFactory2() {
QueryFactory qf1 = new DummyQueryFactory();
QueryFactory qf2 = new DummyQueryFactory();
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("ISPN014809: The given condition was created by another factory");
qf1.from("MyDummyType")
.having("attr1").eq("1")
.and(qf2.having("attr2").eq("2")); // exception expected
}
@Test
public void testWithDifferentFactory3() {
QueryFactory qf1 = new DummyQueryFactory();
QueryFactory qf2 = new DummyQueryFactory();
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("ISPN014809: The given condition was created by another factory");
qf1.from("MyDummyType")
.having("attr1").eq("1")
.or(qf2.having("attr2").eq("2")); // exception expected
}
@Test
public void testWithDifferentBuilder1() {
QueryFactory qf1 = new DummyQueryFactory();
FilterConditionContext fcc = qf1.having("attr1").eq("1");
Query q1 = qf1.from("MyDummyType")
.not(fcc)
.build();
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("The given condition is already in use by another builder");
qf1.from("MyDummyType")
.not(fcc); // exception expected
}
@Test
public void testWithDifferentBuilder2() {
QueryFactory qf1 = new DummyQueryFactory();
FilterConditionContext fcc = qf1.having("attr1").eq("1");
Query q1 = qf1.from("MyDummyType")
.not(fcc)
.build();
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("The given condition is already in use by another builder");
qf1.from("MyDummyType")
.having("attr1").eq("1")
.and(fcc); // exception expected
}
@Test
public void testWithDifferentBuilder3() {
QueryFactory qf1 = new DummyQueryFactory();
FilterConditionContext fcc = qf1.having("attr1").eq("1");
Query q1 = qf1.from("MyDummyType")
.not(fcc)
.build();
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("The given condition is already in use by another builder");
qf1.from("MyDummyType")
.having("attr1").eq("1")
.or(fcc); // exception expected
}
}
infinispan-main/query-dsl/src/test/java/org/infinispan/query/dsl/impl/DummyQuery.java
package org.infinispan.query.dsl.impl;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.query.dsl.Query;
import org.infinispan.query.dsl.QueryResult;
import org.infinispan.query.dsl.TotalHitCount;
/**
* @author anistor@redhat.com
* @since 7.0
*/
class DummyQuery<T> implements Query<T> {
@Override
public Map<String, Object> getParameters() {
return null;
}
@Override
public Query<T> setParameter(String paramName, Object paramValue) {
return this;
}
@Override
public Query<T> setParameters(Map<String, Object> paramValues) {
return null;
}
@Override
public CloseableIterator<T> iterator() {
return new CloseableIterator<>() {
@Override
public void close() {
}
@Override
public boolean hasNext() {
return false;
}
@Override
public T next() {
throw new NoSuchElementException();
}
};
}
@Override
public <K> CloseableIterator<Map.Entry<K, T>> entryIterator() {
return new CloseableIterator<>() {
@Override
public void close() {
}
@Override
public boolean hasNext() {
return false;
}
@Override
public Map.Entry<K, T> next() {
throw new NoSuchElementException();
}
};
}
@Override
public Query<T> timeout(long timeout, TimeUnit timeUnit) {
return this;
}
@Override
public Query<T> local(boolean local) {
return this;
}
@Override
public String getQueryString() {
return null;
}
@Override
public List<T> list() {
return Collections.emptyList();
}
@Override
public QueryResult<T> execute() {
return new QueryResult<T>() {
@Override
public TotalHitCount count() {
return TotalHitCount.EMPTY;
}
@Override
public List<T> list() {
return Collections.emptyList();
}
};
}
@Override
public int executeStatement() {
return 0;
}
@Override
public int getResultSize() {
return 0;
}
@Override
public String[] getProjection() {
return null;
}
@Override
public boolean hasProjections() {
return false;
}
@Override
public long getStartOffset() {
return 0;
}
@Override
public Query<T> startOffset(long startOffset) {
return this;
}
@Override
public int getMaxResults() {
return Integer.MAX_VALUE;
}
@Override
public Query<T> maxResults(int maxResults) {
return this;
}
@Override
public Integer hitCountAccuracy() {
return null;
}
@Override
public Query<T> hitCountAccuracy(int hitCountAccuracy) {
return this;
}
}
infinispan-main/query-dsl/src/test/java/org/infinispan/query/dsl/impl/DummyQueryFactory.java
package org.infinispan.query.dsl.impl;
import org.infinispan.query.dsl.Query;
/**
* @author anistor@redhat.com
* @since 7.0
*/
class DummyQueryFactory extends BaseQueryFactory {
@Override
public <T> Query<T> create(String queryString) {
return new DummyQuery<>();
}
@Override
public DummyQueryBuilder from(Class<?> entityType) {
return new DummyQueryBuilder(this, entityType.getName());
}
@Override
public DummyQueryBuilder from(String entityType) {
return new DummyQueryBuilder(this, entityType);
}
}
infinispan-main/query-dsl/src/test/java/org/infinispan/query/dsl/impl/DummyQueryBuilder.java
package org.infinispan.query.dsl.impl;
import org.infinispan.query.dsl.Query;
/**
* @author anistor@redhat.com
* @since 7.0
*/
class DummyQueryBuilder extends BaseQueryBuilder {
DummyQueryBuilder(DummyQueryFactory queryFactory, String rootTypeName) {
super(queryFactory, rootTypeName);
}
@Override
public <T> Query<T> build() {
return new DummyQuery<>();
}
}
infinispan-main/query-dsl/src/main/java/org/infinispan/query/SearchTimeoutException.java
package org.infinispan.query;
/**
* Thrown when a query timeout occurs.
*
* @since 11.0
*/
public class SearchTimeoutException extends RuntimeException {
public SearchTimeoutException() {
}
public SearchTimeoutException(String msg) {
super(msg);
}
}
infinispan-main/query-dsl/src/main/java/org/infinispan/query/api/continuous/package-info.java
/**
* Continuous querying API.
*
* @author anistor@redhat.com
* @since 8.2
*
* @api.public
*/
package org.infinispan.query.api.continuous;
infinispan-main/query-dsl/src/main/java/org/infinispan/query/api/continuous/ContinuousQueryListener.java
package org.infinispan.query.api.continuous;
/**
* Listener for continuous query events.
*
* @author anistor@redhat.com
* @since 8.2
*/
public interface ContinuousQueryListener<K, V> {
/**
* Receives notification that a cache entry has joined the matching set. This is invoked initially when receiving the
* existing entries that match the query and subsequently whenever a previously non-matching entry is updated and
* starts to match.
*
* @param key the key of the joining entry
* @param value the joining entry or the Object[] projection if a projection was specified
*/
default void resultJoining(K key, V value) {
}
/**
* Receives notification that a cache entry from the matching set was updated and continues to match the query. The
* modified attributes causing this update are not necessarily part of the query.
*
* @param key the key of the updated entry
* @param value the updated entry or the Object[] projection if a projection was specified
*/
default void resultUpdated(K key, V value) {
}
/**
* Receives notification that a cache entry has left the matching set. This can happen due to an update or removal.
*
* @param key the key of the leaving entry
*/
default void resultLeaving(K key) {
}
}
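A minimal listener sketch under the contract above; the MatchTracker class and its backing map are assumptions for illustration, not part of the API.
// Hypothetical listener that mirrors the current matching set of a query.
class MatchTracker<K, V> implements ContinuousQueryListener<K, V> {
   private final java.util.concurrent.ConcurrentMap<K, V> matches =
         new java.util.concurrent.ConcurrentHashMap<>();
   @Override
   public void resultJoining(K key, V value) {
      matches.put(key, value);   // entry started matching the query
   }
   @Override
   public void resultUpdated(K key, V value) {
      matches.put(key, value);   // entry changed but still matches
   }
   @Override
   public void resultLeaving(K key) {
      matches.remove(key);       // entry no longer matches
   }
}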
infinispan-main/query-dsl/src/main/java/org/infinispan/query/api/continuous/ContinuousQuery.java
package org.infinispan.query.api.continuous;
import java.util.List;
import java.util.Map;
import org.infinispan.query.dsl.Query;
/**
* A container of continuous query listeners for a cache.
* <p>
* Implementations are not expected to be thread-safe.
*
* @author anistor@redhat.com
* @since 8.2
*/
public interface ContinuousQuery<K, V> {
/**
* Add a listener for a continuous query.
*
* @param queryString the query
* @param listener the listener
*/
<C> void addContinuousQueryListener(String queryString, ContinuousQueryListener<K, C> listener);
/**
* Add a listener for a continuous query.
*
* @param queryString the query
* @param namedParameters the query parameters
* @param listener the listener
*/
<C> void addContinuousQueryListener(String queryString, Map<String, Object> namedParameters, ContinuousQueryListener<K, C> listener);
/**
* Add a listener for a continuous query.
*
* @param query the query object
* @param listener the listener
*/
<C> void addContinuousQueryListener(Query<?> query, ContinuousQueryListener<K, C> listener);
/**
* Remove a continuous query listener.
*
* @param listener the listener to remove
*/
void removeContinuousQueryListener(ContinuousQueryListener<K, ?> listener);
/**
* Get the list of currently registered listeners.
*/
List<ContinuousQueryListener<K, ?>> getListeners();
/**
* Unregisters all listeners.
*/
void removeAllListeners();
}
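Tying the two interfaces together, a hedged registration sketch; the query string, parameter, and the MatchTracker listener from the previous sketch are assumptions, and obtaining the ContinuousQuery instance from a cache is left to the caller.
// Sketch: registering and later removing a continuous query listener.
void track(ContinuousQuery<String, Object> cq) {
   ContinuousQueryListener<String, Object> listener = new MatchTracker<>();
   cq.addContinuousQueryListener(
         "FROM sample.Person WHERE age >= :minAge",  // hypothetical Ickle query
         java.util.Map.<String, Object>of("minAge", 18),
         listener);
   // ... when no longer needed:
   cq.removeContinuousQueryListener(listener);
}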
infinispan-main/query-dsl/src/main/java/org/infinispan/query/dsl/package-info.java
/**
* Query DSL API. This API offers a fluent builder for the Ickle query string.
* <p>
* <b>WARNING:</b> Most classes in this package are deprecated, although the package itself is not entirely deprecated
* yet. See the deprecation note on {@link org.infinispan.query.dsl.QueryBuilder}. Avoid the deprecated methods;
* otherwise you will have difficulty porting your code to the new query API introduced in Infinispan 12.
*
* @author anistor@redhat.com
* @since 6.0
*
* @api.public
*/
package org.infinispan.query.dsl;
infinispan-main/query-dsl/src/main/java/org/infinispan/query/dsl/Expression.java
package org.infinispan.query.dsl;
import org.infinispan.query.dsl.impl.ParameterExpression;
import org.infinispan.query.dsl.impl.PathExpression;
/**
* @author anistor@redhat.com
* @since 8.0
* @deprecated since 10.1. See deprecation note on {@link QueryBuilder}.
*/
@Deprecated
public interface Expression {
static Expression param(String paramName) {
return new ParameterExpression(paramName);
}
static Expression property(String attributePath) {
return new PathExpression(null, attributePath);
}
static Expression count(String attributePath) {
return new PathExpression(PathExpression.AggregationType.COUNT, attributePath);
}
static Expression sum(String attributePath) {
return new PathExpression(PathExpression.AggregationType.SUM, attributePath);
}
static Expression avg(String attributePath) {
return new PathExpression(PathExpression.AggregationType.AVG, attributePath);
}
static Expression min(String attributePath) {
return new PathExpression(PathExpression.AggregationType.MIN, attributePath);
}
static Expression max(String attributePath) {
return new PathExpression(PathExpression.AggregationType.MAX, attributePath);
}
}
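A hedged sketch of how these factories were combined with the equally deprecated QueryBuilder for aggregation; the select/groupBy calls are recalled from the legacy DSL, the entity name and queryFactory instance are hypothetical, and new code should use an Ickle query string instead.
// Deprecated-DSL sketch: average age per surname (hypothetical entity).
Query<Object[]> q = queryFactory.from("sample.Person")
      .select(Expression.property("surname"), Expression.avg("age"))
      .groupBy("surname")
      .build();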