repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
infinispan-main/integrationtests/endpoints-interop-it/src/test/java/org/infinispan/it/endpoints/NonIndexJsonTest.java
|
package org.infinispan.it.endpoints;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
 * Verifies JSON/Hot Rod interoperability when the target cache is not indexed.
 *
 * @since 9.2
 */
@Test(groups = "functional", testName = "it.endpoints.NonIndexJsonTest")
public class NonIndexJsonTest extends JsonIndexingProtobufStoreTest {
   @Override
   protected ConfigurationBuilder getIndexCacheConfiguration() {
      // Same protostream key/value encoding as the indexed parent test,
      // but deliberately without enabling indexing on the cache.
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.encoding().key().mediaType(MediaType.APPLICATION_PROTOSTREAM_TYPE);
      builder.encoding().value().mediaType(MediaType.APPLICATION_PROTOSTREAM_TYPE);
      return builder;
   }
}
| 804
| 34
| 96
|
java
|
null |
infinispan-main/integrationtests/endpoints-interop-it/src/test/java/org/infinispan/it/endpoints/EndpointsCacheFactory.java
|
package org.infinispan.it.endpoints;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.killRemoteCacheManager;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.killServers;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.startHotRodServer;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_OBJECT;
import static org.infinispan.server.core.test.ServerTestingUtil.findFreePort;
import static org.infinispan.server.core.test.ServerTestingUtil.startProtocolServer;
import static org.infinispan.server.memcached.test.MemcachedTestingUtil.killMemcachedClient;
import static org.infinispan.server.memcached.test.MemcachedTestingUtil.killMemcachedServer;
import static org.infinispan.server.memcached.test.MemcachedTestingUtil.serverBuilder;
import static org.infinispan.test.TestingUtil.killCacheManagers;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;
import org.infinispan.client.rest.RestCacheClient;
import org.infinispan.client.rest.RestClient;
import org.infinispan.client.rest.configuration.RestClientConfigurationBuilder;
import org.infinispan.commons.dataconversion.IdentityEncoder;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.TranscoderMarshallerAdapter;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.internal.PrivateGlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.marshall.core.EncoderRegistry;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.rest.RestServer;
import org.infinispan.rest.configuration.RestServerConfigurationBuilder;
import org.infinispan.server.core.DummyServerManagement;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.server.memcached.MemcachedServer;
import org.infinispan.server.memcached.configuration.MemcachedProtocol;
import org.infinispan.server.memcached.configuration.MemcachedServerConfigurationBuilder;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import net.spy.memcached.ClientMode;
import net.spy.memcached.ConnectionFactoryBuilder;
import net.spy.memcached.MemcachedClient;
import net.spy.memcached.transcoders.Transcoder;
/**
 * Takes care of construction and destruction of caches, servers and clients for each of the endpoints being tested.
 *
 * @author Galder Zamarreño
 * @since 5.3
 */
public class EndpointsCacheFactory<K, V> {
// Matches Infinispan's default numOwners; hash().numOwners is only set
// explicitly when the requested value differs from this.
private static final int DEFAULT_NUM_OWNERS = 2;
// Embedded side: one cache manager shared by all protocol servers.
private EmbeddedCacheManager cacheManager;
private HotRodServer hotrod;
private RemoteCacheManager hotrodClient;
private RestServer rest;
private MemcachedServer memcached;
private Cache<K, V> embeddedCache;
private RemoteCache<K, V> hotrodCache;
private RestClient restClient;
private RestCacheClient restCacheClient;
private MemcachedClient memcachedClient;
// Optional spymemcached transcoder for the memcached client; when present,
// the memcached server is also given a default cache name (see memcachedWithDecoder).
private final Transcoder<Object> transcoder;
private final String cacheName;
private final Marshaller marshaller;
private final CacheMode cacheMode;
private final SerializationContextInitializer contextInitializer;
private final int numOwners;
private final boolean l1Enable;
private final boolean memcachedWithDecoder;
// Private: instances are created via Builder.build(), which calls setup().
private EndpointsCacheFactory(String cacheName, Marshaller marshaller, CacheMode cacheMode, int numOwners, boolean l1Enable,
Transcoder<Object> transcoder, SerializationContextInitializer contextInitializer) {
this.cacheName = cacheName;
this.marshaller = marshaller;
this.cacheMode = cacheMode;
this.numOwners = numOwners;
this.l1Enable = l1Enable;
this.transcoder = transcoder;
// Supplying a custom transcoder implies the memcached endpoint uses a decoder.
this.memcachedWithDecoder = transcoder != null;
this.contextInitializer = contextInitializer;
}
// Boots the embedded cache first, then layers the Hot Rod, REST and
// memcached endpoints (plus their clients) on top of the same cache manager.
private EndpointsCacheFactory<K, V> setup() throws Exception {
createEmbeddedCache();
createHotRodCache();
createRestMemcachedCaches();
return this;
}
// Adds a regex to the manager's class allow list so tests can deserialize
// their domain classes.
void addRegexAllowList(String regex) {
cacheManager.getClassAllowList().addRegexps(regex);
}
private void createRestMemcachedCaches() throws Exception {
createRestCache();
createMemcachedCache();
}
// Creates the embedded cache manager and default cache according to the
// requested cache mode, owners, L1 and serialization context.
private void createEmbeddedCache() {
GlobalConfigurationBuilder globalBuilder;
if (cacheMode.isClustered()) {
globalBuilder = new GlobalConfigurationBuilder();
globalBuilder.transport().defaultTransport();
} else {
globalBuilder = new GlobalConfigurationBuilder().nonClusteredDefault();
}
globalBuilder.addModule(PrivateGlobalConfigurationBuilder.class).serverMode(true);
globalBuilder.defaultCacheName(cacheName);
if (contextInitializer != null)
globalBuilder.serialization().addContextInitializer(contextInitializer);
org.infinispan.configuration.cache.ConfigurationBuilder builder =
new org.infinispan.configuration.cache.ConfigurationBuilder();
// Store keys and values as plain objects so every endpoint can transcode.
builder.clustering().cacheMode(cacheMode)
.encoding().key().mediaType(MediaType.APPLICATION_OBJECT_TYPE)
.encoding().value().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
if (cacheMode.isDistributed() && numOwners != DEFAULT_NUM_OWNERS) {
builder.clustering().hash().numOwners(numOwners);
}
if (cacheMode.isDistributed() && l1Enable) {
builder.clustering().l1().enable();
}
cacheManager = cacheMode.isClustered()
? TestCacheManagerFactory.createClusteredCacheManager(globalBuilder, builder)
: TestCacheManagerFactory.createCacheManager(globalBuilder, builder);
embeddedCache = cacheManager.getCache(cacheName);
// If a custom marshaller was supplied and no transcoder can already convert
// between its media type and application/x-java-object, register one.
EncoderRegistry encoderRegistry = embeddedCache.getAdvancedCache().getComponentRegistry().getGlobalComponentRegistry().getComponent(EncoderRegistry.class);
if (marshaller != null) {
boolean isConversionSupported = encoderRegistry.isConversionSupported(marshaller.mediaType(), APPLICATION_OBJECT);
if (!isConversionSupported) {
encoderRegistry.registerTranscoder(new TranscoderMarshallerAdapter(marshaller));
}
}
}
private void createHotRodCache() {
createHotRodCache(startHotRodServer(cacheManager));
}
// Starts a Hot Rod client against the given server and obtains the test cache
// (the default cache when cacheName is empty).
private void createHotRodCache(HotRodServer server) {
hotrod = server;
hotrodClient = new RemoteCacheManager(new ConfigurationBuilder()
.addServers("localhost:" + hotrod.getPort())
.addJavaSerialAllowList(".*Person.*", ".*CustomEvent.*")
.marshaller(marshaller)
.addContextInitializer(contextInitializer)
.build());
hotrodCache = cacheName.isEmpty()
? hotrodClient.getCache()
: hotrodClient.getCache(cacheName);
}
// Starts the REST server on a free port and wires a REST client/cache client to it.
private void createRestCache() {
RestServer restServer = startProtocolServer(findFreePort(), p -> {
RestServerConfigurationBuilder builder = new RestServerConfigurationBuilder();
builder.port(p);
rest = new RestServer();
rest.setServerManagement(new DummyServerManagement(), true);
rest.start(builder.build(), cacheManager);
return rest;
});
RestClientConfigurationBuilder builder = new RestClientConfigurationBuilder();
builder.addServer().host(restServer.getHost()).port(restServer.getPort());
restClient = RestClient.forConfiguration(builder.build());
restCacheClient = restClient.cache(cacheName);
}
// Starts the text-protocol memcached server on a free port; the client
// encoding follows the marshaller's media type (octet-stream if none).
private void createMemcachedCache() throws IOException {
MediaType clientEncoding = marshaller == null ? MediaType.APPLICATION_OCTET_STREAM : marshaller.mediaType();
memcached = startProtocolServer(findFreePort(), p -> {
MemcachedServerConfigurationBuilder builder = serverBuilder().port(p);
builder.clientEncoding(clientEncoding).protocol(MemcachedProtocol.TEXT);
if (memcachedWithDecoder) {
builder.defaultCacheName(cacheName);
}
MemcachedServer server = new MemcachedServer();
server.start(builder.build(), cacheManager);
return server;
});
// 60s op timeout for the spymemcached client.
memcachedClient = createMemcachedClient(60000, memcached.getPort());
}
private MemcachedClient createMemcachedClient(long timeout, int port) throws IOException {
ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder().setOpTimeout(timeout).setClientMode(ClientMode.Static);
if (transcoder != null) {
builder.setTranscoder(transcoder);
}
return new MemcachedClient(builder.build(), Collections.singletonList(new InetSocketAddress("127.0.0.1", port)));
}
// Null-safe bulk teardown helper for tests that manage several factories.
public static void killCacheFactories(EndpointsCacheFactory... cacheFactories) {
if (cacheFactories != null) {
for (EndpointsCacheFactory cacheFactory : cacheFactories) {
if (cacheFactory != null)
cacheFactory.teardown();
}
}
}
// Stops clients before their servers, and the cache manager last; each field
// is nulled so a second teardown (or the kill* null-safe helpers) is harmless.
void teardown() {
Util.close(restClient);
restClient = null;
killRemoteCacheManager(hotrodClient);
hotrodClient = null;
killServers(hotrod);
hotrod = null;
killRestServer(rest);
rest = null;
killMemcachedClient(memcachedClient);
memcachedClient = null;
killMemcachedServer(memcached);
memcached = null;
killCacheManagers(cacheManager);
cacheManager = null;
}
private void killRestServer(RestServer rest) {
if (rest != null) {
try {
rest.stop();
} catch (Exception e) {
// Ignore
}
}
}
public Marshaller getMarshaller() {
return marshaller;
}
// Returns the embedded cache with identity encoding, i.e. raw stored values.
public Cache<K, V> getEmbeddedCache() {
return (Cache<K, V>) embeddedCache.getAdvancedCache().withEncoding(IdentityEncoder.class);
}
public RemoteCache<K, V> getHotRodCache() {
return hotrodCache;
}
public MemcachedClient getMemcachedClient() {
return memcachedClient;
}
int getMemcachedPort() {
return memcached.getPort();
}
public RestCacheClient getRestCacheClient() {
return restCacheClient;
}
HotRodServer getHotrodServer() {
return hotrod;
}
/**
 * Fluent builder for {@link EndpointsCacheFactory}; {@link #build()} also
 * starts all servers and clients via setup().
 */
public static class Builder<K, V> {
private CacheMode cacheMode;
private int numOwners = DEFAULT_NUM_OWNERS;
private boolean l1Enable = false;
private SerializationContextInitializer contextInitializer = null;
private String cacheName = "test";
private Marshaller marshaller = null;
private Transcoder<Object> transcoder = null;
public Builder<K, V> withCacheMode(CacheMode cacheMode) {
this.cacheMode = cacheMode;
return this;
}
public Builder<K, V> withNumOwners(int numOwners) {
this.numOwners = numOwners;
return this;
}
public Builder<K, V> withL1(boolean l1Enable) {
this.l1Enable = l1Enable;
return this;
}
public Builder<K, V> withContextInitializer(SerializationContextInitializer contextInitializer) {
this.contextInitializer = contextInitializer;
return this;
}
public Builder<K, V> withCacheName(String cacheName) {
this.cacheName = cacheName;
return this;
}
public Builder<K, V> withMarshaller(Marshaller marshaller) {
this.marshaller = marshaller;
return this;
}
public Builder<K, V> withMemcachedTranscoder(Transcoder<Object> transcoder) {
this.transcoder = transcoder;
return this;
}
public EndpointsCacheFactory<K, V> build() throws Exception {
EndpointsCacheFactory<K, V> endpointsCacheFactory =
new EndpointsCacheFactory<>(cacheName, marshaller, cacheMode, numOwners, l1Enable, transcoder, contextInitializer);
return endpointsCacheFactory.setup();
}
}
}
| 12,334
| 37.070988
| 161
|
java
|
null |
infinispan-main/integrationtests/jboss-marshalling-it/src/test/java/org/infinispan/it/marshalling/jboss/JBMARRemoteQueryDslConditionsTest.java
|
package org.infinispan.it.marshalling.jboss;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.killRemoteCacheManager;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.killServers;
import static org.infinispan.configuration.cache.IndexStorage.LOCAL_HEAP;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.Search;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.infinispan.client.hotrod.impl.query.RemoteQueryFactory;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.jboss.marshalling.commons.GenericJBossMarshaller;
import org.infinispan.query.dsl.QueryFactory;
import org.infinispan.query.dsl.embedded.QueryDslConditionsTest;
import org.infinispan.search.mapper.mapping.SearchMapping;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
/**
 * Test for query conditions (filtering). Exercises the whole query DSL on the sample domain model.
 * Uses jboss-marshalling and Hibernate Search annotations for configuring indexing.
 *
 * @author anistor@redhat.com
 * @since 9.1
 */
@Test(groups = "functional", testName = "client.hotrod.query.JBMARRemoteQueryDslConditionsTest")
public class JBMARRemoteQueryDslConditionsTest extends QueryDslConditionsTest {
protected HotRodServer hotRodServer;
protected RemoteCacheManager remoteCacheManager;
protected RemoteCache<Object, Object> remoteCache;
protected Cache<Object, Object> cache;
@Override
protected QueryFactory getQueryFactory() {
return Search.getQueryFactory(remoteCache);
}
/**
 * Both populating the cache and querying are done via remote cache.
 */
@Override
protected RemoteCache<Object, Object> getCacheForQuery() {
return remoteCache;
}
// Subclasses override this to exercise older Hot Rod protocol versions.
protected ProtocolVersion getProtocolVersion() {
return ProtocolVersion.DEFAULT_PROTOCOL_VERSION;
}
protected Cache<Object, Object> getEmbeddedCache() {
return cache;
}
@Override
protected void createCacheManagers() {
// Embedded side: a single clustered node using the JBoss marshaller.
GlobalConfigurationBuilder globalCfg = GlobalConfigurationBuilder.defaultClusteredBuilder();
globalCfg.serialization().marshaller(new GenericJBossMarshaller());
ConfigurationBuilder cfg = getConfigurationBuilder();
createClusteredCaches(1, globalCfg, cfg, true);
cache = manager(0).getCache();
// Hot Rod server plus a client configured with the same marshaller and the
// protocol version under test.
hotRodServer = HotRodClientTestingUtil.startHotRodServer(manager(0));
org.infinispan.client.hotrod.configuration.ConfigurationBuilder clientBuilder = HotRodClientTestingUtil.newRemoteConfigurationBuilder();
clientBuilder.addServer().host("127.0.0.1").port(hotRodServer.getPort());
clientBuilder.version(getProtocolVersion());
clientBuilder.marshaller(new GenericJBossMarshaller());
remoteCacheManager = new RemoteCacheManager(clientBuilder.build());
remoteCache = remoteCacheManager.getCache();
// Allow deserialization of all test domain classes.
cacheManagers.forEach(c -> c.getClassAllowList().addRegexps(".*"));
}
// Object-encoded cache with heap index storage for the three indexed domain types.
protected ConfigurationBuilder getConfigurationBuilder() {
ConfigurationBuilder builder = hotRodCacheConfiguration();
builder.encoding().key().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
builder.encoding().value().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
builder.indexing().enable()
.storage(LOCAL_HEAP)
.addIndexedEntity(getModelFactory().getUserImplClass())
.addIndexedEntity(getModelFactory().getAccountImplClass())
.addIndexedEntity(getModelFactory().getTransactionImplClass());
return builder;
}
@AfterClass(alwaysRun = true)
public void release() {
// Kill the client before the server it talks to.
killRemoteCacheManager(remoteCacheManager);
remoteCacheManager = null;
killServers(hotRodServer);
hotRodServer = null;
}
@Override
public void testIndexPresence() {
// Index presence is verified against the embedded SearchMapping component,
// not through the remote API.
SearchMapping searchMapping = TestingUtil.extractComponent(cache, SearchMapping.class);
verifyClassIsIndexed(searchMapping, getModelFactory().getUserImplClass());
verifyClassIsIndexed(searchMapping, getModelFactory().getAccountImplClass());
verifyClassIsIndexed(searchMapping, getModelFactory().getTransactionImplClass());
verifyClassIsNotIndexed(searchMapping, getModelFactory().getAddressImplClass());
}
private void verifyClassIsNotIndexed(SearchMapping searchMapping, Class<?> type) {
assertNull(searchMapping.indexedEntity(type));
}
private void verifyClassIsIndexed(SearchMapping searchMapping, Class<?> type) {
assertNotNull(searchMapping.indexedEntity(type));
}
@Override
public void testQueryFactoryType() {
// Remote variant of the test expects the remote query factory implementation.
assertEquals(RemoteQueryFactory.class, getQueryFactory().getClass());
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = ".*ISPN028503:.*")
@Override
public void testInvalidEmbeddedAttributeQuery() {
// the original exception gets wrapped in HotRodClientException
super.testInvalidEmbeddedAttributeQuery();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN014027: The property path 'addresses.postCode' cannot be projected because it is multi-valued")
@Override
public void testRejectProjectionOfRepeatedProperty() {
// the original exception gets wrapped in HotRodClientException
super.testRejectProjectionOfRepeatedProperty();
}
@Override
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN014026: The expression 'surname' must be part of an aggregate function or it should be included in the GROUP BY clause")
public void testGroupBy3() {
// the original exception gets wrapped in HotRodClientException
super.testGroupBy3();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN014021: Queries containing grouping and aggregation functions must use projections.")
@Override
public void testGroupBy5() {
// the original exception gets wrapped in HotRodClientException
super.testGroupBy5();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "java.lang.IllegalStateException: Aggregation SUM cannot be applied to property of type java.lang.String")
@Override
public void testGroupBy6() {
// the original exception gets wrapped in HotRodClientException
super.testGroupBy6();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN028515: Cannot have aggregate functions in the WHERE clause : SUM.")
@Override
public void testGroupBy7() {
// the original exception gets wrapped in HotRodClientException
super.testGroupBy7();
}
@Test(expectedExceptions = IllegalStateException.class, expectedExceptionsMessageRegExp = "ISPN014825: Query parameter 'param2' was not set")
@Override
public void testMissingParamWithParameterMap() {
// exception message code is different because it is generated by a different logger
super.testMissingParamWithParameterMap();
}
@Test(expectedExceptions = IllegalStateException.class, expectedExceptionsMessageRegExp = "ISPN014825: Query parameter 'param2' was not set")
@Override
public void testMissingParam() {
// exception message code is different because it is generated by a different logger
super.testMissingParam();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN014023: Using the multi-valued property path 'addresses.street' in the GROUP BY clause is not currently supported")
@Override
public void testGroupByMustNotAcceptRepeatedProperty() {
// the original exception gets wrapped in HotRodClientException
super.testGroupByMustNotAcceptRepeatedProperty();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN014024: The property path 'addresses.street' cannot be used in the ORDER BY clause because it is multi-valued")
@Override
public void testOrderByMustNotAcceptRepeatedProperty() {
// the original exception gets wrapped in HotRodClientException
super.testOrderByMustNotAcceptRepeatedProperty();
}
@Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN028515: Cannot have aggregate functions in the WHERE clause : MIN.")
@Override
public void testRejectAggregationsInWhereClause() {
// the original exception gets wrapped in HotRodClientException
super.testRejectAggregationsInWhereClause();
}
}
| 9,644
| 46.04878
| 264
|
java
|
null |
infinispan-main/integrationtests/jboss-marshalling-it/src/test/java/org/infinispan/it/marshalling/jboss/JBMARRemoteQueryOldClientDslConditionsTest.java
|
package org.infinispan.it.marshalling.jboss;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "client.hotrod.query.JBMARRemoteQueryOldClientDslConditionsTest")
public class JBMARRemoteQueryOldClientDslConditionsTest extends JBMARRemoteQueryDslConditionsTest {
// Re-runs the entire parent query DSL suite over Hot Rod protocol 2.7
// instead of the default protocol version.
@Override
protected ProtocolVersion getProtocolVersion() {
return ProtocolVersion.PROTOCOL_VERSION_27;
}
}
| 465
| 32.285714
| 105
|
java
|
null |
infinispan-main/integrationtests/jboss-marshalling-it/src/test/java/org/infinispan/it/marshalling/jboss/JBMARSerializeWithClientPojoTest.java
|
package org.infinispan.it.marshalling.jboss;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.client.hotrod.test.InternalRemoteCacheManager;
import org.infinispan.client.hotrod.test.SingleHotRodServerTest;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.jboss.marshalling.commons.GenericJBossMarshaller;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Verifies that a client pojo annotated with {@code @SerializeWith} round-trips
 * through a Hot Rod put when the client uses the generic JBoss marshaller.
 */
@Test(groups = "functional", testName = "client.hotrod.marshall.JBMARSerializeWithClientPojoTest")
public class JBMARSerializeWithClientPojoTest extends SingleHotRodServerTest {
   @Override
   protected EmbeddedCacheManager createCacheManager() {
      // Server side: allow-list the pojo and its externalizer and store
      // keys/values as plain Java objects.
      GlobalConfigurationBuilder global = new GlobalConfigurationBuilder();
      global.serialization().allowList().addClasses(UserPojo.class, UserPojo.Externalizer.class);
      org.infinispan.configuration.cache.ConfigurationBuilder cacheCfg = hotRodCacheConfiguration();
      cacheCfg.encoding().key().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
      cacheCfg.encoding().value().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
      return TestCacheManagerFactory.createCacheManager(global, cacheCfg);
   }
   @Override
   protected RemoteCacheManager getRemoteCacheManager() {
      // Client side: JBoss marshalling, with the pojo allow-listed for
      // java-serialization reads.
      ConfigurationBuilder clientCfg = HotRodClientTestingUtil.newRemoteConfigurationBuilder();
      clientCfg.addServer().host("127.0.0.1").port(hotrodServer.getPort());
      clientCfg.addJavaSerialAllowList(UserPojo.class.getName()).marshaller(GenericJBossMarshaller.class);
      return new InternalRemoteCacheManager(clientCfg.build());
   }
   @Test
   public void testSerializeWithPojoMarshallable() {
      // Writing the pojo exercises the @SerializeWith externalizer end to end.
      remoteCacheManager.getCache().put(1, new UserPojo());
   }
   @SerializeWith(UserPojo.Externalizer.class)
   public static final class UserPojo {
      public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<UserPojo> {
         @Override
         public void writeObject(ObjectOutput output, UserPojo object) {
            // Stateless pojo: nothing to write.
         }
         @Override
         public UserPojo readObject(ObjectInput input) {
            return new UserPojo();
         }
      }
   }
}
| 2,679
| 42.225806
| 106
|
java
|
null |
infinispan-main/integrationtests/jboss-marshalling-it/src/test/java/org/infinispan/it/marshalling/jboss/JBMARRemoteNonIndexedQueryDslConditionsTest.java
|
package org.infinispan.it.marshalling.jboss;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import org.infinispan.client.hotrod.exceptions.HotRodClientException;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
 * Test for query conditions (filtering). Exercises the whole query DSL on the sample domain model.
 *
 * @author anistor@redhat.com
 * @since 9.1
 */
@Test(groups = "functional", testName = "client.hotrod.query.JBMARRemoteNonIndexedQueryDslConditionsTest")
public class JBMARRemoteNonIndexedQueryDslConditionsTest extends JBMARRemoteQueryDslConditionsTest {
   /**
    * Same object-encoded cache as the parent but without enabling indexing,
    * so every query in the suite runs unindexed.
    */
   @Override // fix: this overrides JBMARRemoteQueryDslConditionsTest#getConfigurationBuilder but was missing the annotation
   protected ConfigurationBuilder getConfigurationBuilder() {
      ConfigurationBuilder builder = hotRodCacheConfiguration();
      builder.encoding().key().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
      builder.encoding().value().mediaType(MediaType.APPLICATION_OBJECT_TYPE);
      return builder;
   }
   @Test(expectedExceptions = IllegalStateException.class, expectedExceptionsMessageRegExp = "Indexing was not enabled on cache.*")
   @Override
   public void testIndexPresence() {
      // With indexing disabled, requesting the indexer must fail.
      org.infinispan.query.Search.getIndexer(getEmbeddedCache());
   }
   @Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN028521: Full-text queries cannot be applied to property 'longDescription' in type org.infinispan.query.dsl.embedded.testdomain.hsearch.TransactionHS unless the property is indexed and analyzed.")
   @Override
   public void testFullTextTerm() {
      // Full-text predicates require indexed and analyzed properties, absent here.
      super.testFullTextTerm();
   }
   @Test(expectedExceptions = HotRodClientException.class, expectedExceptionsMessageRegExp = "org.infinispan.objectfilter.ParsingException: ISPN028521: Full-text queries cannot be applied to property 'longDescription' in type org.infinispan.query.dsl.embedded.testdomain.hsearch.TransactionHS unless the property is indexed and analyzed.")
   @Override
   public void testFullTextPhrase() {
      // Full-text predicates require indexed and analyzed properties, absent here.
      super.testFullTextPhrase();
   }
}
| 2,154
| 47.977273
| 339
|
java
|
null |
infinispan-main/integrationtests/cdi-weld-se-it/src/test/java/org/infinispan/integrationtests/cdi/weld/CDITestingBean.java
|
package org.infinispan.integrationtests.cdi.weld;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.infinispan.Cache;
/**
 * CDI bean for testing: a thin facade over the injected default cache.
 *
 * @author Sebastian Laskawiec
 */
@ApplicationScoped
public class CDITestingBean {
   // Default cache injected by the CDI integration.
   @Inject
   private Cache<String, String> cache;
   /** Stores {@code value} under {@code key} in the injected cache. */
   public void putValueInCache(String key, String value) {
      cache.put(key, value);
   }
   /** Returns the value stored under {@code key}. */
   public String getValueFromCache(String key) {
      final String value = cache.get(key);
      return value;
   }
}
| 513
| 18.037037
| 58
|
java
|
null |
infinispan-main/integrationtests/cdi-weld-se-it/src/test/java/org/infinispan/integrationtests/cdi/weld/Config.java
|
package org.infinispan.integrationtests.cdi.weld;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.enterprise.inject.Disposes;
import jakarta.enterprise.inject.Produces;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestingUtil;
/**
 * Cache configuration: produces and disposes the application-scoped cache manager.
 *
 * @author Sebastian Laskawiec
 */
@ApplicationScoped
public class Config {
   /**
    * Produces a cache manager with a single default cache named "cdi",
    * bounded to 100 in-memory entries.
    */
   @Produces
   @ApplicationScoped
   public EmbeddedCacheManager defaultEmbeddedCacheManager() {
      ConfigurationBuilder cdiCache = new ConfigurationBuilder();
      cdiCache.memory().size(100);
      ConfigurationBuilderHolder holder = new ConfigurationBuilderHolder();
      holder.getGlobalConfigurationBuilder().defaultCacheName("cdi");
      holder.getNamedConfigurationBuilders().put("cdi", cdiCache);
      // start == true: the manager is running when injected.
      return new DefaultCacheManager(holder, true);
   }
   /**
    * Stops cache manager.
    *
    * @param cacheManager to be stopped
    */
   @SuppressWarnings("unused")
   public void killCacheManager(@Disposes EmbeddedCacheManager cacheManager) {
      TestingUtil.killCacheManagers(cacheManager);
   }
}
| 1,303
| 27.977778
| 78
|
java
|
null |
infinispan-main/integrationtests/cdi-weld-se-it/src/test/java/org/infinispan/integrationtests/cdi/weld/WeldStandaloneTest.java
|
package org.infinispan.integrationtests.cdi.weld;
import static org.testng.Assert.assertEquals;
import org.jboss.weld.environment.se.Weld;
import org.jboss.weld.environment.se.WeldContainer;
import org.testng.annotations.Test;
/**
 * Tests Weld integration in standalone (desktop) app.
 *
 * @author Sebastian Laskawiec
 */
@Test(groups="functional", testName="cdi.test.weld.WeldStandaloneTest")
public class WeldStandaloneTest {
   public void testWeldStandaloneInitialisation() {
      WeldContainer container = null;
      try {
         // Boot a standalone Weld container and resolve the test bean via CDI.
         container = new Weld().initialize();
         CDITestingBean bean = container.instance().select(CDITestingBean.class).get();
         // Round-trip a value through the bean's injected cache.
         bean.putValueInCache("test", "abcd");
         assertEquals(bean.getValueFromCache("test"), "abcd");
      } finally {
         // Tear the container down even when the assertion fails.
         if (container != null) {
            container.shutdown();
         }
      }
   }
}
| 1,007
| 24.846154
| 88
|
java
|
null |
infinispan-main/gridfs/src/test/java/org/infinispan/gridfs/GridFileTest.java
|
package org.infinispan.gridfs;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileFilter;
import java.io.FileNotFoundException;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(testName = "io.GridFileTest", groups = "functional")
public class GridFileTest extends SingleCacheManagerTest {
private Cache<String, byte[]> dataCache;
private Cache<String, GridFile.Metadata> metadataCache;
private GridFilesystem fs;
@Override
protected EmbeddedCacheManager createCacheManager() {
   // One manager with two identically-configured caches: "data" and "metadata".
   EmbeddedCacheManager manager = TestCacheManagerFactory.createCacheManager();
   Configuration cfg = new ConfigurationBuilder().build();
   for (String name : new String[]{"data", "metadata"}) {
      manager.defineConfiguration(name, cfg);
   }
   return manager;
}
// Builds a fresh GridFilesystem for every test method, backed by the two
// caches defined in createCacheManager().
@BeforeMethod
protected void setUp() {
dataCache = cacheManager.getCache("data");
metadataCache = cacheManager.getCache("metadata");
fs = new GridFilesystem(dataCache, metadataCache);
}
public void testGridFS() throws IOException {
File gridDir = fs.getFile("/test");
assert gridDir.mkdirs();
File gridFile = fs.getFile("/test/myfile.txt");
assert gridFile.createNewFile();
}
public void testGetFile() {
assertEquals(fs.getFile("file.txt").getPath(), "file.txt");
assertEquals(fs.getFile("/file.txt").getPath(), "/file.txt");
assertEquals(fs.getFile("myDir/file.txt").getPath(), "myDir/file.txt");
assertEquals(fs.getFile("/myDir/file.txt").getPath(), "/myDir/file.txt");
assertEquals(fs.getFile("myDir", "file.txt").getPath(), "myDir/file.txt");
assertEquals(fs.getFile("/myDir", "file.txt").getPath(), "/myDir/file.txt");
File dir = fs.getFile("/myDir");
assertEquals(fs.getFile(dir, "file.txt").getPath(), "/myDir/file.txt");
dir = fs.getFile("myDir");
assertEquals(fs.getFile(dir, "file.txt").getPath(), "myDir/file.txt");
}
public void testCreateNewFile() throws IOException {
File file = fs.getFile("file.txt");
assertTrue(file.createNewFile()); // file should be created successfully
assertFalse(file.createNewFile()); // file should not be created, because it already exists
}
@Test(expectedExceptions = IOException.class)
public void testCreateNewFileInNonExistentDir() throws IOException {
File file = fs.getFile("nonExistent/file.txt");
file.createNewFile();
}
public void testNonExistentFileIsNeitherFileNorDirectory() {
File file = fs.getFile("nonExistentFile.txt");
assertFalse(file.exists());
assertFalse(file.isFile());
assertFalse(file.isDirectory());
}
public void testMkdir() throws IOException {
assertFalse(mkdir(""));
assertFalse(mkdir("/"));
assertFalse(mkdir("/nonExistentParentDir/subDir"));
assertTrue(mkdir("myDir1"));
assertTrue(mkdir("myDir1/mySubDir1"));
assertTrue(mkdir("/myDir2"));
assertTrue(mkdir("/myDir2/mySubDir2"));
createFile("/file.txt");
assertFalse(mkdir("/file.txt/dir"));
}
private boolean mkdir(String pathname) {
return fs.getFile(pathname).mkdir();
}
public void testMkdirs() throws IOException {
assertFalse(mkdirs(""));
assertFalse(mkdirs("/"));
assertTrue(mkdirs("myDir1"));
assertTrue(mkdirs("myDir2/mySubDir"));
assertTrue(mkdirs("/myDir3"));
assertTrue(mkdirs("/myDir4/mySubDir"));
assertTrue(mkdirs("/myDir5/subDir/secondSubDir"));
createFile("/file.txt");
assertFalse(mkdirs("/file.txt/dir"));
}
private boolean mkdirs(String pathname) {
return fs.getFile(pathname).mkdirs();
}
public void testGetParent() {
File file = fs.getFile("file.txt");
assertEquals(file.getParent(), null);
file = fs.getFile("/parentdir/file.txt");
assertEquals(file.getParent(), "/parentdir");
file = fs.getFile("/parentdir/subdir/file.txt");
assertEquals(file.getParent(), "/parentdir/subdir");
}
public void testGetParentFile() {
File file = fs.getFile("file.txt");
assertNull(file.getParentFile());
file = fs.getFile("/parentdir/file.txt");
File parentDir = file.getParentFile();
assertTrue(parentDir instanceof GridFile);
assertEquals(parentDir.getPath(), "/parentdir");
}
@Test(expectedExceptions = FileNotFoundException.class)
public void testWritingToDirectoryThrowsException1() throws IOException {
GridFile dir = (GridFile) createDir();
fs.getOutput(dir); // should throw exception
}
@Test(expectedExceptions = FileNotFoundException.class)
public void testWritingToDirectoryThrowsException2() throws IOException {
File dir = createDir();
fs.getOutput(dir.getPath()); // should throw exception
}
@Test(expectedExceptions = FileNotFoundException.class)
public void testReadingFromDirectoryThrowsException1() throws IOException {
File dir = createDir();
fs.getInput(dir); // should throw exception
}
@Test(expectedExceptions = FileNotFoundException.class)
public void testReadingFromDirectoryThrowsException2() throws IOException {
File dir = createDir();
fs.getInput(dir.getPath()); // should throw exception
}
private File createDir() {
return createDir("mydir");
}
private File createDir(String pathname) {
File dir = fs.getFile(pathname);
boolean created = dir.mkdir();
assert created;
return dir;
}
public void testWriteAcrossMultipleChunksWithNonDefaultChunkSize() throws Exception {
writeToFile("multipleChunks.txt",
"This text spans multiple chunks, because each chunk is only 10 bytes long.",
10); // chunkSize = 10
String text = getContents("multipleChunks.txt");
assertEquals(text, "This text spans multiple chunks, because each chunk is only 10 bytes long.");
}
public void testWriteAcrossMultipleChunksWithNonDefaultChunkSizeAfterFileIsExplicitlyCreated() throws Exception {
GridFile file = (GridFile) fs.getFile("multipleChunks.txt", 20); // chunkSize = 20
file.createNewFile();
writeToFile("multipleChunks.txt",
"This text spans multiple chunks, because each chunk is only 20 bytes long.",
10); // chunkSize = 10 (but it is ignored, because the file was already created with chunkSize = 20
String text = getContents("multipleChunks.txt");
assertEquals(text, "This text spans multiple chunks, because each chunk is only 20 bytes long.");
}
public void testAppendToFileThatEndsWithFullChunk() throws Exception {
writeToFile("endsWithFullChunk.txt", "1234" + "5678", 4); // chunkSize = 4; two chunks will be written; both chunks will be full
appendToFile("endsWithFullChunk.txt", "", 4);
assertEquals(getContents("endsWithFullChunk.txt"), "12345678");
}
public void testAppend() throws Exception {
writeToFile("append.txt", "Hello");
appendToFile("append.txt", "World");
assertEquals(getContents("append.txt"), "HelloWorld");
}
public void testAppendWithDifferentChunkSize() throws Exception {
writeToFile("append.txt", "Hello", 2); // chunkSize = 2
appendToFile("append.txt", "World", 5); // chunkSize = 5
assertEquals(getContents("append.txt"), "HelloWorld");
}
public void testAppendToEmptyFile() throws Exception {
appendToFile("empty.txt", "Hello");
assertEquals(getContents("empty.txt"), "Hello");
}
public void testDeleteRemovesAllChunks() throws Exception {
assertEquals(numberOfChunksInCache(), 0);
assertEquals(numberOfMetadataEntries(), 0);
writeToFile("delete.txt", "delete me", 100);
GridFile file = (GridFile) fs.getFile("delete.txt");
boolean deleted = file.delete();
assertTrue(deleted);
assertFalse(file.exists());
assertEquals(numberOfChunksInCache(), 0);
assertEquals(numberOfMetadataEntries(), 0);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testDeleteOnExit() {
fs.getFile("nonsuch.txt").deleteOnExit();
}
public void testOverwritingFileDoesNotLeaveExcessChunksInCache() throws Exception {
assertEquals(numberOfChunksInCache(), 0);
writeToFile("leak.txt", "12345abcde12345", 5); // file length = 15, chunkSize = 5. Chunk size should "upgrade" to 8
assertEquals(numberOfChunksInCache(), 2);
writeToFile("leak.txt", "12345678", 5); // file length = 5, chunkSize = 5. Chunk size should "upgrade" to 8
assertEquals(numberOfChunksInCache(), 1);
}
//ISPN-2157
public void testWriteAndReadNegativeByte() throws Exception {
String filePath = "negative.dat";
OutputStream out = fs.getOutput(filePath);
try{
out.write(-1);
}finally{
out.close();
}
InputStream in = fs.getInput(filePath);
try{
assertEquals(in.read(), 255);
}finally{
in.close();
}
}
public void testWriteAfterClose() throws Exception {
String filePath = "test_write_to_closed.dat";
OutputStream out = fs.getOutput(filePath);
try{
out.write(1);
}finally{
out.close();
}
IOException e = null;
try{
out.write(2);
}catch (IOException ex){
e = ex;
}
assertNotNull(e);
File f = fs.getFile(filePath);
assertEquals(f.length(), 1);
}
public void testMultiClose() throws Exception {
String filePath = "test_close.dat";
OutputStream out = fs.getOutput(filePath);
try{
out.write(1);
}finally{
out.close();
out.close();
}
File f = fs.getFile(filePath);
assertEquals(f.length(), 1);
}
public void testCanReadClosed() throws Exception {
String filePath = "file_read_closed.txt";
OutputStream out = fs.getOutput(filePath);
try{
out.write(1);
out.write(2);
out.write(3);
}finally{
out.close();
}
InputStream in = fs.getInput(filePath);
in.read();
in.close();
IOException e = null;
try{
in.read();
}catch(IOException ex){
e = ex;
}
assertNotNull(e);
}
public void testSkip() throws Exception {
String filePath = "skip.txt";
writeToFile(filePath, "abcde" + "fghij" + "klmno" + "pqrst" + "uvwxy" + "z", 5);
InputStream in = fs.getInput(filePath);
try {
long skipped = in.skip(2); // skip inside current chunk
assertEquals(skipped, 2);
assertEquals((char)in.read(), 'c');
skipped = in.skip(2); // skip to end of chunk
assertEquals(skipped, 2);
assertEquals((char)in.read(), 'f');
skipped = in.skip(6); // skip into next chunk
assertEquals(skipped, 6);
assertEquals((char)in.read(), 'm');
skipped = in.skip(9); // skip _over_ next chunk
assertEquals(skipped, 9);
assertEquals((char)in.read(), 'w');
skipped = in.skip(-1); // negative skip
assertEquals(skipped, 0);
assertEquals((char)in.read(), 'x');
skipped = in.skip(10); // skip beyond EOF
assertEquals(skipped, 2);
assertEquals(in.read(), -1);
} finally {
in.close();
}
}
@SuppressWarnings("ResultOfMethodCallIgnored")
public void testAvailable() throws Exception {
String filePath = "available.txt";
writeToFile(filePath, "abcde" + "fghij" + "klmno" + "pqrst" + "uvwxy" + "z", 5); // Chunk size should get "upgraded" to 8
InputStream in = fs.getInput(filePath);
try {
assertEquals(in.available(), 0); // since first chunk hasn't been fetched yet
in.read();
assertEquals(in.available(), 7);
in.skip(3);
assertEquals(in.available(), 4);
in.read();
assertEquals(in.available(), 3);
in.read();
assertEquals(in.available(), 2);
} finally {
in.close();
}
}
public void testLastModified() throws Exception {
assertEquals(fs.getFile("nonExistentFile.txt").lastModified(), 0);
long time1 = System.currentTimeMillis();
File file = createFile("file.txt");
long time2 = System.currentTimeMillis();
assertTrue(time1 <= file.lastModified());
assertTrue(file.lastModified() <= time2);
Thread.sleep(100);
time1 = System.currentTimeMillis();
writeToFile(file.getPath(), "foo");
time2 = System.currentTimeMillis();
assertTrue(time1 <= file.lastModified());
assertTrue(file.lastModified() <= time2);
}
public void testSetLastModified() throws IOException {
assertFalse(fs.getFile("nonsuch").setLastModified(23));
File file = createFile("file.txt");
assertTrue(file.setLastModified(42));
assertEquals(fs.getFile("file.txt").lastModified(), 42);
}
public void testList() throws Exception {
assertNull(fs.getFile("nonExistentDir").list());
assertEquals(createDir("/emptyDir").list().length, 0);
File dir = createDirWithFiles();
String[] filenames = dir.list();
assertEquals(
asSet(filenames),
asSet("foo1.txt", "foo2.txt", "bar1.txt", "bar2.txt", "fooDir", "barDir"));
}
public void testListWithFilenameFilter() throws Exception {
File dir = createDirWithFiles();
String[] filenames = dir.list(new FooFilenameFilter());
assertEquals(
asSet(filenames),
asSet("foo1.txt", "foo2.txt", "fooDir"));
}
public void testListFiles() throws Exception {
assertNull(fs.getFile("nonExistentDir").listFiles());
assertEquals(createDir("/emptyDir").listFiles().length, 0);
File dir = createDirWithFiles();
File[] files = dir.listFiles();
assertEquals(
asSet(getPaths(files)),
asSet("/myDir/foo1.txt", "/myDir/foo2.txt", "/myDir/fooDir",
"/myDir/bar1.txt", "/myDir/bar2.txt", "/myDir/barDir"));
}
public void testListFilesWhereNonChildPathStartsWithParent() throws Exception {
File parentDir = createDir("/parentDir");
assertEquals(parentDir.listFiles().length, 0);
assertEquals(createDir("/parentDir-NOT-CHILD").listFiles().length, 0);
assertEquals(parentDir.listFiles().length, 0);
}
public void testListFilesWithFilenameFilter() throws Exception {
File dir = createDirWithFiles();
FooFilenameFilter filter = new FooFilenameFilter();
filter.expectDir(dir);
File[] files = dir.listFiles(filter);
assertEquals(
asSet(getPaths(files)),
asSet("/myDir/foo1.txt", "/myDir/foo2.txt", "/myDir/fooDir"));
}
public void testListFilesWithFileFilter() throws Exception {
File dir = createDirWithFiles();
File[] files = dir.listFiles(new FooFileFilter());
assertEquals(
asSet(getPaths(files)),
asSet("/myDir/foo1.txt", "/myDir/foo2.txt", "/myDir/fooDir"));
}
public void testRootDir() throws Exception {
File rootDir = fs.getFile("/");
assertTrue(rootDir.exists());
assertTrue(rootDir.isDirectory());
createFile("/foo.txt");
String[] filenames = rootDir.list();
assertNotNull(filenames);
assertEquals(filenames.length, 1);
assertEquals(filenames[0], "foo.txt");
}
public void testReadableChannel() throws Exception {
String content = "This is the content of channelTest.txt";
writeToFile("/channelTest.txt", content, 10);
ReadableGridFileChannel channel = fs.getReadableChannel("/channelTest.txt");
try {
assertTrue(channel.isOpen());
ByteBuffer buffer = ByteBuffer.allocate(1000);
channel.read(buffer);
assertEquals(getStringFrom(buffer), content);
} finally {
channel.close();
}
assertFalse(channel.isOpen());
}
public void testReadableChannelPosition() throws Exception {
writeToFile("/position.txt", "0123456789", 3);
ReadableGridFileChannel channel = fs.getReadableChannel("/position.txt");
try {
assertEquals(channel.position(), 0);
channel.position(5);
assertEquals(channel.position(), 5);
assertEquals(getStringFromChannel(channel, 3), "567");
assertEquals(channel.position(), 8);
channel.position(2);
assertEquals(channel.position(), 2);
assertEquals(getStringFromChannel(channel, 5), "23456");
assertEquals(channel.position(), 7);
} finally {
channel.close();
}
}
public void testWritableChannel() throws Exception {
WritableGridFileChannel channel = fs.getWritableChannel("/channelTest.txt", false, 10);
try {
assertTrue(channel.isOpen());
channel.write(ByteBuffer.wrap("This file spans multiple chunks.".getBytes()));
} finally {
channel.close();
}
assertFalse(channel.isOpen());
assertEquals(getContents("/channelTest.txt"), "This file spans multiple chunks.");
}
public void testWritableChannelAppend() throws Exception {
writeToFile("/append.txt", "Initial text.", 3);
WritableGridFileChannel channel = fs.getWritableChannel("/append.txt", true);
try {
channel.write(ByteBuffer.wrap("Appended text.".getBytes()));
} finally {
channel.close();
}
assertEquals(getContents("/append.txt"), "Initial text.Appended text.");
}
public void testReadLoop() throws Exception {
WritableGridFileChannel wgfc = fs.getWritableChannel("/readTest.txt", false, 100);
try {
assertTrue(wgfc.isOpen());
wgfc.write(ByteBuffer.wrap("This tests read loop.".getBytes()));
} finally {
wgfc.close();
}
ReadableGridFileChannel rgfc = fs.getReadableChannel("/readTest.txt");
try {
assertTrue("This tests read loop.".equals(new String(toBytes(Channels.newInputStream(rgfc)))));
} finally {
rgfc.close();
}
}
public void testGetAbsolutePath() throws IOException {
assertEquals(fs.getFile("/file.txt").getAbsolutePath(), "/file.txt");
assertEquals(fs.getFile("file.txt").getAbsolutePath(), "/file.txt");
assertEquals(fs.getFile("dir/file.txt").getAbsolutePath(), "/dir/file.txt");
}
public void testGetAbsoluteFile() throws IOException {
assertTrue(fs.getFile("file.txt").getAbsoluteFile() instanceof GridFile);
assertEquals(fs.getFile("/file.txt").getAbsoluteFile().getPath(), "/file.txt");
assertEquals(fs.getFile("file.txt").getAbsoluteFile().getPath(), "/file.txt");
assertEquals(fs.getFile("dir/file.txt").getAbsoluteFile().getPath(), "/dir/file.txt");
}
public void testIsAbsolute() throws IOException {
assertTrue(fs.getFile("/file.txt").isAbsolute());
assertFalse(fs.getFile("file.txt").isAbsolute());
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testRenameTo(){
fs.getFile("file.txt").renameTo(null);
}
public void testLeadingSeparatorIsOptional() throws IOException {
File gridFile = fs.getFile("file.txt");
assert gridFile.createNewFile();
assertTrue(fs.getFile("file.txt").exists());
assertTrue(fs.getFile("/file.txt").exists());
File dir = fs.getFile("dir");
boolean dirCreated = dir.mkdir();
assertTrue(dirCreated);
assertTrue(fs.getFile("dir").exists());
assertTrue(fs.getFile("/dir").exists());
}
public void testGetName() throws IOException {
assertEquals(fs.getFile("").getName(), "");
assertEquals(fs.getFile("/").getName(), "");
assertEquals(fs.getFile("file.txt").getName(), "file.txt");
assertEquals(fs.getFile("/file.txt").getName(), "file.txt");
assertEquals(fs.getFile("/dir/file.txt").getName(), "file.txt");
assertEquals(fs.getFile("/dir/subdir/file.txt").getName(), "file.txt");
assertEquals(fs.getFile("dir/subdir/file.txt").getName(), "file.txt");
}
public void testEquals() throws Exception {
assertFalse(fs.getFile("").equals(null));
assertTrue(fs.getFile("").equals(fs.getFile("")));
assertTrue(fs.getFile("").equals(fs.getFile("/")));
assertTrue(fs.getFile("foo.txt").equals(fs.getFile("foo.txt")));
assertTrue(fs.getFile("foo.txt").equals(fs.getFile("/foo.txt")));
assertFalse(fs.getFile("foo.txt").equals(fs.getFile("FOO.TXT")));
assertFalse(fs.getFile("/foo.txt").equals(new File("/foo.txt")));
}
public void testCanRead() throws Exception {
File gridFile = fs.getFile("file.txt");
assert gridFile.createNewFile();
assertTrue(gridFile.canRead());
assertFalse(fs.getFile("nonsuch.txt").canRead());
}
public void testCanWrite() throws Exception {
File gridFile = fs.getFile("file.txt");
assert gridFile.createNewFile();
assertTrue(gridFile.canWrite());
assertFalse(fs.getFile("nonsuch.txt").canWrite());
}
public void testIsHidden(){
assertFalse(fs.getFile("nonsuch.txt").isHidden());
}
public void testCanExecute(){
assertFalse(fs.getFile("nonsuch.txt").isHidden());
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetCanonicalPath() throws IOException {
fs.getFile("nonsuch.txt").getCanonicalPath();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetCanonicalFile() throws IOException {
fs.getFile("nonsuch.txt").getCanonicalFile();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testToURL() throws MalformedURLException {
fs.getFile("nonsuch.txt").toURL();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testToURI() {
fs.getFile("nonsuch.txt").toURI();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetReadOnly() {
fs.getFile("nonsuch.txt").setReadOnly();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetWritable() {
fs.getFile("nonsuch.txt").setWritable(true);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetWritable2() {
fs.getFile("nonsuch.txt").setWritable(true, true);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetReadable() {
fs.getFile("nonsuch.txt").setReadable(true);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetReadable2() {
fs.getFile("nonsuch.txt").setReadable(true, true);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetExecutable() {
fs.getFile("nonsuch.txt").setExecutable(true);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testSetExecutable2() {
fs.getFile("nonsuch.txt").setExecutable(true, true);
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetTotalSpace() {
fs.getFile("nonsuch.txt").getTotalSpace();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetFreeSpace() {
fs.getFile("nonsuch.txt").getFreeSpace();
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetUsableSpace() {
fs.getFile("nonsuch.txt").getUsableSpace();
}
private String getStringFromChannel(ReadableByteChannel channel, int length) throws IOException {
ByteBuffer buffer = ByteBuffer.allocate(length);
channel.read(buffer);
return getStringFrom(buffer);
}
private String getStringFrom(ByteBuffer buffer) {
buffer.flip();
byte[] buf = new byte[buffer.remaining()];
buffer.get(buf);
return new String(buf);
}
private String[] getPaths(File[] files) {
String[] paths = new String[files.length];
for (int i = 0; i < files.length; i++) {
File file = files[i];
paths[i] = file.getPath();
}
return paths;
}
private Set<String> asSet(String... strings) {
return new HashSet<String>(Arrays.asList(strings));
}
private File createDirWithFiles() throws IOException {
File dir = createDir("/myDir");
createFile("/myDir/foo1.txt");
createFile("/myDir/foo2.txt");
createFile("/myDir/bar1.txt");
createFile("/myDir/bar2.txt");
createDir("/myDir/fooDir");
createFile("/myDir/fooDir/foo.txt");
createFile("/myDir/fooDir/bar.txt");
createDir("/myDir/barDir");
return dir;
}
private File createFile(String pathname) throws IOException {
File file = fs.getFile(pathname);
assert file.createNewFile();
return file;
}
private int numberOfChunksInCache() {
return dataCache.size();
}
private int numberOfMetadataEntries() {
return metadataCache.size();
}
private void appendToFile(String filePath, String text) throws IOException {
appendToFile(filePath, text, null);
}
private void appendToFile(String filePath, String text, Integer chunkSize) throws IOException {
writeToFile(filePath, text, true, chunkSize);
}
private void writeToFile(String filePath, String text) throws IOException {
writeToFile(filePath, text, null);
}
private void writeToFile(String filePath, String text, Integer chunkSize) throws IOException {
writeToFile(filePath, text, false, chunkSize);
}
private void writeToFile(String filePath, String text, boolean append, Integer chunkSize) throws IOException {
OutputStream out = chunkSize == null
? fs.getOutput(filePath, append)
: fs.getOutput(filePath, append, chunkSize);
try {
out.write(text.getBytes());
} finally {
out.close();
}
}
private String getContents(String filePath) throws IOException {
InputStream in = fs.getInput(filePath);
return getString(in);
}
private String getString(InputStream in) throws IOException {
try {
byte[] buf = new byte[1000];
int bytesRead = in.read(buf);
return new String(buf, 0, bytesRead);
} finally {
in.close();
}
}
private static byte[] toBytes(InputStream is) throws IOException {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[16384];
while ((nRead = is.read(data, 0, data.length)) != -1) {
buffer.write(data, 0, nRead);
}
buffer.flush();
return buffer.toByteArray();
}
private static class FooFilenameFilter implements FilenameFilter {
private File expectedDir;
@Override
public boolean accept(File dir, String name) {
if (expectedDir != null)
assertEquals(dir, expectedDir, "accept() invoked with unexpected dir");
return name.startsWith("foo");
}
public void expectDir(File dir) {
expectedDir = dir;
}
}
private static class FooFileFilter implements FileFilter {
@Override
public boolean accept(File file) {
return file.getName().startsWith("foo");
}
}
}
| 28,628
| 33.001188
| 135
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/package-info.java
|
/**
* Infinispan Grid Filesystem
*
* @api.public
*/
package org.infinispan.gridfs;
| 87
| 11.571429
| 30
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/UnclosableObjectOutputStream.java
|
package org.infinispan.gridfs;
import java.io.IOException;
import java.io.ObjectOutput;
/**
 * An unclosable version of an {@link java.io.ObjectOutput}. This delegates all methods except {@link #flush()} and
 * {@link #close()}.
 * <p>
 * Useful when handing an {@code ObjectOutput} to code that must not be able to flush or close the
 * underlying stream: both operations throw {@link UnsupportedOperationException}.
 *
 * @author Manik Surtani
 * @since 4.0
 */
public class UnclosableObjectOutputStream implements ObjectOutput {

   // Target stream that every write is forwarded to.
   private final ObjectOutput delegate;

   public UnclosableObjectOutputStream(ObjectOutput delegate) {
      this.delegate = delegate;
   }

   @Override
   public final void writeObject(Object obj) throws IOException {
      delegate.writeObject(obj);
   }

   @Override
   public final void write(int b) throws IOException {
      delegate.write(b);
   }

   @Override
   public final void write(byte[] b) throws IOException {
      delegate.write(b);
   }

   @Override
   public final void write(byte[] b, int off, int len) throws IOException {
      delegate.write(b, off, len);
   }

   @Override
   public final void writeBoolean(boolean v) throws IOException {
      delegate.writeBoolean(v);
   }

   @Override
   public final void writeByte(int v) throws IOException {
      delegate.writeByte(v);
   }

   @Override
   public final void writeShort(int v) throws IOException {
      delegate.writeShort(v);
   }

   @Override
   public final void writeChar(int v) throws IOException {
      delegate.writeChar(v);
   }

   @Override
   public final void writeInt(int v) throws IOException {
      delegate.writeInt(v);
   }

   @Override
   public final void writeLong(long v) throws IOException {
      delegate.writeLong(v);
   }

   @Override
   public final void writeFloat(float v) throws IOException {
      delegate.writeFloat(v);
   }

   @Override
   public final void writeDouble(double v) throws IOException {
      delegate.writeDouble(v);
   }

   @Override
   public final void writeBytes(String s) throws IOException {
      delegate.writeBytes(s);
   }

   @Override
   public final void writeChars(String s) throws IOException {
      delegate.writeChars(s);
   }

   @Override
   public final void writeUTF(String str) throws IOException {
      delegate.writeUTF(str);
   }

   @Override
   public final void flush() {
      // Deliberately unsupported: only the owner of the delegate may flush it.
      throw new UnsupportedOperationException("flush() not supported in an UnclosableObjectOutputStream!");
   }

   @Override
   public final void close() {
      // Deliberately unsupported: only the owner of the delegate may close it.
      throw new UnsupportedOperationException("close() not supported in an UnclosableObjectOutputStream!");
   }
}
| 2,470
| 22.311321
| 116
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/ModularArithmetic.java
|
package org.infinispan.gridfs;
/**
 * Remainder helpers used for chunk arithmetic. By default the denominator is
 * assumed to be a power of two, which allows a cheap bit-mask instead of the
 * {@code %} operator; setting the {@code infinispan.compat} system property
 * selects plain modulo for arbitrary denominators.
 *
 * @author Manik Surtani
 */
public class ModularArithmetic {

   /** Read once at class load from the {@code infinispan.compat} system property. */
   public static final boolean CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO = Boolean.getBoolean("infinispan.compat");

   /** Returns {@code numerator mod denominator}; bit-mask fast path when power-of-two denominators are assumed. */
   public static int mod(int numerator, int denominator) {
      return CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO
            ? numerator % denominator
            : numerator & (denominator - 1);
   }

   /** Long-valued variant of {@link #mod(int, int)}. */
   public static long mod(long numerator, int denominator) {
      return CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO
            ? numerator % denominator
            : numerator & (denominator - 1);
   }
}
| 659
| 24.384615
| 109
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/WritableGridFileChannel.java
|
package org.infinispan.gridfs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.WritableByteChannel;
import org.infinispan.Cache;
/**
 * {@link WritableByteChannel} view over a {@link GridFile}, backed by a
 * {@link GridOutputStream}. Writing to a closed channel raises
 * {@link ClosedChannelException}.
 *
 * @author Marko Luksa
 */
public class WritableGridFileChannel implements WritableByteChannel {

   private final GridOutputStream out;
   private final WritableByteChannel channel;

   WritableGridFileChannel(GridFile file, Cache<String, byte[]> cache, boolean append) {
      out = new GridOutputStream(file, append, cache);
      channel = Channels.newChannel(out);
   }

   @Override
   public int write(ByteBuffer src) throws IOException {
      if (!isOpen()) {
         throw new ClosedChannelException();
      }
      return channel.write(src);
   }

   /** Pushes the currently buffered chunk of the underlying stream into the cache. */
   public void flush() {
      out.flush();
   }

   @Override
   public boolean isOpen() {
      return channel.isOpen();
   }

   @Override
   public void close() throws IOException {
      channel.close();
   }
}
| 1,182
| 22.66
| 88
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/GridOutputStream.java
|
package org.infinispan.gridfs;
import java.io.IOException;
import java.io.OutputStream;
import org.infinispan.Cache;
/**
 * {@link OutputStream} that writes a {@link GridFile}'s content into the data
 * cache as fixed-size chunks, buffering one chunk at a time in memory. On
 * {@link #close()} any chunks left over from a previously longer version of the
 * file are removed.
 *
 * @author Bela Ban
 * @author Marko Luksa
 * @author Manik Surtani
 */
public class GridOutputStream extends OutputStream {

   private int index; // index into the file for writing (absolute position in the file)
   private int localIndex; // write position within currentBuffer
   private final byte[] currentBuffer; // one chunk's worth of pending bytes
   private final int numberOfChunksWhenOpened; // lets close() delete now-excess chunks
   private final FileChunkMapper fileChunkMapper;
   private final int chunkSize; // Guaranteed to be a power of 2
   private final GridFile file;
   private boolean streamClosed;

   GridOutputStream(GridFile file, boolean append, Cache<String, byte[]> cache) {
      fileChunkMapper = new FileChunkMapper(file, cache);
      chunkSize = fileChunkMapper.getChunkSize();
      this.file = file;
      // Append starts at the current end of file; overwrite starts at 0.
      index = append ? (int) file.length() : 0;
      // If appending and the last chunk is exactly full, position at chunkSize so the
      // first write forces a flush and a fresh buffer.
      localIndex = append && isLastChunkFull() ? chunkSize : ModularArithmetic.mod(index, chunkSize);
      currentBuffer = append ? fetchLastChunk() : createEmptyChunk();
      numberOfChunksWhenOpened = getLastChunkNumber() + 1;
   }

   private byte[] createEmptyChunk() {
      return new byte[chunkSize];
   }

   private boolean isLastChunkFull() {
      long bytesRemainingInLastChunk = ModularArithmetic.mod(file.length(), chunkSize);
      return bytesRemainingInLastChunk == 0;
   }

   // Loads the file's last chunk, padded out to a full chunkSize buffer.
   private byte[] fetchLastChunk() {
      byte[] chunk = fileChunkMapper.fetchChunk(getLastChunkNumber());
      return createFullSizeCopy(chunk);
   }

   private byte[] createFullSizeCopy(byte[] val) {
      byte[] chunk = createEmptyChunk();
      if (val != null) {
         System.arraycopy(val, 0, chunk, 0, val.length);
      }
      return chunk;
   }

   private int getLastChunkNumber() {
      return getChunkNumber((int) file.length() - 1);
   }

   @Override
   public void write(int b) throws IOException {
      assertOpen();
      int remaining = getBytesRemainingInChunk();
      if (remaining == 0) {
         // Current chunk is full: persist it and start a new one.
         flush();
         localIndex = 0;
      }
      currentBuffer[localIndex] = (byte) b;
      localIndex++;
      index++;
   }

   private void assertOpen() throws IOException {
      if (streamClosed) throw new IOException("Stream is closed");
   }

   @Override
   public void write(byte[] b) throws IOException {
      assertOpen();
      if (b != null)
         write(b, 0, b.length);
   }

   @Override
   public void write(byte[] b, int off, int len) throws IOException {
      assertOpen();
      // Copy chunk by chunk until the requested range is exhausted.
      while (len > 0) {
         int bytesWritten = writeToChunk(b, off, len);
         off += bytesWritten;
         len -= bytesWritten;
      }
   }

   // Copies as many bytes as fit into the current chunk; returns the count copied.
   private int writeToChunk(byte[] b, int off, int len) throws IOException {
      int remaining = getBytesRemainingInChunk();
      if (remaining == 0) {
         flush();
         localIndex = 0;
         remaining = chunkSize;
      }
      int bytesToWrite = Math.min(remaining, len);
      System.arraycopy(b, off, currentBuffer, localIndex, bytesToWrite);
      localIndex += bytesToWrite;
      index += bytesToWrite;
      return bytesToWrite;
   }

   @Override
   public void close() throws IOException {
      if (streamClosed) return; // idempotent: closing twice is harmless
      flush();
      removeExcessChunks();
      reset();
      streamClosed = true;
   }

   // Deletes chunks beyond the new last chunk, left over from a longer prior version.
   private void removeExcessChunks() {
      for (int i = getLastChunkNumber()+1; i<numberOfChunksWhenOpened; i++) {
         fileChunkMapper.removeChunk(i);
      }
   }

   @Override
   public void flush() {
      // Persist the in-memory chunk and record the new file length.
      storeChunk();
      file.setLength(index);
   }

   private void storeChunk() {
      fileChunkMapper.storeChunk(getChunkNumber(index - 1), currentBuffer, localIndex);
   }

   private int getBytesRemainingInChunk() {
      return chunkSize - localIndex;
   }

   private int getChunkNumber(int position) {
      return position / chunkSize;
   }

   private void reset() {
      index = localIndex = 0;
   }
}
| 3,925
| 25.527027
| 101
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/FileChunkMapper.java
|
package org.infinispan.gridfs;
import java.util.Arrays;

import org.infinispan.Cache;
import org.jgroups.logging.Log;
import org.jgroups.logging.LogFactory;
/**
 * Takes care of properly storing and retrieving file chunks from/to cache.
 * Each chunk's key is composed of the file path and the chunk's number. The value is a byte array, which
 * is either chunkSize bytes long or less than that in the case of the last chunk.
 *
 * @author Marko Luksa
 */
class FileChunkMapper {

   private static final Log log = LogFactory.getLog(FileChunkMapper.class);

   private final GridFile file;
   private final Cache<String, byte[]> cache;

   public FileChunkMapper(GridFile file, Cache<String, byte[]> cache) {
      this.file = file;
      this.cache = cache;
   }

   /**
    * Guaranteed to be a power of two
    */
   public int getChunkSize() {
      return file.getChunkSize();
   }

   /**
    * Returns the raw bytes of the given chunk, or {@code null} if the chunk is
    * not present in the cache.
    */
   public byte[] fetchChunk(int chunkNumber) {
      String key = getChunkKey(chunkNumber);
      byte[] val = cache.get(key);
      if (log.isTraceEnabled())
         log.trace("fetching key=" + key + ": " + (val != null ? val.length + " bytes" : "null"));
      return val;
   }

   /**
    * Stores the first {@code length} bytes of {@code buffer} as the given chunk.
    */
   public void storeChunk(int chunkNumber, byte[] buffer, int length) {
      String key = getChunkKey(chunkNumber);
      byte[] val = trim(buffer, length);
      cache.put(key, val);
      if (log.isTraceEnabled())
         log.trace("put(): key=" + key + ": " + val.length + " bytes");
   }

   public void removeChunk(int chunkNumber) {
      cache.remove(getChunkKey(chunkNumber));
   }

   // Copy of the first 'length' bytes. Arrays.copyOf replaces the hand-rolled
   // allocate-and-System.arraycopy sequence with the equivalent library call.
   private byte[] trim(byte[] buffer, int length) {
      return Arrays.copyOf(buffer, length);
   }

   private String getChunkKey(int chunkNumber) {
      return getChunkKey(file.getAbsolutePath(), chunkNumber);
   }

   // Key format: "<absolute file path>.#<chunk number>"
   static String getChunkKey(String absoluteFilePath, int chunkNumber) {
      return absoluteFilePath + ".#" + chunkNumber;
   }
}
| 1,953
| 28.164179
| 105
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/GridInputStream.java
|
package org.infinispan.gridfs;
import java.io.IOException;
import java.io.InputStream;
import org.infinispan.Cache;
/**
* @author Bela Ban
* @author Marko Luksa
* @author Manik Surtani
*/
public class GridInputStream extends InputStream {

   private int index = 0;                // absolute read position within the file
   private int localIndex = 0;           // offset of the next byte to read within currentBuffer
   private byte[] currentBuffer = null;  // chunk currently being read; fetched lazily on first read
   private int fSize;                    // file length, snapshotted when the stream was created
   private boolean streamClosed = false;
   private final FileChunkMapper fileChunkMapper;
   private final int chunkSize; // Guaranteed to be a power of 2

   GridInputStream(GridFile file, Cache<String, byte[]> cache) {
      fileChunkMapper = new FileChunkMapper(file, cache);
      chunkSize = fileChunkMapper.getChunkSize();
      fSize = (int)file.length();
   }

   @Override public int read() throws IOException {
      assertOpen();
      if (isEndReached())
         return -1;
      // Cross into the next chunk when the current one is exhausted (or none loaded yet).
      if (getBytesRemainingInChunk() == 0)
         getChunk();
      int retval = 0x0ff & currentBuffer[localIndex++]; // mask to return an unsigned byte value
      index++;
      return retval;
   }

   @Override
   public int read(byte[] b) throws IOException {
      return read(b, 0, b.length);
   }

   @Override
   public int read(byte[] bytes, int offset, int length) throws IOException {
      assertOpen();
      int totalBytesRead = 0;
      // Read chunk by chunk until the request is satisfied or the end of file is hit.
      while (length > 0) {
         int bytesRead = readFromChunk(bytes, offset, length);
         if (bytesRead == -1)
            // End of file: report -1 only if nothing was read at all.
            return totalBytesRead > 0 ? totalBytesRead : -1;
         offset += bytesRead;
         length -= bytesRead;
         totalBytesRead += bytesRead;
      }
      return totalBytesRead;
   }

   // Reads at most the remainder of the current chunk into b; returns -1 at end of file.
   private int readFromChunk(byte[] b, int off, int len) {
      if (isEndReached())
         return -1;
      int remaining = getBytesRemainingInChunk();
      if (remaining == 0) {
         getChunk();
         remaining = getBytesRemainingInChunk();
      }
      int bytesToRead = Math.min(len, remaining);
      System.arraycopy(currentBuffer, localIndex, b, off, bytesToRead);
      localIndex += bytesToRead;
      index += bytesToRead;
      return bytesToRead;
   }

   @Override public long skip(long length) throws IOException {
      assertOpen();
      if (length <= 0)
         return 0;
      // Never skip past the end of the file.
      int bytesToSkip = Math.min((int)length, getBytesRemainingInStream());
      index += bytesToSkip;
      if (bytesToSkip <= getBytesRemainingInChunk()) {
         // Still inside the loaded chunk: just advance the local offset.
         localIndex += bytesToSkip;
      } else {
         // Landed in a different chunk: fetch it and recompute the local offset.
         getChunk();
         localIndex = ModularArithmetic.mod(index, chunkSize);
      }
      return bytesToSkip;
   }

   int position() {
      return index;
   }

   // Random-access seek used by ReadableGridFileChannel.
   void position(long newPosition) throws IOException {
      if (newPosition < 0) {
         throw new IllegalArgumentException("newPosition may not be negative");
      }
      assertOpen();
      int newPos = (int) newPosition;
      int chunkNumberOfNewPosition = getChunkNumber(newPos);
      // Refetch only when the target chunk differs from the chunk containing the
      // last consumed byte (index - 1). NOTE(review): this presumes currentBuffer
      // holds that chunk; confirm the boundary case where nothing has been read yet.
      if (getChunkNumber(index - 1) != chunkNumberOfNewPosition) {
         currentBuffer = fileChunkMapper.fetchChunk(chunkNumberOfNewPosition);
      }
      index = newPos;
      localIndex = ModularArithmetic.mod(newPos, chunkSize);
   }

   @Override
   public int available() throws IOException {
      assertOpen();
      // NOTE(review): returns only what is left in the currently loaded chunk
      // (0 if no chunk is loaded), not the bytes remaining in the whole stream.
      // InputStream.available() allows such an estimate, but callers should not
      // rely on it to detect end of file.
      return getBytesRemainingInChunk(); // Return bytes in chunk
   }

   @Override
   public void close() throws IOException {
      localIndex = index = 0;
      streamClosed = true;
   }

   private boolean isEndReached() {
      return index == fSize;
   }

   private void assertOpen() throws IOException{
      if (streamClosed) throw new IOException("Stream is closed");
   }

   private int getBytesRemainingInChunk() {
      return currentBuffer == null ? 0 : currentBuffer.length - localIndex;
   }

   private int getBytesRemainingInStream() {
      return fSize - index;
   }

   // Loads the chunk containing the current position and rewinds the local offset.
   private void getChunk() {
      currentBuffer = fileChunkMapper.fetchChunk(getChunkNumber());
      localIndex = 0;
   }

   private int getChunkNumber() {
      return getChunkNumber(index);
   }

   // Negative positions map to chunk -1 (used by position() when index == 0).
   private int getChunkNumber(int position) {
      return position < 0 ? -1 : (position / chunkSize);
   }

   int getFileSize() {
      return fSize;
   }
}
| 4,185
| 25.833333
| 79
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/ReadableGridFileChannel.java
|
package org.infinispan.gridfs;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;
import org.infinispan.Cache;
/**
* @author Marko Luksa
*/
public class ReadableGridFileChannel implements ReadableByteChannel {

   private final GridInputStream in;
   private final ReadableByteChannel channel;

   ReadableGridFileChannel(GridFile file, Cache<String, byte[]> cache) {
      in = new GridInputStream(file, cache);
      channel = Channels.newChannel(in);
   }

   @Override
   public boolean isOpen() {
      return channel.isOpen();
   }

   @Override
   public void close() throws IOException {
      channel.close();
   }

   @Override
   public int read(ByteBuffer dst) throws IOException {
      failIfClosed();
      return channel.read(dst);
   }

   /** Returns the current read position within the file. */
   public long position() throws IOException {
      failIfClosed();
      return in.position();
   }

   /** Moves the read position to {@code newPosition} (random access). */
   public void position(long newPosition) throws IOException {
      failIfClosed();
      in.position(newPosition);
   }

   /** Size of the underlying file, as seen when the channel was opened. */
   public long size() {
      return in.getFileSize();
   }

   private void failIfClosed() throws ClosedChannelException {
      if (!isOpen())
         throw new ClosedChannelException();
   }
}
| 1,408
| 22.483333
| 72
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/GridFile.java
|
package org.infinispan.gridfs;
import static java.lang.String.format;
import java.io.Externalizable;
import java.io.File;
import java.io.FileFilter;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.net.URI;
import java.net.URL;
import java.util.Collection;
import java.util.Date;
import java.util.LinkedList;
import java.util.Set;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.jgroups.util.Util;
/**
* Subclass of File to iterate through directories and files in a grid
*
* @author Bela Ban
* @author Marko Luksa
*/
public class GridFile extends File {
   private static final long serialVersionUID = 552534285862004134L;

   // The root directory ("/") is never stored in the metadata cache; this constant stands in for it.
   private static final Metadata ROOT_DIR_METADATA = new Metadata(0, 0, 0, Metadata.DIR);
   private static final char SEPARATOR_CHAR = '/';
   private static final String SEPARATOR = "" + SEPARATOR_CHAR;

   private final AdvancedCache<String, Metadata> metadataCache;
   private final GridFilesystem fs;
   private final String path;   // normalized path; always uses '/' regardless of platform
   private int chunkSize;       // power of two; replaced by the stored value if the file already exists

   /**
    * Creates a GridFile instance
    * @param pathname path of file
    * @param metadataCache cache to use to store metadata
    * @param chunkSize chunk size. Will be upgraded to next highest power of two.
    * @param fs GridFilesystem instance
    */
   GridFile(String pathname, Cache<String, Metadata> metadataCache, int chunkSize, GridFilesystem fs) {
      super(pathname);
      this.fs = fs;
      this.path = formatPath(pathname);
      this.metadataCache = metadataCache.getAdvancedCache();
      this.chunkSize = ModularArithmetic.CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO ? chunkSize : org.infinispan.commons.util.Util.findNextHighestPowerOfTwo(chunkSize);
      initChunkSizeFromMetadata();
   }

   GridFile(String parent, String child, Cache<String, Metadata> metadataCache, int chunkSize, GridFilesystem fs) {
      this(parent + File.separator + child, metadataCache, chunkSize, fs);
   }

   GridFile(File parent, String child, Cache<String, Metadata> metadataCache, int chunkSize, GridFilesystem fs) {
      this(parent.getPath(), child, metadataCache, chunkSize, fs);
   }

   @Override
   public String getName() {
      return filename(getPath());
   }

   /**
    * Returns path of this file. To avoid issues arising from file separator differences between different
    * operative systems, the path returned always uses Unix-like path separator, '/' character. Any client
    * code calling this method should bear that if disecting the path.
    *
    * @return String containing path of file.
    */
   @Override
   public String getPath() {
      return path;
   }

   @Override
   public String getAbsolutePath() {
      return convertToAbsolute(getPath());
   }

   @Override
   public File getAbsoluteFile() {
      return new GridFile(getAbsolutePath(), metadataCache, getChunkSize(), fs);
   }

   @Override
   public String getCanonicalPath() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public File getCanonicalFile() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean isAbsolute() {
      return getPath().startsWith(SEPARATOR);
   }

   @Override
   public boolean renameTo(File dest) {
      // implementing this based on the current storage structure is complex and very expensive.
      // a redesign is nessesary, especially must be avoid storing paths as key
      // maybe file name + reference to the parent in metadata and as key is used a uuid, so movements or renames
      // are only affected on the current file. metadata should also contain list of uuids of the corresponding data chunks
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public void deleteOnExit() {
      // there exists no pre-CacheShutdown event, so unable to remove the entry
      throw new UnsupportedOperationException("Not implemented");
   }

   // Prefixes a leading '/' if absent; paths are already normalized to '/' separators.
   private String convertToAbsolute(String path) {
      if (!path.startsWith(SEPARATOR))
         return SEPARATOR + path;
      else
         return path;
   }

   // Normalizes separators to '/' and strips a single trailing separator.
   private static String formatPath(String path) {
      if (path == null)
         return null;
      // Regardless of platform, always use the same separator char, otherwise
      // keys might not be found when transfering metadata between different OS
      path = path.replace('\\', SEPARATOR_CHAR);
      if (path.endsWith(SEPARATOR)) {
         int index = path.lastIndexOf(SEPARATOR);
         if (index != -1)
            path = path.substring(0, index);
      }
      return path;
   }

   @Override
   public long length() {
      Metadata metadata = getMetadata();
      if (metadata != null)
         return metadata.length;
      return 0;
   }

   private Metadata getMetadata() {
      if (isRootDir()) {
         return ROOT_DIR_METADATA;
      }
      return metadataCache.get(getAbsolutePath());
   }

   private boolean isRootDir() {
      return "/".equals(getAbsolutePath());
   }

   // Updates the stored length (and modification time) in the metadata cache;
   // called by the output stream on flush/close.
   void setLength(int newLength) {
      Metadata metadata = getMetadata();
      if (metadata == null)
         throw new IllegalStateException("metadata for " + getAbsolutePath() + " not found.");
      metadata.setLength(newLength);
      metadata.setModificationTime(System.currentTimeMillis());
      metadataCache.put(getAbsolutePath(), metadata);
   }

   /**
    * Guaranteed to be a power of two
    */
   public int getChunkSize() {
      return chunkSize;
   }

   @Override
   public boolean createNewFile() throws IOException {
      if (exists())
         return false;
      if (!checkParentDirs(getAbsolutePath(), false))
         throw new IOException("Cannot create file " + getAbsolutePath() + " (parent dir does not exist)");
      metadataCache.put(getAbsolutePath(), new Metadata(0, System.currentTimeMillis(), chunkSize, Metadata.FILE));
      return true;
   }

   @Override
   public boolean delete() {
      if (!exists())
         return false;
      if (isDirectory() && hasChildren())
         return false;
      fs.remove(getAbsolutePath()); // removes all the chunks belonging to the file
      metadataCache.remove(getAbsolutePath()); // removes the metadata information
      return true;
   }

   private boolean hasChildren() {
      File[] files = listFiles();
      return files != null && files.length > 0;
   }

   @Override
   public boolean mkdir() {
      return mkdir(false);
   }

   @Override
   public boolean mkdirs() {
      return mkdir(true);
   }

   // Creates this directory; with alsoCreateParentDirs, missing ancestors are created too.
   // Returns false (instead of throwing) when a path component is an existing file.
   private boolean mkdir(boolean alsoCreateParentDirs) {
      try {
         boolean parentsExist = checkParentDirs(getAbsolutePath(), alsoCreateParentDirs);
         if (!parentsExist)
            return false;
         metadataCache.put(getAbsolutePath(),new Metadata(0, System.currentTimeMillis(), chunkSize, Metadata.DIR));
         return true;
      }
      catch (IOException e) {
         return false;
      }
   }

   @Override
   public boolean exists() {
      return getMetadata() != null;
   }

   @Override
   public String getParent() {
      return formatPath(super.getParent());
   }

   @Override
   public File getParentFile() {
      String parentPath = getParent();
      if (parentPath == null)
         return null;
      return new GridFile(parentPath, metadataCache, chunkSize, fs);
   }

   @Override
   public long lastModified() {
      Metadata metadata = getMetadata();
      return metadata == null ? 0 : metadata.getModificationTime();
   }

   @Override
   public boolean setLastModified(long time) {
      if (time < 0){
         throw new IllegalArgumentException("Negative time");
      }
      Metadata metadata = getMetadata();
      if(metadata == null){
         return false;
      }
      metadata.setModificationTime(time);
      metadataCache.put(getAbsolutePath(), metadata);
      return true;
   }

   @Override
   public String[] list() {
      return list(null);
   }

   @Override
   public String[] list(FilenameFilter filter) {
      return _list(filter);
   }

   @Override
   public File[] listFiles() {
      return listFiles((FilenameFilter) null);
   }

   @Override
   public File[] listFiles(FilenameFilter filter) {
      return _listFiles(filter);
   }

   @Override
   public File[] listFiles(FileFilter filter) {
      return _listFiles(filter);
   }

   @Override
   public boolean isDirectory() {
      Metadata metadata = getMetadata();
      return metadata != null && metadata.isDirectory();
   }

   @Override
   public boolean isFile() {
      Metadata metadata = getMetadata();
      return metadata != null && metadata.isFile();
   }

   // If the file already exists, its stored chunk size wins over the constructor argument.
   protected void initChunkSizeFromMetadata() {
      Metadata metadata = getMetadata();
      if (metadata != null)
         this.chunkSize = metadata.getChunkSize();
   }

   // Shared implementation for both FileFilter and FilenameFilter overloads.
   protected File[] _listFiles(Object filter) {
      String[] filenames = _list(filter);
      return convertFilenamesToFiles(filenames);
   }

   private File[] convertFilenamesToFiles(String[] files) {
      if (files == null)
         return null;
      File[] retval = new File[files.length];
      for (int i = 0; i < files.length; i++)
         retval[i] = new GridFile(this, files[i], metadataCache, chunkSize, fs);
      return retval;
   }

   // Lists direct children by scanning ALL metadata keys.
   // NOTE(review): keySet() iteration is O(number of files in the whole grid),
   // not O(children of this directory) — see renameTo() for the planned redesign.
   protected String[] _list(Object filter) {
      if (!isDirectory())
         return null;
      Set<String> paths = metadataCache.keySet();
      Collection<String> list = new LinkedList<String>();
      for (String path : paths) {
         if (isChildOf(getAbsolutePath(), path)) {
            if (filter instanceof FilenameFilter && !((FilenameFilter) filter).accept(this, filename(path)))
               continue;
            else if (filter instanceof FileFilter && !((FileFilter) filter).accept(new File(path)))
               continue;
            list.add(filename(path));
         }
      }
      return list.toArray(new String[0]);
   }

   /**
    * Verifies whether child is a child (dir or file) of parent
    *
    * @param parent
    * @param child
    * @return True if child is a child, false otherwise
    */
   protected static boolean isChildOf(String parent, String child) {
      if (parent == null || child == null)
         return false;
      if (!child.startsWith((parent.endsWith(SEPARATOR) ? parent : parent + SEPARATOR)))
         return false;
      if (child.length() <= parent.length())
         return false;
      int from = parent.equals(SEPARATOR) ? parent.length() : parent.length() + 1;
      // if(from-1 > child.length())
      // return false;
      // A direct child has at most one remaining path component after the parent prefix.
      String[] comps = Util.components(child.substring(from), SEPARATOR);
      return comps != null && comps.length <= 1;
   }

   // Last path component of fullPath, or "" when there is none.
   protected static String filename(String fullPath) {
      String[] comps = Util.components(fullPath, SEPARATOR);
      return comps != null ? comps[comps.length - 1] : "";
   }

   /**
    * Checks whether the parent directories are present (and are directories). If createIfAbsent is true,
    * creates missing dirs
    *
    * @param path
    * @param createIfAbsent
    * @return
    */
   protected boolean checkParentDirs(String path, boolean createIfAbsent) throws IOException {
      String[] components = Util.components(path, SEPARATOR);
      if (components == null)
         return false;
      if (components.length == 1) // no parent directories to create, e.g. "data.txt"
         return true;

      // Rebuild the path component by component, checking/creating each ancestor.
      StringBuilder sb = new StringBuilder();
      boolean first = true;
      for (int i = 0; i < components.length - 1; i++) {
         String tmp = components[i];
         if (!tmp.equals(SEPARATOR)) {
            if (first)
               first = false;
            else
               sb.append(SEPARATOR);
         }
         sb.append(tmp);
         String comp = sb.toString();
         if (comp.equals(SEPARATOR))
            continue;
         Metadata val = exists(comp);
         if (val != null) {
            if (val.isFile())
               throw new IOException(format("cannot create %s as component %s is a file", path, comp));
         } else if (createIfAbsent) {
            metadataCache.put(comp, new Metadata(0, System.currentTimeMillis(), chunkSize, Metadata.DIR));
         } else {
            // Couldn't find a component and we're not allowed to create components!
            return false;
         }
      }
      // check that we have found all the components we need.
      return true;
   }

   @Override
   public boolean equals(Object obj) {
      if (obj instanceof GridFile) {
         return compareTo((GridFile)obj) == 0;
      }
      return false;
   }

   @Override
   public boolean canRead() {
      return isFile();
   }

   @Override
   public boolean canWrite() {
      return isFile();
   }

   @Override
   public boolean isHidden() {
      return false;
   }

   @Override
   public boolean canExecute() {
      return false;
   }

   @Override
   public int compareTo(File file) {
      return getAbsolutePath().compareTo(file.getAbsolutePath());
   }

   @Override
   public int hashCode() {
      return getAbsolutePath().hashCode();
   }

   @Override
   public String toString() {
      return "GridFile{" +
            "path='" + path + '\'' +
            '}';
   }

   @Override
   public URL toURL() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public URI toURI() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setReadOnly() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setWritable(boolean writable, boolean ownerOnly) {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setWritable(boolean writable) {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setReadable(boolean readable, boolean ownerOnly) {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setReadable(boolean readable) {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setExecutable(boolean executable, boolean ownerOnly) {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public boolean setExecutable(boolean executable) {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public long getTotalSpace() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public long getFreeSpace() {
      throw new UnsupportedOperationException("Not implemented");
   }

   @Override
   public long getUsableSpace() {
      throw new UnsupportedOperationException("Not implemented");
   }

   // Returns the stored metadata for key, or null when absent.
   private Metadata exists(String key) {
      return metadataCache.get(key);
   }

   /**
    * Per-file metadata stored in the metadata cache: length, modification time,
    * chunk size and a FILE/DIR flag. Serialized via Externalizable.
    */
   public static class Metadata implements Externalizable {
      public static final byte FILE = 1;
      public static final byte DIR = 1 << 1;

      private int length = 0;
      private long modificationTime = 0;
      private int chunkSize;
      private byte flags;

      // Public no-arg constructor required by Externalizable; real values come from readExternal().
      public Metadata() {
         chunkSize = 1;
         flags = 0;
      }

      public Metadata(int length, long modificationTime, int chunkSize, byte flags) {
         this.length = length;
         this.modificationTime = modificationTime;
         this.chunkSize = ModularArithmetic.CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO ? chunkSize : org.infinispan.commons.util.Util.findNextHighestPowerOfTwo(chunkSize);
         this.flags = flags;
      }

      public int getLength() {
         return length;
      }

      public void setLength(int length) {
         this.length = length;
      }

      public long getModificationTime() {
         return modificationTime;
      }

      public void setModificationTime(long modificationTime) {
         this.modificationTime = modificationTime;
      }

      public int getChunkSize() {
         return chunkSize;
      }

      public boolean isFile() {
         return Util.isFlagSet(flags, FILE);
      }

      public boolean isDirectory() {
         return Util.isFlagSet(flags, DIR);
      }

      public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append(getType());
         if (isFile())
            sb.append(", len=").append(Util.printBytes(length)).append(", chunkSize=").append(chunkSize);
         sb.append(", modTime=").append(new Date(modificationTime));
         return sb.toString();
      }

      private String getType() {
         if (isFile())
            return "file";
         if (isDirectory())
            return "dir";
         return "n/a";
      }

      @Override
      public void writeExternal(ObjectOutput out) throws IOException {
         out.writeInt(length);
         out.writeLong(modificationTime);
         out.writeInt(chunkSize);
         out.writeByte(flags);
      }

      @Override
      public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
         length = in.readInt();
         modificationTime = in.readLong();
         chunkSize = in.readInt();
         flags = in.readByte();
      }
   }
}
| 17,277
| 27.3711
| 164
|
java
|
null |
infinispan-main/gridfs/src/main/java/org/infinispan/gridfs/GridFilesystem.java
|
package org.infinispan.gridfs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.infinispan.Cache;
import org.infinispan.commons.util.Util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Entry point for GridFile and GridInputStream / GridOutputStream
*
* @author Bela Ban
* @author Marko Luksa
*/
public class GridFilesystem {
   private static final Log log = LogFactory.getLog(GridFilesystem.class);

   protected final Cache<String, byte[]> data;                 // file contents, keyed by "<path>.#<chunk>"
   protected final Cache<String, GridFile.Metadata> metadata;  // per-file metadata, keyed by absolute path
   protected final int defaultChunkSize;                       // power of two (unless the compat flag is set)

   /**
    * Creates an instance. The data and metadata caches should already have been setup and started
    *
    * @param data the cache where the actual file contents are stored
    * @param metadata the cache where file meta-data is stored
    * @param defaultChunkSize the default size of the file chunks
    */
   public GridFilesystem(Cache<String, byte[]> data, Cache<String, GridFile.Metadata> metadata, int defaultChunkSize) {
      // An async metadata cache can lose ordering guarantees between nodes, so warn about it.
      if(metadata.getCacheConfiguration().clustering().cacheMode().isClustered() &&
            !metadata.getCacheConfiguration().clustering().cacheMode().isSynchronous()){
         log.warnGridFSMetadataCacheRequiresSync();
      }
      this.data = data;
      this.metadata = metadata;
      this.defaultChunkSize = ModularArithmetic.CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO ? defaultChunkSize : Util.findNextHighestPowerOfTwo(defaultChunkSize);
   }

   // Default chunk size: legacy 8000 bytes in compatibility mode, otherwise 8192 (power of two).
   public GridFilesystem(Cache<String, byte[]> data, Cache<String, GridFile.Metadata> metadata) {
      this(data, metadata, ModularArithmetic.CANNOT_ASSUME_DENOM_IS_POWER_OF_TWO ? 8000 : 8192);
   }

   /**
    * Returns the file denoted by pathname.
    * @param pathname the full path of the requested file
    * @return the File stored at pathname
    */
   public File getFile(String pathname) {
      return getFile(pathname, defaultChunkSize);
   }

   /**
    * Returns the file denoted by pathname. If the file does not yet exist, it is initialized with the given chunkSize.
    * However, if the file at pathname already exists, the chunkSize parameter is ignored and the file's actual
    * chunkSize is used.
    * @param pathname the full path of the requested file
    * @param chunkSize the size of the file's chunks. This parameter is only used for non-existing files.
    * @return the File stored at pathname
    */
   public File getFile(String pathname, int chunkSize) {
      return new GridFile(pathname, metadata, chunkSize, this);
   }

   public File getFile(String parent, String child) {
      return getFile(parent, child, defaultChunkSize);
   }

   public File getFile(String parent, String child, int chunkSize) {
      return new GridFile(parent, child, metadata, chunkSize, this);
   }

   public File getFile(File parent, String child) {
      return getFile(parent, child, defaultChunkSize);
   }

   public File getFile(File parent, String child, int chunkSize) {
      return new GridFile(parent, child, metadata, chunkSize, this);
   }

   /**
    * Opens an OutputStream for writing to the file denoted by pathname. If a file at pathname already exists, writing
    * to the returned OutputStream will overwrite the contents of the file.
    * @param pathname the path to write to
    * @return an OutputStream for writing to the file at pathname
    * @throws IOException if an error occurs
    */
   public OutputStream getOutput(String pathname) throws IOException {
      return getOutput(pathname, false, defaultChunkSize);
   }

   /**
    * Opens an OutputStream for writing to the file denoted by pathname. The OutputStream can either overwrite the
    * existing file or append to it.
    * @param pathname the path to write to
    * @param append if true, the bytes written to the OutputStream will be appended to the end of the file. If false,
    *               the bytes will overwrite the original contents.
    * @return an OutputStream for writing to the file at pathname
    * @throws IOException if an error occurs
    */
   public OutputStream getOutput(String pathname, boolean append) throws IOException {
      return getOutput(pathname, append, defaultChunkSize);
   }

   /**
    * Opens an OutputStream for writing to the file denoted by pathname.
    * @param pathname the file to write to
    * @param append if true, the bytes written to the OutputStream will be appended to the end of the file
    * @param chunkSize the size of the file's chunks. This parameter is honored only when the file at pathname does
    *                  not yet exist. If the file already exists, the file's own chunkSize has precedence.
    * @return the OutputStream for writing to the file
    * @throws IOException if the file is a directory, cannot be created or some other error occurs
    */
   public OutputStream getOutput(String pathname, boolean append, int chunkSize) throws IOException {
      GridFile file = (GridFile) getFile(pathname, chunkSize);
      checkIsNotDirectory(file);
      createIfNeeded(file);
      return new GridOutputStream(file, append, data);
   }

   /**
    * Opens an OutputStream for writing to the given file.
    * @param file the file to write to
    * @return an OutputStream for writing to the file
    * @throws IOException if an error occurs
    */
   public OutputStream getOutput(GridFile file) throws IOException {
      checkIsNotDirectory(file);
      createIfNeeded(file);
      return new GridOutputStream(file, false, data);
   }

   // Streams/channels can only be opened on files, never on directories.
   private void checkIsNotDirectory(GridFile file) throws FileNotFoundException {
      if (file.isDirectory()) {
         throw new FileNotFoundException(file + " is a directory.");
      }
   }

   // Creates the file if it does not exist yet; failing to create it is an error.
   private void createIfNeeded(GridFile file) throws IOException {
      if (!file.exists() && !file.createNewFile())
         throw new IOException("creation of " + file + " failed");
   }

   /**
    * Opens an InputStream for reading from the file denoted by pathname.
    * @param pathname the full path of the file to read from
    * @return an InputStream for reading from the file
    * @throws FileNotFoundException if the file does not exist or is a directory
    */
   public InputStream getInput(String pathname) throws FileNotFoundException {
      GridFile file = (GridFile) getFile(pathname);
      checkFileIsReadable(file);
      return new GridInputStream(file, data);
   }

   private void checkFileIsReadable(GridFile file) throws FileNotFoundException {
      checkFileExists(file);
      checkIsNotDirectory(file);
   }

   private void checkFileExists(GridFile file) throws FileNotFoundException {
      if (!file.exists())
         throw new FileNotFoundException(file.getPath());
   }

   /**
    * Opens an InputStream for reading from the given file.
    * @param file the file to open for reading
    * @return an InputStream for reading from the file
    * @throws FileNotFoundException if the file does not exist or is a directory
    */
   public InputStream getInput(File file) throws FileNotFoundException {
      return file != null ? getInput(file.getPath()) : null;
   }

   /**
    * Opens a ReadableGridFileChannel for reading from the file denoted by the given file path. One of the benefits
    * of using a channel over an InputStream is the possibility to randomly seek to any position in the file (see
    * #ReadableGridChannel.position()).
    * @param pathname path of the file to open for reading
    * @return a ReadableGridFileChannel for reading from the file
    * @throws FileNotFoundException if the file does not exist or is a directory
    */
   public ReadableGridFileChannel getReadableChannel(String pathname) throws FileNotFoundException {
      GridFile file = (GridFile) getFile(pathname);
      checkFileIsReadable(file);
      return new ReadableGridFileChannel(file, data);
   }

   /**
    * Opens a WritableGridFileChannel for writing to the file denoted by pathname. If a file at pathname already exists,
    * writing to the returned channel will overwrite the contents of the file.
    * @param pathname the path to write to
    * @return a WritableGridFileChannel for writing to the file at pathname
    * @throws IOException if an error occurs
    */
   public WritableGridFileChannel getWritableChannel(String pathname) throws IOException {
      return getWritableChannel(pathname, false);
   }

   /**
    * Opens a WritableGridFileChannel for writing to the file denoted by pathname. The channel can either overwrite the
    * existing file or append to it.
    * @param pathname the path to write to
    * @param append if true, the bytes written to the WritableGridFileChannel will be appended to the end of the file.
    *               If false, the bytes will overwrite the original contents.
    * @return a WritableGridFileChannel for writing to the file at pathname
    * @throws IOException if an error occurs
    */
   public WritableGridFileChannel getWritableChannel(String pathname, boolean append) throws IOException {
      return getWritableChannel(pathname, append, defaultChunkSize);
   }

   /**
    * Opens a WritableGridFileChannel for writing to the file denoted by pathname.
    * @param pathname the file to write to
    * @param append if true, the bytes written to the channel will be appended to the end of the file
    * @param chunkSize the size of the file's chunks. This parameter is honored only when the file at pathname does
    *                  not yet exist. If the file already exists, the file's own chunkSize has precedence.
    * @return a WritableGridFileChannel for writing to the file
    * @throws IOException if the file is a directory, cannot be created or some other error occurs
    */
   public WritableGridFileChannel getWritableChannel(String pathname, boolean append, int chunkSize) throws IOException {
      GridFile file = (GridFile) getFile(pathname, chunkSize);
      checkIsNotDirectory(file);
      createIfNeeded(file);
      return new WritableGridFileChannel(file, data, append);
   }

   /**
    * Removes the file denoted by absolutePath.
    * @param absolutePath the absolute path of the file to remove
    */
   void remove(String absolutePath) {
      if (absolutePath == null)
         return;
      GridFile.Metadata md = metadata.get(absolutePath);
      if (md == null)
         return;

      // length/chunkSize + 1 over-approximates the chunk count when the length is an
      // exact multiple of chunkSize; the extra remove() of a nonexistent key is harmless.
      int numChunks = md.getLength() / md.getChunkSize() + 1;
      for (int i = 0; i < numChunks; i++)
         data.remove(FileChunkMapper.getChunkKey(absolutePath, i));
   }
}
| 10,629
| 41.350598
| 154
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/AvailableReliabilitySplitBrainTest.java
|
package org.infinispan.lock;
import static java.util.Arrays.asList;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.configuration.Reliability;
import org.infinispan.manager.EmbeddedCacheManager;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.AvailableReliabilitySplitBrainTest")
public class AvailableReliabilitySplitBrainTest extends BaseClusteredLockSplitBrainTest {

   public AvailableReliabilitySplitBrainTest() {
      super();
      // AVAILABLE reliability: clustered locks keep operating in every partition,
      // even one without a majority of members (contrast with CONSISTENT).
      reliability = Reliability.AVAILABLE;
      numOwner = 6;
      cacheMode = CacheMode.DIST_SYNC;
   }

   @Override
   protected String getLockName() {
      return "AvailableReliabilitySplitBrainTest";
   }

   // Defining a lock on every node must succeed even while the cluster is split 3/3.
   @Test
   public void testLockCreationWhenPartitionHappening() {
      ClusteredLockManager clusteredLockManager = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      await(clusteredLockManager.remove(getLockName()));

      splitCluster(new int[]{0, 1, 2}, new int[]{3, 4, 5});

      for (EmbeddedCacheManager cm : getCacheManagers()) {
         ClusteredLockManager clm = EmbeddedClusteredLockManagerFactory.from(cm);
         clm.defineLock(getLockName());
      }
   }

   // After a 3/3 split (neither side has a majority), tryLock/unlock must still
   // work on all six nodes because reliability is AVAILABLE.
   @Test
   public void testLockUseAfterPartitionWithoutMajority() {
      ClusteredLockManager clm0 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      ClusteredLockManager clm1 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(1));
      ClusteredLockManager clm2 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(2));
      ClusteredLockManager clm3 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(3));
      ClusteredLockManager clm4 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(4));
      ClusteredLockManager clm5 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(5));
      clm0.defineLock(getLockName());
      assertTrue(clm0.isDefined(getLockName()));

      splitCluster(new int[]{0, 1, 2}, new int[]{3, 4, 5});
      partition(0).assertDegradedMode();
      partition(1).assertDegradedMode();

      ClusteredLock lock0 = clm0.get(getLockName());
      ClusteredLock lock1 = clm1.get(getLockName());
      ClusteredLock lock2 = clm2.get(getLockName());
      ClusteredLock lock3 = clm3.get(getLockName());
      ClusteredLock lock4 = clm4.get(getLockName());
      ClusteredLock lock5 = clm5.get(getLockName());

      asList(lock0, lock1, lock2, lock3, lock4, lock5).forEach(lock -> {
         assertNotNull(lock);
         Boolean tryLock = await(lock.tryLock());
         assertTrue(tryLock);
         await(lock.unlock());
      });
   }
}
| 2,965
| 38.026316
| 118
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ConfigurationTest.java
|
package org.infinispan.lock;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lock.configuration.ClusteredLockManagerConfiguration;
import org.infinispan.lock.configuration.ClusteredLockManagerConfigurationBuilder;
import org.infinispan.lock.configuration.Reliability;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.lock.impl.ClusteredLockModuleLifecycle;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.test.AbstractCacheTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.TransactionMode;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
 * Configuration test: verifies that the {@link ClusteredLockManagerConfiguration} module
 * settings (reliability, number of owners) are reflected in the internal clustered-lock
 * cache, and that invalid settings are rejected when the configuration is built.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
@Test(groups = "unit", testName = "clusteredLock.ConfigurationTest")
public class ConfigurationTest extends AbstractCacheTest {

   public void testDefaultConfiguration() {
      TestingUtil.withCacheManager(() -> buildCacheManager(GlobalConfigurationBuilder.defaultClusteredBuilder()),
            cacheManager -> {
               ClusteredLockManagerConfiguration configuration = ClusteredLockManagerConfigurationBuilder.defaultConfiguration();
               Configuration cacheConfiguration = getClusteredLockCacheConfiguration(cacheManager);
               assertLockAndCacheConfiguration(configuration, cacheConfiguration);
            });
   }

   public void testReliabilityAvailable() {
      final GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      final ClusteredLockManagerConfiguration config = builder.addModule(ClusteredLockManagerConfigurationBuilder.class)
            .reliability(Reliability.AVAILABLE).create();
      assertCacheReflectsConfig(builder, config);
   }

   public void testReliabilityConsistent() {
      final GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      final ClusteredLockManagerConfiguration config = builder.addModule(ClusteredLockManagerConfigurationBuilder.class)
            .reliability(Reliability.CONSISTENT).create();
      assertCacheReflectsConfig(builder, config);
   }

   public void testNumOwner() {
      final GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      final ClusteredLockManagerConfiguration config = builder.addModule(ClusteredLockManagerConfigurationBuilder.class)
            .numOwner(5).create();
      assertCacheReflectsConfig(builder, config);
   }

   public void testMinusOneNumberOfOwner() {
      // numOwner < 0 means "own everywhere": the lock cache becomes replicated.
      final GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      final ClusteredLockManagerConfiguration config = builder.addModule(ClusteredLockManagerConfigurationBuilder.class)
            .numOwner(-1).create();
      assertCacheReflectsConfig(builder, config);
   }

   public void testInvalidReliability() {
      GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      ClusteredLockManagerConfigurationBuilder clBuilder = builder.addModule(ClusteredLockManagerConfigurationBuilder.class);
      clBuilder.reliability(null);
      assertClusteredLockConfigurationException(builder);
   }

   public void testInvalidNumOwner() {
      GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      ClusteredLockManagerConfigurationBuilder clBuilder = builder.addModule(ClusteredLockManagerConfigurationBuilder.class);
      clBuilder.numOwner(0);
      assertClusteredLockConfigurationException(builder);
   }

   /**
    * Builds a cache manager from {@code builder} and asserts the internal lock cache
    * configuration matches {@code config}. Extracted to remove the duplication shared
    * by the reliability/numOwner tests above.
    */
   private static void assertCacheReflectsConfig(GlobalConfigurationBuilder builder,
                                                 ClusteredLockManagerConfiguration config) {
      TestingUtil.withCacheManager(() -> buildCacheManager(builder), cacheManager -> {
         Configuration cacheConfiguration = getClusteredLockCacheConfiguration(cacheManager);
         assertLockAndCacheConfiguration(config, cacheConfiguration);
      });
   }

   private static Configuration getClusteredLockCacheConfiguration(EmbeddedCacheManager cacheManager) {
      return cacheManager.getCache(ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME).getCacheConfiguration();
   }

   private static EmbeddedCacheManager buildCacheManager(GlobalConfigurationBuilder builder) {
      return new DefaultCacheManager(builder.build());
   }

   /** Asserts each lock-manager setting maps onto the expected internal cache setting. */
   private static void assertLockAndCacheConfiguration(ClusteredLockManagerConfiguration config,
                                                       Configuration cacheConfig) {
      // numOwners < 0 selects a replicated cache, otherwise a distributed one.
      assertEquals(config.numOwners() < 0 ? CacheMode.REPL_SYNC : CacheMode.DIST_SYNC, cacheConfig.clustering().cacheMode());
      if (config.numOwners() > 0) {
         assertEquals(config.numOwners(), cacheConfig.clustering().hash().numOwners());
      }
      // CONSISTENT reliability denies reads/writes on split; AVAILABLE allows them.
      assertEquals(config.reliability() == Reliability.CONSISTENT ? PartitionHandling.DENY_READ_WRITES : PartitionHandling.ALLOW_READ_WRITES,
            cacheConfig.clustering().partitionHandling().whenSplit());
      assertFalse(cacheConfig.clustering().l1().enabled());
      assertEquals(TransactionMode.NON_TRANSACTIONAL, cacheConfig.transaction().transactionMode());
   }

   /** Expects {@code builder.build()} to fail with a configuration exception. */
   private void assertClusteredLockConfigurationException(GlobalConfigurationBuilder builder) {
      try {
         builder.build();
         AssertJUnit.fail("CacheConfigurationExpected");
      } catch (ClusteredLockException | CacheConfigurationException expected) {
         log.trace("Expected", expected);
      }
   }
}
| 6,386
| 48.898438
| 141
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ConsistentReliabilitySplitBrainTest.java
|
package org.infinispan.lock;
import static java.util.Arrays.asList;
import static org.infinispan.commons.test.Exceptions.expectCompletionException;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityException;
import org.testng.annotations.Test;
/**
 * Split-brain tests for clustered locks with the default {@code Reliability.CONSISTENT}
 * configuration: minority partitions must reject lock operations with an
 * {@link AvailabilityException}, while a majority partition keeps working.
 */
@Test(groups = "functional", testName = "clusteredLock.ConsistentReliabilitySplitBrainTest")
public class ConsistentReliabilitySplitBrainTest extends BaseClusteredLockSplitBrainTest {

   @Override
   protected String getLockName() {
      return "ConsistentReliabilitySplitBrainTest";
   }

   /** After a 3/3 split (no majority anywhere) defining a lock must fail on every node. */
   @Test
   public void testLockCreationWhenPartitionHappening() {
      ClusteredLockManager clusteredLockManager = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      await(clusteredLockManager.remove(getLockName()));
      splitCluster(new int[]{0, 1, 2}, new int[]{3, 4, 5});
      for (EmbeddedCacheManager cm : getCacheManagers()) {
         ClusteredLockManager clm = EmbeddedClusteredLockManagerFactory.from(cm);
         eventually(() -> availabilityExceptionRaised(clm));
      }
   }

   /** With no majority on either side, tryLock must fail everywhere with AvailabilityException. */
   @Test
   public void testLockUseAfterPartitionWithoutMajority() {
      ClusteredLockManager clm0 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      ClusteredLockManager clm1 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(1));
      ClusteredLockManager clm2 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(2));
      ClusteredLockManager clm3 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(3));
      ClusteredLockManager clm4 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(4));
      ClusteredLockManager clm5 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(5));
      clm0.defineLock(getLockName());
      assertTrue(clm0.isDefined(getLockName()));
      ClusteredLock lock0 = clm0.get(getLockName());
      ClusteredLock lock1 = clm1.get(getLockName());
      ClusteredLock lock2 = clm2.get(getLockName());
      ClusteredLock lock3 = clm3.get(getLockName());
      ClusteredLock lock4 = clm4.get(getLockName());
      ClusteredLock lock5 = clm5.get(getLockName());
      splitCluster(new int[]{0, 1, 2}, new int[]{3, 4, 5});
      // Wait for degraded topologies to work around ISPN-9008
      partition(0).assertDegradedMode();
      partition(1).assertDegradedMode();
      asList(lock0, lock1, lock2, lock3, lock4, lock5).forEach(lock -> {
         assertNotNull(lock);
         expectCompletionException(ClusteredLockException.class, AvailabilityException.class, lock.tryLock());
      });
   }

   /**
    * On a 4/2 split, nodes in the 4-node majority can still use the lock while the
    * 2-node minority gets AvailabilityException.
    */
   @Test
   public void testLockUseAfterPartitionWithMajority() {
      ClusteredLockManager clm0 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      ClusteredLockManager clm1 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(1));
      ClusteredLockManager clm2 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(2));
      ClusteredLockManager clm3 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(3));
      ClusteredLockManager clm4 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(4));
      ClusteredLockManager clm5 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(5));
      // Only the first definition creates the lock; the others see it already defined.
      assertTrue(clm0.defineLock(getLockName()));
      assertFalse(clm1.defineLock(getLockName()));
      assertFalse(clm2.defineLock(getLockName()));
      assertFalse(clm3.defineLock(getLockName()));
      assertFalse(clm4.defineLock(getLockName()));
      assertFalse(clm5.defineLock(getLockName()));
      ClusteredLock lock0 = clm0.get(getLockName());
      ClusteredLock lock1 = clm1.get(getLockName());
      ClusteredLock lock2 = clm2.get(getLockName());
      ClusteredLock lock3 = clm3.get(getLockName());
      ClusteredLock lock4 = clm4.get(getLockName());
      ClusteredLock lock5 = clm5.get(getLockName());
      splitCluster(new int[]{0, 1, 2, 3}, new int[]{4, 5});
      asList(lock0, lock1, lock2, lock3).forEach(this::assertTryLock);
      assertFailureFromMinorityPartition(lock4);
      assertFailureFromMinorityPartition(lock5);
   }

   /**
    * If the lock holder ends up alone in a minority partition, the majority must
    * auto-release the lock so exactly one of its members can acquire it.
    */
   @Test
   public void testAutoReleaseIfLockIsAcquiredFromAMinorityPartition() {
      ClusteredLockManager clm0 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      ClusteredLockManager clm1 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(1));
      ClusteredLockManager clm2 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(2));
      assertTrue(clm0.defineLock(getLockName()));
      ClusteredLock lock0 = clm0.get(getLockName());
      ClusteredLock lock1 = clm1.get(getLockName());
      ClusteredLock lock2 = clm2.get(getLockName());
      await(lock0.tryLock());
      assertTrue(await(lock0.isLockedByMe()));
      splitCluster(new int[]{0}, new int[]{1, 2, 3, 4, 5});
      CompletableFuture<Boolean> tryLock1Result = lock1.tryLock(1, TimeUnit.SECONDS);
      CompletableFuture<Boolean> tryLock2Result = lock2.tryLock(1, TimeUnit.SECONDS);
      // XOR: exactly one of the two majority members may win the released lock.
      assertTrue("Just one of the locks has to work", await(tryLock1Result) ^ await(tryLock2Result));
      assertFailureFromMinorityPartition(lock0);
   }

   /** tryLock requests issued before the split must resolve correctly afterwards. */
   @Test
   public void testTryLocksBeforeSplitBrain() {
      ClusteredLockManager clm0 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(0));
      ClusteredLockManager clm1 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(1));
      ClusteredLockManager clm2 = EmbeddedClusteredLockManagerFactory.from(getCacheManagers().get(2));
      assertTrue(clm0.defineLock(getLockName()));
      ClusteredLock lock0 = clm0.get(getLockName());
      ClusteredLock lock1 = clm1.get(getLockName());
      ClusteredLock lock2 = clm2.get(getLockName());
      CompletableFuture<Boolean> tryLock1 = lock1.tryLock();
      CompletableFuture<Boolean> tryLock2 = lock2.tryLock();
      splitCluster(new int[]{0}, new int[]{1, 2, 3, 4, 5});
      assertTrue("Just one of the locks has to work", await(tryLock1) ^ await(tryLock2));
      assertFailureFromMinorityPartition(lock0);
   }

   /** Expects the lock to be acquirable (with a generous timeout) and releases it if so. */
   private void assertTryLock(ClusteredLock lock) {
      Boolean locked = await(lock.tryLock(29, TimeUnit.SECONDS));
      if (locked) {
         await(lock.unlock());
      }
      assertTrue("Lock acquisition should be true " + lock, locked);
   }

   /** Expects tryLock on a minority-partition lock to fail with AvailabilityException. */
   private void assertFailureFromMinorityPartition(ClusteredLock lock) {
      expectCompletionException(ClusteredLockException.class, AvailabilityException.class, lock.tryLock());
   }
}
| 7,083
| 41.674699
| 118
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ClusteredLockAvailableReliabilityTest.java
|
package org.infinispan.lock;
import org.infinispan.lock.configuration.Reliability;
import org.testng.annotations.Test;
/**
 * Runs the whole {@link ClusteredLockTest} suite against a lock manager configured with
 * {@link Reliability#AVAILABLE} and three owners, on a six-node cluster.
 */
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockAvailableReliabilityTest")
public class ClusteredLockAvailableReliabilityTest extends ClusteredLockTest {

   private static final int CLUSTER_SIZE = 6;

   public ClusteredLockAvailableReliabilityTest() {
      numOwner = 3;
      reliability = Reliability.AVAILABLE;
   }

   @Override
   protected int clusterSize() {
      return CLUSTER_SIZE;
   }
}
| 501
| 24.1
| 94
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ClusteredLockWithoutClusterTest.java
|
package org.infinispan.lock;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.SingleCacheManagerTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Verifies that requesting a clustered lock manager from a non-clustered (local)
 * cache manager fails with {@link ClusteredLockException}.
 */
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockWithoutClusterTest")
public class ClusteredLockWithoutClusterTest extends SingleCacheManagerTest {

   @Override
   protected EmbeddedCacheManager createCacheManager() {
      // Deliberately build a LOCAL manager: clustered locks require a transport.
      EmbeddedCacheManager manager = TestCacheManagerFactory.createCacheManager(false);
      ConfigurationBuilder builder = getDefaultStandaloneCacheConfig(false);
      manager.defineConfiguration("test", builder.build());
      cache = manager.getCache("test");
      return manager;
   }

   public void testNeedsCluster() {
      Exceptions.expectException(ClusteredLockException.class,
            () -> EmbeddedClusteredLockManagerFactory.from(cacheManager));
   }
}
| 1,066
| 38.518519
| 125
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ClusteredLockWith2NodesTest.java
|
package org.infinispan.lock;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.infinispan.test.TestingUtil.killCacheManagers;
import static org.testng.AssertJUnit.fail;
import java.util.concurrent.TimeUnit;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockConfiguration;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.annotations.Test;
/**
 * The Vert.x-Infinispan cluster manager runs some scenarios with just two nodes. To avoid
 * failures there, these tests reproduce that topology: a two-node cluster where we kill
 * one node and verify the surviving node can still acquire and release the lock.
 */
@CleanupAfterMethod
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockWith2NodesTest")
public class ClusteredLockWith2NodesTest extends BaseClusteredLockTest {

   private static final String LOCK_NAME = "ClusteredLockWith2NodesTest";

   @Override
   protected int clusterSize() {
      return 2;
   }

   @Test
   public void testTryLockAndKillCoordinator() {
      doTest(0, 1);
   }

   @Test
   public void testTryLockAndKillNode() {
      doTest(1, 0);
   }

   /**
    * Acquires the lock from {@code killedNode}, kills that node and checks that
    * {@code survivingNode} can acquire the lock afterwards.
    */
   private void doTest(int killedNode, int survivingNode) {
      ClusteredLockManager m1 = clusteredLockManager(0);
      m1.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      ClusteredLock firstLockOwner = clusteredLockManager(killedNode).get(LOCK_NAME);
      ClusteredLock secondLockOwner = clusteredLockManager(survivingNode).get(LOCK_NAME);
      Boolean acquired = await(firstLockOwner.tryLock());
      if (!acquired) {
         fail("Manager 0 could not acquire the lock");
      }
      try {
         killCacheManagers(manager(killedNode));
         // The dead node's lock should be released, so the survivor can grab it.
         await(secondLockOwner.tryLock(1, TimeUnit.SECONDS));
      } finally {
         // Clean up through the surviving node; the other one is gone.
         ClusteredLockManager clusteredLockManager = clusteredLockManager(survivingNode);
         await(clusteredLockManager.remove(LOCK_NAME));
      }
   }
}
| 2,016
| 32.065574
| 120
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ClusteredLockWithZeroCapacityNodesTest.java
|
package org.infinispan.lock;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.testng.annotations.Test;
/**
 * Runs the {@link ClusteredLockTest} suite on a cluster where every odd-numbered node is
 * a zero-capacity member, in replicated (numOwner = -1) and distributed (1, 9) variants.
 */
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockWithZeroCapacityNodesTest")
public class ClusteredLockWithZeroCapacityNodesTest extends ClusteredLockTest {

   @Override
   public Object[] factory() {
      return new Object[]{
            new ClusteredLockWithZeroCapacityNodesTest().numOwner(-1), // REPL
            new ClusteredLockWithZeroCapacityNodesTest().numOwner(1),  // DIST, single owner
            new ClusteredLockWithZeroCapacityNodesTest().numOwner(9),  // DIST, owners > nodes
      };
   }

   @Override
   protected int clusterSize() {
      return 3;
   }

   @Override
   protected GlobalConfigurationBuilder configure(int nodeId) {
      // Every odd node is configured as zero-capacity (holds no data segments).
      boolean zeroCapacity = nodeId % 2 == 1;
      return super.configure(nodeId).zeroCapacityNode(zeroCapacity);
   }
}
| 869
| 29
| 95
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ConfigurationSerializerTest.java
|
package org.infinispan.lock;
import static org.infinispan.test.TestingUtil.withCacheManager;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ParserRegistry;
import org.infinispan.configuration.serializer.AbstractConfigurationSerializerTest;
import org.infinispan.lock.configuration.ClusteredLockConfiguration;
import org.infinispan.lock.configuration.ClusteredLockManagerConfiguration;
import org.infinispan.lock.configuration.Reliability;
import org.infinispan.lock.impl.ClusteredLockModuleLifecycle;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.testng.annotations.Test;
/**
 * Tests the configuration parser and serializer for the clustered-lock module.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
// testName previously said "counter.ConfigurationSerializerTest" — a copy-paste from the
// counter module that clashed with that module's test; aligned with the other tests here.
@Test(groups = "functional", testName = "clusteredLock.ConfigurationSerializerTest")
@CleanupAfterMethod
public class ConfigurationSerializerTest extends AbstractConfigurationSerializerTest {

   /** DIST config: AVAILABLE reliability, 3 owners, two declared locks. */
   public void testParserAvailableReliability() throws IOException {
      ConfigurationBuilderHolder holder = new ParserRegistry().parseFile("configs/all/clustered-locks-dist.xml");
      withCacheManager(() -> createClusteredCacheManager(holder), cacheManager -> {
         // Touching the internal cache forces the locks module to start.
         cacheManager.getCache(ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME);
         GlobalConfiguration globalConfiguration = cacheManager.getGlobalComponentRegistry().getGlobalConfiguration();
         ClusteredLockManagerConfiguration clmConfig = globalConfiguration
               .module(ClusteredLockManagerConfiguration.class);
         assertNotNull(clmConfig);
         assertEquals(3, clmConfig.numOwners());
         assertEquals(Reliability.AVAILABLE, clmConfig.reliability());
         assertTrue(clmConfig.locks().containsKey("lock1"));
         assertTrue(clmConfig.locks().containsKey("lock2"));
      });
   }

   /** REPL config: CONSISTENT reliability, owners everywhere (-1), two declared locks. */
   public void testParserConsistentReliability() throws IOException {
      ConfigurationBuilderHolder holder = new ParserRegistry().parseFile("configs/all/clustered-locks-repl.xml");
      withCacheManager(() -> createClusteredCacheManager(holder), cacheManager -> {
         cacheManager.getCache(ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME);
         GlobalConfiguration globalConfiguration = cacheManager.getGlobalComponentRegistry().getGlobalConfiguration();
         ClusteredLockManagerConfiguration clmConfig = globalConfiguration
               .module(ClusteredLockManagerConfiguration.class);
         assertNotNull(clmConfig);
         assertEquals(-1, clmConfig.numOwners());
         assertEquals(Reliability.CONSISTENT, clmConfig.reliability());
         // (removed an unused local HashMap that was never read)
         assertTrue(clmConfig.locks().containsKey("consi-lock1"));
         assertTrue(clmConfig.locks().containsKey("consi-lock2"));
      });
   }
}
| 3,268
| 49.292308
| 118
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/BaseClusteredLockTest.java
|
package org.infinispan.lock;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.configuration.ClusteredLockManagerConfigurationBuilder;
import org.infinispan.lock.configuration.Reliability;
import org.infinispan.lock.impl.ClusteredLockModuleLifecycle;
import org.infinispan.test.MultipleCacheManagersTest;
/**
 * Base class for clustered-lock tests: starts a small cluster with the clustered-lock
 * module configured and exposes helpers to reach each node's lock manager.
 */
public abstract class BaseClusteredLockTest extends MultipleCacheManagersTest {

   protected Reliability reliability = Reliability.CONSISTENT;
   protected int numOwner = -1;

   /** Number of cluster members; subclasses may override. */
   protected int clusterSize() {
      return 3;
   }

   /** Fluent setter, used by {@code factory()} implementations. */
   protected BaseClusteredLockTest numOwner(int numOwner) {
      this.numOwner = numOwner;
      return this;
   }

   /** Builds the global configuration for node {@code nodeId}, wiring in the lock module. */
   protected GlobalConfigurationBuilder configure(int nodeId) {
      GlobalConfigurationBuilder global = GlobalConfigurationBuilder.defaultClusteredBuilder();
      global.metrics().gauges(false);
      global.addModule(ClusteredLockManagerConfigurationBuilder.class)
            .numOwner(numOwner)
            .reliability(reliability);
      return global;
   }

   @Override
   protected final void createCacheManagers() throws Throwable {
      for (int node = 0, members = clusterSize(); node < members; node++) {
         addClusterEnabledCacheManager(configure(node), null);
      }
      waitForClusteredLockCaches();
   }

   protected final void waitForClusteredLockCaches() {
      waitForClusterToForm(ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME);
   }

   protected final ClusteredLockManager clusteredLockManager(int index) {
      return EmbeddedClusteredLockManagerFactory.from(manager(index));
   }

   @Override
   protected String[] parameterNames() {
      return concat(super.parameterNames(), "lockOwners");
   }

   @Override
   protected Object[] parameterValues() {
      // Omit the numOwner parameter if it's 0
      Integer lockOwners = numOwner == 0 ? null : numOwner;
      return concat(super.parameterValues(), lockOwners);
   }
}
| 2,140
| 31.938462
| 115
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/BaseClusteredLockSplitBrainTest.java
|
package org.infinispan.lock;
import static org.infinispan.lock.impl.ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.configuration.ClusteredLockManagerConfigurationBuilder;
import org.infinispan.lock.configuration.Reliability;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.partitionhandling.AvailabilityException;
import org.infinispan.partitionhandling.BasePartitionHandlingTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
/**
 * Base class for split-brain clustered-lock tests: builds a six-node cluster whose
 * members carry the clustered-lock module with the subclass-selected reliability
 * and number of owners.
 */
public abstract class BaseClusteredLockSplitBrainTest extends BasePartitionHandlingTest {

   protected Reliability reliability = Reliability.CONSISTENT;
   protected int numOwner = -1;

   public BaseClusteredLockSplitBrainTest() {
      this.numMembersInCluster = 6;
      this.cacheMode = null;
   }

   @Override
   protected void createCacheManagers() {
      ConfigurationBuilder dcc = cacheConfiguration();
      dcc.clustering().cacheMode(CacheMode.REPL_SYNC).partitionHandling().whenSplit(partitionHandling);
      // FD + merge transport flags so the test can split and later heal the cluster.
      createClusteredCaches(numMembersInCluster, dcc, new TransportFlags().withFD(true).withMerge(true));
      waitForClusterToForm(CLUSTERED_LOCK_CACHE_NAME);
   }

   @Override
   protected EmbeddedCacheManager addClusterEnabledCacheManager(ConfigurationBuilder builder, TransportFlags flags) {
      GlobalConfigurationBuilder gcb = GlobalConfigurationBuilder.defaultClusteredBuilder();
      gcb.addModule(ClusteredLockManagerConfigurationBuilder.class)
            .numOwner(numOwner)
            .reliability(reliability);
      // Create the manager stopped so it can be amended before joining the cluster.
      EmbeddedCacheManager cm = TestCacheManagerFactory.createClusteredCacheManager(false, gcb, builder, flags);
      amendCacheManagerBeforeStart(cm);
      cacheManagers.add(cm);
      cm.start();
      return cm;
   }

   /**
    * @return {@code true} iff defining the lock on {@code clm} raises an
    *         {@link AvailabilityException} (i.e. the node is in a denied partition)
    */
   protected boolean availabilityExceptionRaised(ClusteredLockManager clm) {
      try {
         clm.defineLock(getLockName());
         return false;
      } catch (AvailabilityException expected) {
         return true;
      }
   }

   /** Name of the lock under test, stable per subclass. */
   protected abstract String getLockName();
}
| 2,363
| 37.129032
| 117
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ClusteredLockTest.java
|
package org.infinispan.lock;
import static org.infinispan.commons.test.Exceptions.assertException;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockConfiguration;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.lock.logging.Log;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockTest")
public class ClusteredLockTest extends BaseClusteredLockTest {
protected static final String LOCK_NAME = "ClusteredLockTest";
public ClusteredLockTest() {
super();
}
@BeforeMethod(alwaysRun = true)
public void createLock() throws Throwable {
ClusteredLockManager m1 = clusteredLockManager(0);
m1.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
}
@AfterMethod(alwaysRun = true)
protected void destroyLock() {
ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
if (clusteredLockManager.isDefined(LOCK_NAME)) {
// The test method must either unlock or remove the lock itself
assertFalse(await(clusteredLockManager.get(LOCK_NAME).isLocked()));
await(clusteredLockManager.remove(LOCK_NAME));
}
}
@Test
public void testLockAndUnlockVisibility() throws Throwable {
ClusteredLockManager cm0 = clusteredLockManager(0);
ClusteredLockManager cm1 = clusteredLockManager(1);
ClusteredLockManager cm2 = clusteredLockManager(2);
ClusteredLock lock0 = cm0.get(LOCK_NAME);
ClusteredLock lock1 = cm1.get(LOCK_NAME);
ClusteredLock lock2 = cm2.get(LOCK_NAME);
// Is not locked
assertFalse(await(lock0.isLocked()));
assertFalse(await(lock1.isLocked()));
assertFalse(await(lock2.isLocked()));
// lock0 from cm0 locks
await(lock0.lock());
// Is locked by everybody
assertTrue(await(lock0.isLocked()));
assertTrue(await(lock1.isLocked()));
assertTrue(await(lock2.isLocked()));
// lock1 from cm1 tries to unlock unsuccessfully
await(lock1.unlock());
assertTrue(await(lock0.isLocked()));
assertTrue(await(lock1.isLocked()));
assertTrue(await(lock2.isLocked()));
// lock2 from cm2 tries to unlock unsuccessfully
await(lock2.unlock());
assertTrue(await(lock0.isLocked()));
assertTrue(await(lock1.isLocked()));
assertTrue(await(lock2.isLocked()));
// lock0 from cm0 tries to unlock successfully
await(lock0.unlock());
assertFalse(await(lock0.isLocked()));
assertFalse(await(lock1.isLocked()));
assertFalse(await(lock2.isLocked()));
}
@Test
public void testLockOwnership() throws Throwable {
ClusteredLockManager cm0 = clusteredLockManager(0);
ClusteredLockManager cm1 = clusteredLockManager(1);
ClusteredLockManager cm2 = clusteredLockManager(2);
ClusteredLock lock0 = cm0.get(LOCK_NAME);
ClusteredLock lock1 = cm1.get(LOCK_NAME);
ClusteredLock lock2 = cm2.get(LOCK_NAME);
// nobody owns the lock
assertFalse(await(lock0.isLockedByMe()));
assertFalse(await(lock1.isLockedByMe()));
assertFalse(await(lock2.isLockedByMe()));
// lock1 from cm1 acquires the lock
await(lock1.lock());
try {
// lock1 from cm1 holds the lock
assertFalse(await(lock0.isLockedByMe()));
assertTrue(await(lock1.isLockedByMe()));
assertFalse(await(lock2.isLockedByMe()));
} finally {
await(lock1.unlock());
}
}
@Test
public void testLockWhenLockIsRemoved() throws Throwable {
ClusteredLockManager cm0 = clusteredLockManager(0);
ClusteredLockManager cm1 = clusteredLockManager(1);
ClusteredLockManager cm2 = clusteredLockManager(2);
ClusteredLock lock0 = cm0.get(LOCK_NAME);
ClusteredLock lock1 = cm1.get(LOCK_NAME);
ClusteredLock lock2 = cm2.get(LOCK_NAME);
// lock0 from cm0 acquires the lock
await(lock0.lock());
CompletableFuture<Void> lock1Request = lock1.lock();
CompletableFuture<Void> lock2Request = lock2.lock();
assertFalse(lock1Request.isDone());
assertFalse(lock2Request.isDone());
assertTrue(await(cm0.remove(LOCK_NAME)));
assertNull(await(lock1Request
.exceptionally(e -> {
assertException(ClusteredLockException.class, e);
assertTrue(e.getMessage().contains(Log.LOCK_DELETE_MSG));
return null;
})));
assertNull(await(lock2Request
.exceptionally(e -> {
assertException(ClusteredLockException.class, e);
assertTrue(e.getMessage().contains(Log.LOCK_DELETE_MSG));
return null;
})));
}
@Test
public void testTryLockWithTimeoutWithCountersInParallelOnSingleLock() throws Throwable {
AtomicInteger counter = new AtomicInteger();
ClusteredLock lock = clusteredLockManager(0).get(LOCK_NAME);
CompletableFuture<Void> lockRes0 = lock.tryLock(1000, TimeUnit.MILLISECONDS).thenAccept(r -> {
if (r) {
counter.incrementAndGet();
await(lock.unlock());
}
});
CompletableFuture<Void> lockRes1 = lock.tryLock(1000, TimeUnit.MILLISECONDS).thenAccept(r -> {
if (r) {
counter.incrementAndGet();
await(lock.unlock());
}
});
CompletableFuture<Void> lockRes2 = lock.tryLock(1000, TimeUnit.MILLISECONDS).thenAccept(r -> {
if (r) {
counter.incrementAndGet();
await(lock.unlock());
}
});
await(lockRes0);
await(lockRes1);
await(lockRes2);
assertEquals(3, counter.get());
}
@Test
public void testTryLockWithTimeoutWithCountersInParallelOnMultiLocks() throws Throwable {
AtomicInteger counter = new AtomicInteger();
ClusteredLock lock0 = clusteredLockManager(0).get(LOCK_NAME);
ClusteredLock lock1 = clusteredLockManager(1).get(LOCK_NAME);
ClusteredLock lock2 = clusteredLockManager(2).get(LOCK_NAME);
CompletableFuture<Void> lockRes0 = lock0.tryLock(1000, TimeUnit.MILLISECONDS).thenAccept(r -> {
if (r) {
counter.incrementAndGet();
await(lock0.unlock());
}
});
CompletableFuture<Void> lockRes1 = lock1.tryLock(1000, TimeUnit.MILLISECONDS).thenAccept(r -> {
if (r) {
counter.incrementAndGet();
await(lock1.unlock());
}
});
CompletableFuture<Void> lockRes2 = lock2.tryLock(1000, TimeUnit.MILLISECONDS).thenAccept(r -> {
if (r) {
counter.incrementAndGet();
await(lock2.unlock());
}
});
await(lockRes0);
await(lockRes1);
await(lockRes2);
assertEquals(3, counter.get());
}
@Test
public void testTryLockWithCountersInParallel() throws Throwable {
ClusteredLock lock0 = clusteredLockManager(0).get(LOCK_NAME);
ClusteredLock lock1 = clusteredLockManager(1).get(LOCK_NAME);
ClusteredLock lock2 = clusteredLockManager(2).get(LOCK_NAME);
long successTryLocks = Stream.of(lock0, lock1, lock2)
.map(this::callTryLock)
.map(ClusteredLockTest::awaitTryLock)
.filter(Boolean::booleanValue)
.count();
try {
assertEquals(1, successTryLocks);
} finally {
await(lock0.unlock());
await(lock1.unlock());
await(lock2.unlock());
}
}
   // Forks a separate thread that performs a non-blocking tryLock and reports
   // whether the lock was acquired; used to create real cross-thread contention.
   private Future<Boolean> callTryLock(ClusteredLock lock) {
      return fork(() -> await(lock.tryLock()));
   }
private static Boolean awaitTryLock(Future<Boolean> result) {
try {
return result.get();
} catch (Exception e) {
throw new AssertionError(e);
}
}
}
| 8,461
| 32.713147
| 101
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/ClusteredLockKillingNodesTest.java
|
package org.infinispan.lock;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.infinispan.test.TestingUtil.killCacheManagers;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockConfiguration;
import org.infinispan.lock.api.ClusteredLockManager;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockKillingNodesTest")
public class ClusteredLockKillingNodesTest extends BaseClusteredLockTest {

   protected static final String LOCK_NAME = "ClusteredLockKillingNodesTest";

   // (Re)define the lock before every test so each method starts from a clean definition.
   @BeforeMethod(alwaysRun = true)
   public void createLock() throws Throwable {
      ClusteredLockManager m1 = clusteredLockManager(0);
      m1.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
   }

   // Drop the lock definition after each test so state does not leak between methods.
   @AfterMethod(alwaysRun = true)
   protected void destroyLock() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      await(clusteredLockManager.remove(LOCK_NAME));
   }

   /**
    * Verifies that a lock held by a node that is killed is eventually released,
    * allowing a waiter on a surviving node to acquire it.
    */
   @Test
   public void testLockWithAcquisitionAndKill() throws Throwable {
      ClusteredLock firstLockOwner = clusteredLockManager(1).get(LOCK_NAME);
      ClusteredLock secondLockOwner = clusteredLockManager(2).get(LOCK_NAME);
      StringBuilder value = new StringBuilder();
      // Node 1 takes the lock and is then killed while still holding it; node 2's
      // pending lock() must complete once the dead owner's hold is cleared.
      await(firstLockOwner.lock().thenRun(() -> {
         killCacheManagers(manager(1));
         await(secondLockOwner.lock().thenRun(() -> value.append("hello")));
      }));
      // Reached only if secondLockOwner actually acquired the lock.
      assertEquals(value.toString(), "hello");
   }
}
| 1,666
| 35.23913
| 86
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/jmx/ClusteredLockJmxTest.java
|
package org.infinispan.lock.jmx;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.infinispan.test.fwk.TestCacheManagerFactory.configureJmx;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Optional;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import org.infinispan.Cache;
import org.infinispan.commons.jmx.MBeanServerLookup;
import org.infinispan.commons.jmx.TestMBeanServerLookup;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lock.BaseClusteredLockTest;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.impl.ClusteredLockModuleLifecycle;
import org.infinispan.lock.impl.manager.EmbeddedClusteredLockManager;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
 * JMX operations tests: exercises the clustered lock manager's MBean operations
 * (force-release, remove, is-defined, is-locked) and checks they agree with the
 * {@code ClusteredLockManager} API view of the same lock.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
@Test(groups = "functional", testName = "clusteredLock.jmx.ClusteredLockJmxTest")
public class ClusteredLockJmxTest extends BaseClusteredLockTest {

   private static final String LOCK_NAME = ClusteredLockJmxTest.class.getSimpleName();

   // Test MBean server lookup shared by all nodes; wired per node in configure().
   private final MBeanServerLookup mBeanServerLookup = TestMBeanServerLookup.create();

   // The FORCE_RELEASE operation must unlock a currently held lock.
   public void testForceRelease() {
      ClusteredLockManager clm = clusteredLockManager(0);
      assertTrue(clusteredLockManager(0).defineLock(LOCK_NAME));
      ClusteredLock lock = clm.get(LOCK_NAME);
      assertTrue(await(lock.tryLock()));
      assertTrue(await(lock.isLocked()));
      assertTrue(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.FORCE_RELEASE, LOCK_NAME));
      assertFalse(await(lock.isLocked()));
   }

   // The REMOVE operation reports true only when the lock actually existed.
   public void testRemove() {
      ClusteredLockManager clm = clusteredLockManager(0);
      assertTrue(clm.defineLock(LOCK_NAME));
      assertFalse(clm.defineLock(LOCK_NAME));
      // first call remove result should be true because the lock exists
      assertTrue(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.REMOVE, LOCK_NAME));
      // second call remove result should be false
      assertFalse(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.REMOVE, LOCK_NAME));
      assertTrue(clm.defineLock(LOCK_NAME));
   }

   // The IS_DEFINED operation mirrors ClusteredLockManager.isDefined().
   public void testIsDefined() {
      assertFalse(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.IS_DEFINED, LOCK_NAME));
      assertTrue(clusteredLockManager(0).defineLock(LOCK_NAME));
      assertTrue(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.IS_DEFINED, LOCK_NAME));
   }

   // The IS_LOCKED operation reflects the lock's current held state.
   public void testIsLocked() {
      assertFalse(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.IS_LOCKED, LOCK_NAME));
      ClusteredLockManager clm = clusteredLockManager(0);
      assertTrue(clm.defineLock(LOCK_NAME));
      assertFalse(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.IS_LOCKED, LOCK_NAME));
      ClusteredLock lock = clm.get(LOCK_NAME);
      assertTrue(await(lock.tryLock()));
      assertTrue(await(lock.isLocked()));
      assertTrue(executeClusteredLockNameArgOperation(0, EmbeddedClusteredLockManager.IS_LOCKED, LOCK_NAME));
   }

   // Also clear the internal locks cache so lock state does not leak across tests.
   @AfterMethod(alwaysRun = true)
   @Override
   protected void clearContent() throws Throwable {
      super.clearContent();
      findCache(ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME).ifPresent(Cache::clear);
   }

   @Override
   protected int clusterSize() {
      return 2;
   }

   // Give each node its own JMX domain backed by the shared test MBean server.
   @Override
   protected GlobalConfigurationBuilder configure(int nodeId) {
      GlobalConfigurationBuilder builder = GlobalConfigurationBuilder.defaultClusteredBuilder();
      String jmxDomain = getClass().getSimpleName() + nodeId;
      configureJmx(builder, jmxDomain, mBeanServerLookup);
      return builder;
   }

   private Optional<Cache<?, ?>> findCache(String cacheName) {
      // getCache(name, false) does not create the cache when it is missing.
      return Optional.ofNullable(manager(0).getCache(cacheName, false));
   }

   // Invokes a single-String-argument operation on the given node's clustered
   // lock manager MBean and returns its (uncast-checked) result.
   private <T> T executeClusteredLockNameArgOperation(int index, String operationName, String arg) {
      MBeanServer server = mBeanServerLookup.getMBeanServer();
      try {
         //noinspection unchecked
         return (T) server
               .invoke(clusteredLockObjectName(index), operationName, new Object[]{arg},
                     new String[]{String.class.getName()});
      } catch (InstanceNotFoundException | MBeanException | ReflectionException e) {
         throw new RuntimeException(e);
      }
   }

   private ObjectName clusteredLockObjectName(int managerIndex) {
      final String domain = manager(managerIndex).getCacheManagerConfiguration().jmx().domain();
      return TestingUtil.getCacheManagerObjectName(domain, "DefaultCacheManager", EmbeddedClusteredLockManager.OBJECT_NAME);
   }
}
| 5,087
| 38.44186
| 124
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/impl/manager/EmbeddedClusteredLockManagerTest.java
|
package org.infinispan.lock.impl.manager;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import org.infinispan.Cache;
import org.infinispan.lock.BaseClusteredLockTest;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockConfiguration;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.api.OwnershipLevel;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.lock.impl.ClusteredLockModuleLifecycle;
import org.infinispan.lock.impl.lock.ClusteredLockImpl;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.EmbeddedClusteredLockManagerTest")
public class EmbeddedClusteredLockManagerTest extends BaseClusteredLockTest {

   private static final String LOCK_NAME = "EmbeddedClusteredLockManagerTest";

   // Remove the lock after each test so definitions do not leak between methods.
   @AfterMethod
   public void after() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      await(clusteredLockManager.remove(LOCK_NAME));
   }

   // defineLock is idempotent: only the first definition reports success.
   public void testDefineLock() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      assertTrue(clusteredLockManager.defineLock(LOCK_NAME, new ClusteredLockConfiguration()));
      assertFalse(clusteredLockManager.defineLock(LOCK_NAME, new ClusteredLockConfiguration()));
   }

   public void testGetWithLockDefinition() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      clusteredLockManager.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      ClusteredLock lock = clusteredLockManager.get(LOCK_NAME);
      assertNotNull(lock);
   }

   // A lock defined on one node is visible from another; each handle keeps its
   // own originator address.
   public void testGetWithLockDefinitionFromAnotherNode() {
      ClusteredLockManager cm0 = clusteredLockManager(0);
      cm0.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      ClusteredLockImpl lock0 = (ClusteredLockImpl) cm0.get(LOCK_NAME);
      ClusteredLockManager cm1 = clusteredLockManager(1);
      ClusteredLockImpl lock1 = (ClusteredLockImpl) cm1.get(LOCK_NAME);
      assertNotNull(lock0);
      assertNotNull(lock1);
      assertEquals(lock0.getName(), lock1.getName());
      assertEquals(manager(0).getAddress(), lock0.getOriginator());
      assertEquals(manager(1).getAddress(), lock1.getOriginator());
   }

   // get() on an undefined lock fails fast instead of creating one implicitly.
   public void testGetWithoutLockDefinition() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      expectException(ClusteredLockException.class, () -> clusteredLockManager.get(LOCK_NAME));
   }

   // Default configuration: NODE ownership, non-reentrant.
   public void testGetConfigurationWithLockDefinition() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      clusteredLockManager.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      ClusteredLockConfiguration configuration = clusteredLockManager.getConfiguration(LOCK_NAME);
      assertEquals(OwnershipLevel.NODE, configuration.getOwnershipLevel());
      assertFalse(configuration.isReentrant());
   }

   public void testGetConfigurationWithoutLockDefinition() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      expectException(ClusteredLockException.class, () -> clusteredLockManager.getConfiguration(LOCK_NAME));
   }

   public void testIsDefined() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      assertFalse(clusteredLockManager.isDefined(LOCK_NAME));
      clusteredLockManager.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      assertTrue(clusteredLockManager.isDefined(LOCK_NAME));
   }

   // forceRelease unlocks a held lock regardless of who holds it.
   public void testForceRelease() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      clusteredLockManager.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      ClusteredLock lock = clusteredLockManager.get(LOCK_NAME);
      await(lock.lock());
      assertTrue(await(lock.isLocked()));
      assertTrue(await(clusteredLockManager.forceRelease(LOCK_NAME)));
      assertFalse(await(lock.isLocked()));
   }

   public void testForceReleaseUndefinedLock() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      assertFalse(await(clusteredLockManager.forceRelease(LOCK_NAME)));
   }

   // Obtaining a lock registers a listener on the internal locks cache; removing
   // the lock must unregister it again.
   public void testRemove() {
      ClusteredLockManager clusteredLockManager = clusteredLockManager(0);
      Cache<Object, Object> clusteredLocksCache = cache(0, ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME);
      int beforeSize = clusteredLocksCache.getListeners().size();
      clusteredLockManager.defineLock(LOCK_NAME);
      clusteredLockManager.get(LOCK_NAME);
      int afterSize = clusteredLocksCache.getListeners().size();
      assertTrue(beforeSize < afterSize);
      await(clusteredLockManager.remove(LOCK_NAME));
      int afterRemoveSize = clusteredLocksCache.getListeners().size();
      assertFalse(clusteredLockManager.isDefined(LOCK_NAME));
      // assertEquals (was assertTrue(a == b)) gives a useful message on failure.
      assertEquals(beforeSize, afterRemoveSize);
   }
}
| 5,221
| 44.408696
| 115
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/impl/lock/RequestExpirationSchedulerTest.java
|
package org.infinispan.lock.impl.lock;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.mockito.ArgumentCaptor;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.RequestExpirationSchedulerTest")
public class RequestExpirationSchedulerTest {

   // Scheduler under test; recreated before every method.
   private RequestExpirationScheduler expirationScheduler;
   private final ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
   // Raw type kept: Mockito stubbing of schedule(...)'s wildcard return is not
   // expressible with a parameterized mock.
   private ScheduledFuture scheduledFutureMock;
   private CompletableFuture<Boolean> booleanCompletableFuture;

   @BeforeMethod
   public void createRequestExpirationScheduler() {
      expirationScheduler = new RequestExpirationScheduler(scheduledExecutorService);
      scheduledFutureMock = mock(ScheduledFuture.class);
      booleanCompletableFuture = new CompletableFuture<>();
      when(scheduledExecutorService.schedule(any(Runnable.class), anyLong(), any(TimeUnit.class))).thenReturn(scheduledFutureMock);
   }

   @Test
   public void testScheduleForCompletionAndRunScheduling() throws Exception {
      ArgumentCaptor<Runnable> argument = ArgumentCaptor.forClass(Runnable.class);
      expirationScheduler.scheduleForCompletion("123", booleanCompletableFuture, 10, TimeUnit.MILLISECONDS);
      assertEquals(1, expirationScheduler.countScheduledRequests());
      verify(scheduledExecutorService).schedule(argument.capture(), eq(10L), eq(TimeUnit.MILLISECONDS));
      // We capture the argument so we can run as if the scheduler worked and assert
      Runnable runnable = argument.getValue();
      runnable.run();
      // Expiration completes the request with false and drops the bookkeeping entry.
      assertTrue(booleanCompletableFuture.isDone());
      assertFalse(booleanCompletableFuture.get());
      assertEquals(0, expirationScheduler.countScheduledRequests());
   }

   @Test
   public void testScheduleForCompletionAddsASingleRequestById() throws Exception {
      expirationScheduler.scheduleForCompletion("123", booleanCompletableFuture, 100, TimeUnit.SECONDS);
      // A second schedule under the same id must be rejected, not replace the first.
      expectException(IllegalStateException.class, () -> expirationScheduler.scheduleForCompletion("123", booleanCompletableFuture, 50, TimeUnit.SECONDS));
      RequestExpirationScheduler.ScheduledRequest scheduledRequest = expirationScheduler.get("123");
      assertEquals(scheduledFutureMock, scheduledRequest.getScheduledFuture());
      assertEquals(booleanCompletableFuture, scheduledRequest.getRequest());
      assertEquals(1, expirationScheduler.countScheduledRequests());
      verify(scheduledExecutorService).schedule(any(Runnable.class), eq(100L), eq(TimeUnit.SECONDS));
      verify(scheduledExecutorService, never()).schedule(any(Runnable.class), eq(0L), eq(TimeUnit.SECONDS));
      verify(scheduledExecutorService, never()).schedule(any(Runnable.class), eq(50L), eq(TimeUnit.SECONDS));
   }

   @Test
   public void testCompletedRequestsShouldNotBeScheduled() throws Exception {
      CompletableFuture<Boolean> request = new CompletableFuture<>();
      request.complete(true);
      expirationScheduler.scheduleForCompletion("123", request, 10, TimeUnit.MILLISECONDS);
      assertEquals(0, expirationScheduler.countScheduledRequests());
   }

   @Test
   public void testAbortSchedulingWithCompletedRequest() throws Exception {
      expirationScheduler.scheduleForCompletion("123", booleanCompletableFuture, 42, TimeUnit.SECONDS);
      assertEquals(1, expirationScheduler.countScheduledRequests());
      booleanCompletableFuture.complete(true);
      // Abort on an already-completed request cancels its expiration task.
      expirationScheduler.abortScheduling("123");
      assertTrue(booleanCompletableFuture.isDone());
      assertTrue(booleanCompletableFuture.get());
      verify(scheduledFutureMock).cancel(false);
      assertEquals(0, expirationScheduler.countScheduledRequests());
   }

   @Test
   public void testAbortSchedulingShouldNotWorkIfRequestIsNotCompleted() throws Exception {
      expirationScheduler.scheduleForCompletion("123", booleanCompletableFuture, 42, TimeUnit.SECONDS);
      assertEquals(1, expirationScheduler.countScheduledRequests());
      // Non-forced abort of a pending request is a no-op.
      expirationScheduler.abortScheduling("123");
      assertFalse(booleanCompletableFuture.isDone());
      verify(scheduledFutureMock, never()).cancel(false);
      assertEquals(1, expirationScheduler.countScheduledRequests());
   }

   @Test
   public void testAbortSchedulingShouldWorkIfRequestIsNotCompletedAndForce() throws Exception {
      expirationScheduler.scheduleForCompletion("123", booleanCompletableFuture, 42, TimeUnit.SECONDS);
      assertEquals(1, expirationScheduler.countScheduledRequests());
      // Forced abort cancels the expiration even though the request is pending.
      expirationScheduler.abortScheduling("123", true);
      assertFalse(booleanCompletableFuture.isDone());
      verify(scheduledFutureMock).cancel(false);
      assertEquals(0, expirationScheduler.countScheduledRequests());
   }

   @Test
   public void testAbortSchedulingDoNothingForUnexistingRequests() throws Exception {
      expirationScheduler.abortScheduling("unexisting");
      assertEquals(0, expirationScheduler.countScheduledRequests());
   }
}
| 5,992
| 42.744526
| 155
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/impl/lock/ClusteredLockImplLowAvailableReliabilityTest.java
|
package org.infinispan.lock.impl.lock;
import org.infinispan.lock.configuration.Reliability;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockImplLowAvailableReliabilityTest")
public class ClusteredLockImplLowAvailableReliabilityTest extends ClusteredLockImplTest {

   /**
    * Runs the full {@link ClusteredLockImplTest} suite with a single owner and
    * AVAILABLE reliability — the least redundant lock-container configuration.
    */
   public ClusteredLockImplLowAvailableReliabilityTest() {
      numOwner = 1;
      reliability = Reliability.AVAILABLE;
   }
}
| 469
| 28.375
| 101
|
java
|
null |
infinispan-main/lock/src/test/java/org/infinispan/lock/impl/lock/ClusteredLockImplTest.java
|
package org.infinispan.lock.impl.lock;
import static org.infinispan.functional.FunctionalTestUtils.await;
import static org.infinispan.commons.test.Exceptions.assertException;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.infinispan.lock.BaseClusteredLockTest;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockConfiguration;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.exception.ClusteredLockException;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "clusteredLock.ClusteredLockImplTest")
public class ClusteredLockImplTest extends BaseClusteredLockTest {

   protected static final String LOCK_NAME = "ClusteredLockImplTest";

   private ClusteredLockManager cm;
   private ClusteredLock lock;

   @Override
   protected int clusterSize() {
      return 2;
   }

   @BeforeClass
   public void createLockManager() throws Throwable {
      cm = clusteredLockManager(0);
   }

   @AfterClass
   public void destroyLock() throws Throwable {
      await(cm.remove(LOCK_NAME));
   }

   // Each test starts with a freshly (re)defined, unlocked lock.
   @BeforeMethod
   public void createLock() {
      cm.defineLock(LOCK_NAME, new ClusteredLockConfiguration());
      lock = clusteredLockManager(0).get(LOCK_NAME);
      await(lock.unlock());
   }

   @AfterMethod
   public void releaseLock() {
      if (cm.isDefined(LOCK_NAME)) {
         await(lock.unlock());
      }
   }

   /**
    * Awaits {@code call} and asserts it completed exceptionally with a
    * {@link ClusteredLockException} — the expected failure mode for operations
    * on a lock whose definition has been removed. Extracted from the six
    * "*AfterLockRemove" tests that previously duplicated this pattern inline.
    */
   private <T> void assertFailsWithClusteredLockException(CompletableFuture<T> call) {
      await(call.exceptionally(e -> {
         assertException(ClusteredLockException.class, e);
         return null;
      }));
      assertTrue(call.isCompletedExceptionally());
   }

   public void testLock() throws Throwable {
      assertFalse(await(lock.isLocked()));
      await(lock.lock());
      assertTrue(await(lock.isLocked()));
   }

   public void testTryLock() throws Throwable {
      assertTrue(await(lock.tryLock()));
      assertFalse(await(lock.tryLock()));
   }

   // A zero timeout behaves like the non-blocking tryLock().
   public void testTryLockWithZeroTimeout() throws Throwable {
      assertTrue(await(lock.tryLock(0, TimeUnit.HOURS)));
      assertFalse(await(lock.tryLock(0, TimeUnit.HOURS)));
      assertTrue(await(lock.isLockedByMe()));
      assertTrue(await(lock.isLocked()));
   }

   // Negative timeouts are treated as "do not wait".
   public void testTryLockWithNegativeTimeout() throws Throwable {
      assertTrue(await(lock.tryLock(-10, TimeUnit.HOURS)));
      assertFalse(await(lock.tryLock(-10, TimeUnit.HOURS)));
      assertTrue(await(lock.isLockedByMe()));
      assertTrue(await(lock.isLocked()));
   }

   // Even a tiny timeout succeeds when the lock is free.
   public void testFastLockWithTimeout() throws Throwable {
      assertTrue(await(lock.tryLock(1, TimeUnit.NANOSECONDS)));
      assertTrue(await(lock.isLockedByMe()));
      assertTrue(await(lock.isLocked()));
   }

   public void testTryLockWithTimeoutAfterLockWithSmallTimeout() throws Throwable {
      assertTrue(await(lock.tryLock()));
      assertFalse(await(lock.tryLock(1, TimeUnit.NANOSECONDS)));
   }

   public void testTryLockWithTimeoutAfterLockWithBigTimeout() throws Throwable {
      assertTrue(await(lock.tryLock()));
      // The second attempt waits; releasing the lock lets it succeed in time.
      CompletableFuture<Boolean> tryLock = lock.tryLock(1, TimeUnit.SECONDS);
      await(lock.unlock());
      assertTrue(await(tryLock));
   }

   public void testUnlock() throws Throwable {
      assertTrue(await(lock.tryLock()));
      assertFalse(await(lock.tryLock()));
      await(lock.unlock());
      assertTrue(await(lock.tryLock()));
   }

   public void testIsLockedByMe() throws Throwable {
      assertFalse(await(lock.isLockedByMe()));
      await(lock.lock());
      assertTrue(await(lock.isLockedByMe()));
   }

   public void testLockAfterLockRemove() throws Throwable {
      await(cm.remove(LOCK_NAME));
      assertFailsWithClusteredLockException(lock.lock());
   }

   public void testTryLockAfterLockRemove() throws Throwable {
      await(cm.remove(LOCK_NAME));
      assertFailsWithClusteredLockException(lock.tryLock());
   }

   public void testTryLockWithTimeoutAfterLockRemove() throws Throwable {
      await(cm.remove(LOCK_NAME));
      assertFailsWithClusteredLockException(lock.tryLock(100, TimeUnit.MILLISECONDS));
   }

   public void testIsLockedAfterLockRemove() throws Throwable {
      await(cm.remove(LOCK_NAME));
      assertFailsWithClusteredLockException(lock.isLocked());
   }

   public void testIsLockedByMeAfterLockRemove() throws Throwable {
      await(cm.remove(LOCK_NAME));
      assertFailsWithClusteredLockException(lock.isLockedByMe());
   }

   // Removing a lock while it is held must still succeed, and later operations
   // on the stale handle fail.
   public void testRemoveHoldLock() throws Throwable {
      await(lock.lock());
      assertTrue(await(cm.remove(LOCK_NAME)));
      assertFailsWithClusteredLockException(lock.isLocked());
   }
}
| 5,971
| 30.765957
| 83
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/package-info.java
|
/**
* Embedded Clustered Locks.
*
* @api.public
*/
package org.infinispan.lock;
| 84
| 11.142857
| 28
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/EmbeddedClusteredLockManagerFactory.java
|
package org.infinispan.lock;
import static java.util.Objects.requireNonNull;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.logging.Log;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.actions.SecurityActions;
/**
 * Entry point for obtaining the {@link ClusteredLockManager} bound to an
 * embedded cache manager.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
public final class EmbeddedClusteredLockManagerFactory {

   private static final Log log = LogFactory.getLog(EmbeddedClusteredLockManagerFactory.class, Log.class);

   private EmbeddedClusteredLockManagerFactory() {
      // static factory; not instantiable
   }

   /**
    * Returns the {@link ClusteredLockManager} component registered with the
    * given cache manager.
    *
    * @throws NullPointerException if {@code cacheManager} is null
    * @throws RuntimeException (via {@code Log#requireClustered()}) if the cache
    *         manager is not clustered
    */
   public static ClusteredLockManager from(EmbeddedCacheManager cacheManager) {
      requireNonNull(cacheManager, "EmbeddedCacheManager can't be null.");
      boolean clustered = cacheManager.getCacheManagerConfiguration().isClustered();
      if (!clustered) {
         throw log.requireClustered();
      }
      return SecurityActions.getGlobalComponentRegistry(cacheManager).getComponent(ClusteredLockManager.class);
   }
}
| 1,093
| 31.176471
| 106
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/Element.java
|
package org.infinispan.lock.configuration;
import java.util.HashMap;
import java.util.Map;
/**
 * XML element names recognized by the clustered-locks configuration schema,
 * with a lookup from local name to enum constant.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
public enum Element {
   // Must stay first: the sentinel returned for unrecognized element names.
   UNKNOWN(null),
   CLUSTERED_LOCKS("clustered-locks"),
   CLUSTERED_LOCK("clustered-lock"),
   ;

   private static final Map<String, Element> ELEMENTS;

   static {
      Map<String, Element> byName = new HashMap<>(8);
      for (Element candidate : values()) {
         String localName = candidate.name;
         if (localName == null) {
            continue; // UNKNOWN has no XML name
         }
         byName.put(localName, candidate);
      }
      ELEMENTS = byName;
   }

   private final String name;

   Element(final String name) {
      this.name = name;
   }

   /** Resolves the element for {@code localName}, or {@link #UNKNOWN} if unrecognized. */
   public static Element forName(final String localName) {
      Element match = ELEMENTS.get(localName);
      if (match != null) {
         return match;
      }
      return UNKNOWN;
   }

   @Override
   public String toString() {
      return name;
   }
}
| 954
| 19.319149
| 58
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/ClusteredLockConfigurationParser.java
|
package org.infinispan.lock.configuration;
import static org.infinispan.lock.configuration.ClusteredLockConfigurationParser.NAMESPACE;
import org.infinispan.commons.configuration.io.ConfigurationReader;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ConfigurationParser;
import org.infinispan.configuration.parsing.Namespace;
import org.infinispan.configuration.parsing.ParseUtils;
import org.infinispan.configuration.parsing.Parser;
import org.infinispan.configuration.parsing.ParserScope;
import org.infinispan.lock.logging.Log;
import org.kohsuke.MetaInfServices;
/**
 * Clustered Locks configuration parser: reads the {@code <clustered-locks>}
 * element (and its nested {@code <clustered-lock>} children) into the module's
 * configuration builders.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
@MetaInfServices
@Namespace(root = "clustered-locks")
@Namespace(uri = NAMESPACE + "*", root = "clustered-locks", since = "9.4")
public class ClusteredLockConfigurationParser implements ConfigurationParser {

   static final String NAMESPACE = Parser.NAMESPACE + "clustered-locks:";

   private static final Log log = LogFactory.getLog(ClusteredLockConfigurationParser.class, Log.class);

   @Override
   public void readElement(ConfigurationReader reader, ConfigurationBuilderHolder holder) {
      // Clustered locks may only be declared inside a cache-container scope.
      if (!holder.inScope(ParserScope.CACHE_CONTAINER)) {
         throw log.invalidScope(holder.getScope());
      }
      GlobalConfigurationBuilder builder = holder.getGlobalConfigurationBuilder();
      Element element = Element.forName(reader.getLocalName());
      switch (element) {
         case CLUSTERED_LOCKS: {
            // Register the module's configuration builder and parse its contents.
            parseClusteredLocksElement(reader, builder.addModule(ClusteredLockManagerConfigurationBuilder.class));
            break;
         }
         default: {
            throw ParseUtils.unexpectedElement(reader);
         }
      }
   }

   @Override
   public Namespace[] getNamespaces() {
      return ParseUtils.getNamespaceAnnotations(getClass());
   }

   // Parses the <clustered-locks> element: its NUM_OWNERS/RELIABILITY attributes
   // plus any nested <clustered-lock> children.
   private void parseClusteredLocksElement(ConfigurationReader reader, ClusteredLockManagerConfigurationBuilder builder) {
      for (int i = 0; i < reader.getAttributeCount(); i++) {
         ParseUtils.requireNoNamespaceAttribute(reader, i);
         String value = reader.getAttributeValue(i);
         Attribute attribute = Attribute.forName(reader.getAttributeName(i));
         switch (attribute) {
            case NUM_OWNERS:
               builder.numOwner(Integer.parseInt(value));
               break;
            case RELIABILITY:
               builder.reliability(Reliability.valueOf(value));
               break;
            default:
               throw ParseUtils.unexpectedAttribute(reader, i);
         }
      }
      // Every nested element must be a <clustered-lock> definition.
      while (reader.inTag()) {
         Element element = Element.forName(reader.getLocalName());
         switch (element) {
            case CLUSTERED_LOCK:
               parseClusteredLock(reader, builder.addClusteredLock());
               break;
            default:
               throw ParseUtils.unexpectedElement(reader);
         }
      }
   }

   // Parses a single <clustered-lock/> element: only its NAME attribute; no body.
   private void parseClusteredLock(ConfigurationReader reader, ClusteredLockConfigurationBuilder builder) {
      for (int i = 0; i < reader.getAttributeCount(); i++) {
         ParseUtils.requireNoNamespaceAttribute(reader, i);
         String value = reader.getAttributeValue(i);
         Attribute attribute = Attribute.forName(reader.getAttributeName(i));
         switch (attribute) {
            case NAME:
               builder.name(value);
               break;
            default:
               throw ParseUtils.unexpectedAttribute(reader, i);
         }
      }
      ParseUtils.requireNoContent(reader);
   }
}
| 3,770
| 36.71
| 122
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/Reliability.java
|
package org.infinispan.lock.configuration;
/**
 * Trade-off applied to the lock container when the cluster degrades (e.g.
 * splits). Locks are normally both available and consistent; on a partition,
 * AVAILABLE keeps locks usable everywhere (risking duplicate ownership across
 * partitions), while CONSISTENT makes them unavailable in minority partitions
 * (potentially requiring administrator intervention to recover availability).
 *
 * @see org.infinispan.partitionhandling.PartitionHandling
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
public enum Reliability {
   AVAILABLE,
   CONSISTENT
}
| 697
| 37.777778
| 117
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/ClusteredLockConfigurationBuilder.java
|
package org.infinispan.lock.configuration;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeSet;
/**
 * Builder for a single {@link org.infinispan.lock.api.ClusteredLock}
 * configuration.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
public class ClusteredLockConfigurationBuilder implements Builder<ClusteredLockConfiguration> {

   private final AttributeSet attributes = ClusteredLockConfiguration.attributeDefinitionSet();

   /** Sets the name identifying the lock being configured. */
   public ClusteredLockConfigurationBuilder name(String name) {
      attributes.attribute(ClusteredLockConfiguration.NAME).set(name);
      return this;
   }

   @Override
   public AttributeSet attributes() {
      return attributes;
   }

   @Override
   public void validate() {
      // Each attribute definition performs its own validation.
      for (Attribute<?> attribute : attributes.attributes()) {
         attribute.validate();
      }
   }

   @Override
   public ClusteredLockConfiguration create() {
      return new ClusteredLockConfiguration(attributes.protect());
   }

   @Override
   public Builder<?> read(ClusteredLockConfiguration template, Combine combine) {
      attributes.read(template.attributes(), combine);
      return this;
   }
}
| 1,285
| 28.227273
| 95
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/ClusteredLockManagerConfigurationBuilder.java
|
package org.infinispan.lock.configuration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.lock.logging.Log;
/**
 * Builder for the {@link org.infinispan.lock.api.ClusteredLockManager} configuration.
 * <p>
 * Configures the number of owners, the {@link Reliability} mode, and any statically
 * declared locks.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
public class ClusteredLockManagerConfigurationBuilder implements Builder<ClusteredLockManagerConfiguration> {

   private static final ClusteredLockManagerConfiguration DEFAULT = new ClusteredLockManagerConfigurationBuilder(null).create();
   private static final Log log = LogFactory.getLog(ClusteredLockManagerConfigurationBuilder.class, Log.class);

   private final AttributeSet attributes = ClusteredLockManagerConfiguration.attributeDefinitionSet();
   // One sub-builder per statically declared lock.
   private final List<ClusteredLockConfigurationBuilder> locksConfig = new ArrayList<>();
   private final GlobalConfigurationBuilder builder;

   public ClusteredLockManagerConfigurationBuilder(GlobalConfigurationBuilder builder) {
      this.builder = builder;
   }

   @Override
   public AttributeSet attributes() {
      return attributes;
   }

   /**
    * @return the default {@link ClusteredLockManagerConfiguration}.
    */
   public static ClusteredLockManagerConfiguration defaultConfiguration() {
      return DEFAULT;
   }

   /**
    * Sets the number of copies of the lock state kept in the cluster.
    * <p>
    * A higher value provides better availability at the cost of more expensive updates.
    * Default value is 2.
    *
    * @param numOwners the number of copies.
    */
   public ClusteredLockManagerConfigurationBuilder numOwner(int numOwners) {
      attributes.attribute(ClusteredLockManagerConfiguration.NUM_OWNERS).set(numOwners);
      return this;
   }

   /**
    * Sets the {@link Reliability} mode.
    * <p>
    * Default value is {@link Reliability#AVAILABLE}.
    *
    * @param reliability the {@link Reliability} mode.
    * @see Reliability
    */
   public ClusteredLockManagerConfigurationBuilder reliability(Reliability reliability) {
      attributes.attribute(ClusteredLockManagerConfiguration.RELIABILITY).set(reliability);
      return this;
   }

   /** Starts the declaration of a new statically configured lock. */
   public ClusteredLockConfigurationBuilder addClusteredLock() {
      ClusteredLockConfigurationBuilder lockBuilder = new ClusteredLockConfigurationBuilder();
      locksConfig.add(lockBuilder);
      return lockBuilder;
   }

   @Override
   public void validate() {
      for (Attribute<?> attribute : attributes.attributes()) {
         attribute.validate();
      }
   }

   @Override
   public ClusteredLockManagerConfiguration create() {
      // Materialize each declared lock, keyed by its (validated) name.
      Map<String, ClusteredLockConfiguration> clusteredLocks = new HashMap<>(locksConfig.size());
      locksConfig.forEach(lockBuilder -> {
         ClusteredLockConfiguration lockConfiguration = lockBuilder.create();
         clusteredLocks.put(lockConfiguration.name(), lockConfiguration);
      });
      return new ClusteredLockManagerConfiguration(attributes.protect(), clusteredLocks);
   }

   @Override
   public Builder<?> read(ClusteredLockManagerConfiguration template, Combine combine) {
      this.attributes.read(template.attributes(), combine);
      return this;
   }
}
| 3,651
| 34.115385
| 128
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/ClusteredLockManagerConfiguration.java
|
package org.infinispan.lock.configuration;
import java.util.Map;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.lock.logging.Log;
/**
 * The {@link org.infinispan.lock.api.ClusteredLockManager} configuration: number of owners,
 * {@link Reliability} mode, and the statically declared locks.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
@BuiltBy(ClusteredLockManagerConfigurationBuilder.class)
public class ClusteredLockManagerConfiguration {

   private static final Log log = LogFactory.getLog(ClusteredLockManagerConfiguration.class, Log.class);

   // Defaults to CONSISTENT; a null value is rejected by the validator.
   static final AttributeDefinition<Reliability> RELIABILITY = AttributeDefinition
         .builder(Attribute.RELIABILITY, Reliability.CONSISTENT)
         .validator(reliability -> {
            if (reliability == null)
               throw log.invalidReliabilityMode();
         })
         .immutable().build();

   // -1 is the sentinel for "replicate everywhere"; any other non-positive value is invalid.
   static final AttributeDefinition<Integer> NUM_OWNERS = AttributeDefinition.builder(Attribute.NUM_OWNERS, -1)
         .validator(numOwners -> {
            if (numOwners != -1 && numOwners <= 0)
               throw log.invalidNumOwners(numOwners);
         })
         .immutable().build();

   private final AttributeSet attributes;
   private final Map<String, ClusteredLockConfiguration> locks;

   ClusteredLockManagerConfiguration(AttributeSet attributes, Map<String, ClusteredLockConfiguration> locks) {
      this.attributes = attributes;
      this.locks = locks;
   }

   static AttributeSet attributeDefinitionSet() {
      return new AttributeSet(ClusteredLockManagerConfiguration.class, NUM_OWNERS, RELIABILITY);
   }

   public int numOwners() {
      return attributes.attribute(NUM_OWNERS).get();
   }

   public Reliability reliability() {
      return attributes.attribute(RELIABILITY).get();
   }

   AttributeSet attributes() {
      return attributes;
   }

   /** @return the statically declared locks, keyed by lock name */
   public Map<String, ClusteredLockConfiguration> locks() {
      return locks;
   }
}
| 2,110
| 30.044118
| 111
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/ClusteredLockConfiguration.java
|
package org.infinispan.lock.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.lock.logging.Log;
/**
 * Configuration of a single named {@link org.infinispan.lock.api.ClusteredLock}.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
public class ClusteredLockConfiguration {

   private static final Log log = LogFactory.getLog(ClusteredLockConfiguration.class, Log.class);

   // The lock name is mandatory: the validator rejects a null value.
   static final AttributeDefinition<String> NAME = AttributeDefinition.builder(Attribute.NAME, null, String.class)
         .validator(name -> {
            if (name == null)
               throw log.missingName();
         })
         .immutable()
         .build();

   final AttributeSet attributes;

   ClusteredLockConfiguration(AttributeSet attributes) {
      this.attributes = attributes;
   }

   static AttributeSet attributeDefinitionSet() {
      return new AttributeSet(ClusteredLockConfiguration.class, NAME);
   }

   final AttributeSet attributes() {
      return attributes;
   }

   /** @return the configured lock name */
   public String name() {
      return attributes.attribute(NAME).get();
   }
}
| 1,250
| 27.431818
| 114
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/configuration/Attribute.java
|
package org.infinispan.lock.configuration;
import java.util.HashMap;
import java.util.Map;
/**
 * XML attribute names recognized by the clustered-lock configuration parser.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.4
 */
public enum Attribute {
   // must be first (callers rely on UNKNOWN as the fallback constant)
   UNKNOWN(null),

   NAME("name"),
   NUM_OWNERS("num-owners"),
   RELIABILITY("reliability");

   // Reverse index: XML attribute string -> enum constant (UNKNOWN excluded).
   private static final Map<String, Attribute> ATTRIBUTES;

   static {
      Map<String, Attribute> lookup = new HashMap<>();
      for (Attribute attribute : values()) {
         if (attribute.name != null) {
            lookup.put(attribute.name, attribute);
         }
      }
      ATTRIBUTES = lookup;
   }

   private final String name;

   Attribute(final String name) {
      this.name = name;
   }

   /**
    * Resolves an XML attribute name to its enum constant.
    *
    * @param localName the attribute's local name as it appears in XML
    * @return the matching constant, or {@link #UNKNOWN} when unrecognized
    */
   public static Attribute forName(String localName) {
      Attribute attribute = ATTRIBUTES.get(localName);
      if (attribute != null) {
         return attribute;
      }
      return UNKNOWN;
   }

   @Override
   public String toString() {
      // Note: UNKNOWN deliberately renders as null.
      return name;
   }
}
| 977
| 20.26087
| 60
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/logging/Log.java
|
package org.infinispan.lock.logging;
import static org.jboss.logging.Logger.Level.INFO;
import org.infinispan.lock.exception.ClusteredLockException;
import org.jboss.logging.BasicLogger;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.LogMessage;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageLogger;
/**
 * Log/exception messages for the clustered-lock module, generated by the JBoss Logging
 * annotation processor.
 * <p>
 * Message id range reserved for this module: 29001 - 30000. Ids must never be reused,
 * even for removed messages.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
@MessageLogger(projectCode = "ISPN")
public interface Log extends BasicLogger {
// Shared message templates (also referenced from tests/callers elsewhere).
String LOCK_DELETE_MSG = "The lock was deleted.";
String UNLOCK_FAILED_MSG = "LOCK[%s] Unlock failed from node %s";
@Message(value = LOCK_DELETE_MSG, id = 29001)
ClusteredLockException lockDeleted();
// Id 29002 is retired; kept commented out so the id is not accidentally reused.
// @Message(value = "The node has left the cluster.", id = 29002)
// ClusteredLockException nodeShutdown();
@Message(value = UNLOCK_FAILED_MSG, id = 29003)
ClusteredLockException unlockFailed(String lockName, Object originator);
@Message(value = "Missing name for the clustered lock", id = 29004)
ClusteredLockException missingName();
@Message(value = "Invalid number of owner. It must be higher than zero or -1 but it was %s", id = 29005)
ClusteredLockException invalidNumOwners(Integer value);
@Message(value = "Invalid reliability mode. Modes are AVAILABLE or CONSISTENT", id = 29006)
ClusteredLockException invalidReliabilityMode();
@Message(value = "Invalid scope for tag <clustered-lock>. Expected CACHE_CONTAINER but was %s", id = 29007)
ClusteredLockException invalidScope(String scope);
@Message(value = "Cannot create clustered locks when clustering is not enabled", id = 29008)
ClusteredLockException requireClustered();
// Logged (INFO) rather than thrown: locks are simply disabled in local mode.
@LogMessage(level = INFO)
@Message(value = "Configuration is not clustered, clustered locks are disabled", id = 29009)
void configurationNotClustered();
@Message(value = "MBean registration failed", id = 29010)
ClusteredLockException jmxRegistrationFailed(@Cause Throwable cause);
}
| 2,046
| 36.907407
| 110
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/ClusteredLockModuleLifecycle.java
|
package org.infinispan.lock.impl;
import java.util.EnumSet;
import java.util.Map;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.annotations.InfinispanModule;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.jmx.CacheManagerJmxRegistration;
import org.infinispan.lifecycle.ModuleLifecycle;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.configuration.ClusteredLockManagerConfiguration;
import org.infinispan.lock.configuration.ClusteredLockManagerConfigurationBuilder;
import org.infinispan.lock.configuration.Reliability;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.functions.IsLocked;
import org.infinispan.lock.impl.functions.LockFunction;
import org.infinispan.lock.impl.functions.UnlockFunction;
import org.infinispan.lock.impl.lock.ClusteredLockFilter;
import org.infinispan.lock.impl.manager.EmbeddedClusteredLockManager;
import org.infinispan.lock.logging.Log;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.transaction.TransactionMode;
/**
 * Module lifecycle hook that wires the clustered-lock subsystem into a cache manager:
 * registers the marshalling externalizers, creates the internal lock cache and exposes
 * the {@link ClusteredLockManager} component (plus its MBean when JMX is enabled).
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
@InfinispanModule(name = "clustered-lock", requiredModules = "core")
public class ClusteredLockModuleLifecycle implements ModuleLifecycle {
private static final Log log = LogFactory.getLog(ClusteredLockModuleLifecycle.class, Log.class);
// Name of the internal replicated/distributed cache that holds all lock state.
public static final String CLUSTERED_LOCK_CACHE_NAME = "org.infinispan.LOCKS";
@Override
public void cacheManagerStarting(GlobalComponentRegistry gcr, GlobalConfiguration globalConfiguration) {
// Clustered locks need a transport; in local mode the module is a no-op.
if (!globalConfiguration.isClustered()) {
log.configurationNotClustered();
return;
}
// Externalizers must be registered before the lock cache starts so lock
// keys/values/functions can be marshalled across the cluster.
final Map<Integer, AdvancedExternalizer<?>> externalizerMap = globalConfiguration.serialization()
.advancedExternalizers();
externalizerMap.put(ClusteredLockKey.EXTERNALIZER.getId(), ClusteredLockKey.EXTERNALIZER);
externalizerMap.put(ClusteredLockValue.EXTERNALIZER.getId(), ClusteredLockValue.EXTERNALIZER);
externalizerMap.put(LockFunction.EXTERNALIZER.getId(), LockFunction.EXTERNALIZER);
externalizerMap.put(UnlockFunction.EXTERNALIZER.getId(), UnlockFunction.EXTERNALIZER);
externalizerMap.put(IsLocked.EXTERNALIZER.getId(), IsLocked.EXTERNALIZER);
externalizerMap.put(ClusteredLockFilter.EXTERNALIZER.getId(), ClusteredLockFilter.EXTERNALIZER);
ClusteredLockManagerConfiguration config = extractConfiguration(gcr);
InternalCacheRegistry internalCacheRegistry = gcr.getComponent(InternalCacheRegistry.class);
Configuration lockConfig = createClusteredLockCacheConfiguration(config, globalConfiguration);
// EXCLUSIVE: the lock cache is internal-only and not exposed to user code.
internalCacheRegistry.registerInternalCache(CLUSTERED_LOCK_CACHE_NAME, lockConfig, EnumSet.of(InternalCacheRegistry.Flag.EXCLUSIVE));
registerClusteredLockManager(gcr.getComponent(BasicComponentRegistry.class), globalConfiguration, config);
}
// Reads the module configuration from the global configuration, falling back to defaults.
private static ClusteredLockManagerConfiguration extractConfiguration(GlobalComponentRegistry globalComponentRegistry) {
ClusteredLockManagerConfiguration config = globalComponentRegistry.getGlobalConfiguration()
.module(ClusteredLockManagerConfiguration.class);
return config == null ? ClusteredLockManagerConfigurationBuilder.defaultConfiguration() : config;
}
// Builds the internal lock cache configuration: DIST with numOwners when configured (> 0),
// otherwise REPL; partition handling derived from the reliability mode.
private static Configuration createClusteredLockCacheConfiguration(ClusteredLockManagerConfiguration config, GlobalConfiguration globalConfig) {
ConfigurationBuilder builder = new ConfigurationBuilder();
builder.transaction().transactionMode(TransactionMode.NON_TRANSACTIONAL);
if (config.numOwners() > 0) {
builder.clustering().cacheMode(CacheMode.DIST_SYNC)
.hash().numOwners(config.numOwners());
} else {
builder.clustering().cacheMode(CacheMode.REPL_SYNC);
// A zero-capacity node holds no REPL data, so locks would be unusable there.
if (globalConfig.isZeroCapacityNode()) {
log.warn("When the node is configured as a zero-capacity node, you need to specify the number of owners for the lock");
}
}
// If numOwners = 1, we can't use DENY_READ_WRITES as a single node leaving will cause the cluster to become DEGRADED
int numOwners = config.numOwners() < 0 ? HashConfiguration.NUM_OWNERS.getDefaultValue() : config.numOwners();
if (config.reliability() == Reliability.CONSISTENT && numOwners > 1) {
builder.clustering().partitionHandling().whenSplit(PartitionHandling.DENY_READ_WRITES);
} else {
builder.clustering().partitionHandling().whenSplit(PartitionHandling.ALLOW_READ_WRITES);
}
return builder.build();
}
// Registers the ClusteredLockManager component and, when JMX is on, its MBean.
private static void registerClusteredLockManager(BasicComponentRegistry registry,
GlobalConfiguration globalConfig,
ClusteredLockManagerConfiguration config) {
ClusteredLockManager clusteredLockManager = new EmbeddedClusteredLockManager(config);
registry.registerComponent(ClusteredLockManager.class, clusteredLockManager, true);
if (globalConfig.jmx().enabled()) {
try {
CacheManagerJmxRegistration jmxRegistration = registry.getComponent(CacheManagerJmxRegistration.class).running();
jmxRegistration.registerMBean(clusteredLockManager);
} catch (Exception e) {
throw log.jmxRegistrationFailed(e);
}
}
}
}
| 6,057
| 51.678261
| 147
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/ClusteredLockBlockHoundIntegration.java
|
package org.infinispan.lock.impl;
import org.infinispan.lock.impl.manager.EmbeddedClusteredLockManager;
import org.kohsuke.MetaInfServices;
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;
@MetaInfServices
public class ClusteredLockBlockHoundIntegration implements BlockHoundIntegration {

   @Override
   public void applyTo(BlockHound.Builder builder) {
      // Defining/creating the initial lock entry performs a blocking cache write:
      // https://issues.redhat.com/browse/ISPN-11835
      String lockManager = EmbeddedClusteredLockManager.class.getName();
      builder.allowBlockingCallsInside(lockManager, "createLock");
      builder.allowBlockingCallsInside(lockManager, "defineLock");
   }
}
| 721
| 37
| 99
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/functions/UnlockFunction.java
|
package org.infinispan.lock.impl.functions;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.function.Function;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.functional.EntryView;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockState;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.externalizers.ExternalizerIds;
import org.infinispan.lock.logging.Log;
/**
 * Function that releases the lock, if it's not already released.
 * <ul>
 *    <li>If the current owner is not among {@code requestors}, the lock is not released.</li>
 *    <li>If {@code requestId} is null, the request id does not constrain the unlock.</li>
 *    <li>If {@code requestId} is not null, the lock is released only when it matches the stored request id.</li>
 *    <li>If the lock is already released, the call is a successful no-op.</li>
 * </ul>
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
public class UnlockFunction implements Function<EntryView.ReadWriteEntryView<ClusteredLockKey, ClusteredLockValue>, Boolean> {

   private static final Log log = LogFactory.getLog(UnlockFunction.class, Log.class);

   public static final AdvancedExternalizer<UnlockFunction> EXTERNALIZER = new Externalizer();

   // Request id of the acquisition to undo; null means "any request id".
   private final String requestId;
   // Candidate owners: the lock is released only when the stored owner is one of these.
   private final Set<Object> requestors;

   public UnlockFunction(Object requestor) {
      // Chain to the canonical constructor instead of duplicating field assignments.
      this(null, Collections.singleton(requestor));
   }

   public UnlockFunction(String requestId, Set<Object> requestors) {
      this.requestId = requestId;
      this.requestors = requestors;
   }

   /**
    * Applies the unlock on the lock entry.
    *
    * @param entryView read-write view of the lock entry
    * @return {@code Boolean.TRUE} when the lock ends up released, {@code Boolean.FALSE} otherwise
    * @throws org.infinispan.lock.exception.ClusteredLockException when the lock entry was deleted
    */
   @Override
   public Boolean apply(EntryView.ReadWriteEntryView<ClusteredLockKey, ClusteredLockValue> entryView) {
      if (log.isTraceEnabled()) {
         log.tracef("Lock[%s] unlock request by reqId [%s] requestors %s", entryView.key().getName(), requestId, requestors);
      }
      ClusteredLockValue lockValue = entryView.find().orElseThrow(log::lockDeleted);
      // If the lock is already released return true (idempotent unlock).
      if (lockValue.getState() == ClusteredLockState.RELEASED) {
         if (log.isTraceEnabled()) {
            log.tracef("Lock[%s] Already free. State[RELEASED], reqId [%s], owner [%s]", entryView.key().getName(), lockValue.getRequestId(), lockValue.getOwner());
         }
         return Boolean.TRUE;
      }
      boolean requestIdMatches = requestId == null || (lockValue.getRequestId() != null && lockValue.getRequestId().equals(requestId));
      boolean ownerMatches = lockValue.getOwner() != null && requestors.contains(lockValue.getOwner());
      // If the requestId and the owner match, unlock and return true
      if (requestIdMatches && ownerMatches) {
         if (log.isTraceEnabled()) {
            log.tracef("Lock[%s] Unlocked by reqId [%s] requestors %s", entryView.key().getName(), requestId, requestors);
         }
         entryView.set(ClusteredLockValue.INITIAL_STATE);
         return Boolean.TRUE;
      }
      // Trace and return false if unlock is not possible
      if (log.isTraceEnabled()) {
         log.tracef("Lock[%s] Unlock not possible by reqId [%s] requestors %s. Current State[ACQUIRED], reqId [%s], owner [%s]",
               entryView.key().getName(),
               requestId,
               requestors,
               lockValue.getRequestId(),
               lockValue.getOwner());
      }
      return Boolean.FALSE;
   }

   private static class Externalizer implements AdvancedExternalizer<UnlockFunction> {

      @Override
      public Set<Class<? extends UnlockFunction>> getTypeClasses() {
         return Collections.singleton(UnlockFunction.class);
      }

      @Override
      public Integer getId() {
         return ExternalizerIds.UNLOCK_FUNCTION;
      }

      @Override
      public void writeObject(ObjectOutput output, UnlockFunction object) throws IOException {
         MarshallUtil.marshallString(object.requestId, output);
         output.writeObject(object.requestors);
      }

      @Override
      // Safe: writeObject always serializes the requestors field, which is a Set<Object>.
      @SuppressWarnings("unchecked")
      public UnlockFunction readObject(ObjectInput input) throws IOException, ClassNotFoundException {
         return new UnlockFunction(MarshallUtil.unmarshallString(input), (Set<Object>) input.readObject());
      }
   }
}
| 4,528
| 36.741667
| 164
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/functions/LockFunction.java
|
package org.infinispan.lock.impl.functions;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.function.Function;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.functional.EntryView;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockState;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.externalizers.ExternalizerIds;
import org.infinispan.lock.logging.Log;
/**
 * Lock function that allows to acquire the lock by a requestor, if such action is possible. It returns {@link
 * Boolean#TRUE} when the lock is acquired and {@link Boolean#FALSE} when it is not.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
public class LockFunction implements Function<EntryView.ReadWriteEntryView<ClusteredLockKey, ClusteredLockValue>, Boolean> {

   private static final Log log = LogFactory.getLog(LockFunction.class, Log.class);

   public static final AdvancedExternalizer<LockFunction> EXTERNALIZER = new Externalizer();

   // Identifies the individual acquire request (supports re-entrant "already mine" checks).
   private final String requestId;
   // The node/party attempting the acquisition.
   private final Object requestor;

   public LockFunction(String requestId, Object requestor) {
      this.requestId = requestId;
      this.requestor = requestor;
   }

   /**
    * Attempts the acquisition on the lock entry.
    *
    * @param entryView read-write view of the lock entry
    * @return {@code Boolean.TRUE} when the lock is (or was already) held by this request, else {@code Boolean.FALSE}
    * @throws org.infinispan.lock.exception.ClusteredLockException when the lock entry was deleted
    */
   @Override
   public Boolean apply(EntryView.ReadWriteEntryView<ClusteredLockKey, ClusteredLockValue> entryView) {
      ClusteredLockValue lock = entryView.find().orElseThrow(log::lockDeleted);
      if (log.isTraceEnabled()) {
         log.tracef("LOCK[%s] lock request by reqId %s requestor %s", entryView.key().getName(), requestId, requestor);
      }
      if (lock.getState() == ClusteredLockState.RELEASED) {
         entryView.set(new ClusteredLockValue(requestId, requestor, ClusteredLockState.ACQUIRED));
         if (log.isTraceEnabled()) {
            log.tracef("LOCK[%s] lock acquired by %s %s", entryView.key().getName(), requestId, requestor);
         }
         return Boolean.TRUE;
      } else if (lock.getState() == ClusteredLockState.ACQUIRED && lock.getRequestId().equals(requestId) && lock.getOwner().equals(requestor)) {
         // Same request already holds the lock: report success (idempotent acquire).
         // Fix: guard this tracef with isTraceEnabled() like every other trace call,
         // avoiding unconditional varargs/boxing overhead on the hot path.
         if (log.isTraceEnabled()) {
            log.tracef("LOCK[%s] lock already acquired by %s %s", entryView.key().getName(), requestId, requestor);
         }
         return Boolean.TRUE;
      }
      if (log.isTraceEnabled()) {
         log.tracef("LOCK[%s] lock not available, owned by %s %s", entryView.key().getName(), lock.getRequestId(), lock.getOwner());
      }
      return Boolean.FALSE;
   }

   private static class Externalizer implements AdvancedExternalizer<LockFunction> {

      @Override
      public Set<Class<? extends LockFunction>> getTypeClasses() {
         return Collections.singleton(LockFunction.class);
      }

      @Override
      public Integer getId() {
         return ExternalizerIds.LOCK_FUNCTION;
      }

      @Override
      public void writeObject(ObjectOutput output, LockFunction object) throws IOException {
         MarshallUtil.marshallString(object.requestId, output);
         output.writeObject(object.requestor);
      }

      @Override
      public LockFunction readObject(ObjectInput input) throws IOException, ClassNotFoundException {
         return new LockFunction(MarshallUtil.unmarshallString(input), input.readObject());
      }
   }
}
| 3,493
| 39.627907
| 144
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/functions/IsLocked.java
|
package org.infinispan.lock.impl.functions;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import java.util.function.Function;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.functional.EntryView;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockState;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.externalizers.ExternalizerIds;
import org.infinispan.lock.logging.Log;
/**
 * IsLocked function: reports whether the lock is currently acquired, optionally by a
 * specific requestor. Returns {@link Boolean#TRUE} when acquired (by the given requestor,
 * if one was supplied) and {@link Boolean#FALSE} otherwise.
 *
 * @author Katia Aresti, karesti@redhat.com
 * @since 9.2
 */
public class IsLocked implements Function<EntryView.ReadWriteEntryView<ClusteredLockKey, ClusteredLockValue>, Boolean> {

   private static final Log log = LogFactory.getLog(IsLocked.class, Log.class);

   public static final AdvancedExternalizer<IsLocked> EXTERNALIZER = new Externalizer();

   // When null, any owner matches; otherwise the lock must be held by exactly this requestor.
   private final Object requestor;

   public IsLocked() {
      this(null);
   }

   public IsLocked(Object requestor) {
      this.requestor = requestor;
   }

   @Override
   public Boolean apply(EntryView.ReadWriteEntryView<ClusteredLockKey, ClusteredLockValue> entryView) {
      ClusteredLockValue lock = entryView.find().orElseThrow(log::lockDeleted);
      boolean acquired = lock.getState() == ClusteredLockState.ACQUIRED;
      boolean ownerMatches = requestor == null || (lock.getOwner() != null && lock.getOwner().equals(requestor));
      return acquired && ownerMatches;
   }

   private static class Externalizer implements AdvancedExternalizer<IsLocked> {

      @Override
      public Set<Class<? extends IsLocked>> getTypeClasses() {
         return Collections.singleton(IsLocked.class);
      }

      @Override
      public Integer getId() {
         return ExternalizerIds.IS_LOCKED_FUNCTION;
      }

      @Override
      public void writeObject(ObjectOutput output, IsLocked object) throws IOException {
         output.writeObject(object.requestor);
      }

      @Override
      public IsLocked readObject(ObjectInput input) throws IOException, ClassNotFoundException {
         return new IsLocked(input.readObject());
      }
   }
}
| 2,503
| 31.947368
| 120
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/manager/EmbeddedClusteredLockManager.java
|
package org.infinispan.lock.impl.manager;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.context.Flag;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.jmx.annotations.MBean;
import org.infinispan.jmx.annotations.ManagedOperation;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.ClusteredLockConfiguration;
import org.infinispan.lock.api.ClusteredLockManager;
import org.infinispan.lock.configuration.ClusteredLockManagerConfiguration;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.lock.impl.ClusteredLockModuleLifecycle;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockState;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.lock.ClusteredLockImpl;
import org.infinispan.lock.logging.Log;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.util.ByteString;
/**
* The Embedded version for the lock cluster manager
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
@Scope(Scopes.GLOBAL)
@MBean(objectName = EmbeddedClusteredLockManager.OBJECT_NAME, description = "Component to manage clustered locks")
public class EmbeddedClusteredLockManager implements ClusteredLockManager {
public static final String OBJECT_NAME = "ClusteredLockManager";
private static final Log log = LogFactory.getLog(EmbeddedClusteredLockManager.class, Log.class);
public static final String FORCE_RELEASE = "forceRelease";
public static final String REMOVE = "remove";
public static final String IS_DEFINED = "isDefined";
public static final String IS_LOCKED = "isLocked";
private final ConcurrentHashMap<String, ClusteredLock> locks = new ConcurrentHashMap<>();
private final ClusteredLockManagerConfiguration config;
private volatile boolean started = false;
@Inject
EmbeddedCacheManager cacheManager;
@Inject @ComponentName(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR)
ScheduledExecutorService scheduledExecutorService;
private AdvancedCache<ClusteredLockKey, ClusteredLockValue> cache;
/**
 * Creates the manager with the given clustered-lock settings.
 *
 * @param config global clustered-lock configuration, including statically declared locks
 */
public EmbeddedClusteredLockManager(ClusteredLockManagerConfiguration config) {
this.config = config;
}
@Start
public void start() {
if (log.isTraceEnabled())
log.trace("Starting EmbeddedClusteredLockManager");
// Resolve the internal lock cache on component start. SKIP_CACHE_LOAD/STORE because
// lock state is in-memory only and must never touch a configured persistent store.
cache = cacheManager.<ClusteredLockKey, ClusteredLockValue>getCache(ClusteredLockModuleLifecycle.CLUSTERED_LOCK_CACHE_NAME)
.getAdvancedCache()
.withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE);
// Set the running flag last, after the cache reference is in place.
started = true;
}
@Stop
public void stop() {
if (log.isTraceEnabled())
log.trace("Stopping EmbeddedClusteredLockManager");
// Flip the running flag first so cache() starts failing fast, then drop the reference.
started = false;
cache = null;
}
/**
 * Returns the internal lock cache, failing fast when the component is not running.
 *
 * @throws IllegalStateException if invoked before start() or after stop()
 */
private AdvancedCache<ClusteredLockKey, ClusteredLockValue> cache() {
   if (started) {
      return cache;
   }
   throw new IllegalStateException("Component not running, cannot request the lock cache");
}
/**
 * Defines a lock under the given name using the default configuration.
 */
@Override
public boolean defineLock(String name) {
   // Delegate to the two-argument variant with a default configuration.
   ClusteredLockConfiguration defaultConfiguration = new ClusteredLockConfiguration();
   if (log.isTraceEnabled()) {
      log.tracef("LOCK[%s] defineLock with default configuration has been called %s", name, defaultConfiguration);
   }
   return defineLock(name, defaultConfiguration);
}
/**
 * Defines a lock under the given name.
 * <p>
 * NOTE(review): {@code configuration} is currently ignored beyond tracing — only the
 * default lock configuration is supported; confirm before extending.
 *
 * @return {@code true} when the lock was newly defined, {@code false} when it already existed
 */
@Override
public boolean defineLock(String name, ClusteredLockConfiguration configuration) {
   if (log.isTraceEnabled())
      log.tracef("LOCK[%s] defineLock has been called %s", name, configuration);
   // Fix: go through the cache() accessor (like every other method) rather than reading
   // the raw field twice — a call on a stopped manager now fails fast with a clear
   // IllegalStateException instead of risking a stale/null cache reference.
   AdvancedCache<ClusteredLockKey, ClusteredLockValue> lockCache = cache();
   ClusteredLockKey key = new ClusteredLockKey(ByteString.fromString(name));
   ClusteredLockValue clv = lockCache.putIfAbsent(key, ClusteredLockValue.INITIAL_STATE);
   // Cache the local handle; "newly defined" is true only when no prior value existed.
   locks.putIfAbsent(name, new ClusteredLockImpl(name, key, lockCache, this));
   return clv == null;
}
/**
 * Returns the lock handle for the given name, creating it on first access.
 */
@Override
public ClusteredLock get(String name) {
   if (log.isTraceEnabled()) {
      log.tracef("LOCK[%s] get has been called", name);
   }
   // Create-on-first-access; later calls return the cached handle.
   return locks.computeIfAbsent(name, lockName -> createLock(lockName));
}
/**
 * Builds the local handle for an already-defined lock, ensuring the shared entry exists.
 */
private ClusteredLockImpl createLock(String lockName) {
   // getConfiguration throws when the lock is unknown; the null check mirrors that contract.
   ClusteredLockConfiguration configuration = getConfiguration(lockName);
   if (configuration == null) {
      throw new ClusteredLockException(String.format("Lock %s does not exist", lockName));
   }
   ClusteredLockKey key = new ClusteredLockKey(ByteString.fromString(lockName));
   cache().putIfAbsent(key, ClusteredLockValue.INITIAL_STATE);
   return new ClusteredLockImpl(lockName, key, cache(), this);
}
/**
 * Returns the configuration of a defined lock (runtime-defined or statically declared).
 *
 * @throws ClusteredLockException when no such lock exists
 */
@Override
public ClusteredLockConfiguration getConfiguration(String name) {
   if (log.isTraceEnabled())
      log.tracef("LOCK[%s] getConfiguration has been called", name);
   // Check runtime-defined locks first, then statically declared ones (short-circuit).
   boolean defined = cache().containsKey(new ClusteredLockKey(ByteString.fromString(name)))
         || config.locks().containsKey(name);
   if (defined)
      return new ClusteredLockConfiguration();
   throw new ClusteredLockException(String.format("Lock %s does not exist", name));
}
@ManagedOperation(
description = "Returns true if the lock is defined",
displayName = "Is Lock Defined",
name = IS_DEFINED
)
@Override
// JMX-exposed check: a lock counts as defined when its entry exists in the lock cache.
public boolean isDefined(String name) {
if (log.isTraceEnabled())
log.tracef("LOCK[%s] isDefined has been called", name);
return cache().containsKey(new ClusteredLockKey(ByteString.fromString(name)));
}
/**
 * Asynchronously removes the lock from the cluster, stopping and dropping the
 * local handle first.
 *
 * @return a future completing with {@code true} if a cache entry was removed
 */
@Override
public CompletableFuture<Boolean> remove(String name) {
   if (log.isTraceEnabled()) {
      log.tracef("LOCK[%s] remove has been called", name);
   }
   ClusteredLockImpl localHandle = (ClusteredLockImpl) locks.get(name);
   if (localHandle != null) {
      localHandle.stop();
      locks.remove(name);
   }
   ClusteredLockKey key = new ClusteredLockKey(ByteString.fromString(name));
   return cache().removeAsync(key).thenApply(Objects::nonNull);
}
@ManagedOperation(
description = "Removes the lock from the cluster. The lock has to be recreated to access next time.",
displayName = "Remove Clustered Lock",
name = REMOVE
)
// Synchronous JMX counterpart of remove(): stops the local handle, then removes the cache entry.
public boolean removeSync(String name) {
if (log.isTraceEnabled())
log.tracef("LOCK[%s] remove sync has been called", name);
ClusteredLockImpl clusteredLock = (ClusteredLockImpl) locks.get(name);
if (clusteredLock != null) {
clusteredLock.stop();
locks.remove(name);
}
// remove() returns the previous value; non-null means an entry actually existed.
return cache().remove(new ClusteredLockKey(ByteString.fromString(name))) != null;
}
/**
 * Forces the lock back to its initial (released) state if the entry exists.
 *
 * @return a future completing with {@code true} when the lock ends up released
 */
public CompletableFuture<Boolean> forceRelease(String name) {
   if (log.isTraceEnabled()) {
      log.tracef("LOCK[%s] forceRelease has been called", name);
   }
   ClusteredLockKey lockKey = new ClusteredLockKey(ByteString.fromString(name));
   return cache()
         .computeIfPresentAsync(lockKey, (k, v) -> ClusteredLockValue.INITIAL_STATE)
         .thenApply(clv -> clv != null && clv.getState() == ClusteredLockState.RELEASED);
}
@ManagedOperation(
description = "Forces a release of the lock if such exist",
displayName = "Release Clustered Lock",
name = FORCE_RELEASE
)
// Synchronous JMX counterpart of forceRelease(); blocks on the async result.
public boolean forceReleaseSync(String name) {
if (log.isTraceEnabled())
log.tracef("LOCK[%s] forceRelease sync has been called", name);
return forceRelease(name).join();
}
@ManagedOperation(
description = "Returns true if the lock exists and is acquired",
displayName = "Is Locked",
name = IS_LOCKED
)
// JMX-exposed check: true only when the entry exists AND its state is ACQUIRED.
public boolean isLockedSync(String name) {
if (log.isTraceEnabled())
log.tracef("LOCK[%s] isLocked sync has been called", name);
ClusteredLockValue clv = cache().get(new ClusteredLockKey(ByteString.fromString(name)));
return clv != null && clv.getState() == ClusteredLockState.ACQUIRED;
}
// Exposes the scheduler used by ClusteredLockImpl for tryLock timeout expiration.
public ScheduledExecutorService getScheduledExecutorService() {
return scheduledExecutorService;
}
@Override
public String toString() {
   // Same textual shape as before, built with a StringBuilder.
   StringBuilder sb = new StringBuilder("EmbeddedClusteredLockManager{");
   sb.append("address=").append(cacheManager.getAddress());
   sb.append(", locks=").append(locks);
   sb.append('}');
   return sb.toString();
}
}
| 8,838
| 35.829167
| 129
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/manager/CacheHolder.java
|
package org.infinispan.lock.impl.manager;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
/**
 * Thin holder around the clustered-lock cache, handing it out with persistence
 * flags disabled.
 */
public class CacheHolder {
private final AdvancedCache<? extends ClusteredLockKey, ClusteredLockValue> clusteredLockCache;
public CacheHolder(AdvancedCache<? extends ClusteredLockKey, ClusteredLockValue> clusteredLockCache) {
this.clusteredLockCache = clusteredLockCache;
}
// Returns the lock cache decorated with SKIP_CACHE_LOAD/SKIP_CACHE_STORE so lock
// state is never read from or written to a cache store.
<K extends ClusteredLockKey> AdvancedCache<K, ClusteredLockValue> getClusteredLockCache() {
//noinspection unchecked
return (AdvancedCache<K, ClusteredLockValue>) clusteredLockCache.withFlags(Flag.SKIP_CACHE_LOAD, Flag.SKIP_CACHE_STORE);
}
}
| 786
| 38.35
| 126
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/lock/ClusteredLockFilter.java
|
package org.infinispan.lock.impl.lock;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Set;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.externalizers.ExternalizerIds;
import org.infinispan.metadata.Metadata;
import org.infinispan.notifications.cachelistener.filter.CacheEventFilter;
import org.infinispan.notifications.cachelistener.filter.EventType;
/**
* This listener is used to monitor lock state changes.
* More about listeners {@see http://infinispan.org/docs/stable/user_guide/user_guide.html#cache_level_notifications}
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
public class ClusteredLockFilter implements CacheEventFilter<ClusteredLockKey, ClusteredLockValue> {
public static final AdvancedExternalizer<ClusteredLockFilter> EXTERNALIZER = new ClusteredLockFilter.Externalizer();
// The single lock key this filter matches; all other cache events are dropped.
private final ClusteredLockKey name;
public ClusteredLockFilter(ClusteredLockKey name) {
this.name = name;
}
@Override
// Accepts only events whose key equals the filtered lock key; old/new values,
// metadata and event type are ignored.
public boolean accept(ClusteredLockKey key, ClusteredLockValue oldValue, Metadata oldMetadata, ClusteredLockValue newValue, Metadata newMetadata, EventType eventType) {
return name.equals(key);
}
// Externalizer so the filter can be shipped to remote nodes for clustered listeners.
// Wire format: the ClusteredLockKey written as a single object.
public static class Externalizer extends AbstractExternalizer<ClusteredLockFilter> {
@Override
public Set<Class<? extends ClusteredLockFilter>> getTypeClasses() {
return Collections.singleton(ClusteredLockFilter.class);
}
@Override
public void writeObject(ObjectOutput output, ClusteredLockFilter object) throws IOException {
output.writeObject(object.name);
}
@Override
public ClusteredLockFilter readObject(ObjectInput input) throws IOException, ClassNotFoundException {
return new ClusteredLockFilter((ClusteredLockKey) input.readObject());
}
@Override
public Integer getId() {
return ExternalizerIds.CLUSTERED_LOCK_FILTER;
}
}
}
| 2,230
| 34.983871
| 171
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/lock/RequestExpirationScheduler.java
|
package org.infinispan.lock.impl.lock;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.lock.logging.Log;
/**
* This class holds the logic to schedule/abort requests that need to be completed at a given time.
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
public class RequestExpirationScheduler {
private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass(), Log.class);
private final ScheduledExecutorService scheduledExecutorService;
// requestId -> (pending future, its expiration task). Entries are removed when the
// task fires or when scheduling is aborted.
private final ConcurrentMap<String, ScheduledRequest> scheduledRequests = new ConcurrentHashMap<>();
public RequestExpirationScheduler(ScheduledExecutorService scheduledExecutorService) {
this.scheduledExecutorService = scheduledExecutorService;
}
// Pairs a pending request future with the scheduled task that will expire it.
// NOTE(review): both fields could be final — they are never reassigned here.
class ScheduledRequest {
private CompletableFuture<Boolean> request;
private ScheduledFuture<?> scheduledFuture;
public ScheduledRequest(CompletableFuture<Boolean> request, ScheduledFuture<?> scheduledFuture) {
this.request = request;
this.scheduledFuture = scheduledFuture;
}
public CompletableFuture<Boolean> getRequest() {
return request;
}
public ScheduledFuture<?> getScheduledFuture() {
return scheduledFuture;
}
}
/**
 * Schedules a request for completion
 *
 * @param requestId, the unique identifier if the request
 * @param request, the request
 * @param time, time expressed in long
 * @param unit, {@link TimeUnit}
 */
public void scheduleForCompletion(String requestId, CompletableFuture<Boolean> request, long time, TimeUnit unit) {
if (request.isDone()) {
if (log.isTraceEnabled()) {
log.tracef("Request[%s] is not scheduled because is already done", requestId);
}
return;
}
// NOTE(review): containsKey + putIfAbsent below is a check-then-act sequence; two
// concurrent calls with the same requestId could both pass this check — confirm
// callers never schedule the same id concurrently.
if (scheduledRequests.containsKey(requestId)) {
String message = String.format("Request[%s] is not scheduled because it is already scheduled", requestId);
log.error(message);
throw new IllegalStateException(message);
}
if (log.isTraceEnabled()) {
log.tracef("Request[%s] being scheduled to be completed in [%d, %s]", requestId, time, unit);
}
// When the timeout fires the request is completed with false (lock not acquired)
// and the bookkeeping entry is dropped.
ScheduledFuture<?> scheduledFuture = scheduledExecutorService.schedule(() -> {
request.complete(false);
scheduledRequests.remove(requestId);
}, time, unit);
scheduledRequests.putIfAbsent(requestId, new ScheduledRequest(request, scheduledFuture));
}
/**
 * Aborts the scheduled request if the request is already completed
 *
 * @param requestId, unique identifier of the request
 */
public void abortScheduling(String requestId) {
abortScheduling(requestId, false);
}
/**
 * Aborts the scheduled request. If force is true, it will abort even if the request is not completed
 *
 * @param requestId, unique identifier of the request
 * @param force, force abort
 */
public void abortScheduling(String requestId, boolean force) {
if (log.isTraceEnabled()) {
log.tracef("Request[%s] abort scheduling", requestId);
}
ScheduledRequest scheduledRequest = scheduledRequests.get(requestId);
// cancel(false): do not interrupt the expiration task if it is already running.
if (scheduledRequest != null && (scheduledRequest.request.isDone() || force)) {
scheduledRequest.scheduledFuture.cancel(false);
scheduledRequests.remove(requestId);
}
}
/**
 * Returns the size of the currently scheduled requests
 *
 * @return the number of requests that are pending on the scheduler
 */
public int countScheduledRequests() {
return scheduledRequests.size();
}
/**
 * Get scheduled request reference by id if such exist
 *
 * @param requestId, the id of the scheduled request
 * @return {@link ScheduledRequest} the request
 */
public ScheduledRequest get(String requestId) {
return scheduledRequests.get(requestId);
}
/**
 * Clears all the scheduled requests
 */
public void clear() {
scheduledRequests.clear();
}
}
| 4,422
| 31.762963
| 118
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/lock/ClusteredLockImpl.java
|
package org.infinispan.lock.impl.lock;
import java.util.Collections;
import java.util.List;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.commons.util.Util;
import org.infinispan.functional.FunctionalMap;
import org.infinispan.functional.impl.FunctionalMapImpl;
import org.infinispan.functional.impl.ReadWriteMapImpl;
import org.infinispan.lock.api.ClusteredLock;
import org.infinispan.lock.api.OwnershipLevel;
import org.infinispan.lock.exception.ClusteredLockException;
import org.infinispan.lock.impl.entries.ClusteredLockKey;
import org.infinispan.lock.impl.entries.ClusteredLockState;
import org.infinispan.lock.impl.entries.ClusteredLockValue;
import org.infinispan.lock.impl.functions.IsLocked;
import org.infinispan.lock.impl.functions.LockFunction;
import org.infinispan.lock.impl.functions.UnlockFunction;
import org.infinispan.lock.impl.manager.EmbeddedClusteredLockManager;
import org.infinispan.lock.logging.Log;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.event.CacheEntryModifiedEvent;
import org.infinispan.notifications.cachelistener.event.CacheEntryRemovedEvent;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
import org.infinispan.remoting.RemoteException;
import org.infinispan.remoting.transport.Address;
/**
* Implements {@link ClusteredLock} interface.
* <p>
* This lock implements a non reentrant where the ownership is {@link OwnershipLevel#NODE}.
* <p>
* <h2>Non reentrant lock, Owner Node</h2> <lu> <li>Originator of the requests is the {@link Address} of the {@link
* org.infinispan.manager.EmbeddedCacheManager}</li> <li>When a lock is acquired by a Node, it cannot be re-acquired,
* even by the actual node til the lock is released.</li> <li>The lock can be unlocked only by the lock owner, in this
* case the node</li> <li>lock method does not expire til the lock is acquired, so this can cause thread starvation in
* the actual implementation</li> </lu>
* <p>
* <h2>Partition handling</h2>
*
* @author Katia Aresti, karesti@redhat.com
* @see <a href="http://infinispan.org/documentation/">Infinispan documentation</a>
* @since 9.2
*/
public class ClusteredLockImpl implements ClusteredLock {
private static final Log log = LogFactory.getLog(ClusteredLockImpl.class, Log.class);
private final String name;
private final ClusteredLockKey lockKey;
private final AdvancedCache<ClusteredLockKey, ClusteredLockValue> clusteredLockCache;
private final EmbeddedClusteredLockManager clusteredLockManager;
// Functional read-write view of the lock cache; all lock/unlock/isLocked state
// transitions are executed through eval() with the functions from impl.functions.
private final FunctionalMap.ReadWriteMap<ClusteredLockKey, ClusteredLockValue> readWriteMap;
// Requests waiting for the lock; retried when a release event is observed.
private final Queue<RequestHolder> pendingRequests;
// Identity of this node (the cache manager's Address), used as lock owner.
private final Object originator;
// > 0 while a view-change forced unlock is in flight; lock attempts are held back
// until it completes (see retryPendingRequests / lock / tryLock).
private final AtomicInteger viewChangeUnlockHappening = new AtomicInteger(0);
private final RequestExpirationScheduler requestExpirationScheduler;
private final ClusterChangeListener clusterChangeListener;
private final LockReleasedListener lockReleasedListener;
public ClusteredLockImpl(String name,
ClusteredLockKey lockKey,
AdvancedCache<ClusteredLockKey, ClusteredLockValue> clusteredLockCache,
EmbeddedClusteredLockManager clusteredLockManager) {
this.name = name;
this.lockKey = lockKey;
this.clusteredLockCache = clusteredLockCache;
this.clusteredLockManager = clusteredLockManager;
this.pendingRequests = new ConcurrentLinkedQueue<>();
this.readWriteMap = ReadWriteMapImpl.create(FunctionalMapImpl.create(clusteredLockCache));
this.originator = clusteredLockCache.getCacheManager().getAddress();
this.requestExpirationScheduler = new RequestExpirationScheduler(clusteredLockManager.getScheduledExecutorService());
this.clusterChangeListener = new ClusterChangeListener();
this.lockReleasedListener = new LockReleasedListener();
// View changes are observed on the cache manager; lock-entry changes are observed
// through a filtered clustered listener keyed on this lock only.
this.clusteredLockCache.getCacheManager().addListener(clusterChangeListener);
this.clusteredLockCache.addFilteredListener(lockReleasedListener, new ClusteredLockFilter(lockKey), null,
Util.asSet(CacheEntryModified.class, CacheEntryRemoved.class));
}
// Detaches listeners and drops all scheduled expirations; called when the lock is removed.
public void stop() {
clusteredLockCache.removeListener(clusterChangeListener);
clusteredLockCache.removeListener(lockReleasedListener);
requestExpirationScheduler.clear();
}
// Base holder for an in-flight lock/tryLock request; subclasses decide how a
// boolean lock result maps onto the caller-visible future.
public abstract class RequestHolder<E> {
protected final CompletableFuture<E> request;
protected final String requestId;
protected final Object requestor;
public RequestHolder(Object requestor, CompletableFuture<E> request) {
this.requestId = createRequestId();
this.requestor = requestor;
this.request = request;
}
public boolean isDone() {
return request.isDone();
}
// Funnels a result or failure from the functional-map eval into the request future.
public void handleLockResult(Boolean result, Throwable ex) {
if (ex != null) {
log.errorf(ex, "LOCK[%s] Exception on lock request %s", getName(), this.toString());
request.completeExceptionally(handleException(ex));
return;
}
if (result == null) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Result is null on request %s", getName(), this.toString());
}
request.completeExceptionally(new ClusteredLockException("Lock result is null, something is wrong"));
return;
}
handle(result);
}
protected abstract void handle(Boolean result);
protected abstract void forceFailed();
}
// Holder for lock(): completes only when the lock is acquired; a false result
// leaves the future pending so the request can be retried on release events.
public class LockRequestHolder extends RequestHolder<Void> {
public LockRequestHolder(Object requestor, CompletableFuture<Void> request) {
super(requestor, request);
}
@Override
protected void handle(Boolean result) {
if (result) request.complete(null);
}
@Override
protected void forceFailed() {
request.complete(null);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("LockRequestHolder{");
sb.append("name=").append(getName());
sb.append(", requestId=").append(requestId);
sb.append(", requestor=").append(requestor);
sb.append(", completed=").append(request.isDone());
sb.append(", completedExceptionally=").append(request.isCompletedExceptionally());
sb.append('}');
return sb.toString();
}
}
// Holder for tryLock()/tryLock(time, unit): without a timeout the first answer is
// final; with a timeout a negative answer is parked with the expiration scheduler.
public class TryLockRequestHolder extends RequestHolder<Boolean> {
private final long time;
private final TimeUnit unit;
private boolean isScheduled;
public TryLockRequestHolder(Object requestor, CompletableFuture<Boolean> request) {
super(requestor, request);
this.time = 0;
this.unit = null;
}
public TryLockRequestHolder(Object requestor, CompletableFuture<Boolean> request, long time, TimeUnit unit) {
super(requestor, request);
this.time = time;
this.unit = unit;
}
@Override
protected void handle(Boolean result) {
if (time <= 0) {
// The answer has to be returned without holding the CompletableFuture
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Result[%b] for request %s", getName(), result, this);
}
request.complete(result);
} else if (result) {
// The lock might have been acquired correctly
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] LockResult[%b] for %s", getName(), result, this);
}
request.complete(true);
requestExpirationScheduler.abortScheduling(requestId);
Boolean tryLockRealResult = request.join();
if (!tryLockRealResult) {
// Even if we complete true just before, the lock request can be completed false just before by the scheduler.
// This means that tryLock reached the max time waiting before the lock was actually acquired
// In this case, even if the lock was marked as acquired in the cache, it has to be released because the call expired.
// We have to unlock the lock if the requestor and the requestId match.
// Meanwhile another request for this owner might have locked it successfully and we don't want to unlock in that case
unlock(requestId, Collections.singleton(requestor));
}
} else if (!isScheduled) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Schedule for expiration %s", getName(), this);
}
// If the lock was not acquired, then schedule a complete false for the given timeout
isScheduled = true;
requestExpirationScheduler.scheduleForCompletion(requestId, request, time, unit);
}
}
@Override
protected void forceFailed() {
request.complete(false);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("TryLockRequestHolder{");
sb.append("name=").append(getName());
sb.append(", requestId=").append(requestId);
sb.append(", requestor=").append(requestor);
sb.append(", time=").append(time);
sb.append(", unit=").append(unit);
sb.append(", completed=").append(request.isDone());
sb.append(", completedExceptionally=").append(request.isCompletedExceptionally());
sb.append('}');
return sb.toString();
}
public boolean hasTimeout() {
return time > 0;
}
}
// Clustered listener reacting to this lock's cache entry (events pre-filtered by
// ClusteredLockFilter): a RELEASED value retries pending requests; entry removal
// fails all of them with lockDeleted.
@Listener(clustered = true)
class LockReleasedListener {
@CacheEntryModified
public void entryModified(CacheEntryModifiedEvent event) {
ClusteredLockValue value = (ClusteredLockValue) event.getValue();
if (value.getState() == ClusteredLockState.RELEASED) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Lock has been released, %s notified", getName(), originator);
}
retryPendingRequests(value);
}
}
@CacheEntryRemoved
public void entryRemoved(CacheEntryRemovedEvent event) {
while (!pendingRequests.isEmpty()) {
RequestHolder requestHolder = pendingRequests.poll();
requestHolder.handleLockResult(null, log.lockDeleted());
requestExpirationScheduler.abortScheduling(requestHolder.requestId);
}
}
}
// Dequeues the next live pending request and re-attempts the lock for it. Requests
// that are done, or that correspond to the release event itself, are discarded.
private void retryPendingRequests(ClusteredLockValue value) {
if (isChangeViewUnlockInProgress()) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Hold pending requests while view change unlock is happening in %s", getName(), originator);
}
} else {
RequestHolder nextRequestor = null;
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Pending requests size[%d] in %s", getName(), pendingRequests.size(), originator);
}
while (!pendingRequests.isEmpty() && (nextRequestor == null || nextRequestor.isDone() || isSameRequest(nextRequestor, value)))
nextRequestor = pendingRequests.poll();
if (nextRequestor != null) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] About to retry lock for %s", getName(), nextRequestor);
}
final RequestHolder requestor = nextRequestor;
lock(requestor);
}
}
}
private void retryPendingRequests() {
retryPendingRequests(null);
}
// True when the holder is the very request that produced the release event, in
// which case it must not be retried.
private boolean isSameRequest(RequestHolder nextRequestor, ClusteredLockValue value) {
if (value == null) return false;
return nextRequestor.requestId.equals(value.getRequestId()) && nextRequestor.requestor.equals(value.getOwner());
}
// Cache-manager listener: on topology changes, force-unlocks on behalf of members
// that left the view so their held locks do not stay acquired forever.
@Listener
class ClusterChangeListener {
@ViewChanged
public void viewChange(ViewChangedEvent event) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] ViewChange event has been fired %s", getName(), originator);
}
List<Address> newMembers = event.getNewMembers();
List<Address> oldMembers = event.getOldMembers();
if (newMembers.size() <= 1 && oldMembers.size() > 2) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] A single new node %s is this notification. Do nothing", getName(), originator);
}
return;
}
Set<Object> leavingNodes = oldMembers.stream().filter(a -> !newMembers.contains(a)).collect(Collectors.toSet());
if (leavingNodes.isEmpty()) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Nothing to do, all nodes are present %s", getName(), originator);
}
return;
}
if (leavingNodes.size() >= newMembers.size() && oldMembers.size() > 2) {
// If the oldMembers size is greater than 2, we do nothing because the other nodes will handle
// If the cluster was formed by 2 members and one leaves, we should not enter here
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Nothing to do, we are on a minority partition notification on %s", getName(), originator);
}
return;
}
if (clusteredLockManager.isDefined(name)) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] %s launches unlock for each leaving node", getName(), originator);
}
forceUnlockForLeavingMembers(leavingNodes);
}
}
/**
 * This method forces unlock for each of the Address that is not present in the cluster. We don't know which node
 * holds the lock, so we force an unlock
 *
 * @param possibleOwners
 */
private void forceUnlockForLeavingMembers(Set<Object> possibleOwners) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Call force unlock for %s from %s ", getName(), possibleOwners, originator);
}
// Raise the "view-change unlock in progress" gate before issuing the unlock;
// it is lowered again in the whenComplete below.
int viewChangeUnlockValue = viewChangeUnlockHappening.incrementAndGet();
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] viewChangeUnlockHappening value in %s ", getName(), viewChangeUnlockValue, originator);
}
unlock(null, possibleOwners)
.whenComplete((unlockResult, ex) -> {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Force unlock call completed for %s from %s ", getName(), possibleOwners, originator);
}
int viewChangeUnlockValueAfterUnlock = viewChangeUnlockHappening.decrementAndGet();
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] viewChangeUnlockHappening value in %s ", getName(), viewChangeUnlockValueAfterUnlock, originator);
}
if (ex == null) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Force unlock result %b for %s from %s ", getName(), unlockResult, possibleOwners, originator);
}
} else {
log.error(ex, log.unlockFailed(getName(), getOriginator()));
// TODO: handle the exception. Retry ? End all the pending requests in this lock ?
}
retryPendingRequests();
});
}
}
@Override
public CompletableFuture<Void> lock() {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] lock called from %s", getName(), originator);
}
CompletableFuture<Void> lockRequest = new CompletableFuture<>();
lock(new LockRequestHolder(originator, lockRequest));
return lockRequest;
}
// Enqueues the holder, then attempts the acquisition unless a view-change unlock is
// in flight (in which case the retry after that unlock will pick it up).
private void lock(RequestHolder<Void> requestHolder) {
if (requestHolder == null || requestHolder.isDone())
return;
pendingRequests.offer(requestHolder);
if (isChangeViewUnlockInProgress()) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] View change unlock is happening in %s. Do not try to lock", getName(), originator);
}
} else {
readWriteMap.eval(lockKey, new LockFunction(requestHolder.requestId, requestHolder.requestor)).whenComplete((lockResult, ex) -> {
requestHolder.handleLockResult(lockResult, ex);
});
}
}
@Override
public CompletableFuture<Boolean> tryLock() {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] tryLock called from %s", getName(), originator);
}
CompletableFuture<Boolean> tryLockRequest = new CompletableFuture<>();
tryLock(new TryLockRequestHolder(originator, tryLockRequest));
return tryLockRequest;
}
@Override
public CompletableFuture<Boolean> tryLock(long time, TimeUnit unit) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] tryLock with timeout (%d, %s) called from %s", getName(), time, unit, originator);
}
CompletableFuture<Boolean> tryLockRequest = new CompletableFuture<>();
tryLock(new TryLockRequestHolder(originator, tryLockRequest, time, unit));
return tryLockRequest;
}
// Only timed tryLock requests join the pending queue (zero-timeout calls get an
// immediate answer and are never retried).
private void tryLock(TryLockRequestHolder requestHolder) {
if (requestHolder == null || requestHolder.isDone()) {
return;
}
if (requestHolder.hasTimeout()) pendingRequests.offer(requestHolder);
if (isChangeViewUnlockInProgress()) {
requestHolder.handleLockResult(false, null);
} else {
readWriteMap.eval(lockKey, new LockFunction(requestHolder.requestId, requestHolder.requestor)).whenComplete((lockResult, ex) -> {
requestHolder.handleLockResult(lockResult, ex);
});
}
}
@Override
public CompletableFuture<Void> unlock() {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] unlock called from %s", getName(), originator);
}
CompletableFuture<Void> unlockRequest = new CompletableFuture<>();
readWriteMap.eval(lockKey, new UnlockFunction(originator)).whenComplete((unlockResult, ex) -> {
if (ex == null) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] Unlock result for %s is %b", getName(), originator, unlockResult);
}
unlockRequest.complete(null);
} else {
unlockRequest.completeExceptionally(handleException(ex));
}
});
return unlockRequest;
}
@Override
public CompletableFuture<Boolean> isLocked() {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] isLocked called from %s", getName(), originator);
}
CompletableFuture<Boolean> isLockedRequest = new CompletableFuture<>();
readWriteMap.eval(lockKey, new IsLocked()).whenComplete((isLocked, ex) -> {
if (ex == null) {
isLockedRequest.complete(isLocked);
} else {
isLockedRequest.completeExceptionally(handleException(ex));
}
});
return isLockedRequest;
}
@Override
public CompletableFuture<Boolean> isLockedByMe() {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] isLockedByMe called from %s", getName(), originator);
}
CompletableFuture<Boolean> isLockedByMeRequest = new CompletableFuture<>();
readWriteMap.eval(lockKey, new IsLocked(originator)).whenComplete((isLockedByMe, ex) -> {
if (ex == null) {
isLockedByMeRequest.complete(isLockedByMe);
} else {
isLockedByMeRequest.completeExceptionally(handleException(ex));
}
});
return isLockedByMeRequest;
}
// Internal unlock on behalf of a set of possible owners; requestId may be null for
// view-change forced unlocks.
private CompletableFuture<Boolean> unlock(String requestId, Set<Object> possibleOwners) {
if (log.isTraceEnabled()) {
log.tracef("LOCK[%s] unlock called for %s %s", getName(), requestId, possibleOwners);
}
CompletableFuture<Boolean> unlockRequest = new CompletableFuture<>();
readWriteMap.eval(lockKey, new UnlockFunction(requestId, possibleOwners)).whenComplete((unlockResult, ex) -> {
if (ex == null) {
unlockRequest.complete(unlockResult);
} else {
unlockRequest.completeExceptionally(handleException(ex));
}
});
return unlockRequest;
}
private String createRequestId() {
return Util.threadLocalRandomUUID().toString();
}
private boolean isChangeViewUnlockInProgress() {
return viewChangeUnlockHappening.get() > 0;
}
// Unwraps RemoteException and wraps anything else into ClusteredLockException so
// callers only ever observe that type.
private Throwable handleException(Throwable ex) {
Throwable lockException = ex;
if (ex instanceof RemoteException) {
lockException = ex.getCause();
}
if (!(lockException instanceof ClusteredLockException)) {
lockException = new ClusteredLockException(ex);
}
return lockException;
}
public String getName() {
return name;
}
public Object getOriginator() {
return originator;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("ClusteredLockImpl{");
sb.append("lock=").append(getName());
sb.append(", originator=").append(originator);
sb.append('}');
return sb.toString();
}
}
| 21,962
| 39.373162
| 140
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/externalizers/ExternalizerIds.java
|
package org.infinispan.lock.impl.externalizers;
/**
* Ids range: 2100 - 2149
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
// Externalizer id registry for the clustered-lock module; values must stay stable
// across releases because they are part of the wire format.
public interface ExternalizerIds {
Integer CLUSTERED_LOCK_KEY = 2100;
Integer CLUSTERED_LOCK_VALUE = 2101;
Integer LOCK_FUNCTION = 2102;
Integer UNLOCK_FUNCTION = 2103;
Integer IS_LOCKED_FUNCTION = 2104;
Integer CLUSTERED_LOCK_FILTER = 2105;
}
| 407
| 21.666667
| 47
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/entries/ClusteredLockValue.java
|
package org.infinispan.lock.impl.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.lock.impl.externalizers.ExternalizerIds;
/**
* Lock object inside the cache. Holds the lock owner, the lock request id and the status of the lock.
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
public class ClusteredLockValue {
// Shared sentinel for an unheld lock: no owner, no request id, state RELEASED.
public static final ClusteredLockValue INITIAL_STATE = new ClusteredLockValue();
public static final AdvancedExternalizer<ClusteredLockValue> EXTERNALIZER = new Externalizer();
private final String requestId;
private final Object owner;
private final ClusteredLockState state;
public ClusteredLockValue(String requestId, Object owner, ClusteredLockState state) {
this.requestId = requestId;
this.owner = owner;
this.state = state;
}
// Private: only used to build INITIAL_STATE.
private ClusteredLockValue() {
this.requestId = null;
this.owner = null;
this.state = ClusteredLockState.RELEASED;
}
public ClusteredLockState getState() {
return state;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ClusteredLockValue that = (ClusteredLockValue) o;
return Objects.equals(requestId, that.requestId) && Objects.equals(owner, that.owner) && Objects.equals(state, that.state);
}
@Override
public int hashCode() {
return Objects.hash(requestId, owner, state);
}
@Override
// NOTE(review): fields are separated by spaces only (no commas) — confirm this
// output shape is intentional before relying on it.
public String toString() {
return "ClusteredLockValue{" +
" requestId=" + requestId +
" owner=" + owner +
" state=" + state +
'}';
}
public String getRequestId() {
return requestId;
}
public Object getOwner() {
return owner;
}
// Wire format (order matters): requestId as marshalled String, owner as object,
// state as marshalled enum.
private static class Externalizer implements AdvancedExternalizer<ClusteredLockValue> {
@Override
public Set<Class<? extends ClusteredLockValue>> getTypeClasses() {
return Collections.singleton(ClusteredLockValue.class);
}
@Override
public Integer getId() {
return ExternalizerIds.CLUSTERED_LOCK_VALUE;
}
@Override
public void writeObject(ObjectOutput output, ClusteredLockValue object) throws IOException {
MarshallUtil.marshallString(object.requestId, output);
output.writeObject(object.owner);
MarshallUtil.marshallEnum(object.state, output);
}
@Override
public ClusteredLockValue readObject(ObjectInput input) throws IOException, ClassNotFoundException {
String requestId = MarshallUtil.unmarshallString(input);
Object owner = input.readObject();
ClusteredLockState state = MarshallUtil.unmarshallEnum(input, ClusteredLockState::valueOf);
return new ClusteredLockValue(requestId, owner, state);
}
}
}
| 3,148
| 28.707547
| 129
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/entries/ClusteredLockKey.java
|
package org.infinispan.lock.impl.entries;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import org.infinispan.commons.marshall.AdvancedExternalizer;
import org.infinispan.lock.impl.externalizers.ExternalizerIds;
import org.infinispan.util.ByteString;
/**
* Used to retrieve and identify a lock in the cache
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
public class ClusteredLockKey {

   public static final AdvancedExternalizer<ClusteredLockKey> EXTERNALIZER = new Externalizer();

   /** The lock name; never null. */
   private final ByteString name;

   public ClusteredLockKey(ByteString name) {
      this.name = Objects.requireNonNull(name);
   }

   /**
    * @return the name of the lock this key identifies
    */
   public ByteString getName() {
      return name;
   }

   @Override
   public boolean equals(Object o) {
      if (o == this) {
         return true;
      }
      if (o == null || o.getClass() != getClass()) {
         return false;
      }
      return name.equals(((ClusteredLockKey) o).name);
   }

   @Override
   public int hashCode() {
      // Objects.hash is kept on purpose: the hash value determines cache segment
      // placement, so it must stay identical to the historical implementation.
      return Objects.hash(name);
   }

   @Override
   public String toString() {
      return String.format("ClusteredLockKey{name=%s}", name);
   }

   /**
    * Wire format: just the ByteString name, written and read via ByteString's own helpers.
    */
   private static class Externalizer implements AdvancedExternalizer<ClusteredLockKey> {

      private Externalizer() {
      }

      @Override
      public Set<Class<? extends ClusteredLockKey>> getTypeClasses() {
         return Collections.singleton(ClusteredLockKey.class);
      }

      @Override
      public Integer getId() {
         return ExternalizerIds.CLUSTERED_LOCK_KEY;
      }

      @Override
      public void writeObject(ObjectOutput output, ClusteredLockKey object) throws IOException {
         ByteString.writeObject(output, object.name);
      }

      @Override
      public ClusteredLockKey readObject(ObjectInput input) throws IOException, ClassNotFoundException {
         return new ClusteredLockKey(ByteString.readObject(input));
      }
   }
}
| 2,086
| 23.552941
| 104
|
java
|
null |
infinispan-main/lock/src/main/java/org/infinispan/lock/impl/entries/ClusteredLockState.java
|
package org.infinispan.lock.impl.entries;
/**
* Enum that represents the state of the lock.
* Currently, two states are supported : {@link ClusteredLockState#ACQUIRED} and {@link ClusteredLockState#RELEASED}
*
* @author Katia Aresti, karesti@redhat.com
* @since 9.2
*/
public enum ClusteredLockState {
   ACQUIRED,
   RELEASED;

   // values() clones its backing array on every call; cache one copy for the
   // index-based lookup used during unmarshalling.
   private static final ClusteredLockState[] VALUES = values();

   /**
    * Index-based counterpart of {@link #valueOf(String)}, used when restoring the
    * enum from its marshalled ordinal.
    *
    * @param index ordinal of the constant to return
    * @return the constant at {@code index}
    */
   public static ClusteredLockState valueOf(int index) {
      return VALUES[index];
   }
}
| 526
| 25.35
| 116
|
java
|
null |
infinispan-main/persistence/sql/src/test/java/util/JdbcConnection.java
|
package util;
/**
 * Immutable value object holding the JDBC coordinates (URL, username, password)
 * of a test database.
 */
public class JdbcConnection {

   private final String jdbcUrl;
   private final String username;
   private final String password;

   public JdbcConnection(String jdbcUrl, String username, String password) {
      this.jdbcUrl = jdbcUrl;
      this.username = username;
      this.password = password;
   }

   public String getJdbcUrl() {
      return jdbcUrl;
   }

   public String getUsername() {
      return username;
   }

   public String getPassword() {
      return password;
   }

   @Override
   public String toString() {
      // Deliberately masks the password: toString() output routinely ends up in
      // test logs and failure reports, and must not leak credentials.
      return "JdbcConnection{" +
            "jdbcUrl='" + jdbcUrl + '\'' +
            ", username='" + username + '\'' +
            ", password='***'" +
            '}';
   }
}
| 734
| 20.617647
| 77
|
java
|
null |
infinispan-main/persistence/sql/src/test/java/org/infinispan/persistence/sql/TableJdbcStoreFunctionalTest.java
|
package org.infinispan.persistence.sql;
import java.util.Arrays;
import java.util.stream.Stream;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.DatabaseType;
import org.infinispan.persistence.sql.configuration.TableJdbcStoreConfigurationBuilder;
import org.testng.annotations.Factory;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "persistence.jdbc.stringbased.TableJdbcStoreFunctionalTest")
public class TableJdbcStoreFunctionalTest extends AbstractSQLStoreFunctionalTest {

   public TableJdbcStoreFunctionalTest(DatabaseType databaseType, boolean transactionalCache,
                                       boolean transactionalStore) {
      super(databaseType, transactionalCache, transactionalStore);
   }

   /**
    * Produces one test instance per database and transactional combination:
    * (tx cache + tx store), (tx cache only), (fully non-transactional).
    */
   @Factory
   public static Object[] factory() {
      DatabaseType[] databases = DATABASE == null
            // No databases configured via system property: fall back to the embedded engines.
            ? new DatabaseType[]{DatabaseType.H2, DatabaseType.SQLITE}
            : databasesFromSystemProperty.keySet().toArray(new DatabaseType[0]);
      return Arrays.stream(databases)
            .flatMap(db -> Stream.of(
                  new TableJdbcStoreFunctionalTest(db, true, true),
                  new TableJdbcStoreFunctionalTest(db, true, false),
                  new TableJdbcStoreFunctionalTest(db, false, false)
            )).toArray();
   }

   @Override
   protected PersistenceConfigurationBuilder createCacheStoreConfig(PersistenceConfigurationBuilder persistence,
                                                                    String cacheName, boolean preload) {
      TableJdbcStoreConfigurationBuilder storeBuilder = persistence
            .addStore(TableJdbcStoreConfigurationBuilder.class)
            .transactional(transactionalStore)
            .preload(preload);
      configureCommonConfiguration(storeBuilder);
      // Prefix the table with the first letter of this class' simple name so
      // subclasses sharing a database get distinct tables per cache.
      String tableName = tableToSearch(getClass().getSimpleName().subSequence(0, 1) + cacheName);
      storeBuilder.tableName(tableName);
      createTable(cacheName, tableName, storeBuilder.getConnectionFactory());
      return persistence;
   }
}
| 2,157
| 36.859649
| 112
|
java
|
null |
infinispan-main/persistence/sql/src/test/java/org/infinispan/persistence/sql/AbstractSQLStoreFunctionalTest.java
|
package org.infinispan.persistence.sql;
import static org.infinispan.persistence.jdbc.common.DatabaseType.H2;
import static org.infinispan.persistence.jdbc.common.DatabaseType.SQLITE;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.fail;
import java.io.File;
import java.lang.reflect.Method;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletionException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import jakarta.transaction.NotSupportedException;
import jakarta.transaction.SystemException;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.commons.util.Util;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.marshall.protostream.impl.SerializationContextRegistry;
import org.infinispan.persistence.BaseStoreFunctionalTest;
import org.infinispan.persistence.jdbc.common.DatabaseType;
import org.infinispan.persistence.jdbc.common.UnitTestDatabaseManager;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.configuration.PooledConnectionFactoryConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.sql.configuration.AbstractSchemaJdbcConfigurationBuilder;
import org.infinispan.protostream.ProtobufUtil;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.data.Address;
import org.infinispan.test.data.Key;
import org.infinispan.test.data.Person;
import org.infinispan.test.data.Sex;
import org.infinispan.transaction.TransactionMode;
import org.mockito.Mockito;
import org.postgresql.Driver;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import util.JdbcConnection;
/**
 * Base functional test for SQL-backed stores. Subclasses supply the concrete store
 * configuration via {@code createCacheStoreConfig}; this class provides per-database
 * setup (connection, DDL, type mapping), schema-variant test cases, and helpers.
 * <p>
 * External databases may be supplied via the {@code org.infinispan.test.sqlstore.*}
 * system properties (comma-separated, index-aligned lists); otherwise embedded
 * H2/SQLite are used.
 */
public abstract class AbstractSQLStoreFunctionalTest extends BaseStoreFunctionalTest {
   protected final DatabaseType DB_TYPE;
   protected final boolean transactionalCache;
   protected final boolean transactionalStore;
   // Per-class scratch directory (used e.g. for the SQLite data file).
   protected String tmpDirectory;
   // Optional hook applied to the store builder to configure a protobuf schema; reset per method.
   protected Consumer<AbstractSchemaJdbcConfigurationBuilder<?, ?>> schemaConsumer;
   protected static final String DATABASE = System.getProperty("org.infinispan.test.sqlstore.database");
   protected static final String JDBC_URL = System.getProperty("org.infinispan.test.sqlstore.jdbc.url");
   protected static final String JDBC_USERNAME = System.getProperty("org.infinispan.test.sqlstore.jdbc.username");
   protected static final String JDBC_PASSWORD = System.getProperty("org.infinispan.test.sqlstore.jdbc.password");
   protected static Map<DatabaseType, JdbcConnection> databasesFromSystemProperty = new HashMap<>();
   static {
      // Parse the system-property database list once for all subclasses.
      if(DATABASE != null) {
         databasesFromSystemProperty = getDatabases();
      }
   }
   public AbstractSQLStoreFunctionalTest(DatabaseType databaseType, boolean transactionalCache,
                                         boolean transactionalStore) {
      this.DB_TYPE = databaseType;
      this.transactionalCache = transactionalCache;
      this.transactionalStore = transactionalStore;
   }
   @BeforeClass(alwaysRun = true)
   protected void setUpTempDir() {
      tmpDirectory = CommonsTestingUtil.tmpDirectory(getClass());
      new File(tmpDirectory).mkdirs();
   }
   @BeforeMethod(alwaysRun = true)
   @Override
   protected void createBeforeMethod() throws Exception {
      // Each test method starts with no schema customization.
      schemaConsumer = null;
      super.createBeforeMethod();
   }
   @AfterClass(alwaysRun = true)
   protected void clearTempDir() {
      Util.recursiveFileRemove(tmpDirectory);
   }
   // DB table is denormalized when read
   @Override
   protected Person createEmptyPerson(String name) {
      return new Person(name, new Address());
   }
   @Override
   protected String parameters() {
      return "[" + DB_TYPE + ", transactionalCache=" + transactionalCache + ", transactionalStore=" + transactionalStore
            + "]";
   }
   @Override
   protected ConfigurationBuilder getDefaultCacheConfiguration() {
      ConfigurationBuilder builder = super.getDefaultCacheConfiguration();
      // SQL stores require a protostream-encoded cache.
      builder.encoding().mediaType(MediaType.APPLICATION_PROTOSTREAM_TYPE);
      if (transactionalCache) {
         builder.transaction().transactionMode(TransactionMode.TRANSACTIONAL);
      }
      return builder;
   }
   @Override
   protected void assertPersonEqual(Person firstPerson, Person secondPerson) {
      switch (DB_TYPE) {
         // These databases right pad CHAR to use up the entire space
         case H2:
         case DB2:
         case ORACLE:
         case SQL_SERVER:
         case POSTGRES:
            if (!firstPerson.equalsIgnoreWhitespaceAddress(secondPerson)) {
               fail("expected:<" + firstPerson + "> but was:<" + secondPerson + ">");
            }
            break;
         default:
            super.assertPersonEqual(firstPerson, secondPerson);
      }
   }
   @Override
   public void testPreloadStoredAsBinary() {
      // Run the inherited preload test against an explicit Person value schema.
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(false)
                  .messageName("Person")
                  .packageName("org.infinispan.test.core");
      super.testPreloadStoredAsBinary();
   }
   @Test(enabled = false, description = "Expiration not supported")
   @Override
   public void testPreloadAndExpiry() {
      // Expiration not supported
   }
   @Test(enabled = false, description = "Not applicable")
   @Override
   public void testTwoCachesSameCacheStore() {
      // Stores are always shared
   }
   @Override
   public void testRemoveCacheWithPassivation() {
      // NOTE(review): skipped for transactional stores — presumably passivation and
      // store transactions don't mix; confirm against the store implementation.
      if (!transactionalStore) {
         super.testRemoveCacheWithPassivation();
      }
   }
   @Test(expectedExceptions=CacheConfigurationException.class, expectedExceptionsMessageRegExp = ".*ISPN000651.*")
   public void testMaxIdleNotAllowedWithoutPassivation() {
      String cacheName = "testMaxIdleNotAllowedWithoutPassivation";
      ConfigurationBuilder cb = getDefaultCacheConfiguration();
      cb.expiration().maxIdle(1);
      createCacheStoreConfig(cb.persistence(), cacheName, false);
      TestingUtil.defineConfiguration(cacheManager, cacheName, cb.build());
      // will start the cache
      cacheManager.getCache(cacheName);
   }
   /**
    * A rolled-back transaction must leave neither cache nor store with the value.
    * No-op for non-transactional cache configurations.
    */
   public void testRollback() throws SystemException, NotSupportedException {
      if (!transactionalCache) {
         return;
      }
      String cacheName = "testRollback";
      ConfigurationBuilder cb = getDefaultCacheConfiguration();
      createCacheStoreConfig(cb.persistence(), cacheName, false);
      TestingUtil.defineConfiguration(cacheManager, cacheName, cb.build());
      Cache<String, Object> cache = cacheManager.getCache(cacheName);
      String key = "rollback-test";
      assertNull(cache.get(key));
      TransactionManager manager = cache.getAdvancedCache().getTransactionManager();
      String value = "the-value";
      manager.begin();
      cache.put(key, value);
      // Value is visible inside the transaction...
      assertEquals(value, cache.get(key));
      manager.rollback();
      // ...and gone after rollback.
      assertNull(cache.get(key));
   }
   /**
    * A table with a key column the key schema doesn't declare must be rejected at startup.
    */
   public void testDBHasMoreKeyColumnsWithKeySchema(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(false)
                  .keyMessageName("Key")
                  .packageName("org.infinispan.test.core");
      Exceptions.expectException(CacheConfigurationException.class, CompletionException.class,
            CacheConfigurationException.class,
            ".*Primary key (?i)(KEYCOLUMN2) was not found.*",
            () -> testSimpleGetAndPut(m.getName(), new Key("mykey"), "value1"));
   }
   /**
    * A composite primary key without a key schema must be rejected at startup.
    */
   public void testDBHasMoreKeyColumnsWithNoKeySchema(Method m) {
      Exceptions.expectException(CacheConfigurationException.class, CompletionException.class,
            CacheConfigurationException.class,
            ".*Primary key has multiple columns .*",
            () -> testSimpleGetAndPut(m.getName(), "key", "value"));
   }
//   public void testDBHasLessKeyColumnsWithSchema(Method m) {
//      // TODO: add new schema key with 2 columns
//      schemaConsumer = builder ->
//            builder.schemaJdbcConfigurationBuilder()
//                  .embeddedKey(false)
//                  .keyMessageName("Key")
//                  .packageName("org.infinispan.test.core");
//      Exceptions.expectException(CacheConfigurationException.class, CompletionException.class,
//            CacheConfigurationException.class,
//            "Primary key has multiple columns .*",
//            () -> testSimpleGetAndPut(m.getName(), "key", "value"));
//   }
   /**
    * Value columns not present in the declared value schema must be rejected at startup.
    */
   public void testDBHasMoreValueColumnsWithValueSchema(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(false)
                  .messageName("Person")
                  .packageName("org.infinispan.test.core");
      Exceptions.expectException(CacheConfigurationException.class, CompletionException.class,
            CacheConfigurationException.class,
            ".*Additional value columns .* found that were not part of the schema,.*",
            () -> testSimpleGetAndPut(m.getName(), "key", new Person("man2")));
   }
   /**
    * Multiple value columns without any value schema must be rejected at startup.
    */
   public void testDBHasMoreValueColumnsWithNoValueSchema(Method m) {
      Exceptions.expectException(CacheConfigurationException.class, CompletionException.class,
            CacheConfigurationException.class,
            ".*Multiple non key columns but no value message schema defined.*",
            () -> testSimpleGetAndPut(m.getName(), "key", "value"));
   }
   /**
    * A table exposing only a subset of the schema's fields is allowed — unmapped
    * schema fields simply stay unset.
    */
   public void testDBHasLessValueColumnsWithSchema(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(false)
                  .messageName("Person")
                  .packageName("org.infinispan.test.core");
      testSimpleGetAndPut(m.getName(), "key", new Person("joe"));
   }
   /**
    * embeddedKey=true: the key is one of the value's own fields (Person.name here).
    */
   public void testEmbeddedKey(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(true)
                  .messageName("Person")
                  .packageName("org.infinispan.test.core");
      testSimpleGetAndPut(m.getName(), "joe", new Person("joe"));
   }
   public void testEnumForKey(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(false)
                  .keyMessageName("Sex")
                  .packageName("org.infinispan.test.core");
      testSimpleGetAndPut(m.getName(), Sex.FEMALE, "samantha");
   }
   public void testEnumForValue(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(false)
                  .messageName("Sex")
                  .packageName("org.infinispan.test.core");
      testSimpleGetAndPut(m.getName(), "samantha", Sex.FEMALE);
   }
   /**
    * Verifies the store fails fast while the protobuf schema is unregistered, then
    * works once the schema becomes available, without restarting the cache manager.
    * A spied SerializationContextRegistry serves an empty context until the flag flips.
    */
   public void testEmbeddedLoadSchemaAfterCreation(Method m) {
      schemaConsumer = builder ->
            builder.schema()
                  .embeddedKey(true)
                  .messageName("Person")
                  .packageName("org.infinispan.test.core");
      final AtomicBoolean loadSchema = new AtomicBoolean(false);
      SerializationContextRegistry scr = TestingUtil.extractGlobalComponent(cacheManager, SerializationContextRegistry.class);
      SerializationContextRegistry spyScr = Mockito.spy(scr);
      Mockito.when(spyScr.getUserCtx()).then(ivk -> {
         if (loadSchema.get()) return scr.getUserCtx();
         // Empty context — simulates "schema not yet registered".
         return ProtobufUtil.newSerializationContext();
      });
      TestingUtil.replaceComponent(cacheManager, SerializationContextRegistry.class, spyScr, true);
      String cacheName = m.getName();
      ConfigurationBuilder cb = getDefaultCacheConfiguration();
      createCacheStoreConfig(cb.persistence(), cacheName, false);
      TestingUtil.defineConfiguration(cacheManager, cacheName, cb.build());
      // This should fail because the schema does not exist.
      Exceptions.expectException(CacheConfigurationException.class, CompletionException.class,
            CacheConfigurationException.class,
            "ISPN008047: Schema not found for : org.infinispan.test.core.Person",
            () -> cacheManager.getCache(cacheName)
      );
      loadSchema.set(true);
      // Schema registered successfully afterwards.
      // Should be fully functional now.
      Cache<Object, Object> cache = cacheManager.getCache(cacheName);
      String key = "joe";
      Person value = new Person("joe");
      assertNull(cache.get(key));
      cache.put(key, value);
      assertEquals(value, cache.get(key));
   }
   /**
    * Creates a cache named {@code cacheName}, performs a put/get round trip and
    * verifies the entry is also reachable via entrySet iteration.
    */
   private void testSimpleGetAndPut(String cacheName, Object key, Object value) {
      ConfigurationBuilder cb = getDefaultCacheConfiguration();
      createCacheStoreConfig(cb.persistence(), cacheName, false);
      TestingUtil.defineConfiguration(cacheManager, cacheName, cb.build());
      Cache<Object, Object> cache = cacheManager.getCache(cacheName);
      assertNull(cache.get(key));
      cache.put(key, value);
      assertEquals(value, cache.get(key));
      List<Map.Entry<Object, Object>> entryList = cache.entrySet().stream().collect(Collectors.toList());
      assertEquals(1, entryList.size());
      Map.Entry<Object, Object> entry = entryList.get(0);
      assertEquals(key, entry.getKey());
      assertEquals(value, entry.getValue());
   }
   /**
    * Applies the per-test schema hook (if any) and wires the JDBC driver/connection
    * for the configured database. External databases take their URL/credentials from
    * system properties; H2/SQLite run embedded.
    */
   protected void configureCommonConfiguration(AbstractSchemaJdbcConfigurationBuilder<?, ?> builder) {
      if (schemaConsumer != null) {
         schemaConsumer.accept(builder);
      }
      PooledConnectionFactoryConfigurationBuilder<?> connectionPool = null;
      // Only external databases need a pooled connection from system properties.
      if(!(DB_TYPE == SQLITE || DB_TYPE == H2)) {
         connectionPool = addJdbcConnection(builder);
      }
      switch (DB_TYPE) {
         case POSTGRES:
            connectionPool
                  .driverClass(Driver.class);
            break;
         case ORACLE:
            connectionPool
                  .driverClass("oracle.jdbc.OracleDriver");
            break;
         case MARIA_DB:
            connectionPool
                  .driverClass("org.mariadb.jdbc.Driver");
            break;
         case DB2:
            connectionPool
                  .driverClass("com.ibm.db2.jcc.DB2Driver");
            break;
         case MYSQL:
            connectionPool
                  .driverClass("com.mysql.cj.jdbc.Driver");
            break;
         case SQLITE:
            builder.connectionPool()
                  .driverClass("org.sqlite.JDBC")
                  .connectionUrl("jdbc:sqlite:" + tmpDirectory + File.separator + "sqllite.data")
                  .username("sa");
            break;
         case SQL_SERVER:
            connectionPool
                  .driverClass("com.microsoft.sqlserver.jdbc.SQLServerDriver");
            break;
         case SYBASE:
            connectionPool
                  .driverClass("com.sybase.jdbc4.jdbc.SybDriver");
            break;
         case H2:
         default:
            UnitTestDatabaseManager.configureUniqueConnectionFactory(builder);
      }
   }
   // Dialect-specific SQL type for binary columns.
   String binaryType() {
      switch (DB_TYPE) {
         case POSTGRES:
            return "BYTEA";
         case ORACLE:
            return "RAW(255)";
         case SQLITE:
            return "BINARY";
         default:
            return "VARBINARY(255)";
      }
   }
   // Dialect-specific SQL type for boolean columns.
   String booleanType() {
      switch (DB_TYPE) {
         case SQL_SERVER:
            return "BIT";
         case ORACLE:
         case ORACLE_XE:
            return "NUMBER(1, 0)";
         default:
            return "BOOLEAN";
      }
   }
   // Dialect-specific SQL type for date/time columns.
   String dateTimeType() {
      switch (DB_TYPE) {
         case SYBASE:
         case MYSQL:
         case MARIA_DB:
            return "DATETIME";
         case SQL_SERVER:
            return "DATETIME2";
         case POSTGRES:
         case H2:
         default:
            return "TIMESTAMP";
      }
   }
   /**
    * Creates (if absent) the table backing {@code cacheName}. The DDL is chosen from
    * the cache name so each schema-variant test gets a matching table layout;
    * unmatched names fall through to a simple keycolumn/value1 pair.
    */
   protected void createTable(String cacheName, String tableName, ConnectionFactoryConfigurationBuilder<ConnectionFactoryConfiguration> builder) {
      String tableCreation;
      String upperCaseCacheName = cacheName.toUpperCase();
      if (cacheName.equalsIgnoreCase("testPreloadStoredAsBinary")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "keycolumn VARCHAR(255) NOT NULL, " +
               "NAME VARCHAR(255) NOT NULL, " +
               "street CHAR(255), " +
               "city VARCHAR(255), " +
               "zip INT, " +
               "picture " + binaryType() + ", " +
               "accepted_tos " + booleanType() + ", " +
               "sex VARCHAR(255), " +
               "birthdate " + dateTimeType() + ", " +
               "moneyOwned NUMERIC(10, 4), " +
               "moneyOwed FLOAT, " +
               "decimalField DECIMAL(10, 4), " +
               "realField REAL, " +
               "PRIMARY KEY (keycolumn))";
      } else if (cacheName.equalsIgnoreCase("testStoreByteArrays")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "keycolumn " + binaryType() + " NOT NULL, " +
               "value1 " + binaryType() + " NOT NULL, " +
               "PRIMARY KEY (keycolumn))";
      } else if (upperCaseCacheName.startsWith("TESTDBHASMOREVALUECOLUMNS")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "keycolumn VARCHAR(255) NOT NULL, " +
               "NAME VARCHAR(255) NOT NULL, " +
               "street VARCHAR(255), " +
               "city VARCHAR(255), " +
               "zip INT, " +
               "picture " + binaryType() + ", " +
               "sex VARCHAR(255), " +
               "birthdate " + dateTimeType() + ", " +
               "value2 VARCHAR(255), " +
               "value3 VARCHAR(255), " +
               "PRIMARY KEY (keycolumn))";
      } else if (upperCaseCacheName.startsWith("TESTDBHASMOREKEYCOLUMNS")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               // The name of the field for the Key schema is "value1"
               "value1 VARCHAR(255) NOT NULL, " +
               "keycolumn2 VARCHAR(255) NOT NULL, " +
               "value2 VARCHAR(255) NOT NULL, " +
               "PRIMARY KEY (value1, keycolumn2))";
      } else if (upperCaseCacheName.startsWith("TESTDBHASLESSVALUECOLUMNS")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "keycolumn VARCHAR(255) NOT NULL, " +
               "NAME VARCHAR(255) NOT NULL, " +
               "street VARCHAR(255), " +
               "PRIMARY KEY (keycolumn))";
      } else if (upperCaseCacheName.startsWith("TESTEMBEDDED")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "NAME VARCHAR(255) NOT NULL, " +
               "street VARCHAR(255), " +
               "city VARCHAR(255), " +
               "zip INT, " +
               "picture " + binaryType() + ", " +
               "sex VARCHAR(255), " +
               "birthdate " + dateTimeType() + ", " +
               "PRIMARY KEY (name))";
      } else if (upperCaseCacheName.startsWith("TESTENUMFORVALUE")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "NAME VARCHAR(255) NOT NULL, " +
               "sex VARCHAR(255), " +
               "PRIMARY KEY (name))";
      } else if (upperCaseCacheName.startsWith("TESTENUMFORKEY")) {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "sex VARCHAR(255) NOT NULL, " +
               "name VARCHAR(255), " +
               "PRIMARY KEY (sex))";
      } else {
         tableCreation = "CREATE TABLE " + tableName + " (" +
               "keycolumn VARCHAR(255) NOT NULL, " +
               "value1 VARCHAR(255) NOT NULL, " +
               "PRIMARY KEY (keycolumn))";
      }
      ConnectionFactoryConfiguration config = builder.create();
      ConnectionFactory factory = ConnectionFactory.getConnectionFactory(config.connectionFactoryClass());
      factory.start(config, getClass().getClassLoader());
      Connection connection = null;
      try {
         connection = factory.getConnection();
         String modifiedTableName = tableToSearch(tableName);
         // Only create the table when the metadata lookup says it is missing.
         try (ResultSet rs = connection.getMetaData().getTables(null, null, modifiedTableName,
               new String[]{"TABLE"})) {
            if (!rs.next()) {
               try (Statement stmt = connection.createStatement()) {
                  log.debugf("Table: %s doesn't exist, creating via %s%n", modifiedTableName, tableCreation);
                  stmt.execute(tableCreation);
               }
            }
         }
      } catch (SQLException t) {
         throw new AssertionError(t);
      } finally {
         factory.releaseConnection(connection);
         factory.stop();
      }
   }
   // Adjusts identifier case for catalog lookups: Postgres folds to lower case,
   // the other dialects used here report upper case.
   String tableToSearch(String tableName) {
      if (DB_TYPE == DatabaseType.POSTGRES) return tableName.toLowerCase();
      return tableName.toUpperCase();
   }
   /**
    * Configures a pooled connection for the current DB_TYPE from the system-property map.
    *
    * @throws IllegalArgumentException when any of the JDBC system properties is missing
    */
   private PooledConnectionFactoryConfigurationBuilder<?> addJdbcConnection(AbstractSchemaJdbcConfigurationBuilder<?, ?> builder) {
      if(JDBC_URL != null && JDBC_PASSWORD != null && JDBC_USERNAME != null) {
         JdbcConnection jdbcConnection = databasesFromSystemProperty.get(DB_TYPE);
         return builder.connectionPool()
               .connectionUrl(jdbcConnection.getJdbcUrl())
               .username(jdbcConnection.getUsername())
               .password(jdbcConnection.getPassword());
      }
      throw new IllegalArgumentException("JDBC connection wasn't provided through System Properties");
   }
   /**
    * Parses the comma-separated database/url/username/password system properties into
    * a map keyed by DatabaseType. The four lists are index-aligned; no length check
    * is performed, so mismatched lists fail with an index error.
    */
   protected static HashMap<DatabaseType, JdbcConnection> getDatabases() {
      Objects.requireNonNull(JDBC_URL);
      Objects.requireNonNull(JDBC_USERNAME);
      Objects.requireNonNull(JDBC_PASSWORD);
      Objects.requireNonNull(DATABASE);
      List<DatabaseType> databaseTypes = Arrays.stream(DATABASE.split(",")).map(DatabaseType::guessDialect).collect(Collectors.toList());
      HashMap<DatabaseType, JdbcConnection> map = new HashMap<>();
      for (int i = 0; i < databaseTypes.size(); i++) {
         String jdbcURL = JDBC_URL.split(",")[i];
         String username = JDBC_USERNAME.split(",")[i];
         String password = JDBC_PASSWORD.split(",")[i];
         JdbcConnection jdbcConnection = new JdbcConnection(jdbcURL, username, password);
         DatabaseType databaseType = databaseTypes.get(i);
         map.put(databaseType, jdbcConnection);
      }
      return map;
   }
}
| 23,136
| 38.281834
| 146
|
java
|
null |
infinispan-main/persistence/sql/src/test/java/org/infinispan/persistence/sql/QueriesJdbcJoinTest.java
|
package org.infinispan.persistence.sql;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.stream.Stream;
import org.infinispan.Cache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.test.ExceptionRunnable;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.EmbeddedCacheManagerStartupException;
import org.infinispan.persistence.jdbc.common.UnitTestDatabaseManager;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.sql.configuration.QueriesJdbcConfigurationBuilder;
import org.infinispan.persistence.sql.configuration.QueriesJdbcStoreConfigurationBuilder;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.data.Address;
import org.infinispan.test.data.Person;
import org.infinispan.test.data.Sex;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "persistence.sql.QueriesJdbcJoinTest")
public class QueriesJdbcJoinTest extends AbstractInfinispanTest {
private static final String TABLE1_NAME = "Person";
private static final String TABLE2_NAME = "Address";
private ConnectionFactory FACTORY;
   /**
    * Stops the shared connection factory, if one was started.
    * NOTE: despite the method name, this runs after every test method (@AfterMethod).
    */
   @AfterMethod(alwaysRun = true)
   public void afterClass() {
      if (FACTORY != null) {
         FACTORY.stop();
      }
   }
   /**
    * Configuration variants exercised by {@code testIdJoinTypes}. Each constant applies
    * its store configuration tweak and states which startup failure (if any) it expects;
    * PASS is the baseline that must start and work.
    */
   enum TestType {
      // SELECTs use '*' over the join, so columns from the joined table leak in that the
      // value schema does not declare — startup validation must reject the configuration.
      TOO_MANY_COLUMNS {
         @Override
         void runTest(ExceptionRunnable runnable) {
            Exceptions.expectException(".*Additional value columns.*found that were not part of the schema.*", runnable,
                  EmbeddedCacheManagerStartupException.class, CacheConfigurationException.class,
                  CompletionException.class, CacheConfigurationException.class);
         }
         @Override
         void modifyConfiguration(QueriesJdbcStoreConfigurationBuilder builder, boolean idJoin) {
            super.modifyConfiguration(builder, idJoin);
            QueriesJdbcConfigurationBuilder queryBuilder = builder.queries();
            if (idJoin) {
               queryBuilder
                     // Note these return * which will include the joined columns as well
                     .select("SELECT * FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 ON t1.address = t2.id WHERE t1.name = :name")
                     .selectAll("SELECT * FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 ON t1.address = t2.id");
            } else {
               queryBuilder
                     // Note these return * which will include the joined columns as well
                     .select("SELECT * FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 WHERE t1.name = :name AND t2.name = :name")
                     .selectAll("SELECT * FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 WHERE t1.name = t2.name");
            }
         }
      },
      // The schema field "name" doubles as the key column, so embeddedKey must be true;
      // configuring embeddedKey(false) must be rejected at startup.
      NOT_EMBEDDED_KEY {
         @Override
         void runTest(ExceptionRunnable runnable) {
            Exceptions.expectException(".*was found in the value schema .* but embedded key was not true", runnable,
                  EmbeddedCacheManagerStartupException.class, CacheConfigurationException.class,
                  CompletionException.class, CacheConfigurationException.class);
         }
         @Override
         void modifyConfiguration(QueriesJdbcStoreConfigurationBuilder builder, boolean idJoin) {
            super.modifyConfiguration(builder, idJoin);
            builder.schema().embeddedKey(false);
         }
      },
      // Baseline: valid configuration, no exception expected.
      PASS;
      // Default: just run the scenario and expect success; failing constants override.
      void runTest(ExceptionRunnable runnable) throws Exception {
         runnable.run();
      }
      // Default configuration: explicit column lists (no '*'), embedded key, and a join
      // either via the address id column (idJoin) or via matching name columns.
      void modifyConfiguration(QueriesJdbcStoreConfigurationBuilder builder, boolean idJoin) {
         QueriesJdbcConfigurationBuilder queryBuilder = builder.queries();
         queryBuilder.size("SELECT COUNT(*) FROM " + TABLE1_NAME);
         builder.schema().embeddedKey(true);
         if (idJoin) {
            builder.queries()
                  .select("SELECT t1.name, t1.picture, t1.sex, t1.birthdate, t1.accepted_tos, t2.street, t2.city, t2.zip FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 ON t1.address = t2.id WHERE t1.name = :name")
                  .selectAll("SELECT t1.name, t1.picture, t1.sex, t1.birthdate, t1.accepted_tos, t2.street, t2.city, t2.zip FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 ON t1.address = t2.id");
         } else {
            builder.queries()
                  .select("SELECT t1.name, t1.picture, t1.sex, t1.birthdate, t1.accepted_tos, t2.street, t2.city, t2.zip FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 WHERE t1.name = :name AND t2.name = :name")
                  .selectAll("SELECT t1.name, t1.picture, t1.sex, t1.birthdate, t1.accepted_tos, t2.street, t2.city, t2.zip FROM " + TABLE1_NAME + " t1 JOIN " + TABLE2_NAME + " t2 WHERE t1.name = t2.name");
         }
      }
   }
   /**
    * Builds a cache manager whose default cache uses a query-based JDBC store over the
    * two joined tables. The store is read-only (ignoreModifications) since the variant
    * queries only define selects; {@code type} applies its specific tweaks last.
    */
   protected EmbeddedCacheManager createCacheManager(TestType type, boolean idJoin) {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.encoding().mediaType(MediaType.APPLICATION_PROTOSTREAM_TYPE);
      QueriesJdbcStoreConfigurationBuilder queriesBuilder = builder.persistence()
            .addStore(QueriesJdbcStoreConfigurationBuilder.class)
            .ignoreModifications(true);
      queriesBuilder.keyColumns("name");
      queriesBuilder.schema()
            .messageName("Person")
            .packageName("org.infinispan.test.core");
      UnitTestDatabaseManager.configureUniqueConnectionFactory(queriesBuilder);
      // Tables must exist before the store starts and validates its queries.
      createTables(queriesBuilder.getConnectionFactory(), idJoin);
      type.modifyConfiguration(queriesBuilder, idJoin);
      return TestCacheManagerFactory.createCacheManager(TestDataSCI.INSTANCE, builder);
   }
   /**
    * Creates the Person and Address tables and starts {@code FACTORY} (stopped later in
    * {@code afterClass}). With {@code idJoin} the tables join on Person.address = Address.id;
    * otherwise they join on matching name columns.
    */
   private void createTables(ConnectionFactoryConfigurationBuilder<ConnectionFactoryConfiguration> builder, boolean idJoin) {
      ConnectionFactoryConfiguration config = builder.create();
      FACTORY = ConnectionFactory.getConnectionFactory(config.connectionFactoryClass());
      FACTORY.start(config, getClass().getClassLoader());
      Connection connection = null;
      try {
         connection = FACTORY.getConnection();
         try (Statement stmt = connection.createStatement()) {
            String tableCreation = "CREATE TABLE " + TABLE1_NAME + " (" +
                  "name VARCHAR(255) NOT NULL, " +
                  (idJoin ? "address INT, " : "") +
                  "picture VARBINARY(255), " +
                  "sex VARCHAR(255), " +
                  "birthdate TIMESTAMP, " +
                  "accepted_tos boolean, " +
                  // Extra column deliberately not mapped by any schema/query.
                  "notused VARCHAR(255), " +
                  "PRIMARY KEY (NAME))";
            stmt.execute(tableCreation);
            tableCreation = "create TABLE " + TABLE2_NAME + " (" +
                  (idJoin ? "id INT NOT NULL, " : "name VARCHAR(255) NOT NULL, ") +
                  "street VARCHAR(255), " +
                  "city VARCHAR(255), " +
                  "zip INT, " +
                  "PRIMARY KEY (" + (idJoin ? "id" : "name") + "))";
            stmt.execute(tableCreation);
         }
      } catch (SQLException t) {
         throw new AssertionError(t);
      } finally {
         FACTORY.releaseConnection(connection);
      }
   }
   /**
    * Exercises multi-statement upsert/delete queries that span both tables (each query
    * string contains two ';'-separated statements), then verifies a full put/get/remove
    * round trip through the cache.
    */
   public void testUpsertMultipleValues() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.encoding().mediaType(MediaType.APPLICATION_PROTOSTREAM_TYPE);
      QueriesJdbcStoreConfigurationBuilder queriesBuilder = builder.persistence()
            .addStore(QueriesJdbcStoreConfigurationBuilder.class);
      queriesBuilder.keyColumns("name");
      queriesBuilder.schema()
            .messageName("Person")
            .packageName("org.infinispan.test.core");
      UnitTestDatabaseManager.configureUniqueConnectionFactory(queriesBuilder);
      // Name-joined tables (no id column) — see createTables.
      createTables(queriesBuilder.getConnectionFactory(), false);
      TestType.PASS.modifyConfiguration(queriesBuilder, false);
      queriesBuilder.queries()
            .delete("DELETE FROM " + TABLE1_NAME + " t1 WHERE t1.name = :name; DELETE FROM " + TABLE2_NAME + " t2 where t2.name = :name")
            .deleteAll("DELETE FROM " + TABLE1_NAME + "; DELETE FROM " + TABLE2_NAME)
            .upsert(insertTable1Statement(false, true) +
                  "; " + insertTable2Statement(false, true));
      EmbeddedCacheManager embeddedCacheManager = TestCacheManagerFactory.createCacheManager(TestDataSCI.INSTANCE, builder);
      Cache<String, Person> cache = embeddedCacheManager.getCache();
      String name = "Mircea Markus";
      Person person = samplePerson(name);
      cache.put(name, person);
      assertEquals(person, cache.get(name));
      cache.remove(name);
      assertNull(cache.get(name));
   }
@DataProvider(name = "testTypes")
public static Object[][] testTypes() {
   // Cross product of every TestType with idJoin = {true, false}, in enum order.
   TestType[] types = TestType.values();
   Object[][] combos = new Object[types.length * 2][];
   int next = 0;
   for (TestType type : types) {
      combos[next++] = new Object[]{type, true};
      combos[next++] = new Object[]{type, false};
   }
   return combos;
}
@Test(dataProvider = "testTypes")
public void testIdJoinTypes(TestType type, boolean idJoin) throws Exception {
   type.runTest(() -> {
      // Seed one row directly over JDBC, then verify the store loads the same Person.
      EmbeddedCacheManager manager = createCacheManager(type, idJoin);
      Cache<String, Person> cache = manager.getCache();
      Connection connection = FACTORY.getConnection();
      try {
         String key = "Manik Surtani";
         Person expected = samplePerson(key);
         insertData(connection, Collections.singleton(expected), idJoin);
         assertEquals(expected, cache.get(key));
      } finally {
         FACTORY.releaseConnection(connection);
      }
   });
}
// Builds a fixed, fully-populated Person for the given name so that equality
// assertions against values loaded from the database are deterministic.
private Person samplePerson(String name) {
   Address home = new Address();
   home.setStreet("Cool Street");
   home.setCity("London");
   home.setZip(1321);
   Person p = new Person(name, home);
   p.setPicture(new byte[]{0x1, 0x12});
   p.setSex(Sex.MALE);
   p.setBirthDate(new java.util.Date(1629495308));
   p.setAcceptedToS(true);
   return p;
}
// Builds the INSERT for the person table. The optional address column only exists
// when the two tables are joined by a numeric id; placeholders are either named
// (":name", …) or positional ("?") depending on namedParams.
private String insertTable1Statement(boolean idJoin, boolean namedParams) {
   StringBuilder sql = new StringBuilder("INSERT INTO ").append(TABLE1_NAME)
         .append(" (name, ").append(idJoin ? "address, " : "")
         .append(" picture, sex, birthdate, accepted_tos) ");
   if (namedParams) {
      sql.append("VALUES (:name").append(idJoin ? ", :address" : "")
            .append(", :picture, :sex, :birthdate, :accepted_tos)");
   } else {
      sql.append("VALUES (?, ?, ?, ?, ?").append(idJoin ? ", ?)" : ")");
   }
   return sql.toString();
}
// Builds the INSERT for the address table, keyed by the numeric id when joining
// by id, otherwise by the person's name.
private String insertTable2Statement(boolean idJoin, boolean namedParams) {
   String keyColumn = idJoin ? "id" : "name";
   String values = namedParams
         ? "VALUES (" + (idJoin ? ":id" : ":name") + ", :street, :city, :zip)"
         : "VALUES (?, ?, ?, ?)";
   return "INSERT INTO " + TABLE2_NAME + "(" + keyColumn + ", street, city, zip) " + values;
}
/**
 * Seeds the database directly over JDBC, bypassing the cache store: one row per person
 * in TABLE1 and one row per distinct address in TABLE2 (keyed by a synthetic int id when
 * {@code idJoin} is true, otherwise by the person's name).
 *
 * @param connection      open JDBC connection; caller is responsible for releasing it
 * @param peopleToCreate  people to insert
 * @param idJoin          whether the tables are joined by a numeric address id
 * @throws SQLException   if any batch insert fails
 */
private void insertData(Connection connection, Set<Person> peopleToCreate, boolean idJoin) throws SQLException {
   String insertStatement = insertTable1Statement(idJoin, false);
   int addressCount = 0;
   // Maps each distinct Address to its synthetic id so people sharing an address
   // reference the same TABLE2 row; null when the join is done by name instead.
   Map<Address, Integer> addressIntegerMap = idJoin ? new HashMap<>() : null;
   try (PreparedStatement ps = connection.prepareStatement(insertStatement)) {
      for (Person person : peopleToCreate) {
         int offset = 1;
         ps.setString(offset++, person.getName());
         if (addressIntegerMap != null) {
            Address address = person.getAddress();
            Integer addressNumber = addressIntegerMap.get(address);
            if (addressNumber == null) {
               // First occurrence of this address: assign the next synthetic id.
               addressNumber = addressCount++;
               addressIntegerMap.put(address, addressNumber);
            }
            ps.setInt(offset++, addressNumber);
         }
         ps.setBytes(offset++, person.getPicture());
         ps.setString(offset++, person.getSex().toString());
         ps.setTimestamp(offset++, new Timestamp(person.getBirthDate().getTime()));
         ps.setBoolean(offset, person.isAcceptedToS());
         ps.addBatch();
      }
      ps.executeBatch();
   }
   insertStatement = insertTable2Statement(idJoin, false);
   try (PreparedStatement ps = connection.prepareStatement(insertStatement)) {
      if (addressIntegerMap != null) {
         // Fix: iterate the distinct address -> id entries rather than the people.
         // Iterating the people would batch the same address id twice whenever two
         // people share an address, violating TABLE2's PRIMARY KEY.
         for (Map.Entry<Address, Integer> entry : addressIntegerMap.entrySet()) {
            addAddressBatch(ps, entry.getValue(), null, entry.getKey());
         }
      } else {
         for (Person person : peopleToCreate) {
            addAddressBatch(ps, null, person.getName(), person.getAddress());
         }
      }
      ps.executeBatch();
   }
}

// Binds one TABLE2 row: the key column (numeric id when non-null, otherwise the
// person's name) followed by street, city and zip, then adds it to the batch.
private static void addAddressBatch(PreparedStatement ps, Integer id, String name, Address address)
      throws SQLException {
   if (id != null) {
      ps.setInt(1, id);
   } else {
      ps.setString(1, name);
   }
   ps.setString(2, address.getStreet());
   ps.setString(3, address.getCity());
   ps.setInt(4, address.getZip());
   ps.addBatch();
}
}
| 13,787
| 43.766234
| 222
|
java
|
null |
infinispan-main/persistence/sql/src/test/java/org/infinispan/persistence/sql/QueriesJdbcStoreFunctionalTest.java
|
package org.infinispan.persistence.sql;
import java.util.Arrays;
import java.util.Collections;
import java.util.stream.Stream;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.DatabaseType;
import org.infinispan.persistence.jdbc.common.SqlManager;
import org.infinispan.persistence.sql.configuration.QueriesJdbcStoreConfigurationBuilder;
import org.testng.annotations.Factory;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "persistence.jdbc.stringbased.QuerySchemaJdbcStoreFunctionalTest")
public class QueriesJdbcStoreFunctionalTest extends AbstractSQLStoreFunctionalTest {
   // NOTE(review): the TestNG testName above reads "QuerySchemaJdbcStoreFunctionalTest"
   // although the class is QueriesJdbcStoreFunctionalTest — looks like a copy/paste
   // leftover; confirm before changing it, reporting tooling may key off the name.

   public QueriesJdbcStoreFunctionalTest(DatabaseType databaseType, boolean transactionalCache,
         boolean transactionalStore) {
      super(databaseType, transactionalCache, transactionalStore);
   }

   // Test matrix: each database crossed with three (transactionalCache, transactionalStore)
   // combinations; the (false, true) combination is never produced.
   @Factory
   public static Object[] factory() {
      DatabaseType[] databases;
      if(DATABASE == null) {
         // No databases requested via system property: fall back to the embedded engines.
         databases = new DatabaseType[]{
               DatabaseType.H2,
               DatabaseType.SQLITE
         };
      } else {
         databases = databasesFromSystemProperty.keySet().stream().toArray(DatabaseType[] :: new);
      }
      return Arrays.stream(databases)
            .flatMap(dt -> Stream.of(
                  new QueriesJdbcStoreFunctionalTest(dt, true, true),
                  new QueriesJdbcStoreFunctionalTest(dt, true, false),
                  new QueriesJdbcStoreFunctionalTest(dt, false, false)
            )).toArray();
   }

   /**
    * Configures a query-based JDBC store for the given cache. Each named test cache gets
    * select/selectAll/upsert/delete statements matching the column layout that test's
    * schema expects; the final else branch is the generic single "value1" column layout.
    */
   @Override
   protected PersistenceConfigurationBuilder createCacheStoreConfig(PersistenceConfigurationBuilder persistence,
         String cacheName, boolean preload) {
      QueriesJdbcStoreConfigurationBuilder storeBuilder = persistence
            .addStore(QueriesJdbcStoreConfigurationBuilder.class)
            .transactional(transactionalStore)
            .preload(preload);
      configureCommonConfiguration(storeBuilder);
      // Just prepend the first letter of the Test to make the tables unique so we can run them in parallel
      String tableName = getClass().getSimpleName().subSequence(0, 1) + cacheName;
      SqlManager manager = SqlManager.fromDatabaseType(DB_TYPE, tableName, true);
      String KEY_COLUMN = "keycolumn";
      // deleteAll and size are identical for every cache; the per-test queries follow.
      storeBuilder.queries()
            .deleteAll("DELETE FROM " + tableName)
            .size("SELECT COUNT(*) FROM " + tableName);
      storeBuilder.keyColumns(KEY_COLUMN);
      if (cacheName.equalsIgnoreCase("testPreloadStoredAsBinary")) {
         // Full Person layout including money/decimal/real columns.
         storeBuilder.queries()
               .select("SELECT " + KEY_COLUMN + ", name, STREET, city, ZIP, picture, sex, birthdate, accepted_tos, moneyOwned, moneyOwed, decimalField, realField FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN)
               .selectAll("SELECT " + KEY_COLUMN + ", name, street, city, zip, picture, sex, birthdate, accepted_tos, moneyOwned, moneyOwed, decimalField, realField FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList(KEY_COLUMN),
                     Arrays.asList(KEY_COLUMN, "name", "street", "CITY", "zip", "picture", "sex", "birthdate", "accepted_tos", "moneyOwned", "moneyOwed", "decimalField", "realField")))
               .delete("DELETE FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN);
      } else if (cacheName.equalsIgnoreCase("testStoreByteArrays")) {
         // Single opaque value column.
         storeBuilder.queries()
               .select("SELECT " + KEY_COLUMN + ", value1 FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN)
               .selectAll("SELECT " + KEY_COLUMN + ", value1 FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList(KEY_COLUMN),
                     Arrays.asList(KEY_COLUMN, "value1")))
               .delete("DELETE FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN);
      } else if (cacheName.toUpperCase().startsWith("TESTDBHASMOREVALUECOLUMNS")) {
         // Table carries extra value2/value3 columns beyond the schema's fields.
         storeBuilder.queries()
               .select("SELECT " + KEY_COLUMN + ", name, STREET, city, ZIP, picture, sex, birthdate, value2, value3 FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN)
               .selectAll("SELECT " + KEY_COLUMN + ", name, street, city, zip, picture, sex, birthdate, value2, value3 FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList(KEY_COLUMN),
                     Arrays.asList(KEY_COLUMN, "name", "street", "CITY", "zip", "picture", "sex", "birthdate", "value2", "value3")))
               .delete("DELETE FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN);
      } else if (cacheName.toUpperCase().startsWith("TESTDBHASMOREKEYCOLUMNS")) {
         // The column has to be value1 to match our Key proto schema
         KEY_COLUMN = "value1";
         storeBuilder.keyColumns(KEY_COLUMN + ", keycolumn2");
         storeBuilder.queries()
               .select("SELECT " + KEY_COLUMN + ", keycolumn2, value2 FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN)
               .selectAll("SELECT " + KEY_COLUMN + ", keycolumn2, value2 FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList(KEY_COLUMN),
                     Arrays.asList(KEY_COLUMN, "value2")))
               .delete("DELETE FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN + " AND keycolumn2 = :keycolumn2");
      } else if (cacheName.toUpperCase().startsWith("TESTDBHASLESSVALUECOLUMNS")) {
         // Only a subset of the schema's fields is mapped to columns.
         storeBuilder.queries()
               .select("SELECT " + KEY_COLUMN + ", name, STREET FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN)
               .selectAll("SELECT " + KEY_COLUMN + ", name, street FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList(KEY_COLUMN),
                     Arrays.asList(KEY_COLUMN, "name", "street")))
               .delete("DELETE FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN);
      } else if (cacheName.toUpperCase().startsWith("TESTEMBEDDED")) {
         // Key is embedded in the value message: keyed by the name column.
         storeBuilder.keyColumns("name");
         storeBuilder.queries()
               .select("SELECT name, STREET, city, ZIP, picture, sex, birthdate FROM " + tableName + " WHERE name = :name")
               .selectAll("SELECT name, street, city, zip, picture, sex, birthdate FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList("name"),
                     Arrays.asList("name", "street", "CITY", "zip", "picture", "sex", "birthdate")))
               .delete("DELETE FROM " + tableName + " WHERE name = :name");
      } else if (cacheName.toUpperCase().startsWith("TESTENUMFORVALUE")) {
         // Enum (sex) stored as the value column, keyed by name.
         storeBuilder.keyColumns("name");
         storeBuilder.queries()
               .select("SELECT name, sex FROM " + tableName + " WHERE name = :name")
               .selectAll("SELECT name, sex FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList("name"),
                     Arrays.asList("name", "sex")))
               .delete("DELETE FROM " + tableName + " WHERE name = :name");
      } else if (cacheName.toUpperCase().startsWith("TESTENUMFORKEY")) {
         // Enum (sex) used as the key column.
         storeBuilder.keyColumns("sex");
         storeBuilder.queries()
               .select("SELECT name, sex FROM " + tableName + " WHERE sex = :sex")
               .selectAll("SELECT name, sex FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList("sex"),
                     Arrays.asList("name", "sex")))
               .delete("DELETE FROM " + tableName + " WHERE sex = :sex");
      } else {
         // Default layout: a single value1 column keyed by KEY_COLUMN.
         storeBuilder.queries()
               .select("SELECT " + KEY_COLUMN + ", value1 FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN)
               .selectAll("SELECT " + KEY_COLUMN + ", value1 FROM " + tableName)
               .upsert(manager.getUpsertStatement(Collections.singletonList(KEY_COLUMN),
                     Arrays.asList(KEY_COLUMN, "value1")))
               .delete("DELETE FROM " + tableName + " WHERE " + KEY_COLUMN + " = :" + KEY_COLUMN);
      }
      createTable(cacheName, tableName, storeBuilder.getConnectionFactory());
      return persistence;
   }
}
| 8,357
| 60.007299
| 228
|
java
|
null |
infinispan-main/persistence/sql/src/test/java/org/infinispan/persistence/sql/configuration/ConfigurationSerializerTest.java
|
package org.infinispan.persistence.sql.configuration;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.serializer.AbstractConfigurationSerializerTest;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.testng.annotations.Test;
@Test(testName = "persistence.sql.configuration.ConfigurationSerializerTest", groups = "functional")
public class ConfigurationSerializerTest extends AbstractConfigurationSerializerTest {

   /**
    * Compares the SQL-store specific parts of a store configuration before and after
    * serialization: shared JDBC attributes first, then the per-flavour schema/query
    * attribute sets, finally delegating to the generic comparison.
    */
   @Override
   protected void compareStoreConfiguration(String name, StoreConfiguration beforeStore, StoreConfiguration afterStore) {
      if (beforeStore instanceof AbstractJdbcStoreConfiguration) {
         AbstractJdbcStoreConfiguration beforeJdbc = (AbstractJdbcStoreConfiguration) beforeStore;
         AbstractJdbcStoreConfiguration afterJdbc = (AbstractJdbcStoreConfiguration) afterStore;
         assertEquals("Configuration " + name + " JDBC connection factory", beforeJdbc.connectionFactory(), afterJdbc.connectionFactory());
      }
      if (beforeStore instanceof QueriesJdbcStoreConfiguration) {
         QueriesJdbcStoreConfiguration beforeQueries = (QueriesJdbcStoreConfiguration) beforeStore;
         QueriesJdbcStoreConfiguration afterQueries = (QueriesJdbcStoreConfiguration) afterStore;
         compareAttributeSets("Configuration " + name + " schema", beforeQueries.schema().attributes(), afterQueries.schema().attributes());
         compareAttributeSets("Configuration " + name + " queries", beforeQueries.getQueriesJdbcConfiguration().attributes(), afterQueries.getQueriesJdbcConfiguration().attributes());
      } else if (beforeStore instanceof TableJdbcStoreConfiguration) {
         TableJdbcStoreConfiguration beforeTable = (TableJdbcStoreConfiguration) beforeStore;
         TableJdbcStoreConfiguration afterTable = (TableJdbcStoreConfiguration) afterStore;
         compareAttributeSets("Configuration " + name + " schema", beforeTable.schema().attributes(), afterTable.schema().attributes());
      }
      super.compareStoreConfiguration(name, beforeStore, afterStore);
   }
}
| 2,107
| 62.878788
| 169
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/package-info.java
|
/**
* This package contains a {@link org.infinispan.persistence.spi.AdvancedLoadWriteStore} implementation based on
* a JDBC database connection.
*
* @api.public
*/
package org.infinispan.persistence.sql;
| 210
| 25.375
| 112
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/TableJdbcStore.java
|
package org.infinispan.persistence.sql;
import java.lang.invoke.MethodHandles;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.util.IntSet;
import org.infinispan.persistence.jdbc.common.DatabaseType;
import org.infinispan.persistence.jdbc.common.SqlManager;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.persistence.sql.configuration.TableJdbcStoreConfiguration;
import org.infinispan.util.logging.LogFactory;
/**
 * JDBC store whose SQL statements are generated from the configured table's own metadata
 * (column names, types and primary key), as opposed to {@code QueriesJdbcStore} where the
 * user supplies the queries.
 */
@ConfiguredBy(TableJdbcStoreConfiguration.class)
public class TableJdbcStore<K, V> extends AbstractSchemaJdbcStore<K, V, TableJdbcStoreConfiguration> {
   private static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass(), Log.class);

   @Override
   protected TableOperations<K, V> actualCreateTableOperations(ProtoSchemaOptions<K, V, TableJdbcStoreConfiguration> schemaOptions) {
      return new TableTableOperations(schemaOptions, schemaOptions.valueParameters);
   }

   /**
    * Table operations whose SQL is generated once, up front, from the table name and the
    * discovered key/value columns; the getters simply return the cached statements.
    */
   public class TableTableOperations extends SchemaTableOperations<K, V, TableJdbcStoreConfiguration> {
      private final String selectSql;
      private final String selectAllSql;
      private final String deleteSql;
      private final String deleteAllSql;
      private final String upsertSql;
      private final String sizeSql;

      public TableTableOperations(ProtoSchemaOptions<K, V, TableJdbcStoreConfiguration> options, Parameter[] upsertParameters) {
         super(options, upsertParameters);
         DatabaseType type = options.config.dialect();
         if (type == null) {
            // No dialect configured: ask the driver for the product name and guess.
            Connection connection = null;
            try {
               connection = connectionFactory.getConnection();
               String dbProduct = connection.getMetaData().getDatabaseProductName();
               type = DatabaseType.guessDialect(dbProduct);
               log.debugf("Guessing database dialect as '%s'. If this is incorrect, please specify the correct " +
                     "dialect using the 'dialect' attribute in your configuration. Supported database dialect strings are %s",
                     type, Arrays.toString(DatabaseType.values()));
            } catch (Exception e) {
               // NOTE(review): the original cause 'e' is dropped here; the logged exception
               // factory does not appear to accept a cause — consider attaching it if it can.
               throw log.unableToDetectDialect(Arrays.toString(DatabaseType.values()));
            } finally {
               connectionFactory.releaseConnection(connection);
            }
         }
         SqlManager statements = SqlManager.fromDatabaseType(type, config.tableName());
         List<String> keyNames = Arrays.stream(options.keyParameters)
               .map(Parameter::getName).collect(Collectors.toList());
         List<String> allNames = Arrays.stream(options.valueParameters)
               .map(Parameter::getName).collect(Collectors.toList());
         selectSql = statements.getSelectStatement(keyNames, allNames);
         selectAllSql = statements.getSelectAllStatement(allNames);
         deleteSql = statements.getDeleteStatement(keyNames);
         deleteAllSql = statements.getDeleteAllStatement();
         upsertSql = statements.getUpsertStatement(keyNames, allNames);
         sizeSql = statements.getSizeCommand();
      }

      @Override
      public String getSelectRowSql() {
         return selectSql;
      }

      @Override
      public String getDeleteRowSql() {
         return deleteSql;
      }

      @Override
      public String getUpsertRowSql() {
         return upsertSql;
      }

      @Override
      public String getSelectAllSql(IntSet segments) {
         // Segments are ignored; the whole table is always selected.
         return selectAllSql;
      }

      @Override
      public String getDeleteAllSql() {
         return deleteAllSql;
      }

      @Override
      public String getSizeSql() {
         return sizeSql;
      }
   }

   /**
    * Discovers the table's columns and primary key via {@link DatabaseMetaData} and maps
    * them to {@link Parameter}s. The configured table name may be qualified as
    * "schema.table"; any other dotted form is rejected.
    *
    * @throws SQLException if the metadata lookup fails
    */
   @Override
   Parameter[] generateParameterInformation(TableJdbcStoreConfiguration config, ConnectionFactory connectionFactory)
         throws SQLException {
      String schemaAndTableName = config.tableName();
      String[] tableAndSchemaSplit = schemaAndTableName.split("\\.");
      String tableName;
      String schemaName;
      if (tableAndSchemaSplit.length == 1) {
         schemaName = null;
         tableName = schemaAndTableName;
      } else if (tableAndSchemaSplit.length == 2) {
         schemaName = tableAndSchemaSplit[0];
         tableName = tableAndSchemaSplit[1];
      } else {
         throw log.tableNotInCorrectFormat(schemaAndTableName);
      }
      Connection connection = connectionFactory.getConnection();
      // Fix: release the connection on every path. Previously the connection was never
      // returned to the factory, unlike QueriesJdbcStore#generateParameterInformation
      // which releases it in a finally block.
      try {
         DatabaseMetaData databaseMetaData = connection.getMetaData();
         List<String> primaryKeyList = new ArrayList<>();
         try (ResultSet rs = databaseMetaData.getPrimaryKeys(null, schemaName, tableName)) {
            while (rs.next()) {
               // Upper-cased so the membership check below is case-insensitive.
               primaryKeyList.add(rs.getString("COLUMN_NAME").toUpperCase());
            }
         }
         if (primaryKeyList.isEmpty()) {
            throw log.noPrimaryKeysFoundForTable(schemaAndTableName);
         }
         boolean containsNonPrimary = false;
         List<Parameter> parameters = new ArrayList<>();
         try (ResultSet rs = databaseMetaData.getColumns(null, schemaName, tableName, null)) {
            while (rs.next()) {
               String name = rs.getString("COLUMN_NAME");
               int sqlColumnType = rs.getInt("DATA_TYPE");
               int scale = rs.getInt("DECIMAL_DIGITS");
               // Normalize driver-specific type reporting before mapping to protostream.
               int actualType = typeWeUse(sqlColumnType, rs.getString("TYPE_NAME"), scale);
               ProtostreamFieldType schemaType = ProtostreamFieldType.from(actualType);
               boolean isPrimary = primaryKeyList.contains(name.toUpperCase());
               parameters.add(new Parameter(name, schemaType, isPrimary, sqlColumnType));
               containsNonPrimary |= !isPrimary;
            }
         }
         if (!containsNonPrimary) {
            // A table that is all key and no value cannot store anything useful.
            throw log.noValueColumnForTable(schemaAndTableName);
         }
         return parameters.toArray(new Parameter[0]);
      } finally {
         connectionFactory.releaseConnection(connection);
      }
   }

   /**
    * Columns with no matching schema field are only tolerated when the store is read
    * only ({@code ignoreModifications}); otherwise an upsert could not populate them.
    */
   @Override
   Parameter[] handleUnusedValueParams(Parameter[] parameters, List<Parameter> unusedValueParams) {
      // If it is a loader, we can ignore missing values as it is read only
      if (!config.ignoreModifications()) {
         throw unusedValueParamsException(unusedValueParams);
      }
      Log.CONFIG.debugf("TableJdbcStore has extra columns that are not part of the schema %s, ignoring since read only", unusedValueParams);
      Parameter[] newParams = new Parameter[parameters.length - unusedValueParams.size()];
      int i = 0;
      for (Parameter parameter : parameters) {
         if (!unusedValueParams.contains(parameter)) {
            newParams[i++] = parameter;
         }
      }
      return newParams;
   }
}
| 6,989
| 40.117647
| 140
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/QueriesJdbcStore.java
|
package org.infinispan.persistence.sql;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.commons.configuration.ConfiguredBy;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration;
import org.infinispan.persistence.sql.configuration.QueriesJdbcConfigurationBuilder;
import org.infinispan.persistence.sql.configuration.QueriesJdbcStoreConfiguration;
import org.infinispan.persistence.sql.configuration.QueriesJdbcStoreConfigurationBuilder;
/**
 * JDBC store driven entirely by user-supplied SQL statements (select, selectAll, upsert,
 * delete, deleteAll, size), each of which may use named ":param" placeholders that are
 * rewritten to positional JDBC parameters at startup.
 */
@ConfiguredBy(QueriesJdbcStoreConfigurationBuilder.class)
public class QueriesJdbcStore<K, V> extends AbstractSchemaJdbcStore<K, V, QueriesJdbcStoreConfiguration> {
   /**
    * Rewrites each configured named-parameter statement into positional form and validates
    * that the statements agree with one another before building the table operations.
    */
   @Override
   protected TableOperations<K, V> actualCreateTableOperations(ProtoSchemaOptions<K, V, QueriesJdbcStoreConfiguration> options) {
      QueriesJdbcConfigurationBuilder<?> builder = new QueriesJdbcConfigurationBuilder<>(new ConfigurationBuilder().persistence().addStore(QueriesJdbcStoreConfigurationBuilder.class));
      QueriesJdbcConfiguration originalConfig = config.getQueriesJdbcConfiguration();
      builder.read(originalConfig);
      QueryNamedParameterParser.ParserResults selectResults = QueryNamedParameterParser.parseSqlStatement(originalConfig.select());
      builder.select(selectResults.getSqlToUse());
      if (config.ignoreModifications()) {
         // Read-only store: no write statements needed, so skip their validation entirely.
         return new QueryTableOperations(options, null, builder.create());
      }
      QueryNamedParameterParser.ParserResults deleteResults = QueryNamedParameterParser.parseSqlStatement(originalConfig.delete());
      builder.delete(deleteResults.getSqlToUse());
      // Delete all should not have any parameters
      if (QueryNamedParameterParser.parseSqlStatement(originalConfig.deleteAll()).getOrderedParameters().size() > 0) {
         throw log.deleteAllCannotHaveParameters(config.getQueriesJdbcConfiguration().selectAll());
      }
      // Size should not have any parameters
      if (QueryNamedParameterParser.parseSqlStatement(originalConfig.size()).getOrderedParameters().size() > 0) {
         throw log.sizeCannotHaveParameters(config.getQueriesJdbcConfiguration().selectAll());
      }
      // This ensures that delete and select parameters match, so we only need on instance of key parameters for both
      if (!deleteResults.getOrderedParameters().equals(selectResults.getOrderedParameters())) {
         throw log.deleteAndSelectQueryMismatchArguments(deleteResults.getOrderedParameters(), selectResults.getOrderedParameters());
      }
      // (e.g.) INSERT INTO books (isbn, title) VALUES (:key, :value) ON CONFLICT (isbn) DO UPDATE SET title = :value
      QueryNamedParameterParser.ParserResults upsertResults = QueryNamedParameterParser.parseSqlStatement(
            originalConfig.upsert());
      builder.upsert(upsertResults.getSqlToUse());
      Map<String, Parameter> parameterMap = new HashMap<>();
      // This includes all the keys as well
      for (Parameter parameter : options.valueParameters) {
         parameterMap.put(parameter.getName().toUpperCase(), parameter);
      }
      // Order the upsert parameters exactly as their names occur in the upsert statement.
      Parameter[] upsertParameters = upsertResults.getOrderedParameters().stream().map(name -> {
         Parameter param = parameterMap.get(name.toUpperCase());
         if (param == null) {
            throw log.deleteAndSelectQueryMismatchArguments(name, originalConfig.upsert(), originalConfig.selectAll());
         }
         return param;
      }).toArray(Parameter[]::new);
      return new QueryTableOperations(options, upsertParameters, builder.create());
   }

   /**
    * Executes the user's selectAll statement (fetch size 1) purely to read its
    * {@link ResultSetMetaData} and derive one {@link Parameter} per returned column,
    * verifying every configured key column appears in the result set.
    */
   @Override
   Parameter[] generateParameterInformation(QueriesJdbcStoreConfiguration config, ConnectionFactory connectionFactory)
         throws SQLException {
      QueryNamedParameterParser.ParserResults parserResults = QueryNamedParameterParser.parseSqlStatement(
            config.getQueriesJdbcConfiguration().selectAll());
      if (parserResults.getOrderedParameters().size() > 0) {
         throw log.selectAllCannotHaveParameters(config.getQueriesJdbcConfiguration().selectAll());
      }
      String selectAllSql = parserResults.getSqlToUse();
      String[] keyColumns = config.keyColumns().split(",");
      // Counts down as each configured key column is seen in the result set.
      int keyCount = keyColumns.length;
      Map<String, Parameter> namedParams = new HashMap<>();
      Connection connection = connectionFactory.getConnection();
      try (PreparedStatement ps = connection.prepareStatement(selectAllSql)) {
         // Only retrieve 1 - we can't do 0 as this means use default
         ps.setFetchSize(1);
         try (ResultSet rs = ps.executeQuery()) {
            ResultSetMetaData rsMetadata = rs.getMetaData();
            Parameter[] parameters = new Parameter[rsMetadata.getColumnCount()];
            for (int i = 1; i <= rsMetadata.getColumnCount(); ++i) {
               int columnType = rsMetadata.getColumnType(i);
               String name = rsMetadata.getColumnName(i);
               int scale = rsMetadata.getScale(i);
               int actualType = typeWeUse(columnType, rsMetadata.getColumnTypeName(i), scale);
               ProtostreamFieldType type = ProtostreamFieldType.from(actualType);
               String lowerCaseName = name.toLowerCase();
               // Make sure to reuse same parameter instance just with different offset
               Parameter parameter = namedParams.get(lowerCaseName);
               if (parameter == null) {
                  boolean primaryIdentifier = isPresent(keyColumns, name);
                  if (primaryIdentifier) {
                     keyCount--;
                  }
                  parameter = new Parameter(name.toLowerCase(), type, primaryIdentifier, columnType);
                  namedParams.put(lowerCaseName, parameter);
               }
               // TODO: what if the schema is in camel case?
               parameters[i - 1] = parameter;
            }
            if (keyCount != 0) {
               throw log.keyColumnsNotReturnedFromSelectAll(Arrays.toString(keyColumns),
                     config.getQueriesJdbcConfiguration().selectAll());
            }
            return parameters;
         }
      } finally {
         connectionFactory.releaseConnection(connection);
      }
   }

   /**
    * The key parameters are the named parameters of the select statement, in the order
    * they appear there; each must correspond to a column returned by selectAll.
    */
   @Override
   protected Parameter[] determinePrimaryParameters(QueriesJdbcStoreConfiguration config, Parameter[] allParameters) {
      QueryNamedParameterParser.ParserResults selectResults = QueryNamedParameterParser.parseSqlStatement(
            config.getQueriesJdbcConfiguration().select());
      return selectResults.getOrderedParameters().stream().map(name -> {
         for (Parameter parameter : allParameters) {
            if (parameter.getName().equals(name)) {
               return parameter;
            }
         }
         throw log.namedParamNotReturnedFromSelect(name, config.getQueriesJdbcConfiguration().selectAll(),
               config.getQueriesJdbcConfiguration().select());
      }).toArray(Parameter[]::new);
   }

   // Case-insensitive membership test of value among the (possibly padded) column names.
   private static boolean isPresent(String[] array, String value) {
      for (String s : array) {
         // TODO: some DBs may not be case sensitive?
         if (s.trim().equalsIgnoreCase(value)) {
            return true;
         }
      }
      return false;
   }

   /**
    * Table operations that simply return the (already rewritten) user-configured SQL.
    */
   public class QueryTableOperations extends SchemaTableOperations<K, V, QueriesJdbcStoreConfiguration> {
      private final QueriesJdbcConfiguration modifiedQueryConfig;

      public QueryTableOperations(ProtoSchemaOptions<K, V, QueriesJdbcStoreConfiguration> options, Parameter[] upsertParameters,
            QueriesJdbcConfiguration modifiedQueryConfig) {
         super(options, upsertParameters);
         this.modifiedQueryConfig = modifiedQueryConfig;
      }

      @Override
      public String getSelectRowSql() {
         return modifiedQueryConfig.select();
      }

      @Override
      public String getSelectAllSql(IntSet segments) {
         // Segments are ignored; the configured selectAll covers the whole table.
         return modifiedQueryConfig.selectAll();
      }

      @Override
      public String getDeleteRowSql() {
         return modifiedQueryConfig.delete();
      }

      @Override
      public String getUpsertRowSql() {
         return modifiedQueryConfig.upsert();
      }

      @Override
      public String getDeleteAllSql() {
         return modifiedQueryConfig.deleteAll();
      }

      @Override
      public String getSizeSql() {
         return modifiedQueryConfig.size();
      }
   }
}
| 8,777
| 44.247423
| 184
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/AbstractSchemaJdbcStore.java
|
package org.infinispan.persistence.sql;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.encoding.DataConversion;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.marshall.protostream.impl.SerializationContextRegistry;
import org.infinispan.persistence.jdbc.common.TableOperations;
import org.infinispan.persistence.jdbc.common.connectionfactory.ConnectionFactory;
import org.infinispan.persistence.jdbc.common.impl.BaseJdbcStore;
import org.infinispan.persistence.jdbc.common.sql.BaseTableOperations;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.MarshallableEntryFactory;
import org.infinispan.persistence.sql.configuration.AbstractSchemaJdbcConfiguration;
import org.infinispan.persistence.sql.configuration.SchemaJdbcConfiguration;
import org.infinispan.protostream.ImmutableSerializationContext;
import org.infinispan.protostream.descriptors.Descriptor;
import org.infinispan.protostream.descriptors.EnumDescriptor;
import org.infinispan.protostream.descriptors.FieldDescriptor;
import org.infinispan.protostream.descriptors.GenericDescriptor;
import org.infinispan.protostream.descriptors.Type;
public abstract class AbstractSchemaJdbcStore<K, V, C extends AbstractSchemaJdbcConfiguration> extends BaseJdbcStore<K, V, C> {
/**
 * Wires up key/value data conversion to JSON, asks the concrete subclass to introspect
 * the column layout, validates it against the configured protobuf schema, and finally
 * delegates to {@link #actualCreateTableOperations} for the SQL-specific operations.
 */
@Override
protected TableOperations<K, V> createTableOperations(InitializationContext ctx, C config) throws SQLException {
   AdvancedCache<K, V> advancedCache = ctx.getCache().getAdvancedCache();
   // We use a type as the protostream -> json conversion leaves it as a String instead of byte[]
   MediaType jsonStringType = MediaType.fromString(MediaType.APPLICATION_JSON_TYPE + ";type=String");
   // This seems like a bug that `withRequestMediaType` isn't injected...
   DataConversion keyDataConversion = advancedCache.getKeyDataConversion()
         .withRequestMediaType(jsonStringType);
   DataConversion valueDataConversion = advancedCache.getValueDataConversion()
         .withRequestMediaType(jsonStringType);
   ComponentRegistry componentRegistry = advancedCache.getComponentRegistry();
   componentRegistry.wireDependencies(keyDataConversion, true);
   componentRegistry.wireDependencies(valueDataConversion, true);
   // Subclass-specific discovery of all columns and which of them form the key.
   Parameter[] parameters = generateParameterInformation(config, connectionFactory);
   assert parameters.length != 0;
   Parameter[] primaryParameters = determinePrimaryParameters(config, parameters);
   assert primaryParameters.length != 0;
   assert Arrays.stream(primaryParameters).allMatch(Parameter::isPrimaryIdentifier);
   // We have to use the user serialization context as it will have the schemas they registered
   ImmutableSerializationContext serializationContext = componentRegistry.getComponent(SerializationContextRegistry.class).getUserCtx();
   ProtoSchemaOptions<K, V, C> options = verifySchemaAndCreateOptions(serializationContext,
         config.schema(), parameters, primaryParameters, keyDataConversion, valueDataConversion,
         ctx.getMarshallableEntryFactory());
   return actualCreateTableOperations(options);
}
// Default strategy: the primary key consists of whichever discovered columns were
// flagged as primary identifiers, in their original order.
protected Parameter[] determinePrimaryParameters(C config, Parameter[] allParameters) {
   List<Parameter> primary = new ArrayList<>();
   for (Parameter parameter : allParameters) {
      if (parameter.isPrimaryIdentifier()) {
         primary.add(parameter);
      }
   }
   return primary.toArray(new Parameter[0]);
}
/**
 * Implementation specific method to return a table operations which will then be used appropriately for store
 * operations. It is recommended to extend {@link SchemaTableOperations} providing ways to retrieve the statements
 * needed.
 *
 * @param schemaOptions the operations for the schema for this store
 * @return the operations object to use
 */
protected abstract TableOperations<K, V> actualCreateTableOperations(ProtoSchemaOptions<K, V, C> schemaOptions);
/**
 * Method to be overridden to determine what the parameters are for the various sql statements that will be used.
 * Only the {@link #connectionFactory} will be initialized at this point.
 * @param config store configuration object
 * @param connectionFactory connection factory to use
 * @return all the parameters for this table. This can include duplicate named columns
 * @throws SQLException exception if there is any problem determining the parameters from the DB
 */
abstract Parameter[] generateParameterInformation(C config, ConnectionFactory connectionFactory) throws SQLException;
// Maps the JDBC-reported column type to the type this store should treat it as,
// smoothing over database-specific reporting quirks. Returns the input type when
// no normalization applies.
int typeWeUse(int sqlType, String typeName, int scale) {
   if (sqlType == Types.VARCHAR) {
      // Some DBs store VARBINARY as VARCHAR FOR BIT DATA (ahem... DB2)
      if (typeName.contains("BIT") || typeName.contains("BINARY")) {
         return Types.VARBINARY;
      }
      return sqlType;
   }
   if (typeName.toUpperCase().startsWith("BOOL")) {
      // Some databases store as int32 or something similar but have the typename as BOOLEAN or some derivation
      return Types.BOOLEAN;
   }
   if (sqlType == Types.NUMERIC && scale == 0) {
      // If scale is 0 we don't want to use float or double types
      return Types.INTEGER;
   }
   return sqlType;
}
   /**
    * Validates the configured protobuf schema against the discovered table columns and wires, into each
    * {@link Parameter}, the functions used to read values out of and write values into the protostream JSON
    * wrapper representation for both key and value sides.
    * <p>
    * A key schema is only required when the primary key spans more than one column (or one is explicitly named);
    * likewise a value schema is only required for multi-column values. Single-column keys/values are treated as
    * protostream primitives instead.
    *
    * @param ctx the user serialization context holding the registered schemas
    * @param schemaJdbcConfiguration schema configuration (package, key/value message names, embedded key flag)
    * @param parameters all column parameters (may contain duplicate named columns)
    * @param primaryParameters the subset of {@code parameters} that form the primary key
    * @param keyConversion conversion between storage format and JSON for keys
    * @param valueConversion conversion between storage format and JSON for values
    * @param marshallableEntryFactory factory used later to build entries from result sets
    * @return the assembled options handed to {@link #actualCreateTableOperations(ProtoSchemaOptions)}
    */
   ProtoSchemaOptions<K, V, C> verifySchemaAndCreateOptions(ImmutableSerializationContext ctx,
         SchemaJdbcConfiguration schemaJdbcConfiguration, Parameter[] parameters, Parameter[] primaryParameters,
         DataConversion keyConversion, DataConversion valueConversion, MarshallableEntryFactory<K, V> marshallableEntryFactory) {
      // Keys should all be upper case to provide case insensitivity
      Map<String, Parameter> parameterMap = new HashMap<>();
      int uniquePrimaryParameters = 0;
      // Load up a map of names to parameter while also tracking the number of unique primary identifiers
      for (Parameter parameter : parameters) {
         // We can have mixed cases for the characters so just force all upper case to allow for O(1)
         if (parameterMap.put(parameter.name.toUpperCase(), parameter) == null && parameter.primaryIdentifier) {
            uniquePrimaryParameters++;
         }
      }
      String packageName = schemaJdbcConfiguration.packageName();
      String keyMessageName = schemaJdbcConfiguration.keyMessageName();
      String fullKeyMessageName = null;
      // Only generate a schema for the key if there is more than 1 field or they explicitly defined one
      if (uniquePrimaryParameters != 1 || keyMessageName != null) {
         if (keyMessageName == null || packageName == null) {
            throw log.primaryKeyMultipleColumnWithoutSchema();
         }
         String fullMessageName = packageName + "." + keyMessageName;
         verifyParametersPresentForMessage(ctx, fullMessageName, parameterMap, true);
         fullKeyMessageName = fullMessageName;
      } else {
         // Single-column key without a schema: treat it as a protostream primitive
         updatePrimitiveJsonConsumer(primaryParameters[0], true);
      }
      String valueMessageName = schemaJdbcConfiguration.messageName();
      String fullValueMessageName = null;
      boolean hasEmbeddedKey = config.schema().embeddedKey();
      // When the key is embedded in the value, the key columns count towards the value column total
      if (parameterMap.size() - (hasEmbeddedKey ? 0 : uniquePrimaryParameters) > 1 || valueMessageName != null) {
         if (valueMessageName == null || packageName == null) {
            throw log.valueMultipleColumnWithoutSchema();
         }
         String fullMessageName = packageName + "." + valueMessageName;
         verifyParametersPresentForMessage(ctx, fullMessageName, parameterMap, false);
         fullValueMessageName = fullMessageName;
      } else {
         // This variable is only for assertion - it should be that we can only have 1 non primary parameter,
         // but just in case
         boolean updatedPrimitive = false;
         for (Parameter parameter : parameters) {
            if (parameter.primaryIdentifier) {
               continue;
            }
            assert !updatedPrimitive;
            updatePrimitiveJsonConsumer(parameter, false);
            updatedPrimitive = true;
         }
      }
      // Any column that ended up with neither a key nor a value mapping is not covered by the schema
      List<Parameter> unusedValueParams = null;
      for (Parameter parameter : parameters) {
         if (parameter.jsonConsumerValue == null && parameter.jsonConsumerKey == null) {
            if (parameter.primaryIdentifier) {
               throw log.keyNotInSchema(parameter.name, fullKeyMessageName);
            } else {
               if (unusedValueParams == null) {
                  unusedValueParams = new ArrayList<>();
               }
               unusedValueParams.add(parameter);
            }
         }
      }
      if (unusedValueParams != null) {
         // Subclass hook; the default implementation throws
         parameters = handleUnusedValueParams(parameters, unusedValueParams);
      }
      if (hasEmbeddedKey) {
         // Make sure all values are mapped as they must be when embedded key
         assert Arrays.stream(parameters).noneMatch(parameter -> parameter.unwrapJsonValue == null);
      } else {
         // Primary identifiers shouldn't have any values mapped as they aren't embedded
         assert Arrays.stream(parameters).noneMatch(parameter -> parameter.primaryIdentifier && parameter.unwrapJsonValue != null);
         assert Arrays.stream(parameters).noneMatch(parameter -> !parameter.primaryIdentifier && parameter.unwrapJsonValue == null);
      }
      assert Arrays.stream(parameters).filter(Parameter::isPrimaryIdentifier).noneMatch(parameter -> parameter.unwrapJsonKey == null);
      return new ProtoSchemaOptions<>(config, primaryParameters, fullKeyMessageName, parameters, fullValueMessageName,
            keyConversion, valueConversion, marshallableEntryFactory);
   }
   /**
    * Hook invoked when some non-primary columns are not covered by the value schema. The default implementation
    * rejects the configuration by throwing; subclasses may override to tolerate or prune the unused columns by
    * returning the (possibly adjusted) parameter array to use instead.
    *
    * @param parameters        all discovered column parameters
    * @param unusedValueParams the non-primary parameters with no schema mapping
    * @return the parameters to continue with (never returned by this default implementation)
    */
   Parameter[] handleUnusedValueParams(Parameter[] parameters, List<Parameter> unusedValueParams) {
      throw unusedValueParamsException(unusedValueParams);
   }
   /**
    * Builds the configuration exception reporting columns that are missing from the configured value schema.
    *
    * @param unusedParamNames the parameters that could not be mapped to any schema field
    * @return the exception to throw, naming the offending columns and the value message name
    */
   CacheConfigurationException unusedValueParamsException(List<Parameter> unusedParamNames) {
      return log.valueNotInSchema(unusedParamNames.stream().map(Parameter::getName).collect(Collectors.toList()),
            config.schema().messageName());
   }
   /**
    * Wires a single-column key or value to be read/written as a protostream JSON primitive wrapper, i.e. an object
    * of the form {@code {"_type": <protostream type>, "_value": <column value>}}.
    *
    * @param parameter the sole column parameter for this side
    * @param key       {@code true} to install the key-side functions, {@code false} for the value side
    */
   private void updatePrimitiveJsonConsumer(Parameter parameter, boolean key) {
      updateUnwrap(parameter, key, json -> json.at("_value"));
      updateJsonConsumer(parameter, key, (json, value) -> {
         json.set("_type", parameter.getType().protostreamType);
         json.set("_value", value);
      });
   }
   /**
    * Looks up the protostream descriptor for {@code fullTypeName} and wires JSON read/write functions into the
    * matching column parameters. Message types are walked recursively field by field; an enum type is treated as a
    * single string-valued column wrapped in the {@code {"_type", "_value"}} primitive form.
    *
    * @param ctx          serialization context containing the registered schemas
    * @param fullTypeName fully qualified protobuf message or enum name
    * @param parameterMap upper-cased column name to parameter map
    * @param key          {@code true} when wiring the key side, {@code false} for the value side
    */
   void verifyParametersPresentForMessage(ImmutableSerializationContext ctx, String fullTypeName, Map<String, Parameter> parameterMap, boolean key) {
      GenericDescriptor genericDescriptor;
      try {
         genericDescriptor = ctx.getDescriptorByName(fullTypeName);
      } catch (IllegalArgumentException t) {
         // Protostream throws IAE when the type name is unknown; surface a clearer configuration error
         throw log.schemaNotFound(fullTypeName);
      }
      Set<String> seenNames = new HashSet<>();
      if (genericDescriptor instanceof Descriptor) {
         recursiveUpdateParameters((Descriptor) genericDescriptor, parameterMap, null, seenNames, key);
      } else if (genericDescriptor instanceof EnumDescriptor) {
         if (!key && config.schema().embeddedKey()) {
            throw log.keyCannotEmbedWithEnum(fullTypeName);
         }
         String name = genericDescriptor.getName();
         // treat an enum as just a string
         Parameter enumParam = parameterMap.get(name.toUpperCase());
         if (enumParam != null) {
            assert enumParam.getType() == ProtostreamFieldType.STRING;
            updateUnwrap(enumParam, key, json -> json.at("_value"));
            updateJsonConsumer(enumParam, key, (json, o) -> {
               json.set("_type", fullTypeName);
               json.set("_value", o);
            });
         }
      } else {
         throw new UnsupportedOperationException("Unsupported descriptor found " + genericDescriptor);
      }
   }
   /**
    * Walks a protobuf message descriptor, recursing into nested message fields, and installs JSON read/write
    * functions on every column parameter whose (case-insensitive) name matches a leaf field. Nested fields are
    * addressed through the accumulated {@code nestedMessageNames} path, with intermediate JSON objects created on
    * demand when writing.
    *
    * @param descriptor         the message descriptor currently being walked
    * @param parameterMap       upper-cased column name to parameter map
    * @param nestedMessageNames path of enclosing message field names, or {@code null} at the top level
    * @param seenNames          leaf field names already consumed, to detect duplicates across nesting levels
    * @param key                {@code true} when wiring the key side, {@code false} for the value side
    */
   void recursiveUpdateParameters(Descriptor descriptor, Map<String, Parameter> parameterMap,
         String[] nestedMessageNames, Set<String> seenNames, boolean key) {
      for (FieldDescriptor fieldDescriptor : descriptor.getFields()) {
         String name = fieldDescriptor.getName();
         if (fieldDescriptor.isRepeated()) {
            throw log.repeatedFieldsNotSupported(name, fieldDescriptor.getTypeName());
         }
         Descriptor fieldMessageDescriptor = fieldDescriptor.getMessageType();
         if (fieldMessageDescriptor != null) {
            // Message-typed field: extend the nesting path and recurse
            String[] newNestedMessageNames;
            if (nestedMessageNames == null) {
               newNestedMessageNames = new String[1];
               newNestedMessageNames[0] = name;
            } else {
               newNestedMessageNames = Arrays.copyOf(nestedMessageNames, nestedMessageNames.length + 1);
               newNestedMessageNames[nestedMessageNames.length] = name;
            }
            recursiveUpdateParameters(fieldMessageDescriptor, parameterMap, newNestedMessageNames, seenNames, key);
            continue;
         }
         if (!seenNames.add(name)) {
            throw log.duplicateFieldInSchema(name, fieldDescriptor.getTypeName());
         }
         Parameter parameter = parameterMap.get(name.toUpperCase());
         if (parameter == null) {
            // No matching column: only a problem when the schema requires the field
            if (fieldDescriptor.isRequired()) {
               throw log.requiredSchemaFieldNotPresent(name, fieldDescriptor.getTypeName());
            }
            continue;
         }
         if (parameter.primaryIdentifier && !key && !config.schema().embeddedKey()) {
            throw log.primaryKeyPresentButNotEmbedded(parameter.name, fieldDescriptor.getTypeName());
         }
         Function<Json, Json> retrievalFunction;
         BiConsumer<Json, Object> valueConsumer;
         // Oracle doesn't have a boolean type, so use a number of 0 or 1 instead
         if (parameter.type == ProtostreamFieldType.INT_32 && fieldDescriptor.getType() == Type.BOOL) {
            retrievalFunction = json -> Json.factory().number(json.at(name).asBoolean() ? 1 : 0);
            valueConsumer = (json, o) -> json.set(name, ((Integer) o) == 1);
         } else {
            retrievalFunction = json -> json.at(name);
            valueConsumer = (json, o) -> json.set(name, o);
         }
         if (nestedMessageNames == null) {
            updateUnwrap(parameter, key, retrievalFunction);
            updateJsonConsumer(parameter, key, valueConsumer);
         } else {
            // Nested field: wrap reader/writer so they navigate (and for writes, create) the enclosing objects
            updateUnwrap(parameter, key, json -> {
               for (String nestedName : nestedMessageNames) {
                  json = json.at(nestedName);
                  if (json == null) return null;
               }
               return retrievalFunction.apply(json);
            });
            updateJsonConsumer(parameter, key, (json, o) -> {
               Json nestedJSon = json;
               for (String nestedName : nestedMessageNames) {
                  nestedJSon = json.at(nestedName);
                  if (nestedJSon == null) {
                     nestedJSon = Json.object();
                     json.set(nestedName, nestedJSon);
                  }
                  json = nestedJSon;
               }
               valueConsumer.accept(nestedJSon, o);
            });
         }
      }
   }
private void updateUnwrap(Parameter parameter, boolean key, Function<Json, Json> function) {
if (key) {
parameter.unwrapJsonKey = function;
} else {
parameter.unwrapJsonValue = function;
}
}
private void updateJsonConsumer(Parameter parameter, boolean key, BiConsumer<Json, Object> jsonBiConsumer) {
if (key) {
parameter.jsonConsumerKey = jsonBiConsumer;
} else {
parameter.jsonConsumerValue = jsonBiConsumer;
}
}
protected enum ProtostreamFieldType {
INT_32("int32"),
INT_64("int64"),
FLOAT("float"),
DOUBLE("double"),
BOOL("bool"),
STRING("string"),
BYTES("bytes"),
DATE("fixed64");
/**
* This field matches {@link org.infinispan.protostream.impl.JsonUtils} types
*/
private final String protostreamType;
ProtostreamFieldType(String protostreamType) {
this.protostreamType = protostreamType;
}
protected static ProtostreamFieldType from(int sqlType) {
switch (sqlType) {
case Types.INTEGER:
return INT_32;
case Types.BIGINT:
return INT_64;
case Types.FLOAT:
case Types.REAL:
return FLOAT;
case Types.DOUBLE:
case Types.NUMERIC:
case Types.DECIMAL:
return DOUBLE;
case Types.BIT:
case Types.BOOLEAN:
return BOOL;
case Types.CHAR:
case Types.VARCHAR:
case Types.NVARCHAR:
case Types.LONGVARCHAR:
case Types.LONGNVARCHAR:
return STRING;
case Types.BLOB:
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
return BYTES;
case Types.DATE:
case Types.TIMESTAMP:
case Types.TIMESTAMP_WITH_TIMEZONE:
return DATE;
default:
throw new IllegalArgumentException("SqlType not supported: " + sqlType);
}
}
}
protected static class Parameter {
private final String name;
private final ProtostreamFieldType type;
private final boolean primaryIdentifier;
private final int sqlType;
private BiConsumer<Json, Object> jsonConsumerValue;
private BiConsumer<Json, Object> jsonConsumerKey;
private Function<Json, Json> unwrapJsonValue;
private Function<Json, Json> unwrapJsonKey;
Parameter(String name, ProtostreamFieldType type, boolean primaryIdentifier, int sqlType) {
this.name = name;
this.type = type;
this.primaryIdentifier = primaryIdentifier;
this.sqlType = sqlType;
}
public String getName() {
return name;
}
public ProtostreamFieldType getType() {
return type;
}
public int getSqlType() {
return sqlType;
}
public boolean isPrimaryIdentifier() {
return primaryIdentifier;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Parameter parameter = (Parameter) o;
return Objects.equals(name, parameter.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
@Override
public String toString() {
return "Parameter{" +
"name='" + name + '\'' +
", type=" + type +
", primaryIdentifier=" + primaryIdentifier +
'}';
}
}
   /**
    * Immutable bundle of everything a {@link SchemaTableOperations} needs to translate between result set rows and
    * marshalled cache entries: the key/value parameters with their wired JSON functions, the fully qualified
    * protobuf message names (null when the side is a primitive), and the storage conversions.
    */
   protected static class ProtoSchemaOptions<K, V, C extends AbstractSchemaJdbcConfiguration> {
      protected final C config;
      // Primary-key column parameters, in declaration order
      protected final Parameter[] keyParameters;
      // Fully qualified key message name, or null when the key is a single primitive column
      protected final String keyMessageName;
      // All column parameters used when reading/writing a full row
      protected final Parameter[] valueParameters;
      // Fully qualified value message name, or null when the value is a single primitive column
      protected final String valueMessageName;
      protected final DataConversion keyConversion;
      protected final DataConversion valueConversion;
      protected final MarshallableEntryFactory<K, V> marshallableEntryFactory;
      public ProtoSchemaOptions(C config, Parameter[] keyParameters, String keyMessageName, Parameter[] valueParameters,
            String valueMessageName, DataConversion keyConversion, DataConversion valueConversion,
            MarshallableEntryFactory<K, V> marshallableEntryFactory) {
         this.config = config;
         this.keyParameters = keyParameters;
         this.keyMessageName = keyMessageName;
         this.valueParameters = valueParameters;
         this.valueMessageName = valueMessageName;
         this.keyConversion = keyConversion;
         this.valueConversion = valueConversion;
         this.marshallableEntryFactory = marshallableEntryFactory;
      }
   }
   /**
    * Base table operations for schema-driven stores: converts between JDBC rows and the protostream JSON documents
    * that the configured {@link DataConversion}s marshal to/from storage format. Statement parameter positions are
    * positional and must line up with the parameter arrays held in {@code schemaOptions}/{@code upsertParameters}.
    */
   protected abstract static class SchemaTableOperations<K, V, C extends AbstractSchemaJdbcConfiguration> extends BaseTableOperations<K, V> {
      private final ProtoSchemaOptions<K, V, C> schemaOptions;
      // Parameters in the exact positional order expected by the upsert statement
      private final Parameter[] upsertParameters;
      public SchemaTableOperations(ProtoSchemaOptions<K, V, C> schemaOptions, Parameter[] upsertParameters) {
         super(schemaOptions.config);
         this.schemaOptions = schemaOptions;
         this.upsertParameters = upsertParameters;
      }
      /**
       * This method assigns a parameter based on the type using the parameter type. Normally this code would live in
       * the enum, but some implementations may require a different assignment based on the database and thus this
       * method can be extended to change that behavior.
       *
       * @param ps       statement to set the parameter on
       * @param type     the protostream type of the column
       * @param position 1-based statement parameter position
       * @param json     the JSON node holding the column value
       * @throws SQLException if the driver rejects the assignment
       */
      protected void setParameter(PreparedStatement ps, ProtostreamFieldType type, int position, Json json) throws SQLException {
         switch (type) {
            case INT_32:
               ps.setInt(position, json.asInteger());
               break;
            case INT_64:
               ps.setLong(position, json.asLong());
               break;
            case FLOAT:
               ps.setFloat(position, json.asFloat());
               break;
            case DOUBLE:
               ps.setDouble(position, json.asDouble());
               break;
            case BOOL:
               ps.setBoolean(position, json.asBoolean());
               break;
            case STRING:
               ps.setString(position, json.asString());
               break;
            case BYTES:
               // Binary columns travel through JSON as base64 strings
               String base64Bytes = json.asString();
               byte[] bytes = Base64.getDecoder().decode(base64Bytes);
               ps.setBytes(position, bytes);
               break;
            case DATE:
               // Dates travel through JSON as epoch milliseconds (protostream fixed64)
               long dateTime = json.asLong();
               ps.setTimestamp(position, new Timestamp(dateTime));
               break;
            default:
               throw new IllegalArgumentException("Type " + type + " not supported!");
         }
      }
      /**
       * Reads one column from the result set and, if non-null, writes it into the key- or value-side JSON document
       * via the parameter's wired consumer.
       *
       * @param rs        result set positioned on the current row
       * @param parameter the column parameter being read
       * @param offset    1-based column offset in the result set
       * @param json      document to write into
       * @param key       {@code true} to use the key-side consumer, {@code false} for the value side
       * @throws SQLException if reading the column fails
       */
      protected void updateJsonWithParameter(ResultSet rs, Parameter parameter, int offset, Json json, boolean key) throws SQLException {
         Object value;
         switch (parameter.getType()) {
            case INT_32:
               value = rs.getInt(offset);
               break;
            case INT_64:
               value = rs.getLong(offset);
               break;
            case FLOAT:
               value = rs.getFloat(offset);
               break;
            case DOUBLE:
               value = rs.getDouble(offset);
               break;
            case BOOL:
               value = rs.getBoolean(offset);
               break;
            case STRING:
               value = rs.getString(offset);
               break;
            case BYTES:
               byte[] bytes = rs.getBytes(offset);
               value = bytes != null ? Base64.getEncoder().encodeToString(bytes) : null;
               break;
            case DATE:
               Timestamp timestamp = rs.getTimestamp(offset);
               value = timestamp != null ? timestamp.getTime() : null;
               break;
            default:
               throw new IllegalArgumentException("Type " + parameter.getType() + " not supported!");
         }
         if (value != null) {
            if (key) {
               parameter.jsonConsumerKey.accept(json, value);
            } else {
               parameter.jsonConsumerValue.accept(json, value);
            }
         }
      }
      @Override
      protected MarshallableEntry<K, V> entryFromResultSet(ResultSet rs, Object keyIfProvided, boolean fetchValue,
            Predicate<? super K> keyPredicate) throws SQLException {
         // Only reconstruct the key from columns when the caller didn't already supply it
         Json keyJson = keyIfProvided == null ? Json.object() : null;
         if (keyJson != null && schemaOptions.keyMessageName != null) {
            keyJson.set("_type", schemaOptions.keyMessageName);
         }
         Json valueJson = Json.object();
         if (schemaOptions.valueMessageName != null) {
            valueJson.set("_type", schemaOptions.valueMessageName);
         }
         Parameter[] valueParameters = schemaOptions.valueParameters;
         for (int i = 0; i < valueParameters.length; ++i) {
            Parameter parameter = valueParameters[i];
            if (parameter.isPrimaryIdentifier()) {
               if (keyJson != null) {
                  updateJsonWithParameter(rs, parameter, i + 1, keyJson, true);
               }
               // Primary columns only belong in the value document when the key is embedded
               if (!schemaOptions.config.schema().embeddedKey()) {
                  continue;
               }
            }
            updateJsonWithParameter(rs, parameter, i + 1, valueJson, false);
         }
         if (keyJson != null) {
            keyIfProvided = schemaOptions.keyConversion.toStorage(keyJson.toString());
         }
         if (keyPredicate != null && !keyPredicate.test((K) keyIfProvided)) {
            return null;
         }
         Object value = schemaOptions.valueConversion.toStorage(valueJson.toString());
         return schemaOptions.marshallableEntryFactory.create(keyIfProvided, value);
      }
      @Override
      protected void prepareKeyStatement(PreparedStatement ps, Object key) throws SQLException {
         Object jsonString = schemaOptions.keyConversion.fromStorage(key);
         Json json = Json.read((String) jsonString);
         // Positions follow the keyParameters array order; all entries are expected to be primary identifiers
         for (int i = 0; i < schemaOptions.keyParameters.length; ++i) {
            Parameter parameter = schemaOptions.keyParameters[i];
            if (!parameter.primaryIdentifier) {
               continue;
            }
            Json innerJson = parameter.unwrapJsonKey.apply(json);
            if (innerJson != null) {
               setParameter(ps, parameter.getType(), i + 1, innerJson);
            } else {
               ps.setNull(i + 1, parameter.getSqlType());
            }
         }
      }
      @Override
      protected void prepareValueStatement(PreparedStatement ps, int segment, MarshallableEntry<? extends K, ? extends V> entry) throws SQLException {
         boolean embeddedKey = schemaOptions.config.schema().embeddedKey();
         Json valueJson = Json.read((String) schemaOptions.valueConversion.fromStorage(entry.getValue()));
         // With an embedded key, key columns are read out of the value document itself
         Json keyJson = embeddedKey ? valueJson : Json.read((String) schemaOptions.keyConversion.fromStorage(entry.getKey()));
         for (int i = 0; i < upsertParameters.length; ++i) {
            Parameter parameter = upsertParameters[i];
            Json json;
            if (parameter.primaryIdentifier) {
               json = embeddedKey ? parameter.unwrapJsonValue.apply(keyJson) : parameter.unwrapJsonKey.apply(keyJson);
            } else {
               json = parameter.unwrapJsonValue.apply(valueJson);
            }
            if (json != null) {
               setParameter(ps, parameter.getType(), i + 1, json);
            } else {
               ps.setNull(i + 1, parameter.getSqlType());
            }
         }
      }
   }
}
| 27,901
| 41.926154
| 150
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/QueryNamedParameterParser.java
|
package org.infinispan.persistence.sql;
import java.util.ArrayList;
import java.util.List;
import org.infinispan.persistence.jdbc.common.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Parses SQL containing Spring-style named parameters ({@code :name}, {@code &name} or {@code :{name}}), replacing
 * each with a JDBC {@code ?} placeholder and recording the parameter names in occurrence order. Comments, quoted
 * literals and Postgres {@code ::} casts are skipped; escaped {@code \:} sequences are unescaped; plain {@code ?}
 * placeholders are rejected.
 * <p>
 * Adapted from Spring Framework's {@code NamedParameterUtils}.
 */
public class QueryNamedParameterParser {
   private static final Log log = LogFactory.getLog(QueryNamedParameterParser.class, Log.class);
   /**
    * Set of characters that qualify as comment or quotes starting characters.
    */
   private static final String[] START_SKIP = new String[]{"'", "\"", "--", "/*"};
   /**
    * Set of characters that at are the corresponding comment or quotes ending characters.
    */
   private static final String[] STOP_SKIP = new String[]{"'", "\"", "\n", "*/"};
   /**
    * Set of characters that qualify as parameter separators,
    * indicating that a parameter name in an SQL String has ended.
    */
   private static final String PARAMETER_SEPARATORS = "\"':&,;()|=+-*%/\\<>^";
   /**
    * An index with separator flags per character code.
    * Technically only needed between 34 and 124 at this point.
    */
   private static final boolean[] separatorIndex = new boolean[128];
   static {
      for (char c : PARAMETER_SEPARATORS.toCharArray()) {
         separatorIndex[c] = true;
      }
   }
   //-------------------------------------------------------------------------
   // Core methods used by NamedParameterJdbcTemplate and SqlQuery/SqlUpdate
   //-------------------------------------------------------------------------
   /**
    * Parse the SQL statement and locate any placeholders or named parameters. Named parameters are substituted for a
    * JDBC placeholder.
    *
    * @param sql the SQL statement
    * @return the parsed statement, represented as ParsedSql instance
    */
   public static ParserResults parseSqlStatement(final String sql) {
      StringBuilder sqlToUse = new StringBuilder(sql);
      List<String> parameterList = new ArrayList<>();
      char[] statement = sql.toCharArray();
      // Running count of characters removed from sqlToUse relative to sql, to map indices between the two
      int escapes = 0;
      int i = 0;
      while (i < statement.length) {
         int skipToPosition;
         while (i < statement.length) {
            skipToPosition = skipCommentsAndQuotes(statement, i);
            if (i == skipToPosition) {
               break;
            }
            else {
               i = skipToPosition;
            }
         }
         if (i >= statement.length) {
            break;
         }
         char c = statement[i];
         if (c == ':' || c == '&') {
            int j = i + 1;
            if (c == ':' && j < statement.length && statement[j] == ':') {
               // Postgres-style "::" casting operator should be skipped
               i = i + 2;
               continue;
            }
            String parameter;
            if (c == ':' && j < statement.length && statement[j] == '{') {
               // :{x} style parameter
               while (statement[j] != '}') {
                  j++;
                  if (j >= statement.length) {
                     throw log.nonTerminatedNamedParamInSql(i, sql);
                  }
                  if (statement[j] == ':' || statement[j] == '{') {
                     throw log.invalidCharacterInSql(statement[j], i, sql);
                  }
               }
               if (j - i > 2) {
                  // Replace ":{name}" including the closing brace at index j, hence end index j + 1
                  sqlToUse.replace(i - escapes, j + 1 - escapes, "?");
                  // (j + 1 - i) characters collapsed into a single '?'
                  escapes += j - i;
                  parameter = sql.substring(i + 2, j);
                  parameterList.add(parameter);
               }
               j++;
            }
            else {
               while (j < statement.length && !isParameterSeparator(statement[j])) {
                  j++;
               }
               if (j - i > 1) {
                  sqlToUse.replace(i - escapes, j - escapes, "?");
                  escapes += j - i - 1;
                  parameter = sql.substring(i + 1, j);
                  parameterList.add(parameter);
               }
            }
            i = j - 1;
         }
         else {
            if (c == '\\') {
               int j = i + 1;
               if (j < statement.length && statement[j] == ':') {
                  // escaped ":" should be skipped
                  sqlToUse.deleteCharAt(i - escapes);
                  escapes++;
                  i = i + 2;
                  continue;
               }
            }
            if (c == '?') {
               throw log.unnamedParametersNotAllowed(i, sql);
            }
         }
         i++;
      }
      return new ParserResults(sqlToUse.toString(), parameterList);
   }
   /**
    * Skip over comments and quoted names present in an SQL statement.
    * @param statement character array containing SQL statement
    * @param position current position of statement
    * @return next position to process after any comments or quotes are skipped
    */
   private static int skipCommentsAndQuotes(char[] statement, int position) {
      for (int i = 0; i < START_SKIP.length; i++) {
         if (statement[position] == START_SKIP[i].charAt(0)) {
            boolean match = true;
            for (int j = 1; j < START_SKIP[i].length(); j++) {
               if (statement[position + j] != START_SKIP[i].charAt(j)) {
                  match = false;
                  break;
               }
            }
            if (match) {
               int offset = START_SKIP[i].length();
               for (int m = position + offset; m < statement.length; m++) {
                  if (statement[m] == STOP_SKIP[i].charAt(0)) {
                     boolean endMatch = true;
                     int endPos = m;
                     for (int n = 1; n < STOP_SKIP[i].length(); n++) {
                        if (m + n >= statement.length) {
                           // last comment not closed properly
                           return statement.length;
                        }
                        if (statement[m + n] != STOP_SKIP[i].charAt(n)) {
                           endMatch = false;
                           break;
                        }
                        endPos = m + n;
                     }
                     if (endMatch) {
                        // found character sequence ending comment or quote
                        return endPos + 1;
                     }
                  }
               }
               // character sequence ending comment or quote not found
               return statement.length;
            }
         }
      }
      return position;
   }
   /**
    * Determine whether a parameter name ends at the current position, that is, whether the given character qualifies as
    * a separator.
    */
   private static boolean isParameterSeparator(char c) {
      return (c < 128 && separatorIndex[c]) || Character.isWhitespace(c);
   }
   /**
    * Result of parsing: the rewritten SQL with {@code ?} placeholders, and the parameter names in the order they
    * must be bound.
    */
   public static class ParserResults {
      private final String sqlToUse;
      private final List<String> orderedParameters;
      public ParserResults(String sqlToUse, List<String> orderedParameters) {
         this.sqlToUse = sqlToUse;
         this.orderedParameters = orderedParameters;
      }
      public String getSqlToUse() {
         return sqlToUse;
      }
      public List<String> getOrderedParameters() {
         return orderedParameters;
      }
   }
}
| 7,268
| 33.947115
| 120
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/QueriesJdbcConfiguration.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.attributes.ConfigurationElement;
import org.infinispan.persistence.jdbc.common.configuration.Attribute;
import org.infinispan.persistence.jdbc.common.configuration.Element;
/**
 * Holds the user-supplied SQL statements (select, select-all, delete, delete-all, upsert, size) used by the
 * query-based JDBC store. All attributes are immutable once built.
 */
public class QueriesJdbcConfiguration extends ConfigurationElement<QueriesJdbcConfiguration> {
   public static final AttributeDefinition<String> SELECT = AttributeDefinition.builder(Attribute.SELECT_SINGLE, null, String.class).immutable().build();
   public static final AttributeDefinition<String> SELECT_ALL = AttributeDefinition.builder(Attribute.SELECT_ALL, null, String.class).immutable().build();
   public static final AttributeDefinition<String> DELETE = AttributeDefinition.builder(Attribute.DELETE_SINGLE, null, String.class).immutable().build();
   public static final AttributeDefinition<String> DELETE_ALL = AttributeDefinition.builder(Attribute.DELETE_ALL, null, String.class).immutable().build();
   public static final AttributeDefinition<String> UPSERT = AttributeDefinition.builder(Attribute.UPSERT, null, String.class).immutable().build();
   public static final AttributeDefinition<String> SIZE = AttributeDefinition.builder(Attribute.SIZE, null, String.class).immutable().build();
   public static AttributeSet attributeDefinitionSet() {
      return new AttributeSet(QueriesJdbcConfiguration.class, SELECT, SELECT_ALL, DELETE, DELETE_ALL, UPSERT, SIZE);
   }
   QueriesJdbcConfiguration(AttributeSet attributes) {
      super(Element.QUERIES, attributes);
   }
   // Common accessor: all query attributes are plain strings
   private String query(AttributeDefinition<String> definition) {
      return attributes.attribute(definition).get();
   }
   public String select() {
      return query(SELECT);
   }
   public String selectAll() {
      return query(SELECT_ALL);
   }
   public String delete() {
      return query(DELETE);
   }
   public String deleteAll() {
      return query(DELETE_ALL);
   }
   public String upsert() {
      return query(UPSERT);
   }
   public String size() {
      return query(SIZE);
   }
}
| 2,214
| 44.204082
| 154
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/SchemaJdbcConfiguration.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.configuration.attributes.ConfigurationElement;
import org.infinispan.persistence.jdbc.common.configuration.Attribute;
import org.infinispan.persistence.jdbc.common.configuration.Element;
/**
 * Schema settings for schema-driven JDBC stores: the protobuf package, the value and (optional) key message names,
 * and whether the key fields are embedded inside the value message.
 */
public class SchemaJdbcConfiguration extends ConfigurationElement<SchemaJdbcConfiguration> {
   public static final AttributeDefinition<String> MESSAGE_NAME = AttributeDefinition.builder(Attribute.MESSAGE_NAME, null, String.class).immutable().build();
   public static final AttributeDefinition<String> KEY_MESSAGE_NAME = AttributeDefinition.builder(Attribute.KEY_MESSAGE_NAME, null, String.class).immutable().build();
   public static final AttributeDefinition<String> PACKAGE = AttributeDefinition.builder(Attribute.PACKAGE, null, String.class).immutable().build();
   public static final AttributeDefinition<Boolean> EMBEDDED_KEY = AttributeDefinition.builder(Attribute.EMBEDDED_KEY, Boolean.FALSE).immutable().build();
   public static AttributeSet attributeDefinitionSet() {
      return new AttributeSet(SchemaJdbcConfiguration.class, MESSAGE_NAME, KEY_MESSAGE_NAME, PACKAGE, EMBEDDED_KEY);
   }
   SchemaJdbcConfiguration(AttributeSet attributes) {
      super(Element.SCHEMA, attributes);
   }
   // Common accessor for the string-valued schema attributes
   private String stringValue(AttributeDefinition<String> definition) {
      return attributes.attribute(definition).get();
   }
   public String messageName() {
      return stringValue(MESSAGE_NAME);
   }
   public String keyMessageName() {
      return stringValue(KEY_MESSAGE_NAME);
   }
   public String packageName() {
      return stringValue(PACKAGE);
   }
   public boolean embeddedKey() {
      return attributes.attribute(EMBEDDED_KEY).get();
   }
}
| 1,803
| 45.25641
| 166
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/QueriesJdbcStoreConfiguration.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.ConfigurationFor;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.serializing.SerializedWith;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.Element;
import org.infinispan.persistence.sql.QueriesJdbcStore;
@BuiltBy(QueriesJdbcStoreConfigurationBuilder.class)
@ConfigurationFor(QueriesJdbcStore.class)
@SerializedWith(QueriesJdbcStoreConfigurationSerializer.class)
public class QueriesJdbcStoreConfiguration extends AbstractSchemaJdbcConfiguration<QueriesJdbcStoreConfiguration> {
static final AttributeDefinition<String> KEY_COLUMNS = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.KEY_COLUMNS, null, String.class).immutable().build();
public static AttributeSet attributeDefinitionSet() {
return new AttributeSet(QueriesJdbcStoreConfiguration.class, AbstractSchemaJdbcConfiguration.attributeDefinitionSet(), KEY_COLUMNS);
}
private final QueriesJdbcConfiguration queriesJdbcConfiguration;
private final Attribute<String> keyColumns;
public QueriesJdbcStoreConfiguration(AttributeSet attributes, AsyncStoreConfiguration async,
ConnectionFactoryConfiguration connectionFactory, SchemaJdbcConfiguration schemaJdbcConfiguration,
QueriesJdbcConfiguration queriesJdbcConfiguration) {
super(Element.QUERY_JDBC_STORE, attributes, async, connectionFactory, schemaJdbcConfiguration);
this.queriesJdbcConfiguration = queriesJdbcConfiguration;
keyColumns = attributes.attribute(KEY_COLUMNS);
}
public String keyColumns() {
return keyColumns.get();
}
public QueriesJdbcConfiguration getQueriesJdbcConfiguration() {
return queriesJdbcConfiguration;
}
}
| 2,201
| 49.045455
| 202
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/SqlSerializerUtil.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.persistence.jdbc.common.configuration.Element;
/**
 * Shared serialization helper for the SQL store configuration serializers.
 */
final class SqlSerializerUtil {
   // Utility class: no instances
   private SqlSerializerUtil() {
   }
   /**
    * Writes the {@code <schema .../>} element for the given store configuration, but only when the user actually
    * modified any schema attribute (otherwise the element is omitted entirely).
    *
    * @param writer        destination configuration writer
    * @param configuration the store configuration whose schema section should be serialized
    */
   static void writeSchemaElement(ConfigurationWriter writer, AbstractSchemaJdbcConfiguration<?> configuration) {
      SchemaJdbcConfiguration schemaConfig = configuration.schema();
      if (schemaConfig.attributes().isModified()) {
         writer.writeStartElement(Element.SCHEMA);
         schemaConfig.attributes().write(writer);
         writer.writeEndElement();
      }
   }
}
| 603
| 34.529412
| 110
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/AbstractSchemaJdbcConfigurationBuilder.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.commons.logging.Log;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationBuilder;
/**
 * Base builder for the SQL-backed JDBC cache stores (table-based and query-based). Adds a nested
 * {@link SchemaJdbcConfigurationBuilder} on top of the common JDBC store options so subclasses can
 * describe how database rows map to protobuf messages.
 *
 * @param <T> the concrete configuration type produced by this builder
 * @param <S> the concrete builder type, for fluent self-returns
 */
public abstract class AbstractSchemaJdbcConfigurationBuilder<T extends AbstractSchemaJdbcConfiguration,
      S extends AbstractSchemaJdbcConfigurationBuilder<T, S>> extends AbstractJdbcStoreConfigurationBuilder<T, S> {

   // Shared with subclasses: its create() result is passed into the concrete configuration.
   protected final SchemaJdbcConfigurationBuilder<S> schemaBuilder = new SchemaJdbcConfigurationBuilder<>(this);

   public AbstractSchemaJdbcConfigurationBuilder(PersistenceConfigurationBuilder builder, AttributeSet attributes) {
      super(builder, attributes);
   }

   /**
    * Retrieves the schema configuration builder
    *
    * @return builder to configure the schema
    * @deprecated use {@link #schema()} instead
    */
   @Deprecated
   public SchemaJdbcConfigurationBuilder<S> schemaJdbcConfigurationBuilder() {
      return schemaBuilder;
   }

   /**
    * @return the builder used to configure the protobuf schema mapping of this store
    */
   public SchemaJdbcConfigurationBuilder<S> schema() {
      return schemaBuilder;
   }

   @Override
   public void validate() {
      super.validate();
      schemaBuilder.validate();
      // These stores do not support segmentation: silently default SEGMENTED to false when the
      // user did not touch it, but reject an explicit segmented=true.
      Attribute<Boolean> segmentedAttr = attributes.attribute(AbstractStoreConfiguration.SEGMENTED);
      if (!segmentedAttr.isModified()) {
         Log.CONFIG.debugf("%s is defaulting to not being segmented", getClass().getSimpleName());
         segmentedAttr.set(Boolean.FALSE);
      } else if (segmentedAttr.get()) {
         throw org.infinispan.util.logging.Log.CONFIG.storeDoesNotSupportBeingSegmented(getClass().getSimpleName());
      }
   }

   @Override
   public Builder<?> read(T template, Combine combine) {
      super.read(template, combine);
      // Copy the nested schema settings from the template as well.
      schemaBuilder.read(template.schema(), combine);
      return this;
   }
}
| 2,195
| 38.214286
| 116
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/TableJdbcStoreConfigurationBuilder.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfiguration;
/**
 * Builder for {@link TableJdbcStoreConfiguration}: a JDBC store that persists cache entries
 * into a single, pre-existing database table.
 *
 * @author William Burns
 * @since 13.0
 */
public class TableJdbcStoreConfigurationBuilder extends AbstractSchemaJdbcConfigurationBuilder<TableJdbcStoreConfiguration, TableJdbcStoreConfigurationBuilder> {

   public TableJdbcStoreConfigurationBuilder(PersistenceConfigurationBuilder builder) {
      super(builder, TableJdbcStoreConfiguration.attributeDefinitionSet());
   }

   @Override
   public TableJdbcStoreConfigurationBuilder self() {
      return this;
   }

   @Override
   public AttributeSet attributes() {
      return attributes;
   }

   /**
    * Configures the table name to use for this store
    *
    * @param tableName table to use
    * @return this
    */
   public TableJdbcStoreConfigurationBuilder tableName(String tableName) {
      attributes.attribute(TableJdbcStoreConfiguration.TABLE_NAME).set(tableName);
      return this;
   }

   @Override
   public void validate(GlobalConfiguration globalConfig) {
      super.validate(globalConfig);
      // A table store is meaningless without a target table.
      String tableName = attributes.attribute(TableJdbcStoreConfiguration.TABLE_NAME).get();
      if (tableName == null) {
         throw org.infinispan.persistence.jdbc.common.logging.Log.CONFIG.tableNameMissing();
      }
   }

   @Override
   public TableJdbcStoreConfiguration create() {
      return new TableJdbcStoreConfiguration(attributes.protect(), async.create(),
            connectionFactory == null ? null : connectionFactory.create(),
            schemaBuilder.create());
   }

   @Override
   public String toString() {
      StringBuilder sb = new StringBuilder("TableJdbcStoreConfigurationBuilder [connectionFactory=");
      sb.append(connectionFactory).append(", attributes=").append(attributes)
            .append(", async=").append(async).append(']');
      return sb.toString();
   }
}
| 1,953
| 30.516129
| 161
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/SqlStoreConfigurationParser.java
|
package org.infinispan.persistence.sql.configuration;
import static org.infinispan.persistence.sql.configuration.SqlStoreConfigurationParser.NAMESPACE;
import org.infinispan.commons.configuration.io.ConfigurationReader;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.configuration.parsing.CacheParser;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.configuration.parsing.ConfigurationParser;
import org.infinispan.configuration.parsing.Namespace;
import org.infinispan.configuration.parsing.ParseUtils;
import org.infinispan.configuration.parsing.Parser;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationParser;
import org.infinispan.persistence.jdbc.common.configuration.Attribute;
import org.infinispan.persistence.jdbc.common.configuration.Element;
import org.kohsuke.MetaInfServices;
@MetaInfServices(ConfigurationParser.class)
@Namespace(root = "table-jdbc-store")
@Namespace(root = "query-jdbc-store")
@Namespace(uri = NAMESPACE + "*", root = "table-jdbc-store")
@Namespace(uri = NAMESPACE + "*", root = "query-jdbc-store")
public class SqlStoreConfigurationParser extends AbstractJdbcStoreConfigurationParser {

   // Versioned namespace prefix for the SQL store schemas; the trailing "*" in the @Namespace
   // declarations above matches any version suffix appended to this prefix.
   static final String NAMESPACE = Parser.NAMESPACE + "store:sql:";

   /**
    * Dispatches on the root element name to the table-store or query-store parser; any other
    * element is rejected.
    */
   @Override
   public void readElement(ConfigurationReader reader, ConfigurationBuilderHolder holder) {
      ConfigurationBuilder builder = holder.getCurrentConfigurationBuilder();
      Element element = Element.forName(reader.getLocalName());
      switch (element) {
         case TABLE_JDBC_STORE: {
            parseTableJdbcStore(reader, builder.persistence());
            break;
         }
         case QUERY_JDBC_STORE: {
            parseQueryJdbcStore(reader, builder.persistence());
            break;
         }
         default: {
            throw ParseUtils.unexpectedElement(reader);
         }
      }
   }

   // Parses <table-jdbc-store>. Attributes and child elements are first offered to the common
   // JDBC handlers; anything still unrecognized falls through to the generic store parsing.
   private void parseTableJdbcStore(ConfigurationReader reader, PersistenceConfigurationBuilder persistenceBuilder) {
      TableJdbcStoreConfigurationBuilder builder = persistenceBuilder.addStore(TableJdbcStoreConfigurationBuilder.class);
      for (int i = 0; i < reader.getAttributeCount(); i++) {
         String value = reader.getAttributeValue(i);
         Attribute attribute = Attribute.forName(reader.getAttributeName(i));
         if (!handleCommonAttributes(reader, builder, attribute, value)) {
            if (attribute == Attribute.TABLE_NAME) {
               builder.tableName(value);
            } else {
               CacheParser.parseStoreAttribute(reader, i, builder);
            }
         }
      }
      while (reader.inTag()) {
         Element element = Element.forName(reader.getLocalName());
         if (!handleCommonElement(builder, element, reader)) {
            if (element == Element.SCHEMA) {
               parseSchema(reader, builder.schema());
            } else {
               CacheParser.parseStoreElement(reader, builder);
            }
         }
      }
   }

   // Parses <query-jdbc-store>. Mirrors parseTableJdbcStore but additionally accepts the
   // key-columns attribute and a nested <queries> element.
   private void parseQueryJdbcStore(ConfigurationReader reader, PersistenceConfigurationBuilder persistenceBuilder) {
      QueriesJdbcStoreConfigurationBuilder builder = persistenceBuilder.addStore(QueriesJdbcStoreConfigurationBuilder.class);
      for (int i = 0; i < reader.getAttributeCount(); i++) {
         String value = reader.getAttributeValue(i);
         Attribute attribute = Attribute.forName(reader.getAttributeName(i));
         if (!handleCommonAttributes(reader, builder, attribute, value)) {
            if (attribute == Attribute.KEY_COLUMNS) {
               builder.keyColumns(value);
            } else {
               CacheParser.parseStoreAttribute(reader, i, builder);
            }
         }
      }
      while (reader.inTag()) {
         Element element = Element.forName(reader.getLocalName());
         if (!handleCommonElement(builder, element, reader)) {
            switch (element) {
               case QUERIES:
                  parseQueries(reader, builder.queries());
                  break;
               case SCHEMA:
                  parseSchema(reader, builder.schema());
                  break;
               default:
                  CacheParser.parseStoreElement(reader, builder);
                  break;
            }
         }
      }
   }

   // The queries configuration is attribute-only: nested child elements are rejected.
   private void parseQueries(ConfigurationReader reader, QueriesJdbcConfigurationBuilder builder) {
      // YAML and JSON this has to be an attribute so support both named attributes under queries
      // as well as elements with XML
      ParseUtils.parseAttributes(reader, builder);
      if (reader.inTag()) {
         Element element = Element.forName(reader.getLocalName());
         throw ParseUtils.unexpectedElement(reader, element);
      }
   }

   // The schema configuration is attribute-only as well; nested child elements are rejected.
   private void parseSchema(ConfigurationReader reader, SchemaJdbcConfigurationBuilder<?> builder) {
      ParseUtils.parseAttributes(reader, builder);
      if (reader.inTag()) {
         Element element = Element.forName(reader.getLocalName());
         throw ParseUtils.unexpectedElement(reader, element);
      }
   }

   @Override
   public Namespace[] getNamespaces() {
      return ParseUtils.getNamespaceAnnotations(getClass());
   }
}
| 5,328
| 41.293651
| 125
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/QueriesJdbcStoreConfigurationSerializer.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.serializing.ConfigurationSerializer;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationSerializer;
import org.infinispan.persistence.jdbc.common.configuration.Element;
/**
 * Serializes a {@link QueriesJdbcStoreConfiguration} back to its declarative form.
 *
 * @author William Burns
 * @since 13.0
 */
public class QueriesJdbcStoreConfigurationSerializer extends AbstractJdbcStoreConfigurationSerializer implements ConfigurationSerializer<QueriesJdbcStoreConfiguration> {

   @Override
   public void serialize(ConfigurationWriter writer, QueriesJdbcStoreConfiguration configuration) {
      // Attribute writes must precede the nested schema/queries/connection/common elements.
      writer.writeStartElement(Element.QUERY_JDBC_STORE);
      writer.writeDefaultNamespace(AbstractSchemaJdbcConfiguration.NAMESPACE + Version.getMajorMinor());
      writeJdbcStoreAttributes(writer, configuration);
      writeCommonStoreSubAttributes(writer, configuration);
      SqlSerializerUtil.writeSchemaElement(writer, configuration);
      writeQueryElements(writer, configuration);
      writeJDBCStoreConnection(writer, configuration);
      writeCommonStoreElements(writer, configuration);
      writer.writeEndElement();
   }

   static void writeQueryElements(ConfigurationWriter writer, QueriesJdbcStoreConfiguration configuration) {
      QueriesJdbcConfiguration queries = configuration.getQueriesJdbcConfiguration();
      if (!queries.attributes().isModified()) {
         // Nothing customized: omit the queries element entirely.
         return;
      }
      writer.writeStartElement(Element.QUERIES);
      queries.attributes().write(writer);
      writer.writeEndElement();
   }
}
| 1,734
| 43.487179
| 169
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/TableJdbcStoreConfiguration.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.BuiltBy;
import org.infinispan.commons.configuration.ConfigurationFor;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.serializing.SerializedWith;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.Element;
import org.infinispan.persistence.sql.TableJdbcStore;
/**
 * Configuration for {@link TableJdbcStore}: a SQL store backed by a single named table.
 */
@BuiltBy(TableJdbcStoreConfigurationBuilder.class)
@ConfigurationFor(TableJdbcStore.class)
@SerializedWith(TableJdbcStoreConfigurationSerializer.class)
public class TableJdbcStoreConfiguration extends AbstractSchemaJdbcConfiguration<TableJdbcStoreConfiguration> {

   static final AttributeDefinition<String> TABLE_NAME = AttributeDefinition.builder(org.infinispan.persistence.jdbc.common.configuration.Attribute.TABLE_NAME, null, String.class).immutable().build();

   // Cached handle to the TABLE_NAME attribute within this configuration's attribute set.
   private final Attribute<String> tableNameAttribute;

   public TableJdbcStoreConfiguration(AttributeSet attributes, AsyncStoreConfiguration async,
         ConnectionFactoryConfiguration connectionFactory, SchemaJdbcConfiguration schemaJdbcConfiguration) {
      super(Element.TABLE_JDBC_STORE, attributes, async, connectionFactory, schemaJdbcConfiguration);
      this.tableNameAttribute = attributes.attribute(TABLE_NAME);
   }

   public static AttributeSet attributeDefinitionSet() {
      return new AttributeSet(TableJdbcStoreConfiguration.class, AbstractJdbcStoreConfiguration.attributeDefinitionSet(), TABLE_NAME);
   }

   /**
    * @return the database table this store reads from and writes to
    */
   public String tableName() {
      return tableNameAttribute.get();
   }
}
| 1,963
| 52.081081
| 200
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/TableJdbcStoreConfigurationSerializer.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.commons.util.Version;
import org.infinispan.configuration.serializing.ConfigurationSerializer;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationSerializer;
import org.infinispan.persistence.jdbc.common.configuration.Element;
/**
 * TableJdbcStoreConfigurationSerializer.
 *
 * @author William Burns
 * @since 13.0
 */
public class TableJdbcStoreConfigurationSerializer extends AbstractJdbcStoreConfigurationSerializer implements ConfigurationSerializer<TableJdbcStoreConfiguration> {

   /**
    * Writes a table-jdbc-store element under the versioned SQL-store namespace. The call order
    * matters: attribute writes must precede the nested schema/connection/common elements.
    */
   @Override
   public void serialize(ConfigurationWriter writer, TableJdbcStoreConfiguration configuration) {
      writer.writeStartElement(Element.TABLE_JDBC_STORE);
      writer.writeDefaultNamespace(AbstractSchemaJdbcConfiguration.NAMESPACE + Version.getMajorMinor());
      writeJdbcStoreAttributes(writer, configuration);
      writeCommonStoreSubAttributes(writer, configuration);
      SqlSerializerUtil.writeSchemaElement(writer, configuration);
      writeJDBCStoreConnection(writer, configuration);
      writeCommonStoreElements(writer, configuration);
      writer.writeEndElement();
   }
}
| 1,279
| 40.290323
| 165
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/SchemaJdbcConfigurationBuilder.java
|
package org.infinispan.persistence.sql.configuration;
import static org.infinispan.persistence.sql.configuration.SchemaJdbcConfiguration.EMBEDDED_KEY;
import static org.infinispan.persistence.sql.configuration.SchemaJdbcConfiguration.KEY_MESSAGE_NAME;
import static org.infinispan.persistence.sql.configuration.SchemaJdbcConfiguration.MESSAGE_NAME;
import static org.infinispan.persistence.sql.configuration.SchemaJdbcConfiguration.PACKAGE;
import java.util.Objects;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationChildBuilder;
/**
 * Configures how database columns map onto protobuf messages for the SQL cache stores.
 *
 * @param <S> the parent store builder type
 */
public class SchemaJdbcConfigurationBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>> extends AbstractJdbcStoreConfigurationChildBuilder<S> implements Builder<SchemaJdbcConfiguration> {

   private final AttributeSet attributes;

   protected SchemaJdbcConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder<?, S> builder) {
      super(builder);
      attributes = SchemaJdbcConfiguration.attributeDefinitionSet();
   }

   @Override
   public AttributeSet attributes() {
      return attributes;
   }

   /**
    * Sets the protobuf message name used to marshall the cache value into the database. Not
    * required when the value maps to a single column.
    *
    * @param messageName protobuf message name of the value
    * @return this
    */
   public SchemaJdbcConfigurationBuilder<S> messageName(String messageName) {
      attributes.attribute(MESSAGE_NAME).set(messageName);
      return this;
   }

   /**
    * Sets the protobuf message name used to marshall the cache key into the database. Not
    * required when the key maps to a single column.
    *
    * @param keyMessageName protobuf message name of the key
    * @return this
    */
   public SchemaJdbcConfigurationBuilder<S> keyMessageName(String keyMessageName) {
      attributes.attribute(KEY_MESSAGE_NAME).set(keyMessageName);
      return this;
   }

   /**
    * Sets the protobuf package that {@link #messageName(String)} and
    * {@link #keyMessageName(String)} are resolved against. Must not be {@code null}.
    *
    * @param packageName package of the key/value messages
    * @return this
    */
   public SchemaJdbcConfigurationBuilder<S> packageName(String packageName) {
      attributes.attribute(PACKAGE).set(Objects.requireNonNull(packageName));
      return this;
   }

   /**
    * Controls whether the key column(s) are also written into the value object. When enabled,
    * {@link #messageName(String)} <b>must</b> be configured as well.
    *
    * @param embeddedKey {@code true} to embed the key in the value
    * @return this
    */
   public SchemaJdbcConfigurationBuilder<S> embeddedKey(boolean embeddedKey) {
      attributes.attribute(EMBEDDED_KEY).set(embeddedKey);
      return this;
   }

   @Override
   public void validate() {
      // An embedded key can only be written when the value message name is known.
      if (Boolean.TRUE.equals(attributes.attribute(EMBEDDED_KEY).get())
            && attributes.attribute(MESSAGE_NAME).get() == null) {
         throw org.infinispan.persistence.jdbc.common.logging.Log.CONFIG.messageNameRequiredIfEmbeddedKey();
      }
   }

   @Override
   public SchemaJdbcConfiguration create() {
      return new SchemaJdbcConfiguration(attributes.protect());
   }

   @Override
   public Builder<?> read(SchemaJdbcConfiguration template, Combine combine) {
      attributes.read(template.attributes(), combine);
      return this;
   }
}
| 3,738
| 36.767677
| 198
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/AbstractSchemaJdbcConfiguration.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AsyncStoreConfiguration;
import org.infinispan.configuration.parsing.Parser;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfiguration;
import org.infinispan.persistence.jdbc.common.configuration.ConnectionFactoryConfiguration;
/**
 * Common configuration for the SQL-backed JDBC stores: the shared JDBC options plus the
 * protobuf schema mapping.
 *
 * @param <T> the concrete configuration type
 */
public class AbstractSchemaJdbcConfiguration<T extends AbstractSchemaJdbcConfiguration<T>> extends AbstractJdbcStoreConfiguration<T> {

   // Versioned namespace prefix shared by the SQL store serializers.
   static final String NAMESPACE = Parser.NAMESPACE + "store:sql:";

   private final SchemaJdbcConfiguration schema;

   protected AbstractSchemaJdbcConfiguration(Enum<?> element, AttributeSet attributes, AsyncStoreConfiguration async,
         ConnectionFactoryConfiguration connectionFactory, SchemaJdbcConfiguration schemaJdbcConfiguration) {
      super(element, attributes, async, connectionFactory);
      this.schema = schemaJdbcConfiguration;
   }

   /**
    * @return the protobuf schema mapping used by this store
    */
   public SchemaJdbcConfiguration schema() {
      return schema;
   }

   /**
    * @deprecated use {@link #schema()} instead.
    */
   @Deprecated
   public SchemaJdbcConfiguration getSchemaJdbcConfiguration() {
      return schema();
   }
}
| 1,332
| 40.65625
| 134
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/QueriesJdbcStoreConfigurationBuilder.java
|
package org.infinispan.persistence.sql.configuration;
import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.Attribute;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.configuration.cache.AbstractStoreConfiguration;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
/**
 * Builder for {@link QueriesJdbcStoreConfiguration}: a JDBC store whose load and write
 * operations are driven by user-supplied SQL statements.
 *
 * @author William Burns
 * @since 13.0
 */
public class QueriesJdbcStoreConfigurationBuilder extends AbstractSchemaJdbcConfigurationBuilder<QueriesJdbcStoreConfiguration, QueriesJdbcStoreConfigurationBuilder> {

   private final QueriesJdbcConfigurationBuilder<?> queriesBuilder = new QueriesJdbcConfigurationBuilder<>(this);

   public QueriesJdbcStoreConfigurationBuilder(PersistenceConfigurationBuilder builder) {
      super(builder, QueriesJdbcStoreConfiguration.attributeDefinitionSet());
   }

   @Override
   public QueriesJdbcStoreConfigurationBuilder self() {
      return this;
   }

   @Override
   public AttributeSet attributes() {
      return attributes;
   }

   /**
    * @deprecated use {@link #queries()} instead
    */
   @Deprecated
   public QueriesJdbcConfigurationBuilder<?> queriesJdbcConfigurationBuilder() {
      return queries();
   }

   /**
    * @return the builder used to configure the SQL statements for this store
    */
   public QueriesJdbcConfigurationBuilder<?> queries() {
      return queriesBuilder;
   }

   /**
    * Configures the column(s) that hold the cache key; required.
    *
    * @param keyColumns the key column name(s)
    * @return this
    */
   public QueriesJdbcStoreConfigurationBuilder keyColumns(String keyColumns) {
      attributes.attribute(QueriesJdbcStoreConfiguration.KEY_COLUMNS).set(keyColumns);
      return this;
   }

   @Override
   public void validate() {
      super.validate();
      // Write statements are only mandatory when the store is not read-only.
      queriesBuilder.validate(attributes.attribute(AbstractStoreConfiguration.READ_ONLY).get());
      Attribute<String> keyColumns = attributes.attribute(QueriesJdbcStoreConfiguration.KEY_COLUMNS);
      if (!keyColumns.isModified() || keyColumns.isNull() || keyColumns.get().isEmpty()) {
         throw org.infinispan.persistence.jdbc.common.logging.Log.CONFIG.keyColumnsRequired();
      }
   }

   @Override
   public QueriesJdbcStoreConfiguration create() {
      return new QueriesJdbcStoreConfiguration(attributes.protect(), async.create(),
            connectionFactory == null ? null : connectionFactory.create(),
            schemaBuilder.create(), queriesBuilder.create());
   }

   @Override
   public Builder<?> read(QueriesJdbcStoreConfiguration template, Combine combine) {
      super.read(template, combine);
      queriesBuilder.read(template.getQueriesJdbcConfiguration(), combine);
      return this;
   }

   @Override
   public String toString() {
      StringBuilder sb = new StringBuilder("QueriesJdbcStoreConfigurationBuilder [connectionFactory=");
      sb.append(connectionFactory).append(", attributes=").append(attributes)
            .append(", async=").append(async).append(']');
      return sb.toString();
   }
}
| 2,840
| 33.228916
| 167
|
java
|
null |
infinispan-main/persistence/sql/src/main/java/org/infinispan/persistence/sql/configuration/QueriesJdbcConfigurationBuilder.java
|
package org.infinispan.persistence.sql.configuration;
import static org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration.DELETE;
import static org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration.DELETE_ALL;
import static org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration.SELECT;
import static org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration.SELECT_ALL;
import static org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration.SIZE;
import static org.infinispan.persistence.sql.configuration.QueriesJdbcConfiguration.UPSERT;

import org.infinispan.commons.configuration.Builder;
import org.infinispan.commons.configuration.Combine;
import org.infinispan.commons.configuration.attributes.AttributeDefinition;
import org.infinispan.commons.configuration.attributes.AttributeSet;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationBuilder;
import org.infinispan.persistence.jdbc.common.configuration.AbstractJdbcStoreConfigurationChildBuilder;
/**
 * Configures the SQL statements (select, select-all, delete, delete-all, upsert and size) used
 * by the query-based JDBC store.
 *
 * @author William Burns
 * @since 13.0
 */
public class QueriesJdbcConfigurationBuilder<S extends AbstractJdbcStoreConfigurationBuilder<?, S>> extends AbstractJdbcStoreConfigurationChildBuilder<S> implements Builder<QueriesJdbcConfiguration> {

   private final AttributeSet attributes;

   public QueriesJdbcConfigurationBuilder(AbstractJdbcStoreConfigurationBuilder<?, S> builder) {
      super(builder);
      this.attributes = QueriesJdbcConfiguration.attributeDefinitionSet();
   }

   @Override
   public AttributeSet attributes() {
      return attributes;
   }

   /**
    * Configures the select statement to be used when reading entries from the database. Note all parameters must be
    * named (i.e. <b>:myname</b>) and the parameters must be the same name and order as the one provided to {@link
    * #delete(String)}.
    *
    * @param select the select statement to use
    * @return this
    */
   public QueriesJdbcConfigurationBuilder<S> select(String select) {
      attributes.attribute(SELECT).set(select);
      return this;
   }

   /**
    * Configures the select all statement to be used when reading all entries from the database. No parameters may be
    * used.
    *
    * @param selectAll the select all statement to use
    * @return this
    */
   public QueriesJdbcConfigurationBuilder<S> selectAll(String selectAll) {
      attributes.attribute(SELECT_ALL).set(selectAll);
      return this;
   }

   /**
    * Configures the delete statement to be used when removing entries from the database. Note all parameters must be
    * named (i.e. <b>:myname</b>) and the parameters must be the same name and order as the one provided to {@link
    * #select(String)}.
    *
    * @param delete the delete statement to use
    * @return this
    */
   public QueriesJdbcConfigurationBuilder<S> delete(String delete) {
      attributes.attribute(DELETE).set(delete);
      return this;
   }

   /**
    * Configures the delete all statement to be used when clearing the store. No parameters may be used.
    *
    * @param deleteAll the delete all statement to use
    * @return this
    */
   public QueriesJdbcConfigurationBuilder<S> deleteAll(String deleteAll) {
      attributes.attribute(DELETE_ALL).set(deleteAll);
      return this;
   }

   /**
    * Configures the upsert statement to be used when writing entries to the database. Note all parameters must be named
    * (i.e. <b>:myname</b>).
    *
    * @param upsert the upsert statement to use
    * @return this
    */
   public QueriesJdbcConfigurationBuilder<S> upsert(String upsert) {
      attributes.attribute(UPSERT).set(upsert);
      return this;
   }

   /**
    * Configures the size statement to be used when determining the size of the store. No parameters may be used.
    *
    * @param size the size statement to use
    * @return this
    */
   public QueriesJdbcConfigurationBuilder<S> size(String size) {
      attributes.attribute(SIZE).set(size);
      return this;
   }

   // True when the given statement attribute has not been configured.
   private boolean isMissing(AttributeDefinition<String> statement) {
      return attributes.attribute(statement).isNull();
   }

   /**
    * Validates that the read-side statements (size, select, select-all) are configured; these
    * are required even for a read-only store.
    */
   @Override
   public void validate() {
      if (isMissing(SIZE) || isMissing(SELECT) || isMissing(SELECT_ALL)) {
         throw org.infinispan.persistence.jdbc.common.logging.Log.CONFIG.requiredStatementsForQueryStoreLoader();
      }
   }

   /**
    * Validates the configured statements.
    *
    * @param isLoader {@code true} when the store is read-only; in that case the write-side
    *                 statements (delete, delete-all, upsert) are not required
    */
   public void validate(boolean isLoader) {
      validate();
      if (!isLoader && (isMissing(DELETE) || isMissing(DELETE_ALL) || isMissing(UPSERT))) {
         throw org.infinispan.persistence.jdbc.common.logging.Log.CONFIG.requiredStatementsForQueryStoreWriter();
      }
   }

   @Override
   public QueriesJdbcConfiguration create() {
      return new QueriesJdbcConfiguration(attributes.protect());
   }

   @Override
   public Builder<?> read(QueriesJdbcConfiguration template, Combine combine) {
      attributes.read(template.attributes(), combine);
      return this;
   }
}
| 4,995
| 36.007407
| 200
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreSSLTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.testng.AssertJUnit.assertEquals;
import java.io.IOException;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.commons.test.security.TestCertificates;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.interceptors.impl.CacheMgmtInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.persistence.BaseNonBlockingStoreTest;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.persistence.remote.configuration.SecurityConfigurationBuilder;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.server.core.security.simple.SimpleSaslAuthenticator;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.server.hotrod.configuration.HotRodServerConfigurationBuilder;
import org.infinispan.server.hotrod.test.HotRodTestingUtil;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
/**
 * Exercises {@code RemoteStore} against a Hot Rod server that requires TLS with client
 * certificates and EXTERNAL SASL authentication.
 *
 * @author Tristan Tarrant
 * @since 9.1
 */
@Test(testName = "persistence.remote.RemoteStoreSSLTest", groups = "functional")
public class RemoteStoreSSLTest extends BaseNonBlockingStoreTest {

   private static final String REMOTE_CACHE = "remote-cache";

   // Server side: embedded cache manager fronted by a TLS-enabled Hot Rod server.
   private EmbeddedCacheManager serverCacheManager;
   private Cache<Object, Object> serverCache;
   private HotRodServer hrServer;

   // Builds the store configuration and lazily starts the secured Hot Rod server the store
   // connects to. Client and server mutually authenticate with certificates.
   @Override
   protected Configuration buildConfig(ConfigurationBuilder builder) {
      serverCacheManager = TestCacheManagerFactory.createCacheManager(
            new GlobalConfigurationBuilder().defaultCacheName(REMOTE_CACHE),
            hotRodCacheConfiguration(builder));
      // NOTE(review): 'cl' appears unused — candidate for removal.
      ClassLoader cl = RemoteStoreSSLTest.class.getClassLoader();
      // Unfortunately BaseNonBlockingStoreTest stops and restarts the store, which can start a second hrServer - prevent that
      if (hrServer == null) {
         serverCache = serverCacheManager.getCache(REMOTE_CACHE);
         TestingUtil.replaceComponent(serverCacheManager, TimeService.class, timeService, true);
         SimpleSaslAuthenticator ssa = new SimpleSaslAuthenticator();
         HotRodServerConfigurationBuilder serverBuilder = HotRodTestingUtil.getDefaultHotRodConfiguration();
         // Server requires TLS client authentication against its trust store.
         serverBuilder
               .ssl()
               .enable()
               .requireClientAuth(true)
               .keyStoreFileName(TestCertificates.certificate("server"))
               .keyStorePassword(TestCertificates.KEY_PASSWORD)
               .keyAlias("server")
               .trustStoreFileName(TestCertificates.certificate("trust"))
               .trustStorePassword(TestCertificates.KEY_PASSWORD);
         // EXTERNAL SASL: identity is taken from the client certificate.
         serverBuilder
               .authentication()
               .enable()
               .sasl()
               .serverName("localhost")
               .addAllowedMech("EXTERNAL")
               .authenticator(ssa);
         hrServer = new HotRodServer();
         hrServer.start(serverBuilder.build(), serverCacheManager);
      }
      // Client side of the store: present the client certificate and trust the server's CA.
      SecurityConfigurationBuilder remoteSecurity = builder
            .persistence()
            .addStore(RemoteStoreConfigurationBuilder.class)
            .remoteCacheName(REMOTE_CACHE)
            .shared(true)
            // Store cannot be segmented as the remote cache is LOCAL and it doesn't report its segments?
            .segmented(false)
            .remoteSecurity();
      remoteSecurity
            .ssl().enable()
            .keyStoreFileName(TestCertificates.certificate("client"))
            .keyStorePassword(TestCertificates.KEY_PASSWORD)
            .trustStoreFileName(TestCertificates.certificate("ca"))
            .trustStorePassword(TestCertificates.KEY_PASSWORD)
            .addServer()
            .host(hrServer.getHost())
            .port(hrServer.getPort());
      remoteSecurity
            .authentication().enable()
            .saslMechanism("EXTERNAL");
      return builder.build();
   }

   @Override
   protected NonBlockingStore<Object, Object> createStore() throws Exception {
      return new RemoteStore<>();
   }

   @Override
   protected PersistenceMarshaller getMarshaller() {
      return TestingUtil.extractPersistenceMarshaller(serverCacheManager);
   }

   // Tears down the Hot Rod server and server cache manager after each method so the next
   // buildConfig() starts a fresh server.
   @Override
   @AfterMethod(alwaysRun = true)
   public void tearDown() {
      configuration = null;
      super.tearDown();
      HotRodClientTestingUtil.killServers(hrServer);
      hrServer = null;
      TestingUtil.killCacheManagers(serverCacheManager);
   }

   @Override
   protected boolean storePurgesAllExpired() {
      return false;
   }

   // Keys are compared against the store in marshalled form; WrappedByteArray wraps the raw
   // bytes produced by the marshaller.
   @Override
   protected Object keyToStorage(Object key) {
      try {
         return new WrappedByteArray(marshaller.objectToByteBuffer(key));
      } catch (IOException | InterruptedException e) {
         throw new AssertionError(e);
      }
   }

   @Override
   public void testApproximateSize() {
      // The server only reports the approximate size when the cache's statistics are enabled
      TestingUtil.findInterceptor(serverCache, CacheMgmtInterceptor.class).setStatisticsEnabled(true);
      super.testApproximateSize();
      TestingUtil.findInterceptor(serverCache, CacheMgmtInterceptor.class).setStatisticsEnabled(false);
      // With statistics disabled the store reports an unknown size (-1).
      assertEquals(-1L, store.approximateSizeWait(segments));
   }
}
| 5,867
| 38.38255
| 126
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/GetWithMetadataReadCommittedTest.java
|
package org.infinispan.persistence.remote;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Variant of {@code GetWithMetadataTest} that runs the target cache with the
 * {@code READ_COMMITTED} isolation level.
 *
 * @author gustavonalle
 * @since 9.0
 */
@Test(testName = "persistence.remote.GetWithMetadataReadCommittedTest", groups = "functional")
public class GetWithMetadataReadCommittedTest extends GetWithMetadataTest {

   @Override
   protected ConfigurationBuilder getTargetCacheConfiguration(int sourcePort) {
      ConfigurationBuilder builder = super.getTargetCacheConfiguration(sourcePort);
      builder.locking().isolationLevel(IsolationLevel.READ_COMMITTED);
      return builder;
   }
}
| 745
| 34.52381
| 100
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreConfigUriTest.java
|
package org.infinispan.persistence.remote;
import org.testng.annotations.Test;
/**
 * Simple test to sample how remote cache store is configured with URI attribute.
 *
 * @author Durgesh Anaokar
 * @since 13.0.0
 */
@Test(testName = "persistence.remote.RemoteStoreConfigUriTest", groups = "functional")
public class RemoteStoreConfigUriTest extends RemoteStoreConfigTest {

   // Port the Hot Rod server is started on; presumably matches the URI declared in the XML
   // configuration below — verify against the resource file.
   private static final int PORT = 19811;
   // Configuration resource declaring the remote store through a single URI attribute.
   public static final String CACHE_LOADER_CONFIG = "remote-cl-uri-config.xml";
   public static final String STORE_CACHE_NAME = "RemoteStoreConfigUriTest";

   public RemoteStoreConfigUriTest() {
      super(CACHE_LOADER_CONFIG,STORE_CACHE_NAME, PORT);
   }
}
| 679
| 29.909091
| 86
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreWrapperTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.assertHotRodEquals;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.jboss.marshalling.commons.GenericJBossMarshaller;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Verifies that entries written through a "source" Hot Rod server can be read back
 * through a second "target" Hot Rod server whose default cache is backed by a
 * RemoteStore configured with {@code hotRodWrapping(true)} pointing at the source.
 */
@Test(testName = "persistence.remote.RemoteStoreWrapperTest", groups="functional")
public class RemoteStoreWrapperTest extends AbstractInfinispanTest {

   // Shared by both remote clients; also used by marshall() to compute the raw
   // byte[] payloads expected on the server side.
   private final Marshaller marshaller = new GenericJBossMarshaller();

   private HotRodServer sourceServer;
   private HotRodServer targetServer;
   private EmbeddedCacheManager serverCacheManager;
   private Cache<byte[], byte[]> serverCache;
   private EmbeddedCacheManager targetCacheManager;
   private Cache<byte[], byte[]> targetCache;
   private RemoteCacheManager remoteSourceCacheManager;
   private RemoteCache<String, String> remoteSourceCache;
   private RemoteCacheManager remoteTargetCacheManager;
   private RemoteCache<String, String> remoteTargetCache;

   /**
    * Starts the source server, then a target cache manager whose cache has a
    * non-segmented RemoteStore (hotRodWrapping enabled) pointing at the source
    * server, and opens one remote client against each server.
    */
   @BeforeClass
   public void setup() throws Exception {
      ConfigurationBuilder serverBuilder = TestCacheManagerFactory.getDefaultCacheConfiguration(false);
      serverCacheManager = TestCacheManagerFactory
            .createCacheManager(hotRodCacheConfiguration(serverBuilder, MediaType.APPLICATION_JBOSS_MARSHALLING));
      serverCache = serverCacheManager.getCache();
      sourceServer = HotRodClientTestingUtil.startHotRodServer(serverCacheManager);

      remoteSourceCacheManager = createAndStartRemoteCacheManager(sourceServer);
      remoteSourceCache = remoteSourceCacheManager.getCache();

      ConfigurationBuilder clientBuilder = TestCacheManagerFactory.getDefaultCacheConfiguration(false);
      clientBuilder.persistence().addStore(RemoteStoreConfigurationBuilder.class)
            .hotRodWrapping(true)
            // LOCAL cache doesn't have segments
            .segmented(false)
            .addServer()
               .host(sourceServer.getHost())
               .port(sourceServer.getPort());
      targetCacheManager = TestCacheManagerFactory
            .createCacheManager(hotRodCacheConfiguration(clientBuilder, MediaType.APPLICATION_JBOSS_MARSHALLING));
      targetCache = targetCacheManager.getCache();
      targetServer = HotRodClientTestingUtil.startHotRodServer(targetCacheManager);

      remoteTargetCacheManager = createAndStartRemoteCacheManager(targetServer);
      remoteTargetCache = remoteTargetCacheManager.getCache();
   }

   // Builds a started client for the given server, using the shared marshaller.
   private RemoteCacheManager createAndStartRemoteCacheManager(HotRodServer server) {
      RemoteCacheManager rcm = new RemoteCacheManager(
            HotRodClientTestingUtil.newRemoteConfigurationBuilder(server)
                  .marshaller(marshaller)
                  .build()
      );
      rcm.start();
      return rcm;
   }

   /**
    * Writes through the source client, checks the raw marshalled bytes on the source
    * server, then reads the same keys back through the target server (via its
    * RemoteStore) and expects the original String values.
    */
   public void testEntryWrapping() throws Exception {
      remoteSourceCache.put("k1", "v1");
      remoteSourceCache.put("k2", "v2");
      assertHotRodEquals(serverCacheManager, marshall("k1"), marshall("v1"));
      assertHotRodEquals(serverCacheManager, marshall("k2"), marshall("v2"));

      String v1 = remoteTargetCache.get("k1");
      assertEquals("v1", v1);
      String v2 = remoteTargetCache.get("k2");
      assertEquals("v2", v2);
   }

   // Marshals an object exactly as the Hot Rod clients in this test do.
   private byte[] marshall(Object o) throws Exception {
      return marshaller.objectToByteBuffer(o);
   }

   // Each test method starts from empty caches on both sides.
   @BeforeMethod
   public void cleanup() {
      serverCache.clear();
      targetCache.clear();
   }

   /**
    * Tears down in reverse dependency order: clients first, then servers, then the
    * embedded cache managers; fields are nulled so stale references are not reused.
    */
   @AfterClass
   public void tearDown() {
      HotRodClientTestingUtil.killRemoteCacheManagers(remoteSourceCacheManager, remoteTargetCacheManager);
      remoteSourceCacheManager = null;
      remoteTargetCacheManager = null;
      HotRodClientTestingUtil.killServers(sourceServer, targetServer);
      sourceServer = null;
      targetServer = null;
      TestingUtil.killCacheManagers(targetCacheManager, serverCacheManager);
      targetCacheManager = null;
      serverCacheManager = null;
   }
}
| 4,908
| 42.061404
| 114
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/GetWithMetadataTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.killRemoteCacheManager;
import static org.infinispan.client.hotrod.test.HotRodClientTestingUtil.killServers;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.infinispan.test.TestingUtil.killCacheManagers;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertTrue;
import java.util.concurrent.TimeUnit;
import org.infinispan.client.hotrod.MetadataValue;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.jboss.marshalling.commons.GenericJBossMarshaller;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Test for getWithMetadata backed with a remote store: entries written to a source
 * Hot Rod server must be readable, with their metadata (lifespan, maxIdle, created,
 * lastUsed), through a target server whose cache loads from the source via a
 * RemoteStore.
 */
@Test(testName = "persistence.remote.GetWithMetadataTest", groups = "functional")
public class GetWithMetadataTest extends AbstractInfinispanTest {

   public static final String CACHE_NAME = "testCache";

   // Opens a client against the given server using the JBoss Marshalling marshaller
   // (matching the APPLICATION_JBOSS_MARSHALLING media type of the caches below).
   private <K, V> RemoteCache<K, V> getRemoteCache(HotRodServer hotRodServer) {
      RemoteCacheManager remoteCacheManager = new RemoteCacheManager(
            HotRodClientTestingUtil.newRemoteConfigurationBuilder(hotRodServer)
                  .marshaller(GenericJBossMarshaller.class)
                  .build()
      );
      return remoteCacheManager.getCache(CACHE_NAME);
   }

   /**
    * Builds the target cache configuration: a shared, non-segmented RemoteStore with
    * hotRodWrapping pointing at the source server on {@code sourcePort}. Subclasses
    * override to vary locking isolation.
    */
   protected ConfigurationBuilder getTargetCacheConfiguration(int sourcePort) {
      ConfigurationBuilder cb = hotRodCacheConfiguration(MediaType.APPLICATION_JBOSS_MARSHALLING);
      cb.persistence()
            .addStore(RemoteStoreConfigurationBuilder.class)
            .remoteCacheName(CACHE_NAME)
            .hotRodWrapping(true)
            // Store cannot be segmented as the remote cache is LOCAL and it doesn't report its segments?
            .segmented(false)
            .addServer()
               .host("localhost")
               .port(sourcePort)
            .shared(true);
      return cb;
   }

   public void testGetWithMetadata() {
      EmbeddedCacheManager sourceCacheManager = null;
      EmbeddedCacheManager targetCacheManager = null;
      HotRodServer sourceServer = null;
      HotRodServer targetServer = null;
      RemoteCache<String, String> sourceRemoteCache = null;
      RemoteCache<String, String> targetRemoteCache = null;
      try {
         // Create source hotrod server
         sourceCacheManager = TestCacheManagerFactory.createCacheManager(hotRodCacheConfiguration());
         sourceCacheManager.defineConfiguration(CACHE_NAME, hotRodCacheConfiguration(MediaType.APPLICATION_JBOSS_MARSHALLING).build());
         sourceServer = HotRodClientTestingUtil.startHotRodServer(sourceCacheManager);

         // Put some entries: "key2" carries a 24h lifespan and 1 day maxIdle.
         sourceRemoteCache = getRemoteCache(sourceServer);
         sourceRemoteCache.put("key", "value");
         sourceRemoteCache.put("key2", "value2", 24, TimeUnit.HOURS, 1, TimeUnit.DAYS);
         sourceRemoteCache.put("key3", "value2");
         MetadataValue<String> key2Metadata = sourceRemoteCache.getWithMetadata("key2");
         // Remember the creation timestamp to verify it is preserved across the store.
         long k2Created = key2Metadata.getCreated();

         // Create target hotrod server, with a remote cache loader pointing to the source one
         targetCacheManager = TestCacheManagerFactory.createCacheManager(hotRodCacheConfiguration(MediaType.APPLICATION_JBOSS_MARSHALLING));
         targetServer = HotRodClientTestingUtil.startHotRodServer(targetCacheManager);
         ConfigurationBuilder targetCacheConfiguration = getTargetCacheConfiguration(sourceServer.getPort());
         targetCacheManager.defineConfiguration(CACHE_NAME, targetCacheConfiguration.build());

         // Try a get with metadata from the target server
         targetRemoteCache = getRemoteCache(targetServer);
         MetadataValue<String> metadataEntry = targetRemoteCache.getWithMetadata("key");
         assertNotNull(metadataEntry);
         MetadataValue<String> otherMetadataEntry = targetRemoteCache.getWithMetadata("key2");
         assertNotNull(otherMetadataEntry);
         // Metadata is reported in seconds: 24h lifespan and 1-day maxIdle both equal 86400s.
         assertEquals(otherMetadataEntry.getLifespan(), 24 * 3600);
         assertEquals(otherMetadataEntry.getMaxIdle(), 24 * 3600);
         assertEquals(otherMetadataEntry.getCreated(), k2Created);
         assertTrue(otherMetadataEntry.getLastUsed() > 0);
      } finally {
         // Tear down clients first, then managers, then servers; guards allow for
         // failures before each resource was created.
         killRemoteCacheManager(targetRemoteCache != null ? targetRemoteCache.getRemoteCacheManager() : null);
         killRemoteCacheManager(sourceRemoteCache != null ? sourceRemoteCache.getRemoteCacheManager() : null);
         killCacheManagers(targetCacheManager, sourceCacheManager);
         killServers(targetServer, sourceServer);
      }
   }
}
| 5,268
| 46.9
| 140
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.util.function.Predicate;
import java.util.function.ToIntBiFunction;
import org.infinispan.AdvancedCache;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.marshall.ProtoStreamMarshaller;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.commons.time.TimeService;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.interceptors.impl.CacheMgmtInterceptor;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.marshall.protostream.impl.MarshallableUserObject;
import org.infinispan.persistence.BaseNonBlockingStoreTest;
import org.infinispan.persistence.internal.PersistenceUtil;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.spi.PersistenceException;
import org.infinispan.protostream.ProtobufUtil;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.SkipException;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Factory;
import org.testng.annotations.Test;
import io.reactivex.rxjava3.core.Flowable;
/**
 * Store-contract tests for {@link RemoteStore}, run against a live Hot Rod server.
 * The {@link #factory()} matrix covers segmented vs non-segmented stores, object vs
 * protostream cache media types, and raw vs wrapped values.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.1
 */
@Test(testName = "persistence.remote.RemoteStoreTest", groups = "functional")
public class RemoteStoreTest extends BaseNonBlockingStoreTest {

   private static final String CACHE_NAME = "remote-cache";

   // Server side: embedded manager + cache backing the Hot Rod endpoint the store talks to.
   private EmbeddedCacheManager serverCacheManager;
   private AdvancedCache<Object, Object> serverCache;
   private HotRodServer hrServer;

   // Test-matrix parameters, set through the fluent setters below and combined in factory().
   private boolean segmented;
   private MediaType cacheMediaType;
   private boolean isRawValues;

   // Used by keyToStorage()/valueToStorage() to reproduce the protostream wire format.
   private ProtoStreamMarshaller marshaller = new ProtoStreamMarshaller(ProtobufUtil.newSerializationContext());

   public RemoteStoreTest segmented(boolean segmented) {
      this.segmented = segmented;
      return this;
   }

   public RemoteStoreTest cacheMediaType(MediaType cacheMediaType) {
      this.cacheMediaType = cacheMediaType;
      return this;
   }

   public RemoteStoreTest rawValues(boolean isRawValues) {
      this.isRawValues = isRawValues;
      return this;
   }

   // All 8 combinations of {segmented} x {OBJECT, PROTOSTREAM} x {rawValues}.
   @Factory
   public Object[] factory() {
      return new Object[] {
            new RemoteStoreTest().segmented(false).cacheMediaType(MediaType.APPLICATION_OBJECT).rawValues(true),
            new RemoteStoreTest().segmented(false).cacheMediaType(MediaType.APPLICATION_OBJECT).rawValues(false),
            new RemoteStoreTest().segmented(false).cacheMediaType(MediaType.APPLICATION_PROTOSTREAM).rawValues(true),
            new RemoteStoreTest().segmented(false).cacheMediaType(MediaType.APPLICATION_PROTOSTREAM).rawValues(false),
            new RemoteStoreTest().segmented(true).cacheMediaType(MediaType.APPLICATION_OBJECT).rawValues(true),
            new RemoteStoreTest().segmented(true).cacheMediaType(MediaType.APPLICATION_OBJECT).rawValues(false),
            new RemoteStoreTest().segmented(true).cacheMediaType(MediaType.APPLICATION_PROTOSTREAM).rawValues(true),
            new RemoteStoreTest().segmented(true).cacheMediaType(MediaType.APPLICATION_PROTOSTREAM).rawValues(false),
      };
   }

   // Makes the active parameter combination visible in the test name.
   @Override
   protected String parameters() {
      return "[" + segmented + ", " + cacheMediaType + ", " + isRawValues + "]";
   }

   /**
    * Builds the store-side configuration and, on first call only, starts the backing
    * clustered cache manager and Hot Rod server (the base class stops/restarts the
    * store, so this method can run more than once).
    */
   @Override
   protected Configuration buildConfig(ConfigurationBuilder cb) {
      cb.memory().maxCount(WRITE_DELETE_BATCH_MAX_ENTRIES)
            .expiration().wakeUpInterval(10L);
      // Unfortunately BaseNonBlockingStoreTest stops and restarts the store, which can start a second hrServer - prevent that
      if (hrServer == null) {
         GlobalConfigurationBuilder globalConfig = new GlobalConfigurationBuilder().clusteredDefault();
         globalConfig.defaultCacheName(CACHE_NAME);
         ConfigurationBuilder configurationBuilder = hotRodCacheConfiguration(cb);
         configurationBuilder.encoding().mediaType(cacheMediaType.toString());
         configurationBuilder.clustering().cacheMode(CacheMode.DIST_SYNC);
         configurationBuilder.statistics().enable();
         serverCacheManager = TestCacheManagerFactory.createClusteredCacheManager(
               globalConfig, configurationBuilder);
         // Inject the controllable time service so expiration tests can advance time.
         TestingUtil.replaceComponent(serverCacheManager, TimeService.class, timeService, true);
         serverCache = serverCacheManager.getCache(CACHE_NAME).getAdvancedCache()
               .withMediaType(MediaType.APPLICATION_OBJECT, MediaType.APPLICATION_OBJECT);
         keyPartitioner = TestingUtil.extractComponent(serverCache, KeyPartitioner.class);
         hrServer = HotRodClientTestingUtil.startHotRodServer(serverCacheManager);
      }
      // Set it to dist so it has segments
      cb.clustering().cacheMode(CacheMode.DIST_SYNC);
      cb.encoding().mediaType(cacheMediaType.toString());
      RemoteStoreConfigurationBuilder storeConfigurationBuilder = cb
            .persistence()
            .addStore(RemoteStoreConfigurationBuilder.class)
            .remoteCacheName(CACHE_NAME)
            .rawValues(isRawValues);
      storeConfigurationBuilder
            .addServer()
            .host(hrServer.getHost())
            .port(hrServer.getPort());
      storeConfigurationBuilder.segmented(segmented);
      storeConfigurationBuilder.shared(true);
      return cb.build();
   }

   @Override
   protected NonBlockingStore<Object, Object> createStore() {
      return new RemoteStore<>();
   }

   @Override
   protected PersistenceMarshaller getMarshaller() {
      return TestingUtil.extractPersistenceMarshaller(serverCacheManager);
   }

   /**
    * Stops the store via the base class first, then the Hot Rod server and the
    * backing cache manager; configuration is reset so buildConfig runs fresh.
    */
   @Override
   @AfterMethod(alwaysRun = true)
   public void tearDown() {
      configuration = null;
      super.tearDown();
      HotRodClientTestingUtil.killServers(hrServer);
      hrServer = null;
      TestingUtil.killCacheManagers(serverCacheManager);
   }

   // RemoteStore delegates expiration to the server; purge is not performed locally.
   @Override
   protected boolean storePurgesAllExpired() {
      return false;
   }

   // For protostream caches, keys travel as marshalled bytes wrapped in WrappedByteArray.
   @Override
   protected Object keyToStorage(Object key) {
      if (cacheMediaType.equals(MediaType.APPLICATION_PROTOSTREAM)) {
         try {
            return new WrappedByteArray(marshaller.objectToByteBuffer(key));
         } catch (IOException | InterruptedException e) {
            throw new AssertionError(e);
         }
      }
      return super.keyToStorage(key);
   }

   // Values use the same storage transformation as keys.
   @Override
   protected Object valueToStorage(Object value) {
      return keyToStorage(value);
   }

   @Override
   public void testReplaceExpiredEntry() {
      store.write(marshalledEntry(internalCacheEntry("k1", "v1", 100)));
      // Hot Rod does not support milliseconds, so 100ms is rounded to the nearest second,
      // and so data is stored for 1 second here. Adjust waiting time accordingly.
      timeService.advance(1101);
      Object storedKey = keyToStorage("k1");
      assertNull(store.loadEntry(storedKey));

      long start = System.currentTimeMillis();
      store.write(marshalledEntry(internalCacheEntry("k1", "v2", 100)));
      // Either the new value is readable, or enough real time passed for it to expire again.
      assertTrue(store.loadEntry(storedKey).getValue().equals(valueToStorage("v2")) ||
            TestingUtil.moreThanDurationElapsed(start, 100));
   }

   /**
    * Writes one entry, then runs the given counting function twice: once with an
    * IntSet containing the entry's segment (expects 1) and once with all other
    * segments (expects 0).
    */
   void countWithSegments(ToIntBiFunction<NonBlockingStore<Object, Object>, IntSet> countFunction) {
      // TODO: Needs to be addressed in https://issues.redhat.com/browse/ISPN-14533
      if (segmented && MediaType.APPLICATION_OBJECT.equals(cacheMediaType)) {
         throw new SkipException("Test disabled for now");
      }
      store.write(marshalledEntry(internalCacheEntry("k1", "v1", 100)));

      int segment = getKeySegment("k1");

      // Publish keys should return our key if we use a set that contains that segment
      assertEquals(1, countFunction.applyAsInt(store, IntSets.immutableSet(segment)));

      // Create int set that includes all segments but the one that maps to the key
      int maxSegments = serverCache.getCacheConfiguration().clustering().hash().numSegments();
      IntSet intSet = IntSets.mutableEmptySet(maxSegments);
      for (int i = 0; i < maxSegments; ++i) {
         if (i != segment) {
            intSet.set(i);
         }
      }

      // Publish keys shouldn't return our key since the IntSet doesn't contain our segment
      assertEquals(0, countFunction.applyAsInt(store, intSet));
   }

   // Resolves the segment of a key, accounting for the MarshallableUserObject wrapper
   // used for non-raw OBJECT-typed segmented stores.
   int getKeySegment(Object obj) {
      Object key = keyToStorage(obj);
      if (segmented && !isRawValues && cacheMediaType.equals(MediaType.APPLICATION_OBJECT))
         key = new MarshallableUserObject<>(key);
      return keyPartitioner.getSegment(key);
   }

   public void testPublishKeysWithSegments() {
      countWithSegments((salws, intSet) -> {
         IntSet segments;
         Predicate<Object> predicate;
         if (segmented) {
            segments = intSet;
            predicate = null;
         } else {
            // Non-segmented stores filter by predicate instead of by segment set.
            segments = null;
            predicate = PersistenceUtil.<Object>combinePredicate(intSet, keyPartitioner, null);
         }
         return Flowable.fromPublisher(salws.publishKeys(segments, predicate))
               .count()
               .blockingGet().intValue();
      });
   }

   public void testPublishEntriesWithSegments() {
      countWithSegments((salws, intSet) -> {
         IntSet segments;
         Predicate<Object> predicate;
         if (segmented) {
            segments = intSet;
            predicate = null;
         } else {
            // Non-segmented stores filter by predicate instead of by segment set.
            segments = null;
            predicate = PersistenceUtil.<Object>combinePredicate(intSet, keyPartitioner, null);
         }
         return Flowable.fromPublisher(salws.publishEntries(segments, predicate, false))
               .count()
               .blockingGet().intValue();
      });
   }

   @Override
   @Test(enabled = false)
   public void testLoadAndStoreBytesValues() throws PersistenceException, IOException, InterruptedException {
      // This test messes with the actual types provided which can fail due to different media types
   }

   @Override
   public void testApproximateSize() {
      // The server only reports the approximate size when the cache's statistics are enabled
      TestingUtil.findInterceptor(serverCache, CacheMgmtInterceptor.class).setStatisticsEnabled(true);
      super.testApproximateSize();

      TestingUtil.findInterceptor(serverCache, CacheMgmtInterceptor.class).setStatisticsEnabled(false);
      // With statistics off, the store reports an unknown (-1) approximate size.
      assertEquals(-1L, store.approximateSizeWait(segments));
   }

   @Override
   protected void purgeExpired(InternalCacheEntry... expiredEntries) {
      // RemoteStore does nothing for purgeExpired
   }
}
| 11,533
| 39.048611
| 126
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/GetWithMetadataRepeatableReadTest.java
|
package org.infinispan.persistence.remote;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
/**
 * Variant of {@link GetWithMetadataTest} that runs the target cache with
 * REPEATABLE_READ locking isolation; everything else is inherited unchanged.
 *
 * @author gustavonalle
 * @since 9.0
 */
@Test(testName = "persistence.remote.GetWithMetadataRepeatableReadTest", groups = "functional")
public class GetWithMetadataRepeatableReadTest extends GetWithMetadataTest {

   @Override
   protected ConfigurationBuilder getTargetCacheConfiguration(int sourcePort) {
      // Start from the base configuration and only tighten the isolation level.
      ConfigurationBuilder builder = super.getTargetCacheConfiguration(sourcePort);
      builder.locking().isolationLevel(IsolationLevel.REPEATABLE_READ);
      return builder;
   }
}
| 748
| 34.666667
| 100
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreFunctionalTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.PersistenceConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.BaseStoreFunctionalTest;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.Test;
/**
 * Functional store tests backed by a RemoteStore that points at a freshly started
 * Hot Rod server, plus validation tests for unsupported segmented configurations.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.1
 */
@Test(testName = "persistence.remote.RemoteStoreFunctionalTest", groups = "functional")
public class RemoteStoreFunctionalTest extends BaseStoreFunctionalTest {

   private EmbeddedCacheManager backingManager;
   private HotRodServer backingServer;

   @Override
   protected PersistenceConfigurationBuilder createCacheStoreConfig(PersistenceConfigurationBuilder persistence,
                                                                    String cacheName, boolean preload) {
      // Stand up the server the store under test will connect to.
      backingManager = TestCacheManagerFactory.createCacheManager(hotRodCacheConfiguration());
      backingServer = HotRodClientTestingUtil.startHotRodServer(backingManager);
      RemoteStoreConfigurationBuilder store = persistence.addStore(RemoteStoreConfigurationBuilder.class);
      store.remoteCacheName("")
            .preload(preload)
            // local cache encoding is object where as server is protostream so we can't be segmented
            .segmented(false)
            .addServer()
               .host("localhost")
               .port(backingServer.getPort());
      return persistence;
   }

   @Override
   protected void teardown() {
      super.teardown();
      HotRodClientTestingUtil.killServers(backingServer);
      backingServer = null;
      TestingUtil.killCacheManagers(backingManager);
      backingManager = null;
   }

   @Override
   public void testTwoCachesSameCacheStore() {
      //not applicable
   }

   // Segmentation requires a protocol version that exposes segment ownership.
   @Test(expectedExceptions = CacheConfigurationException.class)
   public void testSegmentedWithUnsupportedVersion() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.persistence()
            .addStore(RemoteStoreConfigurationBuilder.class)
            .segmented(true)
            .protocolVersion(ProtocolVersion.PROTOCOL_VERSION_21);
      builder.build();
   }

   // Grouping overrides key-to-segment mapping, which a segmented store cannot honour.
   @Test(expectedExceptions = CacheConfigurationException.class)
   public void testSegmentedWithGroups() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.clustering().hash().groups().enabled();
      builder.persistence()
            .addStore(RemoteStoreConfigurationBuilder.class)
            .segmented(true);
      builder.build();
   }
}
| 2,960
| 36.961538
| 112
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreConfigTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.infinispan.test.TestingUtil.withCacheManager;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.server.hotrod.test.HotRodTestingUtil;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.CacheManagerCallable;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Simple test to sample how remote cache store is configured: a cache defined in an
 * XML file writes through a RemoteStore into a backing Hot Rod server, and the data
 * survives the client cache being stopped and restarted.
 *
 * @author Mircea.Markus@jboss.com
 * @since 4.1
 */
@Test(testName = "persistence.remote.RemoteStoreConfigTest", groups = "functional")
public class RemoteStoreConfigTest extends AbstractInfinispanTest {

   private static final int PORT = 19711;
   public static final String CACHE_LOADER_CONFIG = "remote-cl-config.xml";
   public static final String STORE_CACHE_NAME = "RemoteStoreConfigTest";

   private EmbeddedCacheManager cacheManager;
   private HotRodServer hotRodServer;

   // Per-instance parameters, now final: assigned only in the constructor and reused
   // by subclasses such as RemoteStoreConfigUriTest with different values.
   private final String cacheLoaderConfig;
   private final String storeCacheName;
   private final int port;

   public RemoteStoreConfigTest(String cacheLoaderConfig, String storeCacheName, int port) {
      // Redundant super() call removed.
      this.cacheLoaderConfig = cacheLoaderConfig;
      this.storeCacheName = storeCacheName;
      this.port = port;
   }

   public RemoteStoreConfigTest() {
      this(CACHE_LOADER_CONFIG, STORE_CACHE_NAME, PORT);
   }

   /** Starts the backing cache (initially empty) and the Hot Rod server the store connects to. */
   @BeforeClass
   public void startUp() {
      cacheManager = TestCacheManagerFactory.createCacheManager();
      Cache<?, ?> storeCache = cacheManager.createCache(this.storeCacheName, hotRodCacheConfiguration().build());
      assertEquals(storeCache.size(), 0);
      hotRodServer = HotRodTestingUtil.startHotRodServer(cacheManager, this.port);
   }

   /**
    * Writes through an XML-configured cache, verifies the entry lands in the remote
    * backing cache and survives the client cache being stopped, then reads it back
    * from a brand-new cache manager built from the same XML.
    */
   public void simpleTest() throws Exception {
      String cacheName = this.storeCacheName;
      withCacheManager(new CacheManagerCallable(TestCacheManagerFactory.fromXml(this.cacheLoaderConfig)) {
         @Override
         public void call() {
            Cache<Object, Object> cache = cm.getCache(cacheName);
            cache.put("k", "v");
            Cache<Object, Object> storeCache = cacheManager.getCache(cacheName);
            assertEquals(1, storeCache.size());
            cache.stop();
            // The remote entry must survive the client-side cache being stopped.
            assertEquals(1, storeCache.size());
         }
      });
      withCacheManager(new CacheManagerCallable(TestCacheManagerFactory.fromXml(this.cacheLoaderConfig)) {
         @Override
         public void call() {
            // Raw type fixed: parameterized consistently with the first callable above.
            Cache<Object, Object> cache = cm.getCache(cacheName);
            assertEquals("v", cache.get("k"));
         }
      });
   }

   @AfterClass
   public void tearDown() {
      HotRodClientTestingUtil.killServers(hotRodServer);
      hotRodServer = null;
      TestingUtil.killCacheManagers(cacheManager);
      cacheManager = null;
   }
}
| 3,227
| 34.086957
| 113
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/RemoteStoreMixedAccessTest.java
|
package org.infinispan.persistence.remote;
import static org.infinispan.server.hotrod.test.HotRodTestingUtil.hotRodCacheConfiguration;
import static org.testng.AssertJUnit.assertEquals;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.client.hotrod.MetadataValue;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.client.hotrod.RemoteCacheManager;
import org.infinispan.client.hotrod.test.HotRodClientTestingUtil;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.persistence.remote.configuration.RemoteStoreConfigurationBuilder;
import org.infinispan.server.hotrod.HotRodServer;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
/**
 * Verifies that data written through a Hot Rod client is also visible to an embedded
 * cache backed by a raw-values RemoteStore pointing at the same server, including
 * lifespan and maxIdle metadata (seconds on the client side, milliseconds embedded).
 */
@Test(testName = "persistence.remote.RemoteStoreMixedAccessTest", groups="functional")
public class RemoteStoreMixedAccessTest extends AbstractInfinispanTest {

   private HotRodServer hrServer;
   private EmbeddedCacheManager serverCacheManager;
   private Cache<String, String> serverCache;
   private EmbeddedCacheManager clientCacheManager;
   private Cache<String, String> clientCache;
   private RemoteCacheManager remoteCacheManager;
   private RemoteCache<String, String> remoteCache;

   /**
    * Starts the server cache manager with a bounded data container, a Hot Rod server
    * on top, an embedded "client" cache manager whose default cache loads from that
    * server via a raw-values RemoteStore, and a plain Hot Rod client.
    */
   @BeforeClass
   public void setup() throws Exception {
      ConfigurationBuilder serverBuilder = TestCacheManagerFactory.getDefaultCacheConfiguration(false);
      // maxCount() replaces the deprecated size(); same entry-count bound, consistent
      // with how the sibling RemoteStoreTest configures memory.
      serverBuilder.memory().maxCount(100)
            .expiration().wakeUpInterval(10L);
      serverCacheManager = TestCacheManagerFactory.createCacheManager(
            hotRodCacheConfiguration(serverBuilder));
      serverCache = serverCacheManager.getCache();
      hrServer = HotRodClientTestingUtil.startHotRodServer(serverCacheManager);

      ConfigurationBuilder clientBuilder = TestCacheManagerFactory.getDefaultCacheConfiguration(false);
      clientBuilder.persistence().addStore(RemoteStoreConfigurationBuilder.class)
            .rawValues(true)
            .segmented(false)
            .addServer()
               .host(hrServer.getHost())
               .port(hrServer.getPort());
      clientCacheManager = TestCacheManagerFactory.createCacheManager(clientBuilder);
      clientCache = clientCacheManager.getCache();

      remoteCacheManager = new RemoteCacheManager(
            HotRodClientTestingUtil.newRemoteConfigurationBuilder(hrServer)
                  .build()
      );
      remoteCacheManager.start();
      remoteCache = remoteCacheManager.getCache();
   }

   // A value put remotely is readable both via the remote client and the embedded cache.
   public void testMixedAccess() {
      remoteCache.put("k1", "v1");
      String rv1 = remoteCache.get("k1");
      assertEquals("v1", rv1);
      MetadataValue<String> mv1 = remoteCache.getWithMetadata("k1");
      assertEquals("v1", mv1.getValue());
      String cv1 = clientCache.get("k1");
      assertEquals("v1", cv1);
   }

   // Lifespan is reported in seconds over Hot Rod and milliseconds in the embedded entry.
   public void testMixedAccessWithLifespan() {
      remoteCache.put("k1", "v1", 120, TimeUnit.SECONDS);
      MetadataValue<String> mv1 = remoteCache.getWithMetadata("k1");
      assertEquals("v1", mv1.getValue());
      assertEquals(120, mv1.getLifespan());
      String cv1 = clientCache.get("k1");
      assertEquals("v1", cv1);
      InternalCacheEntry ice1 = clientCache.getAdvancedCache().getDataContainer().get("k1");
      assertEquals(120000, ice1.getLifespan());
   }

   // Same as above, additionally checking maxIdle propagation.
   public void testMixedAccessWithLifespanAndMaxIdle() {
      remoteCache.put("k1", "v1", 120, TimeUnit.SECONDS, 30, TimeUnit.SECONDS);
      MetadataValue<String> mv1 = remoteCache.getWithMetadata("k1");
      assertEquals("v1", mv1.getValue());
      assertEquals(120, mv1.getLifespan());
      assertEquals(30, mv1.getMaxIdle());
      String cv1 = clientCache.get("k1");
      assertEquals("v1", cv1);
      InternalCacheEntry ice1 = clientCache.getAdvancedCache().getDataContainer().get("k1");
      assertEquals(120000, ice1.getLifespan());
      assertEquals(30000, ice1.getMaxIdle());
   }

   // Each test method starts from empty caches on both sides.
   @BeforeMethod
   public void cleanup() {
      serverCache.clear();
      clientCache.clear();
   }

   /** Tears down client first, then the server, then both cache managers. */
   @AfterClass
   public void tearDown() {
      HotRodClientTestingUtil.killRemoteCacheManager(remoteCacheManager);
      remoteCacheManager = null;
      HotRodClientTestingUtil.killServers(hrServer);
      hrServer = null;
      TestingUtil.killCacheManagers(clientCacheManager, serverCacheManager);
      clientCacheManager = null;
      serverCacheManager = null;
   }
}
| 4,728
| 39.418803
| 103
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/configuration/ConfigurationTest.java
|
package org.infinispan.persistence.remote.configuration;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.testng.annotations.Test;
/**
 * Checks that a programmatically built RemoteStore configuration keeps all of its
 * attributes, and that reading it back through {@code read()} yields an equal copy.
 */
@Test(groups = "unit", testName = "persistence.remote.configuration.ConfigurationTest")
public class ConfigurationTest {

   public void testRemoteCacheStoreConfigurationAdaptor() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.persistence().addStore(RemoteStoreConfigurationBuilder.class)
            .remoteCacheName("RemoteCache")
            .protocolVersion(ProtocolVersion.PROTOCOL_VERSION_27)
            .fetchPersistentState(true)
            .addServer()
               .host("one").port(12111)
            .addServer()
               .host("two")
            .connectionPool()
               .maxActive(10)
               .minIdle(5)
               .exhaustedAction(ExhaustedAction.EXCEPTION)
               .minEvictableIdleTime(10_000)
            .async().enable();
      Configuration configuration = builder.build();
      RemoteStoreConfiguration original = (RemoteStoreConfiguration) configuration.persistence().stores().get(0);
      assertStoreAttributes(original);

      // Round-trip through read() and verify every attribute again on the copy.
      ConfigurationBuilder copyBuilder = new ConfigurationBuilder();
      copyBuilder.persistence().addStore(RemoteStoreConfigurationBuilder.class).read(original);
      Configuration copiedConfiguration = copyBuilder.build();
      RemoteStoreConfiguration copy = (RemoteStoreConfiguration) copiedConfiguration.persistence().stores().get(0);
      assertStoreAttributes(copy);
   }

   // Shared assertions for both the original and the read()-copied configuration.
   private static void assertStoreAttributes(RemoteStoreConfiguration store) {
      assertEquals("RemoteCache", store.remoteCacheName());
      assertEquals(2, store.servers().size());
      assertEquals(10, store.connectionPool().maxActive());
      assertEquals(5, store.connectionPool().minIdle());
      assertEquals(ExhaustedAction.EXCEPTION, store.connectionPool().exhaustedAction());
      assertEquals(10_000, store.connectionPool().minEvictableIdleTime());
      assertTrue(store.async().enabled());
      assertEquals(ProtocolVersion.PROTOCOL_VERSION_27, store.protocol());
   }
}
| 2,602
| 44.666667
| 112
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/configuration/JsonRemoteStoreOnlyParsingTest.java
|
package org.infinispan.persistence.remote.configuration;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.util.List;
import org.infinispan.client.hotrod.ProtocolVersion;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.util.TypedProperties;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.persistence.remote.upgrade.SerializationUtils;
import org.testng.annotations.Test;
/**
 * Tests parsing and serialization of a JSON document containing only a
 * remote-store configuration (no enclosing cache configuration).
 *
 * @since 13.0
 */
@Test(groups = "unit", testName = "persistence.remote.configuration.JsonRemoteStoreOnlyParsingTest")
public class JsonRemoteStoreOnlyParsingTest {

   /**
    * Parses a standalone {@code remote-store} JSON document and verifies that
    * every section (servers, connection pool, async executor, security,
    * encryption) is reflected in the resulting {@link RemoteStoreConfiguration}.
    */
   @Test
   public void testJsonParsing() throws IOException {
      String json = "{\n" +
            "    \"remote-store\":{\n" +
            "        \"cache\":\"ccc\",\n" +
            "        \"shared\":true,\n" +
            "        \"read-only\":false,\n" +
            "        \"hotrod-wrapping\":false,\n" +
            "        \"raw-values\":false,\n" +
            "        \"socket-timeout\":60000,\n" +
            "        \"protocol-version\":\"2.8\",\n" +
            "        \"remote-server\":[\n" +
            "            {\n" +
            "                \"host\":\"127.0.0.2\",\n" +
            "                \"port\":12222\n" +
            "            }\n" +
            "        ],\n" +
            "        \"connection-pool\":{\n" +
            "            \"max-active\":110,\n" +
            "            \"exhausted-action\":\"CREATE_NEW\"\n" +
            "        },\n" +
            "        \"async-executor\":{\n" +
            "            \"properties\":{\n" +
            "                \"name\":4\n" +
            "            }\n" +
            "        },\n" +
            "        \"properties\":{\n" +
            "            \"key\":\"value\"\n" +
            "        },\n" +
            "        \"security\":{\n" +
            "            \"authentication\":{\n" +
            "                \"server-name\":\"servername\",\n" +
            "                \"digest\":{\n" +
            "                    \"username\":\"username\",\n" +
            "                    \"password\":\"password\",\n" +
            "                    \"realm\":\"realm\"\n" +
            "                }\n" +
            "            },\n" +
            "            \"encryption\":{\n" +
            "                \"protocol\":\"TLSv1.2\",\n" +
            "                \"sni-hostname\":\"snihostname\",\n" +
            "                \"keystore\":{\n" +
            "                    \"filename\":\"${project.build.testOutputDirectory}/keystore_client.jks\",\n" +
            "                    \"password\":\"secret\",\n" +
            "                    \"certificate-password\":\"secret\",\n" +
            "                    \"key-alias\":\"hotrod\",\n" +
            "                    \"type\":\"JKS\"\n" +
            "                },\n" +
            "                \"truststore\":{\n" +
            "                    \"filename\":\"${project.build.testOutputDirectory}/ca.jks\",\n" +
            "                    \"type\":\"pem\"\n" +
            "                }\n" +
            "            }\n" +
            "        }\n" +
            "    }\n" +
            "}";
      RemoteStoreConfiguration store = SerializationUtils.fromJson(json);

      // Top-level store attributes.
      assertEquals("ccc", store.remoteCacheName());
      assertTrue(store.shared());
      assertFalse(store.ignoreModifications());
      assertFalse(store.hotRodWrapping());
      assertFalse(store.rawValues());
      assertEquals(60000, store.socketTimeout());
      assertEquals(ProtocolVersion.PROTOCOL_VERSION_28, store.protocol());

      // Single configured remote server.
      List<RemoteServerConfiguration> servers = store.servers();
      RemoteServerConfiguration firstServer = servers.iterator().next();
      assertEquals(1, servers.size());
      assertEquals("127.0.0.2", firstServer.host());
      assertEquals(12222, firstServer.port());

      // Async executor properties.
      TypedProperties asyncExecutorProps = store.asyncExecutorFactory().properties();
      assertEquals(1, asyncExecutorProps.size());
      assertEquals(4, asyncExecutorProps.getLongProperty("name", 0L));

      // Connection pool.
      ConnectionPoolConfiguration poolConfiguration = store.connectionPool();
      assertEquals(ExhaustedAction.CREATE_NEW, poolConfiguration.exhaustedAction());
      assertEquals(110, poolConfiguration.maxActive());

      // Security: a "digest" element implies the DIGEST-MD5 SASL mechanism.
      AuthenticationConfiguration authentication = store.security().authentication();
      assertEquals("servername", authentication.serverName());
      MechanismConfiguration mechanismConfiguration = authentication.mechanismConfiguration();
      // Fixed argument order: AssertJUnit expects (expected, actual), matching
      // every other assertion in this class; the swapped form produced a
      // misleading failure message.
      assertEquals("DIGEST-MD5", mechanismConfiguration.saslMechanism());

      // Encryption.
      SslConfiguration ssl = store.security().ssl();
      assertEquals("snihostname", ssl.sniHostName());
      assertEquals("secret", new String(ssl.keyStorePassword()));
   }

   /**
    * Serializes a programmatically built remote-store configuration to JSON and
    * verifies the output is a single top-level {@code remote-store} element.
    */
   @Test
   public void testJsonSerializing() {
      ConfigurationBuilder builder = new ConfigurationBuilder();
      builder.persistence().addStore(RemoteStoreConfigurationBuilder.class)
            .remoteCacheName("remote").addServer().host("127.0.0.2").port(1111)
            .remoteSecurity()
            .authentication().enable().saslMechanism("DIGEST-MD5")
            .username("user")
            .password("pass")
            .realm("default");

      RemoteStoreConfiguration remoteStoreConfiguration = (RemoteStoreConfiguration) builder.build().persistence().stores().iterator().next();

      Json serialized = Json.read(SerializationUtils.toJson(remoteStoreConfiguration));
      assertEquals(1, serialized.asJsonMap().size());
      assertNotNull(serialized.at("remote-store"));
   }
}
| 5,997
| 42.463768
| 142
|
java
|
null |
infinispan-main/persistence/remote/src/test/java/org/infinispan/persistence/remote/configuration/ConfigurationSerializerTest.java
|
package org.infinispan.persistence.remote.configuration;
import org.infinispan.configuration.cache.StoreConfiguration;
import org.infinispan.configuration.serializer.AbstractConfigurationSerializerTest;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
@Test(testName = "persistence.remote.configuration.ConfigurationSerializerTest", groups="functional")
public class ConfigurationSerializerTest extends AbstractConfigurationSerializerTest {

   /**
    * Extends the generic store comparison with the remote-store specific
    * sub-configurations: connection pool, security and the server list.
    */
   @Override
   protected void compareStoreConfiguration(String name, StoreConfiguration beforeStore, StoreConfiguration afterStore) {
      super.compareStoreConfiguration(name, beforeStore, afterStore);
      RemoteStoreConfiguration expected = (RemoteStoreConfiguration) beforeStore;
      RemoteStoreConfiguration actual = (RemoteStoreConfiguration) afterStore;
      AssertJUnit.assertEquals("Wrong connection pool for " + name + " configuration.", expected.connectionPool(), actual.connectionPool());
      AssertJUnit.assertEquals("Wrong security config for " + name + " configuration.", expected.security(), actual.security());
      AssertJUnit.assertEquals("Wrong remote server config for " + name + " configuration.", expected.servers(), actual.servers());
   }
}
| 1,224
| 60.25
| 137
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.