focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Locates and parses the plugin descriptor for the plugin at the given path.
 *
 * @param pluginPath root directory of the plugin
 * @return the descriptor built from the plugin's properties file
 */
@Override
public PluginDescriptor find(Path pluginPath) {
    // Load the raw key/value pairs first, then map them onto a descriptor object.
    final Properties pluginProperties = readProperties(pluginPath);
    return createPluginDescriptor(pluginProperties);
}
/**
 * find() must raise PluginRuntimeException (rather than return null) when the
 * plugin directory has no readable descriptor.
 */
@Test
public void testNotFound() {
    PluginDescriptorFinder descriptorFinder = new PropertiesPluginDescriptorFinder();
    assertThrows(PluginRuntimeException.class, () -> descriptorFinder.find(pluginsPath.resolve("test-plugin-3")));
}
/**
 * Reads the next object from this stream by delegating to the underlying
 * serialization service, passing this stream as the source. May return null.
 *
 * @throws EOFException if the end of the stream is reached
 */
@Override
@Nullable
public final Object readObject() throws EOFException {
    return service.readObject(this);
}
/**
 * readObject() must delegate to the serialization service with the stream
 * itself as the source argument.
 */
@Test
public void testReadObject() throws Exception {
    in.readObject();
    verify(mockSerializationService).readObject(in);
}
@Override public void importData(JsonReader reader) throws IOException { logger.info("Reading configuration for 1.0"); // this *HAS* to start as an object reader.beginObject(); while (reader.hasNext()) { JsonToken tok = reader.peek(); switch (tok) { case NAME: String name = reader.nextName(); // find out which member it is if (name.equals(CLIENTS)) { readClients(reader); } else if (name.equals(GRANTS)) { readGrants(reader); } else if (name.equals(WHITELISTEDSITES)) { readWhitelistedSites(reader); } else if (name.equals(BLACKLISTEDSITES)) { readBlacklistedSites(reader); } else if (name.equals(AUTHENTICATIONHOLDERS)) { readAuthenticationHolders(reader); } else if (name.equals(ACCESSTOKENS)) { readAccessTokens(reader); } else if (name.equals(REFRESHTOKENS)) { readRefreshTokens(reader); } else if (name.equals(SYSTEMSCOPES)) { readSystemScopes(reader); } else { for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { if (extension.supportsVersion(THIS_VERSION)) { extension.importExtensionData(name, reader); break; } } } // unknown token, skip it reader.skipValue(); } break; case END_OBJECT: // the object ended, we're done here reader.endObject(); continue; default: logger.debug("Found unexpected entry"); reader.skipValue(); continue; } } fixObjectReferences(); for (MITREidDataServiceExtension extension : extensions) { if (extension.supportsVersion(THIS_VERSION)) { extension.fixExtensionObjectReferences(maps); break; } } maps.clearAll(); }
/**
 * importData() must read two authentication holders from the JSON document and
 * save them through the repository, in document order, preserving each
 * holder's client id.
 */
@Test
public void testImportAuthenticationHolders() throws IOException {
    // Two in-memory holders mirroring the entries in the JSON document below.
    OAuth2Request req1 = new OAuth2Request(new HashMap<String, String>(), "client1", new ArrayList<GrantedAuthority>(), true, new HashSet<String>(), new HashSet<String>(), "http://foo.com", new HashSet<String>(), null);
    Authentication mockAuth1 = mock(Authentication.class, withSettings().serializable());
    OAuth2Authentication auth1 = new OAuth2Authentication(req1, mockAuth1);
    AuthenticationHolderEntity holder1 = new AuthenticationHolderEntity();
    holder1.setId(1L);
    holder1.setAuthentication(auth1);
    OAuth2Request req2 = new OAuth2Request(new HashMap<String, String>(), "client2", new ArrayList<GrantedAuthority>(), true, new HashSet<String>(), new HashSet<String>(), "http://bar.com", new HashSet<String>(), null);
    Authentication mockAuth2 = mock(Authentication.class, withSettings().serializable());
    OAuth2Authentication auth2 = new OAuth2Authentication(req2, mockAuth2);
    AuthenticationHolderEntity holder2 = new AuthenticationHolderEntity();
    holder2.setId(2L);
    holder2.setAuthentication(auth2);
    // Input document: every other collection empty, two authentication holders.
    String configJson = "{" + "\"" + MITREidDataService.CLIENTS + "\": [], " + "\"" + MITREidDataService.ACCESSTOKENS + "\": [], " + "\"" + MITREidDataService.REFRESHTOKENS + "\": [], " + "\"" + MITREidDataService.GRANTS + "\": [], " + "\"" + MITREidDataService.WHITELISTEDSITES + "\": [], " + "\"" + MITREidDataService.BLACKLISTEDSITES + "\": [], " + "\"" + MITREidDataService.SYSTEMSCOPES + "\": [], " + "\"" + MITREidDataService.AUTHENTICATIONHOLDERS + "\": [" + "{\"id\":1,\"authentication\":{\"clientAuthorization\":{\"clientId\":\"client1\",\"redirectUri\":\"http://foo.com\"}," + "\"userAuthentication\":null}}," + "{\"id\":2,\"authentication\":{\"clientAuthorization\":{\"clientId\":\"client2\",\"redirectUri\":\"http://bar.com\"}," + "\"userAuthentication\":null}}" + " ]" + "}";
    System.err.println(configJson);
    JsonReader reader = new JsonReader(new StringReader(configJson));
    // Fake repository: assigns fresh ids starting at 356 and records every saved entity.
    final Map<Long, AuthenticationHolderEntity> fakeDb = new HashMap<>();
    when(authHolderRepository.save(isA(AuthenticationHolderEntity.class))).thenAnswer(new Answer<AuthenticationHolderEntity>() {
        Long id = 356L;
        @Override
        public AuthenticationHolderEntity answer(InvocationOnMock invocation) throws Throwable {
            AuthenticationHolderEntity _holder = (AuthenticationHolderEntity) invocation.getArguments()[0];
            if(_holder.getId() == null) {
                _holder.setId(id++);
            }
            fakeDb.put(_holder.getId(), _holder);
            return _holder;
        }
    });
    dataService.importData(reader);
    // Both holders must have been saved, with client ids matching the fixtures.
    verify(authHolderRepository, times(2)).save(capturedAuthHolders.capture());
    List<AuthenticationHolderEntity> savedAuthHolders = capturedAuthHolders.getAllValues();
    assertThat(savedAuthHolders.size(), is(2));
    assertThat(savedAuthHolders.get(0).getAuthentication().getOAuth2Request().getClientId(), equalTo(holder1.getAuthentication().getOAuth2Request().getClientId()));
    assertThat(savedAuthHolders.get(1).getAuthentication().getOAuth2Request().getClientId(), equalTo(holder2.getAuthentication().getOAuth2Request().getClientId()));
}
/**
 * Builds the UserCodeNamespaceConfig described by the request parameters,
 * attaching every transported resource definition to it.
 *
 * @return the reconstructed namespace configuration
 */
@Override
protected IdentifiedDataSerializable getConfig() {
    final UserCodeNamespaceConfig namespaceConfig = new UserCodeNamespaceConfig(parameters.name);
    parameters.resources.forEach(resource -> ConfigAccessor.add(namespaceConfig, resource));
    return namespaceConfig;
}
/**
 * Round-trip check: encoding a namespace config into a client message and
 * running the task must reproduce an equal config object.
 */
@Test
public void test() {
    UserCodeNamespaceConfig config = new UserCodeNamespaceConfig("my-namespace");
    AddUserCodeNamespaceConfigMessageTask task = createMessageTask(
            DynamicConfigAddUserCodeNamespaceConfigCodec.encodeRequest(config.getName(),
                    ConfigAccessor.getResourceDefinitions(config)
                            .stream()
                            .map(ResourceDefinitionHolder::new)
                            .collect(Collectors.toList())));
    task.run();
    assertEquals(config, task.getConfig());
}
/**
 * Adds all JAR files contained in the ZIP archive at the given URL to the
 * job's classpath resources. Delegates to add() with no explicit resource id.
 *
 * @param url location of the ZIP archive; must not be null
 * @return this JobConfig, for fluent chaining
 */
@Nonnull
public JobConfig addJarsInZip(@Nonnull URL url) {
    return add(url, null, ResourceType.JARS_IN_ZIP);
}
/**
 * Registering two separate ZIP-of-JARs resources must not throw.
 * NOTE(review): this test contains no assertions — it only checks that
 * addJarsInZip() accepts both archives without raising; consider asserting on
 * the registered resource configs.
 */
@Test
public void testMultipleClassesFromZIP() {
    JobConfig jobConfig = new JobConfig();
    jobConfig.addJarsInZip(getClass().getResource("/zip-resources/person-jar.zip"));
    jobConfig.addJarsInZip(getClass().getResource("/zip-resources/person-car-jar.zip"));
}
/**
 * Starts a new LRA at the coordinator and asynchronously returns its id URL.
 * The id is extracted, in order of preference, from the Location header, the
 * Long-Running-Action header (only when exactly one is present), or the
 * response body. The returned future completes exceptionally with
 * IllegalStateException on a 4xx/5xx status or when no id can be extracted.
 *
 * @param exchange the Camel exchange used to prepare the coordinator request
 * @return future yielding the new LRA's id URL
 */
public CompletableFuture<URL> newLRA(Exchange exchange) {
    HttpRequest request = prepareRequest(URI.create(lraUrl + COORDINATOR_PATH_START), exchange)
            .POST(HttpRequest.BodyPublishers.ofString(""))
            .build();
    CompletableFuture<HttpResponse<String>> future = client.sendAsync(request, HttpResponse.BodyHandlers.ofString());
    return future.thenApply(res -> {
        if (res.statusCode() >= HttpURLConnection.HTTP_BAD_REQUEST) {
            LOG.debug("LRA coordinator responded with error code {}. Message: {}", res.statusCode(), res.body());
            throw new IllegalStateException(
                    "Cannot obtain LRA id from LRA coordinator due to response status code " + res.statusCode());
        }
        // See if there's a location header containing the LRA URL
        List<String> location = res.headers().map().get("Location");
        if (ObjectHelper.isNotEmpty(location)) {
            return toURL(location.get(0));
        }
        // If there's no location header try the Long-Running-Action header, assuming there's only one present in the response
        List<String> lraHeaders = res.headers().map().get(Exchange.SAGA_LONG_RUNNING_ACTION);
        if (ObjectHelper.isNotEmpty(lraHeaders) && lraHeaders.size() == 1) {
            return toURL(lraHeaders.get(0));
        }
        // Fallback to reading the URL from the response body
        String responseBody = res.body();
        if (ObjectHelper.isNotEmpty(responseBody)) {
            return toURL(responseBody);
        }
        throw new IllegalStateException("Cannot obtain LRA id from LRA coordinator");
    });
}
/**
 * newLRA() must build its HTTP request via prepareRequest() with the exchange
 * that was passed in; the anonymous subclass throws a marker exception
 * carrying the exchange to prove the call happened.
 */
@DisplayName("Tests whether LRAClient is calling prepareRequest with exchange from newLRA()")
@Test
void testCallsPrepareRequestWithExchangeInNewLra() {
    LRASagaService sagaService = new LRASagaService();
    applyMockProperties(sagaService);
    LRAClient client = new LRAClient(sagaService) {
        protected HttpRequest.Builder prepareRequest(URI uri, Exchange exchange) {
            throw new ExchangeRuntimeException(exchange);
        }
    };
    Exchange exchange = Mockito.mock(Exchange.class);
    Assertions.assertThrows(ExchangeRuntimeException.class, () -> client.newLRA(exchange));
}
/**
 * Renders a Groovy template against the default context merged with the
 * caller-supplied context, then resets classes loaded by the Groovy shell to
 * avoid accumulating generated classes.
 *
 * @param template   Groovy template source
 * @param newContext extra bindings that override the defaults
 * @return the rendered template text
 * @throws IOException            on template parse/IO failure
 * @throws ClassNotFoundException if the generated template class is missing
 */
public static String renderTemplate(String template, Map<String, Object> newContext)
        throws IOException, ClassNotFoundException {
    final Map<String, Object> mergedContext = getDefaultContextMap();
    mergedContext.putAll(newContext);
    final String rendered = GROOVY_TEMPLATE_ENGINE.createTemplate(template).make(mergedContext).toString();
    GROOVY_SHELL.resetLoadedClasses();
    return rendered;
}
/**
 * The default template context must expose 'today' and 'yesterday' rendered
 * as UTC yyyy-MM-dd dates.
 * NOTE(review): the expected dates are computed independently of the
 * template engine — this can flake if execution straddles a UTC midnight
 * boundary.
 */
@Test
public void testDefaultRenderTemplate() throws IOException, ClassNotFoundException {
    Date today = new Date(Instant.now().toEpochMilli());
    Date yesterday = new Date(Instant.now().minus(1, ChronoUnit.DAYS).toEpochMilli());
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
    dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
    Assert.assertEquals(GroovyTemplateUtils.renderTemplate("${ today }"), dateFormat.format(today));
    Assert.assertEquals(GroovyTemplateUtils.renderTemplate("${ yesterday }"), dateFormat.format(yesterday));
}
/**
 * Migrates a config-repo JSON document to the given target version using the
 * JOLT transform registered for that version.
 *
 * @param oldJSON       document at the previous schema version
 * @param targetVersion schema version to migrate to
 * @return the migrated JSON document
 */
public String migrate(String oldJSON, int targetVersion) {
    LOGGER.debug("Migrating to version {}: {}", targetVersion, oldJSON);
    final Chainr transformer = getTransformerFor(targetVersion);
    final Object migrated = transformer.transform(JsonUtils.jsonToMap(oldJSON), getContextMap(targetVersion));
    final String migratedJSON = JsonUtils.toJsonString(migrated);
    LOGGER.debug("After migration to version {}: {}", targetVersion, migratedJSON);
    return migratedJSON;
}
/**
 * Migrating v3 → v4 must default missing display-order weights to -1 while
 * leaving explicitly-set weights (10) untouched.
 */
@Test
void migrateV3ToV4_shouldDefaultDisplayOrderWeightsToMinusOneOnlyForPipelinesWithoutIt() {
    ConfigRepoDocumentMother documentMother = new ConfigRepoDocumentMother();
    String oldJSON = documentMother.v3ComprehensiveWithDisplayOrderWeightsOf10AndNull();
    String expectedJSON = documentMother.v4ComprehensiveWithDisplayOrderWeightsOf10AndMinusOne();
    String transformedJSON = migrator.migrate(oldJSON, 4);
    // BUG FIX: actual and expected were swapped — json-unit's assertThatJson()
    // takes the ACTUAL value; isEqualTo() takes the EXPECTED one. Reversing
    // them produced misleading failure messages.
    assertThatJson(transformedJSON).isEqualTo(expectedJSON);
}
/**
 * Serializes the key dictionary and flushes both the key and value storages.
 *
 * Key storage layout: [short keyCount] followed, per key, by
 * [short keyByteLen][key bytes][1 byte class short-name].
 * The value storage header persists the 64-bit bytePointer (split into low
 * and high ints) plus the storage format version, so appends can resume
 * after loadExisting().
 */
public void flush() {
    keys.ensureCapacity(2);
    keys.setShort(0, (short) keyToIndex.size());
    long keyBytePointer = 2;
    for (int i = 0; i < indexToKey.size(); i++) {
        String key = indexToKey.get(i);
        byte[] keyBytes = getBytesForValue(String.class, key);
        keys.ensureCapacity(keyBytePointer + 2 + keyBytes.length);
        keys.setShort(keyBytePointer, (short) keyBytes.length);
        keyBytePointer += 2;
        keys.setBytes(keyBytePointer, keyBytes, keyBytes.length);
        keyBytePointer += keyBytes.length;
        // Each key's value class is encoded as a single-byte short name.
        Class<?> clazz = indexToClass.get(i);
        byte[] clazzBytes = getBytesForValue(String.class, classToShortName(clazz));
        if (clazzBytes.length != 1)
            throw new IllegalArgumentException("class name byte length must be 1 but was " + clazzBytes.length);
        keys.ensureCapacity(keyBytePointer + 1);
        keys.setBytes(keyBytePointer, clazzBytes, 1);
        keyBytePointer += 1;
    }
    keys.setHeader(0, Constants.VERSION_KV_STORAGE);
    keys.flush();
    // Persist the current write position so loadExisting() can restore it.
    vals.setHeader(0, bitUtil.getIntLow(bytePointer));
    vals.setHeader(4, bitUtil.getIntHigh(bytePointer));
    vals.setHeader(8, Constants.VERSION_KV_STORAGE);
    vals.flush();
}
/**
 * Flushing, reopening, and appending again must preserve stored values and
 * restore the internal bytePointer from the persisted header.
 */
@Test
public void testFlush() {
    Helper.removeDir(new File(location));
    KVStorage index = new KVStorage(new RAMDirectory(location, true).create(), true);
    long pointer = index.add(createMap("", "test"));
    index.flush();
    index.close();
    index = new KVStorage(new RAMDirectory(location, true), true);
    assertTrue(index.loadExisting());
    assertEquals("test", index.get(pointer, "", false));
    // make sure bytePointer is correctly set after loadExisting
    long newPointer = index.add(createMap("", "testing"));
    // expected gap: 1 + 3 overhead bytes plus payload length — presumably the
    // entry's key-count and per-value header; confirm against KVStorage layout.
    assertEquals(pointer + 1 + 3 + "test".getBytes().length, newPointer, newPointer + ">" + pointer);
    index.close();
    Helper.removeDir(new File(location));
}
/**
 * Starts watching xDS resources for the given D2 cluster. Symlink names are
 * delegated to symlink handling; regular clusters register a node watcher at
 * most once — computeIfAbsent keeps the registration idempotent.
 *
 * @param clusterName D2 cluster (or symlink) name to watch
 */
public void listenToCluster(String clusterName) {
    // if cluster name is a symlink, watch for D2SymlinkNode instead
    String resourceName = D2_CLUSTER_NODE_PREFIX + clusterName;
    if (SymlinkUtil.isSymlinkNodeOrPath(clusterName)) {
        listenToSymlink(clusterName, resourceName);
    } else {
        _watchedClusterResources.computeIfAbsent(clusterName, k -> {
            XdsClient.NodeResourceWatcher watcher = getClusterResourceWatcher(clusterName);
            _xdsClient.watchXdsResource(resourceName, watcher);
            return watcher;
        });
    }
}
/**
 * End-to-end symlink flow: listening to a symlinked cluster must register
 * watchers for the symlink and for the actual cluster/uri resources, publish
 * cluster data under both the symlink and real names, and correctly follow a
 * symlink re-point to a new primary cluster.
 */
@Test
public void testListenToClusterSymlink() {
    XdsToD2PropertiesAdaptorFixture fixture = new XdsToD2PropertiesAdaptorFixture();
    fixture.getSpiedAdaptor().listenToCluster(SYMLINK_NAME);
    // verify symlink is watched
    verify(fixture._xdsClient).watchXdsResource(eq(CLUSTER_SYMLINK_RESOURCE_NAME), anyNodeWatcher());
    // update symlink data
    NodeResourceWatcher symlinkNodeWatcher = fixture._nodeWatcher;
    fixture._nodeWatcher.onChanged(getSymlinkNodeUpdate(PRIMARY_CLUSTER_RESOURCE_NAME));
    // verify both cluster and uri data of the actual cluster is watched
    verify(fixture._xdsClient).watchXdsResource(eq(PRIMARY_CLUSTER_RESOURCE_NAME), anyNodeWatcher());
    verify(fixture._xdsClient).watchXdsResource(eq(PRIMARY_URI_RESOURCE_NAME), anyNodeWatcher());
    // update cluster data
    NodeResourceWatcher clusterNodeWatcher = fixture._nodeWatcher;
    clusterNodeWatcher.onChanged(getClusterNodeUpdate(PRIMARY_CLUSTER_NAME));
    // verify cluster data is published under symlink name and actual cluster name
    verify(fixture._clusterEventBus).publishInitialize(SYMLINK_NAME, PRIMARY_CLUSTER_PROPERTIES);
    verify(fixture._clusterEventBus).publishInitialize(PRIMARY_CLUSTER_NAME, PRIMARY_CLUSTER_PROPERTIES);
    // test update symlink to a new primary cluster
    String primaryClusterResourceName2 = CLUSTER_NODE_PREFIX + PRIMARY_CLUSTER_NAME_2;
    ClusterStoreProperties primaryClusterProperties2 = new ClusterStoreProperties(PRIMARY_CLUSTER_NAME_2);
    symlinkNodeWatcher.onChanged(getSymlinkNodeUpdate(primaryClusterResourceName2));
    verify(fixture._xdsClient).watchXdsResource(eq(primaryClusterResourceName2), anyNodeWatcher());
    verify(fixture._xdsClient).watchXdsResource(eq(URI_NODE_PREFIX + PRIMARY_CLUSTER_NAME_2), anyMapWatcher());
    verifyClusterNodeUpdate(fixture, PRIMARY_CLUSTER_NAME_2, SYMLINK_NAME, primaryClusterProperties2);
    // if the old primary cluster gets an update, it will be published under its original cluster name
    // since the symlink points to the new primary cluster now.
    clusterNodeWatcher.onChanged(getClusterNodeUpdate(PRIMARY_CLUSTER_NAME_2));
    verify(fixture._clusterEventBus).publishInitialize(PRIMARY_CLUSTER_NAME, primaryClusterProperties2);
    // verify symlink is published just once
    verify(fixture._clusterEventBus).publishInitialize(SYMLINK_NAME, primaryClusterProperties2);
}
/**
 * Resolves a RateLimiterAlgorithm SPI implementation by name, falling back to
 * a fresh token-bucket algorithm when no extension matches the name.
 *
 * @param name SPI join name of the desired algorithm
 * @return the matching algorithm, or a TokenBucketRateLimiterAlgorithm default
 */
public static RateLimiterAlgorithm<?> newInstance(final String name) {
    final RateLimiterAlgorithm<?> resolved = ExtensionLoader.getExtensionLoader(RateLimiterAlgorithm.class).getJoin(name);
    return resolved != null ? resolved : new TokenBucketRateLimiterAlgorithm();
}
/**
 * Each registered algorithm name must resolve to its corresponding SPI
 * implementation class.
 */
@Test
public void newInstanceTest() {
    RateLimiterAlgorithm<?> leakyBucketLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance("leakyBucket");
    assertThat(leakyBucketLimiterAlgorithm.getClass().getName(), is("org.apache.shenyu.plugin.ratelimiter.algorithm.LeakyBucketRateLimiterAlgorithm"));
    RateLimiterAlgorithm<?> concurrentRateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance("concurrent");
    assertThat(concurrentRateLimiterAlgorithm.getClass().getName(), is("org.apache.shenyu.plugin.ratelimiter.algorithm.ConcurrentRateLimiterAlgorithm"));
    RateLimiterAlgorithm<?> tokenBucketRateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance("tokenBucket");
    assertThat(tokenBucketRateLimiterAlgorithm.getClass().getName(), is("org.apache.shenyu.plugin.ratelimiter.algorithm.TokenBucketRateLimiterAlgorithm"));
    RateLimiterAlgorithm<?> slidingWindowRateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance("slidingWindow");
    assertThat(slidingWindowRateLimiterAlgorithm.getClass().getName(), is("org.apache.shenyu.plugin.ratelimiter.algorithm.SlidingWindowRateLimiterAlgorithm"));
}
/**
 * Returns the flags word with the sampling decision recorded: the
 * "decision was made" marker bit is always set, and the sampled bit is set
 * or cleared according to the decision.
 *
 * @param sampled whether the trace was sampled
 * @param flags   current flags word
 * @return updated flags word
 */
public static int sampled(boolean sampled, int flags) {
    final int marked = flags | FLAG_SAMPLED_SET;
    return sampled ? (marked | FLAG_SAMPLED) : (marked & ~FLAG_SAMPLED);
}
/**
 * sampled(true, 0) must turn on both the "decision recorded" marker and the
 * sampled bit itself.
 */
@Test
void set_sampled_true() {
    // Combine the flag constants with bitwise-or, matching how they are used.
    int expected = FLAG_SAMPLED_SET | FLAG_SAMPLED;
    assertThat(sampled(true, 0)).isEqualTo(expected);
}
/**
 * Contacts the update server with a POST and records a newer javamelody
 * version if one is advertised. The anonymous usage data is attached as a
 * request property named "data"; the response is parsed as a Java properties
 * stream containing a "version" entry.
 * NOTE(review): setDoOutput(true) is set but no request body is ever written
 * — presumably the server reads only the "data" request property; confirm.
 *
 * @throws IOException on connection or read failure
 */
void checkForUpdate() throws IOException {
    final String anonymousData = getAnonymousData();
    final HttpURLConnection connection = (HttpURLConnection) new URL(serverUrl).openConnection();
    connection.setUseCaches(false);
    connection.setDoOutput(true);
    connection.setRequestMethod("POST");
    connection.setConnectTimeout(60000);
    connection.setReadTimeout(60000);
    connection.setRequestProperty("data", anonymousData);
    connection.connect();
    final Properties properties = new Properties();
    try (InputStream input = connection.getInputStream()) {
        properties.load(input);
    }
    final String javamelodyVersion = properties.getProperty("version");
    // Only record when the advertised version compares as strictly newer.
    if (javamelodyVersion != null && Parameters.JAVAMELODY_VERSION != null && javamelodyVersion.compareTo(Parameters.JAVAMELODY_VERSION) > 0) {
        setNewJavamelodyVersion(javamelodyVersion);
    }
}
/**
 * Exercises checkForUpdate() for both collector-server and webapp modes
 * against an unreachable host; UnknownHostException is the accepted outcome
 * for the dummy URL.
 */
@Test
public void testCheckForUpdate() throws IOException {
    // Minimal servlet context so Parameters can be initialized.
    final ServletContext context = createNiceMock(ServletContext.class);
    expect(context.getMajorVersion()).andReturn(5).anyTimes();
    expect(context.getMinorVersion()).andReturn(0).anyTimes();
    expect(context.getContextPath()).andReturn("/test").anyTimes();
    replay(context);
    Parameters.initialize(context);
    verify(context);
    final Collector collector = new Collector("test", List.of(new Counter("http", null), new Counter("sql", null)));
    JRobin.initBackendFactory(new Timer(getClass().getSimpleName(), true));
    assertNotNull("SessionListener", new SessionListener());
    TestDatabaseInformations.initJdbcDriverParameters();
    collector.collectWithoutErrors(List.of(new JavaInformations(null, true)));
    final String serverUrl = "http://dummy";
    final UpdateChecker updateCheckerCollectorServer = UpdateChecker.createForTest(null, UpdateChecker.COLLECTOR_SERVER_APPLICATION_TYPE, serverUrl);
    try {
        updateCheckerCollectorServer.checkForUpdate();
    } catch (final UnknownHostException e) {
        // UnknownHostException is ok for url http://dummy
        assertNotNull("updateCheckerCollectorServer", updateCheckerCollectorServer);
    }
    Utils.setProperty(Parameter.NO_DATABASE, "true");
    Utils.setProperty(Parameter.LOG, "true");
    if (CacheManager.getInstance().getCache("test") == null) {
        CacheManager.getInstance().addCache("test");
    }
    final UpdateChecker updateChecker = UpdateChecker.createForTest(collector, "Classic", serverUrl);
    try {
        updateChecker.checkForUpdate();
    } catch (final UnknownHostException e) {
        // UnknownHostException is ok for url http://dummy
        assertNotNull("updateChecker", updateChecker);
    }
    CacheManager.getInstance().removeCache("test");
}
/**
 * Returns the maximum container allocation for the named queue, capped by the
 * scheduler-level maximum. Falls back to the scheduler-level maximum when the
 * queue name is missing, the queue is unknown, or the queue sets no bound.
 *
 * @param queueName fully qualified queue name, may be null or empty
 * @return the effective per-container resource cap
 */
@Override
public Resource getMaximumResourceCapability(String queueName) {
    if (queueName == null || queueName.isEmpty()) {
        return getMaximumResourceCapability();
    }
    final FSQueue queue = queueMgr.getQueue(queueName);
    final Resource schedulerMax = getMaximumResourceCapability();
    if (queue == null) {
        return schedulerMax;
    }
    final Resource queueMax = queue.getMaximumContainerAllocation();
    // An "unbounded" queue setting means no per-queue limit was configured.
    return queueMax.equals(Resources.unbounded())
        ? schedulerMax
        : Resources.componentwiseMin(schedulerMax, queueMax);
}
/**
 * Per-queue maxContainerAllocation must override the scheduler default, be
 * inherited by child queues, and be capped at the scheduler-level maximum
 * when configured too high.
 */
@Test
public void testQueueMaximumCapacityAllocations() throws IOException {
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    int tooHighQueueAllocation = RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE +1;
    AllocationFileWriter.create()
        .addQueue(new AllocationFileQueue.Builder("queueA")
            .maxContainerAllocation("512 mb 1 vcores").build())
        .addQueue(new AllocationFileQueue.Builder("queueB").build())
        .addQueue(new AllocationFileQueue.Builder("queueC")
            .maxContainerAllocation("2048 mb 3 vcores")
            .subQueue(new AllocationFileQueue.Builder("queueD").build())
            .build())
        .addQueue(new AllocationFileQueue.Builder("queueE")
            .maxContainerAllocation(tooHighQueueAllocation + " mb 1 vcores")
            .build())
        .writeToFile(ALLOC_FILE);
    scheduler.init(conf);
    // queueA: explicit per-queue limit applies.
    Assert.assertEquals(1, scheduler.getMaximumResourceCapability("root.queueA").getVirtualCores());
    Assert.assertEquals(512, scheduler.getMaximumResourceCapability("root.queueA").getMemorySize());
    // queueB: no per-queue limit, scheduler defaults apply.
    Assert.assertEquals(DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, scheduler.getMaximumResourceCapability("root.queueB").getVirtualCores());
    Assert.assertEquals(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, scheduler.getMaximumResourceCapability("root.queueB").getMemorySize());
    // queueC and its child queueD: the parent's limit is inherited.
    Assert.assertEquals(3, scheduler.getMaximumResourceCapability("root.queueC").getVirtualCores());
    Assert.assertEquals(2048, scheduler.getMaximumResourceCapability("root.queueC").getMemorySize());
    Assert.assertEquals(3, scheduler.getMaximumResourceCapability("root.queueC.queueD").getVirtualCores());
    Assert.assertEquals(2048, scheduler.getMaximumResourceCapability("root.queueC.queueD").getMemorySize());
    // queueE: configured above the scheduler max, so it is clamped down.
    Assert.assertEquals(RM_SCHEDULER_MAXIMUM_ALLOCATION_MB_VALUE, scheduler.getMaximumResourceCapability("root.queueE").getMemorySize());
}
/**
 * Applies the proxy's configured backend query fetch size to the statement.
 * When the property is still at its default value, a fetch size of 1 is used
 * so results are streamed row by row.
 *
 * @param statement statement to configure
 * @throws SQLException if setting the fetch size fails
 */
@Override
public void setFetchSize(final Statement statement) throws SQLException {
    final int configuredFetchSize = ProxyContext.getInstance()
            .getContextManager().getMetaDataContexts().getMetaData().getProps().<Integer>getValue(ConfigurationPropertyKey.PROXY_BACKEND_QUERY_FETCH_SIZE);
    final boolean isDefaultValue = ConfigurationPropertyKey.PROXY_BACKEND_QUERY_FETCH_SIZE.getDefaultValue().equals(String.valueOf(configuredFetchSize));
    statement.setFetchSize(isDefaultValue ? 1 : configuredFetchSize);
}
/**
 * With the fetch-size property at its default, the setter must apply a fetch
 * size of 1 to the statement.
 */
@Test
void assertSetFetchSize() throws SQLException {
    Statement statement = mock(Statement.class);
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    new PostgreSQLStatementMemoryStrictlyFetchSizeSetter().setFetchSize(statement);
    verify(statement).setFetchSize(1);
}
/**
 * Updates the queue's capacity bound. When the new capacity exceeds the
 * current element count and the queue was previously at (or beyond) its old
 * capacity, producers blocked on "not full" are signalled.
 * NOTE(review): capacity and count are read without a common lock here —
 * presumably a racy signal is acceptable for this queue; confirm against the
 * surrounding locking discipline.
 *
 * @param capacity the new capacity bound
 */
public void setCapacity(int capacity) {
    final int oldCapacity = this.capacity;
    this.capacity = capacity;
    final int size = count.get();
    // Signal only on a growth transition out of the previously-full state.
    if (capacity > size && size >= oldCapacity) {
        signalNotFull();
    }
}
/**
 * Growing the queue capacity at runtime must let all submitted tasks run to
 * completion with no rejections.
 */
@Test
public void testIncreaseResizableCapacityLinkedBlockingQueue() throws InterruptedException {
    MyRejectedExecutionHandler myRejectedExecutionHandler = new MyRejectedExecutionHandler();
    ResizableCapacityLinkedBlockingQueue<Runnable> queue = new ResizableCapacityLinkedBlockingQueue();
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, queue, myRejectedExecutionHandler);
    Assert.assertSame(queue, threadPoolExecutor.getQueue());
    threadPoolExecutor.prestartAllCoreThreads();
    queue.setCapacity(6);
    IntStream.range(0, 4).forEach(s -> {
        threadPoolExecutor.execute(() -> ThreadUtil.sleep(0L));
    });
    threadPoolExecutor.shutdown();
    // FIX: the original busy-spun on isTerminated() in an empty while loop,
    // burning a CPU core and hanging forever on failure. Use a bounded
    // blocking wait instead and fail fast if the pool never terminates.
    Assert.assertTrue("executor did not terminate in time",
            threadPoolExecutor.awaitTermination(30, TimeUnit.SECONDS));
    Assert.assertEquals(4, threadPoolExecutor.getCompletedTaskCount());
    Assert.assertEquals(0, myRejectedExecutionHandler.getCount());
}
/**
 * Converts a WAL event into a pipeline record. Filtered events and non-row
 * events become placeholder records; row events are dispatched to the
 * write/update/delete handlers with the table's metadata.
 *
 * @param event the decoded WAL event
 * @return the converted record
 * @throws UnsupportedSQLOperationException for unrecognized row event types
 */
public Record convert(final AbstractWALEvent event) {
    // Filtered events and non-row events (e.g. BEGIN/COMMIT) carry no row data.
    if (filter(event) || !(event instanceof AbstractRowEvent)) {
        return createPlaceholderRecord(event);
    }
    final PipelineTableMetaData metaData = getPipelineTableMetaData(((AbstractRowEvent) event).getTableName());
    if (event instanceof WriteRowEvent) {
        return handleWriteRowEvent((WriteRowEvent) event, metaData);
    } else if (event instanceof UpdateRowEvent) {
        return handleUpdateRowEvent((UpdateRowEvent) event, metaData);
    } else if (event instanceof DeleteRowEvent) {
        return handleDeleteRowEvent((DeleteRowEvent) event, metaData);
    }
    throw new UnsupportedSQLOperationException("");
}
/**
 * A transaction-commit WAL event is not a row event, so convert() must yield
 * a placeholder record that still carries the event's log sequence number.
 */
@Test
void assertConvertCommitTXEvent() {
    CommitTXEvent commitTXEvent = new CommitTXEvent(1L, 3468L);
    commitTXEvent.setLogSequenceNumber(new PostgreSQLLogSequenceNumber(logSequenceNumber));
    Record record = walEventConverter.convert(commitTXEvent);
    assertInstanceOf(PlaceholderRecord.class, record);
    assertThat(((WALPosition) record.getPosition()).getLogSequenceNumber().asString(), is(logSequenceNumber.asString()));
}
/**
 * Skips uploads of local regular files that already exist remotely; all
 * other paths fall through to the parent filter's decision.
 *
 * @param file   remote path candidate
 * @param local  local counterpart
 * @param parent status of the parent directory transfer
 * @return false to skip an already-present file, otherwise the parent verdict
 * @throws BackgroundException on lookup failure
 */
@Override
public boolean accept(final Path file, final Local local, final TransferStatus parent) throws BackgroundException {
    // Same short-circuit order as before: parent existence, file type, remote lookup.
    if (parent.isExists() && local.isFile() && find.find(file)) {
        if (log.isInfoEnabled()) {
            log.info(String.format("Skip file %s", file));
        }
        return false;
    }
    return super.accept(file, local, parent);
}
/**
 * Directories must never be skipped, even when the local path exists and the
 * parent transfer reports existence — only regular files are subject to the
 * skip rule.
 */
@Test
public void testAcceptDirectory() throws Exception {
    SkipFilter f = new SkipFilter(new DisabledUploadSymlinkResolver(), new NullSession(new Host(new TestProtocol())) {
        @Override
        public AttributedList<Path> list(final Path file, final ListProgressListener listener) {
            return AttributedList.emptyList();
        }
    });
    f.withAttributes(new AttributesFinder() {
        @Override
        public PathAttributes find(final Path file, final ListProgressListener listener) {
            return file.attributes();
        }
    });
    assertTrue(f.accept(new Path("a", EnumSet.of(Path.Type.directory)), new NullLocal("a") {
        @Override
        public boolean exists() {
            return true;
        }
    }, new TransferStatus().exists(true)));
}
/**
 * Returns a shared Snowflake id generator for the given worker/datacenter
 * pair; the same (workerId, datacenterId) combination always yields the same
 * cached instance via the Singleton registry.
 *
 * @param workerId     worker node id
 * @param datacenterId datacenter id
 * @return the cached Snowflake generator for this pair
 */
public static Snowflake getSnowflake(long workerId, long datacenterId) {
    return Singleton.get(Snowflake.class, workerId, datacenterId);
}
/**
 * Concurrency stress test (disabled by default): 100 threads each generating
 * 10k snowflake ids must produce fully unique ids across all threads.
 */
@Test
@Disabled
public void snowflakeBenchTest2() {
    final Set<Long> set = new ConcurrentHashSet<>();
    // number of worker threads
    int threadCount = 100;
    // ids generated per thread
    final int idCountPerThread = 10000;
    final CountDownLatch latch = new CountDownLatch(threadCount);
    for(int i =0; i < threadCount; i++) {
        ThreadUtil.execute(() -> {
            for(int i1 = 0; i1 < idCountPerThread; i1++) {
                long id = IdUtil.getSnowflake(1, 1).nextId();
                set.add(id);
                // Console.log("Add new id: {}", id);
            }
            latch.countDown();
        });
    }
    // wait for all threads to finish
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new UtilException(e);
    }
    assertEquals(threadCount * idCountPerThread, set.size());
}
/**
 * Instructs the sentinel to start monitoring the given master, forwarding
 * its name, host, port and quorum via the SENTINEL MONITOR command.
 *
 * @param master the master definition to register with the sentinel
 */
@Override
public void monitor(RedisServer master) {
    connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(), master.getPort().intValue(), master.getQuorum().intValue());
}
/**
 * Smoke test: re-registering an existing master under a modified name must
 * not throw.
 * NOTE(review): no assertions — consider verifying the renamed master shows
 * up in masters() afterwards.
 */
@Test
public void testMonitor() {
    Collection<RedisServer> masters = connection.masters();
    RedisServer master = masters.iterator().next();
    master.setName(master.getName() + ":");
    connection.monitor(master);
}
/**
 * Fetches the value for the given key at the given timestamp, searching each
 * underlying window store in order and returning the first non-null hit.
 * Returns null when no store holds the key.
 *
 * @param key  the key to look up; must not be null
 * @param time the window timestamp
 * @return the first matching value, or null if absent everywhere
 * @throws InvalidStateStoreException if a store has been migrated away
 */
@Override
public V fetch(final K key, final long time) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
    for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
        try {
            final V result = windowStore.fetch(key, time);
            if (result != null) {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // FIX: propagate the original exception as the cause instead of
            // dropping it — the underlying store's failure detail was lost.
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; "
                    + "please re-discover its location from the state metadata.", e);
        }
    }
    return null;
}
/**
 * With two underlying stores registered on the provider, fetch() must locate
 * each key in whichever store actually holds it.
 */
@Test
public void shouldFindValueForKeyWhenMultiStores() {
    final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
    stubProviderTwo.addStore(storeName, secondUnderlying);
    underlyingWindowStore.put("key-one", "value-one", 0L);
    secondUnderlying.put("key-two", "value-two", 10L);
    final List<KeyValue<Long, String>> keyOneResults = StreamsTestUtils.toList(windowStore.fetch("key-one", ofEpochMilli(0L), ofEpochMilli(1L)));
    final List<KeyValue<Long, String>> keyTwoResults = StreamsTestUtils.toList(windowStore.fetch("key-two", ofEpochMilli(10L), ofEpochMilli(11L)));
    assertEquals(Collections.singletonList(KeyValue.pair(0L, "value-one")), keyOneResults);
    assertEquals(Collections.singletonList(KeyValue.pair(10L, "value-two")), keyTwoResults);
}
/**
 * Always reports success without deleting anything: state created before a
 * commit cannot be cleaned up safely by this writer.
 *
 * @param resumable the recoverable state handle (ignored)
 * @return true, unconditionally
 */
@Override
public boolean cleanupRecoverableState(ResumeRecoverable resumable) {
    // we can't safely clean up any state prior to commit, so do nothing here
    // see discussion: https://github.com/apache/flink/pull/15599#discussion_r623127365
    return true;
}
/**
 * cleanupRecoverableState() must report success — it intentionally performs
 * no cleanup before commit.
 */
@Test
public void testCleanupRecoverableState() {
    assertTrue(writer.cleanupRecoverableState(resumeRecoverable));
}
/**
 * Returns a copy of the given continuation token whose partition is narrowed
 * to the intersection of the token's partition with the parent partition.
 *
 * @param parentPartition partition of the parent being claimed
 * @param token           original continuation token
 * @return a token for the intersecting partition, carrying the same token value
 * @throws IllegalArgumentException if the two partitions do not overlap
 */
public static ChangeStreamContinuationToken getTokenWithCorrectPartition(
    ByteStringRange parentPartition, ChangeStreamContinuationToken token)
    throws IllegalArgumentException {
  final ByteStringRange narrowedPartition = getIntersectingPartition(parentPartition, token.getPartition());
  return ChangeStreamContinuationToken.create(narrowedPartition, token.getToken());
}
/**
 * Covers every overlap case between a parent partition and a token partition:
 * the resulting token's partition is always the intersection of the two, and
 * disjoint ranges must throw IllegalArgumentException.
 */
@Test
public void testGetTokenWithCorrectPartition() {
    ChangeStreamContinuationToken token1 = ChangeStreamContinuationToken.create(ByteStringRange.create("A", "D"), "token1");
    ChangeStreamContinuationToken token2 = ChangeStreamContinuationToken.create(ByteStringRange.create("B", "E"), "token2");
    ByteStringRange parentPartition1 = ByteStringRange.create("A", "F");
    ByteStringRange parentPartition2 = ByteStringRange.create("C", "F");
    ByteStringRange parentPartition3 = ByteStringRange.create("C", "D");
    ByteStringRange parentPartition4 = ByteStringRange.create("A", "B");
    // Parent fully covers both tokens: token partitions are unchanged.
    assertEquals(ByteStringRange.create("A", "D"), getTokenWithCorrectPartition(parentPartition1, token1).getPartition());
    assertEquals(ByteStringRange.create("B", "E"), getTokenWithCorrectPartition(parentPartition1, token2).getPartition());
    // Partial overlaps: result is the intersection.
    assertEquals(ByteStringRange.create("C", "D"), getTokenWithCorrectPartition(parentPartition2, token1).getPartition());
    assertEquals(ByteStringRange.create("C", "E"), getTokenWithCorrectPartition(parentPartition2, token2).getPartition());
    assertEquals(ByteStringRange.create("C", "D"), getTokenWithCorrectPartition(parentPartition3, token1).getPartition());
    assertEquals(ByteStringRange.create("C", "D"), getTokenWithCorrectPartition(parentPartition3, token2).getPartition());
    assertEquals(ByteStringRange.create("A", "B"), getTokenWithCorrectPartition(parentPartition4, token1).getPartition());
    // Disjoint ranges ("A","B") vs ("B","E") must fail.
    assertThrows(IllegalArgumentException.class, () -> getTokenWithCorrectPartition(parentPartition4, token2));
}
/**
 * Program entry point: wires an event dispatcher with handlers for the two
 * user-event types, then dispatches one event of each type for a sample user.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    final EventDispatcher dispatcher = new EventDispatcher();
    dispatcher.registerHandler(UserCreatedEvent.class, new UserCreatedEventHandler());
    dispatcher.registerHandler(UserUpdatedEvent.class, new UserUpdatedEventHandler());
    final User user = new User("iluwatar");
    dispatcher.dispatch(new UserCreatedEvent(user));
    dispatcher.dispatch(new UserUpdatedEvent(user));
}
/**
 * The demo application's main() must complete without throwing.
 */
@Test
void shouldExecuteApplicationWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Updates the metadata of a registered instance. Fails with INVALID_PARAM
 * when the owning service singleton does not exist; otherwise writes the
 * instance metadata through the metadata operate service.
 *
 * @param namespaceId namespace of the service
 * @param serviceName name of the service
 * @param instance    instance whose metadata is being updated
 * @throws NacosException if the instance is illegal or the service is absent
 */
@Override
public void updateInstance(String namespaceId, String serviceName, Instance instance) throws NacosException {
    NamingUtils.checkInstanceIsLegal(instance);
    Service service = getService(namespaceId, serviceName, instance.isEphemeral());
    if (!ServiceManager.getInstance().containSingleton(service)) {
        throw new NacosApiException(NacosException.INVALID_PARAM, ErrorCode.INSTANCE_ERROR, "service not found, namespace: " + namespaceId + ", service: " + service);
    }
    // The metadata id is derived from ip, port and cluster name.
    String metadataId = InstancePublishInfo.genMetadataId(instance.getIp(), instance.getPort(), instance.getClusterName());
    metadataOperateService.updateInstanceMetadata(service, metadataId, buildMetadata(instance));
}
/**
 * updateInstance() must delegate the metadata write to the metadata operate
 * service for a legal instance of an existing service.
 */
@Test
void testUpdateInstance() throws NacosException {
    Instance instance = new Instance();
    instance.setServiceName("C");
    instanceOperatorClient.updateInstance("A", "C", instance);
    Mockito.verify(metadataOperateService).updateInstanceMetadata(Mockito.any(), Mockito.any(), Mockito.any());
}
/**
 * Translates FairScheduler (FS) site-level settings from {@code conf} into their
 * CapacityScheduler (CS) equivalents, writing the results into {@code yarnSiteConfig}.
 * Covers continuous/async scheduling, preemption, auto queue deletion, multiple
 * assignments per heartbeat, locality thresholds, size-based weight and the DRF
 * resource calculator.
 *
 * Side effects: sets the instance flags {@code preemptionEnabled} and
 * {@code sizeBasedWeight} when the corresponding FS features are on.
 */
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
    Configuration yarnSiteConfig, boolean drfUsed,
    boolean enableAsyncScheduler, boolean userPercentage,
    FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
  // The converted cluster must run the CapacityScheduler.
  yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class.getCanonicalName());

  // FS "continuous scheduling" maps to CS asynchronous scheduling with the same interval.
  if (conf.getBoolean(
      FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
      FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    int interval = conf.getInt(
        FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
        FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
    yarnSiteConfig.setInt(PREFIX +
        "schedule-asynchronously.scheduling-interval-ms", interval);
  }

  // This should be always true to trigger cs auto
  // refresh queue.
  yarnSiteConfig.setBoolean(
      YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);

  if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
      FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
    preemptionEnabled = true;
    // Append the proportional preemption policy to the (possibly pre-existing)
    // monitor policy list — order of addMonitorPolicy calls matters.
    String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
        class.getCanonicalName(), yarnSiteConfig);
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
        policies);
    int waitTimeBeforeKill = conf.getInt(
        FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
        FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
    yarnSiteConfig.setInt(
        CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
        waitTimeBeforeKill);
    long waitBeforeNextStarvationCheck = conf.getLong(
        FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
        FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
    yarnSiteConfig.setLong(
        CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
        waitBeforeNextStarvationCheck);
  } else {
    // Preemption is off in FS; NO_POLICY means the user asked to clear the policy list.
    if (preemptionMode ==
        FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
      yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
    }
  }

  // For auto created queue's auto deletion.
  if (!userPercentage) {
    String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
        class.getCanonicalName(), yarnSiteConfig);
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
        policies);

    // Set the expired for deletion interval to 10s, consistent with fs.
    yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
        AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
  }

  // FS assign-multiple maps directly onto the CS equivalent (explicitly set either way).
  if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
      FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
  } else {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
  }

  // Make auto cs conf refresh enabled.
  yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
          .class.getCanonicalName(), yarnSiteConfig));

  // Only write CS values when the FS value differs from its default, so the
  // converted config stays minimal.
  int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
      FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
  if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
    yarnSiteConfig.setInt(
        CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT, maxAssign);
  }

  float localityThresholdNode = conf.getFloat(
      FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
  if (localityThresholdNode !=
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
    yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
        localityThresholdNode);
  }

  float localityThresholdRack = conf.getFloat(
      FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
  if (localityThresholdRack !=
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
    yarnSiteConfig.setFloat(
        CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
        localityThresholdRack);
  }

  if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
      FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
    sizeBasedWeight = true;
  }

  if (drfUsed) {
    // Dominant Resource Fairness in FS maps to the DominantResourceCalculator in CS.
    yarnSiteConfig.set(
        CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
        DominantResourceCalculator.class.getCanonicalName());
  }

  if (enableAsyncScheduler) {
    yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
  }
}
@Test
public void testAsyncSchedulingDisabledConversion() {
    // Convert without the async-scheduler flag; the CS setting must remain at its default (off).
    converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, false,
        false, null);

    boolean asyncSchedulingEnabled = yarnConvertedConfig.getBoolean(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE,
        CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE);
    assertFalse("Asynchronous scheduling", asyncSchedulingEnabled);
}
/**
 * Resolves a via-way restriction's member ways into graph edges. For every (from, to)
 * pair there must be exactly one chain of via-edges connecting them.
 */
public EdgeResult convertForViaWays(LongArrayList fromWays, LongArrayList viaWays, LongArrayList toWays) throws OSMRestrictionException {
    if (fromWays.isEmpty() || toWays.isEmpty() || viaWays.isEmpty())
        throw new IllegalArgumentException("There must be at least one from-, via- and to-way");
    if (fromWays.size() > 1 && toWays.size() > 1)
        throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both");
    // Collect one edge chain per connected (from, to) pair.
    List<IntArrayList> edgeChains = new ArrayList<>();
    for (LongCursor fromWay : fromWays)
        for (LongCursor toWay : toWays)
            findEdgeChain(fromWay.value, viaWays, toWay.value, edgeChains);
    int expectedChains = fromWays.size() * toWays.size();
    if (edgeChains.size() < expectedChains)
        throw new OSMRestrictionException("has disconnected member ways");
    if (edgeChains.size() > expectedChains)
        throw new OSMRestrictionException("has member ways that do not form a unique path");
    return buildResult(edgeChains, new EdgeResult(fromWays.size(), viaWays.size(), toWays.size()));
}
@Test
void convertForViaWays_multipleEdgesForViaWay() throws OSMRestrictionException {
    // Linear graph 0-1-2-3-4, one edge per consecutive node pair (edge ids 0..3).
    BaseGraph graph = new BaseGraph.Builder(1).create();
    graph.edge(0, 1);
    graph.edge(1, 2);
    graph.edge(2, 3);
    graph.edge(3, 4);
    // Maps OSM way ids to graph edge ids.
    LongFunction<Iterator<IntCursor>> edgesByWay = way -> {
        if (way == 0) return IntArrayList.from(0).iterator();
            // way 1 is split into the two edges 1 and 2
        else if (way == 1) return IntArrayList.from(1, 2).iterator();
        else if (way == 2) return IntArrayList.from(3).iterator();
        else throw new IllegalArgumentException();
    };
    WayToEdgeConverter.EdgeResult edgeResult = new WayToEdgeConverter(graph, edgesByWay)
        .convertForViaWays(ways(0), ways(1), ways(2));
    // The split via-way must expand to both of its edges, and the node chain must
    // cover the inner nodes of the from->via->to path.
    assertEquals(IntArrayList.from(1, 2), edgeResult.getViaEdges());
    assertEquals(IntArrayList.from(1, 2, 3), edgeResult.getNodes());
}
/**
 * Builds Kafka producer properties from the connector options, resolving the
 * key serializer first and then the value serializer.
 */
static Properties resolveProducerProperties(Map<String, String> options, Object keySchema, Object valueSchema) {
    Properties producerProperties = from(options);
    withSerdeProducerProperties(true, options, keySchema, producerProperties);
    withSerdeProducerProperties(false, options, valueSchema, producerProperties);
    return producerProperties;
}
@Test
@Parameters(method = "producerValues")
public void test_producerProperties_java(String clazz, String serializer) {
    // key: the java format on the key must resolve to exactly the matching key serializer
    assertThat(resolveProducerProperties(Map.of(
            OPTION_KEY_FORMAT, JAVA_FORMAT,
            OPTION_KEY_CLASS, clazz
    ))).containsExactlyEntriesOf(Map.of(KEY_SERIALIZER, serializer));

    // value: the java format on the value must resolve to exactly the matching value serializer,
    // even when the key format is unknown
    assertThat(resolveProducerProperties(Map.of(
            OPTION_KEY_FORMAT, UNKNOWN_FORMAT,
            OPTION_VALUE_FORMAT, JAVA_FORMAT,
            OPTION_VALUE_CLASS, clazz)
    )).containsExactlyEntriesOf(Map.of(VALUE_SERIALIZER, serializer));
}
/**
 * Creates a SASL SCRAM-SHA-1 mechanism backed by the SHA-1 HMAC function.
 */
public SCRAMSHA1Mechanism() {
    super(SHA_1_SCRAM_HMAC);
}
@Test
public void testScramSha1Mechanism() throws NotConnectedException, SmackException, InterruptedException {
    final DummyConnection con = new DummyConnection();
    // Override getRandomAscii() to (a) inject the dummy connection as a side effect and
    // (b) pin the client nonce to the well-known RFC 5802 test-vector value, which makes
    // the whole SCRAM exchange deterministic and comparable against canned messages.
    SCRAMSHA1Mechanism mech = new SCRAMSHA1Mechanism() {
        @Override
        public String getRandomAscii() {
            this.connection = con;
            return "fyko+d2lbbFgONRv9qkxdawL";
        }
    };
    mech.authenticate(USERNAME, "unusedFoo", JidTestUtil.DOMAIN_BARE_JID_1, PASSWORD, null, null);
    AuthMechanism authMechanism = con.getSentPacket();
    assertEquals(SCRAMSHA1Mechanism.NAME, authMechanism.getMechanism());
    assertEquals(CLIENT_FIRST_MESSAGE, saslLayerString(authMechanism.getAuthenticationText()));
    // Feed the canned server-first message and check the computed client-final message.
    mech.challengeReceived(Base64.encode(SERVER_FIRST_MESSAGE), false);
    Response response = con.getSentPacket();
    assertEquals(CLIENT_FINAL_MESSAGE, saslLayerString(response.getAuthenticationText()));
    // The server-final message must verify and conclude the exchange successfully.
    mech.challengeReceived(Base64.encode(SERVER_FINAL_MESSAGE), true);
    mech.checkIfSuccessfulOrThrow();
}
/**
 * Unpacks the plugin jar into a per-plugin temp directory: the jar is copied there
 * and its embedded libraries are extracted next to it.
 */
@Override
public ExplodedPlugin explode(PluginInfo pluginInfo) {
    File workDir = new File(new File(fs.getTempDir(), TEMP_RELATIVE_PATH), pluginInfo.getKey());
    try {
        // Start from a clean directory so files from a previous run cannot leak in.
        org.sonar.core.util.FileUtils.cleanDirectory(workDir);
        File sourceJar = pluginInfo.getNonNullJarFile();
        File copiedJar = new File(workDir, sourceJar.getName());
        FileUtils.copyFile(sourceJar, copiedJar);
        // Extract only the embedded libraries (per newLibFilter()).
        ZipUtils.unzip(sourceJar, workDir, newLibFilter());
        return explodeFromUnzippedDir(pluginInfo, copiedJar, workDir);
    } catch (Exception e) {
        throw new IllegalStateException(String.format(
            "Fail to unzip plugin [%s] %s to %s", pluginInfo.getKey(),
            pluginInfo.getNonNullJarFile().getAbsolutePath(), workDir.getAbsolutePath()), e);
    }
}
@Test
public void plugins_do_not_overlap() {
    // Exploding two different plugins must yield two separate, intact jar copies.
    PluginInfo firstPlugin = PluginInfo.create(plugin1Jar());
    PluginInfo secondPlugin = PluginInfo.create(plugin2Jar());

    ExplodedPlugin firstExploded = underTest.explode(firstPlugin);
    ExplodedPlugin secondExploded = underTest.explode(secondPlugin);

    assertThat(firstExploded.getKey()).isEqualTo("test");
    assertThat(firstExploded.getMain()).isFile().exists().hasName("sonar-test-plugin-0.1-SNAPSHOT.jar");
    assertThat(secondExploded.getKey()).isEqualTo("test2");
    assertThat(secondExploded.getMain()).isFile().exists().hasName("sonar-test2-plugin-0.1-SNAPSHOT.jar");
}
/**
 * Runs every known {@link Rule} pattern against the given log text and collects a
 * {@link Result} for each rule whose pattern matches.
 *
 * @param log the raw crash-report/log text to scan
 * @return the set of matched rules with their matcher state; empty if nothing matched
 */
public static Set<Result> analyze(String log) {
    Set<Result> results = new HashSet<>();
    for (Rule rule : Rule.values()) {
        Matcher matcher = rule.pattern.matcher(log);
        if (matcher.find()) {
            results.add(new Result(rule, log, matcher));
        }
    }
    return results;
}

/**
 * @deprecated misspelled name kept for backward compatibility; use {@link #analyze(String)}.
 */
@Deprecated
public static Set<Result> anaylze(String log) {
    return analyze(log);
}
@Test
public void incompleteForgeInstallation5() throws IOException {
    // The analyzer must flag this log as an incomplete Forge installation.
    CrashReportAnalyzer.Result result = findResultByRule(
            CrashReportAnalyzer.anaylze(loadLog("/logs/incomplete_forge_installation5.txt")),
            CrashReportAnalyzer.Rule.INCOMPLETE_FORGE_INSTALLATION);
    // Previously the result was computed but never checked, so the test could not fail
    // even if the rule stopped matching. Fully-qualified stdlib call avoids a new import.
    java.util.Objects.requireNonNull(result, "expected INCOMPLETE_FORGE_INSTALLATION to match");
}
/** Program entry point: builds the application and runs it. */
public static void main(String[] args) {
    new App().run();
}
@Test
void shouldExecuteApplicationWithoutException() {
    // Smoke test: running the demo application must not throw.
    assertDoesNotThrow(() -> App.main(new String[0]));
}
/**
 * Checks this material's SCM id against the map of already-seen materials. On a clash,
 * both materials get an error; otherwise this material registers itself in the map.
 */
@Override
public void validateNameUniqueness(Map<CaseInsensitiveString, AbstractMaterialConfig> map) {
    // Materials without an SCM id cannot clash, so there is nothing to validate.
    if (StringUtils.isBlank(scmId)) {
        return;
    }
    CaseInsensitiveString key = new CaseInsensitiveString(scmId);
    if (map.containsKey(key)) {
        // Record the error on both materials so each shows up in validation output.
        AbstractMaterialConfig clashingMaterial = map.get(key);
        clashingMaterial.addError(SCM_ID, "Duplicate SCM material detected!");
        addError(SCM_ID, "Duplicate SCM material detected!");
    } else {
        map.put(key, this);
    }
}
@Test
public void shouldPassMaterialUniquenessIfIfNoDuplicateSCMFound() {
    // Pre-populate the map with materials that do NOT clash with the one under test.
    Map<CaseInsensitiveString, AbstractMaterialConfig> materialsById = new HashMap<>();
    materialsById.put(new CaseInsensitiveString("scm-id-new"), new PluggableSCMMaterialConfig("scm-id-new"));
    materialsById.put(new CaseInsensitiveString("foo"), git("url"));

    pluggableSCMMaterialConfig.validateNameUniqueness(materialsById);

    // No errors recorded, and the material registered itself as a third entry.
    assertThat(pluggableSCMMaterialConfig.errors().getAll().size(), is(0));
    assertThat(materialsById.size(), is(3));
}
/**
 * Builds a report of all slots currently allocated on the given task manager,
 * identified by physical slot number and allocation id.
 */
@Override
public AllocatedSlotReport createAllocatedSlotReport(ResourceID taskManagerId) {
    assertHasBeenStarted();
    final List<AllocatedSlotInfo> slotsOnTaskManager = new ArrayList<>();
    for (SlotInfo slotInfo : declarativeSlotPool.getAllSlotsInformation()) {
        final boolean onRequestedTaskManager =
                slotInfo.getTaskManagerLocation().getResourceID().equals(taskManagerId);
        if (onRequestedTaskManager) {
            slotsOnTaskManager.add(
                    new AllocatedSlotInfo(
                            slotInfo.getPhysicalSlotNumber(), slotInfo.getAllocationId()));
        }
    }
    return new AllocatedSlotReport(jobId, slotsOnTaskManager);
}
@Test
void testCreateAllocatedSlotReport() throws Exception {
    final LocalTaskManagerLocation taskManagerLocation1 = new LocalTaskManagerLocation();
    final LocalTaskManagerLocation taskManagerLocation2 = new LocalTaskManagerLocation();
    // Two slots on different task managers; only the one on task manager 2 should be reported.
    final TestingPhysicalSlot testingPhysicalSlot2 = createTestingPhysicalSlot(taskManagerLocation2);
    final Collection<SlotInfo> slotInfos =
            Arrays.asList(
                    createTestingPhysicalSlot(taskManagerLocation1), testingPhysicalSlot2);
    try (DeclarativeSlotPoolService declarativeSlotPoolService =
            createDeclarativeSlotPoolService(
                    new TestingDeclarativeSlotPoolFactory(
                            new TestingDeclarativeSlotPoolBuilder()
                                    .setGetAllSlotsInformationSupplier(() -> slotInfos)))) {
        final AllocatedSlotReport allocatedSlotReport =
                declarativeSlotPoolService.createAllocatedSlotReport(
                        taskManagerLocation2.getResourceID());
        // Every reported slot must belong to task manager 2 and carry its
        // allocation id and physical slot number.
        assertThat(allocatedSlotReport.getAllocatedSlotInfos())
                .allMatch(
                        context ->
                                context.getAllocationId()
                                                .equals(testingPhysicalSlot2.getAllocationId())
                                        && context.getSlotIndex()
                                                == testingPhysicalSlot2
                                                        .getPhysicalSlotNumber());
    }
}
/**
 * Copies a file, choosing the strategy by size: files above the configured threshold
 * use the multipart large-copy path, everything else the single-request copy.
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status,
                 final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    final boolean useMultipart = new B2ThresholdUploadService(session, fileid, threshold).threshold(status);
    if (useMultipart) {
        return new B2LargeCopyFeature(session, fileid).copy(source, target, status, callback, listener);
    }
    return new B2CopyFeature(session, fileid).copy(source, target, status, callback, listener);
}
@Test
public void testCopyFileSizeBelowPartSize() throws IOException {
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path container = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final String name = new AlphanumericRandomStringService().random();
    // 4 MB content with a 5 MB threshold: the copy must take the single-request path.
    final byte[] content = RandomUtils.nextBytes(4 * 1000 * 1000);
    final Path test = new Path(container, name, EnumSet.of(Path.Type.file));
    // Upload the source file.
    final OutputStream out = new B2WriteFeature(session, fileid).write(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    new StreamCopier(new TransferStatus(), new TransferStatus().withLength(content.length)).transfer(new ByteArrayInputStream(content), out);
    out.close();
    assertTrue(new B2FindFeature(session, fileid).find(test));
    final B2ThresholdCopyFeature feature = new B2ThresholdCopyFeature(session, fileid, 5 * 1000L * 1000L);
    final Path copy = feature.copy(test, new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withLength(content.length), new DisabledConnectionCallback(), new DisabledStreamListener());
    // The copy is a distinct object (new version id); both source and copy must exist.
    assertNotEquals(test.attributes().getVersionId(), copy.attributes().getVersionId());
    assertTrue(new B2FindFeature(session, fileid).find(new Path(container, name, EnumSet.of(Path.Type.file))));
    assertTrue(new B2FindFeature(session, fileid).find(copy));
    // Read back the copy and verify the bytes are identical to the original content.
    final byte[] compare = new byte[content.length];
    final InputStream stream = new B2ReadFeature(session, fileid).read(copy, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(content, compare);
    // Clean up both objects.
    new B2DeleteFeature(session, fileid).delete(Arrays.asList(test, copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns {@code path} relative to the git repository root, which is the
 * parent directory of the resolved ".git" directory.
 */
@Override
public Path relativePathFromScmRoot(Path path) {
    Path scmRoot = getVerifiedRepositoryBuilder(path).getGitDir().toPath().getParent();
    return scmRoot.relativize(path);
}
@Test
public void relativePathFromScmRoot_should_return_dot_project_root() {
    // The worktree relativized against the repository root is the empty path.
    Path relative = newGitScmProvider().relativePathFromScmRoot(worktree);
    assertThat(relative).isEqualTo(Paths.get(""));
}
/**
 * Builds a {@link TimeLimiterRegistry} from a commons-configuration source: each
 * configured instance is turned into a named {@link TimeLimiterConfig}, with the
 * given customizer applied.
 */
public static TimeLimiterRegistry of(Configuration configuration, CompositeCustomizer<TimeLimiterConfigCustomizer> customizer){
    CommonTimeLimiterConfigurationProperties properties =
        CommonsConfigurationTimeLimiterConfiguration.of(configuration);
    Map<String, TimeLimiterConfig> configsByName = properties.getInstances().entrySet().stream()
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            instance -> properties.createTimeLimiterConfig(instance.getKey(), instance.getValue(), customizer)));
    return TimeLimiterRegistry.of(configsByName);
}
/**
 * Loads the shared YAML test configuration and verifies that a TimeLimiterRegistry
 * built from it contains both configured time-limiter instances.
 * (Renamed from testRateLimiterRegistryFromYamlFile — a copy-paste defect: this test
 * exercises the TimeLimiter registry, not RateLimiter.)
 */
@Test
public void testTimeLimiterRegistryFromYamlFile() throws ConfigurationException {
    Configuration config = CommonsConfigurationUtil.getConfiguration(YAMLConfiguration.class, TestConstants.RESILIENCE_CONFIG_YAML_FILE_NAME);
    TimeLimiterRegistry registry = CommonsConfigurationTimeLimiterRegistry.of(config, new CompositeCustomizer<>(List.of()));
    Assertions.assertThat(registry.timeLimiter(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
    Assertions.assertThat(registry.timeLimiter(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
}
@ScalarFunction @SqlNullable @Description("Returns an array of spatial partition IDs for a given geometry") @SqlType("array(int)") public static Block spatialPartitions(@SqlType(KdbTreeType.NAME) Object kdbTree, @SqlType(GEOMETRY_TYPE_NAME) Slice geometry) { Envelope envelope = deserializeEnvelope(geometry); if (envelope.isEmpty()) { // Empty geometry return null; } return spatialPartitions((KdbTree) kdbTree, new Rectangle(envelope.getXMin(), envelope.getYMin(), envelope.getXMax(), envelope.getYMax())); }
@Test
public void testSpatialPartitions() {
    String kdbTreeJson = makeKdbTreeJson();

    // Empty geometry yields SQL NULL.
    assertSpatialPartitions(kdbTreeJson, "POINT EMPTY", null);
    // points inside partitions
    assertSpatialPartitions(kdbTreeJson, "POINT (0 0)", ImmutableList.of(0));
    assertSpatialPartitions(kdbTreeJson, "POINT (3 1)", ImmutableList.of(1));
    // point on the border between two partitions
    assertSpatialPartitions(kdbTreeJson, "POINT (1 2.5)", ImmutableList.of(2));
    // point at the corner of three partitions
    assertSpatialPartitions(kdbTreeJson, "POINT (4.5 2.5)", ImmutableList.of(5));
    // points outside (clamped to the nearest boundary partition)
    assertSpatialPartitions(kdbTreeJson, "POINT (-10 -10)", ImmutableList.of(0));
    assertSpatialPartitions(kdbTreeJson, "POINT (-10 10)", ImmutableList.of(2));
    assertSpatialPartitions(kdbTreeJson, "POINT (10 -10)", ImmutableList.of(4));
    assertSpatialPartitions(kdbTreeJson, "POINT (10 10)", ImmutableList.of(5));
    // geometry within a partition
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (5 0.1, 6 2)", ImmutableList.of(3));
    // geometries spanning multiple partitions
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (5 0.1, 5.5 3, 6 2)", ImmutableList.of(5, 3));
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (3 2, 8 3)", ImmutableList.of(5, 4, 3, 2, 1));
    // geometry outside
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (2 6, 5 7)", ImmutableList.of(5, 2));

    // with distance: the bounding box is expanded by the given radius before lookup
    assertSpatialPartitions(kdbTreeJson, "POINT EMPTY", 1.2, null);
    assertSpatialPartitions(kdbTreeJson, "POINT (1 1)", 1.2, ImmutableList.of(0));
    assertSpatialPartitions(kdbTreeJson, "POINT (1 1)", 2.3, ImmutableList.of(2, 1, 0));
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (5 0.1, 6 2)", 0.2, ImmutableList.of(3));
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (5 0.1, 6 2)", 1.2, ImmutableList.of(5, 3, 2, 1));
    assertSpatialPartitions(kdbTreeJson, "MULTIPOINT (2 6, 3 7)", 1.2, ImmutableList.of(2));
}
public OAuth2Authorization extractAuthorization(String redirectLocation) { final OAuth2Authorization authorization = new OAuth2Authorization(); int end = redirectLocation.indexOf('#'); if (end == -1) { end = redirectLocation.length(); } for (String param : redirectLocation.substring(redirectLocation.indexOf('?') + 1, end).split("&")) { final String[] keyValue = param.split("="); if (keyValue.length == 2) { try { switch (keyValue[0]) { case "code": authorization.setCode(URLDecoder.decode(keyValue[1], "UTF-8")); break; case "state": authorization.setState(URLDecoder.decode(keyValue[1], "UTF-8")); break; default: //just ignore any other param; } } catch (UnsupportedEncodingException ueE) { throw new IllegalStateException("jvm without UTF-8, really?", ueE); } } } return authorization; }
@Test
public void testOAuthExtractAuthorization() {
    final OAuth20Service service = new ServiceBuilder("your_api_key")
            .apiSecret("your_api_secret")
            .build(new OAuth20ApiUnit());

    // code and state in canonical order
    OAuth2Authorization authorization = service.extractAuthorization("https://cl.ex.com/cb?code=SplxlOB&state=xyz");
    assertEquals("SplxlOB", authorization.getCode());
    assertEquals("xyz", authorization.getState());

    // parameter order must not matter
    authorization = service.extractAuthorization("https://cl.ex.com/cb?state=xyz&code=SplxlOB");
    assertEquals("SplxlOB", authorization.getCode());
    assertEquals("xyz", authorization.getState());

    // unknown parameters are ignored
    authorization = service.extractAuthorization("https://cl.ex.com/cb?key=value&state=xyz&code=SplxlOB");
    assertEquals("SplxlOB", authorization.getCode());
    assertEquals("xyz", authorization.getState());

    // trailing '&' and extra parameters are tolerated
    authorization = service.extractAuthorization("https://cl.ex.com/cb?state=xyz&code=SplxlOB&key=value&");
    assertEquals("SplxlOB", authorization.getCode());
    assertEquals("xyz", authorization.getState());

    // empty-valued and value-less parameters yield null fields
    authorization = service.extractAuthorization("https://cl.ex.com/cb?code=SplxlOB&state=");
    assertEquals("SplxlOB", authorization.getCode());
    assertEquals(null, authorization.getState());

    authorization = service.extractAuthorization("https://cl.ex.com/cb?code=SplxlOB");
    assertEquals("SplxlOB", authorization.getCode());
    assertEquals(null, authorization.getState());

    authorization = service.extractAuthorization("https://cl.ex.com/cb?code=");
    assertEquals(null, authorization.getCode());
    assertEquals(null, authorization.getState());

    authorization = service.extractAuthorization("https://cl.ex.com/cb?code");
    assertEquals(null, authorization.getCode());
    assertEquals(null, authorization.getState());

    // degenerate URLs with an empty or missing query string
    authorization = service.extractAuthorization("https://cl.ex.com/cb?");
    assertEquals(null, authorization.getCode());
    assertEquals(null, authorization.getState());

    authorization = service.extractAuthorization("https://cl.ex.com/cb");
    assertEquals(null, authorization.getCode());
    assertEquals(null, authorization.getState());
}
/**
 * Resolves the directory for CSV metrics output: the explicitly configured reporter
 * directory wins; otherwise {@code <storm-local>/csvmetrics} is used. The directory
 * is created/validated before being returned.
 */
public static File getCsvLogDir(Map<String, Object> daemonConf) {
    String configuredDir = ObjectReader.getString(
            daemonConf.get(DaemonConfig.STORM_DAEMON_METRICS_REPORTER_CSV_LOG_DIR), null);
    if (configuredDir == null) {
        // Fall back to a "csvmetrics" directory under the storm local dir.
        configuredDir = ConfigUtils.absoluteStormLocalDir(daemonConf)
                + ConfigUtils.FILE_SEPARATOR + "csvmetrics";
    }
    File csvLogDir = new File(configuredDir);
    validateCreateOutputDir(csvLogDir);
    return csvLogDir;
}
@Test
public void getCsvLogDir() {
    Map<String, Object> daemonConf = new HashMap<>();
    String localDir = new File("").getAbsolutePath();
    daemonConf.put(Config.STORM_LOCAL_DIR, localDir);

    // Without an explicit reporter dir, the csv dir is derived from the storm local dir.
    File derivedDir = new File(localDir, "csvmetrics");
    assertEquals(derivedDir, MetricsUtils.getCsvLogDir(daemonConf));

    // An explicitly configured reporter dir takes precedence.
    daemonConf.put(DaemonConfig.STORM_DAEMON_METRICS_REPORTER_CSV_LOG_DIR, "./");
    assertEquals(new File("./"), MetricsUtils.getCsvLogDir(daemonConf));
}
/**
 * Completes the given {@link SendRequest}: selects inputs from the wallet's spendable
 * outputs, adds a change output and fee, optionally shuffles outputs and signs, and
 * marks the request completed. The request's transaction is mutated in place.
 *
 * @param req the send request to complete; must not already be completed
 * @throws InsufficientMoneyException if the wallet cannot cover outputs plus fee
 * @throws CompletionException on structural problems (dusty send, multiple OP_RETURNs,
 *         oversize transaction, failed downward fee adjustment, ...)
 */
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
    lock.lock();
    try {
        checkArgument(!req.completed, () -> "given SendRequest has already been completed");

        log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
                req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());

        // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
        // with the actual outputs that'll be used to gather the required amount of value. In this way, users
        // can customize coin selection policies. The call below will ignore immature coinbases and outputs
        // we don't have the keys for.
        List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);

        // Connect (add a value amount) unconnected inputs
        List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
        req.tx.clearInputs();
        inputs.forEach(req.tx::addInput);

        // Warn if there are remaining unconnected inputs whose value we do not know
        // TODO: Consider throwing if there are inputs that we don't have a value for
        if (req.tx.getInputs().stream()
                .map(TransactionInput::getValue)
                .anyMatch(Objects::isNull))
            log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");

        // If any inputs have already been added, we don't need to get their value from wallet
        Coin totalInput = req.tx.getInputSum();
        // Calculate the amount of value we need to import.
        Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);

        // Enforce the OP_RETURN limit
        if (req.tx.getOutputs().stream()
                .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
                .count() > 1) // Only 1 OP_RETURN per transaction allowed.
            throw new MultipleOpReturnRequested();

        // Check for dusty sends
        if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
            if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
                throw new DustySendRequested();
        }

        // Filter out candidates that are already included in the transaction inputs
        // NOTE(review): this filter KEEPS outputs for which alreadyIncluded(...) is true, which
        // only matches the comment above if the helper returns true for outputs NOT yet
        // referenced by an input — verify the helper's semantics.
        List<TransactionOutput> candidates = prelimCandidates.stream()
                .filter(output -> alreadyIncluded(req.tx.getInputs(), output))
                .collect(StreamUtils.toUnmodifiableList());

        CoinSelection bestCoinSelection;
        TransactionOutput bestChangeOutput = null;
        List<Coin> updatedOutputValues = null;
        if (!req.emptyWallet) {
            // This can throw InsufficientMoneyException.
            FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
            bestCoinSelection = feeCalculation.bestCoinSelection;
            bestChangeOutput = feeCalculation.bestChangeOutput;
            updatedOutputValues = feeCalculation.updatedOutputValues;
        } else {
            // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
            // of the total value we can currently spend as determined by the selector, and then subtracting the fee.
            checkState(req.tx.getOutputs().size() == 1, () -> "empty wallet TX must have a single output only");
            CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
            bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
            candidates = null;  // Selector took ownership and might have changed candidates. Don't access again.
            req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
            log.info(" emptying {}", bestCoinSelection.totalValue().toFriendlyString());
        }

        // Add the selected outputs as inputs of the transaction being built.
        bestCoinSelection.outputs()
                .forEach(req.tx::addInput);

        if (req.emptyWallet) {
            if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
                throw new CouldNotAdjustDownwards();
        }

        // Apply any per-output value adjustments computed by the fee calculation.
        if (updatedOutputValues != null) {
            for (int i = 0; i < updatedOutputValues.size(); i++) {
                req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
            }
        }

        if (bestChangeOutput != null) {
            req.tx.addOutput(bestChangeOutput);
            log.info(" with {} change", bestChangeOutput.getValue().toFriendlyString());
        }

        // Now shuffle the outputs to obfuscate which is the change.
        if (req.shuffleOutputs)
            req.tx.shuffleOutputs();

        // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
        if (req.signInputs)
            signTransaction(req);

        // Check size.
        final int size = req.tx.messageSize();
        if (size > Transaction.MAX_STANDARD_TX_SIZE)
            throw new ExceededMaxTransactionSize();

        // Label the transaction as being self created. We can use this later to spend its change output even before
        // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
        // point - the user isn't interested in a confidence transition they made themselves.
        getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
        // Label the transaction as being a user requested payment. This can be used to render GUI wallet
        // transaction lists more appropriately, especially when the wallet starts to generate transactions itself
        // for internal purposes.
        req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
        // Record the exchange rate that was valid when the transaction was completed.
        req.tx.setExchangeRate(req.exchangeRate);
        req.tx.setMemo(req.memo);
        req.completed = true;
        log.info(" completed: {}", req.tx);
    } finally {
        lock.unlock();
    }
}
@Test public void opReturnOneOutputTest() throws Exception { // Tests basic send of transaction with one output that doesn't transfer any value but just writes OP_RETURN. receiveATransaction(wallet, myAddress); Transaction tx = new Transaction(); Coin messagePrice = Coin.ZERO; Script script = ScriptBuilder.createOpReturnScript("hello world!".getBytes()); tx.addOutput(messagePrice, script); SendRequest request = SendRequest.forTx(tx); request.ensureMinRequiredFee = true; wallet.completeTx(request); }
/**
 * Returns a view of {@code configuration} in which all reads and writes are scoped
 * under the metrics-reporter prefix for the given reporter name
 * (i.e. {@code metrics.reporter.<reporterName>.}).
 *
 * @param configuration the backing configuration that is delegated to
 * @param reporterName the reporter whose options the returned view addresses
 * @return a prefix-delegating configuration view backed by {@code configuration}
 */
@Experimental
public static Configuration forReporter(Configuration configuration, String reporterName) {
    return new DelegatingConfiguration(
            configuration, ConfigConstants.METRICS_REPORTER_PREFIX + reporterName + ".");
}
@Test
void testForReporterWrite() {
    // Writing through the reporter-scoped view must land on the fully-prefixed key
    // in the backing configuration.
    Configuration configuration = new Configuration();
    Configuration reporterScoped = MetricOptions.forReporter(configuration, "my_reporter");
    reporterScoped.set(SUB_OPTION, "value");
    assertThat(configuration.get(FULL_OPTION)).isEqualTo("value");
}
/**
 * Stores {@code value} under {@code key} after the key passes {@code validKey}
 * validation (presumably restricting keys to ASCII letters, per the tests —
 * TODO confirm against validKey's implementation).
 *
 * @param key preference key; rejected by {@code validKey} if invalid
 * @param value the value to associate with the key
 * @return this item, for call chaining
 */
@CanIgnoreReturnValue
public PrefItem addValue(String key, String value) {
    mValues.put(validKey(key), value);
    return this;
}
@Test(expected = java.lang.IllegalArgumentException.class)
public void testFailsIfKeyHasNonAsciiLetterCharacters() {
    // "$" is not an ASCII letter, so key validation must reject it.
    mPrefItem.addValue("$", "value");
}
/**
 * Rotates the given index set if this strategy's condition ({@link #shouldRotate})
 * says so; the rotation mechanics themselves are delegated to the shared rotator.
 */
@Override
public void rotate(IndexSet indexSet) {
    indexRotator.rotate(indexSet, this::shouldRotate);
}
@Test
public void testDontRotate() {
    // 1 message in the newest index with a rotation threshold of 5 -> no rotation expected.
    when(indices.numberOfMessages("name")).thenReturn(1L);
    when(indexSet.getNewestIndex()).thenReturn("name");
    when(indexSet.getConfig()).thenReturn(indexSetConfig);
    when(indexSetConfig.rotationStrategyConfig()).thenReturn(MessageCountRotationStrategyConfig.create(5));

    final MessageCountRotationStrategy strategy = createStrategy();

    strategy.rotate(indexSet);
    // Below the message-count threshold, the index must not be cycled.
    verify(indexSet, never()).cycle();
    // NOTE(review): resetting the mock at the very end has no effect on this test's
    // assertions; presumably a guard for shared mock state across tests — confirm.
    reset(indexSet);
}
/**
 * Plans the next batch of splits. A {@code null} position means this is the first
 * call and initial discovery is performed; otherwise discovery continues
 * incrementally from the given position.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    // Always work against the latest table metadata.
    table.refresh();
    return lastPosition == null
        ? discoverInitialSplits()
        : discoverIncrementalSplits(lastPosition);
}
@Test
public void testIncrementalFromSnapshotTimestampWithEmptyTable() {
    // Start from a timestamp (1 ms) that cannot resolve to any snapshot in an empty table.
    ScanContext scanContextWithInvalidSnapshotId =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_TIMESTAMP)
            .startSnapshotTimestamp(1L)
            .build();

    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(
            TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null);

    // Initial planning (lastPosition == null) must fail fast with a clear message.
    assertThatThrownBy(() -> splitPlanner.planSplits(null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Cannot find a snapshot after: 1");
}
/**
 * Classifies serialization failures on user (non-internal) topics as USER errors;
 * everything else stays UNKNOWN.
 */
@Override
public Type classify(final Throwable e) {
    // A serialization error is either the exception itself or one wrapped in a StreamsException.
    final boolean serializationError = e instanceof KsqlSerializationException
        || (e instanceof StreamsException
            && ExceptionUtils.indexOfThrowable(e, KsqlSerializationException.class) != -1);
    if (serializationError && !hasInternalTopicPrefix(e)) {
        LOG.info(
            "Classified error as USER error based on schema mismatch. Query ID: {} Exception: {}",
            queryId,
            e);
        return Type.USER;
    }
    return Type.UNKNOWN;
}
@Test public void shouldClassifyKsqlSerializationExceptionWithUserTopicAsUserError() { // Given: final String topic = "foo.bar"; final Exception e = new KsqlSerializationException( topic, "Error serializing message to topic: " + topic); // When: final Type type = new KsqlSerializationClassifier("").classify(e); // Then: assertThat(type, is(Type.USER)); }
/**
 * Streams query results to {@code out} as a JSON array: a header first, then one row
 * per queue entry, finishing with a status message ("Limit Reached" / "Query Completed").
 * Writes a bare newline while idle so client disconnects are detected promptly.
 * Returns silently if the connection is closed; always closes resources on exit.
 */
@Override
public void write(final OutputStream out) {
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    try {
        out.write("[".getBytes(StandardCharsets.UTF_8));
        write(out, buildHeader());
        final BlockingRowQueue rowQueue = queryMetadata.getRowQueue();

        // Stream rows until the query stops, the limit is hit, or the client goes away.
        while (!connectionClosed && queryMetadata.isRunning() && !limitReached && !complete) {
            final KeyValueMetadata<List<?>, GenericRow> row = rowQueue.poll(
                disconnectCheckInterval,
                TimeUnit.MILLISECONDS
            );
            if (row != null) {
                write(out, buildRow(row));
            } else {
                // If no new rows have been written, the user may have terminated the connection without
                // us knowing. Check by trying to write a single newline.
                out.write("\n".getBytes(StandardCharsets.UTF_8));
                out.flush();
            }
            // Surface any asynchronous query error after each iteration.
            drainAndThrowOnError(out);
        }

        if (connectionClosed) {
            return;
        }

        // Flush any rows still buffered in the queue, then emit the terminal status.
        drain(out);
        if (limitReached) {
            objectMapper.writeValue(out, StreamedRow.finalMessage("Limit Reached"));
        } else if (complete) {
            objectMapper.writeValue(out, StreamedRow.finalMessage("Query Completed"));
        }
        out.write("]\n".getBytes(StandardCharsets.UTF_8));
        out.flush();
    } catch (final EOFException exception) {
        // The user has terminated the connection; we can stop writing
        log.warn("Query terminated due to exception:" + exception.toString());
    } catch (final InterruptedException exception) {
        // The most likely cause of this is the server shutting down. Should just try to close
        // gracefully, without writing any more to the connection stream.
        log.warn("Interrupted while writing to connection stream");
    } catch (final Exception exception) {
        log.error("Exception occurred while writing to connection stream: ", exception);
        outputException(out, exception);
    } finally {
        close();
    }
}
/**
 * When the row limit is already reached before writing starts, the writer
 * must still drain queued rows to the output before terminating.
 */
@Test
public void shouldExitAndDrainIfLimitReached() {
    // Given:
    doAnswer(streamRows("Row1", "Row2", "Row3"))
        .when(rowQueue).drainTo(any());

    createWriter();

    limitHandler.limitReached();

    // When:
    writer.write(out);

    // Then:
    final List<String> lines = getOutput(out);
    assertThat(lines, hasItems(
        containsString("Row1"),
        containsString("Row2"),
        containsString("Row3")));
}
/**
 * Performs a single TCP health probe against the configured host and port.
 *
 * @return a healthy result when a TCP connection succeeds, otherwise unhealthy
 * @throws IOException if the underlying probe fails with an I/O error
 */
@Override
protected Result check() throws IOException {
    if (tcpCheck(host, port)) {
        LOGGER.debug("Health check against url={}:{} successful", host, port);
        return Result.healthy();
    }
    LOGGER.debug("Health check against url={}:{} failed", host, port);
    return Result.unhealthy("TCP health check against host=%s port=%s failed", host, port);
}
/**
 * The TCP health check reports healthy when a connection to the accepting
 * server socket can be established.
 */
@Test
void tcpHealthCheckShouldReturnHealthyIfCanConnect() throws IOException {
    // Accept the probe connection on a background thread so check() can connect.
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    executorService.submit(() -> serverSocket.accept());

    assertThat(tcpHealthCheck.check().isHealthy())
        .isTrue();
}
/**
 * Returns a {@link TypeDescriptor} capturing the {@code String} type.
 */
public static TypeDescriptor<String> strings() {
    // The anonymous subclass captures String as the reified type argument.
    final TypeDescriptor<String> descriptor = new TypeDescriptor<String>() {};
    return descriptor;
}
/**
 * Extracting type parameters from a type-erased generic instance yields the
 * expected descriptors for each parameter position.
 */
@Test
public void testTypeDescriptorsTypeParameterOfErased() throws Exception {
    Generic<Integer, String> instance = TypeDescriptorsTest.typeErasedGeneric();

    TypeDescriptor<Integer> fooT = extractFooT(instance);
    assertNotNull(fooT);
    // Using toString() assertions because verifying the contents of a Type is very cumbersome,
    // and the expected types can not be easily constructed directly.
    assertEquals("ActualFooT", fooT.toString());

    assertEquals(strings(), extractBarT(instance));

    TypeDescriptor<KV<Integer, String>> kvT = extractKV(instance);
    assertNotNull(kvT);
    assertThat(kvT.toString(), CoreMatchers.containsString("KV<ActualFooT, java.lang.String>"));
}
/**
 * Returns this builder's fixed ordering among SPI rule builders.
 *
 * @return {@link SQLParserOrder#ORDER}
 */
@Override
public int getOrder() {
    return SQLParserOrder.ORDER;
}
/** The builder must expose the SQL parser order constant. */
@Test
void assertGetOrder() {
    assertThat(new SQLParserRuleBuilder().getOrder(), is(SQLParserOrder.ORDER));
}
/**
 * Verifies a converted configuration by actually starting a
 * {@link CapacityScheduler} against the capacity-scheduler.xml and
 * yarn-site.xml found in the given output directory.
 *
 * @param outputDir directory containing the converted configuration files
 * @throws VerificationException if the scheduler cannot be initialised and
 *         started with the converted configuration
 */
public void validateConvertedConfig(String outputDir) throws Exception {
    // Reset static queue metrics so a previous scheduler run cannot interfere.
    QueueMetrics.clearQueueMetrics();

    Path configPath = new Path(outputDir, "capacity-scheduler.xml");

    CapacitySchedulerConfiguration csConfig = new CapacitySchedulerConfiguration(
        new Configuration(false), false);
    csConfig.addResource(configPath);

    Path convertedSiteConfigPath = new Path(outputDir, "yarn-site.xml");
    Configuration siteConf = new YarnConfiguration(
        new Configuration(false));
    siteConf.addResource(convertedSiteConfigPath);

    RMContextImpl rmContext = new RMContextImpl();

    // Point the FS-based config store at the converted output so the provider
    // resolves configuration from there.
    siteConf.set(YarnConfiguration.FS_BASED_RM_CONF_STORE, outputDir);
    ConfigurationProvider provider = new FileSystemBasedConfigurationProvider();
    provider.init(siteConf);
    rmContext.setConfigurationProvider(provider);

    RMNodeLabelsManager mgr = new RMNodeLabelsManager();
    mgr.init(siteConf);
    rmContext.setNodeLabelManager(mgr);

    // try-with-resources guarantees the scheduler is closed even on failure.
    try (CapacityScheduler cs = new CapacityScheduler()) {
        cs.setConf(siteConf);
        cs.setRMContext(rmContext);
        cs.serviceInit(csConfig);
        cs.serviceStart();
        LOG.info("Capacity scheduler was successfully started");
        cs.serviceStop();
    } catch (Exception e) {
        LOG.error("Could not start Capacity Scheduler", e);
        throw new VerificationException(
            "Verification of converted configuration failed", e);
    }
}
/** Validation of a known-bad configuration directory must fail. */
@Test(expected = VerificationException.class)
public void testValidationFails() throws Exception {
    validator.validateConvertedConfig(CONFIG_DIR_FAIL);
}
/**
 * Returns the document icon for a file extension at the requested size,
 * resolving and caching the workspace icon on a cache miss.
 *
 * @param extension file extension to look up
 * @param size      icon size in points
 * @return the cached or freshly converted icon image
 */
@Override
public NSImage documentIcon(final String extension, final Integer size) {
    final NSImage cached = this.load(extension, size);
    if(cached != null) {
        return cached;
    }
    // Cache miss: resolve the workspace icon for the file type, convert it to
    // the requested size and store it before returning.
    return this.cache(extension, this.convert(extension, workspace.iconForFileType(extension), size), size);
}
/**
 * A null extension must still yield a valid, non-template icon of the
 * requested size.
 */
@Test
public void testDocumentIconNullExtension() {
    final NSImage icon = new NSImageIconCache().documentIcon(null, 64);
    assertNotNull(icon);
    assertTrue(icon.isValid());
    assertFalse(icon.isTemplate());
    assertEquals(64, icon.size().width.intValue());
    assertEquals(64, icon.size().height.intValue());
}
/**
 * Handles a WatchTopicListClose command by delegating to the topic list
 * service; only legal while the connection is in the Connected state.
 */
@Override
protected void handleCommandWatchTopicListClose(CommandWatchTopicListClose commandWatchTopicListClose) {
    // Guard: the command is only valid on an established connection.
    checkArgument(state == State.Connected);
    topicListService.handleWatchTopicListClose(commandWatchTopicListClose);
}
/**
 * A connection in the Failed state must reject WatchTopicListClose commands
 * with an IllegalArgumentException.
 */
@Test(expectedExceptions = IllegalArgumentException.class)
public void shouldFailHandleCommandWatchTopicListClose() throws Exception {
    ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS);
    // Force the private state field to Failed via reflection.
    Field stateUpdater = ServerCnx.class.getDeclaredField("state");
    stateUpdater.setAccessible(true);
    stateUpdater.set(serverCnx, ServerCnx.State.Failed);
    serverCnx.handleCommandWatchTopicListClose(any());
}
/**
 * Blocks until the underlying future callback completes and returns its
 * transport response.
 */
@Override
public TransportResponse<T> get() throws InterruptedException, ExecutionException {
    return _futureCallback.get();
}
/** get() with a zero timeout on an unfulfilled callback must time out. */
@Test(timeOut = 1000L, expectedExceptions = TimeoutException.class)
public void testGetTimeout() throws Exception {
    FutureTransportCallback<Object> futureTransportCallback = new FutureTransportCallback<>();
    futureTransportCallback.get(0, TimeUnit.MILLISECONDS);
}
/**
 * Returns the HTTP bind address, requiring brackets around IPv6 literals and
 * applying the Graylog default port when none was configured.
 */
public HostAndPort getHttpBindAddress() {
    return httpBindAddress
        .requireBracketsForIPv6()
        .withDefaultPort(GRAYLOG_DEFAULT_PORT);
}
/** An explicit hostname:port for http_bind_address is parsed verbatim. */
@Test
public void testHttpBindAddressIsValidHostname() throws RepositoryException, ValidationException {
    jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_bind_address", "example.com:9000")))
        .addConfigurationBean(configuration)
        .process();

    assertThat(configuration.getHttpBindAddress()).isEqualTo(HostAndPort.fromParts("example.com", 9000));
}
/**
 * Writes a 16-bit value at the current position using the configured byte
 * order, growing the buffer if necessary and advancing the position.
 *
 * @param v value to write; truncated to its low 16 bits
 */
@Override
public void writeShort(final int v) throws IOException {
    ensureAvailable(SHORT_SIZE_IN_BYTES);
    Bits.writeShort(buffer, pos, (short) v, isBigEndian);
    pos += SHORT_SIZE_IN_BYTES;
}
/** Writes a little-endian short at an explicit position and reads it back. */
@Test
public void testWriteShortForPositionVAndByteOrder() throws IOException {
    short expected = 42;
    out.pos = 2;
    out.writeShort(42, LITTLE_ENDIAN);
    short actual = Bits.readShortL(out.buffer, 2);
    assertEquals(expected, actual);
}
@Override public Optional<ProfileDescription> compare(final ProfileDescription next) { // Filter out profiles with matching checksum final Optional<ProfileDescription> found = repository.stream() .filter(description -> Objects.equals(description.getChecksum(), next.getChecksum())) .findFirst(); if(found.isPresent()) { // Found matching checksum. Determine if latest version if(found.get().isLatest()) { // Latest version already installed return Optional.empty(); } else { // Read last profile version from server as we found matching checksum for previous version return found; } } log.warn(String.format("Local only profile %s", next)); return Optional.empty(); }
/**
 * A profile whose checksum matches nothing in an empty repository is treated
 * as local-only and yields no match.
 */
@Test
public void testLocalOnly() throws Exception {
    // Local only profile
    final ProfileDescription local = new ProfileDescription(
        ProtocolFactory.get(),
        new Checksum(HashAlgorithm.md5, "d41d8cd98f00b204e9800998ecf8427e"), null);
    assertFalse(new ChecksumProfileMatcher(Stream.<ProfileDescription>empty().collect(Collectors.toSet())).compare(local).isPresent());
}
/**
 * Fails unless the multimap under test contains at least the given key/value
 * entries; returns an {@link Ordered} for optional in-order verification.
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
    @Nullable Object k0, @Nullable Object v0, @Nullable Object... rest) {
    return containsAtLeastEntriesIn(accumulateMultimap(k0, v0, rest));
}
/**
 * containsAtLeast passes on contents but its inOrder() check fails when only
 * the values of one key are out of order; verifies the failure message.
 */
@Test
public void containsAtLeastVarargInOrderFailureValuesOnly() {
    ImmutableMultimap<Integer, String> actual =
        ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    assertThat(actual).containsAtLeast(3, "two", 3, "one", 4, "five", 4, "four");
    expectFailureWhenTestingThat(actual)
        .containsAtLeast(3, "two", 3, "one", 4, "five", 4, "four")
        .inOrder();
    assertFailureKeys(
        "contents match, but order was wrong",
        "keys with out-of-order values",
        "---",
        "expected to contain at least",
        "but was");
    assertFailureValue("keys with out-of-order values", "[3]");
}
/**
 * Attempts to create the given topic, retrying transient failures up to
 * {@code TOPIC_AUTO_CREATE_RETRIES} times.
 *
 * @param adminClient      client used to issue the create request
 * @param topicToBeCreated topic definition to create
 * @return {@code true} if the topic was created, {@code false} if it already
 *         existed
 * @throws IllegalStateException if creation still fails after all retries
 */
public static boolean createTopic(AdminClient adminClient, NewTopic topicToBeCreated) {
    try {
        // The retry lambda returns false on success and true to request another
        // attempt; a TopicExistsException is unwrapped and rethrown so the
        // outer catch can translate it into a "not created" result.
        boolean retryResponse = CruiseControlMetricsUtils.retry(() -> {
            try {
                CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(topicToBeCreated));
                createTopicsResult.values().get(topicToBeCreated.name()).get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS);
                LOG.info("Topic {} has been created.", topicToBeCreated.name());
                return false;
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                if (e.getCause() instanceof TopicExistsException) {
                    throw (TopicExistsException) e.getCause();
                }
                LOG.warn("Unable to create topic {}.", topicToBeCreated.name(), e);
                return true;
            }
        }, TOPIC_AUTO_CREATE_RETRIES);
        if (!retryResponse) {
            throw new IllegalStateException(String.format("Failed to create topic %s.", topicToBeCreated.name()));
        }
    } catch (TopicExistsException e) {
        // Topic already present: report "not created" rather than failing.
        return false;
    }
    return true;
}
/**
 * Exercises createTopic for both the already-exists path (returns false) and
 * the successful-creation path (returns true) using EasyMock expectations.
 */
@Test
public void testCreateTopic() throws ExecutionException, InterruptedException, TimeoutException {
    AdminClient adminClient = EasyMock.createMock(AdminClient.class);
    CreateTopicsResult createTopicsResult = EasyMock.createMock(CreateTopicsResult.class);
    KafkaFuture<Void> createTopicsResultFuture = EasyMock.createMock(KafkaFuture.class);
    Map<String, KafkaFuture<Void>> createTopicsResultValues =
        Collections.singletonMap(TEST_TOPIC.name(), createTopicsResultFuture);

    // 1. Existing topic
    EasyMock.expect(adminClient.createTopics(Collections.singletonList(TEST_TOPIC))).andReturn(createTopicsResult).once();
    EasyMock.expect(createTopicsResult.values()).andReturn(createTopicsResultValues).once();
    EasyMock.expect(createTopicsResultFuture.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS))
        .andThrow(new ExecutionException(Errors.TOPIC_ALREADY_EXISTS.exception()));
    EasyMock.replay(adminClient, createTopicsResult, createTopicsResultFuture);
    assertFalse(createTopic(adminClient, TEST_TOPIC));
    EasyMock.verify(adminClient, createTopicsResult, createTopicsResultFuture);
    EasyMock.reset(adminClient, createTopicsResult, createTopicsResultFuture);

    // 2. New topic
    EasyMock.expect(adminClient.createTopics(Collections.singletonList(TEST_TOPIC))).andReturn(createTopicsResult).once();
    EasyMock.expect(createTopicsResult.values()).andReturn(createTopicsResultValues).once();
    EasyMock.expect(createTopicsResultFuture.get(CLIENT_REQUEST_TIMEOUT_MS, TimeUnit.MILLISECONDS)).andReturn(null).once();
    EasyMock.replay(adminClient, createTopicsResult, createTopicsResultFuture);
    assertTrue(createTopic(adminClient, TEST_TOPIC));
    EasyMock.verify(adminClient, createTopicsResult, createTopicsResultFuture);
}
/**
 * Returns a {@link BundleProcessor} for the given descriptor, rejecting
 * descriptors that declare a state API service descriptor (this overload does
 * not support state) and delegating with a no-op state delegator.
 */
public BundleProcessor getProcessor(
    BeamFnApi.ProcessBundleDescriptor descriptor,
    List<RemoteInputDestination> remoteInputDesinations) {
    checkState(
        !descriptor.hasStateApiServiceDescriptor(),
        "The %s cannot support a %s containing a state %s.",
        BundleProcessor.class.getSimpleName(),
        BeamFnApi.ProcessBundleDescriptor.class.getSimpleName(),
        Endpoints.ApiServiceDescriptor.class.getSimpleName());
    return getProcessor(descriptor, remoteInputDesinations, NoOpStateDelegator.INSTANCE);
}
/**
 * Distinct descriptors yield distinct processors, while requesting the same
 * descriptor twice returns the cached instance.
 */
@Test
public void testRegisterCachesBundleProcessors() throws Exception {
    ProcessBundleDescriptor descriptor1 =
        ProcessBundleDescriptor.newBuilder().setId("descriptor1").build();
    ProcessBundleDescriptor descriptor2 =
        ProcessBundleDescriptor.newBuilder().setId("descriptor2").build();

    List<RemoteInputDestination> remoteInputs =
        Collections.singletonList(
            RemoteInputDestination.of(
                (FullWindowedValueCoder)
                    FullWindowedValueCoder.of(VarIntCoder.of(), GlobalWindow.Coder.INSTANCE),
                SDK_GRPC_READ_TRANSFORM));

    BundleProcessor processor1 = sdkHarnessClient.getProcessor(descriptor1, remoteInputs);
    BundleProcessor processor2 = sdkHarnessClient.getProcessor(descriptor2, remoteInputs);

    assertNotSame(processor1, processor2);

    // Ensure that caching works.
    assertSame(processor1, sdkHarnessClient.getProcessor(descriptor1, remoteInputs));
}
/**
 * Wraps the given state with latency tracking when tracking is enabled,
 * otherwise returns the state unchanged.
 *
 * @param kvState                    raw internal state
 * @param stateDescriptor            descriptor of the state being wrapped
 * @param latencyTrackingStateConfig configuration deciding whether to wrap
 * @return the (possibly wrapped) state
 * @throws Exception if creating the tracking wrapper fails
 */
public static <K, N, V, S extends State> InternalKvState<K, N, ?> createStateAndWrapWithLatencyTrackingIfEnabled(
        InternalKvState<K, N, ?> kvState,
        StateDescriptor<S, V> stateDescriptor,
        LatencyTrackingStateConfig latencyTrackingStateConfig)
        throws Exception {
    // Fast path: tracking disabled, hand the state back untouched.
    if (!latencyTrackingStateConfig.isEnabled()) {
        return kvState;
    }
    return new LatencyTrackingStateFactory<>(
            kvState, stateDescriptor, latencyTrackingStateConfig)
        .createState();
}
/**
 * A reducing state is wrapped in {@link LatencyTrackingReducingState} only
 * when latency tracking is enabled; otherwise it is returned unchanged.
 */
@TestTemplate
@SuppressWarnings("unchecked")
<K, N> void testTrackReducingState() throws Exception {
    InternalReducingState<K, N, Long> reducingState = mock(InternalReducingState.class);
    ReducingStateDescriptor<Long> reducingStateDescriptor =
        new ReducingStateDescriptor<>("reducing", Long::sum, Long.class);
    InternalKvState<K, N, ?> latencyTrackingState =
        LatencyTrackingStateFactory.createStateAndWrapWithLatencyTrackingIfEnabled(
            reducingState, reducingStateDescriptor, getLatencyTrackingStateConfig());
    if (enableLatencyTracking) {
        assertThat(latencyTrackingState).isInstanceOf(LatencyTrackingReducingState.class);
    } else {
        assertThat(latencyTrackingState).isEqualTo(reducingState);
    }
}
/**
 * Looks up slobrok entries matching the pattern for the given application.
 *
 * @param id      application whose slobrok monitor to query
 * @param pattern service name pattern to match
 * @return matching slobrok entries
 * @throws IllegalArgumentException if the application is unknown
 */
@Override
public List<Mirror.Entry> lookup(ApplicationId id, String pattern) {
    synchronized (monitor) {
        final SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id);
        if (slobrokMonitor != null) {
            return slobrokMonitor.lookup(pattern);
        }
        throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id);
    }
}
/**
 * Service-name lookup resolves a slobrok name for the container service but
 * yields nothing for the logserver service.
 */
@Test
public void testLookup() {
    assertEquals(
        Optional.of("vespa/service/config.id"),
        findSlobrokServiceName("container", "config.id"));

    assertEquals(
        Optional.empty(),
        findSlobrokServiceName("logserver", "config.id"));
}
/**
 * Determines whether an upload to the given file can be resumed from an
 * incomplete multipart upload.
 *
 * @param file   target file
 * @param status transfer status to attach to the append result
 * @return an append result with the already-uploaded byte offset when an
 *         incomplete multipart upload exists, otherwise {@code Write.override}
 */
@Override
public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        final S3DefaultMultipartService multipartService = new S3DefaultMultipartService(session);
        final List<MultipartUpload> upload = multipartService.find(file);
        if(!upload.isEmpty()) {
            // Sum the sizes of parts already uploaded to resume at that offset.
            Long size = 0L;
            for(MultipartPart completed : multipartService.list(upload.iterator().next())) {
                size += completed.getSize();
            }
            return new Write.Append(true).withStatus(status).withOffset(size);
        }
    }
    catch(AccessDeniedException | InteroperabilityException e) {
        // Best effort: lacking permission to list multipart uploads falls back
        // to overwrite rather than failing the transfer.
        log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e));
    }
    return Write.override;
}
/**
 * When no incomplete multipart upload exists for a file, append() must report
 * {@code Write.override} regardless of the transfer length.
 */
@Test
public void testAppendNoMultipartFound() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final S3MultipartUploadService feature = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl, 5 * 1024L * 1024L, 5);
    assertFalse(feature.append(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withLength(Long.MAX_VALUE)).append);
    assertEquals(Write.override, feature.append(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withLength(Long.MAX_VALUE)));
    assertEquals(Write.override, feature.append(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withLength(0L)));
}
/**
 * Renders an object array as {@code [a, b, c]}, handling {@code null}, the
 * empty array, and self-references (cycle handling is delegated to
 * {@link CycleDependencyHandler}).
 *
 * @param array array to render; may be {@code null}
 * @return string form of the array
 */
public static String toString(final Object[] array) {
    if (array == null) {
        return "null";
    }
    if (array.length == 0) {
        return "[]";
    }
    return CycleDependencyHandler.wrap(array, o -> {
        StringBuilder sb = new StringBuilder(32);
        sb.append("[");
        for (Object obj : array) {
            // length > 1 means at least one element was appended after "[".
            if (sb.length() > 1) {
                sb.append(", ");
            }
            if (obj == array) {
                // Avoid infinite recursion when the array contains itself.
                sb.append("(this ").append(obj.getClass().getSimpleName()).append(")");
            } else {
                sb.append(StringUtils.toString(obj));
            }
        }
        sb.append("]");
        return sb.toString();
    });
}
/**
 * Covers toString for null, empty arrays, scalars, primitive arrays and
 * string arrays (strings rendered quoted).
 */
@Test
public void testToString() {
    Assertions.assertEquals("null", ArrayUtils.toString((Object[]) null));
    Assertions.assertEquals("[]", ArrayUtils.toString(new Object[]{}));
    Assertions.assertEquals("[\"1\", \"2\", \"3\"]", ArrayUtils.toString(new String[]{"1", "2", "3"}));

    Assertions.assertEquals("null", ArrayUtils.toString((Object) null));
    Assertions.assertEquals("[]", ArrayUtils.toString((Object) new Object[]{}));
    Assertions.assertEquals("123", ArrayUtils.toString(123));
    Assertions.assertEquals("[1, 2, 3]", ArrayUtils.toString((Object) new int[]{1, 2, 3}));
    Assertions.assertEquals("[\"1\", \"2\", \"3\"]", ArrayUtils.toString((Object) new String[]{"1", "2", "3"}));
}
/**
 * Changes an account's phone number, optionally distributing new PNI key
 * material and sync messages to the account's devices.
 *
 * <p>The PNI identity key, signed pre-keys, device messages and registration
 * IDs must be either all present or all absent. A request whose number equals
 * the account's current number is treated as a PNI key distribution (or a
 * no-op when no key material is supplied).
 *
 * @throws IllegalArgumentException if the optional PNI parameters are
 *         partially supplied
 * @throws MismatchedDevicesException if device messages do not cover the
 *         account's devices
 * @throws StaleDevicesException if device registration IDs are stale
 */
public Account changeNumber(final Account account, final String number,
    @Nullable final IdentityKey pniIdentityKey,
    @Nullable final Map<Byte, ECSignedPreKey> deviceSignedPreKeys,
    @Nullable final Map<Byte, KEMSignedPreKey> devicePqLastResortPreKeys,
    @Nullable final List<IncomingMessage> deviceMessages,
    @Nullable final Map<Byte, Integer> pniRegistrationIds)
    throws InterruptedException, MismatchedDevicesException, StaleDevicesException {

    if (ObjectUtils.allNotNull(pniIdentityKey, deviceSignedPreKeys, deviceMessages, pniRegistrationIds)) {
        // AccountsManager validates the device set on deviceSignedPreKeys and pniRegistrationIds
        validateDeviceMessages(account, deviceMessages);
    } else if (!ObjectUtils.allNull(pniIdentityKey, deviceSignedPreKeys, deviceMessages, pniRegistrationIds)) {
        throw new IllegalArgumentException("PNI identity key, signed pre-keys, device messages, and registration IDs must be all null or all non-null");
    }

    if (number.equals(account.getNumber())) {
        // The client has gotten confused/desynchronized with us about their own phone number, most likely due to losing
        // our OK response to an immediately preceding change-number request, and are sending a change they don't realize
        // is a no-op change.
        //
        // We don't need to actually do a number-change operation in our DB, but we *do* need to accept their new key
        // material and distribute the sync messages, to be sure all clients agree with us and each other about what their
        // keys are. Pretend this change-number request was actually a PNI key distribution request.
        if (pniIdentityKey == null) {
            return account;
        }
        return updatePniKeys(account, pniIdentityKey, deviceSignedPreKeys, devicePqLastResortPreKeys, deviceMessages, pniRegistrationIds);
    }

    final Account updatedAccount = accountsManager.changeNumber(
        account, number, pniIdentityKey, deviceSignedPreKeys, devicePqLastResortPreKeys, pniRegistrationIds);

    if (deviceMessages != null) {
        sendDeviceMessages(updatedAccount, deviceMessages);
    }

    return updatedAccount;
}
/**
 * Registration IDs in the request that disagree with the devices' actual
 * registration IDs must cause changeNumber to throw StaleDevicesException.
 */
@Test
void changeNumberMismatchedRegistrationId() {
    final Account account = mock(Account.class);
    when(account.getNumber()).thenReturn("+18005551234");

    // Three devices whose registration IDs equal their device IDs (1..3).
    final List<Device> devices = new ArrayList<>();
    for (byte i = 1; i <= 3; i++) {
        final Device device = mock(Device.class);
        when(device.getId()).thenReturn(i);
        when(device.getRegistrationId()).thenReturn((int) i);

        devices.add(device);
        when(account.getDevice(i)).thenReturn(Optional.of(device));
    }

    when(account.getDevices()).thenReturn(devices);

    final byte destinationDeviceId2 = 2;
    final byte destinationDeviceId3 = 3;
    final List<IncomingMessage> messages = List.of(
        new IncomingMessage(1, destinationDeviceId2, 1, "foo"),
        new IncomingMessage(1, destinationDeviceId3, 1, "foo"));

    final ECKeyPair pniIdentityKeyPair = Curve.generateKeyPair();
    final ECPublicKey pniIdentityKey = pniIdentityKeyPair.getPublicKey();
    final Map<Byte, ECSignedPreKey> preKeys = Map.of(Device.PRIMARY_ID, KeysHelper.signedECPreKey(1, pniIdentityKeyPair),
        destinationDeviceId2, KeysHelper.signedECPreKey(2, pniIdentityKeyPair), destinationDeviceId3, KeysHelper.signedECPreKey(3, pniIdentityKeyPair));

    // Deliberately wrong registration IDs to trigger the staleness check.
    final Map<Byte, Integer> registrationIds = Map.of(Device.PRIMARY_ID, 17, destinationDeviceId2, 47, destinationDeviceId3, 89);

    assertThrows(StaleDevicesException.class,
        () -> changeNumberManager.changeNumber(account, "+18005559876", new IdentityKey(Curve.generateKeyPair().getPublicKey()), preKeys, null, messages, registrationIds));
}
/**
 * Formats the given {@link LocalDateTime} with the class-level
 * {@code DATE_TIME_FORMATTER}.
 *
 * @param localDateTime value to format
 * @return formatted date-time string
 */
public static String localDateTimeToString(final LocalDateTime localDateTime) {
    // Equivalent to DATE_TIME_FORMATTER.format(localDateTime).
    return localDateTime.format(DATE_TIME_FORMATTER);
}
/** Formatting with an explicit pattern drops the time-of-day components. */
@Test
public void testLocalDateTimeToStringWithPattern() {
    LocalDateTime localDateTime = LocalDateTime.of(2020, 1, 1, 23, 50, 0, 0);
    assertEquals("2020-01-01", DateUtils.localDateTimeToString(localDateTime, "yyyy-MM-dd"));
}
/**
 * Parses a 6-byte CGM Feature characteristic: a 24-bit features field, a
 * packed type/sample-location byte, and a 16-bit CRC. Validates the CRC when
 * the features report E2E-CRC support; otherwise the CRC field must be
 * 0xFFFF. Dispatches to the appropriate callback on success or failure.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    if (data.size() != 6) {
        onInvalidDataReceived(device, data);
        return;
    }

    final int featuresValue = data.getIntValue(Data.FORMAT_UINT24_LE, 0);
    final int typeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 3);
    final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 4);

    final CGMFeatures features = new CGMFeatures(featuresValue);
    if (features.e2eCrcSupported) {
        // CRC covers the first 4 bytes (features + type/location).
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 4);
        if (actualCrc != expectedCrc) {
            onContinuousGlucoseMonitorFeaturesReceivedWithCrcError(device, data);
            return;
        }
    } else {
        // If the device doesn't support E2E-safety the value of the field shall be set to 0xFFFF.
        if (expectedCrc != 0xFFFF) {
            onInvalidDataReceived(device, data);
            return;
        }
    }

    @SuppressLint("WrongConstant") final int type = typeAndSampleLocation & 0x0F; // least significant nibble
    final int sampleLocation = typeAndSampleLocation >> 4; // most significant nibble

    onContinuousGlucoseMonitorFeaturesReceived(device, features, type, sampleLocation, features.e2eCrcSupported);
}
/**
 * A 4-byte packet (missing the CRC field) must be reported via
 * onInvalidDataReceived and must not trigger the success or CRC-error paths.
 */
@Test
public void onInvalidDataReceived_noCrc() {
    final DataReceivedCallback callback = new CGMFeatureDataCallback() {
        @Override
        public void onContinuousGlucoseMonitorFeaturesReceived(@NonNull final BluetoothDevice device,
                                                               @NonNull final CGMFeatures features,
                                                               final int type, final int sampleLocation,
                                                               final boolean secured) {
            assertEquals("Invalid data but data reported", 1, 2);
        }

        @Override
        public void onContinuousGlucoseMonitorFeaturesReceivedWithCrcError(@NonNull final BluetoothDevice device,
                                                                           @NonNull final Data data) {
            assertEquals("Invalid data but wrong CRC reported", 1, 2);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            called = true;
        }
    };
    final MutableData data = new MutableData(new byte[4]);
    assertTrue(data.setValue(0b11001111001101110, Data.FORMAT_UINT24_LE, 0));
    assertTrue(data.setValue(0x16, Data.FORMAT_UINT8, 3));
    called = false;
    //noinspection DataFlowIssue
    callback.onDataReceived(null, data);
    assertTrue(called);
}
/**
 * Returns the lazily computed object-name prefix for this URI.
 */
public String getPrefix() {
    return _prefix.get();
}
/**
 * The prefix is empty for bucket-only URIs and always ends with the delimiter
 * when a directory component is present.
 */
@Test
public void testPrefix() {
    // The prefix should be an empty string if no directory is provided.
    // Otherwise, listFiles will return no matches.
    GcsUri gcsUri = new GcsUri(URI.create("gs://bucket_name/"));
    assertEquals("", gcsUri.getPrefix());
    gcsUri = new GcsUri(URI.create("gs://bucket_name"));
    assertEquals("", gcsUri.getPrefix());
    // The prefix should end with the delimiter if there are directories, which
    // ensures searches do not return false positive matches.
    gcsUri = new GcsUri(URI.create("gs://bucket_name/dir/"));
    assertEquals("dir/", gcsUri.getPrefix());
    gcsUri = new GcsUri(URI.create("gs://bucket_name/dir"));
    assertEquals("dir/", gcsUri.getPrefix());
}
/**
 * Checks at most once per day whether a newer version of the tool is
 * available.
 *
 * <p>Reads the timestamp of the last check from a marker file in
 * {@code configDir}; if less than a day old, skips the check. Otherwise
 * fetches the latest version from {@code versionUrl}, atomically rewrites the
 * marker file, and returns the latest version string when it differs from
 * {@code currentVersion}. All failures are logged at debug level and produce
 * an empty result.
 *
 * @return the newer version string, or empty if up to date or the check failed
 */
@VisibleForTesting
static Optional<String> performUpdateCheck(
    Path configDir,
    String currentVersion,
    String versionUrl,
    String toolName,
    Consumer<LogEvent> log) {
    Path lastUpdateCheck = configDir.resolve(LAST_UPDATE_CHECK_FILENAME);
    try {
        // Check time of last update check
        if (Files.exists(lastUpdateCheck)) {
            try {
                String fileContents =
                    new String(Files.readAllBytes(lastUpdateCheck), StandardCharsets.UTF_8);
                Instant modifiedTime = Instant.parse(fileContents);
                if (modifiedTime.plus(Duration.ofDays(1)).isAfter(Instant.now())) {
                    return Optional.empty();
                }
            } catch (DateTimeParseException | IOException ex) {
                // If reading update time failed, file might be corrupt, so delete it
                log.accept(LogEvent.debug("Failed to read lastUpdateCheck; " + ex.getMessage()));
                Files.delete(lastUpdateCheck);
            }
        }

        // Check for update
        FailoverHttpClient httpClient = new FailoverHttpClient(true, false, ignored -> {});
        try {
            Response response =
                httpClient.get(
                    new URL(versionUrl),
                    Request.builder()
                        .setHttpTimeout(3000)
                        .setUserAgent("jib " + currentVersion + " " + toolName)
                        .build());
            VersionJsonTemplate version =
                JsonTemplateMapper.readJson(response.getBody(), VersionJsonTemplate.class);

            // Write to a temp file and move into place so the marker is never
            // observed half-written.
            Path lastUpdateCheckTemp =
                Files.createTempFile(configDir, LAST_UPDATE_CHECK_FILENAME, null);
            lastUpdateCheckTemp.toFile().deleteOnExit();
            Files.write(lastUpdateCheckTemp, Instant.now().toString().getBytes(StandardCharsets.UTF_8));
            Files.move(lastUpdateCheckTemp, lastUpdateCheck, StandardCopyOption.REPLACE_EXISTING);

            if (currentVersion.equals(version.latest)) {
                return Optional.empty();
            }
            return Optional.of(version.latest);
        } finally {
            httpClient.shutDown();
        }

    } catch (IOException ex) {
        log.accept(LogEvent.debug("Update check failed; " + ex.getMessage()));
    }

    return Optional.empty();
}
/**
 * A server error during the update check must be swallowed: the result is
 * empty and only a debug-level log event is emitted.
 */
@Test
public void testPerformUpdateCheck_failSilently()
    throws InterruptedException, GeneralSecurityException, URISyntaxException, IOException {
    String response = "HTTP/1.1 400 Bad Request\nContent-Length: 0\n\n";
    try (TestWebServer badServer =
        new TestWebServer(false, Collections.singletonList(response), 1)) {
        Optional<String> message =
            UpdateChecker.performUpdateCheck(
                configDir,
                "1.0.2",
                badServer.getEndpoint(),
                "tool-name",
                logEvent -> {
                    assertThat(logEvent.getLevel()).isEqualTo(LogEvent.Level.DEBUG);
                    assertThat(logEvent.getMessage()).contains("Update check failed; ");
                });
        assertThat(message).isEmpty();
    }
}
/**
 * Moves the skeleton one step in its current patrol direction and reverses
 * direction when a patrol boundary is reached.
 *
 * <p>Robustness fix: the boundary tests use {@code <=}/{@code >=} instead of
 * {@code ==} so a position initialised outside the patrol bounds still
 * triggers a reversal instead of drifting away forever. Behavior at the exact
 * boundaries is unchanged.
 */
@Override
public void update() {
    if (patrollingLeft) {
        position -= 1;
        if (position <= PATROLLING_LEFT_BOUNDING) {
            patrollingLeft = false;
        }
    } else {
        position += 1;
        if (position >= PATROLLING_RIGHT_BOUNDING) {
            patrollingLeft = true;
        }
    }
    logger.info("Skeleton {} is on position {}.", id, position);
}
/** Reaching the left patrol boundary flips the direction to rightwards. */
@Test
void testUpdateForReverseDirectionFromLeftToRight() {
    skeleton.patrollingLeft = true;
    skeleton.setPosition(1);
    skeleton.update();
    assertEquals(0, skeleton.getPosition());
    assertFalse(skeleton.patrollingLeft);
}
/**
 * Decodes the UTF-8 sequence at the buffer's current position into a Unicode
 * code point, consuming the bytes of the sequence.
 *
 * <p>The first byte is peeked (mark/reset) to determine the sequence length;
 * the switch below deliberately falls through so exactly
 * {@code extraBytesToRead + 1} bytes are accumulated.
 *
 * @param bytes buffer positioned at the start of a UTF-8 sequence
 * @return the decoded code point, or -1 if the first byte is a trailing byte
 */
public static int bytesToCodePoint(ByteBuffer bytes) {
    bytes.mark();
    byte b = bytes.get();
    bytes.reset();
    int extraBytesToRead = bytesFromUTF8[(b & 0xFF)];
    if (extraBytesToRead < 0) return -1; // trailing byte!
    int ch = 0;

    // Intentional fallthrough: each case accumulates one byte and shifts.
    switch (extraBytesToRead) {
    case 5: ch += (bytes.get() & 0xFF); ch <<= 6; /* remember, illegal UTF-8 */
    case 4: ch += (bytes.get() & 0xFF); ch <<= 6; /* remember, illegal UTF-8 */
    case 3: ch += (bytes.get() & 0xFF); ch <<= 6;
    case 2: ch += (bytes.get() & 0xFF); ch <<= 6;
    case 1: ch += (bytes.get() & 0xFF); ch <<= 6;
    case 0: ch += (bytes.get() & 0xFF);
    }
    // Remove the UTF-8 length-prefix bits contributed by the lead byte.
    ch -= offsetsFromUTF8[extraBytesToRead];

    return ch;
}
/**
 * A 6-byte (illegal but tolerated) UTF-8 sequence must be fully consumed and
 * must not raise BufferUnderflowException.
 */
@Test
public void testBytesToCodePoint() {
    try {
        ByteBuffer bytes = ByteBuffer.wrap(new byte[] {-2, 45, 23, 12, 76, 89});
        Text.bytesToCodePoint(bytes);
        assertTrue("testBytesToCodePoint error !!!", bytes.position() == 6 );
    } catch (BufferUnderflowException ex) {
        fail("testBytesToCodePoint unexp exception");
    } catch (Exception e) {
        fail("testBytesToCodePoint unexp exception");
    }
}
/**
 * Maps a bucket id to a partition index.
 *
 * @param bucketId      bucket id in {@code [0, maxNumBuckets)}; must not be null
 * @param numPartitions number of available partitions
 * @return the target partition for the bucket
 * @throws NullPointerException     if {@code bucketId} is null
 * @throws IllegalArgumentException if {@code bucketId} is out of range
 */
@Override
public int partition(Integer bucketId, int numPartitions) {
    // Validate in the same order as before so callers see identical messages.
    Preconditions.checkNotNull(bucketId, BUCKET_NULL_MESSAGE);
    Preconditions.checkArgument(bucketId >= 0, BUCKET_LESS_THAN_LOWER_BOUND_MESSAGE, bucketId);
    Preconditions.checkArgument(
        bucketId < maxNumBuckets, BUCKET_GREATER_THAN_UPPER_BOUND_MESSAGE, bucketId, maxNumBuckets);

    // More writers than buckets: spread each bucket over the surplus partitions.
    if (numPartitions > maxNumBuckets) {
        return getPartitionWithMoreWritersThanBuckets(bucketId, numPartitions);
    }
    return bucketId % numPartitions;
}
/**
 * Bucket ids below zero or at/above the bucket count must be rejected with
 * the corresponding bound-violation message.
 */
@Test
public void testPartitionerBucketIdOutOfRangeFail() {
    PartitionSpec partitionSpec = TableSchemaType.ONE_BUCKET.getPartitionSpec(DEFAULT_NUM_BUCKETS);
    BucketPartitioner bucketPartitioner = new BucketPartitioner(partitionSpec);

    // Lower boundary check
    int negativeBucketId = -1;
    assertThatExceptionOfType(IllegalArgumentException.class)
        .isThrownBy(() -> bucketPartitioner.partition(negativeBucketId, 1))
        .withMessage(BUCKET_LESS_THAN_LOWER_BOUND_MESSAGE, negativeBucketId);

    // Upper boundary check
    int tooBigBucketId = DEFAULT_NUM_BUCKETS;
    assertThatExceptionOfType(IllegalArgumentException.class)
        .isThrownBy(() -> bucketPartitioner.partition(tooBigBucketId, 1))
        .withMessage(BUCKET_GREATER_THAN_UPPER_BOUND_MESSAGE, tooBigBucketId, DEFAULT_NUM_BUCKETS);
}
/**
 * Creates a {@link URepeated} binding the given identifier to the expression.
 */
public static URepeated create(CharSequence identifier, UExpression expression) {
    return new AutoValue_URepeated(identifier.toString(), expression);
}
/**
 * A repeated free identifier unifies with an arbitrary expression and records
 * the binding in the unifier.
 */
@Test
public void unifies() {
    JCExpression expr = parseExpression("\"abcdefg\".charAt(x + 1)");
    URepeated ident = URepeated.create("foo", UFreeIdent.create("foo"));
    assertThat(ident.unify(expr, unifier)).isNotNull();
    assertThat(unifier.getBindings()).containsExactly(new UFreeIdent.Key("foo"), expr);
}
/**
 * Fetches the event's message backlog, truncated to the configured backlog
 * size when that size is positive.
 *
 * @param ctx    notification context used to look up the backlog
 * @param config configuration providing the backlog size limit
 * @return the (possibly truncated) backlog, or {@code null} when the callback
 *         service returns none
 */
@VisibleForTesting
List<MessageSummary> getMessageBacklog(EventNotificationContext ctx, TeamsEventNotificationConfig config) {
    final List<MessageSummary> backlog = notificationCallbackService.getBacklogForEvent(ctx);
    final long limit = config.backlogSize();
    if (backlog == null || limit <= 0) {
        // No backlog available or no truncation configured: pass through.
        return backlog;
    }
    return backlog.stream().limit(limit).collect(Collectors.toList());
}
/**
 * A per-notification backlog size of 5 overrides the global setting and caps
 * the returned backlog at 5 messages.
 */
@Test
public void testBacklogMessageLimitWhenBacklogSizeIsFive() {
    TeamsEventNotificationConfig TeamsConfig = TeamsEventNotificationConfig.builder()
        .backlogSize(5)
        .build();

    //global setting is at N and the message override is 5 then the backlog size = 5
    List<MessageSummary> messageSummaries = teamsEventNotification.getMessageBacklog(eventNotificationContext, TeamsConfig);
    assertThat(messageSummaries.size()).isEqualTo(5);
}
/**
 * Tests range containment while tolerating mixed numeric endpoint types.
 *
 * <p>If {@link Range#contains} throws a {@link ClassCastException} because the
 * endpoint and range bounds have different comparable types, both the range
 * and the endpoint are coerced to a common numeric type and the check is
 * retried. The original exception is rethrown when no common numeric type can
 * be determined.
 */
public static boolean safeContains(final Range<Comparable<?>> range, final Comparable<?> endpoint) {
    try {
        return range.contains(endpoint);
    } catch (final ClassCastException ex) {
        Comparable<?> rangeUpperEndpoint = range.hasUpperBound() ? range.upperEndpoint() : null;
        Comparable<?> rangeLowerEndpoint = range.hasLowerBound() ? range.lowerEndpoint() : null;
        Optional<Class<?>> clazz = getTargetNumericType(Arrays.asList(rangeLowerEndpoint, rangeUpperEndpoint, endpoint));
        if (!clazz.isPresent()) {
            // Not a numeric-type mismatch we can repair; propagate unchanged.
            throw ex;
        }
        Range<Comparable<?>> newRange = createTargetNumericTypeRange(range, clazz.get());
        return newRange.contains(parseNumberByClazz(endpoint.toString(), clazz.get()));
    }
}
/** A value outside a closed integer range is reported as not contained. */
@Test
void assertSafeContainsForInteger() {
    Range<Comparable<?>> range = Range.closed(12, 100);
    assertFalse(SafeNumberOperationUtils.safeContains(range, 500));
}
/**
 * Converts a YAML rule configuration into repository tuples.
 *
 * <p>Returns empty when the configuration class carries no
 * {@link RepositoryTupleEntity} annotation; returns a single marshalled tuple
 * for leaf entities; otherwise walks the configuration's fields and swaps
 * each into tuples guided by the rule's node path.
 */
public Collection<RepositoryTuple> swapToRepositoryTuples(final YamlRuleConfiguration yamlRuleConfig) {
    RepositoryTupleEntity tupleEntity = yamlRuleConfig.getClass().getAnnotation(RepositoryTupleEntity.class);
    if (null == tupleEntity) {
        return Collections.emptyList();
    }
    if (tupleEntity.leaf()) {
        return Collections.singleton(new RepositoryTuple(tupleEntity.value(), YamlEngine.marshal(yamlRuleConfig)));
    }
    Collection<RepositoryTuple> result = new LinkedList<>();
    RuleNodePath ruleNodePath = TypedSPILoader.getService(RuleNodePathProvider.class, yamlRuleConfig.getRuleConfigurationType()).getRuleNodePath();
    for (Field each : getFields(yamlRuleConfig.getClass())) {
        // Temporarily widen access to read private fields, then restore it.
        boolean isAccessible = each.isAccessible();
        each.setAccessible(true);
        result.addAll(swapToRepositoryTuples(yamlRuleConfig, ruleNodePath, each));
        each.setAccessible(isAccessible);
    }
    return result;
}
/** A configuration without tuple annotations yields no repository tuples. */
@Test
void assertSwapToRepositoryTuplesWithEmptyNodeYamlRuleConfiguration() {
    assertTrue(new RepositoryTupleSwapperEngine().swapToRepositoryTuples(new NodeYamlRuleConfiguration()).isEmpty());
}
/**
 * Reads the broker external view from ZooKeeper and builds a map from table
 * name (without the OFFLINE/REALTIME suffix) to the list of ONLINE broker
 * host:port strings serving it. Read failures are logged and yield an empty
 * map.
 */
public Map<String, List<String>> getTableToBrokersMap() {
    Map<String, Set<String>> brokerUrlsMap = new HashMap<>();
    try {
        byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true);
        // The znode payload may be compressed; unpack before parsing.
        brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData);
        JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData));
        JsonNode brokerResourceNode = jsonObject.get("mapFields");

        Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields();
        while (resourceEntries.hasNext()) {
            Entry<String, JsonNode> resourceEntry = resourceEntries.next();
            String resourceName = resourceEntry.getKey();
            // Both table types collapse onto the same logical table name.
            String tableName = resourceName.replace(OFFLINE_SUFFIX, "").replace(REALTIME_SUFFIX, "");
            Set<String> brokerUrls = brokerUrlsMap.computeIfAbsent(tableName, k -> new HashSet<>());
            JsonNode resource = resourceEntry.getValue();
            Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields();
            while (brokerEntries.hasNext()) {
                Entry<String, JsonNode> brokerEntry = brokerEntries.next();
                String brokerName = brokerEntry.getKey();
                // Only instances named Broker_* that are currently ONLINE count.
                if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) {
                    brokerUrls.add(getHostPort(brokerName));
                }
            }
        }
    } catch (Exception e) {
        LOGGER.warn("Exception while reading External view from zookeeper", e);
        // ignore
    }
    Map<String, List<String>> tableToBrokersMap = new HashMap<>();
    for (Entry<String, Set<String>> entry : brokerUrlsMap.entrySet()) {
        tableToBrokersMap.put(entry.getKey(), new ArrayList<>(entry.getValue()));
    }
    return tableToBrokersMap;
}
@Test public void testGetTableToBrokersMap() { // Setup final Map<String, List<String>> expectedResult = new HashMap<>(); expectedResult.put("field1", Arrays.asList("12.34.56.78:1234")); when(_mockZkClient.readData(Mockito.anyString(), Mockito.anyBoolean())).thenReturn("json".getBytes()); // Run the test final Map<String, List<String>> result = _externalViewReaderUnderTest.getTableToBrokersMap(); // Verify the results assertEquals(expectedResult, result); }
/**
 * Sends an HTTP GET request.
 *
 * @param url destination of the request
 * @param request request configuration (headers, body, etc.)
 * @return the server response
 * @throws IOException if the underlying call fails
 */
public Response get(URL url, Request request) throws IOException {
    return call(HttpMethods.GET, url, request);
}
@Test
public void testGet() throws IOException {
    // get() must delegate to call() with the GET method.
    verifyCall(HttpMethods.GET, FailoverHttpClient::get);
}
@GetMapping()
@TpsControl(pointName = "NamingServiceQuery", name = "HttpNamingServiceQuery")
@Secured(action = ActionTypes.READ)
public Result<ServiceDetailInfo> detail(
        @RequestParam(value = "namespaceId", defaultValue = Constants.DEFAULT_NAMESPACE_ID) String namespaceId,
        @RequestParam("serviceName") String serviceName,
        @RequestParam(value = "groupName", defaultValue = Constants.DEFAULT_GROUP) String groupName) throws Exception {
    // Build the service key, then delegate the detail lookup to the v2 operator.
    Service service = Service.newService(namespaceId, groupName, serviceName);
    ServiceDetailInfo detail = serviceOperatorV2.queryService(service);
    return Result.success(detail);
}
@Test
void testDetail() throws Exception {
    // Stub the operator for the default namespace/group and service "service".
    ServiceDetailInfo expected = new ServiceDetailInfo();
    when(serviceOperatorV2.queryService(
            Service.newService(Constants.DEFAULT_NAMESPACE_ID, Constants.DEFAULT_GROUP, "service"))).thenReturn(expected);
    Result<ServiceDetailInfo> actual = serviceController.detail(Constants.DEFAULT_NAMESPACE_ID, "service", Constants.DEFAULT_GROUP);
    // The controller must wrap the operator's result in a success envelope.
    assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
    assertEquals(expected, actual.getData());
}
/**
 * Tests whether this set contains every element of {@code coll}, including
 * the missing-value sentinel, which is tracked outside the values array.
 */
@Override
public boolean containsAll(final IntHashSet coll) {
    // Every real (non-sentinel) value of the other set must be present here.
    for (final int value : coll.values) {
        final boolean isRealValue = MISSING_VALUE != value;
        if (isRealValue && !contains(value)) {
            return false;
        }
    }
    // If the other set holds the sentinel, this set must hold it too.
    if (coll.containsMissingValue) {
        return containsMissingValue;
    }
    return true;
}
@Test
void containsEmptySet() {
    // Any set contains all elements of an empty set, via both the typed
    // overload and the generic Collection overload.
    final IntHashSet other = new IntHashSet(100);
    assertTrue(testSet.containsAll(other));
    assertTrue(testSet.containsAll((Collection<?>)other));
}
/**
 * Resolves the currently active local session for an application, if any.
 */
public Optional<LocalSession> getActiveLocalSession(Tenant tenant, ApplicationId applicationId) {
    // Look up the active session id, then fetch the session from the repository.
    TenantApplications applications = tenant.getApplicationRepo();
    Optional<Long> activeSessionId = applications.activeSessionOf(applicationId);
    return activeSessionId.map(sessionId -> tenant.getSessionRepository().getLocalSession(sessionId));
}
@Test
public void prepareAndActivateWithTenantMetaData() {
    // Record a creation-time and a later deploy-time using the test clock.
    long startTime = clock.instant().toEpochMilli();
    Duration duration = Duration.ofHours(1);
    clock.advance(duration);
    long deployTime = clock.instant().toEpochMilli();
    PrepareResult result = prepareAndActivate(testApp);
    // A fresh deployment should require no config change actions.
    assertTrue(result.configChangeActions().getRefeedActions().isEmpty());
    assertTrue(result.configChangeActions().getReindexActions().isEmpty());
    assertTrue(result.configChangeActions().getRestartActions().isEmpty());
    // Sanity check: the active session is retrievable and has allocated hosts.
    applicationRepository.getActiveLocalSession(tenant(), applicationId()).get().getAllocatedHosts();
    // Tenant metadata keeps the original creation time but updates deploy time.
    assertEquals(startTime, tenantMetaData(tenant()).createdTimestamp().toEpochMilli());
    assertEquals(deployTime, tenantMetaData(tenant()).lastDeployTimestamp().toEpochMilli());

    // Creating a new tenant will have metadata with timestamp equal to current time
    clock.advance(duration);
    long createTenantTime = clock.instant().toEpochMilli();
    Tenant fooTenant = tenantRepository.addTenant(TenantName.from("foo"));
    assertEquals(createTenantTime, tenantMetaData(fooTenant).createdTimestamp().toEpochMilli());
    assertEquals(createTenantTime, tenantMetaData(fooTenant).lastDeployTimestamp().toEpochMilli());
}
/**
 * Parses an HTTP date header value covering the entire character sequence.
 * Delegates to the range-based overload over [0, txt.length()).
 */
public static Date parseHttpDate(CharSequence txt) {
    return parseHttpDate(txt, 0, txt.length());
}
@Test
public void testParseWithDashSeparatorSingleDigitDay() {
    // Legacy cookie-style date: dash-separated with a single-digit day.
    assertEquals(DATE, parseHttpDate("Sunday, 6-Nov-94 08:49:37 GMT"));
}
/**
 * Intentionally a no-op: in this (finished) state a suspend request has no
 * effect, so the cause is ignored.
 */
@Override
public void suspend(Throwable cause) {}
@Test
void testSuspendIgnored() {
    // Suspending a finished state must leave the archived graph's status untouched.
    MockFinishedContext context = new MockFinishedContext();
    createFinishedState(context).suspend(new RuntimeException());
    assertThat(context.getArchivedExecutionGraph().getState()).isEqualTo(testJobStatus);
}
/**
 * Switches between colorless and ANSI-colored output formats.
 */
@Override
public void setMonochrome(boolean monochrome) {
    if (monochrome) {
        formats = monochrome();
    } else {
        formats = ansi();
    }
}
@Test
void should_print_error_message_for_before_hooks() {
    // One-scenario feature; a failing before-hook should surface its stack
    // trace in the pretty output ahead of the first step.
    Feature feature = TestFeatureParser.parse("path/test.feature", "" +
            "Feature: feature name\n" +
            "  Scenario: scenario name\n" +
            "    Given first step\n");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Runtime.builder()
            .withFeatureSupplier(new StubFeatureSupplier(feature))
            .withAdditionalPlugins(new PrettyFormatter(out))
            // Monochrome output keeps the expected text free of ANSI codes.
            .withRuntimeOptions(new RuntimeOptionsBuilder().setMonochrome().build())
            .withBackendSupplier(new StubBackendSupplier(
                    singletonList(new StubHookDefinition(new StubException())),
                    singletonList(new StubStepDefinition("first step", "path/step_definitions.java:3")),
                    emptyList()))
            .build()
            .run();

    assertThat(out, bytes(equalToCompressingWhiteSpace("" +
            "Scenario: scenario name # path/test.feature:2\n" +
            "      the stack trace\n" +
            "  Given first step      # path/step_definitions.java:3\n")));
}
/**
 * Executes an INSERT VALUES statement by serializing the row into a Kafka
 * record and producing it to the data source's topic.
 * Authorization failures are re-wrapped with ACL context so the error message
 * is consistent with other authorization errors; any other failure is wrapped
 * in a KsqlException carrying the insert-failed message.
 */
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  final InsertValues insertValues = statement.getStatement();
  final MetaStore metaStore = executionContext.getMetaStore();
  final KsqlConfig config = statement.getSessionConfig().getConfig(true);

  final DataSource dataSource = getDataSource(config, metaStore, insertValues);

  // Reject inserts into columns the source does not accept before producing.
  validateInsert(insertValues.getColumns(), dataSource);

  final ProducerRecord<byte[], byte[]> record = buildRecord(statement, metaStore, dataSource, serviceContext);

  try {
    producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
  } catch (final TopicAuthorizationException e) {
    // TopicAuthorizationException does not give much detailed information about why it failed,
    // except which topics are denied. Here we just add the ACL to make the error message
    // consistent with other authorization error messages.
    final Exception rootCause = new KsqlTopicAuthorizationException(
        AclOperation.WRITE,
        e.unauthorizedTopics()
    );

    throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
  } catch (final ClusterAuthorizationException e) {
    // ClusterAuthorizationException is thrown when using idempotent producers
    // and either a topic write permission or a cluster-level idempotent write
    // permission (only applicable for broker versions no later than 2.8) is
    // missing. In this case, we include additional context to help the user
    // distinguish this type of failure from other permissions exceptions
    // such as the ones thrown above when TopicAuthorizationException is caught.
    throw new KsqlException(
        createInsertFailedExceptionMessage(insertValues),
        createClusterAuthorizationExceptionRootCause(dataSource)
    );
  } catch (final KafkaException e) {
    if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
      // The error message thrown when an idempotent producer is missing permissions
      // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
      // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
      // ksqlDB handles these two the same way, accordingly.
      // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
      throw new KsqlException(
          createInsertFailedExceptionMessage(insertValues),
          createClusterAuthorizationExceptionRootCause(dataSource)
      );
    } else {
      throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
  } catch (final Exception e) {
    throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
  }
}
@Test
public void shouldFailOnDowncast() {
    // Given: a DOUBLE literal inserted into an INTEGER column must be rejected
    // rather than silently narrowed.
    givenSourceStreamWithSchema(BIG_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of());

    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        ImmutableList.of(INT_COL),
        ImmutableList.of(
            new DoubleLiteral(1.1)
        )
    );

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
    );

    // Then:
    assertThat(e.getCause(), (hasMessage(containsString("Expected type INTEGER for field"))));
}
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/export/by-query", produces = MediaType.APPLICATION_OCTET_STREAM)
@Operation(
    tags = {"Templates"},
    summary = "Export templates as a ZIP archive of yaml sources."
)
public HttpResponse<byte[]> exportByQuery(
    @Parameter(description = "A string filter") @Nullable @QueryValue(value = "q") String query,
    @Parameter(description = "A namespace filter prefix") @Nullable @QueryValue String namespace
) throws IOException {
    // Find the matching templates for the current tenant and bundle them into a ZIP.
    var matchingTemplates = templateRepository.find(tenantService.resolveTenant(), query, namespace);
    var zipBytes = zipTemplates(matchingTemplates);
    return HttpResponse.ok(zipBytes)
        .header("Content-Disposition", "attachment; filename=\"templates.zip\"");
}
@Test
void exportByQuery() throws IOException {
    // create 3 templates, so we have at least 3 of them
    client.toBlocking().retrieve(POST("/api/v1/templates", createTemplate()), Template.class);
    client.toBlocking().retrieve(POST("/api/v1/templates", createTemplate()), Template.class);
    client.toBlocking().retrieve(POST("/api/v1/templates", createTemplate()), Template.class);

    // The search endpoint gives the authoritative count to compare against.
    int size = client.toBlocking().retrieve(HttpRequest.GET("/api/v1/templates/search?namespace=kestra.test"), Argument.of(PagedResults.class, Template.class)).getResults().size();

    byte[] zip = client.toBlocking().retrieve(HttpRequest.GET("/api/v1/templates/export/by-query?namespace=kestra.test"), Argument.of(byte[].class));
    File file = File.createTempFile("templates", ".zip");
    Files.write(file.toPath(), zip);

    // The exported archive must contain one entry per template found by search.
    try (ZipFile zipFile = new ZipFile(file)) {
        assertThat(zipFile.stream().count(), is((long) size));
    }

    file.delete();
}
/**
 * Inserts or replaces the value for {@code key}, returning the previous
 * value or the null-sentinel when the key was absent.
 */
@Override
public long put(long key, long value) {
    assert value != nullValue : "put() called with null-sentinel value " + nullValue;
    // Reserve (or locate) the slot, capture any previous value, then overwrite.
    SlotAssignmentResult slot = hsa.ensure(key);
    long previous = slot.isNew() ? nullValue : mem.getLong(slot.address());
    mem.putLong(slot.address(), value);
    return previous;
}
@Test
public void testPut_withTheSameValue() {
    // Re-inserting the same key/value pair must return the previously stored value.
    long key = newKey();
    long value = newValue();
    map.put(key, value);
    long previous = map.put(key, value);
    assertEqualsKV(value, previous, key, value);
}
/**
 * Matches a pickle when every configured tag expression accepts its tags;
 * with no expressions configured, everything matches.
 */
@Override
public boolean test(Pickle pickle) {
    if (expressions.isEmpty()) {
        return true;
    }
    List<String> tags = pickle.getTags();
    // Equivalent to allMatch: no expression may reject the tags.
    return expressions.stream()
            .noneMatch(expression -> !expression.evaluate(tags));
}
@Test
void not_tag_predicate_does_not_match_pickle_with_same_single_tag() {
    // "not @FOO" must reject a pickle tagged exactly @FOO.
    Pickle pickle = createPickleWithTags("@FOO");
    TagPredicate predicate = createPredicate("not @FOO");
    assertFalse(predicate.test(pickle));
}
/**
 * (Re-)creates the SSL trust store file for the Kafka consumer from the
 * Base64-encoded server certificate carried in {@code consumerProps}, and
 * writes the resolved trust-store type back into the properties.
 */
@VisibleForTesting
static void initTrustStore(Properties consumerProps) {
  Path trustStorePath = getTrustStorePath(consumerProps);
  // Start from a clean slate: remove any stale trust store file.
  if (Files.exists(trustStorePath)) {
    deleteFile(trustStorePath);
  }
  LOGGER.info("Initializing the SSL trust store");
  try {
    // Create the trust store path
    createFile(trustStorePath);
  } catch (FileAlreadyExistsException fex) {
    // Another thread/process won the creation race; leave its store in place.
    LOGGER.warn("SSL trust store initialization failed as trust store already exists.");
    return;
  } catch (IOException iex) {
    throw new RuntimeException(String.format("Failed to create the trust store path: %s", trustStorePath), iex);
  }
  try {
    String trustStorePassword = consumerProps.getProperty(SSL_TRUSTSTORE_PASSWORD);
    String serverCertificate = consumerProps.getProperty(STREAM_KAFKA_SSL_SERVER_CERTIFICATE);
    String certificateType = consumerProps.getProperty(STREAM_KAFKA_SSL_CERTIFICATE_TYPE, DEFAULT_CERTIFICATE_TYPE);
    String trustStoreType = consumerProps.getProperty(SSL_TRUSTSTORE_TYPE, DEFAULT_TRUSTSTORE_TYPE);
    // Persist the (possibly defaulted) type so the consumer config is complete.
    consumerProps.setProperty(SSL_TRUSTSTORE_TYPE, trustStoreType);

    // Decode the Base64 string
    byte[] certBytes = Base64.getDecoder().decode(serverCertificate);
    InputStream certInputStream = new ByteArrayInputStream(certBytes);

    // Create a Certificate object
    CertificateFactory certificateFactory = CertificateFactory.getInstance(certificateType);
    Certificate certificate = certificateFactory.generateCertificate(certInputStream);

    // Create a TrustStore and load the default TrustStore
    KeyStore trustStore = KeyStore.getInstance(trustStoreType);

    // Initialize the TrustStore
    trustStore.load(null, null);

    // Add the server certificate to the truststore
    trustStore.setCertificateEntry(DEFAULT_SERVER_ALIAS, certificate);

    // Save the keystore to a file
    try (FileOutputStream fos = new FileOutputStream(trustStorePath.toString())) {
      trustStore.store(fos, trustStorePassword.toCharArray());
    }
    LOGGER.info("Initialized the SSL trust store.");
  } catch (Exception ex) {
    throw new RuntimeException("Error initializing the SSL trust store", ex);
  }
}
@Test
public void testInitTrustStore()
    throws CertificateException, NoSuchAlgorithmException, OperatorCreationException, NoSuchProviderException,
           IOException, KeyStoreException {
    // A valid certificate in the props must produce a store with exactly one entry.
    Properties consumerProps = new Properties();
    setTrustStoreProps(consumerProps);
    // should not throw any exceptions
    KafkaSSLUtils.initTrustStore(consumerProps);
    validateTrustStoreCertificateCount(1);
}
/**
 * Loads the full detail row of one config history entry by its primary key
 * {@code nid}. Returns whatever {@code queryOne} yields when no row matches
 * (presumably null — confirm against DatabaseOperate's contract).
 */
@Override
public ConfigHistoryInfo detailConfigHistory(Long nid) {
    HistoryConfigInfoMapper historyConfigInfoMapper = mapperManager.findMapper(
            dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
    // Select all detail columns, filtered by the nid primary key.
    String sqlFetchRows = historyConfigInfoMapper.select(
            Arrays.asList("nid", "data_id", "group_id", "tenant_id", "app_name", "content", "md5", "src_user", "src_ip",
                    "op_type", "gmt_create", "gmt_modified", "encrypted_data_key"),
            Collections.singletonList("nid"));
    return databaseOperate.queryOne(sqlFetchRows, new Object[] {nid}, HISTORY_DETAIL_ROW_MAPPER);
}
@Test
void testDetailConfigHistory() {
    long nid = 256789;
    //mock query
    ConfigHistoryInfo mockConfigHistoryInfo = createMockConfigHistoryInfo(0);
    // Stub the single-row query keyed by nid with the detail row mapper.
    Mockito.when(databaseOperate.queryOne(anyString(), eq(new Object[] {nid}), eq(HISTORY_DETAIL_ROW_MAPPER)))
            .thenReturn(mockConfigHistoryInfo);
    //execute & verify
    ConfigHistoryInfo historyReturn = embeddedHistoryConfigInfoPersistService.detailConfigHistory(nid);
    assertEquals(mockConfigHistoryInfo, historyReturn);
}
/**
 * Returns the transform function's registered name.
 */
@Override
public String getName() {
    return FUNCTION_NAME;
}
@Test
public void testSubtractionNullColumn() {
    // sub(col, 0) over a nullable int column should pass values through while
    // null rows are reported via the null bitmap.
    ExpressionContext expression = RequestContextUtils.getExpression(String.format("sub(%s, 0)", INT_SV_NULL_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof SubtractionTransformFunction);
    Assert.assertEquals(transformFunction.getName(), TransformFunctionType.SUB.getName());
    double[] expectedValues = new double[NUM_ROWS];
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    for (int i = 0; i < NUM_ROWS; i++) {
        if (isNullRow(i)) {
            // Null input rows are collected into the expected null bitmap.
            roaringBitmap.add(i);
        } else {
            expectedValues[i] = _intSVValues[i];
        }
    }
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
/**
 * Builds SCM info from persisted source lines; absent when no line carries
 * a changeset.
 */
public static Optional<DbScmInfo> create(List<DbFileSources.Line> lines, int lineCount, String fileHash) {
    LineToChangeset lineToChangeset = new LineToChangeset();
    Changeset[] changesetsByLine = new Changeset[lineCount];
    boolean anyChangeset = false;
    for (DbFileSources.Line line : lines) {
        Changeset changeset = lineToChangeset.apply(line);
        if (changeset != null) {
            // Line numbers are 1-based in the persisted data; the array is 0-based.
            changesetsByLine[line.getLine() - 1] = changeset;
            anyChangeset = true;
        }
    }
    if (!anyChangeset) {
        return Optional.empty();
    }
    return Optional.of(new DbScmInfo(new ScmInfoImpl(changesetsByLine), fileHash));
}
@Test
public void return_absent_dsm_info_when_no_changeset() {
    // A line without changeset data must yield an absent DbScmInfo.
    DbFileSources.Data.Builder fileDataBuilder = DbFileSources.Data.newBuilder();
    fileDataBuilder.addLinesBuilder().setLine(1);
    assertThat(DbScmInfo.create(fileDataBuilder.getLinesList(), 1, "hash")).isNotPresent();
}
/**
 * Parses a raw XML config string into an editable CruiseConfig, stamping it
 * with the content's MD5 and a file-based origin.
 */
public CruiseConfig deserializeConfig(String content) throws Exception {
    // MD5 of the raw XML is used as the optimistic-lock / cache token.
    String md5 = md5Hex(content);
    // NOTE(review): content.getBytes() uses the platform default charset —
    // confirm the XML is always safe under that charset or pass one explicitly.
    Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
    LOGGER.debug("[Config Save] Updating config cache with new XML");

    CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
    setMd5(configForEdit, md5);
    // Config deserialized this way originates from the file-based config repository.
    configForEdit.setOrigins(new FileConfigOrigin());
    return configForEdit;
}
@Test
void shouldLoadRakeBuilderWithEmptyOnCancel() throws Exception {
    // The rake task in the fixture config must deserialize into a RakeTask.
    CruiseConfig cruiseConfig = xmlLoader.deserializeConfig(CONFIG_WITH_NANT_AND_EXEC_BUILDER);

    JobConfig plan = cruiseConfig.jobConfigByName("pipeline1", "mingle", "cardlist", true);
    RakeTask builder = (RakeTask) plan.tasks().findFirstByType(RakeTask.class);
    assertThat(builder).isNotNull();
}
/**
 * Asynchronously fetches the children of the node for {@code listenTo} and
 * merges them into a single property value delivered via {@code callback}.
 * A missing node is reported as success with a null value.
 */
@Override
public void get(final String listenTo, final Callback<T> callback) {
    String path = getPath(listenTo);
    // Note ChildrenCallback is compatible with a ZK 3.2 server; Children2Callback is
    // compatible only with ZK 3.3+ server.
    AsyncCallback.ChildrenCallback zkCallback = new AsyncCallback.ChildrenCallback() {
        @Override
        public void processResult(int rc, String path, Object context, List<String> children) {
            KeeperException.Code result = KeeperException.Code.get(rc);
            switch (result) {
                case NONODE:
                    // Absent node is not an error: report "no value present".
                    callback.onSuccess(null);
                    break;
                case OK:
                    // Filter the children, then merge them into the final value.
                    getMergedChildren(path, _zookeeperChildFilter.filter(children), null, callback);
                    break;
                default:
                    callback.onError(KeeperException.create(result));
                    break;
            }
        }
    };
    _zk.getChildren(path, null, zkCallback, null);
}
/**
 * Verifies that shutting down the store completes its callback within the
 * timeout.
 */
@Test(groups = { "small", "back-end" })
public void testShutdown()
        throws InterruptedException, IOException, PropertyStoreException, ExecutionException {
    PropertyStore<String> store = getStore();

    final FutureCallback<None> callback = new FutureCallback<>();
    store.shutdown(callback);
    try {
        callback.get(5, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        // Attach the cause so a timeout is distinguishable from a shutdown error
        // (the original fail(...) swallowed the exception entirely).
        fail("unable to shut down store", e);
    }
}
/**
 * Decides whether a statement qualifies as a scalable push (push v2) query:
 * an EMIT CHANGES query over a single non-join source, reading from "latest",
 * with no aggregation/windowing clauses and no pseudo-columns, fed by exactly
 * one upstream persistent query.
 */
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
    final Statement statement,
    final KsqlExecutionContext ksqlEngine,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overrides
) {
  // Feature flag: push v2 must be enabled via config or per-request override.
  if (!isPushV2Enabled(ksqlConfig, overrides)) {
    return false;
  }
  if (! (statement instanceof Query)) {
    return false;
  }
  final Query query = (Query) statement;
  final SourceFinder sourceFinder = new SourceFinder();
  sourceFinder.process(query.getFrom(), null);
  // It will be present if it's not a join, which we don't handle
  if (!sourceFinder.getSourceName().isPresent()) {
    return false;
  }
  // Find all of the writers to this particular source.
  final SourceName sourceName = sourceFinder.getSourceName().get();
  final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
  // See if the config or override have set the stream to be "latest"
  final boolean isLatest = isLatest(ksqlConfig, overrides);
  // Cannot be a pull query, i.e. must be a push
  return !query.isPullQuery()
      // Group by is not supported
      && !query.getGroupBy().isPresent()
      // Windowing is not supported
      && !query.getWindow().isPresent()
      // Having clause is not supported
      && !query.getHaving().isPresent()
      // Partition by is not supported
      && !query.getPartitionBy().isPresent()
      // There must be an EMIT CHANGES clause
      && (query.getRefinement().isPresent()
          && query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
      // Must be reading from "latest"
      && isLatest
      // We only handle a single sink source at the moment from a CTAS/CSAS
      && upstreamQueries.size() == 1
      // ROWPARTITION and ROWOFFSET are not currently supported in SPQs
      && !containsDisallowedColumns(query);
}
@Test
public void isScalablePushQuery_false_hasWindow() {
    try(MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
        // When: an otherwise-qualifying query carries a window expression.
        expectIsSPQ(ColumnName.of("foo"), columnExtractor);
        when(query.getWindow()).thenReturn(Optional.of(new WindowExpression("foo",
            new TumblingWindowExpression(new WindowTimeClause(1, TimeUnit.MILLISECONDS)))));

        // Then: windowed queries are not eligible for scalable push.
        assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig,
            ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")), equalTo(false));
    }
}
/**
 * Builds a write transform that stores request/response pairs in Redis with
 * the given expiry.
 *
 * @throws NonDeterministicException if either coder is non-deterministic
 *         (cache keys must be byte-stable)
 */
static <RequestT, ResponseT>
    PTransform<PCollection<KV<RequestT, ResponseT>>, Result<KV<RequestT, ResponseT>>> writeUsingRedis(
        Duration expiry,
        RedisClient client,
        Coder<RequestT> requestTCoder,
        Coder<@Nullable ResponseT> responseTCoder)
        throws NonDeterministicException {
  return write(
      new UsingRedis<>(requestTCoder, responseTCoder, client).write(expiry),
      KvCoder.of(requestTCoder, responseTCoder));
}
@Test
public void givenNonDeterministicCoder_writeUsingRedis_throwsError()
    throws Coder.NonDeterministicException {
  URI uri = URI.create("redis://localhost:6379");
  Duration expiry = Duration.standardSeconds(1L);
  // A non-deterministic request coder must be rejected.
  assertThrows(
      NonDeterministicException.class,
      () ->
          Cache.writeUsingRedis(
              expiry,
              new RedisClient(uri),
              CallTest.NON_DETERMINISTIC_REQUEST_CODER,
              CallTest.DETERMINISTIC_RESPONSE_CODER));

  // A non-deterministic response coder must be rejected too.
  assertThrows(
      NonDeterministicException.class,
      () ->
          Cache.writeUsingRedis(
              expiry,
              new RedisClient(uri),
              CallTest.DETERMINISTIC_REQUEST_CODER,
              CallTest.NON_DETERMINISTIC_RESPONSE_CODER));

  // Deterministic coders on both sides must succeed.
  Cache.writeUsingRedis(
      expiry,
      new RedisClient(uri),
      CallTest.DETERMINISTIC_REQUEST_CODER,
      CallTest.DETERMINISTIC_RESPONSE_CODER);
}
/**
 * Serializes the full server state as a JSON object keyed by the data-format
 * version, with one array per entity kind, then lets the first extension that
 * supports this version append its data.
 */
@Override
public void exportData(JsonWriter writer) throws IOException {

    // version tag at the root
    writer.name(THIS_VERSION);

    writer.beginObject();

    // clients list
    writer.name(CLIENTS);
    writer.beginArray();
    writeClients(writer);
    writer.endArray();

    writer.name(GRANTS);
    writer.beginArray();
    writeGrants(writer);
    writer.endArray();

    writer.name(WHITELISTEDSITES);
    writer.beginArray();
    writeWhitelistedSites(writer);
    writer.endArray();

    writer.name(BLACKLISTEDSITES);
    writer.beginArray();
    writeBlacklistedSites(writer);
    writer.endArray();

    writer.name(AUTHENTICATIONHOLDERS);
    writer.beginArray();
    writeAuthenticationHolders(writer);
    writer.endArray();

    writer.name(ACCESSTOKENS);
    writer.beginArray();
    writeAccessTokens(writer);
    writer.endArray();

    writer.name(REFRESHTOKENS);
    writer.beginArray();
    writeRefreshTokens(writer);
    writer.endArray();

    writer.name(SYSTEMSCOPES);
    writer.beginArray();
    writeSystemScopes(writer);
    writer.endArray();

    // Only the first extension supporting this version contributes data.
    for (MITREidDataServiceExtension extension : extensions) {
        if (extension.supportsVersion(THIS_VERSION)) {
            extension.exportExtensionData(writer);
            break;
        }
    }

    writer.endObject(); // end mitreid-connect-1.3
}
@Test
public void testExportAuthenticationHolders() throws IOException {
    // Build two holders: one with a saved user authentication, one without.
    OAuth2Request req1 = new OAuth2Request(new HashMap<String, String>(), "client1", new ArrayList<GrantedAuthority>(),
            true, new HashSet<String>(), new HashSet<String>(), "http://foo.com", new HashSet<String>(), null);
    Authentication mockAuth1 = new UsernamePasswordAuthenticationToken("user1", "pass1", AuthorityUtils.commaSeparatedStringToAuthorityList("ROLE_USER"));
    OAuth2Authentication auth1 = new OAuth2Authentication(req1, mockAuth1);

    AuthenticationHolderEntity holder1 = new AuthenticationHolderEntity();
    holder1.setId(1L);
    holder1.setAuthentication(auth1);

    OAuth2Request req2 = new OAuth2Request(new HashMap<String, String>(), "client2", new ArrayList<GrantedAuthority>(),
            true, new HashSet<String>(), new HashSet<String>(), "http://bar.com", new HashSet<String>(), null);
    OAuth2Authentication auth2 = new OAuth2Authentication(req2, null);

    AuthenticationHolderEntity holder2 = new AuthenticationHolderEntity();
    holder2.setId(2L);
    holder2.setAuthentication(auth2);

    List<AuthenticationHolderEntity> allAuthHolders = ImmutableList.of(holder1, holder2);

    // All other repositories are empty so the export contains only holders.
    when(clientRepository.getAllClients()).thenReturn(new HashSet<ClientDetailsEntity>());
    when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
    when(wlSiteRepository.getAll()).thenReturn(new HashSet<WhitelistedSite>());
    when(blSiteRepository.getAll()).thenReturn(new HashSet<BlacklistedSite>());
    when(authHolderRepository.getAll()).thenReturn(allAuthHolders);
    when(tokenRepository.getAllAccessTokens()).thenReturn(new HashSet<OAuth2AccessTokenEntity>());
    when(tokenRepository.getAllRefreshTokens()).thenReturn(new HashSet<OAuth2RefreshTokenEntity>());
    when(sysScopeRepository.getAll()).thenReturn(new HashSet<SystemScope>());

    // do the data export
    StringWriter stringWriter = new StringWriter();
    JsonWriter writer = new JsonWriter(stringWriter);
    writer.beginObject();
    dataService.exportData(writer);
    writer.endObject();
    writer.close();

    // parse the output as a JSON object for testing
    JsonElement elem = new JsonParser().parse(stringWriter.toString());
    JsonObject root = elem.getAsJsonObject();

    // make sure the root is there
    assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));

    JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();

    // make sure all the root elements are there
    assertThat(config.has(MITREidDataService.CLIENTS), is(true));
    assertThat(config.has(MITREidDataService.GRANTS), is(true));
    assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
    assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
    assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
    assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
    assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
    assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));

    // make sure the root elements are all arrays
    assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
    assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));

    // check our holder list (this test)
    JsonArray holders = config.get(MITREidDataService.AUTHENTICATIONHOLDERS).getAsJsonArray();

    assertThat(holders.size(), is(2));
    // check for both of our clients in turn
    Set<AuthenticationHolderEntity> checked = new HashSet<>();
    for (JsonElement e : holders) {
        assertThat(e.isJsonObject(), is(true));
        JsonObject holder = e.getAsJsonObject();

        AuthenticationHolderEntity compare = null;
        // Match the serialized holder back to its source entity by id.
        if (holder.get("id").getAsLong() == holder1.getId()) {
            compare = holder1;
        } else if (holder.get("id").getAsLong() == holder2.getId()) {
            compare = holder2;
        }

        if (compare == null) {
            fail("Could not find matching authentication holder id: " + holder.get("id").getAsString());
        } else {
            assertTrue(holder.get("clientId").getAsString().equals(compare.getClientId()));
            assertTrue(holder.get("approved").getAsBoolean() == compare.isApproved());
            assertTrue(holder.get("redirectUri").getAsString().equals(compare.getRedirectUri()));
            // Saved user authentication is only serialized when present.
            if (compare.getUserAuth() != null) {
                assertTrue(holder.get("savedUserAuthentication").isJsonObject());
                JsonObject savedAuth = holder.get("savedUserAuthentication").getAsJsonObject();
                assertTrue(savedAuth.get("name").getAsString().equals(compare.getUserAuth().getName()));
                assertTrue(savedAuth.get("authenticated").getAsBoolean() == compare.getUserAuth().isAuthenticated());
                assertTrue(savedAuth.get("sourceClass").getAsString().equals(compare.getUserAuth().getSourceClass()));
            }
            checked.add(compare);
        }
    }
    // make sure all of our clients were found
    assertThat(checked.containsAll(allAuthHolders), is(true));
}
/**
 * Formats a built-in quality-profile-change notification into a plain-text
 * email listing, per profile, a changelog link and rule add/update/remove
 * counts. Returns null for notification types this formatter does not handle.
 */
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
    if (!BuiltInQPChangeNotification.TYPE.equals(notification.getType())) {
        return null;
    }
    BuiltInQPChangeNotificationBuilder profilesNotification = parse(notification);
    StringBuilder message = new StringBuilder("The following built-in profiles have been updated:\n\n");
    profilesNotification.getProfiles().stream()
        // Stable ordering: by language name, then profile name.
        .sorted(Comparator.comparing(Profile::getLanguageName).thenComparing(Profile::getProfileName))
        .forEach(profile -> {
            // Changelog URL scoped to this profile and the update time window.
            message.append("\"")
                .append(profile.getProfileName())
                .append("\" - ")
                .append(profile.getLanguageName())
                .append(": ")
                .append(server.getPublicRootUrl()).append("/profiles/changelog?language=")
                .append(profile.getLanguageKey())
                .append("&name=")
                .append(encode(profile.getProfileName()))
                .append("&since=")
                .append(formatDate(new Date(profile.getStartDate())))
                .append("&to=")
                .append(formatDate(new Date(profile.getEndDate())))
                .append("\n");
            int newRules = profile.getNewRules();
            if (newRules > 0) {
                message.append(" ").append(newRules).append(" new rule")
                    .append(plural(newRules))
                    .append('\n');
            }
            int updatedRules = profile.getUpdatedRules();
            if (updatedRules > 0) {
                message.append(" ").append(updatedRules).append(" rule")
                    .append(updatedRules > 1 ? "s have been updated" : " has been updated")
                    .append("\n");
            }
            int removedRules = profile.getRemovedRules();
            if (removedRules > 0) {
                message.append(" ").append(removedRules).append(" rule")
                    .append(plural(removedRules))
                    .append(" removed\n");
            }
            message.append("\n");
        });
    message.append("This is a good time to review your quality profiles and update them to benefit from the latest evolutions: ");
    message.append(server.getPublicRootUrl()).append("/profiles");

    // And finally return the email that will be sent
    return new EmailMessage()
        .setMessageId(BuiltInQPChangeNotification.TYPE)
        .setSubject("Built-in quality profiles have been updated")
        .setPlainTextMessage(message.toString());
}
@Test
public void notification_contains_encoded_profile_name() {
    // Profile name with a space must be URL-encoded ("Sonar Way" -> "Sonar+Way").
    BuiltInQPChangeNotificationBuilder notification = new BuiltInQPChangeNotificationBuilder()
        .addProfile(Profile.newBuilder()
            .setProfileName("Sonar Way")
            .setLanguageKey("java")
            .setLanguageName(newLanguageName())
            .build());

    EmailMessage emailMessage = underTest.format(notification.build());

    assertThat(emailMessage.getMessage()).contains(server.getPublicRootUrl() + "/profiles/changelog?language=java&name=Sonar+Way");
}
/**
 * Parses a NETCONF rpc-reply XML document into a {@link NetconfRpcReply}.
 * Returns null when the reader cannot be created (existing contract kept).
 *
 * @param xml raw rpc-reply XML received from the device
 * @return parsed reply, or null on reader-creation failure
 */
public static NetconfRpcReply parseRpcReply(CharSequence xml) {
    XMLInputFactory xif = XMLInputFactory.newFactory();
    // Harden against XXE: replies originate from remote NETCONF devices, so
    // disable DTD processing and external entity resolution before parsing.
    xif.setProperty(XMLInputFactory.SUPPORT_DTD, false);
    xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
    try {
        XMLStreamReader xsr = xif.createXMLStreamReader(CharSource.wrap(xml).openStream());
        return parseRpcReply(xsr);
    } catch (XMLStreamException | IOException e) {
        log.error("Exception thrown creating XMLStreamReader", e);
        return null;
    }
}
@Test
public void testOkParse() {
    // An <ok/> rpc-reply must parse with its message-id preserved.
    NetconfRpcReply ok = NetconfRpcParserUtil.parseRpcReply(OK_DATA1);
    assertThat(ok.isOk(), is(true));
    assertThat(ok.messageId(), is("3"));
}
/**
 * Splits paragraph text into interpreter name, local properties and script
 * body, e.g. {@code "%spark(k=v) code"} -> intpText "spark", property k=v,
 * script "code". Without a "%intp" directive the whole text is the script.
 */
public static ParseResult parse(String text) {
    Map<String, String> localProperties = new HashMap<>();
    String intpText = "";
    String scriptText = null;

    Matcher matcher = REPL_PATTERN.matcher(text);
    if (matcher.find()) {
        String headingSpace = matcher.group(1);
        intpText = matcher.group(2);
        // Position just past "%<intp>"; a '(' here starts local properties.
        int startPos = headingSpace.length() + intpText.length() + 1;
        if (startPos < text.length() && text.charAt(startPos) == '(') {
            startPos = parseLocalProperties(text, startPos, localProperties);
        }
        scriptText = text.substring(startPos);
    } else {
        // No interpreter directive: the entire text is the script.
        intpText = "";
        scriptText = text;
    }
    return new ParseResult(intpText, removeLeadingWhiteSpaces(scriptText), localProperties);
}
@Test
void testSparkSubmit() {
    // Interpreter names may contain dashes (e.g. "spark-submit").
    String paragraph = "%spark-submit --class A a.jar";
    ParagraphTextParser.ParseResult result = ParagraphTextParser.parse(paragraph);
    assertEquals("spark-submit", result.getIntpText());
    assertEquals("--class A a.jar", result.getScriptText());
}