focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static Method getMostSpecificMethod(Method method, Class<?> targetClass) { if (targetClass != null && targetClass != method.getDeclaringClass() && isOverridable(method, targetClass)) { try { if (Modifier.isPublic(method.getModifiers())) { try { return targetClass.getMethod(method.getName(), method.getParameterTypes()); } catch (NoSuchMethodException ex) { return method; } } else { return method; } } catch (SecurityException ex) { // Security settings are disallowing reflective access; fall back to 'method' below. } } return method; }
@Test
public void testGetMostSpecificMethodFromSameClass() throws NoSuchMethodException {
    // When the target class is the declaring class itself, the original method
    // is expected back unchanged.
    Method method = AbstractMap.class.getDeclaredMethod("clone");
    Method specificMethod = ClassUtils.getMostSpecificMethod(method, AbstractMap.class);
    assertEquals(AbstractMap.class, specificMethod.getDeclaringClass());
}
public Duration delay(int attempt) {
    // Returns a randomized backoff delay for the given 1-based attempt number,
    // capped at maxDelay.
    // NOTE(review): despite the class name, the base delay grows linearly
    // (attempt * initialDelay), not exponentially — confirm this is intended.
    if (attempt < 1) throw new IllegalArgumentException("Attempt must be positive");
    double currentDelay = attempt * initialDelay.toMillis();
    // Jitter band: +/- RANDOMIZATION_FACTOR around the base delay.
    double delta = RANDOMIZATION_FACTOR * currentDelay;
    double lowerDelay = currentDelay - delta;
    double upperDelay = currentDelay + delta;
    // NOTE(review): the '+ 1' widens the sampled range by 1ms (a nextInt-style
    // idiom applied to nextDouble); existing tests pin these exact values.
    long millis = (long) Math.min(lowerDelay + (random.nextDouble() * (upperDelay - lowerDelay + 1)), maxDelay.toMillis());
    return Duration.ofMillis(millis);
}
@Test
public void delay() {
    // A seeded Random makes the jittered delays deterministic, so the exact
    // values of the first five attempts can be pinned.
    ExponentialBackoff b = new ExponentialBackoff(Duration.ofSeconds(1), Duration.ofSeconds(10), new Random(1000));
    assertEquals(List.of(Duration.ofMillis(1210), Duration.ofMillis(2150), Duration.ofMillis(4340), Duration.ofMillis(2157), Duration.ofMillis(4932)),
            IntStream.rangeClosed(1, 5)
                    .mapToObj(b::delay)
                    .toList());
}
public MetricConsumer newInstance() {
    // Fast path: a single factory needs no forwarding wrapper.
    if (factories.length == 1) {
        return factories[0].newInstance();
    }
    // Otherwise build one consumer per factory and fan out through a forwarder.
    MetricConsumer[] delegates = new MetricConsumer[factories.length];
    for (int i = 0; i < delegates.length; ++i) {
        delegates[i] = factories[i].newInstance();
    }
    return new ForwardingMetricConsumer(delegates);
}
@Test
void requireThatMultipleConsumersAreDelegated() {
    // With two factories the provider must return a forwarding consumer that
    // delegates each call to both underlying consumers.
    MetricConsumer foo = Mockito.mock(MetricConsumer.class);
    MetricConsumer bar = Mockito.mock(MetricConsumer.class);
    MetricConsumerProvider provider = MetricConsumerProviders.newSingletonFactories(foo, bar);
    MetricConsumer consumer = provider.newInstance();
    assertNotSame(foo, consumer);
    assertNotSame(bar, consumer);
    consumer.add("foo", 6, null);
    Mockito.verify(foo, Mockito.times(1)).add("foo", 6, null);
    Mockito.verify(bar, Mockito.times(1)).add("foo", 6, null);
}
@GET
@Produces(MediaType.APPLICATION_JSON)
public Response getVirtualNetworks() {
    // Flatten every tenant's virtual networks into a single "vnets" JSON array.
    List<VirtualNetwork> allVnets = vnetAdminService.getTenantIds().stream()
            .flatMap(tenantId -> vnetService.getVirtualNetworks(tenantId).stream())
            .collect(Collectors.toList());
    return ok(encodeArray(VirtualNetwork.class, "vnets", allVnets)).build();
}
@Test
public void testGetVirtualNetworksByTenantId() {
    // Expects the REST response to be a single "vnets" JSON array containing
    // exactly the networks registered for tenantId3.
    final Set<VirtualNetwork> vnetSet = ImmutableSet.of(vnet1, vnet2, vnet3, vnet4);
    expect(mockVnetAdminService.getTenantIds()).andReturn(ImmutableSet.of(tenantId3)).anyTimes();
    replay(mockVnetAdminService);
    expect(mockVnetService.getVirtualNetworks(tenantId3)).andReturn(vnetSet).anyTimes();
    replay(mockVnetService);
    WebTarget wt = target();
    String response = wt.path("vnets/" + tenantId3.id()).request().get(String.class);
    assertThat(response, containsString("{\"vnets\":["));
    final JsonObject result = Json.parse(response).asObject();
    assertThat(result, notNullValue());
    assertThat(result.names(), hasSize(1));
    assertThat(result.names().get(0), is("vnets"));
    final JsonArray vnetJsonArray = result.get("vnets").asArray();
    assertThat(vnetJsonArray, notNullValue());
    assertEquals("Virtual networks array is not the correct size.", vnetSet.size(), vnetJsonArray.size());
    // Every expected network must be present in the returned array.
    vnetSet.forEach(vnet -> assertThat(vnetJsonArray, hasVnet(vnet)));
    verify(mockVnetService);
    verify(mockVnetAdminService);
}
public Class<?> getClassByNameOrNull(String name) {
    // Resolves a class by name through a per-classloader weak cache; returns
    // null (and negatively caches the miss) when the class cannot be found.
    Map<String, WeakReference<Class<?>>> map;
    synchronized (CACHE_CLASSES) {
        map = CACHE_CLASSES.get(classLoader);
        if (map == null) {
            map = Collections.synchronizedMap(
                new WeakHashMap<String, WeakReference<Class<?>>>());
            CACHE_CLASSES.put(classLoader, map);
        }
    }
    Class<?> clazz = null;
    WeakReference<Class<?>> ref = map.get(name);
    if (ref != null) {
        // May yield null if the weakly-held class has been collected.
        clazz = ref.get();
    }
    if (clazz == null) {
        try {
            clazz = Class.forName(name, true, classLoader);
        } catch (ClassNotFoundException e) {
            // Leave a marker that the class isn't found
            map.put(name, new WeakReference<Class<?>>(NEGATIVE_CACHE_SENTINEL));
            return null;
        }
        // two putters can race here, but they'll put the same class
        map.put(name, new WeakReference<Class<?>>(clazz));
        return clazz;
    } else if (clazz == NEGATIVE_CACHE_SENTINEL) {
        return null; // not found
    } else {
        // cache hit
        return clazz;
    }
}
@Test
public void testGetClassByNameOrNull() throws Exception {
    // A well-known JDK class must resolve to a non-null Class object.
    Configuration config = new Configuration();
    Class<?> clazz = config.getClassByNameOrNull("java.lang.Object");
    assertNotNull(clazz);
}
@Override
public void setProperty(String key, String value) {
    // Pure delegation: all mutation goes through the backing property source.
    this.propertiesPropertySource.setProperty(key, value);
}
@Test
void testInitWithInvalidOrder() throws IllegalAccessException, InvocationTargetException {
    // An unrecognized NACOS_ENV_FIRST value must fall back to the default
    // resolution order: PROPERTIES, JVM, ENV.
    System.setProperty(Constants.SysEnv.NACOS_ENV_FIRST, "invalid");
    List<SourceType> order = (List<SourceType>) initMethod.invoke(null);
    assertOrder(order, SourceType.PROPERTIES, SourceType.JVM, SourceType.ENV);
}
public static synchronized void addName(Class<?> writableClass, String name) {
    // Registers an additional (alias) name resolving to the same Writable
    // class; synchronized to guard the shared NAME_TO_CLASS map.
    NAME_TO_CLASS.put(name, writableClass);
}
@Test
public void testAddName() throws Exception {
    // An alias added via addName must resolve to the same class, and the
    // original registered name must keep working.
    Configuration conf = new Configuration();
    String altName = testName + ".alt";
    WritableName.setName(SimpleWritable.class, testName);
    WritableName.addName(SimpleWritable.class, altName);
    Class<?> test = WritableName.getClass(altName, conf);
    assertTrue(test.equals(SimpleWritable.class));
    // check original name still works
    test = WritableName.getClass(testName, conf);
    assertTrue(test.equals(SimpleWritable.class));
}
@Override
public EndOfDataStatus hasReceivedEndOfData() {
    // Still waiting on at least one gate with user data.
    if (!inputGatesWithRemainingUserData.isEmpty()) {
        return EndOfDataStatus.NOT_END_OF_DATA;
    }
    // All gates are done; report whether the remaining data should be drained.
    return shouldDrainOnEndOfData ? EndOfDataStatus.DRAINED : EndOfDataStatus.STOPPED;
}
@Test
void testDrainFlagComputation() throws Exception {
    // The union gate must report NOT_END_OF_DATA until every channel has sent
    // EndOfData, and STOPPED (not DRAINED) if any channel asked not to drain.
    // Setup
    final SingleInputGate inputGate1 = createInputGate();
    final SingleInputGate inputGate2 = createInputGate();
    final TestInputChannel[] inputChannels1 = new TestInputChannel[] { new TestInputChannel(inputGate1, 0), new TestInputChannel(inputGate1, 1) };
    inputGate1.setInputChannels(inputChannels1);
    final TestInputChannel[] inputChannels2 = new TestInputChannel[] { new TestInputChannel(inputGate2, 0), new TestInputChannel(inputGate2, 1) };
    inputGate2.setInputChannels(inputChannels2);
    // Test
    inputChannels1[1].readEndOfData(StopMode.DRAIN);
    inputChannels1[0].readEndOfData(StopMode.NO_DRAIN);
    inputChannels2[1].readEndOfData(StopMode.DRAIN);
    inputChannels2[0].readEndOfData(StopMode.DRAIN);
    final UnionInputGate unionInputGate = new UnionInputGate(inputGate1, inputGate2);
    inputGate1.notifyChannelNonEmpty(inputChannels1[0]);
    inputGate1.notifyChannelNonEmpty(inputChannels1[1]);
    inputGate2.notifyChannelNonEmpty(inputChannels2[0]);
    inputGate2.notifyChannelNonEmpty(inputChannels2[1]);
    verifyBufferOrEvent(unionInputGate, false, 0, true);
    verifyBufferOrEvent(unionInputGate, false, 2, true);
    // we have received EndOfData on a single input only
    assertThat(unionInputGate.hasReceivedEndOfData()).isEqualTo(PullingAsyncDataInput.EndOfDataStatus.NOT_END_OF_DATA);
    verifyBufferOrEvent(unionInputGate, false, 1, true);
    verifyBufferOrEvent(unionInputGate, false, 3, true);
    // both channels received EndOfData, one channel said we should not drain
    assertThat(unionInputGate.hasReceivedEndOfData()).isEqualTo(PullingAsyncDataInput.EndOfDataStatus.STOPPED);
}
@SuppressWarnings("unchecked")
@Override
public synchronized ProxyInfo<T> getProxy() {
    // Lazily builds (and caches) a single proxy that hedges each request
    // across all configured target proxies via RequestHedgingInvocationHandler.
    if (currentUsedHandler != null) {
        return currentUsedHandler;
    }
    Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>();
    StringBuilder combinedInfo = new StringBuilder("[");
    for (int i = 0; i < proxies.size(); i++) {
        ProxyInfo<T> pInfo = super.getProxy();
        incrementProxyIndex();
        targetProxyInfos.put(pInfo.proxyInfo, pInfo);
        combinedInfo.append(pInfo.proxyInfo).append(',');
    }
    // NOTE(review): the combined info string ends with ",]" (trailing comma);
    // it appears to be used as an opaque label, so it is left unchanged.
    combinedInfo.append(']');
    T wrappedProxy = (T) Proxy.newProxyInstance(
        RequestHedgingInvocationHandler.class.getClassLoader(),
        new Class<?>[]{xface},
        new RequestHedgingInvocationHandler(targetProxyInfos));
    currentUsedHandler = new ProxyInfo<T>(wrappedProxy, combinedInfo.toString());
    return currentUsedHandler;
}
@Test
public void testHedgingWhenOneFails() throws Exception {
    // With one slow-but-good and one failing proxy, the hedging provider must
    // return the good result and still have invoked both targets.
    final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            Thread.sleep(1000);
            return new long[]{1};
        }
    });
    final ClientProtocol badMock = Mockito.mock(ClientProtocol.class);
    Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
    RequestHedgingProxyProvider<ClientProtocol> provider = new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class, createFactory(badMock, goodMock));
    Assert.assertTrue(Proxy.getInvocationHandler(provider.getProxy().proxy) instanceof RpcInvocationHandler);
    long[] stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Mockito.verify(badMock).getStats();
    Mockito.verify(goodMock).getStats();
}
@Override
@MethodNotAvailable
public Object executeOnKey(K key, com.hazelcast.map.EntryProcessor entryProcessor) {
    // This adapter does not support executeOnKey; always signals unavailability.
    throw new MethodNotAvailableException();
}
// Verifies the adapter rejects executeOnKey with MethodNotAvailableException.
@Test(expected = MethodNotAvailableException.class)
public void testExecuteOnKey() {
    adapter.executeOnKey(23, new IMapReplaceEntryProcessor("value", "newValue"));
}
@Override
public boolean equals(Object o) {
    // Identity short-circuit.
    if (o == this) {
        return true;
    }
    // Strict class match (subclasses are never equal to this type).
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    DefaultRequest other = (DefaultRequest) o;
    return Objects.equals(key, other.key) && Objects.equals(RID, other.RID);
}
@Test
public void testEquals() {
    Request request = new DefaultRequest(rid, name);
    // Reflexivity: an instance equals itself.
    Assert.assertTrue(request.equals(request));
    // Fix: the original asserted 'request == null' is false, which is trivially
    // true for a just-constructed reference and never exercised equals(null).
    Assert.assertFalse(request.equals(null));
}
@Override
public Set<EntityExcerpt> listEntityExcerpts() {
    // Produce an excerpt for every event definition that is flagged as
    // exportable into a content pack.
    return eventDefinitionService.streamAll()
            .filter(definition -> definition.config().isContentPackExportable())
            .map(definition -> createExcerpt(definition))
            .collect(Collectors.toSet());
}
@Test
@MongoDBFixtures("EventDefinitionFacadeTest.json")
public void listExcerpts() {
    // The fixture contains one exportable event definition; verify its excerpt
    // fields round-trip correctly.
    final Set<EntityExcerpt> excerpts = facade.listEntityExcerpts();
    final EntityExcerpt excerpt = excerpts.iterator().next();
    assertThat(excerpt.title()).isEqualTo("title");
    assertThat(excerpt.id()).isEqualTo(ModelId.of("5d4032513d2746703d1467f6"));
    assertThat(excerpt.type()).isEqualTo(ModelTypes.EVENT_DEFINITION_V1);
}
@Override
public void run() throws Exception {
    // Propagates joining members' cache configs when JCache is on the
    // classpath; otherwise warns (no configs) or fails hard (configs present).
    if (isJCacheAvailable()) {
        ICacheService cacheService = getService();
        for (CacheConfig cacheConfig : configs) {
            cacheService.putCacheConfigIfAbsent(cacheConfig);
        }
    } else {
        // if JCache is not in classpath and no Cache configurations need to be processed, do not fail the operation
        // instead log a warning that if JCache API will be used then it will fail.
        if (configs.isEmpty()) {
            getLogger().warning("This member is joining a cluster whose members support JCache, however the cache-api "
                + "artifact is missing from this member's classpath. In case JCache API will be used, add cache-api "
                + "artifact in this member's classpath and restart the member.");
        } else {
            // JCache is already in use by other cluster members, so log an informative message to resolve the issue and
            // throw the CacheService not found exception.
            getLogger().severe("This member cannot support JCache because the cache-api artifact is missing from "
                + "its classpath. Add the JCache API JAR in the classpath and restart the member.");
            throw new HazelcastException("Service with name '" + SERVICE_NAME + "' not found!",
                new ServiceNotFoundException("Service with name '" + SERVICE_NAME + "' not found!"));
        }
    }
}
@Test
public void test_cachePostJoinOperationSucceeds_whenJCacheNotAvailable_noCacheConfigs() throws Exception {
    // JCache is not available in classpath
    OnJoinCacheOperation onJoinCacheOperation = createTestOnJoinCacheOperation(false);
    onJoinCacheOperation.setNodeEngine(nodeEngine);
    // With no cache configs the operation must complete without throwing.
    onJoinCacheOperation.run();
    verify(nodeEngine).getLogger(onJoinCacheOperation.getClass());
    // verify a warning was logged
    verify(logger).warning(anyString());
    // verify CacheService instance was not requested in OnJoinCacheOperation.run
    verifyNoMoreInteractions(nodeEngine);
}
@Path("/.well-known/openid-federation")
@GET
@Produces(MEDIA_TYPE_ENTITY_STATEMENT)
public Response get() {
    // Serves this relying party's signed OpenID Federation entity statement,
    // valid from 'now' until now + configured TTL.
    var federationEntityJwks = federationConfig.entitySigningKeys().toPublicJWKSet();
    var relyingPartyJwks = federationConfig.relyingPartyKeys().toPublicJWKSet();
    var now = Instant.now();
    var exp = now.plus(federationConfig.ttl());
    var jws = EntityStatement.create()
        .iat(now)
        .nbf(now)
        .exp(exp)
        .iss(federationConfig.iss().toString())
        .sub(federationConfig.sub().toString())
        .authorityHints(List.of(federationConfig.federationMaster().toString()))
        .metadata(
            Metadata.create()
                .openIdRelyingParty(
                    OpenIdRelyingParty.create()
                        .clientName(federationConfig.appName())
                        .jwks(relyingPartyJwks)
                        .responseTypes(List.of("code"))
                        .grantTypes(List.of("authorization_code"))
                        .requirePushedAuthorizationRequests(true)
                        .idTokenSignedResponseAlg("ES256")
                        .idTokenEncryptedResponseAlg("ECDH-ES")
                        .idTokenEncryptedResponseEnc("A256GCM")
                        .scope(String.join(" ", federationConfig.scopes()))
                        .redirectUris(federationConfig.redirectUris())
                        .clientRegistrationTypes(List.of("automatic"))
                        .tokenEndpointAuthMethodsSupported(
                            List.of("self_signed_tls_client_auth"))
                        // according to the federation spec this is not required here, some
                        // sectoral IdPs require it though
                        .defaultAcrValues(List.of("gematik-ehealth-loa-high"))
                        // warn: this is a non-standard field, but needed by some sectoral IdPs
                        .tokenEndpointAuthMethod("self_signed_tls_client_auth")
                        .build())
                .federationEntity(
                    FederationEntity.create().name(federationConfig.appName()).build())
                .build())
        .jwks(federationEntityJwks)
        .build()
        .sign(federationConfig.entitySigningKey());
    // Cache header is derived from the statement's TTL window.
    return Response.ok(jws.serialize())
        .header("x-kc-provider", "ovi")
        .cacheControl(cacheForTtl(now))
        .build();
}
@Test
void get_basic() {
    // The entity statement endpoint must answer 200 with iss/sub claims set to
    // the configured issuer.
    given()
        .baseUri(server.configuration().baseUri().toString())
        .get("/.well-known/openid-federation")
        .then()
        .statusCode(200)
        .body(jwsPayloadAt("/iss", is(ISSUER.toString())))
        .body(jwsPayloadAt("/sub", is(ISSUER.toString())));
}
@Override
public void removeDevice(DeviceId deviceId) {
    if (!netconfDeviceMap.containsKey(deviceId)) {
        log.warn("Device {} is not present", deviceId);
        // NOTE(review): listeners are notified of removal even though the
        // device was never tracked — presumably to keep observers consistent
        // with removal requests; confirm this asymmetry is intentional.
        for (NetconfDeviceListener l : netconfDeviceListeners) {
            l.deviceRemoved(deviceId);
        }
    } else {
        // Tracked device: tear it down (stopDevice handles cleanup).
        stopDevice(deviceId, true);
    }
}
@Test
public void testRemoveDevice() throws Exception {
    // After removal the controller's device map must no longer contain the id.
    ctrl.removeDevice(deviceInfo1.getDeviceId());
    assertFalse("Incorrect device removal", ctrl.getDevicesMap().containsKey(deviceId1));
}
@Override
public CompletableFuture<Void> deleteStateAsync(String key) {
    // Guard: state support must be enabled before any state operation.
    ensureStateEnabled();
    return this.defaultStateStore.deleteAsync(key);
}
@Test
public void testDeleteStateStateEnabled() throws Exception {
    context.defaultStateStore = mock(BKStateStoreImpl.class);
    // Fix: removed an unused 'ByteBuffer buffer' local (dead code left over
    // from a copy/paste); it played no part in this verification.
    context.deleteStateAsync("test-key");
    verify(context.defaultStateStore, times(1)).deleteAsync(eq("test-key"));
}
@Override
public String toString() {
    // The serialized JSON form doubles as the string representation.
    return this.serializedPipelineOptions;
}
@Test
public void equalityTest() {
    // Round-trip: serializing options to JSON and deserializing must produce
    // an equal SerializablePipelineOptions.
    PipelineOptions options = PipelineOptionsFactory.create();
    SerializablePipelineOptions serializablePipelineOptions = new SerializablePipelineOptions(options);
    String json = serializablePipelineOptions.toString();
    SerializablePipelineOptions serializablePipelineOptions2 = new SerializablePipelineOptions(json);
    assertEquals("SerializablePipelineOptions created from options and from json differ", serializablePipelineOptions, serializablePipelineOptions2);
}
@Override
public ListenableFuture<?> execute(CreateFunction statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters) {
    // Analyzes the CREATE FUNCTION statement, rejects bodies that invoke
    // dynamically registered SQL functions, then registers the function either
    // in the session (TEMPORARY) or in the global function namespace.
    Map<NodeRef<com.facebook.presto.sql.tree.Parameter>, Expression> parameterLookup = parameterExtractor(statement, parameters);
    Session session = stateMachine.getSession();
    Analyzer analyzer = new Analyzer(session, metadata, sqlParser, accessControl, Optional.empty(), parameters, parameterLookup, stateMachine.getWarningCollector());
    Analysis analysis = analyzer.analyze(statement);
    if (analysis.getFunctionHandles().values().stream()
            .anyMatch(SqlFunctionHandle.class::isInstance)) {
        throw new PrestoException(NOT_SUPPORTED, "Invoking a dynamically registered function in SQL function body is not supported");
    }
    SqlInvokedFunction function = createSqlInvokedFunction(statement, metadata, analysis);
    if (statement.isTemporary()) {
        // Session-scoped function, keyed by name + argument types.
        stateMachine.addSessionFunction(new SqlFunctionId(function.getSignature().getName(), function.getSignature().getArgumentTypes()), function);
    } else {
        metadata.getFunctionAndTypeManager().createFunction(function, statement.isReplace());
    }
    return immediateFuture(null);
}
// Registering the same TEMPORARY function twice in one session must fail on
// the second execution with the "already been defined" PrestoException.
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Session function .* has already been defined")
public void testCreateTemporaryFunctionWithSameNameFails() {
    SqlParser parser = new SqlParser();
    String sqlString = "CREATE TEMPORARY FUNCTION foo() RETURNS int RETURN 1";
    CreateFunction statement = (CreateFunction) parser.createStatement(sqlString, ParsingOptions.builder().build());
    TransactionManager transactionManager = createTestTransactionManager();
    QueryStateMachine stateMachine = createQueryStateMachine(sqlString, TEST_SESSION, false, transactionManager, executorService, metadataManager);
    new CreateFunctionTask(parser).execute(statement, transactionManager, metadataManager, new AllowAllAccessControl(), stateMachine, emptyList());
    new CreateFunctionTask(parser).execute(statement, transactionManager, metadataManager, new AllowAllAccessControl(), stateMachine, emptyList());
}
Set<SourceName> analyzeExpression(
    final Expression expression,
    final String clauseType
) {
    // Walk the expression tree, validating column references for this clause,
    // and report which sources were referenced.
    final Validator validator = new Validator(clauseType);
    validator.process(expression, null);
    return validator.referencedSources;
}
@Test
public void shouldIncludeLocationInErrorIfKnown() {
    // Given: a column reference with a known source location but no matching
    // source field.
    final Expression expression = new UnqualifiedColumnReferenceExp(
        Optional.of(new NodeLocation(10, 23)),
        ColumnName.of("just-name")
    );
    when(sourceSchemas.sourcesWithField(any(), any()))
        .thenReturn(ImmutableSet.of());
    // When:
    final Exception e = assertThrows(
        UnknownColumnException.class,
        () -> analyzer.analyzeExpression(expression, CLAUSE_TYPE)
    );
    // Then: error message carries the (1-based column) location prefix.
    assertThat(e.getMessage(), containsString("Line: 10, Col: 24: " + CLAUSE_TYPE));
}
@Override
public void start() {
    // Fetch the initial global pipeline config first, then schedule the
    // periodic refresh requests.
    fetchInitialPipelineGlobalConfig();
    schedulePeriodicGlobalConfigRequests();
}
@Test
public void testStart_requiresInitialConfig() throws IOException, InterruptedException {
    // start() must block until the initial config has been fetched and handed
    // to the consumer exactly once.
    WorkItem initialConfig = new WorkItem()
        .setJobId("job")
        .setStreamingConfigTask(new StreamingConfigTask().setMaxWorkItemCommitBytes(10L));
    CountDownLatch waitForInitialConfig = new CountDownLatch(1);
    Set<StreamingEnginePipelineConfig> receivedPipelineConfig = new HashSet<>();
    when(mockDataflowServiceClient.getGlobalStreamingConfigWorkItem())
        .thenReturn(Optional.of(initialConfig));
    streamingEngineConfigFetcher = createConfigFetcher(
        /* waitForInitialConfig= */ true,
        0,
        config -> {
            try {
                receivedPipelineConfig.add(config);
                waitForInitialConfig.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
        });
    Thread asyncStartConfigLoader = new Thread(streamingEngineConfigFetcher::start);
    asyncStartConfigLoader.start();
    waitForInitialConfig.countDown();
    asyncStartConfigLoader.join();
    assertThat(receivedPipelineConfig)
        .containsExactly(
            StreamingEnginePipelineConfig.builder()
                .setMaxWorkItemCommitBytes(
                    initialConfig.getStreamingConfigTask().getMaxWorkItemCommitBytes())
                .build());
}
@NonNull
@Override
public EntityStatementJWS fetchFederationStatement(
    URI federationFetchUrl, String issuer, String subject) {
  // Memoize per (fetch URL, issuer, subject); only cache misses hit the delegate.
  var cacheKey = "%s|%s|%s".formatted(federationFetchUrl, issuer, subject);
  return federationStatementCache.computeIfAbsent(
      cacheKey,
      unused -> delegate.fetchFederationStatement(federationFetchUrl, issuer, subject));
}
@Test
void fetchFederationStatement() {
    // The caching wrapper must delegate the first fetch and return the
    // delegate's statement unchanged.
    var url = URI.create("https://example.com");
    var iss = "myiss";
    var sub = "mysub";
    var expected = new EntityStatementJWS(null, null);
    when(delegate.fetchFederationStatement(url, iss, sub)).thenReturn(expected);
    // when
    var got = sut.fetchFederationStatement(url, iss, sub);
    // then
    verify(delegate).fetchFederationStatement(url, iss, sub);
    assertEquals(expected, got);
}
protected RemotingChannel createChannel(Channel channel, String group, String clientId, Set<SubscriptionData> subscriptionData) {
    // Atomically ensures a RemotingChannel exists for (group, channel); the
    // compute + computeIfAbsent pair keeps concurrent creators from racing.
    this.groupChannelMap.compute(group, (groupKey, clientIdMap) -> {
        if (clientIdMap == null) {
            clientIdMap = new ConcurrentHashMap<>();
        }
        clientIdMap.computeIfAbsent(channel, clientIdKey -> new RemotingChannel(remotingProxyOutClient, proxyRelayService, channel, clientId, subscriptionData));
        return clientIdMap;
    });
    // Re-read through the canonical lookup path rather than returning from the
    // lambda, so existing entries are also handled.
    return getChannel(group, channel);
}
@Test
public void testCreateChannel() {
    // Channel creation must be idempotent per (channel, group, clientId), and
    // producer/consumer channels must be distinct objects.
    String group = "group";
    String clientId = RandomStringUtils.randomAlphabetic(10);
    Channel producerChannel = createMockChannel();
    RemotingChannel producerRemotingChannel = this.remotingChannelManager.createProducerChannel(ctx, producerChannel, group, clientId);
    assertNotNull(producerRemotingChannel);
    assertSame(producerRemotingChannel, this.remotingChannelManager.createProducerChannel(ctx, producerChannel, group, clientId));
    Channel consumerChannel = createMockChannel();
    RemotingChannel consumerRemotingChannel = this.remotingChannelManager.createConsumerChannel(ctx, consumerChannel, group, clientId, new HashSet<>());
    assertSame(consumerRemotingChannel, this.remotingChannelManager.createConsumerChannel(ctx, consumerChannel, group, clientId, new HashSet<>()));
    assertNotNull(consumerRemotingChannel);
    assertNotSame(producerRemotingChannel, consumerRemotingChannel);
}
@Override
public AnalysisPhase getAnalysisPhase() {
    // Constant phase for this analyzer.
    return ANALYSIS_PHASE;
}
// NOTE(review): "Phaze" is a typo for "Phase" in the test name; left as-is to
// avoid renaming a framework-discovered test method.
@Test
public void testGetAnalysisPhaze() {
    assertEquals(AnalysisPhase.INFORMATION_COLLECTION, instance.getAnalysisPhase());
}
public static String post(HttpURLConnection con, Map<String, String> headers, String requestBody, Integer connectTimeoutMs, Integer readTimeoutMs) throws IOException, UnretryableException {
    // Writes the request (headers/body, applying the given timeouts), then
    // reads and returns the response body.
    handleInput(con, headers, requestBody, connectTimeoutMs, readTimeoutMs);
    return handleOutput(con);
}
@Test
public void testErrorReadingResponse() throws IOException {
    // An IOException from the response stream must propagate out of post().
    HttpURLConnection mockedCon = createHttpURLConnection("dummy");
    when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read"));
    assertThrows(IOException.class, () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null));
}
public static void throwTooManyActivePersistentQueriesException(
    final KsqlExecutionContext executionContext,
    final KsqlConfig ksqlConfig,
    final String statementStr
) {
    // Two variants of the same message: the sanitized one omits the statement
    // text (safe to log), the unlogged one includes it for the client.
    final String sanitizedMessage = String.format(
        "Not executing statement(s) as it would cause the number "
            + "of active, persistent queries to exceed the configured limit. "
            + "Use the TERMINATE command to terminate existing queries, "
            + "or increase the '%s' setting via the 'ksql-server.properties' file. "
            + "Current persistent query count: %d. Configured limit: %d.",
        KsqlConfig.KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_CONFIG,
        executionContext.getPersistentQueries().size(),
        getQueryLimit(ksqlConfig)
    );
    final String unloggedMessage = String.format(
        "Not executing statement(s) '%s' as it would cause the number "
            + "of active, persistent queries to exceed the configured limit. "
            + "Use the TERMINATE command to terminate existing queries, "
            + "or increase the '%s' setting via the 'ksql-server.properties' file. "
            + "Current persistent query count: %d. Configured limit: %d.",
        statementStr,
        KsqlConfig.KSQL_ACTIVE_PERSISTENT_QUERY_LIMIT_CONFIG,
        executionContext.getPersistentQueries().size(),
        getQueryLimit(ksqlConfig)
    );
    throw new KsqlStatementException(
        sanitizedMessage,
        unloggedMessage,
        statementStr,
        KsqlStatementException.Problem.REQUEST
    );
}
@Test
public void shouldThrowWhenAsked() {
    // Given: over the configured query limit.
    final String statementStr = "my statement";
    givenActivePersistentQueries(3);
    givenQueryLimit(2);
    // When:
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ksqlEngine, ksqlConfig, statementStr)
    );
    // Then: unlogged message includes the statement text, sanitized one omits it.
    assertThat(e.getUnloggedMessage(), containsString(
        "Not executing statement(s) 'my statement' as it would cause the number "
            + "of active, persistent queries to exceed the configured limit. "
            + "Use the TERMINATE command to terminate existing queries, "
            + "or increase the 'ksql.query.persistent.active.limit' setting "
            + "via the 'ksql-server.properties' file. "
            + "Current persistent query count: 3. Configured limit: 2."));
    assertThat(e.getMessage(), containsString(
        "Not executing statement(s) as it would cause the number "
            + "of active, persistent queries to exceed the configured limit. "
            + "Use the TERMINATE command to terminate existing queries, "
            + "or increase the 'ksql.query.persistent.active.limit' setting "
            + "via the 'ksql-server.properties' file. "
            + "Current persistent query count: 3. Configured limit: 2."));
    assertThat(e.getSqlStatement(), containsString("my statement"));
}
@Override
public String toString() {
    // Mirrors the configured merge policy and batch size.
    StringBuilder sb = new StringBuilder("MergePolicyConfig{");
    sb.append("policy='").append(policy).append('\'');
    sb.append(", batchSize=").append(batchSize);
    return sb.append('}').toString();
}
@Test
public void testToString() {
    // The string form must name the class and include both configured fields.
    config.setPolicy(HigherHitsMergePolicy.class.getName());
    config.setBatchSize(2342);
    String configString = config.toString();
    assertThat(configString).contains("MergePolicyConfig");
    assertThat(configString).contains("policy='" + HigherHitsMergePolicy.class.getName() + "'");
    assertThat(configString).contains("batchSize=2342");
}
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
    // 'param' is a comma-separated list of post ids; resolve them to the ids
    // of the users holding those posts.
    final Set<Long> postIds = StrUtils.splitToLongSet(param);
    final List<AdminUserRespDTO> postUsers = adminUserApi.getUserListByPostIds(postIds);
    return convertSet(postUsers, AdminUserRespDTO::getId);
}
@Test
public void testCalculateUsers() {
    // ๅ‡†ๅค‡ๅ‚ๆ•ฐ (prepare parameters)
    String param = "1,2";
    // mock ๆ–นๆณ• (mock the API call)
    List<AdminUserRespDTO> users = convertList(asSet(11L, 22L), id -> new AdminUserRespDTO().setId(id));
    when(adminUserApi.getUserListByPostIds(eq(asSet(1L, 2L)))).thenReturn(users);
    // ่ฐƒ็”จ (invoke)
    Set<Long> results = strategy.calculateUsers(null, param);
    // ๆ–ญ่จ€ (assert)
    assertEquals(asSet(11L, 22L), results);
}
public static RedissonClient create() {
    // Default configuration: a single local Redis server on the standard port.
    Config defaultConfig = new Config();
    defaultConfig.useSingleServer().setAddress("redis://127.0.0.1:6379");
    return create(defaultConfig);
}
@Test
public void testSentinelConnectionFail() {
    // Connecting to an unreachable sentinel must fail with
    // RedisConnectionException; the sleep is unreachable when create() throws.
    Assertions.assertThrows(RedisConnectionException.class, () -> {
        Config config = new Config();
        config.useSentinelServers().addSentinelAddress("redis://127.99.0.1:1111").setMasterName("test");
        Redisson.create(config);
        Thread.sleep(1500);
    });
}
@Override
public boolean isEmpty() {
    // The configuration is empty exactly when no mask table rules exist.
    return this.tables.isEmpty();
}
@Test
void assertIsEmpty() {
    // A configuration built from empty collections must report empty.
    assertTrue(new MaskRuleConfiguration(Collections.emptyList(), Collections.emptyMap()).isEmpty());
}
public List<ButtonId> buttons() {
    // NOTE(review): returns the internal list directly; callers could mutate
    // panel state through it — consider an unmodifiable view if callers allow.
    return buttons;
}
@Test
public void buttons() {
    // Exercises add/remove/removeAll and checks both size and ordering of the
    // exposed button list after each mutation.
    basic();
    pp.addButton(BD_A)
        .addButton(BD_B);
    assertEquals("wrong buttons", 2, pp.buttons().size());
    verifyButtons(KEY_A, KEY_B);
    pp.removeButtons(BD_B)
        .addButton(BD_C)
        .addButton(BD_Z);
    assertEquals("wrong buttons", 3, pp.buttons().size());
    verifyButtons(KEY_A, KEY_C, KEY_Z);
    pp.removeAllButtons()
        .addButton(BD_B);
    assertEquals("wrong buttons", 1, pp.buttons().size());
    verifyButtons(KEY_B);
}
@Override
@PublicAPI(usage = ACCESS)
public String getName() {
    // Plain accessor.
    return this.name;
}
@Test
public void predicate_declaredIn() {
    // declaredIn(...) must accept the call target's declaring class (by Class,
    // by name, and by custom DescribedPredicate) and reject any other class.
    JavaCall<?> call = simulateCall().from(Origin.class, "call").to(Target.class, "called");
    assertThat(declaredIn(Target.class))
        .accepts(call.getTarget())
        .hasDescription("declared in " + Target.class.getName());
    assertThat(declaredIn(Target.class.getName()))
        .accepts(call.getTarget())
        .hasDescription("declared in " + Target.class.getName());
    assertThat(declaredIn(equivalentTo(Target.class).as("custom")))
        .accepts(call.getTarget())
        .hasDescription("declared in custom");
    assertThat(declaredIn(Origin.class))
        .rejects(call.getTarget());
    assertThat(declaredIn(Origin.class.getName()))
        .rejects(call.getTarget());
    assertThat(declaredIn(equivalentTo(Origin.class)))
        .rejects(call.getTarget());
}
public Command create(
    final ConfiguredStatement<? extends Statement> statement,
    final KsqlExecutionContext context) {
    // Convenience overload: uses the execution context's own service context.
    return create(statement, context.getServiceContext(), context);
}
@Test
public void shouldCreateCommandForPlannedQueryInSharedRuntime() {
    // Given: execution returns a shared-runtime (bin-packed) query metadata.
    givenPlannedQuery();
    BinPackedPersistentQueryMetadataImpl queryMetadata = mock(BinPackedPersistentQueryMetadataImpl.class);
    when(executionContext.execute(any(), any(ConfiguredKsqlPlan.class))).thenReturn(result);
    when(result.getQuery()).thenReturn(Optional.ofNullable(queryMetadata));
    // When:
    final Command command = commandFactory.create(configuredStatement, executionContext);
    // Then: the command wraps the configured plan with the session config.
    assertThat(command, is(Command.of(ConfiguredKsqlPlan.of(A_PLAN, SessionConfig.of(config, overrides)))));
}
public static void checkBrokerConfig(final Properties brokerConfig) throws MQClientException { // TODO: use MixAll.isPropertyValid() when jdk upgrade to 1.8 if (brokerConfig.containsKey("brokerPermission") && !PermName.isValid(brokerConfig.getProperty("brokerPermission"))) { throw new MQClientException(ResponseCode.NO_PERMISSION, String.format("brokerPermission value: %s is invalid.", brokerConfig.getProperty("brokerPermission"))); } }
@Test
public void testBrokerConfigValid() throws MQClientException {
    Properties brokerConfig = new Properties();
    // Valid permission combinations must pass without exception.
    brokerConfig.setProperty("brokerPermission", String.valueOf(PermName.PERM_INHERIT | PermName.PERM_WRITE | PermName.PERM_READ));
    Validators.checkBrokerConfig(brokerConfig);
    brokerConfig.setProperty("brokerPermission", String.valueOf(PermName.PERM_WRITE | PermName.PERM_READ));
    Validators.checkBrokerConfig(brokerConfig);
    brokerConfig.setProperty("brokerPermission", String.valueOf(PermName.PERM_READ));
    Validators.checkBrokerConfig(brokerConfig);
    try {
        brokerConfig.setProperty("brokerPermission", String.valueOf(PermName.PERM_PRIORITY));
        Validators.checkBrokerConfig(brokerConfig);
        // Fix: previously the test passed silently when no exception was thrown.
        throw new AssertionError("expected MQClientException for invalid brokerPermission");
    } catch (MQClientException e) {
        assertThat(e.getResponseCode()).isEqualTo(ResponseCode.NO_PERMISSION);
        assertThat(e.getErrorMessage()).isEqualTo(String.format("brokerPermission value: %s is invalid.", brokerConfig.getProperty("brokerPermission")));
    }
    try {
        brokerConfig.setProperty("brokerPermission", String.valueOf(PermName.PERM_PRIORITY | PermName.PERM_INHERIT));
        Validators.checkBrokerConfig(brokerConfig);
        // Fix: fail fast when the expected exception is missing.
        throw new AssertionError("expected MQClientException for invalid brokerPermission");
    } catch (MQClientException e) {
        assertThat(e.getResponseCode()).isEqualTo(ResponseCode.NO_PERMISSION);
        assertThat(e.getErrorMessage()).isEqualTo(String.format("brokerPermission value: %s is invalid.", brokerConfig.getProperty("brokerPermission")));
    }
}
/**
 * Returns the element at {@code index} from an arbitrary container-like
 * object: Map (entry at iteration position), List, Object[], Iterator,
 * Collection, Enumeration, or a primitive/reference array via reflection.
 *
 * @throws IndexOutOfBoundsException if the index is negative or past the end
 * @throws IllegalArgumentException  if the object type is unsupported or null
 */
public static Object get(Object object, int index) {
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
    }
    if (object == null) {
        // Hoisted to the top; previously null was detected only after every
        // instanceof check failed — the outcome is identical.
        throw new IllegalArgumentException("Unsupported object type: null");
    }
    if (object instanceof Map) {
        // Element = the index-th entry in iteration order.
        return get(((Map<?, ?>) object).entrySet().iterator(), index);
    }
    if (object instanceof List) {
        return ((List<?>) object).get(index);
    }
    if (object instanceof Object[]) {
        return ((Object[]) object)[index];
    }
    if (object instanceof Iterator) {
        Iterator<?> it = (Iterator<?>) object;
        while (it.hasNext()) {
            index--;
            if (index == -1) {
                return it.next();
            }
            it.next();
        }
        throw new IndexOutOfBoundsException("Entry does not exist: " + index);
    }
    if (object instanceof Collection) {
        return get(((Collection<?>) object).iterator(), index);
    }
    if (object instanceof Enumeration) {
        Enumeration<?> en = (Enumeration<?>) object;
        while (en.hasMoreElements()) {
            index--;
            if (index == -1) {
                return en.nextElement();
            }
            en.nextElement();
        }
        throw new IndexOutOfBoundsException("Entry does not exist: " + index);
    }
    try {
        // Last resort: primitive arrays (or any array) via reflection.
        return Array.get(object, index);
    } catch (IllegalArgumentException ex) {
        throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
    }
}
// A null container must be rejected with IllegalArgumentException, not NPE.
@Test
void testGet1() {
    assertThrows(IllegalArgumentException.class, () -> {
        CollectionUtils.get(null, 0);
    });
}
/**
 * Converts one row of a change stream query result into change stream records.
 *
 * <p>PostgreSQL dialect returns a single JSONB column; GoogleSQL returns an
 * array of structs, each of which may expand to multiple records.
 */
public List<ChangeStreamRecord> toChangeStreamRecords(
    PartitionMetadata partition,
    ChangeStreamResultSet resultSet,
    ChangeStreamResultSetMetadata resultSetMetadata) {
    if (this.isPostgres()) {
        // In PostgresQL, change stream records are returned as JsonB.
        return Collections.singletonList(
            toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
    }
    // In GoogleSQL, change stream records are returned as an array of structs.
    return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
        .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
        .collect(Collectors.toList());
}
// An UPDATE struct row with NEW_ROW value capture must round-trip through the
// mapper back into the expected DataChangeRecord.
@Test
public void testMappingUpdateStructRowNewRowAndOldValuesToDataChangeRecord() {
    final DataChangeRecord dataChangeRecord = new DataChangeRecord(
        "partitionToken",
        Timestamp.ofTimeSecondsAndNanos(10L, 20),
        "serverTransactionId",
        true,
        "1",
        "tableName",
        Arrays.asList(
            new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
            new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
        Collections.singletonList(
            new Mod(
                "{\"column1\":\"value1\"}",
                "{\"column2\":\"oldValue2\"}",
                "{\"column2\":\"newValue2\"}")),
        ModType.UPDATE,
        ValueCaptureType.NEW_ROW,
        10L,
        2L,
        "transactionTag",
        true,
        null);
    // Render the record as the struct shape the change stream query returns.
    final Struct jsonFieldsStruct = recordsToStructWithJson(dataChangeRecord);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getCurrentRowAsStruct()).thenReturn(jsonFieldsStruct);
    assertEquals(
        Collections.singletonList(dataChangeRecord),
        mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
/**
 * Runs one step of the processor state machine and reports the resulting
 * progress state. The tracker is reset first so the returned state reflects
 * only this invocation.
 */
@Nonnull
@Override
public ProgressState call() {
    progTracker.reset();
    stateMachineStep();
    return progTracker.toProgressState();
}
// The snapshot task must report NO_PROGRESS while writer flushes are pending
// and only advance pendingSnapshotId after the flushes complete.
@Test
public void test_waitingForFlushesToComplete() {
    // When
    Entry<String, String> entry = entry("k", "v");
    init(asList(entry, new SnapshotBarrier(2, false)));
    ssContext.startNewSnapshotPhase1(2, "map", 0);
    assertEquals(MADE_PROGRESS, sst.call());
    assertEquals(MADE_PROGRESS, sst.call());
    // Stalls here: the writer still has pending flushes.
    assertEquals(NO_PROGRESS, sst.call());
    assertTrue(mockSsWriter.hasPendingFlushes);
    // Then
    mockSsWriter.hasPendingFlushes = false;
    assertEquals(MADE_PROGRESS, sst.call());
    assertEquals(NO_PROGRESS, sst.call());
    assertEquals(3, sst.pendingSnapshotId);
}
/**
 * Returns every role in this config that the given admin belongs to,
 * preserving the config's iteration order.
 */
public List<Role> memberRoles(Admin admin) {
    List<Role> matching = new ArrayList<>();
    for (Role candidate : this) {
        if (admin.belongsTo(candidate)) {
            matching.add(candidate);
        }
    }
    return matching;
}
// memberRoles must match role names case-insensitively and return the stored
// role instance for each matching AdminRole.
@Test
public void shouldListItselfWhenARoleExists() {
    Role firstRole = new RoleConfig(new CaseInsensitiveString("role1"), new RoleUser(new CaseInsensitiveString("USER1")), new RoleUser(new CaseInsensitiveString("user2")));
    Role secondRole = new RoleConfig(new CaseInsensitiveString("ROLE2"), new RoleUser(new CaseInsensitiveString("user1")), new RoleUser(new CaseInsensitiveString("user3")));
    RolesConfig rolesConfig = new RolesConfig(firstRole, secondRole);
    assertThat(rolesConfig.memberRoles(new AdminRole(new CaseInsensitiveString("role1"))), is(List.of(firstRole)));
    assertThat(rolesConfig.memberRoles(new AdminRole(new CaseInsensitiveString("role2"))), is(List.of(secondRole)));
}
/**
 * Closes the current thread's DB session if one was actually created
 * ({@code isPopulated()}), logging any failure instead of propagating it so
 * cleanup never breaks the caller.
 *
 * NOTE(review): assumes {@code dbSessionThreadLocal.get()} never returns null
 * (i.e. the ThreadLocal has an initial value) — confirm, otherwise this NPEs
 * on the isPopulated() call.
 */
public void close(ThreadLocal<DelegatingDbSessionSupplier> dbSessionThreadLocal, String label) {
    DelegatingDbSessionSupplier delegatingDbSessionSupplier = dbSessionThreadLocal.get();
    // Only close if the supplier was ever resolved; otherwise no session exists.
    boolean getCalled = delegatingDbSessionSupplier.isPopulated();
    if (getCalled) {
        try {
            DbSession res = delegatingDbSessionSupplier.get();
            res.close();
        } catch (Exception e) {
            // Best-effort cleanup: log with label and thread context, do not rethrow.
            LOG.error(format("Failed to close %s connection in %s", label, currentThread()), e);
        }
    }
}
// rollback() followed by close() must roll back exactly once on the
// underlying session — close() must not trigger a second rollback.
@Test
void openSession_with_caching_returns_DbSession_that_does_not_roll_back_on_close_if_any_mutation_call_was_followed_by_rollback_without_parameters() throws SQLException {
    DbSession dbSession = openSessionAndDoSeveralMutatingAndNeutralCalls();
    dbSession.rollback();
    dbSession.close();
    verify(myBatisDbSession, times(1)).rollback();
}
/**
 * Converts a command-line string value into a {@link URL}.
 *
 * @param value the raw CLI argument
 * @return the parsed URL
 * @throws ParameterException if the value is blank or cannot be parsed
 */
public URL convert(String value) {
    if (isBlank(value)) {
        throw new ParameterException(getErrorString("a blank value", "a valid URL"));
    }
    try {
        return URLUtil.parseURL(value);
    } catch (IllegalArgumentException e) {
        // Re-wrap parse failures so the CLI layer reports them uniformly.
        throw new ParameterException(getErrorString(value, "a valid URL"));
    }
}
// A well-formed file: URL is accepted and preserved verbatim.
@Test
public void urlIsCreatedFromFileUrl() {
    URL url = converter.convert("file:/path/to/something");
    assertThat(url.toString(), is("file:/path/to/something"));
}
/**
 * Resolves the SQL type of the given expression using no lambda type mappings.
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
// BIGINT column + INT literal must widen to BIGINT.
@Test
public void shouldResolveTypeForAddBigintIntegerLiteral() {
    final Expression expression = new ArithmeticBinaryExpression(Operator.ADD, TestExpressions.COL0, literal(10));
    final SqlType type = expressionTypeManager.getExpressionSqlType(expression);
    assertThat(type, is(SqlTypes.BIGINT));
}
/**
 * Picks the sharding result merger for a statement: DQL is decided on the
 * statement context type itself, DDL/DAL on the underlying statement; every
 * other statement falls through to the transparent (pass-through) merger.
 */
@Override
public ResultMerger newInstance(final String databaseName, final DatabaseType protocolType, final ShardingRule shardingRule,
                                final ConfigurationProperties props, final SQLStatementContext sqlStatementContext) {
    if (sqlStatementContext instanceof SelectStatementContext) {
        return new ShardingDQLResultMerger(protocolType);
    }
    if (sqlStatementContext.getSqlStatement() instanceof DDLStatement) {
        return new ShardingDDLResultMerger();
    }
    return sqlStatementContext.getSqlStatement() instanceof DALStatement
            ? new ShardingDALResultMerger(databaseName, shardingRule)
            : new TransparentResultMerger();
}
// An INSERT statement matches none of the DQL/DDL/DAL branches, so the engine
// must fall back to the TransparentResultMerger.
@Test
void assertNewInstanceWithOtherStatement() {
    InsertStatement insertStatement = new MySQLInsertStatement();
    InsertColumnsSegment insertColumnsSegment = new InsertColumnsSegment(0, 0, Collections.singletonList(new ColumnSegment(0, 0, new IdentifierValue("col"))));
    insertStatement.setTable(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("tbl"))));
    insertStatement.setInsertColumns(insertColumnsSegment);
    InsertStatementContext sqlStatementContext = createInsertStatementContext(insertStatement);
    ConfigurationProperties props = new ConfigurationProperties(new Properties());
    assertThat(new ShardingResultMergerEngine().newInstance(DefaultDatabase.LOGIC_NAME, TypedSPILoader.getService(DatabaseType.class, "MySQL"), null, props, sqlStatementContext), instanceOf(TransparentResultMerger.class));
}
/**
 * Adds a notification filter after duplicate checking;
 * checkForDuplicates throws when an equivalent filter already exists.
 */
public void addNotificationFilter(NotificationFilter another) {
    checkForDuplicates(another);
    notificationFilters.add(another);
}
// A filter for a specific event must be rejected as a duplicate when an
// existing filter for the same pipeline/stage already covers all events.
@Test
void shouldThrowExceptionIfFilterWithAllEventAlreadyExist() {
    User user = new User("foo");
    user.addNotificationFilter(new NotificationFilter("cruise", "dev", StageEvent.All, false));
    try {
        user.addNotificationFilter(new NotificationFilter("cruise", "dev", StageEvent.Fixed, false));
        fail("shouldThrowExceptionIfFilterWithAllEventAlreadyExist");
    } catch (Exception e) {
        assertThat(e.getMessage()).contains("Duplicate notification filter");
    }
}
public static Float parseHexIntLongToFloat(String value, boolean bigEndian) { String valueP = prepareNumberString(value, bigEndian); if (valueP != null) { int radixValue = isValidStringAndRadix(valueP, HEX_RADIX, value); if (radixValue == HEX_RADIX) { int bits = (int) Long.parseLong(valueP, HEX_RADIX); // If the length is not equal to 8 characters, we process it as an integer (eg "0x0A" for 10.0f). float floatValue = (float) bits; return Float.valueOf(floatValue); } } return null; }
// Hex strings are interpreted as integer VALUES widened to float (0x0A -> 10.0f),
// with byte order controlled by the bigEndian flag; zero-padding on the
// appropriate side must not change the value.
@Test
public void parseHexIntLongToFloat_Test() {
    Float valueExpected = 10.0f;
    Float valueActual = TbUtils.parseHexIntLongToFloat("0x0A", true);
    Assertions.assertEquals(valueExpected, valueActual);
    valueActual = TbUtils.parseHexIntLongToFloat("0x0A", false);
    Assertions.assertEquals(valueExpected, valueActual);
    valueActual = TbUtils.parseHexIntLongToFloat("0x00000A", true);
    Assertions.assertEquals(valueExpected, valueActual);
    valueActual = TbUtils.parseHexIntLongToFloat("0x0A0000", false);
    Assertions.assertEquals(valueExpected, valueActual);
    // 0x0A0A == 2570 once the byte order is applied.
    valueExpected = 2570.0f;
    valueActual = TbUtils.parseHexIntLongToFloat("0x000A0A", true);
    Assertions.assertEquals(valueExpected, valueActual);
    valueActual = TbUtils.parseHexIntLongToFloat("0x0A0A00", false);
    Assertions.assertEquals(valueExpected, valueActual);
}
/**
 * RLP-encodes a value. RLP has exactly two encodings: strings (byte arrays)
 * and lists; anything that is not an RlpString is treated as an RlpList.
 */
public static byte[] encode(RlpType value) {
    return (value instanceof RlpString)
            ? encodeString((RlpString) value)
            : encodeList((RlpList) value);
}
// Exercises the canonical RLP test vectors: short strings, lists, empty
// values, integers, nested lists, the 55-byte long-string boundary, and the
// single zero byte.
@Test
public void testEncode() {
    // Short string: 0x80 + length prefix, then the bytes.
    assertArrayEquals(
            RlpEncoder.encode(RlpString.create("dog")),
            (new byte[] {(byte) 0x83, 'd', 'o', 'g'}));
    // Short list: 0xc0 + payload length prefix.
    assertArrayEquals(
            RlpEncoder.encode(new RlpList(RlpString.create("cat"), RlpString.create("dog"))),
            (new byte[] {(byte) 0xc8, (byte) 0x83, 'c', 'a', 't', (byte) 0x83, 'd', 'o', 'g'}));
    // Empty string encodes as 0x80.
    assertArrayEquals(RlpEncoder.encode(RlpString.create("")), (new byte[] {(byte) 0x80}));
    assertArrayEquals(
            RlpEncoder.encode(RlpString.create(new byte[] {})), (new byte[] {(byte) 0x80}));
    // Empty list encodes as 0xc0.
    assertArrayEquals(RlpEncoder.encode(new RlpList()), (new byte[] {(byte) 0xc0}));
    // Small integers (< 0x80) encode as a single byte.
    assertArrayEquals(
            RlpEncoder.encode(RlpString.create(BigInteger.valueOf(0x0f))),
            (new byte[] {(byte) 0x0f}));
    assertArrayEquals(
            RlpEncoder.encode(RlpString.create(BigInteger.valueOf(0x0400))),
            (new byte[] {(byte) 0x82, (byte) 0x04, (byte) 0x00}));
    // Set-theoretic representation of three: [ [], [[]], [ [], [[]] ] ].
    assertArrayEquals(
            RlpEncoder.encode(
                    new RlpList(
                            new RlpList(),
                            new RlpList(new RlpList()),
                            new RlpList(new RlpList(), new RlpList(new RlpList())))),
            (new byte[] {
                (byte) 0xc7,
                (byte) 0xc0,
                (byte) 0xc1,
                (byte) 0xc0,
                (byte) 0xc3,
                (byte) 0xc0,
                (byte) 0xc1,
                (byte) 0xc0
            }));
    // 56-byte string crosses into the long-string form: 0xb8 + length byte.
    assertArrayEquals(
            RlpEncoder.encode(
                    RlpString.create(
                            "Lorem ipsum dolor sit amet, consectetur adipisicing elit")),
            (new byte[] {
                (byte) 0xb8,
                (byte) 0x38,
                'L', 'o', 'r', 'e', 'm', ' ', 'i', 'p', 's', 'u', 'm', ' ',
                'd', 'o', 'l', 'o', 'r', ' ', 's', 'i', 't', ' ',
                'a', 'm', 'e', 't', ',', ' ',
                'c', 'o', 'n', 's', 'e', 'c', 't', 'e', 't', 'u', 'r', ' ',
                'a', 'd', 'i', 'p', 'i', 's', 'i', 'c', 'i', 'n', 'g', ' ',
                'e', 'l', 'i', 't'
            }));
    // Zero as a BigInteger encodes as the empty string (0x80) ...
    assertArrayEquals(
            RlpEncoder.encode(RlpString.create(BigInteger.ZERO)), (new byte[] {(byte) 0x80}));
    // ... but an explicit single zero byte is preserved as 0x00.
    // https://github.com/paritytech/parity-common/blob/master/rlp/tests/tests.rs#L237
    assertArrayEquals(
            RlpEncoder.encode(RlpString.create(new byte[] {0})), (new byte[] {(byte) 0x00}));
    assertArrayEquals(
            RlpEncoder.encode(
                    new RlpList(
                            RlpString.create("zw"),
                            new RlpList(RlpString.create(4)),
                            RlpString.create(1))),
            (new byte[] {
                (byte) 0xc6, (byte) 0x82, (byte) 0x7a, (byte) 0x77, (byte) 0xc1, (byte) 0x04,
                (byte) 0x01
            }));
    // 55 bytes is the largest short-form string: prefix 0xb7 (0x80 + 55).
    // See https://github.com/web3j/web3j/issues/519
    byte[] encodeMe = new byte[55];
    Arrays.fill(encodeMe, (byte) 0);
    byte[] expectedEncoding = new byte[56];
    expectedEncoding[0] = (byte) 0xb7;
    System.arraycopy(encodeMe, 0, expectedEncoding, 1, encodeMe.length);
    assertArrayEquals(RlpEncoder.encode(RlpString.create(encodeMe)), (expectedEncoding));
}
/**
 * Fails unless the subject string is empty. A null subject fails with
 * "expected an empty string"; a non-empty one with "expected to be empty";
 * an actually-empty subject passes silently.
 */
public void isEmpty() {
    if (actual == null) {
        failWithActual(simpleFact("expected an empty string"));
    } else if (!actual.isEmpty()) {
        failWithActual(simpleFact("expected to be empty"));
    }
}
// A non-empty subject must fail with the "expected to be empty" fact plus the actual value.
@Test
public void stringIsEmptyFail() {
    expectFailureWhenTestingThat("abc").isEmpty();
    assertFailureKeys("expected to be empty", "but was");
}
/**
 * Updates an existing OpenStack network: validates the input, persists it to
 * the network store, then refreshes the derived type-augmented entry.
 * compute() asserts an augmented entry already exists for the id, so updating
 * a never-created network fails with IllegalArgumentException.
 */
@Override
public void updateNetwork(Network osNet) {
    checkNotNull(osNet, ERR_NULL_NETWORK);
    checkArgument(!Strings.isNullOrEmpty(osNet.getId()), ERR_NULL_NETWORK_ID);
    osNetworkStore.updateNetwork(osNet);
    OpenstackNetwork finalAugmentedNetwork = buildAugmentedNetworkFromType(osNet);
    augmentedNetworkMap.compute(osNet.getId(), (id, existing) -> {
        final String error = osNet.getId() + ERR_NOT_FOUND;
        // Reject updates for networks that were never created.
        checkArgument(existing != null, error);
        return finalAugmentedNetwork;
    });
    log.info(String.format(MSG_NETWORK, osNet.getId(), MSG_UPDATED));
}
// Updating a network whose id is null/empty must be rejected up front.
@Test(expected = IllegalArgumentException.class)
public void testUpdateNetworkWithNullId() {
    final Network testNet = NeutronNetwork.builder().build();
    target.updateNetwork(testNet);
}
/**
 * Implements DistCp's -delete option: walks the sorted source and target
 * listings in lockstep and deletes every target entry that has no source
 * counterpart. Deletion is recursive, so a DeletedDirTracker suppresses
 * redundant deletes under already-deleted directories. With ignoreFailures
 * set, individual delete errors are counted instead of propagated.
 */
private void deleteMissing(Configuration conf) throws IOException {
    LOG.info("-delete option is enabled. About to remove entries from " +
        "target that are missing in source");
    long listingStart = System.currentTimeMillis();
    // Sort the source-file listing alphabetically.
    Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
    FileSystem clusterFS = sourceListing.getFileSystem(conf);
    Path sortedSourceListing = DistCpUtils.sortListing(conf, sourceListing);
    long sourceListingCompleted = System.currentTimeMillis();
    LOG.info("Source listing completed in {}",
        formatDuration(sourceListingCompleted - listingStart));
    // Similarly, create the listing of target-files. Sort alphabetically.
    Path targetListing = new Path(sourceListing.getParent(), "targetListing.seq");
    Path sortedTargetListing = new Path(targetListing.toString() + "_sorted");
    Path targetFinalPath = listTargetFiles(conf, targetListing, sortedTargetListing);
    long totalLen = clusterFS.getFileStatus(sortedTargetListing).getLen();
    SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(sortedSourceListing));
    SequenceFile.Reader targetReader = new SequenceFile.Reader(conf,
        SequenceFile.Reader.file(sortedTargetListing));
    // Walk both source and target file listings.
    // Delete all from target that doesn't also exist on source.
    long deletionStart = System.currentTimeMillis();
    LOG.info("Destination listing completed in {}",
        formatDuration(deletionStart - sourceListingCompleted));
    long deletedEntries = 0;
    long filesDeleted = 0;
    long missingDeletes = 0;
    long failedDeletes = 0;
    long skippedDeletes = 0;
    long deletedDirectories = 0;
    // Tracker capacity of 1000 is an arbitrary constant.
    final DeletedDirTracker tracker = new DeletedDirTracker(1000);
    try {
      CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
      Text srcRelPath = new Text();
      CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus();
      Text trgtRelPath = new Text();
      final FileSystem targetFS = targetFinalPath.getFileSystem(conf);
      boolean showProgress;
      boolean srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
      while (targetReader.next(trgtRelPath, trgtFileStatus)) {
        // Skip sources that don't exist on target.
        while (srcAvailable && trgtRelPath.compareTo(srcRelPath) > 0) {
          srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
        }
        Path targetEntry = trgtFileStatus.getPath();
        LOG.debug("Comparing {} and {}", srcFileStatus.getPath(), targetEntry);
        // Entry exists in both listings: nothing to delete.
        if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;
        // Target doesn't exist at source. Try to delete it.
        if (tracker.shouldDelete(trgtFileStatus)) {
          showProgress = true;
          try {
            if (targetFS.delete(targetEntry, true)) {
              // The delete worked; record whether it was a file or directory.
              LOG.info("Deleted " + targetEntry + " - missing at source");
              deletedEntries++;
              if (trgtFileStatus.isDirectory()) {
                deletedDirectories++;
              } else {
                filesDeleted++;
              }
            } else {
              // delete returned false.
              // For all the filestores which implement the FS spec properly,
              // this means "the file wasn't there".
              // so track but don't worry about it.
              LOG.info("delete({}) returned false ({})", targetEntry, trgtFileStatus);
              missingDeletes++;
            }
          } catch (IOException e) {
            if (!ignoreFailures) {
              throw e;
            } else {
              // Failed to delete but errors are being ignored: count the
              // failure and move on to the next entry.
              LOG.info("Failed to delete {}, ignoring exception {}", targetEntry, e.toString());
              LOG.debug("Failed to delete {}", targetEntry, e);
              failedDeletes++;
            }
          }
        } else {
          // A parent directory was already deleted; this entry is gone too.
          LOG.debug("Skipping deletion of {}", targetEntry);
          skippedDeletes++;
          showProgress = false;
        }
        if (showProgress) {
          // Update progress if there's been any FS IO / files deleted.
          taskAttemptContext.progress();
          taskAttemptContext.setStatus("Deleting removed files from target. [" +
              targetReader.getPosition() * 100 / totalLen + "%]");
        }
      }
      // If the FS toString() call prints statistics, they get logged here.
      LOG.info("Completed deletion of files from {}", targetFS);
    } finally {
      IOUtils.closeStream(sourceReader);
      IOUtils.closeStream(targetReader);
    }
    long deletionEnd = System.currentTimeMillis();
    long deletedFileCount = deletedEntries - deletedDirectories;
    LOG.info("Deleted from target: files: {} directories: {};" +
        " skipped deletions {}; deletions already missing {};" +
        " failed deletes {}",
        deletedFileCount, deletedDirectories, skippedDeletes,
        missingDeletes, failedDeletes);
    LOG.info("Number of tracked deleted directories {}", tracker.size());
    LOG.info("Duration of deletions: {}",
        formatDuration(deletionEnd - deletionStart));
    LOG.info("Total duration of deletion operation: {}",
        formatDuration(deletionEnd - listingStart));
}
// End-to-end -delete behavior: after commit, entries present only in the
// target are removed so source and target trees converge; a second commit
// must be idempotent.
@Test
public void testDeleteMissing() throws IOException {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
        taskAttemptContext.getTaskAttemptID().getJobID());
    Configuration conf = jobContext.getConfiguration();
    String sourceBase;
    String targetBase;
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        sourceBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
        targetBase = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
        // Extra content under target only — this is what -delete must remove.
        String targetBaseAdd = TestDistCpUtils.createTestSetup(fs, FsPermission.getDefault());
        fs.rename(new Path(targetBaseAdd), new Path(targetBase));
        final DistCpOptions options = new DistCpOptions.Builder(
            Collections.singletonList(new Path(sourceBase)), new Path("/out"))
            .withSyncFolder(true).withDeleteMissing(true).build();
        options.appendToConf(conf);
        final DistCpContext context = new DistCpContext(options);
        CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
        Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
        listing.buildListing(listingFile, context);
        conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase);
        conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase);
        committer.commitJob(jobContext);
        // Both directions must now be in sync.
        verifyFoldersAreInSync(fs, targetBase, sourceBase);
        verifyFoldersAreInSync(fs, sourceBase, targetBase);
        //Test for idempotent commit
        committer.commitJob(jobContext);
        verifyFoldersAreInSync(fs, targetBase, sourceBase);
        verifyFoldersAreInSync(fs, sourceBase, targetBase);
    } finally {
        TestDistCpUtils.delete(fs, "/tmp1");
        conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
    }
}
/**
 * Redirects matching requests to the setup page when the system has no
 * initialized user; all other requests pass straight down the filter chain.
 */
@Override
@NonNull
public Mono<Void> filter(@NonNull ServerWebExchange exchange, @NonNull WebFilterChain chain) {
    return redirectMatcher.matches(exchange)
        .flatMap(matched -> {
            // Requests outside the configured matcher are never redirected.
            if (!matched.isMatch()) {
                return chain.filter(exchange);
            }
            return initializationStateGetter.userInitialized()
                .defaultIfEmpty(false)
                .flatMap(initialized -> {
                    if (initialized) {
                        return chain.filter(exchange);
                    }
                    // Redirect to set up page if system is not initialized.
                    return redirectStrategy.sendRedirect(exchange, location);
                });
        });
}
// With an initialized user, the filter must delegate to the chain and never
// issue the setup redirect.
@Test
void shouldNotRedirectWhenSystemInitialized() {
    when(initializationStateGetter.userInitialized()).thenReturn(Mono.just(true));
    WebFilterChain chain = mock(WebFilterChain.class);
    MockServerHttpRequest request = MockServerHttpRequest.get("/").build();
    MockServerWebExchange exchange = MockServerWebExchange.from(request);
    when(chain.filter(any())).thenReturn(Mono.empty().then());
    Mono<Void> result = filter.filter(exchange, chain);
    StepVerifier.create(result)
        .expectNextCount(0)
        .expectComplete()
        .verify();
    verify(serverRedirectStrategy, never()).sendRedirect(eq(exchange), eq(URI.create("/console")));
    verify(chain).filter(eq(exchange));
}
/**
 * Processes a NodeManager heartbeat. Validates the node, de-duplicates and
 * resyncs out-of-order heartbeats, drives decommissioning, builds the
 * response (keys, tokens, collectors, resources, queuing limits), forwards
 * the node status to the RMNode, and applies distributed node labels and
 * node attributes reported by the NM.
 */
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
    throws YarnException, IOException {
    NodeStatus remoteNodeStatus = request.getNodeStatus();
    /**
     * Here is the node heartbeat sequence...
     * 1. Check if it's a valid (i.e. not excluded) node
     * 2. Check if it's a registered node
     * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
     * 4. Send healthStatus to RMNode
     * 5. Update node's labels if distributed Node Labels configuration is enabled
     */
    NodeId nodeId = remoteNodeStatus.getNodeId();
    // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
    // in decommissioning.
    if (!this.nodesListManager.isValidNode(nodeId.getHost())
        && !isNodeInDecommissioning(nodeId)) {
      String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: "
          + nodeId.getHost();
      LOG.info(message);
      return YarnServerBuilderUtils.newNodeHeartbeatResponse(
          NodeAction.SHUTDOWN, message);
    }
    // 2. Check if it's a registered node
    RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
    if (rmNode == null) {
      /* node does not exist */
      String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
      LOG.info(message);
      return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
    }
    // Send ping
    this.nmLivelinessMonitor.receivedPing(nodeId);
    this.decommissioningWatcher.update(rmNode, remoteNodeStatus);
    // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
    NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
    if (getNextResponseId(
        remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
            .getResponseId()) {
      // Duplicate: the NM re-sent the previous heartbeat; replay the response.
      LOG.info("Received duplicate heartbeat from node "
          + rmNode.getNodeAddress() + " responseId=" + remoteNodeStatus.getResponseId());
      return lastNodeHeartbeatResponse;
    } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
        .getResponseId()) {
      // Response ids are out of sync: force the NM to resync.
      String message = "Too far behind rm response id:"
          + lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
          + remoteNodeStatus.getResponseId();
      LOG.info(message);
      // TODO: Just sending reboot is not enough. Think more.
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
      return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC, message);
    }
    // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
    if (rmNode.getState() == NodeState.DECOMMISSIONING
        && decommissioningWatcher.checkReadyToBeDecommissioned(
            rmNode.getNodeID())) {
      String message = "DECOMMISSIONING " + nodeId + " is ready to be decommissioned";
      LOG.info(message);
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
      this.nmLivelinessMonitor.unregister(nodeId);
      return YarnServerBuilderUtils.newNodeHeartbeatResponse(
          NodeAction.SHUTDOWN, message);
    }
    if (timelineServiceV2Enabled) {
      // Check & update collectors info from request.
      updateAppCollectorsMap(request);
    }
    // Heartbeat response
    long newInterval = nextHeartBeatInterval;
    if (heartBeatIntervalScalingEnable) {
      newInterval = rmNode.calculateHeartBeatInterval(
          nextHeartBeatInterval, heartBeatIntervalMin,
          heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
          heartBeatIntervalSlowdownFactor);
    }
    NodeHeartbeatResponse nodeHeartBeatResponse =
        YarnServerBuilderUtils.newNodeHeartbeatResponse(
            getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
            NodeAction.NORMAL, null, null, null, null, newInterval);
    rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);
    populateKeys(request, nodeHeartBeatResponse);
    populateTokenSequenceNo(request, nodeHeartBeatResponse);
    if (timelineServiceV2Enabled) {
      // Return collectors' map that NM needs to know
      setAppCollectorsMapToResponse(rmNode.getRunningApps(), nodeHeartBeatResponse);
    }
    // 4. Send status to RMNode, saving the latest response.
    RMNodeStatusEvent nodeStatusEvent = new RMNodeStatusEvent(nodeId, remoteNodeStatus);
    if (request.getLogAggregationReportsForApps() != null
        && !request.getLogAggregationReportsForApps().isEmpty()) {
      nodeStatusEvent.setLogAggregationReportsForApps(request
          .getLogAggregationReportsForApps());
    }
    this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);
    // 5. Update node's labels to RM's NodeLabelManager.
    if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
      try {
        updateNodeLabelsFromNMReport(
            NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId);
        nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
      } catch (IOException ex) {
        //ensure the error message is captured and sent across in response
        nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
        nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
      }
    }
    // 6. check if node's capacity is load from dynamic-resources.xml
    // if so, send updated resource back to NM.
    String nid = nodeId.toString();
    Resource capability = loadNodeResourceFromDRConfiguration(nid);
    // sync back with new resource if not null.
    if (capability != null) {
      nodeHeartBeatResponse.setResource(capability);
    }
    // Check if we got an event (AdminService) that updated the resources
    if (rmNode.isUpdatedCapability()) {
      nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
      rmNode.resetUpdatedCapability();
    }
    // 7. Send Container Queuing Limits back to the Node. This will be used by
    // the node to truncate the number of Containers queued for execution.
    if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
      nodeHeartBeatResponse.setContainerQueuingLimit(
          this.rmContext.getNodeManagerQueueLimitCalculator()
              .createContainerQueuingLimit());
    }
    // 8. Get node's attributes and update node-to-attributes mapping
    // in RMNodeAttributeManager.
    if (request.getNodeAttributes() != null) {
      try {
        // update node attributes if necessary then update heartbeat response
        updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
        nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
      } catch (IOException ex) {
        //ensure the error message is captured and sent across in response
        String errorMsg =
            nodeHeartBeatResponse.getDiagnosticsMessage() == null
                ? ex.getMessage()
                : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex.getMessage();
        nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
        nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
      }
    }
    return nodeHeartBeatResponse;
}
// A heartbeat whose response id lags far behind the RM's must trigger RESYNC
// with a diagnostic message and bump the rebooted-NM cluster metric.
@Test
public void testReboot() throws Exception {
    Configuration conf = new Configuration();
    rm = new MockRM(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    MockNM nm2 = rm.registerNode("host2:1234", 2048);
    int initialMetricCount = ClusterMetrics.getMetrics().getNumRebootedNMs();
    // A healthy in-sequence heartbeat stays NORMAL.
    NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true);
    Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
    // Heartbeat with a wildly stale response id (-100) forces a resync.
    nodeHeartbeat = nm2.nodeHeartbeat(
        new HashMap<ApplicationId, List<ContainerStatus>>(), true, -100);
    Assert.assertTrue(NodeAction.RESYNC.equals(nodeHeartbeat.getNodeAction()));
    Assert.assertEquals("Too far behind rm response id:0 nm response id:-100",
        nodeHeartbeat.getDiagnosticsMessage());
    checkRebootedNMCount(rm, ++initialMetricCount);
}
/**
 * Returns a page of reward activities matching the request's filters;
 * delegates directly to the mapper.
 */
@Override
public PageResult<RewardActivityDO> getRewardActivityPage(RewardActivityPageReqVO pageReqVO) {
    return rewardActivityMapper.selectPage(pageReqVO);
}
// getRewardActivityPage must return only the activities matching BOTH the
// name and status filters.
@Test
public void testGetRewardActivityPage() {
    // mock data: the row we expect the query to find
    RewardActivityDO dbRewardActivity = randomPojo(RewardActivityDO.class, o -> {
        o.setName("่Š‹่‰ฟ");
        o.setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
    });
    rewardActivityMapper.insert(dbRewardActivity);
    // row whose name does not match
    rewardActivityMapper.insert(cloneIgnoreId(dbRewardActivity, o -> o.setName("ๅœŸ่ฑ†")));
    // row whose status does not match
    rewardActivityMapper.insert(cloneIgnoreId(dbRewardActivity, o -> o.setStatus(PromotionActivityStatusEnum.RUN.getStatus())));
    // prepare query parameters
    RewardActivityPageReqVO reqVO = new RewardActivityPageReqVO();
    reqVO.setName("่Š‹่‰ฟ");
    reqVO.setStatus(PromotionActivityStatusEnum.CLOSE.getStatus());
    // invoke
    PageResult<RewardActivityDO> pageResult = rewardActivityService.getRewardActivityPage(reqVO);
    // assert: only the matching row is returned
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbRewardActivity, pageResult.getList().get(0), "rules");
}
/**
 * Returns (creating and caching on first use) the dynamic proxy that
 * implements the requested options interface, backed by this invocation
 * handler. Thread-safe via double-checked locking: computedProperties is an
 * immutable snapshot that is re-read under the lock and republished whole.
 */
<T extends PipelineOptions> T as(Class<T> iface) {
    checkNotNull(iface);
    checkArgument(iface.isInterface(), "Not an interface: %s", iface);
    T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
    if (existingOption == null) {
        synchronized (this) {
            // double check: another thread may have built the proxy meanwhile
            existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
            if (existingOption == null) {
                // Validate the interface shape against the known interfaces.
                Registration<T> registration =
                    PipelineOptionsFactory.CACHE
                        .get()
                        .validateWellFormed(iface, computedProperties.knownInterfaces);
                List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
                Class<T> proxyClass = registration.getProxyClass();
                existingOption =
                    InstanceBuilder.ofType(proxyClass)
                        .fromClass(proxyClass)
                        .withArg(InvocationHandler.class, this)
                        .build();
                // Publish a new immutable snapshot containing the fresh proxy.
                computedProperties =
                    computedProperties.updated(iface, existingOption, propertyDescriptors);
            }
        }
    }
    return existingOption;
}
// Getters with the same name declared on different options interfaces must
// observe the same underlying property value through the shared handler.
@Test
public void testMethodConflictProvidesSameValue() throws Exception {
    ProxyInvocationHandler handler = new ProxyInvocationHandler(Maps.newHashMap());
    MethodConflict methodConflict = handler.as(MethodConflict.class);
    methodConflict.setString("conflictValue");
    assertEquals("conflictValue", methodConflict.getString());
    assertEquals("conflictValue", methodConflict.as(Simple.class).getString());
}
/**
 * Resolves the consumer-side TLS cert for the given address: providers are
 * consulted in order, and the first supporting provider that yields a
 * non-null cert wins. Returns null when no provider produces one.
 */
public Cert getConsumerConnectionConfig(URL remoteAddress) {
    for (CertProvider provider : certProviders) {
        if (!provider.isSupport(remoteAddress)) {
            continue;
        }
        Cert resolved = provider.getConsumerConnectionConfig(remoteAddress);
        if (resolved != null) {
            return resolved;
        }
    }
    return null;
}
// Providers are consulted in registration order: the first supporting
// provider with a non-null cert wins, with fallback to later providers when
// one opts out or yields null.
@Test
void testGetConsumerConnectionConfig() {
    CertManager certManager = new CertManager(frameworkModel);
    // No provider supports the address yet.
    Assertions.assertNull(certManager.getConsumerConnectionConfig(url));
    Cert cert1 = Mockito.mock(Cert.class);
    FirstCertProvider.setCert(cert1);
    // A cert alone is not enough; the provider must also support the address.
    Assertions.assertNull(certManager.getConsumerConnectionConfig(url));
    FirstCertProvider.setSupport(true);
    Assertions.assertEquals(cert1, certManager.getConsumerConnectionConfig(url));
    Cert cert2 = Mockito.mock(Cert.class);
    SecondCertProvider.setCert(cert2);
    // First provider still wins while it supports the address.
    Assertions.assertEquals(cert1, certManager.getConsumerConnectionConfig(url));
    SecondCertProvider.setSupport(true);
    Assertions.assertEquals(cert1, certManager.getConsumerConnectionConfig(url));
    FirstCertProvider.setSupport(false);
    Assertions.assertEquals(cert2, certManager.getConsumerConnectionConfig(url));
    FirstCertProvider.setSupport(true);
    FirstCertProvider.setCert(null);
    // A supporting provider returning null falls through to the next one.
    Assertions.assertEquals(cert2, certManager.getConsumerConnectionConfig(url));
}
/**
 * Accepts a pickle when every configured tag expression evaluates to true
 * against its tags. An empty expression set accepts everything.
 */
@Override
public boolean test(Pickle pickle) {
    if (expressions.isEmpty()) {
        return true;
    }
    List<String> pickleTags = pickle.getTags();
    return expressions.stream()
        .allMatch(expr -> expr.evaluate(pickleTags));
}
// A predicate for @FOO must reject a pickle tagged only @BAR.
@Test
void single_tag_predicate_does_not_match_pickle_with_different_single_tag() {
    Pickle pickle = createPickleWithTags("@BAR");
    TagPredicate predicate = createPredicate("@FOO");
    assertFalse(predicate.test(pickle));
}
/**
 * Builds an ScmInfo from the scanner report's changesets by resolving, for
 * each line, the changeset referenced by its per-line index.
 */
public static ScmInfo create(ScannerReport.Changesets changesets) {
    requireNonNull(changesets);
    LineIndexToChangeset indexToChangeset = new LineIndexToChangeset(changesets);
    int lineCount = changesets.getChangesetIndexByLineCount();
    Changeset[] changesetsByLine = new Changeset[lineCount];
    for (int line = 0; line < lineCount; line++) {
        changesetsByLine[line] = indexToChangeset.apply(line);
    }
    return new ScmInfoImpl(changesetsByLine);
}
// getLatestChangeset must pick the changeset with the greatest date,
// regardless of declaration order or how many lines reference it.
@Test
public void return_latest_changeset() {
    ScmInfo scmInfo = ReportScmInfo.create(ScannerReport.Changesets.newBuilder()
        .setComponentRef(FILE_REF)
        .addChangeset(ScannerReport.Changesets.Changeset.newBuilder()
            .setAuthor("john")
            .setDate(123456789L)
            .setRevision("rev-1")
            .build())
        // Older changeset
        .addChangeset(ScannerReport.Changesets.Changeset.newBuilder()
            .setAuthor("henry")
            .setDate(1234567810L)
            .setRevision("rev-2")
            .build())
        .addChangesetIndexByLine(0)
        .addChangesetIndexByLine(1)
        .addChangesetIndexByLine(0)
        .build());
    Changeset latestChangeset = scmInfo.getLatestChangeset();
    assertThat(latestChangeset.getAuthor()).isEqualTo("henry");
    assertThat(latestChangeset.getDate()).isEqualTo(1234567810L);
    assertThat(latestChangeset.getRevision()).isEqualTo("rev-2");
}
// Resolves the consumer ARN for the spec's stream: an entry in the pipeline-option
// mapping wins (even when it explicitly maps to null); otherwise falls back to the
// ARN configured on the read spec itself.
static @Nullable String resolveConsumerArn(Read spec, PipelineOptions options) {
    String streamName = Preconditions.checkArgumentNotNull(spec.getStreamName());
    KinesisIOOptions sourceOptions = options.as(KinesisIOOptions.class);
    Map<String, String> streamToArnMapping = sourceOptions.getKinesisIOConsumerArns();
    String consumerArn;
    if (streamToArnMapping.containsKey(streamName)) {
        consumerArn = streamToArnMapping.get(streamName); // can resolve to null too
    } else {
        consumerArn = spec.getConsumerArn();
    }
    return consumerArn;
}
// A stream absent from the pipeline-option mapping and without a spec ARN resolves to null.
@Test
public void testConsumerArnForSpecificStreamNotPassedInPipelineOptions() {
    KinesisIO.Read readSpec = KinesisIO.read().withStreamName("stream-xxx");
    KinesisIOOptions options = createIOOptions("--kinesisIOConsumerArns={\"stream-01\": \"arn-01\"}");
    assertThat(KinesisSource.resolveConsumerArn(readSpec, options)).isNull();
}
// Replays the recorded bytecode instructions, assembles the resulting JobDetails
// from the captured class/field/method/parameter state, and post-processes it.
public JobDetails getJobDetails() {
    invokeInstructions();
    final JobDetails jobDetails = new JobDetails(jobDetailsClassName, jobDetailsStaticFieldName, jobDetailsMethodName, jobDetailsJobParameters);
    return postProcessJobDetails(jobDetails);
}
// Regression test: a StringBuilder append chain inside a job lambda must be
// folded into the final argument value ("Hello World") of the JobDetails.
@Test
void reproduceIssueStringBuilderAppend() {
    final JobDetailsBuilder jobDetailsBuilder = getJobDetailsBuilder();
    new ALoadOperandInstruction(jobDetailsBuilder).load(1);
    new InvokeSpecialInstruction(jobDetailsBuilder).load("java/lang/StringBuilder", "<init>", "()V", false);
    new LdcInstruction(jobDetailsBuilder).load("Hello ");
    new InvokeVirtualInstruction(jobDetailsBuilder).load("java/lang/StringBuilder", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;", false);
    new ALoadOperandInstruction(jobDetailsBuilder).load(0);
    new InvokeVirtualInstruction(jobDetailsBuilder).load("java/lang/StringBuilder", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;", false);
    new InvokeVirtualInstruction(jobDetailsBuilder).load("java/lang/StringBuilder", "toString", "()Ljava/lang/String;", false);
    new InvokeVirtualInstruction(jobDetailsBuilder).load("org/jobrunr/stubs/TestService", "doWork", "(Ljava/lang/String;)V", false);
    final JobDetails jobDetails = jobDetailsBuilder.getJobDetails();
    assertThat(jobDetails)
        .hasClass(TestService.class)
        .hasMethodName("doWork")
        .hasArgs("Hello World");
}
// Merges configurations from a priority-ordered release list (highest priority first).
// Iterating the list in reverse lets higher-priority entries overwrite lower-priority
// ones in the resulting map; insertion order is preserved via a LinkedHashMap.
Map<String, String> mergeReleaseConfigurations(List<Release> releases) {
    Map<String, String> result = Maps.newLinkedHashMap();
    for (Release release : Lists.reverse(releases)) {
        result.putAll(gson.fromJson(release.getConfigurations(), configurationTypeReference));
    }
    return result;
}
// The first (high-priority) release's value for a shared key must win over the
// second release's value; keys unique to either release are retained.
@Test
public void testMergeConfigurations() throws Exception {
    Gson gson = new Gson();
    String key1 = "key1";
    String value1 = "value1";
    String anotherValue1 = "anotherValue1";
    String key2 = "key2";
    String value2 = "value2";
    Map<String, String> config = ImmutableMap.of(key1, anotherValue1);
    Map<String, String> anotherConfig = ImmutableMap.of(key1, value1, key2, value2);
    Release releaseWithHighPriority = new Release();
    releaseWithHighPriority.setConfigurations(gson.toJson(config));
    Release releaseWithLowPriority = new Release();
    releaseWithLowPriority.setConfigurations(gson.toJson(anotherConfig));
    Map<String, String> result = configController.mergeReleaseConfigurations(
        Lists.newArrayList(releaseWithHighPriority, releaseWithLowPriority));
    assertEquals(2, result.keySet().size());
    assertEquals(anotherValue1, result.get(key1));
    assertEquals(value2, result.get(key2));
}
// Parses a natural-language date expression relative to the current wall-clock time.
public Result parse(final String string) throws DateNotParsableException {
    return this.parse(string, new Date());
}
// "last month" relative to 12.06.2021 must span the whole of May 2021 (UTC).
@Test
public void testParseLastMonth() throws Exception {
    DateTime reference = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("12.06.2021 09:45:23");
    NaturalDateParser.Result result = naturalDateParser.parse("last month", reference.toDate());
    DateTime lastMonthStart = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("01.05.2021 00:00:00");
    DateTime lastMonthEnd = DateTimeFormat.forPattern("dd.MM.yyyy HH:mm:ss").withZoneUTC().parseDateTime("01.06.2021 00:00:00");
    assertThat(result.getFrom()).as("should be equal to").isEqualTo(lastMonthStart);
    assertThat(result.getTo()).as("should be equal to").isEqualTo(lastMonthEnd);
}
// Renders the encoded SBE message (wrapped in an agrona buffer at offset 0)
// as JSON into the supplied output.
public void print(final ByteBuffer encodedMessage, final StringBuilder output) {
    final UnsafeBuffer buffer = new UnsafeBuffer(encodedMessage);
    print(output, buffer, 0);
}
// A fixed-width character field containing a NUL terminator followed by garbage
// must be printed only up to the terminator ("vc"), dropping the trailing bytes.
@Test
public void removeTrailingGarbage() throws Exception {
    final ByteBuffer encodedSchemaBuffer = ByteBuffer.allocate(SCHEMA_BUFFER_CAPACITY);
    encodeSchema(encodedSchemaBuffer);
    final ByteBuffer encodedMsgBuffer = ByteBuffer.allocate(MSG_BUFFER_CAPACITY);
    final UnsafeBuffer buffer = new UnsafeBuffer(encodedMsgBuffer);
    final CarEncoder encoder = new CarEncoder();
    encoder.wrapAndApplyHeader(buffer, 0, new MessageHeaderEncoder());
    encoder.vehicleCode("vc\0๏พ‰๏ฟฝ");
    encodedMsgBuffer.position(encoder.encodedLength());
    encodedSchemaBuffer.flip();
    final Ir ir = decodeIr(encodedSchemaBuffer);
    final JsonPrinter printer = new JsonPrinter(ir);
    final String result = printer.print(encodedMsgBuffer);
    assertEquals("{\n" +
        " \"serialNumber\": 0,\n" +
        " \"modelYear\": 0,\n" +
        " \"available\": \"F\",\n" +
        " \"code\": \"null\",\n" +
        " \"someNumbers\": [0, 0, 0, 0, 0],\n" +
        " \"vehicleCode\": \"vc\",\n" + //trailing garbage removed
        " \"extras\": { \"sunRoof\": false, \"sportsPack\": false, \"cruiseControl\": false },\n" +
        " \"engine\": \n" +
        " {\n" +
        " \"capacity\": 0,\n" +
        " \"numCylinders\": 0,\n" +
        " \"maxRpm\": 9000,\n" +
        " \"manufacturerCode\": \"\",\n" +
        " \"fuel\": \"Petrol\"\n" +
        " },\n" +
        " \"uuid\": [0, 0],\n" +
        " \"cupHolderCount\": 0,\n" +
        " \"fuelFigures\": [],\n" +
        " \"performanceFigures\": [],\n" +
        " \"manufacturer\": \"\",\n" +
        " \"model\": \"\",\n" +
        " \"activationCode\": \"\"\n" +
        "}", result);
}
/**
 * Returns a defensive copy of the recorded values so callers cannot
 * mutate the snapshot's backing array.
 */
@Override
public long[] getValues() {
    return values.clone();
}
// getValues must expose exactly the five recorded sample values.
@Test
public void hasValues() {
    assertThat(snapshot.getValues())
        .containsOnly(1, 2, 3, 4, 5);
}
// Acquires a connection on the given event loop: reuses an idle pooled connection
// when one is available, otherwise initiates a new connection asynchronously.
// Fails fast with IllegalStateException when the pool is draining. The returned
// promise completes with the connection (or the connect failure).
@Override
public Promise<PooledConnection> acquire(
        EventLoop eventLoop, CurrentPassport passport, AtomicReference<? super InetAddress> selectedHostAddr) {
    if (draining) {
        throw new IllegalStateException("Attempt to acquire connection while draining");
    }
    requestConnCounter.increment();
    updateServerStatsOnAcquire();
    Promise<PooledConnection> promise = eventLoop.newPromise();
    // Try getting a connection from the pool.
    final PooledConnection conn = tryGettingFromConnectionPool(eventLoop);
    if (conn != null) {
        // There was a pooled connection available, so use this one.
        reusePooledConnection(passport, selectedHostAddr, conn, promise);
    } else {
        // connection pool empty, create new connection using client connection factory.
        tryMakingNewConnection(eventLoop, promise, passport, selectedHostAddr);
    }
    return promise;
}
// With MaxConnectionsPerHost=1 already consumed, acquiring must fail with
// OriginConnectException and bump the exceeded counter.
@Test
void acquireNewConnectionHitsMaxConnections() {
    CurrentPassport currentPassport = CurrentPassport.create();
    clientConfig.set(Keys.MaxConnectionsPerHost, 1);
    discoveryResult.incrementOpenConnectionsCount();
    Promise<PooledConnection> promise = pool.acquire(CLIENT_EVENT_LOOP, currentPassport, new AtomicReference<>());
    assertFalse(promise.isSuccess());
    assertTrue(promise.cause() instanceof OriginConnectException);
    assertEquals(1, maxConnsPerHostExceededCounter.count());
}
// Appends raw consumer records to the queue of the given partition and returns that
// queue's new size. Maintains the total buffered count and the all-partitions-buffered
// flag; throws IllegalStateException for a partition this group does not own.
@Override
int addRawRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) {
    final RecordQueue recordQueue = partitionQueues.get(partition);
    if (recordQueue == null) {
        throw new IllegalStateException("Partition " + partition + " not found.");
    }
    final int oldSize = recordQueue.size();
    final int newSize = recordQueue.addRawRecords(rawRecords);
    // add this record queue to be considered for processing in the future if it was empty before
    if (oldSize == 0 && newSize > 0) {
        nonEmptyQueuesByTime.offer(recordQueue);
        // if all partitions now are non-empty, set the flag
        // we do not need to update the stream-time here since this task will definitely be
        // processed next, and hence the stream-time will be updated when we retrieved records by then
        if (nonEmptyQueuesByTime.size() == this.partitionQueues.size()) {
            allBuffered = true;
        }
    }
    totalBuffered += newSize - oldSize;
    return newSize;
}
// Adding records for a partition the group does not own must throw IllegalStateException.
@Test
public void shouldThrowIllegalStateExceptionUponAddRecordsIfPartitionUnknown() {
    final PartitionGroup group = getBasicGroup();
    final IllegalStateException exception = assertThrows(
        IllegalStateException.class,
        () -> group.addRawRecords(unknownPartition, null));
    assertThat(errMessage, equalTo(exception.getMessage()));
}
/**
 * Resolves the application name, trying detection strategies in order:
 * explicit project-name marker first, then the server home directory,
 * finally falling back to the default name.
 */
public static String getAppName() {
    final String byProjectName = getAppNameByProjectName();
    if (byProjectName != null) {
        return byProjectName;
    }
    final String byServerHome = getAppNameByServerHome();
    return byServerHome != null ? byServerHome : DEFAULT_APP_NAME;
}
// Exercises each resolution source in turn: explicit project marker, then the
// JBoss/Jetty/Tomcat home markers, finally the default when nothing is set.
@Test
void testGetAppName() {
    System.setProperty(PARAM_MARKING_PROJECT, SERVER_UNKNOWN);
    assertEquals(SERVER_UNKNOWN, AppNameUtils.getAppName());
    System.clearProperty(PARAM_MARKING_PROJECT);
    System.setProperty(PARAM_MARKING_JBOSS, LINUX_ADMIN_HOME + SERVER_JBOSS + File.separator);
    assertEquals(SERVER_JBOSS, AppNameUtils.getAppName());
    System.clearProperty(PARAM_MARKING_JBOSS);
    System.setProperty(PARAM_MARKING_JETTY, LINUX_ADMIN_HOME + SERVER_JETTY + File.separator);
    assertEquals(SERVER_JETTY, AppNameUtils.getAppName());
    System.clearProperty(PARAM_MARKING_JETTY);
    System.setProperty(PARAM_MARKING_TOMCAT, LINUX_ADMIN_HOME + SERVER_TOMCAT + File.separator);
    assertEquals(SERVER_TOMCAT, AppNameUtils.getAppName());
    System.clearProperty(PARAM_MARKING_TOMCAT);
    assertEquals(DEFAULT_APP_NAME, AppNameUtils.getAppName());
}
// Sends the request synchronously through the underlying API client and
// returns the typed response.
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
// Setting a sticker set on this group is expected to be rejected by the API (HTTP 400).
@Test
public void setChatStickerSet() {
    BaseResponse response = bot.execute(new SetChatStickerSet(groupId, "PengradTest"));
    assertFalse(response.isOk());
    assertEquals(400, response.errorCode());
}
// Opens the jar file and delegates to the JarFile overload; returns empty when no
// attribute names are requested. The JarFile is closed via try-with-resources.
public static Optional<String> findFirstManifestAttribute(File jarFile, String... attributes) throws IOException {
    if (attributes.length == 0) {
        return Optional.empty();
    }
    try (JarFile f = new JarFile(jarFile)) {
        return findFirstManifestAttribute(f, attributes);
    }
}
// Calling with no attribute names must short-circuit to an empty Optional.
@Test
void testFindFirstManifestAttributeWithNoAttribute() throws IOException {
    assertThat(JarManifestParser.findFirstManifestAttribute(TestJob.getTestJobJar())).isEmpty();
}
// Builds a projection filter from a semicolon-delimited list of column glob patterns.
public static StrictFieldProjectionFilter fromSemicolonDelimitedString(String columnsToKeepGlobs) {
    return new StrictFieldProjectionFilter(parseSemicolonDelimitedString(columnsToKeepGlobs));
}
// Verifies glob matching of the parsed projection: exact paths, '*' wildcards
// and brace alternation patterns match; near-misses and literal braces do not.
@Test
public void testProjection() {
    StrictFieldProjectionFilter filter = StrictFieldProjectionFilter.fromSemicolonDelimitedString(
        "home.phone_number;home.address;work.address.zip;base_info;*.average;a.b.c.pre{x,y,z{a,b,c}}post");
    assertMatches(
        filter,
        "home.phone_number",
        "home.address",
        "work.address.zip",
        "base_info",
        "foo.average",
        "bar.x.y.z.average",
        "base_info.nested.field",
        "a.b.c.prexpost",
        "a.b.c.prezapost");
    assertDoesNotMatch(
        filter,
        "home2.phone_number",
        "home2.address",
        "work.address",
        "base_info2",
        "foo_average",
        "bar.x.y.z_average",
        "base_info_nested.field",
        "hi",
        "average",
        "a.b.c.pre{x,y,z{a,b,c}}post",
        "");
}
// Produces a new request in which every unset (null) field of this request is filled
// in from the template; fields already set here take precedence. Returns this very
// instance unchanged when no template is supplied. Field access failures are wrapped
// in IllegalStateException.
public ApprovalRequest applyTemplate(final ApprovalRequest template) {
    if (template == null) {
        return this;
    }
    final ApprovalRequest withTemplateValues = new ApprovalRequest();
    for (final Field field : FieldHolder.INSTANCE.fields) {
        try {
            final Object currentValue = field.get(this);
            // if a field has not been set, and the template has it set use
            // the template value
            if (currentValue == null) {
                final Object templateValue = field.get(template);
                if (templateValue != null) {
                    field.set(withTemplateValues, templateValue);
                }
            } else {
                field.set(withTemplateValues, currentValue);
            }
        } catch (IllegalArgumentException | IllegalAccessException e) {
            throw new IllegalStateException("Unable to apply values from template", e);
        }
    }
    return withTemplateValues;
}
// A null template must be a no-op: the original request instance is returned.
@Test
public void shouldTolerateNullTemplates() {
    final ApprovalRequest request = new ApprovalRequest();
    final ApprovalRequest appliedTo = request.applyTemplate(null);
    assertThat("For null templates applyTemplate should return same object", appliedTo, sameInstance(request));
}
// Executes a configured KSQL plan. KsqlStatementExceptions pass through untouched;
// any other KsqlException is rewrapped as a KsqlStatementException that carries the
// originating statement text for better error reporting.
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) {
    try {
        final ExecuteResult result = EngineExecutor
            .create(primaryContext, serviceContext, plan.getConfig())
            .execute(plan.getPlan(), restoreInProgress);
        return result;
    } catch (final KsqlStatementException e) {
        throw e;
    } catch (final KsqlException e) {
        // add the statement text to the KsqlException
        throw new KsqlStatementException(
            e.getMessage(),
            e.getMessage(),
            plan.getPlan().getStatementText(),
            e.getCause()
        );
    }
}
// When an unknown, case-sensitive source name nearly matches three existing sources,
// the error hint must list all three candidates with their source types.
@Test
public void shouldShowCorrectHintsWhenIncorrectSourceMatchesWithThree() {
    // Given:
    setupKsqlEngineWithSharedRuntimeEnabled();
    KsqlEngineTestUtil.execute(
        serviceContext,
        ksqlEngine,
        "create table \"Foo\" as select * from test2; " +
            "create table foo as select * from test2; " +
            "create stream \"foo\" as select * from test1;",
        ksqlConfig,
        Collections.emptyMap()
    );
    // When:
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> KsqlEngineTestUtil.execute(
            serviceContext,
            ksqlEngine,
            "select * from \"FoO\";",
            ksqlConfig,
            Collections.emptyMap()
        )
    );
    // Then:
    assertThat(e, rawMessage(is(
        "Exception while preparing statement: FoO does not exist.\n" +
            "Did you mean \"FOO\" (TABLE), \"Foo\" (TABLE), or \"foo\" (STREAM)? Hint: wrap the source name in double quotes to make it case-sensitive.")));
    assertThat(e, statementText(is("select * from \"FoO\";")));
}
// Equality is based solely on the wrapped throwable's runtime class
// (the instanceof check also rejects null).
@Override
public boolean equals(@Nullable Object obj) {
    if (!(obj instanceof EncodableThrowable)) {
        return false;
    }
    Throwable other = ((EncodableThrowable) obj).throwable;
    // Assuming class preservation is enough to know that serialization/deserialization worked.
    return throwable.getClass().equals(other.getClass());
}
// Two wrappers around the same throwable must be equal (reflexively and to each other).
@Test
public void testEquals() {
    IllegalStateException exception = new IllegalStateException(
        "Some illegal state",
        new RuntimeException(
            "Some nested exception", new Exception("Deeply nested exception")));
    EncodableThrowable comparable1 = EncodableThrowable.forThrowable(exception);
    EncodableThrowable comparable2 = EncodableThrowable.forThrowable(exception);
    assertEquals(comparable1, comparable1);
    assertEquals(comparable1, comparable2);
}
// Registers the instance with the register center: captures client info, applies
// warm-up parameters and zone metadata first; logs and aborts when no register
// center service is available.
@Override
public void register(ServiceCombRegistration registration) {
    fillClientInfo(registration);
    GraceHelper.configWarmUpParams(registration.getMetadata(), PluginConfigManager.getPluginConfig(GraceConfig.class));
    ZoneUtils.setZone(registration.getMetadata());
    RegisterCenterService registerService = getRegisterCenterService();
    if (registerService == null) {
        LOGGER.severe("registerCenterService is null, fail to register!");
        return;
    }
    registerService.register(new FixedResult());
}
// register() must copy host/meta/port/serviceId from the registration into the shared
// ClientInfo and invoke the register-center service exactly once; the trailing
// setters reset the shared singleton state for subsequent tests.
@Test
public void register() {
    String host = "localhost";
    Map<String, String> meta = new HashMap<>();
    int port = 8888;
    String serviceId = "test";
    Mockito.when(registration.getHost()).thenReturn(host);
    Mockito.when(registration.getMetadata()).thenReturn(meta);
    Mockito.when(registration.getPort()).thenReturn(port);
    Mockito.when(registration.getServiceId()).thenReturn(serviceId);
    registry.register(registration);
    final ClientInfo clientInfo = RegisterContext.INSTANCE.getClientInfo();
    Assert.assertEquals(clientInfo.getHost(), host);
    Assert.assertEquals(clientInfo.getMeta(), meta);
    Assert.assertEquals(clientInfo.getPort(), port);
    Assert.assertEquals(clientInfo.getServiceId(), serviceId);
    Mockito.verify(spyService, Mockito.times(1)).register(Mockito.any());
    clientInfo.setServiceId(null);
    clientInfo.setHost(null);
    clientInfo.setZone(null);
    clientInfo.setMeta(null);
    clientInfo.setPort(0);
}
// Cleans the throwable's stack trace in place; the identity set passed to clean()
// tracks throwables already visited (identity, not equals, semantics).
static void cleanStackTrace(Throwable throwable) {
    new StackTraceCleaner(throwable).clean(Sets.<Throwable>newIdentityHashSet());
}
// Truth's own frames are stripped, but the JUnit statement frame and user code remain.
@Test
public void failureFromJUnitInfrastructureIncludesItInStack() {
    Throwable throwable = createThrowableWithStackTrace(
        "com.google.common.truth.StringSubject",
        SomeStatement.class.getName(),
        "com.google.example.SomeClass");
    StackTraceCleaner.cleanStackTrace(throwable);
    assertThat(throwable.getStackTrace())
        .isEqualTo(
            new StackTraceElement[] {
                createStackTraceElement(SomeStatement.class.getName()),
                createStackTraceElement("com.google.example.SomeClass"),
            });
}
static <T extends CompoundPredicate> T flattenCompound(Predicate predicateLeft, Predicate predicateRight, Class<T> klass) { // The following could have been achieved with {@link com.hazelcast.query.impl.predicates.FlatteningVisitor}, // however since we only care for 2-argument flattening, we can avoid constructing a visitor and its internals // for each token pass at the cost of the following explicit code. Predicate[] predicates; if (klass.isInstance(predicateLeft) || klass.isInstance(predicateRight)) { Predicate[] left = getSubPredicatesIfClass(predicateLeft, klass); Predicate[] right = getSubPredicatesIfClass(predicateRight, klass); predicates = new Predicate[left.length + right.length]; ArrayUtils.concat(left, right, predicates); } else { predicates = new Predicate[]{predicateLeft, predicateRight}; } try { T compoundPredicate = klass.getDeclaredConstructor().newInstance(); compoundPredicate.setPredicates(predicates); return compoundPredicate; } catch (ReflectiveOperationException e) { throw new IllegalArgumentException(String.format("%s must have a public default constructor", klass.getName())); } }
// Flattening an AndPredicate with another predicate must splice in the left side's
// two sub-predicates, yielding three predicates at one level.
@Test
public void testAnd_whenLeftPredicateAnd() {
    AndPredicate predicate1 = new AndPredicate(new SqlPredicate("a == 1"), new SqlPredicate("a == 2"));
    Predicate<Object, Object> predicate2 = Predicates.alwaysTrue();
    AndPredicate concatenatedOr = SqlPredicate.flattenCompound(predicate1, predicate2, AndPredicate.class);
    assertEquals(3, concatenatedOr.getPredicates().length);
    assertInstanceOf(SqlPredicate.class, concatenatedOr.getPredicates()[0]);
    assertInstanceOf(SqlPredicate.class, concatenatedOr.getPredicates()[1]);
    assertSame(predicate2, concatenatedOr.getPredicates()[2]);
}
// Creates a CouchbaseEndpoint for the given URI and binds the supplied
// parameters onto it via the component's property mechanism.
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    CouchbaseEndpoint endpoint = new CouchbaseEndpoint(uri, remaining, this);
    setProperties(endpoint, parameters);
    return endpoint;
}
// The bootstrap URI list must contain the primary host followed by every
// comma-separated additional host, each on the default port 8091.
@Test
public void testCouchbaseAdditionalHosts() throws Exception {
    Map<String, Object> params = new HashMap<>();
    params.put("additionalHosts", "127.0.0.1,example.com,another-host");
    params.put("bucket", "bucket");
    String uri = "couchbase:http://localhost";
    String remaining = "http://localhost";
    CouchbaseEndpoint endpoint = new CouchbaseComponent(context).createEndpoint(uri, remaining, params);
    URI[] endpointArray = endpoint.makeBootstrapURI();
    assertEquals(new URI("http://localhost:8091/pools"), endpointArray[0]);
    assertEquals(new URI("http://127.0.0.1:8091/pools"), endpointArray[1]);
    assertEquals(new URI("http://example.com:8091/pools"), endpointArray[2]);
    assertEquals(new URI("http://another-host:8091/pools"), endpointArray[3]);
    assertEquals(4, endpointArray.length);
}
// Builds a client-only X509KeyManager: it always offers the single alias "client"
// backed by the CA certificate and its private key, and never acts as a server
// (all server-side callbacks return null).
private X509KeyManager createKeyManager() {
    return new X509KeyManager() {
        @Override
        public String[] getClientAliases(String s, Principal[] principals) {
            return new String[]{"client"};
        }

        @Override
        public String chooseClientAlias(String[] strings, Principal[] principals, Socket socket) {
            return "client";
        }

        @Override
        public String[] getServerAliases(String s, Principal[] principals) {
            // Server-side usage is not supported by this manager.
            return null;
        }

        @Override
        public String chooseServerAlias(String s, Principal[] principals, Socket socket) {
            return null;
        }

        @Override
        public X509Certificate[] getCertificateChain(String s) {
            // Single-element chain: the CA certificate itself.
            return new X509Certificate[]{caCert};
        }

        @Override
        public PrivateKey getPrivateKey(String s) {
            return caKeyPair.getPrivate();
        }
    };
}
// End-to-end check of the ProxyCA key manager: fixed "client" alias, no server
// aliases, and certificate/private key consistent with the CA key pair and the
// cert exported in the child trust store.
@Test
void testCreateKeyManager() throws Exception {
    ProxyCA proxyCA = new ProxyCA();
    proxyCA.init();
    X509KeyManager keyManager = proxyCA.getX509KeyManager();
    assertArrayEquals(new String[]{"client"}, keyManager.getClientAliases(null, null));
    assertEquals("client", keyManager.chooseClientAlias(null, null, null));
    assertNull(keyManager.getServerAliases(null, null));
    assertNull(keyManager.chooseServerAlias(null, null, null));
    byte[] truststoreBytes = proxyCA.getChildTrustStore("password");
    KeyStore truststore = KeyStoreTestUtil.bytesToKeyStore(truststoreBytes, "password");
    assertEquals(1, truststore.size());
    X509Certificate caCert = (X509Certificate) truststore.getCertificate("client");
    assertArrayEquals(new X509Certificate[]{caCert}, keyManager.getCertificateChain(null));
    assertEquals(proxyCA.getCaCert(), caCert);
    PrivateKey caPrivateKey = keyManager.getPrivateKey(null);
    PublicKey caPublicKey = caCert.getPublicKey();
    checkPrivatePublicKeys(caPrivateKey, caPublicKey);
    assertEquals(proxyCA.getCaKeyPair().getPublic(), caPublicKey);
    assertEquals(proxyCA.getCaKeyPair().getPrivate(), caPrivateKey);
}
// Switches the cluster member lookup implementation to the requested type;
// returns true when the switch call completes without throwing.
public Boolean updateLookup(LookupUpdateRequest request) throws NacosException {
    memberManager.switchLookup(request.getType());
    return true;
}
// updateLookup must forward the requested type to the member manager and return true.
@Test
void testUpdateLookup() throws NacosException {
    LookupUpdateRequest lookupUpdateRequest = new LookupUpdateRequest();
    lookupUpdateRequest.setType("test");
    Boolean result = nacosClusterOperationService.updateLookup(lookupUpdateRequest);
    verify(serverMemberManager).switchLookup("test");
    assertTrue(result);
}
/**
 * Flattens {@code sourceMap} into {@code dstMap}, prefixing every key with
 * {@code prefix}. Strings are copied verbatim, numbers via {@code toString()},
 * and nested maps are flattened recursively with a {@code "."}-joined key path.
 * Values of any other type are silently skipped.
 */
public static void flatCopyTo(String prefix, Map<String, Object> sourceMap, Map<String, String> dstMap) {
    sourceMap.forEach((name, raw) -> {
        final String flatKey = prefix + name;
        if (raw instanceof String) {
            dstMap.put(flatKey, (String) raw);
        } else if (raw instanceof Number) {
            dstMap.put(flatKey, raw.toString());
        } else if (raw instanceof Map) {
            // Descend one level, extending the key path with a dot separator.
            flatCopyTo(flatKey + ".", (Map<String, Object>) raw, dstMap);
        }
    });
}
// Round-trips nested request properties through a flat string header: flatCopyTo
// flattens three nested maps (plus two scalar entries) into 15 header entries,
// then treeCopyTo (with remove=true) rebuilds each subtree, leaving 5 entries total.
@Test
public void flatCopyTo() {
    Map<String, Object> requestProps = new HashMap<String, Object>();
    requestProps.put("xx", "xxxxxxx");
    requestProps.put("yyy", new String[] { "yyyy" }); // String arrays cannot be transmitted
    requestProps.put("zzzz", 333);
    Map<String, String> header = new HashMap<String, String>();
    Map<String, String> context = new HashMap<String, String>();
    context.put("sofaCallerApp", "test");
    context.put("sofaCallerIp", "10.15.233.63");
    context.put("sofaPenAttrs", "");
    context.put("sofaRpcId", "0");
    context.put("sofaTraceId", "0a0fe93f1488349732342100153695");
    context.put("sysPenAttrs", "");
    context.put("penAttrs", "Hello=world&");
    String rpcTraceContext = "rpc_trace_context";
    requestProps.put(rpcTraceContext, context);
    Map<String, String> requestBaggage = new HashMap<String, String>();
    requestBaggage.put("aaa", "reqasdhjaksdhaksdyiasdhasdhaskdhaskd");
    requestBaggage.put("bbb", "req10.15.233.63");
    requestBaggage.put("ccc", "reqwhat 's wrong");
    String rpcReqBaggage = "rpc_req_baggage";
    requestProps.put(rpcReqBaggage, requestBaggage);
    Map<String, String> responseBaggage = new HashMap<String, String>();
    responseBaggage.put("xxx", "respasdhjaksdhaksdyiasdhasdhaskdhaskd");
    responseBaggage.put("yyy", "resp10.15.233.63");
    responseBaggage.put("zzz", "resphehehe");
    String rpcRespBaggage = "rpc_resp_baggage";
    requestProps.put(rpcRespBaggage, responseBaggage);
    // rpcSerialization.
    CodecUtils.flatCopyTo("", requestProps, header);
    // 2 scalar entries + 7 + 3 + 3 nested entries; the String[] value is skipped.
    Assert.assertTrue(header.size() == 15);
    for (Map.Entry<String, String> entry : header.entrySet()) {
        LOGGER.info(entry.getKey() + " : " + entry.getValue());
    }
    LOGGER.info("");
    Map<String, Object> newRequestProps = new HashMap<String, Object>();
    Map<String, String> newContext = new HashMap<String, String>();
    CodecUtils.treeCopyTo(rpcTraceContext + ".", header, newContext, true);
    newRequestProps.put(rpcTraceContext, newContext);
    newContext = new HashMap<String, String>();
    CodecUtils.treeCopyTo(rpcReqBaggage + ".", header, newContext, true);
    newRequestProps.put(rpcReqBaggage, newContext);
    newContext = new HashMap<String, String>();
    CodecUtils.treeCopyTo(rpcRespBaggage + ".", header, newContext, true);
    newRequestProps.put(rpcRespBaggage, newContext);
    for (Map.Entry<String, Object> entry : newRequestProps.entrySet()) {
        LOGGER.info(entry.getKey() + " : " + entry.getValue());
    }
    newRequestProps.putAll(header);
    Assert.assertTrue(newRequestProps.size() == 5);
}
// Validates each tag, sets the rule's tags to the given set minus nulls and system
// tags, and reports whether the effective user-tag set actually changed.
static boolean applyTags(RuleDto rule, Set<String> tags) {
    for (String tag : tags) {
        RuleTagFormat.validate(tag);
    }
    Set<String> initialTags = rule.getTags();
    final Set<String> systemTags = rule.getSystemTags();
    // System tags are managed separately and must not appear among user tags.
    Set<String> withoutSystemTags = Sets.filter(tags, input -> input != null && !systemTags.contains(input));
    rule.setTags(withoutSystemTags);
    return withoutSystemTags.size() != initialTags.size() || !withoutSystemTags.containsAll(initialTags);
}
// Applying the same tag set must keep the tags and report no change.
@Test
public void applyTags_no_changes() {
    RuleDto rule = new RuleDto().setTags(Sets.newHashSet("performance"));
    boolean changed = RuleTagHelper.applyTags(rule, Sets.newHashSet("performance"));
    assertThat(rule.getTags()).containsOnly("performance");
    assertThat(changed).isFalse();
}
public static String resolvePlaceholders(String pattern, String placeholderNamespace, Map<String, Object> variableValues) { StringBuilder sb = new StringBuilder(pattern); String placeholderPrefix = "$" + placeholderNamespace + "{"; int endIndex; int startIndex = sb.indexOf(placeholderPrefix); while (startIndex > -1) { endIndex = sb.indexOf("}", startIndex); if (endIndex == -1) { // ignore bad syntax, search finished break; } String variableName = sb.substring(startIndex + placeholderPrefix.length(), endIndex); Object variableValue = variableValues.get(variableName); // ignore missing values if (variableValue != null) { String valueStr = variableValue.toString(); sb.replace(startIndex, endIndex + 1, valueStr); endIndex = startIndex + valueStr.length(); } startIndex = sb.indexOf(placeholderPrefix, endIndex); } return sb.toString(); }
// Covers: no placeholders, simple substitution, unterminated placeholders,
// missing variables, mixes of all three, multiple variables, and namespaced
// prefixes that must not collide with other namespaces.
@Test
void testResolvePlaceholders() {
    assertResolvePlaceholder("noPlaceholders", "noPlaceholders", "", "param", "value");
    assertResolvePlaceholder("param: value", "param: ${param}", "", "param", "value");
    assertResolvePlaceholder("param: ${param", "param: ${param", "", "param", "value");
    assertResolvePlaceholder("missing: ${missing}", "missing: ${missing}", "", "param", "value");
    assertResolvePlaceholder("missing: ${missing}, param: value", "missing: ${missing}, param: ${param}", "", "param", "value");
    assertResolvePlaceholder("broken: ${broken, param: ${param}", "broken: ${broken, param: ${param}", "", "param", "value");
    assertResolvePlaceholder("param: value, broken: ${broken", "param: ${param}, broken: ${broken", "", "param", "value");
    assertResolvePlaceholder("missing: ${missing}, param: value, broken: ${broken", "missing: ${missing}, param: ${param}, broken: ${broken", "", "param", "value");
    assertResolvePlaceholder("param1: value1, param2: value2, param3: value3", "param1: ${param1}, param2: ${param2}, param3: ${param3}", "", "param1", "value1", "param2", "value2", "param3", "value3");
    assertResolvePlaceholder("param: value, param: $OTHER_PREFIX{param}", "param: $PREFIX{param}, param: $OTHER_PREFIX{param}", "PREFIX", "param", "value");
}
// Serialises a non-negative BigInteger into a fixed-width big-endian byte array,
// left-padded with zeros. Rejects negative values, non-positive widths, and
// magnitudes that do not fit in numBytes.
public static byte[] bigIntegerToBytes(BigInteger b, int numBytes) {
    checkArgument(b.signum() >= 0, () -> "b must be positive or zero: " + b);
    checkArgument(numBytes > 0, () -> "numBytes must be positive: " + numBytes);
    byte[] src = b.toByteArray();
    byte[] dest = new byte[numBytes];
    // toByteArray() may prepend a zero byte purely to carry the sign bit; skip it.
    boolean isFirstByteOnlyForSign = src[0] == 0;
    int length = isFirstByteOnlyForSign ? src.length - 1 : src.length;
    checkArgument(length <= numBytes, () -> "The given number does not fit in " + numBytes);
    int srcPos = isFirstByteOnlyForSign ? 1 : 0;
    int destPos = numBytes - length;
    System.arraycopy(src, srcPos, dest, destPos, length);
    return dest;
}
// A zero-byte target width must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void bigIntegerToBytes_convertWithZeroLength() {
    BigInteger b = BigInteger.valueOf(10);
    ByteUtils.bigIntegerToBytes(b, 0);
}
/**
 * Parses a binary string (most significant bit first) into a byte.
 * Any character other than '1' counts as a zero bit; only the last
 * eight characters can influence the result because of byte narrowing.
 */
public static byte bitsToByte(String bits) {
    byte result = 0;
    for (int pos = 0; pos < bits.length(); pos++) {
        final int bit = bits.charAt(pos) == '1' ? 1 : 0;
        // Shift the accumulated bits left and append the next one.
        result = (byte) ((result << 1) | bit);
    }
    return result;
}
// Binary strings parse to their byte values, most significant bit first.
@Test
public void bitsToByte() {
    String s = "00110101";
    Assert.assertEquals(CodecUtils.bitsToByte(s), 0x35);
    String s1 = "00111101";
    Assert.assertEquals(CodecUtils.bitsToByte(s1), 0x3d);
}
// Renames/moves a file via the Storegate move endpoint in overwrite mode,
// forwarding any lock id as the X-Lock-Id header. On 204 it refreshes the file-id
// cache (invalidates the source, caches the target) and returns the renamed path;
// other status codes and I/O errors are mapped to BackgroundException.
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final StoregateApiClient client = session.getClient();
        final MoveFileRequest move = new MoveFileRequest()
            .name(renamed.getName())
            .parentID(fileid.getFileId(renamed.getParent()))
            .mode(1); // Overwrite
        final HttpEntityEnclosingRequestBase request;
        request = new HttpPost(String.format("%s/v4.2/files/%s/move", client.getBasePath(), fileid.getFileId(file)));
        if(status.getLockId() != null) {
            request.addHeader("X-Lock-Id", status.getLockId().toString());
        }
        request.setEntity(new StringEntity(new JSON().getContext(move.getClass()).writeValueAsString(move),
            ContentType.create("application/json", StandardCharsets.UTF_8.name())));
        request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
        final HttpResponse response = client.getClient().execute(request);
        try {
            switch(response.getStatusLine().getStatusCode()) {
                case HttpStatus.SC_NO_CONTENT:
                    final PathAttributes attr = new PathAttributes(file.attributes());
                    fileid.cache(file, null);
                    fileid.cache(renamed, attr.getFileId());
                    return renamed.withAttributes(attr);
                default:
                    throw new StoregateExceptionMappingService(fileid).map("Cannot rename {0}",
                        new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
            }
        }
        finally {
            EntityUtils.consume(response.getEntity());
        }
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
// Moving a file must preserve its file id, make the source disappear and the
// target appear, and perform no server-side copy.
@Test
public void testMove() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final TransferStatus status = new TransferStatus();
    final Path test = new StoregateTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), status);
    final String fileid = test.attributes().getFileId();
    final Path target = new StoregateMoveFeature(session, nodeid).move(test,
        new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertEquals(fileid, target.attributes().getFileId());
    assertEquals(fileid, new StoregateAttributesFinderFeature(session, nodeid).find(target).getFileId());
    assertFalse(new DefaultFindFeature(session).find(test));
    assertTrue(new DefaultFindFeature(session).find(target));
    assertEquals(0, session.getMetrics().get(Copy.class));
    assertEquals(Comparison.equal, session.getHost().getProtocol().getFeature(ComparisonService.class).compare(Path.Type.file, test.attributes(), new StoregateAttributesFinderFeature(session, nodeid).find(target)));
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// Marks the region as unfinished in every consumer-region-group view it belongs to;
// regions with no registered views are silently ignored.
void regionUnfinished(SchedulingPipelinedRegion region) {
    for (ConsumerRegionGroupExecutionView executionView :
            executionViewByRegion.getOrDefault(region, Collections.emptySet())) {
        executionView.regionUnfinished(region);
    }
}
// Un-finishing a previously finished region must reset the group view's finished state.
@Test
void testRegionUnfinished() throws Exception {
    consumerRegionGroupExecutionViewMaintainer.regionFinished(consumerRegion);
    consumerRegionGroupExecutionViewMaintainer.regionUnfinished(consumerRegion);
    assertThat(consumerRegionGroupExecutionView.isFinished()).isFalse();
}
/**
 * Validates that {@code i} is strictly positive and returns it unchanged
 * for fluent use; throws {@link IllegalArgumentException} naming the
 * offending parameter otherwise.
 */
public static int checkPositive(int i, String name) {
    if (i > INT_ZERO) {
        return i;
    }
    throw new IllegalArgumentException(name + " : " + i + " (expected: > 0)");
}
@Test
public void testCheckPositiveIntString() {
    // Positive input: no exception expected.
    Exception actualEx = null;
    try {
        ObjectUtil.checkPositive(POS_ONE_INT, NUM_POS_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNull(actualEx, TEST_RESULT_NULLEX_NOK);
    // Zero input: must throw IllegalArgumentException.
    actualEx = null;
    try {
        ObjectUtil.checkPositive(ZERO_INT, NUM_ZERO_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
    assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
    // Negative input: must also throw IllegalArgumentException.
    actualEx = null;
    try {
        ObjectUtil.checkPositive(NEG_ONE_INT, NUM_NEG_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
    assertTrue(actualEx instanceof IllegalArgumentException, TEST_RESULT_EXTYPE_NOK);
}
/**
 * Disables basic authentication by persisting the current configuration with
 * its enabled flag turned off. Does nothing when basic auth was never
 * configured or is already disabled.
 */
public void unsecure() {
    BasicAuthConfiguration configuration = configuration();
    boolean alreadyDisabled =
        configuration == null || Boolean.FALSE.equals(configuration.getEnabled());
    if (alreadyDisabled) {
        return;
    }
    // Persist the same configuration with enabled=false under the basic-auth key.
    Setting disabledSetting = Setting.builder()
        .key(BASIC_AUTH_SETTINGS_KEY)
        .value(configuration.withEnabled(false))
        .build();
    settingRepository.save(disabledSetting);
}
@Test
void unsecure() {
    // Starts enabled; snapshot the configuration before disabling.
    assertThat(basicAuthService.isEnabled(), is(true));
    BasicAuthService.SaltedBasicAuthConfiguration previousConfiguration = basicAuthService.configuration();
    basicAuthService.unsecure();
    assertThat(basicAuthService.isEnabled(), is(false));
    BasicAuthService.SaltedBasicAuthConfiguration newConfiguration = basicAuthService.configuration();
    // Only the enabled flag flips; all other fields are preserved verbatim.
    assertThat(newConfiguration.getEnabled(), is(false));
    assertThat(newConfiguration.getUsername(), is(previousConfiguration.getUsername()));
    assertThat(newConfiguration.getPassword(), is(previousConfiguration.getPassword()));
    assertThat(newConfiguration.getRealm(), is(previousConfiguration.getRealm()));
    assertThat(newConfiguration.getOpenUrls(), is(previousConfiguration.getOpenUrls()));
}
/**
 * Derives the result decimal type of an addition of two decimals.
 *
 * <p>The result scale is the larger of the two operand scales; the integer
 * part takes the larger operand integer part plus one extra carry digit.
 * The combined precision/scale is then clamped via
 * {@code adjustPrecisionScale}.
 */
public static DecimalType findAdditionDecimalType(
        int precision1, int scale1, int precision2, int scale2) {
    final int resultScale = Math.max(scale1, scale2);
    // Integer digits of each operand are (precision - scale); +1 for the carry.
    final int integerDigits = Math.max(precision1 - scale1, precision2 - scale2) + 1;
    return adjustPrecisionScale(integerDigits + resultScale, resultScale);
}
@Test
void testFindAdditionDecimalType() {
    // At the precision cap (38) the scale is reduced to make room for the carry digit.
    assertThat(LogicalTypeMerging.findAdditionDecimalType(38, 8, 32, 8))
        .hasPrecisionAndScale(38, 7);
    // Symmetric in its operands.
    assertThat(LogicalTypeMerging.findAdditionDecimalType(32, 8, 38, 8))
        .hasPrecisionAndScale(38, 7);
    // Below the cap the precision simply grows by one for the carry.
    assertThat(LogicalTypeMerging.findAdditionDecimalType(30, 20, 28, 20))
        .hasPrecisionAndScale(31, 20);
    assertThat(LogicalTypeMerging.findAdditionDecimalType(10, 10, 10, 10))
        .hasPrecisionAndScale(11, 10);
    // Mixed scales at the cap: result scale is the max of the two.
    assertThat(LogicalTypeMerging.findAdditionDecimalType(38, 5, 38, 4))
        .hasPrecisionAndScale(38, 5);
}
@Override
public Map<String, String> discoverLocalMetadata() {
    // Lazily resolve the availability zone on the first call; subsequent calls
    // return the cached map without querying Azure again.
    boolean notYetPopulated = memberMetadata.isEmpty();
    if (notYetPopulated) {
        String availabilityZone = azureClient.getAvailabilityZone();
        memberMetadata.put(PartitionGroupMetaData.PARTITION_GROUP_ZONE, availabilityZone);
    }
    return memberMetadata;
}
@Test
public void discoverLocalMetadata() {
    // given
    given(azureClient.getAvailabilityZone()).willReturn(ZONE);
    // when: called twice to exercise the caching path
    Map<String, String> result1 = azureDiscoveryStrategy.discoverLocalMetadata();
    Map<String, String> result2 = azureDiscoveryStrategy.discoverLocalMetadata();
    // then: both calls return the zone, but Azure is queried exactly once
    assertEquals(ZONE, result1.get(PartitionGroupMetaData.PARTITION_GROUP_ZONE));
    assertEquals(ZONE, result2.get(PartitionGroupMetaData.PARTITION_GROUP_ZONE));
    verify(azureClient).getAvailabilityZone();
}
/**
 * Starts a builder over the given items.
 *
 * @param items the items to iterate; must not be null
 * @return a new {@link Builder} wrapping {@code items}
 * @throws NullPointerException if {@code items} is null
 */
public static <I> Builder<I> foreach(Iterable<I> items) {
    // requireNonNull returns its argument, so the checked reference feeds
    // straight into the builder.
    Iterable<I> checkedItems = requireNonNull(items, "items");
    return new Builder<>(checkedItems);
}
@Test
public void testFailedCallAbortSuppressed() throws Throwable {
    // A failing task with stop-on-failure + suppressed exceptions must still run
    // the abort handler for uncommitted items.
    assertFailed(builder()
        .stopOnFailure()
        .suppressExceptions()
        .abortWith(aborter), failingTask);
    failingTask.assertInvokedAtLeast("success", FAILPOINT);
    // Abort ordering is only deterministic in sequential mode.
    if (!isParallel()) {
        aborter.assertInvokedAtLeast("abort", 1);
        // all uncommitted items were aborted
        items.stream().filter(i -> !i.committed)
            .map(Item::assertAborted);
        // committed items must never be aborted
        items.stream().filter(i -> i.committed)
            .forEach(i -> assertFalse(i.toString(), i.aborted));
    }
}
/**
 * Creates the ACL bindings required for a consumer, as described by the
 * request, on the given cluster. Completes empty once the ACLs are created.
 */
public Mono<Void> createConsumerAcl(KafkaCluster cluster, CreateConsumerAclDTO request) {
    // Bindings are derived inside the reactive chain so nothing is computed
    // until an admin client for the cluster has been obtained.
    return adminClientService
        .get(cluster)
        .flatMap(adminClient -> createAclsWithLogging(adminClient, createConsumerBindings(request)))
        .then();
}
@Test
void createsConsumerDependantAclsWhenTopicsAndGroupsSpecifiedByPrefix() {
    // Capture the bindings passed to the admin client instead of executing them.
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());
    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();
    aclsService.createConsumerAcl(
        CLUSTER,
        new CreateConsumerAclDTO()
            .principal(principal)
            .host(host)
            .consumerGroupsPrefix("cgPref")
            .topicsPrefix("topicPref")
    ).block();
    //Read, Describe on topics, Read on consumerGroups
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(3)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.GROUP, "cgPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)));
}
@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
    // Delegate to the timeout-aware overload using the client-wide default API timeout.
    final Duration timeout = Duration.ofMillis(defaultApiTimeoutMs);
    return beginningOffsets(partitions, timeout);
}
@Test
public void testBeginningOffsetsFailsIfNullPartitions() {
    consumer = newConsumer();
    // A null partitions collection must be rejected eagerly with an NPE.
    assertThrows(NullPointerException.class,
        () -> consumer.beginningOffsets(null, Duration.ofMillis(1)));
}
/**
 * Persists the given order to DynamoDB.
 *
 * <p>Note: {@code putItem} replaces any existing item with the same key
 * (upsert semantics).
 */
public void save(final Order order) {
    final DynamoDbTable<Order> table = getTable();
    table.putItem(order);
}
@Test
void testCreateOrder() {
    // Build an order with two products and save it through the repository.
    Order order = new Order();
    order.setOrderID("ORD-010");
    order.setCustomerID("CUST-001");
    List<Product> products = new ArrayList<Product>();
    Product product = new Product();
    product.setName("Television");
    product.setBrand("samsung");
    product.setPrice(112.56);
    products.add(product);
    product = new Product();
    product.setName("Washing Machine");
    product.setBrand("panasonic");
    product.setPrice(119.99);
    products.add(product);
    order.setProducts(products );
    order.setOrderValue(56.7);
    order.setCreatedDate(Instant.now());
    // NOTE(review): no assertions here — this test only verifies save() does not throw.
    orderRepository.save(order);
}
/**
 * Conditionally enables the dashboard with the standard dashboard
 * configuration. Delegates to the two-argument overload.
 *
 * @param guard whether the dashboard should be started
 * @return this configuration, for fluent chaining
 */
public JobRunrConfiguration useDashboardIf(boolean guard) {
    return useDashboardIf(guard, usingStandardDashboardConfiguration());
}
@Test
void dashboardIsNotStartedIfGuardIsFalse() {
    // With a false guard the dashboard path must be skipped entirely,
    // so configuration completes without error.
    assertThatCode(() -> JobRunr.configure()
        .useDashboardIf(false)
    ).doesNotThrowAnyException();
}
/**
 * Imports a media container (albums, photos, videos) into Apple Photos.
 *
 * <p>Imports albums first, then photos and videos, and returns an
 * {@link ImportResult} carrying both the total imported bytes and per-type
 * item counts.
 */
@Override
public ImportResult importItem(
    UUID jobId,
    IdempotentImportExecutor idempotentExecutor,
    TokensAndUrlAuthData authData,
    MediaContainerResource data)
    throws Exception {
    // Nothing to import.
    if (data == null) {
        return ImportResult.OK;
    }
    // Audit-log every incoming photo before any network work happens.
    for (PhotoModel photoModel: data.getPhotos()) {
        monitor.debug(() -> "AppleMediaImporter received data", AuditKeys.dataId, photoModel.getDataId(), AuditKeys.updatedTimeInMs, photoModel.getUploadedTime());
    }
    AppleMediaInterface mediaInterface = factory
        .getOrCreateMediaInterface(jobId, authData, appCredentials, exportingService, monitor);
    // Albums must exist before media can be placed into them.
    final int albumCount =
        mediaInterface.importAlbums(
            jobId, idempotentExecutor, data.getAlbums(), DataVertical.MEDIA.getDataType());
    // The maps returned carry COUNT_KEY (items imported) and BYTES_KEY (bytes transferred).
    final Map<String, Long> importPhotosMap =
        mediaInterface.importAllMedia(
            jobId, idempotentExecutor, data.getPhotos(), DataVertical.MEDIA.getDataType());
    final Map<String, Long> importVideosResult =
        mediaInterface.importAllMedia(
            jobId, idempotentExecutor, data.getVideos(), DataVertical.MEDIA.getDataType());
    // Aggregate per-type counts for the result; missing keys default to 0.
    final Map<String, Integer> counts =
        new ImmutableMap.Builder<String, Integer>()
            .put(MediaContainerResource.ALBUMS_COUNT_DATA_NAME, albumCount)
            .put(
                MediaContainerResource.PHOTOS_COUNT_DATA_NAME,
                importPhotosMap.getOrDefault(ApplePhotosConstants.COUNT_KEY, 0L).intValue())
            .put(
                MediaContainerResource.VIDEOS_COUNT_DATA_NAME,
                importVideosResult.getOrDefault(ApplePhotosConstants.COUNT_KEY, 0L).intValue())
            .build();
    monitor.info(() -> "AppleMediaImporter imported batch",
        MediaContainerResource.ALBUMS_COUNT_DATA_NAME, albumCount,
        MediaContainerResource.PHOTOS_COUNT_DATA_NAME, importPhotosMap.getOrDefault(ApplePhotosConstants.COUNT_KEY, 0L).intValue(),
        MediaContainerResource.VIDEOS_COUNT_DATA_NAME, importVideosResult.getOrDefault(ApplePhotosConstants.COUNT_KEY, 0L).intValue());
    // Result bytes are the sum of photo and video bytes.
    return ImportResult.OK
        .copyWithBytes(
            importPhotosMap.getOrDefault(ApplePhotosConstants.BYTES_KEY, 0L)
                + importVideosResult.getOrDefault(ApplePhotosConstants.BYTES_KEY, 0L))
        .copyWithCounts(counts);
}
@Test
public void importEmptyNamePhoto() throws Exception {
    // Photos with empty/null titles must be imported under the default
    // "untitled" filename rather than failing.
    // set up photos
    final int photoCount = 1;
    final List<PhotoModel> photos =
        Arrays.asList(
            new PhotoModel(
                "", // empty title
                "fetchableUrl",
                "description",
                "mediaType",
                PHOTOS_DATAID_BASE + UUID.randomUUID(),
                null ,
                false,
                null,
                new Date(),
                new FavoriteInfo(true, new Date())),
            new PhotoModel(
                null, // empty title
                "fetchableUrl",
                "description",
                "mediaType",
                PHOTOS_DATAID_BASE + UUID.randomUUID(),
                null ,
                false,
                null,
                new Date(),
                new FavoriteInfo(true, new Date())));
    // All uploads succeed.
    final Map<String, Integer> dataIdToStatus =
        photos.stream()
            .collect(
                Collectors.toMap(PhotoModel::getDataId, photoModel -> SC_OK));
    setUpGetUploadUrlResponse(dataIdToStatus);
    setUpUploadContentResponse(dataIdToStatus);
    setUpCreateMediaResponse(dataIdToStatus);
    MediaContainerResource mediaData = new MediaContainerResource(null, photos, null);
    appleMediaImporter.importItem(uuid, executor, authData, mediaData);
    // verify correct methods were called
    final List<String> photosDataIds =
        photos.stream().map(PhotoModel::getDataId).collect(Collectors.toList());
    verify(mediaInterface)
        .getUploadUrl(uuid.toString(), DataVertical.MEDIA.getDataType(), photosDataIds);
    verify(mediaInterface, times(1)).uploadContent(anyMap(), anyList());
    // Every created media item must carry the untitled fallback filename.
    verify(mediaInterface, times(1)).createMedia(anyString(), anyString(), argThat(newMediaRequestList -> {
        assertThat(newMediaRequestList).isNotNull();
        assertThat(newMediaRequestList.stream().allMatch(newMediaRequest -> newMediaRequest.getFilename().equals(ApplePhotosConstants.APPLE_PHOTOS_UNTITLED_FILE_NAME))).isTrue();
        return true;
    }));
}
/**
 * Returns the current certificate pair, refreshing it from the Dubbo
 * Certificate Authority when it is absent or expired.
 *
 * <p>Uses check/lock/re-check so concurrent callers refresh at most once.
 * On refresh failure the (possibly expired) existing pair is returned
 * unchanged — callers get a best-effort result rather than an exception.
 */
protected CertPair generateCert() {
    // Fast path: a valid, unexpired pair needs no locking.
    if (certPair != null && !certPair.isExpire()) {
        return certPair;
    }
    synchronized (this) {
        // Re-check under the lock: another thread may have refreshed already.
        if (certPair == null || certPair.isExpire()) {
            try {
                logger.info("Try to generate cert from Dubbo Certificate Authority.");
                CertPair certFromRemote = refreshCert();
                if (certFromRemote != null) {
                    certPair = certFromRemote;
                } else {
                    // Null result: log and keep the old pair rather than failing.
                    logger.error(
                        CONFIG_SSL_CERT_GENERATE_FAILED,
                        "",
                        "",
                        "Generate Cert from Dubbo Certificate Authority failed.");
                }
            } catch (Exception e) {
                // Refresh threw: log and fall through to return the stale pair.
                logger.error(REGISTRY_FAILED_GENERATE_CERT_ISTIO, "", "", "Generate Cert from Istio failed.", e);
            }
        }
    }
    return certPair;
}
@Test
void testGenerateCert() {
    FrameworkModel frameworkModel = new FrameworkModel();
    AtomicBoolean exception = new AtomicBoolean(false);
    AtomicReference<CertPair> certPairReference = new AtomicReference<>();
    // Stub refreshCert so the test controls both the returned pair and failure mode.
    DubboCertManager certManager = new DubboCertManager(frameworkModel) {
        @Override
        protected CertPair refreshCert() throws IOException {
            if (exception.get()) {
                throw new IOException("test");
            }
            return certPairReference.get();
        }
    };
    // No cached pair: generateCert must fetch from refreshCert.
    CertPair certPair = new CertPair("", "", "", Long.MAX_VALUE);
    certPairReference.set(certPair);
    Assertions.assertEquals(certPair, certManager.generateCert());
    // Unexpired cached pair: returned as-is, no refresh.
    certManager.certPair = new CertPair("", "", "", Long.MAX_VALUE - 10000);
    Assertions.assertEquals(new CertPair("", "", "", Long.MAX_VALUE - 10000), certManager.generateCert());
    // Expired cached pair: replaced by the refreshed one.
    certManager.certPair = new CertPair("", "", "", 0);
    Assertions.assertEquals(certPair, certManager.generateCert());
    // Refresh returns null: the stale pair is kept.
    certManager.certPair = new CertPair("", "", "", 0);
    certPairReference.set(null);
    Assertions.assertEquals(new CertPair("", "", "", 0), certManager.generateCert());
    // Refresh throws: the stale pair is still kept.
    exception.set(true);
    Assertions.assertEquals(new CertPair("", "", "", 0), certManager.generateCert());
    frameworkModel.destroy();
}
/**
 * Extracts the precision from a decimal {@code Schema}.
 *
 * @param schema the schema; must be a decimal schema
 * @return the parsed precision parameter, or {@code PRECISION_DEFAULT} when
 *     the parameter is absent
 * @throws KsqlException if the precision parameter is present but not a
 *     valid integer
 */
public static int precision(final Schema schema) {
  requireDecimal(schema);

  final String precisionString = schema.parameters().get(PRECISION_FIELD);
  if (precisionString == null) {
    return PRECISION_DEFAULT;
  }

  try {
    return Integer.parseInt(precisionString);
  } catch (final NumberFormatException e) {
    // Include the offending value in the message (the original ended with a
    // dangling ": ") and keep the cause for the stack trace.
    throw new KsqlException(
        "Invalid precision parameter found in Decimal schema: " + precisionString, e);
  }
}
@Test
public void shouldExtractPrecisionFromDecimalSchema() {
    // When: read the precision parameter off the shared decimal schema fixture
    final int precision = DecimalUtil.precision(DECIMAL_SCHEMA);
    // Then:
    assertThat(precision, is(2));
}
@Override
protected Set<StepField> getUsedFields( ExcelInputMeta meta ) {
  // The only incoming field this step consumes is the filename field, and only
  // when filenames are accepted from a named previous step.
  final Set<StepField> fields = new HashSet<>();
  final boolean filenameFromPreviousStep =
      meta.isAcceptingFilenames() && StringUtils.isNotEmpty( meta.getAcceptingStepName() );
  if ( filenameFromPreviousStep ) {
    fields.add( new StepField( meta.getAcceptingStepName(), meta.getAcceptingField() ) );
  }
  return fields;
}
@Test
public void testGetUsedFields_fileNameFromField() throws Exception {
    // Filenames come from a previous step's field, so that field must be reported as used.
    lenient().when( meta.isAcceptingFilenames() ).thenReturn( true );
    lenient().when( meta.getAcceptingField() ).thenReturn( "filename" );
    lenient().when( meta.getAcceptingStepName() ).thenReturn( "previousStep" );
    Set<StepField> usedFields = analyzer.getUsedFields( meta );
    assertNotNull( usedFields );
    assertEquals( 1, usedFields.size() );
    StepField used = usedFields.iterator().next();
    assertEquals( "previousStep", used.getStepName() );
    assertEquals( "filename", used.getFieldName() );
}
/**
 * Generates a random payload secret.
 *
 * @param secretLength the number of random bytes to draw
 * @return the secret as a lowercase hex string of {@code 2 * secretLength}
 *     characters
 */
public String generate(int secretLength) {
    final byte[] secretBytes = new byte[secretLength];
    payloadSecretRng.nextBytes(secretBytes);
    return BaseEncoding.base16().lowerCase().encode(secretBytes);
}
@Test
public void generate_always_generatesExpectedSecretString() {
    // TEST_RNG is deterministic, so the hex output is fully predictable:
    // 4 bytes of 0xff encode to "ffffffff".
    PayloadSecretGenerator secretGenerator = new PayloadSecretGenerator(TEST_RNG);
    assertThat(secretGenerator.generate(4)).isEqualTo("ffffffff");
}
/**
 * Records the stop position for a recording in the catalog and re-checksums
 * the descriptor.
 */
void stopPosition(final long recordingId, final long position) {
    // Locate the stop-position field inside this recording's descriptor.
    final int recordingDescriptorOffset = recordingDescriptorOffset(recordingId);
    final int offset = recordingDescriptorOffset + DESCRIPTOR_HEADER_LENGTH;
    // Descriptors are stored in BYTE_ORDER; byte-swap when native order differs.
    final long stopPosition = nativeOrder() == BYTE_ORDER ? position : Long.reverseBytes(position);

    // Volatile write so concurrent readers observe the updated position.
    fieldAccessBuffer.putLongVolatile(offset + stopPositionEncodingOffset(), stopPosition);
    // Checksum must be recomputed after the mutation, then the catalog flushed.
    updateChecksum(recordingDescriptorOffset);
    forceWrites(catalogChannel);
}
@Test
void stopPositionShouldUpdateChecksum() {
    final Checksum checksum = crc32();
    try (Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, clock, checksum, segmentFileBuffer)) {
        // Baseline checksum before mutation.
        assertChecksum(catalog, recordingTwoId, 160, 0, null);
        // Writing a stop position must recompute the descriptor checksum.
        catalog.stopPosition(recordingTwoId, 7777);
        assertChecksum(catalog, recordingTwoId, 160, -1985007076, checksum);
    }
}
/**
 * Validates a text-answer creation request.
 *
 * @throws SubmittedQuestionNotFoundException if the referenced question does
 *     not exist
 */
public void validate(CreateReviewAnswerRequest request) {
    // The answered question must exist before any content checks run.
    Question question = questionRepository.findById(request.questionId())
        .orElseThrow(() -> new SubmittedQuestionNotFoundException(request.questionId()));

    // A text answer must not carry selected options.
    validateNotIncludingOptions(request);
    // Enforce the question's required flag against the supplied answer.
    validateQuestionRequired(question, request);
    // Enforce answer length constraints.
    validateLength(request);
}
@Test void ์ €์žฅ๋˜์ง€_์•Š์€_์งˆ๋ฌธ์—_๋Œ€ํ•œ_๋Œ€๋‹ต์ด๋ฉด_์˜ˆ์™ธ๊ฐ€_๋ฐœ์ƒํ•œ๋‹ค() { // given CreateReviewAnswerRequest request = new CreateReviewAnswerRequest(100L, null, "ํ…์ŠคํŠธํ˜• ์‘๋‹ต"); // when, then assertThatCode(() -> createTextAnswerRequestValidator.validate(request)) .isInstanceOf(SubmittedQuestionNotFoundException.class); }
/**
 * Serializes a {@code PendingSplitsState}, caching the serialized form on the
 * state object so repeated calls return the same byte array instance.
 *
 * @throws IOException if the concrete state type is not supported
 */
@Override
public byte[] serialize(PendingSplitsState state) throws IOException {
    // optimization: the splits lazily cache their own serialized form
    if (state.serializedFormCache != null) {
        return state.serializedFormCache;
    }
    // Thread-local output buffer, reused across calls.
    final DataOutputSerializer out = SERIALIZER_CACHE.get();

    // Header: split-serializer version, then a flag identifying the state subtype.
    out.writeInt(splitSerializer.getVersion());
    if (state instanceof SnapshotPendingSplitsState) {
        out.writeInt(SNAPSHOT_PENDING_SPLITS_STATE_FLAG);
        serializeSnapshotPendingSplitsState((SnapshotPendingSplitsState) state, out);
    } else if (state instanceof BinlogPendingSplitsState) {
        out.writeInt(BINLOG_PENDING_SPLITS_STATE_FLAG);
        serializeBinlogPendingSplitsState((BinlogPendingSplitsState) state, out);
    } else if (state instanceof HybridPendingSplitsState) {
        out.writeInt(HYBRID_PENDING_SPLITS_STATE_FLAG);
        serializeHybridPendingSplitsState((HybridPendingSplitsState) state, out);
    } else {
        throw new IOException(
            "Unsupported to serialize PendingSplitsState class: "
                + state.getClass().getName());
    }

    final byte[] result = out.getCopyOfBuffer();
    // optimization: cache the serialized from, so we avoid the byte work during repeated
    // serialization
    state.serializedFormCache = result;
    // Reset the shared buffer for the next caller.
    out.clear();
    return result;
}
@Test
public void testRepeatedSerializationCache() throws Exception {
    final PendingSplitsStateSerializer serializer =
        new PendingSplitsStateSerializer(MySqlSplitSerializer.INSTANCE);
    final byte[] ser1 = serializer.serialize(state);
    final byte[] ser2 = serializer.serialize(state);
    final byte[] ser3 = state.serializedFormCache;
    // All three must be the exact same array instance: the serialized form is
    // cached on the state object and reused.
    assertSame(ser1, ser2);
    assertSame(ser1, ser3);
}
@Override
public Boolean nodeExist(ResourceId path) {
    // Translate the view-relative id to an absolute id before delegating.
    ResourceId absoluteId = toAbsoluteId(path);
    return super.nodeExist(absoluteId);
}
@Test
public void testNodeExist() {
    // Querying through the view must resolve the relative id against the
    // view's root, producing an absolute path prefixed by rid.
    view.nodeExist(relIntf);
    assertTrue(ResourceIds.isPrefix(rid, realPath));
}