focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Creates a new key after verifying the current user has MANAGEMENT access
// for the key name, then delegates creation to the wrapped provider. The
// write lock serializes key-mutating operations against concurrent readers.
// NOTE(review): authorizeCreateKey presumably throws on denial (there is no
// boolean result checked here) — confirm against its declaration.
@Override public KeyVersion createKey(String name, Options options) throws NoSuchAlgorithmException, IOException { writeLock.lock(); try { authorizeCreateKey(name, options, getUser()); return provider.createKey(name, options); } finally { writeLock.unlock(); } }
// Verifies createKey authorization on KeyAuthorizationKeyProvider:
// - user "u1" (ACL present + access granted for "foo"/MANAGEMENT) may create "foo";
// - "u1" may NOT create "bar", for which no ACL is configured;
// - an unrelated user ("badGuy") may NOT create "foo".
// Authorization failures surface as IOException from createKey.
@Test public void testCreateKey() throws Exception { final Configuration conf = new Configuration(); KeyProvider kp = new UserProvider.Factory().createProvider(new URI("user:///"), conf); KeyACLs mock = mock(KeyACLs.class); when(mock.isACLPresent("foo", KeyOpType.MANAGEMENT)).thenReturn(true); UserGroupInformation u1 = UserGroupInformation.createRemoteUser("u1"); when(mock.hasAccessToKey("foo", u1, KeyOpType.MANAGEMENT)).thenReturn(true); final KeyProviderCryptoExtension kpExt = new KeyAuthorizationKeyProvider( KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp), mock); u1.doAs( new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { try { kpExt.createKey("foo", SecureRandom.getSeed(16), newOptions(conf)); } catch (IOException ioe) { Assert.fail("User should be Authorized !!"); } // "bar" key not configured try { kpExt.createKey("bar", SecureRandom.getSeed(16), newOptions(conf)); Assert.fail("User should NOT be Authorized !!"); } catch (IOException ioe) { // Ignore } return null; } } ); // Unauthorized User UserGroupInformation.createRemoteUser("badGuy").doAs( new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { try { kpExt.createKey("foo", SecureRandom.getSeed(16), newOptions(conf)); Assert.fail("User should NOT be Authorized !!"); } catch (IOException ioe) { // Ignore } return null; } } ); }
// Batch-registers the given instances for a service/group via the gRPC proxy,
// logging begin/finish markers around the call.
// NOTE(review): an empty instance list is only logged as a warning and the
// gRPC call is still issued — the existing unit test relies on this
// pass-through behavior, so confirm it is intentional before tightening.
@Override public void batchRegisterService(String serviceName, String groupName, List<Instance> instances) throws NacosException { NAMING_LOGGER.info("batchRegisterInstance instances: {} ,serviceName: {} begin.", instances, serviceName); if (CollectionUtils.isEmpty(instances)) { NAMING_LOGGER.warn("batchRegisterInstance instances is Empty:{}", instances); } grpcClientProxy.batchRegisterService(serviceName, groupName, instances); NAMING_LOGGER.info("batchRegisterInstance instances: {} ,serviceName: {} finish.", instances, serviceName); }
/**
 * Verifies that batchRegisterService forwards the exact instance list,
 * unchanged, to the gRPC client proxy.
 */
@Test
void testBatchRegisterServiceByGrpc() throws NacosException {
    // Given: one ephemeral instance to batch-register.
    String serviceName = "service1";
    String groupName = "group1";
    Instance instance = new Instance();
    instance.setServiceName(serviceName);
    instance.setClusterName(groupName);
    instance.setIp("1.1.1.1");
    instance.setPort(1);
    instance.setEphemeral(true);
    List<Instance> instanceList = new ArrayList<>();
    // Fix: the instance was constructed but never added, so the test only
    // exercised the empty-list path. Adding it exercises the real batch path;
    // the verify below still matches because the same list reference is used.
    instanceList.add(instance);

    // When
    delegate.batchRegisterService(serviceName, groupName, instanceList);

    // Then: the call is forwarded once, with identical arguments.
    verify(mockGrpcClient, times(1)).batchRegisterService(serviceName, groupName, instanceList);
}
// Builds a FEEL 1.1 ANTLR parser over the given source text: wires the
// lexer/token stream, registers each additional function's symbol in the
// built-in scope, installs the FEEL error handler and error listener
// (replacing ANTLR's default console listener), pre-loads the input
// variables/types as symbols, and optionally sets the type registry.
// NOTE(review): the `profiles` parameter is accepted but never used in this
// body — confirm whether it is intentionally ignored here.
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) { CharStream input = CharStreams.fromString(source); FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input ); CommonTokenStream tokens = new CommonTokenStream( lexer ); FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens ); ParserHelper parserHelper = new ParserHelper(eventsManager); additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol())); parser.setHelper(parserHelper); parser.setErrorHandler( new FEELErrorHandler() ); parser.removeErrorListeners(); // removes the error listener that prints to the console parser.addErrorListener( new FEELParserErrorListener( eventsManager ) ); // pre-loads the parser with symbols defineVariables( inputVariableTypes, inputVariables, parser ); if (typeRegistry != null) { parserHelper.setTypeRegistry(typeRegistry); } return parser; }
// Parses the qualified name "My Person.Full Name" against a map-backed
// "Person" type and checks: the parsed node is a QualifiedNameNode, the
// overall result type is STRING (type of `Full Name`), part 0 resolves to
// the Person type, part 1 to STRING, and the source location matches the
// input expression.
@Test void qualifiedName() { String inputExpression = "My Person.Full Name"; MapBackedType personType = new MapBackedType("Person", mapOf( entry("Full Name", BuiltInType.STRING), entry("Age", BuiltInType.NUMBER))); BaseNode qualRef = parse( inputExpression, mapOf( entry("My Person", personType))); assertThat( qualRef).isInstanceOf(QualifiedNameNode.class); assertThat( qualRef.getResultType()).isEqualTo(BuiltInType.STRING); List<NameRefNode> parts = ((QualifiedNameNode) qualRef).getParts(); // `My Person` ... assertThat( parts.get(0)).isInstanceOf(NameRefNode.class); assertThat( parts.get(0).getResultType()).isEqualTo(personType); // ... `.Full Name` assertThat( parts.get(1)).isInstanceOf(NameRefNode.class); assertThat( parts.get(1).getResultType()).isEqualTo(BuiltInType.STRING); assertLocation( inputExpression, qualRef ); }
/**
 * Executes the given request through the underlying API client and returns
 * its typed response.
 *
 * @param request the request to send
 * @return the response produced by the API for this request
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    final R response = api.send(request);
    return response;
}
/**
 * A user with no boosts on the chat must yield an empty boosts array.
 */
@Test
public void getUserChatBoosts() {
    ChatBoost[] chatBoosts = bot.execute(new GetUserChatBoosts(channelId, chatId)).boosts();
    // Fix: JUnit's assertEquals takes (expected, actual); the original call
    // had the arguments swapped, producing a misleading failure message.
    assertEquals(0, chatBoosts.length);
}
// Diffs two TableSchemas at the field level. Returns Optional.empty() when
// the schemas are equivalent; otherwise builds a new TableSchema from the
// merged/updated field list produced by the field-level overload.
// NOTE(review): Result.getFields() appears to return an Optional — when it
// is absent this method also yields Optional.empty(); confirm intended.
public static Optional<TableSchema> getUpdatedSchema( TableSchema oldSchema, TableSchema newSchema) { Result updatedFields = getUpdatedSchema(oldSchema.getFieldsList(), newSchema.getFieldsList()); if (updatedFields.isEquivalent()) { return Optional.empty(); } else { return updatedFields .getFields() .map( tableFieldSchemas -> TableSchema.newBuilder().addAllFields(tableFieldSchemas).build()); } }
// Exercises TableSchemaUpdateUtils.getUpdatedSchema on doubly-nested STRUCT
// schemas. The "new" schemas reorder fields (c before b), drop ordering of
// the nested struct, and add a field "d" at each level; the expected merged
// schema keeps the original field order (a, b, c, nested) and appends the
// new field "d" at each nesting level. The top-level merge of topSchema vs
// newTopSchema must therefore equal expectedTopSchema.
@Test public void testSchemaUpdate() { TableSchema baseSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING)) .build(); TableSchema schema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder() .setName("nested") .setType(TableFieldSchema.Type.STRUCT) .addAllFields(baseSchema.getFieldsList())) .build(); TableSchema topSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder() .setName("nested") .setType(TableFieldSchema.Type.STRUCT) .addAllFields(schema.getFieldsList())) .build(); TableSchema newBaseSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("d").setType(TableFieldSchema.Type.STRING)) .build(); TableSchema newSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder() .setName("nested") .setType(TableFieldSchema.Type.STRUCT) .addAllFields(newBaseSchema.getFieldsList())) .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING)) .addFields( 
TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("d").setType(TableFieldSchema.Type.STRING)) .build(); TableSchema newTopSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder() .setName("nested") .setType(TableFieldSchema.Type.STRUCT) .addAllFields(newSchema.getFieldsList())) .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .build(); TableSchema expectedSchemaBaseSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("d").setType(TableFieldSchema.Type.STRING)) .build(); TableSchema expectedSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("b").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder().setName("c").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder() .setName("nested") .setType(TableFieldSchema.Type.STRUCT) .addAllFields(expectedSchemaBaseSchema.getFieldsList())) .addFields( TableFieldSchema.newBuilder().setName("d").setType(TableFieldSchema.Type.STRING)) .build(); TableSchema expectedTopSchema = TableSchema.newBuilder() .addFields( TableFieldSchema.newBuilder().setName("a").setType(TableFieldSchema.Type.STRING)) .addFields( TableFieldSchema.newBuilder() .setName("nested") .setType(TableFieldSchema.Type.STRUCT) .addAllFields(expectedSchema.getFieldsList())) .build(); TableSchema updatedTopSchema = TableSchemaUpdateUtils.getUpdatedSchema(topSchema, newTopSchema).get(); assertEquals(expectedTopSchema, updatedTopSchema); }
// Wipes all producer state and snapshot files, then restarts bookkeeping at
// the given offset: clears producer ids, ongoing and unreplicated
// transactions, deletes every snapshot file, resets lastSnapOffset to 0,
// sets lastMapOffset to the new start offset, and recomputes the oldest
// transaction timestamp (now empty).
public void truncateFullyAndStartAt(long offset) throws IOException { clearProducerIds(); ongoingTxns.clear(); unreplicatedTxns.clear(); for (SnapshotFile snapshotFile : snapshots.values()) { removeAndDeleteSnapshot(snapshotFile.offset); } lastSnapOffset = 0L; lastMapOffset = offset; updateOldestTxnTimestamp(); }
// Snapshots accumulate per taken offset (2L, then 3L). truncateFullyAndStartAt(0)
// must delete every snapshot file and reset state so a fresh append/snapshot
// cycle starts over: after one new append at offset 0, exactly one snapshot
// exists, at offset 1.
@Test public void testTruncateFullyAndStartAt() throws IOException { appendClientEntry(stateManager, producerId, epoch, defaultSequence, 0L, false); appendClientEntry(stateManager, producerId, epoch, 1, 1L, false); stateManager.takeSnapshot(); assertEquals(1, Objects.requireNonNull(logDir.listFiles()).length); assertEquals(singleton(2L), currentSnapshotOffsets()); appendClientEntry(stateManager, producerId, epoch, 2, 2L, false); stateManager.takeSnapshot(); assertEquals(2, Objects.requireNonNull(logDir.listFiles()).length); assertEquals(new HashSet<>(asList(2L, 3L)), currentSnapshotOffsets()); stateManager.truncateFullyAndStartAt(0L); assertEquals(0, Objects.requireNonNull(logDir.listFiles()).length); assertEquals(emptySet(), currentSnapshotOffsets()); appendClientEntry(stateManager, producerId, epoch, 0, 0L, false); stateManager.takeSnapshot(); assertEquals(1, Objects.requireNonNull(logDir.listFiles()).length); assertEquals(singleton(1L), currentSnapshotOffsets()); }
/**
 * Exposes this stage as a {@link CompletableFuture} by converting the
 * underlying ParSeq task's completion stage.
 */
@Override
public CompletableFuture<T> toCompletableFuture() {
    return _task
        .toCompletionStage()
        .toCompletableFuture();
}
@Test public void testCreateStageFromRunnable() throws Exception { final String[] stringArr = new String[1]; String testResult = "testCreateStageFromCompletableFuture"; ParSeqBasedCompletionStage<Void> stageFromCompletionStage = _parSeqBasedCompletionStageFactory.buildStageFromRunnableAsync(() -> { stringArr[0] = testResult; }); stageFromCompletionStage.toCompletableFuture().get(); //ensure completion Assert.assertEquals(stringArr[0], testResult); }
/**
 * Returns the database product name for this dialect.
 *
 * @return always {@code "H2"}
 */
@Override public String getDatabaseProductName() { return "H2"; }
// The H2 dialect metadata must report "H2" as its database product name.
@Test void assertGetDatabaseProductName() { assertThat(metaData.getDatabaseProductName(), is("H2")); }
/**
 * Allocates a zero-filled Slice sized to hold an unscaled 128-bit decimal.
 */
public static Slice unscaledDecimal() { return Slices.allocate(UNSCALED_DECIMAL_128_SLICE_LENGTH); }
// Products exceeding the 128-bit unscaled-decimal range must be reported as
// overflow, for both a mixed-sign product and MAX_DECIMAL * 10.
@Test public void testMultiplyOverflow() { assertMultiplyOverflows(unscaledDecimal("99999999999999"), unscaledDecimal("-10000000000000000000000000")); assertMultiplyOverflows(MAX_DECIMAL, unscaledDecimal("10")); }
@SuppressWarnings("WeakerAccess") public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); // Get main consumer override configs final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX); consumerProps.putAll(mainConsumerProps); // this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting consumerProps.put(APPLICATION_ID_CONFIG, groupId); // add group id, client id with stream client id prefix, and group instance id consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId); final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG); // Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id. if (groupInstanceId != null) { consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx); } // add configs required for stream partition assignor consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG)); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG)); consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG)); consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); 
consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG)); consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG)); consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG)); // disable auto topic creation consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false"); // verify that producer batch config is no larger than segment size, then add topic configs required for creating topics final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false); final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) && producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) { final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString()); final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString()); if (segmentSize < batchSize) { throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic", segmentSize, batchSize)); } } consumerProps.putAll(topicProps); return consumerProps; }
// Main consumer configs must carry the supplied client and group ids, honor
// the max.poll.records override from the fixture config, and leave
// group.instance.id unset when none was configured.
@Test public void testGetConsumerConfigs() { final Map<String, Object> returnedProps = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx); assertThat(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), equalTo(clientId)); assertThat(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG), equalTo(groupId)); assertThat(returnedProps.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), equalTo("1000")); assertNull(returnedProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG)); }
// TINYINT multiplication (operands carried as long, per engine convention).
// SignedBytes.checkedCast rejects products outside the signed byte range;
// that IllegalArgumentException is rethrown as a PrestoException with
// NUMERIC_VALUE_OUT_OF_RANGE and both operands in the message.
@ScalarOperator(MULTIPLY) @SqlType(StandardTypes.TINYINT) public static long multiply(@SqlType(StandardTypes.TINYINT) long left, @SqlType(StandardTypes.TINYINT) long right) { try { return SignedBytes.checkedCast(left * right); } catch (IllegalArgumentException e) { throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, format("tinyint multiplication overflow: %s * %s", left, right), e); } }
// In-range TINYINT products are returned as TINYINT for all operand
// orderings; Byte.MAX_VALUE * 2 must raise the documented overflow error
// with the operands in the message.
@Test public void testMultiply() { assertFunction("TINYINT'11' * TINYINT'11'", TINYINT, (byte) (11 * 11)); assertFunction("TINYINT'11' * TINYINT'9'", TINYINT, (byte) (11 * 9)); assertFunction("TINYINT'9' * TINYINT'11'", TINYINT, (byte) (9 * 11)); assertFunction("TINYINT'9' * TINYINT'9'", TINYINT, (byte) (9 * 9)); assertNumericOverflow(format("TINYINT'%s' * TINYINT'2'", Byte.MAX_VALUE), "tinyint multiplication overflow: 127 * 2"); }
// Appends every element emitted by the Publisher to this list. PublisherAdder
// drains the publisher, invoking add(Object) per element via the async add;
// the unchecked cast to V mirrors the erased element type. The Single
// resolves to the overall "collection changed" result.
public Single<Boolean> addAll(Publisher<? extends V> c) { return new PublisherAdder<V>() { @Override public RFuture<Boolean> add(Object o) { return instance.addAsync((V) o); } }.addAll(c); }
/**
 * addAll must append published elements in order, preserving duplicates,
 * and report that the list changed.
 */
@Test
public void testAddAll() {
    RListRx<Integer> list = redisson.getList("list");
    sync(list.add(1));
    sync(list.add(2));
    sync(list.add(3));
    sync(list.add(4));
    sync(list.add(5));
    // Idiom fix: assertTrue instead of assertEquals(true, ...) — clearer
    // intent and failure message for boolean results.
    Assertions.assertTrue(sync(list.addAll(Arrays.asList(7, 8, 9))));
    Assertions.assertTrue(sync(list.addAll(Arrays.asList(9, 1, 9))));
    // Elements are appended in publish order; duplicates are preserved.
    assertThat(sync(list)).containsExactly(1, 2, 3, 4, 5, 7, 8, 9, 9, 1, 9);
}
// Unwraps the physical connection to the driver-specific JDBC class and
// wraps it in a driver-native XAConnection.
// NOTE(review): the xaDataSource argument is unused in this implementation —
// presumably only required by the XAConnectionWrapper SPI signature; confirm.
@Override public XAConnection wrap(final XADataSource xaDataSource, final Connection connection) throws SQLException { return createXAConnection(connection.unwrap(jdbcConnectionClass)); }
// Wrapping a connection via the database-typed XAConnectionWrapper SPI must
// yield an XAConnection whose XAResource is the MariaDB-native implementation.
@Test void assertWrap() throws SQLException { XAConnection actual = DatabaseTypedSPILoader.getService(XAConnectionWrapper.class, databaseType).wrap(createXADataSource(), mockConnection()); assertThat(actual.getXAResource(), instanceOf(MariaXaResource.class)); }
// Validates that a @SelfValidation-annotated method has the required shape:
// void return (ResolvedMethod#getReturnType() is null for void — hence the
// non-null check meaning "does NOT return void"), exactly one parameter of
// type ViolationCollector, and public visibility. Each violation is logged
// as an error and the method is rejected (and thus ignored by the caller).
boolean isMethodCorrect(ResolvedMethod m) { if (m.getReturnType()!=null) { log.error("The method {} is annotated with @SelfValidation but does not return void. It is ignored", m.getRawMember()); return false; } else if (m.getArgumentCount() != 1 || !m.getArgumentType(0).getErasedType().equals(ViolationCollector.class)) { log.error("The method {} is annotated with @SelfValidation but does not have a single parameter of type {}", m.getRawMember(), ViolationCollector.class); return false; } else if (!m.isPublic()) { log.error("The method {} is annotated with @SelfValidation but is not public", m.getRawMember()); return false; } return true; }
// A public void method taking a single ViolationCollector parameter is the
// correct @SelfValidation shape and must be accepted.
@Test void correctMethod() { assertThat(selfValidatingValidator.isMethodCorrect( getMethod("validateCorrect", ViolationCollector.class))) .isTrue(); }
/**
 * Converts a highlight map (field name -> highlight fragments) into a
 * multimap of field name -> highlighted character ranges. A null or empty
 * input yields an empty multimap.
 */
static Multimap<String, Range<Integer>> extractHighlightRanges(Map<String, List<String>> highlight) {
    if (highlight == null || highlight.isEmpty()) {
        return ImmutableListMultimap.of();
    }
    final ImmutableListMultimap.Builder<String, Range<Integer>> ranges = ImmutableListMultimap.builder();
    for (Map.Entry<String, List<String>> field : highlight.entrySet()) {
        extractRange(field.getValue()).forEach(range -> ranges.put(field.getKey(), range));
    }
    return ranges.build();
}
/**
 * Empty (and null) highlight input must yield an empty, non-null multimap.
 */
@Test
public void emptyHighlights() throws Exception {
    final Map<String, List<String>> highlights = ImmutableMap.of();
    final Multimap<String, Range<Integer>> result = HighlightParser.extractHighlightRanges(highlights);
    assertThat(result).isNotNull();
    assertThat(result.entries()).isEmpty();
    // Added: cover the null branch, which the parser handles the same way.
    final Multimap<String, Range<Integer>> nullResult = HighlightParser.extractHighlightRanges(null);
    assertThat(nullResult).isNotNull();
    assertThat(nullResult.entries()).isEmpty();
}
/**
 * Returns the JGroups cluster name this endpoint is configured with.
 */
public String getClusterName() { return clusterName; }
// Creating a "jgroups:<clusterName>" endpoint must expose the cluster name
// from the URI via getClusterName().
@Test public void shouldSetClusterName() { // When JGroupsEndpoint endpoint = getMandatoryEndpoint("jgroups:" + CLUSTER_NAME, JGroupsEndpoint.class); // Then assertEquals(CLUSTER_NAME, endpoint.getClusterName()); }
// Encodes the signature as the raw concatenation r || s, each component
// adjusted (padded/truncated) to `length` bytes by copyAdjustedLength, for a
// total of 2*length bytes.
// NOTE(review): this looks like the raw (IEEE P1363-style) EC signature
// format rather than DER — confirm against the consumers.
public byte[] getEncoded() { final byte[] data = new byte[2*length]; ByteArrayUtils.copyAdjustedLength(r.toByteArray(), length, data, 0); ByteArrayUtils.copyAdjustedLength(s.toByteArray(), length, data, length); return data; }
// r=1, s=10 with component length 4 must encode as two 4-byte big-endian
// blocks: [0,0,0,1, 0,0,0,10].
@Test public void shouldEncode() { assertArrayEquals(new byte[] { 0, 0, 0, 1, 0, 0, 0, 10}, new EcSignature(4, BigInteger.ONE, BigInteger.TEN).getEncoded()); }
/**
 * Reads the most recent articles (up to MAX_ARTICLE_CARDS) from the
 * repository and wraps them, mapped to previews, in the response type.
 */
@Transactional(readOnly = true)
public ArticlesPreviewResponse readLatestArticles() {
    return new ArticlesPreviewResponse(
            articleRepository.findLatest(MAX_ARTICLE_CARDS).stream()
                    .map(ArticlePreviewResponse::from)
                    .toList());
}
// (DisplayName translates to: "latest 3 articles lookup succeeds".)
// Saving four articles and reading the latest must return exactly the three
// newest titles, newest first (ARTICLE_4, ARTICLE_3, ARTICLE_2).
@DisplayName("최신 아티클 3건 조회 성공") @Test void readLatestArticles() { // given articleRepository.save(ARTICLE_1); articleRepository.save(ARTICLE_2); articleRepository.save(ARTICLE_3); articleRepository.save(ARTICLE_4); // when List<String> articleTitles = articleService.readLatestArticles().articles().stream() .map(ArticlePreviewResponse::title) .toList(); // then assertThat(articleTitles).containsExactly(ARTICLE_4.getTitle(), ARTICLE_3.getTitle(), ARTICLE_2.getTitle()); }
/**
 * Validates the table config against the schema; convenience overload that
 * delegates to {@code validate(tableConfig, schema, null)} (no extra filter).
 */
public static void validate(TableConfig tableConfig, @Nullable Schema schema) { validate(tableConfig, schema, null); }
@Test public void validateTierConfigs() { Schema schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME) .addDateTime(TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build(); // null tier configs TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList(null).build(); TableConfigUtils.validate(tableConfig, schema); // empty tier configs tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList(Collections.emptyList()) .build(); TableConfigUtils.validate(tableConfig, schema); // 1 tier configs tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null))).build(); TableConfigUtils.validate(tableConfig, schema); // 2 tier configs, case insensitive check tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE.toLowerCase(), "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "40d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE.toLowerCase(), "tier2_tag_OFFLINE", null, null))).build(); TableConfigUtils.validate(tableConfig, schema); //realtime table tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setTimeColumnName(TIME_COLUMN) .setStreamConfigs(getStreamConfigs()).setTierConfigList(Lists.newArrayList( new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE.toLowerCase(), "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE.toLowerCase(), "40d", null, 
TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); TableConfigUtils.validate(tableConfig, schema); // tier name empty tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to empty tier name"); } catch (IllegalStateException e) { // expected } // tier name repeats tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("sameTierName", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null), new TierConfig("sameTierName", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "100d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to duplicate tier name"); } catch (IllegalStateException e) { // expected } // segmentSelectorType invalid tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", "unsupportedSegmentSelector", "40d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to invalid segmentSelectorType"); } catch (IllegalStateException e) { // expected } // segmentAge not provided for TIME segmentSelectorType tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( 
Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, null, null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "40d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to missing segmentAge"); } catch (IllegalStateException e) { // expected } // segmentAge invalid tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "3600", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to invalid segment age"); } catch (IllegalStateException e) { // expected } // fixedSegmentSelector tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.FIXED_SEGMENT_SELECTOR_TYPE, null, null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null))).build(); TableConfigUtils.validate(tableConfig, schema); tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.FIXED_SEGMENT_SELECTOR_TYPE, "30d", Lists.newArrayList(), TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null))).build(); TableConfigUtils.validate(tableConfig, schema); tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList( new TierConfig("tier1", TierFactory.FIXED_SEGMENT_SELECTOR_TYPE, null, 
Lists.newArrayList("seg0", "seg1"), TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null))).build(); TableConfigUtils.validate(tableConfig, schema); // storageType invalid tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList( new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, "unsupportedStorageType", "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "40d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to invalid storage type"); } catch (IllegalStateException e) { // expected } // serverTag not provided for PINOT_SERVER storageType tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag_OFFLINE", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "40d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, null, null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to "); } catch (IllegalStateException e) { // expected } // serverTag invalid tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setTierConfigList( Lists.newArrayList(new TierConfig("tier1", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "30d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier1_tag", null, null), new TierConfig("tier2", TierFactory.TIME_SEGMENT_SELECTOR_TYPE, "40d", null, TierFactory.PINOT_SERVER_STORAGE_TYPE, "tier2_tag_OFFLINE", null, null))).build(); try { TableConfigUtils.validate(tableConfig, schema); Assert.fail("Should have failed due to invalid server tag"); } catch (IllegalStateException e) { // expected } }
// Builds a dotted Spark metric name "prefix.step.namespace.name": the step
// is normalized and has its suffix stripped, namespace and name are
// normalized, empty segments are dropped, and the parts are joined with ".".
// The prefix is joined verbatim (not cleaned — see the inline question).
@VisibleForTesting @SuppressWarnings("nullness") // ok to have nullable elements on stream static String renderName(String prefix, MetricResult<?> metricResult) { MetricKey key = metricResult.getKey(); MetricName name = key.metricName(); String step = key.stepName(); return Streams.concat( Stream.of(prefix), // prefix is not cleaned, should it be? Stream.of(stripSuffix(normalizePart(step))), Stream.of(name.getNamespace(), name.getName()).map(SparkBeamMetric::normalizePart)) .filter(not(Strings::isNullOrEmpty)) .collect(Collectors.joining(".")); }
// Rendering with an empty prefix must drop the prefix segment and normalize
// each part: dots and parentheses in the step become underscores, and the
// namespace/name special characters are likewise replaced.
@Test public void testRenderName() { MetricResult<Object> metricResult = MetricResult.create( MetricKey.create( "myStep.one.two(three)", MetricName.named("myNameSpace//", "myName()")), 123, 456); String renderedName = SparkBeamMetric.renderName("", metricResult); assertThat( "Metric name was not rendered correctly", renderedName, equalTo("myStep_one_two_three.myNameSpace__.myName__")); }
// Converts repository tuples back into typed rule configurations: for each
// ordered YamlRuleConfigurationSwapper SPI, attempts to reassemble its YAML
// rule configuration from the tuples and, when present, swaps it into the
// typed RuleConfiguration. Returns an empty list for empty input.
@SuppressWarnings("rawtypes") public Collection<RuleConfiguration> swapToRuleConfigurations(final Collection<RepositoryTuple> repositoryTuples) { if (repositoryTuples.isEmpty()) { return Collections.emptyList(); } Collection<RuleConfiguration> result = new LinkedList<>(); YamlRuleConfigurationSwapperEngine yamlSwapperEngine = new YamlRuleConfigurationSwapperEngine(); for (YamlRuleConfigurationSwapper each : OrderedSPILoader.getServices(YamlRuleConfigurationSwapper.class)) { Class<? extends YamlRuleConfiguration> yamlRuleConfigClass = getYamlRuleConfigurationClass(each); swapToYamlRuleConfiguration(repositoryTuples, yamlRuleConfigClass).ifPresent(optional -> result.add(yamlSwapperEngine.swapToRuleConfiguration(optional))); } return result; }
// Swapping an empty tuple collection must short-circuit to an empty result.
@Test void assertSwapToEmptyRuleConfigurations() { assertTrue(new RepositoryTupleSwapperEngine().swapToRuleConfigurations(Collections.emptyList()).isEmpty()); }
// Parses the free-text search query and the attribute filter expressions
// into a single MongoDB Bson query. IllegalArgumentException from either
// parser is translated into an HTTP 400 BadRequestException.
// NOTE(review): the cause is dropped (only e.getMessage() is kept) —
// consider passing `e` through if BadRequestException supports a cause.
public Bson createDbQuery(final List<String> filters, final String query) { try { final var searchQuery = searchQueryParser.parse(query); final var filterExpressionFilters = dbFilterParser.parse(filters, attributes); return buildDbQuery(searchQuery, filterExpressionFilters); } catch (IllegalArgumentException e) { throw new BadRequestException("Invalid argument in search query: " + e.getMessage()); } }
// When the filter parser rejects a filter expression with
// IllegalArgumentException, createDbQuery must translate it into a
// BadRequestException (HTTP 400) rather than leaking the raw exception.
@Test void throwsBadRequestExceptionIfDbFilterParserThrowsIllegalArgumentException() { doReturn(new SearchQuery("")).when(searchQueryParser).parse(eq("")); doThrow(IllegalArgumentException.class).when(dbFilterParser).parse(eq(List.of("wrong #$%#$%$ filter")), eq(attributes)); assertThrows(BadRequestException.class, () -> toTest.createDbQuery(List.of("wrong #$%#$%$ filter"), "")); }
/**
 * Counts the total number of instances across all applications registered
 * in the given {@code Applications} container.
 *
 * @param applications the registry snapshot to count over
 * @return the sum of instance counts of every registered application
 */
public static int countInstances(Applications applications) {
    return applications.getRegisteredApplications().stream()
            .mapToInt(application -> application.getInstances().size())
            .sum();
}
// An Applications container holding one app with one ADDED instance must
// count exactly 1 instance.
@Test public void testCountInstancesIfApplicationsHasInstancesReturnSize() { Application application = createSingleInstanceApp("foo", "foo", InstanceInfo.ActionType.ADDED); Applications applications = createApplications(application); Assert.assertEquals(1, EurekaEntityFunctions.countInstances(applications)); }
// Asserts that the multimap contains the given key/value entry. On failure,
// selects the most diagnostic message, in priority order:
// 1) entries that match only by toString() (type-confusion hint, with type
//    names appended), 2) the key is present but with other values,
// 3) the value is present but under other keys (those keys are listed),
// 4) otherwise a plain "did not contain entry" failure.
public final void containsEntry(@Nullable Object key, @Nullable Object value) { // TODO(kak): Can we share any of this logic w/ MapSubject.containsEntry()? checkNotNull(actual); if (!actual.containsEntry(key, value)) { Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value); ImmutableList<Map.Entry<@Nullable Object, @Nullable Object>> entryList = ImmutableList.of(entry); // TODO(cpovirk): If the key is present but not with the right value, we could fail using // something like valuesForKey(key).contains(value). Consider whether this is worthwhile. if (hasMatchingToStringPair(actual.entries(), entryList)) { failWithoutActual( fact("expected to contain entry", entry), fact("an instance of", objectToTypeName(entry)), simpleFact("but did not"), fact( "though it did contain", countDuplicatesAndAddTypeInfo( retainMatchingToString(actual.entries(), /* itemsToCheck = */ entryList))), fact("full contents", actualCustomStringRepresentationForPackageMembersToCall())); } else if (actual.containsKey(key)) { failWithoutActual( fact("expected to contain entry", entry), simpleFact("but did not"), fact("though it did contain values with that key", actual.asMap().get(key)), fact("full contents", actualCustomStringRepresentationForPackageMembersToCall())); } else if (actual.containsValue(value)) { Set<@Nullable Object> keys = new LinkedHashSet<>(); for (Map.Entry<?, ?> actualEntry : actual.entries()) { if (Objects.equal(actualEntry.getValue(), value)) { keys.add(actualEntry.getKey()); } } failWithoutActual( fact("expected to contain entry", entry), simpleFact("but did not"), fact("though it did contain keys with that value", keys), fact("full contents", actualCustomStringRepresentationForPackageMembersToCall())); } else { failWithActual("expected to contain entry", immutableEntry(key, value)); } } }
// Expects a helpful failure when the key exists but only maps to null, not the expected value.
@Test public void failContainsEntryWithNullValuePresentExpected() { ListMultimap<String, String> actual = ArrayListMultimap.create(); actual.put("a", null); expectFailureWhenTestingThat(actual).containsEntry("a", "A"); assertFailureKeys( "expected to contain entry", "but did not", "though it did contain values with that key", "full contents"); assertFailureValue("though it did contain values with that key", "[null]"); }
// Returns every <childNodeName> element of the document whose direct parent is named
// <parentNodeName> and grandparent <grandParentNodeName>; relies on the asStream()
// helper defined elsewhere in this class to stream the NodeList.
public static List<Node> getNestedChildrenNodesList(Document document, String grandParentNodeName, String parentNodeName, String childNodeName) { return asStream(document.getElementsByTagName(childNodeName)) .filter(childNode -> { Node parentNode = childNode.getParentNode(); Node grandParentNode = parentNode.getParentNode(); return Objects.equals(parentNodeName, parentNode.getNodeName()) && Objects.equals(grandParentNodeName, grandParentNode.getNodeName()); }).collect(Collectors.toList()); }
// Checks nested-children lookup at two different nesting levels of the fixture XML.
@Test public void getNestedChildrenNodesList() throws Exception { Document document = DOMParserUtil.getDocument(XML); List<Node> retrieved = DOMParserUtil.getNestedChildrenNodesList(document, MAIN_NODE, CHILD_NODE, TEST_NODE); assertThat(retrieved).isNotNull(); assertThat(retrieved).hasSize(2); retrieved.forEach(testNode -> assertThat(testNode.getNodeName()).isEqualTo(TEST_NODE)); retrieved = DOMParserUtil.getNestedChildrenNodesList(document, CHILD_NODE, NESTING_NODE, NESTED_NODE); assertThat(retrieved).isNotNull().hasSize(2); retrieved.forEach(nestedNode -> assertThat(nestedNode.getNodeName()).isEqualTo(NESTED_NODE)); }
/**
 * Copies any Jetty options parsed from the given XML config node into JVM system
 * properties so the embedded Jetty server can pick them up.
 *
 * @param node configuration node that may contain a Jetty-options sub-node
 */
protected void setUpJettyOptions( Node node ) {
  Map<String, String> jettyOptions = parseJettyOptions( node );
  // Nothing to do when the options node is absent or empty (idiomatic isEmpty()
  // replaces the original size() > 0 check).
  if ( jettyOptions != null && !jettyOptions.isEmpty() ) {
    for ( Entry<String, String> jettyOption : jettyOptions.entrySet() ) {
      System.setProperty( jettyOption.getKey(), jettyOption.getValue() );
    }
  }
}
// Ensures an empty Jetty options node sets no system properties at all.
@Test public void testDoNotSetUpJettyOptionsAsSystemParameters_WhenEmptyOptionsNode() throws KettleXMLException { Node configNode = getConfigNode( getConfigWithEmptyOptionsNode() ); slServerConfig.setUpJettyOptions( configNode ); assertFalse( "There should not be any jetty option but it is here: " + EXPECTED_ACCEPTORS_KEY, System .getProperties().containsKey( EXPECTED_ACCEPTORS_KEY ) ); assertFalse( "There should not be any jetty option but it is here: " + EXPECTED_ACCEPT_QUEUE_SIZE_KEY, System .getProperties().containsKey( EXPECTED_ACCEPT_QUEUE_SIZE_KEY ) ); assertFalse( "There should not be any jetty option but it is here: " + EXPECTED_LOW_RES_MAX_IDLE_TIME_KEY, System .getProperties().containsKey( EXPECTED_LOW_RES_MAX_IDLE_TIME_KEY ) ); }
// Returns the raw value captured for the given URL parameter name, or null when absent.
public String param(String paramName) { return params.get(paramName); }
// A URL path segment bound to the :type placeholder should be readable back as a String.
@Test void testRequestUrlParamAsString() { RequestUrl requestUrl = new MatchUrl("/api/problems/some-string").toRequestUrl("/api/problems/:type"); assertThat(requestUrl.param(":type", String.class)).isEqualTo("some-string"); }
/**
 * Determines the local outbound IP address by "connecting" a UDP socket toward a
 * public address (8.8.8.8). No packet is actually sent; connect() merely makes the
 * OS select the local interface that would be used.
 *
 * @return the local host address, or {@code null} if it could not be determined
 */
public static String getLocalAddressByDatagram() {
    try (final DatagramSocket socket = new DatagramSocket()) {
        socket.connect(InetAddress.getByName("8.8.8.8"), 10002);
        return socket.getLocalAddress().getHostAddress();
    } catch (Exception e) {
        // Fixed message grammar: was "Failed to retrieving ip address."
        logger.error("Failed to retrieve ip address.", e);
    }
    return null;
}
// Verifies that a local outbound address can be resolved. The previous version only
// printed the value and asserted nothing, so it could never fail; assert instead.
@Test
public void testGetLocalAddressByDatagram() {
    String ip = NetUtils.getLocalAddressByDatagram();
    org.junit.Assert.assertNotNull("Expected a local ip address to be resolved", ip);
}
// Exposes the mini (popup) keyboard view field; may be null if it was never created.
protected final AnyKeyboardViewBase getMiniKeyboard() { return mMiniKeyboard; }
// A key with no primary code and no popup items, but with a long-press code, must NOT
// emit that long-press code on a short press; only the (zero) primary code is reported
// and no mini keyboard popup is shown.
@Test public void testShortPressWhenNoPrimaryKeyAndNoPopupItemsButLongPressCodeShouldNotOutputLongPress() throws Exception { ExternalAnyKeyboard anyKeyboard = new ExternalAnyKeyboard( new DefaultAddOn(getApplicationContext(), getApplicationContext()), getApplicationContext(), keyboard_with_keys_with_no_codes, keyboard_with_keys_with_no_codes, "test", 0, 0, "en", "", "", KEYBOARD_ROW_MODE_NORMAL); anyKeyboard.loadKeyboard(mViewUnderTest.mKeyboardDimens); mViewUnderTest.setKeyboard(anyKeyboard, 0); final AnyKeyboard.AnyKey key = (AnyKeyboard.AnyKey) anyKeyboard.getKeys().get(2); Assert.assertEquals(0, key.getPrimaryCode()); Assert.assertEquals(0, key.getCodesCount()); Assert.assertEquals(0, key.popupResId); Assert.assertEquals(45, key.longPressCode); Assert.assertEquals("c", key.label); Assert.assertNull(key.popupCharacters); ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 30, true, false); Mockito.verify(mMockKeyboardListener) .onGestureTypingInputStart( eq(Keyboard.Key.getCenterX(key)), eq(Keyboard.Key.getCenterY(key)), same(key), anyLong()); Mockito.verifyNoMoreInteractions(mMockKeyboardListener); Assert.assertNull(mViewUnderTest.getMiniKeyboard()); Assert.assertFalse(mViewUnderTest.mMiniKeyboardPopup.isShowing()); ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 10, false, true); Mockito.verify(mMockKeyboardListener, Mockito.never()) .onKey( eq(45), nullable(Keyboard.Key.class), anyInt(), Mockito.any(int[].class), Mockito.anyBoolean()); Mockito.verify(mMockKeyboardListener) .onKey(eq(0), same(key), eq(0), Mockito.any(int[].class), Mockito.anyBoolean()); }
// Instantiates the CopyFilter subclass named by CONF_LABEL_FILTERS_CLASS via its
// (Configuration) constructor; falls back to the default filter when unset. Any
// instantiation failure is logged and rethrown as a RuntimeException naming the class.
public static CopyFilter getCopyFilter(Configuration conf) { String filtersClassName = conf .get(DistCpConstants.CONF_LABEL_FILTERS_CLASS); if (filtersClassName != null) { try { Class<? extends CopyFilter> filtersClass = conf .getClassByName(filtersClassName) .asSubclass(CopyFilter.class); filtersClassName = filtersClass.getName(); Constructor<? extends CopyFilter> constructor = filtersClass .getDeclaredConstructor(Configuration.class); return constructor.newInstance(conf); } catch (Exception e) { LOG.error(DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filtersClassName, e); throw new RuntimeException( DistCpConstants.CLASS_INSTANTIATION_ERROR_MSG + filtersClassName, e); } } else { return getDefaultCopyFilter(conf); } }
// With no filters class configured, getCopyFilter must fall back to TrueCopyFilter.
@Test public void testGetCopyFilterTrueCopyFilter() { Configuration configuration = new Configuration(false); CopyFilter copyFilter = CopyFilter.getCopyFilter(configuration); assertTrue("copyFilter should be instance of TrueCopyFilter", copyFilter instanceof TrueCopyFilter); }
// Creates a new HttpClient backed by the default HttpConnectionProvider.
public static HttpClient create() { return new HttpClientConnect(new HttpConnectionProvider()); }
// A shared (pooled) client should use the shared name resolver; delegates to the common helper.
@Test public void testSharedNameResolver_SharedClientWithConnectionPool() throws InterruptedException { doTestSharedNameResolver(HttpClient.create(), true); }
// No-arg constructor: delegates to the two-key constructor with both keys null
// (presumably keys are generated or supplied later — see the delegated constructor).
public SM2() { this(null, (byte[]) null); }
// Regression test (issue IA824P): encrypting an empty string must raise DataLengthException.
@Test public void issueIA824PTest() { assertThrows(DataLengthException.class, () -> { SM2 sm2 = SmUtil.sm2(); String emptyStr = ""; sm2.encryptHex(emptyStr, KeyType.PublicKey); }); }
// Renders a list of KSQL entities either as JSON or as tables; when more than one entity
// is printed in tabular mode, each is prefixed with its originating statement text.
public void printKsqlEntityList(final List<KsqlEntity> entityList) { switch (outputFormat) { case JSON: printAsJson(entityList); break; case TABULAR: final boolean showStatements = entityList.size() > 1; for (final KsqlEntity ksqlEntity : entityList) { writer().println(); if (showStatements) { writer().println(ksqlEntity.getStatementText()); } printAsTable(ksqlEntity); } break; default: throw new RuntimeException(String.format( "Unexpected output format: '%s'", outputFormat.name() )); } }
// Approval test: renders a DESCRIBE FUNCTION result containing very long, multi-line
// text (with tabs and newlines) and verifies the output against the approved snapshot.
@Test public void shouldPrintFunctionDescription() { final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of( new FunctionDescriptionList( "DESCRIBE FUNCTION foo;", "FOO", "Description that is very, very, very, very, very, very, very, very, very, " + "very, very, very, very, very, very, very, very, very, very, very, very long\n" + "and containing new lines\n" + "\tAND TABS\n" + "too!", "Andy", "v1.1.0", "some.jar", ImmutableList.of(new FunctionInfo( ImmutableList.of( new ArgumentInfo( "arg1", "INT", "Another really, really, really, really, really, really, really," + "really, really, really, really, really, really, really, really " + " really, really, really, really, really, really, really, long\n" + "description\n" + "\tContaining Tabs\n" + "and stuff", true) ), "LONG", "The function description, which too can be really, really, really, " + "really, really, really, really, really, really, really, really, really, " + "really, really, really, really, really, really, really, really, long\n" + "and contains\n\ttabs and stuff" )), FunctionType.SCALAR ))); console.printKsqlEntityList(entityList); final String output = terminal.getOutputString(); Approvals.verify(output, approvalOptions); }
// Intentional no-op: presumably the disabled/stub variant of the API — confirm against class docs.
@Override public void addHeatMapActivities(List<Class<?>> activitiesList) { }
// Heat-map tracking is a no-op in this variant, so added activities must not be reported.
@Test public void addHeatMapActivities() { List<Class<?>> activities = new ArrayList<>(); activities.add(EmptyActivity.class); activities.add(ListActivity.class); mSensorsAPI.addHeatMapActivities(activities); Assert.assertFalse(mSensorsAPI.isHeatMapActivity(EmptyActivity.class)); }
// True when the template spec's schema location resolves to one of the source files this
// checker was seeded with (i.e. generated from local source rather than an external jar).
boolean isGeneratedFromSource(ClassTemplateSpec templateSpec) { DataSchemaLocation location = _specGenerator.getClassLocation(templateSpec); return location != null && _sourceFiles.contains(location.getSourceFile().getAbsolutePath()); }
// A spec whose source location lives inside a jar must not count as generated from source.
@Test public void testGeneratedFromSourceExternal() throws Exception { ProjectionMaskApiChecker projectionMaskApiChecker = new ProjectionMaskApiChecker( _templateSpecGenerator, _sourceFiles, _classLoader); Mockito.when(_nestedTypeSource.getAbsolutePath()).thenReturn("models.jar:/Bar.pdl"); Assert.assertFalse(projectionMaskApiChecker.isGeneratedFromSource(_templateSpec)); Mockito.verify(_nestedTypeSource, Mockito.atLeast(1)).getAbsolutePath(); }
// Builds, initializes and starts the per-node timeline collector aux service (using the
// injected collector manager when provided), registering a shutdown hook; any startup
// failure is logged and terminates the process via ExitUtil.
@VisibleForTesting public static PerNodeTimelineCollectorsAuxService launchServer(String[] args, NodeTimelineCollectorManager collectorManager, Configuration conf) { Thread .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); StringUtils.startupShutdownMessage( PerNodeTimelineCollectorsAuxService.class, args, LOG); PerNodeTimelineCollectorsAuxService auxService = null; try { auxService = collectorManager == null ? new PerNodeTimelineCollectorsAuxService( new NodeTimelineCollectorManager(false)) : new PerNodeTimelineCollectorsAuxService(collectorManager); ShutdownHookManager.get().addShutdownHook(new ShutdownHook(auxService), SHUTDOWN_HOOK_PRIORITY); auxService.init(conf); auxService.start(); } catch (Throwable t) { LOG.error("Error starting PerNodeTimelineCollectorServer", t); ExitUtil.terminate(-1, "Error starting PerNodeTimelineCollectorServer"); } return auxService; }
// launchServer must start cleanly without attempting to call System.exit.
@Test @Timeout(60000) void testLaunch() throws Exception { ExitUtil.disableSystemExit(); try { auxService = PerNodeTimelineCollectorsAuxService.launchServer(new String[0], createCollectorManager(), conf); } catch (ExitUtil.ExitException e) { assertEquals(0, e.status); ExitUtil.resetFirstExitException(); fail(); } }
// Appends a RECHECK_OP marker to the queue; synchronized like the other append operations.
synchronized void appendRecheckOp() { doAppend(RECHECK_OP); }
// Ten queued recheck ops should each reach the consumer exactly once (asynchronously).
@Test public void testRecheckTask() { QueueInstance instance = createInstance(); for (int i = 0; i < 10; i++) { instance.queue.appendRecheckOp(); } Awaitility.await().untilAsserted(() -> { verify(instance.mockedConsumer, times(10)).recheckTopicsChange(); }); // cleanup. instance.close(); }
// Returns the canonical name under which this function is registered.
@Override public String getName() { return FUNCTION_NAME; }
// mod() over a nullable int column: null rows are collected in the bitmap, all other
// rows compute (double) int % (double) long.
@Test public void testModuloNullColumn() { ExpressionContext expression = RequestContextUtils.getExpression(String.format("mod(%s, %s)", INT_SV_NULL_COLUMN, LONG_SV_COLUMN)); TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap); Assert.assertTrue(transformFunction instanceof ModuloTransformFunction); Assert.assertEquals(transformFunction.getName(), "mod"); RoaringBitmap roaringBitmap = new RoaringBitmap(); double[] expectedValues = new double[NUM_ROWS]; for (int i = 0; i < NUM_ROWS; i++) { if (isNullRow(i)) { roaringBitmap.add(i); } else { expectedValues[i] = (double) _intSVValues[i] % (double) _longSVValues[i]; } } testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap); }
// Synchronously asks the broker at addr to delete expired commit-log files; returns true
// on SUCCESS, otherwise throws MQClientException carrying the response code and remark.
public boolean deleteExpiredCommitLog(final String addr, long timeoutMillis) throws MQClientException, RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException, InterruptedException { RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_EXPIRED_COMMITLOG, null); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); switch (response.getCode()) { case ResponseCode.SUCCESS: { return true; } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }
// deleteExpiredCommitLog must return true when the (mocked) broker answers SUCCESS.
@Test public void assertDeleteExpiredCommitLog() throws RemotingException, InterruptedException, MQClientException { mockInvokeSync(); assertTrue(mqClientAPI.deleteExpiredCommitLog(defaultBrokerAddr, defaultTimeout)); }
// Acknowledges pages up to sequenceId on the named client buffer; the checkState guards
// against calling while holding this object's monitor (lock-ordering hazard).
@Override public void acknowledge(OutputBufferId bufferId, long sequenceId) { checkState(!Thread.holdsLock(this), "Can not acknowledge pages while holding a lock on this"); requireNonNull(bufferId, "bufferId is null"); getBuffer(bufferId).acknowledgePages(sequenceId); }
@Test public void testAcknowledge() { OutputBuffers outputBuffers = createInitialEmptyOutputBuffers(BROADCAST); BroadcastOutputBuffer buffer = createBroadcastBuffer(outputBuffers, sizeOfPages(10)); // add three items for (int i = 0; i < 3; i++) { addPage(buffer, createPage(i)); } outputBuffers = createInitialEmptyOutputBuffers(BROADCAST).withBuffer(FIRST, BROADCAST_PARTITION_ID); // add a queue buffer.setOutputBuffers(outputBuffers); assertQueueState(buffer, FIRST, 3, 0); // get the three elements assertBufferResultEquals(TYPES, getBufferResult(buffer, FIRST, 0, sizeOfPages(10), NO_WAIT), bufferResult(0, createPage(0), createPage(1), createPage(2))); // acknowledge pages 0 and 1 acknowledgeBufferResult(buffer, FIRST, 2); // only page 2 is not removed assertQueueState(buffer, FIRST, 1, 2); // acknowledge page 2 acknowledgeBufferResult(buffer, FIRST, 3); // nothing left assertQueueState(buffer, FIRST, 0, 3); // acknowledge more pages will fail try { acknowledgeBufferResult(buffer, FIRST, 4); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "Invalid sequence id"); } // fill the buffer for (int i = 3; i < 6; i++) { addPage(buffer, createPage(i)); } assertQueueState(buffer, FIRST, 3, 3); // getting new pages will again acknowledge the previously acknowledged pages but this is ok buffer.get(FIRST, 3, sizeOfPages(1)).cancel(true); assertQueueState(buffer, FIRST, 3, 3); }
// REST URL template for job-vertex metrics: /jobs/:jobid/vertices/:vertexid/metrics.
@Override public String getTargetRestEndpointURL() { return "/jobs/:" + JobIDPathParameter.KEY + "/vertices/:" + JobVertexIdPathParameter.KEY + "/metrics"; }
// The headers' endpoint template must exactly match /jobs/:jobid/vertices/:vertexid/metrics.
@Test void testUrl() { assertThat(jobVertexMetricsHeaders.getTargetRestEndpointURL()) .isEqualTo( "/jobs/:" + JobIDPathParameter.KEY + "/vertices/:" + JobVertexIdPathParameter.KEY + "/metrics"); }
@SuppressWarnings("deprecation") public static <K> KStreamHolder<K> build( final KStreamHolder<K> left, final KStreamHolder<K> right, final StreamStreamJoin<K> join, final RuntimeBuildContext buildContext, final StreamJoinedFactory streamJoinedFactory) { final QueryContext queryContext = join.getProperties().getQueryContext(); final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext); final LogicalSchema leftSchema; final LogicalSchema rightSchema; final Formats rightFormats; final Formats leftFormats; if (join.getJoinType().equals(RIGHT)) { leftFormats = join.getRightInternalFormats(); rightFormats = join.getLeftInternalFormats(); leftSchema = right.getSchema(); rightSchema = left.getSchema(); } else { leftFormats = join.getLeftInternalFormats(); rightFormats = join.getRightInternalFormats(); leftSchema = left.getSchema(); rightSchema = right.getSchema(); } final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from( leftSchema, leftFormats.getKeyFeatures(), leftFormats.getValueFeatures() ); final Serde<GenericRow> leftSerde = buildContext.buildValueSerde( leftFormats.getValueFormat(), leftPhysicalSchema, stacker.push(LEFT_SERDE_CTX).getQueryContext() ); final PhysicalSchema rightPhysicalSchema = PhysicalSchema.from( rightSchema, rightFormats.getKeyFeatures(), rightFormats.getValueFeatures() ); final Serde<GenericRow> rightSerde = buildContext.buildValueSerde( rightFormats.getValueFormat(), rightPhysicalSchema, stacker.push(RIGHT_SERDE_CTX).getQueryContext() ); final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde( leftFormats.getKeyFormat(), leftPhysicalSchema, queryContext ); final StreamJoined<K, GenericRow, GenericRow> joined = streamJoinedFactory.create( keySerde, leftSerde, rightSerde, StreamsUtil.buildOpName(queryContext), StreamsUtil.buildOpName(queryContext) ); final JoinParams joinParams = JoinParamsFactory .create(join.getKeyColName(), leftSchema, rightSchema); JoinWindows joinWindows; // Grace, as optional, helps to 
identify if a user specified the GRACE PERIOD syntax in the // join window. If specified, then we'll call the new KStreams API ofTimeDifferenceAndGrace() // which enables the "spurious" results bugfix with left/outer joins (see KAFKA-10847). if (join.getGraceMillis().isPresent()) { joinWindows = JoinWindows.ofTimeDifferenceAndGrace( join.getBeforeMillis(), join.getGraceMillis().get()); } else { joinWindows = JoinWindows.of(join.getBeforeMillis()); } joinWindows = joinWindows.after(join.getAfterMillis()); final KStream<K, GenericRow> result; switch (join.getJoinType()) { case LEFT: result = left.getStream().leftJoin( right.getStream(), joinParams.getJoiner(), joinWindows, joined); break; case RIGHT: result = right.getStream().leftJoin( left.getStream(), joinParams.getJoiner(), joinWindows, joined); break; case OUTER: result = left.getStream().outerJoin( right.getStream(), joinParams.getJoiner(), joinWindows, joined); break; case INNER: result = left.getStream().join( right.getStream(), joinParams.getJoiner(), joinWindows, joined); break; default: throw new IllegalStateException("invalid join type"); } return left.withStream(result, joinParams.getSchema()); }
// An inner join keyed on the legacy key column must yield the schema JoinParamsFactory
// derives from the ROWKEY name and the two input schemas.
@Test public void shouldReturnCorrectLegacySchema() { // Given: givenInnerJoin(L_KEY); join = new StreamStreamJoin<>( new ExecutionStepPropertiesV1(CTX), JoinType.INNER, ColumnName.of(LEGACY_KEY_COL), LEFT_FMT, RIGHT_FMT, left, right, BEFORE, AFTER, Optional.empty() ); // When: final KStreamHolder<Struct> result = join.build(planBuilder, planInfo); // Then: assertThat( result.getSchema(), is(JoinParamsFactory.create(ROWKEY_NAME, LEFT_SCHEMA, RIGHT_SCHEMA).getSchema()) ); }
// Looks up a filter by name within the bucket for the given type; returns null when the
// type has no registered filters or the name is unknown.
@Override @Nullable public ZuulFilter<?, ?> getFilterByNameAndType(String name, FilterType type) { Map<String, ZuulFilter<?, ?>> filtersByName = filtersByTypeAndName.get(type); if (filtersByName == null) { return null; } return filtersByName.get(name); }
// Filters must be retrievable by their declared name and filter type.
@Test void getFilterByNameAndType() { StaticFilterLoader filterLoader = new StaticFilterLoader(factory, ImmutableSet.of(DummyFilter2.class, DummyFilter1.class)); ZuulFilter<?, ?> filter = filterLoader.getFilterByNameAndType("Robin", FilterType.INBOUND); Truth.assertThat(filter).isInstanceOf(DummyFilter2.class); }
// Supplies this optimizer's rewrite rules: filter and project row-expression rewrites.
@Override public Set<Rule<?>> rules() { return ImmutableSet.of(filterRowExpressionRewriteRule(), projectRowExpressionRewriteRule()); }
// contains() over an array with a non-constant element must NOT be rewritten to IN,
// even with the rewrite session property enabled.
@Test public void testNotFire() { tester().assertThat( ImmutableSet.<Rule<?>>builder().addAll(new SimplifyRowExpressions(getMetadata()).rules()).addAll( new RewriteConstantArrayContainsToInExpression(getFunctionManager()).rules()).build()) .setSystemProperty(REWRITE_CONSTANT_ARRAY_CONTAINS_TO_IN_EXPRESSION, "true") .on(p -> { VariableReferenceExpression a = p.variable("a", BOOLEAN); VariableReferenceExpression b = p.variable("b"); VariableReferenceExpression c = p.variable("c"); return p.project( assignment(a, p.rowExpression("contains(array[1, 2, c], b)")), p.values(b, c)); }) .matches( project( ImmutableMap.of("a", expression("contains(array[1, 2, c], b)")), values("b", "c"))); }
// Sends the given request through the underlying Bot API client and returns the typed response.
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
// setChatPhoto should succeed with both a File input and a raw byte[] input.
@Test public void setChatPhoto() throws IOException { BaseResponse response = bot.execute(new SetChatPhoto(groupId, imageFile)); assertTrue(response.isOk()); byte[] bytes = Files.readAllBytes(imageFile.toPath()); response = bot.execute(new SetChatPhoto(groupId, bytes)); assertTrue(response.isOk()); }
// Integer overload of cot(): null-safely widens the value to double and delegates.
@Udf(description = "Returns the cotangent of an INT value") public Double cot( @UdfParameter( value = "value", description = "The value in radians to get the cotangent of." ) final Integer value ) { return cot(value == null ? null : value.doubleValue()); }
// cot(0) must be infinite across the double, int and long overloads alike.
@Test public void shouldHandleZero() { assertThat(Double.isInfinite(udf.cot(0.0)), is(true)); assertThat(Double.isInfinite(udf.cot(0)), is(true)); assertThat(Double.isInfinite(udf.cot(0L)), is(true)); }
/**
 * Validates every value in {@code values} against the {@link TypeValidation}
 * registered under {@code type}, passing {@code options} through unchanged.
 */
public void validate(List<String> values, String type, List<String> options) {
  final TypeValidation validation = findByKey(type);
  values.forEach(value -> validation.validate(value, options));
}
// Each value in the list must be validated individually against the matching TypeValidation.
@Test public void validate__multiple_values() { TypeValidation fakeTypeValidation = mock(TypeValidation.class); when(fakeTypeValidation.key()).thenReturn("Fake"); TypeValidations typeValidations = new TypeValidations(newArrayList(fakeTypeValidation)); typeValidations.validate(newArrayList("10", "11", "12"), "Fake", newArrayList("11")); verify(fakeTypeValidation).validate("10", newArrayList("11")); }
/**
 * Renders the full stack trace of the given throwable as a string.
 *
 * @param cause the throwable to render
 * @return the stack trace text, as produced by {@link Throwable#printStackTrace}
 */
public static String toString(Throwable cause) {
    final StringWriter buffer = new StringWriter();
    try (PrintWriter writer = new PrintWriter(buffer)) {
        cause.printStackTrace(writer);
    }
    return buffer.toString();
}
// toString(throwable) must include both the exception type and its message.
@Test public void testToString() { String result = ExceptionUtil.toString(throwable); assertContains(result, "RuntimeException"); assertContains(result, "expected exception"); }
// Filters the sniffed node list down to nodes matching the configured attribute/value;
// a null attribute or value disables filtering and returns the input unchanged.
@Override public List<Node> sniff(List<Node> nodes) { if (attribute == null || value == null) { return nodes; } return nodes.stream() .filter(node -> nodeMatchesFilter(node, attribute, value)) .collect(Collectors.toList()); }
// A filter that matches every node must return the input list unchanged.
@Test void returnsAllNodesIfFilterMatchesAll() throws Exception { final List<Node> nodes = mockNodes(); final NodesSniffer nodesSniffer = new FilteredOpenSearchNodesSniffer("always", "true"); assertThat(nodesSniffer.sniff(nodes)).isEqualTo(nodes); }
// Returns a new immutable map with the key/value pair added, wrapping the persistent
// (PCollections) map produced by plus().
@Override public ImmutableMap<K, V> updated(K key, V value) { return new PCollectionsImmutableMap<>(underlying().plus(key, value)); }
// updated() must delegate to the underlying persistent map's plus() and wrap the result.
@Test public void testDelegationOfUpdated() { new PCollectionsHashMapWrapperDelegationChecker<>() .defineMockConfigurationForFunctionInvocation(mock -> mock.plus(eq(this), eq(this)), SINGLETON_MAP) .defineWrapperFunctionInvocationAndMockReturnValueTransformation(wrapper -> wrapper.updated(this, this), identity()) .expectWrapperToWrapMockFunctionReturnValue() .doFunctionDelegationCheck(); }
/**
 * Checks whether the host of the given service URI matches at least one of the
 * configured whitelist regular expressions.
 *
 * @param serviceUri target URI; {@code null} is rejected (returns {@code false})
 * @return {@code true} if the URI's host matches a whitelist pattern
 * @throws ConfigException if no whitelist is configured at all
 */
public boolean isHostAllowed(URI serviceUri) {
    if (serviceUri == null) {
        return false;
    }
    List<String> hostWhitelist = config.getHostWhitelist();
    // isEmpty() replaces the original size() == 0 check.
    if (hostWhitelist == null || hostWhitelist.isEmpty()) {
        throw new ConfigException("No whitelist defined to allow the route to " + serviceUri);
    }
    String host = serviceUri.getHost();
    // URIs without a host component (e.g. opaque URIs) can never match any pattern;
    // this early return preserves the original null-guard inside anyMatch.
    if (host == null) {
        return false;
    }
    return hostWhitelist.stream().anyMatch(host::matches);
}
// Whitelisted hosts must be allowed regardless of scheme or port.
@Test public void testHostAllowed() throws URISyntaxException { Assert.assertTrue(hostWhitelist.isHostAllowed(new URI("http://192.168.0.1"))); Assert.assertTrue(hostWhitelist.isHostAllowed(new URI("http://10.1.2.3:8543"))); Assert.assertTrue(hostWhitelist.isHostAllowed(new URI("https://192.168.0.10:8765"))); }
// Contract defined by implementations: whether an idle message is currently pending emission.
public abstract boolean idleMessagePending();
// Before any event is observed, no idle message should be pending.
@Test public void when_nothingHappened_then_noWm() { assertFalse(wc.idleMessagePending()); }
// Looks up the value for the composite key (key1, key2); delegates to the base get0.
@Override public long get(long key1, int key2) { return super.get0(key1, key2); }
// get() on a disposed structure must trip an assertion (requires -ea).
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testGet_whenDisposed() { hsa.dispose(); hsa.get(1, 1); }
// Deprecated bridge overload: adapts an old-style ProcessorSupplier to the new
// api.ProcessorSupplier (preserving its stores()) and registers it under the given name.
@SuppressWarnings("rawtypes") @Deprecated public synchronized Topology addProcessor(final String name, final org.apache.kafka.streams.processor.ProcessorSupplier supplier, final String... parentNames) { return addProcessor( name, new ProcessorSupplier<Object, Object, Object, Object>() { @Override public Set<StoreBuilder<?>> stores() { return supplier.stores(); } @Override public org.apache.kafka.streams.processor.api.Processor<Object, Object, Object, Object> get() { return ProcessorAdapter.adaptRaw(supplier.get()); } }, parentNames ); }
// A processor cannot be registered with itself as a parent.
@Test public void shouldFailIfNodeIsItsOwnParent() { assertThrows(TopologyException.class, () -> topology.addProcessor("processor", new MockApiProcessorSupplier<>(), "processor")); }
// Registers a live operation keyed by (callerAddress, callId); a duplicate call id for
// the same caller is a programming error and raises IllegalStateException.
public void register(Operation operation) { Map<Long, Operation> callIds = liveOperations.computeIfAbsent(operation.getCallerAddress(), (key) -> new ConcurrentHashMap<>()); if (callIds.putIfAbsent(operation.getCallId(), operation) != null) { throw new IllegalStateException("Duplicate operation during registration of operation=" + operation); } }
// Registering the same (caller, callId) pair twice must fail; a different callId is fine.
@Test public void when_registerDuplicateCallId_then_exception() throws UnknownHostException { Operation operation = createOperation("1.2.3.4", 1234, 2222L); r.register(operation); // this should not fail r.register(createOperation("1.2.3.4", 1234, 2223L)); // adding a duplicate, expecting failure assertThrows(IllegalStateException.class, () -> r.register(operation)); }
// State-store contexts cannot forward records downstream; always throws.
@Override public <K, V> void forward(final K key, final V value) { throw new UnsupportedOperationException("StateStores can't access forward."); }
// forward() is unsupported in a state-store context.
@Test public void shouldThrowOnForward() { assertThrows(UnsupportedOperationException.class, () -> context.forward("key", "value")); }
// Parses the metric name's namespace (a JSON array string) into an ArrayList; wraps JSON
// errors in a RuntimeException that names the offending namespace.
@VisibleForTesting static ArrayList getNameSpaceArray(MetricKey metricKey) { MetricName metricName = metricKey.metricName(); try { return OBJECT_MAPPER.readValue(metricName.getNamespace(), ArrayList.class); } catch (JsonProcessingException e) { throw new RuntimeException( String.format("Parse namespace[%s] error. ", metricName.getNamespace()), e); } }
// A JSON-array namespace string must parse into the expected scope components.
@Test void testGetNameSpaceArray() { String json = "[\"key\", \"value\", \"MetricGroupType.key\", \"MetricGroupType.value\"]"; MetricKey key = MetricKey.create("step", MetricName.named(json, "name")); assertThat(FlinkMetricContainer.getNameSpaceArray(key)).isEqualTo(DEFAULT_SCOPE_COMPONENTS); }
// Executes an INSERT VALUES statement: resolves the data source, validates the columns,
// serializes the row and produces it to the source topic. Kafka authorization failures
// are translated into KsqlExceptions with informative root causes — including the
// inconsistently-wrapped ClusterAuthorizationException from idempotent producers
// (see KAFKA-14138).
@SuppressWarnings("unused") // Part of required API. public void execute( final ConfiguredStatement<InsertValues> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { final InsertValues insertValues = statement.getStatement(); final MetaStore metaStore = executionContext.getMetaStore(); final KsqlConfig config = statement.getSessionConfig().getConfig(true); final DataSource dataSource = getDataSource(config, metaStore, insertValues); validateInsert(insertValues.getColumns(), dataSource); final ProducerRecord<byte[], byte[]> record = buildRecord(statement, metaStore, dataSource, serviceContext); try { producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps()); } catch (final TopicAuthorizationException e) { // TopicAuthorizationException does not give much detailed information about why it failed, // except which topics are denied. Here we just add the ACL to make the error message // consistent with other authorization error messages. final Exception rootCause = new KsqlTopicAuthorizationException( AclOperation.WRITE, e.unauthorizedTopics() ); throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause); } catch (final ClusterAuthorizationException e) { // ClusterAuthorizationException is thrown when using idempotent producers // and either a topic write permission or a cluster-level idempotent write // permission (only applicable for broker versions no later than 2.8) is // missing. In this case, we include additional context to help the user // distinguish this type of failure from other permissions exceptions // such as the ones thrown above when TopicAuthorizationException is caught. throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } catch (final KafkaException e) { if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) { // The error message thrown when an idempotent producer is missing permissions // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException, // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException. // ksqlDB handles these two the same way, accordingly. // See https://issues.apache.org/jira/browse/KAFKA-14138 for more. throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } else { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } } catch (final Exception e) { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } }
// INSERT VALUES targeting a reserved internal (command) topic must be rejected with a
// clear "read-only topic" error.
@Test public void shouldThrowWhenInsertValuesOnReservedInternalTopic() { // Given givenDataSourceWithSchema("_confluent-ksql-default__command-topic", SCHEMA, SerdeFeatures.of(), SerdeFeatures.of(), false, false); final KsqlConfig ksqlConfig = new KsqlConfig(ImmutableMap.of()); final ConfiguredStatement<InsertValues> statement = ConfiguredStatement.of( PreparedStatement.of( "", new InsertValues(SourceName.of("TOPIC"), allAndPseudoColumnNames(SCHEMA), ImmutableList.of( new LongLiteral(1L), new StringLiteral("str"), new StringLiteral("str"), new LongLiteral(2L) ))), SessionConfig.of(ksqlConfig, ImmutableMap.of()) ); // When: final Exception e = assertThrows( KsqlException.class, () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext) ); // Then: assertThat(e.getMessage(), containsString( "Cannot insert values into read-only topic: _confluent-ksql-default__command-topic")); }
// Intentional no-op: presumably the disabled/stub variant of the API — confirm against class docs.
@Override public void enableNetworkRequest(boolean isRequest) { }
// The no-op implementation must leave network requests reported as disabled.
@Test public void enableNetworkRequest() { mSensorsAPI.enableNetworkRequest(false); Assert.assertFalse(mSensorsAPI.isNetworkRequestEnable()); }
// Collects the names of all unset required builder fields and throws one
// ContainerInitializationException listing them all; passes when everything is set.
protected void validate() throws ContainerInitializationException { List<String> errFields = new ArrayList<>(); if (requestTypeClass == null) { errFields.add("request type class"); } if (responseTypeClass == null) { errFields.add("response type class"); } if (requestReader == null) { errFields.add("request reader"); } if (responseWriter == null) { errFields.add("response writer"); } if (securityContextWriter == null) { errFields.add("security context writer"); } if (exceptionHandler == null) { errFields.add("exception handler"); } if (initializationWrapper == null) { errFields.add("initialization wrapper"); } if (!errFields.isEmpty()) { throw new ContainerInitializationException(String.format(MISSING_FIELD_ERROR, String.join(", ", errFields)), null); } }
// validate() on an empty builder must throw ContainerInitializationException.
@Test void validation_throwsException() { TestBuilder testBuilder = new TestBuilder(); try { testBuilder.validate(); } catch (ContainerInitializationException e) { return; } fail("Did not throw exception"); }
/**
 * Serializes this entry as {@code logicTableName:node1,node2,...}, where each node is
 * formatted with its schema via {@link DataNodeUtils#formatWithSchema}.
 *
 * @return the marshalled text form; just {@code logicTableName:} when there are no nodes
 */
public String marshal() {
    StringBuilder text = new StringBuilder().append(logicTableName).append(':');
    boolean first = true;
    for (DataNode node : dataNodes) {
        // Prepend the separator before every element except the first, instead of
        // appending a trailing comma and trimming it afterwards.
        if (!first) {
            text.append(',');
        }
        text.append(DataNodeUtils.formatWithSchema(node));
        first = false;
    }
    return text.toString();
}
// marshal() must join the data nodes with ',' after the "table:" prefix.
@Test void assertMarshal() { String actual = new JobDataNodeEntry("t_order", Arrays.asList(new DataNode("ds_0.t_order_0"), new DataNode("ds_0.t_order_1"))).marshal(); String expected = "t_order:ds_0.t_order_0,ds_0.t_order_1"; assertThat(actual, is(expected)); }
// CredentialRetriever backed by Google Application Default Credentials; only applies to
// *gcr.io / *docker.pkg.dev registries. Service-account credentials are scoped to GCS
// read-write before minting a short-lived "oauth2accesstoken" credential; any IOException
// (including "ADC not present") yields Optional.empty().
public CredentialRetriever googleApplicationDefaultCredentials() { return () -> { try { if (imageReference.getRegistry().endsWith("gcr.io") || imageReference.getRegistry().endsWith("docker.pkg.dev")) { GoogleCredentials googleCredentials = googleCredentialsProvider.get(); logger.accept(LogEvent.info("Google ADC found")); if (googleCredentials.createScopedRequired()) { // not scoped if service account // The short-lived OAuth2 access token to be generated from the service account with // refreshIfExpired() below will have one-hour expiry (as of Aug 2019). Instead of using // an access token, it is technically possible to use the service account private key to // auth with GCR, but it does not worth writing complex code to achieve that. logger.accept(LogEvent.info("ADC is a service account. Setting GCS read-write scope")); List<String> scope = Collections.singletonList(OAUTH_SCOPE_STORAGE_READ_WRITE); googleCredentials = googleCredentials.createScoped(scope); } googleCredentials.refreshIfExpired(); logGotCredentialsFrom("Google Application Default Credentials"); AccessToken accessToken = googleCredentials.getAccessToken(); // https://cloud.google.com/container-registry/docs/advanced-authentication#access_token return Optional.of(Credential.from("oauth2accesstoken", accessToken.getTokenValue())); } } catch (IOException ex) { // Includes the case where ADC is simply not available. logger.accept( LogEvent.info("ADC not present or error fetching access token: " + ex.getMessage())); } return Optional.empty(); }; }
@Test
public void testGoogleApplicationDefaultCredentials_serviceAccount()
    throws CredentialRetrievalException {
  // Simulate a service-account credential: unscoped, so the retriever must add the GCS scope.
  Mockito.when(mockGoogleCredentials.createScopedRequired()).thenReturn(true);
  Mockito.when(mockGoogleCredentials.createScoped(Mockito.anyCollection()))
      .thenReturn(mockGoogleCredentials);
  CredentialRetrieverFactory credentialRetrieverFactory =
      createCredentialRetrieverFactory("gcr.io", "repo");
  Credential credential =
      credentialRetrieverFactory.googleApplicationDefaultCredentials().retrieve().get();
  // GCR expects the literal user name "oauth2accesstoken" with the access token as password.
  Assert.assertEquals("oauth2accesstoken", credential.getUsername());
  Assert.assertEquals("my-token", credential.getPassword());
  Mockito.verify(mockGoogleCredentials)
      .createScoped(
          Collections.singletonList("https://www.googleapis.com/auth/devstorage.read_write"));
  Mockito.verify(mockLogger).accept(LogEvent.info("Google ADC found"));
  Mockito.verify(mockLogger)
      .accept(LogEvent.info("ADC is a service account. Setting GCS read-write scope"));
  Mockito.verify(mockLogger)
      .accept(LogEvent.lifecycle("Using Google Application Default Credentials for gcr.io/repo"));
  Mockito.verifyNoMoreInteractions(mockLogger);
}
/** Logs a WARN-level message after applying printf-style argument formatting. */
@Override
public void w(String tag, String message, Object... args) {
    final String formatted = formatString(message, args);
    Log.w(tag, formatted);
}
@Test
public void warningLoggedCorrectly() {
    // The "%s" placeholder must be substituted before the message reaches the Android log.
    String expectedMessage = "Hello World";
    logger.w(tag, "Hello %s", "World");
    assertLogged(WARN, tag, expectedMessage, null);
}
/**
 * Wraps the given exception in a {@link ResourceNotFoundException} and rethrows it.
 *
 * @param e the underlying cause to preserve in the wrapper
 */
public static void throwException(final Exception e) {
    final String message = "the validation ExistProviderMethod invoked error";
    throw new ResourceNotFoundException(message, e);
}
@Test
public void throwExceptionTest() {
    // Any cause should be wrapped into a ResourceNotFoundException.
    Assertions.assertThrows(ResourceNotFoundException.class, () -> Assert.throwException(mock(ShenyuException.class)));
}
/**
 * Moves the classic group into PREPARING_REBALANCE, cancelling pending sync responses and
 * scheduling either the initial-rebalance delay (for a brand-new group) or the join completion.
 *
 * @param group  the classic group to rebalance
 * @param reason human-readable trigger used for logging
 * @return an empty result for an initial rebalance, otherwise the result of attempting to
 *         complete (or schedule) the join phase
 */
CoordinatorResult<Void, CoordinatorRecord> prepareRebalance(
    ClassicGroup group,
    String reason
) {
    // If any members are awaiting sync, cancel their request and have them rejoin.
    if (group.isInState(COMPLETING_REBALANCE)) {
        resetAndPropagateAssignmentWithError(group, Errors.REBALANCE_IN_PROGRESS);
    }

    // If a sync expiration is pending, cancel it.
    removeSyncExpiration(group);

    boolean isInitialRebalance = group.isInState(EMPTY);
    if (isInitialRebalance) {
        // The group is new. Provide more time for the members to join.
        int delayMs = classicGroupInitialRebalanceDelayMs;
        int remainingMs = Math.max(group.rebalanceTimeoutMs() - classicGroupInitialRebalanceDelayMs, 0);

        // Schedule a retriable task keyed by the group id; it re-evaluates whether the
        // initial rebalance can complete once the delay elapses.
        timer.schedule(
            classicGroupJoinKey(group.groupId()),
            delayMs,
            TimeUnit.MILLISECONDS,
            false,
            () -> tryCompleteInitialRebalanceElseSchedule(group.groupId(), delayMs, remainingMs)
        );
    }

    // NOTE: the log below reads currentState() after the transition, so it reports
    // PREPARING_REBALANCE rather than the pre-transition state.
    group.transitionTo(PREPARING_REBALANCE);

    log.info("Preparing to rebalance group {} in state {} with old generation {} (reason: {}).",
        group.groupId(), group.currentState(), group.generationId(), reason);

    return isInitialRebalance ? EMPTY_RESULT : maybeCompleteJoinElseSchedule(group);
}
@Test
public void testLastJoiningMembersAreKickedOutWhenRejoiningGroupWithMaxSize() {
    int groupMaxSize = 10;
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withClassicGroupMaxSize(groupMaxSize)
        .withClassicGroupInitialRebalanceDelayMs(50)
        .build();

    // Create a group and add members that exceed the group max size.
    ClassicGroup group = context.createClassicGroup("group-id");
    List<String> memberIds = IntStream.range(0, groupMaxSize + 2)
        .mapToObj(i -> group.generateMemberId("client-id", Optional.empty()))
        .collect(Collectors.toList());

    memberIds.forEach(memberId -> group.add(
        new ClassicGroupMember(
            memberId,
            Optional.empty(),
            "client-id",
            "client-host",
            10000,
            5000,
            "consumer",
            GroupMetadataManagerTestContext.toProtocols("range")
        )
    ));

    context.groupMetadataManager.prepareRebalance(group, "test");

    // All pre-registered members rejoin; only groupMaxSize of them may be retained.
    List<GroupMetadataManagerTestContext.JoinResult> joinResults = memberIds.stream().map(memberId -> context.sendClassicGroupJoin(
        new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
            .withGroupId("group-id")
            .withMemberId(memberId)
            .withDefaultProtocolTypeAndProtocols()
            .withRebalanceTimeoutMs(10000)
            .build()
    )).collect(Collectors.toList());

    assertEquals(groupMaxSize, group.numMembers());
    assertEquals(groupMaxSize, group.numAwaitingJoinResponse());
    assertTrue(group.isInState(PREPARING_REBALANCE));

    // Advance clock by rebalance timeout to complete join phase.
    GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(10000));

    // Members beyond the size cap must receive GROUP_MAX_SIZE_REACHED.
    verifyClassicGroupJoinResponses(joinResults, groupMaxSize, Errors.GROUP_MAX_SIZE_REACHED);

    assertEquals(groupMaxSize, group.numMembers());
    assertTrue(group.isInState(COMPLETING_REBALANCE));

    // The last two members to join are the ones kicked out.
    memberIds.subList(groupMaxSize, groupMaxSize + 2)
        .forEach(memberId -> assertFalse(group.hasMember(memberId)));

    memberIds.subList(0, groupMaxSize)
        .forEach(memberId -> assertTrue(group.hasMember(memberId)));
}
/**
 * Creates a {@link ReadyCheckingSideInputReader} scoped to the given views.
 *
 * @param newContainedViews the views the reader should serve; every one of them must already
 *     be known to this container
 * @throws IllegalArgumentException if any requested view is not contained in this container
 */
public ReadyCheckingSideInputReader createReaderForViews(
    Collection<PCollectionView<?>> newContainedViews) {
  if (containedViews.containsAll(newContainedViews)) {
    return new SideInputContainerSideInputReader(newContainedViews);
  }
  // Build immutable snapshots only on the failure path, to report the unknown views.
  Set<PCollectionView<?>> known = ImmutableSet.copyOf(containedViews);
  Set<PCollectionView<?>> requested = ImmutableSet.copyOf(newContainedViews);
  throw new IllegalArgumentException(
      "Can't create a SideInputReader with unknown views " + Sets.difference(requested, known));
}
@Test
public void isReadyForEmptyWindowTrue() throws Exception {
  CountDownLatch onComplete = new CountDownLatch(1);
  immediatelyInvokeCallback(mapView, GlobalWindow.INSTANCE);
  // Delay the singleton view's callback until we release the latch below.
  CountDownLatch latch = invokeLatchedCallback(singletonView, GlobalWindow.INSTANCE, onComplete);

  ReadyCheckingSideInputReader reader =
      container.createReaderForViews(ImmutableList.of(mapView, singletonView));

  assertThat(reader.isReady(mapView, GlobalWindow.INSTANCE), is(true));
  assertThat(reader.isReady(singletonView, GlobalWindow.INSTANCE), is(false));

  latch.countDown();
  if (!onComplete.await(1500L, TimeUnit.MILLISECONDS)) {
    fail("Callback to set empty values did not complete!");
  }
  // The cached readiness value was false, so the same reader continues to report false
  // even after the callback completed.
  assertThat(reader.isReady(singletonView, GlobalWindow.INSTANCE), is(false));

  // A new reader for the same container gets a fresh look
  reader = container.createReaderForViews(ImmutableList.of(mapView, singletonView));
  assertThat(reader.isReady(singletonView, GlobalWindow.INSTANCE), is(true));
}
/**
 * Runs the version-change detection pipeline: read the version from the controller, list the
 * pods, detect the from/to versions, and finally prepare the version change.
 *
 * @return a future completing with the computed {@code KafkaVersionChange}
 */
public Future<KafkaVersionChange> reconcile() {
    return getVersionFromController()
            .compose(ignored -> getPods())
            .compose(pods -> detectToAndFromVersions(pods))
            .compose(ignored -> prepareVersionChange());
}
@Test
public void testNewClusterWithKafkaVersionOnly(VertxTestContext context) {
    // A new cluster specifying only the Kafka version should resolve everything to defaults.
    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(VERSIONS.defaultVersion().version(), null, null),
            mockNewCluster(null, null, List.of())
    );

    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.defaultVersion()));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion()));
        assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion()));
        assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
        async.flag();
    })));
}
/**
 * Checks whether the given object is a gRPC proto service implementation.
 *
 * @param object candidate object, may be {@code null}
 * @return {@code true} iff the object implements {@code BindableService}
 */
public static boolean isProtoClass(Object object) {
    return BindableService.class.isInstance(object);
}
@Test
public void testIsProtoClass() {
    // A BindableService implementation is a proto class; an arbitrary String is not.
    Assert.assertTrue(SofaProtoUtils.isProtoClass(new BindableServiceImpl()));
    Assert.assertFalse(SofaProtoUtils.isProtoClass(""));
}
/**
 * Registers a state store under its fully-qualified store name.
 *
 * @param store the store to register
 * @throws IllegalArgumentException if a store with the same name is already registered
 */
@Override
public void registerStore(StateStore store) {
    final String storeName = store.fqsn();
    // Use Guava's template overload so the message is only formatted on failure,
    // instead of eagerly calling String.format on every registration.
    checkArgument(!stores.containsKey(storeName),
        "Store %s has already been registered.", storeName);
    stores.put(storeName, store);
}
@Test
public void testRegisterStore() {
    final String fqsn = "t/ns/store";
    StateStore store = mock(StateStore.class);
    when(store.fqsn()).thenReturn(fqsn);

    this.stateManager.registerStore(store);
    // Lookup by tenant/namespace/name must return the exact registered instance.
    StateStore getStore = stateManager.getStore("t", "ns", "store");
    assertSame(getStore, store);
}
/**
 * Builds a Hive partition filter string by AND-joining the string form of each expression.
 *
 * @param partColOffset index of the first partition column within the table schema
 * @param partColNames  names of the partition columns
 * @param expressions   filter expressions to translate
 * @param hiveShim      shim used for version-specific translation
 * @return the joined filter, or {@code Optional.empty()} if any expression cannot be translated
 */
public static Optional<String> makePartitionFilter(
        int partColOffset,
        List<String> partColNames,
        List<Expression> expressions,
        HiveShim hiveShim) {
    ExpressionExtractor extractor =
            new ExpressionExtractor(partColOffset, partColNames, hiveShim);
    List<String> translated = new ArrayList<>(expressions.size());
    for (Expression expression : expressions) {
        String piece = expression.accept(extractor);
        if (piece == null) {
            // One expression could not be pushed down, so the whole filter is unusable.
            return Optional.empty();
        }
        translated.add(piece);
    }
    return Optional.of(String.join(" and ", translated));
}
@Test
public void testMakePartitionFilter() {
    List<String> partColNames = Arrays.asList("p1", "p2", "p3");
    // Field references start at offset 2, matching the partColOffset passed below.
    ResolvedExpression p1Ref = new FieldReferenceExpression("p1", DataTypes.INT(), 0, 2);
    ResolvedExpression p2Ref = new FieldReferenceExpression("p2", DataTypes.STRING(), 0, 3);
    ResolvedExpression p3Ref = new FieldReferenceExpression("p3", DataTypes.DOUBLE(), 0, 4);
    ResolvedExpression p1Exp =
            CallExpression.permanent(
                    BuiltInFunctionDefinitions.EQUALS,
                    Arrays.asList(p1Ref, valueLiteral(1)),
                    DataTypes.BOOLEAN());
    ResolvedExpression p2Exp =
            CallExpression.permanent(
                    BuiltInFunctionDefinitions.EQUALS,
                    Arrays.asList(p2Ref, valueLiteral("a", DataTypes.STRING().notNull())),
                    DataTypes.BOOLEAN());
    ResolvedExpression p3Exp =
            CallExpression.permanent(
                    BuiltInFunctionDefinitions.EQUALS,
                    Arrays.asList(p3Ref, valueLiteral(1.1)),
                    DataTypes.BOOLEAN());
    // Single expression.
    Optional<String> filter =
            HiveTableUtil.makePartitionFilter(2, partColNames, Arrays.asList(p1Exp), hiveShim);
    assertThat(filter.orElse(null)).isEqualTo("(p1 = 1)");
    // Two expressions are AND-joined.
    filter =
            HiveTableUtil.makePartitionFilter(
                    2, partColNames, Arrays.asList(p1Exp, p3Exp), hiveShim);
    assertThat(filter.orElse(null)).isEqualTo("(p1 = 1) and (p3 = 1.1)");
    // Nested OR is preserved within its own parentheses.
    filter =
            HiveTableUtil.makePartitionFilter(
                    2,
                    partColNames,
                    Arrays.asList(
                            p2Exp,
                            CallExpression.permanent(
                                    BuiltInFunctionDefinitions.OR,
                                    Arrays.asList(p1Exp, p3Exp),
                                    DataTypes.BOOLEAN())),
                    hiveShim);
    assertThat(filter.orElse(null)).isEqualTo("(p2 = 'a') and ((p1 = 1) or (p3 = 1.1))");
}
private static Schema optional(Schema original) { // null is first in the union because Parquet's default is always null return Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), original)); }
@Test
public void testOldThriftListOfLists() throws Exception {
    // Nested optional array-of-array mirrors old Thrift's list-in-list encoding.
    Schema listOfLists = optional(Schema.createArray(Schema.createArray(Schema.create(INT))));
    Schema schema = Schema.createRecord("ThriftCompatListInList", null, null, false);
    schema.setFields(
        Lists.newArrayList(new Schema.Field("listOfLists", listOfLists, null, JsonProperties.NULL_VALUE)));
    System.err.println("Avro schema: " + schema.toString(true));

    // Cannot use round-trip assertion because repeated group names differ
    testParquetToAvroConversion(
        schema,
        "message ThriftCompatListInList {\n" + "  optional group listOfLists (LIST) {\n"
            + "    repeated group listOfLists_tuple (LIST) {\n"
            + "      repeated int32 listOfLists_tuple_tuple;\n"
            + "    }\n"
            + "  }\n"
            + "}");
    // Cannot use round-trip assertion because 3-level representation is used
    testParquetToAvroConversion(
        NEW_BEHAVIOR,
        schema,
        "message ThriftCompatListInList {\n" + "  optional group listOfLists (LIST) {\n"
            + "    repeated group listOfLists_tuple (LIST) {\n"
            + "      repeated int32 listOfLists_tuple_tuple;\n"
            + "    }\n"
            + "  }\n"
            + "}");
}
/**
 * Registers a future for the VALUE state cell identified by the given tag and state family.
 *
 * @param encodedTag  encoded state tag bytes
 * @param stateFamily state family the tag belongs to (may be empty)
 * @param coder       coder used to decode the fetched value
 * @return a future completing with the decoded value once the batched read is issued
 */
public <T> Future<T> valueFuture(ByteString encodedTag, String stateFamily, Coder<T> coder) {
  return stateFuture(StateTag.of(StateTag.Kind.VALUE, encodedTag, stateFamily), coder);
}
@Test
public void testNoStateFamily() throws Exception {
  // An empty state family must be passed through verbatim in the Windmill request.
  Future<Integer> future = underTest.valueFuture(STATE_KEY_1, "", INT_CODER);
  // The fetch is lazy: nothing should hit Windmill until the future is forced.
  Mockito.verifyNoMoreInteractions(mockWindmill);

  Windmill.KeyedGetDataRequest.Builder expectedRequest =
      Windmill.KeyedGetDataRequest.newBuilder()
          .setKey(DATA_KEY)
          .setShardingKey(SHARDING_KEY)
          .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
          .setWorkToken(WORK_TOKEN)
          .addValuesToFetch(
              Windmill.TagValue.newBuilder().setTag(STATE_KEY_1).setStateFamily("").build());
  Windmill.KeyedGetDataResponse.Builder response =
      Windmill.KeyedGetDataResponse.newBuilder()
          .setKey(DATA_KEY)
          .addValues(
              Windmill.TagValue.newBuilder()
                  .setTag(STATE_KEY_1)
                  .setStateFamily("")
                  .setValue(intValue(8)));

  Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
      .thenReturn(response.build());
  Integer result = future.get();

  Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());
  Mockito.verifyNoMoreInteractions(mockWindmill);

  assertThat(result, Matchers.equalTo(8));
  assertNoReader(future);
}
/**
 * Computes a checksum by SHA-256 digesting the normalized input stream.
 * NOTE(review): status.getLength() is evaluated before digest() runs (left-to-right
 * argument evaluation); keep that order if digest/normalize mutate the status.
 *
 * @param in     the input to digest
 * @param status transfer status supplying the length and digest context
 * @return the computed checksum
 * @throws BackgroundException on digest failure
 */
@Override
public Checksum compute(final InputStream in, final TransferStatus status) throws BackgroundException {
    return this.compute(status.getLength(), super.digest("SHA-256", this.normalize(in, status), status));
}
@Test
public void testDigest() throws Exception {
    // Hashes are URL-safe Base64 without padding; also verify the empty-input case.
    final byte[] bytes = "1".getBytes(StandardCharsets.UTF_8);
    assertEquals("HbkcOE7OuVb-kpfNRqNanXMu9LKEVu2cIVIB0Me6Q_Y", new ChunkListSHA256ChecksumCompute()
            .compute(new ByteArrayInputStream(bytes), new TransferStatus().withLength(bytes.length)).hash);
    assertEquals("07_pjo8c6JFhSFRWmiD19FCNarQCW4crezqCgu91sSo", new ChunkListSHA256ChecksumCompute()
            .compute(new ByteArrayInputStream(new byte[0]), new TransferStatus().withLength(0L)).hash);
}
/**
 * Maps a throwable to the name of its gRPC status code.
 *
 * @param t the throwable, possibly {@code null}
 * @return the gRPC status code name, or the UNKNOWN sentinel when {@code t} is {@code null}
 */
public static String throwableToGRPCCodeString(@Nullable Throwable t) {
  if (t != null) {
    return Status.fromThrowable(t).getCode().toString();
  }
  return BigQuerySinkMetrics.UNKNOWN;
}
@Test
public void testThrowableToGRPCCodeString() throws Exception {
  // Null and non-gRPC throwables both fall back to "UNKNOWN".
  Throwable nullThrowable = null;
  assertThat(BigQuerySinkMetrics.throwableToGRPCCodeString(nullThrowable), equalTo("UNKNOWN"));

  Throwable nonGrpcError = new IndexOutOfBoundsException("Test Error");
  assertThat(BigQuerySinkMetrics.throwableToGRPCCodeString(nonGrpcError), equalTo("UNKNOWN"));

  // A gRPC-aware exception maps to its embedded status code.
  int notFoundVal = Status.Code.NOT_FOUND.value();
  Throwable grpcError =
      new Exceptions.AppendSerializationError(notFoundVal, "Test Error", "Stream name", null);
  assertThat(BigQuerySinkMetrics.throwableToGRPCCodeString(grpcError), equalTo("NOT_FOUND"));
}
/**
 * Handles checkpoint completion: delegates to the superclass first, then removes and closes
 * files that are no longer needed once the checkpoint is confirmed.
 *
 * @param subtaskKey   the subtask whose checkpoint completed
 * @param checkpointId id of the completed checkpoint
 * @throws Exception if the superclass notification or file cleanup fails
 */
@Override
public void notifyCheckpointComplete(SubtaskKey subtaskKey, long checkpointId) throws Exception {
    super.notifyCheckpointComplete(subtaskKey, checkpointId);
    removeAndCloseFiles(subtaskKey, checkpointId);
}
@Test
public void testSpaceControl() throws Exception {
    try (FileMergingSnapshotManagerBase fmsm =
                    (FileMergingSnapshotManagerBase) createFileMergingSnapshotManager(checkpointBaseDir);
            CloseableRegistry closeableRegistry = new CloseableRegistry()) {
        fmsm.registerSubtaskForSharedStates(subtaskKey1);
        // Helper that writes one segment of the given size for the given checkpoint.
        BiFunctionWithException<Long, Integer, SegmentFileStateHandle, Exception> writer =
                ((checkpointId, size) -> {
                    return writeCheckpointAndGetStream(
                                    subtaskKey1,
                                    checkpointId,
                                    CheckpointedStateScope.SHARED,
                                    fmsm,
                                    closeableRegistry,
                                    size)
                            .closeAndGetHandle();
                });
        Integer eighthOfFile = 4 * 1024 * 1024;
        // Doing checkpoint-1 with 6 files
        SegmentFileStateHandle stateHandle1 = writer.apply(1L, eighthOfFile);
        SegmentFileStateHandle stateHandle2 = writer.apply(1L, eighthOfFile);
        SegmentFileStateHandle stateHandle3 = writer.apply(1L, eighthOfFile);
        SegmentFileStateHandle stateHandle4 = writer.apply(1L, eighthOfFile);
        SegmentFileStateHandle stateHandle5 = writer.apply(1L, eighthOfFile);
        SegmentFileStateHandle stateHandle6 = writer.apply(1L, eighthOfFile);
        fmsm.notifyCheckpointComplete(subtaskKey1, 1);
        // Six logical segments merged into a single physical file.
        assertThat(fmsm.spaceStat.physicalFileCount.get()).isEqualTo(1);
        assertThat(fmsm.spaceStat.logicalFileCount.get()).isEqualTo(6);

        // complete checkpoint-2 with 3 files written and 1 file reused from checkpoint 1
        assertThat(fmsm.couldReusePreviousStateHandle(stateHandle1)).isTrue();
        SegmentFileStateHandle stateHandle7 = writer.apply(2L, eighthOfFile);
        SegmentFileStateHandle stateHandle8 = writer.apply(2L, eighthOfFile);
        SegmentFileStateHandle stateHandle9 = writer.apply(2L, eighthOfFile);
        fmsm.reusePreviousStateHandle(2, Collections.singletonList(stateHandle1));
        fmsm.notifyCheckpointComplete(subtaskKey1, 2);
        assertThat(fmsm.spaceStat.physicalFileCount.get()).isEqualTo(2);
        assertThat(fmsm.spaceStat.logicalFileCount.get()).isEqualTo(9);

        // subsume checkpoint-1
        fmsm.notifyCheckpointSubsumed(subtaskKey1, 1);
        assertThat(fmsm.spaceStat.physicalFileCount.get()).isEqualTo(2);
        assertThat(fmsm.spaceStat.logicalFileCount.get()).isEqualTo(4);

        // complete checkpoint-3 with 1 files reuse from checkpoint 1 and 2.
        // stateHandle1's checkpoint was subsumed, so it is no longer reusable.
        assertThat(fmsm.couldReusePreviousStateHandle(stateHandle1)).isFalse();
        assertThat(fmsm.couldReusePreviousStateHandle(stateHandle7)).isTrue();
        SegmentFileStateHandle stateHandle10 = writer.apply(3L, eighthOfFile);
        SegmentFileStateHandle stateHandle11 = writer.apply(3L, eighthOfFile);
        fmsm.reusePreviousStateHandle(3, Collections.singletonList(stateHandle7));
        fmsm.notifyCheckpointComplete(subtaskKey1, 3);
        assertThat(fmsm.spaceStat.physicalFileCount.get()).isEqualTo(3);
        assertThat(fmsm.spaceStat.logicalFileCount.get()).isEqualTo(6);

        // subsume checkpoint-2
        fmsm.notifyCheckpointSubsumed(subtaskKey1, 2);
        assertThat(fmsm.spaceStat.physicalFileCount.get()).isEqualTo(2);
        assertThat(fmsm.spaceStat.logicalFileCount.get()).isEqualTo(3);
    }
}
/**
 * Appends the given type parameter to this element's type-parameter list.
 *
 * @param typeParameter the type parameter to add
 */
public void addTypeParameter(TypeParameter typeParameter) {
    typeParameters.add(typeParameter);
}
@Test
void testAddTypeParameter() {
    InnerClass clazz = new InnerClass("com.foo.UserClass");
    // The list starts empty and grows by one per added parameter.
    assertEquals(0, clazz.getTypeParameters().size());
    clazz.addTypeParameter(new TypeParameter("T"));
    assertEquals(1, clazz.getTypeParameters().size());
    clazz.addTypeParameter(new TypeParameter("U"));
    assertEquals(2, clazz.getTypeParameters().size());
}
/**
 * Indicates that this step supports the error-handling hop.
 *
 * @return always {@code true}
 */
public boolean supportsErrorHandling() {
    return true;
}
@Test
public void testErrorHandling() {
    // The Salesforce delete step must advertise support for error handling.
    assertTrue( new SalesforceDeleteMeta().supportsErrorHandling() );
}
/**
 * Loads the project repository settings from the server.
 * On parse failure an IllegalStateException is thrown; other runtime errors either propagate
 * (when shouldThrow says so) or fall back to an empty SingleProjectRepository.
 *
 * @param projectKey key of the project to load
 * @param branchBase optional base branch, may be {@code null}
 * @return the loaded repositories, or an empty repository when unavailable
 */
@Override
public ProjectRepositories load(String projectKey, @Nullable String branchBase) {
  GetRequest request = new GetRequest(getUrl(projectKey, branchBase));
  try (WsResponse response = wsClient.call(request)) {
    try (InputStream is = response.contentStream()) {
      return processStream(is);
    } catch (IOException e) {
      // The response arrived but could not be parsed: this is a hard failure.
      throw new IllegalStateException("Couldn't load project repository for " + projectKey, e);
    }
  } catch (RuntimeException e) {
    if (shouldThrow(e)) {
      throw e;
    }
    // Non-fatal server-side problem: continue the analysis without repository data.
    LOG.debug("Project repository not available - continuing without it");
    return new SingleProjectRepository();
  }
}
@Test
public void readRealResponse() throws IOException {
  // Feed a recorded protobuf payload through the loader and verify a known file's metadata.
  InputStream is = getTestResource("project.protobuf");
  WsTestUtil.mockStream(wsClient, "/batch/project.protobuf?key=org.sonarsource.github%3Asonar-github-plugin", is);

  DefaultInputFile file = mock(DefaultInputFile.class);
  when(file.getModuleRelativePath()).thenReturn("src/test/java/org/sonar/plugins/github/PullRequestIssuePostJobTest.java");

  ProjectRepositories proj = loader.load("org.sonarsource.github:sonar-github-plugin", null);
  FileData fd = proj.fileData("org.sonarsource.github:sonar-github-plugin", file);

  assertThat(fd.revision()).isEqualTo("27bf2c54633d05c5df402bbe09471fe43bd9e2e5");
  assertThat(fd.hash()).isEqualTo("edb6b3b9ab92d8dc53ba90ab86cd422e");
}
/**
 * Deletes the coupon template with the given id.
 *
 * @param id the coupon template id; must refer to an existing template
 */
@Override
public void deleteCouponTemplate(Long id) {
    // Validate that the template exists (throws if it does not)
    validateCouponTemplateExists(id);
    // Perform the deletion
    couponTemplateMapper.deleteById(id);
}
@Test
public void testDeleteCouponTemplate_success() {
    // Mock data: insert an existing row first
    CouponTemplateDO dbCouponTemplate = randomPojo(CouponTemplateDO.class);
    couponTemplateMapper.insert(dbCouponTemplate);// @Sql: insert one pre-existing row
    // Prepare parameters
    Long id = dbCouponTemplate.getId();

    // Invoke
    couponTemplateService.deleteCouponTemplate(id);
    // Verify the row no longer exists
    assertNull(couponTemplateMapper.selectById(id));
}
/**
 * Decides which distributed-transaction operation is implied by an auto-commit change.
 * Turning auto-commit off outside a transaction begins one; turning it on inside a
 * transaction commits it; every other combination is a no-op.
 *
 * @param autoCommit the requested auto-commit flag
 * @return BEGIN, COMMIT, or IGNORE
 */
public DistributedTransactionOperationType getDistributedTransactionOperationType(final boolean autoCommit) {
    final boolean inTransaction = distributionTransactionManager.isInTransaction();
    if (autoCommit) {
        return inTransaction ? DistributedTransactionOperationType.COMMIT : DistributedTransactionOperationType.IGNORE;
    }
    return inTransaction ? DistributedTransactionOperationType.IGNORE : DistributedTransactionOperationType.BEGIN;
}
@Test
void assertDistributedTransactionOperationTypeCommit() {
    // Enabling auto-commit while a transaction is active must map to COMMIT.
    connectionTransaction = new ConnectionTransaction(getXATransactionRule(), new TransactionConnectionContext());
    DistributedTransactionOperationType operationType = connectionTransaction.getDistributedTransactionOperationType(true);
    assertThat(operationType, is(DistributedTransactionOperationType.COMMIT));
}
/**
 * Matches when the message field's numeric value is greater than the rule's value,
 * honoring the rule's inversion flag. Non-numeric values on either side never match.
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    Double messageValue = getDouble(msg.getField(rule.getField()));
    if (messageValue == null) {
        return false;
    }
    Double ruleValue = getDouble(rule.getValue());
    if (ruleValue == null) {
        return false;
    }
    boolean greater = messageValue > ruleValue;
    return rule.getInverted() ? !greater : greater;
}
@Test
public void testSuccessfulDoubleMatchWithNegativeValue() {
    // 4.1 is greater than the negative threshold, so the rule should match.
    StreamRule rule = getSampleRule();
    rule.setValue("-54354.0");

    Message msg = getSampleMessage();
    msg.addField("something", "4.1");

    StreamRuleMatcher matcher = getMatcher(rule);
    assertTrue(matcher.match(msg, rule));
}
/**
 * Splits the text into tokens: each regex match becomes its own token, and the plain text
 * between matches (and after the last match) becomes tokens as well. Empty gaps are skipped.
 *
 * NOTE(review): after a match, the scan position is moved back one character when the next
 * character is not '_' (see comment below); presumably the matches are expected to start at
 * '_' boundaries — confirm against the pattern set before changing this.
 *
 * @param text the input to tokenize
 * @return the ordered list of tokens covering the input
 */
public List<String> tokenize(String text) {
    List<String> tokens = new ArrayList<>();

    Matcher regexMatcher = regexExpression.matcher(text);
    int lastIndexOfPrevMatch = 0;

    while (regexMatcher.find(lastIndexOfPrevMatch))
    // this is where the magic happens:
    // the regexp is used to find a matching pattern for substitution
    {
        int beginIndexOfNextMatch = regexMatcher.start();

        // Emit the plain-text gap before this match, if any.
        String prevToken = text.substring(lastIndexOfPrevMatch, beginIndexOfNextMatch);

        if (!prevToken.isEmpty()) {
            tokens.add(prevToken);
        }

        String currentMatch = regexMatcher.group();

        tokens.add(currentMatch);

        lastIndexOfPrevMatch = regexMatcher.end();

        if (lastIndexOfPrevMatch < text.length() && text.charAt(lastIndexOfPrevMatch) != '_') {
            // because it is sometimes positioned after the "_", but it should be positioned
            // before the "_"
            --lastIndexOfPrevMatch;
        }
    }

    // Emit whatever trails the final match.
    String tail = text.substring(lastIndexOfPrevMatch);

    if (!tail.isEmpty()) {
        tokens.add(tail);
    }

    return tokens;
}
@Test
void testTokenize_happyPath_2() {
    // given: a tokenizer keyed on three compound-character patterns
    CompoundCharacterTokenizer tokenizer = new CompoundCharacterTokenizer(
            new HashSet<>(Arrays.asList(new String[] { "_84_93_", "_104_82_", "_104_87_" })));
    String text = "_84_112_93_104_82_61_96_102_93_104_87_110_";

    // when
    List<String> tokens = tokenizer.tokenize(text);

    // then: matched patterns are isolated; surrounding text becomes its own tokens
    assertEquals(Arrays.asList("_84_112_93", "_104_82_", "_61_96_102_93", "_104_87_", "_110_"),
            tokens);
}
/**
 * Parses a JCA signature algorithm string of the form
 * {@code <digest>with<signature>[in<format>]} (e.g. "SHA256withRSA", case-insensitive)
 * into a {@link Signature} model, attaching the digest and optional output format.
 * Strings without "with" are delegated directly to {@code map}.
 *
 * @param str the JCA algorithm string, may be {@code null}
 * @param detectionLocation where the string was detected
 * @return the parsed signature, or empty when unparsable
 */
@Nonnull
@Override
public Optional<Signature> parse(
        @Nullable String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    final String generalizedStr = str.toLowerCase().trim();

    if (!generalizedStr.contains("with")) {
        return map(str, detectionLocation);
    }

    // Split "<digest>with<signature>" at the (lower-cased) "with" keyword.
    int hashEndPos = generalizedStr.indexOf("with");
    String digestStr = str.substring(0, hashEndPos);
    JcaMessageDigestMapper jcaMessageDigestMapper = new JcaMessageDigestMapper();
    final Optional<MessageDigest> messageDigestOptional =
            jcaMessageDigestMapper.parse(digestStr, detectionLocation);

    int encryptStartPos = hashEndPos + 4;
    String signatureStr = str.substring(encryptStartPos);

    // Optional trailing "...in<FORMAT>format" segment describing the output format.
    final String format;
    if (generalizedStr.contains("in") && generalizedStr.contains("format")) {
        int inStartPos = generalizedStr.indexOf("in");
        int inEndPos = inStartPos + 2;
        signatureStr = str.substring(encryptStartPos, inStartPos);
        format = str.substring(inEndPos);
    } else {
        format = null;
    }

    return map(signatureStr, detectionLocation)
            .map(
                    signature -> {
                        messageDigestOptional.ifPresent(signature::put);
                        if (format != null) {
                            signature.put(new OutputFormat(format, detectionLocation));
                        }
                        return signature;
                    });
}
@Test
void SHA3_224withECDSA() {
    DetectionLocation testDetectionLocation =
            new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");

    // "SHA3-224withECDSA" must split into an ECDSA signature carrying a SHA3-224 digest.
    JcaSignatureMapper jcaSignatureMapper = new JcaSignatureMapper();
    Optional<Signature> signatureOptional =
            jcaSignatureMapper.parse("SHA3-224withECDSA", testDetectionLocation);

    assertThat(signatureOptional).isPresent();
    assertThat(signatureOptional.get()).isInstanceOf(ECDSA.class);
    assertThat(signatureOptional.get().getFormat()).isEmpty();
    assertThat(signatureOptional.get().getDigest()).isPresent();
    MessageDigest messageDigest = signatureOptional.get().getDigest().get();
    assertThat(messageDigest).isInstanceOf(SHA3.class);
    assertThat(messageDigest.getName()).isEqualTo("SHA3-224");
    assertThat(messageDigest.getDigestSize()).isPresent();
    assertThat(messageDigest.getDigestSize().get().getValue()).isEqualTo(224);
}
/**
 * Validates that the given value is a {@link Map}.
 *
 * @param value the value under validation, may be {@code null}
 * @return a passed result for maps, otherwise a failed result
 */
@Override
public ValidationResult validate(Object value) {
    if (!(value instanceof Map)) {
        return new ValidationResult.ValidationFailed("Value is not a Map!");
    }
    return new ValidationResult.ValidationPassed();
}
@Test
public void testValidate() throws Exception {
    Validator v = new MapValidator();

    // Non-map inputs (including null) must fail validation.
    assertFalse(v.validate(null).passed());
    assertFalse(v.validate(Collections.emptyList()).passed());
    assertFalse(v.validate(9001).passed());
    assertFalse(v.validate("foo").passed());

    // Any Map passes, whether populated or empty.
    Map<String, String> actuallyFilledMap = ImmutableMap.of(
            "foo", "bar",
            "lol", "wut");

    assertTrue(v.validate(actuallyFilledMap).passed());
    assertTrue(v.validate(Collections.emptyMap()).passed());
}
/**
 * Determines whether the given controller node can be restarted without degrading the
 * metadata quorum. Failures while describing the quorum are logged and propagated.
 *
 * @param nodeId id of the controller node being considered for a roll
 * @return future completing with {@code true} iff the quorum stays healthy without the node
 */
Future<Boolean> canRollController(int nodeId) {
    LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
    return describeMetadataQuorum().map(info -> {
        boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
        if (!canRoll) {
            LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
        }
        return canRoll;
    }).recover(error -> {
        // Surface the original failure to the caller after logging it.
        LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
        return Future.failedFuture(error);
    });
}
@Test
public void cannotRollActiveControllerWith1FollowerBehindOddSizedCluster(VertxTestContext context) {
    // Controller 2's fetch offset lags the leader; with 3 nodes, rolling the leader (1)
    // would leave the quorum at risk, so the check must return false.
    Map<Integer, OptionalLong> controllers = new HashMap<>();
    controllers.put(1, OptionalLong.of(10000L));
    controllers.put(2, OptionalLong.of(7000L));
    controllers.put(3, OptionalLong.of(8200L));
    Admin admin = setUpMocks(1, controllers);
    KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
    quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
        context.verify(() -> assertFalse(result));
        context.completeNow();
    }));
}
/**
 * Copies the worker's current progress (if any) into the given work item status.
 *
 * @param status the status object to populate
 * @throws Exception if reading the worker progress fails
 */
@VisibleForTesting
synchronized void populateProgress(WorkItemStatus status) throws Exception {
  Progress progress = worker.getWorkerProgress();
  if (progress == null) {
    // No progress reported yet; leave the status untouched.
    return;
  }
  status.setReportedProgress(SourceTranslationUtils.readerProgressToCloudProgress(progress));
}
@Test
public void populateProgress() throws Exception {
  WorkItemStatus status = new WorkItemStatus();
  // Worker reports a progress at index 42; the status must reflect its cloud translation.
  Progress progress =
      cloudProgressToReaderProgress(ReaderTestUtils.approximateProgressAtIndex(42L));
  when(worker.getWorkerProgress()).thenReturn(progress);
  statusClient.setWorker(worker, executionContext);

  statusClient.populateProgress(status);

  assertThat(
      status.getReportedProgress(), equalTo(ReaderTestUtils.approximateProgressAtIndex(42L)));
}
/**
 * Decides whether a message should be stored for offline delivery, implementing the rules of
 * XEP-0160 (offline handling per message type), XEP-0334 (the no-store hint) and OF-2083
 * (avoid re-storing already-stored offline messages).
 *
 * @param message the message under consideration
 * @return {@code true} if the message should be stored offline
 */
static boolean shouldStoreMessage(final Message message) {
    // XEP-0334: Implement the <no-store/> hint to override offline storage
    if (message.getChildElement("no-store", "urn:xmpp:hints") != null) {
        return false;
    }
    // OF-2083: Prevent storing offline message that is already stored
    if (message.getChildElement("offline", "http://jabber.org/protocol/offline") != null) {
        return false;
    }
    switch (message.getType()) {
        case chat:
            // XEP-0160: Messages with a 'type' attribute whose value is "chat" SHOULD be stored offline, with the exception of messages that contain only Chat State Notifications (XEP-0085) [7] content

            // Iterate through the child elements to see if we can find anything that's not a chat state notification or
            // real time text notification
            Iterator<?> it = message.getElement().elementIterator();

            while (it.hasNext()) {
                Object item = it.next();

                if (item instanceof Element) {
                    Element el = (Element) item;
                    // Elements without a namespace (e.g. <body/>) don't decide on their own; skip.
                    if (Namespace.NO_NAMESPACE.equals(el.getNamespace())) {
                        continue;
                    }
                    if (!el.getNamespaceURI().equals("http://jabber.org/protocol/chatstates")
                            && !(el.getQName().equals(QName.get("rtt", "urn:xmpp:rtt:0")))
                    ) {
                        // Found substantive content beyond chat-state/RTT: store it.
                        return true;
                    }
                }
            }

            // Only chat-state/RTT children remain: store only if there is a non-empty body.
            return message.getBody() != null && !message.getBody().isEmpty();
        case groupchat:
        case headline:
            // XEP-0160: "groupchat" message types SHOULD NOT be stored offline
            // XEP-0160: "headline" message types SHOULD NOT be stored offline
            return false;
        case error:
            // XEP-0160: "error" message types SHOULD NOT be stored offline,
            // although a server MAY store advanced message processing errors offline
            if (message.getChildElement("amp", "http://jabber.org/protocol/amp") == null) {
                return false;
            }
            break;
        default:
            // XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline.
            break;
    }
    return true;
}
@Test
public void shouldNotStoreEmptyChatMessages() {
    // XEP-0160: "chat" message types SHOULD be stored offline unless they only contain chat state notifications
    // A chat message with no children and no body carries nothing worth storing.
    Message message = new Message();
    message.setType(Message.Type.chat);
    assertFalse(OfflineMessageStore.shouldStoreMessage(message));
}
/**
 * Servlet filter entry point: narrows the generic request/response to their HTTP forms
 * and delegates to {@code handle}.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    HttpServletResponse httpResponse = (HttpServletResponse) response;
    handle(httpRequest, httpResponse, chain);
}
@Test
void disallowedMethodCausesMethodNotAllowedResponse() throws IOException, ServletException {
    // TRACE is not in the allow-list, so the filter must reject with the configured status.
    when(request.getMethod()).thenReturn("TRACE");

    filter.doFilter(request, response, chain);

    verify(response).sendError(DISALLOWED_STATUS_CODE);
}
/**
 * Boolean comparison is unsupported on this comparator type.
 *
 * @throws UnsupportedOperationException always
 */
public int compare(boolean b1, boolean b2) {
    String message = "compare(boolean, boolean) was called on a non-boolean comparator: " + toString();
    throw new UnsupportedOperationException(message);
}
@Test
public void testFloat16Comparator() {
    // Little-endian float16 encodings, listed in strictly ascending numeric order.
    Binary[] valuesInAscendingOrder = {
        Binary.fromConstantByteArray(new byte[] {0x00, (byte) 0xfc}), // -Infinity
        Binary.fromConstantByteArray(new byte[] {0x00, (byte) 0xc0}), // -2.0
        Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x84}), // -6.109476E-5
        Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x80}), // -0
        Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x00}), // +0
        Binary.fromConstantByteArray(new byte[] {(byte) 0x01, (byte) 0x00}), // 5.9604645E-8
        Binary.fromConstantByteArray(new byte[] {(byte) 0xff, (byte) 0x7b}), // 65504.0
        Binary.fromConstantByteArray(new byte[] {(byte) 0x00, (byte) 0x7c})
    }; // Infinity

    // The binary comparator must agree with Float.compare over every ordered pair.
    for (int i = 0; i < valuesInAscendingOrder.length; ++i) {
        for (int j = 0; j < valuesInAscendingOrder.length; ++j) {
            Binary bi = valuesInAscendingOrder[i];
            Binary bj = valuesInAscendingOrder[j];
            float fi = Float16.toFloat(bi);
            float fj = Float16.toFloat(bj);
            assertEquals(Float.compare(fi, fj), BINARY_AS_FLOAT16_COMPARATOR.compare(bi, bj));
            if (i < j) {
                assertEquals(-1, Float.compare(fi, fj));
            }
        }
    }
}
/**
 * Encrypts {@code args[0]} with the {@link EncryptionReplacer} and wraps the result in the
 * replacer's variable syntax {@code $<prefix>{...}}. An optional {@code args[1]} overrides
 * the number of key-derivation iterations.
 *
 * @param args the value to encrypt, optionally followed by an iteration count
 * @return the encrypted value wrapped as a replacer variable
 * @throws Exception if configuration loading or encryption fails
 */
protected static String encrypt(String... args) throws Exception {
    int iterations = args.length == 2 ? Integer.parseInt(args[1]) : DEFAULT_ITERATIONS;
    EncryptionReplacer replacer = new EncryptionReplacer();
    String xmlPath = System.getProperty("hazelcast.config");
    Properties properties;
    if (xmlPath == null) {
        properties = System.getProperties();
    } else {
        // Close the config stream deterministically; the original leaked the FileInputStream.
        try (FileInputStream configStream = new FileInputStream(xmlPath)) {
            properties = loadPropertiesFromConfig(configStream);
        }
    }
    replacer.init(properties);
    String encrypted = replacer.encrypt(args[0], iterations);
    String variable = "$" + replacer.getPrefix() + "{" + encrypted + "}";
    return variable;
}
@Test
public void testClientGenerateEncryptedLegacy() throws Exception {
    // Skip on JVMs lacking the legacy algorithms this path relies on.
    assumeAlgorithmsSupported("PBKDF2WithHmacSHA1", "DES");
    String xml = "<hazelcast-client xmlns=\"http://www.hazelcast.com/schema/client-config\">\n"
            + XML_LEGACY_CONFIG
            + "</hazelcast-client>";
    File configFile = createFileWithString(xml);
    hazelcastConfigProperty.setOrClearProperty(configFile.getAbsolutePath());
    String encrypted = encrypt("test");
    // The output must be wrapped in the ENC variable syntax.
    assertThat(encrypted)
            .startsWith("$ENC{")
            .endsWith("}");
}
/**
 * Deletes the account identified by {@code uuid} in a single DynamoDB transaction: removes
 * the phone-number constraint, the account row, the PNI constraint and (if present) the
 * username-hash constraint, records a tombstone of the deleted account, and applies any
 * caller-supplied extra transaction items. Completes successfully without writes when the
 * account does not exist.
 *
 * @param uuid the account identifier to delete
 * @param additionalWriteItems extra items to fold into the same transaction
 * @return a future that completes when the transaction (or the no-op) finishes
 */
public CompletableFuture<Void> delete(final UUID uuid, final List<TransactWriteItem> additionalWriteItems) {
  final Timer.Sample sample = Timer.start();

  return getByAccountIdentifierAsync(uuid)
      .thenCompose(maybeAccount -> maybeAccount.map(account -> {
            final List<TransactWriteItem> transactWriteItems = new ArrayList<>(List.of(
                buildDelete(phoneNumberConstraintTableName, ATTR_ACCOUNT_E164, account.getNumber()),
                buildDelete(accountsTableName, KEY_ACCOUNT_UUID, uuid),
                buildDelete(phoneNumberIdentifierConstraintTableName, ATTR_PNI_UUID, account.getPhoneNumberIdentifier()),
                // Tombstone allows later lookup of recently-deleted accounts by number.
                buildPutDeletedAccount(uuid, account.getNumber())
            ));

            account.getUsernameHash().ifPresent(usernameHash -> transactWriteItems.add(
                buildDelete(usernamesConstraintTableName, UsernameTable.KEY_USERNAME_HASH, usernameHash)));

            transactWriteItems.addAll(additionalWriteItems);

            return asyncClient.transactWriteItems(TransactWriteItemsRequest.builder()
                    .transactItems(transactWriteItems)
                    .build())
                .thenRun(Util.NOOP);
          })
          .orElseGet(() -> CompletableFuture.completedFuture(null)))
      .thenRun(() -> sample.stop(DELETE_TIMER));
}
/**
 * End-to-end delete test: deleting one account must remove its main item and both
 * phone-number constraint rows, record a "recently deleted" tombstone, leave an
 * unrelated account untouched, and still allow a brand-new account to be created
 * with the deleted account's phone number afterwards.
 */
@Test
void testDelete() {
  final Device deletedDevice = generateDevice(DEVICE_ID_1);
  final Account deletedAccount = generateAccount("+14151112222", UUID.randomUUID(), UUID.randomUUID(), List.of(deletedDevice));
  final Device retainedDevice = generateDevice(DEVICE_ID_1);
  final Account retainedAccount = generateAccount("+14151112345", UUID.randomUUID(), UUID.randomUUID(), List.of(retainedDevice));

  createAccount(deletedAccount);
  createAccount(retainedAccount);

  // Precondition: no tombstone yet, and both accounts plus their constraint rows exist.
  assertThat(accounts.findRecentlyDeletedAccountIdentifier(deletedAccount.getNumber())).isEmpty();

  assertPhoneNumberConstraintExists("+14151112222", deletedAccount.getUuid());
  assertPhoneNumberIdentifierConstraintExists(deletedAccount.getPhoneNumberIdentifier(), deletedAccount.getUuid());
  assertPhoneNumberConstraintExists("+14151112345", retainedAccount.getUuid());
  assertPhoneNumberIdentifierConstraintExists(retainedAccount.getPhoneNumberIdentifier(), retainedAccount.getUuid());

  assertThat(accounts.getByAccountIdentifier(deletedAccount.getUuid())).isPresent();
  assertThat(accounts.getByAccountIdentifier(retainedAccount.getUuid())).isPresent();

  accounts.delete(deletedAccount.getUuid(), Collections.emptyList()).join();

  // The deleted account is gone, its constraints are removed, and the tombstone maps
  // the old e164 back to the deleted account's UUID.
  assertThat(accounts.getByAccountIdentifier(deletedAccount.getUuid())).isNotPresent();
  assertThat(accounts.findRecentlyDeletedAccountIdentifier(deletedAccount.getNumber())).hasValue(deletedAccount.getUuid());

  assertPhoneNumberConstraintDoesNotExist(deletedAccount.getNumber());
  assertPhoneNumberIdentifierConstraintDoesNotExist(deletedAccount.getPhoneNumberIdentifier());

  // The unrelated account must be completely untouched by the delete.
  verifyStoredState(retainedAccount.getNumber(), retainedAccount.getUuid(), retainedAccount.getPhoneNumberIdentifier(), null,
      accounts.getByAccountIdentifier(retainedAccount.getUuid()).get(), retainedAccount);

  {
    // Re-registering the freed phone number must behave like a brand-new account
    // (freshUser == true) with its own constraint rows.
    final Account recreatedAccount = generateAccount(deletedAccount.getNumber(), UUID.randomUUID(),
        UUID.randomUUID(), List.of(generateDevice(DEVICE_ID_1)));

    final boolean freshUser = createAccount(recreatedAccount);

    assertThat(freshUser).isTrue();
    assertThat(accounts.getByAccountIdentifier(recreatedAccount.getUuid())).isPresent();
    verifyStoredState(recreatedAccount.getNumber(), recreatedAccount.getUuid(), recreatedAccount.getPhoneNumberIdentifier(), null,
        accounts.getByAccountIdentifier(recreatedAccount.getUuid()).get(), recreatedAccount);

    assertPhoneNumberConstraintExists(recreatedAccount.getNumber(), recreatedAccount.getUuid());
    assertPhoneNumberIdentifierConstraintExists(recreatedAccount.getPhoneNumberIdentifier(), recreatedAccount.getUuid());
  }
}
public Map<String, List<String>> getTableToBrokersMap() { Map<String, Set<String>> brokerUrlsMap = new HashMap<>(); try { byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true); brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData); JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData)); JsonNode brokerResourceNode = jsonObject.get("mapFields"); Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields(); while (resourceEntries.hasNext()) { Entry<String, JsonNode> resourceEntry = resourceEntries.next(); String resourceName = resourceEntry.getKey(); String tableName = resourceName.replace(OFFLINE_SUFFIX, "").replace(REALTIME_SUFFIX, ""); Set<String> brokerUrls = brokerUrlsMap.computeIfAbsent(tableName, k -> new HashSet<>()); JsonNode resource = resourceEntry.getValue(); Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields(); while (brokerEntries.hasNext()) { Entry<String, JsonNode> brokerEntry = brokerEntries.next(); String brokerName = brokerEntry.getKey(); if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) { brokerUrls.add(getHostPort(brokerName)); } } } } catch (Exception e) { LOGGER.warn("Exception while reading External view from zookeeper", e); // ignore } Map<String, List<String>> tableToBrokersMap = new HashMap<>(); for (Entry<String, Set<String>> entry : brokerUrlsMap.entrySet()) { tableToBrokersMap.put(entry.getKey(), new ArrayList<>(entry.getValue())); } return tableToBrokersMap; }
@Test public void testGetBrokersMapByInstanceConfig() { configureData(_instanceConfigPlain, true); // Run the test final Map<String, List<String>> result = _externalViewReaderUnderTest.getTableToBrokersMap(); final Map<String, List<String>> expectedResult = ImmutableMap.of("field1", Arrays.asList("first.pug-pinot-broker-headless:8099")); // Verify the results assertEquals(expectedResult, result); }
public static List<String> getDnsInfo(String hostName, String... attrNames) { final String uri = StrUtil.addPrefixIfNot(hostName, "dns:"); final Attributes attributes = JNDIUtil.getAttributes(uri, attrNames); final List<String> infos = new ArrayList<>(); for (Attribute attribute : new EnumerationIter<>(attributes.getAll())) { try { infos.add((String) attribute.get()); } catch (NamingException ignore) { //ignore } } return infos; }
/**
 * Manual smoke test (needs live DNS, hence @Disabled): fetches the TXT records
 * for hutool.cn and prints them for inspection.
 */
@Test
@Disabled
public void getDnsInfoTest() {
	final List<String> txtRecords = NetUtil.getDnsInfo("hutool.cn", "TXT");
	Console.log(txtRecords);
}
/**
 * Parses the entire {@code input} sequence, delegating each discovered entry to
 * {@code handler} against {@code target}.
 *
 * @return whatever the range-based overload reports for the full input span
 * @throws NullPointerException if {@code input} is null
 */
public <T> boolean parse(Handler<T> handler, T target, CharSequence input) {
  if (input == null) {
    throw new NullPointerException("input == null");
  }
  // Delegate to the offset-based overload covering the whole sequence.
  return parse(handler, target, input, 0, input.length());
}
/**
 * Whitespace-only inputs must be tolerated by the splitter and must not produce
 * any map entries.
 */
@Test
void toleratesButIgnores_onlyWhitespace() {
  for (String w : Arrays.asList(" ", "\t")) {
    entrySplitter.parse(parseIntoMap, map, w);
    entrySplitter.parse(parseIntoMap, map, w + w);
  }
  // Bug fix: the original `assertThat(map.isEmpty());` built an AssertJ boolean
  // assertion and never invoked it, so the test verified nothing.
  assertThat(map).isEmpty();
}
/**
 * Loads every stored pipeline, parses its source, and resolves its stages against the
 * current rule set, registering metrics in the given registry. A pipeline whose source
 * fails to parse is not dropped: it is replaced by an empty placeholder pipeline so the
 * returned map still contains an entry for its id.
 *
 * @param pipelineMetricRegistry registry that receives per-pipeline/per-rule metrics
 * @return immutable map of pipeline id to fully resolved pipeline
 */
public ImmutableMap<String, Pipeline> resolvePipelines(PipelineMetricRegistry pipelineMetricRegistry) {
    final Map<String, Rule> ruleNameMap = resolveRules();

    // Read all pipelines and parse them
    final ImmutableMap.Builder<String, Pipeline> pipelineIdMap = ImmutableMap.builder();
    // The DAO supplier returns a closeable stream; try-with-resources guarantees release.
    try (final var pipelineStream = pipelineDaoSupplier.get()) {
        pipelineStream.forEach(pipelineDao -> {
            Pipeline pipeline;
            try {
                pipeline = ruleParser.parsePipeline(pipelineDao.id(), pipelineDao.source());
            } catch (ParseException e) {
                // Keep going: one broken pipeline must not prevent the others from loading.
                LOG.warn("Ignoring non parseable pipeline <{}/{}> with errors <{}>", pipelineDao.title(), pipelineDao.id(), e.getErrors());
                pipeline = Pipeline.empty("Failed to parse pipeline: " + pipelineDao.id());
            }
            //noinspection ConstantConditions
            pipelineIdMap.put(pipelineDao.id(), resolvePipeline(pipelineMetricRegistry, pipeline, ruleNameMap));
        });
    }

    return pipelineIdMap.build();
}
/**
 * Resolving pipelines through a {@link PipelineMetricRegistry} created with explicit
 * "PIPELINE"/"RULE" prefixes must register exactly the expected metric names:
 * per-rule-in-stage counters, global per-rule counters, and per-pipeline/per-stage
 * execution counters.
 */
@Test
void resolvePipelinesWithMetricPrefix() {
    final var registry = PipelineMetricRegistry.create(metricRegistry, "PIPELINE", "RULE");
    final var resolver = new PipelineResolver(
            new PipelineRuleParser(new FunctionRegistry(Map.of())),
            PipelineResolverConfig.of(
                    () -> Stream.of(rule1),
                    () -> Stream.of(pipeline1),
                    () -> Stream.of(connections1, connections2)
            )
    );

    resolver.resolvePipelines(registry);

    // Exact metric-name set: rule-1 scoped to pipeline-1 stage 5, rule-1 globally,
    // and pipeline-1 overall plus its stage 5.
    assertThat(metricRegistry.getMetrics().keySet()).containsExactlyInAnyOrder(
            "RULE.rule-1.pipeline-1.5.not-matched",
            "RULE.rule-1.pipeline-1.5.matched",
            "RULE.rule-1.pipeline-1.5.failed",
            "RULE.rule-1.pipeline-1.5.executed",
            "RULE.rule-1.matched",
            "RULE.rule-1.not-matched",
            "RULE.rule-1.failed",
            "RULE.rule-1.executed",
            "PIPELINE.pipeline-1.executed",
            "PIPELINE.pipeline-1.stage.5.executed"
    );
}
@VisibleForTesting static Session.SessionBuilder transferSessionPropertiesToSession(Session.SessionBuilder session, Map<String, String> sessionProperties) { sessionProperties.forEach((key, value) -> { // Presto session properties may also contain catalog properties in format catalog.property_name=value String[] parts = key.split("\\."); if (parts.length == 1) { // system property session.setSystemProperty(parts[0], value); } else if (parts.length == 2) { // catalog property session.setCatalogSessionProperty(parts[0], parts[1], value); } else { throw new PrestoException(INVALID_SESSION_PROPERTY, "Unable to parse session property: " + key); } }); return session; }
/**
 * A plain key must land in the system properties and a dotted key must land in the
 * corresponding catalog's unprocessed properties.
 */
@Test
public void testTransferSessionProperties()
{
    Session.SessionBuilder builder = TestingSession.testSessionBuilder();
    Map<String, String> properties = ImmutableMap.of(
            "property_name", "property_value",
            "catalog.property_name", "value2");

    Session session = transferSessionPropertiesToSession(builder, properties).build();

    assertEquals(session.getSystemProperties().get("property_name"), "property_value");
    assertEquals(session.getUnprocessedCatalogProperties().get("catalog").get("property_name"), "value2");
}
/**
 * Computes the configured checksum over the contents of {@code sourceFile}.
 * The input stream is opened via the file's own {@link FileSystem} and is always
 * closed, even if checksum computation fails.
 *
 * @throws IOException if the file cannot be opened or read
 */
@Override
public String getFileChecksum(Path sourceFile) throws IOException {
  final FileSystem fileSystem = sourceFile.getFileSystem(this.conf);
  try (FSDataInputStream inputStream = fileSystem.open(sourceFile)) {
    return this.checksum.computeChecksum(inputStream);
  }
}
/**
 * The checksum of a freshly created test file must match the precomputed
 * SHA-256 reference value.
 */
@Test
public void testChecksum() throws Exception {
  final Path testFile = makeFile("test1.txt");
  assertEquals(inputChecksumSHA256, client.getFileChecksum(testFile));
}
/**
 * Resolves a relative path against this URI's absolute path and returns the
 * resulting GCS URI in the same bucket.
 *
 * @param path relative path to append
 * @throws IllegalStateException if {@code path} is absolute
 */
public GcsUri resolve(String path) {
  Path relative = Paths.get(path);
  // Absolute paths cannot be resolved against a base path — reject them early.
  checkState(!relative.isAbsolute(), "path is absolute");
  return createGcsUri(getBucketName(), _absolutePath.get().resolve(relative).toString());
}
/**
 * Resolving a relative sub-path must keep the bucket and append to the base path.
 */
@Test
public void testResolve() {
  GcsUri base = new GcsUri(URI.create("gs://bucket_name/dir"));
  GcsUri resolved = base.resolve("subdir/file");
  assertEquals(new GcsUri(URI.create("gs://bucket_name/dir/subdir/file")), resolved);
}
/**
 * Syncs the username/password fields with the "use integrated security" checkbox:
 * when integrated (OS-level) security is selected there are no explicit credentials,
 * so both fields are disabled; otherwise they are re-enabled. No-op when the
 * checkbox widget has not been loaded yet.
 */
public void handleUseSecurityCheckbox() {
  if ( useIntegratedSecurityCheck == null ) {
    return;
  }
  // Idiom fix: the original duplicated both setDisabled calls across an if/else
  // that differed only in the boolean — pass the checkbox state directly.
  boolean integrated = useIntegratedSecurityCheck.isChecked();
  userNameBox.setDisabled( integrated );
  passwordBox.setDisabled( integrated );
}
/**
 * Exercises {@code handleUseSecurityCheckbox} in three states: before the checkbox
 * widget exists (must not throw), with the checkbox present but unchecked, and with
 * it checked. The stubbing order matters: the widget is only wired into the document
 * after the first call.
 */
@Test
public void testHandleUseSecurityCheckbox() throws Exception {
  // No checkbox registered yet — the handler must tolerate the missing widget.
  dataHandler.handleUseSecurityCheckbox();
  // Now add the widget
  XulCheckbox useIntegratedSecurityCheck = mock( XulCheckbox.class );
  when( useIntegratedSecurityCheck.isChecked() ).thenReturn( false );
  when( document.getElementById( "use-integrated-security-check" ) ).thenReturn( useIntegratedSecurityCheck );
  // Re-resolve controls so the handler picks up the newly registered checkbox.
  dataHandler.getControls();
  dataHandler.handleUseSecurityCheckbox();
  // Flip to checked and run the handler again to cover the other branch.
  when( useIntegratedSecurityCheck.isChecked() ).thenReturn( true );
  dataHandler.handleUseSecurityCheckbox();
}
@Override public void updateGroup(MemberGroupUpdateReqVO updateReqVO) { // 校验存在 validateGroupExists(updateReqVO.getId()); // 更新 MemberGroupDO updateObj = MemberGroupConvert.INSTANCE.convert(updateReqVO); memberGroupMapper.updateById(updateObj); }
@Test public void testUpdateGroup_notExists() { // 准备参数 MemberGroupUpdateReqVO reqVO = randomPojo(MemberGroupUpdateReqVO.class); // 调用, 并断言异常 assertServiceException(() -> groupService.updateGroup(reqVO), GROUP_NOT_EXISTS); }