focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/** Returns this value's backing {@code value} field widened to a double. */
@Override public double doubleValue() { return value; }
@Test
public void testDoubleValue() {
    // Positive and negative milli-percent values must survive the round trip through doubleValue().
    final double tolerance = 0.0001f;
    assertEquals(100f, MilliPct.ofMilliPct(100).doubleValue(), tolerance);
    assertEquals(-100f, MilliPct.ofMilliPct(-100).doubleValue(), tolerance);
}
/**
 * Joins an existing trace context, producing a span that shares the given context's IDs.
 * <p>
 * If {@code supportsJoin} is false, falls back to creating a child span instead.
 * Otherwise the context's shared flag is toggled so that exactly one of the two contexts
 * passed to {@code toSpan} carries {@code FLAG_SHARED}.
 *
 * @throws NullPointerException if {@code context} is null
 */
public final Span joinSpan(TraceContext context) {
  if (context == null) throw new NullPointerException("context == null");
  if (!supportsJoin) return newChild(context);

  // set shared flag if not already done
  int flags = InternalPropagation.instance.flags(context);
  if (!context.shared()) {
    flags |= FLAG_SHARED;
    return toSpan(context, InternalPropagation.instance.withFlags(context, flags));
  } else {
    flags &= ~FLAG_SHARED;
    return toSpan(InternalPropagation.instance.withFlags(context, flags), context);
  }
}
@Test
void localRootId_joinSpan_notYetSampled() {
    // Two sibling contexts in the same (not yet sampled) trace.
    TraceContext first = TraceContext.newBuilder().traceId(1).spanId(2).build();
    TraceContext second = TraceContext.newBuilder().traceId(1).spanId(3).build();
    localRootId(first, second, ctx -> tracer.joinSpan(ctx.context()));
}
/**
 * Assigns {@code task} to this client as a standby task.
 * <p>
 * NOTE(review): assertNotAssigned rejects tasks that are already assigned — the paired
 * test expects IllegalArgumentException in that case; confirm against its implementation.
 */
public void assignStandby(final TaskId task) {
    assertNotAssigned(task);
    assignedStandbyTasks.taskIds().add(task);
}
@Test
public void shouldRefuseDoubleStandbyTask() {
    final ClientState state = new ClientState(1);
    state.assignStandby(TASK_0_0);

    // Assigning the same standby task twice must be rejected.
    assertThrows(IllegalArgumentException.class, () -> state.assignStandby(TASK_0_0));
}
/**
 * Delegates to the named {@code reduce} variant using an empty {@code NamedInternal} name.
 */
@Override
public KTable<K, V> reduce(final Reducer<V> adder,
                           final Reducer<V> subtractor,
                           final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) {
    return reduce(adder, subtractor, NamedInternal.empty(), materialized);
}
@Test
public void shouldNotAllowNullSubtractorOnReduce() {
    // A null subtractor must fail fast with an NPE before any processing happens.
    assertThrows(
        NullPointerException.class,
        () -> groupedTable.reduce(MockReducer.STRING_ADDER, null, Materialized.as("store")));
}
/**
 * Merges the logging, metrics and tracing plugin categories of {@code yamlConfig}
 * into a single insertion-ordered map keyed by plugin name.
 *
 * @param yamlConfig agent YAML configuration
 * @return merged plugin configurations; empty when no plugin section is present
 */
public static Map<String, PluginConfiguration> swap(final YamlAgentConfiguration yamlConfig) {
    YamlPluginCategoryConfiguration categories = yamlConfig.getPlugins();
    if (null != categories) {
        // LinkedHashMap keeps category order: logging, then metrics, then tracing.
        Map<String, PluginConfiguration> merged = new LinkedHashMap<>();
        merged.putAll(swap(categories.getLogging()));
        merged.putAll(swap(categories.getMetrics()));
        merged.putAll(swap(categories.getTracing()));
        return merged;
    }
    return Collections.emptyMap();
}
@Test
void assertSwapWithFile() throws IOException {
    try (InputStream configInput = Files.newInputStream(new File(getResourceURL(), CONFIG_PATH).toPath())) {
        YamlAgentConfiguration agentConfig = AgentYamlEngine.unmarshalYamlAgentConfiguration(configInput);
        Map<String, PluginConfiguration> swapped = YamlPluginsConfigurationSwapper.swap(agentConfig);
        // One entry per plugin category fixture.
        assertThat(swapped.size(), is(3));
        assertLogFixturePluginConfiguration(swapped.get("log_fixture"));
        assertMetricsPluginConfiguration(swapped.get("metrics_fixture"));
        assertTracingPluginConfiguration(swapped.get("tracing_fixture"));
    }
}
/**
 * Builds a full {@code MessagesRequestSpec} from simple query parameters, parsing the
 * time-range keyword (e.g. "42d") into a concrete time range.
 */
public MessagesRequestSpec simpleQueryParamsToFullRequestSpecification(final String query, final Set<String> streams,
                                                                       final String timerangeKeyword, final List<String> fields,
                                                                       final String sort, final SortSpec.Direction sortOrder,
                                                                       final int from, final int size) {
    return new MessagesRequestSpec(query, streams, timerangeParser.parseTimeRange(timerangeKeyword), sort, sortOrder, from, size, fields);
}
@Test
void throwsExceptionOnNullGroups() {
    // Null grouping fields are rejected (note: this exercises the 5-arg overload).
    final List<String> metrics = List.of("avg:joe");
    assertThrows(IllegalArgumentException.class,
            () -> toTest.simpleQueryParamsToFullRequestSpecification("*", Set.of(), "42d", null, metrics));
}
public static TrackPair makeTrackPairFromNopData(File nopFile) { try { return TrackPair.from(makeTracksFromNopData(nopFile)); } catch (IllegalArgumentException iae) { //throw a better exception when too many tracks are found throw new RuntimeException(nopFile.getAbsolutePath() + " caused an exception", iae); } }
@Test public void testMakeTrackPairFromNopData() { File testData = getResourceFile( "org/mitre/openaria/threading/MEARTS-11-05-19-trackData.txt" ); // File testData = new File("src/test/resources/org/mitre/openaria/threading/MEARTS-11-05-19-trackData.txt"); TrackPair pair = makeTrackPairFromNopData(testData); assertNotNull(pair); assertNotEquals(pair.track1(), pair.track2()); assertTrue(pair.track1().trackId().equals("236")); assertTrue(pair.track2().trackId().equals("204")); assertTrue(pair.track1().aircraftType().equals("B77W")); assertTrue(pair.track2().aircraftType().equals("A321")); }
/**
 * Resolves the SQL type of {@code expression} with no additional type mappings.
 */
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
@Test public void shouldThrowOnSimpleCase() { final Expression expression = new SimpleCaseExpression( TestExpressions.COL0, ImmutableList.of(new WhenClause(new IntegerLiteral(10), new StringLiteral("ten"))), Optional.empty() ); // When: assertThrows( UnsupportedOperationException.class, () -> expressionTypeManager.getExpressionSqlType(expression) ); }
/**
 * Resolves and validates the mapping fields, requiring the 'format' and 'path' options
 * and a non-empty resolved field list.
 */
List<MappingField> resolveAndValidateFields(List<MappingField> userFields, Map<String, ?> options) {
    // Both options are mandatory; report the first missing one (format before path).
    for (String requiredOption : new String[]{OPTION_FORMAT, OPTION_PATH}) {
        if (options.get(requiredOption) == null) {
            throw QueryException.error("Missing '" + requiredOption + "' option");
        }
    }
    List<MappingField> resolved = findMetadataResolver(options).resolveAndValidateFields(userFields, options);
    if (resolved.isEmpty()) {
        throw QueryException.error("The resolved field list is empty");
    }
    return resolved;
}
@Test
public void when_formatIsMissing_then_throws() {
    // With no options at all, the mandatory 'format' option is reported first.
    assertThatThrownBy(() -> resolvers.resolveAndValidateFields(emptyList(), emptyMap()))
            .isInstanceOf(QueryException.class)
            .hasMessageContaining("Missing 'format");
}
/**
 * Parses a JDBC URL into hostname, port, database path and query properties.
 * Rejects URLs that do not match {@code CONNECTION_URL_PATTERN} or lack an authority part.
 */
public JdbcUrl parse(final String jdbcUrl) {
    // Escape '%' so the pattern can be safely embedded in a format string of the error message.
    String escapedPattern = CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%");
    Matcher matcher = CONNECTION_URL_PATTERN.matcher(jdbcUrl);
    ShardingSpherePreconditions.checkState(matcher.matches(), () -> new UnrecognizedDatabaseURLException(jdbcUrl, escapedPattern));
    String authority = matcher.group(AUTHORITY_GROUP_KEY);
    ShardingSpherePreconditions.checkNotNull(authority, () -> new UnrecognizedDatabaseURLException(jdbcUrl, escapedPattern));
    return new JdbcUrl(parseHostname(authority), parsePort(authority),
            matcher.group(PATH_GROUP_KEY), parseQueryProperties(matcher.group(QUERY_GROUP_KEY)));
}
@Test
void assertParseMySQLJdbcUrlWithReplication() {
    // For a replication URL, only the first (master) endpoint is reported.
    JdbcUrl parsed = new StandardJdbcUrlParser().parse("jdbc:mysql:replication://master-ip:3306,slave-1-ip:3306,slave-2-ip:3306/demo_ds?useUnicode=true");
    assertThat(parsed.getHostname(), is("master-ip"));
    assertThat(parsed.getPort(), is(3306));
    assertThat(parsed.getDatabase(), is("demo_ds"));
    assertThat(parsed.getQueryProperties().size(), is(1));
    assertThat(parsed.getQueryProperties().getProperty("useUnicode"), is(Boolean.TRUE.toString()));
}
/**
 * Creates a link key from the given source and destination connect points.
 * Both endpoints must be non-null (checkNotNull throws NullPointerException otherwise).
 */
private LinkKey(ConnectPoint src, ConnectPoint dst) {
    this.src = checkNotNull(src);
    this.dst = checkNotNull(dst);
}
@Test(expected = NullPointerException.class)
public void testNullDst() {
    // A null destination must be rejected. The return value is irrelevant, so the
    // previously unused local variable (a dead store) has been removed.
    LinkKey.linkKey(SRC1, null);
}
/**
 * Builds the dept-data-permission WHERE condition for the given table, or returns null
 * when no extra condition is needed.
 */
@Override
public Expression getExpression(String tableName, Alias tableAlias) {
    // Only apply data-permission handling when there is a logged-in user.
    LoginUser loginUser = SecurityFrameworkUtils.getLoginUser();
    if (loginUser == null) {
        return null;
    }
    // Only apply data-permission handling for ADMIN-type users.
    if (ObjectUtil.notEqual(loginUser.getUserType(), UserTypeEnum.ADMIN.getValue())) {
        return null;
    }
    // Load the data permission, preferring the cached copy on the login context.
    DeptDataPermissionRespDTO deptDataPermission = loginUser.getContext(CONTEXT_KEY, DeptDataPermissionRespDTO.class);
    // Not cached yet: fetch it through the permission API.
    if (deptDataPermission == null) {
        deptDataPermission = permissionApi.getDeptDataPermission(loginUser.getId());
        if (deptDataPermission == null) {
            log.error("[getExpression][LoginUser({}) 获取数据权限为 null]", JsonUtils.toJsonString(loginUser));
            throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 未返回数据权限",
                    loginUser.getId(), tableName, tableAlias.getName()));
        }
        // Cache on the login context to avoid recomputing per statement.
        loginUser.setContext(CONTEXT_KEY, deptDataPermission);
    }
    // Case 1: ALL — user can see everything, so no condition is appended.
    if (deptDataPermission.getAll()) {
        return null;
    }
    // Case 2: can see neither any dept nor self — guaranteed zero visibility.
    if (CollUtil.isEmpty(deptDataPermission.getDeptIds())
            && Boolean.FALSE.equals(deptDataPermission.getSelf())) {
        return new EqualsTo(null, null); // WHERE null = null, guarantees an empty result set
    }
    // Case 3: build the dept and user conditions, then combine them.
    Expression deptExpression = buildDeptExpression(tableName, tableAlias, deptDataPermission.getDeptIds());
    Expression userExpression = buildUserExpression(tableName, tableAlias, deptDataPermission.getSelf(), loginUser.getId());
    if (deptExpression == null && userExpression == null) {
        // TODO: when no condition can be built, return "no rows" for now instead of throwing.
        log.warn("[getExpression][LoginUser({}) Table({}/{}) DeptDataPermission({}) 构建的条件为空]",
                JsonUtils.toJsonString(loginUser), tableName, tableAlias, JsonUtils.toJsonString(deptDataPermission));
        return EXPRESSION_NULL;
    }
    if (deptExpression == null) {
        return userExpression;
    }
    if (userExpression == null) {
        return deptExpression;
    }
    // Both dept visibility and self visibility: OR them — WHERE (dept_id IN ? OR user_id = ?)
    return new Parenthesis(new OrExpression(deptExpression, userExpression));
}
@Test // 全部数据权限 public void testGetExpression_allDeptDataPermission() { try (MockedStatic<SecurityFrameworkUtils> securityFrameworkUtilsMock = mockStatic(SecurityFrameworkUtils.class)) { // 准备参数 String tableName = "t_user"; Alias tableAlias = new Alias("u"); // mock 方法(LoginUser) LoginUser loginUser = randomPojo(LoginUser.class, o -> o.setId(1L) .setUserType(UserTypeEnum.ADMIN.getValue())); securityFrameworkUtilsMock.when(SecurityFrameworkUtils::getLoginUser).thenReturn(loginUser); // mock 方法(DeptDataPermissionRespDTO) DeptDataPermissionRespDTO deptDataPermission = new DeptDataPermissionRespDTO().setAll(true); when(permissionApi.getDeptDataPermission(same(1L))).thenReturn(deptDataPermission); // 调用 Expression expression = rule.getExpression(tableName, tableAlias); // 断言 assertNull(expression); assertSame(deptDataPermission, loginUser.getContext(DeptDataPermissionRule.CONTEXT_KEY, DeptDataPermissionRespDTO.class)); } }
/**
 * Returns the single element of {@code iterable}.
 *
 * @throws IllegalArgumentException if the iterable is null or contains more than one element
 * @throws NoSuchElementException   (from Iterator.next) if the iterable is empty
 */
public static <T> T getOnlyElement(Iterable<T> iterable) {
    if (iterable == null) {
        throw new IllegalArgumentException("iterable cannot be null.");
    }
    Iterator<T> it = iterable.iterator();
    T onlyElement = it.next();
    if (it.hasNext()) {
        throw new IllegalArgumentException(buildExceptionMessage(it, onlyElement));
    }
    return onlyElement;
}
@Test
void testGetOnlyElementNoSuchElementException() {
    // An empty iterable has no "only element": Iterator.next() raises NoSuchElementException.
    List<Object> empty = new ArrayList<>();
    assertThrows(NoSuchElementException.class, () -> CollectionUtils.getOnlyElement(empty));
}
/**
 * Sends a single templated mail: validates template, account, address and params,
 * records a send log, and (if the template is enabled) dispatches the actual send
 * asynchronously via MQ.
 *
 * @return the id of the created mail log
 */
@Override
public Long sendSingleMail(String mail, Long userId, Integer userType, String templateCode, Map<String, Object> templateParams) {
    // Validate the mail template.
    MailTemplateDO template = validateMailTemplate(templateCode);
    // Validate the mail account referenced by the template.
    MailAccountDO account = validateMailAccount(template.getAccountId());
    // Validate (and possibly normalize) the target address.
    mail = validateMail(mail);
    validateTemplateParams(template, templateParams);
    // Create the send log. If the template is disabled, nothing is sent — only logged.
    Boolean isSend = CommonStatusEnum.ENABLE.getStatus().equals(template.getStatus());
    String title = mailTemplateService.formatMailTemplateContent(template.getTitle(), templateParams);
    String content = mailTemplateService.formatMailTemplateContent(template.getContent(), templateParams);
    Long sendLogId = mailLogService.createMailLog(userId, userType, mail, account, template, content, templateParams, isSend);
    // Publish an MQ message so the mail is sent asynchronously.
    if (isSend) {
        mailProducer.sendMailSendMessage(sendLogId, mail, account.getId(), template.getNickname(), title, content);
    }
    return sendLogId;
}
@Test
public void testSendSingleMail_successWhenMailTemplateEnable() {
    // Prepare arguments.
    String mail = randomEmail();
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String templateCode = RandomUtils.randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
            .put("op", "login").build();
    // Mock the MailTemplateService methods: an ENABLED template with matching params.
    MailTemplateDO template = randomPojo(MailTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(mailTemplateService.getMailTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String title = RandomUtils.randomString();
    when(mailTemplateService.formatMailTemplateContent(eq(template.getTitle()), eq(templateParams)))
            .thenReturn(title);
    String content = RandomUtils.randomString();
    when(mailTemplateService.formatMailTemplateContent(eq(template.getContent()), eq(templateParams)))
            .thenReturn(content);
    // Mock the MailAccountService methods.
    MailAccountDO account = randomPojo(MailAccountDO.class);
    when(mailAccountService.getMailAccountFromCache(eq(template.getAccountId()))).thenReturn(account);
    // Mock the MailLogService methods.
    Long mailLogId = randomLongId();
    when(mailLogService.createMailLog(eq(userId), eq(userType), eq(mail), eq(account), eq(template),
            eq(content), eq(templateParams), eq(true))).thenReturn(mailLogId);
    // Invoke.
    Long resultMailLogId = mailSendService.sendSingleMail(mail, userId, userType, templateCode, templateParams);
    // Assert the returned log id.
    assertEquals(mailLogId, resultMailLogId);
    // Assert the asynchronous send was triggered.
    verify(mailProducer).sendMailSendMessage(eq(mailLogId), eq(mail), eq(account.getId()),
            eq(template.getNickname()), eq(title), eq(content));
}
/**
 * Handles a consumer-group heartbeat: creates/updates the member, maybe bumps the group
 * epoch and target assignment, reconciles the member's assignment, and builds the response.
 * Generated records are appended to the returned CoordinatorResult.
 *
 * @throws ApiException on validation failures raised by the called helpers
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group.
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group, memberId, memberEpoch, ownedTopicPartitions, createIfNotExists, false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group, memberId, memberEpoch, instanceId, ownedTopicPartitions, createIfNotExists, false, records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count the updated member if it is not yet known to the group.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.", groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );

    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
    //    as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
// End-to-end walk through the reconciliation state machine: a third member joins a
// stable two-member group and partitions move via revocation/acknowledgement until
// all three members are Stable again.
@Test
public void testReconciliationProcess() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    String memberId3 = Uuid.randomUuid().toString();

    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";

    // Create a context with one consumer group containing two members.
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addTopic(barTopicId, barTopicName, 3)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(new ConsumerGroupMember.Builder(memberId1)
                .setState(MemberState.STABLE)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setRebalanceTimeoutMs(5000)
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2),
                    mkTopicAssignment(barTopicId, 0, 1)))
                .build())
            .withMember(new ConsumerGroupMember.Builder(memberId2)
                .setState(MemberState.STABLE)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setRebalanceTimeoutMs(5000)
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 3, 4, 5),
                    mkTopicAssignment(barTopicId, 2)))
                .build())
            .withAssignment(memberId1, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2),
                mkTopicAssignment(barTopicId, 0, 1)))
            .withAssignment(memberId2, mkAssignment(
                mkTopicAssignment(fooTopicId, 3, 4, 5),
                mkTopicAssignment(barTopicId, 2)))
            .withAssignmentEpoch(10))
        .build();

    // Prepare new assignment for the group.
    assignor.prepareGroupAssignment(new GroupAssignment(
        new HashMap<String, MemberAssignment>() {
            {
                put(memberId1, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1),
                    mkTopicAssignment(barTopicId, 0)
                )));
                put(memberId2, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 2, 3),
                    mkTopicAssignment(barTopicId, 2)
                )));
                put(memberId3, new MemberAssignmentImpl(mkAssignment(
                    mkTopicAssignment(fooTopicId, 4, 5),
                    mkTopicAssignment(barTopicId, 1)
                )));
            }
        }
    ));

    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result;

    // Members in the group are in Stable state.
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId1));
    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, context.consumerGroupState(groupId));

    // Member 3 joins the group. This triggers the computation of a new target assignment
    // for the group. Member 3 does not get any assigned partitions yet because they are
    // all owned by other members. However, it transitions to epoch 11 and the
    // Unreleased Partitions state.
    result = context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId3)
            .setMemberEpoch(0)
            .setRebalanceTimeoutMs(5000)
            .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
            .setServerAssignor("range")
            .setTopicPartitions(Collections.emptyList()));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId3)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()),
        result.response()
    );

    // We only check the last record as the subscription/target assignment updates are
    // already covered by other tests.
    assertRecordEquals(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3)
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(0)
            .build()),
        result.records().get(result.records().size() - 1)
    );

    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 1 heartbeats. It remains at epoch 10 but transitions to Unrevoked Partitions
    // state until it acknowledges the revocation of its partitions. The response contains the new
    // assignment without the partitions that must be revoked.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId1)
        .setMemberEpoch(10));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId1)
            .setMemberEpoch(10)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Arrays.asList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(fooTopicId)
                        .setPartitions(Arrays.asList(0, 1)),
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Collections.singletonList(0))
                ))),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId1)
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1),
                mkTopicAssignment(barTopicId, 0)))
            .setPartitionsPendingRevocation(mkAssignment(
                mkTopicAssignment(fooTopicId, 2),
                mkTopicAssignment(barTopicId, 1)))
            .build())),
        result.records()
    );

    assertEquals(MemberState.UNREVOKED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId1));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 2 heartbeats. It remains at epoch 10 but transitions to Unrevoked Partitions
    // state until it acknowledges the revocation of its partitions. The response contains the new
    // assignment without the partitions that must be revoked.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId2)
        .setMemberEpoch(10));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId2)
            .setMemberEpoch(10)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Arrays.asList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(fooTopicId)
                        .setPartitions(Collections.singletonList(3)),
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Collections.singletonList(2))
                ))),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId2)
            .setState(MemberState.UNREVOKED_PARTITIONS)
            .setMemberEpoch(10)
            .setPreviousMemberEpoch(10)
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(fooTopicId, 3),
                mkTopicAssignment(barTopicId, 2)))
            .setPartitionsPendingRevocation(mkAssignment(
                mkTopicAssignment(fooTopicId, 4, 5)))
            .build())),
        result.records()
    );

    assertEquals(MemberState.UNREVOKED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 3 heartbeats. The response does not contain any assignment
    // because the member is still waiting on other members to revoke partitions.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId3)
        .setMemberEpoch(11));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId3)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3)
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(11)
            .build())),
        result.records()
    );

    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 1 acknowledges the revocation of the partitions. It does so by providing the
    // partitions that it still owns in the request. This allows him to transition to epoch 11
    // and to the Stable state.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId1)
        .setMemberEpoch(10)
        .setTopicPartitions(Arrays.asList(
            new ConsumerGroupHeartbeatRequestData.TopicPartitions()
                .setTopicId(fooTopicId)
                .setPartitions(Arrays.asList(0, 1)),
            new ConsumerGroupHeartbeatRequestData.TopicPartitions()
                .setTopicId(barTopicId)
                .setPartitions(Collections.singletonList(0))
        )));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId1)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId1)
            .setState(MemberState.STABLE)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1),
                mkTopicAssignment(barTopicId, 0)))
            .build())),
        result.records()
    );

    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId1));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 2 heartbeats but without acknowledging the revocation yet. This is basically a no-op.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId2)
        .setMemberEpoch(10));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId2)
            .setMemberEpoch(10)
            .setHeartbeatIntervalMs(5000),
        result.response()
    );

    assertEquals(Collections.emptyList(), result.records());
    assertEquals(MemberState.UNREVOKED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 3 heartbeats. It receives the partitions revoked by member 1 but remains
    // in Unreleased Partitions state because it still waits on other partitions.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId3)
        .setMemberEpoch(11));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId3)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Collections.singletonList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Collections.singletonList(1))))),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3)
            .setState(MemberState.UNRELEASED_PARTITIONS)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(11)
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(barTopicId, 1)))
            .build())),
        result.records()
    );

    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 3 heartbeats. Member 2 has not acknowledged the revocation of its partition so
    // member keeps its current assignment.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId3)
        .setMemberEpoch(11));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId3)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000),
        result.response()
    );

    assertEquals(Collections.emptyList(), result.records());
    assertEquals(MemberState.UNRELEASED_PARTITIONS, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 2 acknowledges the revocation of the partitions. It does so by providing the
    // partitions that it still owns in the request. This allows him to transition to epoch 11
    // and to the Stable state.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId2)
        .setMemberEpoch(10)
        .setTopicPartitions(Arrays.asList(
            new ConsumerGroupHeartbeatRequestData.TopicPartitions()
                .setTopicId(fooTopicId)
                .setPartitions(Collections.singletonList(3)),
            new ConsumerGroupHeartbeatRequestData.TopicPartitions()
                .setTopicId(barTopicId)
                .setPartitions(Collections.singletonList(2))
        )));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId2)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Arrays.asList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(fooTopicId)
                        .setPartitions(Arrays.asList(2, 3)),
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Collections.singletonList(2))
                ))),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId2)
            .setState(MemberState.STABLE)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(10)
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(fooTopicId, 2, 3),
                mkTopicAssignment(barTopicId, 2)))
            .build())),
        result.records()
    );

    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId2));
    assertEquals(ConsumerGroup.ConsumerGroupState.RECONCILING, context.consumerGroupState(groupId));

    // Member 3 heartbeats to acknowledge its current assignment. It receives all its partitions and
    // transitions to Stable state.
    result = context.consumerGroupHeartbeat(new ConsumerGroupHeartbeatRequestData()
        .setGroupId(groupId)
        .setMemberId(memberId3)
        .setMemberEpoch(11)
        .setTopicPartitions(Collections.singletonList(
            new ConsumerGroupHeartbeatRequestData.TopicPartitions()
                .setTopicId(barTopicId)
                .setPartitions(Collections.singletonList(1)))));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId3)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Arrays.asList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(fooTopicId)
                        .setPartitions(Arrays.asList(4, 5)),
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Collections.singletonList(1))))),
        result.response()
    );

    assertRecordsEquals(Collections.singletonList(
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, new ConsumerGroupMember.Builder(memberId3)
            .setState(MemberState.STABLE)
            .setMemberEpoch(11)
            .setPreviousMemberEpoch(11)
            .setAssignedPartitions(mkAssignment(
                mkTopicAssignment(fooTopicId, 4, 5),
                mkTopicAssignment(barTopicId, 1)))
            .build())),
        result.records()
    );

    assertEquals(MemberState.STABLE, context.consumerGroupMemberState(groupId, memberId3));
    assertEquals(ConsumerGroup.ConsumerGroupState.STABLE, context.consumerGroupState(groupId));
}
/** Returns the names of all metadata properties currently stored; empty array when none. */
public String[] names() {
    return metadata.keySet().stream().toArray(String[]::new);
}
/** Verifies that names() tracks the set of added keys: empty, then one, then two entries. */
@Test
public void testNames() {
    String[] names = null;
    Metadata meta = new Metadata();

    // Fresh instance exposes no names.
    names = meta.names();
    assertEquals(0, names.length);

    // One added property is reflected by name and by count.
    meta.add("name-one", "value");
    names = meta.names();
    assertEquals(1, names.length);
    assertEquals("name-one", names[0]);

    // A second, distinct key grows the name list.
    meta.add("name-two", "value");
    names = meta.names();
    assertEquals(2, names.length);
}
/**
 * Validates a search query: parses it with the Lucene parser, runs every registered
 * validator against the parsed query, and aggregates their messages into one response.
 * Any exception (parse failure included) is converted into an error response rather
 * than propagated.
 */
@Override
public ValidationResponse validate(ValidationRequest req) {
    // Nothing to check for an empty query.
    if (req.isEmptyQuery()) {
        return ValidationResponse.ok();
    }
    try {
        final ParsedQuery query = luceneQueryParser.parse(req.rawQuery());
        final ValidationContext validationContext = ValidationContext.builder()
                .request(req)
                .query(query)
                .availableFields(fields.fieldTypesByStreamIds(req.streams(), req.timerange()))
                .build();
        // Collect the messages of every validator into a single flat list.
        final List<ValidationMessage> messages = validators.stream()
                .flatMap(validator -> validator.validate(validationContext).stream())
                .collect(Collectors.toList());
        return ValidationResponse.withDetectedStatus(messages);
    } catch (Exception e) {
        // Deliberately broad: any failure during validation becomes an error response.
        return ValidationResponse.error(ValidationErrors.create(e));
    }
}
/** Verifies that a validator-produced ERROR message yields an ERROR response with one explanation. */
@Test
void validateWithError() {
    // validator returns one ERROR (query parsing error)
    final QueryValidator queryValidator = context -> Collections.singletonList(
            ValidationMessage.builder(ValidationStatus.ERROR, ValidationType.QUERY_PARSING_ERROR)
                    .errorMessage("Query can't be parsed")
                    .build());

    final QueryValidationServiceImpl service = new QueryValidationServiceImpl(
            LUCENE_QUERY_PARSER,
            FIELD_TYPES_SERVICE,
            Collections.singleton(queryValidator));

    final ValidationResponse validationResponse = service.validate(req());
    // Overall status is detected from the single ERROR explanation.
    assertThat(validationResponse.status()).isEqualTo(ValidationStatus.ERROR);
    assertThat(validationResponse.explanations())
            .hasOnlyOneElementSatisfying(message -> {
                assertThat(message.validationType()).isEqualTo(ValidationType.QUERY_PARSING_ERROR);
                assertThat(message.validationStatus()).isEqualTo(ValidationStatus.ERROR);
            });
}
/**
 * Surfaces the failure of any already-completed future by calling {@code get()} on it
 * (which rethrows a wrapped execution failure). Futures that are still running are
 * skipped, so this never blocks.
 */
public static void checkAllDone(Collection<Future<?>> futures) throws Exception {
    for (Future<?> future : futures) {
        if (!future.isDone()) {
            continue;
        }
        future.get();
    }
}
/** Verifies that checkAllDone rethrows a completed future's failure wrapped in ExecutionException. */
@Test
public void testGetAllDoneThrowsException_whenSomeFutureHasException() {
    InterruptedException exception = new InterruptedException();
    Collection<Future<?>> futures = List.of(InternalCompletableFuture.completedExceptionally(exception));

    // the future is completedExceptionally with an InterruptedException (thread was not
    // interrupted during future.get()), so it is normal to expect
    // InterruptedException wrapped within an ExecutionException.
    assertThatThrownBy(() -> FutureUtil.checkAllDone(futures))
            .isInstanceOf(ExecutionException.class)
            .cause().has(rootCause(InterruptedException.class));
}
public EndpointResponse handleKsqlStatements( final KsqlSecurityContext securityContext, final KsqlRequest request ) { // CHECKSTYLE_RULES.ON: JavaNCSS // CHECKSTYLE_RULES.ON: CyclomaticComplexity // Set masked sql statement if request is not from OldApiUtils.handleOldApiRequest ApiServerUtils.setMaskedSqlIfNeeded(request); QueryLogger.info("Received: " + request.toStringWithoutQuery(), request.getMaskedKsql()); throwIfNotConfigured(); activenessRegistrar.updateLastRequestTime(); try { CommandStoreUtil.httpWaitForCommandSequenceNumber( commandRunner.getCommandQueue(), request, distributedCmdResponseTimeout); final Map<String, Object> configProperties = request.getConfigOverrides(); denyListPropertyValidator.validateAll(configProperties); final KsqlRequestConfig requestConfig = new KsqlRequestConfig(request.getRequestProperties()); final List<ParsedStatement> statements = ksqlEngine.parse(request.getUnmaskedKsql()); validator.validate( SandboxedServiceContext.create(securityContext.getServiceContext()), statements, new SessionProperties( configProperties, localHost, localUrl, requestConfig.getBoolean(KsqlRequestConfig.KSQL_REQUEST_INTERNAL_REQUEST), request.getSessionVariables() ), request.getUnmaskedKsql() ); // log validated statements for query anonymization statements.forEach(s -> { if (s.getUnMaskedStatementText().toLowerCase().contains("terminate") || s.getUnMaskedStatementText().toLowerCase().contains("drop")) { QueryLogger.info("Query terminated", s.getMaskedStatementText()); } else { QueryLogger.info("Query created", s.getMaskedStatementText()); } }); final KsqlEntityList entities = handler.execute( securityContext, statements, new SessionProperties( configProperties, localHost, localUrl, requestConfig.getBoolean(KsqlRequestConfig.KSQL_REQUEST_INTERNAL_REQUEST), request.getSessionVariables() ) ); QueryLogger.info( "Processed successfully: " + request.toStringWithoutQuery(), request.getMaskedKsql() ); addCommandRunnerWarning( entities, commandRunnerWarning); 
return EndpointResponse.ok(entities); } catch (final KsqlRestException e) { QueryLogger.info( "Processed unsuccessfully: " + request.toStringWithoutQuery(), request.getMaskedKsql(), e ); throw e; } catch (final KsqlStatementException e) { QueryLogger.info( "Processed unsuccessfully: " + request.toStringWithoutQuery(), request.getMaskedKsql(), e ); final EndpointResponse response; if (e.getProblem() == KsqlStatementException.Problem.STATEMENT) { response = Errors.badStatement(e.getRawUnloggedDetails(), e.getSqlStatement()); } else if (e.getProblem() == KsqlStatementException.Problem.OTHER) { response = Errors.serverErrorForStatement(e, e.getSqlStatement()); } else { response = Errors.badRequest(e); } return errorHandler.generateResponse(e, response); } catch (final KsqlException e) { QueryLogger.info( "Processed unsuccessfully: " + request.toStringWithoutQuery(), request.getMaskedKsql(), e ); return errorHandler.generateResponse(e, Errors.badRequest(e)); } catch (final Exception e) { QueryLogger.info( "Processed unsuccessfully: " + request.toStringWithoutQuery(), request.getMaskedKsql(), e ); return errorHandler.generateResponse( e, Errors.serverErrorForStatement(e, request.getMaskedKsql()) ); } }
/** Verifies that a not-yet-configured resource rejects statements with 503 "Server initializing". */
@Test
public void shouldThrowOnHandleStatementIfNotConfigured() {
    // Given: a freshly constructed resource that has not been configured.
    ksqlResource = new KsqlResource(
        ksqlEngine,
        commandRunner,
        DISTRIBUTED_COMMAND_RESPONSE_TIMEOUT,
        activenessRegistrar,
        (ec, sc) -> InjectorChain.of(
            schemaInjectorFactory.apply(sc),
            topicInjectorFactory.apply(ec),
            new TopicDeleteInjector(ec, sc)),
        Optional.of(authorizationValidator),
        errorsHandler,
        denyListPropertyValidator,
        commandRunnerWarning
    );

    // When:
    final KsqlRestException e = assertThrows(
        KsqlRestException.class,
        () -> ksqlResource.handleKsqlStatements(
            securityContext,
            new KsqlRequest("query", emptyMap(), emptyMap(), null)
        )
    );

    // Then: service-unavailable status with the initializing message.
    assertThat(e, exceptionStatusCode(CoreMatchers.is(SERVICE_UNAVAILABLE.code())));
    assertThat(e, exceptionErrorMessage(errorMessage(Matchers.is("Server initializing"))));
}
/**
 * Sorts the given array using the default (ascending) direction by delegating to the
 * direction-aware overload.
 *
 * @param input the array to sort
 * @return the sorted list
 */
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
    description = "The array to sort") final List<T> input) {
  // Ascending is the documented default when the caller supplies no direction.
  final String defaultDirection = "ASC";
  return arraySortWithDirection(input, defaultDirection);
}
/** Verifies that default (ascending) sorting places all null elements after the non-null values. */
@Test
public void shouldSortNullsToEnd() {
    final List<String> input = Arrays.asList(null, "foo", null, "bar", null);
    final List<String> output = udf.arraySortDefault(input);
    // Non-null values sorted ascending, nulls pushed to the tail.
    assertThat(output, contains("bar", "foo", null, null, null));
}
@Override public ConfigAdvanceInfo findConfigAdvanceInfo(final String dataId, final String group, final String tenant) { final String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant; try { List<String> configTagList = this.selectTagByConfig(dataId, group, tenant); ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO); ConfigAdvanceInfo configAdvance = this.jt.queryForObject(configInfoMapper.select( Arrays.asList("gmt_create", "gmt_modified", "src_user", "src_ip", "c_desc", "c_use", "effect", "type", "c_schema"), Arrays.asList("data_id", "group_id", "tenant_id")), new Object[] {dataId, group, tenantTmp}, CONFIG_ADVANCE_INFO_ROW_MAPPER); if (configTagList != null && !configTagList.isEmpty()) { StringBuilder configTagsTmp = new StringBuilder(); for (String configTag : configTagList) { if (configTagsTmp.length() == 0) { configTagsTmp.append(configTag); } else { configTagsTmp.append(',').append(configTag); } } configAdvance.setConfigTags(configTagsTmp.toString()); } return configAdvance; } catch (EmptyResultDataAccessException e) { // Indicates that the data does not exist, returns null return null; } catch (CannotGetJdbcConnectionException e) { LogUtil.FATAL_LOG.error("[db-error] " + e, e); throw e; } }
/**
 * Verifies findConfigAdvanceInfo across three scenarios: happy path (schema and joined
 * tags returned), missing row (EmptyResultDataAccessException mapped to null), and a
 * broken connection (CannotGetJdbcConnectionException propagated).
 */
@Test
void testFindConfigAdvanceInfo() {
    String dataId = "dataId1324";
    String group = "group23546";
    String tenant = "tenant13245";

    //mock select tags
    List<String> mockTags = Arrays.asList("tag1", "tag2", "tag3");
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {dataId, group, tenant}),
            eq(String.class))).thenReturn(mockTags);
    String schema = "schema12345654";
    //mock select config advance
    ConfigAdvanceInfo mockedAdvance = new ConfigAdvanceInfo();
    mockedAdvance.setSchema(schema);
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
            eq(CONFIG_ADVANCE_INFO_ROW_MAPPER))).thenReturn(mockedAdvance);

    //execute return mock obj
    ConfigAdvanceInfo configAdvanceInfo = externalConfigInfoPersistService.findConfigAdvanceInfo(dataId, group, tenant);
    //expect check schema & tags: tags must be comma-joined in list order.
    assertEquals(mockedAdvance.getSchema(), configAdvanceInfo.getSchema());
    assertEquals(String.join(",", mockTags), configAdvanceInfo.getConfigTags());

    //mock EmptyResultDataAccessException
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
            eq(CONFIG_ADVANCE_INFO_ROW_MAPPER))).thenThrow(new EmptyResultDataAccessException(1));
    //expect return null (missing row is not an error).
    assertNull(externalConfigInfoPersistService.findConfigAdvanceInfo(dataId, group, tenant));

    //mock CannotGetJdbcConnectionException
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
            eq(CONFIG_ADVANCE_INFO_ROW_MAPPER))).thenThrow(new CannotGetJdbcConnectionException("mock exp"));
    //expect throw exception (connection problems must propagate).
    try {
        externalConfigInfoPersistService.findConfigAdvanceInfo(dataId, group, tenant);
        assertFalse(true);
    } catch (Exception e) {
        assertTrue(e instanceof CannotGetJdbcConnectionException);
        assertTrue(e.getMessage().endsWith("mock exp"));
    }
}
/**
 * Rehydrates the {@link WindowingStrategy} attached to the PCollection with the given id
 * from portable pipeline {@code components}.
 *
 * @param collectionId id of the PCollection whose windowing strategy is wanted
 * @param components   portable components containing the PCollection and its strategy
 * @return the hydrated strategy, cast to use {@link BoundedWindow}
 * @throws IllegalStateException if the strategy proto cannot be hydrated
 */
public static WindowingStrategy<?, BoundedWindow> getWindowStrategy(
    String collectionId, RunnerApi.Components components) {
  RehydratedComponents rehydratedComponents = RehydratedComponents.forComponents(components);
  // Look up the strategy proto via the PCollection's windowingStrategyId reference.
  RunnerApi.WindowingStrategy windowingStrategyProto =
      components.getWindowingStrategiesOrThrow(
          components.getPcollectionsOrThrow(collectionId).getWindowingStrategyId());
  WindowingStrategy<?, ?> windowingStrategy;
  try {
    windowingStrategy =
        WindowingStrategyTranslation.fromProto(windowingStrategyProto, rehydratedComponents);
  } catch (Exception e) {
    throw new IllegalStateException(
        String.format(
            "Unable to hydrate GroupByKey windowing strategy %s.", windowingStrategyProto),
        e);
  }
  // Every concrete window type extends BoundedWindow, so this unchecked cast is safe.
  @SuppressWarnings("unchecked")
  WindowingStrategy<?, BoundedWindow> ret = (WindowingStrategy<?, BoundedWindow>) windowingStrategy;
  return ret;
}
/** Verifies that a registered windowing strategy round-trips through proto components intact. */
@Test
public void testGetWindowStrategy() throws IOException {
    SdkComponents components = SdkComponents.create();
    String environmentId =
        components.registerEnvironment(Environments.createDockerEnvironment("java"));
    // Build a fully-specified strategy so every field participates in the round trip.
    WindowingStrategy<Object, IntervalWindow> expected =
        WindowingStrategy.of(FixedWindows.of(Duration.standardMinutes(1)))
            .withMode(WindowingStrategy.AccumulationMode.DISCARDING_FIRED_PANES)
            .withTimestampCombiner(TimestampCombiner.END_OF_WINDOW)
            .withAllowedLateness(Duration.ZERO)
            .withEnvironmentId(environmentId);
    components.registerWindowingStrategy(expected);
    String collectionId =
        components.registerPCollection(
            PCollection.createPrimitiveOutputInternal(
                    Pipeline.create(), expected, PCollection.IsBounded.BOUNDED, VoidCoder.of())
                .setName("name"));

    WindowingStrategy<?, ?> actual =
        WindowUtils.getWindowStrategy(collectionId, components.toComponents());

    assertEquals(expected, actual);
}
/** Returns the aggregated value, or empty when no aggregation has happened yet. */
@Override
public Optional<Long> getValue() {
    return initialized ? Optional.of(value) : Optional.empty();
}
/** Verifies that the counter exposes no value before any aggregation occurred. */
@Test
public void no_value_when_no_aggregation() {
    assertThat(sumCounter.getValue()).isNotPresent();
}
/** Returns the offset store used by this consumer implementation. */
public OffsetStore getOffsetStore() {
    return offsetStore;
}
/** Verifies the getter returns the exact OffsetStore instance the consumer was set up with. */
@Test
public void testGetOffsetStore() {
    assertEquals(offsetStore, defaultMQPushConsumerImpl.getOffsetStore());
}
/**
 * Runs {@code action} with this stage's result on the given executor once the underlying
 * task completes, producing a {@code Void} stage.
 *
 * <p>Fix: the blocking lambda previously ignored the flatMap parameter {@code t} and
 * re-read {@code _task.get()}; it now consumes {@code t} directly, which is the same
 * value but avoids the redundant re-read of task state inside the closure.
 */
@Override
public ParSeqBasedCompletionStage<Void> thenAcceptAsync(Consumer<? super T> action, Executor executor) {
  return nextStageByComposingTask(_task.flatMap("thenAcceptAsync", t -> Task.blocking(() -> {
    action.accept(t);
    return null;
  }, executor)));
}
/** Verifies that thenAcceptAsync runs the consumer on the supplied executor's thread. */
@Test
public void testThenAcceptAsync() throws Exception {
    CompletionStage<String> completionStage = createTestStage(TESTVALUE1);
    CountDownLatch waitLatch = new CountDownLatch(1);
    completionStage.thenAcceptAsync(r -> {
        // Consumer must execute on the mock executor's named thread.
        assertEquals(THREAD_NAME_VALUE, Thread.currentThread().getName());
        waitLatch.countDown();
    }, _mockExecutor);
    finish(completionStage);
    waitLatch.await(1000, TimeUnit.MILLISECONDS);
}
/**
 * Submits the task for execution unless a task with the same signature is already
 * running. Returns {@code true} on successful submission, {@code false} when the task
 * is a duplicate or the executor rejected it.
 */
public boolean submit(PriorityLeaderTask task) {
    final long signature = task.getSignature();
    synchronized (runningTasks) {
        // Reject duplicates: one running task per signature.
        if (runningTasks.containsKey(signature)) {
            return false;
        }
        try {
            runningTasks.put(signature, executor.submit(task));
            return true;
        } catch (RejectedExecutionException e) {
            LOG.warn("submit task {} failed.", signature, e);
            return false;
        }
    }
}
/**
 * Verifies that duplicate running tasks are rejected, distinct tasks are accepted, and
 * finished tasks are eventually removed from the executor's running set.
 */
@Test
public void testSubmit() {
    // submit task
    LeaderTask task1 = new TestLeaderTask(1L);
    Assert.assertTrue(executor.submit(task1));
    Assert.assertEquals(1, executor.getTaskNum());

    // submit same running task error
    Assert.assertFalse(executor.submit(task1));
    Assert.assertEquals(1, executor.getTaskNum());

    // submit another task
    LeaderTask task2 = new TestLeaderTask(2L);
    Assert.assertTrue(executor.submit(task2));
    Assert.assertEquals(2, executor.getTaskNum());

    // wait for tasks run to end
    try {
        // checker thread interval is 1s
        // sleep 3s
        Thread.sleep(SLEEP_MS * 300);
        Assert.assertEquals(0, executor.getTaskNum());
    } catch (InterruptedException e) {
        LOG.error("error", e);
    }
}
/**
 * Adds a human-readable "Retrying task in [...]" event to the step's timeline when the
 * step ended in a retryable failure state (user, platform, or timeout failure).
 * Non-failure states add nothing.
 */
@VisibleForTesting
void updateRetryDelayTimeToTimeline(StepRuntimeSummary runtimeSummary) {
  StepInstance.Status status = runtimeSummary.getRuntimeState().getStatus();
  boolean isRetryableFailure =
      status == StepInstance.Status.USER_FAILED
          || status == StepInstance.Status.PLATFORM_FAILED
          || status == StepInstance.Status.TIMEOUT_FAILED;
  if (!isRetryableFailure) {
    return;
  }
  int nextRetryDelayInSecs = runtimeSummary.getStepRetry().getNextRetryDelay(status);
  String humanReadableRetryTime =
      DurationHelper.humanReadableFormat(Duration.ofSeconds(nextRetryDelayInSecs));
  runtimeSummary.addTimeline(
      TimelineLogEvent.info("Retrying task in [%s]", humanReadableRetryTime));
}
/**
 * Verifies the timeline retry message for TIMEOUT_FAILED under fixed and exponential
 * backoff policies, and that non-failure states (PAUSED) add no timeline event.
 */
@Test
public void testUpdateTimeoutRetryDelayTimeToTimeline() {
    StepRuntimeState runtimeState = new StepRuntimeState();
    runtimeState.setStatus(StepInstance.Status.TIMEOUT_FAILED);
    Timeline timeline = new Timeline(new ArrayList<>());
    StepInstance.StepRetry stepRetry = new StepInstance.StepRetry();
    stepRetry.setRetryable(true);
    // Fixed backoff of 200s -> "3m 20s".
    RetryPolicy.FixedBackoff fixedBackoff =
        RetryPolicy.FixedBackoff.builder().timeoutRetryBackoffInSecs(200L).build();
    stepRetry.setBackoff(fixedBackoff);
    StepRuntimeSummary runtimeSummary =
        StepRuntimeSummary.builder()
            .timeline(timeline)
            .runtimeState(runtimeState)
            .stepRetry(stepRetry)
            .build();
    maestroTask.updateRetryDelayTimeToTimeline(runtimeSummary);
    List<TimelineEvent> timelineEvents = timeline.getTimelineEvents();
    assertThat(timelineEvents)
        .hasSize(1)
        .usingRecursiveFieldByFieldElementComparatorIgnoringFields("timestamp")
        .contains(TimelineLogEvent.info("Retrying task in [3m 20s]"));

    // Exponential backoff capped at the 600s limit -> "10m".
    RetryPolicy.ExponentialBackoff exponentialBackoff =
        RetryPolicy.ExponentialBackoff.builder()
            .timeoutRetryExponent(2)
            .timeoutRetryLimitInSecs(600L)
            .timeoutRetryBackoffInSecs(100L)
            .build();
    stepRetry.setBackoff(exponentialBackoff);
    stepRetry.setTimeoutRetries(6);
    timelineEvents.clear();
    maestroTask.updateRetryDelayTimeToTimeline(runtimeSummary);
    assertThat(timelineEvents)
        .hasSize(1)
        .usingRecursiveFieldByFieldElementComparatorIgnoringFields("timestamp")
        .contains(TimelineLogEvent.info("Retrying task in [10m]"));

    // Non-failure status must not add any timeline entry.
    timelineEvents.clear();
    runtimeState.setStatus(StepInstance.Status.PAUSED);
    maestroTask.updateRetryDelayTimeToTimeline(runtimeSummary);
    assertThat(timelineEvents).isEmpty();
}
/**
 * Reports whether webkit is missing on a supported distribution: true when the webkit
 * path is absent (null or not containing "webkit") while the environment name matches
 * the supported distribution.
 */
public synchronized boolean isWebkitUnavailable() {
    String webkitPath = getWebkitPath();
    boolean webkitMissing = webkitPath == null || !webkitPath.contains("webkit");
    boolean onSupportedDistribution = getEnvironmentName().contains(SUPPORTED_DISTRIBUTION_NAME);
    return webkitMissing && onSupportedDistribution;
}
/** Verifies that both Ubuntu environment variants report webkit as available. */
@Test
public void testIsWebkitUnavailable_ubuntu() {
    EnvironmentUtilsMock mock = new EnvironmentUtilsMock( Case.UBUNTU );
    assertFalse( mock.getMockedInstance().isWebkitUnavailable() );
    // Even the "wrong" Ubuntu variant is not treated as webkit-unavailable.
    mock = new EnvironmentUtilsMock( Case.UBUNTU_WRONG );
    assertFalse( mock.getMockedInstance().isWebkitUnavailable() );
}
/**
 * Finds the value of the first entry whose name matches {@code variableName} and whose
 * value is non-null; empty when no such entry exists.
 */
static Optional<Object> getValueFromKiePMMLNameValuesByVariableName(final String variableName,
                                                                    final List<KiePMMLNameValue> kiePMMLNameValues) {
    for (KiePMMLNameValue nameValue : kiePMMLNameValues) {
        // Entries with a null value are ignored even if the name matches.
        if (nameValue.getValue() != null && nameValue.getName().equals(variableName)) {
            return Optional.of(nameValue.getValue());
        }
    }
    return Optional.empty();
}
/** Verifies lookup by variable name: empty when absent, present with the value when added. */
@Test
void getValueFromKiePMMLNameValuesByVariableName() {
    final String variableName = "variableName";
    // Entries with non-matching names only.
    final List<KiePMMLNameValue> kiePMMLNameValues = IntStream.range(0, 3).mapToObj(i -> new KiePMMLNameValue(
            "val-" + i, i)).collect(Collectors.toList());
    Optional<Object> retrieved = KiePMMLOutputField.getValueFromKiePMMLNameValuesByVariableName(variableName,
                                                                                               kiePMMLNameValues);
    assertThat(retrieved).isNotPresent();
    // After adding a matching entry, its value must be returned.
    final Object variableValue = 243.94;
    kiePMMLNameValues.add(new KiePMMLNameValue(variableName, variableValue));
    retrieved = KiePMMLOutputField.getValueFromKiePMMLNameValuesByVariableName(variableName, kiePMMLNameValues);
    assertThat(retrieved).isPresent();
    assertThat(retrieved.get()).isEqualTo(variableValue);
}
/**
 * Resolves all topic/partition pairs whose topic name matches {@code topicRegex} and
 * whose partition id falls within {@code [startPartition, endPartition]} (inclusive).
 * Internal topics are excluded.
 *
 * @throws Throwable any failure from the admin client's listTopics/describeTopics calls
 */
static Collection<TopicPartition> getMatchingTopicPartitions(
        Admin adminClient, String topicRegex, int startPartition, int endPartition)
        throws Throwable {
    final Pattern topicNamePattern = Pattern.compile(topicRegex);

    // first get list of matching topics
    List<String> matchedTopics = new ArrayList<>();
    ListTopicsResult res = adminClient.listTopics(
        new ListTopicsOptions().timeoutMs(ADMIN_REQUEST_TIMEOUT));
    Map<String, TopicListing> topicListingMap = res.namesToListings().get();
    for (Map.Entry<String, TopicListing> topicListingEntry: topicListingMap.entrySet()) {
        // Skip internal topics; the regex must match the full topic name.
        if (!topicListingEntry.getValue().isInternal()
            && topicNamePattern.matcher(topicListingEntry.getKey()).matches()) {
            matchedTopics.add(topicListingEntry.getKey());
        }
    }

    // create a list of topic/partitions
    List<TopicPartition> out = new ArrayList<>();
    DescribeTopicsResult topicsResult = adminClient.describeTopics(
        matchedTopics, new DescribeTopicsOptions().timeoutMs(ADMIN_REQUEST_TIMEOUT));
    Map<String, TopicDescription> topicDescriptionMap = topicsResult.allTopicNames().get();
    for (TopicDescription desc: topicDescriptionMap.values()) {
        List<TopicPartitionInfo> partitions = desc.partitions();
        for (TopicPartitionInfo info: partitions) {
            // Keep only partitions inside the inclusive [startPartition, endPartition] range.
            if ((info.partition() >= startPartition) && (info.partition() <= endPartition)) {
                out.add(new TopicPartition(desc.name(), info.partition()));
            }
        }
    }
    return out;
}
/** Verifies an exact topic name as the regex matches only that topic, within the partition range. */
@Test
public void testGetMatchingTopicPartitionsCorrectlyMatchesExactTopicName() throws Throwable {
    final String topic1 = "existing-topic";
    final String topic2 = "another-topic";
    makeExistingTopicWithOneReplica(topic1, 10);
    makeExistingTopicWithOneReplica(topic2, 20);
    Collection<TopicPartition> topicPartitions =
        WorkerUtils.getMatchingTopicPartitions(adminClient, topic2, 0, 2);
    // Only topic2's partitions 0..2 should be returned; topic1 must be excluded.
    assertEquals(
        Utils.mkSet(
            new TopicPartition(topic2, 0), new TopicPartition(topic2, 1),
            new TopicPartition(topic2, 2)
        ),
        new HashSet<>(topicPartitions)
    );
}
/** Returns this checker's type identifier constant. */
@Override
public String getCheckerType() {
    return CHECKER_TYPE;
}
/** Verifies that the default param checker reports the "default" type. */
@Test
void testCheckerType() {
    assertEquals("default", paramChecker.getCheckerType());
}
/**
 * Reconciles cluster resources against the declared requirements: decides which
 * idle-timed-out task managers and unused pending task managers can be released, and
 * whether additional pending task managers must be allocated.
 *
 * <p>Accounting order matters: non-idle registered TMs are counted first, then (only if
 * requirements are not yet met) in-use pending TMs, then idle/unused ones are either
 * kept (counted) or marked for release one by one.
 */
@Override
public ResourceReconcileResult tryReconcileClusterResources(
        TaskManagerResourceInfoProvider taskManagerResourceInfoProvider) {
    ResourceReconcileResult.Builder builder = ResourceReconcileResult.builder();

    // Partition registered task managers into idle-past-timeout vs. still needed.
    List<TaskManagerInfo> taskManagersIdleTimeout = new ArrayList<>();
    List<TaskManagerInfo> taskManagersNonTimeout = new ArrayList<>();
    long currentTime = System.currentTimeMillis();
    taskManagerResourceInfoProvider
            .getRegisteredTaskManagers()
            .forEach(
                    taskManagerInfo -> {
                        if (taskManagerInfo.isIdle()
                                && currentTime - taskManagerInfo.getIdleSince()
                                        >= taskManagerTimeout.toMilliseconds()) {
                            taskManagersIdleTimeout.add(taskManagerInfo);
                        } else {
                            taskManagersNonTimeout.add(taskManagerInfo);
                        }
                    });

    // Partition pending task managers into unused (no pending slot allocations) vs. in use.
    List<PendingTaskManager> pendingTaskManagersNonUse = new ArrayList<>();
    List<PendingTaskManager> pendingTaskManagersInuse = new ArrayList<>();
    taskManagerResourceInfoProvider
            .getPendingTaskManagers()
            .forEach(
                    pendingTaskManager -> {
                        if (pendingTaskManager.getPendingSlotAllocationRecords().isEmpty()) {
                            pendingTaskManagersNonUse.add(pendingTaskManager);
                        } else {
                            pendingTaskManagersInuse.add(pendingTaskManager);
                        }
                    });

    ResourceProfile resourcesToKeep = ResourceProfile.ZERO;
    ResourceProfile resourcesInTotal = ResourceProfile.ZERO;
    boolean resourceFulfilled = false;

    // check whether available resources of used (pending) task manager is enough.
    ResourceProfile resourcesAvailableOfNonIdle =
            getAvailableResourceOfTaskManagers(taskManagersNonTimeout);
    ResourceProfile resourcesInTotalOfNonIdle =
            getTotalResourceOfTaskManagers(taskManagersNonTimeout);
    resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdle);
    resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdle);

    if (isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
        resourceFulfilled = true;
    } else {
        // Non-idle TMs alone are not enough: also count in-use pending TMs.
        ResourceProfile resourcesAvailableOfNonIdlePendingTaskManager =
                getAvailableResourceOfPendingTaskManagers(pendingTaskManagersInuse);
        ResourceProfile resourcesInTotalOfNonIdlePendingTaskManager =
                getTotalResourceOfPendingTaskManagers(pendingTaskManagersInuse);
        resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdlePendingTaskManager);
        resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdlePendingTaskManager);
    }

    // try reserve or release unused (pending) task managers
    for (TaskManagerInfo taskManagerInfo : taskManagersIdleTimeout) {
        if (resourceFulfilled
                || isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
            // Requirements already satisfied: this idle TM can go.
            resourceFulfilled = true;
            builder.addTaskManagerToRelease(taskManagerInfo);
        } else {
            // Still short on resources: keep this idle TM and count it.
            resourcesToKeep = resourcesToKeep.merge(taskManagerInfo.getAvailableResource());
            resourcesInTotal = resourcesInTotal.merge(taskManagerInfo.getTotalResource());
        }
    }
    for (PendingTaskManager pendingTaskManager : pendingTaskManagersNonUse) {
        if (resourceFulfilled
                || isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
            resourceFulfilled = true;
            builder.addPendingTaskManagerToRelease(pendingTaskManager);
        } else {
            resourcesToKeep = resourcesToKeep.merge(pendingTaskManager.getUnusedResource());
            resourcesInTotal =
                    resourcesInTotal.merge(pendingTaskManager.getTotalResourceProfile());
        }
    }

    if (!resourceFulfilled) {
        // fulfill required resources
        tryFulFillRequiredResourcesWithAction(
                resourcesToKeep, resourcesInTotal, builder::addPendingTaskManagerToAllocate);
    }

    return builder.build();
}
/** Verifies that a pending task manager with pending slot allocations is never marked for release. */
@Test
void testUsedPendingTaskManagerShouldNotBeReleased() {
    final PendingTaskManager pendingTaskManager =
        new PendingTaskManager(DEFAULT_SLOT_RESOURCE, 1);
    // Give it a pending allocation so it counts as "in use".
    pendingTaskManager.replaceAllPendingAllocations(
        Collections.singletonMap(
            new JobID(), ResourceCounter.withResource(DEFAULT_SLOT_RESOURCE, 1)));
    final TaskManagerResourceInfoProvider taskManagerResourceInfoProvider =
        TestingTaskManagerResourceInfoProvider.newBuilder()
            .setPendingTaskManagersSupplier(
                () -> Collections.singleton(pendingTaskManager))
            .build();
    ResourceReconcileResult result =
        ANY_MATCHING_STRATEGY.tryReconcileClusterResources(taskManagerResourceInfoProvider);
    assertThat(result.getPendingTaskManagersToRelease()).isEmpty();
}
/**
 * Dispatches the schema to the visitor callback registered for its type in HANDLER.
 *
 * @throws UnsupportedOperationException when no handler is registered for the schema type
 */
@SuppressWarnings("unchecked")
public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) {
  final BiFunction<Visitor<?, ?>, Schema, Object> handler = HANDLER.get(schema.type());
  if (handler != null) {
    return (S) handler.apply(visitor, schema);
  }
  throw new UnsupportedOperationException("Unsupported schema type: " + schema.type());
}
/** Verifies that an INT32 schema dispatches to visitInt32 with the same schema instance. */
@Test
public void shouldVisitInt32() {
    // Given:
    final Schema schema = Schema.OPTIONAL_INT32_SCHEMA;
    when(visitor.visitInt32(any())).thenReturn("Expected");

    // When:
    final String result = SchemaWalker.visit(schema, visitor);

    // Then: the exact schema instance is passed through and the visitor's result returned.
    verify(visitor).visitInt32(same(schema));
    assertThat(result, is("Expected"));
}
/**
 * Resolves a SpEL expression attached to an annotated method: placeholder expressions
 * (${...}) go through the embedded value resolver, method expressions (#{...} referencing
 * arguments) are evaluated against the invocation, and bean expressions additionally get
 * a bean resolver. Anything else is returned unchanged.
 *
 * <p>Improvement: the method- and bean-expression branches were near-identical copies;
 * the shared evaluation is now in a private helper.
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }

    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }

    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        return evaluateSpelExpression(method, arguments, spelExpression, false);
    }

    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        return evaluateSpelExpression(method, arguments, spelExpression, true);
    }

    // Not a SpEL expression at all: hand it back verbatim.
    return spelExpression;
}

/**
 * Evaluates the expression against a method-based evaluation context.
 *
 * @param withBeanResolver whether to attach a BeanFactoryResolver so bean references resolve
 */
private String evaluateSpelExpression(Method method, Object[] arguments, String spelExpression,
                                      boolean withBeanResolver) {
    SpelRootObject rootObject = new SpelRootObject(method, arguments);
    MethodBasedEvaluationContext evaluationContext =
        new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
    if (withBeanResolver) {
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
    }
    Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);
    return (String) evaluated;
}
/** Verifies that a plain string that is not a SpEL expression is returned unchanged. */
@Test
public void givenNonSpelExpression_whenParse_returnsItself() throws Exception {
    String testExpression = "backendA";
    DefaultSpelResolverTest target = new DefaultSpelResolverTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);
    String result = sut.resolve(testMethod, new Object[]{}, testExpression);
    assertThat(result).isEqualTo(testExpression);
}
/**
 * Skips up to {@code n} bytes by reading them through {@code read()} in buffered chunks
 * (so the tail buffer keeps seeing the data).
 *
 * <p>Fix: a negative {@code n} previously caused {@code new byte[negativeSize]} to throw
 * {@link NegativeArraySizeException}; per the {@link java.io.InputStream#skip(long)}
 * contract, non-positive requests now skip nothing and return 0.
 *
 * @return the number of bytes actually skipped, or -1 when the stream was already at
 *     end-of-file and nothing could be skipped (this class's documented convention)
 */
@Override
public long skip(long n) throws IOException {
    if (n <= 0) {
        // Nothing requested (or negative request): skip nothing.
        return 0;
    }
    int bufSize = (int) Math.min(n, SKIP_SIZE);
    byte[] buf = new byte[bufSize];
    long bytesSkipped = 0;
    int bytesRead = 0;

    while (bytesSkipped < n && bytesRead != -1) {
        int len = (int) Math.min(bufSize, n - bytesSkipped);
        bytesRead = read(buf, 0, len);
        if (bytesRead != -1) {
            bytesSkipped += bytesRead;
        }
    }

    // -1 only when EOF was hit before any byte could be skipped.
    return (bytesRead < 0 && bytesSkipped == 0) ? -1 : bytesSkipped;
}
/** Verifies that skip() returns -1 once the underlying stream has been fully consumed. */
@Test
public void testSkipReadEnd() throws IOException {
    final int count = 128;
    TailStream stream = new TailStream(generateStream(0, count), 2 * count);
    // Consume the whole stream first, then a skip at EOF must report -1.
    readStream(stream);
    assertEquals(-1, stream.skip(1), "Wrong result");
}
/**
 * Returns the variadic argument at {@code index}.
 *
 * @throws IndexOutOfBoundsException when the index is negative or beyond the last argument
 */
public T get(final int index) {
    final int size = values.size();
    if (index >= 0 && index < size) {
        return values.get(index);
    }
    throw new IndexOutOfBoundsException(
        String.format(
            "Attempted to access variadic argument at index %s when only %s "
                + "arguments are available",
            index,
            size
        )
    );
}
/** Verifies that a negative index raises IndexOutOfBoundsException with the descriptive message. */
@Test
public void shouldThrowWhenIndexNegative() {
    final VariadicArgs<Integer> varArgs = new VariadicArgs<>(ImmutableList.of(1, 2, 3));
    final Exception e = assertThrows(
        IndexOutOfBoundsException.class,
        () -> varArgs.get(-1)
    );
    assertThat(e.getMessage(), is("Attempted to access variadic argument at index -1 when only 3 "
        + "arguments are available"));
}
/**
 * Creates an IPv6 prefix from a raw address byte array and a prefix length.
 *
 * @param address      the IPv6 address bytes (validated by Ip6Address.valueOf)
 * @param prefixLength the prefix length in bits
 * @return the corresponding Ip6Prefix
 */
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
    return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength);
}
/** Verifies that a byte array shorter than 16 bytes is rejected with IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfShortArrayIPv6() {
    Ip6Prefix ipPrefix;
    byte[] value;

    // 9 bytes instead of the 16 an IPv6 address requires.
    value = new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9};
    ipPrefix = Ip6Prefix.valueOf(value, 120);
}
/**
 * Converts a base message-id string back into the AMQP id object it encodes, dispatching
 * on the type prefix: UUID, unsigned long, plain string, or binary (hex). A string with
 * no recognized prefix is returned as-is.
 *
 * @param baseId the encoded id, or null (returns null)
 * @return the decoded id object (UUID, UnsignedLong, String, or Binary)
 * @throws AmqpProtocolException if the payload after the prefix cannot be parsed
 */
public Object toIdObject(String baseId) throws AmqpProtocolException {
    if (baseId == null) {
        return null;
    }

    try {
        if (hasAmqpUuidPrefix(baseId)) {
            String uuidString = strip(baseId, AMQP_UUID_PREFIX_LENGTH);
            return UUID.fromString(uuidString);
        } else if (hasAmqpUlongPrefix(baseId)) {
            String longString = strip(baseId, AMQP_ULONG_PREFIX_LENGTH);
            return UnsignedLong.valueOf(longString);
        } else if (hasAmqpStringPrefix(baseId)) {
            return strip(baseId, AMQP_STRING_PREFIX_LENGTH);
        } else if (hasAmqpBinaryPrefix(baseId)) {
            String hexString = strip(baseId, AMQP_BINARY_PREFIX_LENGTH);
            byte[] bytes = convertHexStringToBinary(hexString);
            return new Binary(bytes);
        } else {
            // We have a string without any type prefix, transmit it as-is.
            return baseId;
        }
    } catch (IllegalArgumentException e) {
        // Parse failures (bad UUID, non-numeric ulong, non-hex binary) become protocol errors.
        throw new AmqpProtocolException("Unable to convert ID value");
    }
}
@Test public void testToIdObjectWithStringContainingBinaryHexThrowsWithNonHexCharacters() { // char before '0' char nonHexChar = '/'; String nonHexString = AMQPMessageIdHelper.AMQP_BINARY_PREFIX + nonHexChar + nonHexChar; try { messageIdHelper.toIdObject(nonHexString); fail("expected exception was not thrown"); } catch (AmqpProtocolException ex) { // expected } // char after '9', before 'A' nonHexChar = ':'; nonHexString = AMQPMessageIdHelper.AMQP_BINARY_PREFIX + nonHexChar + nonHexChar; try { messageIdHelper.toIdObject(nonHexString); fail("expected exception was not thrown"); } catch (AmqpProtocolException ex) { // expected } // char after 'F', before 'a' nonHexChar = 'G'; nonHexString = AMQPMessageIdHelper.AMQP_BINARY_PREFIX + nonHexChar + nonHexChar; try { messageIdHelper.toIdObject(nonHexString); fail("expected exception was not thrown"); } catch (AmqpProtocolException ex) { // expected } // char after 'f' nonHexChar = 'g'; nonHexString = AMQPMessageIdHelper.AMQP_BINARY_PREFIX + nonHexChar + nonHexChar; try { messageIdHelper.toIdObject(nonHexString); fail("expected exception was not thrown"); } catch (AmqpProtocolException ex) { // expected } }
/** This metadata implementation reports no support for SQL {@code UNION ALL}. */
@Override
public boolean supportsUnionAll() {
    return false;
}
/** Verifies that the metadata reports UNION ALL as unsupported. */
@Test
void assertSupportsUnionAll() {
    assertFalse(metaData.supportsUnionAll());
}
/**
 * Shuts down the load-balancer state on the property-event thread: strategies first,
 * then all transport clients (the callback fires once every client has shut down), and
 * finally notifies every registered listener of the removal of service properties,
 * cluster info, strategies, tracker clients, and clusters.
 */
@Override
public void shutdown(final PropertyEventShutdownCallback shutdown) {
    trace(_log, "shutdown");

    // shutdown all three registries, all tracker clients, and the event thread
    _executor.execute(new PropertyEvent("shutdown load balancer state") {
        @Override
        public void innerRun() {
            // Need to shutdown loadBalancerStrategies before the transportClients are shutdown
            for (Map<String, LoadBalancerStrategy> strategyEntry : _serviceStrategies.values()) {
                strategyEntry.values().forEach(LoadBalancerStrategy::shutdown);
            }

            // put all tracker clients into a single set for convenience
            Set<TransportClient> transportClients = new HashSet<>();

            for (Map<String, TransportClient> clientsByScheme : _serviceClients.values()) {
                transportClients.addAll(clientsByScheme.values());
            }

            // The countdown callback fires shutdown.done() only after every client finished.
            Callback<None> trackerCallback = Callbacks.countDown(Callbacks.<None>adaptSimple(new SimpleCallback() {
                @Override
                public void onDone() {
                    shutdown.done();
                }
            }), transportClients.size());

            info(_log, "shutting down cluster clients");

            for (TransportClient transportClient : transportClients) {
                transportClient.shutdown(trackerCallback);
            }

            // When SimpleLoadBalancerState is shutdown, all the strategies and clients are effectively removed,
            // so it is needed to notify all the listeners
            for (SimpleLoadBalancerStateListener listener : _listeners) {
                // Send removal notifications for service properties.
                for (LoadBalancerStateItem<ServiceProperties> serviceProperties : _serviceProperties.values()) {
                    listener.onServicePropertiesRemoval(serviceProperties);
                }

                // Send removal notification for cluster properties.
                for (ClusterInfoItem clusterInfoItem: _clusterInfo.values()) {
                    listener.onClusterInfoRemoval(clusterInfoItem);
                }

                // Notify the strategy removal
                for (Map.Entry<String, Map<String, LoadBalancerStrategy>> serviceStrategy : _serviceStrategies.entrySet()) {
                    for (Map.Entry<String, LoadBalancerStrategy> strategyEntry : serviceStrategy.getValue().entrySet()) {
                        listener.onStrategyRemoved(serviceStrategy.getKey(), strategyEntry.getKey(), strategyEntry.getValue());
                    }

                    // Also notify the client removal
                    Map<URI, TrackerClient> trackerClients = _trackerClients.get(serviceStrategy.getKey());
                    if (trackerClients != null) {
                        for (TrackerClient client : trackerClients.values()) {
                            listener.onClientRemoved(serviceStrategy.getKey(), client);
                        }
                    }
                }
            }

            // When SimpleLoadBalancerState is shutdown, all the cluster listener also need to be notified.
            for (LoadBalancerClusterListener clusterListener : _clusterListeners) {
                for (String clusterName : _clusterInfo.keySet()) {
                    clusterListener.onClusterRemoved(clusterName);
                }
            }
        }
    });
}
// Verifies that shutting down the load balancer state closes every transport
// client created by the factories and delivers removal notifications (service
// properties and cluster info) to registered listeners.
@Test(groups = { "small", "back-end" })
public void testShutdown() throws URISyntaxException, InterruptedException
{
  reset();

  URI uri = URI.create("http://cluster-1/test");
  TestListener listener = new TestListener();
  List<String> schemes = new ArrayList<>();
  Map<Integer, PartitionData> partitionData = new HashMap<>(1);
  partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
  Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>();
  uriData.put(uri, partitionData);
  schemes.add("http");
  _state.register(listener);

  // Listener must not have seen any events yet.
  assertNull(listener.scheme);
  assertNull(listener.strategy);
  assertNull(listener.serviceName);

  // set up state
  ClusterProperties clusterProperties = new ClusterProperties("cluster-1", schemes);
  ServiceProperties serviceProperties =
      new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"));

  _state.listenToCluster("cluster-1", new NullStateListenerCallback());
  _state.listenToService("service-1", new NullStateListenerCallback());
  _clusterRegistry.put("cluster-1", clusterProperties);
  _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
  _serviceRegistry.put("service-1", serviceProperties);

  // Side effect: forces the state to actually create a transport client.
  TrackerClient client = _state.getClient("cluster-1", uri);

  TestShutdownCallback callback = new TestShutdownCallback();

  _state.shutdown(callback);

  if (!callback.await(10, TimeUnit.SECONDS))
  {
    fail("unable to shut down state");
  }

  // Every client handed out by the DoNothingClientFactory must have been closed.
  for (TransportClientFactory factory : _clientFactories.values())
  {
    SimpleLoadBalancerTest.DoNothingClientFactory f = (SimpleLoadBalancerTest.DoNothingClientFactory)factory;
    assertEquals(f.getRunningClientCount(), 0, "Not all clients were shut down");
  }

  // Verify that registered listeners get all removal events for cluster properties and service properties.
  Assert.assertEquals(listener.servicePropertiesRemoved.size(), 1);
  Assert.assertEquals(listener.servicePropertiesRemoved.get(0).getProperty(), serviceProperties);
  Assert.assertEquals(listener.clusterInfoRemoved.size(), 1);
  Assert.assertEquals(listener.clusterInfoRemoved.get(0).getClusterPropertiesItem().getProperty(), clusterProperties);
}
/**
 * Watches the key store and trust store files and renews {@code baseSslFactory} in
 * place (via {@link SSLFactoryUtils#reload}) whenever either file changes.
 *
 * Blocks indefinitely on the watch service, so it is intended to run on a dedicated
 * thread. Each detected change is retried up to 3 times, 1s apart, because when one
 * of the two files is rewritten the other may not be fully written yet.
 *
 * NOTE(review): event.context() yields a path relative to the watched directory;
 * this relies on registerFile() recording the same relative form in
 * watchKeyPathMap — confirm if registration logic changes.
 */
@VisibleForTesting
static void reloadSslFactoryWhenFileStoreChanges(SSLFactory baseSslFactory,
    String keyStoreType, String keyStorePath, String keyStorePassword,
    String trustStoreType, String trustStorePath, String trustStorePassword,
    String sslContextProtocol, SecureRandom secureRandom, Supplier<Boolean> insecureModeSupplier)
    throws IOException, URISyntaxException, InterruptedException {
  LOGGER.info("Enable auto renewal of SSLFactory {} when key store {} or trust store {} changes",
      baseSslFactory, keyStorePath, trustStorePath);
  WatchService watchService = FileSystems.getDefault().newWatchService();
  Map<WatchKey, Set<Path>> watchKeyPathMap = new HashMap<>();
  registerFile(watchService, watchKeyPathMap, keyStorePath);
  registerFile(watchService, watchKeyPathMap, trustStorePath);
  int maxSslFactoryReloadingAttempts = 3;
  int sslFactoryReloadingRetryDelayMs = 1000;
  WatchKey key;
  // take() blocks until a watched directory reports an event; the loop runs forever.
  while ((key = watchService.take()) != null) {
    for (WatchEvent<?> event : key.pollEvents()) {
      Path changedFile = (Path) event.context();
      // Only react to changes of the two registered store files, not other files
      // in the same directory.
      if (watchKeyPathMap.get(key).contains(changedFile)) {
        LOGGER.info("Detected change in file: {}, try to renew SSLFactory {} "
            + "(built from key store {} and truststore {})", changedFile, baseSslFactory, keyStorePath,
            trustStorePath);
        try {
          // Need to retry a few times because when one file (key store or trust store) is updated, the other file
          // (trust store or key store) may not have been fully written yet, so we need to wait a bit and retry.
          RetryPolicies.fixedDelayRetryPolicy(maxSslFactoryReloadingAttempts, sslFactoryReloadingRetryDelayMs)
              .attempt(() -> {
                try {
                  // Rebuild a fresh factory from disk, then swap its internals into
                  // the shared wrapper so existing references see the new material.
                  SSLFactory updatedSslFactory =
                      createSSLFactory(keyStoreType, keyStorePath, keyStorePassword, trustStoreType, trustStorePath,
                          trustStorePassword, sslContextProtocol, secureRandom, false, insecureModeSupplier.get());
                  SSLFactoryUtils.reload(baseSslFactory, updatedSslFactory);
                  LOGGER.info("Successfully renewed SSLFactory {} (built from key store {} and truststore {}) on file"
                      + " {} changes", baseSslFactory, keyStorePath, trustStorePath, changedFile);
                  return true;
                } catch (Exception e) {
                  LOGGER.info(
                      "Encountered issues when renewing SSLFactory {} (built from key store {} and truststore {}) on "
                          + "file {} changes", baseSslFactory, keyStorePath, trustStorePath, changedFile, e);
                  return false;
                }
              });
        } catch (Exception e) {
          LOGGER.error(
              "Failed to renew SSLFactory {} (built from key store {} and truststore {}) on file {} changes after {} "
                  + "retries", baseSslFactory, keyStorePath, trustStorePath, changedFile,
              maxSslFactoryReloadingAttempts, e);
        }
      }
    }
    // Re-arm the key so further events for this directory are delivered.
    key.reset();
  }
}
// End-to-end check that the background watcher renews the SSLFactory in place:
// the wrapper objects (key manager, trust manager, SSLContext) stay the same
// instances, while the underlying key/cert material changes after the TLS files
// are rewritten.
@Test
public void reloadSslFactoryWhenFileStoreChanges() throws IOException, URISyntaxException, InterruptedException {
  SecureRandom secureRandom = new SecureRandom();
  SSLFactory sslFactory =
      RenewableTlsUtils.createSSLFactory(KEYSTORE_TYPE, TLS_KEYSTORE_FILE_PATH, PASSWORD, TRUSTSTORE_TYPE,
          TLS_TRUSTSTORE_FILE_PATH, PASSWORD, "TLS", secureRandom, true, false);
  X509ExtendedKeyManager x509ExtendedKeyManager = sslFactory.getKeyManager().get();
  X509ExtendedTrustManager x509ExtendedTrustManager = sslFactory.getTrustManager().get();
  SSLContext sslContext = sslFactory.getSslContext();
  // Snapshot the material before the reload so we can prove it changed.
  PrivateKey privateKey = x509ExtendedKeyManager.getPrivateKey(KEY_NAME_ALIAS);
  Certificate certForPrivateKey = x509ExtendedKeyManager.getCertificateChain(KEY_NAME_ALIAS)[0];
  X509Certificate acceptedIssuerForCert = x509ExtendedTrustManager.getAcceptedIssuers()[0];
  // Start a new thread to reload the ssl factory when the tls files change
  // Avoid early finalization by not using Executors.newSingleThreadExecutor (java <= 20, JDK-8145304)
  ExecutorService executorService = Executors.newFixedThreadPool(1);
  executorService.execute(
      () -> {
        try {
          RenewableTlsUtils.reloadSslFactoryWhenFileStoreChanges(sslFactory, KEYSTORE_TYPE, TLS_KEYSTORE_FILE_PATH,
              PASSWORD, TRUSTSTORE_TYPE, TLS_TRUSTSTORE_FILE_PATH, PASSWORD, "TLS", secureRandom, () -> false);
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      });
  updateTlsFilesAndWaitForSslFactoryToBeRenewed();
  executorService.shutdown();
  // after tls file update, the returned values should be the same, since the wrapper is the same
  X509ExtendedKeyManager udpatedX509ExtendedKeyManager = sslFactory.getKeyManager().get();
  X509ExtendedTrustManager updatedX509ExtendedTrustManager = sslFactory.getTrustManager().get();
  SSLContext updatedSslContext = sslFactory.getSslContext();
  assertEquals(x509ExtendedKeyManager, udpatedX509ExtendedKeyManager);
  assertEquals(x509ExtendedTrustManager, updatedX509ExtendedTrustManager);
  assertEquals(sslContext, updatedSslContext);
  // after tls file update, the underlying values should be different
  assertNotEquals(privateKey, udpatedX509ExtendedKeyManager.getPrivateKey(KEY_NAME_ALIAS));
  assertNotEquals(certForPrivateKey, udpatedX509ExtendedKeyManager.getCertificateChain(KEY_NAME_ALIAS)[0]);
  assertNotEquals(acceptedIssuerForCert, updatedX509ExtendedTrustManager.getAcceptedIssuers()[0]);
}
/**
 * Validates the pairing of the new-code-definition request parameters: a value
 * may only be supplied together with a type.
 *
 * @throws IllegalArgumentException if a value is provided without a type
 */
public static void checkNewCodeDefinitionParam(@Nullable String newCodeDefinitionType, @Nullable String newCodeDefinitionValue) {
  boolean valueWithoutType = newCodeDefinitionValue != null && newCodeDefinitionType == null;
  if (valueWithoutType) {
    throw new IllegalArgumentException("New code definition type is required when new code definition value is provided");
  }
}
// Supplying both a type and a value is the valid combination — no exception expected.
@Test
public void checkNewCodeDefinitionParam_do_not_throw_when_both_value_and_type_are_provided() {
  assertThatNoException()
    .isThrownBy(() -> newCodeDefinitionResolver.checkNewCodeDefinitionParam("PREVIOUS_VERSION", "anyvalue"));
}
@Override public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) { if (client.getId() != null) { // if it's not null, it's already been saved, this is an error throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId()); } if (client.getRegisteredRedirectUri() != null) { for (String uri : client.getRegisteredRedirectUri()) { if (blacklistedSiteService.isBlacklisted(uri)) { throw new IllegalArgumentException("Client URI is blacklisted: " + uri); } } } // assign a random clientid if it's empty // NOTE: don't assign a random client secret without asking, since public clients have no secret if (Strings.isNullOrEmpty(client.getClientId())) { client = generateClientId(client); } // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa ensureRefreshTokenConsistency(client); // make sure we don't have both a JWKS and a JWKS URI ensureKeyConsistency(client); // check consistency when using HEART mode checkHeartMode(client); // timestamp this to right now client.setCreatedAt(new Date()); // check the sector URI checkSectorIdentifierUri(client); ensureNoReservedScopes(client); ClientDetailsEntity c = clientRepository.saveClient(client); statsService.resetCache(); return c; }
// A client saved without a client id must get a generated one assigned.
@Test
public void saveNewClient_idWasAssigned() {
	// Set up a mock client.
	ClientDetailsEntity client = Mockito.mock(ClientDetailsEntity.class);
	// Null entity ID marks the client as never-persisted, so saveNewClient accepts it.
	Mockito.when(client.getId()).thenReturn(null);

	service.saveNewClient(client);

	// The service must have assigned some generated client id.
	Mockito.verify(client).setClientId(Matchers.anyString());
}
/**
 * Reads records from the given shard using this client's configured default
 * {@code limit}; delegates to the limit-aware overload.
 *
 * @throws TransientKinesisException on a retriable Kinesis failure
 */
public GetKinesisRecordsResult getRecords(String shardIterator, String streamName, String shardId)
    throws TransientKinesisException {
  return getRecords(shardIterator, streamName, shardId, limit);
}
// The limit passed to getRecords must be forwarded to the Kinesis request:
// the stub echoes back exactly request.getLimit() records.
@Test
public void shouldReturnLimitedNumberOfRecords() throws Exception {
  final Integer limit = 100;

  doAnswer(
          (Answer<GetRecordsResult>)
              invocation -> {
                GetRecordsRequest request = (GetRecordsRequest) invocation.getArguments()[0];
                // Generate as many records as the request asked for, proving the
                // limit propagated end to end.
                List<Record> records = generateRecords(request.getLimit());
                return new GetRecordsResult().withRecords(records).withMillisBehindLatest(1000L);
              })
      .when(kinesis)
      .getRecords(any(GetRecordsRequest.class));

  GetKinesisRecordsResult result = underTest.getRecords(SHARD_ITERATOR, STREAM, SHARD_1, limit);
  assertThat(result.getRecords().size()).isEqualTo(limit);
}
/**
 * Returns the stroke width for the trail element at {@code elementIndex}:
 * element 0 draws at the full start size and each later element shrinks
 * linearly toward the end size.
 */
public float strokeSizeFor(int elementIndex) {
  // Keep the multiplication order of the original expression so float
  // rounding is bit-identical.
  final float shrinkRange = mStartStrokeSize - mEndStrokeSize;
  return mStartStrokeSize - shrinkRange * elementIndex * mTrailFraction;
}
// With start=120, end=20 and 50 trail elements the stroke size must interpolate
// linearly: full size at index 0, end size at index 50, midpoint at index 25.
@Test
public void testStrokeSizeFor() {
  GestureTrailTheme underTest = new GestureTrailTheme(Color.BLACK, Color.BLACK, 120f, 20f, 50);
  Assert.assertEquals(120f, underTest.strokeSizeFor(0), 0.1f);
  Assert.assertEquals(20f, underTest.strokeSizeFor(50), 0.1f);
  Assert.assertEquals(70f, underTest.strokeSizeFor(25), 0.1f);
}
/**
 * Creates a paginated response keyed by {@code listKey}, without an additional
 * context map or query string (both passed as null to the full constructor).
 */
public static <T> PaginatedResponse<T> create(String listKey, PaginatedList<T> paginatedList) {
    return new PaginatedResponse<>(listKey, paginatedList, null, null);
}
// Serializing a response built with a context map must emit the pagination
// metadata, the entries under the chosen list key, and the context map.
@Test
public void serializeWithContext() throws Exception {
    final ImmutableList<String> values = ImmutableList.of("hello", "world");
    final ImmutableMap<String, Object> context = ImmutableMap.of("context1", "wow");
    final PaginatedList<String> paginatedList = new PaginatedList<>(values, values.size(), 1, 10);
    final PaginatedResponse<String> response = PaginatedResponse.create("foo", paginatedList, context);

    final DocumentContext ctx = JsonPath.parse(objectMapper.writeValueAsString(response));
    final JsonPathAssert jsonPathAssert = JsonPathAssert.assertThat(ctx);
    jsonPathAssert.jsonPathAsInteger("$.total").isEqualTo(2);
    jsonPathAssert.jsonPathAsInteger("$.count").isEqualTo(2);
    jsonPathAssert.jsonPathAsInteger("$.page").isEqualTo(1);
    jsonPathAssert.jsonPathAsInteger("$.per_page").isEqualTo(10);
    // Entries appear under the custom list key "foo".
    jsonPathAssert.jsonPathAsString("$.foo[0]").isEqualTo("hello");
    jsonPathAssert.jsonPathAsString("$.foo[1]").isEqualTo("world");
    jsonPathAssert.jsonPathAsString("$.context.context1").isEqualTo("wow");
}
public static void applyLocaleToContext(@NonNull Context context, @Nullable String localeString) { final Locale forceLocale = LocaleTools.getLocaleForLocaleString(localeString); final Configuration configuration = context.getResources().getConfiguration(); if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) { configuration.setLocale(forceLocale); } else { //noinspection deprecation configuration.locale = forceLocale; } context.getResources().updateConfiguration(configuration, null); }
// Exercises the pre-API-17 branch (direct configuration.locale assignment) on a
// JELLY_BEAN emulated SDK; currently ignored because Robolectric cannot emulate it.
@Test
@Config(sdk = Build.VERSION_CODES.JELLY_BEAN)
@Ignore("Robolectric does not support this API")
public void testSetAndResetValueAPI16() {
  Assert.assertEquals(
      "English (United States)",
      mContext.getResources().getConfiguration().locale.getDisplayName());

  LocaleTools.applyLocaleToContext(mContext, "de");
  Assert.assertEquals("de", mContext.getResources().getConfiguration().locale.getLanguage());
  Assert.assertTrue(
      mContext.getResources().getConfiguration().locale.getDisplayName().contains("German"));

  // Empty string resets to the JVM default locale.
  LocaleTools.applyLocaleToContext(mContext, "");
  Assert.assertSame(Locale.getDefault(), mContext.getResources().getConfiguration().locale);

  // Unknown locale strings are applied verbatim (lower-cased language tag).
  LocaleTools.applyLocaleToContext(mContext, "NONE_EXISTING");
  Assert.assertEquals(
      "none_existing", mContext.getResources().getConfiguration().locale.getLanguage());
}
/**
 * Builds a fully configured {@link JobConf} from streaming command-line
 * arguments by driving a {@link StreamJob} through its setup phases.
 *
 * @param argv the raw streaming CLI arguments
 * @return the resulting job configuration
 * @throws IOException if job configuration fails
 */
public static JobConf createJob(String[] argv) throws IOException {
  StreamJob streamJob = new StreamJob();
  streamJob.argv_ = argv;
  // The phases must run in exactly this order: initialize, pre-process,
  // parse, post-process, then materialize the JobConf.
  streamJob.init();
  streamJob.preProcessArgs();
  streamJob.parseArgv();
  streamJob.postProcessArgs();
  streamJob.setJobConf();
  return streamJob.jobConf_;
}
// A stray positional argument ("dummy" between -mapper and -reducer) must make
// argument parsing fail with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testCreateJobWithExtraArgs() throws IOException {
  ArrayList<String> dummyArgs = new ArrayList<String>();
  dummyArgs.add("-input"); dummyArgs.add("dummy");
  dummyArgs.add("-output"); dummyArgs.add("dummy");
  dummyArgs.add("-mapper"); dummyArgs.add("dummy");
  // Extra free-standing token that belongs to no option.
  dummyArgs.add("dummy");
  dummyArgs.add("-reducer"); dummyArgs.add("dummy");
  StreamJob.createJob(dummyArgs.toArray(new String[] {}));
}
/**
 * Builds the Netty UDP bootstrap for this transport.
 *
 * Side effect: creates and stores the worker event loop group in the
 * {@code eventLoopGroup} field so it can be shut down later.
 */
@VisibleForTesting
Bootstrap getBootstrap(MessageInput input) {
    LOG.debug("Setting UDP receive buffer size to {} bytes", getRecvBufferSize());
    final NettyTransportType transportType = nettyTransportConfiguration.getType();
    eventLoopGroup = eventLoopGroupFactory.create(workerThreads, localRegistry, "workers");

    return new Bootstrap()
            .group(eventLoopGroup)
            .channelFactory(new DatagramChannelFactory(transportType))
            .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(65535)) // Maximum possible UDP packet size
            .option(ChannelOption.SO_RCVBUF, getRecvBufferSize())
            // Allow multiple workers to bind the same port.
            .option(UnixChannelOption.SO_REUSEPORT, true)
            .handler(getChannelInitializer(getChannelHandlers(input)))
            .validate();
}
// The bootstrap must carry the transport's configured SO_RCVBUF value.
@Test
public void receiveBufferSizeIsDefaultSize() {
    assertThat(udpTransport.getBootstrap(mock(MessageInput.class)).config().options().get(ChannelOption.SO_RCVBUF)).isEqualTo(RECV_BUFFER_SIZE);
}
/**
 * Loads authentication data from one of three supported forms, tried in order:
 * a {@code data:}/{@code file:} URL, a path to an existing local file, or a
 * raw base64-encoded payload.
 *
 * @param data the URL, file path or base64 string
 * @return the decoded bytes
 * @throws IllegalArgumentException if none of the supported forms matches
 */
public static byte[] readData(String data)
        throws IOException, URISyntaxException, InstantiationException, IllegalAccessException {
    if (data.startsWith("data:") || data.startsWith("file:")) {
        return IOUtils.toByteArray(URL.createURL(data));
    }
    Path localPath = Paths.get(data);
    if (Files.exists(localPath)) {
        return Files.readAllBytes(localPath);
    }
    if (org.apache.commons.codec.binary.Base64.isBase64(data)) {
        return Base64.getDecoder().decode(data);
    }
    throw new IllegalArgumentException("Not supported config");
}
// readData must accept all three input forms (data: URL, file:// URL / plain
// path, raw base64) and yield the same bytes each time.
@Test
public void testReadData() throws Exception {
    byte[] data = Files.readAllBytes(Path.of(basicAuthConf));
    String base64Data = Base64.getEncoder().encodeToString(data);

    // base64 format
    assertEquals(AuthenticationProviderBasic.readData("data:;base64," + base64Data), data);
    assertEquals(AuthenticationProviderBasic.readData(base64Data), data);

    // file format
    assertEquals(AuthenticationProviderBasic.readData("file://" + basicAuthConf), data);
    assertEquals(AuthenticationProviderBasic.readData(basicAuthConf), data);
}
/**
 * Formats {@code index} as an OpenFlow datapath id string: the "of:" prefix
 * followed by the index in hex, zero-padded to {@code HEX_LENGTH} digits.
 *
 * @param index non-negative device index
 * @return the dpid string, or null when the index is negative
 */
public static String genDpid(long index) {
    if (index < 0) {
        return null;
    }
    final String hex = Long.toHexString(index);
    // Left-pad with zeros up to the fixed dpid width.
    final StringBuilder padded = new StringBuilder();
    for (int remaining = HEX_LENGTH - hex.length(); remaining > 0; remaining--) {
        padded.append(ZERO);
    }
    return OF_PREFIX + padded + hex;
}
// Dpid generation: lowercase hex, zero-padded to 16 digits, "of:" prefix;
// negative indices yield null.
@Test
public void testGenDpid() {
    long one = 1;
    long ten = 10;
    long sixteen = 16;
    long seventeen = 17;
    long minus = -1;
    assertEquals("of:0000000000000001", genDpid(one));
    assertEquals("of:000000000000000a", genDpid(ten));
    assertEquals("of:0000000000000010", genDpid(sixteen));
    assertEquals("of:0000000000000011", genDpid(seventeen));
    assertNull(genDpid(minus));
}
/**
 * Static factory: wraps the given time limiter in a reactive operator that can
 * be applied to a publisher via transformDeferred.
 */
public static <T> TimeLimiterOperator<T> of(TimeLimiter timeLimiter) {
    return new TimeLimiterOperator<>(timeLimiter);
}
// A Mono that outlives the 1 ms time limit must error with TimeoutException,
// and the limiter must be notified of that error.
@Test
public void timeoutUsingMono() {
    given(timeLimiter.getTimeLimiterConfig())
        .willReturn(toConfig(Duration.ofMillis(1)));

    Mono<?> mono = Mono.delay(Duration.ofMinutes(1))
        .transformDeferred(TimeLimiterOperator.of(timeLimiter));

    StepVerifier.create(mono)
        .expectError(TimeoutException.class)
        .verify(Duration.ofMinutes(1));

    then(timeLimiter).should()
        .onError(any(TimeoutException.class));
}
/**
 * Executes this request synchronously through the configured web3j service and
 * deserializes the reply into the request's response type.
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
// The request must serialize to the exact JSON-RPC payload: uncle index is
// hex-encoded ("0x0") alongside the block hash.
@Test
public void testEthGetUncleByBlockHashAndIndex() throws Exception {
    web3j.ethGetUncleByBlockHashAndIndex(
                    "0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b",
                    BigInteger.ZERO)
            .send();

    verifyResult(
            "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getUncleByBlockHashAndIndex\","
                    + "\"params\":["
                    + "\"0xc6ef2fc5426d6ad6fd9e2a26abeab0aa2411b7ab17f30a99d3cb96aed1d1055b\",\"0x0\"],"
                    + "\"id\":1}");
}
/**
 * Returns the trade histories of a member, viewed either as seller or buyer.
 *
 * @param memberId the member whose histories are requested
 * @param authId   the authenticated caller, forwarded so the service can
 *                 authorize access
 * @param isSeller true for the member's sales, false for purchases
 */
@GetMapping("/{memberId}/histories")
public ResponseEntity<List<TradeHistoryResponse>> findTradeHistories(
        @PathVariable("memberId") final Long memberId,
        @AuthMember final Long authId,
        @RequestParam(value = "isSeller") final boolean isSeller
) {
    final List<TradeHistoryResponse> tradeHistories =
            memberService.findTradeHistories(memberId, authId, isSeller);
    return ResponseEntity.ok(tradeHistories);
}
// Documents and verifies the GET /api/members/{memberId}/histories endpoint:
// stubs the service, asserts HTTP 200, and generates REST Docs for the request
// header, query parameter and every response field.
@Test
void 거래_내역을_조회한다() throws Exception {
    // given
    when(memberService.findTradeHistories(anyLong(), anyLong(), anyBoolean()))
            .thenReturn(List.of(new TradeHistoryResponse(1L, "buyerNickname", "sellerNickname", 1L,
                    "productTitle", 10000, 10, "1,2,3")));

    // when & then
    mockMvc.perform(get("/api/members/{memberId}/histories", 1L)
            .queryParam("isSeller", String.valueOf(true))
            .header(HttpHeaders.AUTHORIZATION, "Bearer tokenInfo~")
    ).andExpect(status().isOk())
            .andDo(customDocument("find_trade_histories",
                    requestHeaders(
                            headerWithName(AUTHORIZATION).description("유저 토큰 정보")
                    ),
                    queryParameters(
                            parameterWithName("isSeller").description("판매자인지 구매자인지 여부 (true = 판매자, false = 구매자)")
                    ),
                    responseFields(
                            fieldWithPath("[0].tradeHistoryId").description("거래 내역 id"),
                            fieldWithPath("[0].buyerName").description("구매자 닉네임"),
                            fieldWithPath("[0].sellerName").description("판매자 닉네임"),
                            fieldWithPath("[0].productId").description("상품 id"),
                            fieldWithPath("[0].productTitle").description("상품 제목"),
                            fieldWithPath("[0].productOriginPrice").description("상품 정상가"),
                            fieldWithPath("[0].productDiscountPrice").description("상품 할인해서 구매한 가격"),
                            fieldWithPath("[0].usingCouponIds").description("사용한 쿠폰 ids, String 타입으로 ',' 이용해서 묶음")
                    )
            ));
}
/**
 * Extracts every entry of the jar into {@code toDir}; delegates to the
 * pattern-aware overload with the match-everything pattern.
 */
public void unJar(File jarFile, File toDir) throws IOException {
  unJar(jarFile, toDir, MATCH_ANY);
}
// Extracted files must keep the last-modified timestamps recorded in the jar
// (the fixture jar stamps MOCKED_NOW / MOCKED_NOW_PLUS_TWO_SEC on its entries).
@Test
public void testUnJarDoesNotLooseLastModify() throws Exception {
  File unjarDir = getUnjarDir("unjar-lastmod");

  // Unjar everything
  RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
               unjarDir, MATCH_ANY);

  String failureMessage = "Last modify time was lost during unJar";
  assertEquals(failureMessage, MOCKED_NOW, new File(unjarDir, TestRunJar.FOOBAR_TXT).lastModified());
  assertEquals(failureMessage, MOCKED_NOW_PLUS_TWO_SEC, new File(unjarDir, FOOBAZ_TXT).lastModified());
}
static SortKey[] rangeBounds( int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) { // sort the keys first Arrays.sort(samples, comparator); int numCandidates = numPartitions - 1; SortKey[] candidates = new SortKey[numCandidates]; int step = (int) Math.ceil((double) samples.length / numPartitions); int position = step - 1; int numChosen = 0; while (position < samples.length && numChosen < numCandidates) { SortKey candidate = samples[position]; // skip duplicate values if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) { // linear probe for the next distinct value position += 1; } else { candidates[numChosen] = candidate; position += step; numChosen += 1; } } return candidates; }
// 11 samples over 4 partitions gives stride ceil(11/4)=3, so boundaries are the
// keys at sorted positions 2, 5 and 8 ("c", "f", "i").
@Test
public void testRangeBoundsNonDivisible() {
  // step is 3 = ceiling(11/4)
  assertThat(
          SketchUtil.rangeBounds(
              4,
              SORT_ORDER_COMPARTOR,
              new SortKey[] {
                CHAR_KEYS.get("a"),
                CHAR_KEYS.get("b"),
                CHAR_KEYS.get("c"),
                CHAR_KEYS.get("d"),
                CHAR_KEYS.get("e"),
                CHAR_KEYS.get("f"),
                CHAR_KEYS.get("g"),
                CHAR_KEYS.get("h"),
                CHAR_KEYS.get("i"),
                CHAR_KEYS.get("j"),
                CHAR_KEYS.get("k"),
              }))
      .containsExactly(CHAR_KEYS.get("c"), CHAR_KEYS.get("f"), CHAR_KEYS.get("i"));
}
/**
 * Looks up the SAML session bound to the given artifact.
 *
 * @throws SamlSessionException if no session exists for the artifact
 */
public SamlSession findSamlSessionByArtifact(String artifact) throws SamlSessionException {
    return samlSessionRepository.findByArtifact(artifact)
            .orElseThrow(() -> new SamlSessionException("Saml session not found by artifact"));
}
// When the repository returns a session for the artifact, the service must
// unwrap and return it after exactly one repository call.
@Test
public void findSamlSessionByArtifactTest() throws SamlSessionException {
    SamlSession samlSession = new SamlSession(1L);
    samlSession.setServiceUuid("serviceUuid");
    Optional<SamlSession> optionalSamlSession = Optional.of(samlSession);
    when(samlSessionRepositoryMock.findByArtifact(anyString())).thenReturn(optionalSamlSession);

    SamlSession result = samlSessionService.findSamlSessionByArtifact("artifact");

    verify(samlSessionRepositoryMock, times(1)).findByArtifact(anyString());
    assertEquals(result.getServiceUuid(), samlSession.getServiceUuid());
}
/**
 * Deletes the named connector via Kafka Connect's REST API.
 *
 * Treats both 204 (No Content) and 200 (OK) as success and, on success, returns
 * the connector name itself as the response datum (the response body is ignored).
 * Transport failures are retried by withRetries; anything that escapes it is
 * wrapped in a KsqlServerException.
 */
@Override
public ConnectResponse<String> delete(final String connector) {
  try {
    LOG.debug("Issuing request to Kafka Connect at URI {} to delete {}", connectUri, connector);

    final ConnectResponse<String> connectResponse = withRetries(() -> Request
        .delete(resolveUri(String.format("%s/%s", CONNECTORS, connector)))
        .setHeaders(requestHeaders)
        .responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
        .execute(httpClient)
        .handleResponse(
            createHandler(
                ImmutableList.of(HttpStatus.SC_NO_CONTENT, HttpStatus.SC_OK),
                new TypeReference<Object>() {},
                // Map any successful body to the connector name.
                foo -> connector)));

    connectResponse.error()
        .ifPresent(error -> LOG.warn("Could not delete connector: {}.", error));

    return connectResponse;
  } catch (final Exception e) {
    throw new KsqlServerException(e);
  }
}
// DELETE returning 200 (not just 204) must be accepted as success: the datum is
// the connector name and no error is surfaced.
@Test
public void testDeleteWithStatusOKResponse() throws JsonProcessingException {
  // Given:
  WireMock.stubFor(
      WireMock.delete(WireMock.urlEqualTo(pathPrefix + "/connectors/foo"))
          .withHeader(AUTHORIZATION.toString(), new EqualToPattern(AUTH_HEADER))
          .withHeader(CUSTOM_HEADER_NAME, new EqualToPattern(CUSTOM_HEADER_VALUE))
          .willReturn(WireMock.aResponse()
              .withStatus(HttpStatus.SC_OK)
              .withBody("{\"error\":null}"))
  );

  // When:
  final ConnectResponse<String> response = client.delete("foo");

  // Then:
  assertThat(response.datum(), OptionalMatchers.of(is("foo")));
  assertThat("Expected no error!", !response.error().isPresent());
}
/**
 * Password authentication: succeeds when the stored password is empty/null
 * (no password required) or matches the supplied credential exactly.
 *
 * @param user     the configured user, carrying the expected password
 * @param authInfo authentication payload; index 0 is the plaintext password
 */
@Override
public boolean authenticate(final ShardingSphereUser user, final Object[] authInfo) {
    final String suppliedPassword = (String) authInfo[0];
    final String expectedPassword = user.getPassword();
    return Strings.isNullOrEmpty(expectedPassword) || expectedPassword.equals(suppliedPassword);
}
// A matching plaintext password (authInfo[0]) must authenticate successfully.
@Test
void assertAuthenticateSuccess() {
    assertTrue(new PostgreSQLPasswordAuthenticator().authenticate(new ShardingSphereUser("root", "password", ""), new Object[]{"password", null}));
}
/**
 * Resolves the column label for this aggregation projection: a user-supplied
 * alias (when present and not a synthesized derived-column name) wins,
 * otherwise the label is derived from the aggregation function expression.
 * The extract engine applies database-specific identifier casing rules.
 */
@Override
public String getColumnLabel() {
    ProjectionIdentifierExtractEngine extractEngine = new ProjectionIdentifierExtractEngine(databaseType);
    if (getAlias().isPresent() && !DerivedColumn.isDerivedColumnName(getAlias().get().getValueWithQuoteCharacters())) {
        return extractEngine.getIdentifierValue(getAlias().get());
    }
    return extractEngine.getColumnNameFromFunction(type.name(), expression);
}
// Unquoted alias labels follow each database's identifier-casing convention:
// MySQL keeps the alias as-is, PostgreSQL/openGauss lower-case it, Oracle upper-cases it.
@Test
void assertGetColumnLabelWithAliasNoQuote() {
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("DIRECTION_COUNT"),
            TypedSPILoader.getService(DatabaseType.class, "MySQL")).getColumnLabel(), is("DIRECTION_COUNT"));
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("DIRECTION_COUNT"),
            TypedSPILoader.getService(DatabaseType.class, "PostgreSQL")).getColumnLabel(), is("direction_count"));
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("DIRECTION_COUNT"),
            TypedSPILoader.getService(DatabaseType.class, "openGauss")).getColumnLabel(), is("direction_count"));
    assertThat(new AggregationProjection(AggregationType.COUNT, "COUNT( A.\"DIRECTION\" )", new IdentifierValue("direction_count"),
            TypedSPILoader.getService(DatabaseType.class, "Oracle")).getColumnLabel(), is("DIRECTION_COUNT"));
}
/**
 * Produces a legacy-generation config in which every compatibility-breaking
 * setting (both KSQL-level and streams-level) is overridden with the value it
 * had in the supplied original properties, while all other settings come from
 * this config. Used so that existing persistent queries keep their original
 * semantics across upgrades.
 *
 * @param props the original (pre-upgrade) property map
 * @return a merged legacy config
 */
public KsqlConfig overrideBreakingConfigsWithOriginalValues(final Map<String, ?> props) {
  // Interpret the original properties as a legacy-generation config so its
  // defaults resolve the same way they did when the query was created.
  final KsqlConfig originalConfig = new KsqlConfig(ConfigGeneration.LEGACY, props);
  final Map<String, Object> mergedProperties = new HashMap<>(originals());
  COMPATIBLY_BREAKING_CONFIG_DEFS.stream()
      .map(CompatibilityBreakingConfigDef::getName)
      .forEach(
          k -> mergedProperties.put(k, originalConfig.get(k)));
  final Map<String, ConfigValue> mergedStreamConfigProps = new HashMap<>(this.ksqlStreamConfigProps);
  COMPATIBILITY_BREAKING_STREAMS_CONFIGS.stream()
      .map(CompatibilityBreakingStreamsConfig::getName)
      .forEach(
          k -> mergedStreamConfigProps.put(k, originalConfig.ksqlStreamConfigProps.get(k)));
  return new KsqlConfig(ConfigGeneration.LEGACY, mergedProperties, mergedStreamConfigProps);
}
@Test @Ignore // we don't have any compatibility sensitive configs! public void shouldPreserveOriginalCompatibilitySensitiveConfigs() { final Map<String, String> originalProperties = ImmutableMap.of( KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG, "not_the_default"); final KsqlConfig currentConfig = new KsqlConfig(Collections.emptyMap()); final KsqlConfig compatibleConfig = currentConfig.overrideBreakingConfigsWithOriginalValues(originalProperties); assertThat( compatibleConfig.getString(KsqlConfig.KSQL_PERSISTENT_QUERY_NAME_PREFIX_CONFIG), equalTo("not_the_default")); }
/**
 * Converts an Iceberg schema field to its Presto representation, mapping the
 * Iceberg type through the type manager and carrying the doc string over as an
 * optional (Iceberg uses null for "no doc").
 */
public static PrestoIcebergNestedField toPrestoNestedField(NestedField nestedField, TypeManager typeManager)
{
    return new PrestoIcebergNestedField(
            nestedField.isOptional(),
            nestedField.fieldId(),
            nestedField.name(),
            toPrestoType(nestedField.type(), typeManager),
            Optional.ofNullable(nestedField.doc()));
}
// For every supported field type (data provider), the converted field must equal
// the expected Presto-side fixture built from the same id/name.
@Test(dataProvider = "allTypes")
public void testToPrestoNestedField(int id, String name)
{
    // Create a test TypeManager
    TypeManager typeManager = createTestFunctionAndTypeManager();

    // Create a mock NestedField
    Types.NestedField nestedField = nestedField(id, name);
    PrestoIcebergNestedField expectedPrestoNestedField = prestoIcebergNestedField(id, name, typeManager);

    // Convert Iceberg NestedField to Presto Nested Field
    PrestoIcebergNestedField prestoNestedField = toPrestoNestedField(nestedField, typeManager);

    // Check that the result is not null
    assertNotNull(prestoNestedField);
    assertEquals(prestoNestedField, expectedPrestoNestedField);
}
/**
 * Entry-processor invocation is not supported by this adapter; always throws.
 * The @MethodNotAvailable marker lets the test harness skip it.
 */
@Override
@MethodNotAvailable
public <T> T invoke(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments)
        throws EntryProcessorException {
    throw new MethodNotAvailableException();
}
// The adapter must reject invoke() with MethodNotAvailableException.
@Test(expected = MethodNotAvailableException.class)
public void testInvoke() {
    adapter.invoke(23, new ICacheReplaceEntryProcessor(), "value", "newValue");
}
/**
 * Reads the coverage XML at {@code inputFile}, converts it to a TestResult and
 * runs it through the result parser.
 *
 * Note: the null check uses a Java {@code assert}, so it only fires when
 * assertions are enabled (-ea) — the existing tests rely on AssertionError
 * being thrown for a null conversion.
 */
protected TestResult parseTestResult(final String inputFile) throws IOException {
    final String rawXml = fileUtil.readFile(inputFile);
    final TestResult converted = xmlToCamelRouteCoverageConverter.convert(rawXml);
    assert converted != null;
    return testResultParser.parse(converted);
}
// When the converter yields null, the `assert` in parseTestResult must trip
// (tests run with -ea), surfacing as AssertionError.
@Test
public void testParseTestResultNull() throws IllegalAccessException {
    FieldUtils.writeDeclaredField(processor, "xmlToCamelRouteCoverageConverter", converter, true);

    Mockito
        .doReturn(null)
        .when(converter).convert(any(String.class));

    Assertions.assertThrows(AssertionError.class, () -> processor.parseTestResult(inputFile()));
}
/**
 * Strips a trailing bracket argument from an attribute name, e.g.
 * "car.wheel[left]" -&gt; "car.wheel". Names without any brackets are returned
 * unchanged; unbalanced or misplaced brackets are rejected.
 *
 * @throws IllegalArgumentException when brackets are present but malformed
 */
public static String extractAttributeNameNameWithoutArguments(String attributeNameWithArguments) {
    int openBracket = StringUtil.lastIndexOf(attributeNameWithArguments, '[');
    int closeBracket = StringUtil.lastIndexOf(attributeNameWithArguments, ']');
    // Well-formed argument suffix: '[' after at least one name character,
    // matched by a later ']'.
    if (openBracket > 0 && closeBracket > openBracket) {
        return attributeNameWithArguments.substring(0, openBracket);
    }
    // No brackets at all: the name is already plain.
    if (openBracket < 0 && closeBracket < 0) {
        return attributeNameWithArguments;
    }
    throw new IllegalArgumentException("Wrong argument input passed " + attributeNameWithArguments);
}
// An opening bracket with no closing bracket is malformed and must be rejected.
@Test(expected = IllegalArgumentException.class)
public void extractAttributeName_wrongArguments_noClosing() {
    extractAttributeNameNameWithoutArguments("car.wheel[left");
}
/**
 * Strips all formatting tags matched by TAG_REGEXP from the string, except the
 * escape tags {@code <lt>} and {@code <gt>}, which are preserved verbatim.
 */
public static String removeFormattingTags(String str) {
	StringBuffer result = new StringBuffer();
	Matcher matcher = TAG_REGEXP.matcher(str);
	while (matcher.find()) {
		// Drop the matched tag (empty replacement), copying the text before it.
		matcher.appendReplacement(result, "");
		String tag = matcher.group(0);
		// Escape tags survive the stripping.
		if ("<lt>".equals(tag) || "<gt>".equals(tag)) {
			result.append(tag);
		}
	}
	matcher.appendTail(result);
	return result.toString();
}
// Formatting tags (col/img/s, malformed or not) are removed; literal '<' text and
// the <lt>/<gt> escapes pass through untouched.
@Test
public void removeFormattingTags() {
	assertEquals("Test", Text.removeFormattingTags("<col=FFFFFF>Test</col>"));
	assertEquals("Test", Text.removeFormattingTags("<img=1><s>Test</s>"));
	assertEquals("Zezima (level-126)", Text.removeFormattingTags("<col=ffffff><img=2>Zezima<col=00ffff> (level-126)"));
	assertEquals("", Text.removeFormattingTags("<colrandomtext test>"));
	// An unterminated tag swallows everything up to the next '>'.
	assertEquals("Not so much.", Text.removeFormattingTags("<col=FFFFFF This is a very special message.</col>Not so much."));
	assertEquals("Use Item -> Man", Text.removeFormattingTags("Use Item -> Man"));
	assertEquals("a < b", Text.removeFormattingTags("a < b"));
	assertEquals("a <lt> b", Text.removeFormattingTags("a <lt> b"));
	assertEquals("Remove no tags", Text.removeFormattingTags("Remove no tags"));
}
/** Returns the tracker recording this context's execution states. */
public ExecutionStateTracker getExecutionStateTracker() {
  return executionStateTracker;
}
// Activating the execution state tracker must invoke registered context
// activation observers — and only the enabled ones.
@Test
public void testContextActivationObserverActivation() throws Exception {
  BatchModeExecutionContext executionContext =
      BatchModeExecutionContext.forTesting(PipelineOptionsFactory.create(), "testStage");
  Closeable c = executionContext.getExecutionStateTracker().activate();
  c.close();
  // AutoRegistrationClass's variable was modified to 'true'.
  assertTrue(AutoRegistrationClass.WAS_CALLED);
  // AutoRegistrationClassNotActive class is not registered as registrar for the same is disabled.
  assertFalse(AutoRegistrationClassNotActive.WAS_CALLED);
}
/**
 * Configures this CSV reader to produce 1-tuples whose single field has the
 * given type, and returns the resulting data source.
 *
 * @param type0 the class of the tuple's only field
 */
public <T0> DataSource<Tuple1<T0>> types(Class<T0> type0) {
    TupleTypeInfo<Tuple1<T0>> tupleType = TupleTypeInfo.getBasicAndBasicValueTupleTypeInfo(type0);
    // Build the input format over the configured path, honoring the field mask.
    CsvInputFormat<Tuple1<T0>> inputFormat = new TupleCsvInputFormat<Tuple1<T0>>(path, tupleType, this.includedMask);
    configureInputFormat(inputFormat);
    return new DataSource<Tuple1<T0>>(
            executionContext, inputFormat, tupleType, Utils.getCallLocationName());
}
// Value types (StringValue, IntValue, ...) must be accepted by types(...) and
// produce a Tuple8-typed data source.
@Test
void testWithValueType() {
    CsvReader reader = getCsvReader();
    DataSource<
                    Tuple8<
                            StringValue,
                            BooleanValue,
                            ByteValue,
                            ShortValue,
                            IntValue,
                            LongValue,
                            FloatValue,
                            DoubleValue>>
            items =
                    reader.types(
                            StringValue.class,
                            BooleanValue.class,
                            ByteValue.class,
                            ShortValue.class,
                            IntValue.class,
                            LongValue.class,
                            FloatValue.class,
                            DoubleValue.class);
    TypeInformation<?> info = items.getType();

    assertThat(info.isTupleType()).isTrue();
    assertThat(info.getTypeClass()).isEqualTo(Tuple8.class);
}
@Override public String toString() { StringBuilder b = new StringBuilder(); if (StringUtils.isNotBlank(protocol)) { b.append(protocol); b.append("://"); } if (StringUtils.isNotBlank(host)) { b.append(host); } if (!isPortDefault() && port != -1) { b.append(':'); b.append(port); } if (StringUtils.isNotBlank(path)) { // If no scheme/host/port, leave the path as is if (b.length() > 0 && !path.startsWith("/")) { b.append('/'); } b.append(encodePath(path)); } if (queryString != null && !queryString.isEmpty()) { b.append(queryString.toString()); } if (fragment != null) { b.append("#"); b.append(encodePath(fragment)); } return b.toString(); }
// Verifies leading/trailing whitespace is stripped when parsing a URL.
@Test public void testURLWithLeadingTrailingSpaces() { s = " http://www.example.com/path "; t = "http://www.example.com/path"; assertEquals(t, new HttpURL(s).toString()); }
@Nonnull public <K, V> KafkaProducer<K, V> getProducer(@Nullable String transactionalId) { if (getConfig().isShared()) { if (transactionalId != null) { throw new IllegalArgumentException("Cannot use transactions with shared " + "KafkaProducer for DataConnection" + getConfig().getName()); } retain(); //noinspection unchecked return (KafkaProducer<K, V>) producerSupplier.get(); } else { if (transactionalId != null) { @SuppressWarnings({"rawtypes", "unchecked"}) Map<String, Object> castProperties = (Map) getConfig().getProperties(); Map<String, Object> copy = new HashMap<>(castProperties); copy.put("transactional.id", transactionalId); return new KafkaProducer<>(copy); } else { return new KafkaProducer<>(getConfig().getProperties()); } } }
// Verifies the shared producer is only truly closed after the connection is released
// and all handles are closed; subsequent use fails with a KafkaException.
@Test public void shared_producer_should_be_closed_after_all_close() { kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport); Producer<Object, Object> p1 = kafkaDataConnection.getProducer(null); Producer<Object, Object> p2 = kafkaDataConnection.getProducer(null); kafkaDataConnection.release(); p1.close(); p2.close(); assertThatThrownBy(() -> p1.partitionsFor("my-topic")) .isInstanceOf(KafkaException.class) .hasMessage("Requested metadata update after close"); }
/**
 * Stores {@code value} under {@code key}, replacing any previous mapping.
 * The value is stored via its {@code toString()} representation.
 *
 * @param key   property key, must not be null
 * @param value property value, must not be null; stored as a String
 * @return this environment, for chaining
 */
public Environment set(@NonNull String key, @NonNull Object value) {
    // put() already replaces an existing mapping, so the former remove(key) call
    // before it was redundant and has been dropped.
    props.put(key, value.toString());
    return this;
}
// Verifies set() stores string values and add()/getIntOrNull() round-trips integers.
@Test public void testSet() { Environment environment = Environment.empty(); environment.set("name", "biezhi"); assertEquals("biezhi", environment.getOrNull("name")); environment.add("age", 20); assertEquals(Integer.valueOf(20), environment.getIntOrNull("age")); }
/**
 * Deserializes the given JSON bytes into an instance of {@code cls}.
 *
 * @param json raw JSON content
 * @param cls  target type to map the JSON onto
 * @return the deserialized instance
 * @throws NacosDeserializationException if parsing or mapping fails
 */
public static <T> T toObj(byte[] json, Class<T> cls) {
    try {
        return mapper.readValue(json, cls);
    } catch (Exception cause) {
        // Wrap every parsing/mapping failure in the project's deserialization exception.
        throw new NacosDeserializationException(cls, cause);
    }
}
// Verifies toObj() throws when handed a null input stream.
@Test void testToObject7() { assertThrows(Exception.class, () -> { JacksonUtils.toObj((ByteArrayInputStream) null, BigDecimal.class); }); }
/**
 * Returns every partition of every topic whose name matches the configured pattern,
 * refreshing the cached set of matched topic names as a side effect.
 *
 * @param consumer consumer used to list the available topics
 * @return partitions of all pattern-matching topics
 */
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
    // Reset the cached topic names before re-evaluating the pattern.
    topics.clear();
    final Set<TopicPartition> matched = new HashSet<>();
    consumer.listTopics().forEach((topicName, partitions) -> {
        if (!pattern.matcher(topicName).matches()) {
            return;
        }
        topics.add(topicName);
        for (PartitionInfo info : partitions) {
            matched.add(new TopicPartition(info.topic(), info.partition()));
        }
    });
    return matched;
}
// Verifies only partitions of topics matching the regex pass the filter.
@Test public void testFilter() { Pattern pattern = Pattern.compile("test-\\d+"); PatternTopicFilter filter = new PatternTopicFilter(pattern); String matchingTopicOne = "test-1"; String matchingTopicTwo = "test-11"; String unmatchedTopic = "unmatched"; Map<String, List<PartitionInfo>> allTopics = new HashMap<>(); allTopics.put(matchingTopicOne, Collections.singletonList(createPartitionInfo(matchingTopicOne, 0))); List<PartitionInfo> testTwoPartitions = new ArrayList<>(); testTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 0)); testTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 1)); allTopics.put(matchingTopicTwo, testTwoPartitions); allTopics.put(unmatchedTopic, Collections.singletonList(createPartitionInfo(unmatchedTopic, 0))); when(consumerMock.listTopics()).thenReturn(allTopics); Set<TopicPartition> matchedPartitions = filter.getAllSubscribedPartitions(consumerMock); assertThat("Expected topic partitions matching the pattern to be passed by the filter", matchedPartitions, containsInAnyOrder(new TopicPartition(matchingTopicOne, 0), new TopicPartition(matchingTopicTwo, 0), new TopicPartition(matchingTopicTwo, 1))); }
/**
 * Renders {@code v} as a fixed-width, 16-character, lower-case hexadecimal string.
 *
 * @param v value to format
 * @return zero-padded 16-char lower-hex representation
 */
public static String toLowerHex(long v) {
    // Reuse a recycled char buffer to avoid per-call allocation.
    final char[] buffer = RecyclableBuffers.parseBuffer();
    writeHexLong(buffer, 0, v);
    return new String(buffer, 0, 16);
}
// Verifies zero is padded to the full 16-character width.
@Test void toLowerHex_fixedLength() { assertThat(toLowerHex(0L)).isEqualTo("0000000000000000"); }
// Restores the file to the version identified by its versionId attribute using the
// Dropbox restore API; Dropbox failures are mapped to a BackgroundException.
@Override public void revert(final Path file) throws BackgroundException { try { new DbxUserFilesRequests(session.getClient(file)).restore(containerService.getKey(file), file.attributes().getVersionId()); } catch(DbxException e) { throw new DropboxExceptionMappingService().map("Failure to write attributes of {0}", e, file); } }
// Integration test: writing new content creates a version, revert() restores the
// initial one; version listing, delete restrictions, and cleanup are asserted along
// the way. Requires a live Dropbox session.
@Test public void testRevert() throws Exception { final Path directory = new DropboxDirectoryFeature(session).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final DropboxAttributesFinderFeature f = new DropboxAttributesFinderFeature(session); final Path test = new DropboxTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); assertEquals(test.attributes().getVersionId(), new DropboxAttributesFinderFeature(session).find(test).getVersionId()); final DropboxVersioningFeature feature = new DropboxVersioningFeature(session); assertEquals(0, feature.list(test, new DisabledListProgressListener()).size()); final PathAttributes initialAttributes = new PathAttributes(test.attributes()); final String initialVersion = test.attributes().getVersionId(); final byte[] content = RandomUtils.nextBytes(32769); final TransferStatus status = new TransferStatus(); status.setLength(content.length); status.setExists(true); final DropboxWriteFeature writer = new DropboxWriteFeature(session); final StatusOutputStream<Metadata> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); assertNotEquals(initialVersion, new DropboxAttributesFinderFeature(session).toAttributes(out.getStatus()).getVersionId()); { final AttributedList<Path> versions = feature.list(test.withAttributes(new DropboxAttributesFinderFeature(session).toAttributes(out.getStatus())), new DisabledListProgressListener()); assertEquals(1, versions.size()); assertEquals(new Path(test).withAttributes(initialAttributes), versions.get(0)); assertEquals(initialVersion, versions.get(0).attributes().getVersionId()); } final PathAttributes updated = new DropboxAttributesFinderFeature(session).find(test.withAttributes(new 
DropboxAttributesFinderFeature(session).toAttributes(out.getStatus()))); assertNotEquals(initialVersion, updated.getVersionId()); feature.revert(new Path(test).withAttributes(initialAttributes)); // Delete versions permanently try { final List<Path> versions = feature.list(new Path(test).withAttributes(new DropboxAttributesFinderFeature(session).find(test)), new DisabledListProgressListener()).toList(); assertEquals(2, versions.size()); assertEquals(status.getResponse().getVersionId(), versions.get(0).attributes().getVersionId()); assertEquals(initialVersion, versions.get(1).attributes().getVersionId()); for(Path d : versions) { assertFalse(new DropboxThresholdDeleteFeature(session).isSupported(d)); assertFalse(new DropboxBatchDeleteFeature(session).isSupported(d)); assertFalse(new DropboxDeleteFeature(session).isSupported(d)); } new DropboxDeleteFeature(session).delete(versions, new DisabledPasswordCallback(), new Delete.DisabledCallback()); fail(); } catch(InteroperabilityException e) { // Expected } for(Path version : new DropboxListService(session).list(directory, new DisabledListProgressListener())) { new DropboxDeleteFeature(session).delete(Collections.singletonList(version), new DisabledLoginCallback(), new Delete.DisabledCallback()); } new DropboxDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * (Re)initializes the global query-rewriter chain from a comma-separated list of
 * rewriter class names, falling back to the default chain when {@code null}.
 * The shared rewriter list is swapped atomically under the parser class lock.
 *
 * @param queryRewritersClassNamesStr comma-separated rewriter class names, or null for defaults
 */
public static void init(String queryRewritersClassNamesStr) {
    final List<String> classNames;
    if (queryRewritersClassNamesStr == null) {
        classNames = DEFAULT_QUERY_REWRITERS_CLASS_NAMES;
    } else {
        classNames = Arrays.asList(queryRewritersClassNamesStr.split(","));
    }
    final List<QueryRewriter> rewriters = getQueryRewriters(classNames);
    synchronized (CalciteSqlParser.class) {
        CalciteSqlParser.QUERY_REWRITERS.clear();
        CalciteSqlParser.QUERY_REWRITERS.addAll(rewriters);
    }
}
// Verifies the default rewriter chain, a custom init ordering, and reset to defaults.
@Test public void testQueryRewriters() { // Default behavior QueryRewriterFactory.init(null); Assert.assertEquals(QUERY_REWRITERS.size(), 6); Assert.assertTrue(QUERY_REWRITERS.get(0) instanceof CompileTimeFunctionsInvoker); Assert.assertTrue(QUERY_REWRITERS.get(1) instanceof SelectionsRewriter); Assert.assertTrue(QUERY_REWRITERS.get(2) instanceof PredicateComparisonRewriter); Assert.assertTrue(QUERY_REWRITERS.get(3) instanceof AliasApplier); Assert.assertTrue(QUERY_REWRITERS.get(4) instanceof OrdinalsUpdater); Assert.assertTrue(QUERY_REWRITERS.get(5) instanceof NonAggregationGroupByToDistinctQueryRewriter); // Check init with other configs QueryRewriterFactory.init("org.apache.pinot.sql.parsers.rewriter.PredicateComparisonRewriter," + "org.apache.pinot.sql.parsers.rewriter.CompileTimeFunctionsInvoker," + "org.apache.pinot.sql.parsers.rewriter.SelectionsRewriter"); Assert.assertEquals(QUERY_REWRITERS.size(), 3); Assert.assertTrue(QUERY_REWRITERS.get(0) instanceof PredicateComparisonRewriter); Assert.assertTrue(QUERY_REWRITERS.get(1) instanceof CompileTimeFunctionsInvoker); Assert.assertTrue(QUERY_REWRITERS.get(2) instanceof SelectionsRewriter); // Revert back to default behavior QueryRewriterFactory.init(null); Assert.assertEquals(QUERY_REWRITERS.size(), 6); Assert.assertTrue(QUERY_REWRITERS.get(0) instanceof CompileTimeFunctionsInvoker); Assert.assertTrue(QUERY_REWRITERS.get(1) instanceof SelectionsRewriter); Assert.assertTrue(QUERY_REWRITERS.get(2) instanceof PredicateComparisonRewriter); Assert.assertTrue(QUERY_REWRITERS.get(3) instanceof AliasApplier); Assert.assertTrue(QUERY_REWRITERS.get(4) instanceof OrdinalsUpdater); Assert.assertTrue(QUERY_REWRITERS.get(5) instanceof NonAggregationGroupByToDistinctQueryRewriter); }
/**
 * Casts an {@link Integer} to a decimal with the given precision and scale.
 * Null-safe: a null input yields a null result. Delegates to the long overload.
 *
 * @param value     value to cast, may be null
 * @param precision target decimal precision
 * @param scale     target decimal scale
 * @return the decimal value, or null if {@code value} was null
 */
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
    return value == null ? null : cast(value.longValue(), precision, scale);
}
// Verifies casting a value that exceeds the precision/scale raises a numeric overflow.
@Test public void shouldNotCastIntTooNegative() { // When: final Exception e = assertThrows( ArithmeticException.class, () -> cast(-10, 2, 1) ); // Then: assertThat(e.getMessage(), containsString("Numeric field overflow")); }
// Obfuscates (not encrypts) the given clear text using the base64 algorithm;
// the result carries the algorithm marker prefix (e.g. "{b64}...").
public String scramble(String clearText) { return encrypt(BASE64_ALGORITHM, clearText); }
// Verifies base64 scrambling prefixes the "{b64}" marker to the encoded text.
@Test public void scramble() { Encryption encryption = new Encryption(null); assertThat(encryption.scramble("foo")).isEqualTo("{b64}Zm9v"); }
/**
 * Looks up a registered pipeconf by its identifier.
 *
 * @param id pipeconf identifier
 * @return the pipeconf, or empty if none is registered under that id
 */
@Override
public Optional<PiPipeconf> getPipeconf(PiPipeconfId id) {
    final PiPipeconf found = pipeconfs.get(id);
    return Optional.ofNullable(found);
}
// Verifies a registered pipeconf can be retrieved by its id.
@Test public void getPipeconf() { mgr.register(pipeconf); assertEquals("Returned PiPipeconf is not correct", pipeconf, mgr.getPipeconf(pipeconf.id()).get()); }
// Prepares and sends one fetch request per target node, returning the number of
// requests issued. Success/failure callbacks re-acquire the Fetcher monitor so
// response handling is serialized with other synchronized fetcher operations.
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
// Verifies fetcher lag metrics: NaN before any fetch, hw-based lag after empty and
// non-empty responses, and metric de-registration on unsubscribe.
@Test public void testFetcherMetrics() { buildFetcher(); assignFromUser(singleton(tp0)); subscriptions.seek(tp0, 0); MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax); Map<String, String> tags = new HashMap<>(); tags.put("topic", tp0.topic()); tags.put("partition", String.valueOf(tp0.partition())); MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric); // recordsFetchLagMax should be initialized to NaN assertEquals(Double.NaN, (Double) recordsFetchLagMax.metricValue(), EPSILON); // recordsFetchLagMax should be hw - fetchOffset after receiving an empty FetchResponse fetchRecords(tidp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 0); assertEquals(100, (Double) recordsFetchLagMax.metricValue(), EPSILON); KafkaMetric partitionLag = allMetrics.get(partitionLagMetric); assertEquals(100, (Double) partitionLag.metricValue(), EPSILON); // recordsFetchLagMax should be hw - offset of the last message after receiving a non-empty FetchResponse MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); fetchRecords(tidp0, builder.build(), Errors.NONE, 200L, 0); assertEquals(197, (Double) recordsFetchLagMax.metricValue(), EPSILON); assertEquals(197, (Double) partitionLag.metricValue(), EPSILON); // verify de-registration of partition lag subscriptions.unsubscribe(); sendFetches(); assertFalse(allMetrics.containsKey(partitionLagMetric)); }
// Resolves the output schema of an execution step by dispatching to the handler
// registered for the step's concrete class; an unregistered class is a programming
// error and raises IllegalStateException.
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) { return Optional.ofNullable(HANDLERS.get(step.getClass())) .map(h -> h.handle(this, schema, step)) .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass())); }
// Verifies a table filter step resolves to the unchanged input schema.
@Test public void shouldResolveSchemaForTableFilter() { // Given: final TableFilter<?> step = new TableFilter<>( PROPERTIES, tableSource, mock(Expression.class) ); // When: final LogicalSchema result = resolver.resolve(step, SCHEMA); // Then: assertThat(result, is(SCHEMA)); }
/**
 * Describes the parameters this action accepts: a single "query" string that is
 * forwarded to the web search engine.
 *
 * @return an immutable single-element parameter list
 */
@Override
public List<ActionParameter> getParameters() {
    final ActionParameter queryParameter =
            ActionParameter.from("query", "The query to pass to the web search engine.");
    return List.of(queryParameter);
}
// Verifies the action exposes exactly one "query" parameter with its description.
@Test void testGetParameters() { List<ActionParameter> parameters = searchWebAction.getParameters(); assertEquals(1, parameters.size()); assertEquals("query", parameters.get(0).getName()); assertEquals("The query to pass to the web search engine.", parameters.get(0).getDescription()); }
// Long-polling endpoint for config change notifications. Assembles the watch keys for
// the client's app/cluster/namespace, answers immediately with the latest release
// message if the client's notificationId is out of date, and otherwise parks a
// DeferredResult keyed by every watch key until a change arrives or the poll times out
// (304 on timeout). The entity manager is closed early because the request may be
// held open far longer than a DB connection should be.
@GetMapping public DeferredResult<ResponseEntity<ApolloConfigNotification>> pollNotification( @RequestParam(value = "appId") String appId, @RequestParam(value = "cluster") String cluster, @RequestParam(value = "namespace", defaultValue = ConfigConsts.NAMESPACE_APPLICATION) String namespace, @RequestParam(value = "dataCenter", required = false) String dataCenter, @RequestParam(value = "notificationId", defaultValue = "-1") long notificationId, @RequestParam(value = "ip", required = false) String clientIp) { //strip out .properties suffix namespace = namespaceUtil.filterNamespaceName(namespace); Set<String> watchedKeys = watchKeysUtil.assembleAllWatchKeys(appId, cluster, namespace, dataCenter); DeferredResult<ResponseEntity<ApolloConfigNotification>> deferredResult = new DeferredResult<>(TIMEOUT, NOT_MODIFIED_RESPONSE); //check whether client is out-dated ReleaseMessage latest = releaseMessageService.findLatestReleaseMessageForMessages(watchedKeys); /** * Manually close the entity manager. 
* Since for async request, Spring won't do so until the request is finished, * which is unacceptable since we are doing long polling - means the db connection would be hold * for a very long time */ entityManagerUtil.closeEntityManager(); if (latest != null && latest.getId() != notificationId) { deferredResult.setResult(new ResponseEntity<>( new ApolloConfigNotification(namespace, latest.getId()), HttpStatus.OK)); } else { //register all keys for (String key : watchedKeys) { this.deferredResults.put(key, deferredResult); } deferredResult .onTimeout(() -> logWatchedKeys(watchedKeys, "Apollo.LongPoll.TimeOutKeys")); deferredResult.onCompletion(() -> { //unregister all keys for (String key : watchedKeys) { deferredResults.remove(key, deferredResult); } logWatchedKeys(watchedKeys, "Apollo.LongPoll.CompletedKeys"); }); logWatchedKeys(watchedKeys, "Apollo.LongPoll.RegisteredKeys"); logger.debug("Listening {} from appId: {}, cluster: {}, namespace: {}, datacenter: {}", watchedKeys, appId, cluster, namespace, dataCenter); } return deferredResult; }
// Verifies an out-dated client notification id yields an immediate 200 response
// carrying the namespace and the latest notification id.
@Test public void testPollNotificationWithDefaultNamespaceWithNotificationIdOutDated() throws Exception { long notificationId = someNotificationId + 1; ReleaseMessage someReleaseMessage = mock(ReleaseMessage.class); String someWatchKey = "someKey"; Set<String> watchKeys = Sets.newHashSet(someWatchKey); when(watchKeysUtil .assembleAllWatchKeys(someAppId, someCluster, defaultNamespace, someDataCenter)) .thenReturn( watchKeys); when(someReleaseMessage.getId()).thenReturn(notificationId); when(releaseMessageService.findLatestReleaseMessageForMessages(watchKeys)) .thenReturn(someReleaseMessage); DeferredResult<ResponseEntity<ApolloConfigNotification>> deferredResult = controller .pollNotification(someAppId, someCluster, defaultNamespace, someDataCenter, someNotificationId, someClientIp); ResponseEntity<ApolloConfigNotification> result = (ResponseEntity<ApolloConfigNotification>) deferredResult.getResult(); assertEquals(HttpStatus.OK, result.getStatusCode()); assertEquals(defaultNamespace, result.getBody().getNamespaceName()); assertEquals(notificationId, result.getBody().getNotificationId()); }
/**
 * Resolves each voter id to its node endpoint for the given listener.
 *
 * @param voterIds     ids of the voters to resolve
 * @param listenerName listener whose endpoint is required
 * @return the set of resolved nodes
 * @throws IllegalArgumentException if any voter lacks an endpoint for the listener
 */
public Set<Node> voterNodes(Stream<Integer> voterIds, ListenerName listenerName) {
    return voterIds
        .map(voterId -> {
            Optional<Node> node = voterNode(voterId, listenerName);
            if (!node.isPresent()) {
                throw new IllegalArgumentException(
                    String.format(
                        "Unable to find endpoint for voter %d and listener %s in %s",
                        voterId, listenerName, voters
                    )
                );
            }
            return node.get();
        })
        .collect(Collectors.toSet());
}
// Verifies node resolution for known voters and failures for an unknown listener or id.
@Test void testVoterNodes() { VoterSet voterSet = VoterSet.fromMap(voterMap(IntStream.of(1, 2, 3), true)); assertEquals( Utils.mkSet(new Node(1, "localhost", 9991), new Node(2, "localhost", 9992)), voterSet.voterNodes(IntStream.of(1, 2).boxed(), DEFAULT_LISTENER_NAME) ); assertThrows( IllegalArgumentException.class, () -> voterSet.voterNodes(IntStream.of(1, 2).boxed(), ListenerName.normalised("MISSING")) ); assertThrows( IllegalArgumentException.class, () -> voterSet.voterNodes(IntStream.of(1, 4).boxed(), DEFAULT_LISTENER_NAME) ); }
// Parses a worker identity from its protobuf form, delegating version-aware parsing
// to the Parsers registry; malformed payloads raise ProtoParsingException.
public static WorkerIdentity fromProto(alluxio.grpc.WorkerIdentity proto) throws ProtoParsingException { return Parsers.fromProto(proto); }
// Verifies the v0 parser rejects a legacy identifier longer than 8 bytes.
@Test public void legacyInvalidIdentifier() { alluxio.grpc.WorkerIdentity identityProto = alluxio.grpc.WorkerIdentity.newBuilder() .setVersion(0) .setIdentifier(ByteString.copyFrom("a byte string longer than 8 bytes".getBytes())) .build(); assertThrows(ProtoParsingException.class, () -> WorkerIdentity.ParserV0.INSTANCE.fromProto(identityProto)); }
// Sends a record, beginning a transaction first if needed. A recoverable
// KafkaException (e.g. wrapping a fence) is unwrapped and rethrown as
// TaskMigratedException so the caller can rejoin; anything else becomes a
// StreamsException naming the target topic.
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record, final Callback callback) { maybeBeginTransaction(); try { return producer.send(record, callback); } catch (final KafkaException uncaughtException) { if (isRecoverable(uncaughtException)) { // producer.send() call may throw a KafkaException which wraps a FencedException, // in this case we should throw its wrapped inner cause so that it can be // captured and re-wrapped as TaskMigratedException throw new TaskMigratedException( formatException("Producer got fenced trying to send a record"), uncaughtException.getCause() ); } else { throw new StreamsException( formatException(String.format("Error encountered trying to send record to topic %s", record.topic())), uncaughtException ); } } }
// Verifies a fatal beginTransaction error propagates unchanged from send().
@Test public void shouldFailOnEosBeginTxnFatal() { eosAlphaMockProducer.beginTransactionException = new RuntimeException("KABOOM!"); // calling `send()` implicitly starts a new transaction final RuntimeException thrown = assertThrows( RuntimeException.class, () -> eosAlphaStreamsProducer.send(null, null)); assertThat(thrown.getMessage(), is("KABOOM!")); }
/**
 * Reroutes the record to a new topic when its current topic matches the configured
 * regex (applying the replacement); otherwise returns the record unchanged.
 *
 * @param record record whose topic is tested against the regex
 * @return a copy of the record addressed to the rewritten topic, or the original record
 */
@Override
public R apply(R record) {
    final Matcher matcher = regex.matcher(record.topic());
    if (!matcher.matches()) {
        log.trace("Not rerouting topic '{}' as it does not match the configured regex", record.topic());
        return record;
    }
    final String rewrittenTopic = matcher.replaceFirst(replacement);
    log.trace("Rerouting from topic '{}' to new topic '{}'", record.topic(), rewrittenTopic);
    return record.newRecord(rewrittenTopic, record.kafkaPartition(), record.keySchema(),
            record.key(), record.valueSchema(), record.value(), record.timestamp());
}
// Verifies a literal pattern is rewritten to the static replacement topic.
@Test public void staticReplacement() { assertEquals("bar", apply("foo", "bar", "foo")); }
// Matches the given specs relative to the current working directory by delegating
// to the two-argument overload with the absolute path of ".".
@Override protected List<MatchResult> match(List<String> specs) throws IOException { return match(new File(".").getAbsolutePath(), specs); }
// Verifies exact-name matching does not pick up files that merely share the prefix.
@Test public void testMatchExact() throws Exception { List<String> expected = ImmutableList.of(temporaryFolder.newFile("a").toString()); temporaryFolder.newFile("aa"); temporaryFolder.newFile("ab"); List<MatchResult> matchResults = localFileSystem.match( ImmutableList.of(temporaryFolder.getRoot().toPath().resolve("a").toString())); assertThat( toFilenames(matchResults), containsInAnyOrder(expected.toArray(new String[expected.size()]))); }
/**
 * Returns whether {@code t} or any throwable in its cause chain is an instance of
 * {@code exceptionClass} (the class itself or a subclass).
 *
 * @param t              throwable whose cause chain is inspected; must not be null
 * @param exceptionClass exception type to search for
 * @return true if {@code t} or one of its causes is an instance of {@code exceptionClass}
 */
public static boolean hasCause(Throwable t, Class<? extends Throwable> exceptionClass) {
    // Fix: the original check was inverted — t.getClass().isAssignableFrom(exceptionClass)
    // only matched exact classes (and, perversely, superclasses of the target), so e.g.
    // hasCause(new IllegalStateException(), RuntimeException.class) returned false.
    // isInstance() expresses the intended "t instanceof exceptionClass" test.
    if (exceptionClass.isInstance(t)) {
        return true;
    }
    if (t.getCause() != null) {
        return hasCause(t.getCause(), exceptionClass);
    }
    return false;
}
// Verifies hasCause finds the target exception type nested in the cause chain.
@Test void hasCauseReturnsTrueIfExceptionHasGivenCause() { final boolean hasInterruptedExceptionAsCause = Exceptions.hasCause(new RuntimeException(new InterruptedException()), InterruptedException.class); assertThat(hasInterruptedExceptionAsCause).isTrue(); }
// Returns the priority/resource mapped to the given process spec, first attempting
// (as a side effect) to adapt and register the spec if it has not been seen yet;
// empty if the spec could not be adapted (e.g. exceeds the maximum container resource).
Optional<PriorityAndResource> getPriorityAndResource( final TaskExecutorProcessSpec taskExecutorProcessSpec) { tryAdaptAndAddTaskExecutorResourceSpecIfNotExist(taskExecutorProcessSpec); return Optional.ofNullable( taskExecutorProcessSpecToPriorityAndResource.get(taskExecutorProcessSpec)); }
// Verifies a process spec exceeding the max container resource yields no mapping.
@Test void testMaxContainerResource() { final TaskExecutorProcessSpecContainerResourcePriorityAdapter adapter = getAdapter(); assertThat(adapter.getPriorityAndResource(TASK_EXECUTOR_PROCESS_SPEC_EXCEED_MAX)) .isNotPresent(); }
/**
 * Formats the given value via its string representation by delegating to the
 * String overload; a null input yields the empty string.
 *
 * @param x value to format, may be null
 * @return formatted text, or the empty string for null
 */
public static String format(Object x) {
    return x == null ? StrUtil.EMPTY : format(x.toString());
}
// Verifies formatting a null value yields the empty string.
@Test public void testFormatNull() { // 测试传入null值的情况 String result = NumberWordFormatter.format(null); assertEquals("", result); }
// Atomically reads and increments the checkpoint counter stored in the leader
// ConfigMap via a check-and-update round trip: the update only applies while this
// counter still holds the leadership (isValidOperation); otherwise the ConfigMap is
// left untouched and a KubernetesException is raised.
@Override public long getAndIncrement() throws Exception { final AtomicLong current = new AtomicLong(); final boolean updated = kubeClient .checkAndUpdateConfigMap( configMapName, configMap -> { if (isValidOperation(configMap)) { final long currentValue = getCurrentCounter(configMap); current.set(currentValue); configMap .getData() .put( CHECKPOINT_COUNTER_KEY, String.valueOf(currentValue + 1)); return Optional.of(configMap); } return Optional.empty(); }) .get(); if (updated) { return current.get(); } else { throw new KubernetesException( "Failed to update ConfigMap " + configMapName + " since current KubernetesCheckpointIDCounter does not have the leadership."); } }
// Verifies getAndIncrement returns the current value and bumps the stored counter.
@Test void testGetAndIncrement() throws Exception { new Context() { { runTest( () -> { leaderCallbackGrantLeadership(); final KubernetesCheckpointIDCounter checkpointIDCounter = new KubernetesCheckpointIDCounter( flinkKubeClient, LEADER_CONFIGMAP_NAME, LOCK_IDENTITY); checkpointIDCounter.setCount(100L); final long counter = checkpointIDCounter.getAndIncrement(); assertThat(counter).isEqualTo(100L); assertThat(checkpointIDCounter.get()).isEqualTo(101L); }); } }; }
// Extracts the organization portion (regex capture group 1) from a group name;
// empty when the name does not match the expected pattern.
public static Optional<String> extractOrganizationName(String groupName) { return extractRegexGroupIfMatches(groupName, 1); }
// Verifies malformed group names yield no organization name.
@Test public void extractOrganizationName_whenNameIsIncorrect_returnEmpty() { assertThat(GithubTeamConverter.extractOrganizationName("Org1")).isEmpty(); assertThat(GithubTeamConverter.extractOrganizationName("Org1/")).isEmpty(); }
// Deserializes the raw config XML into the editable config, gives the optional
// callback a chance to observe/adjust it, then preprocesses and validates to obtain
// the effective config. Returns both forms in a GoConfigHolder.
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception { CruiseConfig configForEdit; CruiseConfig config; LOGGER.debug("[Config Save] Loading config holder"); configForEdit = deserializeConfig(content); if (callback != null) callback.call(configForEdit); config = preprocessAndValidate(configForEdit); return new GoConfigHolder(config, configForEdit); }
// Verifies pipelines loaded from a config file carry a FileConfigOrigin.
@Test void shouldSetConfigOriginInPipeline_AfterLoadingConfigFile() throws Exception { CruiseConfig cruiseConfig = xmlLoader.loadConfigHolder(CONFIG).config; PipelineConfig pipelineConfig1 = cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("pipeline1")); assertThat(pipelineConfig1.getOrigin()).isEqualTo(new FileConfigOrigin()); }
/**
 * Encodes the transaction (using the EIP-4844 encoding when applicable), signs the
 * encoded bytes with the given credentials, and returns the signed, encoded result.
 *
 * @param rawTransaction transaction to sign
 * @param credentials    key pair used to produce the signature
 * @return the encoded transaction including its signature data
 */
public static byte[] signMessage(RawTransaction rawTransaction, Credentials credentials) {
    final byte[] encodedTransaction = rawTransaction.getTransaction().getType().isEip4844()
            ? encode4844(rawTransaction)
            : encode(rawTransaction);
    final Sign.SignatureData signatureData =
            Sign.signMessage(encodedTransaction, credentials.getEcKeyPair());
    return encode(rawTransaction, signatureData);
}
// Verifies signing produces the expected hex-encoded, RLP-signed transaction bytes.
@Test public void testSignMessage() { byte[] signedMessage = TransactionEncoder.signMessage(createEtherTransaction(), SampleKeys.CREDENTIALS); String hexMessage = Numeric.toHexString(signedMessage); assertEquals( hexMessage, ("0xf85580010a840add5355887fffffffffffffff80" + "1c" + "a046360b50498ddf5566551ce1ce69c46c565f1f478bb0ee680caf31fbc08ab727" + "a01b2f1432de16d110407d544f519fc91b84c8e16d3b6ec899592d486a94974cd0")); }
/**
 * Records the simple class name of the given tool under the global "command" context
 * entry, unless a command has already been recorded or the tool is null.
 *
 * @param tool entry-point object whose class name is recorded; ignored when null
 */
public static void noteEntryPoint(Object tool) {
    if (tool == null || GLOBAL_CONTEXT_MAP.containsKey(PARAM_COMMAND)) {
        return;
    }
    // Class.toString() yields e.g. "class org.example.MyTool"; take the text after
    // the final '.' as the command name (only when a valid suffix exists).
    final String classname = tool.getClass().toString();
    final int lastDot = classname.lastIndexOf('.');
    if (lastDot > 0 && lastDot < classname.length() - 1) {
        setGlobalContextEntry(PARAM_COMMAND, classname.substring(lastDot + 1));
    }
}
// Verifies noteEntryPoint records the enclosing test class's simple name.
@Test public void testNoteEntryPoint() throws Throwable { setAndAssertEntryPoint(this).isEqualTo("TestCommonAuditContext"); }
/**
 * FEEL nn all(): returns true when every non-null element of the list is true.
 * A null list yields true; null elements are skipped; a non-Boolean element is an
 * invalid-parameters error. All elements are type-checked even after a false is seen.
 *
 * @param list elements to conjoin, may be null
 * @return the conjunction of the non-null elements, or an error for non-Boolean input
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(true);
    }
    boolean conjunction = true;
    for (final Object element : list) {
        if (element == null) {
            continue; // nulls are tolerated by this variant — skipped, not an error
        }
        if (!(element instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
                    " a Boolean"));
        }
        conjunction &= (Boolean) element;
    }
    return FEELFnResult.ofResult(conjunction);
}
// Verifies all(false) evaluates to false.
@Test void invokeBooleanParamFalse() { FunctionTestUtil.assertResult(nnAllFunction.invoke(false), false); }
// Produces a log-safe string for a metadata record: sensitive ConfigRecord values
// are replaced with "(redacted)" on a duplicate (the original is not mutated), SCRAM
// credential secrets (salt/storedKey/serverKey) are always redacted, and all other
// record types use their plain toString().
public String toLoggableString(ApiMessage message) { MetadataRecordType type = MetadataRecordType.fromId(message.apiKey()); switch (type) { case CONFIG_RECORD: { if (!configSchema.isSensitive((ConfigRecord) message)) { return message.toString(); } ConfigRecord duplicate = ((ConfigRecord) message).duplicate(); duplicate.setValue("(redacted)"); return duplicate.toString(); } case USER_SCRAM_CREDENTIAL_RECORD: { UserScramCredentialRecord record = (UserScramCredentialRecord) message; return "UserScramCredentialRecord(" + "name=" + ((record.name() == null) ? "null" : "'" + record.name() + "'") + ", mechanism=" + record.mechanism() + ", salt=(redacted)" + ", storedKey=(redacted)" + ", serverKey=(redacted)" + ", iterations=" + record.iterations() + ")"; } default: return message.toString(); } }
// Verifies secrets are redacted and a null user name renders as unquoted null.
@Test public void testUserScramCredentialRecordToStringWithNullName() { assertEquals("UserScramCredentialRecord(name=null, mechanism=1, " + "salt=(redacted), storedKey=(redacted), serverKey=(redacted), iterations=256)", REDACTOR.toLoggableString(new UserScramCredentialRecord(). setName(null). setMechanism((byte) 1). setSalt(new byte[512]). setServerKey(new byte[128]). setStoredKey(new byte[128]). setIterations(256))); }
// Writes the modification timestamp as a custom WebDAV property via PROPPATCH when a
// modified time is present in the status, then records the resulting attributes
// (with the timestamp truncated to seconds) on the transfer status. Sardine and I/O
// failures are mapped to BackgroundExceptions.
@Override public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException { try { if(null != status.getModified()) { final DavResource resource = this.getResource(file); session.getClient().patch(new DAVPathEncoder().encode(file), this.getCustomProperties(resource, status.getModified()), Collections.emptyList(), this.getCustomHeaders(file, status)); status.setResponse(new DAVAttributesFinderFeature(session).toAttributes(resource).withModificationDate( Timestamp.toSeconds(status.getModified()))); } } catch(SardineException e) { throw new DAVExceptionMappingService().map("Failure to write attributes of {0}", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map(e, file); } }
// Verifies a timestamp set to 5100ms is persisted (rounded down to 5000ms by the
// second-granularity server) and reported consistently by both attribute finders.
@Test public void testSetTimestamp() throws Exception { final TransferStatus status = new TransferStatus(); final Path file = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), status); new DAVTimestampFeature(session).setTimestamp(file, status.withModified(5100L)); final PathAttributes attr = new DAVAttributesFinderFeature(session).find(file); assertEquals(5000L, attr.getModificationDate()); assertEquals(status.getResponse(), attr); assertEquals(5000L, new DefaultAttributesFinderFeature(session).find(file).getModificationDate()); new DAVDeleteFeature(session).delete(Collections.<Path>singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// Fallback type converter bridging Saxon's NodeInfo/NodeOverNodeInfo world to Camel's
// DOM-based converters: NodeInfo is first tried via a Node converter, then via a
// NodeList converter; lists of NodeInfo and read-only NodeOverNodeInfo values are
// wrapped as DOM NodeLists before delegating. Returns null when no route applies.
@Converter(fallback = true) public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) { if (NodeInfo.class.isAssignableFrom(value.getClass())) { // use a fallback type converter so we can convert the embedded body if the value is NodeInfo NodeInfo ni = (NodeInfo) value; // first try to find a Converter for Node TypeConverter tc = registry.lookup(type, Node.class); if (tc != null) { Node node = NodeOverNodeInfo.wrap(ni); return tc.convertTo(type, exchange, node); } // if this does not exist we can also try NodeList (there are some type converters for that) as // the default Xerces Node implementation also implements NodeList. tc = registry.lookup(type, NodeList.class); if (tc != null) { List<NodeInfo> nil = new LinkedList<>(); nil.add(ni); return tc.convertTo(type, exchange, toDOMNodeList(nil)); } } else if (List.class.isAssignableFrom(value.getClass())) { TypeConverter tc = registry.lookup(type, NodeList.class); if (tc != null) { List<NodeInfo> lion = new LinkedList<>(); for (Object o : (List<?>) value) { if (o instanceof NodeInfo) { lion.add((NodeInfo) o); } } if (!lion.isEmpty()) { NodeList nl = toDOMNodeList(lion); return tc.convertTo(type, exchange, nl); } } } else if (NodeOverNodeInfo.class.isAssignableFrom(value.getClass())) { // NodeOverNode info is a read-only Node implementation from Saxon. In contrast to the JDK // com.sun.org.apache.xerces.internal.dom.NodeImpl class it does not implement NodeList, but // many Camel type converters are based on that interface. Therefore we convert to NodeList and // try type conversion in the fallback type converter. TypeConverter tc = registry.lookup(type, NodeList.class); if (tc != null) { List<Node> domNodeList = new LinkedList<>(); domNodeList.add((NodeOverNodeInfo) value); return tc.convertTo(type, exchange, new DOMNodeList(domNodeList)); } } return null; }
// Verifies a Saxon document converts to an InputStream and back to the original string.
@Test public void convertToInputStream() { InputStream is = context.getTypeConverter().convertTo(InputStream.class, exchange, doc); assertNotNull(is); String string = context.getTypeConverter().convertTo(String.class, exchange, is); assertEquals(CONTENT, string); }