focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
public synchronized boolean tryWriteLock() {
if (!isFree()) {
return false;
} else {
status = FREE_STATUS;
return true;
}
} | @Test
public void singleTryWriteLockTest() {
SimpleReadWriteLock simpleReadWriteLock = new SimpleReadWriteLock();
boolean result = simpleReadWriteLock.tryWriteLock();
Assert.isTrue(result);
} |
@Override
public HttpHeaders add(HttpHeaders headers) {
if (headers instanceof DefaultHttpHeaders) {
this.headers.add(((DefaultHttpHeaders) headers).headers);
return this;
} else {
return super.add(headers);
}
} | @Test
public void addIterable() {
final DefaultHttpHeaders headers = newDefaultDefaultHttpHeaders();
headers.add(HEADER_NAME, HeaderValue.THREE.asList());
assertDefaultValues(headers, HeaderValue.THREE);
} |
protected Record<T> buildRecord(Consumer<T> consumer, Message<T> message) {
Schema<T> schema = null;
if (message instanceof MessageImpl) {
MessageImpl impl = (MessageImpl) message;
schema = impl.getSchemaInternal();
} else if (message instanceof TopicMessageImpl) {
TopicMessageImpl impl = (TopicMessageImpl) message;
schema = impl.getSchemaInternal();
}
// we don't want the Function/Sink to see AutoConsumeSchema
if (schema instanceof AutoConsumeSchema) {
AutoConsumeSchema autoConsumeSchema = (AutoConsumeSchema) schema;
// we cannot use atSchemaVersion, because atSchemaVersion is only
// able to decode data, here we want a Schema that
// is able to re-encode the payload when needed.
schema = (Schema<T>) autoConsumeSchema
.unwrapInternalSchema(message.getSchemaVersion());
}
return PulsarRecord.<T>builder()
.message(message)
.schema(schema)
.topicName(message.getTopicName())
.customAckFunction(cumulative -> {
if (cumulative) {
consumer.acknowledgeCumulativeAsync(message)
.whenComplete((unused, throwable) -> message.release());
} else {
consumer.acknowledgeAsync(message).whenComplete((unused, throwable) -> message.release());
}
})
.ackFunction(() -> {
if (pulsarSourceConfig
.getProcessingGuarantees() == FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE) {
consumer.acknowledgeCumulativeAsync(message)
.whenComplete((unused, throwable) -> message.release());
} else {
consumer.acknowledgeAsync(message).whenComplete((unused, throwable) -> message.release());
}
}).failFunction(() -> {
try {
if (pulsarSourceConfig.getProcessingGuarantees()
== FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE) {
throw new RuntimeException("Failed to process message: " + message.getMessageId());
}
consumer.negativeAcknowledge(message);
} finally {
// don't need to check if message pooling is set
// client will automatically check
message.release();
}
})
.build();
} | @Test(dataProvider = "sourceImpls")
public void testPulsarRecordCustomAck(PulsarSourceConfig pulsarSourceConfig) throws Exception {
PulsarSource pulsarSource = getPulsarSource(pulsarSourceConfig);
Message message = Mockito.mock(Message.class);
Consumer consumer = Mockito.mock(Consumer.class);
Mockito.when(consumer.acknowledgeAsync(message)).thenReturn(CompletableFuture.completedFuture(null));
Mockito.when(consumer.acknowledgeCumulativeAsync(message)).thenReturn(CompletableFuture.completedFuture(null));
PulsarRecord record = (PulsarRecord) pulsarSource.buildRecord(consumer, message);
record.cumulativeAck();
Mockito.verify(consumer, Mockito.times(1)).acknowledgeCumulativeAsync(message);
record.individualAck();
Mockito.verify(consumer, Mockito.times(1)).acknowledgeAsync(message);
} |
public Input<DefaultIssue> create(Component component) {
return new RawLazyInput(component);
} | @Test
void load_issues_from_report_missing_secondary_location_component() {
RuleKey ruleKey = RuleKey.of("java", "S001");
markRuleAsActive(ruleKey);
registerRule(ruleKey, "name");
ScannerReport.Issue reportIssue = ScannerReport.Issue.newBuilder()
.setTextRange(newTextRange(2))
.setMsg("the message")
.setRuleRepository(ruleKey.repository())
.setRuleKey(ruleKey.rule())
.setSeverity(Constants.Severity.BLOCKER)
.setGap(3.14)
.addFlow(ScannerReport.Flow.newBuilder()
.addLocation(ScannerReport.IssueLocation.newBuilder()
.setComponentRef(FILE_REF)
.setMsg("Secondary location in same file")
.setTextRange(newTextRange(2)))
.addLocation(ScannerReport.IssueLocation.newBuilder()
.setComponentRef(NOT_IN_REPORT_FILE_REF)
.setMsg("Secondary location in a missing file")
.setTextRange(newTextRange(3)))
.addLocation(ScannerReport.IssueLocation.newBuilder()
.setComponentRef(ANOTHER_FILE_REF)
.setMsg("Secondary location in another file")
.setTextRange(newTextRange(3)))
.build())
.build();
reportReader.putIssues(FILE.getReportAttributes().getRef(), singletonList(reportIssue));
Input<DefaultIssue> input = underTest.create(FILE);
Collection<DefaultIssue> issues = input.getIssues();
assertThat(issues).hasSize(1);
DefaultIssue issue = Iterators.getOnlyElement(issues.iterator());
DbIssues.Locations locations = issue.getLocations();
// fields set by analysis report
assertThat(locations.getFlowList()).hasSize(1);
assertThat(locations.getFlow(0).getLocationList()).hasSize(2);
// Not component id if location is in the same file
assertThat(locations.getFlow(0).getLocation(0).getComponentId()).isEmpty();
assertThat(locations.getFlow(0).getLocation(1).getComponentId()).isEqualTo(ANOTHER_FILE_UUID);
} |
@Override
public Path createSnapshot(Path path, String name) throws IOException {
return myFs.createSnapshot(fullPath(path), name);
} | @Test(timeout = 30000)
public void testCreateSnapshot() throws Exception {
Path snapRootPath = new Path("/snapPath");
Path chRootedSnapRootPath = new Path(
Path.getPathWithoutSchemeAndAuthority(chrootedTo), "snapPath");
AbstractFileSystem baseFs = Mockito.spy(fc.getDefaultFileSystem());
ChRootedFs chRootedFs = new ChRootedFs(baseFs, chrootedTo);
Mockito.doReturn(snapRootPath).when(baseFs)
.createSnapshot(chRootedSnapRootPath, "snap1");
Assert.assertEquals(snapRootPath,
chRootedFs.createSnapshot(snapRootPath, "snap1"));
Mockito.verify(baseFs).createSnapshot(chRootedSnapRootPath, "snap1");
} |
public ConfigEvaluatorBuilder addExternalProperties(Map<String, String> properties) {
evaluatorBuilder.addExternalProperties(properties);
return this;
} | @Test
public void addExternalProperties() {
var builder = ConfigEvaluatorBuilder.unconfigured();
builder.addExternalProperty("ONE", "one");
var properties = Map.of("TWO", "two", "THREE", "three");
builder.addExternalProperties(properties);
assertThat(builder.getExternalProperties()).hasSize(3);
assertThat(builder.getExternalProperties()).containsEntry("ONE", "one");
assertThat(builder.getExternalProperties()).containsAllEntriesOf(properties);
} |
@Override
public boolean supportsResume() {
return true;
} | @Test
public void testSupportsResume() {
assertTrue(writer.supportsResume());
} |
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
} | @Test
public void shouldFormatTableElementsNamedAfterReservedWords() {
// Given:
final TableElements tableElements = TableElements.of(
new TableElement(ColumnName.of("GROUP"), new Type(SqlTypes.STRING)),
new TableElement(ColumnName.of("Having"), new Type(SqlTypes.STRING))
);
final CreateStream createStream = new CreateStream(
TEST,
tableElements,
false,
false,
SOME_WITH_PROPS,
false);
// When:
final String sql = SqlFormatter.formatSql(createStream);
// Then:
assertThat("literal escaping failure", sql, containsString("`GROUP` STRING"));
assertThat("lowercase literal escaping failure", sql, containsString("`Having` STRING"));
assertValidSql(sql);
} |
@Override
public void onEvent(ServerConfigChangeEvent event) {
// load config
Map<AbilityKey, Boolean> newValues = new HashMap<>(serverAbilityKeys.size());
serverAbilityKeys.forEach(abilityKey -> {
String key = PREFIX + abilityKey.getName();
try {
// scan
Boolean property = EnvUtil.getProperty(key, Boolean.class);
if (property != null) {
newValues.put(abilityKey, property);
}
} catch (Exception e) {
LOGGER.warn("Update ability config from env failed, use old val, ability : {} , because : {}", key, e);
}
});
// update
refresh(newValues);
} | @Test
void testConfigChange() throws InterruptedException {
// test no change
environment.setProperty(AbilityConfigs.PREFIX + AbilityKey.SERVER_TEST_1.getName(), Boolean.TRUE.toString());
environment.setProperty(AbilityConfigs.PREFIX + AbilityKey.SERVER_TEST_2.getName(), Boolean.TRUE.toString());
abilityConfigs.onEvent(new ServerConfigChangeEvent());
assertEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_1));
assertEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_2));
// test change
environment.setProperty(AbilityConfigs.PREFIX + AbilityKey.SERVER_TEST_1.getName(), Boolean.FALSE.toString());
abilityConfigs.onEvent(new ServerConfigChangeEvent());
assertNotEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_1));
assertEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_2));
environment.setProperty(AbilityConfigs.PREFIX + AbilityKey.SERVER_TEST_1.getName(), Boolean.TRUE.toString());
abilityConfigs.onEvent(new ServerConfigChangeEvent());
assertEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_1));
environment.setProperty(AbilityConfigs.PREFIX + AbilityKey.SERVER_TEST_1.getName(), Boolean.FALSE.toString());
environment.setProperty(AbilityConfigs.PREFIX + AbilityKey.SERVER_TEST_2.getName(), Boolean.FALSE.toString());
abilityConfigs.onEvent(new ServerConfigChangeEvent());
assertNotEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_1));
assertNotEquals(AbilityStatus.SUPPORTED, serverAbilityControlManager.isCurrentNodeAbilityRunning(AbilityKey.SERVER_TEST_2));
} |
public ConfigRepoConfig getConfigRepo(MaterialConfig config) {
for (ConfigRepoConfig repoConfig : this) {
if (repoConfig.hasSameMaterial(config)) {
return repoConfig;
}
}
return null;
} | @Test
public void shouldFindConfigRepoWithSpecifiedId() {
String id = "repo1";
ConfigRepoConfig configRepo1 = ConfigRepoConfig.createConfigRepoConfig(git("http://git"), "myplugin", id);
repos.add(configRepo1);
assertThat(repos.getConfigRepo(id), is(configRepo1));
} |
@ProtoFactory
public static MediaType fromString(String tree) {
if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType();
Matcher matcher = TREE_PATTERN.matcher(tree);
return parseSingleMediaType(tree, matcher, false);
} | @Test
public void testQuotedParam() {
MediaType mediaType = MediaType.fromString("application/json; charset=\"UTF-8\"");
assertMediaTypeWithParam(mediaType, "application", "json", "charset", "\"UTF-8\"");
} |
public TimelineEvent stop(String workflowId, User caller) {
return terminate(workflowId, Actions.WorkflowInstanceAction.STOP, caller);
} | @Test
public void testStop() {
when(instanceDao.terminateQueuedInstances(
eq("sample-minimal-wf"),
eq(Constants.TERMINATE_BATCH_LIMIT),
eq(WorkflowInstance.Status.STOPPED),
anyString()))
.thenReturn(Constants.TERMINATE_BATCH_LIMIT)
.thenReturn(Constants.TERMINATE_BATCH_LIMIT)
.thenReturn(Constants.TERMINATE_BATCH_LIMIT - 1);
when(instanceDao.terminateRunningInstances(
eq("sample-minimal-wf"),
eq(Constants.TERMINATE_BATCH_LIMIT),
eq(Actions.WorkflowInstanceAction.STOP),
any(),
anyString()))
.thenReturn(Constants.TERMINATE_BATCH_LIMIT - 1);
String res = actionHandler.stop("sample-minimal-wf", tester).getMessage();
assertEquals("Terminated [29] queued instances and terminating [9] running instances", res);
} |
@Override
public List<Catalogue> sort(List<Catalogue> catalogueTree, SortTypeEnum sortTypeEnum) {
log.debug("sort catalogue tree based on id. catalogueTree: {}, sortTypeEnum: {}", catalogueTree, sortTypeEnum);
return recursionSortCatalogues(catalogueTree, sortTypeEnum);
} | @Test
public void sortDescTest() {
SortTypeEnum sortTypeEnum = SortTypeEnum.DESC;
List<Catalogue> catalogueTree = Lists.newArrayList();
Catalogue catalogue = new Catalogue();
catalogue.setId(1);
Catalogue catalogue11 = new Catalogue();
catalogue11.setId(2);
Catalogue catalogue12 = new Catalogue();
catalogue12.setId(3);
catalogue.setChildren(Lists.newArrayList(catalogue12, catalogue11));
Catalogue catalogue2 = new Catalogue();
catalogue2.setId(4);
Catalogue catalogue21 = new Catalogue();
catalogue21.setId(7);
Catalogue catalogue22 = new Catalogue();
catalogue22.setId(6);
catalogue2.setChildren(Lists.newArrayList(catalogue21, catalogue22));
catalogueTree.add(catalogue2);
catalogueTree.add(catalogue);
/*
input:
-- 4
-- 7
-- 6
-- 1
-- 3
-- 2
output:
-- 4
-- 7
-- 6
-- 1
-- 3
-- 2
*/
List<Catalogue> resultList = catalogueTreeSortDefaultStrategyTest.sort(catalogueTree, sortTypeEnum);
List<Integer> resultIdList = CategoryTreeSortStrategyTestUtils.breadthTraverse(resultList);
assertEquals(Lists.newArrayList(4, 1, 7, 6, 3, 2), resultIdList);
} |
public boolean hasPendingTasks() {
return embeddedEventLoop().hasPendingNormalTasks() ||
embeddedEventLoop().nextScheduledTask() == 0;
} | @Test
void testHasPendingTasks() {
EmbeddedChannel channel = new EmbeddedChannel();
channel.freezeTime();
Runnable runnable = new Runnable() {
@Override
public void run() {
}
};
// simple execute
assertFalse(channel.hasPendingTasks());
channel.eventLoop().execute(runnable);
assertTrue(channel.hasPendingTasks());
channel.runPendingTasks();
assertFalse(channel.hasPendingTasks());
// schedule in the future (note: time is frozen above)
channel.eventLoop().schedule(runnable, 1, TimeUnit.SECONDS);
assertFalse(channel.hasPendingTasks());
channel.runPendingTasks();
assertFalse(channel.hasPendingTasks());
channel.advanceTimeBy(1, TimeUnit.SECONDS);
assertTrue(channel.hasPendingTasks());
channel.runPendingTasks();
assertFalse(channel.hasPendingTasks());
} |
public static boolean isLocalElasticsearchEnabled(AppSettings settings) {
// elasticsearch is enabled on "search" nodes, but disabled on "application" nodes
if (isClusterEnabled(settings.getProps())) {
return NodeType.parse(settings.getValue(CLUSTER_NODE_TYPE.getKey()).orElse("")) == NodeType.SEARCH;
}
// elasticsearch is enabled in standalone mode
return true;
} | @Test
public void isLocalElasticsearchEnabled_returns_true_in_standalone_mode() {
TestAppSettings settings = new TestAppSettings();
assertThat(ClusterSettings.isLocalElasticsearchEnabled(settings)).isTrue();
} |
@VisibleForTesting
void parseWorkflowParameter(
Map<String, Parameter> workflowParams, Parameter param, String workflowId) {
parseWorkflowParameter(workflowParams, param, workflowId, new HashSet<>());
} | @Test
public void testParseWorkflowParameter() {
StringParameter bar = StringParameter.builder().name("bar").expression("foo + '-1';").build();
paramEvaluator.parseWorkflowParameter(
Collections.singletonMap("foo", LongParameter.builder().expression("1+2+3;").build()),
bar,
"test-workflow");
assertEquals("6-1", bar.getEvaluatedResult());
bar = StringParameter.builder().name("bar").expression("foo + '-1';").build();
paramEvaluator.parseWorkflowParameter(
Collections.singletonMap(
"foo", LongParameter.builder().evaluatedResult(6L).evaluatedTime(123L).build()),
bar,
"test-workflow");
assertEquals("6-1", bar.getEvaluatedResult());
} |
@Override
public Batch toBatch() {
return new SparkBatch(
sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
} | @Test
public void testPartitionedTruncateString() throws Exception {
createPartitionedTable(spark, tableName, "truncate(4, data)");
SparkScanBuilder builder = scanBuilder();
TruncateFunction.TruncateString function = new TruncateFunction.TruncateString();
UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(4), fieldRef("data")));
Predicate predicate = new Predicate("<>", expressions(udf, stringLit("data")));
pushFilters(builder, predicate);
Batch scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(5);
// NOT NotEqual
builder = scanBuilder();
predicate = new Not(predicate);
pushFilters(builder, predicate);
scan = builder.build().toBatch();
assertThat(scan.planInputPartitions().length).isEqualTo(5);
} |
private RemotingCommand deleteSubscriptionGroup(ChannelHandlerContext ctx,
RemotingCommand request) throws RemotingCommandException {
final RemotingCommand response = RemotingCommand.createResponseCommand(null);
DeleteSubscriptionGroupRequestHeader requestHeader =
(DeleteSubscriptionGroupRequestHeader) request.decodeCommandCustomHeader(DeleteSubscriptionGroupRequestHeader.class);
LOGGER.info("AdminBrokerProcessor#deleteSubscriptionGroup, caller={}",
RemotingHelper.parseChannelRemoteAddr(ctx.channel()));
this.brokerController.getSubscriptionGroupManager().deleteSubscriptionGroupConfig(requestHeader.getGroupName());
if (requestHeader.isCleanOffset()) {
this.brokerController.getConsumerOffsetManager().removeOffset(requestHeader.getGroupName());
this.brokerController.getPopInflightMessageCounter().clearInFlightMessageNumByGroupName(requestHeader.getGroupName());
}
if (this.brokerController.getBrokerConfig().isAutoDeleteUnusedStats()) {
this.brokerController.getBrokerStatsManager().onGroupDeleted(requestHeader.getGroupName());
}
response.setCode(ResponseCode.SUCCESS);
response.setRemark(null);
return response;
} | @Test
public void testDeleteSubscriptionGroup() throws RemotingCommandException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_SUBSCRIPTIONGROUP, null);
request.addExtField("groupName", "GID-Group-Name");
request.addExtField("removeOffset", "true");
RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
} |
public static Schema getPinotSchemaFromPinotSchemaWithComplexTypeHandling(Descriptors.Descriptor protoSchema,
@Nullable Map<String, FieldSpec.FieldType> fieldTypeMap, @Nullable TimeUnit timeUnit, List<String> fieldsToUnnest,
String delimiter) {
Schema pinotSchema = new Schema();
for (Descriptors.FieldDescriptor field : protoSchema.getFields()) {
extractSchemaWithComplexTypeHandling(field, fieldsToUnnest, delimiter, field.getName(), pinotSchema,
fieldTypeMap, timeUnit);
}
return pinotSchema;
} | @Test
public void testGetPinotSchemaFromPinotSchemaWithComplexTypeHandling()
throws URISyntaxException, IOException {
Descriptors.Descriptor desc = CompositeTypes.CompositeMessage.getDescriptor();
Map<String, FieldSpec.FieldType> fieldTypeMap = new HashMap<>();
fieldTypeMap.put("test_message.long_field", FieldSpec.FieldType.DATE_TIME);
Schema schema = ProtoBufSchemaUtils.getPinotSchemaFromPinotSchemaWithComplexTypeHandling(
desc,
fieldTypeMap,
TimeUnit.MILLISECONDS,
Collections.emptyList(),
".");
URL resource = getClass().getClassLoader().getResource("complex_type_schema.json");
Schema expectedSchema = Schema.fromString(new String(Files.readAllBytes(Paths.get(resource.toURI()))));
assertEquals(expectedSchema, schema);
} |
public boolean matches(final PathMatcher pathMatcher,
final String targetPath,
final String pathMethod) {
return pathMatcher.match(path, targetPath) &&
httpMethod.matches(pathMethod);
} | @Test
void uri와_method가_같은지_확인한다() {
// given
String path = "/path";
HttpMethod method = HttpMethod.GET;
PathRequest pathRequest = new PathRequest(path, method);
// when
boolean result = pathRequest.matches(new AntPathMatcher(), path, method.name());
// then
assertThat(result).isTrue();
} |
@Override
public T addBoolean(K name, boolean value) {
throw new UnsupportedOperationException("read only");
} | @Test
public void testAddBoolean() {
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() {
HEADERS.addBoolean("name", true);
}
});
} |
public Optional<File> get(InstalledPlugin plugin) {
// Does not fail if another process tries to create the directory at the same time.
Path jarInCache = jarInCache(plugin.key, plugin.hash);
if (Files.isRegularFile(jarInCache)) {
return Optional.of(jarInCache.toFile());
}
return download(plugin).map(Path::toFile);
} | @Test
void download_and_add_jar_to_cache_if_missing() throws Exception {
FileAndMd5 tempJar = new FileAndMd5();
stubDownload(tempJar);
InstalledPlugin plugin = newInstalledPlugin("foo", tempJar.md5);
File result = underTest.get(plugin).get();
verifySameContent(result.toPath(), tempJar);
sonarqube.verify(exactly(1), getRequestedFor(urlEqualTo("/api/plugins/download?plugin=foo")));
// get from cache on second call
result = underTest.get(plugin).get();
verifySameContent(result.toPath(), tempJar);
sonarqube.verify(exactly(1), getRequestedFor(urlEqualTo("/api/plugins/download?plugin=foo")));
} |
public Logger getLogger() {
return logger;
} | @Test
void testGetLogger() {
Assertions.assertThrows(RuntimeException.class, () -> {
Logger failLogger = mock(Logger.class);
FailsafeLogger failsafeLogger = new FailsafeLogger(failLogger);
doThrow(new RuntimeException()).when(failLogger).error(anyString());
failsafeLogger.getLogger().error("should get error");
});
} |
public static TypeBuilder<Schema> builder() {
return new TypeBuilder<>(new SchemaCompletion(), new NameContext());
} | @Test
void mapObjectProp() {
Map<String, Object> values = new HashMap<>();
values.put("booleanKey", true);
values.put("intKey", Integer.MAX_VALUE);
values.put("longKey", Long.MAX_VALUE);
values.put("floatKey", 1.0f);
values.put("doubleKey", Double.MAX_VALUE);
values.put("byteKey", new byte[] { 0x41, 0x42, 0x43 });
values.put("stringKey", "abc");
Schema s = SchemaBuilder.builder().intBuilder().prop("mapProp", values).endInt();
// object properties
assertTrue(s.getObjectProp("mapProp") instanceof Map);
@SuppressWarnings("unchecked")
Map<String, Object> valueMap = (Map<String, Object>) s.getObjectProp("mapProp");
assertEquals(values.size(), valueMap.size());
assertTrue(valueMap.get("booleanKey") instanceof Boolean);
assertEquals(true, valueMap.get("booleanKey"));
assertTrue(valueMap.get("intKey") instanceof Integer);
assertEquals(Integer.MAX_VALUE, valueMap.get("intKey"));
assertTrue(valueMap.get("longKey") instanceof Long);
assertEquals(Long.MAX_VALUE, valueMap.get("longKey"));
assertTrue(valueMap.get("floatKey") instanceof Float);
assertEquals(1.0f, valueMap.get("floatKey"));
assertTrue(valueMap.get("doubleKey") instanceof Double);
assertEquals(Double.MAX_VALUE, valueMap.get("doubleKey"));
assertTrue(valueMap.get("byteKey") instanceof byte[]);
assertArrayEquals("ABC".getBytes(StandardCharsets.UTF_8), (byte[]) valueMap.get("byteKey"));
assertTrue(valueMap.get("stringKey") instanceof String);
assertEquals("abc", valueMap.get("stringKey"));
} |
public static String format(TemporalAccessor time, DateTimeFormatter formatter) {
if (null == time) {
return null;
}
if(time instanceof Month){
return time.toString();
}
if(null == formatter){
formatter = DateTimeFormatter.ISO_LOCAL_DATE_TIME;
}
try {
return formatter.format(time);
} catch (UnsupportedTemporalTypeException e){
if(time instanceof LocalDate && e.getMessage().contains("HourOfDay")){
// 用户传入LocalDate,但是要求格式化带有时间部分,转换为LocalDateTime重试
return formatter.format(((LocalDate) time).atStartOfDay());
}else if(time instanceof LocalTime && e.getMessage().contains("YearOfEra")){
// 用户传入LocalTime,但是要求格式化带有日期部分,转换为LocalDateTime重试
return formatter.format(((LocalTime) time).atDate(LocalDate.now()));
} else if(time instanceof Instant){
// 时间戳没有时区信息,赋予默认时区
return formatter.format(((Instant) time).atZone(ZoneId.systemDefault()));
}
throw e;
}
} | @Test
public void formatLocalDateTest(){
final String format = TemporalAccessorUtil.format(LocalDate.of(2020, 12, 7), DatePattern.NORM_DATETIME_PATTERN);
assertEquals("2020-12-07 00:00:00", format);
} |
@Override
public void sendSmsCode(SmsCodeSendReqDTO reqDTO) {
SmsSceneEnum sceneEnum = SmsSceneEnum.getCodeByScene(reqDTO.getScene());
Assert.notNull(sceneEnum, "验证码场景({}) 查找不到配置", reqDTO.getScene());
// 创建验证码
String code = createSmsCode(reqDTO.getMobile(), reqDTO.getScene(), reqDTO.getCreateIp());
// 发送验证码
smsSendService.sendSingleSms(reqDTO.getMobile(), null, null,
sceneEnum.getTemplateCode(), MapUtil.of("code", code));
} | @Test
public void sendSmsCode_exceedDay() {
// mock 数据
SmsCodeDO smsCodeDO = randomPojo(SmsCodeDO.class,
o -> o.setMobile("15601691300").setTodayIndex(10).setCreateTime(LocalDateTime.now()));
smsCodeMapper.insert(smsCodeDO);
// 准备参数
SmsCodeSendReqDTO reqDTO = randomPojo(SmsCodeSendReqDTO.class, o -> {
o.setMobile("15601691300");
o.setScene(SmsSceneEnum.MEMBER_LOGIN.getScene());
});
// mock 方法
SqlConstants.init(DbType.MYSQL);
when(smsCodeProperties.getSendFrequency()).thenReturn(Duration.ofMillis(0));
// 调用,并断言异常
assertServiceException(() -> smsCodeService.sendSmsCode(reqDTO),
SMS_CODE_EXCEED_SEND_MAXIMUM_QUANTITY_PER_DAY);
} |
@Override
public Map<String, String> getAll() {
return flags.values().stream().collect(Collectors.toMap(flag -> flag.name, flag -> flag.value));
} | @Test
void testEnvironmentVariablePrefix() throws IOException {
FeatureFlags flags = create(EMPTY, EMPTY, EMPTY, Map.of(
"wrong prefix", ENVIRONMENT_VARIABLE_VALUE,
PREFIX_ENVIRONMENT_VARIABLE + FEATURE_1, ENVIRONMENT_VARIABLE_VALUE));
assertThat(flags.getAll()).isEqualTo(Map.of(FEATURE_1, ENVIRONMENT_VARIABLE_VALUE));
} |
public static void compress(File imageFile, File outFile, float quality) throws IORuntimeException {
Img img = null;
try {
img = Img.from(imageFile);
img.setQuality(quality).write(outFile);
} finally {
IoUtil.flush(img);
}
} | @Test
@Disabled
public void compressTest() {
ImgUtil.compress(FileUtil.file("d:/test/dest.png"),
FileUtil.file("d:/test/1111_target.jpg"), 0.1f);
} |
public List<String> getOptions()
{
COSBase values = getCOSObject().getDictionaryObject(COSName.OPT);
return FieldUtils.getPairableItems(values, 0);
} | @Test
void getOptionsFromCOSArray()
{
PDChoice choiceField = new PDComboBox(acroForm);
COSArray choiceFieldOptions = new COSArray();
// add entry to options
COSArray entry = new COSArray();
entry.add(new COSString(" "));
choiceFieldOptions.add(entry);
// add entry to options
entry = new COSArray();
entry.add(new COSString("A"));
choiceFieldOptions.add(entry);
// add entry to options
entry = new COSArray();
entry.add(new COSString("B"));
choiceFieldOptions.add(entry);
// add the options using the low level COS model as the PD model will
// abstract the COSArray
choiceField.getCOSObject().setItem(COSName.OPT, choiceFieldOptions);
assertEquals(options, choiceField.getOptions());
} |
public OpenConfigConfigOfAssignmentHandler addLogicalChannel(String logicalChannel) {
modelObject.logicalChannel(logicalChannel);
return this;
} | @Test
public void testAddLogicalChannel() {
// test Handler
OpenConfigConfigOfAssignmentHandler config = new OpenConfigConfigOfAssignmentHandler(parent);
// call addLogicalChannel
config.addLogicalChannel("name");
// expected ModelObject
DefaultConfig modelObject = new DefaultConfig();
modelObject.logicalChannel("name");
assertEquals("[NG]addLogicalChannel:ModelObject(LogicalChannel added) is not an expected one.\n",
modelObject, config.getModelObject());
} |
public synchronized Snapshot getSnapshot() {
moveWindowToCurrentEpochSecond(getLatestPartialAggregation());
return new SnapshotImpl(totalAggregation);
} | @Test
public void checkInitialBucketCreation() {
MockClock clock = MockClock.at(2019, 8, 4, 12, 0, 0, ZoneId.of("UTC"));
SlidingTimeWindowMetrics metrics = new SlidingTimeWindowMetrics(5, clock);
PartialAggregation[] buckets = metrics.partialAggregations;
long epochSecond = clock.instant().getEpochSecond();
for (int i = 0; i < buckets.length; i++) {
PartialAggregation bucket = buckets[i];
assertThat(bucket.getEpochSecond()).isEqualTo(epochSecond + i);
}
Snapshot snapshot = metrics.getSnapshot();
assertThat(snapshot.getTotalNumberOfCalls()).isZero();
assertThat(snapshot.getNumberOfSuccessfulCalls()).isZero();
assertThat(snapshot.getNumberOfFailedCalls()).isZero();
assertThat(snapshot.getTotalNumberOfSlowCalls()).isZero();
assertThat(snapshot.getNumberOfSlowSuccessfulCalls()).isZero();
assertThat(snapshot.getNumberOfSlowFailedCalls()).isZero();
assertThat(snapshot.getTotalDuration().toMillis()).isZero();
assertThat(snapshot.getAverageDuration().toMillis()).isZero();
assertThat(snapshot.getFailureRate()).isZero();
} |
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
} | @Test
public void testOffsetAssignmentAfterUpConversionV1ToV2Compressed() {
Compression compression = Compression.gzip().build();
MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V1, RecordBatch.NO_TIMESTAMP, compression);
long offset = 1234567;
checkOffsets(records, 0);
checkOffsets(new LogValidator(
records,
new TopicPartition("topic", 0),
time,
CompressionType.GZIP,
compression,
false,
RecordBatch.MAGIC_VALUE_V2,
TimestampType.LOG_APPEND_TIME,
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords, offset);
} |
VIn deserializeValue(final String topic, final Headers headers, final byte[] data) {
return valDeserializer.deserialize(topic, headers, data);
} | @Test
public void shouldProvideTopicHeadersAndDataToValueDeserializer() {
final SourceNode<String, String> sourceNode = new MockSourceNode<>(new TheDeserializer(), new TheDeserializer());
final RecordHeaders headers = new RecordHeaders();
final String deserializedValue = sourceNode.deserializeValue("topic", headers, "data".getBytes(StandardCharsets.UTF_8));
assertThat(deserializedValue, is("topic" + headers + "data"));
} |
@Nullable
public String getInstanceRegion(InstanceInfo instanceInfo) {
if (instanceInfo.getDataCenterInfo() == null || instanceInfo.getDataCenterInfo().getName() == null) {
logger.warn("Cannot get region for instance id:{}, app:{} as dataCenterInfo is null. Returning local:{} by default",
instanceInfo.getId(), instanceInfo.getAppName(), localRegion);
return localRegion;
}
if (DataCenterInfo.Name.Amazon.equals(instanceInfo.getDataCenterInfo().getName())) {
AmazonInfo amazonInfo = (AmazonInfo) instanceInfo.getDataCenterInfo();
Map<String, String> metadata = amazonInfo.getMetadata();
String availabilityZone = metadata.get(AmazonInfo.MetaDataKey.availabilityZone.getName());
if (null != availabilityZone) {
return azToRegionMapper.getRegionForAvailabilityZone(availabilityZone);
}
}
return null;
} | @Test
public void testDefaults() throws Exception {
PropertyBasedAzToRegionMapper azToRegionMapper = new PropertyBasedAzToRegionMapper(
new DefaultEurekaClientConfig());
InstanceRegionChecker checker = new InstanceRegionChecker(azToRegionMapper, "us-east-1");
azToRegionMapper.setRegionsToFetch(new String[]{"us-east-1"});
AmazonInfo dcInfo = AmazonInfo.Builder.newBuilder().addMetadata(AmazonInfo.MetaDataKey.availabilityZone,
"us-east-1c").build();
InstanceInfo instanceInfo = InstanceInfo.Builder.newBuilder().setAppName("app").setDataCenterInfo(dcInfo).build();
String instanceRegion = checker.getInstanceRegion(instanceInfo);
Assert.assertEquals("Invalid instance region.", "us-east-1", instanceRegion);
} |
public static String findDatumOverlijdenOudeWaarde(List<Container> categorieList){
return findValue(categorieList, CATEGORIE_OVERLIJDEN_OUDE_WAARDE, ELEMENT_DATUM_OVERLIJDEN);
} | @Test
// Exercises the happy path against a fully populated category list.
public void testFindDatumOverlijdenOudeWaarde() {
assertThat(CategorieUtil.findDatumOverlijdenOudeWaarde(createFullCategories()), is("datumoverlijden_oud"));
} |
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
// Parse the raw payload as a JSON object; a parse failure is logged and
// the message is dropped by returning null.
final byte[] payload = rawMessage.getPayload();
final Map<String, Object> event;
try {
event = objectMapper.readValue(payload, TypeReferences.MAP_STRING_OBJECT);
} catch (IOException e) {
// Pass the exception as the trailing argument so SLF4J records the
// stack trace instead of silently discarding the failure cause.
LOG.error("Couldn't decode raw message {}", rawMessage, e);
return null;
}
return parseEvent(event);
} | @Test
// A generic beat JSON document should decode with "unknown" source, the
// beat timestamp, a facility derived from the beat type, and prefixed fields.
public void decodeMessagesHandleGenericBeatMessages() throws Exception {
final Message message = codec.decode(messageFromJson("generic.json"));
assertThat(message).isNotNull();
assertThat(message.getSource()).isEqualTo("unknown");
assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 4, 1, 0, 0, DateTimeZone.UTC));
assertThat(message.getField("facility")).isEqualTo("genericbeat");
assertThat(message.getField("beat_foo")).isEqualTo("bar");
} |
@Override
// Selects a provider using a consistent-hash selector cached per
// interface#method; the selector is rebuilt whenever the provider list's
// hash code changes.
public ProviderInfo doSelect(SofaRequest request, List<ProviderInfo> providerInfos) {
String interfaceId = request.getInterfaceName();
String method = request.getMethodName();
String key = interfaceId + "#" + method;
// Is this still the same provider list?
int hashcode = providerInfos.hashCode();
Selector selector = selectorCache.get(key);
// no selector cached yet
if (selector == null ||
// or the provider list has changed
selector.getHashCode() != hashcode) {
// NOTE(review): get-then-put is not atomic; concurrent threads may build
// duplicate selectors with last-write-wins -- confirm this is acceptable.
selector = new Selector(interfaceId, method, providerInfos, hashcode);
selectorCache.put(key, selector);
}
return selector.select(request);
} | @Test
// Verifies that the weighted consistent-hash load balancer distributes
// selections roughly in proportion to provider weight (port - 9000),
// within a 10% tolerance.
public void testWeight() throws Exception {
WeightConsistentHashLoadBalancer loadBalancer = new WeightConsistentHashLoadBalancer(null);
SofaRequest request = new SofaRequest();
request.setInterfaceName(ConsistentHashLoadBalancerTest.class.getName());
request.setMethod(ConsistentHashLoadBalancerTest.class.getMethod("doSelect"));
int size = 20;
int total = 100000;
List<ProviderInfo> providers = buildDiffWeightProviderList(size);
// Count selections per provider port. merge() counts the first hit as 1;
// the original put(key, 0) under-counted every port by one selection.
Map<Integer, Integer> map = new HashMap<>(total * 2);
for (int i = 0; i < total; i++) {
request.setMethodArgs(new Object[] { "method" + i });
ProviderInfo provider = loadBalancer.doSelect(request, providers);
map.merge(provider.getPort(), 1, Integer::sum);
}
// Each port N should receive roughly 500 * (N - 9000) selections;
// allow at most 10% deviation in either direction.
for (Map.Entry<Integer, Integer> entry : map.entrySet()) {
int port = entry.getKey() - 9000;
Assert.assertTrue(entry.getValue() > 500 * port * 0.90);
Assert.assertTrue(entry.getValue() < 500 * port * 1.10);
}
} |
// Removes and returns all in-flight requests queued for the given node,
// adjusting the global in-flight counter. Returns an empty iterable when the
// node has no queue.
public Iterable<NetworkClient.InFlightRequest> clearAll(String node) {
// Detach the node's queue with a single map operation instead of a lookup
// followed by a second remove of the same key.
final Deque<NetworkClient.InFlightRequest> clearedRequests = requests.remove(node);
if (clearedRequests == null) {
return Collections.emptyList();
}
inFlightRequestCount.getAndAdd(-clearedRequests.size());
// descendingIterator yields the requests in original send order
// (callers such as testClearAll rely on this ordering).
return clearedRequests::descendingIterator;
} | @Test
// clearAll should empty the node's in-flight queue and return the cleared
// requests in original send order.
public void testClearAll() {
int correlationId1 = addRequest(dest);
int correlationId2 = addRequest(dest);
List<NetworkClient.InFlightRequest> clearedRequests = TestUtils.toList(this.inFlightRequests.clearAll(dest));
assertEquals(0, inFlightRequests.count());
assertEquals(2, clearedRequests.size());
assertEquals(correlationId1, clearedRequests.get(0).header.correlationId());
assertEquals(correlationId2, clearedRequests.get(1).header.correlationId());
} |
// Returns a future that completes successfully only when deletion succeeded
// for every requested partition; it fails with the top-level error of the
// underlying future or with the first per-partition error encountered.
public KafkaFuture<Void> all() {
final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
this.future.whenComplete((topicPartitions, throwable) -> {
if (throwable != null) {
result.completeExceptionally(throwable);
} else {
for (TopicPartition partition : partitions) {
// Stop at the first partition-level failure.
if (maybeCompleteExceptionally(topicPartitions, partition, result)) {
return;
}
}
result.complete(null);
}
});
return result;
} | @Test
// A top-level GROUP_AUTHORIZATION_FAILED error must propagate through all().
public void testTopLevelErrorConstructor() throws InterruptedException {
partitionFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception());
DeleteConsumerGroupOffsetsResult topLevelErrorResult =
new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions);
TestUtils.assertFutureError(topLevelErrorResult.all(), GroupAuthorizationException.class);
} |
@SuppressWarnings("SuspiciousMethodCalls")
// Returns whether coll1 contains every element of coll2, in O(n + m) for
// non-Set collections (cf. Apache commons-collections4). An empty coll1 only
// contains an empty coll2; every collection contains the empty collection.
public static boolean containsAll(Collection<?> coll1, Collection<?> coll2) {
if (isEmpty(coll1)) {
return isEmpty(coll2);
}
if (isEmpty(coll2)) {
return true;
}
// A Set can answer membership queries directly.
if (coll1 instanceof Set) {
return coll1.containsAll(coll2);
}
// Single pass over coll1: remember every element already consumed so each
// source element is examined at most once across all targets.
final Iterator<?> sourceIter = coll1.iterator();
final Set<Object> seen = new HashSet<>(coll1.size(), 1);
for (final Object target : coll2) {
if (seen.contains(target)) {
continue;
}
boolean matched = false;
while (sourceIter.hasNext()) {
final Object candidate = sourceIter.next();
seen.add(candidate);
if (Objects.equals(target, candidate)) {
matched = true;
break;
}
}
if (!matched) {
return false;
}
}
return true;
} | @Test
// containsAll: a list contains a permuted subset, and any non-empty list
// contains the empty list.
public void containsAllTest() {
final ArrayList<Integer> list1 = CollUtil.newArrayList(1, 2, 3, 4, 5);
final ArrayList<Integer> list2 = CollUtil.newArrayList(5, 3, 1);
assertTrue(CollUtil.containsAll(list1, list2));
final ArrayList<Integer> list3 = CollUtil.newArrayList(1);
final ArrayList<Integer> list4 = CollUtil.newArrayList();
assertTrue(CollUtil.containsAll(list3, list4));
} |
@Override
// Deletes a member group after verifying it exists and that no users are
// still assigned to it.
public void deleteGroup(Long id) {
// ensure the group exists
validateGroupExists(id);
// refuse deletion while users are still assigned to the group
validateGroupHasUser(id);
// perform the delete
memberGroupMapper.deleteById(id);
} | @Test
// Deleting a group that still has assigned users must fail with GROUP_HAS_USER.
public void testDeleteGroup_hasUser() {
// mock data
MemberGroupDO dbGroup = randomPojo(MemberGroupDO.class);
groupMapper.insert(dbGroup);// @Sql: insert an existing record first
// prepare the argument
Long id = dbGroup.getId();
// mock the member count for this group
when(memberUserService.getUserCountByGroupId(eq(id))).thenReturn(1L);
// invoke and assert the expected service exception
assertServiceException(() -> groupService.deleteGroup(id), GROUP_HAS_USER);
} |
@Override
// Publishes a batch of aggregate datums inside a single transaction; any
// single failure aborts (and rolls back) the whole batch. Returns true only
// when every datum was stored successfully.
public boolean batchPublishAggr(final String dataId, final String group, final String tenant,
final Map<String, String> datumMap, final String appName) {
try {
Boolean isPublishOk = tjt.execute(status -> {
for (Map.Entry<String, String> entry : datumMap.entrySet()) {
try {
if (!addAggrConfigInfo(dataId, group, tenant, entry.getKey(), appName, entry.getValue())) {
throw new TransactionSystemException("error in batchPublishAggr");
}
} catch (TransactionSystemException e) {
throw e;
} catch (Throwable e) {
// Keep the original failure as the cause instead of discarding it.
throw new TransactionSystemException("error in batchPublishAggr", e);
}
}
return Boolean.TRUE;
});
// execute() may return null; treat that as a failed publish.
return Boolean.TRUE.equals(isPublishOk);
} catch (TransactionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
return false;
}
} | @Test
// When the underlying insert fails, batchPublishAggr must swallow the
// transaction error and report failure by returning false.
void testBatchPublishAggrException() {
String dataId = "dataId111";
String group = "group";
String tenant = "tenant";
//mock query datumId and equal with current content param.
when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant, "d1"}), eq(String.class))).thenThrow(
new TransactionSystemException("c1t fail"));
Map<String, String> datumMap = new HashMap<>();
datumMap.put("d1", "c1");
datumMap.put("d2", "c2");
datumMap.put("d3", "c3");
String appName = "appname1234";
boolean result = externalConfigInfoAggrPersistService.batchPublishAggr(dataId, group, tenant, datumMap, appName);
assertFalse(result);
} |
@Override
// Redirects a logged-in system administrator (other than "admin") to the
// change-admin-password page while the built-in admin account still has its
// default credentials; the redirect can be disabled by configuration.
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
boolean forceRedirect = config
.getBoolean(SONAR_FORCE_REDIRECT_DEFAULT_ADMIN_CREDENTIALS)
.orElse(true)
if (forceRedirect && userSession.hasSession() && userSession.isLoggedIn()
&& userSession.isSystemAdministrator() && !"admin".equals(userSession.getLogin())
&& defaultAdminCredentialsVerifier.hasDefaultCredentialUser()) {
redirectTo(response, request.getContextPath() + CHANGE_ADMIN_PASSWORD_PATH);
}
// NOTE(review): the filter chain continues even after a redirect was issued
// -- confirm redirectTo commits the response and downstream filters tolerate it.
chain.doFilter(request, response);
} | @Test
// With a servlet context path configured, the redirect target must be
// prefixed with that context path.
public void redirect_if_instance_uses_default_admin_credentials_and_web_context_configured() throws Exception {
when(request.getContextPath()).thenReturn("/sonarqube");
underTest.doFilter(request, response, chain);
verify(response).sendRedirect("/sonarqube/admin/change_admin_password");
} |
// Returns the given near-cache config unchanged when it is null, NATIVE
// in-memory format, or already has an explicitly configured eviction size.
// Otherwise returns a copy whose eviction size is the on-heap map default,
// leaving the supplied config untouched.
public static NearCacheConfig copyWithInitializedDefaultMaxSizeForOnHeapMaps(NearCacheConfig nearCacheConfig) {
if (nearCacheConfig == null) {
return null;
}
EvictionConfig evictionConfig = nearCacheConfig.getEvictionConfig();
boolean keepAsIs = nearCacheConfig.getInMemoryFormat() == InMemoryFormat.NATIVE
|| evictionConfig.sizeConfigured;
if (keepAsIs) {
return nearCacheConfig;
}
// Copy the eviction config with the default on-heap max size applied,
// then wrap it in a copy of the near-cache config.
EvictionConfig evictionCopy = new EvictionConfig(evictionConfig)
.setSize(MapConfig.DEFAULT_MAX_SIZE);
return new NearCacheConfig(nearCacheConfig)
.setEvictionConfig(evictionCopy);
} | @Test
// The copy gets the default on-heap max size while the original config's
// eviction size remains untouched.
public void testCopyInitDefaultMaxSizeForOnHeapMaps_doesNotChangeOriginal_createsChangedCopy() {
NearCacheConfig nearCacheConfig = new NearCacheConfig();
NearCacheConfig copy = NearCacheConfigAccessor.copyWithInitializedDefaultMaxSizeForOnHeapMaps(nearCacheConfig);
assertEquals(MapConfig.DEFAULT_MAX_SIZE, copy.getEvictionConfig().getSize());
assertEquals(EvictionConfig.DEFAULT_MAX_ENTRY_COUNT, nearCacheConfig.getEvictionConfig().getSize());
} |
@Override
// Resolves a message code against the language map for the requested locale,
// then against the fallback locale; returns null when the code is unknown in
// both, otherwise a MessageFormat bound to the requested locale.
protected MessageFormat resolveCode(String code, Locale locale) {
String value = getValue(code, getLanguageMap(locale));
if (value == null) {
// Not found for the requested locale -- consult the fallback locale.
value = getValue(code, getLanguageMap(fallbackLocale));
}
if (value == null) {
// Unknown in both locales.
return null;
}
return new MessageFormat(value, locale);
} | @Test
// A code present in the locale's language file resolves to a MessageFormat
// carrying that locale and the configured pattern.
public void verifyWhenLocaleExists_canResolveCode() {
MessageFormat mf = jsonMessageSource.resolveCode("testAttribute", localeThatHasAFile);
assertEquals(mf.getLocale().getLanguage(), "en");
assertEquals(mf.toPattern(), "testValue");
} |
@Override
// Initiates an S3 Glacier/Deep Archive restore for the given object using
// the configured retrieval tier and expiration period; a 409 (restore
// already running) is logged and ignored.
public void restore(final Path file, final LoginCallback prompt) throws BackgroundException {
final Path container = session.getFeature(PathContainerService.class).getContainer(file);
try {
try {
final AmazonS3 client = client(container);
// Standard - S3 Standard retrievals allow you to access any of your archived objects within several hours.
// This is the default option for the GLACIER and DEEP_ARCHIVE retrieval requests that do not specify
// the retrieval option. S3 Standard retrievals typically complete within 3-5 hours from the GLACIER
// storage class and typically complete within 12 hours from the DEEP_ARCHIVE storage class.
client.restoreObjectV2(new RestoreObjectRequest(container.getName(), session.getFeature(PathContainerService.class).getKey(file))
// To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
.withVersionId(file.attributes().getVersionId())
.withExpirationInDays(new HostPreferences(session.getHost()).getInteger("s3.glacier.restore.expiration.days"))
.withGlacierJobParameters(new GlacierJobParameters().withTier(new HostPreferences(session.getHost()).getProperty("s3.glacier.restore.tier")))
);
// 200 Reply if already restored
}
catch(AmazonClientException e) {
// Translate AWS SDK client failures into the application's exception hierarchy.
throw new AmazonServiceExceptionMappingService().map("Failure to write attributes of {0}", e, file);
}
}
catch(ConflictException e) {
// 409 when restore is in progress
log.warn(String.format("Restore for %s already in progress %s", file, e));
}
} | @Test
// Integration test: uploads a GLACIER-class object, issues a restore request
// against it, then deletes the object.
public void testRestore() throws Exception {
final Path bucket = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
final Path test = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(512);
final TransferStatus status = new TransferStatus();
status.setStorageClass("GLACIER");
status.setLength(content.length);
status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
final HttpResponseOutputStream<StorageObject> out = new S3WriteFeature(session, new S3AccessControlListFeature(session)).write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
out.close();
assertEquals("GLACIER", new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test).getStorageClass());
final Glacier feature = new Glacier(session, new S3LocationFeature(session), new DisabledX509TrustManager(), new DefaultX509KeyManager());
feature.restore(test, new DisabledLoginCallback());
new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledPasswordCallback(), new Delete.DisabledCallback());
} |
// Returns whether the given owner may release this lock, i.e. whether it is
// the current lock owner. Throws NullPointerException for a null ownerId.
public boolean allowsUnlock(UUID ownerId) {
Preconditions.checkNotNull(ownerId);
return ownerId.equals(lockOwnerId);
} | @Test(expected = NullPointerException.class)
// allowsUnlock must reject a null owner id with NullPointerException.
public void testAllowsUnlock_nullTransactionId() {
LockGuard stateLock = LockGuard.NOT_LOCKED;
stateLock.allowsUnlock(null);
} |
// Serializes the value to JSON and delegates to the String-based overload.
public void createOrUpdateItem(final String key, final Object value, final String comment) {
this.createOrUpdateItem(key, GsonUtils.getInstance().toJson(value), comment);
} | @Test
// Smoke test: the Object overload is invoked and observed via the mock.
public void testCreateOrUpdateItem() {
doNothing().when(apolloClient)
.createOrUpdateItem(Mockito.any(), Mockito.<Object>any(), Mockito.any());
apolloClient.createOrUpdateItem("Key", (Object) "Value", "Comment");
verify(apolloClient).createOrUpdateItem(Mockito.any(), Mockito.<Object>any(), Mockito.any());
} |
@Override
// Serializes a Connect value (with optional schema) to JSON bytes for the
// given topic. Returns null for a null schema with a null value; wraps
// serialization failures in a DataException.
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
// With schemas enabled the JSON is wrapped in a schema+payload envelope.
final JsonNode jsonValue;
if (config.schemasEnabled()) {
jsonValue = convertToJsonWithEnvelope(schema, value);
} else {
jsonValue = convertToJsonWithoutEnvelope(schema, value);
}
try {
return serializer.serialize(topic, jsonValue);
} catch (SerializationException e) {
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
} | @Test
// A map with non-string keys must serialize as an array of [key, value]
// pairs inside the standard schema/payload envelope.
public void mapToJsonNonStringKeys() {
Schema intIntMap = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build();
Map<Integer, Integer> input = new HashMap<>();
input.put(1, 12);
input.put(2, 15);
JsonNode converted = parse(converter.fromConnectData(TOPIC, intIntMap, input));
validateEnvelope(converted);
assertEquals(parse("{ \"type\": \"map\", \"keys\": { \"type\" : \"int32\", \"optional\": false }, \"values\": { \"type\" : \"int32\", \"optional\": false }, \"optional\": false }"),
converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isArray());
ArrayNode payload = (ArrayNode) converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
assertEquals(2, payload.size());
Set<JsonNode> payloadEntries = new HashSet<>();
for (JsonNode elem : payload)
payloadEntries.add(elem);
// Entry order in the payload array is unspecified, so compare as a set.
assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add(1).add(12),
JsonNodeFactory.instance.arrayNode().add(2).add(15))),
payloadEntries
);
} |
// Registers a pending configuration-change task: creates a ballot for the
// (new, old) configuration pair and appends it together with its completion
// closure under the write lock. Returns false when the ballot cannot be
// initialized or when no pending index has been established yet.
public boolean appendPendingTask(final Configuration conf, final Configuration oldConf, final Closure done) {
final Ballot bl = new Ballot();
if (!bl.init(conf, oldConf)) {
LOG.error("Fail to init ballot.");
return false;
}
final long stamp = this.stampedLock.writeLock();
try {
// A non-positive pendingIndex means the box has not been reset yet.
if (this.pendingIndex <= 0) {
LOG.error("Node {} fail to appendingTask, pendingIndex={}.", this.opts.getNodeId(), this.pendingIndex);
return false;
}
this.pendingMetaQueue.add(bl);
this.closureQueue.appendPendingClosure(done);
return true;
} finally {
this.stampedLock.unlockWrite(stamp);
}
} | @Test
// Appending fails before a pending index is set; after resetPendingIndex(1)
// the ballot and closure are queued exactly once.
public void testAppendPendingTask() {
assertTrue(this.box.getPendingMetaQueue().isEmpty());
assertTrue(this.closureQueue.getQueue().isEmpty());
assertFalse(this.box.appendPendingTask(
JRaftUtils.getConfiguration("localhost:8081,localhost:8082,localhost:8083"),
JRaftUtils.getConfiguration("localhost:8081"), new Closure() {
@Override
public void run(Status status) {
}
}));
assertTrue(box.resetPendingIndex(1));
assertTrue(this.box.appendPendingTask(
JRaftUtils.getConfiguration("localhost:8081,localhost:8082,localhost:8083"),
JRaftUtils.getConfiguration("localhost:8081"), new Closure() {
@Override
public void run(Status status) {
}
}));
assertEquals(1, this.box.getPendingMetaQueue().size());
assertEquals(1, this.closureQueue.getQueue().size());
} |
// Reads the next file (or input row) and produces one output row: the
// configured content/size fields, followed by the optional filename,
// row-number and file-metadata fields from the step meta. Returns null when
// no more files are available.
Object[] getOneRow() throws KettleException {
  if ( !openNextFile() ) {
    return null;
  }
  // Build an empty row based on the meta-data
  Object[] outputRowData = buildEmptyRow();
  try {
    // Create new row or clone
    if ( meta.getIsInFields() ) {
      outputRowData = copyOrCloneArrayFromLoadFile( outputRowData, data.readrow );
    }
    // Read fields...
    for ( int i = 0; i < data.nrInputFields; i++ ) {
      // Get field
      LoadFileInputField loadFileInputField = meta.getInputFields()[i];
      Object o = null;
      int indexField = data.totalpreviousfields + i;
      ValueMetaInterface targetValueMeta = data.outputRowMeta.getValueMeta( indexField );
      ValueMetaInterface sourceValueMeta = data.convertRowMeta.getValueMeta( indexField );
      switch ( loadFileInputField.getElementType() ) {
        case LoadFileInputField.ELEMENT_TYPE_FILECONTENT:
          // DO Trimming!
          switch ( loadFileInputField.getTrimType() ) {
            case LoadFileInputField.TYPE_TRIM_LEFT:
              if ( meta.getEncoding() != null ) {
                data.filecontent = Const.ltrim( new String( data.filecontent, meta.getEncoding() ) ).getBytes();
              } else {
                data.filecontent = Const.ltrim( new String( data.filecontent ) ).getBytes();
              }
              break;
            case LoadFileInputField.TYPE_TRIM_RIGHT:
              if ( meta.getEncoding() != null ) {
                data.filecontent = Const.rtrim( new String( data.filecontent, meta.getEncoding() ) ).getBytes();
              } else {
                data.filecontent = Const.rtrim( new String( data.filecontent ) ).getBytes();
              }
              break;
            case LoadFileInputField.TYPE_TRIM_BOTH:
              if ( meta.getEncoding() != null ) {
                data.filecontent = Const.trim( new String( data.filecontent, meta.getEncoding() ) ).getBytes();
              } else {
                data.filecontent = Const.trim( new String( data.filecontent ) ).getBytes();
              }
              break;
            default:
              break;
          }
          if ( targetValueMeta.getType() != ValueMetaInterface.TYPE_BINARY ) {
            // handle as a String
            if ( meta.getEncoding() != null ) {
              o = new String( data.filecontent, meta.getEncoding() );
            } else {
              o = new String( data.filecontent );
            }
          } else {
            // save as byte[] without any conversion
            o = data.filecontent;
          }
          break;
        case LoadFileInputField.ELEMENT_TYPE_FILESIZE:
          o = String.valueOf( data.fileSize );
          break;
        default:
          break;
      }
      if ( targetValueMeta.getType() == ValueMetaInterface.TYPE_BINARY ) {
        // save as byte[] without any conversion
        outputRowData[indexField] = o;
      } else {
        // convert string (processing type) to the target type
        outputRowData[indexField] = targetValueMeta.convertData( sourceValueMeta, o );
      }
      // Do we need to repeat this field if it is null?
      if ( loadFileInputField.isRepeated() ) {
        if ( data.previousRow != null && o == null ) {
          outputRowData[indexField] = data.previousRow[indexField];
        }
      }
    } // End of loop over fields...
    int rowIndex = data.totalpreviousfields + data.nrInputFields;
    // See if we need to add the filename to the row...
    if ( meta.includeFilename() && meta.getFilenameField() != null && meta.getFilenameField().length() > 0 ) {
      outputRowData[rowIndex++] = data.filename;
    }
    // See if we need to add the row number to the row...
    // Use valueOf() instead of the deprecated boxing constructors below.
    if ( meta.includeRowNumber() && meta.getRowNumberField() != null && meta.getRowNumberField().length() > 0 ) {
      outputRowData[rowIndex++] = Long.valueOf( data.rownr );
    }
    // Possibly add short filename...
    if ( meta.getShortFileNameField() != null && meta.getShortFileNameField().length() > 0 ) {
      outputRowData[rowIndex++] = data.shortFilename;
    }
    // Add Extension
    if ( meta.getExtensionField() != null && meta.getExtensionField().length() > 0 ) {
      outputRowData[rowIndex++] = data.extension;
    }
    // add path
    if ( meta.getPathField() != null && meta.getPathField().length() > 0 ) {
      outputRowData[rowIndex++] = data.path;
    }
    // add Hidden
    if ( meta.isHiddenField() != null && meta.isHiddenField().length() > 0 ) {
      outputRowData[rowIndex++] = Boolean.valueOf( data.hidden );
    }
    // Add modification date
    if ( meta.getLastModificationDateField() != null && meta.getLastModificationDateField().length() > 0 ) {
      outputRowData[rowIndex++] = data.lastModificationDateTime;
    }
    // Add Uri
    if ( meta.getUriField() != null && meta.getUriField().length() > 0 ) {
      outputRowData[rowIndex++] = data.uriName;
    }
    // Add RootUri
    if ( meta.getRootUriField() != null && meta.getRootUriField().length() > 0 ) {
      outputRowData[rowIndex++] = data.rootUriName;
    }
    RowMetaInterface irow = getInputRowMeta();
    data.previousRow = irow == null ? outputRowData : irow.cloneRow( outputRowData ); // copy it to make
    // surely the next step doesn't change it in between...
    incrementLinesInput();
    data.rownr++;
  } catch ( Exception e ) {
    throw new KettleException( "Error during processing a row", e );
  }
  return outputRowData;
} | @Test
// Reading a UTF-8 file must decode multi-byte characters correctly and
// populate the configured filename/rownumber/metadata fields.
public void testUTF8Encoding() throws KettleException, FileSystemException {
stepMetaInterface.setIncludeFilename( true );
stepMetaInterface.setFilenameField( "filename" );
stepMetaInterface.setIncludeRowNumber( true );
stepMetaInterface.setRowNumberField( "rownumber" );
stepMetaInterface.setShortFileNameField( "shortname" );
stepMetaInterface.setExtensionField( "extension" );
stepMetaInterface.setPathField( "path" );
stepMetaInterface.setIsHiddenField( "hidden" );
stepMetaInterface.setLastModificationDateField( "lastmodified" );
stepMetaInterface.setUriField( "uri" );
stepMetaInterface.setRootUriField( "root uri" );
// string with UTF-8 encoding
( (LoadFileInputMeta) runtimeSMI ).setEncoding( "UTF-8" );
stepInputFiles.addFile( getFile( "UTF-8.txt" ) );
Object[] result = stepLoadFileInput.getOneRow();
assertEquals( " UTF-8 string ÕÕÕ€ ", result[0] );
assertEquals( 1L, result[2] );
assertEquals( "UTF-8.txt", result[3] );
assertEquals( "txt", result[4] );
assertEquals( false, result[6] );
assertEquals( getFile( "UTF-8.txt" ).getURL().toString(), result[8] );
assertEquals( getFile( "UTF-8.txt" ).getName().getRootURI(), result[9] );
} |
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("{networkId}/devices/{deviceId}/ports")
// Creates a virtual port on the given virtual device from the posted JSON
// document and returns 201 Created with a Location header pointing at the
// new port resource. Malformed or inconsistent input yields
// IllegalArgumentException (mapped to a client error).
public Response createVirtualPort(@PathParam("networkId") long networkId,
@PathParam("deviceId") String virtDeviceId,
InputStream stream) {
try {
ObjectNode jsonTree = readTreeFromStream(mapper(), stream);
// The body must repeat the path parameters; reject mismatches.
JsonNode specifiedNetworkId = jsonTree.get("networkId");
JsonNode specifiedDeviceId = jsonTree.get("deviceId");
if (specifiedNetworkId == null || specifiedNetworkId.asLong() != (networkId)) {
throw new IllegalArgumentException(INVALID_FIELD + "networkId");
}
if (specifiedDeviceId == null || !specifiedDeviceId.asText().equals(virtDeviceId)) {
throw new IllegalArgumentException(INVALID_FIELD + "deviceId");
}
// Guard the remaining mandatory fields as well -- previously a missing
// field caused a NullPointerException instead of a clear invalid-field error.
JsonNode specifiedPortNum = jsonTree.get("portNum");
if (specifiedPortNum == null) {
throw new IllegalArgumentException(INVALID_FIELD + "portNum");
}
JsonNode specifiedPhysDeviceId = jsonTree.get("physDeviceId");
if (specifiedPhysDeviceId == null) {
throw new IllegalArgumentException(INVALID_FIELD + "physDeviceId");
}
JsonNode specifiedPhysPortNum = jsonTree.get("physPortNum");
if (specifiedPhysPortNum == null) {
throw new IllegalArgumentException(INVALID_FIELD + "physPortNum");
}
final NetworkId nid = NetworkId.networkId(networkId);
DeviceId vdevId = DeviceId.deviceId(virtDeviceId);
ConnectPoint realizedBy = new ConnectPoint(DeviceId.deviceId(specifiedPhysDeviceId.asText()),
PortNumber.portNumber(specifiedPhysPortNum.asText()));
VirtualPort vport = vnetAdminService.createVirtualPort(nid, vdevId,
PortNumber.portNumber(specifiedPortNum.asText()), realizedBy);
// Location header identifies the newly created port resource.
UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
.path("vnets").path(specifiedNetworkId.asText())
.path("devices").path(specifiedDeviceId.asText())
.path("ports").path(vport.number().toString());
return Response
.created(locationBuilder.build())
.build();
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
} | @Test
// POSTing a valid virtual-port JSON document yields HTTP 201 Created and
// invokes the admin service with the decoded connect point.
public void testPostVirtualPort() {
NetworkId networkId = networkId3;
DeviceId deviceId = devId22;
DefaultAnnotations annotations = DefaultAnnotations.builder().build();
Device physDevice = new DefaultDevice(null, DeviceId.deviceId("dev1"),
null, null, null, null, null, null, annotations);
ConnectPoint cp1 = new ConnectPoint(physDevice.id(), portNumber(1));
expect(mockVnetAdminService.createVirtualPort(networkId, deviceId, portNumber(22), cp1))
.andReturn(vport22);
replay(mockVnetAdminService);
WebTarget wt = target();
InputStream jsonStream = VirtualNetworkWebResourceTest.class
.getResourceAsStream("post-virtual-port.json");
String reqLocation = "vnets/" + networkId.toString()
+ "/devices/" + deviceId.toString() + "/ports";
Response response = wt.path(reqLocation).request(MediaType.APPLICATION_JSON_TYPE)
.post(Entity.json(jsonStream));
assertThat(response.getStatus(), is(HttpURLConnection.HTTP_CREATED));
verify(mockVnetAdminService);
} |
// Normalizes a textual IP literal: returns it unchanged when recognized,
// strips the IPv6 prefix from IPv4-embedded forms (e.g. "::0000:a.b.c.d"),
// and returns null for empty or unrecognizable input.
@Nullable public static String ipOrNull(@Nullable String ip) {
if (ip == null || ip.isEmpty()) return null;
if ("::1".equals(ip) || "127.0.0.1".equals(ip)) return ip; // special-case localhost
IpFamily format = detectFamily(ip);
if (format == IpFamily.IPv4Embedded) {
// Keep only the trailing dotted-quad portion after the last colon.
ip = ip.substring(ip.lastIndexOf(':') + 1);
} else if (format == IpFamily.Unknown) {
ip = null;
}
return ip;
} | @Test void ipOrNull_ipv6_compatIpv4() {
// An IPv4-compatible IPv6 literal reduces to its embedded IPv4 address.
assertThat(IpLiteral.ipOrNull("::0000:43.0.192.2")).isEqualTo("43.0.192.2");
} |
@Override
// One editing pass of the preemption policy: refresh configuration if
// needed, then walk the queue hierarchy and preempt or kill containers
// as required.
public synchronized void editSchedule() {
updateConfigIfNeeded();
long startTs = clock.getTime();
CSQueue root = scheduler.getRootQueue();
// Work on a snapshot of the cluster resources.
Resource clusterResources = Resources.clone(scheduler.getClusterResource());
containerBasedPreemptOrKill(root, clusterResources);
if (LOG.isDebugEnabled()) {
LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
}
} | @Test
// When reserved resources exceed pending demand and only one queue is
// demanding, no preemption should be triggered.
public void testPreemptionNotHappenForSingleReservedQueue() {
/*
* Test case to make sure, when reserved > pending, preemption will not
* happen if there's only one demanding queue.
*/
int[][] qData = new int[][]{
// / A B C
{ 100, 40, 40, 20 }, // abs
{ 100, 100, 100, 100 }, // maxCap
{ 100, 70, 0, 0 }, // used
{ 10, 30, 0, 0 }, // pending
{ 0, 50, 0, 0 }, // reserved
{ 1, 1, 0, 0 }, // apps
{ -1, 1, 1, 1 }, // req granularity
{ 3, 0, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// No preemption happens
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
} |
// Returns the reachable ResourceManager web address (scheme included). In HA
// mode each configured RM is probed via its REST version endpoint and the
// first one answering 200 wins; if none responds an IOException carrying the
// collected per-host diagnostics is thrown. Without HA the configured
// address is returned unprobed.
String getRMWebAddress() throws IOException {
Configuration conf = getConfig();
String scheme = "http://";
String path = "/app/v1/services/version";
String rmAddress = conf
.get("yarn.resourcemanager.webapp.address");
if (YarnConfiguration.useHttps(conf)) {
scheme = "https://";
rmAddress = conf
.get("yarn.resourcemanager.webapp.https.address");
}
if (HAUtil.isHAEnabled(conf)) {
boolean useKerberos = UserGroupInformation.isSecurityEnabled();
List<String> rmServers = getRMHAWebAddresses(conf);
StringBuilder diagnosticsMsg = new StringBuilder();
for (String host : rmServers) {
try {
Client client = Client.create();
client.setFollowRedirects(false);
StringBuilder sb = new StringBuilder();
sb.append(scheme)
.append(host)
.append(path);
if (!useKerberos) {
try {
// Simple auth: identify the caller via the user.name query param.
String username = UserGroupInformation.getCurrentUser()
.getShortUserName();
sb.append("?user.name=")
.append(username);
} catch (IOException e) {
// SLF4J treats a trailing Throwable specially; the original
// "{}" placeholder was never substituted, so drop it.
LOG.debug("Fail to resolve username", e);
}
}
Builder builder = client
.resource(sb.toString()).type(MediaType.APPLICATION_JSON);
if (useKerberos) {
// Kerberos: send a SPNEGO token for the target host.
String[] server = host.split(":");
String challenge = YarnClientUtils.generateToken(server[0]);
builder.header(HttpHeaders.AUTHORIZATION, "Negotiate " +
challenge);
LOG.debug("Authorization: Negotiate {}", challenge);
}
ClientResponse test = builder.get(ClientResponse.class);
if (test.getStatus() == 200) {
return scheme + host;
}
} catch (Exception e) {
LOG.info("Fail to connect to: " + host);
LOG.debug("Root cause: ", e);
// Append with the builder instead of concatenating a temporary String.
diagnosticsMsg.append("Error connecting to ").append(host)
.append(" due to ").append(e.getMessage()).append("\n");
}
}
throw new IOException(diagnosticsMsg.toString());
}
return scheme + rmAddress;
} | @Test
// In HA mode with an unreachable RM, getRMWebAddress must throw an
// IOException whose message names the host it failed to reach.
void testGetRMWebAddress() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_IDS, "rm1");
conf.set(YarnConfiguration.RM_HA_ID, "rm1");
conf.set("yarn.resourcemanager.webapp.address.rm1", "localhost:0");
ApiServiceClient asc1 = new ApiServiceClient(conf);
boolean exceptionCaught = false;
String diagnosticsMsg = null;
try {
String rmWebAddress = asc1.getRMWebAddress();
} catch (IOException e) {
exceptionCaught = true;
diagnosticsMsg = e.getMessage();
}
assertTrue(exceptionCaught, "ApiServiceClient failed to throw exception");
assertTrue(diagnosticsMsg.contains("Error connecting to localhost:0"),
"Exception Message does not match");
} |
// Validates the length of the component UUID before storing it; returns
// this for fluent chaining.
public CeActivityDto setComponentUuid(@Nullable String s) {
validateUuid(s, "COMPONENT_UUID");
this.componentUuid = s;
return this;
} | @Test
// A 41-character value exceeds the column limit and must be rejected.
void setComponentUuid_throws_IAE_if_value_is_41_chars() {
String str_41_chars = STR_40_CHARS + "a";
assertThatThrownBy(() -> underTest.setComponentUuid(str_41_chars))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Value is too long for column CE_ACTIVITY.COMPONENT_UUID: " + str_41_chars);
} |
@Override
// Builds the response-join processor for a foreign-key join: each incoming
// SubscriptionResponseWrapper is validated against the current value of the
// local table (via a hash comparison) and either joined and forwarded, or
// dropped as stale and counted by the dropped-records sensor.
public Processor<K, SubscriptionResponseWrapper<VO>, K, VR> get() {
return new ContextualProcessor<K, SubscriptionResponseWrapper<VO>, K, VR>() {
private String valueHashSerdePseudoTopic;
private Serializer<V> runtimeValueSerializer = constructionTimeValueSerializer;
private KTableValueGetter<K, V> valueGetter;
private Sensor droppedRecordsSensor;
@SuppressWarnings("unchecked")
@Override
public void init(final ProcessorContext<K, VR> context) {
super.init(context);
valueHashSerdePseudoTopic = valueHashSerdePseudoTopicSupplier.get();
valueGetter = valueGetterSupplier.get();
valueGetter.init(context);
// Fall back to the context's value serde when no serializer was
// supplied at construction time.
if (runtimeValueSerializer == null) {
runtimeValueSerializer = (Serializer<V>) context.valueSerde().serializer();
}
final InternalProcessorContext<?, ?> internalProcessorContext = (InternalProcessorContext<?, ?>) context;
droppedRecordsSensor = TaskMetrics.droppedRecordsSensor(
Thread.currentThread().getName(),
internalProcessorContext.taskId().toString(),
internalProcessorContext.metrics()
);
}
@Override
public void process(final Record<K, SubscriptionResponseWrapper<VO>> record) {
if (record.value().getVersion() != SubscriptionResponseWrapper.CURRENT_VERSION) {
//Guard against modifications to SubscriptionResponseWrapper. Need to ensure that there is
//compatibility with previous versions to enable rolling upgrades. Must develop a strategy for
//upgrading from older SubscriptionWrapper versions to newer versions.
throw new UnsupportedVersionException("SubscriptionResponseWrapper is of an incompatible version.");
}
final ValueAndTimestamp<V> currentValueWithTimestamp = valueGetter.get(record.key());
// Hash of the current local value (null when the key is absent).
final long[] currentHash = currentValueWithTimestamp == null ?
null :
Murmur3.hash128(runtimeValueSerializer.serialize(valueHashSerdePseudoTopic, currentValueWithTimestamp.value()));
final long[] messageHash = record.value().getOriginalValueHash();
//If this value doesn't match the current value from the original table, it is stale and should be discarded.
if (java.util.Arrays.equals(messageHash, currentHash)) {
final VR result;
if (record.value().getForeignValue() == null && (!leftJoin || currentValueWithTimestamp == null)) {
result = null; //Emit tombstone
} else {
result = joiner.apply(currentValueWithTimestamp == null ? null : currentValueWithTimestamp.value(), record.value().getForeignValue());
}
context().forward(record.withValue(result));
} else {
LOG.trace("Dropping FK-join response due to hash mismatch. Expected {}. Actual {}", messageHash, currentHash);
droppedRecordsSensor.record();
}
}
};
} | @Test
public void shouldNotForwardWhenHashDoesNotMatch() {
    // A response carrying the hash of an outdated LHS value must be dropped
    // (nothing forwarded) and counted by the dropped-records sensor.
    final TestKTableValueGetterSupplier<String, String> valueGetterSupplier =
        new TestKTableValueGetterSupplier<>();
    final boolean leftJoin = false;
    final ResponseJoinProcessorSupplier<String, String, String, String> processorSupplier =
        new ResponseJoinProcessorSupplier<>(
            valueGetterSupplier,
            STRING_SERIALIZER,
            () -> "value-hash-dummy-topic",
            JOINER,
            leftJoin
        );
    final Processor<String, SubscriptionResponseWrapper<String>, String, String> processor = processorSupplier.get();
    final MockInternalNewProcessorContext<String, String> context = new MockInternalNewProcessorContext<>();
    processor.init(context);
    context.setRecordMetadata("topic", 0, 0);
    valueGetterSupplier.put("lhs1", "lhsValue");
    // Hash computed from a value different from the getter's current "lhsValue".
    final long[] oldHash = Murmur3.hash128(STRING_SERIALIZER.serialize("topic-join-resolver", "oldLhsValue"));
    processor.process(new Record<>("lhs1", new SubscriptionResponseWrapper<>(oldHash, "rhsValue", 0), 0));
    final List<MockProcessorContext.CapturedForward<? extends String, ? extends String>> forwarded = context.forwarded();
    assertThat(forwarded, empty());
    // test dropped-records sensors
    assertEquals(1.0, getDroppedRecordsTotalMetric(context));
    assertNotEquals(0.0, getDroppedRecordsRateMetric(context));
} |
// Creates a CsvReader driven by the given read configuration
// (a null config presumably falls back to CsvReader defaults — TODO confirm).
public static CsvReader getReader(CsvReadConfig config) {
    return new CsvReader(config);
} | @Test
public void readCsvStr1(){
    // Verifies default-reader handling of a leading '#' comment line, quoted
    // fields with embedded commas, escaped quotes, and empty quoted fields.
    CsvData data = CsvUtil.getReader().readFromStr("# 这是一行注释,读取时应忽略\n" +
        "\"sss,sss\",姓名,\"性别\",关注\"对象\",年龄,\"\",\"\"\"\n");
    List<CsvRow> rows = data.getRows();
    final CsvRow row0 = rows.get(0);
    assertEquals("sss,sss", row0.get(0));
    assertEquals("姓名", row0.get(1));
    assertEquals("性别", row0.get(2));
    assertEquals("关注\"对象\"", row0.get(3));
    assertEquals("年龄", row0.get(4));
    assertEquals("", row0.get(5));
    assertEquals("\"", row0.get(6));
} |
@Override
public void init(ServletConfig config) throws ServletException {
    // Initializes the collector servlet: global parameters, logging level,
    // HTTP authentication, and the background collector server.
    super.init(config);
    Parameters.initialize(config.getServletContext());
    if (!Parameter.LOG.getValueAsBoolean()) {
        // if logging is disabled in the collector server,
        // then no logs, as in the monitored webapp
        Configurator.setLevel(LOGGER.getName(), Level.WARN);
    }
    // in the collector server, log4j is guaranteed to be available
    LOGGER.info("initialization of the collector servlet of the monitoring");
    httpAuth = new HttpAuth();
    try {
        collectorServer = new CollectorServer();
    } catch (final IOException e) {
        // surface startup failure through the servlet lifecycle, preserving the cause
        throw new ServletException(e.getMessage(), e);
    }
} | @Test
public void testInit() throws ServletException, IOException {
    // First pass: init with the bare mocked config/context.
    replay(config);
    replay(context);
    collectorServlet.init(config);
    verify(config);
    verify(context);
    // Second pass: re-init with logging enabled and an allowed-address pattern.
    setUp();
    expect(context
        .getInitParameter(Parameters.PARAMETER_SYSTEM_PREFIX + Parameter.LOG.getCode()))
        .andReturn(TRUE).anyTimes();
    expect(context.getInitParameter(
        Parameters.PARAMETER_SYSTEM_PREFIX + Parameter.ALLOWED_ADDR_PATTERN.getCode()))
        .andReturn("127\\.0\\.0\\.1").anyTimes();
    replay(config);
    replay(context);
    collectorServlet.init(config);
    verify(config);
    verify(context);
} |
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request)
    throws RemotingCommandException {
    // Dispatch by request code; codes this processor does not own yield null.
    final int code = request.getCode();
    if (code == RequestCode.GET_CONSUMER_LIST_BY_GROUP) {
        return this.getConsumerListByGroup(ctx, request);
    }
    if (code == RequestCode.UPDATE_CONSUMER_OFFSET) {
        return this.updateConsumerOffset(ctx, request);
    }
    if (code == RequestCode.QUERY_CONSUMER_OFFSET) {
        return this.queryConsumerOffset(ctx, request);
    }
    return null;
} | @Test
public void testUpdateConsumerOffset_InvalidTopic() throws Exception {
    // Updating an offset for a topic unknown to the broker must answer
    // TOPIC_NOT_EXIST rather than succeeding silently.
    RemotingCommand request = buildUpdateConsumerOffsetRequest(group, "InvalidTopic", 0, 0);
    RemotingCommand response = consumerManageProcessor.processRequest(handlerContext, request);
    assertThat(response).isNotNull();
    assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
} |
public NetworkConfig setPort(int port) {
    // Accept only values inside the valid port range [0, PORT_MAX]; reject the rest.
    if (port >= 0 && port <= PORT_MAX) {
        this.port = port;
        return this;
    }
    throw new IllegalArgumentException("Port out of range: " + port + ". Allowed range [0,65535]");
} | @Test(expected = IllegalArgumentException.class)
public void testOverLimitPort() {
    // 65536 is one past the maximum valid port; setPort must throw
    // (expected = IllegalArgumentException on the annotation).
    int port = 65536;
    networkConfig.setPort(port);
} |
// Normalizes a MySQL-style INSERT for Derby: upper-cases the matched
// "INSERT INTO `table` (cols...)" prefix, strips backquotes, and removes the
// trailing statement terminator. Statements without a matching prefix are
// returned unchanged.
public static String insertStatementCorrection(String sql) {
    Matcher matcher = INSERT_INTO_PATTERN.matcher(sql);
    if (!matcher.find()) {
        return sql;
    }
    final String target = matcher.group(0);
    final String upperCase = target.toUpperCase().replace("`", "");
    // quoteReplacement: identifiers could contain '$' or '\', which replaceFirst
    // would otherwise interpret as group references in the replacement string.
    String corrected = sql.replaceFirst(INSERT_INTO_VALUES, Matcher.quoteReplacement(upperCase));
    // Strip only trailing ';' characters; the previous replace(";", "") also
    // corrupted semicolons embedded inside quoted values.
    int end = corrected.length();
    while (end > 0 && corrected.charAt(end - 1) == ';') {
        end--;
    }
    return corrected.substring(0, end);
} | @Test
void testDerbySqlCorrect() {
    // MySQL dump line -> Derby form: prefix upper-cased, backquotes removed,
    // trailing ';' dropped, VALUES payload untouched.
    final String testSql = "INSERT INTO `config_info` (`id`, `data_id`, `group_id`, `content`, `md5`, `gmt_create`, `gmt_modified`, `src_user`, `src_ip`, `app_name`, `tenant_id`, `c_desc`, `c_use`, `effect`, `type`, `c_schema`) VALUES (1,'boot-test','ALIBABA','dept:123123123\\ngroup:123123123','2ca50d002a7dabf81497f666a7967e15','2020-04-13 13:44:43','2020-04-30 10:45:21',NULL,'127.0.0.1','','',NULL,NULL,NULL,NULL,NULL);";
    final String result = DerbyUtils.insertStatementCorrection(testSql);
    final String expect = "INSERT INTO CONFIG_INFO (ID, DATA_ID, GROUP_ID, CONTENT, MD5, GMT_CREATE, GMT_MODIFIED, SRC_USER, SRC_IP, APP_NAME, TENANT_ID, C_DESC, C_USE, EFFECT, TYPE, C_SCHEMA) VALUES (1,'boot-test','ALIBABA','dept:123123123\\ngroup:123123123','2ca50d002a7dabf81497f666a7967e15','2020-04-13 13:44:43','2020-04-30 10:45:21',NULL,'127.0.0.1','','',NULL,NULL,NULL,NULL,NULL)";
    assertEquals(expect, result);
} |
// Applies the compute-pushdown Rewriter over the given subplan via rewriteWith;
// nodes the rewriter does not handle are returned unchanged.
@Override
public PlanNode optimize(
    PlanNode maxSubplan,
    ConnectorSession session,
    VariableAllocator variableAllocator,
    PlanNodeIdAllocator idAllocator)
{
    return rewriteWith(new Rewriter(session, idAllocator), maxSubplan);
} | @Test
public void testJdbcComputePushdownUnsupported()
{
    // '>' is not supported by the pushdown translator, so the optimized plan
    // must keep the Filter node and leave the table layout without SQL.
    String table = "test_table";
    String schema = "test_schema";
    String expression = "(c1 + c2) > c2";
    TypeProvider typeProvider = TypeProvider.copyOf(ImmutableMap.of("c1", BIGINT, "c2", BIGINT));
    RowExpression rowExpression = sqlToRowExpressionTranslator.translateAndOptimize(expression(expression), typeProvider);
    Set<ColumnHandle> columns = Stream.of("c1", "c2").map(TestJdbcComputePushdown::integerJdbcColumnHandle).collect(Collectors.toSet());
    PlanNode original = filter(jdbcTableScan(schema, table, BIGINT, "c1", "c2"), rowExpression);
    JdbcTableHandle jdbcTableHandle = new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName(schema, table), CATALOG_NAME, schema, table);
    ConnectorSession session = new TestingConnectorSession(ImmutableList.of());
    // Test should expect an empty entry for translatedSql since > is an unsupported function currently in the optimizer
    JdbcTableLayoutHandle jdbcTableLayoutHandle = new JdbcTableLayoutHandle(session.getSqlFunctionProperties(), jdbcTableHandle, TupleDomain.none(), Optional.empty());
    PlanNode actual = this.jdbcComputePushdown.optimize(original, session, null, ID_ALLOCATOR);
    assertPlanMatch(actual, PlanMatchPattern.filter(
        expression,
        JdbcTableScanMatcher.jdbcTableScanPattern(jdbcTableLayoutHandle, columns)));
} |
@PUT
@Path("{id}")
@Timed
@ApiOperation(value = "Update index set")
@AuditEvent(type = AuditEventTypes.INDEX_SET_UPDATE)
@ApiResponses(value = {
    @ApiResponse(code = 403, message = "Unauthorized"),
    @ApiResponse(code = 409, message = "Mismatch of IDs in URI path and payload"),
})
public IndexSetSummary update(@ApiParam(name = "id", required = true)
                              @PathParam("id") String id,
                              @ApiParam(name = "Index set configuration", required = true)
                              @Valid @NotNull IndexSetUpdateRequest updateRequest) {
    // Updates an existing index set: permission check, existence check, then
    // guards (default set must stay writable, data tiering must be present when
    // legacy rotation is off) before validating and persisting the new config.
    checkPermission(RestPermissions.INDEXSETS_EDIT, id);
    final IndexSetConfig oldConfig = indexSetService.get(id)
        .orElseThrow(() -> new NotFoundException("Index set <" + id + "> not found"));
    final IndexSetConfig defaultIndexSet = indexSetService.getDefault();
    final boolean isDefaultSet = oldConfig.equals(defaultIndexSet);
    if (isDefaultSet && !updateRequest.isWritable()) {
        throw new ClientErrorException("Default index set must be writable.", Response.Status.CONFLICT);
    }
    checkDataTieringNotNull(updateRequest.useLegacyRotation(), updateRequest.dataTieringConfig());
    final IndexSetConfig indexSetConfig = updateRequest.toIndexSetConfig(id, oldConfig);
    final Optional<Violation> violation = indexSetValidator.validate(indexSetConfig);
    if (violation.isPresent()) {
        throw new BadRequestException(violation.get().message());
    }
    final IndexSetConfig savedObject = indexSetService.save(indexSetConfig);
    return IndexSetSummary.fromIndexSetConfig(savedObject, isDefaultSet);
} | @Test
public void updateDenied() {
    // Without the edit permission the resource must fail fast with 403 and
    // never touch the index set service.
    notPermitted();
    final IndexSetConfig indexSetConfig = createTestConfig("id", "title");
    expectedException.expect(ForbiddenException.class);
    expectedException.expectMessage("Not authorized to access resource id <wrong-id>");
    try {
        indexSetsResource.update("wrong-id", IndexSetUpdateRequest.fromIndexSetConfig(indexSetConfig));
    } finally {
        verifyNoMoreInteractions(indexSetService);
    }
} |
@Override
public void print(Iterator<RowData> it, PrintWriter printWriter) {
    // Empty iterators get the dedicated "empty result" output; otherwise render
    // the table followed by a footer carrying the row count.
    if (it.hasNext()) {
        final long rowCount = printTable(it, printWriter);
        printFooter(printWriter, rowCount);
    } else {
        printEmptyResult(it, printWriter);
    }
} | @Test
void testPrintWithEmptyResult() {
    // An empty iterator must print exactly "Empty set" plus a line separator.
    PrintStyle.tableauWithDataInferredColumnWidths(getSchema(), getConverter())
        .print(Collections.emptyIterator(), new PrintWriter(outContent));
    assertThat(outContent.toString()).isEqualTo("Empty set" + System.lineSeparator());
} |
// Ratio of the wrapped criterion's value over the enter-and-hold benchmark
// for the same position.
@Override
public Num calculate(BarSeries series, Position position) {
    return criterion.calculate(series, position).dividedBy(enterAndHoldCriterion.calculate(series, position));
} | @Test
} | @Test
public void calculateWithAverageProfit() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 130);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, NaN, NaN), Trade.sellAt(1, NaN, NaN),
Trade.buyAt(2, NaN, NaN), Trade.sellAt(5, NaN, NaN));
AnalysisCriterion buyAndHold = getCriterion(new AverageReturnPerBarCriterion());
assertNumEquals(Math.pow(95d / 100 * 130d / 100, 1d / 6) / Math.pow(130d / 100, 1d / 6),
buyAndHold.calculate(series, tradingRecord));
} |
// Age derived from the ID card's embedded birth date, relative to the current
// date (DateUtil.date()); delegates to the two-argument overload.
public static int getAgeByIdCard(String idcard) {
    return getAgeByIdCard(idcard, DateUtil.date());
} | @Test
public void issue3651Test() {
    // Regression for issue 3651: a child born 2008-07-11/31 is still 5 on the
    // given reference dates (birthday boundary handling).
    DateTime date = DateUtil.parse("2014-07-11");
    int age = IdcardUtil.getAgeByIdCard("321083200807112111", date);
    assertEquals(5, age);
    date = DateUtil.parse("2014-07-31");
    age = IdcardUtil.getAgeByIdCard("321083200807312113", date);
    assertEquals(5, age);
} |
// Builds the actual argument array for invoking FEEL function method m: if m
// declares an EvaluationContext parameter, the given ctx is spliced in at that
// position (wrapped as a NamedParameter "ctx" when named params are used) and
// the caller-supplied params fill the remaining slots in order.
static Object[] addCtxParamIfRequired(EvaluationContext ctx, Object[] params, boolean isNamedParams, Method m) {
    logger.trace("addCtxParamIfRequired {} {} {} {}", ctx, params, isNamedParams, m);
    Object[] actualParams;
    // Here, we check if any of the parameters is an EvaluationContext
    boolean injectCtx = Arrays.stream(m.getParameterTypes()).anyMatch(EvaluationContext.class::isAssignableFrom);
    if (injectCtx) {
        actualParams = new Object[params.length + 1];
        int j = 0; // index into the caller-supplied params
        for (int i = 0; i < m.getParameterCount(); i++) {
            if (EvaluationContext.class.isAssignableFrom(m.getParameterTypes()[i])) {
                if (isNamedParams) {
                    actualParams[i] = new NamedParameter("ctx", ctx);
                } else {
                    actualParams[i] = ctx;
                }
            } else if (j < params.length) {
                actualParams[i] = params[j];
                j++;
            }
        }
    } else {
        // No context parameter declared: pass the arguments through untouched.
        actualParams = params;
    }
    return actualParams;
} | @Test
void addCtxParamIfRequired() throws NoSuchMethodException {
    // Covers all three shapes: no ctx parameter (pass-through), positional ctx
    // injection, and NamedParameter("ctx", ...) injection.
    // AllFunction.invoke(@ParameterName( "list" ) List list)
    Method method = AllFunction.class.getMethod("invoke", List.class);
    assertNotNull(method);
    Object[] parameters = {List.of(true, false)};
    Object[] retrieved = BaseFEELFunctionHelper.addCtxParamIfRequired(ctx, parameters, true, method);
    assertNotNull(retrieved);
    assertEquals(parameters.length, retrieved.length);
    for (int i = 0; i < parameters.length; i++) {
        assertEquals(parameters[i], retrieved[i]);
    }
    // SortFunction.invoke(@ParameterName( "ctx" ) EvaluationContext ctx,
    // @ParameterName("list") List list,
    // @ParameterName("precedes") FEELFunction function)
    method = SortFunction.class.getMethod("invoke", EvaluationContext.class, List.class, FEELFunction.class);
    assertNotNull(method);
    parameters = new Object[]{List.of(1, 2), AllFunction.INSTANCE};
    // direct reference to ctx
    retrieved = BaseFEELFunctionHelper.addCtxParamIfRequired(ctx, parameters, false, method);
    assertNotNull(retrieved);
    assertEquals(parameters.length + 1, retrieved.length);
    assertEquals(ctx, retrieved[0]);
    for (int i = 0; i < parameters.length; i++) {
        assertEquals(parameters[i], retrieved[i + 1]);
    }
    // NamedParameter reference to ctx
    retrieved = BaseFEELFunctionHelper.addCtxParamIfRequired(ctx, parameters, true, method);
    assertNotNull(retrieved);
    assertEquals(parameters.length + 1, retrieved.length);
    assertEquals(NamedParameter.class, retrieved[0].getClass());
    NamedParameter retrievedNamedParameter = (NamedParameter) retrieved[0];
    assertEquals("ctx", retrievedNamedParameter.getName());
    assertEquals(ctx, retrievedNamedParameter.getValue());
    for (int i = 0; i < parameters.length; i++) {
        assertEquals(parameters[i], retrieved[i + 1]);
    }
} |
@Override
public ListTransactionsRequest.Builder buildBatchedRequest(
    int brokerId,
    Set<AllBrokersStrategy.BrokerKey> keys
) {
    // Copy the option filters into the request payload; states are sent as
    // their string names.
    ArrayList<Long> producerIdFilters = new ArrayList<>(options.filteredProducerIds());
    ArrayList<String> stateFilters = new ArrayList<>();
    for (TransactionState state : options.filteredStates()) {
        stateFilters.add(state.toString());
    }
    ListTransactionsRequestData data = new ListTransactionsRequestData();
    data.setProducerIdFilters(producerIdFilters);
    data.setStateFilters(stateFilters);
    data.setDurationFilter(options.filteredDuration());
    return new ListTransactionsRequest.Builder(data);
} | @Test
public void testBuildRequestWithFilteredState() {
    // A single state filter must appear (as its string name) in the request;
    // producer-id filters stay empty.
    int brokerId = 1;
    BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId));
    TransactionState filteredState = TransactionState.ONGOING;
    ListTransactionsOptions options = new ListTransactionsOptions()
        .filterStates(singleton(filteredState));
    ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext);
    ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, singleton(brokerKey)).build();
    assertEquals(Collections.singletonList(filteredState.toString()), request.data().stateFilters());
    assertEquals(Collections.emptyList(), request.data().producerIdFilters());
} |
// Read-only view of this message's field names; callers cannot mutate it, and
// it reflects later changes to the underlying fields map (it is a view, not a copy).
public Set<String> getFieldNames() {
    return Collections.unmodifiableSet(fields.keySet());
} | @Test(expected = UnsupportedOperationException.class)
public void testGetFieldNamesReturnsUnmodifiableSet() throws Exception {
    final Set<String> fieldNames = message.getFieldNames();
    // Mutation must fail (expected = UnsupportedOperationException).
    fieldNames.remove("_id");
} |
// Entry point for the KafkaIO read transform; returns a Read builder populated
// with the library defaults (processing-time timestamps, unbounded record
// count, 2s poll timeout, no redistribution, commits-on-finalize disabled).
public static <K, V> Read<K, V> read() {
    return new AutoValue_KafkaIO_Read.Builder<K, V>()
        .setTopics(new ArrayList<>())
        .setTopicPartitions(new ArrayList<>())
        .setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
        .setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
        .setMaxNumRecords(Long.MAX_VALUE)
        .setCommitOffsetsInFinalizeEnabled(false)
        .setDynamicRead(false)
        .setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
        .setConsumerPollingTimeout(2L)
        .setRedistributed(false)
        .setAllowDuplicates(false)
        .setRedistributeNumKeys(0)
        .build();
} | @Test
public void testReadAvroGenericRecordsWithConfluentSchemaRegistry() {
    // Round-trips Avro GenericRecords through a mocked Confluent schema
    // registry: records serialized with registry-aware serializers must be
    // deserialized back into the expected key/value pairs.
    int numElements = 100;
    String topic = "my_topic";
    String schemaRegistryUrl = "mock://my-scope-name";
    String keySchemaSubject = topic + "-key";
    String valueSchemaSubject = topic + "-value";
    List<KV<GenericRecord, GenericRecord>> inputs = new ArrayList<>();
    for (int i = 0; i < numElements; i++) {
        inputs.add(
            KV.of(
                new AvroGeneratedUser("KeyName" + i, i, "color" + i),
                new AvroGeneratedUser("ValueName" + i, i, "color" + i)));
    }
    KafkaIO.Read<GenericRecord, GenericRecord> reader =
        KafkaIO.<GenericRecord, GenericRecord>read()
            .withBootstrapServers("localhost:9092")
            .withTopic(topic)
            .withKeyDeserializer(
                mockDeserializerProvider(schemaRegistryUrl, keySchemaSubject, null))
            .withValueDeserializer(
                mockDeserializerProvider(schemaRegistryUrl, valueSchemaSubject, null))
            .withConsumerFactoryFn(
                new ConsumerFactoryFn(
                    ImmutableList.of(topic),
                    1,
                    numElements,
                    OffsetResetStrategy.EARLIEST,
                    new KeyAvroSerializableFunction(topic, schemaRegistryUrl),
                    new ValueAvroSerializableFunction(topic, schemaRegistryUrl)))
            .withMaxNumRecords(numElements);
    PCollection<KV<GenericRecord, GenericRecord>> input = p.apply(reader.withoutMetadata());
    PAssert.that(input).containsInAnyOrder(inputs);
    p.run();
} |
// Delegates to the underlying token's reflected Type.
public Type getType() {
    return token.getType();
} | @Test
public void testEnclosing() throws Exception {
    // Type captured via an anonymous subclass of a generic enclosing class must
    // resolve through the instance-based descriptor.
    TypeRememberer<List<String>> rememberer = new GenericMaker<String>() {}.getRememberer();
    assertEquals(
        new TypeToken<List<String>>() {}.getType(), rememberer.descriptorByInstance.getType());
    // descriptorByClass *is not* able to find the type of T because it comes from the enclosing
    // instance of GenericMaker.
    // assertEquals(new TypeToken<List<T>>() {}.getType(), rememberer.descriptorByClass.getType());
} |
// Pages through destination filter rules scoped to one stream and one
// destination target, further narrowed by the parsed query string; entries the
// permissionSelector rejects are filtered out of the page.
public PaginatedList<StreamDestinationFilterRuleDTO> findPaginatedForStreamAndTarget(
    String streamId,
    String targetId,
    String queryString,
    Bson sort,
    int perPage,
    int page,
    Predicate<String> permissionSelector
) {
    final var query = parseQuery(queryString);
    return paginationHelper.filter(and(eq(FIELD_STREAM_ID, streamId), eq(FIELD_DESTINATION_TYPE, targetId), query))
        .sort(sort)
        .perPage(perPage)
        .page(page, dto -> permissionSelector.test(dto.id()));
} | @Test
@MongoDBFixtures("StreamDestinationFilterServiceTest-2024-07-01-1.json")
void findPaginatedForStreamAndTarget() {
    // With all permissions granted, the fixture is expected to yield exactly
    // two "indexer" rules for this stream.
    final var result = service.findPaginatedForStreamAndTarget("54e3deadbeefdeadbeef1000", "indexer", "", Sorts.ascending("title"), 10, 1, id -> true);
    assertThat(result.delegate()).hasSize(2);
} |
// Iterates over the individual version parts, in order.
@NotNull
@Override
public Iterator<String> iterator() {
    return versionParts.iterator();
} | @Test
public void testIterator() {
    // "1.2.3" must iterate as the parts "1", "2", "3" in order.
    DependencyVersion instance = new DependencyVersion("1.2.3");
    Iterator<String> result = instance.iterator();
    assertTrue(result.hasNext());
    int count = 1;
    while (result.hasNext()) {
        String v = result.next();
        assertTrue(String.valueOf(count++).equals(v));
    }
} |
// Serializes the editable config to XML on the given stream: rejects merged
// (non-local) configs, optionally preprocesses/validates, then performs XSD and
// DOM validation before writing.
public void write(CruiseConfig configForEdit, OutputStream output, boolean skipPreprocessingAndValidation) throws Exception {
    LOGGER.debug("[Serializing Config] Starting to write. Validation skipped? {}", skipPreprocessingAndValidation);
    MagicalGoConfigXmlLoader loader = new MagicalGoConfigXmlLoader(configCache, registry);
    if (!configForEdit.getOrigin().isLocal()) {
        throw new GoConfigInvalidException(configForEdit, "Attempted to save merged configuration with partials");
    }
    if (!skipPreprocessingAndValidation) {
        loader.preprocessAndValidate(configForEdit);
        LOGGER.debug("[Serializing Config] Done with cruise config validators.");
    }
    Document document = createEmptyCruiseConfigDocument();
    write(configForEdit, document.getRootElement(), configCache, registry);
    LOGGER.debug("[Serializing Config] XSD and DOM validation.");
    verifyXsdValid(document);
    MagicalGoConfigXmlLoader.validateDom(document.getRootElement(), registry);
    LOGGER.info("[Serializing Config] Generating config partial.");
    XmlUtils.writeXml(document, output);
    LOGGER.debug("[Serializing Config] Finished writing config partial.");
} | @Test
public void shouldThrowExceptionWhenPersisInvalidDom() throws Exception {
    //simulate the xml partial saving logic
    // A stage with duplicate runif values must be rejected by XSD validation
    // during write(); the message wording varies across XSD implementations.
    CruiseConfig cruiseConfig = ConfigMigrator.load(ConfigFileFixture.CONTAINS_MULTI_DIFFERENT_STATUS_RUN_IF);
    StageConfig stage = xmlLoader.fromXmlPartial(ConfigFileFixture.SAME_STATUS_RUN_IF_PARTIAL, StageConfig.class);
    PipelineConfig pipelineConfig = cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("test"));
    pipelineConfig.set(0, stage);
    try {
        xmlWriter.write(cruiseConfig, output, false);
        fail();
    } catch (Exception e) {
        assertThat(e.getMessage(), anyOf(
            is("Duplicate unique value [passed] declared for identity constraint of element \"exec\"."),
            is("Duplicate unique value [passed] declared for identity constraint \"uniqueRunIfTypeForExec\" of element \"exec\".")
        ));
    }
} |
// Always zero for this buffer implementation (no offset adjustment applied).
public int wrapAdjustment()
{
    return 0;
} | @Test
void wrapAdjustmentIsAlwaysZero()
{
    // Regardless of capacity, this implementation reports no wrap adjustment.
    final MutableDirectBuffer buffer = newBuffer(5);
    assertEquals(0, buffer.wrapAdjustment());
} |
@Override
public void startTrackingPartition(
    ResourceID producingTaskExecutorId,
    ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor) {
    // Tracks a produced partition keyed by its producing task executor; only
    // scheduler-released partition types are tracked at all.
    Preconditions.checkNotNull(producingTaskExecutorId);
    Preconditions.checkNotNull(resultPartitionDeploymentDescriptor);
    // non-releaseByScheduler partitions don't require explicit partition release calls.
    if (!resultPartitionDeploymentDescriptor.getPartitionType().isReleaseByScheduler()) {
        return;
    }
    final ResultPartitionID resultPartitionId =
        resultPartitionDeploymentDescriptor.getShuffleDescriptor().getResultPartitionID();
    startTrackingPartition(
        producingTaskExecutorId, resultPartitionId, resultPartitionDeploymentDescriptor);
} | @Test
void testStopTrackingIssuesNoReleaseCalls() {
    // Merely forgetting about a task executor's partitions must not trigger
    // any release or promote calls, locally or on the shuffle master.
    final TestingShuffleMaster shuffleMaster = new TestingShuffleMaster();
    final Queue<ReleaseCall> releaseCalls = new ArrayBlockingQueue<>(4);
    final Queue<PromoteCall> promoteCalls = new ArrayBlockingQueue<>(4);
    final JobMasterPartitionTrackerImpl partitionTracker =
        new JobMasterPartitionTrackerImpl(
            new JobID(),
            shuffleMaster,
            resourceId ->
                Optional.of(
                    createTaskExecutorGateway(
                        resourceId, releaseCalls, promoteCalls)));
    final ResourceID taskExecutorId1 = ResourceID.generate();
    final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
    partitionTracker.startTrackingPartition(
        taskExecutorId1,
        AbstractPartitionTrackerTest.createResultPartitionDeploymentDescriptor(
            resultPartitionId1, true));
    partitionTracker.stopTrackingPartitionsFor(taskExecutorId1);
    assertThat(releaseCalls).isEmpty();
    assertThat(promoteCalls).isEmpty();
    assertThat(shuffleMaster.externallyReleasedPartitions).isEmpty();
} |
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) {
    // Resolve the identity provider from the request; a null return means the
    // resolver already wrote the (error) response, so nothing further to do.
    IdentityProvider provider = resolveProviderOrHandleResponse(request, response, INIT_CONTEXT);
    if (provider != null) {
        handleProvider(request, response, provider);
    }
} | @Test
public void redirect_with_context_path_when_failing_because_of_UnauthorizedException() throws Exception {
    // An UnauthorizedException from the provider must redirect to the
    // "unauthorized" page, keeping the servlet context path prefix.
    when(request.getContextPath()).thenReturn("/sonarqube");
    IdentityProvider identityProvider = new FailWithUnauthorizedExceptionIdProvider("failing");
    when(request.getRequestURI()).thenReturn("/sonarqube/sessions/init/" + identityProvider.getKey());
    identityProviderRepository.addIdentityProvider(identityProvider);
    underTest.doFilter(request, response, chain);
    verify(response).sendRedirect("/sonarqube/sessions/unauthorized");
} |
// True when the node is not actively syncing AND the latest block's timestamp
// is within syncThreshold milliseconds of now (the chain head is fresh).
boolean isSynced() throws Exception {
    EthSyncing ethSyncing = web3j.ethSyncing().send();
    if (ethSyncing.isSyncing()) {
        return false;
    } else {
        EthBlock ethBlock =
            web3j.ethGetBlockByNumber(DefaultBlockParameterName.LATEST, false).send();
        // block timestamps are seconds; convert to milliseconds for comparison
        long timestamp = ethBlock.getBlock().getTimestamp().longValue() * 1000;
        return System.currentTimeMillis() - syncThreshold < timestamp;
    }
} | @Test
public void testIsSyncedBelowThreshold() throws Exception {
    // Latest block exactly at the threshold boundary: considered out of date.
    configureSyncing(false);
    configureLatestBlock((System.currentTimeMillis() / 1000) - DEFAULT_SYNC_THRESHOLD);
    assertFalse(ensResolver.isSynced());
} |
@Override
public T remove() {
    // Like poll(), but an empty queue is reported with an exception, per the
    // java.util.Queue#remove contract.
    final T head = poll();
    if (head != null) {
        return head;
    }
    throw new NoSuchElementException();
} | @Test
public void removeTest() {
    // Exercises remove(Object) across plain removals and ring-buffer rollover,
    // checking size and element order after each mutation.
    BlockingQueue<Integer> queue = new GrowableArrayBlockingQueue<>(4);
    assertNull(queue.poll());
    assertTrue(queue.offer(1));
    assertTrue(queue.offer(2));
    assertTrue(queue.offer(3));
    assertEquals(queue.size(), 3);
    assertFalse(queue.remove(4));
    assertEquals(queue.size(), 3);
    assertEquals(queue.toString(), "[1, 2, 3]");
    assertTrue(queue.remove(2));
    assertEquals(queue.size(), 2);
    assertEquals(queue.toString(), "[1, 3]");
    assertTrue(queue.remove(3));
    assertEquals(queue.size(), 1);
    assertEquals(queue.toString(), "[1]");
    assertTrue(queue.remove(1));
    assertEquals(queue.size(), 0);
    assertEquals(queue.toString(), "[]");
    // Test queue rollover
    queue.offer(1);
    queue.offer(2);
    queue.offer(3);
    assertEquals(queue.poll().intValue(), 1);
    assertEquals(queue.poll().intValue(), 2);
    assertTrue(queue.offer(4));
    assertTrue(queue.offer(5));
    assertTrue(queue.remove(5));
    assertEquals(queue.size(), 2);
    assertEquals(queue.toString(), "[3, 4]");
    queue.offer(6);
    queue.offer(7);
    assertTrue(queue.remove(6));
    assertEquals(queue.size(), 3);
    assertEquals(queue.toString(), "[3, 4, 7]");
    queue.offer(8);
    assertTrue(queue.remove(8));
    assertEquals(queue.size(), 3);
    assertEquals(queue.toString(), "[3, 4, 7]");
    queue.offer(8);
    assertEquals(queue.toString(), "[3, 4, 7, 8]");
    assertTrue(queue.remove(4));
    assertEquals(queue.size(), 3);
    assertEquals(queue.toString(), "[3, 7, 8]");
    assertTrue(queue.remove(8));
    assertEquals(queue.size(), 2);
    assertEquals(queue.toString(), "[3, 7]");
    assertTrue(queue.remove(7));
    assertEquals(queue.size(), 1);
    assertEquals(queue.toString(), "[3]");
} |
@SuppressWarnings("unused") // Part of required API.
public void execute(
    final ConfiguredStatement<InsertValues> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    // Executes INSERT VALUES: resolves the data source, validates the column
    // list, builds the Kafka record, and sends it — translating the various
    // authorization failures into consistent KsqlExceptions.
    final InsertValues insertValues = statement.getStatement();
    final MetaStore metaStore = executionContext.getMetaStore();
    final KsqlConfig config = statement.getSessionConfig().getConfig(true);
    final DataSource dataSource = getDataSource(config, metaStore, insertValues);
    validateInsert(insertValues.getColumns(), dataSource);
    final ProducerRecord<byte[], byte[]> record =
        buildRecord(statement, metaStore, dataSource, serviceContext);
    try {
        producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
    } catch (final TopicAuthorizationException e) {
        // TopicAuthorizationException does not give much detailed information about why it failed,
        // except which topics are denied. Here we just add the ACL to make the error message
        // consistent with other authorization error messages.
        final Exception rootCause = new KsqlTopicAuthorizationException(
            AclOperation.WRITE,
            e.unauthorizedTopics()
        );
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
    } catch (final ClusterAuthorizationException e) {
        // ClusterAuthorizationException is thrown when using idempotent producers
        // and either a topic write permission or a cluster-level idempotent write
        // permission (only applicable for broker versions no later than 2.8) is
        // missing. In this case, we include additional context to help the user
        // distinguish this type of failure from other permissions exceptions
        // such as the ones thrown above when TopicAuthorizationException is caught.
        throw new KsqlException(
            createInsertFailedExceptionMessage(insertValues),
            createClusterAuthorizationExceptionRootCause(dataSource)
        );
    } catch (final KafkaException e) {
        if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
            // The error message thrown when an idempotent producer is missing permissions
            // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
            // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
            // ksqlDB handles these two the same way, accordingly.
            // See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
            throw new KsqlException(
                createInsertFailedExceptionMessage(insertValues),
                createClusterAuthorizationExceptionRootCause(dataSource)
            );
        } else {
            throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
        }
    } catch (final Exception e) {
        // Catch-all keeps the user-facing message uniform while preserving the cause.
        throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
    }
} | @Test
public void shouldHandleUdfs() {
    // A UDF call in the VALUES list (SUBSTRING('foo', 2)) must be evaluated
    // before serialization, producing "oo" in the emitted row.
    // Given:
    givenSourceStreamWithSchema(SINGLE_VALUE_COLUMN_SCHEMA, SerdeFeatures.of(), SerdeFeatures.of());
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        ImmutableList.of(COL0),
        ImmutableList.of(
            new FunctionCall(
                FunctionName.of("SUBSTRING"),
                ImmutableList.of(new StringLiteral("foo"), new IntegerLiteral(2))))
    );
    // When:
    executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
    // Then:
    verify(valueSerializer).serialize(TOPIC_NAME, genericRow("oo"));
    verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
} |
public static int getInt(String envName, int defaultValue) {
int value = defaultValue;
String envValue = System.getenv(envName);
if (envValue != null) {
try {
value = Integer.parseInt(envValue);
} catch (NumberFormatException e) {
}
}
return value;
} | @Test
public void getInt() {
    // "myInt" is expected to be set to "123" in the test environment.
    assertEquals(123, EnvUtil.getInt("myInt", 234));
    // An unset variable falls back to the supplied default. (Fixed copy-paste
    // bug: this getInt-focused test was mistakenly calling EnvUtil.getLong.)
    assertEquals(234, EnvUtil.getInt("wrongInt", 234));
} |
public static boolean isRaw(int index, List<Pair<Integer>> pairs) {
    // True iff index falls inside one of the inclusive [left, right] RAW ranges.
    // Ranges are assumed to be in ascending order, so scanning can stop early.
    if (pairs == null || pairs.isEmpty()) {
        return false;
    }
    for (Pair<Integer> range : pairs) {
        if (index < range.getLeft()) {
            break; // sorted ranges: no later range can contain this index
        }
        if (index <= range.getRight()) {
            return true;
        }
    }
    return false;
} | @Test
public void testIsRaw() {
    // Ranges are inclusive on both ends; indices outside every range are not RAW.
    // Diamond operator replaces the previous raw-type Pair construction.
    List<Pair<Integer>> pairs = Arrays.asList(
        new Pair<>(3, 5),
        new Pair<>(8, 10));
    for (int i = 0; i < 3; i++) {
        assertFalse(URISupport.isRaw(i, pairs));
    }
    for (int i = 3; i < 6; i++) {
        assertTrue(URISupport.isRaw(i, pairs));
    }
    for (int i = 6; i < 8; i++) {
        assertFalse(URISupport.isRaw(i, pairs));
    }
    for (int i = 8; i < 11; i++) {
        assertTrue(URISupport.isRaw(i, pairs));
    }
    for (int i = 11; i < 15; i++) {
        assertFalse(URISupport.isRaw(i, pairs));
    }
} |
@VisibleForTesting
static Map<String, Set<Pair<TableRebalanceContext, Long>>> getCandidateJobs(String tableNameWithType,
Map<String, Map<String, String>> allJobMetadata)
throws Exception {
long nowMs = System.currentTimeMillis();
Map<String, Set<Pair<TableRebalanceContext, Long>>> candidates = new HashMap<>();
// If the job started most recently has already completed, then skip retry for the table.
Pair<String, Long> latestStartedJob = null;
Pair<String, Long> latestCompletedJob = null;
// The processing order of job metadata from the given Map is not deterministic. Track the completed original
// jobs so that we can simply skip the retry jobs belonging to the completed original jobs.
Map<String, String> completedOriginalJobs = new HashMap<>();
Set<String> cancelledOriginalJobs = new HashSet<>();
for (Map.Entry<String, Map<String, String>> entry : allJobMetadata.entrySet()) {
String jobId = entry.getKey();
Map<String, String> jobMetadata = entry.getValue();
long statsUpdatedAt = Long.parseLong(jobMetadata.get(CommonConstants.ControllerJob.SUBMISSION_TIME_MS));
String jobStatsInStr = jobMetadata.get(RebalanceJobConstants.JOB_METADATA_KEY_REBALANCE_PROGRESS_STATS);
if (StringUtils.isEmpty(jobStatsInStr)) {
LOGGER.info("Skip rebalance job: {} as it has no job progress stats", jobId);
continue;
}
String jobCtxInStr = jobMetadata.get(RebalanceJobConstants.JOB_METADATA_KEY_REBALANCE_CONTEXT);
if (StringUtils.isEmpty(jobCtxInStr)) {
LOGGER.info("Skip rebalance job: {} as it has no job context", jobId);
continue;
}
TableRebalanceProgressStats jobStats = JsonUtils.stringToObject(jobStatsInStr, TableRebalanceProgressStats.class);
TableRebalanceContext jobCtx = JsonUtils.stringToObject(jobCtxInStr, TableRebalanceContext.class);
long jobStartTimeMs = jobStats.getStartTimeMs();
if (latestStartedJob == null || latestStartedJob.getRight() < jobStartTimeMs) {
latestStartedJob = Pair.of(jobId, jobStartTimeMs);
}
String originalJobId = jobCtx.getOriginalJobId();
RebalanceResult.Status jobStatus = jobStats.getStatus();
if (jobStatus == RebalanceResult.Status.DONE || jobStatus == RebalanceResult.Status.NO_OP) {
LOGGER.info("Skip rebalance job: {} as it has completed with status: {}", jobId, jobStatus);
completedOriginalJobs.put(originalJobId, jobId);
if (latestCompletedJob == null || latestCompletedJob.getRight() < jobStartTimeMs) {
latestCompletedJob = Pair.of(jobId, jobStartTimeMs);
}
continue;
}
if (jobStatus == RebalanceResult.Status.FAILED || jobStatus == RebalanceResult.Status.ABORTED) {
LOGGER.info("Found rebalance job: {} for original job: {} has been stopped with status: {}", jobId,
originalJobId, jobStatus);
candidates.computeIfAbsent(originalJobId, (k) -> new HashSet<>()).add(Pair.of(jobCtx, jobStartTimeMs));
continue;
}
if (jobStatus == RebalanceResult.Status.CANCELLED) {
LOGGER.info("Found cancelled rebalance job: {} for original job: {}", jobId, originalJobId);
cancelledOriginalJobs.add(originalJobId);
continue;
}
// Check if an IN_PROGRESS job is still actively running.
long heartbeatTimeoutMs = jobCtx.getConfig().getHeartbeatTimeoutInMs();
if (nowMs - statsUpdatedAt < heartbeatTimeoutMs) {
LOGGER.info("Rebalance job: {} is actively running with status updated at: {} within timeout: {}. Skip "
+ "retry for table: {}", jobId, statsUpdatedAt, heartbeatTimeoutMs, tableNameWithType);
return Collections.emptyMap();
}
// The job is considered failed, but it's possible it is still running, in which case we might end up with more
// than one rebalance job running in parallel for a table. The rebalance algorithm is idempotent, so this should
// be fine for the correctness.
LOGGER.info("Found stuck rebalance job: {} for original job: {}", jobId, originalJobId);
candidates.computeIfAbsent(originalJobId, (k) -> new HashSet<>()).add(Pair.of(jobCtx, jobStartTimeMs));
}
if (latestCompletedJob != null && latestCompletedJob.getLeft().equals(latestStartedJob.getLeft())) {
LOGGER.info("Rebalance job: {} started most recently has already done. Skip retry for table: {}",
latestCompletedJob.getLeft(), tableNameWithType);
return Collections.emptyMap();
}
for (String jobId : cancelledOriginalJobs) {
LOGGER.info("Skip original job: {} as it's cancelled", jobId);
candidates.remove(jobId);
}
for (Map.Entry<String, String> entry : completedOriginalJobs.entrySet()) {
LOGGER.info("Skip original job: {} as it's completed by attempt: {}", entry.getKey(), entry.getValue());
candidates.remove(entry.getKey());
}
return candidates;
} | @Test
public void testGetCandidateJobs()
throws Exception {
String tableName = "table01";
Map<String, Map<String, String>> allJobMetadata = new HashMap<>();
// Original job run as job1, and all its retry jobs failed too.
RebalanceConfig jobCfg = new RebalanceConfig();
jobCfg.setMaxAttempts(4);
TableRebalanceProgressStats stats = new TableRebalanceProgressStats();
stats.setStatus(RebalanceResult.Status.FAILED);
stats.setStartTimeMs(1000);
TableRebalanceContext jobCtx = TableRebalanceContext.forInitialAttempt("job1", jobCfg);
Map<String, String> jobMetadata = ZkBasedTableRebalanceObserver.createJobMetadata(tableName, "job1", stats, jobCtx);
allJobMetadata.put("job1", jobMetadata);
// 3 failed retry runs for job1
jobMetadata = createDummyJobMetadata(tableName, "job1", 2, 1100, RebalanceResult.Status.FAILED);
allJobMetadata.put("job1_2", jobMetadata);
jobMetadata = createDummyJobMetadata(tableName, "job1", 3, 1200, RebalanceResult.Status.ABORTED);
allJobMetadata.put("job1_3", jobMetadata);
jobMetadata = createDummyJobMetadata(tableName, "job1", 4, 1300, RebalanceResult.Status.FAILED);
allJobMetadata.put("job1_4", jobMetadata);
// Original job run as job2, and its retry job job2_1 completed.
jobCfg = new RebalanceConfig();
jobCfg.setMaxAttempts(4);
stats = new TableRebalanceProgressStats();
stats.setStatus(RebalanceResult.Status.FAILED);
stats.setStartTimeMs(2000);
jobCtx = TableRebalanceContext.forInitialAttempt("job2", jobCfg);
jobMetadata = ZkBasedTableRebalanceObserver.createJobMetadata(tableName, "job2", stats, jobCtx);
allJobMetadata.put("job2", jobMetadata);
jobMetadata = createDummyJobMetadata(tableName, "job2", 2, 2100, RebalanceResult.Status.DONE);
allJobMetadata.put("job2_2", jobMetadata);
// Original job run as job3, and failed to send out heartbeat in time.
jobCfg = new RebalanceConfig();
jobCfg.setMaxAttempts(4);
stats = new TableRebalanceProgressStats();
stats.setStatus(RebalanceResult.Status.IN_PROGRESS);
stats.setStartTimeMs(3000);
jobCtx = TableRebalanceContext.forInitialAttempt("job3", jobCfg);
jobMetadata = ZkBasedTableRebalanceObserver.createJobMetadata(tableName, "job3", stats, jobCtx);
jobMetadata.put(CommonConstants.ControllerJob.SUBMISSION_TIME_MS, "3000");
allJobMetadata.put("job3", jobMetadata);
// Original job run as job4, which doesn't have retryJobCfg because it was created by an old version of the code.
stats = new TableRebalanceProgressStats();
stats.setStatus(RebalanceResult.Status.FAILED);
stats.setStartTimeMs(4000);
jobMetadata = ZkBasedTableRebalanceObserver.createJobMetadata(tableName, "job4", stats, null);
jobMetadata.remove(RebalanceJobConstants.JOB_METADATA_KEY_REBALANCE_CONTEXT);
allJobMetadata.put("job4", jobMetadata);
// Only need to retry job1 and job3, as job2 is completed and job4 is from old version of code.
Map<String, Set<Pair<TableRebalanceContext, Long>>> jobs =
RebalanceChecker.getCandidateJobs(tableName, allJobMetadata);
assertEquals(jobs.size(), 2);
assertTrue(jobs.containsKey("job1"));
assertTrue(jobs.containsKey("job3"));
assertEquals(jobs.get("job1").size(), 4); // four runs including job1,job1_2,job1_3,job1_4
assertEquals(jobs.get("job3").size(), 1); // just a single run job3
// Abort job1 and cancel its retries, then only job3 is retry candidate.
jobMetadata = allJobMetadata.get("job1_4");
cancelRebalanceJob(jobMetadata);
jobs = RebalanceChecker.getCandidateJobs(tableName, allJobMetadata);
assertEquals(jobs.size(), 1);
assertTrue(jobs.containsKey("job3"));
assertEquals(jobs.get("job3").size(), 1); // just a single run job3
// Add latest job5 that's already done, thus no need to retry for table.
jobCfg = new RebalanceConfig();
jobCfg.setMaxAttempts(4);
stats = new TableRebalanceProgressStats();
stats.setStatus(RebalanceResult.Status.DONE);
stats.setStartTimeMs(5000);
jobCtx = TableRebalanceContext.forInitialAttempt("job5", jobCfg);
jobMetadata = ZkBasedTableRebalanceObserver.createJobMetadata(tableName, "job5", stats, jobCtx);
allJobMetadata.put("job5", jobMetadata);
jobs = RebalanceChecker.getCandidateJobs(tableName, allJobMetadata);
assertEquals(jobs.size(), 0);
} |
@VisibleForTesting
double getTotalNicLimitWithConfiguration(List<String> nics) {
// Use the override value as configured. Return the total max speed across all available NICs, converted
// from Gbps into Kbps
return overrideBrokerNicSpeedGbps.map(BitRateUnit.Gigabit::toKilobit)
.map(speed -> speed * nics.size())
.orElseGet(() -> getTotalNicLimit(nics, BitRateUnit.Kilobit));
} | @Test
public void checkOverrideBrokerNicSpeedGbps() {
@Cleanup("shutdown")
ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
LinuxBrokerHostUsageImpl linuxBrokerHostUsage =
new LinuxBrokerHostUsageImpl(1, Optional.of(3.0), executorService);
List<String> nics = new ArrayList<>();
nics.add("1");
nics.add("2");
nics.add("3");
double totalLimit = linuxBrokerHostUsage.getTotalNicLimitWithConfiguration(nics);
Assert.assertEquals(totalLimit, 3.0 * 1000 * 1000 * 3);
} |
@Override
public synchronized boolean isAggregate(final FunctionName functionName) {
return udafs.containsKey(functionName.text().toUpperCase());
} | @Test
public void shouldKnowIfFunctionIsAggregate() {
loadAllUserFunctions(functionRegistry);
assertFalse(functionRegistry.isAggregate(FunctionName.of("lcase")));
assertTrue(functionRegistry.isAggregate(FunctionName.of("topk")));
} |
@Override
public V takeLast() throws InterruptedException {
return commandExecutor.getInterrupted(takeLastAsync());
} | @Test
public void testTakeLast() throws InterruptedException {
RBlockingDeque<Integer> deque = redisson.getBlockingDeque("queue:take");
deque.offerFirst(1);
deque.offerFirst(2);
deque.offerLast(3);
deque.offerLast(4);
assertThat(deque.takeLast()).isEqualTo(4);
assertThat(deque.takeLast()).isEqualTo(3);
assertThat(deque.takeLast()).isEqualTo(1);
assertThat(deque.takeLast()).isEqualTo(2);
assertThat(deque.size()).isZero();
} |
@Override
public boolean compareAndSet(double expect, double update) {
return get(compareAndSetAsync(expect, update));
} | @Test
public void testCompareAndSet() {
RAtomicDouble al = redisson.getAtomicDouble("test");
assertThat(al.compareAndSet(-1, 2.5)).isFalse();
assertThat(al.get()).isZero();
assertThat(al.compareAndSet(0, 2.5)).isTrue();
assertThat(al.get()).isEqualTo(2.5);
} |
@Override
public Object getInitialAggregatedValue(Object rawValue) {
CpcUnion cpcUnion = new CpcUnion(_lgK);
if (rawValue instanceof byte[]) { // Serialized Sketch
byte[] bytes = (byte[]) rawValue;
cpcUnion.update(deserializeAggregatedValue(bytes));
} else if (rawValue instanceof byte[][]) { // Multiple Serialized Sketches
byte[][] serializedSketches = (byte[][]) rawValue;
for (byte[] bytes : serializedSketches) {
cpcUnion.update(deserializeAggregatedValue(bytes));
}
} else {
CpcSketch pristineSketch = empty();
addObjectToSketch(rawValue, pristineSketch);
cpcUnion.update(pristineSketch);
}
return cpcUnion;
} | @Test
public void initialShouldCreateSingleItemSketch() {
DistinctCountCPCSketchValueAggregator agg = new DistinctCountCPCSketchValueAggregator(Collections.emptyList());
assertEquals(toSketch(agg.getInitialAggregatedValue("hello world")).getEstimate(), 1.0);
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
return invoke(n, BigDecimal.ZERO);
} | @Test
void invokeNull() {
FunctionTestUtil.assertResultError(ceilingFunction.invoke(null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(ceilingFunction.invoke((BigDecimal) null, null),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(ceilingFunction.invoke(BigDecimal.ONE, null), InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(ceilingFunction.invoke(null, BigDecimal.ONE), InvalidParametersEvent.class);
} |
public static Db use() {
return use(DSFactory.get());
} | @Test
public void countTest2() throws SQLException {
final long count = Db.use().count("select * from user order by name DESC");
assertEquals(4, count);
} |
@Override
public Set<EmailRecipient> findSubscribedEmailRecipients(String dispatcherKey, String projectKey,
SubscriberPermissionsOnProject subscriberPermissionsOnProject) {
verifyProjectKey(projectKey);
try (DbSession dbSession = dbClient.openSession(false)) {
Set<EmailSubscriberDto> emailSubscribers = dbClient.propertiesDao().findEmailSubscribersForNotification(
dbSession, dispatcherKey, EmailNotificationChannel.class.getSimpleName(), projectKey);
return keepAuthorizedEmailSubscribers(dbSession, projectKey, subscriberPermissionsOnProject, emailSubscribers);
}
} | @Test
public void findSubscribedEmailRecipients_returns_empty_if_no_email_recipients_in_project_for_dispatcher_key() {
String dispatcherKey = randomAlphabetic(12);
String globalPermission = randomAlphanumeric(4);
String projectPermission = randomAlphanumeric(5);
String projectKey = randomAlphabetic(6);
when(propertiesDao.findEmailSubscribersForNotification(dbSession, dispatcherKey, "EmailNotificationChannel", projectKey))
.thenReturn(Collections.emptySet());
Set<EmailRecipient> emailRecipients = underTest.findSubscribedEmailRecipients(dispatcherKey, projectKey,
new SubscriberPermissionsOnProject(globalPermission, projectPermission));
assertThat(emailRecipients).isEmpty();
verify(authorizationDao, times(0)).keepAuthorizedLoginsOnEntity(any(DbSession.class), anySet(), anyString(), anyString());
} |
@Override
public void requestCompleted(HttpContext context) {
httpAsyncRequestProducer.requestCompleted(context);
} | @Test
public void requestCompleted() {
final HttpAsyncRequestProducer delegate = Mockito.mock(HttpAsyncRequestProducer.class);
final HttpAsyncRequestProducerDecorator decorator = new HttpAsyncRequestProducerDecorator(
delegate, null, null);
decorator.requestCompleted(null);
Mockito.verify(delegate, Mockito.times(1)).requestCompleted(null);
} |
@Override
public Health checkNode() {
return nodeHealthChecks.stream()
.map(NodeHealthCheck::check)
.reduce(Health.GREEN, HealthReducer::merge);
} | @Test
public void checkNode_returns_YELLOW_status_if_only_GREEN_and_at_least_one_YELLOW_statuses_returned_by_NodeHealthCheck() {
List<Health.Status> statuses = new ArrayList<>();
Stream.concat(
IntStream.range(0, 1 + random.nextInt(20)).mapToObj(i -> YELLOW), // at least 1 YELLOW
IntStream.range(0, random.nextInt(20)).mapToObj(i -> GREEN)).forEach(statuses::add); // between 0 and 19 GREEN
Collections.shuffle(statuses);
HealthCheckerImpl underTest = newNodeHealthCheckerImpl(statuses.stream());
assertThat(underTest.checkNode().getStatus())
.describedAs("%s should have been computed from %s statuses", YELLOW, statuses)
.isEqualTo(YELLOW);
} |
public static <T> Collection<ServiceNamespace> getAllNamespaces(Map<?, T> containers,
Predicate<T> containerFilter,
Function<T, ObjectNamespace> toNamespace) {
if (MapUtil.isNullOrEmpty(containers)) {
return Collections.emptySet();
}
Collection<ServiceNamespace> collection = Collections.emptySet();
for (T container : containers.values()) {
if (!containerFilter.test(container)) {
continue;
}
ObjectNamespace namespace = toNamespace.apply(container);
if (collection.isEmpty()) {
collection = new HashSet<>(collection);
}
collection.add(namespace);
}
return collection;
} | @Test
public void testGetAllNamespaces_whenOneMatches() {
Collection<ServiceNamespace> namespaces =
NameSpaceUtil.getAllNamespaces(containers,
container -> container == 5,
container -> new DistributedObjectNamespace(SERVICE_NAME, Integer.toString(container)));
assertEquals(1, namespaces.size());
} |
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
} | @Test
public void sessionWindowAnonymousMaterializedCountShouldPreserveTopologyStructure() {
final StreamsBuilder builder = new StreamsBuilder();
builder.stream("input-topic")
.groupByKey()
.windowedBy(SessionWindows.with(ofMillis(1)))
.count(Materialized.<Object, Long, SessionStore<Bytes, byte[]>>with(null, Serdes.Long())
.withStoreType(Materialized.StoreType.ROCKS_DB));
final Topology topology = builder.build();
final TopologyDescription describe = topology.describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
" --> KSTREAM-AGGREGATE-0000000003\n" +
" Processor: KSTREAM-AGGREGATE-0000000003 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000002])\n" +
" --> none\n" +
" <-- KSTREAM-SOURCE-0000000000\n\n",
describe.toString()
);
topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
} |
@Override
@SuppressWarnings("unchecked")
public void onApplicationEvent(@NotNull final DataChangedEvent event) {
for (DataChangedListener listener : listeners) {
if ((!(listener instanceof AbstractDataChangedListener))
&& clusterProperties.isEnabled()
&& Objects.nonNull(shenyuClusterSelectMasterService)
&& !shenyuClusterSelectMasterService.isMaster()) {
LOG.info("received DataChangedEvent, not master, pass");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("received DataChangedEvent, dispatching, event:{}", JsonUtils.toJson(event));
}
switch (event.getGroupKey()) {
case APP_AUTH:
listener.onAppAuthChanged((List<AppAuthData>) event.getSource(), event.getEventType());
break;
case PLUGIN:
listener.onPluginChanged((List<PluginData>) event.getSource(), event.getEventType());
break;
case RULE:
listener.onRuleChanged((List<RuleData>) event.getSource(), event.getEventType());
break;
case SELECTOR:
listener.onSelectorChanged((List<SelectorData>) event.getSource(), event.getEventType());
break;
case META_DATA:
listener.onMetaDataChanged((List<MetaData>) event.getSource(), event.getEventType());
break;
case PROXY_SELECTOR:
listener.onProxySelectorChanged((List<ProxySelectorData>) event.getSource(), event.getEventType());
break;
case DISCOVER_UPSTREAM:
listener.onDiscoveryUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
applicationContext.getBean(LoadServiceDocEntry.class).loadDocOnUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
break;
default:
throw new IllegalStateException("Unexpected value: " + event.getGroupKey());
}
}
} | @Test
public void onApplicationEventWithNullTest() {
when(clusterProperties.isEnabled()).thenReturn(true);
when(shenyuClusterSelectMasterService.isMaster()).thenReturn(true);
NullPointerException exception = assertThrows(NullPointerException.class, () -> {
DataChangedEvent dataChangedEvent = new DataChangedEvent(null, null, new ArrayList<>());
dataChangedEventDispatcher.onApplicationEvent(dataChangedEvent);
});
assertNotNull(exception);
} |
public static FromEndOfWindow pastEndOfWindow() {
return new FromEndOfWindow();
} | @Test
public void testEarlyAndLateFiringsToString() {
TriggerStateMachine trigger =
AfterWatermarkStateMachine.pastEndOfWindow()
.withEarlyFirings(StubTriggerStateMachine.named("t1"))
.withLateFirings(StubTriggerStateMachine.named("t2"));
assertEquals(
"AfterWatermark.pastEndOfWindow().withEarlyFirings(t1).withLateFirings(t2)",
trigger.toString());
} |
@GetMapping
@Operation(security = @SecurityRequirement(name = "keycloak"))
public Iterable<Product> findProducts(@RequestParam(name = "filter", required = false) String filter) {
return this.productService.findAllProducts(filter);
} | @Test
void findProduct_ReturnsProductsList() {
// given
var filter = "товар";
doReturn(List.of(new Product(1, "Первый товар", "Описание первого товара"),
new Product(2, "Второй товар", "Описание второго товара")))
.when(this.productService).findAllProducts("товар");
// when
var result = this.controller.findProducts(filter);
// then
assertEquals(List.of(new Product(1, "Первый товар", "Описание первого товара"),
new Product(2, "Второй товар", "Описание второго товара")), result);
} |
public static String reformatSql(String sql) {
char[] chars = sql.toCharArray();
StringBuilder result = new StringBuilder(chars.length);
for (int i = 0; i < chars.length; i++) {
char c = chars[i];
if (c == '\n' || c == '\t') {
c = ' ';
}
if (Character.isWhitespace(c) && i > 0 && Character.isWhitespace(chars[i - 1])) {
continue;
}
result.append(c);
}
return result.toString();
} | @Test
public void reformatSql() {
assertThat(SqlLogFormatter.reformatSql("")).isEmpty();
assertThat(SqlLogFormatter.reformatSql("select *")).isEqualTo("select *");
assertThat(SqlLogFormatter.reformatSql("select *\nfrom issues")).isEqualTo("select * from issues");
assertThat(SqlLogFormatter.reformatSql("select *\n from issues")).isEqualTo("select * from issues");
assertThat(SqlLogFormatter.reformatSql("select *\n from issues")).isEqualTo("select * from issues");
assertThat(SqlLogFormatter.reformatSql("select *\n from issues")).isEqualTo("select * from issues");
assertThat(SqlLogFormatter.reformatSql("select *\n\t\t from \tissues")).isEqualTo("select * from issues");
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.