focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
@Override
public void preflight(final Path source, final Path target) throws BackgroundException {
    // Copy is only supported for files; reject directories up front.
    if(!source.isDirectory()) {
        return;
    }
    final String message = MessageFormat.format(LocaleFactory.localizedString("Cannot copy {0}", "Error"), source.getName());
    throw new UnsupportedException(message).withFile(source);
}
|
// Verifies that DeepboxCopyFeature.preflight rejects directories: creates a real
// directory under Documents, expects UnsupportedException from preflight, then
// cleans the directory up again.
@Test
public void testCopyDirectory() throws Exception {
    final DeepboxIdProvider fileid = new DeepboxIdProvider(session);
    final Path documents = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents/", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path directory = new DeepboxDirectoryFeature(session, fileid).mkdir(new Path(documents,
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path copy = new Path(documents, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    // Copying a directory must fail during preflight, before any transfer starts.
    assertThrows(UnsupportedException.class, () -> new DeepboxCopyFeature(session, fileid).preflight(directory, copy));
    // Clean up the remote directory created above.
    new DeepboxDeleteFeature(session, fileid).delete(Collections.singletonList(directory.withAttributes(new PathAttributes())), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Returns the quantile (inverse CDF) of this distribution for probability p.
 * Starts from a heuristic guess and brackets the answer by exponentially
 * growing/shrinking steps, then delegates to the two-argument quantile search
 * over the bracket [nl, nu].
 *
 * @param p probability in [0, 1]
 * @throws IllegalArgumentException if p is outside [0, 1]
 */
@Override
public double quantile(double p) {
    if (p < 0.0 || p > 1.0) {
        throw new IllegalArgumentException("Invalid p: " + p);
    }
    // Initial guess; this.p is the distribution's success probability (a field,
    // distinct from the argument p).
    int n = (int) Math.max(Math.sqrt(1 / this.p), 5.0);
    int nl, nu, inc = 1;
    if (p < cdf(n)) {
        // Guess too high: step downward with doubling step size until cdf(n) <= p
        // (or n hits 0), giving a bracket [n, n + inc/2].
        do {
            n = Math.max(n - inc, 0);
            inc *= 2;
        } while (p < cdf(n) && n > 0);
        nl = n;
        nu = n + inc / 2;
    } else {
        // Guess too low: step upward with doubling step size until cdf(n) >= p,
        // giving a bracket [n - inc/2, n].
        do {
            n += inc;
            inc *= 2;
        } while (p > cdf(n));
        nu = n;
        nl = n - inc / 2;
    }
    // Refine within the bracket.
    return quantile(p, nl, nu);
}
|
// Checks quantile() of a Geometric(0.3) distribution against known values
// across the whole probability range, including both bracketing branches.
@Test
public void testQuantile() {
    System.out.println("quantile");
    GeometricDistribution instance = new GeometricDistribution(0.3);
    // rand() exercises the sampler; its value is not asserted here.
    instance.rand();
    assertEquals(0, instance.quantile(0.01), 1E-6);
    assertEquals(0, instance.quantile(0.1), 1E-6);
    assertEquals(0, instance.quantile(0.2), 1E-6);
    assertEquals(0, instance.quantile(0.3), 1E-6);
    assertEquals(1, instance.quantile(0.4), 1E-6);
    assertEquals(2, instance.quantile(0.6), 1E-6);
    assertEquals(4, instance.quantile(0.8), 1E-6);
    assertEquals(6, instance.quantile(0.9), 1E-6);
    assertEquals(12, instance.quantile(0.99), 1E-6);
}
|
/**
 * Adds the element synchronously by issuing the async add and blocking on its
 * future via {@code get(...)}.
 *
 * @param e element to add
 * @return whether the collection changed as a result of the call
 */
@Override
public boolean add(V e) {
    return get(addAsync(e));
}
|
// Verifies that iterating a clustered RSet yields every element exactly once:
// insert 10000 distinct values, then re-collect them through the set's iterator.
@Test
public void testClusteredIterator() {
    testInCluster(redisson -> {
        final int size = 10000;
        RSet<String> set = redisson.getSet("{test");
        for (int i = 0; i < size; i++) {
            set.add(String.valueOf(i));
        }
        // Drain the set through its iterator into a local collection.
        Set<String> seen = new HashSet<>();
        set.forEach(seen::add);
        assertThat(seen).hasSize(size);
    });
}
|
/**
 * Logs a cluster TRUNCATE_LOG_ENTRY event into the shared ring buffer.
 * Computes the encoded length (state enum name plus one int member id plus
 * eight longs), claims space in the ring buffer, and encodes the event;
 * the claim is always committed, even if encoding throws.
 *
 * @param memberId            id of the cluster member
 * @param state               consensus-module state (logged by enum name)
 * @param logLeadershipTermId leadership term id of the log entry
 * @param leadershipTermId    current leadership term id
 * @param candidateTermId     candidate term id
 * @param commitPosition      commit position
 * @param logPosition         log position
 * @param appendPosition      append position
 * @param oldPosition         position before truncation
 * @param newPosition         position after truncation
 */
public <E extends Enum<E>> void logOnTruncateLogEntry(
    final int memberId,
    final E state,
    final long logLeadershipTermId,
    final long leadershipTermId,
    final long candidateTermId,
    final long commitPosition,
    final long logPosition,
    final long appendPosition,
    final long oldPosition,
    final long newPosition)
{
    // Length = member id int + length-prefixed enum name + 8 longs.
    final int length = SIZE_OF_INT + enumName(state).length() + SIZE_OF_INT + 8 * SIZE_OF_LONG;
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(TRUNCATE_LOG_ENTRY.toEventCodeId(), encodedLength);
    // tryClaim returns a non-positive index when the claim fails; drop the event then.
    if (index > 0)
    {
        try
        {
            encodeTruncateLogEntry(
                (UnsafeBuffer)ringBuffer.buffer(),
                index,
                captureLength,
                length,
                memberId,
                state,
                logLeadershipTermId,
                leadershipTermId,
                candidateTermId,
                commitPosition,
                logPosition,
                appendPosition,
                oldPosition,
                newPosition);
        }
        finally
        {
            // Commit in finally so a failed encode cannot leave the claim open.
            ringBuffer.commit(index);
        }
    }
}
|
// End-to-end check of logOnTruncateLogEntry: writes an event at a known tail
// offset, then verifies the log header, every encoded field (8 longs, then the
// int member id, then the ASCII enum name), and finally the dissected text form.
@Test
void logTruncateLogEntry()
{
    final int offset = align(22, ALIGNMENT);
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);
    final ChronoUnit state = ChronoUnit.FOREVER;
    final int memberId = 8;
    final long logLeadershipTermId = 777L;
    final long leadershipTermId = 1233L;
    final long candidateTermId = 42L;
    final int commitPosition = 1000;
    final long logPosition = 33L;
    final long appendPosition = 555L;
    final long oldPosition = 98L;
    final long newPosition = 24L;
    logger.logOnTruncateLogEntry(
        memberId,
        state,
        logLeadershipTermId,
        leadershipTermId,
        candidateTermId,
        commitPosition,
        logPosition,
        appendPosition,
        oldPosition,
        newPosition);
    // Mirrors the length computation in the logger: int + enum name + int + 8 longs.
    final int length = SIZE_OF_INT + state.name().length() + SIZE_OF_INT + 8 * SIZE_OF_LONG;
    verifyLogHeader(logBuffer, offset, TRUNCATE_LOG_ENTRY.toEventCodeId(), length, length);
    // Walk the encoded payload field by field in wire order.
    int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
    assertEquals(logLeadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(leadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(candidateTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(commitPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(logPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(appendPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(oldPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(newPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
    index += SIZE_OF_LONG;
    assertEquals(memberId, logBuffer.getInt(index, LITTLE_ENDIAN));
    index += SIZE_OF_INT;
    assertEquals(enumName(state), logBuffer.getStringAscii(index, LITTLE_ENDIAN));
    // Also verify the human-readable dissection of the same buffer region.
    final StringBuilder sb = new StringBuilder();
    ClusterEventDissector.dissectTruncateLogEntry(TRUNCATE_LOG_ENTRY, logBuffer, encodedMsgOffset(offset), sb);
    final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: TRUNCATE_LOG_ENTRY \\[79/79]: " +
        "memberId=8 state=FOREVER logLeadershipTermId=777 leadershipTermId=1233 candidateTermId=42 " +
        "commitPosition=1000 logPosition=33 appendPosition=555 oldPosition=98 newPosition=24";
    assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
|
/**
 * Validates this task and its subtree.
 * Note the evaluation order matters: validate(...) runs first for its side
 * effects on {@code errors}; the onCancel config is validated next and
 * short-circuits the remaining checks if it fails.
 *
 * @param validationContext context to validate against
 * @return true only if the whole subtree is error-free
 */
@Override
public boolean validateTree(ValidationContext validationContext) {
    validate(validationContext);
    return (onCancelConfig.validateTree(validationContext) && errors.isEmpty() && !configuration.hasErrors());
}
|
// validateTree must report failure when the task's configuration has errors,
// regardless of the (null) validation context.
@Test
public void validateTreeShouldVerifyIfConfigurationHasErrors() {
    Configuration configuration = mock(Configuration.class);
    PluggableTask pluggableTask = new PluggableTask(new PluginConfiguration(), configuration);
    when(configuration.hasErrors()).thenReturn(true);
    assertFalse(pluggableTask.validateTree(null));
}
|
/**
 * Creates a canvas spanning this plot's bounds with the plot attached,
 * titled with the plot's name when one is set.
 *
 * @return the canvas containing this plot
 */
public Canvas canvas() {
    final Canvas result = new Canvas(getLowerBound(), getUpperBound());
    result.add(this);
    if (name == null) {
        return result;
    }
    result.setTitle(name);
    return result;
}
|
// Smoke test: renders a 3D scatter plot of the Iris data set colored by class
// and opens it in a window; passes as long as no exception is thrown.
@Test
public void testIris() throws Exception {
    System.out.println("Iris");
    var canvas = ScatterPlot.of(iris, "sepallength", "sepalwidth", "petallength", "class", '*').canvas();
    canvas.setAxisLabels("sepallength", "sepalwidth", "petallength");
    canvas.window();
}
|
/**
 * Applies a queue's configuration updates to both the proposed scheduler
 * configuration and the key/value update map. An empty or null value removes
 * the property; any other value sets it.
 *
 * @param updateInfo   the queue and its parameter updates; no-op when null
 * @param proposedConf scheduler configuration to mutate
 * @param confUpdate   accumulator of key to value (null value = removal)
 */
private static void updateQueue(QueueConfigInfo updateInfo,
    CapacitySchedulerConfiguration proposedConf,
    Map<String, String> confUpdate) {
  if (updateInfo == null) {
    return;
  }
  QueuePath queuePath = new QueuePath(updateInfo.getQueue());
  String keyPrefix = QueuePrefixes.getQueuePrefix(queuePath);
  for (Map.Entry<String, String> kv : updateInfo.getParams().entrySet()) {
    // Build the fully qualified property key once rather than three times.
    String key = keyPrefix + kv.getKey();
    String keyValue = kv.getValue();
    if (keyValue == null || keyValue.isEmpty()) {
      // Empty/null value means "remove this property".
      proposedConf.unset(key);
      confUpdate.put(key, null);
    } else {
      proposedConf.set(key, keyValue);
      confUpdate.put(key, keyValue);
    }
  }
}
|
// Updates two queues (A and B) through the assembler and verifies the resulting
// key/value map carries both configured values under their full property paths.
@Test
public void testUpdateQueue() throws Exception {
    SchedConfUpdateInfo updateInfo = new SchedConfUpdateInfo();
    Map<String, String> updateMap = new HashMap<>();
    updateMap.put(CONFIG_NAME, A_CONFIG_VALUE);
    QueueConfigInfo queueAConfigInfo = new QueueConfigInfo(A_PATH, updateMap);
    updateInfo.getUpdateQueueInfo().add(queueAConfigInfo);
    Map<String, String> updateMapQueueB = new HashMap<>();
    updateMapQueueB.put(CONFIG_NAME, B_CONFIG_VALUE);
    QueueConfigInfo queueBConfigInfo = new QueueConfigInfo(B_PATH, updateMapQueueB);
    updateInfo.getUpdateQueueInfo().add(queueBConfigInfo);
    Map<String, String> configurationUpdate =
        ConfigurationUpdateAssembler.constructKeyValueConfUpdate(csConfig, updateInfo);
    assertEquals(A_CONFIG_VALUE, configurationUpdate.get(A_CONFIG_PATH));
    assertEquals(B_CONFIG_VALUE, configurationUpdate.get(B_CONFIG_PATH));
}
|
/**
 * Unwraps the {@link ExtensibleLoadManagerImpl} from its wrapper.
 *
 * @param loadManager must be an {@code ExtensibleLoadManagerWrapper}
 * @return the wrapped load manager implementation
 * @throws IllegalArgumentException if the load manager is any other type
 */
public static ExtensibleLoadManagerImpl get(LoadManager loadManager) {
    if (loadManager instanceof ExtensibleLoadManagerWrapper wrapper) {
        return wrapper.get();
    }
    throw new IllegalArgumentException("The load manager should be 'ExtensibleLoadManagerWrapper'.");
}
|
// Exercises namespace deletion with the extensible load manager: a non-empty
// bundle must refuse plain deletion, force-deleting the bundle must succeed,
// and a forced namespace delete must remove the namespace entirely.
@Test(timeOut = 30 * 1000)
public void testDeleteNamespace() throws Exception {
    String namespace = "public/test-delete-namespace";
    TopicName topicName = TopicName.get(namespace + "/test-delete-namespace-topic");
    admin.namespaces().createNamespace(namespace);
    admin.namespaces().setNamespaceReplicationClusters(namespace, Sets.newHashSet(this.conf.getClusterName()));
    assertTrue(admin.namespaces().getNamespaces("public").contains(namespace));
    admin.topics().createPartitionedTopic(topicName.toString(), 2);
    // Lookup loads the topic so its bundle becomes non-empty.
    admin.lookups().lookupTopic(topicName.toString());
    NamespaceBundle bundle = getBundleAsync(pulsar1, topicName).get();
    try {
        admin.namespaces().deleteNamespaceBundle(namespace, bundle.getBundleRange());
        fail();
    } catch (Exception ex) {
        assertTrue(ex.getMessage().contains("Cannot delete non empty bundle"));
    }
    // Force-delete succeeds even while the bundle still owns topics.
    admin.namespaces().deleteNamespaceBundle(namespace, bundle.getBundleRange(), true);
    admin.lookups().lookupTopic(topicName.toString());
    admin.namespaces().deleteNamespace(namespace, true);
    assertFalse(admin.namespaces().getNamespaces("public").contains(namespace));
}
|
/**
 * Builds a human-readable summary of this alert condition's configuration.
 *
 * @return description listing time, field, check/threshold settings, grace and repeat flag
 */
@Override
public String getDescription() {
    final StringBuilder description = new StringBuilder();
    description.append("time: ").append(time);
    description.append(", field: ").append(field);
    description.append(", check type: ").append(type.toString().toLowerCase(Locale.ENGLISH));
    description.append(", threshold_type: ").append(thresholdType.toString().toLowerCase(Locale.ENGLISH));
    description.append(", threshold: ").append(decimalFormat.format(threshold));
    description.append(", grace: ").append(grace);
    description.append(", repeat notifications: ").append(repeatNotifications);
    return description.toString();
}
|
// Sanity check: a FieldValueAlertCondition built from a standard parameter map
// instantiates and produces a non-null description.
@Test
public void testConstructor() throws Exception {
    Map<String, Object> parameters = getParametersMap(0,
        0,
        FieldValueAlertCondition.ThresholdType.HIGHER,
        FieldValueAlertCondition.CheckType.MAX,
        0,
        "response_time");
    final FieldValueAlertCondition fieldValueAlertCondition = getTestInstance(FieldValueAlertCondition.class, parameters, alertConditionTitle);
    assertNotNull(fieldValueAlertCondition);
    assertNotNull(fieldValueAlertCondition.getDescription());
}
|
/**
 * Returns the transaction type this manager handles.
 *
 * @return always {@link TransactionType#XA}
 */
@Override
public TransactionType getTransactionType() {
    return TransactionType.XA;
}
|
// The XA transaction manager must report the XA transaction type.
@Test
void assertGetTransactionType() {
    assertThat(xaTransactionManager.getTransactionType(), is(TransactionType.XA));
}
|
/**
 * Streams every line number covered by the issue's locations within the given
 * component: the primary text range (if any) plus all flow locations that
 * belong to that component. Lines may repeat if ranges overlap.
 *
 * @param issue         issue whose locations are inspected
 * @param componentUuid component to filter flow locations by
 * @return line numbers covered, or an empty stream when the issue has no locations
 */
public static IntStream allLinesFor(DefaultIssue issue, String componentUuid) {
    DbIssues.Locations locations = issue.getLocations();
    if (locations == null) {
        return IntStream.empty();
    }
    // Primary range first, then every flow location filtered to the component.
    Stream<DbCommons.TextRange> textRanges = Stream.concat(
        locations.hasTextRange() ? Stream.of(locations.getTextRange()) : Stream.empty(),
        locations.getFlowList().stream()
            .flatMap(f -> f.getLocationList().stream())
            .filter(l -> Objects.equals(componentIdOf(issue, l), componentUuid))
            .map(DbIssues.Location::getTextRange));
    // Expand each range into its inclusive start..end line numbers.
    return textRanges.flatMapToInt(range -> IntStream.rangeClosed(range.getStartLine(), range.getEndLine()));
}
|
// Builds an issue with two flows spanning two files and checks allLinesFor
// collects the lines of every flow location per component (ranges expanded
// inclusively, e.g. 7-9 yields 7, 8, 9).
@Test
public void allLinesFor_traverses_all_flows() {
    DbIssues.Locations.Builder locations = DbIssues.Locations.newBuilder();
    locations.addFlowBuilder()
        .addLocation(newLocation("file1", 5, 5))
        .addLocation(newLocation("file2", 10, 11))
        .build();
    locations.addFlowBuilder()
        .addLocation(newLocation("file1", 7, 9))
        .addLocation(newLocation("file2", 12, 12))
        .build();
    DefaultIssue issue = new DefaultIssue().setLocations(locations.build());
    assertThat(IssueLocations.allLinesFor(issue, "file1")).containsExactlyInAnyOrder(5, 7, 8, 9);
    assertThat(IssueLocations.allLinesFor(issue, "file2")).containsExactlyInAnyOrder(10, 11, 12);
}
|
/**
 * Federates the query to the resolved search-chain targets and merges the results.
 * Targets come from the query's sources plus any targets chosen by the configured
 * target selector; unresolved chains produce warnings rather than failures.
 *
 * @param query     the incoming query
 * @param execution current execution context
 * @return the merged result over all resolved targets
 */
@Override
public Result search(Query query, Execution execution) {
    Result mergedResults = execution.search(query);
    var targets = getTargets(query.getModel().getSources(), query.properties());
    // Surface unresolved search chains as warnings on the result.
    warnIfUnresolvedSearchChains(extractErrors(targets), mergedResults.hits());
    // Drop targets that serve none of the restricted document types.
    var prunedTargets = pruneTargetsWithoutDocumentTypes(query.getModel().getRestrict(), extractSpecs(targets));
    var regularTargetHandlers = resolveSearchChains(prunedTargets, execution.searchChainRegistry());
    query.errors().addAll(regularTargetHandlers.errors());
    Set<Target> targetHandlers = new LinkedHashSet<>(regularTargetHandlers.data());
    targetHandlers.addAll(getAdditionalTargets(query, execution, targetSelector));
    traceTargets(query, targetHandlers);
    if (targetHandlers.isEmpty())
        return mergedResults;
    else if (targetHandlers.size() > 1)
        search(query, execution, targetHandlers, mergedResults);
    else if (shouldExecuteTargetLongerThanThread(query, targetHandlers.iterator().next()))
        search(query, execution, targetHandlers, mergedResults); // one target, but search in separate thread
    else
        search(query, execution, first(targetHandlers), mergedResults); // search in this thread
    return mergedResults;
}
|
// A target selector may contribute multiple targets: both selected chains must
// produce hits, in order, each carrying the data modified by the selector.
@Test
void target_selectors_can_have_multiple_targets() {
    ComponentId targetSelectorId = ComponentId.fromString("TestMultipleTargetSelector");
    ComponentRegistry<TargetSelector> targetSelectors = new ComponentRegistry<>();
    targetSelectors.register(targetSelectorId, new TestMultipleTargetSelector());
    FederationSearcher searcher = new FederationSearcher(
        new FederationConfig(new FederationConfig.Builder().targetSelector(targetSelectorId.toString())),
        SchemaInfo.empty(),
        targetSelectors);
    Query query = new Query();
    query.setTimeout(20000);
    Result result = new Execution(searcher, Context.createContextStub()).search(query);
    Iterator<Hit> hitsIterator = result.hits().deepIterator();
    Hit hit1 = hitsIterator.next();
    Hit hit2 = hitsIterator.next();
    assertEquals(hit1.getSource(), "chain1");
    assertEquals(hit2.getSource(), "chain2");
    assertEquals(hit1.getField("data"), "modifyTargetQuery:custom-data:1");
    assertEquals(hit2.getField("data"), "modifyTargetQuery:custom-data:2");
}
|
/**
 * Handles an OffsetCommit request: validates the group, keeps the classic-group
 * session alive where applicable, and builds one offset-commit record per valid
 * partition together with the per-partition response codes.
 *
 * @param context request context
 * @param request the offset commit request
 * @return records to append plus the response to send back
 * @throws ApiException if the group validation fails
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);
    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }
    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);
    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);
        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                // Oversized metadata: reject this partition only, not the whole request.
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));
                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );
                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });
    // Only count the sensor when at least one offset was actually committed.
    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }
    return new CoordinatorResult<>(records, response);
}
|
// A classic group's committed offset can be deleted when the group's subscribed
// topics no longer include it: commit an offset, clear subscriptions, delete,
// and verify the offset is gone.
@Test
public void testGenericGroupOffsetDeleteWithPendingTransactionalOffsets() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );
    context.commitOffset(10L, "foo", "bar", 0, 100L, 0, context.time.milliseconds());
    group.setSubscribedTopics(Optional.of(Collections.emptySet()));
    context.testOffsetDeleteWith("foo", "bar", 0, Errors.NONE);
    assertFalse(context.hasOffset("foo", "bar", 0));
}
|
/**
 * Decides whether a SQL token should be generated: only for INSERT statements
 * that omit an explicit column list.
 *
 * @param sqlStatementContext the statement context to inspect
 * @return true if a token should be generated
 */
@Override
public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) {
    if (!(sqlStatementContext instanceof InsertStatementContext)) {
        return false;
    }
    return !((InsertStatementContext) sqlStatementContext).containsInsertColumns();
}
|
// An INSERT with an explicit (here empty) column list must not trigger token generation.
@Test
void assertIsGenerateSQLToken() {
    assertFalse(generator.isGenerateSQLToken(EncryptGeneratorFixtureBuilder.createInsertStatementContext(Collections.emptyList())));
}
|
/**
 * Builds into a fresh {@link Config} instance by delegating to the
 * parameterized overload.
 *
 * @return the populated configuration
 */
@Override
public Config build() {
    return build(new Config());
}
|
// When built from a classpath URL, the config must record that URL as its
// source and leave the configuration file unset.
@Override
@Test
public void testConfigurationURL() throws Exception {
    URL configURL = getClass().getClassLoader().getResource("hazelcast-default.xml");
    Config config = new XmlConfigBuilder(configURL).build();
    assertEquals(configURL, config.getConfigurationUrl());
    assertNull(config.getConfigurationFile());
}
|
/**
 * Normalizes a Kubernetes CPU quantity by round-tripping it through
 * milli-CPUs, e.g. "1000m" becomes "1" and "0.5" becomes "500m".
 *
 * @param milliCpu CPU quantity in cores or milli-CPU notation
 * @return the canonical representation
 */
static String normalizeCpu(String milliCpu) {
    return formatMilliCpu(parseCpuAsMilliCpus(milliCpu));
}
|
// Canonicalization cases: whole cores stay plain, sub-core values use the
// "m" (milli) suffix, and "1000m" collapses to "1".
@Test
public void testNormalizeCpu() {
    assertThat(normalizeCpu("1"), is("1"));
    assertThat(normalizeCpu("1000m"), is("1"));
    assertThat(normalizeCpu("500m"), is("500m"));
    assertThat(normalizeCpu("0.5"), is("500m"));
    assertThat(normalizeCpu("0.1"), is("100m"));
    assertThat(normalizeCpu("0.01"), is("10m"));
    assertThat(normalizeCpu("0.001"), is("1m"));
}
|
/**
 * Maps the current app-session state to an authentication-status response for
 * the polling client. Unknown states are treated as still pending.
 *
 * @param flow    current flow (unused by this handler)
 * @param request the status-check request, used for app-type specific logging
 * @return the response reflecting the session state
 */
@Override
public AppResponse process(Flow flow, CheckAuthenticationStatusRequest request){
    switch(appSession.getState()) {
        case "AUTHENTICATION_REQUIRED", "AWAITING_QR_SCAN":
            return new CheckAuthenticationStatusResponse("PENDING", false);
        case "RETRIEVED", "AWAITING_CONFIRMATION":
            return new CheckAuthenticationStatusResponse("PENDING", true);
        case "CONFIRMED":
            return new StatusResponse("PENDING_CONFIRMED");
        case "AUTHENTICATED":
            return new OkResponse();
        case "CANCELLED":
            return new StatusResponse("CANCELLED");
        case "ABORTED":
            // Constant-first equals avoids an NPE when no abort code was recorded.
            if ("verification_code_invalid".equals(appSession.getAbortCode())) {
                String logCode = "wid_checker".equals(request.getAppType()) ? "1320" : "1368";
                digidClient.remoteLog(logCode, Map.of(HIDDEN, true));
            }
            return new NokResponse();
        default:
            return new CheckAuthenticationStatusResponse("PENDING", false);
    }
}
|
// A CONFIRMED session must yield a StatusResponse with status PENDING_CONFIRMED.
@Test
void processConfirmed(){
    appSession.setState("CONFIRMED");
    AppResponse response = checkAuthenticationStatus.process(flow, request);
    assertTrue(response instanceof StatusResponse);
    assertEquals("PENDING_CONFIRMED", ((StatusResponse) response).getStatus());
}
|
/**
 * Returns the identifier of the engine this output is redirected to.
 *
 * @return the target engine name
 */
public String getTargetEngine() {
    return targetEngine;
}
|
// The target engine passed to the constructor must be returned unchanged.
@Test
void getTargetEngine() {
    String targetEngine = "targetEngine";
    EfestoRedirectOutput retrieved = new EfestoRedirectOutput(modelLocalUriId, targetEngine, null) {};
    assertThat(retrieved.getTargetEngine()).isEqualTo(targetEngine);
}
|
/**
 * Prints a list of KSQL entities in the CLI's configured output format:
 * a single JSON document, or one table per entity (prefixed with the source
 * statement when more than one entity is printed).
 *
 * @param entityList entities to print
 * @throws IllegalStateException if the output format is unrecognized
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
    switch (outputFormat) {
        case JSON:
            printAsJson(entityList);
            break;
        case TABULAR:
            // Only show the originating statement when output would otherwise be ambiguous.
            final boolean showStatements = entityList.size() > 1;
            for (final KsqlEntity ksqlEntity : entityList) {
                writer().println();
                if (showStatements) {
                    writer().println(ksqlEntity.getStatementText());
                }
                printAsTable(ksqlEntity);
            }
            break;
        default:
            // IllegalStateException (a RuntimeException) keeps callers catching
            // RuntimeException working while naming the failure precisely.
            throw new IllegalStateException(String.format(
                "Unexpected output format: '%s'",
                outputFormat.name()
            ));
    }
}
|
// Prints a topic description through the console and verifies the rendered
// output against the stored approval snapshot.
@Test
public void testPrintTopicDescription() {
    // Given:
    final KsqlEntityList entityList = new KsqlEntityList(ImmutableList.of(
        new TopicDescription("e", "TestTopic", "TestKafkaTopic", "AVRO", "schemaString")
    ));
    // When:
    console.printKsqlEntityList(entityList);
    // Then:
    final String output = terminal.getOutputString();
    Approvals.verify(output, approvalOptions);
}
|
@Udf(description = "Returns the inverse (arc) cosine of an INT value")
public Double acos(
    @UdfParameter(
        value = "value",
        description = "The value to get the inverse cosine of."
    ) final Integer value
) {
    // Widen the INT to DOUBLE (preserving null) and delegate to the DOUBLE overload.
    final Double widened = (value == null) ? null : value.doubleValue();
    return acos(widened);
}
|
// acos is undefined below -1: every numeric overload must return NaN for
// out-of-domain inputs.
@Test
public void shouldHandleLessThanNegativeOne() {
    assertThat(Double.isNaN(udf.acos(-1.1)), is(true));
    assertThat(Double.isNaN(udf.acos(-6.0)), is(true));
    assertThat(Double.isNaN(udf.acos(-2)), is(true));
    assertThat(Double.isNaN(udf.acos(-2L)), is(true));
}
|
/**
 * Replaces each {@code ${ENV_VAR_NAME}} placeholder in the input with the value
 * of the corresponding environment variable, or the empty string when the
 * variable is not set.
 *
 * @param input the string to resolve; must not be null
 * @return the input with all placeholders substituted
 * @throws NullPointerException if input is null
 */
protected static String resolveEnvVars(String input) {
    java.util.Objects.requireNonNull(input);
    // match ${ENV_VAR_NAME}
    Pattern p = Pattern.compile("\\$\\{(\\w+)\\}");
    Matcher m = p.matcher(input);
    StringBuffer sb = new StringBuffer();
    while (m.find()) {
        String envVarName = m.group(1);
        String envVarValue = System.getenv(envVarName);
        // quoteReplacement prevents '$' or '\' inside the variable's value from
        // being interpreted as group references by appendReplacement.
        m.appendReplacement(sb, envVarValue == null ? "" : Matcher.quoteReplacement(envVarValue));
    }
    m.appendTail(sb);
    return sb.toString();
}
|
// With two variables injected into the environment, both placeholders must be
// replaced in place while surrounding text is preserved.
@Test
public void resolveEnvVars() throws Exception {
    SystemLambda.withEnvironmentVariable("VARNAME1", "varvalue1")
        .and("VARNAME2", "varvalue2")
        .execute(() -> {
            String resolved = EnvVarResolverProperties.resolveEnvVars(
                "padding ${VARNAME1} ${VARNAME2} padding");
            assertEquals(resolved, "padding varvalue1 varvalue2 padding");
        });
}
|
/**
 * Parses the provider's SMS delivery-status callback payload (a JSON array)
 * into receive-status DTOs.
 *
 * @param text JSON array of per-message status reports
 * @return one DTO per status entry
 */
@Override
public List<SmsReceiveRespDTO> parseSmsReceiveStatus(String text) {
    JSONArray statuses = JSONUtil.parseArray(text);
    // Field mapping reference (provider callback schema)
    return convertList(statuses, status -> {
        JSONObject statusObj = (JSONObject) status;
        return new SmsReceiveRespDTO()
            .setSuccess(statusObj.getBool("success")) // whether delivery succeeded
            .setErrorCode(statusObj.getStr("err_code")) // status report code
            .setErrorMsg(statusObj.getStr("err_msg")) // status report description
            .setMobile(statusObj.getStr("phone_number")) // recipient phone number
            .setReceiveTime(statusObj.getLocalDateTime("report_time", null)) // status report time
            .setSerialNo(statusObj.getStr("biz_id")) // provider send serial number
            .setLogId(statusObj.getLong("out_id")); // caller-side serial number
    });
}
|
// Parses a sample provider callback payload and verifies every mapped field
// of the resulting DTO, including the parsed report time.
@Test
public void testParseSmsReceiveStatus() {
    // Prepare the input payload
    String text = "[\n" +
        "    {\n" +
        "        \"phone_number\" : \"13900000001\",\n" +
        "        \"send_time\" : \"2017-01-01 11:12:13\",\n" +
        "        \"report_time\" : \"2017-02-02 22:23:24\",\n" +
        "        \"success\" : true,\n" +
        "        \"err_code\" : \"DELIVERED\",\n" +
        "        \"err_msg\" : \"用户接收成功\",\n" +
        "        \"sms_size\" : \"1\",\n" +
        "        \"biz_id\" : \"12345\",\n" +
        "        \"out_id\" : \"67890\"\n" +
        "    }\n" +
        "]";
    // No mocking needed
    // Invoke
    List<SmsReceiveRespDTO> statuses = smsClient.parseSmsReceiveStatus(text);
    // Assert
    assertEquals(1, statuses.size());
    assertTrue(statuses.get(0).getSuccess());
    assertEquals("DELIVERED", statuses.get(0).getErrorCode());
    assertEquals("用户接收成功", statuses.get(0).getErrorMsg());
    assertEquals("13900000001", statuses.get(0).getMobile());
    assertEquals(LocalDateTime.of(2017, 2, 2, 22, 23, 24),
        statuses.get(0).getReceiveTime());
    assertEquals("12345", statuses.get(0).getSerialNo());
    assertEquals(67890L, statuses.get(0).getLogId());
}
|
/**
 * Runs this computation step for the branch rooted at the tree-root holder's root.
 *
 * @param context step execution context (unused here)
 */
@Override
public void execute(ComputationStep.Context context) {
    executeForBranch(treeRootHolder.getRoot());
}
|
// Mixes removed, unchanged, updated and added quality profiles across two
// measure snapshots and verifies exactly the expected events are generated:
// one "Stop using", one "Use", and one "updated with N rules" event.
@Test
public void verify_detection_with_complex_mix_of_qps() {
    // Capture every event added to the repository for later inspection.
    final Set<Event> events = new HashSet<>();
    doAnswer(invocationOnMock -> {
        events.add((Event) invocationOnMock.getArguments()[0]);
        return null;
    }).when(eventRepository).add(any(Event.class));
    Date date = new Date();
    QualityProfile qp1 = qp(QP_NAME_2, LANGUAGE_KEY_1, date);
    QualityProfile qp2 = qp(QP_NAME_2, LANGUAGE_KEY_2, date);
    QualityProfile qp3 = qp(QP_NAME_1, LANGUAGE_KEY_1, BEFORE_DATE);
    QualityProfile qp3_updated = qp(QP_NAME_1, LANGUAGE_KEY_1, AFTER_DATE);
    QualityProfile qp4 = qp(QP_NAME_2, LANGUAGE_KEY_3, date);
    // Baseline has qp1..qp3; the new analysis has qp3 updated, qp2 kept, qp4 added.
    mockQualityProfileMeasures(
        treeRootHolder.getRoot(),
        arrayOf(qp1, qp2, qp3),
        arrayOf(qp3_updated, qp2, qp4));
    mockNoLanguageInRepository();
    qProfileStatusRepository.register(qp1.getQpKey(), REMOVED);
    qProfileStatusRepository.register(qp2.getQpKey(), UNCHANGED);
    qProfileStatusRepository.register(qp3.getQpKey(), UPDATED);
    qProfileStatusRepository.register(qp4.getQpKey(), ADDED);
    when(qualityProfileRuleChangeTextResolver.mapChangeToNumberOfRules(qp3_updated, treeRootHolder.getRoot().getUuid())).thenReturn(CHANGE_TO_NUMBER_OF_RULES_MAP);
    underTest.execute(new TestComputationStepContext());
    assertThat(events).extracting("name").containsOnly(
        "Stop using \"" + QP_NAME_2 + "\" (" + LANGUAGE_KEY_1 + ")",
        "Use \"" + QP_NAME_2 + "\" (" + LANGUAGE_KEY_3 + ")",
        "\"" + QP_NAME_1 + "\" (" + LANGUAGE_KEY_1 + ") updated with " + RULE_CHANGE_TEXT);
}
|
/**
 * Converts camel case to underscore (snake) case by delegating to
 * {@code toSymbolCase} with the underscore separator.
 *
 * @param str the text to convert; behavior for null follows toSymbolCase
 * @return the converted string
 */
public static String toUnderlineCase(CharSequence str) {
    return toSymbolCase(str, UNDERLINE);
}
|
// A single lowercase word has no case boundaries, so it must pass through unchanged.
@Test
public void toUnderlineCase() {
    String string = "str";
    String s = StringUtil.toUnderlineCase(string);
    Assert.assertEquals("str", s);
}
|
/**
 * Reports whether the service has been started; the actions reference is set
 * on start and cleared on stop, so its presence is the started flag.
 *
 * @return true while started
 */
public boolean isStarted() {
    return jobLeaderIdActions != null;
}
|
// isStarted must track the service lifecycle: false before start, true after
// start, false again after stop.
@Test
void testIsStarted() throws Exception {
    final JobID jobId = new JobID();
    TestingHighAvailabilityServices highAvailabilityServices =
        new TestingHighAvailabilityServices();
    SettableLeaderRetrievalService leaderRetrievalService =
        new SettableLeaderRetrievalService(null, null);
    highAvailabilityServices.setJobMasterLeaderRetriever(jobId, leaderRetrievalService);
    ScheduledExecutor scheduledExecutor = mock(ScheduledExecutor.class);
    Time timeout = Time.milliseconds(5000L);
    JobLeaderIdActions jobLeaderIdActions = mock(JobLeaderIdActions.class);
    DefaultJobLeaderIdService jobLeaderIdService =
        new DefaultJobLeaderIdService(highAvailabilityServices, scheduledExecutor, timeout);
    assertThat(jobLeaderIdService.isStarted()).isFalse();
    jobLeaderIdService.start(jobLeaderIdActions);
    assertThat(jobLeaderIdService.isStarted()).isTrue();
    jobLeaderIdService.stop();
    assertThat(jobLeaderIdService.isStarted()).isFalse();
}
|
/**
 * Instantiates the plugin described by the option, wrapping instantiation
 * failures in a CucumberException.
 *
 * @param plugin plugin descriptor (class, name, optional argument)
 * @return the created plugin instance
 * @throws CucumberException if the plugin cannot be instantiated
 */
Plugin create(Options.Plugin plugin) {
    try {
        return instantiate(plugin.pluginString(), plugin.pluginClass(), plugin.argument());
    } catch (IOException | URISyntaxException e) {
        throw new CucumberException(e);
    }
}
|
// A plugin that accepts a file-or-empty argument must instantiate with no
// argument, leaving its output destination null.
@Test
void instantiates_file_or_empty_arg_plugin_without_arg() {
    PluginOption option = parse(WantsFileOrEmpty.class.getName());
    WantsFileOrEmpty plugin = (WantsFileOrEmpty) fc.create(option);
    assertThat(plugin.out, is(nullValue()));
}
|
/**
 * Deserializes a CRArtifact by dispatching on the artifact's discriminator
 * field to the concrete implementer type.
 *
 * @throws JsonParseException if the JSON cannot be mapped to an implementer
 */
@Override
public CRArtifact deserialize(JsonElement json, Type type, JsonDeserializationContext context) throws JsonParseException {
    return determineJsonElementForDistinguishingImplementers(json, context, TYPE, TypeAdapter.ARTIFACT_ORIGIN);
}
|
// A JSON object with type "build" must be dispatched to the CRBuiltInArtifact
// deserializer.
@Test
public void shouldInstantiateATaskOfTypeExec() {
    JsonObject jsonObject = new JsonObject();
    jsonObject.addProperty("type", "build");
    artifactTypeAdapter.deserialize(jsonObject, type, jsonDeserializationContext);
    verify(jsonDeserializationContext).deserialize(jsonObject, CRBuiltInArtifact.class);
}
|
/**
 * Builds a NormalKey from a spec of the form {@code "<ALGORITHM>:<base64-key>"},
 * e.g. {@code "AES_128:..."}. Only AES_128 with a 16-byte key is accepted.
 *
 * @param spec the algorithm/key spec
 * @return the constructed key
 * @throws IllegalArgumentException on malformed spec, unknown algorithm,
 *         invalid Base64, or wrong key length
 */
public static NormalKey createFromSpec(String spec) {
    if (spec == null || !spec.contains(":")) {
        throw new IllegalArgumentException("Invalid spec format");
    }
    final String[] parts = spec.split(":", 2);
    if (parts.length != 2) {
        throw new IllegalArgumentException("Invalid spec format");
    }
    final String algorithmName = parts[0];
    final String base64Key = parts[1];
    if (!algorithmName.equalsIgnoreCase("AES_128")) {
        throw new IllegalArgumentException("Unsupported algorithm: " + algorithmName);
    }
    final EncryptionAlgorithmPB algorithm = EncryptionAlgorithmPB.AES_128;
    final byte[] plainKey;
    try {
        plainKey = Base64.getDecoder().decode(base64Key);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Invalid Base64 key", e);
    }
    // AES-128 requires exactly a 128-bit (16-byte) key.
    if (plainKey.length != 16) {
        throw new IllegalArgumentException("Invalid key length " + plainKey.length * 8);
    }
    return new NormalKey(algorithm, plainKey, null);
}
|
// Round trip: encode an existing key into spec form, reparse it, and verify
// algorithm and key bytes survive intact.
@Test
public void testCreateFromSpec() {
    String base64Key = Base64.getEncoder().encodeToString(normalKey.getPlainKey());
    String spec = "AES_128:" + base64Key;
    NormalKey key = NormalKey.createFromSpec(spec);
    assertNotNull(key);
    assertEquals(EncryptionAlgorithmPB.AES_128, key.getAlgorithm());
    assertArrayEquals(normalKey.getPlainKey(), key.getPlainKey());
}
|
/**
 * Two migrations are equal when they are of the same concrete class and share
 * the same creation timestamp. Strict class equality (rather than instanceof)
 * keeps equals symmetric across subclasses.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    return Objects.equals(this.createdAt(), ((Migration) o).createdAt());
}
|
// Equality must hold between two instances of the same migration class and
// fail across different migration classes.
@Test
public void testEquals() {
    final MigrationA a = new MigrationA();
    final MigrationA aa = new MigrationA();
    final MigrationB b = new MigrationB();
    assertThat(a.equals(aa)).isTrue();
    assertThat(a.equals(b)).isFalse();
}
|
/**
 * Convenience entry point: parses the schema string with a fresh parser bound
 * to the given type registry.
 *
 * @param schema       schema text to parse
 * @param typeRegistry registry used to resolve custom types
 * @return the parsed table elements
 */
public static TableElements parse(final String schema, final TypeRegistry typeRegistry) {
    return new SchemaParser(typeRegistry).parse(schema);
}
|
// A schema consisting only of whitespace must parse to an empty element list,
// not an error.
@Test
public void shouldParseEmptySchema() {
    // Given:
    final String schema = "  \t\n\r";
    // When:
    final TableElements elements = parser.parse(schema);
    // Then:
    assertThat(Iterables.isEmpty(elements), is(true));
}
|
/**
 * Returns an empty autoscaling decision with no description.
 *
 * @return the empty autoscaling instance
 */
public static Autoscaling empty() {
    return empty("");
}
|
// Under sustained CPU load, the autoscaler must scale up while keeping the
// group size within the configured IntRange(2, 3) limit.
@Test
public void autoscaling_respects_group_size_limit() {
    var min = new ClusterResources( 2, 2, new NodeResources(1, 1, 10, 1));
    var now = new ClusterResources(5, 5, new NodeResources(3.0, 10, 100, 1));
    var max = new ClusterResources(18, 6, new NodeResources(100, 1000, 10000, 1));
    var fixture = DynamicProvisioningTester.fixture()
                                           .awsProdSetup(true)
                                           .initialResources(Optional.of(now))
                                           .capacity(Capacity.from(min, max, IntRange.of(2, 3), false, true, Optional.empty(), ClusterInfo.empty()))
                                           .build();
    fixture.tester().clock().advance(Duration.ofDays(2));
    fixture.loader().applyCpuLoad(0.6, 240);
    fixture.tester().assertResources("Scaling cpu up",
                                     12, 6, 3.2,  4.2, 27.5,
                                     fixture.autoscale());
}
|
/**
 * Decides whether the listener should be invoked for this event: only when the
 * change event carries at least one added, removed or modified instance.
 *
 * @param event the naming event; null yields false
 * @return true if the event represents an actual instance change
 */
@Override
public boolean isCallable(NamingEvent event) {
    if (event == null) {
        return false;
    }
    // NOTE(review): unchecked cast — assumes every non-null event reaching this
    // wrapper is a NamingChangeEvent; a plain NamingEvent would throw
    // ClassCastException here. Confirm against the caller's contract.
    NamingChangeEvent changeEvent = (NamingChangeEvent) event;
    return changeEvent.isAdded() || changeEvent.isRemoved() || changeEvent.isModified();
}
|
// An event with removed instances is callable; once the removal diff is
// cleared (and nothing else changed), it is not.
@Test
public void testCallable() {
    NamingSelectorWrapper selectorWrapper = new NamingSelectorWrapper(null, null);
    InstancesDiff instancesDiff = new InstancesDiff(null, Collections.singletonList(new Instance()), null);
    NamingChangeEvent changeEvent = new NamingChangeEvent("serviceName", Collections.emptyList(), instancesDiff);
    assertTrue(selectorWrapper.isCallable(changeEvent));
    changeEvent.getRemovedInstances().clear();
    assertFalse(selectorWrapper.isCallable(changeEvent));
}
|
/**
 * Registers a thread-level metric with a fixed (immutable) value, and records
 * its name under the thread's sensor prefix so it can be removed with the
 * thread's other metrics later.
 *
 * @param name        metric name
 * @param description metric description
 * @param threadId    id of the owning thread (used in the metric tags)
 * @param value       the constant value the metric reports
 */
public <T> void addThreadLevelImmutableMetric(final String name,
                                              final String description,
                                              final String threadId,
                                              final T value) {
    final MetricName metricName = metrics.metricName(
        name, THREAD_LEVEL_GROUP, description, threadLevelTagMap(threadId));
    // Bookkeeping and registration happen under the same lock so concurrent
    // removal of the thread's metrics cannot interleave between them.
    synchronized (threadLevelMetrics) {
        threadLevelMetrics.computeIfAbsent(
            threadSensorPrefix(threadId),
            tid -> new LinkedList<>()
        ).add(metricName);
        metrics.addMetric(metricName, new ImmutableMetricValue<>(value));
    }
}
|
// An immutable thread-level metric must be registered under the thread-level
// group with the thread-id tag and report exactly the supplied value.
@Test
public void shouldAddThreadLevelImmutableMetric() {
    final int measuredValue = 123;
    final StreamsMetricsImpl streamsMetrics
        = new StreamsMetricsImpl(metrics, THREAD_ID1, VERSION, time);
    streamsMetrics.addThreadLevelImmutableMetric(
        "foobar",
        "test metric",
        "t1",
        measuredValue
    );
    final MetricName name = metrics.metricName(
        "foobar",
        THREAD_LEVEL_GROUP,
        Collections.singletonMap("thread-id", "t1")
    );
    assertThat(metrics.metric(name), notNullValue());
    assertThat(metrics.metric(name).metricValue(), equalTo(measuredValue));
}
|
/**
 * Creates a target for the given server with no additional request properties.
 *
 * @param server the server URI
 * @return the target
 */
public KsqlTarget target(final URI server) {
    return target(server, Collections.emptyMap());
}
|
// A 400 response carrying a KsqlErrorMessage body must surface both the HTTP
// status and the embedded error code/message on the RestResponse.
@Test
public void shouldHandleErrorMessageOnPostRequests() {
    // Given:
    KsqlErrorMessage ksqlErrorMessage = new KsqlErrorMessage(40000, "ouch");
    server.setResponseObject(ksqlErrorMessage);
    server.setErrorCode(400);
    // When:
    KsqlTarget target = ksqlClient.target(serverUri);
    RestResponse<KsqlEntityList> response = target.postKsqlRequest("sql", Collections.emptyMap(), Optional.of(123L));
    // Then:
    assertThat(response.getStatusCode(), is(400));
    assertThat(response.getErrorMessage().getErrorCode(), is(40000));
    assertThat(response.getErrorMessage().getMessage(), is("ouch"));
}
|
/**
 * REST endpoint returning all configuration properties visible to the caller.
 * Responds 200 with the property map, or 500 when the properties cannot be read.
 */
@GET
@Path("all")
@ZeppelinApi
public Response getAll() {
    try {
        Map<String, String> properties =
            configurationService.getAllProperties(getServiceContext(), new RestServiceCallback<>());
        return new JsonResponse<>(Status.OK, "", properties).build();
    } catch (IOException e) {
        return new JsonResponse<>(Status.INTERNAL_SERVER_ERROR, "Fail to get configuration", e).build();
    }
}
|
@Test
void testGetAll() throws IOException {
    try (CloseableHttpResponse get = httpGet("/configurations/all")) {
        // Parse the JSON envelope and extract the "body" map of properties.
        Map<String, Object> resp =
            gson.fromJson(EntityUtils.toString(get.getEntity(), StandardCharsets.UTF_8),
                new TypeToken<Map<String, Object>>() {
                }.getType());
        Map<String, String> body = (Map<String, String>) resp.get("body");
        assertTrue(body.size() > 0);
        // it shouldn't have key/value pair which key contains "password"
        for (String key : body.keySet()) {
            assertTrue(!key.contains("password"));
        }
    }
}
|
/**
 * Extracts the registry {@link ErrorCodes} value from an error response, when the
 * body contains exactly one recognized error entry; otherwise rethrows the
 * original exception unchanged.
 *
 * @param responseException the HTTP error response to inspect
 * @return the single recognized error code
 * @throws ResponseException the original exception, when no single known error code can be extracted
 */
public static ErrorCodes getErrorCode(ResponseException responseException)
    throws ResponseException {
  // Obtain the error response code.
  String errorContent = responseException.getContent();
  if (errorContent == null) {
    throw responseException;
  }
  try {
    ErrorResponseTemplate errorResponse =
        JsonTemplateMapper.readJson(errorContent, ErrorResponseTemplate.class);
    List<ErrorEntryTemplate> errors = errorResponse.getErrors();
    // There may be multiple error objects; only a single unambiguous entry is mapped.
    if (errors.size() == 1) {
      String errorCodeString = errors.get(0).getCode();
      // May not get an error code back.
      if (errorCodeString != null) {
        // throws IllegalArgumentException if unknown error code
        return ErrorCodes.valueOf(errorCodeString);
      }
    }
  } catch (IOException | IllegalArgumentException ex) {
    // Parse exception: either isn't an error object or unknown error code.
    // Intentionally swallowed - the original exception is rethrown below.
  }
  // rethrow the original exception
  throw responseException;
}
|
@Test
public void testGetErrorCode_multipleErrors() {
    // A body with more than one error entry is ambiguous: the original exception
    // must be rethrown instead of mapping either code.
    Mockito.when(responseException.getContent())
        .thenReturn(
            "{\"errors\":["
                + "{\"code\":\"MANIFEST_INVALID\",\"message\":\"message 1\",\"detail\":{}},"
                + "{\"code\":\"TAG_INVALID\",\"message\":\"message 2\",\"detail\":{}}"
                + "]}");
    try {
        ErrorResponseUtil.getErrorCode(responseException);
        Assert.fail();
    } catch (ResponseException ex) {
        // The very same exception instance must be propagated.
        Assert.assertSame(responseException, ex);
    }
}
|
/**
 * Executes a scheduled punctuation on the given processor node, routing any
 * user-code failure through the configured {@code ProcessingExceptionHandler}.
 *
 * @param node       the processor node whose punctuator fires
 * @param timestamp  the punctuation timestamp (stream time or system time, per {@code type})
 * @param type       the punctuation type
 * @param punctuator the user punctuator to invoke
 */
@SuppressWarnings("unchecked")
@Override
public void punctuate(final ProcessorNode<?, ?, ?, ?> node,
                      final long timestamp,
                      final PunctuationType type,
                      final Punctuator punctuator) {
    // Punctuation must never run while another node is being processed.
    if (processorContext.currentNode() != null) {
        throw new IllegalStateException(String.format("%sCurrent node is not null", logPrefix));
    }
    // when punctuating, we need to preserve the timestamp (this can be either system time or event time)
    // while other record context are set as dummy: null topic, -1 partition, -1 offset and empty header
    final ProcessorRecordContext recordContext = new ProcessorRecordContext(
        timestamp,
        -1L,
        -1,
        null,
        new RecordHeaders()
    );
    updateProcessorContext(node, time.milliseconds(), recordContext);
    if (log.isTraceEnabled()) {
        log.trace("Punctuating processor {} with timestamp {} and punctuation type {}", node.name(), timestamp, type);
    }
    try {
        maybeMeasureLatency(() -> punctuator.punctuate(timestamp), time, punctuateLatencySensor);
    } catch (final TimeoutException timeoutException) {
        // Under EOS a timeout marks the task corrupted so it can be re-initialized;
        // otherwise the timeout is surfaced to the caller.
        if (!eosEnabled) {
            throw timeoutException;
        } else {
            record = null;
            throw new TaskCorruptedException(Collections.singleton(id));
        }
    } catch (final FailedProcessingException e) {
        // Unwrap: the handler already dealt with the original failure's context.
        throw createStreamsException(node.name(), e.getCause());
    } catch (final TaskCorruptedException | TaskMigratedException e) {
        // Framework-level exceptions must propagate untouched.
        throw e;
    } catch (final RuntimeException processingException) {
        // Any other user-code failure is delegated to the processing exception handler.
        final ErrorHandlerContext errorHandlerContext = new DefaultErrorHandlerContext(
            null,
            recordContext.topic(),
            recordContext.partition(),
            recordContext.offset(),
            recordContext.headers(),
            node.name(),
            id()
        );
        final ProcessingExceptionHandler.ProcessingHandlerResponse response;
        try {
            response = Objects.requireNonNull(
                processingExceptionHandler.handle(errorHandlerContext, null, processingException),
                "Invalid ProcessingExceptionHandler response."
            );
        } catch (final RuntimeException fatalUserException) {
            // A failure inside the user handler itself is always fatal.
            log.error(
                "Processing error callback failed after processing error for record: {}",
                errorHandlerContext,
                processingException
            );
            throw new FailedProcessingException("Fatal user code error in processing error callback", fatalUserException);
        }
        if (response == ProcessingExceptionHandler.ProcessingHandlerResponse.FAIL) {
            log.error("Processing exception handler is set to fail upon" +
                " a processing error. If you would rather have the streaming pipeline" +
                " continue after a processing error, please set the " +
                PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG + " appropriately.");
            throw createStreamsException(node.name(), processingException);
        } else {
            // CONTINUE: record the drop and move on.
            droppedRecordsSensor.record();
        }
    } finally {
        // Always clear the current node, even when the punctuator threw.
        processorContext.setCurrentNode(null);
    }
}
|
@Test
public void punctuateShouldNotThrowStreamsExceptionWhenProcessingExceptionHandlerRepliesWithContinue() {
    when(stateManager.taskId()).thenReturn(taskId);
    when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
    // Configure the task with a CONTINUE processing-exception handler.
    task = createStatelessTask(createConfig(
        AT_LEAST_ONCE,
        "100",
        LogAndFailExceptionHandler.class.getName(),
        LogAndContinueProcessingExceptionHandler.class.getName()
    ));
    // The punctuator throws, but with CONTINUE the task must swallow the failure.
    task.punctuate(processorStreamTime, 1, PunctuationType.STREAM_TIME, timestamp -> {
        throw new KafkaException("KABOOM!");
    });
}
|
/**
 * Builds the root file name for the given connection (e.g. {@code pvfs://connection-name/}).
 *
 * @param details the connection details; must carry a non-empty name
 * @return the connection root file name
 * @throws IllegalArgumentException if the connection has no name
 */
@NonNull
public ConnectionFileName getConnectionRootFileName( @NonNull VFSConnectionDetails details ) {
  final String name = details.getName();
  if ( !StringUtils.isEmpty( name ) ) {
    return new ConnectionFileName( name );
  }
  throw new IllegalArgumentException( "Unnamed connection" );
}
|
@Test
public void testGetConnectionRootFileNameReturnsTheConnectionRoot() {
    String connectionNameWithNoReservedChars = "connection-name";
    when( vfsConnectionDetails.getName() ).thenReturn( connectionNameWithNoReservedChars );
    // Expected result: pvfs://connection-name/
    ConnectionFileName fileName = vfsConnectionManagerHelper.getConnectionRootFileName( vfsConnectionDetails );
    assertEquals( ConnectionFileProvider.SCHEME, fileName.getScheme() );
    assertEquals( connectionNameWithNoReservedChars, fileName.getConnection() );
    // The root file name's path is just the separator.
    assertEquals( ConnectionFileName.SEPARATOR, fileName.getPath() );
}
|
/**
 * Returns whether the value fetched by the most recent column read was SQL NULL.
 */
@Override
public final boolean wasNull() {
    return wasNull;
}
|
@Test
void assertWasNull() {
    // Before any column has been read as NULL, wasNull() must report false.
    assertFalse(memoryMergedResult.wasNull());
}
|
/**
 * Returns the names of all enabled identity providers, preserving the
 * repository's sorted order.
 *
 * @return unmodifiable list of enabled provider names
 */
public List<String> getEnabledIdentityProviders() {
    var providers = identityProviderRepository.getAllEnabledAndSorted();
    return providers.stream()
        // Re-check the enabled flag defensively, even though the repository filters already.
        .filter(provider -> provider.isEnabled())
        .map(provider -> provider.getName())
        .toList();
}
|
@Test
public void getEnabledIdentityProviders_whenDefined_shouldReturnOnlyEnabled() {
    // Two enabled providers and one disabled; only the enabled names must be returned.
    mockIdentityProviders(List.of(
        new TestIdentityProvider().setKey("saml").setName("Okta").setEnabled(true),
        new TestIdentityProvider().setKey("github").setName("GitHub").setEnabled(true),
        new TestIdentityProvider().setKey("bitbucket").setName("BitBucket").setEnabled(false)
    ));
    assertThat(commonSystemInformation.getEnabledIdentityProviders())
        .containsExactlyInAnyOrder("Okta", "GitHub");
}
|
/**
 * Builds the complete argument array for a resource method invocation by filling in
 * every non-positional parameter (keys, headers, projections, contexts, attachments,
 * action/query parameters) from the request context.
 *
 * @param positionalArguments  arguments already resolved by the router, copied into the head of the result
 * @param resourceMethod       descriptor of the resource method being invoked
 * @param context              request context used to resolve the remaining parameters
 * @param template             record holding action parameters, or null for non-action methods
 * @param resourceMethodConfig per-method configuration (e.g. query-parameter validation)
 * @return the argument array aligned with the method's parameter list
 * @throws RoutingException       if a required parameter is missing, invalid or of unknown type (HTTP 400)
 * @throws RestLiServiceException if a default value is malformed (HTTP 500) or unexpected attachments arrive (HTTP 400)
 */
@SuppressWarnings("deprecation")
static Object[] buildArgs(final Object[] positionalArguments,
                          final ResourceMethodDescriptor resourceMethod,
                          final ServerResourceContext context,
                          final DynamicRecordTemplate template,
                          final ResourceMethodConfig resourceMethodConfig)
{
  List<Parameter<?>> parameters = resourceMethod.getParameters();
  // Positional arguments occupy the head of the array; the rest are resolved below.
  Object[] arguments = Arrays.copyOf(positionalArguments, parameters.size());
  fixUpComplexKeySingletonArraysInArguments(arguments);
  boolean attachmentsDesired = false;
  for (int i = positionalArguments.length; i < parameters.size(); ++i)
  {
    Parameter<?> param = parameters.get(i);
    try
    {
      // Dispatch on the parameter kind; each successful resolution "continue"s, and
      // anything left unresolved falls through to the optional/default handling below.
      if (param.getParamType() == Parameter.ParamType.KEY || param.getParamType() == Parameter.ParamType.ASSOC_KEY_PARAM)
      {
        Object value = context.getPathKeys().get(param.getName());
        if (value != null)
        {
          arguments[i] = value;
          continue;
        }
      }
      else if (param.getParamType() == Parameter.ParamType.CALLBACK)
      {
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT_PARAM || param.getParamType() == Parameter.ParamType.PARSEQ_CONTEXT)
      {
        continue; // don't know what to fill in yet
      }
      else if (param.getParamType() == Parameter.ParamType.HEADER)
      {
        HeaderParam headerParam = param.getAnnotations().get(HeaderParam.class);
        String value = context.getRequestHeaders().get(headerParam.value());
        arguments[i] = value;
        continue;
      }
      //Since we have multiple different types of MaskTrees that can be passed into resource methods,
      //we must evaluate based on the param type (annotation used)
      else if (param.getParamType() == Parameter.ParamType.PROJECTION || param.getParamType() == Parameter.ParamType.PROJECTION_PARAM)
      {
        arguments[i] = context.getProjectionMask();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.METADATA_PROJECTION_PARAM)
      {
        arguments[i] = context.getMetadataProjectionMask();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PAGING_PROJECTION_PARAM)
      {
        arguments[i] = context.getPagingProjectionMask();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.CONTEXT || param.getParamType() == Parameter.ParamType.PAGING_CONTEXT_PARAM)
      {
        PagingContext ctx = RestUtils.getPagingContext(context, (PagingContext) param.getDefaultValue());
        arguments[i] = ctx;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PATH_KEYS || param.getParamType() == Parameter.ParamType.PATH_KEYS_PARAM)
      {
        arguments[i] = context.getPathKeys();
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.PATH_KEY_PARAM) {
        Object value = context.getPathKeys().get(param.getName());
        if (value != null)
        {
          arguments[i] = value;
          continue;
        }
      }
      else if (param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT || param.getParamType() == Parameter.ParamType.RESOURCE_CONTEXT_PARAM)
      {
        arguments[i] = context;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.VALIDATOR_PARAM)
      {
        RestLiDataValidator validator = new RestLiDataValidator(resourceMethod.getResourceModel().getResourceClass().getAnnotations(),
            resourceMethod.getResourceModel().getValueClass(), resourceMethod.getMethodType());
        arguments[i] = validator;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.RESTLI_ATTACHMENTS_PARAM)
      {
        arguments[i] = context.getRequestAttachmentReader();
        // Remember that the method accepts attachments, for the check after the loop.
        attachmentsDesired = true;
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_WRITER_PARAM)
      {
        // The OutputStream is passed to the resource implementation in a synchronous call. Upon return of the
        // resource method, all the bytes would haven't written to the OutputStream. The EntityStream would have
        // contained all the bytes by the time data is requested. The ownership of the OutputStream is passed to
        // the ByteArrayOutputStreamWriter, which is responsible of closing the OutputStream if necessary.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        context.setResponseEntityStream(EntityStreams.newEntityStream(new ByteArrayOutputStreamWriter(out)));
        arguments[i] = new UnstructuredDataWriter(out, context);
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.UNSTRUCTURED_DATA_REACTIVE_READER_PARAM)
      {
        arguments[i] = new UnstructuredDataReactiveReader(context.getRequestEntityStream(), context.getRawRequest().getHeader(RestConstants.HEADER_CONTENT_TYPE));
        continue;
      }
      else if (param.getParamType() == Parameter.ParamType.POST)
      {
        // handle action parameters
        if (template != null)
        {
          DataMap data = template.data();
          if (data.containsKey(param.getName()))
          {
            arguments[i] = template.getValue(param);
            continue;
          }
        }
      }
      else if (param.getParamType() == Parameter.ParamType.QUERY)
      {
        Object value;
        if (DataTemplate.class.isAssignableFrom(param.getType()))
        {
          value = buildDataTemplateArgument(context.getStructuredParameter(param.getName()), param,
              resourceMethodConfig.shouldValidateQueryParams());
        }
        else
        {
          value = buildRegularArgument(context, param, resourceMethodConfig.shouldValidateQueryParams());
        }
        if (value != null)
        {
          arguments[i] = value;
          continue;
        }
      }
      else if (param.getParamType() == Parameter.ParamType.BATCH || param.getParamType() == Parameter.ParamType.RESOURCE_KEY)
      {
        // should not come to this routine since it should be handled by passing in positionalArguments
        throw new RoutingException("Parameter '" + param.getName() + "' should be passed in as a positional argument",
            HttpStatus.S_400_BAD_REQUEST.getCode());
      }
      else
      {
        // unknown param type
        throw new RoutingException(
            "Parameter '" + param.getName() + "' has an unknown parameter type '" + param.getParamType().name() + "'",
            HttpStatus.S_400_BAD_REQUEST.getCode());
      }
    }
    catch (TemplateRuntimeException e)
    {
      throw new RoutingException("Parameter '" + param.getName() + "' is invalid", HttpStatus.S_400_BAD_REQUEST.getCode());
    }
    try
    {
      // Handling null-valued parameters not provided in resource context or entity body
      // check if it is optional parameter
      if (param.isOptional() && param.hasDefaultValue())
      {
        arguments[i] = param.getDefaultValue();
      }
      else if (param.isOptional() && !param.getType().isPrimitive())
      {
        // optional primitive parameter must have default value or provided
        arguments[i] = null;
      }
      else
      {
        throw new RoutingException("Parameter '" + param.getName() + "' is required", HttpStatus.S_400_BAD_REQUEST.getCode());
      }
    }
    catch (ResourceConfigException e)
    {
      // Parameter default value format exception should result in server error code 500.
      throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
          "Parameter '" + param.getName() + "' default value is invalid", e);
    }
  }
  //Verify that if the resource method did not expect attachments, and attachments were present, that we drain all
  //incoming attachments and send back a bad request. We must take precaution here since simply ignoring the request
  //attachments is not correct behavior here. Ignoring other request level constructs such as headers or query parameters
  //that were not needed is safe, but not for request attachments.
  if (!attachmentsDesired && context.getRequestAttachmentReader() != null)
  {
    throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
        "Resource method endpoint invoked does not accept any request attachments.");
  }
  return arguments;
}
|
@Test
public void testQueryParameterType()
{
  String testParamKey = "testParam";
  String expectedTestParamValue = "testParamValue";
  // Mock a context that carries exactly one query parameter and no attachments.
  ServerResourceContext mockResourceContext = EasyMock.createMock(ServerResourceContext.class);
  EasyMock.expect(mockResourceContext.hasParameter(testParamKey)).andReturn(true).times(1);
  EasyMock.expect(mockResourceContext.getParameter(testParamKey)).andReturn(expectedTestParamValue).anyTimes();
  EasyMock.expect(mockResourceContext.getRequestAttachmentReader()).andReturn(null);
  EasyMock.replay(mockResourceContext);
  // A non-optional String QUERY parameter must be resolved from the context.
  Parameter<String> param = new Parameter<>(testParamKey, String.class, DataSchemaConstants.STRING_DATA_SCHEMA,
      false, null, Parameter.ParamType.QUERY, false, AnnotationSet.EMPTY);
  List<Parameter<?>> parameters = Collections.singletonList(param);
  Object[] results = ArgumentBuilder.buildArgs(new Object[0], getMockResourceMethod(parameters), mockResourceContext, null, getMockResourceMethodConfig(false));
  Assert.assertEquals(results[0], expectedTestParamValue);
}
|
/**
 * Converts {@code value} to the requested {@code type}, delegating to
 * {@link Autoboxer#autobox(Object, Class)} for primitive (un)boxing and
 * string-based conversions.
 *
 * @param value the value to convert
 * @param type  the target type
 * @param <T>   target type parameter
 * @return the converted value
 */
public static <T> T autobox(Object value, Class<T> type) {
    return Autoboxer.autobox(value, type);
}
|
@Test
void testAutobox() {
    // null passes through unchanged; identity conversion for matching types.
    assertThat(ReflectionUtils.autobox(null, String.class)).isEqualTo(null);
    assertThat(ReflectionUtils.autobox("string", String.class)).isEqualTo("string");
    // Boxing/unboxing between primitives and wrappers.
    assertThat(ReflectionUtils.autobox(1, int.class)).isEqualTo(1);
    assertThat(ReflectionUtils.autobox(1, Integer.class)).isEqualTo(1);
    // String-to-number parsing.
    assertThat(ReflectionUtils.autobox("1", int.class)).isEqualTo(1);
    assertThat(ReflectionUtils.autobox("1", Integer.class)).isEqualTo(1);
    assertThat(ReflectionUtils.autobox(1L, long.class)).isEqualTo(1L);
    assertThat(ReflectionUtils.autobox(1, Long.class)).isEqualTo(1L);
    assertThat(ReflectionUtils.autobox("1", long.class)).isEqualTo(1L);
    assertThat(ReflectionUtils.autobox("1", Long.class)).isEqualTo(1L);
    // String conversions for richer types: UUID, enum constants and ISO-8601 durations.
    assertThat(ReflectionUtils.autobox("6ec8044c-ad95-4416-a29e-e946c72a37b0", UUID.class)).isEqualTo(UUID.fromString("6ec8044c-ad95-4416-a29e-e946c72a37b0"));
    assertThat(ReflectionUtils.autobox("A", TestEnum.class)).isEqualTo(TestEnum.A);
    assertThat(ReflectionUtils.autobox("PT8H6M12.345S", Duration.class)).isEqualTo(Duration.parse("PT8H6M12.345S"));
}
|
/**
 * Resolves the Avro schema of a (possibly nested) field, where nesting levels are
 * separated by dots (e.g. {@code "student.firstname"}).
 *
 * <p>Fixes the previous implementation, which looked every path part up on the root
 * schema without descending into nested records - that only worked when the root
 * schema happened to redeclare the leaf field.
 *
 * @param writeSchema the record schema to start the lookup from
 * @param fieldName   dot-separated path to the field
 * @return the non-null schema of the addressed field
 * @throws HoodieException if the path cannot be resolved against the schema
 */
public static Schema getNestedFieldSchemaFromWriteSchema(Schema writeSchema, String fieldName) {
  String[] parts = fieldName.split("\\.");
  Schema current = writeSchema;
  for (int i = 0; i < parts.length; i++) {
    Schema.Field field = current.getField(parts[i]);
    if (field == null) {
      throw new HoodieException("Failed to get schema. Not a valid field name: " + fieldName);
    }
    // Unwrap nullable unions so both the returned leaf and intermediate records resolve.
    Schema fieldSchema = resolveNullableSchema(field.schema());
    if (i == parts.length - 1) {
      return fieldSchema;
    }
    // Descend into the nested record for the next path part.
    current = fieldSchema;
  }
  throw new HoodieException("Failed to get schema. Not a valid field name: " + fieldName);
}
|
@Test
public void testGetNestedFieldSchema() throws IOException {
    Schema schema = SchemaTestUtil.getEvolvedSchema();
    GenericRecord rec = new GenericData.Record(schema);
    rec.put("field1", "key1");
    rec.put("field2", "val1");
    rec.put("name", "val2");
    rec.put("favorite_number", 2);
    // test simple field schema
    assertEquals(Schema.create(Schema.Type.STRING), getNestedFieldSchemaFromWriteSchema(rec.getSchema(), "field1"));
    GenericRecord rec2 = new GenericData.Record(schema);
    rec2.put("field1", "key1");
    rec2.put("field2", "val1");
    rec2.put("name", "val2");
    rec2.put("favorite_number", 12);
    // test comparison of non-string type
    assertEquals(-1, GenericData.get().compare(rec.get("favorite_number"), rec2.get("favorite_number"), getNestedFieldSchemaFromWriteSchema(rec.getSchema(), "favorite_number")));
    // test nested field schema
    Schema nestedSchema = new Schema.Parser().parse(SCHEMA_WITH_NESTED_FIELD);
    GenericRecord rec3 = new GenericData.Record(nestedSchema);
    rec3.put("firstname", "person1");
    rec3.put("lastname", "person2");
    GenericRecord studentRecord = new GenericData.Record(rec3.getSchema().getField("student").schema());
    studentRecord.put("firstname", "person1");
    studentRecord.put("lastname", "person2");
    rec3.put("student", studentRecord);
    // Dot-separated paths must resolve through the nested "student" record.
    assertEquals(Schema.create(Schema.Type.STRING), getNestedFieldSchemaFromWriteSchema(rec3.getSchema(), "student.firstname"));
    assertEquals(Schema.create(Schema.Type.STRING), getNestedFieldSchemaFromWriteSchema(nestedSchema, "student.firstname"));
}
|
/**
 * Begins a new transaction for the session's START TRANSACTION statement.
 * Requires client transaction support and no already-active transaction.
 *
 * @return an immediately-completed future (the transaction is started synchronously)
 * @throws PrestoException if the client does not support transactions or one is already active
 */
@Override
public ListenableFuture<?> execute(StartTransaction statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters)
{
    Session session = stateMachine.getSession();
    if (!session.isClientTransactionSupport()) {
        throw new PrestoException(StandardErrorCode.INCOMPATIBLE_CLIENT, "Client does not support transactions");
    }
    if (session.getTransactionId().isPresent()) {
        throw new PrestoException(StandardErrorCode.NOT_SUPPORTED, "Nested transactions not supported");
    }
    // Isolation level and read-only mode fall back to the manager defaults when unspecified.
    Optional<IsolationLevel> isolationLevel = extractIsolationLevel(statement);
    Optional<Boolean> readOnly = extractReadOnly(statement);
    TransactionId transactionId = transactionManager.beginTransaction(
        isolationLevel.orElse(TransactionManager.DEFAULT_ISOLATION),
        readOnly.orElse(TransactionManager.DEFAULT_READ_ONLY),
        false);
    stateMachine.setStartedTransactionId(transactionId);
    // Since the current session does not contain this new transaction ID, we need to manually mark it as inactive
    // when this statement completes.
    transactionManager.trySetInactive(transactionId);
    return immediateFuture(null);
}
|
@Test
public void testStartTransactionTooManyIsolationLevels()
{
    Session session = sessionBuilder()
        .setClientTransactionSupport()
        .build();
    TransactionManager transactionManager = createTestTransactionManager();
    QueryStateMachine stateMachine = createQueryStateMachine("START TRANSACTION", session, true, transactionManager, executor, metadata);
    assertFalse(stateMachine.getSession().getTransactionId().isPresent());
    StartTransactionTask startTransactionTask = new StartTransactionTask();
    try {
        // Two isolation-level clauses in one statement is invalid and must be rejected.
        getFutureValue(startTransactionTask.execute(
            new StartTransaction(ImmutableList.of(new Isolation(Isolation.Level.READ_COMMITTED), new Isolation(Isolation.Level.READ_COMMITTED))),
            transactionManager,
            metadata,
            new AllowAllAccessControl(),
            stateMachine,
            emptyList()));
        fail();
    }
    catch (SemanticException e) {
        assertEquals(e.getCode(), INVALID_TRANSACTION_MODE);
    }
    // No transaction must have been started or recorded on the state machine.
    assertTrue(transactionManager.getAllTransactionInfos().isEmpty());
    assertFalse(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId());
    assertFalse(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().isPresent());
}
|
/**
 * Builds a Glue partition filter expression from the given per-column predicates.
 * Columns whose names collide with the Glue SQL parser's reserved keywords, and
 * conjuncts that would push the expression over the Glue length limit, are skipped;
 * dropping a conjunct only makes the filter less selective, never incorrect.
 *
 * @param partitionPredicates predicate domain per partition column
 * @return the conjunctive Glue filter expression (possibly empty)
 */
public static String buildGlueExpression(Map<Column, Domain> partitionPredicates)
{
    List<String> conjuncts = new ArrayList<>();
    int totalLength = 0;
    for (Map.Entry<Column, Domain> entry : partitionPredicates.entrySet()) {
        String columnName = entry.getKey().getName();
        if (JSQL_PARSER_RESERVED_KEYWORDS.contains(columnName.toUpperCase(ENGLISH))) {
            // The column name is a reserved keyword in the grammar of the SQL parser used internally by Glue API
            continue;
        }
        Domain domain = entry.getValue();
        if (domain == null || domain.isAll()) {
            continue;
        }
        Optional<String> conjunct = buildGlueExpressionForSingleDomain(columnName, domain);
        if (!conjunct.isPresent()) {
            continue;
        }
        // Account for the separator only when something has already been emitted.
        int lengthWithConjunct = totalLength + conjunct.get().length();
        if (totalLength > 0) {
            lengthWithConjunct += CONJUNCT_SEPARATOR.length();
        }
        if (lengthWithConjunct > GLUE_EXPRESSION_CHAR_LIMIT) {
            continue;
        }
        conjuncts.add(conjunct.get());
        totalLength = lengthWithConjunct;
    }
    return String.join(CONJUNCT_SEPARATOR, conjuncts);
}
|
@Test
public void testIntegerConversion()
{
    // An integer-typed predicate must render as a plain equality conjunct.
    Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR)
        .addIntegerValues("col1", Long.valueOf(Integer.MAX_VALUE))
        .build();
    String expression = buildGlueExpression(predicates);
    assertEquals(expression, format("((col1 = %d))", Integer.MAX_VALUE));
}
|
/**
 * Returns a human-readable representation of this configuration, listing all
 * configurable fields.
 */
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("ReliableTopicConfig{");
    sb.append("name='").append(name).append('\'');
    sb.append(", topicOverloadPolicy=").append(topicOverloadPolicy);
    sb.append(", executor=").append(executor);
    sb.append(", readBatchSize=").append(readBatchSize);
    sb.append(", statisticsEnabled=").append(statisticsEnabled);
    sb.append(", listenerConfigs=").append(listenerConfigs);
    sb.append(", userCodeNamespace=").append(userCodeNamespace);
    return sb.append('}').toString();
}
|
@Test
public void test_toString() {
    // A freshly constructed config must render all defaults in the expected order.
    ReliableTopicConfig config = new ReliableTopicConfig("foo");
    String s = config.toString();
    assertEquals("ReliableTopicConfig{name='foo', topicOverloadPolicy=BLOCK, executor=null,"
        + " readBatchSize=10, statisticsEnabled=true, listenerConfigs=[], userCodeNamespace=null}", s);
}
|
/**
 * Opens a download stream for the given file via a server-generated download URL.
 * Honors range requests when the transfer status indicates a resumed (append) download.
 *
 * @param file     the remote file to read
 * @param status   transfer status; range start/end is taken from here when appending
 * @param callback connection prompt callback (unused here)
 * @return stream over the response body; closing it releases the HTTP connection
 * @throws BackgroundException on API errors, unexpected HTTP status or I/O failure
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final SDSApiClient client = session.getClient();
        // The download URL embeds its own authorization token.
        final DownloadTokenGenerateResponse token = new NodesApi(session.getClient()).generateDownloadUrl(Long.valueOf(nodeid.getVersionId(file)), StringUtils.EMPTY);
        final HttpUriRequest request = new HttpGet(token.getDownloadUrl());
        request.addHeader("X-Sds-Auth-Token", StringUtils.EMPTY);
        if(status.isAppend()) {
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            // Open-ended range when the total length is unknown.
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
            // Disable compression
            request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
        }
        final HttpResponse response = client.getClient().execute(request);
        switch(response.getStatusLine().getStatusCode()) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_PARTIAL_CONTENT:
                return new HttpMethodReleaseInputStream(response, status);
            case HttpStatus.SC_NOT_FOUND:
                // Invalidate the cached node id for a vanished file.
                nodeid.cache(file, null);
                // Fall through to map the failure
            default:
                throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
                    response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
        }
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
|
@Test
public void testReadInterrupt() throws Exception {
    // Upload a file slightly larger than 32 KiB so partial reads are meaningful.
    final byte[] content = RandomUtils.nextBytes(32769);
    final TransferStatus writeStatus = new TransferStatus();
    writeStatus.setLength(content.length);
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid);
    final HttpResponseOutputStream<Node> out = writer.write(test, writeStatus, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(writeStatus, writeStatus).transfer(new ByteArrayInputStream(content), out);
    // Unknown length in status
    final TransferStatus readStatus = new TransferStatus();
    // Read a single byte, then abandon the stream - closing early must not fail.
    {
        final InputStream in = new SDSReadFeature(session, nodeid).read(test, readStatus, new DisabledConnectionCallback());
        assertNotNull(in.read());
        in.close();
    }
    // A subsequent read of the same file must still succeed.
    {
        final InputStream in = new SDSReadFeature(session, nodeid).read(test, readStatus, new DisabledConnectionCallback());
        assertNotNull(in);
        in.close();
    }
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Parses the SQL hint comment (if any) from the given SQL text into a
 * {@link HintValueContext} carrying data source, routing, rewrite, audit,
 * shadow and sharding hint values.
 *
 * @param sql the SQL text, possibly prefixed with a hint comment
 * @return the populated hint context; empty when no hint comment is present
 */
public static HintValueContext extractHint(final String sql) {
    if (!containsSQLHint(sql)) {
        return new HintValueContext();
    }
    HintValueContext result = new HintValueContext();
    // Extract the key=value section between the hint prefix and the comment terminator.
    int hintKeyValueBeginIndex = getHintKeyValueBeginIndex(sql);
    String hintKeyValueText = sql.substring(hintKeyValueBeginIndex, sql.indexOf(SQL_COMMENT_SUFFIX, hintKeyValueBeginIndex));
    Map<String, String> hintKeyValues = getSQLHintKeyValues(hintKeyValueText);
    // Well-known scalar hints.
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY)) {
        result.setDataSourceName(getHintValue(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)) {
        result.setWriteRouteOnly(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)) {
        result.setSkipSQLRewrite(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY)) {
        String property = getHintValue(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY);
        result.getDisableAuditNames().addAll(getSplitterSQLHintValue(property));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)) {
        result.setShadow(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)));
    }
    // Sharding hint keys are matched per entry; keys are normalized to upper case.
    for (Entry<String, String> entry : hintKeyValues.entrySet()) {
        Object value = convert(entry.getValue());
        Comparable<?> comparable = value instanceof Comparable ? (Comparable<?>) value : Objects.toString(value);
        if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_DATABASE_VALUE_KEY)) {
            result.getShardingDatabaseValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
        }
        if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_TABLE_VALUE_KEY)) {
            result.getShardingTableValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
        }
    }
    return result;
}
|
@Test
void assertSQLHintWriteRouteOnlyWithCommentString() {
    // A standalone hint comment must toggle the write-route-only flag.
    HintValueContext actual = SQLHintUtils.extractHint("/* SHARDINGSPHERE_HINT: WRITE_ROUTE_ONLY=true */");
    assertTrue(actual.isWriteRouteOnly());
}
|
/**
 * Derives the sink table name for the given source table by applying, in order:
 * explicit name-mapping routes, prefix/suffix, case conversion, regex replacement
 * and optional schema prefixing.
 *
 * @param table the source table
 * @return the transformed sink table name
 * @throws IllegalArgumentException if both table.lower and table.upper are enabled
 */
@Override
public String getSinkTableName(Table table) {
    Map<String, String> sink = config.getSink();
    // Fail fast: table.lower and table.upper can not be true at the same time.
    if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))
            && Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) {
        throw new IllegalArgumentException("table.lower and table.upper can not be true at the same time");
    }
    String tableName = table.getName();
    // Apply explicit table name mapping routes first (source name -> sink name).
    String mappingRoute = sink.get(FlinkCDCConfig.TABLE_MAPPING_ROUTES);
    if (mappingRoute != null) {
        Map<String, String> mappingRules = parseMappingRoute(mappingRoute);
        if (mappingRules.containsKey(tableName)) {
            tableName = mappingRules.get(tableName);
        }
    }
    tableName = sink.getOrDefault(FlinkCDCConfig.TABLE_PREFIX, "")
            + tableName
            + sink.getOrDefault(FlinkCDCConfig.TABLE_SUFFIX, "");
    if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) {
        tableName = tableName.toUpperCase();
    }
    if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))) {
        tableName = tableName.toLowerCase();
    }
    // Implement regular expressions to replace table names through
    // sink.table.replace.pattern and table.replace.with
    String replacePattern = sink.get(FlinkCDCConfig.TABLE_REPLACE_PATTERN);
    String replaceWith = sink.get(FlinkCDCConfig.TABLE_REPLACE_WITH);
    if (replacePattern != null && replaceWith != null) {
        Pattern pattern = Pattern.compile(replacePattern);
        Matcher matcher = pattern.matcher(tableName);
        tableName = matcher.replaceAll(replaceWith);
    }
    // Optionally prefix the result with the source schema name.
    if (Boolean.parseBoolean(sink.get("table.prefix.schema"))) {
        tableName = table.getSchema() + "_" + tableName;
    }
    return tableName;
}
|
@Test
public void testGetSinkTableNameWithConversionUpperAndLowerCase() {
    // Enabling both case conversions at once is contradictory and must be rejected.
    Map<String, String> sinkConfig = new HashMap<>();
    sinkConfig.put("table.prefix", "");
    sinkConfig.put("table.suffix", "");
    sinkConfig.put("table.lower", "true");
    sinkConfig.put("table.upper", "true");
    when(config.getSink()).thenReturn(sinkConfig);
    Table table = new Table("TestTable", "TestSchema", null);
    Assert.assertThrows(IllegalArgumentException.class, () -> sinkBuilder.getSinkTableName(table));
}
|
/**
 * Checks whether the given pane is deprecated, using the current wall-clock time.
 * Delegates to the timestamp-based overload with {@link System#currentTimeMillis()}.
 *
 * @param pane the pane to check
 * @return true if the pane is deprecated as of now
 */
public boolean isPaneDeprecated(final Pane<T> pane) {
    return isPaneDeprecated(System.currentTimeMillis(), pane);
}
|
@Test
void testIsPaneDeprecated() {
    Pane<LongAdder> currentPane = window.currentPane();
    // Force a start time far in the past so the pane is stale relative to now.
    currentPane.setStartInMs(1000000L);
    assertTrue(window.isPaneDeprecated(currentPane));
}
|
/**
 * Extracts the raw JWT from an Authorization header value by stripping a leading
 * {@code TOKEN_PREFIX}. A value that does not start with the prefix is returned
 * unchanged.
 *
 * <p>The previous {@code replace(TOKEN_PREFIX, "")} removed the prefix anywhere
 * inside the string, which could corrupt a token that happens to contain that
 * substring; only a leading prefix is stripped now.
 *
 * @param authorizationHeader the Authorization header value
 * @return the token without the scheme prefix
 */
public static String getJwt(final String authorizationHeader) {
    if (authorizationHeader.startsWith(TOKEN_PREFIX)) {
        return authorizationHeader.substring(TOKEN_PREFIX.length());
    }
    return authorizationHeader;
}
|
@Test
void testGetJwt_WithInvalidTokenFormat() {
    // Given: a header value without the scheme prefix.
    String authorizationHeader = "sampleAccessToken";
    // When
    String jwt = Token.getJwt(authorizationHeader);
    // Then: the value must pass through unchanged.
    assertEquals("sampleAccessToken", jwt);
}
|
/**
 * Sends this request synchronously via the underlying web3j service and returns
 * the deserialized response.
 *
 * @return the response, deserialized to the configured response type
 * @throws IOException if the request fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
|
@Test
public void testShhUninstallFilter() throws Exception {
    web3j.shhUninstallFilter(Numeric.toBigInt("0x7")).send();
    // The filter id must be serialized as a hex quantity in the JSON-RPC payload.
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"shh_uninstallFilter\","
            + "\"params\":[\"0x7\"],\"id\":1}");
}
|
/**
 * Completes the span after the exchange finishes, tagging it with the HTTP
 * response status code when one is present on the message.
 */
@Override
public void post(SpanAdapter span, Exchange exchange, Endpoint endpoint) {
    super.post(span, exchange, endpoint);
    final Message message = exchange.getMessage();
    if (message == null) {
        return;
    }
    final Integer statusCode = message.getHeader(Exchange.HTTP_RESPONSE_CODE, Integer.class);
    if (statusCode != null) {
        span.setTag(TagConstants.HTTP_STATUS, statusCode);
    }
}
|
@Test
public void testPostResponseCode() {
    // post() should copy the HTTP_RESPONSE_CODE header from the exchange's
    // message onto the span as the HTTP_STATUS tag.
    Exchange exchange = Mockito.mock(Exchange.class);
    Message message = Mockito.mock(Message.class);
    Mockito.when(exchange.getMessage()).thenReturn(message);
    Mockito.when(message.getHeader(Exchange.HTTP_RESPONSE_CODE, Integer.class)).thenReturn(200);
    // Minimal concrete decorator; component names are irrelevant to this test.
    SpanDecorator decorator = new AbstractHttpSpanDecorator() {
        @Override
        public String getComponent() {
            return null;
        }
        @Override
        public String getComponentClassName() {
            return null;
        }
    };
    MockSpanAdapter span = new MockSpanAdapter();
    decorator.post(span, exchange, null);
    assertEquals(200, span.tags().get(TagConstants.HTTP_STATUS));
}
|
/**
 * Builds the default Markdown title line for a notification, using the
 * event definition's title or "Unnamed" when none is available.
 */
String buildDefaultMessage(EventNotificationContext ctx) {
    final String title = ctx.eventDefinition()
            .map(EventDefinitionDto::title)
            .orElse("Unnamed");
    // Produces exactly "**Alert <title> triggered:**\n".
    return "**Alert " + title + " triggered:**\n";
}
|
@Test
public void buildDefaultMessage() {
    // The default message should never be empty: it always contains at
    // least the formatted title line.
    String message = teamsEventNotification.buildDefaultMessage(eventNotificationContext);
    assertThat(message).isNotEmpty();
}
|
/**
 * Rewrites {@code DROP ... DELETE TOPIC} statements: when the dropped source
 * exists, deletes its backing Kafka topic plus the key/value schema subjects,
 * then returns the statement with the DELETE TOPIC clause removed so the
 * remaining DROP executes normally. Read-only sources are rejected, and a
 * missing source is only tolerated when IF EXISTS was specified.
 *
 * @throws KsqlException if the source is read-only, cannot be found (without
 *         IF EXISTS), or topic/subject deletion fails
 */
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
    // Only DROP statements carrying a DELETE TOPIC clause need rewriting.
    if (!(statement.getStatement() instanceof DropStatement)) {
        return statement;
    }
    final DropStatement dropStatement = (DropStatement) statement.getStatement();
    if (!dropStatement.isDeleteTopic()) {
        return statement;
    }
    final SourceName sourceName = dropStatement.getName();
    final DataSource source = metastore.getSource(sourceName);
    if (source != null) {
        if (source.isSource()) {
            throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
        }
        checkTopicRefs(source);
        deleteTopic(source);
        // Closer runs both subject deletions even if one fails, collecting
        // the first failure as the thrown exception.
        final Closer closer = Closer.create();
        closer.register(() -> deleteKeySubject(source));
        closer.register(() -> deleteValueSubject(source));
        try {
            closer.close();
        } catch (final KsqlException e) {
            throw e;
        } catch (final Exception e) {
            throw new KsqlException(e);
        }
    } else if (!dropStatement.getIfExists()) {
        throw new KsqlException("Could not find source to delete topic for: " + statement);
    }
    // Strip the DELETE TOPIC clause and re-render the SQL text.
    final T withoutDelete = (T) dropStatement.withoutDeleteClause();
    final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";
    return statement.withStatement(withoutDeleteText, withoutDelete);
}
|
@Test
public void shouldNotThrowIfNoOtherSourcesUsingTopic() {
    // Given: the dropped source plus an unrelated source on a different topic.
    final ConfiguredStatement<DropStream> dropStatement = givenStatement(
        "DROP SOMETHING DELETE TOPIC;",
        new DropStream(SOURCE_NAME,
            true,
            true)
    );
    final DataSource other1 = givenSource(SourceName.of("OTHER"), "other");
    final Map<SourceName, DataSource> sources = new HashMap<>();
    sources.put(SOURCE_NAME, source);
    sources.put(SourceName.of("OTHER"), other1);
    when(metaStore.getAllDataSources()).thenReturn(sources);
    // When/Then: injection must complete without throwing.
    deleteInjector.inject(dropStatement);
}
|
/**
 * Converts this message into a field map suitable for indexing into
 * Elasticsearch. Dots in field keys are replaced (Elasticsearch 2.0+ forbids
 * them), built-in fields (message, source, streams, size, timestamp) are
 * added, and processing errors are flattened into a single field. The
 * invalid-timestamp meter is marked when a processing error was caused by an
 * invalid timestamp.
 */
@Override
public Map<String, Object> toElasticSearchObject(ObjectMapper objectMapper, @Nonnull final Meter invalidTimestampMeter) {
    final Map<String, Object> obj = Maps.newHashMapWithExpectedSize(REQUIRED_FIELDS.size() + fields.size());
    for (Map.Entry<String, Object> entry : fields.entrySet()) {
        final String key = entry.getKey();
        // The id is carried separately, never as a document field.
        if (key.equals(FIELD_ID)) {
            continue;
        }
        final Object value = entry.getValue();
        // Elasticsearch does not allow "." characters in keys since version 2.0.
        // See: https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking_20_mapping_changes.html#_field_names_may_not_contain_dots
        if (key.contains(".")) {
            final String newKey = key.replace('.', KEY_REPLACEMENT_CHAR);
            // If the message already contains the transformed key, we skip the field and emit a warning.
            // This is still not optimal but better than implementing expensive logic with multiple replacement
            // character options. Conflicts should be rare...
            if (!obj.containsKey(newKey)) {
                obj.put(newKey, value);
            } else {
                LOG.warn("Keys must not contain a \".\" character! Ignoring field \"{}\"=\"{}\" in message [{}] - Unable to replace \".\" with a \"{}\" because of key conflict: \"{}\"=\"{}\"",
                        key, value, getId(), KEY_REPLACEMENT_CHAR, newKey, obj.get(newKey));
                LOG.debug("Full message with \".\" in message key: {}", this);
            }
        } else {
            if (obj.containsKey(key)) {
                final String newKey = key.replace(KEY_REPLACEMENT_CHAR, '.');
                // Deliberate warning duplicates because the key with the "." might be transformed before reaching
                // the duplicate original key with a "_". Otherwise we would silently overwrite the transformed key.
                LOG.warn("Keys must not contain a \".\" character! Ignoring field \"{}\"=\"{}\" in message [{}] - Unable to replace \".\" with a \"{}\" because of key conflict: \"{}\"=\"{}\"",
                        newKey, fields.get(newKey), getId(), KEY_REPLACEMENT_CHAR, key, value);
                LOG.debug("Full message with \".\" in message key: {}", this);
            }
            obj.put(key, value);
        }
    }
    // Built-in fields overwrite any same-named custom fields.
    obj.put(FIELD_MESSAGE, getMessage());
    obj.put(FIELD_SOURCE, getSource());
    obj.put(FIELD_STREAMS, getStreamIds());
    obj.put(FIELD_GL2_ACCOUNTED_MESSAGE_SIZE, getSize());
    final Object timestampValue = getField(FIELD_TIMESTAMP);
    // Missing timestamps get a fallback; all timestamps are normalized to UTC.
    DateTime dateTime = timestampValue == null ? fallbackForNullTimestamp() : convertToDateTime(timestampValue);
    obj.put(FIELD_TIMESTAMP, buildElasticSearchTimeFormat(dateTime.withZone(UTC)));
    if (processingErrors != null && !processingErrors.isEmpty()) {
        if (processingErrors.stream().anyMatch(processingError -> processingError.getCause().equals(ProcessingFailureCause.InvalidTimestampException))) {
            invalidTimestampMeter.mark();
        }
        obj.put(FIELD_GL2_PROCESSING_ERROR,
                processingErrors.stream()
                        .map(pe -> pe.getMessage() + " - " + pe.getDetails())
                        .collect(Collectors.joining(", ")));
    }
    return obj;
}
|
@Test
public void testToElasticSearchObject() throws Exception {
    // Adds two plain fields plus a streams field and verifies they survive
    // conversion alongside the built-in message/source/timestamp fields.
    message.addField("field1", "wat");
    message.addField("field2", "that");
    message.addField(Message.FIELD_STREAMS, Collections.singletonList("test-stream"));
    final Map<String, Object> object = message.toElasticSearchObject(objectMapper, invalidTimestampMeter);
    assertEquals("foo", object.get("message"));
    assertEquals("bar", object.get("source"));
    assertEquals("wat", object.get("field1"));
    assertEquals("that", object.get("field2"));
    assertEquals(Tools.buildElasticSearchTimeFormat((DateTime) message.getField("timestamp")), object.get("timestamp"));
    @SuppressWarnings("unchecked")
    final Collection<String> streams = (Collection<String>) object.get("streams");
    assertThat(streams).containsOnly("test-stream");
}
|
/**
 * Static factory for a {@link FeatureResolver} bound to the given engine
 * descriptor and package filter.
 */
static FeatureResolver create(
    ConfigurationParameters parameters, CucumberEngineDescriptor engineDescriptor,
    Predicate<String> packageFilter
) {
    return new FeatureResolver(parameters, engineDescriptor, packageFilter);
}
|
@Test
void scenario() {
    // Verifies display name, combined feature+scenario tags, source location,
    // descriptor type, unique id and resolved package of a scenario descriptor.
    TestDescriptor scenario = getScenario();
    assertEquals("A scenario", scenario.getDisplayName());
    assertEquals(
        asSet(create("FeatureTag"), create("ScenarioTag"), create("ResourceA"), create("ResourceAReadOnly")),
        scenario.getTags());
    assertEquals(of(from(featurePath, from(5, 3))), scenario.getSource());
    assertEquals(TEST, scenario.getType());
    assertEquals(
        id.append("feature", featureSegmentValue)
            .append("scenario", "5"),
        scenario.getUniqueId());
    PickleDescriptor pickleDescriptor = (PickleDescriptor) scenario;
    assertEquals(Optional.of("io.cucumber.junit.platform.engine"), pickleDescriptor.getPackage());
}
|
/**
 * Derives the predicate portion a connector has enforced, given the original
 * predicate and the unenforced remainder the connector returned. A column's
 * domain must be either fully enforced (absent from unenforced) or fully
 * unenforced (present and identical) — partial enforcement is rejected.
 *
 * @throws IllegalArgumentException if the unenforced domain is inconsistent
 *         with the predicate
 */
public static TupleDomain<ColumnHandle> computeEnforced(TupleDomain<ColumnHandle> predicate, TupleDomain<ColumnHandle> unenforced)
{
    if (predicate.isNone()) {
        // If the engine requests that the connector provides a layout with a domain of "none". The connector can have two possible reactions, either:
        // 1. The connector can provide an empty table layout.
        //   * There would be no unenforced predicate, i.e., unenforced predicate is TupleDomain.all().
        //   * The predicate was successfully enforced. Enforced predicate would be same as predicate: TupleDomain.none().
        // 2. The connector can't/won't.
        //   * The connector would tell the engine to put a filter on top of the scan, i.e., unenforced predicate is TupleDomain.none().
        //   * The connector didn't successfully enforce anything. Therefore, enforced predicate would be TupleDomain.all().
        if (unenforced.isNone()) {
            return TupleDomain.all();
        }
        if (unenforced.isAll()) {
            return TupleDomain.none();
        }
        throw new IllegalArgumentException();
    }
    // The engine requested the connector provides a layout with a non-none TupleDomain.
    // A TupleDomain is effectively a list of column-Domain pairs.
    // The connector is expected enforce the respective domain entirely on none, some, or all of the columns.
    // 1. When the connector could enforce none of the domains, the unenforced would be equal to predicate;
    // 2. When the connector could enforce some of the domains, the unenforced would contain a subset of the column-Domain pairs;
    // 3. When the connector could enforce all of the domains, the unenforced would be TupleDomain.all().
    // In all 3 cases shown above, the unenforced is not TupleDomain.none().
    checkArgument(!unenforced.isNone());
    Map<ColumnHandle, Domain> predicateDomains = predicate.getDomains().get();
    Map<ColumnHandle, Domain> unenforcedDomains = unenforced.getDomains().get();
    ImmutableMap.Builder<ColumnHandle, Domain> enforcedDomainsBuilder = ImmutableMap.builder();
    for (Map.Entry<ColumnHandle, Domain> entry : predicateDomains.entrySet()) {
        ColumnHandle predicateColumnHandle = entry.getKey();
        if (unenforcedDomains.containsKey(predicateColumnHandle)) {
            // Present in unenforced: the domain must match exactly (no partial enforcement).
            checkArgument(
                    entry.getValue().equals(unenforcedDomains.get(predicateColumnHandle)),
                    "Enforced tuple domain cannot be determined. The connector is expected to enforce the respective domain entirely on none, some, or all of the column.");
        }
        else {
            // Absent from unenforced: the connector enforced this column fully.
            enforcedDomainsBuilder.put(predicateColumnHandle, entry.getValue());
        }
    }
    Map<ColumnHandle, Domain> enforcedDomains = enforcedDomainsBuilder.build();
    // Sanity check: unenforced may not introduce columns outside the predicate.
    checkArgument(
            enforcedDomains.size() + unenforcedDomains.size() == predicateDomains.size(),
            "Enforced tuple domain cannot be determined. Connector returned an unenforced TupleDomain that contains columns not in predicate.");
    return TupleDomain.withColumnDomains(enforcedDomains);
}
|
@Test
public void testComputeEnforced()
{
    // Covers the full matrix: none/all predicates, fully/partially/not
    // enforced domains, and failure cases (conflicting values, unknown columns).
    assertComputeEnforced(TupleDomain.all(), TupleDomain.all(), TupleDomain.all());
    assertComputeEnforcedFails(TupleDomain.all(), TupleDomain.none());
    assertComputeEnforced(TupleDomain.none(), TupleDomain.all(), TupleDomain.none());
    assertComputeEnforced(TupleDomain.none(), TupleDomain.none(), TupleDomain.all());
    assertComputeEnforced(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.all(),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))));
    assertComputeEnforced(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.all());
    assertComputeEnforcedFails(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.none());
    // Same column but different domain value: partial enforcement is illegal.
    assertComputeEnforcedFails(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 9999L))));
    // Unenforced references a column not in the predicate: illegal.
    assertComputeEnforcedFails(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c9999"), Domain.singleValue(BIGINT, 1L))));
    assertComputeEnforced(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L),
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.all(),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L),
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))));
    assertComputeEnforced(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L),
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))));
    assertComputeEnforced(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L),
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L))));
    assertComputeEnforced(
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L),
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.withColumnDomains(ImmutableMap.of(
                    new TestingColumnHandle("c1"), Domain.singleValue(BIGINT, 1L),
                    new TestingColumnHandle("c2"), Domain.singleValue(BIGINT, 2L))),
            TupleDomain.all());
}
|
/**
 * Parses SQL-hint key/value pairs embedded in a comment of the given SQL
 * text into a {@link HintValueContext}. Recognized keys include data source
 * name, write-route-only, skip-rewrite, disabled audit names, shadow flag,
 * and per-table sharding database/table values. Returns an empty context
 * when the SQL carries no hint comment.
 */
public static HintValueContext extractHint(final String sql) {
    if (!containsSQLHint(sql)) {
        return new HintValueContext();
    }
    HintValueContext result = new HintValueContext();
    // Extract the raw "k=v ..." text between the hint marker and the closing comment.
    int hintKeyValueBeginIndex = getHintKeyValueBeginIndex(sql);
    String hintKeyValueText = sql.substring(hintKeyValueBeginIndex, sql.indexOf(SQL_COMMENT_SUFFIX, hintKeyValueBeginIndex));
    Map<String, String> hintKeyValues = getSQLHintKeyValues(hintKeyValueText);
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY)) {
        result.setDataSourceName(getHintValue(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)) {
        result.setWriteRouteOnly(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)) {
        result.setSkipSQLRewrite(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY)) {
        String property = getHintValue(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY);
        result.getDisableAuditNames().addAll(getSplitterSQLHintValue(property));
    }
    if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)) {
        result.setShadow(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)));
    }
    // Sharding value keys are table-qualified, so match every entry by key
    // prefix; keys are stored upper-cased for later case-insensitive lookup.
    for (Entry<String, String> entry : hintKeyValues.entrySet()) {
        Object value = convert(entry.getValue());
        Comparable<?> comparable = value instanceof Comparable ? (Comparable<?>) value : Objects.toString(value);
        if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_DATABASE_VALUE_KEY)) {
            result.getShardingDatabaseValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
        }
        if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_TABLE_VALUE_KEY)) {
            result.getShardingTableValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
        }
    }
    return result;
}
|
@Test
void assertSQLHintShardingDatabaseValue() {
    // A numeric sharding hint value is exposed as a BigInteger.
    HintValueContext actual = SQLHintUtils.extractHint("/* SHARDINGSPHERE_HINT: SHARDING_DATABASE_VALUE=10 */");
    assertThat(actual.getHintShardingDatabaseValue("t_order"), is(Collections.singletonList(new BigInteger("10"))));
}
|
/**
 * Convenience overload that issues the GET without the trailing optional
 * argument (delegates with {@code null}).
 */
public HttpResponse get(Application application, String hostName, String serviceType, Path path, Query query) {
    return get(application, hostName, serviceType, path, query, null);
}
|
@Test
public void testNormalGet() {
    // Captures the parameters and URL handed to the fetcher and verifies the
    // proxy builds the expected URL and passes the fetcher response through.
    ArgumentCaptor<HttpFetcher.Params> actualParams = ArgumentCaptor.forClass(HttpFetcher.Params.class);
    ArgumentCaptor<URI> actualUrl = ArgumentCaptor.forClass(URI.class);
    HttpResponse response = new StaticResponse(200, "application/json", "body");
    when(fetcher.get(actualParams.capture(), actualUrl.capture())).thenReturn(response);
    HttpResponse actualResponse = proxy.get(applicationMock, hostname, CLUSTERCONTROLLER_CONTAINER.serviceName,
            Path.parse("clustercontroller-status/v1/clusterName"),
            Query.parse("foo=bar"));
    assertEquals(1, actualParams.getAllValues().size());
    assertEquals(29000, actualParams.getValue().readTimeoutMs);
    assertEquals(1, actualUrl.getAllValues().size());
    assertEquals(URI.create("http://" + hostname + ":" + port + "/clustercontroller-status/v1/clusterName?foo=bar"),
            actualUrl.getValue());
    // The HttpResponse returned by the fetcher IS the same object as the one returned by the proxy,
    // when everything goes well.
    assertSame(actualResponse, response);
}
|
/**
 * Returns the Java class name for the single supported column.
 *
 * @param column the 1-based column index; only {@code 1} is valid
 * @return {@code "java.lang.Number"}
 * @throws IllegalArgumentException if {@code column} is not {@code 1}
 */
@Override
public String getColumnClassName(final int column) {
    // Single-column metadata: only index 1 is addressable.
    if (column != 1) {
        throw new IllegalArgumentException();
    }
    return Number.class.getName();
}
|
@Test
void assertGetColumnClassName() throws SQLException {
    // Column 1 must be reported as java.lang.Number.
    assertThat(actualMetaData.getColumnClassName(1), is("java.lang.Number"));
}
|
/**
 * Estimates the average bytes per record from the most recent sufficiently
 * large commit in the timeline; falls back to the configured estimate when
 * no usable commit exists or metadata cannot be read. For delta commits only
 * base-file write stats are considered.
 */
static long averageBytesPerRecord(HoodieTimeline commitTimeline, HoodieWriteConfig hoodieWriteConfig) {
    long avgSize = hoodieWriteConfig.getCopyOnWriteRecordSizeEstimate();
    // Commits smaller than this threshold are skipped as statistically unreliable.
    long fileSizeThreshold = (long) (hoodieWriteConfig.getRecordSizeEstimationThreshold() * hoodieWriteConfig.getParquetSmallFileLimit());
    if (!commitTimeline.empty()) {
        // Go over the reverse ordered commits to get a more recent estimate of average record size.
        Iterator<HoodieInstant> instants = commitTimeline.getReverseOrderedInstants().iterator();
        while (instants.hasNext()) {
            HoodieInstant instant = instants.next();
            try {
                HoodieCommitMetadata commitMetadata = HoodieCommitMetadata
                        .fromBytes(commitTimeline.getInstantDetails(instant).get(), HoodieCommitMetadata.class);
                if (instant.getAction().equals(COMMIT_ACTION) || instant.getAction().equals(REPLACE_COMMIT_ACTION)) {
                    long totalBytesWritten = commitMetadata.fetchTotalBytesWritten();
                    long totalRecordsWritten = commitMetadata.fetchTotalRecordsWritten();
                    if (totalBytesWritten > fileSizeThreshold && totalRecordsWritten > 0) {
                        avgSize = (long) Math.ceil((1.0 * totalBytesWritten) / totalRecordsWritten);
                        break;
                    }
                } else if (instant.getAction().equals(DELTA_COMMIT_ACTION)) {
                    // lets consider only base files in case of delta commits
                    AtomicLong totalBytesWritten = new AtomicLong(0L);
                    AtomicLong totalRecordsWritten = new AtomicLong(0L);
                    commitMetadata.getWriteStats().stream()
                            .filter(hoodieWriteStat -> FSUtils.isBaseFile(new StoragePath(hoodieWriteStat.getPath())))
                            .forEach(hoodieWriteStat -> {
                                totalBytesWritten.addAndGet(hoodieWriteStat.getTotalWriteBytes());
                                totalRecordsWritten.addAndGet(hoodieWriteStat.getNumWrites());
                            });
                    if (totalBytesWritten.get() > fileSizeThreshold && totalRecordsWritten.get() > 0) {
                        avgSize = (long) Math.ceil((1.0 * totalBytesWritten.get()) / totalRecordsWritten.get());
                        break;
                    }
                }
            } catch (Throwable t) {
                // make this fail safe: a corrupt/unreadable instant must not
                // break sizing — keep scanning older instants instead.
                LOG.error("Error trying to compute average bytes/record ", t);
            }
        }
    }
    return avgSize;
}
|
@Test
public void testErrorHandling() {
    // When instant details are absent, averageBytesPerRecord must not throw
    // and should fall back to the configured record-size estimate.
    int recordSize = 10000;
    HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder()
            .withProps(Collections.singletonMap(COPY_ON_WRITE_RECORD_SIZE_ESTIMATE.key(), String.valueOf(recordSize)))
            .build(false);
    HoodieDefaultTimeline commitsTimeline = new HoodieDefaultTimeline();
    List<HoodieInstant> instants = Collections.singletonList(
            new HoodieInstant(HoodieInstant.State.COMPLETED, HoodieTimeline.COMMIT_ACTION, "1"));
    when(mockTimeline.getInstants()).thenReturn(instants);
    when(mockTimeline.getReverseOrderedInstants()).then(i -> instants.stream());
    // Simulate a case where the instant details are absent
    commitsTimeline.setInstants(new ArrayList<>());
    assertEquals(recordSize, AverageRecordSizeUtils.averageBytesPerRecord(mockTimeline, writeConfig));
}
|
/**
 * Decorates a single source line with HTML for syntax highlighting and
 * symbol references.
 *
 * @param sourceLine   the raw source line, may be null
 * @param highlighting serialized highlighting data, may be blank
 * @param symbols      serialized symbol reference data, may be blank
 * @return the decorated line, an empty string when decoration yields no
 *         lines, or null when the input line or decoration result is null
 */
@CheckForNull
public String getDecoratedSourceAsHtml(@Nullable String sourceLine, @Nullable String highlighting, @Nullable String symbols) {
    if (sourceLine == null) {
        return null;
    }
    DecorationDataHolder decorationData = new DecorationDataHolder();
    if (StringUtils.isNotBlank(highlighting)) {
        decorationData.loadSyntaxHighlightingData(highlighting);
    }
    if (StringUtils.isNotBlank(symbols)) {
        decorationData.loadLineSymbolReferences(symbols);
    }
    List<String> decoratedLines =
        new HtmlTextDecorator().decorateTextWithHtml(sourceLine, decorationData, 1, 1);
    // Early returns replace the original nested if/else: null propagates,
    // an empty result maps to "", otherwise the single decorated line wins.
    if (decoratedLines == null) {
        return null;
    }
    if (decoratedLines.isEmpty()) {
        return "";
    }
    return decoratedLines.get(0);
}
|
@Test
public void should_ignore_empty_source() {
    // An empty (non-null) source line decorates to an empty string, not null.
    assertThat(sourceDecorator.getDecoratedSourceAsHtml("", "0,1,cppd", "")).isEmpty();
}
|
/**
 * Marks the current group coordinator as unknown so rediscovery can begin.
 * On the first call the coordinator is cleared and the disconnect start time
 * recorded; subsequent calls (while still disconnected) periodically log the
 * ongoing disconnect duration at debug level.
 *
 * @param cause         human-readable reason for discarding the coordinator
 * @param currentTimeMs current time in milliseconds
 */
public void markCoordinatorUnknown(final String cause, final long currentTimeMs) {
    if (this.coordinator != null) {
        log.info("Group coordinator {} is unavailable or invalid due to cause: {}. "
                + "Rediscovery will be attempted.", this.coordinator, cause);
        this.coordinator = null;
        timeMarkedUnknownMs = currentTimeMs;
        totalDisconnectedMin = 0;
    } else {
        // Already disconnected: log at most once per elapsed logging interval.
        long durationOfOngoingDisconnectMs = Math.max(0, currentTimeMs - timeMarkedUnknownMs);
        long currDisconnectMin = durationOfOngoingDisconnectMs / COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS;
        if (currDisconnectMin > this.totalDisconnectedMin) {
            log.debug("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnectMs);
            totalDisconnectedMin = currDisconnectMin;
        }
    }
}
|
@Test
public void testMarkCoordinatorUnknown() {
    // After marking the coordinator unknown, no FindCoordinator request may
    // be sent until the retry backoff elapses.
    CoordinatorRequestManager coordinatorManager = setupCoordinatorManager(GROUP_ID);
    expectFindCoordinatorRequest(coordinatorManager, Errors.NONE);
    assertTrue(coordinatorManager.coordinator().isPresent());
    // It may take time for metadata to converge between after a coordinator has
    // been demoted. This can cause a tight loop in which FindCoordinator continues to
    // return node X while that node continues to reply with NOT_COORDINATOR. Hence we
    // still want to ensure a backoff after successfully finding the coordinator.
    coordinatorManager.markCoordinatorUnknown("coordinator changed", time.milliseconds());
    assertEquals(Collections.emptyList(), coordinatorManager.poll(time.milliseconds()).unsentRequests);
    time.sleep(RETRY_BACKOFF_MS - 1);
    assertEquals(Collections.emptyList(), coordinatorManager.poll(time.milliseconds()).unsentRequests);
    time.sleep(RETRY_BACKOFF_MS);
    expectFindCoordinatorRequest(coordinatorManager, Errors.NONE);
    assertTrue(coordinatorManager.coordinator().isPresent());
}
|
/**
 * Deserializes the API answer into a {@link MenuButton}.
 *
 * @param answer raw JSON response from the Telegram API
 * @return the parsed menu button
 * @throws TelegramApiRequestException if the response indicates an error
 */
@Override
public MenuButton deserializeResponse(String answer) throws TelegramApiRequestException {
    final Class<MenuButton> responseType = MenuButton.class;
    return deserializeResponse(answer, responseType);
}
|
@Test
public void testGetChatMenuButtonErrorResponse() {
    // An "ok":false API reply must surface as TelegramApiRequestException
    // carrying the error code and description.
    String responseText = "{\"ok\":false,\"error_code\": 404,\"description\": \"Error message\"}";
    GetChatMenuButton getChatMenuButton = GetChatMenuButton
            .builder()
            .build();
    try {
        getChatMenuButton.deserializeResponse(responseText);
        fail();
    } catch (TelegramApiRequestException e) {
        assertEquals(404, e.getErrorCode());
        assertEquals("Error message", e.getApiResponse());
    }
}
|
/**
 * Removes the enclosing square brackets from a bracketed IPv6 address
 * literal (e.g. {@code "[::1]"} becomes {@code "::1"}). Inputs that are not
 * bracket-enclosed, or whose bracketed content is not a valid IPv6 address,
 * are returned unchanged.
 *
 * @param address the address string to normalize
 * @return the de-bracketed IPv6 address, or the original input
 */
@Nonnull
public static String removeBracketsFromIpv6Address(@Nonnull final String address)
{
    final String result;
    if (address.startsWith("[") && address.endsWith("]")) {
        result = address.substring(1, address.length()-1);
        try {
            Ipv6.parse(result);
            // The remainder is a valid IPv6 address. Return the de-bracketed value.
            return result;
        } catch (IllegalArgumentException e) {
            // The remainder isn't a valid IPv6 address. Return the original value.
            return address;
        }
    }
    // Not a bracket-enclosed string. Return the original input.
    return address;
}
|
@Test
public void stripBracketsIpv4() throws Exception {
    // Setup test fixture: a bracketed IPv4 address (not a valid IPv6 literal).
    final String input = "[192.168.0.1]";
    // Execute system under test.
    final String result = AuthCheckFilter.removeBracketsFromIpv6Address(input);
    // Verify result.
    assertEquals(input, result); // Should only strip brackets from IPv6, not IPv4.
}
|
/**
 * Converts a Flink filter expression into an Iceberg {@link Expression}, if
 * supported. Returns {@link Optional#empty()} for non-call expressions and
 * unsupported functions. Comparison conversions accept the literal on either
 * side (the mirrored operator is applied when the literal is on the left),
 * and NaN literals for EQ/NOT_EQ are mapped to isNaN/notNaN.
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
    if (!(flinkExpression instanceof CallExpression)) {
        return Optional.empty();
    }
    CallExpression call = (CallExpression) flinkExpression;
    Operation op = FILTERS.get(call.getFunctionDefinition());
    if (op != null) {
        switch (op) {
            case IS_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::isNull);
            case NOT_NULL:
                return onlyChildAs(call, FieldReferenceExpression.class)
                    .map(FieldReferenceExpression::getName)
                    .map(Expressions::notNull);
            case LT:
                return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
            case LT_EQ:
                return convertFieldAndLiteral(
                    Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
            case GT:
                return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
            case GT_EQ:
                return convertFieldAndLiteral(
                    Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
            case EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        // "field = NaN" is only expressible as isNaN in Iceberg.
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.isNaN(ref);
                        } else {
                            return Expressions.equal(ref, lit);
                        }
                    },
                    call);
            case NOT_EQ:
                return convertFieldAndLiteral(
                    (ref, lit) -> {
                        if (NaNUtil.isNaN(lit)) {
                            return Expressions.notNaN(ref);
                        } else {
                            return Expressions.notEqual(ref, lit);
                        }
                    },
                    call);
            case NOT:
                // Recursively convert the negated child expression.
                return onlyChildAs(call, CallExpression.class)
                    .flatMap(FlinkFilters::convert)
                    .map(Expressions::not);
            case AND:
                return convertLogicExpression(Expressions::and, call);
            case OR:
                return convertLogicExpression(Expressions::or, call);
            case STARTS_WITH:
                return convertLike(call);
        }
    }
    return Optional.empty();
}
|
@Test
public void testGreaterThan() {
    // "field > 1" and the mirrored "1 < field" must both convert to the same
    // Iceberg greaterThan predicate.
    UnboundPredicate<Integer> expected =
        org.apache.iceberg.expressions.Expressions.greaterThan("field1", 1);
    Optional<org.apache.iceberg.expressions.Expression> actual =
        FlinkFilters.convert(resolve(Expressions.$("field1").isGreater(Expressions.lit(1))));
    assertThat(actual).isPresent();
    assertPredicatesMatch(expected, actual.get());
    Optional<org.apache.iceberg.expressions.Expression> actual1 =
        FlinkFilters.convert(resolve(Expressions.lit(1).isLess(Expressions.$("field1"))));
    assertThat(actual1).isPresent();
    assertPredicatesMatch(expected, actual1.get());
}
|
/**
 * Writes a HEADERS frame without priority information, delegating to the
 * full variant with exclusive=false, streamDependency=0, weight=0 and
 * hasPriority=false.
 */
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding,
                                  boolean endStream, ChannelPromise promise) {
    return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream, promise);
}
|
@Test
public void headersWriteShouldHalfCloseAfterOnErrorForImplicitlyCreatedStream() throws Exception {
    // When the underlying writer fails (e.g. an HPACK encoding error), the
    // encoder must report onError first and then half-close the local stream.
    final ChannelPromise promise = newPromise();
    final Throwable ex = new RuntimeException();
    // Fake an encoding error, like HPACK's HeaderListSizeException
    when(writer.writeHeaders(eq(ctx), eq(STREAM_ID), eq(EmptyHttp2Headers.INSTANCE), eq(0), eq(true), eq(promise)))
            .thenAnswer(new Answer<ChannelFuture>() {
                @Override
                public ChannelFuture answer(InvocationOnMock invocation) {
                    promise.setFailure(ex);
                    return promise;
                }
            });
    writeAllFlowControlledFrames();
    encoder.writeHeaders(ctx, STREAM_ID, EmptyHttp2Headers.INSTANCE, 0, true, promise);
    assertTrue(promise.isDone());
    assertFalse(promise.isSuccess());
    assertFalse(stream(STREAM_ID).isHeadersSent());
    InOrder inOrder = inOrder(lifecycleManager);
    inOrder.verify(lifecycleManager).onError(eq(ctx), eq(true), eq(ex));
    inOrder.verify(lifecycleManager).closeStreamLocal(eq(stream(STREAM_ID)), eq(promise));
}
|
/**
 * Reports whether this predicate can be answered from an index: it requires
 * an ordered index on the attribute and a pattern expression usable as an
 * index prefix.
 */
@Override
public boolean isIndexed(QueryContext queryContext) {
    final Index matchedIndex = queryContext.matchIndex(attributeName, QueryContext.IndexMatchHint.PREFER_ORDERED);
    if (matchedIndex == null || !matchedIndex.isOrdered()) {
        return false;
    }
    return expressionCanBeUsedAsIndexPrefix();
}
|
@Test
public void likePredicateIsNotIndexed_whenHashIndexIsUsed() {
    // A hash (unordered) index cannot serve a LIKE prefix scan.
    QueryContext queryContext = mock(QueryContext.class);
    when(queryContext.matchIndex("this", QueryContext.IndexMatchHint.PREFER_ORDERED)).thenReturn(createIndex(IndexType.HASH));
    assertFalse(new LikePredicate("this", "string%").isIndexed(queryContext));
}
|
/**
 * Closes the connection to the given node: shuts the socket, fails any
 * in-flight requests to that node, and forgets its connection state.
 */
@Override
public void close(String nodeId) {
    log.info("Client requested connection close from node {}", nodeId);
    // Close the socket first, then abort whatever was still in flight.
    selector.close(nodeId);
    final long nowMs = time.milliseconds();
    cancelInFlightRequests(nodeId, nowMs, null, false);
    connectionStates.remove(nodeId);
}
|
@Test
public void testClose() {
    // After close(), in-flight requests must be cleared and the node must no
    // longer be considered ready.
    client.ready(node, time.milliseconds());
    awaitReady(client, node);
    client.poll(1, time.milliseconds());
    assertTrue(client.isReady(node, time.milliseconds()), "The client should be ready");
    ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection())
            .setAcks((short) 1)
            .setTimeoutMs(1000));
    ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true);
    client.send(request, time.milliseconds());
    assertEquals(1, client.inFlightRequestCount(node.idString()),
            "There should be 1 in-flight request after send");
    assertTrue(client.hasInFlightRequests(node.idString()));
    assertTrue(client.hasInFlightRequests());
    client.close(node.idString());
    assertEquals(0, client.inFlightRequestCount(node.idString()), "There should be no in-flight request after close");
    assertFalse(client.hasInFlightRequests(node.idString()));
    assertFalse(client.hasInFlightRequests());
    assertFalse(client.isReady(node, 0), "Connection should not be ready after close");
}
|
/**
 * Parses a config server specification of the form {@code host} or
 * {@code host:port}.
 *
 * @param configserverString the spec to parse
 * @return a {@link ConfigServer} with an optional port
 * @throws IllegalArgumentException if the string is malformed, e.g. a
 *         non-numeric port or more than one ':' separator
 */
static ConfigServer toConfigServer(String configserverString) {
    try {
        String[] hostPortTuple = configserverString.split(":");
        if (configserverString.contains(":")) {
            // Reject inputs such as "host:1:2" instead of silently dropping
            // the trailing segments, as the previous implementation did.
            if (hostPortTuple.length != 2) {
                throw new IllegalArgumentException("Expected 'host' or 'host:port'");
            }
            return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1])));
        } else {
            return new ConfigServer(configserverString, Optional.empty());
        }
    } catch (Exception e) {
        // Wrap every parse failure with the offending input for diagnostics.
        throw new IllegalArgumentException("Invalid config server " + configserverString, e);
    }
}
|
@Test(expected = IllegalArgumentException.class)
public void non_numeric_port_gives_exception() {
    // The port segment cannot be parsed as an integer.
    toConfigServer("myhost:non-numeric");
}
|
/**
 * Stores a key/value pair in the namespace KV store via the server API.
 * When {@code fileValue} is set, the value is read from that file. Literal
 * values (and explicitly STRING-typed values) are wrapped as JSON string
 * literals before upload. An optional {@code expiration} is parsed as an
 * ISO-8601 duration and sent as a "ttl" header.
 *
 * @return 0 on success
 * @throws Exception on I/O or HTTP failures
 */
@Override
public Integer call() throws Exception {
    super.call();
    // A file value takes precedence over the inline value argument.
    if (fileValue != null) {
        value = Files.readString(Path.of(fileValue.toString().trim()));
    }
    // The request body must be valid JSON, so bare literals are quoted.
    if (isLiteral(value) || type == Type.STRING) {
        value = wrapAsJsonLiteral(value);
    }
    Duration ttl = expiration == null ? null : Duration.parse(expiration);
    MutableHttpRequest<String> request = HttpRequest
        .PUT(apiUri("/namespaces/") + namespace + "/kv/" + key, value)
        .contentType(MediaType.APPLICATION_JSON_TYPE);
    if (ttl != null) {
        request.header("ttl", ttl.toString());
    }
    try (DefaultHttpClient client = client()) {
        client.toBlocking().exchange(this.requestOptions(request));
    }
    return 0;
}
|
@Test
void object() throws IOException, ResourceExpiredException {
    // A JSON object passed on the CLI must be stored and retrievable both as
    // a typed map and as its raw serialized form.
    try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
        EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
        embeddedServer.start();
        String[] args = {
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "io.kestra.cli",
            "object",
            "{\"some\":\"json\"}",
        };
        PicocliRunner.call(KvUpdateCommand.class, ctx, args);
        KVStoreService kvStoreService = ctx.getBean(KVStoreService.class);
        KVStore kvStore = kvStoreService.get(null, "io.kestra.cli", null);
        assertThat(kvStore.getValue("object").get(), is(new KVValue(Map.of("some", "json"))));
        assertThat(((InternalKVStore)kvStore).getRawValue("object").get(), is("{some:\"json\"}"));
    }
}
|
/**
 * Builds a coordinator record announcing the target-assignment epoch of a
 * consumer group.
 *
 * @param groupId         the consumer group id
 * @param assignmentEpoch the target assignment epoch
 * @return the key/value coordinator record
 */
public static CoordinatorRecord newConsumerGroupTargetAssignmentEpochRecord(
    String groupId,
    int assignmentEpoch
) {
    // Key is written with version 6; the value payload uses version 0.
    ApiMessageAndVersion key = new ApiMessageAndVersion(
        new ConsumerGroupTargetAssignmentMetadataKey()
            .setGroupId(groupId),
        (short) 6
    );
    ApiMessageAndVersion value = new ApiMessageAndVersion(
        new ConsumerGroupTargetAssignmentMetadataValue()
            .setAssignmentEpoch(assignmentEpoch),
        (short) 0
    );
    return new CoordinatorRecord(key, value);
}
|
// The factory must emit a record whose key (version 6) holds the group id
// and whose value (version 0) holds the assignment epoch.
@Test
public void testNewConsumerGroupTargetAssignmentEpochRecord() {
    CoordinatorRecord expectedRecord = new CoordinatorRecord(
        new ApiMessageAndVersion(
            new ConsumerGroupTargetAssignmentMetadataKey()
                .setGroupId("group-id"),
            (short) 6),
        new ApiMessageAndVersion(
            new ConsumerGroupTargetAssignmentMetadataValue()
                .setAssignmentEpoch(10),
            (short) 0));
    assertEquals(expectedRecord, newConsumerGroupTargetAssignmentEpochRecord(
        "group-id",
        10
    ));
}
|
/**
 * Replaces the current producer with a fresh one from the client supplier.
 * Only legal when running in exactly-once-v2 mode.
 *
 * <p>Blocked time accumulated by the retiring producer — including the time
 * spent in {@code close()} itself — is folded into
 * {@code oldProducerTotalBlockedTime} so metrics stay continuous.
 *
 * @throws IllegalStateException when eos-v2 is not enabled
 */
public void resetProducer() {
    if (processingMode != EXACTLY_ONCE_V2) {
        throw new IllegalStateException("Expected eos-v2 to be enabled, but the processing mode was " + processingMode);
    }

    // Carry over the old producer's blocked time before it is closed.
    oldProducerTotalBlockedTime += totalBlockedTime(producer);

    // close() itself blocks; count that time as well.
    final long closeStartNs = time.nanoseconds();
    close();
    oldProducerTotalBlockedTime += time.nanoseconds() - closeStartNs;

    producer = clientSupplier.getProducer(eosV2ProducerConfigs);
}
|
// resetProducer() is an eos-v2-only operation; under at-least-once it must
// fail fast with a descriptive IllegalStateException.
@Test
public void shouldFailOnResetProducerForAtLeastOnce() {
    final IllegalStateException thrown = assertThrows(
        IllegalStateException.class,
        () -> nonEosStreamsProducer.resetProducer()
    );
    assertThat(thrown.getMessage(), is("Expected eos-v2 to be enabled, but the processing mode was AT_LEAST_ONCE"));
}
|
/**
 * Evaluates and attaches any configured disrupt context to the request
 * context, then delegates the send to the wrapped client.
 */
@Override
public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext)
{
    // Disrupt metadata must be placed on the context before the request is sent.
    doEvaluateDisruptContext(request, requestContext);
    ResponseFuture<T> responseFuture = _client.sendRequest(request, requestContext);
    return responseFuture;
}
|
// When the controller supplies a disrupt context, sendRequest must delegate
// to the underlying client and record disrupt context/source on the request
// context's local attributes.
@Test
public void testSendRequest2()
{
    when(_controller.getDisruptContext(any(String.class), any(ResourceMethod.class))).thenReturn(_disrupt);
    _client.sendRequest(_request, _context);
    verify(_underlying, times(1)).sendRequest(eq(_request), eq(_context));
    verify(_context, times(1)).putLocalAttr(eq(DISRUPT_CONTEXT_KEY), eq(_disrupt));
    verify(_context, times(1)).putLocalAttr(eq(DISRUPT_SOURCE_KEY), any(String.class));
}
|
/**
 * Validates a rule-builder action step against its registered function.
 *
 * <p>Currently only the {@code set_field} function has dedicated parameter
 * validation; any other known function passes through unvalidated.
 *
 * @param step the rule builder step to validate
 * @return the validation result; a failed result for unknown functions
 */
@Override
public ValidationResult validate(RuleBuilderStep step) {
    final RuleFragment ruleFragment = actions.get(step.function());
    // Fix: an unknown function name previously caused a NullPointerException
    // on ruleFragment.descriptor(); report a proper validation failure instead.
    if (ruleFragment == null) {
        return new ValidationResult(true, "Unknown function: " + step.function());
    }
    FunctionDescriptor<?> functionDescriptor = ruleFragment.descriptor();
    String functionName = functionDescriptor.name();
    if (functionName.equals(SetField.NAME)) {
        return validateSetField(step.parameters());
    }
    // Functions without dedicated validation are accepted as-is.
    return new ValidationResult(false, "");
}
|
// A set_field step with a valid field name parameter must pass validation.
@Test
void validateSetFieldFunction() {
    HashMap<String, Object> parameters = new HashMap<>();
    parameters.put(FIELD_PARAM, "valid_new_field");
    RuleBuilderStep validStep = RuleBuilderStep.builder()
            .parameters(parameters)
            .function(SetField.NAME)
            .build();
    ValidationResult result = classUnderTest.validate(validStep);
    assertThat(result.failed()).isFalse();
}
|
/**
 * Converts a scalar operator tree into a predicate by dispatching through
 * the visitor.
 *
 * @param operator the operator to convert; may be null
 * @return the converted predicate, or null when the input is null
 */
public Predicate convert(ScalarOperator operator) {
    return operator == null ? null : operator.accept(this, null);
}
|
// An IS NULL scalar operator must convert into a leaf predicate backed by
// the IsNull function.
@Test
public void testNullOp() {
    ScalarOperator op = new IsNullPredicateOperator(false, F0);
    Predicate result = CONVERTER.convert(op);
    Assert.assertTrue(result instanceof LeafPredicate);
    LeafPredicate leafPredicate = (LeafPredicate) result;
    Assert.assertTrue(leafPredicate.function() instanceof IsNull);
}
|
/**
 * Creates a lossless image XObject from a BufferedImage.
 *
 * <p>Gray images are encoded directly. Otherwise a PNG-predictor encoding is
 * attempted; for small RGB results the classic flate encoding is also
 * produced and the smaller of the two is kept. As a last resort the image is
 * exported as 8-bit sRGB, which may lose color information.
 *
 * @param document the document the image belongs to
 * @param image the source image
 * @return the encoded image XObject
 * @throws IOException if encoding fails
 */
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
    throws IOException
{
    if (isGrayImage(image))
    {
        return createFromGrayImage(image, document);
    }

    // We try to encode the image with predictor
    if (USE_PREDICTOR_ENCODER)
    {
        PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
        if (pdImageXObject != null)
        {
            // Only small RGB images are worth the extra classic encoding pass.
            if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
                pdImageXObject.getBitsPerComponent() < 16 &&
                image.getWidth() * image.getHeight() <= 50 * 50)
            {
                // also create classic compressed image, compare sizes
                PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
                if (pdImageXObjectClassic.getCOSObject().getLength() <
                        pdImageXObject.getCOSObject().getLength())
                {
                    // Classic encoding is smaller: discard the predictor result.
                    pdImageXObject.getCOSObject().close();
                    return pdImageXObjectClassic;
                }
                else
                {
                    // Predictor result wins: discard the classic candidate.
                    pdImageXObjectClassic.getCOSObject().close();
                }
            }
            return pdImageXObject;
        }
    }

    // Fallback: We export the image as 8-bit sRGB and might lose color information
    return createFromRGBImage(image, document);
}
|
// Regression test for PDFBOX-4184: a govdocs JPEG must round-trip losslessly
// through createFromImage as an 8-bit RGB PNG-encoded XObject.
@Test
void testCreateLosslessFromGovdocs032163() throws IOException
{
    PDDocument document = new PDDocument();
    BufferedImage image = ImageIO.read(new File("target/imgs", "PDFBOX-4184-032163.jpg"));
    PDImageXObject ximage = LosslessFactory.createFromImage(document, image);
    validate(ximage, 8, image.getWidth(), image.getHeight(), "png", PDDeviceRGB.INSTANCE.getName());
    // Pixel-identical round trip is the lossless guarantee.
    checkIdent(image, ximage.getImage());
    doWritePDF(document, ximage, TESTRESULTSDIR, "PDFBOX-4184-032163.pdf");
}
|
/**
 * Checks whether a recording-log entry is a snapshot whose valid flag has
 * been cleared.
 *
 * @param entry the entry to inspect.
 * @return true when the entry is a snapshot marked invalid.
 */
static boolean isInvalidSnapshot(final Entry entry)
{
    return entry.type == ENTRY_TYPE_SNAPSHOT && !entry.isValid;
}
|
// Only entries that are both of snapshot type AND flagged invalid count as
// invalid snapshots; valid snapshots and term entries do not.
@Test
void shouldDetermineIfSnapshotIsInvalid()
{
    final RecordingLog.Entry validSnapshot = new RecordingLog.Entry(
        42, 5, 1024, 701, 1_000_000_000_000L, 16, ENTRY_TYPE_SNAPSHOT, null, true, 2);
    final RecordingLog.Entry invalidSnapshot = new RecordingLog.Entry(
        42, 5, 1024, 701, 1_000_000_000_000L, 16, ENTRY_TYPE_SNAPSHOT, null, false, 2);
    final RecordingLog.Entry term = new RecordingLog.Entry(
        42, 5, 1024, 701, 1_000_000_000_000L, 16, ENTRY_TYPE_TERM, null, true, 2);
    assertFalse(RecordingLog.isInvalidSnapshot(validSnapshot));
    assertTrue(RecordingLog.isInvalidSnapshot(invalidSnapshot));
    assertFalse(RecordingLog.isInvalidSnapshot(term));
}
|
/**
 * Resolves the runner provider matching the given scenario type.
 *
 * @param type the scenario simulation type.
 * @return a provider constructing the dedicated runner.
 * @throws IllegalArgumentException when no runner exists for the type.
 */
public static ScenarioRunnerProvider getSpecificRunnerProvider(Type type) {
    if (Type.DMN.equals(type)) {
        return DMNScenarioRunner::new;
    }
    if (Type.RULE.equals(type)) {
        return RuleScenarioRunner::new;
    }
    throw new IllegalArgumentException("Impossible to run simulation of type " + type);
}
|
// Every declared scenario type must resolve to a dedicated runner provider.
@Test
public void getSpecificRunnerProvider() {
    // all existing types should have a dedicated runner
    // Fix: isNotNull() only asserted the extracted list itself was non-null,
    // which is always true; assert that no resolved provider is null instead.
    assertThat(ScenarioSimulationModel.Type.values()).extracting(x -> AbstractScenarioRunner.getSpecificRunnerProvider(x)).doesNotContainNull();
}
|
/**
 * Builds an Ap01 message carrying the given BSN in the identification
 * numbers category, with no repetition and a fixed random key.
 *
 * @param bsn the citizen service number to embed.
 * @return the populated Ap01 message.
 */
public Ap01 createAp01(String bsn) {
    // Element: burgerservicenummer carrying the supplied BSN.
    Element bsnElement = new Element();
    bsnElement.setNummer(CategorieUtil.ELEMENT_BURGERSERVICENUMMER);
    bsnElement.setValue(bsn);

    // Category: identification numbers, holding the single BSN element.
    Container identificatienummers = new Container();
    identificatienummers.setNummer(CategorieUtil.CATEGORIE_IDENTIFICATIENUMMERS);
    identificatienummers.getElement().add(bsnElement);

    Ap01 ap01 = new Ap01();
    ap01.getCategorie().add(identificatienummers);
    ap01.setHerhaling(0);
    ap01.setRandomKey("SSSSSSSS");
    return ap01;
}
|
// The created Ap01 must carry the BSN in its categories, no repetition, and
// the fixed placeholder random key.
@Test
public void testCreateAp01Test(){
    String testBsn = "SSSSSSSSS";
    Ap01 result = classUnderTest.createAp01(testBsn);
    assertEquals(testBsn, CategorieUtil.findBsn(result.getCategorie()));
    assertEquals(0, result.getHerhaling());
    assertEquals("SSSSSSSS", result.getRandomKey());
}
|
/**
 * Creates a new event definition and, for scheduled processor types, its
 * accompanying job definition and trigger.
 *
 * <p>If scheduling setup fails, the freshly persisted event definition is
 * deleted again so no orphaned definition remains, and the exception is
 * rethrown.
 *
 * @param unsavedEventDefinition the definition to persist
 * @param user the user creating the definition, if any
 * @return the persisted event definition
 */
public EventDefinitionDto create(EventDefinitionDto unsavedEventDefinition, Optional<User> user) {
    final EventDefinitionDto eventDefinition = createEventDefinition(unsavedEventDefinition, user);
    try {
        createJobDefinitionAndTriggerIfScheduledType(eventDefinition);
    } catch (Exception e) {
        // Cleanup if anything goes wrong
        LOG.error("Removing event definition <{}/{}> because of an error creating the job definition",
            eventDefinition.id(), eventDefinition.title(), e);
        eventDefinitionService.delete(eventDefinition.id());
        throw e;
    }
    return eventDefinition;
}
|
// Creating an event definition must also create a matching job definition
// (with the processor's window/hop sizes) and an interval job trigger.
@Test
public void create() {
    final EventDefinitionDto newDto = EventDefinitionDto.builder()
        .title("Test")
        .description("A test event definition")
        .config(TestEventProcessorConfig.builder()
            .message("This is a test event processor")
            .searchWithinMs(300000)
            .executeEveryMs(60001)
            .build())
        .priority(3)
        .alert(false)
        .notificationSettings(EventNotificationSettings.withGracePeriod(60000))
        .keySpec(ImmutableList.of("a", "b"))
        .notifications(ImmutableList.of())
        .build();
    final EventDefinitionDto dto = handler.create(newDto, Optional.empty());
    // Handler should create the event definition
    assertThat(eventDefinitionService.get(dto.id())).isPresent();
    final Optional<JobDefinitionDto> jobDefinition = jobDefinitionService.getByConfigField("event_definition_id", dto.id());
    // Handler also should create the job definition for the event definition/processor
    assertThat(jobDefinition).isPresent().get().satisfies(definition -> {
        assertThat(definition.title()).isEqualTo("Test");
        assertThat(definition.description()).isEqualTo("A test event definition");
        assertThat(definition.config()).isInstanceOf(EventProcessorExecutionJob.Config.class);
        // Window size/hop must mirror searchWithinMs/executeEveryMs above.
        final EventProcessorExecutionJob.Config config = (EventProcessorExecutionJob.Config) definition.config();
        assertThat(config.processingWindowSize()).isEqualTo(300000);
        assertThat(config.processingHopSize()).isEqualTo(60001);
    });
    // And the handler should also create a job trigger for the created job definition
    final Optional<JobTriggerDto> jobTrigger = jobTriggerService.nextRunnableTrigger();
    assertThat(jobTrigger).isPresent().get().satisfies(trigger -> {
        assertThat(trigger.jobDefinitionId()).isEqualTo(jobDefinition.get().id());
        assertThat(trigger.schedule()).isInstanceOf(IntervalJobSchedule.class);
        final IntervalJobSchedule schedule = (IntervalJobSchedule) trigger.schedule();
        assertThat(schedule.interval()).isEqualTo(60001);
        assertThat(schedule.unit()).isEqualTo(TimeUnit.MILLISECONDS);
    });
}
|
/**
 * Sets the strictness applied when reading JSON.
 *
 * @param strictness the strictness mode to use; must not be null.
 * @throws NullPointerException if {@code strictness} is null.
 */
public final void setStrictness(Strictness strictness) {
    this.strictness = Objects.requireNonNull(strictness);
}
|
// setStrictness must reject null with a NullPointerException.
@Test
public void testSetStrictnessNull() {
    JsonReader reader = new JsonReader(reader("{}"));
    assertThrows(NullPointerException.class, () -> reader.setStrictness(null));
}
|
/**
 * Reads an ingestion job spec file and renders it as a Groovy template using,
 * in increasing precedence: values from an optional property file,
 * environment-variable overrides for those properties, and an explicit
 * context map. The rendered spec is parsed as YAML by default, or JSON when
 * the job-spec-format property says so.
 *
 * @param jobSpecFilePath path to the job spec template file
 * @param propertyFilePath optional path to a properties file with template values
 * @param context optional explicit template values (highest precedence)
 * @param environmentValues optional environment overrides for property values
 * @return the parsed segment generation job spec
 */
public static SegmentGenerationJobSpec getSegmentGenerationJobSpec(String jobSpecFilePath, String propertyFilePath,
    Map<String, Object> context, Map<String, String> environmentValues) {
  Properties properties = new Properties();
  if (propertyFilePath != null) {
    // Fix: close the property file stream; it was previously leaked on success.
    try (java.io.InputStream inputStream = FileUtils.openInputStream(new File(propertyFilePath))) {
      properties.load(inputStream);
    } catch (IOException e) {
      throw new RuntimeException(
          String.format("Unable to read property file [%s] into properties.", propertyFilePath), e);
    }
  }
  Map<String, Object> propertiesMap = (Map) properties;
  if (environmentValues != null) {
    // Environment values override matching entries from the property file.
    // (put on an existing key is not a structural modification, so iterating
    // keySet here is safe.)
    for (String propertyName: propertiesMap.keySet()) {
      if (environmentValues.get(propertyName) != null) {
        propertiesMap.put(propertyName, environmentValues.get(propertyName));
      }
    }
  }
  if (context != null) {
    // Explicit context values take the highest precedence.
    propertiesMap.putAll(context);
  }
  String jobSpecTemplate;
  // Fix: close the template reader; the FileReader was previously leaked.
  try (java.io.Reader reader = new BufferedReader(new FileReader(jobSpecFilePath))) {
    jobSpecTemplate = IOUtils.toString(reader);
  } catch (IOException e) {
    throw new RuntimeException(String.format("Unable to read ingestion job spec file [%s].", jobSpecFilePath), e);
  }
  String jobSpecStr;
  try {
    jobSpecStr = GroovyTemplateUtils.renderTemplate(jobSpecTemplate, propertiesMap);
  } catch (Exception e) {
    throw new RuntimeException(String
        .format("Unable to render templates on ingestion job spec template file - [%s] with propertiesMap - [%s].",
            jobSpecFilePath, Arrays.toString(propertiesMap.entrySet().toArray())), e);
  }
  String jobSpecFormat = (String) propertiesMap.getOrDefault(JOB_SPEC_FORMAT, YAML);
  if (jobSpecFormat.equals(JSON)) {
    try {
      return JsonUtils.stringToObject(jobSpecStr, SegmentGenerationJobSpec.class);
    } catch (IOException e) {
      throw new RuntimeException(String
          .format("Unable to parse job spec - [%s] to JSON with propertiesMap - [%s]", jobSpecFilePath,
              Arrays.toString(propertiesMap.entrySet().toArray())), e);
    }
  }
  return new Yaml().loadAs(jobSpecStr, SegmentGenerationJobSpec.class);
}
|
// Template values must be merged from property file, environment overrides
// and explicit context, and rendered into the input/output URIs of the spec.
@Test
public void testIngestionJobLauncherWithTemplateAndPropertyFileAndValueAndEnvironmentVariableOverride() {
    // Explicit context: year=2020 (highest precedence).
    Map<String, Object> context = GroovyTemplateUtils.getTemplateContext(Arrays.asList("year=2020"));
    SegmentGenerationJobSpec spec = IngestionJobLauncher.getSegmentGenerationJobSpec(
        GroovyTemplateUtils.class.getClassLoader().getResource("ingestion_job_spec_template.yaml").getFile(),
        GroovyTemplateUtils.class.getClassLoader().getResource("job.config").getFile(), context,
        _defaultEnvironmentValues);
    Assert.assertEquals(spec.getInputDirURI(), "file:///path/to/input/2020/08/07");
    Assert.assertEquals(spec.getOutputDirURI(), "file:///path/to/output/2020/08/07");
    Assert.assertEquals(spec.getSegmentCreationJobParallelism(), 100);
}
|
/**
 * Replays the transactions contained in every full block between the given
 * bounds (inclusive), in ascending block order.
 *
 * @param startBlock first block to replay
 * @param endBlock last block to replay
 * @return a flowable emitting each transaction of each replayed block
 */
public Flowable<Transaction> replayTransactionsFlowable(
        DefaultBlockParameter startBlock, DefaultBlockParameter endBlock) {
    // fullTransactionObjects=true so each block carries complete transactions.
    return replayBlocksFlowable(startBlock, endBlock, true)
            .flatMapIterable(JsonRpc2_0Rx::toTransactions);
}
|
// Replaying blocks 0..2 must emit every transaction of every block, in block
// order, and complete afterwards.
@Test
public void testReplayTransactionsFlowable() throws Exception {
    List<EthBlock> ethBlocks =
            Arrays.asList(
                    createBlockWithTransactions(
                            0,
                            Arrays.asList(
                                    createTransaction("0x1234"),
                                    createTransaction("0x1235"),
                                    createTransaction("0x1236"))),
                    createBlockWithTransactions(
                            1,
                            Arrays.asList(
                                    createTransaction("0x2234"),
                                    createTransaction("0x2235"),
                                    createTransaction("0x2236"))),
                    createBlockWithTransactions(
                            2,
                            Arrays.asList(
                                    createTransaction("0x3234"), createTransaction("0x3235"))));
    // Stub the RPC client to return the prepared blocks one per request.
    OngoingStubbing<EthBlock> stubbing =
            when(web3jService.send(any(Request.class), eq(EthBlock.class)));
    for (EthBlock ethBlock : ethBlocks) {
        stubbing = stubbing.thenReturn(ethBlock);
    }
    List<Transaction> expectedTransactions =
            ethBlocks.stream()
                    .flatMap(it -> it.getBlock().getTransactions().stream())
                    .map(it -> (Transaction) it.get())
                    .collect(Collectors.toList());
    Flowable<Transaction> flowable =
            web3j.replayPastTransactionsFlowable(
                    new DefaultBlockParameterNumber(BigInteger.ZERO),
                    new DefaultBlockParameterNumber(BigInteger.valueOf(2)));
    // Latches let us wait for all emissions and the completion signal.
    CountDownLatch transactionLatch = new CountDownLatch(expectedTransactions.size());
    CountDownLatch completedLatch = new CountDownLatch(1);
    List<Transaction> results = new ArrayList<>(expectedTransactions.size());
    Disposable subscription =
            flowable.subscribe(
                    result -> {
                        results.add(result);
                        transactionLatch.countDown();
                    },
                    throwable -> fail(throwable.getMessage()),
                    () -> completedLatch.countDown());
    transactionLatch.await(1, TimeUnit.SECONDS);
    assertEquals(results, (expectedTransactions));
    subscription.dispose();
    completedLatch.await(1, TimeUnit.SECONDS);
    assertTrue(subscription.isDisposed());
}
|
/**
 * Creates a Mono that obtains its backing Netty future lazily from the
 * supplier at subscription time rather than eagerly.
 *
 * @param deferredFuture supplier invoked on subscribe to produce the future
 * @param <F> the future type
 * @return a Mono completing when the supplied future completes
 */
public static <F extends Future<Void>> Mono<Void> deferFuture(Supplier<F> deferredFuture) {
    return new DeferredFutureMono<>(deferredFuture);
}
|
// A deferred future that fails after subscription with ClosedChannelException
// must surface as an AbortedException on the Mono.
@SuppressWarnings("FutureReturnValueIgnored")
@Test
void testDeferredFutureMonoLater() {
    ImmediateEventExecutor eventExecutor = ImmediateEventExecutor.INSTANCE;
    Promise<Void> promise = eventExecutor.newPromise();
    Supplier<Promise<Void>> promiseSupplier = () -> promise;
    StepVerifier.create(FutureMono.deferFuture(promiseSupplier))
                .expectSubscription()
                .then(() -> promise.setFailure(new ClosedChannelException()))
                .expectError(AbortedException.class)
                .verify(Duration.ofSeconds(30));
}
|
/**
 * Reads a full page into the target buffer.
 *
 * <p>First looks up the page's size under the page lock (taking the meta
 * store lock briefly to refresh LRU ordering), then delegates to the
 * sized {@code get} overload outside the meta store lock.
 *
 * @return the number of bytes read, or 0 when the page is not cached
 */
@Override
public int get(PageId pageId, int pageOffset, ReadTargetBuffer buffer,
    CacheContext cacheContext) {
    ReadWriteLock pageLock = getPageLock(pageId);
    long pageSize = -1L;
    // Hold the per-page read lock while consulting the meta store so the
    // page cannot be evicted between lookup and size read.
    try (LockResource r = new LockResource(pageLock.readLock())) {
        PageInfo pageInfo;
        try (LockResource r2 = new LockResource(mPageMetaStore.getLock().readLock())) {
            pageInfo = mPageMetaStore.getPageInfo(pageId); //check if page exists and refresh LRU items
        } catch (PageNotFoundException e) {
            // Cache miss: report zero bytes rather than raising.
            LOG.debug("get({},pageOffset={}) fails due to page not found", pageId, pageOffset);
            return 0;
        }
        pageSize = pageInfo.getPageSize();
    }
    return get(pageId, pageOffset, (int) pageSize, buffer, cacheContext);
}
|
// A destination buffer smaller than the requested byte count must be
// rejected with IllegalArgumentException.
@Test
public void getNotEnoughSpaceException() throws Exception {
    byte[] buf = new byte[PAGE1.length - 1];
    assertThrows(IllegalArgumentException.class, () ->
        mCacheManager.get(PAGE_ID1, PAGE1.length, buf, 0));
}
|
/**
 * Creates the encrypt result decorator matching the SQL statement type:
 * SELECT statements get the DQL decorator, DAL statements the DAL decorator,
 * everything else needs no decoration.
 */
@Override
public Optional<ResultDecorator<EncryptRule>> newInstance(final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database,
                                                          final EncryptRule encryptRule, final ConfigurationProperties props, final SQLStatementContext sqlStatementContext) {
    if (sqlStatementContext instanceof SelectStatementContext) {
        SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
        return Optional.of(new EncryptDQLResultDecorator(database, encryptRule, selectStatementContext));
    }
    if (sqlStatementContext.getSqlStatement() instanceof DALStatement) {
        return Optional.of(new EncryptDALResultDecorator(globalRuleMetaData));
    }
    return Optional.empty();
}
|
// A DAL statement (e.g. EXPLAIN) must yield the DAL result decorator.
@Test
void assertNewInstanceWithDALStatement() {
    SQLStatementContext sqlStatementContext = mock(ExplainStatementContext.class);
    when(sqlStatementContext.getSqlStatement()).thenReturn(mock(MySQLExplainStatement.class));
    EncryptResultDecoratorEngine engine = (EncryptResultDecoratorEngine) OrderedSPILoader.getServices(ResultProcessEngine.class, Collections.singleton(rule)).get(rule);
    Optional<ResultDecorator<EncryptRule>> actual = engine.newInstance(mock(RuleMetaData.class), database, rule, mock(ConfigurationProperties.class), sqlStatementContext);
    assertTrue(actual.isPresent());
    assertThat(actual.get(), instanceOf(EncryptDALResultDecorator.class));
}
|
/**
 * Returns an approximate simple name for a named class, delegating to the
 * string-based overload.
 *
 * @param clazz the class; must not be anonymous (anonymous classes have no
 *        meaningful simple name)
 * @param dropOuterClassNames whether enclosing class names are stripped
 * @return the approximated simple name
 */
private static String approximateSimpleName(Class<?> clazz, boolean dropOuterClassNames) {
    checkArgument(!clazz.isAnonymousClass(), "Attempted to get simple name of anonymous class");
    return approximateSimpleName(clazz.getName(), dropOuterClassNames);
}
|
// With dropOuterClassNames=true, named inner parts survive while purely
// numeric (anonymous) suffixes keep the full form.
@Test
public void testDropsOuterClassNamesTrue() {
    assertEquals("Bar", NameUtils.approximateSimpleName("Foo$1$Bar", true));
    assertEquals("Foo$1", NameUtils.approximateSimpleName("Foo$1", true));
    assertEquals("Foo$1$2", NameUtils.approximateSimpleName("Foo$1$2", true));
}
|
/**
 * Checks whether placing a container of the given application on the node
 * satisfies the placement constraint.
 *
 * <p>A null constraint trivially passes. Otherwise the constraint is first
 * normalized via {@link SingleConstraintTransformer} and then dispatched to
 * the single/and/or evaluator matching its expression type.
 *
 * @param appId the application requesting placement
 * @param constraint the placement constraint, possibly null
 * @param node the candidate scheduler node
 * @param atm the allocation tags manager used for tag lookups
 * @param dcOpt optional diagnostics collector for failure details
 * @return true when the node satisfies the constraint
 * @throws InvalidAllocationTagsQueryException for unsupported constraint types
 *         or invalid tag queries
 */
private static boolean canSatisfyConstraints(ApplicationId appId,
    PlacementConstraint constraint, SchedulerNode node,
    AllocationTagsManager atm,
    Optional<DiagnosticsCollector> dcOpt)
    throws InvalidAllocationTagsQueryException {
  if (constraint == null) {
    LOG.debug("Constraint is found empty during constraint validation for"
        + " app:{}", appId);
    return true;
  }

  // If this is a single constraint, transform to SingleConstraint
  SingleConstraintTransformer singleTransformer =
      new SingleConstraintTransformer(constraint);
  constraint = singleTransformer.transform();
  AbstractConstraint sConstraintExpr = constraint.getConstraintExpr();

  // TODO handle other type of constraints, e.g CompositeConstraint
  if (sConstraintExpr instanceof SingleConstraint) {
    SingleConstraint single = (SingleConstraint) sConstraintExpr;
    return canSatisfySingleConstraint(appId, single, node, atm, dcOpt);
  } else if (sConstraintExpr instanceof And) {
    And and = (And) sConstraintExpr;
    return canSatisfyAndConstraint(appId, and, node, atm, dcOpt);
  } else if (sConstraintExpr instanceof Or) {
    Or or = (Or) sConstraintExpr;
    return canSatisfyOrConstraint(appId, or, node, atm, dcOpt);
  } else {
    throw new InvalidAllocationTagsQueryException(
        "Unsupported type of constraint: "
            + sConstraintExpr.getClass().getSimpleName());
  }
}
|
// Verifies inter-app anti-affinity addressed by application id: app2's
// constraint against app1's "hbase-m" tag must reject nodes hosting app1's
// containers, and app3's self-scoped constraint must only consider its own
// containers.
@Test
public void testInterAppConstraintsByAppID()
    throws InvalidAllocationTagsQueryException {
  AllocationTagsManager tm = new AllocationTagsManager(rmContext);
  PlacementConstraintManagerService pcm =
      new MemoryPlacementConstraintManager();
  rmContext.setAllocationTagsManager(tm);
  rmContext.setPlacementConstraintManager(pcm);
  long ts = System.currentTimeMillis();
  ApplicationId application1 = BuilderUtils.newApplicationId(ts, 123);
  ApplicationId application2 = BuilderUtils.newApplicationId(ts, 124);
  ApplicationId application3 = BuilderUtils.newApplicationId(ts, 125);

  // Register App1 with anti-affinity constraint map.
  RMNode n0r1 = rmNodes.get(0);
  RMNode n1r1 = rmNodes.get(1);
  RMNode n2r2 = rmNodes.get(2);
  RMNode n3r2 = rmNodes.get(3);

  /**
   * Place container:
   *  n0: app1/hbase-m(1)
   *  n1: ""
   *  n2: app1/hbase-m(1)
   *  n3: ""
   */
  tm.addContainer(n0r1.getNodeID(),
      newContainerId(application1, 0), ImmutableSet.of("hbase-m"));
  tm.addContainer(n2r2.getNodeID(),
      newContainerId(application1, 1), ImmutableSet.of("hbase-m"));
  Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n0r1.getNodeID())
      .get("hbase-m").longValue());
  Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n2r2.getNodeID())
      .get("hbase-m").longValue());

  SchedulerNode schedulerNode0 =newSchedulerNode(n0r1.getHostName(),
      n0r1.getRackName(), n0r1.getNodeID());
  SchedulerNode schedulerNode1 =newSchedulerNode(n1r1.getHostName(),
      n1r1.getRackName(), n1r1.getNodeID());
  SchedulerNode schedulerNode2 =newSchedulerNode(n2r2.getHostName(),
      n2r2.getRackName(), n2r2.getNodeID());
  SchedulerNode schedulerNode3 =newSchedulerNode(n3r2.getHostName(),
      n3r2.getRackName(), n3r2.getNodeID());

  // App2: anti-affinity against app1's hbase-m tag, addressed by AppID.
  TargetApplicationsNamespace namespace =
      new TargetApplicationsNamespace.AppID(application1);
  Map<Set<String>, PlacementConstraint> constraintMap = new HashMap<>();
  PlacementConstraint constraint2 = PlacementConstraints
      .targetNotIn(NODE, allocationTagWithNamespace(namespace.toString(),
          "hbase-m"))
      .build();
  Set<String> srcTags2 = new HashSet<>();
  srcTags2.add("app2");
  constraintMap.put(srcTags2, constraint2);

  pcm.registerApplication(application2, constraintMap);

  // Anti-affinity with app1/hbase-m so it should not be able to be placed
  // onto n0 and n2 as they already have hbase-m allocated.
  Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(
      application2, createSchedulingRequest(srcTags2),
      schedulerNode0, pcm, tm));
  Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(
      application2, createSchedulingRequest(srcTags2),
      schedulerNode1, pcm, tm));
  Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(
      application2, createSchedulingRequest(srcTags2),
      schedulerNode2, pcm, tm));
  Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(
      application2, createSchedulingRequest(srcTags2),
      schedulerNode3, pcm, tm));

  // Intra-app constraint
  // Test with default and empty namespace
  TargetApplicationsNamespace self = new TargetApplicationsNamespace.Self();
  PlacementConstraint constraint3 = PlacementConstraints
      .targetNotIn(NODE, allocationTagWithNamespace(self.toString(),
          "hbase-m"))
      .build();
  Set<String> srcTags3 = new HashSet<>();
  srcTags3.add("app3");
  constraintMap.put(srcTags3, constraint3);

  pcm.registerApplication(application3, constraintMap);

  /**
   * Place container:
   *  n0: app1/hbase-m(1), app3/hbase-m
   *  n1: ""
   *  n2: app1/hbase-m(1)
   *  n3: ""
   */
  tm.addContainer(n0r1.getNodeID(),
      newContainerId(application3, 0), ImmutableSet.of("hbase-m"));

  // Anti-affinity to self/hbase-m
  Assert.assertFalse(PlacementConstraintsUtil
      .canSatisfyConstraints(application3, createSchedulingRequest(srcTags3),
          schedulerNode0, pcm, tm));
  Assert.assertTrue(PlacementConstraintsUtil
      .canSatisfyConstraints(application3, createSchedulingRequest(srcTags3),
          schedulerNode1, pcm, tm));
  Assert.assertTrue(PlacementConstraintsUtil
      .canSatisfyConstraints(application3, createSchedulingRequest(srcTags3),
          schedulerNode2, pcm, tm));
  Assert.assertTrue(PlacementConstraintsUtil
      .canSatisfyConstraints(application3, createSchedulingRequest(srcTags3),
          schedulerNode3, pcm, tm));

  pcm.unregisterApplication(application3);
}
|
/**
 * Creates a processor supplier that scans an IMap through the index
 * described by the given scan metadata.
 *
 * @param indexScanMetadata index name, filter and projection for the scan
 * @return the processor supplier
 */
static ProcessorSupplier readMapIndexSupplier(MapIndexScanMetadata indexScanMetadata) {
    return new MapIndexScanProcessorSupplier(indexScanMetadata);
}
|
// A sorted index scan with a range filter must emit only matching rows, in
// index order, with the configured projection applied.
@Test
public void test_whenFilterAndSpecificProjectionExists_sorted() {
    List<JetSqlRow> expected = new ArrayList<>();
    // Insert in reverse so output order proves the index (not insertion) order.
    for (int i = count; i > 0; i--) {
        map.put(i, new Person("value-" + i, i));
        if (i > count / 2) {
            expected.add(jetRow((count - i + 1), "value-" + (count - i + 1), (count - i + 1)));
        }
    }
    IndexConfig indexConfig = new IndexConfig(IndexType.SORTED, "age").setName(randomName());
    map.addIndex(indexConfig);
    // Range filter [0, count/2] on the sorted "age" index.
    IndexFilter filter = new IndexRangeFilter(intValue(0), true, intValue(count / 2), true);
    MapIndexScanMetadata metadata = metadata(indexConfig.getName(), filter, 0, false);
    TestSupport
        .verifyProcessor(adaptSupplier(MapIndexScanP.readMapIndexSupplier(metadata)))
        .hazelcastInstance(instance())
        .jobConfig(new JobConfig().setArgument(SQL_ARGUMENTS_KEY_NAME, emptyList()))
        .outputChecker(LENIENT_SAME_ITEMS_IN_ORDER)
        .disableSnapshots()
        .disableProgressAssertion()
        .expectOutput(expected);
}
|
/**
 * Populates the access-log record for the finished response and hands it to
 * the collector.
 *
 * <p>The response size is taken from the Content-Length header when present,
 * otherwise from the captured body writer. Large response bodies are not
 * recorded. When desensitization is enabled, the log is desensitized before
 * collection.
 *
 * @param shenyuContext the request context carrying timing and routing info
 * @param writer captured response body, possibly null
 */
private void logResponse(final ShenyuContext shenyuContext, final BodyWriter writer) {
    if (StringUtils.isNotBlank(getHeaders().getFirst(HttpHeaders.CONTENT_LENGTH))) {
        // Header value is non-blank here; the default is a defensive fallback.
        String size = StringUtils.defaultIfEmpty(getHeaders().getFirst(HttpHeaders.CONTENT_LENGTH), "0");
        logInfo.setResponseContentLength(Integer.parseInt(size));
    } else if (Objects.nonNull(writer)) {
        logInfo.setResponseContentLength(writer.size());
    }
    logInfo.setTimeLocal(shenyuContext.getStartDateTime().format(DATE_TIME_FORMATTER));
    logInfo.setModule(shenyuContext.getModule());
    // Upstream response time = now - request start.
    long costTime = DateUtils.acquireMillisBetween(shenyuContext.getStartDateTime(), LocalDateTime.now());
    logInfo.setUpstreamResponseTime(costTime);
    logInfo.setMethod(shenyuContext.getMethod());
    logInfo.setRpcType(shenyuContext.getRpcType());
    if (StringUtils.isNotBlank(shenyuContext.getRpcType())) {
        logInfo.setUpstreamIp(getUpstreamIp());
    }
    if (Objects.nonNull(writer)) {
        int size = writer.size();
        String body = writer.output();
        // Skip bodies exceeding the configured size limit.
        if (size > 0 && !LogCollectConfigUtils.isResponseBodyTooLarge(size)) {
            logInfo.setResponseBody(body);
        }
    } else {
        logInfo.setResponseBody("[bytes]");
    }
    // collect log
    if (Objects.nonNull(logCollector)) {
        // desensitize log
        if (desensitized) {
            logCollector.desensitize(logInfo, keyWordMatch, dataDesensitizeAlg);
        }
        logCollector.collect(logInfo);
    }
}
|
// Exercises the private logResponse via reflection: the captured body must
// land in the log record, and an http rpc type must populate the upstream ip.
@Test
public void testLogResponse() throws Exception {
    logCollector.start();
    // DefaultLogCollector.getInstance().start();
    loggingServerHttpResponse.setExchange(exchange);
    BodyWriter writer = new BodyWriter();
    String sendString = "hello, shenyu";
    ByteBuffer byteBuffer = ByteBuffer.wrap(sendString.getBytes(StandardCharsets.UTF_8));
    writer.write(byteBuffer.asReadOnlyBuffer());
    Method method1 = loggingServerHttpResponse.getClass().getDeclaredMethod("logResponse", ShenyuContext.class, BodyWriter.class);
    method1.setAccessible(true);
    method1.invoke(loggingServerHttpResponse, exchange.getAttribute(Constants.CONTEXT), writer);
    Field field1 = loggingServerHttpResponse.getClass().getDeclaredField("logInfo");
    field1.setAccessible(true);
    ShenyuRequestLog log1 = (ShenyuRequestLog) field1.get(loggingServerHttpResponse);
    Assertions.assertEquals(log1.getResponseBody(), "hello, shenyu");
    // Second invocation with an http rpc type must resolve the upstream ip.
    ShenyuContext shenyuContext2 = new ShenyuContext();
    shenyuContext2.setRpcType("http");
    shenyuContext2.setStartDateTime(startDateTime);
    exchange.getAttributes().put(Constants.CONTEXT, shenyuContext2);
    exchange.getAttributes().put(Constants.HTTP_DOMAIN, "http://localhost:9195/http/order/path/123/name");
    loggingServerHttpResponse.setExchange(exchange);
    Method method2 = loggingServerHttpResponse.getClass().getDeclaredMethod("logResponse", ShenyuContext.class, BodyWriter.class);
    method2.setAccessible(true);
    method2.invoke(loggingServerHttpResponse, exchange.getAttribute(Constants.CONTEXT), writer);
    Field field2 = loggingServerHttpResponse.getClass().getDeclaredField("logInfo");
    field2.setAccessible(true);
    ShenyuRequestLog log2 = (ShenyuRequestLog) field2.get(loggingServerHttpResponse);
    Assertions.assertEquals(log2.getUpstreamIp(), "localhost");
}
|
/**
 * Sets the fixed-width field lengths used when parsing/writing records.
 *
 * <p>NOTE(review): the array is stored by reference, so later mutation by
 * the caller is visible here — confirm whether a defensive copy is wanted.
 *
 * @param fieldLengths the length of each field in order
 * @return this data format, for fluent configuration
 */
public UniVocityFixedDataFormat setFieldLengths(int[] fieldLengths) {
    this.fieldLengths = fieldLengths;
    return this;
}
|
// The ignoreTrailingWhitespaces flag must be reflected both in the data
// format itself and in the writer and parser settings it creates.
@Test
public void shouldConfigureIgnoreTrailingWhitespaces() {
    UniVocityFixedDataFormat dataFormat = new UniVocityFixedDataFormat()
            .setFieldLengths(new int[] { 1, 2, 3 })
            .setIgnoreTrailingWhitespaces(true);
    assertTrue(dataFormat.getIgnoreTrailingWhitespaces());
    assertTrue(dataFormat.createAndConfigureWriterSettings().getIgnoreTrailingWhitespaces());
    assertTrue(dataFormat.createAndConfigureParserSettings().getIgnoreTrailingWhitespaces());
}
|
/**
 * Changes the root log level at runtime and records the change.
 * SQL statement logging is only enabled at TRACE verbosity.
 */
public void changeLevel(LoggerLevel level) {
    database.enableSqlLogging(level == TRACE);
    helper.changeRoot(serverProcessLogging.getLogLevelConfig(), Level.toLevel(level.name()));
    LoggerFactory.getLogger(ServerLogging.class).info("Level of logs changed to {}", level);
}
|
// For every supported level, changeLevel must hand the server's log level
// config and the logback-converted level to the helper.
@Test
@UseDataProvider("supportedSonarApiLevels")
public void changeLevel_calls_changeRoot_with_LogLevelConfig_and_level_converted_to_logback_class_then_log_INFO_message(LoggerLevel level) {
    LogLevelConfig logLevelConfig = LogLevelConfig.newBuilder(rootLoggerName).build();
    when(serverProcessLogging.getLogLevelConfig()).thenReturn(logLevelConfig);
    underTest.changeLevel(level);
    verify(logbackHelper).changeRoot(logLevelConfig, Level.valueOf(level.name()));
}
|
/**
 * Returns the full name of this item, derived from its organization and job.
 */
@Override
public String getFullName(){
    return getFullName(organization, job);
}
|
// A freestyle job must be enabled by default and toggle its "disabled" state
// through the disable/enable REST endpoints.
@Test
public void testFreestyle() throws Exception {
    Job job = j.createFreeStyleProject("freestyle");
    login();
    // Initially enabled.
    Assert.assertEquals(
        get("/organizations/jenkins/pipelines/" + job.getFullName() + "/").get("disabled"),
        false
    );
    // Disable and verify.
    put("/organizations/jenkins/pipelines/" + job.getFullName() + "/disable", "{}");
    Assert.assertEquals(
        get("/organizations/jenkins/pipelines/" + job.getFullName() + "/").get("disabled"),
        true
    );
    // Re-enable and verify.
    put("/organizations/jenkins/pipelines/" + job.getFullName() + "/enable", "{}");
    Assert.assertEquals(
        get("/organizations/jenkins/pipelines/" + job.getFullName() + "/").get("disabled"),
        false
    );
}
|
/**
 * Builds the Azure SQL Server JDBC URL for the given host, port and database.
 *
 * <p>Always-encrypted settings are appended when enabled, and the configured
 * Active Directory authentication method (password / MFA / integrated)
 * appends the matching authentication fragment.
 */
@Override
public String getURL( String hostname, String port, String databaseName ) {
    StringBuilder url = new StringBuilder( "jdbc:sqlserver://" )
        .append( hostname ).append( ":" ).append( port )
        .append( ";database=" ).append( databaseName )
        .append( ";encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;" );
    if ( getAttribute( IS_ALWAYS_ENCRYPTION_ENABLED, "" ).equals( "true" ) ) {
        url.append( "columnEncryptionSetting=Enabled;keyVaultProviderClientId=" )
            .append( getAttribute( CLIENT_ID, "" ) )
            .append( ";keyVaultProviderClientKey=" )
            .append( getAttribute( CLIENT_SECRET_KEY, "" ) )
            .append( ";" );
    }
    String authMethod = getAttribute( JDBC_AUTH_METHOD, "" );
    if ( ACTIVE_DIRECTORY_PASSWORD.equals( authMethod ) ) {
        return url.append( "authentication=ActiveDirectoryPassword;" ).toString();
    } else if ( ACTIVE_DIRECTORY_MFA.equals( authMethod ) ) {
        return url.append( "authentication=ActiveDirectoryInteractive;" ).toString();
    } else if ( ACTIVE_DIRECTORY_INTEGRATED.equals( authMethod ) ) {
        return url.append( "Authentication=ActiveDirectoryIntegrated;" ).toString();
    }
    return url.toString();
}
|
// With AAD password auth selected and always-encryption off, the URL must end
// with the ActiveDirectoryPassword authentication fragment.
@Test
public void testGetUrlWithAadPasswordAuth(){
    dbMeta.setAccessType( DatabaseMeta.TYPE_ACCESS_NATIVE );
    dbMeta.addAttribute( IS_ALWAYS_ENCRYPTION_ENABLED, "false" );
    dbMeta.addAttribute( JDBC_AUTH_METHOD, ACTIVE_DIRECTORY_PASSWORD );
    String expectedUrl = "jdbc:sqlserver://abc.database.windows.net:1433;database=AzureDB;encrypt=true;trustServerCertificate=false;hostNameInCertificate=*.database.windows.net;loginTimeout=30;authentication=ActiveDirectoryPassword;";
    String actualUrl = dbMeta.getURL( "abc.database.windows.net", "1433", "AzureDB" );
    assertEquals( expectedUrl, actualUrl );
}
|
/**
 * Appends a nonce-based temporary suffix to the given path.
 *
 * @param nonce the nonce embedded in the suffix.
 * @param path the base path.
 * @return the path with the temporary suffix appended.
 */
public static String temporaryFileName(long nonce, String path) {
    String suffix = String.format(TEMPORARY_SUFFIX_FORMAT, nonce);
    return path.concat(suffix);
}
|
// The temporary name must be deterministic for equal inputs and differ when
// either the nonce or the base path differs.
@Test
public void temporaryFileName() {
    assertEquals(PathUtils.temporaryFileName(1, "/"),
        PathUtils.temporaryFileName(1, "/"));
    assertNotEquals(PathUtils.temporaryFileName(1, "/"),
        PathUtils.temporaryFileName(2, "/"));
    assertNotEquals(PathUtils.temporaryFileName(1, "/"),
        PathUtils.temporaryFileName(1, "/a"));
}
|
/**
 * Creates a policer id from the given URI, keeping the URI for later access
 * while the string form backs the identifier.
 *
 * @param u the URI backing this id
 */
private PolicerId(URI u) {
    super(u.toString());
    uri = u;
}
|
// An id string exceeding the allowed length must be rejected.
@Test
public void testWrongCreation() {
    // Build not allowed string
    String wrongString = Strings.repeat("x", 1025);
    // Define expected exception
    exceptionWrongId.expect(IllegalArgumentException.class);
    // Create policer id
    PolicerId.policerId(wrongString);
}
|
/**
 * Resolves the value of a Hazelcast property.
 *
 * <p>Lookup precedence: explicitly configured properties, then the JVM
 * system property, then the parent property (recursively), then the
 * deprecated name (config or system property, with a warning), then the
 * property's function, and finally its default value.
 *
 * @param property the property to resolve
 * @return the resolved value, or the property's default (possibly null)
 */
public String getString(HazelcastProperty property) {
    String value = properties.getProperty(property.getName());
    if (value != null) {
        return value;
    }

    value = property.getSystemProperty();
    if (value != null) {
        return value;
    }

    // Fall back to the parent property chain before trying deprecated names.
    HazelcastProperty parent = property.getParent();
    if (parent != null) {
        return getString(parent);
    }

    String deprecatedName = property.getDeprecatedName();
    if (deprecatedName != null) {
        value = get(deprecatedName);
        if (value == null) {
            value = System.getProperty(deprecatedName);
        }

        if (value != null) {
            // we don't have a logger available, and the Logging service is constructed after the Properties are created.
            System.err.print("Don't use deprecated '" + deprecatedName + "' "
                    + "but use '" + property.getName() + "' instead. "
                    + "The former name will be removed in the next Hazelcast release.");

            return value;
        }
    }

    // A function-backed property computes its value from the whole property set.
    Function<HazelcastProperties, ?> function = property.getFunction();
    if (function != null) {
        return "" + function.apply(this);
    }

    return property.getDefaultValue();
}
|
@Test
public void setProperty_ensureHighestPriorityOfConfig() {
    // An explicit config entry must win over a system property of the same name.
    config.setProperty(ENTERPRISE_LICENSE_KEY.getName(), "configValue");
    ENTERPRISE_LICENSE_KEY.setSystemProperty("systemValue");
    String resolved = new HazelcastProperties(config).getString(ENTERPRISE_LICENSE_KEY);
    // Clean up the system property before asserting so a failure doesn't leak state.
    System.clearProperty(ENTERPRISE_LICENSE_KEY.getName());
    assertEquals("configValue", resolved);
}
|
@ApiOperation(value = "Delete user settings (deleteUserSettings)",
        notes = "Delete user settings by specifying list of json element xpaths. \n " +
        "Example: to delete B and C element in { \"A\": {\"B\": 5}, \"C\": 15} send A.B,C in jsonPaths request parameter")
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/user/settings/{paths}", method = RequestMethod.DELETE)
public void deleteUserSettings(@Parameter(description = PATHS)
                               @PathVariable(PATHS) String paths) throws ThingsboardException {
    // Deletes the comma-separated json paths from the current user's GENERAL settings.
    // Bug fix: validate under the actual parameter name so a missing/empty value is
    // reported as the 'paths' path variable, not the unrelated USER_ID parameter.
    checkParameter(PATHS, paths);
    SecurityUser currentUser = getCurrentUser();
    userSettingsService.deleteUserSettings(currentUser.getTenantId(), currentUser.getId(), UserSettingsType.GENERAL, Arrays.asList(paths.split(",")));
}
|
@Test
public void testDeleteUserSettings() throws Exception {
    loginCustomerUser();
    // Save settings, then delete nested path C.D and top-level B.
    JsonNode settingsToSave = JacksonUtil.toJsonNode("{\"A\":10, \"B\":10, \"C\":{\"D\": 16}}");
    Assert.assertEquals(settingsToSave, doPost("/api/user/settings", settingsToSave, JsonNode.class));
    doDelete("/api/user/settings/C.D,B");
    // B is gone entirely; C remains as an empty object.
    Assert.assertEquals(JacksonUtil.toJsonNode("{\"A\":10, \"C\":{}}"),
            doGet("/api/user/settings", JsonNode.class));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.