focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
public static Map<String, String> extractPrefixMap(
Map<String, String> properties, String prefix) {
Preconditions.checkNotNull(properties, "Invalid properties map: null");
Map<String, String> result = Maps.newHashMap();
properties.forEach(
(key, value) -> {
if (key != null && key.startsWith(prefix)) {
result.put(key.substring(prefix.length()), value);
}
});
return result;
} | @Test
public void testExtractPrefixMap() {
Map<String, String> input =
ImmutableMap.of(
"warehouse", "/tmp/warehouse",
"rest.prefix", "/ws/ralphs_catalog",
"rest.token", "YnVybiBhZnRlciByZWFkaW5nIC0gYWxzbyBoYW5rIGFuZCByYXVsIDQgZXZlcgo=",
"rest.rest.uri", "https://localhost:1080/",
"doesnt_start_with_prefix.rest", "",
"", "");
Map<String, String> expected =
ImmutableMap.of(
"prefix", "/ws/ralphs_catalog",
"token", "YnVybiBhZnRlciByZWFkaW5nIC0gYWxzbyBoYW5rIGFuZCByYXVsIDQgZXZlcgo=",
"rest.uri", "https://localhost:1080/");
Map<String, String> actual = RESTUtil.extractPrefixMap(input, "rest.");
assertThat(actual).isEqualTo(expected);
} |
public TwoFactorStatusResult getTwoFactorStatus(long accountId, String deviceName, String appCode) {
Map<String, Object> resultMap = accountClient.getTwoFactor(accountId, deviceName, appCode);
return objectMapper.convertValue(resultMap, TwoFactorStatusResult.class);
} | @Test
public void testStatusTwoFactor() {
Map<String, Object> result = Map.of(
"status", "OK",
"error", "custom error",
"setting_2_factor", true);
when(accountClient.getTwoFactor(eq(1L), anyString(), anyString())).thenReturn(result);
TwoFactorStatusResult twoFactorStatus = accountService.getTwoFactorStatus(1L, "deviceName", "appCode");
assertEquals(Status.OK, twoFactorStatus.getStatus());
assertEquals("custom error", twoFactorStatus.getError());
assertEquals(true, twoFactorStatus.getSetting());
} |
public static String getName(DistributedObject distributedObject) {
/*
* The motivation of this behaviour is that some distributed objects (`ICache`) can have prefixed name.
* For example, for the point of view of cache,
* it has pure name and full name which contains prefixes also.
*
* However, both of our `DistributedObject` and `javax.cache.Cache` (from JCache spec) interfaces
* have same method name with same signature. It is `String getName()`.
*
* From the distributed object side, name must be fully qualified name of object,
* but from the JCache spec side (also for backward compatibility),
* it must be pure cache name without any prefix.
* So there is same method name with same signature for different purposes.
* Therefore, `PrefixedDistributedObject` has been introduced to retrieve the
* fully qualified name of distributed object when it is needed.
*
* For cache case, the fully qualified name is full cache name contains Hazelcast prefix (`/hz`),
* cache name prefix regarding to URI and/or classloader if specified and pure cache name.
*/
if (distributedObject instanceof PrefixedDistributedObject object) {
return object.getPrefixedName();
} else {
return distributedObject.getName();
}
} | @Test
public void testGetName() {
DistributedObject distributedObject = mock(DistributedObject.class);
when(distributedObject.getName()).thenReturn("MockedDistributedObject");
String name = DistributedObjectUtil.getName(distributedObject);
assertEquals("MockedDistributedObject", name);
verify(distributedObject).getName();
verifyNoMoreInteractions(distributedObject);
} |
public static NetFlowV5Packet parsePacket(ByteBuf bb) {
final int readableBytes = bb.readableBytes();
final NetFlowV5Header header = parseHeader(bb.slice(bb.readerIndex(), HEADER_LENGTH));
final int packetLength = HEADER_LENGTH + header.count() * RECORD_LENGTH;
if (header.count() <= 0 || readableBytes < packetLength) {
throw new CorruptFlowPacketException("Insufficient data (expected: " + packetLength + " bytes, actual: " + readableBytes + " bytes)");
}
final ImmutableList.Builder<NetFlowV5Record> records = ImmutableList.builder();
int offset = HEADER_LENGTH;
for (int i = 0; i < header.count(); i++) {
records.add(parseRecord(bb.slice(offset + bb.readerIndex(), RECORD_LENGTH)));
offset += RECORD_LENGTH;
}
return NetFlowV5Packet.create(header, records.build(), offset);
} | @Test
public void pcap_pmacctd_NetFlowV5() throws Exception {
final List<NetFlowV5Record> allRecords = new ArrayList<>();
try (InputStream inputStream = Resources.getResource("netflow-data/pmacctd-netflow5.pcap").openStream()) {
final Pcap pcap = Pcap.openStream(inputStream);
pcap.loop(packet -> {
if (packet.hasProtocol(Protocol.UDP)) {
final UDPPacket udp = (UDPPacket) packet.getPacket(Protocol.UDP);
final ByteBuf byteBuf = Unpooled.wrappedBuffer(udp.getPayload().getArray());
final NetFlowV5Packet netFlowV5Packet = NetFlowV5Parser.parsePacket(byteBuf);
assertThat(netFlowV5Packet).isNotNull();
allRecords.addAll(netFlowV5Packet.records());
}
return true;
}
);
}
assertThat(allRecords).hasSize(42);
} |
@Override
public void createProxySelector(final DiscoveryHandlerDTO discoveryHandlerDTO, final ProxySelectorDTO proxySelectorDTO) {
DataChangedEvent dataChangedEvent = new DataChangedEvent(ConfigGroupEnum.PROXY_SELECTOR, DataEventTypeEnum.CREATE,
Collections.singletonList(DiscoveryTransfer.INSTANCE.mapToData(proxySelectorDTO)));
eventPublisher.publishEvent(dataChangedEvent);
} | @Test
public void testCreateProxySelector() {
doNothing().when(eventPublisher).publishEvent(any(DataChangedEvent.class));
localDiscoveryProcessor.createProxySelector(new DiscoveryHandlerDTO(), new ProxySelectorDTO());
verify(eventPublisher).publishEvent(any(DataChangedEvent.class));
} |
List<OpeningHtmlTag> getOpeningTagsEntries() {
return openingTagsEntries;
} | @Test
public void should_extract_lower_bounds_from_serialized_rules() {
List<OpeningHtmlTag> openingTagsEntries = decorationDataHolder.getOpeningTagsEntries();
assertThat(openingTagsEntries.get(0)).isEqualTo(new OpeningHtmlTag(0, "k"));
assertThat(openingTagsEntries.get(1)).isEqualTo(new OpeningHtmlTag(0, "cppd"));
assertThat(openingTagsEntries.get(2)).isEqualTo(new OpeningHtmlTag(54, "a"));
assertThat(openingTagsEntries.get(3)).isEqualTo(new OpeningHtmlTag(69, "k"));
assertThat(openingTagsEntries.get(4)).isEqualTo(new OpeningHtmlTag(80, "sym-80 sym"));
assertThat(openingTagsEntries.get(5)).isEqualTo(new OpeningHtmlTag(90, "sym-80 sym"));
assertThat(openingTagsEntries.get(6)).isEqualTo(new OpeningHtmlTag(106, "cppd"));
assertThat(openingTagsEntries.get(7)).isEqualTo(new OpeningHtmlTag(114, "k"));
assertThat(openingTagsEntries.get(8)).isEqualTo(new OpeningHtmlTag(140, "sym-80 sym"));
} |
@Override
public int hashCode() {
if (value == null) {
return 31;
}
// Using recommended hashing algorithm from Effective Java for longs and doubles
if (isIntegral(this)) {
long value = getAsNumber().longValue();
return (int) (value ^ (value >>> 32));
}
if (value instanceof Number) {
long value = Double.doubleToLongBits(getAsNumber().doubleValue());
return (int) (value ^ (value >>> 32));
}
return value.hashCode();
} | @Test
public void testFloatEqualsBigDecimal() {
JsonPrimitive p1 = new JsonPrimitive(10.25F);
JsonPrimitive p2 = new JsonPrimitive(new BigDecimal("10.25"));
assertThat(p1).isEqualTo(p2);
assertThat(p1.hashCode()).isEqualTo(p2.hashCode());
} |
@Override
public String getTableName() {
StringBuilder sb = new StringBuilder();
SQLServerOutputVisitor visitor = new SQLServerOutputVisitor(sb) {
@Override
public boolean visit(SQLExprTableSource x) {
printTableSourceExpr(x.getExpr());
return false;
}
@Override
public boolean visit(SQLJoinTableSource x) {
throw new NotSupportYetException("not support the syntax of update with join table");
}
};
SQLTableSource tableSource = ast.getTableSource();
if (tableSource instanceof SQLExprTableSource) {
visitor.visit((SQLExprTableSource) tableSource);
} else if (tableSource instanceof SQLJoinTableSource) {
visitor.visit((SQLJoinTableSource) tableSource);
} else {
throw new NotSupportYetException("not support the syntax of update with unknow");
}
return sb.toString();
} | @Test
public void testGetTableName() {
String sql = "update t set a = ?, b = ?, c = ?";
SQLStatement sqlStatement = getSQLStatement(sql);
SqlServerUpdateRecognizer recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement);
Assertions.assertEquals("t", recognizer.getTableName());
//test for alias
sql = "update t t1 set a = ?";
sqlStatement = getSQLStatement(sql);
recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement);
Assertions.assertEquals("t", recognizer.getTableName());
} |
@Override
protected List<ParentRunner<?>> getChildren() {
return children;
} | @Test
void finds_features_based_on_explicit_root_package() throws InitializationError {
Cucumber cucumber = new Cucumber(ExplicitFeaturePath.class);
assertThat(cucumber.getChildren().size(), is(equalTo(7)));
assertThat(cucumber.getChildren().get(1).getDescription().getDisplayName(), is(equalTo("Feature A")));
} |
ShenyuRequest(final HttpMethod method,
final String url,
final Map<String, Collection<String>> headers,
final String body,
final String name,
final RequestTemplate requestTemplate) {
this.httpMethod = checkNotNull(method, "httpMethod of %s", method.name());
this.url = checkNotNull(url, "url");
this.headers = checkNotNull(headers, "headers of %s %s", method, url);
this.body = body;
this.requestTemplate = requestTemplate;
this.name = name;
} | @Test
public void testShenyuRequest() {
Map<String, Collection<String>> headerMap = new HashMap<>();
headerMap.put("header", Arrays.asList("header1", "header2"));
ShenyuRequest request = ShenyuRequest.create(ShenyuRequest.HttpMethod.GET, "https://shenyu.apache.org",
headerMap, null, null, null);
Assert.assertNotNull(request);
} |
@Subscribe
public void inputDeleted(InputDeleted inputDeletedEvent) {
LOG.debug("Input deleted: {}", inputDeletedEvent.id());
final IOState<MessageInput> inputState = inputRegistry.getInputState(inputDeletedEvent.id());
if (inputState != null) {
inputRegistry.remove(inputState);
}
} | @Test
public void inputDeletedStopsInputIfItIsRunning() throws Exception {
final String inputId = "input-id";
@SuppressWarnings("unchecked")
final IOState<MessageInput> inputState = mock(IOState.class);
when(inputState.getState()).thenReturn(IOState.Type.RUNNING);
when(inputRegistry.getInputState(inputId)).thenReturn(inputState);
listener.inputDeleted(InputDeleted.create(inputId));
verify(inputRegistry, never()).remove(any(MessageInput.class));
} |
@Override
public boolean mkdir(String dir) {
if (isDir(dir)) {
// 目录已经存在,创建直接返回
return true;
}
try {
getClient().mkdir(dir);
return true;
} catch (SftpException e) {
throw new JschRuntimeException(e);
}
} | @Test
@Disabled
public void mkDirTest() {
boolean flag = sshjSftp.mkdir("/home/test/temp");
System.out.println("是否创建成功: " + flag);
} |
@Override
public boolean touch(URI uri)
throws IOException {
try {
HeadObjectResponse s3ObjectMetadata = getS3ObjectMetadata(uri);
String encodedUrl = URLEncoder.encode(uri.getHost() + uri.getPath(), StandardCharsets.UTF_8);
String path = sanitizePath(uri.getPath());
CopyObjectRequest request = generateCopyObjectRequest(encodedUrl, uri, path,
ImmutableMap.of("lastModified", String.valueOf(System.currentTimeMillis())));
_s3Client.copyObject(request);
long newUpdateTime = getS3ObjectMetadata(uri).lastModified().toEpochMilli();
return newUpdateTime > s3ObjectMetadata.lastModified().toEpochMilli();
} catch (NoSuchKeyException e) {
String path = sanitizePath(uri.getPath());
PutObjectRequest putObjectRequest = generatePutObjectRequest(uri, path);
_s3Client.putObject(putObjectRequest, RequestBody.fromBytes(new byte[0]));
return true;
} catch (S3Exception e) {
throw new IOException(e);
}
} | @Test
public void testTouchFileInBucket()
throws Exception {
String[] originalFiles = new String[]{"a-touch.txt", "b-touch.txt", "c-touch.txt"};
for (String fileName : originalFiles) {
_s3PinotFS.touch(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, fileName)));
}
ListObjectsV2Response listObjectsV2Response =
_s3Client.listObjectsV2(S3TestUtils.getListObjectRequest(BUCKET, "", true));
String[] response = listObjectsV2Response.contents().stream().map(S3Object::key).filter(x -> x.contains("touch"))
.toArray(String[]::new);
Assert.assertEquals(response.length, originalFiles.length);
Assert.assertTrue(Arrays.equals(response, originalFiles));
} |
@Nullable
public static TraceContextOrSamplingFlags parseB3SingleFormat(CharSequence b3) {
return parseB3SingleFormat(b3, 0, b3.length());
} | @Test void parseB3SingleFormat_parentid_notYetSampled() {
assertThat(parseB3SingleFormat(traceId + "-" + spanId + "-" + parentId).context())
.isEqualToComparingFieldByField(TraceContext.newBuilder()
.traceId(Long.parseUnsignedLong(traceId, 16))
.parentId(Long.parseUnsignedLong(parentId, 16))
.spanId(Long.parseUnsignedLong(spanId, 16)).build()
);
} |
public void autoConfig() throws JoranException {
autoConfig(Configurator.class.getClassLoader());
} | @Test
@Disabled
// this test works only if logback-test.xml or logback.xml files are on the
// classpath.
// However, this is something we try to avoid in order to simplify the life
// of users trying to follow the manual and logback-examples from an IDE
public void reset() throws JoranException {
{
new ContextInitializer(loggerContext).autoConfig();
Appender<ILoggingEvent> appender = root.getAppender("STDOUT");
assertNotNull(appender);
assertTrue(appender instanceof ConsoleAppender);
}
{
loggerContext.stop();
Appender<ILoggingEvent> appender = root.getAppender("STDOUT");
assertNull(appender);
}
} |
public Map<String, String> findTableNames(final Collection<ColumnSegment> columns, final ShardingSphereSchema schema) {
if (1 == simpleTables.size()) {
return findTableNameFromSingleTable(columns);
}
Map<String, String> result = new CaseInsensitiveMap<>();
Map<String, Collection<String>> ownerColumnNames = getOwnerColumnNames(columns);
result.putAll(findTableNameFromSQL(ownerColumnNames));
Collection<String> noOwnerColumnNames = getNoOwnerColumnNames(columns);
result.putAll(findTableNameFromMetaData(noOwnerColumnNames, schema));
result.putAll(findTableNameFromSubquery(columns, result));
return result;
} | @Test
void assertFindTableNameWhenColumnSegmentOwnerPresent() {
SimpleTableSegment tableSegment1 = createTableSegment("table_1", "tbl_1");
SimpleTableSegment tableSegment2 = createTableSegment("table_2", "tbl_2");
ColumnSegment columnSegment = createColumnSegment("table_1", "col");
Map<String, String> actual = new TablesContext(Arrays.asList(tableSegment1, tableSegment2), TypedSPILoader.getService(DatabaseType.class, "FIXTURE"), DefaultDatabase.LOGIC_NAME)
.findTableNames(Collections.singletonList(columnSegment), mock(ShardingSphereSchema.class));
assertFalse(actual.isEmpty());
assertThat(actual.get("table_1.col"), is("table_1"));
} |
@VisibleForTesting
List<String> parseTemplateContentParams(String content) {
return ReUtil.findAllGroup1(PATTERN_PARAMS, content);
} | @Test
public void testParseTemplateContentParams() {
// 准备参数
String content = "正在进行登录操作{operation},您的验证码是{code}";
// mock 方法
// 调用
List<String> params = smsTemplateService.parseTemplateContentParams(content);
// 断言
assertEquals(Lists.newArrayList("operation", "code"), params);
} |
@Override
public JobStatus getJobStatus() {
return JobStatus.RUNNING;
} | @Test
void testJobInformationMethods() throws Exception {
try (MockExecutingContext ctx = new MockExecutingContext()) {
Executing exec = new ExecutingStateBuilder().build(ctx);
final JobID jobId = exec.getExecutionGraph().getJobID();
assertThat(exec.getJob()).isInstanceOf(ArchivedExecutionGraph.class);
assertThat(exec.getJob().getJobID()).isEqualTo(jobId);
assertThat(exec.getJobStatus()).isEqualTo(JobStatus.RUNNING);
}
} |
public void setServerContainer(@Nullable final ServerContainer serverContainer) {
this.serverContainer = serverContainer;
} | @Test
public void setServerContainerTest() {
exporter.setServerContainer(serverContainer);
assertSame(serverContainer, exporter.getServerContainer());
} |
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
underlying().forEach(action);
} | @Test
public void testDelegationOfForEach() {
final BiConsumer<Object, Object> mockBiConsumer = mock(BiConsumer.class);
new PCollectionsHashMapWrapperDelegationChecker<>()
.defineMockConfigurationForVoidMethodInvocation(mock -> mock.forEach(eq(mockBiConsumer)))
.defineWrapperVoidMethodInvocation(wrapper -> wrapper.forEach(mockBiConsumer))
.doVoidMethodDelegationCheck();
} |
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
final long now = time.milliseconds();
final long deadline = now + retryTimeoutMs;
final ValidationResult validationResult = new ValidationResult();
final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
if (!topicDescriptionsStillToValidate.isEmpty()) {
final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
descriptionsForTopic = describeTopicsResult.topicNameValues();
}
Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
if (!topicConfigsStillToValidate.isEmpty()) {
final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
topicConfigsStillToValidate.stream()
.map(topic -> new ConfigResource(Type.TOPIC, topic))
.collect(Collectors.toSet())
);
configsForTopic = describeConfigsResult.values().entrySet().stream()
.collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}
while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
if (!descriptionsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
descriptionsForTopic,
topicConfigs,
topicDescriptionsStillToValidate,
(streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide)
);
}
if (!configsForTopic.isEmpty()) {
doValidateTopic(
validationResult,
configsForTopic,
topicConfigs,
topicConfigsStillToValidate,
(streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide)
);
}
maybeThrowTimeoutException(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
String.format("Could not validate internal topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs)
);
if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
Utils.sleep(100);
}
}
maybeSleep(
Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
deadline,
"validated"
);
}
log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
return validationResult;
} | @Test
public void shouldReportMisconfigurationsOfCleanupPolicyForWindowedChangelogTopics() {
final long retentionMs = 1000;
final long shorterRetentionMs = 900;
setupTopicInMockAdminClient(topic1, windowedChangelogConfig(retentionMs));
setupTopicInMockAdminClient(topic2, windowedChangelogConfig(shorterRetentionMs));
final Map<String, String> windowedChangelogConfigOnlyCleanupPolicyCompact = windowedChangelogConfig(retentionMs);
windowedChangelogConfigOnlyCleanupPolicyCompact.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
setupTopicInMockAdminClient(topic3, windowedChangelogConfigOnlyCleanupPolicyCompact);
final Map<String, String> windowedChangelogConfigOnlyCleanupPolicyDelete = windowedChangelogConfig(shorterRetentionMs);
windowedChangelogConfigOnlyCleanupPolicyDelete.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);
setupTopicInMockAdminClient(topic4, windowedChangelogConfigOnlyCleanupPolicyDelete);
final Map<String, String> windowedChangelogConfigWithRetentionBytes = windowedChangelogConfig(retentionMs);
windowedChangelogConfigWithRetentionBytes.put(TopicConfig.RETENTION_BYTES_CONFIG, "1024");
setupTopicInMockAdminClient(topic5, windowedChangelogConfigWithRetentionBytes);
final InternalTopicConfig internalTopicConfig1 = setupWindowedChangelogTopicConfig(topic1, 1, retentionMs);
final InternalTopicConfig internalTopicConfig2 = setupWindowedChangelogTopicConfig(topic2, 1, retentionMs);
final InternalTopicConfig internalTopicConfig3 = setupWindowedChangelogTopicConfig(topic3, 1, retentionMs);
final InternalTopicConfig internalTopicConfig4 = setupWindowedChangelogTopicConfig(topic4, 1, retentionMs);
final InternalTopicConfig internalTopicConfig5 = setupWindowedChangelogTopicConfig(topic5, 1, retentionMs);
final ValidationResult validationResult = internalTopicManager.validate(mkMap(
mkEntry(topic1, internalTopicConfig1),
mkEntry(topic2, internalTopicConfig2),
mkEntry(topic3, internalTopicConfig3),
mkEntry(topic4, internalTopicConfig4),
mkEntry(topic5, internalTopicConfig5)
));
final Map<String, List<String>> misconfigurationsForTopics = validationResult.misconfigurationsForTopics();
assertThat(validationResult.missingTopics(), empty());
assertThat(misconfigurationsForTopics.size(), is(3));
assertThat(misconfigurationsForTopics, hasKey(topic2));
assertThat(misconfigurationsForTopics.get(topic2).size(), is(1));
assertThat(
misconfigurationsForTopics.get(topic2).get(0),
is("Retention time (" + TopicConfig.RETENTION_MS_CONFIG + ") of existing internal topic " +
topic2 + " is " + shorterRetentionMs + " but should be " + retentionMs + " or larger.")
);
assertThat(misconfigurationsForTopics, hasKey(topic4));
assertThat(misconfigurationsForTopics.get(topic4).size(), is(1));
assertThat(
misconfigurationsForTopics.get(topic4).get(0),
is("Retention time (" + TopicConfig.RETENTION_MS_CONFIG + ") of existing internal topic " +
topic4 + " is " + shorterRetentionMs + " but should be " + retentionMs + " or larger.")
);
assertThat(misconfigurationsForTopics, hasKey(topic5));
assertThat(misconfigurationsForTopics.get(topic5).size(), is(1));
assertThat(
misconfigurationsForTopics.get(topic5).get(0),
is("Retention byte (" + TopicConfig.RETENTION_BYTES_CONFIG + ") of existing internal topic " +
topic5 + " is set but it should be unset.")
);
assertThat(misconfigurationsForTopics, not(hasKey(topic1)));
assertThat(misconfigurationsForTopics, not(hasKey(topic3)));
} |
public Operation parseMethod(
Method method,
List<Parameter> globalParameters,
JsonView jsonViewAnnotation) {
JavaType classType = TypeFactory.defaultInstance().constructType(method.getDeclaringClass());
return parseMethod(
classType.getClass(),
method,
globalParameters,
null,
null,
null,
null,
new ArrayList<>(),
Optional.empty(),
new HashSet<>(),
new ArrayList<>(),
false,
null,
null,
jsonViewAnnotation,
null,
null);
} | @Test(description = "Security Requirement")
public void testSecurityRequirement() {
Reader reader = new Reader(new OpenAPI());
Method[] methods = SecurityResource.class.getDeclaredMethods();
Operation securityOperation = reader.parseMethod(Arrays.stream(methods).filter(
(method -> method.getName().equals("getSecurity"))).findFirst().get(), null, null);
assertNotNull(securityOperation);
List<SecurityRequirement> securityRequirements = securityOperation.getSecurity();
assertNotNull(securityRequirements);
assertEquals(SECURITY_REQUIREMENT_NUMBER, securityRequirements.size());
List<String> scopes = securityRequirements.get(0).get(SECURITY_KEY);
assertNotNull(scopes);
assertEquals(SCOPE_NUMBER, scopes.size());
assertEquals(SCOPE_VALUE1, scopes.get(0));
assertEquals(SCOPE_VALUE2, scopes.get(1));
} |
@Override
@SuppressWarnings("unchecked")
public Output run(RunContext runContext) throws Exception {
Logger logger = runContext.logger();
String renderedNamespace = runContext.render(this.namespace);
String renderedDestination = runContext.render(destination);
final Namespace namespace = runContext.storage().namespace(renderedNamespace);
List<String> renderedFiles;
if (files instanceof String filesString) {
renderedFiles = List.of(runContext.render(filesString));
} else if (files instanceof List<?> filesList) {
renderedFiles = runContext.render((List<String>) filesList);
} else {
throw new IllegalArgumentException("The files property must be a string or a list of strings");
}
Map<String, URI> downloaded = namespace.findAllFilesMatching(PathMatcherPredicate.matches(renderedFiles))
.stream()
.map(Rethrow.throwFunction(file -> {
try (InputStream is = runContext.storage().getFile(file.uri())) {
URI uri = runContext.storage().putFile(is, renderedDestination + file.path());
logger.debug(String.format("Downloaded %s", uri));
return new AbstractMap.SimpleEntry<>(file.path(true).toString(), uri);
}
}))
.collect(Collectors.toMap(AbstractMap.SimpleEntry::getKey, AbstractMap.SimpleEntry::getValue));
runContext.metric(Counter.of("downloaded", downloaded.size()));
return Output.builder().files(downloaded).build();
} | @Test
void shouldDownloadNamespaceFile() throws Exception {
String namespaceId = "io.kestra." + IdUtils.create();
DownloadFiles downloadFiles = DownloadFiles.builder()
.id(DownloadFiles.class.getSimpleName())
.type(DownloadFiles.class.getName())
.files(List.of("**test1.txt"))
.namespace("{{ inputs.namespace }}")
.build();
final RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, downloadFiles, Map.of("namespace", namespaceId));
final Namespace namespace = runContext.storage().namespace(namespaceId);
namespace.putFile(Path.of("/a/b/test1.txt"), new ByteArrayInputStream("1".getBytes(StandardCharsets.UTF_8)));
namespace.putFile(Path.of("/a/b/test2.txt"), new ByteArrayInputStream("2".getBytes(StandardCharsets.UTF_8)));
DownloadFiles.Output output = downloadFiles.run(runContext);
assertThat(output.getFiles().size(), is(1));
assertThat(output.getFiles().get("/a/b/test1.txt"), notNullValue());
} |
@Override
public Optional<String> getNodeName() {
return Optional.ofNullable(nodeName);
} | @Test
public void getNodeName_whenClusterAndNameNotDefined_fallbacksToDefaultName() {
settings.setProperty("sonar.cluster.enabled", "true");
settings.removeProperty("sonar.cluster.node.name");
DefaultNodeInformation underTest = new DefaultNodeInformation(settings.asConfig());
assertThat(underTest.getNodeName()).isNotEmpty();
String nodeNameFirstCallToGetNodeName = underTest.getNodeName().get();
assertThat(nodeNameFirstCallToGetNodeName).startsWith("sonarqube-");
String nodeNameSecondCallToGetNodeName = underTest.getNodeName().get();
assertThat(nodeNameFirstCallToGetNodeName).isEqualTo(nodeNameSecondCallToGetNodeName);
} |
@Override
public void onSubscribe(final PluginData pluginData) {
LOG.info("subscribe plugin data for plugin: [id: {}, name: {}, config: {}]", pluginData.getId(), pluginData.getName(), pluginData.getConfig());
subscribeDataHandler(pluginData, DataEventTypeEnum.UPDATE);
} | @Test
public void testOnSubscribe() {
baseDataCache.cleanPluginData();
PluginData pluginData = PluginData.builder().name(mockName1).build();
commonPluginDataSubscriber.onSubscribe(pluginData);
assertNotNull(baseDataCache.obtainPluginData(pluginData.getName()));
assertEquals(pluginData, baseDataCache.obtainPluginData(pluginData.getName()));
} |
public void applyConfig(ClientBwListDTO configDTO) {
requireNonNull(configDTO, "Client filtering config must not be null");
requireNonNull(configDTO.mode, "Config mode must not be null");
requireNonNull(configDTO.entries, "Config entries must not be null");
ClientSelector selector;
switch (configDTO.mode) {
case DISABLED:
selector = ClientSelectors.any();
break;
case WHITELIST:
selector = createSelector(configDTO.entries);
break;
case BLACKLIST:
selector = ClientSelectors.inverse(createSelector(configDTO.entries));
break;
default:
throw new IllegalArgumentException("Unknown client B/W list mode: " + configDTO.mode);
}
clientEngine.applySelector(selector);
} | @Test
public void testApplyConfig_emptyBlacklist() {
clientEngine.applySelector(ClientSelectors.none());
ClientBwListDTO config = createConfig(Mode.BLACKLIST);
handler.applyConfig(config);
Client client = createClient("127.0.0.1", "a_name");
assertTrue(clientEngine.isClientAllowed(client));
} |
public void consumerSendMessageBack(
final String addr,
final String brokerName,
final MessageExt msg,
final String consumerGroup,
final int delayLevel,
final long timeoutMillis,
final int maxConsumeRetryTimes
) throws RemotingException, MQBrokerException, InterruptedException {
ConsumerSendMsgBackRequestHeader requestHeader = new ConsumerSendMsgBackRequestHeader();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CONSUMER_SEND_MSG_BACK, requestHeader);
requestHeader.setGroup(consumerGroup);
requestHeader.setOriginTopic(msg.getTopic());
requestHeader.setOffset(msg.getCommitLogOffset());
requestHeader.setDelayLevel(delayLevel);
requestHeader.setOriginMsgId(msg.getMsgId());
requestHeader.setMaxReconsumeTimes(maxConsumeRetryTimes);
requestHeader.setBrokerName(brokerName);
RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
return;
}
default:
break;
}
throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
} | @Test
public void testConsumerSendMessageBack() throws RemotingException, InterruptedException, MQBrokerException {
mockInvokeSync();
MessageExt messageExt = mock(MessageExt.class);
mqClientAPI.consumerSendMessageBack(defaultBrokerAddr, brokerName, messageExt, "", 1, defaultTimeout, 1000);
} |
@VisibleForTesting
Map<String, Long> getAllResourceInfos(Object resource) {
if (!isYarnResourceTypesAvailable) {
return Collections.emptyMap();
}
final Map<String, Long> externalResources = new HashMap<>();
final Object[] externalResourcesInfo;
try {
externalResourcesInfo = (Object[]) resourceGetResourcesMethod.invoke(resource);
for (int i = 0; i < externalResourcesInfo.length; i++) {
final String name =
(String) resourceInformationGetNameMethod.invoke(externalResourcesInfo[i]);
final long value =
(long) resourceInformationGetValueMethod.invoke(externalResourcesInfo[i]);
externalResources.put(name, value);
}
} catch (Exception e) {
LOG.warn("Could not obtain the external resources supported by the given Resource.", e);
return Collections.emptyMap();
}
return externalResources;
} | @Test
void testDefaultTwoResourceTypeWithYarnSupport() {
assumeThat(HadoopUtils.isMinHadoopVersion(2, 10)).isTrue();
final Resource resource = Resource.newInstance(100, 1);
// make sure that Resource has at least two associated resources (cpu and memory)
final Map<String, Long> resourcesResult =
ResourceInformationReflector.INSTANCE.getAllResourceInfos(resource);
assertThat(resourcesResult).hasSizeGreaterThanOrEqualTo(2);
} |
@Override
public String getName() {
return ANALYZER_NAME;
} | @Test
public void testGetName() {
assertEquals("Analyzer name wrong.", "Python Distribution Analyzer",
analyzer.getName());
} |
@InvokeOnHeader(Web3jConstants.ETH_COMPILE_SERPENT)
void ethCompileSerpent(Message message) throws IOException {
String sourceCode = message.getHeader(Web3jConstants.SOURCE_CODE, configuration::getSourceCode, String.class);
Request<?, EthCompileSerpent> request = web3j.ethCompileSerpent(sourceCode);
setRequestId(message, request);
EthCompileSerpent response = request.send();
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.getCompiledSourceCode());
}
} | @Test
// Mocks the web3j client so compilation returns "test" and asserts the
// compiled source code ends up as the exchange body.
public void ethCompileSerpentTest() throws Exception {
EthCompileSerpent response = Mockito.mock(EthCompileSerpent.class);
Mockito.when(mockWeb3j.ethCompileSerpent(any())).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.getCompiledSourceCode()).thenReturn("test");
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_COMPILE_SERPENT);
template.send(exchange);
String body = exchange.getIn().getBody(String.class);
assertEquals("test", body);
} |
// Builds a QueryDescription for the given query. Persistent queries carry their
// sink name and (when the result topic is windowed) the window type; all other
// query kinds are described without sink or window information.
public static QueryDescription forQueryMetadata(
final QueryMetadata queryMetadata,
final Map<KsqlHostInfoEntity, KsqlQueryStatus> ksqlHostQueryStatus
) {
if (!(queryMetadata instanceof PersistentQueryMetadata)) {
// Non-persistent (e.g. transient) queries: no sink, no window type.
return create(
queryMetadata,
Optional.empty(),
Optional.empty(),
ksqlHostQueryStatus
);
}
final PersistentQueryMetadata persistentQuery = (PersistentQueryMetadata) queryMetadata;
return create(
persistentQuery,
persistentQuery.getResultTopic()
.map(t -> t.getKeyFormat().getWindowType())
.orElse(Optional.empty()),
persistentQuery.getSinkName(),
ksqlHostQueryStatus
);
} | @Test
// A transient query whose value schema contains a column literally named
// ROWTIME must expose it as an ordinary field in the description, in schema order.
public void shouldHandleRowTimeInValueSchemaForTransientQuery() {
// Given:
final LogicalSchema schema = LogicalSchema.builder()
.valueColumn(ColumnName.of("field1"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of("ROWTIME"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of("field2"), SqlTypes.STRING)
.build();
transientQuery = new TransientQueryMetadata(
SQL_TEXT,
schema,
SOURCE_NAMES,
"execution plan",
queryQueue,
QUERY_ID,
"app id",
topology,
kafkaStreamsBuilder,
STREAMS_PROPS,
PROP_OVERRIDES,
closeTimeout,
10,
ResultType.STREAM,
0L,
0L,
listener,
processingLoggerFactory
);
transientQuery.initialize();
// When:
transientQueryDescription = QueryDescriptionFactory.forQueryMetadata(transientQuery, Collections.emptyMap());
// Then: all three columns, including ROWTIME, appear as plain fields.
assertThat(transientQueryDescription.getFields(), contains(
new FieldInfo("field1", new SchemaInfo(SqlBaseType.INTEGER, null, null), Optional.empty()),
new FieldInfo("ROWTIME", new SchemaInfo(SqlBaseType.BIGINT, null, null), Optional.empty()),
new FieldInfo("field2", new SchemaInfo(SqlBaseType.STRING, null, null), Optional.empty())));
} |
// Static factory for a redistribute transform with no key constraint.
// NOTE(review): the (null, false) constructor arguments' meanings are not
// visible here — presumably "no sharding hint" and a disabled boolean option;
// confirm against RedistributeArbitrarily's constructor.
public static <T> RedistributeArbitrarily<T> arbitrarily() {
return new RedistributeArbitrarily<>(null, false);
} | @Test
// Redistribute after fixed windowing must preserve both the elements and the
// input windowing strategy.
@Category(ValidatesRunner.class)
public void testRedistributeAfterFixedWindows() {
PCollection<KV<String, Integer>> input =
pipeline
.apply(
Create.of(ARBITRARY_KVS)
.withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())))
.apply(Window.into(FixedWindows.of(Duration.standardMinutes(10L))));
PCollection<KV<String, Integer>> output = input.apply(Redistribute.arbitrarily());
PAssert.that(output).containsInAnyOrder(ARBITRARY_KVS);
assertEquals(input.getWindowingStrategy(), output.getWindowingStrategy());
pipeline.run();
} |
// Adds an item to the pending batch and (re)schedules the idle/max-age timers.
// The size snapshot is taken inside the lock so the threshold decision below is
// consistent with the state at the time of insertion.
@Override
public void add(T item) {
final int sizeAtTimeOfAdd;
synchronized (items) {
items.add(item);
sizeAtTimeOfAdd = items.size();
}
/*
WARNING: It is possible that the item that was just added to the list
has been processed by an existing idle task at this point.
By rescheduling the following timers, it is possible that a
superfluous maxTask is generated now OR that the idle task and max
task are scheduled at their specified delays. This could result in
calls to processItems sooner than expected.
*/
// Did we hit the max item threshold?
if (sizeAtTimeOfAdd >= maxItems) {
if (maxIdleMillis < maxBatchMillis) {
cancelTask(idleTask);
}
// Fire the batch immediately once the size threshold is reached.
rescheduleTask(maxTask, 0 /* now! */);
} else {
// Otherwise, schedule idle task and if this is a first item
// also schedule the max batch age task.
if (maxIdleMillis < maxBatchMillis) {
rescheduleTask(idleTask, maxIdleMillis);
}
if (sizeAtTimeOfAdd == 1) {
rescheduleTask(maxTask, maxBatchMillis);
}
}
} | @Test
// A batch must not fire while the accumulator reports not-ready, even past the
// normal trigger delays; once ready, the pending item is delivered.
public void readyLongTrigger() {
TestAccumulator accumulator = new TestAccumulator();
accumulator.ready = false;
timer.advanceTimeMillis(120, SHORT_REAL_TIME_DELAY);
assertTrue("should not have fired yet", accumulator.batch.isEmpty());
accumulator.add(new TestItem("a"));
assertTrue("should not have fired yet", accumulator.batch.isEmpty());
accumulator.ready = true;
timer.advanceTimeMillis(120, LONG_REAL_TIME_DELAY);
assertFalse("should have fired", accumulator.batch.isEmpty());
assertEquals("incorrect batch", "a", accumulator.batch);
} |
// Estimates the size ratio between shuffle data and the final output file:
// columnar formats (ORC/Parquet) and row-based Avro use dedicated codec tables,
// while any other format is assumed to compress 1:1.
public static double shuffleCompressionRatio(
SparkSession spark, FileFormat outputFileFormat, String outputCodec) {
switch (outputFileFormat) {
case ORC:
case PARQUET:
return columnarCompression(shuffleCodec(spark), outputCodec);
case AVRO:
return rowBasedCompression(shuffleCodec(spark), outputCodec);
default:
return 1.0;
}
} | @Test
// With an lz4 shuffle codec, Parquet output ratios follow the columnar table:
// zstd/gzip compress 3x relative to shuffle data, snappy 2x.
public void testParquetCompressionRatios() {
configureShuffle("lz4", true);
double ratio1 = shuffleCompressionRatio(PARQUET, "zstd");
assertThat(ratio1).isEqualTo(3.0);
double ratio2 = shuffleCompressionRatio(PARQUET, "gzip");
assertThat(ratio2).isEqualTo(3.0);
double ratio3 = shuffleCompressionRatio(PARQUET, "snappy");
assertThat(ratio3).isEqualTo(2.0);
} |
// Aggregates per-partition long-column statistics into a single object.
// When every partition has stats (or there is at most one), values are merged
// directly; otherwise missing partitions are extrapolated. NDV is computed from
// merged bitvector estimators when all of them are mutually mergeable, and
// falls back to a density-function or tuner-based estimate otherwise.
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
// First entry fixes the column name/type for the aggregated object.
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
}
LongColumnStatsDataInspector columnStatsData = longInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// Start from an empty estimator of the same kind; per-partition estimators
// are merged into it below.
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
// Direct merge path: every requested partition contributed stats.
LongColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
LongColumnStatsMerger merger = new LongColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
// NDV bounds: at least the largest per-partition NDV, at most their sum.
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
// ndvTuner in [0,1] interpolates between the lower and upper NDV bound.
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setLongStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the css, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsData newData = cso.getStatsData().getLongStats();
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
LongColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getLongStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getLongStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
} | @Test
// When one partition carries an FM sketch and the others HLL, the bitvectors are
// not mutually mergeable: NDV must come from the fallback estimators while the
// first bitvector is retained unchanged in the aggregated stats.
public void testAggregateMultiStatsWhenUnmergeableBitVectors() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2", "part3");
long[] values = {1, 2, 3, 3, 4, 5, 1, 2, 6, 8};
ColumnStatisticsData data1 = new ColStatsBuilder<>(long.class).numNulls(1).numDVs(3)
.low(1L).high(3L).fmSketch(1, 2, 3).kll(1, 2, 3).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(long.class).numNulls(2).numDVs(3)
.low(3L).high(5L).hll(3, 4, 5).kll(3, 4, 5).build();
ColumnStatisticsData data3 = new ColStatsBuilder<>(long.class).numNulls(3).numDVs(4)
.low(1L).high(8L).hll(1, 2, 6, 8).kll(1, 2, 6, 8).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
LongColumnStatsAggregator aggregator = new LongColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the aggregation does not update the bitvector, only numDVs is, it keeps the first bitvector;
// numDVs is set to the maximum among all stats when non-mergeable bitvectors are detected
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(long.class).numNulls(6).numDVs(4)
.low(1L).high(8L).fmSketch(1, 2, 3).kll(values).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
// the use of the density function leads to a different estimation for numNDV
expectedStats = new ColStatsBuilder<>(long.class).numNulls(6).numDVs(6)
.low(1L).high(8L).fmSketch(1, 2, 3).kll(values).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = false;
// ndvTuner interpolates between the lower bound (4) and upper bound (10).
double[] tunerValues = new double[] { 0, 0.5, 0.75, 1 };
long[] expectedDVs = new long[] { 4, 7, 8, 10 };
for (int i = 0; i < tunerValues.length; i++) {
aggregator.ndvTuner = tunerValues[i];
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
expectedStats = new ColStatsBuilder<>(long.class).numNulls(6).numDVs(expectedDVs[i])
.low(1L).high(8L).fmSketch(1, 2, 3).kll(values).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
}
} |
// Fetches the end offsets for the given partitions via the admin client.
// An empty partition set short-circuits to an empty map without contacting the broker.
public static Map<TopicPartition, ListOffsetsResultInfo> fetchEndOffsets(final Collection<TopicPartition> partitions,
final Admin adminClient) {
return partitions.isEmpty()
? Collections.emptyMap()
: getEndOffsets(fetchEndOffsetsFuture(partitions, adminClient));
} | @Test
// An InterruptedException from the admin future must surface to callers as a
// StreamsException rather than leak through unchecked.
public void fetchEndOffsetsShouldRethrowInterruptedExceptionAsStreamsException() throws Exception {
final Admin adminClient = mock(AdminClient.class);
final ListOffsetsResult result = mock(ListOffsetsResult.class);
@SuppressWarnings("unchecked")
final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = mock(KafkaFuture.class);
when(adminClient.listOffsets(any())).thenReturn(result);
when(result.all()).thenReturn(allFuture);
when(allFuture.get()).thenThrow(new InterruptedException());
assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, adminClient));
} |
// Renders the Java source for the KiePMMLCharacteristics class described by the
// given scorecard compilation DTO and returns it keyed by its fully-qualified
// class name (a single-entry, mutable map).
static Map<String, String> getKiePMMLCharacteristicsSourcesMap(
final ScorecardCompilationDTO compilationDTO) {
final String characteristicsClassName = compilationDTO.getCharacteristicsClassName();
final String packageName = compilationDTO.getPackageName();
CompilationUnit cloneCU = JavaParserUtils.getKiePMMLModelCompilationUnit(characteristicsClassName,
packageName,
KIE_PMML_CHARACTERISTICS_TEMPLATE_JAVA, KIE_PMML_CHARACTERISTICS_TEMPLATE);
final ClassOrInterfaceDeclaration characteristicsTemplate =
cloneCU.getClassByName(characteristicsClassName)
.orElseThrow(() -> new KiePMMLException(MAIN_CLASS_NOT_FOUND + ": " + KIE_PMML_CHARACTERISTICS_TEMPLATE));
// Populate the template with the model's characteristics before rendering it.
setCharacteristicsVariableDeclaration(characteristicsClassName, compilationDTO.getCharacteristics(),
compilationDTO.getFields(), characteristicsTemplate);
Map<String, String> sourcesMap = new HashMap<>();
sourcesMap.put(packageName + "." + characteristicsClassName, cloneCU.toString());
return sourcesMap;
} | @Test
// The generated sources map must contain exactly the canonical characteristics
// class name, and the generated source must actually compile.
void getKiePMMLCharacteristicsSourcesMap() {
final CommonCompilationDTO<Scorecard> source =
CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
basicComplexPartialScorePmml,
basicComplexPartialScore,
new PMMLCompilationContextMock(),
BASIC_COMPLEX_PARTIAL_SCORE_SOURCE);
ScorecardCompilationDTO compilationDTO = ScorecardCompilationDTO.fromCompilationDTO(source);
final Map<String, String> retrieved =
KiePMMLCharacteristicsFactory.getKiePMMLCharacteristicsSourcesMap(compilationDTO);
assertThat(retrieved).isNotNull();
assertThat(retrieved).hasSize(1);
String expected = compilationDTO.getPackageCanonicalCharacteristicsClassName();
assertThat(retrieved).containsKey(expected);
// Sanity-check that the emitted source is valid Java.
try {
KieMemoryCompiler.compile(retrieved, Thread.currentThread().getContextClassLoader());
} catch (Exception e) {
fail(e.getMessage());
}
} |
// Pure delegation: artifact URL resolution is handled by the wrapped resolver.
@Override
public URL getLocalArtifactUrl(DependencyJar dependency) {
return delegate.getLocalArtifactUrl(dependency);
} | @Test
// When the robolectric-deps.properties system property points at a mapping
// file, the resolver must return the jar path listed for the coordinate.
public void whenRobolectricDepsPropertiesProperty() throws Exception {
Path depsPath =
tempDirectory.createFile(
"deps.properties", "org.robolectric\\:android-all\\:" + VERSION + ": file-123.jar");
Path jarPath = tempDirectory.createFile("file-123.jar", "...");
properties.setProperty("robolectric-deps.properties", depsPath.toString());
DependencyResolver resolver = new LegacyDependencyResolver(properties, mockClassLoader);
URL jarUrl = resolver.getLocalArtifactUrl(DEPENDENCY_COORDS);
assertThat(Fs.fromUrl(jarUrl)).isEqualTo(jarPath);
} |
// Returns the first (and, by convention, only) configured data-source group rule.
// NOTE(review): assumes dataSourceRuleGroups is non-empty; an empty configuration
// would make next() throw NoSuchElementException — confirm callers guarantee at
// least one group.
public ReadwriteSplittingDataSourceGroupRule getSingleDataSourceGroupRule() {
return dataSourceRuleGroups.values().iterator().next();
} | @Test
// Disabling a read data source through the static attribute must be reflected
// in the single group rule's disabled-data-source set.
void assertUpdateRuleStatusWithNotExistDataSource() {
ReadwriteSplittingRule readwriteSplittingRule = createReadwriteSplittingRule();
readwriteSplittingRule.getAttributes().getAttribute(StaticDataSourceRuleAttribute.class).updateStatus(
new QualifiedDataSource("readwrite_splitting_db.readwrite.read_ds"), DataSourceState.DISABLED);
assertThat(readwriteSplittingRule.getSingleDataSourceGroupRule().getDisabledDataSourceNames(), is(Collections.singleton("read_ds")));
} |
// Decodes a WAL payload: the remaining buffer bytes are read as UTF-8 text and
// parsed either with or without transaction context, then tagged with the
// originating log sequence number.
@Override
public AbstractWALEvent decode(final ByteBuffer data, final BaseLogSequenceNumber logSequenceNumber) {
byte[] rawBytes = new byte[data.remaining()];
data.get(rawBytes);
String payload = new String(rawBytes, StandardCharsets.UTF_8);
AbstractWALEvent event = decodeWithTX ? decodeDataWithTX(payload) : decodeDataIgnoreTX(payload);
event.setLogSequenceNumber(logSequenceNumber);
return event;
} | @Test
// An INSERT payload covering every string-like column type must decode into a
// WriteRowEvent with the schema stripped from the table name and quoted values unwrapped.
void assertDecodeWriteRowEvent() {
MppTableData tableData = new MppTableData();
tableData.setTableName("public.test");
tableData.setOpType("INSERT");
String[] insertTypes = new String[]{"character varying", "text", "char", "character", "nchar", "varchar2", "nvarchar2", "clob"};
tableData.setColumnsType(insertTypes);
tableData.setColumnsName(IntStream.range(0, insertTypes.length).mapToObj(idx -> "data" + idx).toArray(String[]::new));
tableData.setColumnsVal(IntStream.range(0, insertTypes.length).mapToObj(idx -> "'1 2 3'").toArray(String[]::new));
ByteBuffer data = ByteBuffer.wrap(JsonUtils.toJsonString(tableData).getBytes());
WriteRowEvent actual = (WriteRowEvent) new MppdbDecodingPlugin(null, false, false).decode(data, logSequenceNumber);
assertThat(actual.getLogSequenceNumber(), is(logSequenceNumber));
assertThat(actual.getTableName(), is("test"));
IntStream.range(0, insertTypes.length).forEach(each -> assertThat(actual.getAfterRow().get(each), is("1 2 3")));
} |
// Collects the deconstructable components of a generation (in their original
// order) and hands them, together with obsolete bundles, to a background task.
// Nothing is scheduled when there is nothing to tear down.
@Override
public void deconstruct(long generation, List<Object> components, Collection<Bundle> bundles) {
Collection<Deconstructable> destructibleComponents = new ArrayList<>();
for (var component : components) {
if (component instanceof AbstractComponent) {
AbstractComponent abstractComponent = (AbstractComponent) component;
if (abstractComponent.isDeconstructable()) {
destructibleComponents.add(abstractComponent);
}
} else if (component instanceof Provider) {
// Providers are deconstructable by contract.
destructibleComponents.add((Deconstructable) component);
} else if (component instanceof SharedResource) {
// Release shared resources in same order as other components in case of usage without reference counting
destructibleComponents.add(new SharedResourceReleaser(component));
}
}
if (!destructibleComponents.isEmpty() || !bundles.isEmpty()) {
executor.execute(new DestructComponentTask(generation, destructibleComponents, bundles));
}
} | @Test
// Even with no components to destruct, obsolete bundles must be uninstalled by
// the background task.
void bundles_are_uninstalled() throws InterruptedException {
var bundle = new UninstallableMockBundle();
// Done by executor, so it takes some time even with a 0 delay.
deconstructor.deconstruct(0, List.of(), singleton(bundle));
waitForDeconstructToComplete(() -> bundle.uninstalled);
assertTrue(bundle.uninstalled);
} |
// True when the evidence satisfies every criterion: source, confidence,
// case-insensitive name equality, and value. && short-circuits, so the cheaper
// checks run first.
public boolean matches(Evidence evidence) {
return sourceMatches(evidence)
&& confidenceMatches(evidence)
&& name.equalsIgnoreCase(evidence.getName())
&& valueMatches(evidence);
} | @Test
// A regex matcher with wildcard source and wildcard confidence must match all
// evidence whose value matches the pattern, regardless of source or confidence.
public void testRegExWildcardSourceWildcardConfidenceFourMatching() throws Exception {
final EvidenceMatcher regexMediumWildcardSourceMatcher = new EvidenceMatcher(null, "name", "^.*[Vv][al]{2,2}[a-z ]+$", true, null);
assertFalse("regex wildcard source wildcard confidence matcher should not match REGEX_EVIDENCE_HIGHEST", regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_HIGHEST));
assertFalse("regex wildcard source wildcard confidence matcher should not match REGEX_EVIDENCE_HIGH", regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_HIGH));
assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_MEDIUM", regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_MEDIUM));
assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_MEDIUM_SECOND_SOURCE", regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_MEDIUM_SECOND_SOURCE));
assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_MEDIUM_THIRD_SOURCE", regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_MEDIUM_THIRD_SOURCE));
assertTrue("regex wildcard source wildcard confidence matcher should match REGEX_EVIDENCE_LOW", regexMediumWildcardSourceMatcher.matches(REGEX_EVIDENCE_LOW));
} |
// This resolver intentionally resolves nothing: no additional keys are produced
// for any indexer class.
@Override
public Collection<String> resolve(Class<? extends AnalysisIndexer> clazz) {
return Collections.emptyList();
} | @Test
// resolve() must always yield an empty collection.
public void resolve_shouldDoNothing() {
assertThat(underTest.resolve(IssueIndexer.class))
.isEmpty();
} |
// ST_Buffer: returns the geometry containing all points within `distance` of the
// input. Argument checks run in a deliberate order: NaN first, then negative
// (so -infinity reports "negative", not NaN), then the zero-distance shortcut
// that returns the input slice untouched; an empty geometry yields SQL NULL.
@SqlNullable
@Description("Returns the geometry that represents all points whose distance from the specified geometry is less than or equal to the specified distance")
@ScalarFunction("ST_Buffer")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stBuffer(@SqlType(GEOMETRY_TYPE_NAME) Slice input, @SqlType(DOUBLE) double distance)
{
if (isNaN(distance)) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "distance is NaN");
}
if (distance < 0) {
throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "distance is negative");
}
if (distance == 0) {
// Buffering by zero is the identity; skip deserialization entirely.
return input;
}
Geometry geometry = deserialize(input);
if (geometry.isEmpty()) {
return null;
}
return serialize(geometry.buffer(distance));
} | @Test
public void testSTBuffer()
{
assert2DPolygon("ST_Buffer(ST_Point(0, 0), 0.5)", "POLYGON ((0.5 0, 0.4903926402016152 -0.0975451610080641, 0.4619397662556434 -0.1913417161825449, 0.4157348061512726 -0.2777851165098011, 0.3535533905932738 -0.3535533905932737, 0.2777851165098011 -0.4157348061512726, 0.1913417161825449 -0.4619397662556434, 0.0975451610080642 -0.4903926402016152, 0 -0.5, -0.0975451610080641 -0.4903926402016152, -0.1913417161825449 -0.4619397662556434, -0.277785116509801 -0.4157348061512727, -0.3535533905932737 -0.3535533905932738, -0.4157348061512727 -0.2777851165098011, -0.4619397662556434 -0.1913417161825449, -0.4903926402016152 -0.0975451610080643, -0.5 -0.0000000000000001, -0.4903926402016152 0.0975451610080642, -0.4619397662556434 0.1913417161825448, -0.4157348061512727 0.277785116509801, -0.3535533905932738 0.3535533905932737, -0.2777851165098011 0.4157348061512726, -0.1913417161825452 0.4619397662556433, -0.0975451610080643 0.4903926402016152, -0.0000000000000001 0.5, 0.0975451610080642 0.4903926402016152, 0.191341716182545 0.4619397662556433, 0.2777851165098009 0.4157348061512727, 0.3535533905932737 0.3535533905932738, 0.4157348061512726 0.2777851165098011, 0.4619397662556433 0.1913417161825452, 0.4903926402016152 0.0975451610080644, 0.5 0))");
assert2DPolygon("ST_Buffer(ST_LineFromText('LINESTRING (0 0, 1 1, 2 0.5)'), 0.2)", "POLYGON ((0.8585786437626906 1.1414213562373094, 0.8908600605480863 1.167596162296255, 0.9278541681368628 1.1865341227356967, 0.9679635513986066 1.1974174915274993, 1.0094562767938988 1.1997763219933664, 1.050540677712335 1.1935087592239118, 1.0894427190999916 1.1788854381999831, 2.0894427190999916 0.6788854381999831, 2.1226229200749436 0.6579987957938098, 2.1510907909991412 0.6310403482720258, 2.173752327557934 0.5990460936544217, 2.189736659610103 0.5632455532033676, 2.198429518239 0.5250145216112229, 2.1994968417625285 0.4858221959818642, 2.192897613536241 0.4471747154099183, 2.178885438199983 0.4105572809000084, 2.1579987957938096 0.3773770799250564, 2.131040348272026 0.3489092090008588, 2.099046093654422 0.3262476724420662, 2.0632455532033678 0.3102633403898972, 2.0250145216112228 0.3015704817609999, 1.985822195981864 0.3005031582374715, 1.9471747154099184 0.3071023864637593, 1.9105572809000084 0.3211145618000169, 1.0394906098164267 0.7566478973418077, 0.1414213562373095 -0.1414213562373095, 0.1111140466039205 -0.1662939224605091, 0.076536686473018 -0.1847759065022574, 0.0390180644032257 -0.1961570560806461, 0 -0.2, -0.0390180644032256 -0.1961570560806461, -0.076536686473018 -0.1847759065022574, -0.1111140466039204 -0.1662939224605091, -0.1414213562373095 -0.1414213562373095, -0.1662939224605091 -0.1111140466039204, -0.1847759065022574 -0.076536686473018, -0.1961570560806461 -0.0390180644032257, -0.2 -0, -0.1961570560806461 0.0390180644032257, -0.1847759065022574 0.0765366864730179, -0.1662939224605091 0.1111140466039204, -0.1414213562373095 0.1414213562373095, 0.8585786437626906 1.1414213562373094))");
assert2DPolygon("ST_Buffer(ST_GeometryFromText('POLYGON ((0 0, 0 5, 5 5, 5 0, 0 0))'), 1.2)", "POLYGON ((0 -1.2, -0.2341083864193544 -1.1769423364838763, -0.4592201188381084 -1.1086554390135437, -0.6666842796235226 -0.9977635347630542, -0.8485281374238572 -0.8485281374238569, -0.9977635347630545 -0.6666842796235223, -1.1086554390135441 -0.4592201188381076, -1.1769423364838765 -0.234108386419354, -1.2 0, -1.2 5, -1.1769423364838765 5.234108386419354, -1.1086554390135441 5.4592201188381075, -0.9977635347630543 5.666684279623523, -0.8485281374238569 5.848528137423857, -0.6666842796235223 5.997763534763054, -0.4592201188381076 6.108655439013544, -0.2341083864193538 6.176942336483877, 0 6.2, 5 6.2, 5.234108386419354 6.176942336483877, 5.4592201188381075 6.108655439013544, 5.666684279623523 5.997763534763054, 5.848528137423857 5.848528137423857, 5.997763534763054 5.666684279623523, 6.108655439013544 5.4592201188381075, 6.176942336483877 5.234108386419354, 6.2 5, 6.2 0, 6.176942336483877 -0.2341083864193539, 6.108655439013544 -0.4592201188381077, 5.997763534763054 -0.6666842796235226, 5.848528137423857 -0.8485281374238569, 5.666684279623523 -0.9977635347630542, 5.4592201188381075 -1.1086554390135441, 5.234108386419354 -1.1769423364838765, 5 -1.2, 0 -1.2))");
// zero distance
assertFunction("ST_AsText(ST_Buffer(ST_Point(0, 0), 0))", VARCHAR, "POINT (0 0)");
assertFunction("ST_AsText(ST_Buffer(ST_LineFromText('LINESTRING (0 0, 1 1, 2 0.5)'), 0))", VARCHAR, "LINESTRING (0 0, 1 1, 2 0.5)");
assertFunction("ST_AsText(ST_Buffer(ST_GeometryFromText('POLYGON ((0 0, 0 5, 5 5, 5 0, 0 0))'), 0))", VARCHAR, "POLYGON ((0 0, 0 5, 5 5, 5 0, 0 0))");
// geometry collection
assertFunction("ST_AsText(ST_Buffer(ST_Intersection(ST_GeometryFromText('MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))'), ST_GeometryFromText('MULTILINESTRING ((3 4, 6 4), (5 0, 5 4))')), 0.2))", VARCHAR, "MULTIPOLYGON (((5.2 1, 5.196157056080646 0.9609819355967744, 5.184775906502257 0.9234633135269821, 5.166293922460509 0.8888859533960796, 5.141421356237309 0.8585786437626906, 5.11111404660392 0.8337060775394909, 5.076536686473018 0.8152240934977426, 5.039018064403225 0.803842943919354, 5 0.8, 4.960981935596775 0.803842943919354, 4.923463313526982 0.8152240934977426, 4.88888595339608 0.8337060775394909, 4.858578643762691 0.8585786437626904, 4.833706077539491 0.8888859533960796, 4.815224093497743 0.9234633135269821, 4.803842943919354 0.9609819355967743, 4.8 1, 4.803842943919354 1.0390180644032256, 4.815224093497743 1.076536686473018, 4.833706077539491 1.1111140466039204, 4.858578643762691 1.1414213562373094, 4.88888595339608 1.1662939224605091, 4.923463313526982 1.1847759065022574, 4.960981935596775 1.196157056080646, 5 1.2, 5.039018064403225 1.196157056080646, 5.076536686473018 1.1847759065022574, 5.11111404660392 1.1662939224605091, 5.141421356237309 1.1414213562373094, 5.166293922460509 1.1111140466039204, 5.184775906502257 1.0765366864730181, 5.196157056080646 1.0390180644032259, 5.2 1)), ((4 4.2, 4.039018064403225 4.196157056080646, 4.076536686473018 4.184775906502257, 4.11111404660392 4.166293922460509, 4.141421356237309 4.141421356237309, 4.166293922460509 4.11111404660392, 4.184775906502257 4.076536686473018, 4.196157056080646 4.039018064403225, 4.2 4, 4.196157056080646 3.960981935596774, 4.184775906502257 3.923463313526982, 4.166293922460509 3.8888859533960796, 4.141421356237309 3.8585786437626903, 4.11111404660392 3.833706077539491, 4.076536686473018 3.8152240934977426, 4.039018064403225 3.8038429439193537, 4 3.8, 3 3.8, 2.960981935596774 3.8038429439193537, 2.923463313526982 3.8152240934977426, 2.8888859533960796 3.833706077539491, 2.8585786437626903 
3.8585786437626903, 2.8337060775394907 3.8888859533960796, 2.8152240934977426 3.923463313526982, 2.8038429439193537 3.960981935596774, 2.8 4, 2.8038429439193537 4.039018064403225, 2.8152240934977426 4.076536686473018, 2.833706077539491 4.11111404660392, 2.8585786437626903 4.141421356237309, 2.8888859533960796 4.166293922460509, 2.923463313526982 4.184775906502257, 2.960981935596774 4.196157056080646, 3 4.2, 4 4.2)))");
// empty geometry
assertFunction("ST_Buffer(ST_GeometryFromText('POINT EMPTY'), 1)", GEOMETRY, null);
// negative distance
assertInvalidFunction("ST_Buffer(ST_Point(0, 0), -1.2)", "distance is negative");
assertInvalidFunction("ST_Buffer(ST_Point(0, 0), -infinity())", "distance is negative");
// infinity() and nan() distance
assertFunction("ST_AsText(ST_Buffer(ST_Point(0, 0), infinity()))", VARCHAR, "POLYGON EMPTY");
assertInvalidFunction("ST_Buffer(ST_Point(0, 0), nan())", "distance is NaN");
// For small polygons, there was a bug in ESRI that throw an NPE. This
// was fixed (https://github.com/Esri/geometry-api-java/pull/243) to
// return an empty geometry instead. However, JTS does not suffer from
// this problem.
assertFunction("ST_AsText(ST_Buffer(ST_Buffer(ST_Point(177.50102959662, 64.726807421691), 0.0000000001), 0.00005))",
VARCHAR, "POLYGON ((177.50107936028078 64.72681227844056, 177.50107936028078 64.72680256494145, 177.5010774479383 64.72679292130174, 177.5010736620884 64.72678379449388, 177.50106827679528 64.72677573803607, 177.50106131974323 64.72676877387859, 177.50105313736503 64.72676330997743, 177.50104413021958 64.72675958044903, 177.50103449281957 64.72675766189617, 177.5010247004399 64.72675766189425, 177.50101506303915 64.72675958044333, 177.50100605589225 64.72676330996819, 177.5009978735119 64.72676877386616, 177.5009909164571 64.7267757380209, 177.50098553116084 64.7267837944766, 177.50098174530737 64.72679292128298, 177.5009798329611 64.72680256492194, 177.5009798329592 64.72681227844056, 177.5009817453017 64.72682192208028, 177.50098553115157 64.72683104888813, 177.5009909164447 64.72683910534595, 177.50099787349674 64.72684606950342, 177.50100605587494 64.72685153340458, 177.5010150630204 64.72685526293299, 177.5010247004204 64.72685718148584, 177.50103449280007 64.72685718148776, 177.50104413020082 64.72685526293868, 177.50105313734772 64.72685153341382, 177.50106131972808 64.72684606951586, 177.50106827678286 64.72683910536111, 177.50107366207914 64.72683104890541, 177.5010774479326 64.72682192209903, 177.50107936027888 64.72681227846007, 177.50107936028078 64.72681227844056))");
assertFunction("ST_AsText(ST_Buffer(ST_GeometryFromText('POLYGON ((177.0 64.0, 177.0000000001 64.0, 177.0000000001 64.0000000001, 177.0 64.0000000001, 177.0 64.0))'), 0.01))",
VARCHAR, "POLYGON ((177 63.99, 176.99804909677985 63.99019214719597, 176.99617316567634 63.99076120467489, 176.99444429766982 63.99168530387698, 176.99292893218814 63.992928932188136, 176.99168530387698 63.9944442976698, 176.9907612046749 63.996173165676346, 176.99019214719596 63.99804909677984, 176.99 64, 176.99019214719596 64.00195090332016, 176.9907612046749 64.00382683442365, 176.99168530387698 64.0055557024302, 176.99292893218814 64.00707106791187, 176.99444429766982 64.00831469622302, 176.99617316567634 64.00923879542512, 176.99804909677985 64.00980785290403, 177 64.0100000001, 177.00195090332014 64.00980785290403, 177.00382683442365 64.00923879542512, 177.00555570243017 64.00831469622302, 177.00707106791185 64.00707106791187, 177.008314696223 64.0055557024302, 177.0092387954251 64.00382683442365, 177.00980785290403 64.00195090332016, 177.01000000009998 64.0000000001, 177.00980785290403 63.99804909677984, 177.0092387954251 63.996173165676346, 177.008314696223 63.9944442976698, 177.00707106791185 63.992928932188136, 177.00555570243017 63.99168530387698, 177.00382683442365 63.99076120467489, 177.00195090332014 63.99019214719597, 177.0000000001 63.99, 177 63.99))");
} |
@Override
public boolean isEmpty() {
return isEmpty;
} | @Test
public void testCpuResources() {
assertThat(testCpuResources(
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("cpu", new Quantity("1000m")))
.addToRequests(singletonMap("memory", new Quantity("1.1Gi")))
.addToLimits(singletonMap("cpu", new Quantity("1000m")))
.addToLimits(singletonMap("memory", new Quantity("500Mi")))
.build(),
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("cpu", new Quantity("1")))
.addToRequests(singletonMap("memory", new Quantity("1181116006")))
.addToLimits(singletonMap("cpu", new Quantity("1")))
.addToLimits(singletonMap("memory", new Quantity("524288000")))
.build()).isEmpty(), is(true));
assertThat(testCpuResources(
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("cpu", new Quantity("1001m")))
.build(),
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("cpu", new Quantity("1")))
.build()).isEmpty(), is(false));
assertThat(testCpuResources(
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("memory", new Quantity("1.1Gi")))
.build(),
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("memory", new Quantity("1181116007")))
.build()).isEmpty(), is(false));
assertThat(testCpuResources(
new ResourceRequirementsBuilder()
.build(),
new ResourceRequirementsBuilder()
.addToRequests(singletonMap("memory", new Quantity("1181116007")))
.build()).isEmpty(), is(false));
assertThat(testCpuResources(
new ResourceRequirementsBuilder()
.build(),
new ResourceRequirementsBuilder()
.build()).isEmpty(), is(true));
assertThat(testCpuResources(
new ResourceRequirementsBuilder()
.build(),
null).isEmpty(), is(true));
} |
@Override
public <I, O> List<O> map(List<I> data, SerializableFunction<I, O> func, int parallelism) {
return data.stream().parallel().map(throwingMapWrapper(func)).collect(toList());
} | @Test
public void testMap() {
List<Integer> mapList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
List<Integer> result = context.map(mapList, x -> x + 1, 2);
result.removeAll(mapList);
Assertions.assertEquals(1, result.size());
Assertions.assertEquals(11, result.get(0));
} |
@Override
public Collection<String> getAllHandles() throws Exception {
return kubeClient
.getConfigMap(configMapName)
.map(
configMap ->
configMap.getData().keySet().stream()
.filter(configMapKeyFilter)
.filter(
k -> {
try {
final String content =
Objects.requireNonNull(
configMap.getData().get(k));
return !deserializeStateHandle(content)
.isMarkedForDeletion();
} catch (IOException e) {
return false;
}
})
.collect(Collectors.toList()))
.orElseThrow(this::getConfigMapNotExistException);
} | @Test
void testGetAllHandles() throws Exception {
new Context() {
{
runTest(
() -> {
leaderCallbackGrantLeadership();
final KubernetesStateHandleStore<
TestingLongStateHandleHelper.LongStateHandle>
store =
new KubernetesStateHandleStore<>(
flinkKubeClient,
LEADER_CONFIGMAP_NAME,
longStateStorage,
filter,
LOCK_IDENTITY);
final List<String> expected = Arrays.asList(key + 3, key + 2, key + 1);
for (String each : expected) {
store.addAndLock(each, state);
}
final String[] actual = store.getAllHandles().toArray(new String[0]);
expected.sort(Comparator.comparing(e -> e));
assertThat(actual).containsExactlyInAnyOrderElementsOf(expected);
});
}
};
} |
protected String[] getRunCommand(String command, String groupId,
String userName, Path pidFile, Configuration config) {
return getRunCommand(command, groupId, userName, pidFile, config, null);
} | @Test (timeout = 5000)
public void testRunCommandWithMemoryOnlyResources() {
assumeWindows();
Configuration conf = new Configuration();
conf.set(YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, "true");
String[] command = containerExecutor.getRunCommand("echo", "group1", null, null,
conf, Resource.newInstance(1024, 1));
// Assert the cpu and memory limits are set correctly in the command
String[] expected = { Shell.WINUTILS, "task", "create", "-m", "1024", "-c",
"-1", "group1", "cmd /c " + "echo" };
Assert.assertTrue(Arrays.equals(expected, command));
} |
public static String getClusterName(final String originalFilename) {
checkThreePart(originalFilename);
return getThreePart(originalFilename)[1];
} | @Test
public void getClusterName() {
final String cluster = ConfigFileUtils.getClusterName("application+default+application.properties");
assertEquals("default", cluster);
final String Beijing = ConfigFileUtils.getClusterName("abc+Beijing+application.yml");
assertEquals("Beijing", Beijing);
} |
public static String stripSchemeAndOptions(Endpoint endpoint) {
int start = endpoint.getEndpointUri().indexOf(':');
start++;
// Remove any leading '/'
while (endpoint.getEndpointUri().charAt(start) == '/') {
start++;
}
int end = endpoint.getEndpointUri().indexOf('?');
return end == -1 ? endpoint.getEndpointUri().substring(start) : endpoint.getEndpointUri().substring(start, end);
} | @Test
public void testStripSchemeAndOptions() {
Endpoint endpoint = Mockito.mock(Endpoint.class);
Mockito.when(endpoint.getEndpointUri()).thenReturn("direct:hello?world=true");
assertEquals("hello", AbstractSpanDecorator.stripSchemeAndOptions(endpoint));
} |
static Multimap<String, Range<Integer>> extractHighlightRanges(Map<String, List<String>> highlight) {
if (highlight == null || highlight.isEmpty()) {
return ImmutableListMultimap.of();
}
final ImmutableListMultimap.Builder<String, Range<Integer>> builder = ImmutableListMultimap.builder();
highlight.forEach((key, value) -> extractRange(value).forEach(range -> builder.put(key, range)));
return builder.build();
} | @Test
public void multipleHighlights() throws Exception {
final Map<String, List<String>> highlights = ImmutableMap.of(
"message", ImmutableList.of("/<em>usr</em>/sbin/cron[22390]: (root) CMD (/<em>usr</em>/libexec/atrun)"),
"full_message", ImmutableList.of("<78>Aug 22 10:40:00 /<em>usr</em>/sbin/cron[22390]: (root) CMD (/<em>usr</em>/libexec/atrun)")
);
final Multimap<String, Range<Integer>> result = HighlightParser.extractHighlightRanges(highlights);
assertThat(result).isNotNull();
assertThat(result.get("message"))
.isNotEmpty()
.containsExactly(
Range.closed(1, 4),
Range.closed(36, 39)
);
assertThat(result.get("full_message"))
.isNotEmpty()
.containsExactly(
Range.closed(21, 24),
Range.closed(56, 59)
);
} |
public static String[] fieldNamesOf(final AbstractDescribedSObjectBase object) {
if (object == null) {
return NONE;
}
final SObjectDescription description = object.description();
final List<SObjectField> fields = description.getFields();
return fields.stream().map(SObjectField::getName).toArray(String[]::new);
} | @Test
public void shouldGatherAllFieldNames() {
assertThat(QueryHelper.fieldNamesOf(new Account())).contains("Id", "Name", "ShippingCity");
} |
public boolean isBodyOnly() {
return bodyOnly;
} | @Test
public void testDefaultBodyOnly() {
SplunkHECConfiguration config = new SplunkHECConfiguration();
assertFalse(config.isBodyOnly());
} |
public static Number parseNumber(String numberStr) throws NumberFormatException {
if (StrUtil.startWithIgnoreCase(numberStr, "0x")) {
// 0x04表示16进制数
return Long.parseLong(numberStr.substring(2), 16);
} else if (StrUtil.startWith(numberStr, '+')) {
// issue#I79VS7
numberStr = StrUtil.subSuf(numberStr, 1);
}
try {
final NumberFormat format = NumberFormat.getInstance();
if (format instanceof DecimalFormat) {
// issue#1818@Github
// 当字符串数字超出double的长度时,会导致截断,此处使用BigDecimal接收
((DecimalFormat) format).setParseBigDecimal(true);
}
return format.parse(numberStr);
} catch (ParseException e) {
final NumberFormatException nfe = new NumberFormatException(e.getMessage());
nfe.initCause(e);
throw nfe;
}
} | @Test
public void parseHexNumberTest() {
// 千位分隔符去掉
final int v1 = NumberUtil.parseNumber("0xff").intValue();
assertEquals(255, v1);
} |
public static File load(String name) {
try {
if (name == null) {
throw new IllegalArgumentException("name can't be null");
}
String decodedPath = URLDecoder.decode(name, StandardCharsets.UTF_8.name());
return getFileFromFileSystem(decodedPath);
} catch (UnsupportedEncodingException e) {
LOGGER.error("decode name error: {}", e.getMessage(), e);
}
return null;
} | @Test
public void testLoadExistFile() {
File file = FileLoader.load("io/TestFile.txt");
Assertions.assertTrue(file != null && file.exists());
} |
@Override
protected int getJDBCPort() {
return MySQLContainer.MYSQL_PORT;
} | @Test
public void testGetJDBCPortReturnsCorrectValue() {
assertThat(testManager.getJDBCPort()).isEqualTo(MySQLContainer.MYSQL_PORT);
} |
public String resolve(String ensName) {
if (Strings.isBlank(ensName) || (ensName.trim().length() == 1 && ensName.contains("."))) {
return null;
}
try {
if (isValidEnsName(ensName, addressLength)) {
OffchainResolverContract resolver = obtainOffchainResolver(ensName);
boolean supportWildcard =
resolver.supportsInterface(EnsUtils.ENSIP_10_INTERFACE_ID).send();
byte[] nameHash = NameHash.nameHashAsBytes(ensName);
String resolvedName;
if (supportWildcard) {
String dnsEncoded = NameHash.dnsEncode(ensName);
String addrFunction = resolver.addr(nameHash).encodeFunctionCall();
String lookupDataHex =
resolver.resolve(
Numeric.hexStringToByteArray(dnsEncoded),
Numeric.hexStringToByteArray(addrFunction))
.send();
resolvedName = resolveOffchain(lookupDataHex, resolver, LOOKUP_LIMIT);
} else {
try {
resolvedName = resolver.addr(nameHash).send();
} catch (Exception e) {
throw new RuntimeException("Unable to execute Ethereum request: ", e);
}
}
if (!WalletUtils.isValidAddress(resolvedName)) {
throw new EnsResolutionException(
"Unable to resolve address for name: " + ensName);
} else {
return resolvedName;
}
} else {
return ensName;
}
} catch (Exception e) {
throw new EnsResolutionException(e);
}
} | @Test
public void testResolveWildCardWhenResolvedAddressNotValid() throws Exception {
EnsResolverForTest ensResolverForTest = new EnsResolverForTest(web3j);
OffchainResolverContract resolverMock = mock(OffchainResolverContract.class);
ensResolverForTest.setResolverMock(resolverMock);
RemoteFunctionCall suppIntResp = mock(RemoteFunctionCall.class);
when(resolverMock.supportsInterface(any())).thenReturn(suppIntResp);
when(suppIntResp.send()).thenReturn(true);
RemoteFunctionCall addrResp = mock(RemoteFunctionCall.class);
when(resolverMock.addr(any())).thenReturn(addrResp);
when(addrResp.encodeFunctionCall()).thenReturn("0x12345");
RemoteFunctionCall resolveResp = mock(RemoteFunctionCall.class);
when(resolverMock.resolve(any(), any())).thenReturn(resolveResp);
when(resolveResp.send()).thenReturn("0xNotvalidAddress");
assertThrows(
EnsResolutionException.class,
() -> ensResolverForTest.resolve("1.offchainexample.eth"));
} |
public static RectL getBounds(final RectL pIn,
final long pCenterX, final long pCenterY, final double pDegrees,
final RectL pReuse) {
final RectL out = pReuse != null ? pReuse : new RectL();
if (pDegrees == 0) { // optimization
out.top = pIn.top;
out.left = pIn.left;
out.bottom = pIn.bottom;
out.right = pIn.right;
return out;
}
final double radians = pDegrees * Math.PI / 180.;
final double cos = Math.cos(radians);
final double sin = Math.sin(radians);
long inputX;
long inputY;
long outputX;
long outputY;
inputX = pIn.left; // corner 1
inputY = pIn.top;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
out.top = out.bottom = outputY;
out.left = out.right = outputX;
inputX = pIn.right; // corner 2
inputY = pIn.top;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
if (out.top > outputY) {
out.top = outputY;
}
if (out.bottom < outputY) {
out.bottom = outputY;
}
if (out.left > outputX) {
out.left = outputX;
}
if (out.right < outputX) {
out.right = outputX;
}
inputX = pIn.right; // corner 3
inputY = pIn.bottom;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
if (out.top > outputY) {
out.top = outputY;
}
if (out.bottom < outputY) {
out.bottom = outputY;
}
if (out.left > outputX) {
out.left = outputX;
}
if (out.right < outputX) {
out.right = outputX;
}
inputX = pIn.left; // corner 4
inputY = pIn.bottom;
outputX = getRotatedX(inputX, inputY, pCenterX, pCenterY, cos, sin);
outputY = getRotatedY(inputX, inputY, pCenterX, pCenterY, cos, sin);
if (out.top > outputY) {
out.top = outputY;
}
if (out.bottom < outputY) {
out.bottom = outputY;
}
if (out.left > outputX) {
out.left = outputX;
}
if (out.right < outputX) {
out.right = outputX;
}
return out;
} | @Test
public void testGetBounds0() {
final double degrees = 0;
final RectL in = new RectL();
final RectL out = new RectL();
for (int i = 0; i < mIterations; i++) {
in.top = getRandomCoordinate();
in.left = getRandomCoordinate();
in.bottom = getRandomCoordinate();
in.right = getRandomCoordinate();
final long centerX = getRandomCoordinate();
final long centerY = getRandomCoordinate();
RectL.getBounds(in, centerX, centerY, degrees, out);
Assert.assertEquals(in.top, out.top);
Assert.assertEquals(in.left, out.left);
Assert.assertEquals(in.bottom, out.bottom);
Assert.assertEquals(in.right, out.right);
}
} |
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
PointList pointList = way.getTag("point_list", null);
if (pointList != null) {
if (pointList.isEmpty() || !pointList.is3D()) {
if (maxSlopeEnc != null)
maxSlopeEnc.setDecimal(false, edgeId, edgeIntAccess, 0);
if (averageSlopeEnc != null)
averageSlopeEnc.setDecimal(false, edgeId, edgeIntAccess, 0);
return;
}
// Calculate 2d distance, although pointList might be 3D.
// This calculation is a bit expensive and edge_distance is available already, but this would be in 3D
double distance2D = DistanceCalcEarth.calcDistance(pointList, false);
if (distance2D < MIN_LENGTH) {
if (averageSlopeEnc != null)
// default is minimum of average_slope is negative so we have to explicitly set it to 0
averageSlopeEnc.setDecimal(false, edgeId, edgeIntAccess, 0);
return;
}
double towerNodeSlope = calcSlope(pointList.getEle(pointList.size() - 1) - pointList.getEle(0), distance2D);
if (Double.isNaN(towerNodeSlope))
throw new IllegalArgumentException("average_slope was NaN for OSM way ID " + way.getId());
if (averageSlopeEnc != null) {
if (towerNodeSlope >= 0)
averageSlopeEnc.setDecimal(false, edgeId, edgeIntAccess, Math.min(towerNodeSlope, averageSlopeEnc.getMaxStorableDecimal()));
else
averageSlopeEnc.setDecimal(true, edgeId, edgeIntAccess, Math.min(Math.abs(towerNodeSlope), averageSlopeEnc.getMaxStorableDecimal()));
}
if (maxSlopeEnc != null) {
// max_slope is more error-prone as the shorter distances increase the fluctuation
// so apply some more filtering (here we use the average elevation delta of the previous two points)
double maxSlope = 0, prevDist = 0, prevLat = pointList.getLat(0), prevLon = pointList.getLon(0);
for (int i = 1; i < pointList.size(); i++) {
double pillarDistance2D = DistanceCalcEarth.DIST_EARTH.calcDist(prevLat, prevLon, pointList.getLat(i), pointList.getLon(i));
if (i > 1 && prevDist > MIN_LENGTH) {
double averagedPrevEle = (pointList.getEle(i - 1) + pointList.getEle(i - 2)) / 2;
double tmpSlope = calcSlope(pointList.getEle(i) - averagedPrevEle, pillarDistance2D + prevDist / 2);
maxSlope = Math.abs(tmpSlope) > Math.abs(maxSlope) ? tmpSlope : maxSlope;
}
prevDist = pillarDistance2D;
prevLat = pointList.getLat(i);
prevLon = pointList.getLon(i);
}
// For tunnels and bridges we cannot trust the pillar node elevation and ignore all changes.
// Probably we should somehow recalculate even the average_slope after elevation interpolation? See EdgeElevationInterpolator
if (way.hasTag("tunnel", "yes") || way.hasTag("bridge", "yes") || way.hasTag("highway", "steps"))
maxSlope = towerNodeSlope;
else
maxSlope = Math.abs(towerNodeSlope) > Math.abs(maxSlope) ? towerNodeSlope : maxSlope;
if (Double.isNaN(maxSlope))
throw new IllegalArgumentException("max_slope was NaN for OSM way ID " + way.getId());
double val = Math.max(maxSlope, maxSlopeEnc.getMinStorableDecimal());
maxSlopeEnc.setDecimal(false, edgeId, edgeIntAccess, Math.min(maxSlopeEnc.getMaxStorableDecimal(), val));
}
}
} | @Test
void simpleElevation() {
DecimalEncodedValue averageEnc = AverageSlope.create();
DecimalEncodedValue maxEnc = MaxSlope.create();
new EncodingManager.Builder().add(averageEnc).add(maxEnc).build();
SlopeCalculator creator = new SlopeCalculator(maxEnc, averageEnc);
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
int edgeId = 0;
ReaderWay way = new ReaderWay(1L);
PointList pointList = new PointList(5, true);
pointList.add(51.0, 12.001, 0);
pointList.add(51.0, 12.002, 3.5); // ~70m
pointList.add(51.0, 12.003, 4); // ~140m
pointList.add(51.0, 12.004, 2); // ~210m
way.setTag("point_list", pointList);
creator.handleWayTags(edgeId, edgeIntAccess, way, IntsRef.EMPTY);
assertEquals(Math.round(2.0 / 210 * 100), averageEnc.getDecimal(false, edgeId, edgeIntAccess), 1e-3);
assertEquals(-Math.round(2.0 / 210 * 100), averageEnc.getDecimal(true, edgeId, edgeIntAccess), 1e-3);
assertEquals(Math.round(1.75 / 105 * 100), maxEnc.getDecimal(false, edgeId, edgeIntAccess), 1e-3);
assertEquals(-Math.round(1.75 / 105 * 100), maxEnc.getDecimal(true, edgeId, edgeIntAccess), 1e-3);
} |
public static byte[] generateCreate2ContractAddress(
byte[] address, byte[] salt, byte[] initCode) {
if (address.length != ADDRESS_BYTE_SIZE) {
throw new RuntimeException("Invalid address size");
}
if (salt.length != SALT_SIZE) {
throw new RuntimeException("Invalid salt size");
}
byte[] hashedInitCode = Hash.sha3(initCode);
byte[] buffer = new byte[1 + address.length + salt.length + hashedInitCode.length];
buffer[0] = (byte) 0xff;
int offset = 1;
System.arraycopy(address, 0, buffer, offset, address.length);
offset += address.length;
System.arraycopy(salt, 0, buffer, offset, salt.length);
offset += salt.length;
System.arraycopy(hashedInitCode, 0, buffer, offset, hashedInitCode.length);
byte[] hashed = Hash.sha3(buffer);
return Arrays.copyOfRange(hashed, 12, hashed.length);
} | @Test
public void testEIP1014Create2ContractAddress() {
// https://eips.ethereum.org/EIPS/eip-1014
// example 0
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0x0000000000000000000000000000000000000000",
hexStringToByteArray(
"0x0000000000000000000000000000000000000000000000000000000000000000"),
hexStringToByteArray("0x00"))),
("0x4D1A2e2bB4F88F0250f26Ffff098B0b30B26BF38"));
// example 1
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0xdeadbeef00000000000000000000000000000000",
hexStringToByteArray(
"0x0000000000000000000000000000000000000000000000000000000000000000"),
hexStringToByteArray("0x00"))),
("0xB928f69Bb1D91Cd65274e3c79d8986362984fDA3"));
// example 2
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0xdeadbeef00000000000000000000000000000000",
hexStringToByteArray(
"0x000000000000000000000000feed000000000000000000000000000000000000"),
hexStringToByteArray("0x00"))),
("0xD04116cDd17beBE565EB2422F2497E06cC1C9833"));
// example 3
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0x0000000000000000000000000000000000000000",
hexStringToByteArray(
"0x0000000000000000000000000000000000000000000000000000000000000000"),
hexStringToByteArray("0xdeadbeef"))),
("0x70f2b2914A2a4b783FaEFb75f459A580616Fcb5e"));
// example 4
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0x00000000000000000000000000000000deadbeef",
hexStringToByteArray(
"0x00000000000000000000000000000000000000000000000000000000cafebabe"),
hexStringToByteArray("0xdeadbeef"))),
("0x60f3f640a8508fC6a86d45DF051962668E1e8AC7"));
// example 5
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0x00000000000000000000000000000000deadbeef",
hexStringToByteArray(
"0x00000000000000000000000000000000000000000000000000000000cafebabe"),
hexStringToByteArray(
"0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"))),
("0x1d8bfDC5D46DC4f61D6b6115972536eBE6A8854C"));
// example 6
assertEquals(
Keys.toChecksumAddress(
generateCreate2ContractAddress(
"0x0000000000000000000000000000000000000000",
hexStringToByteArray(
"0x0000000000000000000000000000000000000000000000000000000000000000"),
hexStringToByteArray("0x"))),
("0xE33C0C7F7df4809055C3ebA6c09CFe4BaF1BD9e0"));
} |
@Override
public List<String> listTableNames(String dbName) {
ConnectorMetadata metadata = metadataOfDb(dbName);
return metadata.listTableNames(dbName);
} | @Test
void testListTableNames(@Mocked ConnectorMetadata connectorMetadata) {
new Expectations() {
{
connectorMetadata.listTableNames("test_db");
result = ImmutableList.of("test_tbl1", "test_tbl2");
times = 1;
}
};
CatalogConnectorMetadata catalogConnectorMetadata = new CatalogConnectorMetadata(
connectorMetadata,
informationSchemaMetadata,
metaMetadata
);
List<String> tblNames = catalogConnectorMetadata.listTableNames(InfoSchemaDb.DATABASE_NAME);
List<String> expected = ImmutableList.of("tables", "table_privileges", "referential_constraints",
"key_column_usage", "routines", "schemata", "columns", "character_sets", "collations",
"table_constraints", "engines", "user_privileges", "schema_privileges", "statistics",
"triggers", "events", "views", "partitions", "column_privileges"
);
assertEquals(expected, tblNames);
tblNames = catalogConnectorMetadata.listTableNames("test_db");
expected = ImmutableList.of("test_tbl1", "test_tbl2");
assertEquals(expected, tblNames);
} |
public Sketch<?> merge(Sketch<?> left, Sketch<?> right) {
if (left instanceof NormalSketch && right instanceof NormalSketch) {
return mergeNormalWithNormal(asNormal(left), asNormal(right));
} else if (left instanceof NormalSketch && right instanceof SparseSketch) {
return mergeNormalWithSparse(asNormal(left), asSparse(right));
} else if (left instanceof SparseSketch && right instanceof NormalSketch) {
return mergeNormalWithSparse(asNormal(right), asSparse(left));
} else if (left instanceof SparseSketch && right instanceof SparseSketch) {
return mergeSparseWithSparse(asSparse(left), asSparse(right));
} else {
throw new IllegalArgumentException(
String.format("Invalid sketch types: left=%s, right=%s", right.getClass(), left.getClass()));
}
} | @Test
public void requireThatMergingTwoThresholdSizeSparseSketchesReturnsNormalSketch() {
SparseSketch s1 = SketchUtils.createSparseSketch();
SparseSketch s2 = SketchUtils.createSparseSketch();
// Fill sketches with disjoint data.
for (int i = 0; i < HyperLogLog.SPARSE_SKETCH_CONVERSION_THRESHOLD; i++) {
s1.aggregate(i);
s2.aggregate(i + HyperLogLog.SPARSE_SKETCH_CONVERSION_THRESHOLD);
}
Sketch<?> result = merger.merge(s1, s2);
assertEquals(result.getClass(), NormalSketch.class);
List<Integer> unionOfSketchData = new ArrayList<>();
unionOfSketchData.addAll(s1.data());
unionOfSketchData.addAll(s2.data());
Integer[] expectedValues = unionOfSketchData.toArray(new Integer[unionOfSketchData.size()]);
SketchUtils.assertSketchContains(result, expectedValues);
} |
@NonNull
@Override
public Object configure(CNode config, ConfigurationContext context) throws ConfiguratorException {
return Stapler.lookupConverter(target)
.convert(
target,
context.getSecretSourceResolver()
.resolve(config.asScalar().toString()));
} | @Test
public void _Integer() throws Exception {
Configurator c = registry.lookupOrFail(Integer.class);
final Object value = c.configure(new Scalar("123"), context);
assertEquals(123, (int) value);
} |
public ProtocolBuilder appendParameters(Map<String, String> appendParameters) {
this.parameters = appendParameters(parameters, appendParameters);
return getThis();
} | @Test
void appendParameters() {
Map<String, String> source = new HashMap<>();
source.put("default.num", "one");
source.put("num", "ONE");
ProtocolBuilder builder = new ProtocolBuilder();
builder.appendParameters(source);
Map<String, String> parameters = builder.build().getParameters();
Assertions.assertTrue(parameters.containsKey("default.num"));
Assertions.assertEquals("ONE", parameters.get("num"));
} |
public static String normalizeTimePartitionName(String partitionName, PartitionField partitionField, Schema schema,
Type type) {
DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
boolean parseFromDate = true;
IcebergPartitionTransform transform = IcebergPartitionTransform.fromString(partitionField.transform().toString());
if (transform == IcebergPartitionTransform.YEAR) {
partitionName += "-01-01";
} else if (transform == IcebergPartitionTransform.MONTH) {
partitionName += "-01";
} else if (transform == IcebergPartitionTransform.DAY) {
dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
} else if (transform == IcebergPartitionTransform.HOUR) {
dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH");
parseFromDate = false;
} else {
throw new StarRocksConnectorException("Unsupported partition transform to normalize: %s",
partitionField.transform().toString());
}
// partition name formatter
DateTimeFormatter formatter = null;
if (type.isDate()) {
formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd");
} else {
formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
}
// If has timestamp with time zone, should compute the time zone offset to UTC
ZoneId zoneId;
if (schema.findType(partitionField.sourceId()).equals(Types.TimestampType.withZone())) {
zoneId = TimeUtils.getTimeZone().toZoneId();
} else {
zoneId = ZoneOffset.UTC;
}
String result;
try {
if (parseFromDate) {
LocalDate date = LocalDate.parse(partitionName, dateTimeFormatter);
if (type.isDate()) {
result = date.format(formatter);
} else {
LocalDateTime dateTime = date.atStartOfDay().atZone(ZoneOffset.UTC).
withZoneSameInstant(zoneId).toLocalDateTime();
result = dateTime.format(formatter);
}
} else {
// parse from datetime which contains hour
LocalDateTime dateTime = LocalDateTime.parse(partitionName, dateTimeFormatter).atZone(ZoneOffset.UTC).
withZoneSameInstant(zoneId).toLocalDateTime();
result = dateTime.format(formatter);
}
} catch (Exception e) {
LOG.warn("parse partition name failed, partitionName: {}, partitionField: {}, type: {}",
partitionName, partitionField, type);
throw new StarRocksConnectorException("parse/format partition name failed", e);
}
return result;
} | @Test
public void testNormalizeTimePartitionName() {
new MockUp<TimeUtils>() {
@Mock
public TimeZone getTimeZone() {
return TimeZone.getTimeZone("GMT+6");
}
};
// year
// with time zone
String partitionName = "2020";
PartitionField partitionField = SPEC_D_2.fields().get(0);
String result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATETIME);
Assert.assertEquals("2020-01-01 06:00:00", result);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATE);
Assert.assertEquals("2020-01-01", result);
// without time zone
partitionField = SPEC_E_2.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_E,
Type.DATETIME);
Assert.assertEquals("2020-01-01 00:00:00", result);
// month
// with time zone
partitionName = "2020-02";
partitionField = SPEC_D_3.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATETIME);
Assert.assertEquals("2020-02-01 06:00:00", result);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATE);
Assert.assertEquals("2020-02-01", result);
// without time zone
partitionField = SPEC_E_3.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_E,
Type.DATETIME);
Assert.assertEquals("2020-02-01 00:00:00", result);
// day
// with time zone
partitionName = "2020-01-02";
partitionField = SPEC_D_4.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATETIME);
Assert.assertEquals("2020-01-02 06:00:00", result);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATE);
Assert.assertEquals("2020-01-02", result);
// without time zone
partitionField = SPEC_E_4.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_E,
Type.DATETIME);
Assert.assertEquals("2020-01-02 00:00:00", result);
// hour
partitionName = "2020-01-02-12";
partitionField = SPEC_D_5.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATETIME);
Assert.assertEquals("2020-01-02 18:00:00", result);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_D,
Type.DATE);
Assert.assertEquals("2020-01-02", result);
// without time zone
partitionField = SPEC_E_5.fields().get(0);
result = IcebergPartitionUtils.normalizeTimePartitionName(partitionName, partitionField, SCHEMA_E,
Type.DATETIME);
Assert.assertEquals("2020-01-02 12:00:00", result);
} |
// Translates a Spring MVC type-mismatch (e.g. a non-numeric value bound to a numeric
// path variable) into a uniform ShenyuAdminResult error payload.
// NOTE(review): requireNonNull will NPE if getRequiredType() returns null; this code
// assumes Spring always populates it for this exception type - confirm.
@ExceptionHandler(MethodArgumentTypeMismatchException.class)
protected ShenyuAdminResult handleMethodArgumentTypeMismatchException(final MethodArgumentTypeMismatchException e) {
LOG.warn("method argument type mismatch", e);
return ShenyuAdminResult.error(String.format("%s should be of type %s", e.getName(), Objects.requireNonNull(e.getRequiredType()).getName()));
} | @Test
// Verifies the handler maps a mocked mismatch exception to the generic ERROR code
// and embeds the "should be of type" message fragment.
public void testHandleMethodArgumentTypeMismatchException() {
MethodArgumentTypeMismatchException exception = mock(MethodArgumentTypeMismatchException.class);
Class clazz = MethodArgumentTypeMismatchException.class;
when(exception.getRequiredType()).thenReturn(clazz);
ShenyuAdminResult result = exceptionHandlersUnderTest.handleMethodArgumentTypeMismatchException(exception);
Assertions.assertEquals(result.getCode().intValue(), CommonErrorCode.ERROR);
MatcherAssert.assertThat(result.getMessage(), containsString("should be of type"));
}
// Answers a producer-partition state query by delegating synchronously to the
// scheduler. A PartitionProducerDisposedException (producer already disposed) is
// logged at INFO and returned as an exceptionally-completed future rather than thrown,
// so RPC callers observe it through the future.
@Override
public CompletableFuture<ExecutionState> requestPartitionState(
final IntermediateDataSetID intermediateResultId,
final ResultPartitionID resultPartitionId) {
try {
return CompletableFuture.completedFuture(
schedulerNG.requestPartitionState(intermediateResultId, resultPartitionId));
} catch (PartitionProducerDisposedException e) {
log.info("Error while requesting partition state", e);
return FutureUtils.completedExceptionally(e);
}
} | @Test
// End-to-end check: deploys a producer/consumer job, finishes the producer task,
// then asserts the real partition reports FINISHED while unknown partition ids,
// wrong dataset ids, and stale execution attempts fail with the expected exceptions.
void testRequestPartitionState() throws Exception {
final JobGraph producerConsumerJobGraph = producerConsumerJobGraph();
try (final JobMaster jobMaster =
new JobMasterBuilder(producerConsumerJobGraph, rpcService)
.withConfiguration(configuration)
.withHighAvailabilityServices(haServices)
.withHeartbeatServices(heartbeatServices)
.createJobMaster()) {
jobMaster.start();
final CompletableFuture<TaskDeploymentDescriptor> tddFuture = new CompletableFuture<>();
final TestingTaskExecutorGateway testingTaskExecutorGateway =
new TestingTaskExecutorGatewayBuilder()
.setSubmitTaskConsumer(
(taskDeploymentDescriptor, jobMasterId) -> {
tddFuture.complete(taskDeploymentDescriptor);
return CompletableFuture.completedFuture(Acknowledge.get());
})
.createTestingTaskExecutorGateway();
final LocalUnresolvedTaskManagerLocation taskManagerLocation =
new LocalUnresolvedTaskManagerLocation();
final JobMasterGateway jobMasterGateway =
jobMaster.getSelfGateway(JobMasterGateway.class);
final Collection<SlotOffer> slotOffers =
registerSlotsAtJobMaster(
1,
jobMasterGateway,
producerConsumerJobGraph.getJobID(),
testingTaskExecutorGateway,
taskManagerLocation);
assertThat(slotOffers).hasSize(1);
// obtain tdd for the result partition ids
final TaskDeploymentDescriptor tdd = tddFuture.get();
assertThat(tdd.getProducedPartitions()).hasSize(1);
final ResultPartitionDeploymentDescriptor partition =
tdd.getProducedPartitions().iterator().next();
final ExecutionAttemptID executionAttemptId = tdd.getExecutionAttemptId();
// cloned id proves lookup works on equal-but-not-identical attempt ids
final ExecutionAttemptID copiedExecutionAttemptId =
InstantiationUtil.clone(executionAttemptId);
// finish the producer task
jobMasterGateway
.updateTaskExecutionState(
SchedulerTestingUtils.createFinishedTaskExecutionState(
executionAttemptId))
.get();
// request the state of the result partition of the producer
final ResultPartitionID partitionId =
new ResultPartitionID(partition.getPartitionId(), copiedExecutionAttemptId);
CompletableFuture<ExecutionState> partitionStateFuture =
jobMasterGateway.requestPartitionState(partition.getResultId(), partitionId);
assertThat(partitionStateFuture.get()).isEqualTo(ExecutionState.FINISHED);
// ask for unknown result partition
partitionStateFuture =
jobMasterGateway.requestPartitionState(
partition.getResultId(), new ResultPartitionID());
assertThatThrownBy(partitionStateFuture::get)
.hasRootCauseInstanceOf(IllegalArgumentException.class);
// ask for wrong intermediate data set id
partitionStateFuture =
jobMasterGateway.requestPartitionState(
new IntermediateDataSetID(), partitionId);
assertThatThrownBy(partitionStateFuture::get)
.hasRootCauseInstanceOf(IllegalArgumentException.class);
// ask for "old" execution
partitionStateFuture =
jobMasterGateway.requestPartitionState(
partition.getResultId(),
new ResultPartitionID(
partition.getPartitionId(), createExecutionAttemptId()));
assertThatThrownBy(partitionStateFuture::get)
.hasRootCauseInstanceOf(PartitionProducerDisposedException.class);
}
}
// Varargs convenience overload: two mandatory expected elements plus the rest are
// folded into a single collection and delegated to containsAtLeastElementsIn.
// Null elements are allowed both as expectations and in the subject.
@CanIgnoreReturnValue
public final Ordered containsAtLeast(
@Nullable Object firstExpected,
@Nullable Object secondExpected,
@Nullable Object @Nullable ... restOfExpected) {
return containsAtLeastElementsIn(accumulate(firstExpected, secondExpected, restOfExpected));
} | @Test
// The empty string is a legitimate expected element; missing "" must be reported
// with the standard "missing" failure keys and values.
public void iterableContainsAtLeastFailsWithEmptyString() {
expectFailureWhenTestingThat(asList("a", null)).containsAtLeast("", null);
assertFailureKeys("missing (1)", "---", "expected to contain at least", "but was");
assertFailureValue("missing (1)", "");
}
public static String getGcloudCancelCommand(DataflowPipelineOptions options, String jobId) {
// If using a different Dataflow API than default, prefix command with an API override.
String dataflowApiOverridePrefix = "";
String apiUrl = options.getDataflowClient().getBaseUrl();
if (!apiUrl.equals(Dataflow.DEFAULT_BASE_URL)) {
dataflowApiOverridePrefix = String.format("%s=%s ", ENDPOINT_OVERRIDE_ENV_VAR, apiUrl);
}
// Assemble cancel command from optional prefix and project/job parameters.
return String.format(
"%s%s jobs --project=%s cancel --region=%s %s",
dataflowApiOverridePrefix,
GCLOUD_DATAFLOW_PREFIX,
options.getProject(),
options.getRegion(),
jobId);
} | @Test
public void testOverridesEndpointWithStagedDataflowEndpoint() {
DataflowPipelineOptions options =
PipelineOptionsFactory.create().as(DataflowPipelineOptions.class);
options.setProject(PROJECT_ID);
options.setRegion(REGION_ID);
options.setGcpCredential(new TestCredential());
String stagingDataflowEndpoint = "v0neverExisted";
options.setDataflowEndpoint(stagingDataflowEndpoint);
String cancelCommand = MonitoringUtil.getGcloudCancelCommand(options, JOB_ID);
assertEquals(
"CLOUDSDK_API_ENDPOINT_OVERRIDES_DATAFLOW=https://dataflow.googleapis.com/v0neverExisted/ "
+ "gcloud dataflow jobs --project=someProject cancel --region=thatRegion 1234",
cancelCommand);
} |
// Sends the mail, wrapping any MessagingException in the unchecked MailException.
// For SendFailedException specifically, the invalid recipient addresses are
// extracted and included in the message so the caller can see which ones failed.
public String send() throws MailException {
try {
return doSend();
} catch (MessagingException e) {
if (e instanceof SendFailedException) {
// When an address is invalid, surface the detailed invalid-address list.
final Address[] invalidAddresses = ((SendFailedException) e).getInvalidAddresses();
final String msg = StrUtil.format("Invalid Addresses: {}", ArrayUtil.toString(invalidAddresses));
throw new MailException(msg, e);
}
throw new MailException(e);
}
} | @Test
// Manual smoke test (disabled): sends an HTML mail via the utility facade.
@Disabled
public void sendHtmlTest() {
MailUtil.send("hutool@foxmail.com", "测试", "<h1>邮件来自Hutool测试</h1>", true);
}
// Resolves the Avro output CodecFactory from job configuration.
// Returns null when output compression is disabled. Resolution order when enabled:
// 1) explicit Avro codec name (AvroJob.OUTPUT_CODEC);
// 2) a Hadoop compression codec class mapped to its Avro equivalent (and cached
//    back into AvroJob.OUTPUT_CODEC);
// 3) deflate at the configured level as the fallback.
static CodecFactory getCodecFactory(JobConf job) {
CodecFactory factory = null;
if (FileOutputFormat.getCompressOutput(job)) {
int deflateLevel = job.getInt(DEFLATE_LEVEL_KEY, DEFAULT_DEFLATE_LEVEL);
int xzLevel = job.getInt(XZ_LEVEL_KEY, DEFAULT_XZ_LEVEL);
int zstdLevel = job.getInt(ZSTD_LEVEL_KEY, DEFAULT_ZSTANDARD_LEVEL);
boolean zstdBufferPool = job.getBoolean(ZSTD_BUFFERPOOL_KEY, DEFAULT_ZSTANDARD_BUFFERPOOL);
String codecName = job.get(AvroJob.OUTPUT_CODEC);
if (codecName == null) {
// No Avro codec configured; try to translate a Hadoop codec class name.
String codecClassName = job.get("mapred.output.compression.codec", null);
String avroCodecName = HadoopCodecFactory.getAvroCodecName(codecClassName);
if (codecClassName != null && avroCodecName != null) {
factory = HadoopCodecFactory.fromHadoopString(codecClassName);
job.set(AvroJob.OUTPUT_CODEC, avroCodecName);
return factory;
} else {
return CodecFactory.deflateCodec(deflateLevel);
}
} else {
// Codecs with tunable levels get their configured level applied explicitly.
if (codecName.equals(DEFLATE_CODEC)) {
factory = CodecFactory.deflateCodec(deflateLevel);
} else if (codecName.equals(XZ_CODEC)) {
factory = CodecFactory.xzCodec(xzLevel);
} else if (codecName.equals(ZSTANDARD_CODEC)) {
factory = CodecFactory.zstandardCodec(zstdLevel, false, zstdBufferPool);
} else {
factory = CodecFactory.fromString(codecName);
}
}
}
return factory;
} | @Test
// A Hadoop BZip2Codec class name must resolve to the same factory type as the
// Avro "bzip2" codec string.
void bZip2CodecUsingHadoopClass() {
CodecFactory avroBZip2Codec = CodecFactory.fromString("bzip2");
JobConf job = new JobConf();
job.set("mapred.output.compress", "true");
job.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.BZip2Codec");
CodecFactory factory = AvroOutputFormat.getCodecFactory(job);
assertNotNull(factory);
assertEquals(factory.getClass(), avroBZip2Codec.getClass());
}
// Computes the Lakos cumulative dependency metrics over a component graph:
// CCD (each component counts itself plus its transitive dependencies), ACD
// (CCD averaged per component), RACD (ACD relative to component count) and
// NCCD (CCD normalized against a balanced binary tree of the same size).
// NOTE(review): divides by components.size() - an empty collection would yield
// NaN/divide-by-zero; presumably callers guarantee a non-empty set - confirm.
<T> LakosMetrics(Collection<MetricsComponent<T>> components, Function<T, Collection<T>> getDependencies) {
MetricsComponentDependencyGraph<T> graph = MetricsComponentDependencyGraph.of(components, getDependencies);
int cumulativeComponentDependency = components.stream()
.mapToInt(component -> 1 + getNumberOfTransitiveDependencies(graph, component))
.sum();
this.cumulativeComponentDependency = cumulativeComponentDependency;
this.averageComponentDependency = ((double) cumulativeComponentDependency) / components.size();
this.relativeAverageComponentDependency = averageComponentDependency / components.size();
this.normalizedCumulativeComponentDependency =
((double) cumulativeComponentDependency) / calculateCumulativeComponentDependencyOfBinaryTree(components.size());
} | @Test
// Two packages with one dependency direction: CCD=3 (1 + 2), ACD=1.5, RACD=0.75, NCCD=1.0.
public void lakos_metrics_of_packages() {
JavaClasses classes = new ClassFileImporter().importPackagesOf(SomeTestClass1.class, SomeTestClass2.class);
Set<JavaPackage> packages = ImmutableSet.of(
classes.get(SomeTestClass1.class).getPackage(),
classes.get(SomeTestClass2.class).getPackage());
LakosMetrics metrics = ArchitectureMetrics.lakosMetrics(MetricsComponents.fromPackages(packages));
assertMetrics(metrics, ExpectedMetrics.ccd(3).acd(1.5).racd(0.75).nccd(1.0));
}
// Tracks equipment and inventory snapshots for the clue-scroll plugin.
// For inventory changes it additionally: merges rune-pouch contents into the
// inventory item array (summing quantities for runes already present, appending
// the rest), resets the active clue when its item left the inventory, and updates
// three-step cryptic clue state from the changed container.
@Subscribe
public void onItemContainerChanged(final ItemContainerChanged event)
{
final ItemContainer itemContainer = event.getItemContainer();
if (event.getContainerId() == InventoryID.EQUIPMENT.getId())
{
equippedItems = itemContainer.getItems();
return;
}
// Only equipment and inventory containers are of interest.
if (event.getContainerId() != InventoryID.INVENTORY.getId())
{
return;
}
inventoryItems = itemContainer.getItems();
// Add runes from rune pouch to inventoryItems
if (itemContainer.contains(ItemID.RUNE_POUCH) || itemContainer.contains(ItemID.RUNE_POUCH_L)
|| itemContainer.contains(ItemID.DIVINE_RUNE_POUCH) || itemContainer.contains(ItemID.DIVINE_RUNE_POUCH_L))
{
List<Item> runePouchContents = getRunepouchContents();
if (!runePouchContents.isEmpty())
{
for (int i = 0; i < inventoryItems.length; i++)
{
Item invItem = inventoryItems[i];
for (Item rune : runePouchContents)
{
if (invItem.getId() == rune.getId())
{
// Same rune also held loose: replace the slot with the summed quantity.
inventoryItems[i] = new Item(invItem.getId(), rune.getQuantity() + invItem.getQuantity());
// Safe despite iterating: the loop breaks immediately after removal.
runePouchContents.remove(rune);
break;
}
}
}
// Runes not matched above are appended as extra pseudo-inventory entries.
inventoryItems = ArrayUtils.addAll(inventoryItems, runePouchContents.toArray(new Item[0]));
}
}
// Check if item was removed from inventory
if (clue != null && clueItemId != null)
{
// Check if clue was removed from inventory
if (!itemContainer.contains(clueItemId))
{
resetClue(true);
}
}
// if three step clue check for clue scroll pieces
if (clue instanceof ThreeStepCrypticClue)
{
if (((ThreeStepCrypticClue) clue).update(event.getContainerId(), itemContainer))
{
worldMapPointsSet = false;
npcsToMark.clear();
if (config.displayHintArrows())
{
client.clearHintArrow();
}
checkClueNPCs(clue, client.getCachedNPCs());
}
}
} | @Test
// Mocks a rune pouch holding cosmic (merged with the loose stack -> 120) and fire
// runes (appended), and asserts both appear in the plugin's merged inventory view.
public void testThatRunepouchIsAddedToInventory()
{
ItemContainer container = mock(ItemContainer.class);
ItemContainerChanged event = new ItemContainerChanged(InventoryID.INVENTORY.getId(), container);
final Item[] inventory = {
new Item(ItemID.COINS_995, 100),
new Item(ItemID.MITHRIL_BAR, 1),
new Item(ItemID.MITHRIL_BAR, 1),
new Item(ItemID.MITHRIL_BAR, 1),
new Item(ItemID.SOUL_RUNE, 30),
new Item(ItemID.COSMIC_RUNE, 100),
new Item(ItemID.RUNE_POUCH, 1),
new Item(ItemID.SPADE, 1),
new Item(ItemID.CLUE_SCROLL_MASTER, 1)
};
when(container.getItems()).thenReturn(inventory);
when(container.contains(ItemID.RUNE_POUCH)).thenReturn(true);
when(client.getVarbitValue(Varbits.RUNE_POUCH_RUNE1)).thenReturn(9); // Cosmic Rune
when(client.getVarbitValue(Varbits.RUNE_POUCH_AMOUNT1)).thenReturn(20);
when(client.getVarbitValue(Varbits.RUNE_POUCH_RUNE3)).thenReturn(4); // Fire Rune
when(client.getVarbitValue(Varbits.RUNE_POUCH_AMOUNT3)).thenReturn(4000);
EnumComposition enumComposition = mock(EnumComposition.class);
when(enumComposition.getIntValue(9)).thenReturn(ItemID.COSMIC_RUNE);
when(enumComposition.getIntValue(4)).thenReturn(ItemID.FIRE_RUNE);
when(client.getEnum(EnumID.RUNEPOUCH_RUNE)).thenReturn(enumComposition);
plugin.onItemContainerChanged(event);
assertFalse(Arrays.equals(inventory, plugin.getInventoryItems()));
List<Item> inventoryList = Arrays.asList(plugin.getInventoryItems());
assertThat(inventoryList, hasItem(new Item(ItemID.COSMIC_RUNE, 120)));
assertThat(inventoryList, hasItem(new Item(ItemID.FIRE_RUNE, 4000)));
}
// Updates an existing rule. The path id (validated to exist via the @Existed
// constraint) is forced onto the DTO so the body cannot target a different rule,
// then delegated to the create-or-update service path.
@PutMapping("/{id}")
public ShenyuAdminResult updateRule(@PathVariable("id") @Valid
@Existed(provider = RuleMapper.class,
message = "rule is not existed") final String id,
@Valid @RequestBody final RuleDTO ruleDTO) {
ruleDTO.setId(id);
Integer updateCount = ruleService.createOrUpdate(ruleDTO);
return ShenyuAdminResult.success(ShenyuResultMessage.UPDATE_SUCCESS, updateCount);
} | @Test
// Mocks the existence checks backing the @Existed validators, then PUTs a full
// rule payload and asserts the success message is returned.
public void testUpdateRule() throws Exception {
RuleConditionDTO ruleConditionDTO = RuleConditionDTO.builder()
.id("888")
.ruleId("666")
.paramType("uri")
.operator("match")
.paramName("/")
.paramValue("/http/order/update")
.build();
List<RuleConditionDTO> conList = new ArrayList<>();
conList.add(ruleConditionDTO);
RuleDTO ruleDTO = RuleDTO.builder()
.id("666")
.selectorId("168")
.matchMode(0)
.name("/http/order/update")
.enabled(true)
.loged(true)
.matchRestful(false)
.sort(1)
.handle("{\"loadBalance\":\"random\",\"retry\":0,\"timeout\":3000}")
.ruleConditions(conList)
.build();
SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class));
when(SpringBeanUtils.getInstance().getBean(RuleMapper.class)).thenReturn(ruleMapper);
when(ruleMapper.existed(ruleDTO.getId())).thenReturn(true);
when(SpringBeanUtils.getInstance().getBean(SelectorMapper.class)).thenReturn(selectorMapper);
when(selectorMapper.existed(ruleDTO.getSelectorId())).thenReturn(true);
given(this.ruleService.createOrUpdate(ruleDTO)).willReturn(1);
this.mockMvc.perform(MockMvcRequestBuilders.put("/rule/{id}", "666")
.contentType(MediaType.APPLICATION_JSON)
.content(GsonUtils.getInstance().toJson(ruleDTO))
)
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS)))
.andReturn();
}
// Builds the SchemaKStream for this source node, pushing a per-node query context
// (suffixed with the source operation name) so generated processor names are stable.
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
final Stacker contextStacker = buildContext.buildNodeContext(getId().toString());
return schemaKStreamFactory.create(
buildContext,
dataSource,
contextStacker.push(SOURCE_OP_NAME)
);
} | @Test
// The built topology must contain the transform node wired between the source
// node and no downstream successors.
public void shouldBuildTransformNode() {
// When:
realStream = buildStream(node);
// Then:
final TopologyDescription.Processor node = (TopologyDescription.Processor) getNodeByName(
realBuilder.build(), PlanTestUtil.TRANSFORM_NODE);
verifyProcessorNode(node, Collections.singletonList(PlanTestUtil.SOURCE_NODE), Collections.emptyList());
}
// Static factory: returns a fresh Builder for assembling a provider instance.
public static Builder newBuilder() {
return new Builder();
} | @Test
// A builder with no customization must still produce a non-null provider.
public void testBuild() {
GenericTranslatorProvider builded = GenericTranslatorProvider.newBuilder().build();
Assert.assertNotNull(builded);
}
// Convenience overload: sends the request asynchronously with no extra context,
// delegating to the two-argument variant with a null second parameter.
@Override
public ListenableFuture<HttpResponse> sendAsync(HttpRequest httpRequest) {
return sendAsync(httpRequest, null);
} | @Test
// HEAD requests must yield a response with headers and status but an empty body,
// even when the server enqueues body content.
public void sendAsync_whenHeadRequest_returnsHttpResponseWithoutBody()
throws IOException, ExecutionException, InterruptedException {
String responseBody = "test response";
mockWebServer.enqueue(
new MockResponse()
.setResponseCode(HttpStatus.OK.code())
.setHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
.setBody(responseBody));
mockWebServer.start();
String requestUrl = mockWebServer.url("/test/head").toString();
HttpResponse response = httpClient.sendAsync(head(requestUrl).withEmptyHeaders().build()).get();
assertThat(response)
.isEqualTo(
HttpResponse.builder()
.setStatus(HttpStatus.OK)
.setHeaders(
HttpHeaders.builder()
.addHeader(CONTENT_TYPE, MediaType.PLAIN_TEXT_UTF_8.toString())
// MockWebServer always adds this response header.
.addHeader(CONTENT_LENGTH, String.valueOf(responseBody.length()))
.build())
.setBodyBytes(Optional.empty())
.setResponseUrl(HttpUrl.parse(requestUrl))
.build());
}
// On each FINISHED request event with a matched route, increments the request
// counter tagged with path template, HTTP method, status code (499 when no
// response was produced), traffic source, and client platform; additionally
// increments a per-client-version counter when the version is recognized.
@Override
public void onEvent(final RequestEvent event) {
if (event.getType() == RequestEvent.Type.FINISHED) {
// Unmatched requests (no template) are intentionally not counted.
if (!event.getUriInfo().getMatchedTemplates().isEmpty()) {
final List<Tag> tags = new ArrayList<>(5);
tags.add(Tag.of(PATH_TAG, UriInfoUtil.getPathTemplate(event.getUriInfo())));
tags.add(Tag.of(METHOD_TAG, event.getContainerRequest().getMethod()));
// 499 is the fallback status when the container produced no response.
tags.add(Tag.of(STATUS_CODE_TAG, String.valueOf(Optional
.ofNullable(event.getContainerResponse())
.map(ContainerResponse::getStatus)
.orElse(499))));
tags.add(Tag.of(TRAFFIC_SOURCE_TAG, trafficSource.name().toLowerCase()));
@Nullable final String userAgent;
{
// First User-Agent header value, or null when the header is absent.
final List<String> userAgentValues = event.getContainerRequest().getRequestHeader(HttpHeaders.USER_AGENT);
userAgent = userAgentValues != null && !userAgentValues.isEmpty() ? userAgentValues.get(0) : null;
}
tags.add(UserAgentTagUtil.getPlatformTag(userAgent));
meterRegistry.counter(REQUEST_COUNTER_NAME, tags).increment();
UserAgentTagUtil.getClientVersionTag(userAgent, clientReleaseManager)
.ifPresent(clientVersionTag -> meterRegistry.counter(REQUESTS_BY_VERSION_COUNTER_NAME,
Tags.of(clientVersionTag, UserAgentTagUtil.getPlatformTag(userAgent)))
.increment());
}
}
} | @Test
// Captures the tags passed to the request counter for a finished Android request
// and asserts all five expected tags are present.
@SuppressWarnings("unchecked")
void testOnEvent() {
final String path = "/test";
final String method = "GET";
final int statusCode = 200;
final ExtendedUriInfo uriInfo = mock(ExtendedUriInfo.class);
when(uriInfo.getMatchedTemplates()).thenReturn(Collections.singletonList(new UriTemplate(path)));
final ContainerRequest request = mock(ContainerRequest.class);
when(request.getMethod()).thenReturn(method);
when(request.getRequestHeader(HttpHeaders.USER_AGENT)).thenReturn(
Collections.singletonList("Signal-Android/4.53.7 (Android 8.1)"));
final ContainerResponse response = mock(ContainerResponse.class);
when(response.getStatus()).thenReturn(statusCode);
final RequestEvent event = mock(RequestEvent.class);
when(event.getType()).thenReturn(RequestEvent.Type.FINISHED);
when(event.getUriInfo()).thenReturn(uriInfo);
when(event.getContainerRequest()).thenReturn(request);
when(event.getContainerResponse()).thenReturn(response);
final ArgumentCaptor<Iterable<Tag>> tagCaptor = ArgumentCaptor.forClass(Iterable.class);
when(meterRegistry.counter(eq(MetricsRequestEventListener.REQUEST_COUNTER_NAME), any(Iterable.class)))
.thenReturn(counter);
listener.onEvent(event);
verify(meterRegistry).counter(eq(MetricsRequestEventListener.REQUEST_COUNTER_NAME), tagCaptor.capture());
final Iterable<Tag> tagIterable = tagCaptor.getValue();
final Set<Tag> tags = new HashSet<>();
for (final Tag tag : tagIterable) {
tags.add(tag);
}
assertEquals(5, tags.size());
assertTrue(tags.contains(Tag.of(MetricsRequestEventListener.PATH_TAG, path)));
assertTrue(tags.contains(Tag.of(MetricsRequestEventListener.METHOD_TAG, method)));
assertTrue(tags.contains(Tag.of(MetricsRequestEventListener.STATUS_CODE_TAG, String.valueOf(statusCode))));
assertTrue(tags.contains(Tag.of(MetricsRequestEventListener.TRAFFIC_SOURCE_TAG, TRAFFIC_SOURCE.name().toLowerCase())));
assertTrue(tags.contains(Tag.of(UserAgentTagUtil.PLATFORM_TAG, "android")));
}
// Instantiates one RemoteInterpreter per configured InterpreterInfo for the given
// user/session, keeping the default interpreter at index 0 so lookups without an
// explicit name resolve to it. A Conf/SessionConf interpreter is always appended
// (SessionConfInterpreter only for the hardcoded "livy" group - see TODO below).
List<Interpreter> createInterpreters(String user, String interpreterGroupId, String sessionId) {
List<Interpreter> interpreters = new ArrayList<>();
List<InterpreterInfo> interpreterInfos = getInterpreterInfos();
Properties intpProperties = getJavaProperties();
for (InterpreterInfo info : interpreterInfos) {
Interpreter interpreter = new RemoteInterpreter(intpProperties, sessionId,
info.getClassName(), user, zConf);
if (info.isDefaultInterpreter()) {
// Default interpreter must be first in the returned list.
interpreters.add(0, interpreter);
} else {
interpreters.add(interpreter);
}
LOGGER.info("Interpreter {} created for user: {}, sessionId: {}",
interpreter.getClassName(), user, sessionId);
}
// TODO(zjffdu) this kind of hardcode is ugly. For now SessionConfInterpreter is used
// for livy, we could add new property in interpreter-setting.json when there's new interpreter
// require SessionConfInterpreter
if (group.equals("livy")) {
interpreters.add(
new SessionConfInterpreter(intpProperties, sessionId, interpreterGroupId, this));
} else {
interpreters.add(new ConfInterpreter(intpProperties, sessionId, interpreterGroupId, this));
}
return interpreters;
} | @Test
// Builds a setting with a default (echo) and secondary (double_echo) interpreter
// and checks default resolution, named resolution, and null for unknown names.
void testCreateInterpreters() {
InterpreterOption interpreterOption = new InterpreterOption();
interpreterOption.setPerUser(InterpreterOption.SHARED);
InterpreterInfo interpreterInfo1 = new InterpreterInfo(EchoInterpreter.class.getName(),
"echo", true, new HashMap<String, Object>(), new HashMap<String, Object>());
InterpreterInfo interpreterInfo2 = new InterpreterInfo(DoubleEchoInterpreter.class.getName(),
"double_echo", false, new HashMap<String, Object>(),
new HashMap<String, Object>());
List<InterpreterInfo> interpreterInfos = new ArrayList<>();
interpreterInfos.add(interpreterInfo1);
interpreterInfos.add(interpreterInfo2);
InterpreterSetting interpreterSetting = new InterpreterSetting.Builder()
.setId("id")
.setName("test")
.setGroup("test")
.setInterpreterInfos(interpreterInfos)
.setOption(interpreterOption)
.setIntepreterSettingManager(interpreterSettingManager)
.setConf(zConf)
.create();
// create default interpreter for user1 and note1
assertEquals(EchoInterpreter.class.getName(), interpreterSetting.getDefaultInterpreter("user1", note1Id).getClassName());
// create interpreter echo for user1 and note1
assertEquals(EchoInterpreter.class.getName(), interpreterSetting.getInterpreter("user1", note1Id, "echo").getClassName());
assertEquals(interpreterSetting.getDefaultInterpreter("user1", note1Id), interpreterSetting.getInterpreter("user1", note1Id, "echo"));
// create interpreter double_echo for user1 and note1
assertEquals(DoubleEchoInterpreter.class.getName(), interpreterSetting.getInterpreter("user1", note1Id, "double_echo").getClassName());
// create non-existed interpreter
assertNull(interpreterSetting.getInterpreter("user1", note1Id, "invalid_echo"));
}
// Validates that a Bigtable project id and instance id are obtainable from either
// the direct ValueProvider fields (deferred providers pass until accessible) or
// the legacy BigtableOptions object; throws IllegalArgumentException otherwise.
void validate() {
checkArgument(
(getProjectId() != null
&& (!getProjectId().isAccessible() || !getProjectId().get().isEmpty()))
|| (getBigtableOptions() != null
&& getBigtableOptions().getProjectId() != null
&& !getBigtableOptions().getProjectId().isEmpty()),
"Could not obtain Bigtable project id");
checkArgument(
(getInstanceId() != null
&& (!getInstanceId().isAccessible() || !getInstanceId().get().isEmpty()))
|| (getBigtableOptions() != null
&& getBigtableOptions().getInstanceId() != null
&& !getBigtableOptions().getInstanceId().isEmpty()),
"Could not obtain Bigtable instance id");
} | @Test
// Happy path: a config with both project and instance ids set must validate.
public void testValidate() {
config.withProjectId(PROJECT_ID).withInstanceId(INSTANCE_ID).validate();
}
// Persists the certificate's secret-key configuration to the given file path and
// returns the written File. IOException propagates from the underlying save.
public File saveSecret(String filename) throws IOException
{
return secretConfig().save(filename);
} | @Test
// Saving a fresh certificate's secret must create the file on disk.
public void testSaveSecretFile() throws IOException
{
ZCert cert = new ZCert();
cert.saveSecret(CERT_LOCATION + "/test_secret.cert");
File file = new File(CERT_LOCATION + "/test_secret.cert");
assertThat(file.exists(), is(true));
}
// Compresses a URL string into the Eddystone-URL byte encoding: one byte for the
// protocol (optionally fused with "www."), the hostname written out literally,
// a single encoded byte for known ".tld" / ".tld/" suffixes, then the remaining
// slash and path copied verbatim. Throws MalformedURLException for null input or
// input that does not match the Eddystone URL regex.
// NOTE(review): getBytes() calls use the platform default charset - presumably
// inputs are ASCII-only URLs, so this is benign; confirm for non-ASCII paths.
public static byte[] compress(String urlString) throws MalformedURLException {
byte[] compressedBytes = null;
if (urlString != null) {
// Figure the compressed bytes can't be longer than the original string.
byte[] byteBuffer = new byte[urlString.length()];
int byteBufferIndex = 0;
Arrays.fill(byteBuffer, (byte) 0x00);
Pattern urlPattern = Pattern.compile(EDDYSTONE_URL_REGEX);
Matcher urlMatcher = urlPattern.matcher(urlString);
if (urlMatcher.matches()) {
// www.
String wwwdot = urlMatcher.group(EDDYSTONE_URL_WWW_GROUP);
boolean haswww = (wwwdot != null);
// Protocol.
String rawProtocol = urlMatcher.group(EDDYSTONE_URL_PROTOCOL_GROUP);
String protocol = rawProtocol.toLowerCase();
if (protocol.equalsIgnoreCase(URL_PROTOCOL_HTTP)) {
byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTP_WWW : EDDYSTONE_URL_PROTOCOL_HTTP);
}
else {
byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTPS_WWW : EDDYSTONE_URL_PROTOCOL_HTTPS);
}
byteBufferIndex++;
// Fully-qualified domain name (FQDN). This includes the hostname and any other components after the dots
// but BEFORE the first single slash in the URL.
byte[] hostnameBytes = urlMatcher.group(EDDYSTONE_URL_FQDN_GROUP).getBytes();
String rawHostname = new String(hostnameBytes);
String hostname = rawHostname.toLowerCase();
String[] domains = hostname.split(Pattern.quote("."));
boolean consumedSlash = false;
// NOTE(review): String.split never returns null, so this check is always true.
if (domains != null) {
// Write the hostname/subdomains prior to the last one. If there's only one (e. g. http://localhost)
// then that's the only thing to write out.
byte[] periodBytes = {'.'};
int writableDomainsCount = (domains.length == 1 ? 1 : domains.length - 1);
for (int domainIndex = 0; domainIndex < writableDomainsCount; domainIndex++) {
// Write out leading period, if necessary.
if (domainIndex > 0) {
System.arraycopy(periodBytes, 0, byteBuffer, byteBufferIndex, periodBytes.length);
byteBufferIndex += periodBytes.length;
}
byte[] domainBytes = domains[domainIndex].getBytes();
int domainLength = domainBytes.length;
System.arraycopy(domainBytes, 0, byteBuffer, byteBufferIndex, domainLength);
byteBufferIndex += domainLength;
}
// Is the TLD one that we can encode?
if (domains.length > 1) {
String tld = "." + domains[domains.length - 1];
String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP);
// Prefer the ".tld/" encoding (it folds the slash into the same byte).
String encodableTLDCandidate = (slash == null ? tld : tld + slash);
byte encodedTLDByte = encodedByteForTopLevelDomain(encodableTLDCandidate);
if (encodedTLDByte != TLD_NOT_ENCODABLE) {
byteBuffer[byteBufferIndex++] = encodedTLDByte;
consumedSlash = (slash != null);
} else {
// Unknown TLD: write it out literally.
byte[] tldBytes = tld.getBytes();
int tldLength = tldBytes.length;
System.arraycopy(tldBytes, 0, byteBuffer, byteBufferIndex, tldLength);
byteBufferIndex += tldLength;
}
}
}
// Optional slash (only if not already folded into the encoded TLD byte).
if (! consumedSlash) {
String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP);
if (slash != null) {
int slashLength = slash.length();
System.arraycopy(slash.getBytes(), 0, byteBuffer, byteBufferIndex, slashLength);
byteBufferIndex += slashLength;
}
}
// Path.
String path = urlMatcher.group(EDDYSTONE_URL_PATH_GROUP);
if (path != null) {
int pathLength = path.length();
System.arraycopy(path.getBytes(), 0, byteBuffer, byteBufferIndex, pathLength);
byteBufferIndex += pathLength;
}
// Copy the result.
compressedBytes = new byte[byteBufferIndex];
System.arraycopy(byteBuffer, 0, compressedBytes, 0, compressedBytes.length);
}
else {
throw new MalformedURLException();
}
}
else {
throw new MalformedURLException();
}
return compressedBytes;
} | @Test
// A host with no TLD: protocol byte 0x02 (http://) followed by the literal host.
public void testCompressWithoutTLD() throws MalformedURLException {
String testURL = "http://xxx";
byte[] expectedBytes = {0x02, 'x', 'x', 'x'};
assertTrue(Arrays.equals(expectedBytes, UrlBeaconUrlCompressor.compress(testURL)));
}
// Records the user's per-scope approval decisions for an OAuth2 client.
// Every requested scope is persisted (approved or rejected) with a shared expiry;
// returns true if the scope map was empty (nothing to approve) or at least one
// scope was approved, false when every scope was rejected.
@Override
@Transactional
public boolean updateAfterApproval(Long userId, Integer userType, String clientId, Map<String, Boolean> requestedScopes) {
// Empty request means there is nothing to approve, so it trivially passes.
if (CollUtil.isEmpty(requestedScopes)) {
return true;
}
// Persist each decision; success requires at least one approved scope.
boolean success = false; // 需要至少有一个同意
LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT);
for (Map.Entry<String, Boolean> entry : requestedScopes.entrySet()) {
if (entry.getValue()) {
success = true;
}
saveApprove(userId, userType, clientId, entry.getKey(), entry.getValue(), expireTime);
}
return success;
} | @Test
// Rejecting the single requested scope must return false but still persist the
// rejected approval row with an unexpired expiry.
public void testUpdateAfterApproval_reject() {
// 准备参数
Long userId = randomLongId();
Integer userType = randomEle(UserTypeEnum.values()).getValue();
String clientId = randomString();
Map<String, Boolean> requestedScopes = new LinkedHashMap<>();
requestedScopes.put("write", false);
// mock 方法
// 调用
boolean success = oauth2ApproveService.updateAfterApproval(userId, userType, clientId,
requestedScopes);
// 断言
assertFalse(success);
List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList();
assertEquals(1, result.size());
// write
assertEquals(userId, result.get(0).getUserId());
assertEquals(userType, result.get(0).getUserType());
assertEquals(clientId, result.get(0).getClientId());
assertEquals("write", result.get(0).getScope());
assertFalse(result.get(0).getApproved());
assertFalse(DateUtils.isExpired(result.get(0).getExpiresTime()));
}
// Shortens an id to at most targetLength characters while keeping it unique:
// ids already short enough are returned unchanged; longer ids become a truncated
// prefix plus "-" plus a 32-bit hash of the full original id. targetLength must
// exceed 8 so the hash suffix and separator fit.
// NOTE(review): goodFastHash is seeded per-JVM-run, so generated ids are only
// stable within a single process - presumably acceptable for this use; confirm.
public static String generateNewId(String id, int targetLength) {
if (id.length() <= targetLength) {
return id;
}
if (targetLength <= 8) {
throw new IllegalArgumentException("targetLength must be greater than 8");
}
HashFunction hashFunction = goodFastHash(32);
String hash = hashFunction.hashUnencodedChars(id).toString();
return id.substring(0, targetLength - hash.length() - 1) + "-" + hash;
} | @Test
// Ids that already fit within targetLength must be returned untouched.
public void testGenerateNewIdShouldReturnOldIdWhenInputLengthIsNotLongerThanTargetLength() {
String shortId = "test-id";
String actual = generateNewId(shortId, shortId.length());
assertThat(actual).isEqualTo(shortId);
}
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
} | @Test
public void testChronicleChargeEmpty()
{
	// Simulate the in-game chat line reporting an empty chronicle and verify the
	// plugin persists a charge count of 0 to the RS-profile configuration.
	ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHRONICLE_CHECK_CHARGES_EMPTY, "", 0);
	itemChargePlugin.onChatMessage(chatMessage);
	verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_CHRONICLE, 0);
}
/**
 * Looks up the stub supplier registered for {@code interfaceName} and applies it to the
 * given invoker. When no supplier is registered yet, the generated stub class is loaded;
 * its static initialization is expected to register the supplier as a side effect.
 *
 * @throws IllegalStateException if no supplier exists even after loading the stub class
 */
public static <T> T createStub(String interfaceName, Invoker<T> invoker) {
    // TODO: replace this reflective bootstrap hack with an explicit registration path
    if (STUB_SUPPLIERS.containsKey(interfaceName)) {
        return (T) STUB_SUPPLIERS.get(interfaceName).apply(invoker);
    }
    // Force class initialization; the stub class registers its supplier statically.
    ReflectUtils.forName(stubClassName(interfaceName));
    if (!STUB_SUPPLIERS.containsKey(interfaceName)) {
        throw new IllegalStateException("Can not find any stub supplier for " + interfaceName);
    }
    return (T) STUB_SUPPLIERS.get(interfaceName).apply(invoker);
} | @Test
void createStub() {
    // An unknown service name has no registered stub supplier and no loadable stub
    // class, so createStub must fail with IllegalStateException.
    Invoker<?> invoker = Mockito.mock(Invoker.class);
    try {
        StubSuppliers.createStub(serviceName + 1, invoker);
        fail();
    } catch (IllegalStateException e) {
        // pass
    }
}
/**
 * Builds the partitioner configs for a merge task from the table's segment partition config.
 * Returns an empty list when the table has no partitioning configured, and validates that
 * every configured partition column exists in the schema.
 */
public static List<PartitionerConfig> getPartitionerConfigs(TableConfig tableConfig, Schema schema,
    Map<String, String> taskConfig) {
  // NOTE: taskConfig is currently unused; kept for interface stability.
  SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
  if (segmentPartitionConfig == null) {
    return Collections.emptyList();
  }
  Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
  List<PartitionerConfig> partitionerConfigs = new ArrayList<>(columnPartitionMap.size());
  columnPartitionMap.forEach((partitionColumn, columnPartitionConfig) -> {
    Preconditions.checkState(schema.hasColumn(partitionColumn),
        "Partition column: %s does not exist in the schema for table: %s", partitionColumn,
        tableConfig.getTableName());
    partitionerConfigs.add(
        new PartitionerConfig.Builder().setPartitionerType(PartitionerFactory.PartitionerType.TABLE_PARTITION_CONFIG)
            .setColumnName(partitionColumn)
            .setColumnPartitionConfig(columnPartitionConfig)
            .build());
  });
  return partitionerConfigs;
} | @Test
public void testGetPartitionerConfigs() {
  // Single partition column: expect exactly one partitioner config carrying the
  // table-level column partition settings (murmur, 10 partitions).
  TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable")
      .setSegmentPartitionConfig(
          new SegmentPartitionConfig(Collections.singletonMap("memberId", new ColumnPartitionConfig("murmur", 10))))
      .build();
  Schema schema = new Schema.SchemaBuilder().addSingleValueDimension("memberId", DataType.LONG).build();
  Map<String, String> taskConfig = Collections.emptyMap();
  List<PartitionerConfig> partitionerConfigs = MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, taskConfig);
  assertEquals(partitionerConfigs.size(), 1);
  PartitionerConfig partitionerConfig = partitionerConfigs.get(0);
  assertEquals(partitionerConfig.getPartitionerType(), PartitionerFactory.PartitionerType.TABLE_PARTITION_CONFIG);
  assertEquals(partitionerConfig.getColumnName(), "memberId");
  ColumnPartitionConfig columnPartitionConfig = partitionerConfig.getColumnPartitionConfig();
  assertEquals(columnPartitionConfig.getFunctionName(), "murmur");
  assertEquals(columnPartitionConfig.getNumPartitions(), 10);
  // Table with multiple partition columns.
  Map<String, ColumnPartitionConfig> columnPartitionConfigMap = new HashMap<>();
  columnPartitionConfigMap.put("memberId", new ColumnPartitionConfig("murmur", 10));
  columnPartitionConfigMap.put("memberName", new ColumnPartitionConfig("HashCode", 5));
  TableConfig tableConfigWithMultiplePartitionColumns =
      new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable")
          .setSegmentPartitionConfig(new SegmentPartitionConfig(columnPartitionConfigMap)).build();
  Schema schemaWithMultipleColumns = new Schema.SchemaBuilder().addSingleValueDimension("memberId", DataType.LONG)
      .addSingleValueDimension("memberName", DataType.STRING).build();
  partitionerConfigs =
      MergeTaskUtils.getPartitionerConfigs(tableConfigWithMultiplePartitionColumns, schemaWithMultipleColumns,
          taskConfig);
  assertEquals(partitionerConfigs.size(), 2);
  // No partition column in table config
  TableConfig tableConfigWithoutPartitionColumn =
      new TableConfigBuilder(TableType.OFFLINE).setTableName("myTable").build();
  assertTrue(MergeTaskUtils.getPartitionerConfigs(tableConfigWithoutPartitionColumn, schema, taskConfig).isEmpty());
  // Partition column does not exist in schema
  Schema schemaWithoutPartitionColumn = new Schema.SchemaBuilder().build();
  try {
    MergeTaskUtils.getPartitionerConfigs(tableConfig, schemaWithoutPartitionColumn, taskConfig);
    fail();
  } catch (IllegalStateException e) {
    // Expected
  }
}
/**
 * Merges {@code paramsToMerge} into {@code params} in place.
 *
 * <p>Literal MAP params are merged recursively (child maps combined key by key, inheriting
 * the base param's mode as the parent mode); literal STRING_MAP params are merged shallowly,
 * with incoming entries overwriting base entries; all other params are replaced wholesale by
 * the merged definition built from the incoming value. A null {@code paramsToMerge} is a no-op.
 *
 * <p>NOTE(review): names present in both maps are visited twice by the concatenated key
 * stream; the second pass re-merges onto the already-merged value — appears idempotent,
 * but confirm.
 */
public static void mergeParams(
    Map<String, ParamDefinition> params,
    Map<String, ParamDefinition> paramsToMerge,
    MergeContext context) {
  if (paramsToMerge == null) {
    return;
  }
  Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
      .forEach(
          name -> {
            ParamDefinition paramToMerge = paramsToMerge.get(name);
            if (paramToMerge == null) {
              // Present only in the base map; nothing to merge for this name.
              return;
            }
            if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
              // Recursive merge of literal maps.
              Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
              Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
              mergeParams(
                  baseMap,
                  toMergeMap,
                  MergeContext.copyWithParentMode(
                      context, params.getOrDefault(name, paramToMerge).getMode()));
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else if (paramToMerge.getType() == ParamType.STRING_MAP
                && paramToMerge.isLiteral()) {
              // Shallow merge of literal string maps; incoming entries win.
              Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
              Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
              baseMap.putAll(toMergeMap);
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, baseMap));
            } else {
              // All other types: the incoming value replaces the base value.
              params.put(
                  name,
                  buildMergedParamDefinition(
                      name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
            }
          });
} | @Test
public void testMergeAllowSystemUpdateInternalMode() throws JsonProcessingException {
  // Under a system merge context, the incoming OPTIONAL internal_mode should be
  // applied to the merged definition of an existing param.
  Map<String, ParamDefinition> allParams =
      parseParamDefMap("{'tomerge': {'type': 'LONG','value': 2}}");
  Map<String, ParamDefinition> paramsToMerge =
      parseParamDefMap("{'tomerge': {'type': 'LONG', 'value': 3, 'internal_mode': 'OPTIONAL'}}");
  ParamsMergeHelper.mergeParams(allParams, paramsToMerge, systemMergeContext);
  assertEquals(
      InternalParamMode.OPTIONAL, allParams.get("tomerge").asLongParamDef().getInternalMode());
}
/**
 * Sends this request synchronously via the underlying service and deserializes the
 * reply into {@code responseType}.
 *
 * @throws IOException if the underlying service call fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
} | @Test
public void testEthBlockNumber() throws Exception {
    // eth_blockNumber takes no params; verify the canonical JSON-RPC serialization.
    web3j.ethBlockNumber().send();
    verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}");
}
/**
 * Returns this analyzer's display name (the {@code ANALYZER_NAME} constant).
 */
@Override
public String getName() {
    return ANALYZER_NAME;
} | @Test
public void testMSBuildProjectAnalysis() throws Exception {
    // End-to-end analysis of a sample .csproj: each package reference should surface
    // as a virtual dependency carrying vendor/product/version evidence.
    try (Engine engine = new Engine(getSettings())) {
        File file = BaseTest.getResourceAsFile(this, "msbuild/test.csproj");
        Dependency toScan = new Dependency(file);
        MSBuildProjectAnalyzer analyzer = new MSBuildProjectAnalyzer();
        analyzer.setFilesMatched(true);
        analyzer.initialize(getSettings());
        analyzer.prepare(engine);
        analyzer.setEnabled(true);
        analyzer.analyze(toScan, engine);
        assertEquals("5 dependencies should be found", 5, engine.getDependencies().length);
        int foundCount = 0;
        for (Dependency result : engine.getDependencies()) {
            assertEquals(DEPENDENCY_ECOSYSTEM, result.getEcosystem());
            assertTrue(result.isVirtual());
            // Match each expected package by name; anything else falls through.
            if (null != result.getName()) {
                switch (result.getName()) {
                    case "Humanizer":
                        foundCount++;
                        assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().contains("Humanizer"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Humanizer"));
                        assertTrue(result.getEvidence(EvidenceType.VERSION).toString().contains("2.2.0"));
                        break;
                    case "JetBrains.Annotations":
                        foundCount++;
                        assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().contains("JetBrains"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("JetBrains.Annotations"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Annotations"));
                        assertTrue(result.getEvidence(EvidenceType.VERSION).toString().contains("11.1.0"));
                        break;
                    case "Microsoft.AspNetCore.All":
                        foundCount++;
                        assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().contains("Microsoft"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Microsoft.AspNetCore.All"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("AspNetCore"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("AspNetCore.All"));
                        assertTrue(result.getEvidence(EvidenceType.VERSION).toString().contains("2.0.5"));
                        break;
                    case "Microsoft.Extensions.Logging":
                        foundCount++;
                        assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().contains("Microsoft"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Microsoft.Extensions.Logging"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Extensions"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("Extensions.Logging"));
                        assertTrue(result.getEvidence(EvidenceType.VERSION).toString().contains("6.0.0"));
                        break;
                    case "NodaTime":
                        foundCount++;
                        assertTrue(result.getEvidence(EvidenceType.VENDOR).toString().contains("NodaTime"));
                        assertTrue(result.getEvidence(EvidenceType.PRODUCT).toString().contains("NodaTime"));
                        assertTrue("Expected 3.0.0; contained: " + result.getEvidence(EvidenceType.VERSION).stream().map(e -> e.toString()).collect(Collectors.joining(",", "{", "}")), result.getEvidence(EvidenceType.VERSION).toString().contains("3.0.0"));
                        break;
                    default:
                        break;
                }
            }
        }
        assertEquals("5 expected dependencies should be found", 5, foundCount);
    }
} |
/**
 * Lists instances of a service, optionally filtered by cluster and health.
 *
 * <p>The caller's identity (ip:port, user agent, app) is packaged into a {@link Subscriber}
 * and passed to the instance service — presumably recorded for push notifications;
 * confirm in the InstanceService implementation.
 */
@GetMapping("/list")
@TpsControl(pointName = "NamingServiceSubscribe", name = "HttpNamingServiceSubscribe")
@Secured(action = ActionTypes.READ)
@ExtractorManager.Extractor(httpExtractor = NamingInstanceListHttpParamExtractor.class)
public Result<ServiceInfo> list(
        @RequestParam(value = "namespaceId", defaultValue = Constants.DEFAULT_NAMESPACE_ID) String namespaceId,
        @RequestParam(value = "groupName", defaultValue = Constants.DEFAULT_GROUP) String groupName,
        @RequestParam("serviceName") String serviceName,
        @RequestParam(value = "clusterName", defaultValue = StringUtils.EMPTY) String clusterName,
        @RequestParam(value = "ip", defaultValue = StringUtils.EMPTY) String ip,
        @RequestParam(value = "port", defaultValue = "0") Integer port,
        @RequestParam(value = "healthyOnly", defaultValue = "false") Boolean healthyOnly,
        @RequestParam(value = "app", defaultValue = StringUtils.EMPTY) String app,
        @RequestHeader(value = HttpHeaderConsts.USER_AGENT_HEADER, required = false) String userAgent,
        @RequestHeader(value = HttpHeaderConsts.CLIENT_VERSION_HEADER, required = false) String clientVersion) {
    // Fall back to the client-version header when no user agent was sent.
    if (StringUtils.isEmpty(userAgent)) {
        userAgent = StringUtils.defaultIfEmpty(clientVersion, StringUtils.EMPTY);
    }
    // Service lookups use the group-qualified name.
    String compositeServiceName = NamingUtils.getGroupedName(serviceName, groupName);
    Subscriber subscriber = new Subscriber(ip + ":" + port, userAgent, app, ip, namespaceId, compositeServiceName,
            port, clusterName);
    return Result.success(instanceServiceV2.listInstance(namespaceId, compositeServiceName, subscriber, clusterName,
            healthyOnly));
} | @Test
void listInstance() throws Exception {
    // The controller should compose the group-qualified service name and delegate to
    // instanceServiceV2.listInstance, passing the returned ServiceInfo through.
    ServiceInfo serviceInfo = new ServiceInfo();
    serviceInfo.setName("serviceInfo");
    when(instanceServiceV2.listInstance(eq(TEST_NAMESPACE), eq(TEST_SERVICE_NAME), any(), eq(TEST_CLUSTER_NAME), eq(false))).thenReturn(
            serviceInfo);
    Result<ServiceInfo> result = instanceControllerV2.list(TEST_NAMESPACE, "DEFAULT_GROUP", "test-service", TEST_CLUSTER_NAME, TEST_IP,
            9999, false, "", "", "");
    verify(instanceServiceV2).listInstance(eq(TEST_NAMESPACE), eq(TEST_SERVICE_NAME), any(), eq(TEST_CLUSTER_NAME), eq(false));
    assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
    assertEquals(serviceInfo.getName(), result.getData().getName());
}
/**
 * Static factory: creates a {@code RetryOperator} wrapping the given retry instance.
 */
public static <T> RetryOperator<T> of(Retry retry) {
    return new RetryOperator<>(retry);
} | @Test
public void shouldNotRetryWhenItThrowErrorMono() {
    // An Error (as opposed to an Exception) must propagate immediately without
    // consuming retry attempts, so both failure metrics stay at zero.
    RetryConfig config = retryConfig();
    Retry retry = Retry.of("testName", config);
    RetryOperator<String> retryOperator = RetryOperator.of(retry);
    given(helloWorldService.returnHelloWorld())
        .willThrow(new Error("BAM!"));
    StepVerifier.create(Mono.fromCallable(helloWorldService::returnHelloWorld)
        .transformDeferred(retryOperator))
        .expectSubscription()
        .expectError(Error.class)
        .verify(Duration.ofSeconds(1));
    // The underlying service must have been invoked exactly once.
    then(helloWorldService).should().returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
}
/**
 * Begins the WebSocket handshake on the given channel. Convenience overload that
 * creates the result promise from the channel itself.
 *
 * @param channel the channel to handshake on; must not be {@code null}
 */
public ChannelFuture handshake(Channel channel) {
    ObjectUtil.checkNotNull(channel, "channel");
    return handshake(channel, channel.newPromise());
} | @Test
public void testInvalidOriginWhenIncorrectWebSocketURI() {
    // A relative URI carries no host, so the handshaker cannot derive the origin
    // header value; the handshake future must fail with IllegalArgumentException.
    URI uri = URI.create("/ws");
    EmbeddedChannel channel = new EmbeddedChannel(new HttpClientCodec());
    HttpHeaders headers = new DefaultHttpHeaders();
    headers.set(HttpHeaderNames.HOST, "localhost:80");
    final WebSocketClientHandshaker handshaker = newHandshaker(uri, null, headers, false, true);
    final ChannelFuture handshakeFuture = handshaker.handshake(channel);
    assertFalse(handshakeFuture.isSuccess());
    assertInstanceOf(IllegalArgumentException.class, handshakeFuture.cause());
    assertEquals("Cannot generate the '" + getOriginHeaderName() + "' header value," +
            " webSocketURI should contain host or disable generateOriginHeader" +
            " or pass value through customHeaders", handshakeFuture.cause().getMessage());
    // No outbound/inbound messages should remain pending.
    assertFalse(channel.finish());
}
/**
 * Creates a gRPC data writer for the given block/file id against the target worker.
 *
 * <p>Acquires a block-worker client before constructing the writer; on success the
 * writer takes ownership of the client, on failure the client is released here so the
 * acquired resource is not leaked.
 *
 * @throws IOException if the client cannot be acquired or the writer cannot be created
 */
public static GrpcDataWriter create(FileSystemContext context, WorkerNetAddress address,
    long id, long length, RequestType type, OutStreamOptions options)
    throws IOException {
  // Chunk size governs how the payload is split across streamed write requests.
  long chunkSize = context.getClusterConf()
      .getBytes(PropertyKey.USER_STREAMING_WRITER_CHUNK_SIZE_BYTES);
  CloseableResource<BlockWorkerClient> grpcClient = context.acquireBlockWorkerClient(address);
  try {
    return new GrpcDataWriter(context, address, id, length, chunkSize, type, options, grpcClient);
  } catch (Exception e) {
    // Constructor failed: release the client before rethrowing.
    grpcClient.close();
    throw e;
  }
} | @Test(timeout = 1000 * 60)
public void writeFileChecksumOfPartialFile() throws Exception {
    // Write a multi-chunk file and verify that only bytes in the requested range
    // [10, length/3) contribute to the checksum observed by the mock worker.
    long checksumActual;
    Future<Long> checksumExpected;
    long length = CHUNK_SIZE * 1024 + CHUNK_SIZE / 3;
    try (DataWriter writer = create(length)) {
        checksumExpected = writeFile(writer, length, 10, length / 3);
        checksumExpected.get();
        checksumActual = verifyWriteRequests(mClient, 10, length / 3);
    }
    assertEquals(checksumExpected.get().longValue(), checksumActual);
}
/**
 * Entry point for the cluster CLI.
 *
 * <p>Supported options: {@code -lnl} lists cluster node labels, {@code -lna} lists
 * cluster node attributes, {@code -h} prints usage. {@code -dnl} (deprecated) switches
 * node-label operations to direct store access instead of going through the RM.
 *
 * @return 0 on success or help; -1 when option parsing fails
 */
@Override
public int run(String[] args) throws Exception {
  Options opts = new Options();
  opts.addOption("lnl", LIST_LABELS_CMD, false,
      "List cluster node-label collection");
  opts.addOption("lna", LIST_CLUSTER_ATTRIBUTES, false,
      "List cluster node-attribute collection");
  opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
  opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
      "This is DEPRECATED, will be removed in future releases. Directly access node label store, "
          + "with this option, all node label related operations"
          + " will NOT connect RM. Instead, they will"
          + " access/modify stored node labels directly."
          + " By default, it is false (access via RM)."
          + " AND PLEASE NOTE: if you configured "
          + YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR
          + " to a local directory"
          + " (instead of NFS or HDFS), this option will only work"
          + " when the command run on the machine where RM is running."
          + " Also, this option is UNSTABLE, could be removed in future"
          + " releases.");
  int exitCode = -1;
  CommandLine parsedCli = null;
  try {
    parsedCli = new GnuParser().parse(opts, args);
  } catch (MissingArgumentException ex) {
    sysout.println("Missing argument for options");
    printUsage(opts);
    return exitCode;
  }
  // The YARN client must be running before any label/attribute query.
  createAndStartYarnClient();
  if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
    accessLocal = true;
  }
  if (parsedCli.hasOption(LIST_LABELS_CMD)) {
    printClusterNodeLabels();
  } else if(parsedCli.hasOption(LIST_CLUSTER_ATTRIBUTES)){
    printClusterNodeAttributes();
  } else if (parsedCli.hasOption(HELP_CMD)) {
    printUsage(opts);
    return 0;
  } else {
    // NOTE(review): invalid usage prints to stderr but still returns 0 below —
    // confirm whether a nonzero exit code was intended for this branch.
    syserr.println("Invalid Command Usage : ");
    printUsage(opts);
  }
  return 0;
} | @Test
public void testGetEmptyClusterNodeLabels() throws Exception {
    // With no labels configured, the CLI should print just the header and exit 0.
    when(client.getClusterNodeLabels()).thenReturn(new ArrayList<NodeLabel>());
    ClusterCLI cli = createAndGetClusterCLI();
    int rc =
        cli.run(new String[] { ClusterCLI.CMD, "-" + ClusterCLI.LIST_LABELS_CMD });
    assertEquals(0, rc);
    // Build the expected output ("Node Labels: " with nothing after it).
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.print("Node Labels: ");
    pw.close();
    verify(sysOut).println(baos.toString("UTF-8"));
}
/**
 * Returns whether the given content node may still have merges pending in the bucket
 * space. Answers conservatively (true) whenever stats are incomplete or missing for
 * the node; answers false only when per-space stats exist and report nothing pending.
 */
@Override
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) {
    if (!stats.hasUpdatesFromAllDistributors()) {
        return true;
    }
    ContentNodeStats nodeStats = stats.getStats().getNodeStats(contentNodeIndex);
    if (nodeStats == null) {
        return true;
    }
    ContentNodeStats.BucketSpaceStats spaceStats = nodeStats.getBucketSpace(bucketSpace);
    return spaceStats != null && spaceStats.mayHaveBucketsPending(minMergeCompletionRatio);
} | @Test
void cluster_without_updates_from_all_distributors_may_have_merges_pending() {
    // Incomplete stats must be treated conservatively: assume merges may be pending.
    Fixture f = Fixture.fromIncompleteStats();
    assertTrue(f.mayHaveMergesPending("default", 1));
}
/**
 * Convenience overload without a table config; delegates to the full variant.
 */
public void optimize(PinotQuery pinotQuery, @Nullable Schema schema) {
  optimize(pinotQuery, null, schema);
} | @Test
public void testMergeRangeFilter() {
    // Overlapping range predicates inside each AND branch should be merged for
    // single-value columns; multi-value columns must be left alone.
    String query =
        "SELECT * FROM testTable WHERE (int > 10 AND int <= 100 AND int BETWEEN 10 AND 20) OR (float BETWEEN 5.5 AND "
            + "7.5 AND float = 6 AND float < 6.5 AND float BETWEEN 6 AND 8) OR (string > '123' AND string > '23') OR "
            + "(mvInt > 5 AND mvInt < 0)";
    PinotQuery pinotQuery = CalciteSqlParser.compileToPinotQuery(query);
    OPTIMIZER.optimize(pinotQuery, SCHEMA);
    Function filterFunction = pinotQuery.getFilterExpression().getFunctionCall();
    assertEquals(filterFunction.getOperator(), FilterKind.OR.name());
    List<Expression> operands = filterFunction.getOperands();
    assertEquals(operands.size(), 4);
    assertEquals(operands.get(0), getRangeFilterExpression("int", "(10\00020]"));
    // Alphabetical order for STRING column ('23' > '123')
    assertEquals(operands.get(2), getRangeFilterExpression("string", "(23\000*)"));
    // The float branch keeps the equality predicate alongside the merged range.
    Function secondChildFunction = operands.get(1).getFunctionCall();
    assertEquals(secondChildFunction.getOperator(), FilterKind.AND.name());
    List<Expression> secondChildChildren = secondChildFunction.getOperands();
    assertEquals(secondChildChildren.size(), 2);
    assertEquals(secondChildChildren.get(0), getEqFilterExpression("float", 6));
    assertEquals(secondChildChildren.get(1), getRangeFilterExpression("float", "[6.0\0006.5)"));
    // Range filter on multi-value column should not be merged ([-5, 10] can match this filter)
    Function fourthChildFunction = operands.get(3).getFunctionCall();
    assertEquals(fourthChildFunction.getOperator(), FilterKind.AND.name());
    List<Expression> fourthChildChildren = fourthChildFunction.getOperands();
    assertEquals(fourthChildChildren.size(), 2);
    assertEquals(fourthChildChildren.get(0).getFunctionCall().getOperator(), FilterKind.GREATER_THAN.name());
    assertEquals(fourthChildChildren.get(1).getFunctionCall().getOperator(), FilterKind.LESS_THAN.name());
}
/**
 * Fetches every artifact referenced by {@code uris} into the base directory.
 *
 * <p>Returns a single-file result when exactly one URI was given, otherwise a
 * multi-file result. At least one URI is required.
 *
 * @throws IllegalArgumentException if {@code uris} is null or empty
 */
public Result fetchArtifacts(String[] uris) {
    checkArgument(uris != null && uris.length > 0, "At least one URI is required.");
    // Make sure the target directory hierarchy exists before any download starts.
    ArtifactUtils.createMissingParents(baseDir);
    List<File> fetched =
            Arrays.stream(uris)
                    .map(FunctionUtils.uncheckedFunction(this::fetchArtifact))
                    .collect(Collectors.toList());
    if (fetched.isEmpty()) {
        // Should not happen: the argument check above guarantees at least one URI.
        throw new IllegalStateException("Corrupt artifact fetching state.");
    }
    return fetched.size() == 1 ? new Result(fetched.get(0), null) : new Result(null, fetched);
} | @Test
void testNoFetchOverride() throws Exception {
    // A file:// URI pointing at an already-local file should be used as-is:
    // none of the registered fetchers may be invoked.
    DummyFetcher dummyFetcher = new DummyFetcher();
    ArtifactFetchManager fetchMgr =
        new ArtifactFetchManager(
            dummyFetcher, dummyFetcher, dummyFetcher, configuration, null);
    File sourceFile = TestingUtils.getClassFile(getClass());
    Path destFile = tempDir.resolve(sourceFile.getName());
    Files.copy(sourceFile.toPath(), destFile);
    String uriStr = "file://" + sourceFile.toURI().getPath();
    fetchMgr.fetchArtifacts(uriStr, null);
    assertThat(dummyFetcher.fetchCount).isZero();
}
/**
 * Acquires (or extends) the cluster-state lock for a pending state-change transaction.
 *
 * <p>Rejects the request when startup is incomplete, a mastership claim is in progress,
 * the member-list version mismatches, or migrations/partition state conflict with the
 * requested change. For version changes, node and cluster compatibility are validated
 * first. After the lock is taken, migration/partition state is re-checked; if it changed
 * in the meantime the lock is rolled back and the failure is rethrown.
 *
 * @throws NullPointerException  if {@code stateChange} is null
 * @throws IllegalStateException if any of the preconditions above fails
 */
public void lockClusterState(ClusterStateChange stateChange, Address initiator, UUID txnId, long leaseTime,
                             int memberListVersion, long partitionStateStamp) {
    Preconditions.checkNotNull(stateChange);
    clusterServiceLock.lock();
    try {
        if (!node.getNodeExtension().isStartCompleted()) {
            throw new IllegalStateException("Can not lock cluster state! Startup is not completed yet!");
        }
        if (node.getClusterService().getClusterJoinManager().isMastershipClaimInProgress()) {
            throw new IllegalStateException("Can not lock cluster state! Mastership claim is in progress!");
        }
        if (stateChange.isOfType(Version.class)) {
            // Version changes additionally require node and cluster compatibility.
            validateNodeCompatibleWith((Version) stateChange.getNewState());
            validateClusterVersionChange((Version) stateChange.getNewState());
        }
        checkMemberListVersion(memberListVersion);
        checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
        lockOrExtendClusterState(initiator, txnId, leaseTime);
        try {
            // check migration status and partition-state version again
            // if partition state is changed then release the lock and fail.
            checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
        } catch (IllegalStateException e) {
            // Roll back the just-acquired lock before propagating the failure.
            stateLockRef.set(LockGuard.NOT_LOCKED);
            throw e;
        }
    } finally {
        clusterServiceLock.unlock();
    }
} | @Test(expected = NullPointerException.class)
public void test_lockClusterState_nullInitiator() {
    // Passing a null initiator must surface as a NullPointerException
    // (expected via the @Test annotation).
    clusterStateManager.lockClusterState(ClusterStateChange.from(FROZEN), null, TXN, 1000, MEMBERLIST_VERSION,
            PARTITION_STAMP);
}
/**
 * Returns a new array in which the first occurrence of {@code oldValue} in {@code src}
 * is replaced by all elements of {@code newValues}. When {@code oldValue} is not found,
 * {@code src} itself is returned unchanged (no copy is made).
 *
 * @param src       the source array (assumed non-null — a null src would fail in indexOf; TODO confirm)
 * @param oldValue  the element to replace
 * @param newValues the elements spliced in at the match position
 * @return a fresh array of length {@code src.length - 1 + newValues.length}, or {@code src} when no match
 */
@SuppressWarnings("unchecked")
public static <T> T[] replaceFirst(T[] src, T oldValue, T[] newValues) {
    int index = indexOf(src, oldValue);
    if (index == -1) {
        return src;
    }
    // Array.newInstance uses src's runtime component type, so the unchecked cast is safe.
    T[] dst = (T[]) Array.newInstance(src.getClass().getComponentType(), src.length - 1 + newValues.length);
    // copy the prefix before the match
    System.arraycopy(src, 0, dst, 0, index);
    // splice in the replacement values at the match position
    System.arraycopy(newValues, 0, dst, index, newValues.length);
    // copy the suffix after the match
    System.arraycopy(src, index + 1, dst, index + newValues.length, src.length - index - 1);
    return dst;
} | @Test
public void replace_whenInEnd() {
Integer[] result = replaceFirst(new Integer[]{1, 2, 6}, 6, new Integer[]{3, 4});
System.out.println(Arrays.toString(result));
assertArrayEquals(new Integer[]{1, 2, 3, 4}, result);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.