| focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
|---|---|
// Returns the timestamp of the most recent "reset" status message recorded by
// the status manager, or -1 when the status list is unavailable or contains
// no reset message.
public long timeOfLastReset() {
// Work on a copy so concurrent status additions cannot affect the scan.
List<Status> statusList = sm.getCopyOfStatusList();
if (statusList == null)
return -1;
int len = statusList.size();
// Scan backwards so the *latest* reset message wins.
for (int i = len - 1; i >= 0; i--) {
Status s = statusList.get(i);
if (CoreConstants.RESET_MSG_PREFIX.equals(s.getMessage())) {
return s.getDate();
}
}
return -1;
} | @Test
public void statusListShouldReturnLastResetTime() {
context.getStatusManager().add(new InfoStatus("test", this));
long resetTime = System.currentTimeMillis();
context.getStatusManager().add(new InfoStatus(CoreConstants.RESET_MSG_PREFIX, this));
context.getStatusManager().add(new InfoStatus("bla", this));
assertTrue(resetTime <= statusUtil.timeOfLastReset());
} |
// Factory for the "already authorized" client error; the user name is
// formatted into the message (e.g. "bob already authorized").
public static BadRequestException userAlreadyAuthorized(String userName) {
return new BadRequestException("%s already authorized", userName);
} | @Test
public void testUserAlreadyAuthorized(){
BadRequestException userAlreadyAuthorized = BadRequestException.userAlreadyAuthorized("user");
assertEquals("user already authorized", userAlreadyAuthorized.getMessage());
} |
// Resolves a classpath resource to a File on disk.
// Returns null when the resource is absent, is not backed by the local file
// system (protocol != "file", e.g. inside a JAR), or its URL cannot be
// converted to a URI.
public static File classPathToFile(String path) {
URL url = CLASS_LOADER.getResource(path);
if (url == null || !"file".equals(url.getProtocol())) {
return null;
}
try {
// Go through URI so percent-encoded characters (e.g. spaces) are decoded.
return Paths.get(url.toURI()).toFile();
} catch (URISyntaxException e) {
return null;
}
} | @Test
void testClassPathToFileThatDoesNotExist() {
File file = ResourceUtils.classPathToFile("com/intuit/karate/resource/nope.txt");
assertNull(file);
} |
// Catch-all attribute map; @JsonAnyGetter makes Jackson serialize each entry
// as a top-level JSON field of this object.
@JsonAnyGetter
public Map<String, Object> getAttributes() {
return this.attributes;
} | @Test
public void testAttributes() throws Exception {
Tag tag =
loadObject("fixtures/workflows/definition/sample-tag-with-attributes.json", Tag.class);
assertNotNull(tag.getAttributes());
assertEquals(4, tag.getAttributes().size());
assertEquals("maestro-dev", tag.getAttributes().get("creator"));
assertEquals("maestro-dev", tag.getAttributes().get("technical_support"));
assertEquals(1234L, tag.getAttributes().get("created_at_milli"));
} |
// Closes the journal context and the block deletion context in a fixed order,
// then surfaces any failure captured during the quiet closes.
@Override
public void close() throws UnavailableException {
// JournalContext is closed before block deletion context so that file system master changes
// are written before block master changes. If a failure occurs between deleting an inode and
// remove its blocks, it's better to have an orphaned block than an inode with a missing block.
closeQuietly(mJournalContext);
closeQuietly(mBlockDeletionContext);
// Rethrow a captured failure as the declared checked type when possible,
// otherwise wrap it in an unchecked exception.
if (mThrown != null) {
Throwables.propagateIfPossible(mThrown, UnavailableException.class);
throw new RuntimeException(mThrown);
}
} | @Test
public void blockDeletionContextThrows() throws Throwable {
Exception bdcException = new UnavailableException("block deletion context exception");
doThrow(bdcException).when(mMockBDC).close();
checkClose(bdcException);
} |
// This implementation carries no cost; always reports zero.
@Override
public long getCost() {
return 0L;
} | @Test
public void testGetCosts() {
assertEquals(0, record.getCost());
} |
// Thin delegation to the backing storage implementation.
// Presumably advances the iterator from `current` to the first element at or
// beyond `index` and returns the encoded new position — confirm against the
// storage implementation's contract.
public long advanceAtLeastTo(int index, int current, Iterator<E> iterator) {
return storage.advanceAtLeastTo(index, current, iterator);
} | @Test
public void testAdvanceAtLeastTo() {
SparseIntArray.Iterator<Integer> iterator = new SparseIntArray.Iterator<>();
// test empty array
verifyAdvanceAtLeastTo(iterator);
// try dense
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
set(i);
verifyAdvanceAtLeastTo(iterator);
}
// go sparse
for (int i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
set(i);
verifyAdvanceAtLeastTo(iterator);
}
// clear everything we have added
for (int i = 0; i < ARRAY_STORAGE_32_MAX_SPARSE_SIZE / 2; ++i) {
clear(i);
verifyAdvanceAtLeastTo(iterator);
}
for (int i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SPARSE_SIZE; ++i) {
clear(i);
verifyAdvanceAtLeastTo(iterator);
}
// test empty again
verifyAdvanceAtLeastTo(iterator);
// try gaps
for (int i = 0; i < 1000; ++i) {
set(i * i);
verifyAdvanceAtLeastTo(iterator);
}
// try larger gaps
for (int i = (int) Math.sqrt(Integer.MAX_VALUE) - 1000; i < (int) Math.sqrt(Integer.MAX_VALUE); ++i) {
set(i * i);
verifyAdvanceAtLeastTo(iterator);
}
// try some edge cases
for (int i = -2; i <= 2; ++i) {
set(i);
verifyAdvanceAtLeastTo(iterator);
}
for (int i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
set(i);
verifyAdvanceAtLeastTo(iterator);
}
for (int i = Short.MIN_VALUE - 2; i <= Short.MIN_VALUE + 2; ++i) {
set(i);
verifyAdvanceAtLeastTo(iterator);
}
for (long i = (long) Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
set((int) i);
verifyAdvanceAtLeastTo(iterator);
}
for (long i = (long) Integer.MIN_VALUE - 2; i <= (long) Integer.MIN_VALUE + 2; ++i) {
set((int) i);
verifyAdvanceAtLeastTo(iterator);
}
} |
// Parses raw cookie header strings into HttpCookie objects.
// Each input string may contain several ';'-separated name=value pairs;
// $-prefixed names ($Path, $Domain, $Port) are RFC 2965 attributes of the
// cookie that precedes them rather than cookies themselves.
// Returns an empty list for null input; null entries are skipped.
// @throws IllegalArgumentException if a token has no '=' separator
public static List<HttpCookie> decodeCookies(List<String> cookieStrs)
{
List<HttpCookie> cookies = new ArrayList<>();
if (cookieStrs == null)
{
return cookies;
}
for (String cookieStr : cookieStrs)
{
if (cookieStr == null)
{
continue;
}
StringTokenizer tokenizer = new StringTokenizer(cookieStr, ";");
String nameValuePair;
// Cookie currently being assembled; flushed when the next cookie starts
// and once more after the last token of this string.
HttpCookie cookieToBeAdd = null;
while (tokenizer.hasMoreTokens())
{
nameValuePair = tokenizer.nextToken();
int index = nameValuePair.indexOf('=');
if (index != -1)
{
String name = nameValuePair.substring(0, index).trim();
String value = stripOffSurrounding(nameValuePair.substring(index + 1).trim());
if (name.charAt(0) != '$')
{
// A new cookie begins: emit the previous one, if any.
if (cookieToBeAdd != null)
{
cookies.add(cookieToBeAdd);
}
cookieToBeAdd = new HttpCookie(name, value);
}
else if (cookieToBeAdd != null)
{
// $-attributes apply to the most recently started cookie.
if (name.equals("$Path"))
{
cookieToBeAdd.setPath(value);
}
else if (name.equals("$Domain"))
{
cookieToBeAdd.setDomain(value);
}
else if (name.equals("$Port"))
{
cookieToBeAdd.setPortlist(value);
}
}
}
else
{
throw new IllegalArgumentException("Invalid cookie name-value pair");
}
}
// Flush the last cookie of this string.
if (cookieToBeAdd != null)
{
cookies.add(cookieToBeAdd);
}
}
return cookies;
} | @Test
public void testDifferentCookieStringsCombination()
{
List<HttpCookie> cookies = Arrays.asList(cookieC, cookieD, cookieA, cookieB);
String combinedHeader = cookieC.toString() + ";" + cookieD.toString() + ";" + cookieA.toString();
String cookieBStr = cookieB.toString();
Assert.assertEquals(CookieUtil.decodeCookies(Arrays.asList(combinedHeader, cookieBStr)), cookies);
} |
// Right-pads `input` with repetitions of `padding` to exactly `targetLen`
// characters; the result is clipped to `targetLen`, so an over-long input or
// an overshooting final padding copy is truncated.
// Returns null when input is null, padding is null/empty, or targetLen is
// null or negative.
@Udf
public String rpad(
@UdfParameter(description = "String to be padded") final String input,
@UdfParameter(description = "Target length") final Integer targetLen,
@UdfParameter(description = "Padding string") final String padding) {
if (input == null) {
return null;
}
if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
return null;
}
final StringBuilder sb = new StringBuilder(targetLen + padding.length());
sb.append(input);
// Append whole copies of the padding until the target length is covered...
final int padChars = Math.max(targetLen - input.length(), 0);
for (int i = 0; i < padChars; i += padding.length()) {
sb.append(padding);
}
// ...then clip to the exact requested length.
sb.setLength(targetLen);
return sb.toString();
} | @Test
public void shouldReturnNullForNullLengthString() {
final String result = udf.rpad("foo", null, "bar");
assertThat(result, is(nullValue()));
} |
// Delegates fetch-size configuration to the wrapped setter implementation.
@Override
public void setFetchSize(final Statement statement) throws SQLException {
delegated.setFetchSize(statement);
} | @Test
void assertSetFetchSize() throws SQLException {
Statement statement = mock(Statement.class);
ContextManager contextManager = mockContextManager();
when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
new OpenGaussStatementMemoryStrictlyFetchSizeSetter().setFetchSize(statement);
verify(statement).setFetchSize(1);
} |
// Persists the index set configuration and broadcasts an IndexSetCreatedEvent
// on the cluster event bus so other nodes learn about it.
// NOTE(review): the "created" event is posted on every save, which would
// include updates of an existing config — confirm that is intended.
@Override
public IndexSetConfig save(IndexSetConfig indexSetConfig) {
final WriteResult<IndexSetConfig, ObjectId> writeResult = collection.save(indexSetConfig);
final IndexSetConfig savedObject = writeResult.getSavedObject();
final IndexSetCreatedEvent createdEvent = IndexSetCreatedEvent.create(savedObject);
clusterEventBus.post(createdEvent);
return savedObject;
} | @Test
public void save() throws Exception {
final IndexSetCreatedSubscriber subscriber = new IndexSetCreatedSubscriber();
clusterEventBus.registerClusterEventSubscriber(subscriber);
final IndexSetConfig indexSetConfig = IndexSetConfig.create(
"Test 3",
null,
true, true,
"test_3",
10,
0,
MessageCountRotationStrategy.class.getCanonicalName(),
MessageCountRotationStrategyConfig.create(10000),
NoopRetentionStrategy.class.getCanonicalName(),
NoopRetentionStrategyConfig.create(5),
ZonedDateTime.of(2016, 10, 4, 12, 0, 0, 0, ZoneOffset.UTC),
"standard",
"index-template",
EVENT_TEMPLATE_TYPE,
1,
false
);
final IndexSetConfig savedIndexSetConfig = indexSetService.save(indexSetConfig);
final Optional<IndexSetConfig> retrievedIndexSetConfig = indexSetService.get(savedIndexSetConfig.id());
assertThat(retrievedIndexSetConfig)
.isPresent()
.contains(savedIndexSetConfig);
assertThat(subscriber.getEvents())
.hasSize(1)
.containsExactly(IndexSetCreatedEvent.create(savedIndexSetConfig));
} |
// Time/date function list is not provided by this metadata implementation.
@Override
public String getTimeDateFunctions() {
return null;
} | @Test
void assertGetTimeDateFunctions() {
assertNull(metaData.getTimeDateFunctions());
} |
// Entry point of the fluent JSON assertion API: parses the given JSON string
// and wraps the parsed document in a JsonAsserter.
public static JsonAsserter with(String json) {
return new JsonAsserterImpl(JsonPath.parse(json).json());
} | @Test
public void assert_that_invalid_path_is_thrown() {
JsonAsserter asserter = JsonAssert.with("{\"foo\":\"bar\"}");
assertThrows(AssertionError.class, () -> asserter.assertEquals("$foo", "bar"));
} |
// Issues the initial fetch (no continuation position) for an ORDERED_LIST
// state tag limited to the given sorted-list range, returning a future whose
// iterable transparently follows continuation pages.
public <T> Future<Iterable<TimestampedValue<T>>> orderedListFuture(
Range<Long> range, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
// First request has no continuation position.
StateTag<ByteString> stateTag =
StateTag.<ByteString>of(StateTag.Kind.ORDERED_LIST, encodedTag, stateFamily)
.toBuilder()
.setSortedListRange(Preconditions.checkNotNull(range))
.build();
return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
} | @Test
public void testReadSortedListWithContinuations() throws Exception {
long beginning = SortedListRange.getDefaultInstance().getStart();
long end = SortedListRange.getDefaultInstance().getLimit();
Future<Iterable<TimestampedValue<Integer>>> future =
underTest.orderedListFuture(
Range.closedOpen(beginning, end), STATE_KEY_1, STATE_FAMILY, INT_CODER);
Mockito.verifyNoMoreInteractions(mockWindmill);
Windmill.KeyedGetDataRequest.Builder expectedRequest1 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES));
final ByteString CONT_1 = ByteString.copyFrom("CONTINUATION_1", StandardCharsets.UTF_8);
final ByteString CONT_2 = ByteString.copyFrom("CONTINUATION_2", StandardCharsets.UTF_8);
Windmill.KeyedGetDataResponse.Builder response1 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(1)).setSortKey(1000).setId(1))
.setContinuationPosition(CONT_1)
.addFetchRanges(
SortedListRange.newBuilder().setStart(beginning).setLimit(end)));
Windmill.KeyedGetDataRequest.Builder expectedRequest2 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
.setRequestPosition(CONT_1)
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES));
Windmill.KeyedGetDataResponse.Builder response2 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(2)).setSortKey(2000).setId(2))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(3)).setSortKey(3000).setId(3))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(4)).setSortKey(4000).setId(4))
.setContinuationPosition(CONT_2)
.addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
.setRequestPosition(CONT_1));
Windmill.KeyedGetDataRequest.Builder expectedRequest3 =
Windmill.KeyedGetDataRequest.newBuilder()
.setKey(DATA_KEY)
.setShardingKey(SHARDING_KEY)
.setWorkToken(WORK_TOKEN)
.setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
.addSortedListsToFetch(
Windmill.TagSortedListFetchRequest.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
.setRequestPosition(CONT_2)
.setFetchMaxBytes(WindmillStateReader.MAX_ORDERED_LIST_BYTES));
Windmill.KeyedGetDataResponse.Builder response3 =
Windmill.KeyedGetDataResponse.newBuilder()
.setKey(DATA_KEY)
.addTagSortedLists(
Windmill.TagSortedListFetchResponse.newBuilder()
.setTag(STATE_KEY_1)
.setStateFamily(STATE_FAMILY)
.addEntries(
SortedListEntry.newBuilder().setValue(intData(5)).setSortKey(5000).setId(5))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(6)).setSortKey(6000).setId(7))
.addEntries(
SortedListEntry.newBuilder().setValue(intData(7)).setSortKey(7000).setId(7))
.addFetchRanges(SortedListRange.newBuilder().setStart(beginning).setLimit(end))
.setRequestPosition(CONT_2));
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest1.build()))
.thenReturn(response1.build());
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest2.build()))
.thenReturn(response2.build());
Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest3.build()))
.thenReturn(response3.build());
Iterable<TimestampedValue<Integer>> results = future.get();
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest1.build());
for (TimestampedValue<Integer> unused : results) {
// Iterate over the results to force loading all the pages.
}
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest2.build());
Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest3.build());
Mockito.verifyNoMoreInteractions(mockWindmill);
assertThat(
results,
Matchers.contains(
TimestampedValue.of(1, Instant.ofEpochMilli(1)),
TimestampedValue.of(2, Instant.ofEpochMilli(2)),
TimestampedValue.of(3, Instant.ofEpochMilli(3)),
TimestampedValue.of(4, Instant.ofEpochMilli(4)),
TimestampedValue.of(5, Instant.ofEpochMilli(5)),
TimestampedValue.of(6, Instant.ofEpochMilli(6)),
TimestampedValue.of(7, Instant.ofEpochMilli(7))));
// NOTE: The future will still contain a reference to the underlying reader , thus not calling
// assertNoReader(future).
} |
// Runs the configured on-cancel task, writing start/pass/fail markers to the
// build console. Failures of the cancel task are logged and swallowed so the
// cancellation flow itself never fails.
public void cancel(DefaultGoPublisher publisher, EnvironmentVariableContext environmentVariableContext, TaskExtension taskExtension, ArtifactExtension artifactExtension, Charset consoleLogCharset) {
publisher.taggedConsumeLineWithPrefix(DefaultGoPublisher.CANCEL_TASK_START, "On Cancel Task: " + cancelBuilder.getDescription()); // odd capitalization, but consistent with UI
try {
cancelBuilder.build(publisher, environmentVariableContext, taskExtension, artifactExtension, null, consoleLogCharset);
// As this message will output before the running task outputs its task status, do not use the same
// wording (i.e. "Task status: %s") as the order of outputted lines may be confusing
publisher.taggedConsumeLineWithPrefix(DefaultGoPublisher.CANCEL_TASK_PASS, "On Cancel Task completed");
} catch (Exception e) {
publisher.taggedConsumeLineWithPrefix(DefaultGoPublisher.CANCEL_TASK_FAIL, "On Cancel Task failed");
LOGGER.error("", e);
}
} | @Test
void shouldLogToConsoleOutWhenCanceling() {
StubBuilder stubBuilder = new StubBuilder();
CommandBuilder builder = new CommandBuilder("echo", "", new File("."), new RunIfConfigs(FAILED), stubBuilder,
"");
builder.cancel(goPublisher, environmentVariableContext, null, null, UTF_8);
assertThat(goPublisher.getMessage()).contains("On Cancel Task");
assertThat(goPublisher.getMessage()).contains("On Cancel Task completed");
} |
// Loads the tenant package by id and validates that it is usable:
// throws TENANT_PACKAGE_NOT_EXISTS when absent, and TENANT_PACKAGE_DISABLE
// (carrying the package name) when its status is disabled.
@Override
public TenantPackageDO validTenantPackage(Long id) {
TenantPackageDO tenantPackage = tenantPackageMapper.selectById(id);
if (tenantPackage == null) {
throw exception(TENANT_PACKAGE_NOT_EXISTS);
}
if (tenantPackage.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
throw exception(TENANT_PACKAGE_DISABLE, tenantPackage.getName());
}
return tenantPackage;
} | @Test
public void testValidTenantPackage_disable() {
// mock 数据
TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class,
o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
tenantPackageMapper.insert(dbTenantPackage);// @Sql: 先插入出一条存在的数据
// 调用, 并断言异常
assertServiceException(() -> tenantPackageService.validTenantPackage(dbTenantPackage.getId()),
TENANT_PACKAGE_DISABLE, dbTenantPackage.getName());
} |
// Convenience overload: parses a (possibly password-protected) private key
// file, passing true as the third argument of the full overload — see that
// overload for the flag's meaning (not visible here; confirm before relying
// on it).
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
NoSuchPaddingException, InvalidKeySpecException,
InvalidAlgorithmParameterException,
KeyException, IOException {
return toPrivateKey(keyFile, keyPassword, true);
} | @Test
public void testPkcs1Des3EncryptedRsaEmptyPassword() throws Exception {
assertThrows(IOException.class, new Executable() {
@Override
public void execute() throws Throwable {
SslContext.toPrivateKey(new File(getClass().getResource("rsa_pkcs1_des3_encrypted.key")
.getFile()), "");
}
});
} |
// Loads a java.util.Properties file from the given filesystem path.
// The input stream is closed via try-with-resources; IOException propagates
// to the caller.
public static Properties readPropertiesFile(String path) throws IOException {
Properties props = new Properties();
try (InputStream propStream = Files.newInputStream(Paths.get(path))) {
props.load(propStream);
}
return props;
} | @Test
public void testReadPropertiesFile() throws IOException {
File tempFile = TestUtils.tempFile();
try {
String testContent = "a=1\nb=2\n#a comment\n\nc=3\nd=";
Files.write(tempFile.toPath(), testContent.getBytes());
Properties props = PropertiesUtils.readPropertiesFile(tempFile.getAbsolutePath());
assertEquals(4, props.size());
assertEquals("1", props.get("a"));
assertEquals("2", props.get("b"));
assertEquals("3", props.get("c"));
assertEquals("", props.get("d"));
} finally {
Files.deleteIfExists(tempFile.toPath());
}
} |
// Recursively deletes a file or directory tree. When ignoreFailures is false,
// a failed File.delete() is retried via Files.delete() so that the resulting
// descriptive IOException is rethrown unchecked; when true, failures are
// silently skipped. A non-existent file is a no-op.
public static void delete(final File file, final boolean ignoreFailures)
{
if (file.exists())
{
if (file.isDirectory())
{
final File[] files = file.listFiles();
// listFiles() returns null on I/O error; treat that as an empty directory.
if (null != files)
{
for (final File f : files)
{
delete(f, ignoreFailures);
}
}
}
if (!file.delete() && !ignoreFailures)
{
try
{
// Retry with NIO to obtain a descriptive IOException.
Files.delete(file.toPath());
}
catch (final IOException ex)
{
LangUtil.rethrowUnchecked(ex);
}
}
}
} | @Test
void deleteErrorHandlerIgnoreFailuresNonExistingFile()
{
final ErrorHandler errorHandler = mock(ErrorHandler.class);
final File file = tempDir.resolve("shadow-file").toFile();
IoUtil.delete(file, errorHandler);
assertFalse(file.exists());
verifyNoInteractions(errorHandler);
} |
// Looks up a single MQTT property by its numeric id.
// USER_PROPERTY and SUBSCRIPTION_IDENTIFIER are stored in dedicated lists and
// need special handling; everything else comes from the generic map.
// Returns null when the property is not present.
public MqttProperty getProperty(int propertyId) {
if (propertyId == MqttPropertyType.USER_PROPERTY.value) {
//special handling to keep compatibility with earlier versions
List<UserProperty> userProperties = this.userProperties;
if (userProperties == null) {
return null;
}
return UserProperties.fromUserPropertyCollection(userProperties);
}
if (propertyId == MqttPropertyType.SUBSCRIPTION_IDENTIFIER.value) {
List<IntegerProperty> subscriptionIds = this.subscriptionIds;
if (subscriptionIds == null || subscriptionIds.isEmpty()) {
return null;
}
// Only the first subscription identifier is exposed via this accessor.
return subscriptionIds.get(0);
}
// Read the field once into a local so the null check and the get use the
// same reference.
IntObjectHashMap<MqttProperty> props = this.props;
return props == null ? null : props.get(propertyId);
} | @Test
public void testGetProperty() {
MqttProperties props = createSampleProperties();
assertEquals(
"text/plain",
((MqttProperties.StringProperty) props.getProperty(CONTENT_TYPE.value())).value);
assertEquals(
10,
((MqttProperties.IntegerProperty) props.getProperty(SUBSCRIPTION_IDENTIFIER.value())).value.intValue());
List<MqttProperties.StringPair> expectedUserProps = new ArrayList<MqttProperties.StringPair>();
expectedUserProps.add(new MqttProperties.StringPair("isSecret", "true"));
expectedUserProps.add(new MqttProperties.StringPair("tag", "firstTag"));
expectedUserProps.add(new MqttProperties.StringPair("tag", "secondTag"));
List<MqttProperties.StringPair> actualUserProps =
((MqttProperties.UserProperties) props.getProperty(USER_PROPERTY.value())).value;
assertEquals(expectedUserProps, actualUserProps);
} |
// Creates a new AI video configuration and evicts the per-type cache entry
// (key = request type) so subsequent reads see fresh data.
@Override
@CacheEvict(cacheNames = "ai:video:config", key = "#createReqVO.type")
public Long createAiVideoConfig(AiVideoConfigCreateReqVO createReqVO) {
// Convert the request VO and insert it.
AiVideoConfigDO aiVideoConfig = AiVideoConfigConvert.INSTANCE.convert(createReqVO);
aiVideoConfigMapper.insert(aiVideoConfig);
// Return the database-generated id.
return aiVideoConfig.getId();
} | @Test
public void testCreateAiVideoConfig_success() {
// 准备参数
AiVideoConfigCreateReqVO reqVO = randomPojo(AiVideoConfigCreateReqVO.class);
// 调用
Long aiVideoConfigId = aiVideoConfigService.createAiVideoConfig(reqVO);
// 断言
assertNotNull(aiVideoConfigId);
// 校验记录的属性是否正确
AiVideoConfigDO aiVideoConfig = aiVideoConfigMapper.selectById(aiVideoConfigId);
assertPojoEquals(reqVO, aiVideoConfig);
} |
// Registers `receiver` as the consumer of inbound data for the given
// instruction id on every provided data service endpoint (one multiplexer
// client per endpoint descriptor).
@Override
public void registerReceiver(
String instructionId,
List<ApiServiceDescriptor> apiServiceDescriptors,
CloseableFnDataReceiver<Elements> receiver) {
LOG.debug("Registering consumer for {}", instructionId);
for (int i = 0, size = apiServiceDescriptors.size(); i < size; i++) {
BeamFnDataGrpcMultiplexer client = getClientFor(apiServiceDescriptors.get(i));
client.registerConsumer(instructionId, receiver);
}
} | @Test
public void testForInboundConsumer() throws Exception {
CountDownLatch waitForClientToConnect = new CountDownLatch(1);
Collection<WindowedValue<String>> inboundValuesA = new ConcurrentLinkedQueue<>();
Collection<WindowedValue<String>> inboundValuesB = new ConcurrentLinkedQueue<>();
Collection<BeamFnApi.Elements> inboundServerValues = new ConcurrentLinkedQueue<>();
AtomicReference<StreamObserver<BeamFnApi.Elements>> outboundServerObserver =
new AtomicReference<>();
CallStreamObserver<BeamFnApi.Elements> inboundServerObserver =
TestStreams.withOnNext(inboundServerValues::add).build();
Endpoints.ApiServiceDescriptor apiServiceDescriptor =
Endpoints.ApiServiceDescriptor.newBuilder()
.setUrl(this.getClass().getName() + "-" + UUID.randomUUID())
.build();
Server server =
InProcessServerBuilder.forName(apiServiceDescriptor.getUrl())
.addService(
new BeamFnDataGrpc.BeamFnDataImplBase() {
@Override
public StreamObserver<BeamFnApi.Elements> data(
StreamObserver<BeamFnApi.Elements> outboundObserver) {
outboundServerObserver.set(outboundObserver);
waitForClientToConnect.countDown();
return inboundServerObserver;
}
})
.build();
server.start();
try {
ManagedChannel channel =
InProcessChannelBuilder.forName(apiServiceDescriptor.getUrl()).build();
BeamFnDataGrpcClient clientFactory =
new BeamFnDataGrpcClient(
PipelineOptionsFactory.create(),
(Endpoints.ApiServiceDescriptor descriptor) -> channel,
OutboundObserverFactory.trivial());
BeamFnDataInboundObserver observerA =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(DataEndpoint.create(TRANSFORM_ID_A, CODER, inboundValuesA::add)),
Collections.emptyList());
BeamFnDataInboundObserver observerB =
BeamFnDataInboundObserver.forConsumers(
Arrays.asList(DataEndpoint.create(TRANSFORM_ID_B, CODER, inboundValuesB::add)),
Collections.emptyList());
clientFactory.registerReceiver(
INSTRUCTION_ID_A, Arrays.asList(apiServiceDescriptor), observerA);
waitForClientToConnect.await();
outboundServerObserver.get().onNext(ELEMENTS_A_1);
// Purposefully transmit some data before the consumer for B is bound showing that
// data is not lost
outboundServerObserver.get().onNext(ELEMENTS_B_1);
Thread.sleep(100);
clientFactory.registerReceiver(
INSTRUCTION_ID_B, Arrays.asList(apiServiceDescriptor), observerB);
// Show that out of order stream completion can occur.
observerB.awaitCompletion();
assertThat(inboundValuesB, contains(valueInGlobalWindow("JKL"), valueInGlobalWindow("MNO")));
outboundServerObserver.get().onNext(ELEMENTS_A_2);
observerA.awaitCompletion();
assertThat(
inboundValuesA,
contains(
valueInGlobalWindow("ABC"), valueInGlobalWindow("DEF"), valueInGlobalWindow("GHI")));
} finally {
server.shutdownNow();
}
} |
// Updates the in-memory offset for the given queue. The entry is initialized
// race-free via putIfAbsent; when increaseOnly is set, the stored offset is
// only ever advanced, never moved backwards. A null queue is a no-op.
@Override
public void updateOffset(MessageQueue mq, long offset, boolean increaseOnly) {
if (mq != null) {
ControllableOffset offsetOld = this.offsetTable.get(mq);
if (null == offsetOld) {
// putIfAbsent returns the previously mapped value (null when our new
// entry won the race), so offsetOld stays null if we just stored it.
offsetOld = this.offsetTable.putIfAbsent(mq, new ControllableOffset(offset));
}
if (null != offsetOld) {
if (increaseOnly) {
offsetOld.update(offset, true);
} else {
offsetOld.update(offset);
}
}
}
} | @Test
public void testUpdateOffset() throws Exception {
OffsetStore offsetStore = new LocalFileOffsetStore(mQClientFactory, group);
MessageQueue messageQueue = new MessageQueue(topic, brokerName, 1);
offsetStore.updateOffset(messageQueue, 1024, false);
assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1024);
offsetStore.updateOffset(messageQueue, 1023, false);
assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1023);
offsetStore.updateOffset(messageQueue, 1022, true);
assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1023);
} |
// Marshals the message body to Protobuf binary using the schema carried on
// the exchange. When a content class is resolvable the body is serialized as
// that POJO type; otherwise the body is first converted to a generic
// JsonNode. Sets the marshalled bytes as body and updates the content-type
// and content-schema headers.
// @throws CamelExecutionException when the schema is missing or marshalling fails
@Override
public void transform(Message message, DataType fromType, DataType toType) {
ProtobufSchema schema = message.getExchange().getProperty(SchemaHelper.CONTENT_SCHEMA, ProtobufSchema.class);
if (schema == null) {
throw new CamelExecutionException("Missing proper Protobuf schema for data type processing", message.getExchange());
}
try {
byte[] marshalled;
String contentClass = SchemaHelper.resolveContentClass(message.getExchange(), null);
if (contentClass != null) {
Class<?> contentType
= message.getExchange().getContext().getClassResolver().resolveMandatoryClass(contentClass);
marshalled = Protobuf.mapper().writer().forType(contentType).with(schema)
.writeValueAsBytes(message.getBody());
} else {
// No explicit content class: go through a generic JSON tree.
marshalled = Protobuf.mapper().writer().forType(JsonNode.class).with(schema)
.writeValueAsBytes(getBodyAsJsonNode(message, schema));
}
message.setBody(marshalled);
message.setHeader(Exchange.CONTENT_TYPE, MimeType.PROTOBUF_BINARY.type());
message.setHeader(SchemaHelper.CONTENT_SCHEMA, schema.getSource().toString());
} catch (InvalidPayloadException | IOException | ClassNotFoundException e) {
throw new CamelExecutionException(
"Failed to apply Protobuf binary data type on exchange", message.getExchange(), e);
}
} | @Test
void shouldHandlePojo() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
ProtobufSchema protobufSchema
= Protobuf.mapper().schemaLoader()
.load(ProtobufBinaryDataTypeTransformerTest.class.getResourceAsStream("Person.proto"));
exchange.setProperty(SchemaHelper.CONTENT_SCHEMA, protobufSchema);
exchange.getMessage().setBody(new Person("Mickey", 20));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
JSONAssert.assertEquals("""
{"name":"Mickey","age":20}
""", Json.mapper().writeValueAsString(
Protobuf.mapper().reader().with(protobufSchema).readTree(exchange.getMessage().getBody(byte[].class))), true);
} |
// Checks whether this app's IME is the device-wide default input method by
// reading the DEFAULT_INPUT_METHOD secure setting and comparing it (via the
// two-arg overload) against this package's name.
public static boolean isThisKeyboardSetAsDefaultIME(Context context) {
final String defaultIME =
Settings.Secure.getString(
context.getContentResolver(), Settings.Secure.DEFAULT_INPUT_METHOD);
return isThisKeyboardSetAsDefaultIME(defaultIME, context.getPackageName());
} | @Test
public void testIsThisKeyboardSetAsDefaultIME() throws Exception {
final String MY_IME_PACKAGE = "net.evendanan.ime";
assertFalse(
SetupSupport.isThisKeyboardSetAsDefaultIME(
new ComponentName("net.some.one.else", "net.some.one.else.IME").flattenToString(),
MY_IME_PACKAGE));
assertFalse(
SetupSupport.isThisKeyboardSetAsDefaultIME(
new ComponentName("net.some.one.else", "net.some.other.IME").flattenToString(),
MY_IME_PACKAGE));
assertFalse(
SetupSupport.isThisKeyboardSetAsDefaultIME(
new ComponentName("net.some.one.else", ".IME").flattenToString(), MY_IME_PACKAGE));
assertFalse(SetupSupport.isThisKeyboardSetAsDefaultIME(null, MY_IME_PACKAGE));
assertTrue(
SetupSupport.isThisKeyboardSetAsDefaultIME(
new ComponentName(MY_IME_PACKAGE, MY_IME_PACKAGE + ".IME").flattenToString(),
MY_IME_PACKAGE));
assertTrue(
SetupSupport.isThisKeyboardSetAsDefaultIME(
new ComponentName(MY_IME_PACKAGE, "net.some.other.IME").flattenToString(),
MY_IME_PACKAGE));
assertTrue(
SetupSupport.isThisKeyboardSetAsDefaultIME(
new ComponentName(MY_IME_PACKAGE, ".IME").flattenToString(), MY_IME_PACKAGE));
} |
// Converts the message into a CloudEvents v1.0 JSON envelope:
// CloudEvent attributes found in the headers are copied into the envelope,
// missing mandatory attributes are defaulted (id from the exchange id,
// type/source from Camel defaults, time from the exchange), the body becomes
// the "data" field, the content type becomes "application/cloudevents+json",
// and the consumed attribute headers are removed from the message.
@Override
public void transform(Message message, DataType fromType, DataType toType) {
final Map<String, Object> headers = message.getHeaders();
Map<String, Object> cloudEventAttributes = new HashMap<>();
CloudEvent cloudEvent = CloudEvents.v1_0;
// Copy any explicitly provided CloudEvent attributes from the headers.
for (CloudEvent.Attribute attribute : cloudEvent.attributes()) {
if (headers.containsKey(attribute.id())) {
cloudEventAttributes.put(attribute.json(), headers.get(attribute.id()));
}
}
// Fill in mandatory attributes that were not supplied.
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_VERSION).json(),
cloudEvent.version());
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).json(),
message.getExchange().getExchangeId());
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).json(),
CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_TYPE);
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).json(),
CloudEvent.DEFAULT_CAMEL_CLOUD_EVENT_SOURCE);
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TIME).json(),
cloudEvent.getEventTime(message.getExchange()));
String body = MessageHelper.extractBodyAsString(message);
cloudEventAttributes.putIfAbsent("data", body);
cloudEventAttributes.putIfAbsent(cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE).json(),
headers.getOrDefault(CloudEvent.CAMEL_CLOUD_EVENT_CONTENT_TYPE, "application/json"));
headers.put(Exchange.CONTENT_TYPE, "application/cloudevents+json");
message.setBody(createCouldEventJsonObject(cloudEventAttributes));
// Attribute headers have been folded into the body; drop them.
cloudEvent.attributes().stream().map(CloudEvent.Attribute::id).forEach(headers::remove);
} | @Test
void shouldMapToJsonCloudEventFormat() throws Exception {
Exchange exchange = new DefaultExchange(camelContext);
exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT, "test1.txt");
exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_TYPE, "org.apache.camel.event.test");
exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE, "org.apache.camel.test");
exchange.getMessage().setHeader(CloudEvent.CAMEL_CLOUD_EVENT_CONTENT_TYPE, "text/plain");
exchange.getMessage().setBody(new ByteArrayInputStream("Test1".getBytes(StandardCharsets.UTF_8)));
transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY);
CloudEvent cloudEvent = CloudEvents.v1_0;
assertTrue(exchange.getMessage().hasHeaders());
assertEquals("application/cloudevents+json", exchange.getMessage().getHeader(Exchange.CONTENT_TYPE));
assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"%s\"",
cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_ID).json(), exchange.getExchangeId())));
assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"org.apache.camel.event.test\"",
cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_TYPE).json())));
assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"org.apache.camel.test\"",
cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE).json())));
assertTrue(exchange.getMessage().getBody(String.class).contains(String.format("\"%s\":\"text/plain\"",
cloudEvent.mandatoryAttribute(CloudEvent.CAMEL_CLOUD_EVENT_DATA_CONTENT_TYPE).json())));
assertTrue(exchange.getMessage().getBody(String.class).contains("\"data\":\"Test1\""));
assertNull(exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_TYPE));
assertNull(exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE));
assertNull(exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT));
} |
// Reports whether the given boxed Float value is NaN (delegates to Float.isNaN()).
@Override
protected boolean isNan(Float number) {
return number.isNaN();
} | @Test
// Verifies isNan(): false for ordinary and boundary float values, true only for Float.NaN.
void testIsNan() {
FloatSummaryAggregator ag = new FloatSummaryAggregator();
assertThat(ag.isNan(-1.0f)).isFalse();
assertThat(ag.isNan(0.0f)).isFalse();
assertThat(ag.isNan(23.0f)).isFalse();
assertThat(ag.isNan(Float.MAX_VALUE)).isFalse();
assertThat(ag.isNan(Float.MIN_VALUE)).isFalse();
assertThat(ag.isNan(Float.NaN)).isTrue();
}
// Computes the row-count flush threshold for a new consuming segment. Uses the
// committing segment's observed rows/size ratio (exponentially smoothed into
// _latestSegmentRowsToSizeRatio), the configured desired segment size, and
// whether the previous segment flushed because of the time limit or the row limit.
// Returns the target number of rows for the new segment.
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
@Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
if (desiredSegmentSizeBytes <= 0) {
desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
}
// Acceptable band around the desired size: [desired/2, desired*1.5].
long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;
if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
_latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
} else {
final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment zk metadata is not available, setting threshold for {} as {}", newSegmentName,
autotuneInitialRows);
return autotuneInitialRows;
}
}
final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
// Size unavailable (repair) or force-commit: carry over the previous threshold unchanged.
if (committingSegmentSizeBytes <= 0 // repair segment case
|| SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
committingSegmentDescriptor.getStopReason())) {
String reason = committingSegmentSizeBytes <= 0 //
? "Committing segment size is not available" //
: "Committing segment is due to force-commit";
final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
reason, newSegmentName, targetNumRows);
return targetNumRows;
}
final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
committingSegmentSizeBytes);
double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
// Exponentially-weighted update of the rows-per-byte ratio.
if (_latestSegmentRowsToSizeRatio > 0) {
_latestSegmentRowsToSizeRatio =
CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
} else {
_latestSegmentRowsToSizeRatio = currentRatio;
}
// If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
// We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
// to hit the row limit next time around.
//
// If the size of the committing segment is higher than the desired segment size, then the administrator has
// set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
// limit.
//
// TODO: add feature to adjust time threshold as well
// If we set new threshold to be numRowsConsumed, we might keep oscillating back and forth between doubling limit
// and time threshold being hit If we set new threshold to be committingSegmentZKMetadata
// .getSizeThresholdToFlushSegment(),
// we might end up using a lot more memory than required for the segment Using a minor bump strategy, until
// we add feature to adjust time We will only slightly bump the threshold based on numRowsConsumed
if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
long currentNumRows = numRowsConsumed;
StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
if (timeThresholdMillis < timeConsumed) {
// The administrator has reduced the time threshold. Adjust the
// number of rows to match the average consumption rate on the partition.
currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
.append(". ");
}
long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
logStringBuilder.append("Setting segment size for {} as {}");
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(),
newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
}
// Row limit was hit: nudge the target up/down depending on where the resulting
// segment size landed relative to the acceptable band.
long targetSegmentNumRows;
if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
} else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
targetSegmentNumRows = numRowsConsumed / 2;
} else {
if (_latestSegmentRowsToSizeRatio > 0) {
targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
} else {
targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
}
}
targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
"Committing segment size {}, current ratio {}, setting threshold for {} as {}",
committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
return (int) targetSegmentNumRows;
} | @Test
// First segment of a new partition group with a known prior ratio: the threshold
// must be desiredSegmentSize * ratio (20000 * 1.5 = 30000).
public void testUseLastSegmentSizeTimesRatioIfFirstSegmentInPartitionAndNewPartitionGroup() {
double segmentRowsToSizeRatio = 1.5;
long segmentSizeBytes = 20000L;
SegmentFlushThresholdComputer computer =
new SegmentFlushThresholdComputer(Clock.systemUTC(), segmentRowsToSizeRatio);
StreamConfig streamConfig = mock(StreamConfig.class);
when(streamConfig.getFlushThresholdSegmentSizeBytes()).thenReturn(segmentSizeBytes);
CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor, null, "newSegmentName");
// segmentSize * 1.5
// 20000 * 1.5
assertEquals(threshold, 30000);
}
// Batch-deletes files, grouping node IDs per parent folder and splitting regular
// files from already-trashed (duplicate) versions, which use a different API call.
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final Map<Path, List<Long>> regular = new HashMap<>();
final Map<Path, List<Long>> trashed = new HashMap<>();
for(Path file : files.keySet()) {
// Duplicates (previous versions) live in the trash and need the deleted-nodes API.
final Map<Path, List<Long>> set = file.attributes().isDuplicate() ? trashed : regular;
if(set.containsKey(file.getParent())) {
set.get(file.getParent()).add(Long.parseLong(nodeid.getVersionId(file)));
}
else {
final List<Long> nodes = new ArrayList<>();
nodes.add(Long.parseLong(nodeid.getVersionId(file)));
set.put(file.getParent(), nodes);
}
callback.delete(file);
// Invalidate the cached node id for the removed file.
nodeid.cache(file, null);
}
for(List<Long> nodes : regular.values()) {
try {
new NodesApi(session.getClient()).removeNodes(new DeleteNodesRequest().nodeIds(nodes), StringUtils.EMPTY);
}
catch(ApiException e) {
switch(e.getCode()) {
case 400:
// NOTE(review): falls back to deleting files one-by-one via SDSDeleteFeature
// with the FULL original file map, not just this batch — confirm intended.
log.warn(String.format("Ignore failure %s", e));
new SDSDeleteFeature(session, nodeid).delete(files, prompt, callback);
break;
default:
throw new SDSExceptionMappingService(nodeid).map("Cannot delete {0}", e, files.keySet().iterator().next());
}
}
}
for(List<Long> nodes : trashed.values()) {
try {
new NodesApi(session.getClient()).removeDeletedNodes(new DeleteDeletedNodesRequest().deletedNodeIds(nodes), StringUtils.EMPTY);
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map("Cannot delete {0}", e, files.keySet().iterator().next());
}
}
} | @Test
// Creates a room with two files, batch-deletes the files and verifies they are gone.
public void testDeleteFiles() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path file1 = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Path file2 = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new SDSTouchFeature(session, nodeid).touch(file1, new TransferStatus());
new SDSTouchFeature(session, nodeid).touch(file2, new TransferStatus());
new SDSBatchDeleteFeature(session, nodeid).delete(Arrays.asList(file1, file2), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new SDSFindFeature(session, nodeid).find(file1));
assertFalse(new SDSFindFeature(session, nodeid).find(file2));
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// Wraps the core smoothing cleaner so that any exception thrown while cleaning a
// track is caught and the offending track is written (as raw NOP) to the
// "trackCleaningExceptions" directory instead of aborting the whole run.
public static DataCleaner<Track<NopHit>> simpleSmoothing() {
NopEncoder nopEncoder = new NopEncoder();
DataCleaner<Track<NopHit>> cleaner = coreSmoothing();
ToStringFunction<Track<NopHit>> toString = track -> nopEncoder.asRawNop(track);
ExceptionHandler exceptionHandler = new SequentialFileWriter("trackCleaningExceptions");
return new ExceptionCatchingCleaner<>(cleaner, toString, exceptionHandler);
} | @Test
// A mirage track (stationary false radar returns) must be rejected by the filter.
public void trackSmoothingShouldRemoveMirages() {
// This garbage track is generated by fake radar returns off of radio towers and skyscrapers.
Track<NopHit> trackFromDataThatDoesntMove = erroneousTrackFromRadarMirage();
DataCleaner<Track<NopHit>> basicSmoothing = TrackSmoothing.simpleSmoothing();
Optional<Track<NopHit>> result = basicSmoothing.clean(trackFromDataThatDoesntMove);
assertFalse(result.isPresent(), "This track should not make it through the filter");
}
// Re-quotes a (possibly dotted, possibly already-quoted) external name part by part:
// splits on non-quoted dots, strips existing quoting/escaping from each part, then
// lets the dialect apply canonical identifier quoting, re-joining parts with ".".
static void quoteExternalName(StringBuilder sb, String externalName) {
List<String> parts = splitByNonQuotedDots(externalName);
for (int i = 0; i < parts.size(); i++) {
String unescaped = unescapeQuotes(parts.get(i));
String unquoted = unquoteIfQuoted(unescaped);
DIALECT.quoteIdentifier(sb, unquoted);
if (i < parts.size() - 1) {
sb.append(".");
}
}
} | @Test
// A quoted part containing a dot must stay one identifier: both parts get quoted.
public void quoteExternalName_with_quotes_and_dots() {
String externalName = "custom_schema.\"table.with_dot\"";
StringBuilder sb = new StringBuilder();
MappingHelper.quoteExternalName(sb, externalName);
assertThat(sb.toString()).isEqualTo("\"custom_schema\".\"table.with_dot\"");
}
// Returns up to 'count' keys stored in the given cluster hash slot, executing
// CLUSTER GETKEYSINSLOT synchronously with a raw byte-array codec.
@Override
public List<byte[]> clusterGetKeysInSlot(int slot, Integer count) {
RFuture<List<byte[]>> f = executorService.readAsync((String)null, ByteArrayCodec.INSTANCE, CLUSTER_GETKEYSINSLOT, slot, count);
return syncFuture(f);
} | @Test
// On an empty (flushed) cluster, a slot holds no keys.
public void testClusterGetKeysInSlot() {
testInCluster(connection -> {
connection.flushAll();
List<byte[]> keys = connection.clusterGetKeysInSlot(12, 10);
assertThat(keys).isEmpty();
});
}
// Returns a function that permutes a row's key list from sort order back to the
// requested field order; identity when no reordering is needed.
public static Function<List<String>, List<String>> reorderFieldsFunction(List<String> fields, List<SortSpec> sorts) {
if (!needsReorderingFields(fields, sorts)) {
return Function.identity();
}
final List<String> orderedBuckets = orderFields(fields, sorts);
// mapping: requested-order index -> sorted-order index.
// NOTE(review): indexOf inside the collector is O(n^2) overall — fine for the
// small field counts expected here.
final Map<Integer, Integer> mapping = IntStream.range(0, fields.size())
.boxed()
.collect(Collectors.toMap(Function.identity(), i -> orderedBuckets.indexOf(fields.get(i))));
return (keys) -> IntStream.range(0, fields.size())
.boxed()
.map(i -> keys.get(mapping.get(i)))
.collect(Collectors.toList());
} | @Test
// With no sorts, the returned function must be the identity.
void reorderKeysFunctionDoesNotDoAnythingIfNoSortsSpecified() {
final Function<List<String>, List<String>> reorderKeys = ValuesBucketOrdering.reorderFieldsFunction(List.of("foo", "bar", "baz"), List.of());
assertThat(reorderKeys.apply(List.of("baz", "bar", "foo"))).containsExactly("baz", "bar", "foo");
}
/**
 * Trims the given string and normalizes "nothing useful" to {@code null}.
 *
 * @param given the input string, may be {@code null}
 * @return the trimmed string, or {@code null} when the input is {@code null}
 *         or consists only of whitespace
 */
public static String trimToNull(final String given) {
final String trimmed = (given == null) ? null : given.trim();
return (trimmed == null || trimmed.isEmpty()) ? null : trimmed;
} | @Test
// Verifies trimToNull: trims surrounding whitespace, maps blank/empty input to null.
public void testTrimToNull() {
assertEquals("abc", StringHelper.trimToNull("abc"));
assertEquals("abc", StringHelper.trimToNull(" abc"));
assertEquals("abc", StringHelper.trimToNull(" abc "));
assertNull(StringHelper.trimToNull(" "));
assertNull(StringHelper.trimToNull("\t"));
assertNull(StringHelper.trimToNull(" \t "));
assertNull(StringHelper.trimToNull(""));
}
// Builds the aggregated view of a workflow instance. Returns an empty view when the
// instance is null. The instance status is taken directly from the instance when it
// is already known (or no previous aggregate exists); otherwise it is derived.
public static WorkflowInstanceAggregatedInfo computeAggregatedView(
WorkflowInstance workflowInstance, boolean statusKnown) {
if (workflowInstance == null) {
// returning empty object since cannot access state of the current instance run
return new WorkflowInstanceAggregatedInfo();
}
WorkflowInstanceAggregatedInfo instanceAggregated =
computeAggregatedViewNoStatus(workflowInstance);
if (statusKnown || workflowInstance.getAggregatedInfo() == null) {
instanceAggregated.setWorkflowInstanceStatus(workflowInstance.getStatus());
} else {
computeAndSetAggregatedInstanceStatus(workflowInstance, instanceAggregated);
}
return instanceAggregated;
} | @Test
// An instance with no steps must yield an empty step view and fall back to the
// instance's own status (SUCCEEDED), not the stale aggregated STOPPED status.
public void testComputeAggregatedViewForEmptyWorkflow() {
WorkflowInstance instance =
getGenericWorkflowInstance(
2,
WorkflowInstance.Status.SUCCEEDED,
RunPolicy.RESTART_FROM_BEGINNING,
RestartPolicy.RESTART_FROM_BEGINNING);
instance.setAggregatedInfo(new WorkflowInstanceAggregatedInfo());
instance.getAggregatedInfo().setStepAggregatedViews(Collections.emptyMap());
instance.getAggregatedInfo().setWorkflowInstanceStatus(WorkflowInstance.Status.STOPPED);
Workflow runtimeWorkflow = mock(Workflow.class);
instance.setRuntimeWorkflow(runtimeWorkflow);
when(runtimeWorkflow.getSteps()).thenReturn(Collections.emptyList());
WorkflowInstanceAggregatedInfo aggregated =
AggregatedViewHelper.computeAggregatedView(instance, false);
assertTrue(aggregated.getStepAggregatedViews().isEmpty());
assertEquals(WorkflowInstance.Status.SUCCEEDED, aggregated.getWorkflowInstanceStatus());
}
// Collects the API versions advertised for a listener: drops telemetry APIs when
// client telemetry is disabled, and drops APIs requiring a newer inter-broker
// record format than the given minimum.
public static ApiVersionCollection filterApis(
RecordVersion minRecordVersion,
ApiMessageType.ListenerType listenerType,
boolean enableUnstableLastVersion,
boolean clientTelemetryEnabled
) {
ApiVersionCollection apiKeys = new ApiVersionCollection();
for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) {
// Skip telemetry APIs if client telemetry is disabled.
if ((apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY) && !clientTelemetryEnabled)
continue;
if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
apiKey.toApiVersion(enableUnstableLastVersion).ifPresent(apiKeys::add);
}
}
return apiKeys;
} | @Test
// With client telemetry disabled, the ApiVersions response must expose zero telemetry APIs.
public void shouldNotCreateApiResponseWithTelemetryWhenDisabled() {
ApiVersionsResponse response = new ApiVersionsResponse.Builder().
setThrottleTimeMs(10).
setApiVersions(ApiVersionsResponse.filterApis(
RecordVersion.V1,
ListenerType.BROKER,
true,
false)).
setSupportedFeatures(Features.emptySupportedFeatures()).
setFinalizedFeatures(Collections.emptyMap()).
setFinalizedFeaturesEpoch(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH).
build();
verifyApiKeysForTelemetry(response, 0);
}
// Builds and applies (installs or removes) a flow rule on the given device.
// SNAT-priority rules are made temporary with a timeout; all others are permanent.
@Override
public void setRule(ApplicationId appId,
DeviceId deviceId,
TrafficSelector selector,
TrafficTreatment treatment,
int priority,
int tableType,
boolean install) {
FlowRule.Builder flowRuleBuilder = DefaultFlowRule.builder()
.forDevice(deviceId)
.withSelector(selector)
.withTreatment(treatment)
.withPriority(priority)
.fromApp(appId)
.forTable(tableType);
if (priority == Constants.PRIORITY_SNAT_RULE) {
flowRuleBuilder.makeTemporary(TIMEOUT_SNAT_RULE);
} else {
flowRuleBuilder.makePermanent();
}
applyRule(flowRuleBuilder.build(), install);
} | @Test
// A non-SNAT priority must produce a permanent flow rule with the given selector/treatment.
public void testSetRule() {
int testPriority = 10;
int testTableType = 10;
fros = Sets.newConcurrentHashSet();
TrafficSelector.Builder selectorBuilder = DefaultTrafficSelector.builder();
TrafficTreatment.Builder treatmentBuilder = DefaultTrafficTreatment.builder();
FlowRule.Builder flowRuleBuilder = DefaultFlowRule.builder()
.forDevice(DEVICE_ID)
.withSelector(selectorBuilder.build())
.withTreatment(treatmentBuilder.build())
.withPriority(testPriority)
.fromApp(TEST_APP_ID)
.forTable(testTableType)
.makePermanent();
target.setRule(TEST_APP_ID, DEVICE_ID, selectorBuilder.build(),
treatmentBuilder.build(), testPriority, testTableType, true);
validateFlowRule(flowRuleBuilder.build());
}
// Intentional no-op: nothing is done at app initialization for this implementation.
@Override
public void onAppInit() {
} | @Test
// onAppInit must not clear existing notifications.
public void onAppInit_neverClearAllNotifications() throws Exception {
createUUT().onAppInit();
verify(mNotificationManager, never()).cancelAll();
}
// Authenticates a user from a JSON payload containing "username" and "password".
// Fails fast when either field is missing; otherwise resolves the user on a worker
// thread (getUser may block) and completes the handler with the result.
@Override
public void authenticate(
final JsonObject authInfo,
final Handler<AsyncResult<User>> resultHandler
) {
final String username = authInfo.getString("username");
if (username == null) {
resultHandler.handle(Future.failedFuture("authInfo missing 'username' field"));
return;
}
final String password = authInfo.getString("password");
if (password == null) {
resultHandler.handle(Future.failedFuture("authInfo missing 'password' field"));
return;
}
// 'false' => unordered execution on the worker pool.
server.getWorkerExecutor().executeBlocking(
promisedUser -> getUser(contextName, username, password, promisedUser),
false,
resultHandler
);
} | @Test
// A user holding one of several allowed roles must authenticate successfully.
public void shouldAuthenticateWithAdditionalAllowedRoles() throws Exception {
// Given:
givenAllowedRoles("user", "other");
givenUserRoles("user");
// When:
authProvider.authenticate(authInfo, userHandler);
// Then:
verifyAuthorizedSuccessfulLogin();
}
// WAF plugin entry point. With no matching selector/rule: black-list mode lets the
// request through, otherwise it is rejected with 403. With a rule: a missing or
// blank permission passes through; an explicit "reject" returns the configured
// status code; anything else continues the chain.
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
WafConfig wafConfig = Singleton.INST.get(WafConfig.class);
if (Objects.isNull(selector) && Objects.isNull(rule)) {
if (WafModelEnum.BLACK.getName().equals(wafConfig.getModel())) {
return chain.execute(exchange);
}
exchange.getResponse().setStatusCode(HttpStatus.FORBIDDEN);
Object error = ShenyuResultWrap.error(exchange, HttpStatus.FORBIDDEN.value(), Constants.REJECT_MSG, null);
return WebFluxResultUtils.result(exchange, error);
}
WafHandle wafHandle = buildRuleHandle(rule);
if (Objects.isNull(wafHandle) || StringUtils.isBlank(wafHandle.getPermission())) {
LOG.error("waf handler can not configuration:{}", wafHandle);
return chain.execute(exchange);
}
if (WafEnum.REJECT.getName().equals(wafHandle.getPermission())) {
exchange.getResponse().setStatusCode(HttpStatus.FORBIDDEN);
Object error = ShenyuResultWrap.error(exchange, Integer.parseInt(wafHandle.getStatusCode()), Constants.REJECT_MSG, null);
return WebFluxResultUtils.result(exchange, error);
}
return chain.execute(exchange);
} | @Test
// A rule handle with permission "allow" must let the request continue the chain.
public void testWafPluginAllow() {
ruleData.setId("waf");
ruleData.setSelectorId("waf");
WafHandle handle = GsonUtils.getGson().fromJson("{\"permission\":\"allow\",\"statusCode\":\"0\"}", WafHandle.class);
WafPluginDataHandler.CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), handle);
Mono<Void> execute = wafPluginUnderTest.doExecute(exchange, chain, selectorData, ruleData);
StepVerifier.create(execute).expectSubscription().verifyComplete();
}
// Validates a CREATE CONNECTOR statement: rejects with HTTP 409 when a connector of
// the same name already exists (unless suppressed by checkForExistingConnector),
// then runs connector config validation and surfaces any errors as a KsqlException.
public static StatementExecutorResponse validate(
final ConfiguredStatement<CreateConnector> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final CreateConnector createConnector = statement.getStatement();
final ConnectClient client = serviceContext.getConnectClient();
if (checkForExistingConnector(statement, createConnector, client)) {
final String errorMsg = String.format(
"Connector %s already exists", createConnector.getName());
throw new KsqlRestException(EndpointResponse.create()
.status(HttpStatus.SC_CONFLICT)
.entity(new KsqlErrorMessage(Errors.toErrorCode(HttpStatus.SC_CONFLICT), errorMsg))
.build()
);
}
final List<String> errors = validateConfigs(createConnector, client);
if (!errors.isEmpty()) {
final String errorMessage = "Validation error: " + String.join("\n", errors);
throw new KsqlException(errorMessage);
}
return StatementExecutorResponse.handled(Optional.of(new CreateConnectorEntity(
statement.getMaskedStatementText(),
DUMMY_CREATE_RESPONSE
)));
} | @Test
// IF NOT EXISTS on a duplicate connector must not raise during validation.
public void shouldNotThrowOnValidateWhenIfNotExistsSetConnectorExists() {
// Given:
givenConnectorExists();
givenValidationSuccess();
// When:
ConnectExecutor.validate(CREATE_DUPLICATE_CONNECTOR_CONFIGURED, mock(SessionProperties.class), null, serviceContext);
// Then: did not throw
}
// Re-checks every held pull request: for each "topic@queueId" key, notifies with the
// queue's current max offset so waiting consumers can be woken if new messages arrived.
protected void checkHoldRequest() {
for (String key : this.pullRequestTable.keySet()) {
String[] kArray = key.split(TOPIC_QUEUEID_SEPARATOR);
if (2 == kArray.length) {
String topic = kArray[0];
int queueId = Integer.parseInt(kArray[1]);
final long offset = this.brokerController.getMessageStore().getMaxOffsetInQueue(topic, queueId);
try {
this.notifyMessageArriving(topic, queueId, offset);
} catch (Throwable e) {
// NOTE(review): message reads "failed to check hold request failed" — duplicated word.
log.error(
"PullRequestHoldService: failed to check hold request failed, topic={}, queueId={}", topic,
queueId, e);
}
}
}
} | @Test
// Smoke test: checkHoldRequest must not throw on an empty/default state.
public void checkHoldRequestTest() {
Assertions.assertThatCode(() -> pullRequestHoldService.checkHoldRequest()).doesNotThrowAnyException();
}
// This driver does not keep cursors open across a transaction rollback.
@Override
public boolean supportsOpenCursorsAcrossRollback() {
return false;
} | @Test
// Metadata must report that cursors are not kept open across rollback.
void assertSupportsOpenCursorsAcrossRollback() {
assertFalse(metaData.supportsOpenCursorsAcrossRollback());
}
// Registers an aggregate (UDAF) factory under its upper-cased name, rejecting names
// already taken by a scalar UDF, a table function (UDTF), or another UDAF.
// Synchronized to keep the three registry maps consistent.
@Override
public synchronized void addAggregateFunctionFactory(
final AggregateFunctionFactory aggregateFunctionFactory) {
// NOTE(review): default-locale toUpperCase() is locale-sensitive (e.g. Turkish
// dotless i) — confirm whether Locale.ROOT is intended here.
final String functionName = aggregateFunctionFactory.getName().toUpperCase();
validateFunctionName(functionName);
if (udfs.containsKey(functionName)) {
throw new KsqlException(
"Aggregate function already registered as non-aggregate: " + functionName);
}
if (udtfs.containsKey(functionName)) {
throw new KsqlException(
"Aggregate function already registered as table function: " + functionName);
}
if (udafs.putIfAbsent(functionName, aggregateFunctionFactory) != null) {
throw new KsqlException("Aggregate function already registered: " + functionName);
}
} | @Test
// Registering the same UDAF factory twice must fail with "already registered".
public void shouldThrowOnAddUdafIfUdafFactoryAlreadyExists() {
// Given:
functionRegistry.addAggregateFunctionFactory(udafFactory);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> functionRegistry.addAggregateFunctionFactory(udafFactory)
);
// Then:
assertThat(e.getMessage(), containsString("Aggregate function already registered: SOMEAGGFUNC"));
}
// Translates fair-scheduler (FS) site-level settings into capacity-scheduler (CS)
// equivalents on yarnSiteConfig: continuous/async scheduling, preemption, monitor
// policies, assign-multiple, locality thresholds, size-based weight and DRF.
// Side effects: sets the 'preemptionEnabled' and 'sizeBasedWeight' fields.
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
Configuration yarnSiteConfig, boolean drfUsed,
boolean enableAsyncScheduler, boolean userPercentage,
FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
CapacityScheduler.class.getCanonicalName());
if (conf.getBoolean(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
int interval = conf.getInt(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
yarnSiteConfig.setInt(PREFIX +
"schedule-asynchronously.scheduling-interval-ms", interval);
}
// This should be always true to trigger cs auto
// refresh queue.
yarnSiteConfig.setBoolean(
YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
preemptionEnabled = true;
String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
int waitTimeBeforeKill = conf.getInt(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
waitTimeBeforeKill);
long waitBeforeNextStarvationCheck = conf.getLong(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
yarnSiteConfig.setLong(
CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
waitBeforeNextStarvationCheck);
} else {
if (preemptionMode ==
FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
}
}
// For auto created queue's auto deletion.
if (!userPercentage) {
String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
// Set the expired for deletion interval to 10s, consistent with fs.
yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
}
if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
} else {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
}
// Make auto cs conf refresh enabled.
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
.class.getCanonicalName(), yarnSiteConfig));
int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
maxAssign);
}
float localityThresholdNode = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
if (localityThresholdNode !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
localityThresholdNode);
}
float localityThresholdRack = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
if (localityThresholdRack !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
yarnSiteConfig.setFloat(
CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
localityThresholdRack);
}
if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
sizeBasedWeight = true;
}
if (drfUsed) {
yarnSiteConfig.set(
CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
DominantResourceCalculator.class.getCanonicalName());
}
if (enableAsyncScheduler) {
yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
}
} | @Test
// enableAsyncScheduler=true must turn on CS asynchronous scheduling.
public void testAsyncSchedulingEnabledConversion() {
converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true,
true, false, null);
assertTrue("Asynchronous scheduling", yarnConvertedConfig.getBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE,
CapacitySchedulerConfiguration.DEFAULT_SCHEDULE_ASYNCHRONOUSLY_ENABLE));
}
// Per-bar geometric-average return of a position: grossReturn^(1/bars).
// Returns 1 (series.one()) for a zero-bar position.
@Override
public Num calculate(BarSeries series, Position position) {
Num bars = numberOfBars.calculate(series, position);
// If a simple division was used (grossreturn/bars), compounding would not be
// considered, leading to inaccuracies in the calculation.
// Therefore we need to use "pow" to accurately capture the compounding effect.
return bars.isZero() ? series.one() : grossReturn.calculate(series, position).pow(series.one().dividedBy(bars));
} | @Test
// Buy at bar 0 (100), sell at bar 2 (110) over 3 bars: result is (110/100)^(1/3).
public void calculateWithASimplePosition() {
series = new MockBarSeries(numFunction, 100d, 105d, 110d, 100d, 95d, 105d);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series));
AnalysisCriterion averageProfit = getCriterion();
assertNumEquals(numOf(110d / 100).pow(numOf(1d / 3)), averageProfit.calculate(series, tradingRecord));
}
// Registers a JCache entry-listener configuration: wraps the created listener in an
// event-type-aware decorator, applies the optional filter factory (default: accept
// all), and allocates a dispatch queue for the registration if not already present.
@SuppressWarnings("PMD.CloseResource")
public void register(CacheEntryListenerConfiguration<K, V> configuration) {
if (configuration.getCacheEntryListenerFactory() == null) {
return;
}
var listener = new EventTypeAwareListener<K, V>(
configuration.getCacheEntryListenerFactory().create());
CacheEntryEventFilter<K, V> filter = event -> true;
if (configuration.getCacheEntryEventFilterFactory() != null) {
filter = new EventTypeFilter<>(listener,
configuration.getCacheEntryEventFilterFactory().create());
}
var registration = new Registration<>(configuration, filter, listener);
// putIfAbsent makes re-registering the same configuration a no-op.
dispatchQueues.putIfAbsent(registration, new ConcurrentHashMap<>());
} | @Test
// Registering the same configuration twice must keep a single dispatch queue.
public void register_twice() {
var dispatcher = new EventDispatcher<Integer, Integer>(Runnable::run);
var configuration = new MutableCacheEntryListenerConfiguration<>(
() -> createdListener, null, false, false);
dispatcher.register(configuration);
dispatcher.register(configuration);
assertThat(dispatcher.dispatchQueues).hasSize(1);
}
// Prepares and sends fetch requests to all eligible nodes; success/failure callbacks
// re-synchronize on the Fetcher to guard shared state. Returns the number of
// requests sent.
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
// Fetches 3 records from each of two partitions and verifies the fetch-size and
// records-per-request metrics match the bytes/record counts in the response.
public void testFetchResponseMetrics() {
buildFetcher();
String topic1 = "foo";
String topic2 = "bar";
TopicPartition tp1 = new TopicPartition(topic1, 0);
TopicPartition tp2 = new TopicPartition(topic2, 0);
subscriptions.assignFromUser(mkSet(tp1, tp2));
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put(topic1, 1);
partitionCounts.put(topic2, 1);
topicIds.put(topic1, Uuid.randomUuid());
topicIds.put(topic2, Uuid.randomUuid());
TopicIdPartition tidp1 = new TopicIdPartition(topicIds.get(topic1), tp1);
TopicIdPartition tidp2 = new TopicIdPartition(topicIds.get(topic2), tp2);
client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, tp -> validLeaderEpoch, topicIds));
int expectedBytes = 0;
LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();
for (TopicIdPartition tp : mkSet(tidp1, tidp2)) {
subscriptions.seek(tp.topicPartition(), 0);
MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE,
TimestampType.CREATE_TIME, 0L);
for (int v = 0; v < 3; v++)
builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();
for (Record record : records.records())
expectedBytes += record.sizeInBytes();
fetchPartitionData.put(tp, new FetchResponseData.PartitionData()
.setPartitionIndex(tp.topicPartition().partition())
.setHighWatermark(15)
.setLogStartOffset(0)
.setRecords(records));
}
assertEquals(1, sendFetches());
client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
consumerClient.poll(time.timer(0));
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
assertEquals(3, fetchedRecords.get(tp1).size());
assertEquals(3, fetchedRecords.get(tp2).size());
Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
}
public String doLayout(ILoggingEvent event) {
    // A layout that has not been started yet renders nothing at all.
    return isStarted() ? writeLoopOnConverters(event) : CoreConstants.EMPTY_STRING;
} | @Test
public void prefixConverterWithProperty() {
    // Declared outside the try so the finally block can clear the same key.
    String propertyKey = "px1953";
    try {
        String propertyVal = "pxVal";
        System.setProperty(propertyKey, propertyVal);
        String pattern = "%prefix(%logger %property{" + propertyKey + "}) %message";
        pl.setPattern(pattern);
        pl.start();
        String val = pl.doLayout(makeLoggingEvent("hello", null));
        assertEquals("logger=" + logger.getName() + " " + propertyKey + "=" + propertyVal + " hello", val);
    } finally {
        // Bug fix: the original cleared "px" instead of "px1953", so the system
        // property set above leaked into every subsequently running test.
        System.clearProperty(propertyKey);
    }
} |
@VisibleForTesting
void validateCaptcha(AuthLoginReqVO reqVO) {
    // Skip validation entirely when the captcha feature is disabled.
    if (!captchaEnable) {
        return;
    }
    // Validate that the captcha fields are present on the request.
    ValidationUtils.validate(validator, reqVO, AuthLoginReqVO.CodeEnableGroup.class);
    CaptchaVO captchaVO = new CaptchaVO();
    captchaVO.setCaptchaVerification(reqVO.getCaptchaVerification());
    ResponseModel response = captchaService.verification(captchaVO);
    // Captcha verification failed.
    if (!response.isSuccess()) {
        // Record a login-failure audit entry (incorrect captcha code) before failing.
        createLoginLog(null, reqVO.getUsername(), LoginLogTypeEnum.LOGIN_USERNAME, LoginResultEnum.CAPTCHA_CODE_ERROR);
        throw exception(AUTH_LOGIN_CAPTCHA_CODE_ERROR, response.getRepMsg());
    }
} | @Test
public void testValidateCaptcha_successWithDisable() {
    // Prepare arguments.
    AuthLoginReqVO reqVO = randomPojo(AuthLoginReqVO.class);
    // Mock the captcha feature as disabled.
    ReflectUtil.setFieldValue(authService, "captchaEnable", false);
    // Invoke; success is simply "no exception thrown", so no assertion is needed.
    authService.validateCaptcha(reqVO);
} |
@Override
public List<JID> getAdmins() {
    final String groupName = GROUP_NAME.getValue();
    try {
        // Note; the list of admins is already cached, so if the list is being refreshed force a cache refresh too
        return new ArrayList<>(GroupManager.getInstance().getGroup(groupName, true).getMembers());
    } catch (GroupNotFoundException e) {
        // A missing group is treated as "no administrators" rather than an error.
        LOGGER.error(String.format("Unable to retrieve members of group '%s' - assuming no administrators", groupName), e);
        return new ArrayList<>();
    }
} | @Test
public void willRetrieveGroupMembers() {
    // The admin provider should expose exactly the members of the configured group.
    final List<JID> admins = adminProvider.getAdmins();
    assertThat(admins, is(ADMINS));
} |
@Override
public ServerGroup servers() {
    // Served from the cache; refreshing is the cache supplier's responsibility.
    return cache.get();
} | @Test
public void all_up_but_other_endpoint_down() {
    NginxHealthClient service = createClient("nginx-health-output-all-up-but-other-down.json");
    // Health is tracked per endpoint: one endpoint being up does not imply the other is.
    assertTrue(service.servers().isHealthy("gateway.prod.music.vespa.us-east-2.prod"));
    assertFalse(service.servers().isHealthy("frog.prod.music.vespa.us-east-2.prod"));
} |
public void validate(final Metric metric) {
    // Reject a missing metric before dereferencing it.
    if (metric == null) {
        throw new ValidationException("Metric cannot be null");
    }
    final String function = metric.functionName();
    if (!isValidFunction(function)) {
        throw new ValidationException("Unrecognized metric : " + function + ", valid metrics : " + availableMetricTypes);
    }
    // Some functions only make sense when applied to a named field.
    if (!hasFieldIfFunctionNeedsIt(metric)) {
        throw new ValidationException(function + " metric requires field name to be provided after a colon, i.e. " + function + ":http_status_code");
    }
    // Metrics declared unsortable may not carry a sort specification.
    if (metric.sort() != null && UNSORTABLE_METRICS.contains(function)) {
        throw new ValidationException(function + " metric cannot be used to sort aggregations");
    }
} | @Test
void throwsExceptionOnMetricWithIllegalFunctionName() {
    // "bum" is not a registered metric function, so validation must fail.
    assertThrows(ValidationException.class, () -> toTest.validate(new Metric("bum", "field", SortSpec.Direction.Ascending, null)));
} |
public static Optional<String> urlEncode(String raw) {
    // URL-encodes the given string using UTF-8. Every JVM is required to
    // support UTF-8, so the UnsupportedEncodingException branch of the legacy
    // String-based overload was dead code; the Charset overload (Java 10+)
    // removes it. The Optional return is kept for caller compatibility and is
    // now always present.
    return Optional.of(URLEncoder.encode(raw, UTF_8));
} | @Test
public void urlEncode_whenNothingToEncode_returnsOriginal() {
    // Unreserved characters pass through URL encoding unchanged.
    assertThat(urlEncode("abcdefghijklmnopqrstuvwxyz")).hasValue("abcdefghijklmnopqrstuvwxyz");
    assertThat(urlEncode("ABCDEFGHIJKLMNOPQRSTUVWXYZ")).hasValue("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
    assertThat(urlEncode("0123456789")).hasValue("0123456789");
    assertThat(urlEncode("-_.*")).hasValue("-_.*");
} |
/**
 * Queries the broker at {@code addr} for the consumer's committed offset.
 * Throws OffsetNotFoundException when the broker reports QUERY_NOT_FOUND and a
 * generic MQBrokerException for any other non-success response code.
 */
public long queryConsumerOffset(
    final String addr,
    final QueryConsumerOffsetRequestHeader requestHeader,
    final long timeoutMillis
) throws RemotingException, MQBrokerException, InterruptedException {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_CONSUMER_OFFSET, requestHeader);
    RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr),
        request, timeoutMillis);
    assert response != null;
    switch (response.getCode()) {
        case ResponseCode.SUCCESS: {
            QueryConsumerOffsetResponseHeader responseHeader =
                (QueryConsumerOffsetResponseHeader) response.decodeCommandCustomHeader(QueryConsumerOffsetResponseHeader.class);
            return responseHeader.getOffset();
        }
        case ResponseCode.QUERY_NOT_FOUND: {
            // No offset stored yet: signalled to the caller with a dedicated exception type.
            throw new OffsetNotFoundException(response.getCode(), response.getRemark(), addr);
        }
        default:
            break;
    }
    // Any other response code is surfaced as a generic broker error.
    throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
} | @Test
public void testQueryConsumerOffset() throws Exception {
    // Stub the remoting layer to answer any sync call with a SUCCESS response
    // carrying offset 100 and echoing the request's opaque id.
    doAnswer((Answer<RemotingCommand>) mock -> {
        RemotingCommand request = mock.getArgument(1);
        final RemotingCommand response =
            RemotingCommand.createResponseCommand(QueryConsumerOffsetResponseHeader.class);
        final QueryConsumerOffsetResponseHeader responseHeader =
            (QueryConsumerOffsetResponseHeader) response.readCustomHeader();
        responseHeader.setOffset(100L);
        response.makeCustomHeaderToNet();
        response.setCode(ResponseCode.SUCCESS);
        response.setOpaque(request.getOpaque());
        return response;
    }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
    long t = mqClientAPI.queryConsumerOffset(brokerAddr, new QueryConsumerOffsetRequestHeader(), 1000);
    assertThat(t).isEqualTo(100L);
} |
/**
 * Static factory: builds a suffix tree over the given text by feeding each
 * text position to addPrefix while tracking the active suffix point.
 */
public static SuffixTree create(Text text) {
    SuffixTree tree = new SuffixTree(text);
    // Active point starts at the root covering the empty span.
    Suffix active = new Suffix(tree.root, 0, -1);
    for (int i = 0; i < text.length(); i++) {
        tree.addPrefix(active, i);
    }
    return tree;
} | @Test
public void test() {
    // Append a unique terminator so every suffix ends at its own leaf.
    String text = this.data + "$";
    StringSuffixTree tree = StringSuffixTree.create(text);
    assertThat(tree.getNumberOfLeafs()).as("number of leaves").isEqualTo(text.length());
    assertThat(tree.getNumberOfInnerNodes()).as("number of inner nodes").isLessThan(text.length() - 1);
    assertThat(tree.getNumberOfEdges()).as("number of edges").isEqualTo(tree.getNumberOfInnerNodes() + tree.getNumberOfLeafs());
    // Every substring must be located at the same index String.indexOf reports.
    for (int beginIndex = 0; beginIndex < text.length(); beginIndex++) {
        for (int endIndex = beginIndex + 1; endIndex < text.length() + 1; endIndex++) {
            String substring = text.substring(beginIndex, endIndex);
            assertThat(tree.indexOf(substring)).as("index of " + substring + " in " + text).isEqualTo(text.indexOf(substring));
        }
    }
} |
/**
 * Handles one text frame from the websocket producer: deserializes the JSON
 * ProducerMessage, decodes its Base64 payload, copies the optional per-message
 * attributes onto a Pulsar message builder and publishes asynchronously,
 * answering every request with a ProducerAck (or an error ack).
 */
@Override
public void onWebSocketText(String message) {
    if (log.isDebugEnabled()) {
        log.debug("[{}] Received new message from producer {} ", producer.getTopic(),
                getRemote().getInetSocketAddress().toString());
    }
    ProducerMessage sendRequest;
    byte[] rawPayload = null;
    String requestContext = null;
    try {
        sendRequest = producerMessageReader.readValue(message);
        requestContext = sendRequest.context;
        rawPayload = Base64.getDecoder().decode(sendRequest.payload);
    } catch (IOException e) {
        // Malformed JSON: no request context is available yet.
        sendAckResponse(new ProducerAck(FailedToDeserializeFromJSON, e.getMessage(), null, null));
        return;
    } catch (IllegalArgumentException e) {
        String msg = format("Invalid Base64 message-payload error=%s", e.getMessage());
        sendAckResponse(new ProducerAck(PayloadEncodingError, msg, null, requestContext));
        return;
    } catch (NullPointerException e) {
        // Null payload
        sendAckResponse(new ProducerAck(PayloadEncodingError, e.getMessage(), null, requestContext));
        return;
    }
    final long msgSize = rawPayload.length;
    TypedMessageBuilderImpl<byte[]> builder = (TypedMessageBuilderImpl<byte[]>) producer.newMessage();
    try {
        builder.value(rawPayload);
    } catch (SchemaSerializationException e) {
        sendAckResponse(new ProducerAck(PayloadEncodingError, e.getMessage(), null, requestContext));
        return;
    }
    // Copy the optional per-message attributes from the request onto the builder.
    if (sendRequest.properties != null) {
        builder.properties(sendRequest.properties);
    }
    if (sendRequest.key != null) {
        builder.key(sendRequest.key);
    }
    if (sendRequest.replicationClusters != null) {
        builder.replicationClusters(sendRequest.replicationClusters);
    }
    if (sendRequest.eventTime != null) {
        try {
            builder.eventTime(DateFormatter.parse(sendRequest.eventTime));
        } catch (DateTimeParseException e) {
            sendAckResponse(new ProducerAck(PayloadEncodingError, e.getMessage(), null, requestContext));
            return;
        }
    }
    if (sendRequest.deliverAt > 0) {
        builder.deliverAt(sendRequest.deliverAt);
    }
    if (sendRequest.deliverAfterMs > 0) {
        builder.deliverAfter(sendRequest.deliverAfterMs, TimeUnit.MILLISECONDS);
    }
    // If client-side encryption is enabled, the client already encrypted (and
    // possibly compressed) the payload, so the message-metadata attributes
    // "encryptionParam", "compressionType" and "uncompressedMessageSize" must
    // be taken from the request instead of being computed here.
    if (clientSideEncrypt) {
        try {
            if (!StringUtils.isBlank(sendRequest.encryptionParam)) {
                builder.getMetadataBuilder().setEncryptionParam(Base64.getDecoder()
                        .decode(sendRequest.encryptionParam));
            }
        } catch (Exception e){
            String msg = format("Invalid Base64 encryptionParam error=%s", e.getMessage());
            sendAckResponse(new ProducerAck(PayloadEncodingError, msg, null, requestContext));
            return;
        }
        if (sendRequest.compressionType != null && sendRequest.uncompressedMessageSize != null) {
            // Set compression information.
            builder.getMetadataBuilder().setCompression(sendRequest.compressionType);
            builder.getMetadataBuilder().setUncompressedSize(sendRequest.uncompressedMessageSize);
        } else if ((org.apache.pulsar.common.api.proto.CompressionType.NONE.equals(sendRequest.compressionType)
                || sendRequest.compressionType == null)
                && sendRequest.uncompressedMessageSize == null) {
            // Nothing to do, the method send async will set these two attributes.
        } else {
            // Only one param is set.
            sendAckResponse(new ProducerAck(PayloadEncodingError, "the params compressionType and"
                    + " uncompressedMessageSize should both empty or both non-empty",
                    null, requestContext));
            return;
        }
    }
    final long now = System.nanoTime();
    builder.sendAsync().thenAccept(msgId -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Success fully write the message to broker with returned message ID {} from producer {}",
                    producer.getTopic(), msgId, getRemote().getInetSocketAddress().toString());
        }
        updateSentMsgStats(msgSize, TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - now));
        if (isConnected()) {
            String messageId = Base64.getEncoder().encodeToString(msgId.toByteArray());
            sendAckResponse(new ProducerAck(messageId, sendRequest.context));
        }
    }).exceptionally(exception -> {
        log.warn("[{}] Error occurred while producer handler was sending msg from {}", producer.getTopic(),
                getRemote().getInetSocketAddress().toString(), exception);
        numMsgsFailed.increment();
        sendAckResponse(
                new ProducerAck(UnknownError, exception.getMessage(), null, sendRequest.context));
        return null;
    });
} | @Test
public void testProduceMessageAttributes() throws IOException {
    String producerV2 = "/ws/v2/producer/persistent/my-property/my-ns/my-topic";
    HttpServletRequest httpServletRequest = mock(HttpServletRequest.class);
    PulsarClient pulsarClient = mock(PulsarClient.class);
    ProducerBuilder producerBuilder = mock(ProducerBuilder.class);
    Producer producer = mock(Producer.class);
    TypedMessageBuilder messageBuilder = mock(TypedMessageBuilderImpl.class);
    // Build a produce request whose delivery attributes differ from the defaults.
    ProducerMessage produceRequest = new ProducerMessage();
    produceRequest.setDeliverAfterMs(11111);
    produceRequest.setDeliverAt(22222);
    produceRequest.setContext("context");
    produceRequest.setPayload(Base64.getEncoder().encodeToString("my payload".getBytes()));
    // the params are all different with the default value
    Map<String, String[]> queryParams = new HashMap<>();
    httpServletRequest = mock(HttpServletRequest.class);
    when(httpServletRequest.getRequestURI()).thenReturn(producerV2);
    when(httpServletRequest.getParameterMap()).thenReturn(queryParams);
    // Stub the whole client/builder chain so the handler can create a producer.
    WebSocketService service = mock(WebSocketService.class);
    when(service.isAuthenticationEnabled()).thenReturn(false);
    when(service.isAuthorizationEnabled()).thenReturn(false);
    when(service.getPulsarClient()).thenReturn(pulsarClient);
    when(pulsarClient.newProducer()).thenReturn(producerBuilder);
    when(producerBuilder.enableBatching(anyBoolean())).thenReturn(producerBuilder);
    when(producerBuilder.messageRoutingMode(any())).thenReturn(producerBuilder);
    when(producerBuilder.blockIfQueueFull(anyBoolean())).thenReturn(producerBuilder);
    when(producerBuilder.topic(anyString())).thenReturn(producerBuilder);
    when(producerBuilder.create()).thenReturn(producer);
    when(producer.newMessage()).thenReturn(messageBuilder);
    when(messageBuilder.sendAsync()).thenReturn( CompletableFuture.completedFuture(new MessageIdImpl(1, 2, 3)));
    ServletUpgradeResponse response = mock(ServletUpgradeResponse.class);
    ProducerHandler producerHandler = new ProducerHandler(service, httpServletRequest, response);
    producerHandler.onWebSocketText(ObjectMapperFactory.getMapper().writer().writeValueAsString(produceRequest));
    // The delivery attributes from the request must reach the message builder.
    verify(messageBuilder, times(1)).deliverAfter(11111, TimeUnit.MILLISECONDS);
    verify(messageBuilder, times(1)).deliverAt(22222);
} |
@Override
public ScalarOperator visitLikePredicateOperator(LikePredicateOperator predicate, Void context) {
    // Delegates to the generic shuttle helper, which rebuilds the node only
    // when one of its children was rewritten.
    return shuttleIfUpdate(predicate);
} | @Test
void visitLikePredicateOperator() {
    LikePredicateOperator operator = new LikePredicateOperator(
        new ColumnRefOperator(1, INT, "id", true),
        ConstantOperator.TRUE);
    // Both shuttle variants should hand back an operator equal to the input
    // when no child is rewritten.
    {
        ScalarOperator newOperator = shuttle.visitLikePredicateOperator(operator, null);
        assertEquals(operator, newOperator);
    }
    {
        ScalarOperator newOperator = shuttle2.visitLikePredicateOperator(operator, null);
        assertEquals(operator, newOperator);
    }
} |
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
    // Look up the global store by name and hand back the read-write wrapper for it.
    final StateStore store = stateManager.getGlobalStore(name);
    return (S) getReadWriteStore(store);
} | @Test
public void shouldNotAllowCloseForSessionStore() {
    when(stateManager.getGlobalStore(GLOBAL_SESSION_STORE_NAME)).thenReturn(mock(SessionStore.class));
    final StateStore store = globalContext.getStateStore(GLOBAL_SESSION_STORE_NAME);
    // The wrapper returned for global stores must reject lifecycle calls.
    try {
        store.close();
        fail("Should have thrown UnsupportedOperationException.");
    } catch (final UnsupportedOperationException expected) { }
} |
@VisibleForTesting
StreamingEngineConnectionState getCurrentConnections() {
    // Returns the current connection-state snapshot.
    return connections.get();
} | @Test
public void testOnNewWorkerMetadata_correctlyRemovesStaleWindmillServers()
    throws InterruptedException {
    // Two metadata versions will be injected; the distributor waits for both.
    int metadataCount = 2;
    TestGetWorkBudgetDistributor getWorkBudgetDistributor =
        spy(new TestGetWorkBudgetDistributor(metadataCount));
    fanOutStreamingEngineWorkProvider =
        newStreamingEngineClient(
            GetWorkBudget.builder().setItems(1).setBytes(1).build(),
            getWorkBudgetDistributor,
            noOpProcessWorkItemFn());
    String workerToken = "workerToken1";
    String workerToken2 = "workerToken2";
    String workerToken3 = "workerToken3";
    // Version 1 advertises workerToken1 and workerToken2.
    WorkerMetadataResponse firstWorkerMetadata =
        WorkerMetadataResponse.newBuilder()
            .setMetadataVersion(1)
            .addWorkEndpoints(
                WorkerMetadataResponse.Endpoint.newBuilder()
                    .setBackendWorkerToken(workerToken)
                    .build())
            .addWorkEndpoints(
                WorkerMetadataResponse.Endpoint.newBuilder()
                    .setBackendWorkerToken(workerToken2)
                    .build())
            .putAllGlobalDataEndpoints(DEFAULT)
            .build();
    // Version 2 advertises only workerToken3, superseding version 1.
    WorkerMetadataResponse secondWorkerMetadata =
        WorkerMetadataResponse.newBuilder()
            .setMetadataVersion(2)
            .addWorkEndpoints(
                WorkerMetadataResponse.Endpoint.newBuilder()
                    .setBackendWorkerToken(workerToken3)
                    .build())
            .putAllGlobalDataEndpoints(DEFAULT)
            .build();
    getWorkerMetadataReady.await();
    fakeGetWorkerMetadataStub.injectWorkerMetadata(firstWorkerMetadata);
    fakeGetWorkerMetadataStub.injectWorkerMetadata(secondWorkerMetadata);
    waitForWorkerMetadataToBeConsumed(getWorkBudgetDistributor);
    StreamingEngineConnectionState currentConnections =
        fanOutStreamingEngineWorkProvider.getCurrentConnections();
    // Only the endpoint from the latest metadata version should survive.
    assertEquals(1, currentConnections.windmillConnections().size());
    assertEquals(1, currentConnections.windmillStreams().size());
    Set<String> workerTokens =
        fanOutStreamingEngineWorkProvider.getCurrentConnections().windmillConnections().values()
            .stream()
            .map(WindmillConnection::backendWorkerToken)
            .collect(Collectors.toSet());
    assertFalse(workerTokens.contains(workerToken));
    assertFalse(workerTokens.contains(workerToken2));
} |
public Flowable<String> getKeys() {
    // A null pattern means "match every key".
    return getKeysByPattern(null);
} | @Test
public void testDeleteByPattern() {
    RBucketRx<String> bucket = redisson.getBucket("test1");
    sync(bucket.set("someValue"));
    RMapRx<String, String> map = redisson.getMap("test2");
    sync(map.fastPut("1", "2"));
    // "test?" matches both "test1" and "test2", so exactly two keys are deleted.
    Assertions.assertEquals(2, sync(redisson.getKeys().deleteByPattern("test?")).intValue());
} |
/**
 * Determines the ZooKeeper leader among the given pods. With no pods the
 * result is UNKNOWN_LEADER; a single pod is trivially the leader; otherwise
 * each pod is queried over TLS with backoff.
 */
Future<String> findZookeeperLeader(Reconciliation reconciliation, Set<String> pods, TlsPemIdentity coTlsPemIdentity) {
    // Idiom: isEmpty() instead of size() == 0, and iterator().next() instead
    // of stream().findFirst().get() for the guaranteed-nonempty single case.
    if (pods.isEmpty()) {
        return Future.succeededFuture(UNKNOWN_LEADER);
    } else if (pods.size() == 1) {
        return Future.succeededFuture(pods.iterator().next());
    }
    try {
        NetClientOptions netClientOptions = clientOptions(coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());
        return zookeeperLeaderWithBackoff(reconciliation, pods, netClientOptions);
    } catch (Throwable e) {
        // Surface setup failures (e.g. bad TLS material) as a failed future rather than throwing.
        return Future.failedFuture(e);
    }
} | @Test
public void testLeaderFoundFirstAttempt(VertxTestContext context) throws InterruptedException {
    int leader = 1;
    String leaderPod = "my-cluster-zookeeper-1";
    // Start two mock ZK nodes; only node 1 answers as leader.
    int[] ports = startMockZks(context, 2, (id, attempt) -> id == leader);
    ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports);
    Checkpoint a = context.checkpoint();
    finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, treeSet(createPodWithId(0), createPodWithId(1)), DUMMY_IDENTITY)
        .onComplete(context.succeeding(l -> context.verify(() -> {
            assertThat(l, is(leaderPod));
            // The leader must be identified without any retries.
            for (FakeZk zk : zks) {
                assertThat("Unexpected number of attempts for node " + zk.id, zk.attempts.get(), is(1));
            }
            a.flag();
        })));
} |
@Override
public void shutdown(boolean terminate) {
    // Shut down each initialized partition container; a slot can be null when
    // the container was never created for that partition.
    for (PartitionContainer container : partitionContainers) {
        if (container != null) {
            container.shutdown();
        }
    }
    // Stop the scheduled anti-entropy task, interrupting it if it is running.
    if (antiEntropyFuture != null) {
        antiEntropyFuture.cancel(true);
    }
    this.iterationService.shutdown();
} | @Test
public void testShutdown_withoutInit() {
    // shutdown() must be safe to call on a service whose init was never run.
    ReplicatedMapService service = new ReplicatedMapService(nodeEngine);
    service.shutdown(true);
} |
@ConstantFunction.List(list = {
    @ConstantFunction(name = "substring", argTypes = {VARCHAR, INT}, returnType = VARCHAR),
    @ConstantFunction(name = "substring", argTypes = {VARCHAR, INT, INT}, returnType = VARCHAR),
    @ConstantFunction(name = "substr", argTypes = {VARCHAR, INT}, returnType = VARCHAR),
    @ConstantFunction(name = "substr", argTypes = {VARCHAR, INT, INT}, returnType = VARCHAR)
})
public static ConstantOperator substring(ConstantOperator value, ConstantOperator... index) {
    // index holds either {start} or {start, length}.
    Preconditions.checkArgument(index.length == 1 || index.length == 2);
    String string = value.getVarchar();
    /// Guard against out-of-range indexes: String.substring would throw and
    /// make constant evaluation fail.
    /// StarRocks' substring takes (start, length), is 1-based, and permits a
    /// negative start counted from the end of the string, so translate
    /// carefully into Java's 0-based begin/end indexes.
    int beginIndex = index[0].getInt() >= 0 ? index[0].getInt() - 1 : string.length() + index[0].getInt();
    int endIndex =
        (index.length == 2) ? Math.min(beginIndex + index[1].getInt(), string.length()) : string.length();
    // start 0 (beginIndex -1) or out-of-range start yields the empty string.
    if (beginIndex < 0 || beginIndex > endIndex) {
        return ConstantOperator.createVarchar("");
    }
    return ConstantOperator.createVarchar(string.substring(beginIndex, endIndex));
} | @Test
public void testSubString() {
    // substring/substr semantics under test: 1-based start, optional length,
    // negative start counted from the end, "" for start 0 or out-of-range.
    assertEquals("ab", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(1), ConstantOperator.createInt(2)).getVarchar());
    assertEquals("abcd", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(1)).getVarchar());
    assertEquals("cd", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(-2)).getVarchar());
    assertEquals("c", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(-2), ConstantOperator.createInt(1)).getVarchar());
    assertEquals("abcd", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(1), ConstantOperator.createInt(4)).getVarchar());
    assertEquals("abcd", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(1), ConstantOperator.createInt(10)).getVarchar());
    assertEquals("cd", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(3), ConstantOperator.createInt(4)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(0), ConstantOperator.createInt(2)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(ConstantOperator.createVarchar("abcd"),
        ConstantOperator.createInt(5), ConstantOperator.createInt(2)).getVarchar());
    assertEquals("starrocks", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrockscluster", Type.VARCHAR),
        new ConstantOperator(1, Type.INT),
        new ConstantOperator(9, Type.INT)).getVarchar());
    assertEquals("rocks", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(-5, Type.INT),
        new ConstantOperator(5, Type.INT)).getVarchar());
    assertEquals("s", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(-1, Type.INT),
        new ConstantOperator(8, Type.INT)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(-100, Type.INT),
        new ConstantOperator(5, Type.INT)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(0, Type.INT),
        new ConstantOperator(5, Type.INT)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(-1, Type.INT),
        new ConstantOperator(0, Type.INT)).getVarchar());
    assertEquals("apple", ScalarOperatorFunctions.substring(
        new ConstantOperator("apple", Type.VARCHAR),
        new ConstantOperator(-5, Type.INT),
        new ConstantOperator(5, Type.INT)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(0, Type.INT)).getVarchar());
    assertEquals("starrocks", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(1, Type.INT)).getVarchar());
    assertEquals("s", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(9, Type.INT)).getVarchar());
    assertEquals("", ScalarOperatorFunctions.substring(
        new ConstantOperator("starrocks", Type.VARCHAR),
        new ConstantOperator(10, Type.INT)).getVarchar());
} |
@Override
public void isEqualTo(@Nullable Object expected) {
    @SuppressWarnings("UndefinedEquals") // method contract requires testing iterables for equality
    boolean equal = Objects.equal(actual, expected);
    if (equal) {
        return;
    }
    // Fail but with a more descriptive message:
    if (actual instanceof List && expected instanceof List) {
        // Lists are ordered, so the element order is part of the failure report.
        containsExactlyElementsIn((List<?>) expected).inOrder();
    } else if ((actual instanceof Set && expected instanceof Set)
            || (actual instanceof Multiset && expected instanceof Multiset)) {
        // Sets/multisets are unordered; only element membership is reported.
        containsExactlyElementsIn((Collection<?>) expected);
    } else {
        /*
         * TODO(b/18430105): Consider a special message if comparing incompatible collection types
         * (similar to what MultimapSubject has).
         */
        super.isEqualTo(expected);
    }
} | @Test
@SuppressWarnings("UndefinedEquals") // Iterable equality isn't defined, but null equality is
public void nullEqualToNull() {
    // A null actual compared against a null expected must pass.
    assertThat((Iterable<?>) null).isEqualTo(null);
} |
/**
 * Samples up to RECORDS_SAMPLE_SIZE data records from the given Kinesis
 * stream, reading each shard from TRIM_HORIZON and skipping control messages.
 */
List<Record> retrieveRecords(String kinesisStream, KinesisClient kinesisClient) {
    LOG.debug("About to retrieve logs records from Kinesis.");
    // Create ListShard request and response and designate the Kinesis stream
    final ListShardsRequest listShardsRequest = ListShardsRequest.builder().streamName(kinesisStream).build();
    final ListShardsResponse listShardsResponse = kinesisClient.listShards(listShardsRequest);
    final List<Record> recordsList = new ArrayList<>();
    // Iterate through the shards that exist
    for (Shard shard : listShardsResponse.shards()) {
        final String shardId = shard.shardId();
        final GetShardIteratorRequest getShardIteratorRequest =
            GetShardIteratorRequest.builder()
                .shardId(shardId)
                .streamName(kinesisStream)
                .shardIteratorType(ShardIteratorType.TRIM_HORIZON)
                .build();
        String shardIterator = kinesisClient.getShardIterator(getShardIteratorRequest).shardIterator();
        boolean stayOnCurrentShard = true;
        LOG.debug("Retrieved shard id: [{}] with shard iterator: [{}]", shardId, shardIterator);
        while (stayOnCurrentShard) {
            LOG.debug("Getting more records");
            final GetRecordsRequest getRecordsRequest = GetRecordsRequest.builder().shardIterator(shardIterator).build();
            final GetRecordsResponse getRecordsResponse = kinesisClient.getRecords(getRecordsRequest);
            shardIterator = getRecordsResponse.nextShardIterator();
            for (Record record : getRecordsResponse.records()) {
                // Control messages carry no user data; exclude them from the sample.
                if (isControlMessage(record)) {
                    continue;
                }
                recordsList.add(record);
                if (recordsList.size() == RECORDS_SAMPLE_SIZE) {
                    LOG.debug("Returning the list of records now that sample size [{}] has been met.", RECORDS_SAMPLE_SIZE);
                    return recordsList;
                }
            }
            // millisBehindLatest == 0 means we are caught up with the shard tip.
            if (getRecordsResponse.millisBehindLatest() == 0) {
                LOG.debug("Found the end of the shard. No more records returned from the shard.");
                stayOnCurrentShard = false;
            }
        }
    }
    LOG.debug("Returning the list with [{}] records.", recordsList.size());
    return recordsList;
} | @Test
public void testRetrieveRecords() throws IOException {
    Shard shard = Shard.builder().shardId("shardId-1234").build();
    when(kinesisClient.listShards(isA(ListShardsRequest.class)))
        .thenReturn(ListShardsResponse.builder().shards(shard).build());
    when(kinesisClient.getShardIterator(isA(GetShardIteratorRequest.class)))
        .thenReturn(GetShardIteratorResponse.builder().shardIterator("shardIterator").build());
    final Record record = Record.builder()
        .approximateArrivalTimestamp(Instant.now())
        .data(SdkBytes.fromByteArray(AWSTestingUtils.cloudWatchRawPayload()))
        .build();
    // One record per poll and always "behind latest", so the service keeps
    // polling the shard until the 10-record sample size is reached.
    GetRecordsResponse recordsResponse = GetRecordsResponse.builder().records(record).millisBehindLatest(10000L).build();
    // A single thenReturn answers every invocation of the stub; the original
    // chained the same response twelve times redundantly.
    when(kinesisClient.getRecords(isA(GetRecordsRequest.class))).thenReturn(recordsResponse);
    List<Record> fakeRecordsList = kinesisService.retrieveRecords("kinesisStream", kinesisClient);
    // Bug fix: assertEquals takes (expected, actual); the original arguments
    // were swapped, which produces misleading failure messages.
    assertEquals(10, fakeRecordsList.size());
} |
public static void setOutput(Job job, OutputJobInfo outputJobInfo) throws IOException {
    // Convenience overload: delegates using the job's configuration and credentials.
    setOutput(job.getConfiguration(), job.getCredentials(), outputJobInfo);
} | @Test
public void testSetOutput() throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "test outputformat");
    Map<String, String> partitionValues = new HashMap<String, String>();
    partitionValues.put("colname", "p1");
    //null server url means local mode
    OutputJobInfo info = OutputJobInfo.create(dbName, tblName, partitionValues);
    HCatOutputFormat.setOutput(job, info);
    // Round-trip: the job info stored on the configuration must carry the
    // table schema and the partition values supplied above.
    OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(job.getConfiguration());
    assertNotNull(jobInfo.getTableInfo());
    assertEquals(1, jobInfo.getPartitionValues().size());
    assertEquals("p1", jobInfo.getPartitionValues().get("colname"));
    assertEquals(1, jobInfo.getTableInfo().getDataColumns().getFields().size());
    assertEquals("data_column", jobInfo.getTableInfo().getDataColumns().getFields().get(0).getName());
    publishTest(job);
} |
/**
 * Instantiates one ValueExtractor per attribute config, keyed by attribute
 * name. Throws IllegalArgumentException when two configs share a name.
 */
static Map<String, ValueExtractor> instantiateExtractors(List<AttributeConfig> attributeConfigs,
                                                         ClassLoader classLoader) {
    Map<String, ValueExtractor> extractors = createHashMap(attributeConfigs.size());
    for (AttributeConfig config : attributeConfigs) {
        // Duplicate attribute names are a configuration error, not a silent overwrite.
        if (extractors.containsKey(config.getName())) {
            throw new IllegalArgumentException("Could not add " + config
                + ". Extractor for this attribute name already added.");
        }
        extractors.put(config.getName(), instantiateExtractor(config, classLoader));
    }
    return extractors;
} | @Test
public void instantiate_extractors_duplicateExtractor() {
    // GIVEN two configs registering the same attribute name "iq"
    AttributeConfig iqExtractor
        = new AttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
    AttributeConfig iqExtractorDuplicate
        = new AttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$IqExtractor");
    // WHEN instantiated, THEN the duplicate attribute name is rejected
    assertThatThrownBy(() -> instantiateExtractors(asList(iqExtractor, iqExtractorDuplicate)))
        .isInstanceOf(IllegalArgumentException.class);
} |
@Asn1Property(tagNo = 0x30, converter = DigestsConverter.class)
public Map<Integer, byte[]> getDigests() {
    // Maps data-group number to its digest bytes, as decoded from the ASN.1 structure.
    return digests;
} | @Test
public void readRvig2011Cms() throws Exception {
    final LdsSecurityObject ldsSecurityObject = mapper.read(
        readFromCms("rvig2011"), LdsSecurityObject.class);
    // The fixture is expected to carry digests for data groups 1, 2, 3, 14 and 15.
    assertEquals(ImmutableSet.of(1, 2, 3, 14, 15), ldsSecurityObject.getDigests().keySet());
} |
@Override
public String displayName() {
    // Resolve the material's display name defensively: either the config repo
    // or its material config may be absent.
    String materialName = "NULL material";
    if (configRepo != null) {
        MaterialConfig materialConfig = configRepo.getRepo();
        if (materialConfig != null) {
            materialName = materialConfig.getDisplayName();
        }
    }
    return String.format("%s at revision %s", materialName, revision);
} | @Test
public void shouldShowDisplayName() {
    RepoConfigOrigin repoConfigOrigin = new RepoConfigOrigin(ConfigRepoConfig.createConfigRepoConfig(svn("http://mysvn", false), "myplugin", "id"), "123");
    // The display name combines the material's display name with the revision.
    assertThat(repoConfigOrigin.displayName(), is("http://mysvn at revision 123"));
} |
public static boolean isBlankChar(char c) {
    // Widen to int and delegate so both overloads share one implementation.
    return isBlankChar((int) c);
} | @Test
public void issueI5UGSQTest(){
    // U+3164 (Hangul Filler) renders as blank and must be treated as a blank char.
    char c = '\u3164';
    assertTrue(CharUtil.isBlankChar(c));
    // U+2800 (Braille Pattern Blank) likewise.
    c = '\u2800';
    assertTrue(CharUtil.isBlankChar(c));
} |
@Override
@Nullable
public Object convert(@Nullable String value) {
    // Parses the given string into a Joda DateTime using the configured
    // pattern, locale and time zone; returns null for null/empty input.
    if (isNullOrEmpty(value)) {
        return null;
    }
    LOG.debug("Trying to parse date <{}> with pattern <{}>, locale <{}>, and timezone <{}>.", value, dateFormat, locale, timeZone);
    // DRY: the original duplicated the whole builder chain for the two cases,
    // although they differ only in the final withZone call.
    DateTimeFormatter formatter = DateTimeFormat
        .forPattern(dateFormat)
        .withDefaultYear(YearMonth.now(timeZone).getYear())
        .withLocale(locale);
    // Only force the configured zone when the pattern itself carries no zone;
    // otherwise the zone parsed from the value must win.
    if (!containsTimeZone) {
        formatter = formatter.withZone(timeZone);
    }
    return DateTime.parse(value, formatter);
} | @Test
public void convertUsesEtcUTCIfTimeZoneSettingIsBlank() throws Exception {
    // A blank timezone config entry should fall back to UTC.
    final Converter c = new DateConverter(config("YYYY-MM-dd HH:mm:ss", " ", null));
    final DateTime dateTime = (DateTime) c.convert("2014-03-12 10:00:00");
    assertThat(dateTime).isEqualTo("2014-03-12T10:00:00.000Z");
} |
public IMap<String, byte[]> getJobResources(long jobId) {
    // Resources uploaded for the job live in a per-job IMap.
    return instance.getMap(jobResourcesMapName(jobId));
} | @Test
public void when_jobIsCancelled_then_resourcesImmediatelyDeleted() {
    jobConfig.addClass(DummyClass.class);
    var job = instance.getJet().newJob(newStreamPipeline(), jobConfig);
    // Resources exist while the job is running...
    assertThat((Map<?, ?>) jobRepository.getJobResources(job.getId())).isNotEmpty();
    cancelAndJoin(job);
    // ...and are cleaned up as soon as it is cancelled.
    assertThat((Map<?, ?>) jobRepository.getJobResources(job.getId())).isEmpty();
} |
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    // Single-argument form: round with scale 0 (to a whole number).
    return invoke(n, BigDecimal.ZERO);
} | @Test
void invokeOutRangeScale() {
FunctionTestUtil.assertResultError(roundHalfDownFunction.invoke(BigDecimal.valueOf(1.5),
BigDecimal.valueOf(6177)),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(roundHalfDownFunction.invoke(BigDecimal.valueOf(1.5),
BigDecimal.valueOf(-6122)),
InvalidParametersEvent.class);
} |
// Delegates to the static variant, checking this config's backing JSON object
// for fields outside the allowed set.
protected boolean hasOnlyFields(String... allowedFields) {
return hasOnlyFields(object, allowedFields);
} | @Test
public void hasOnlyFields() {
assertTrue("has unexpected fields",
cfg.hasOnlyFields(TEXT, LONG, DOUBLE, BOOLEAN, MAC, BAD_MAC,
IP, BAD_IP, PREFIX, BAD_PREFIX,
CONNECT_POINT, BAD_CONNECT_POINT, TP_PORT, BAD_TP_PORT));
assertTrue("did not detect unexpected fields",
expectInvalidField(() -> cfg.hasOnlyFields(TEXT, LONG, DOUBLE, MAC)));
} |
// Deprecated facade: fetches the metrics JSON for a job by delegating to
// JobClient; callers should use JobClient#getJobMetrics directly.
@Deprecated
public String getJobMetrics(Long jobId) {
return jobClient.getJobMetrics(jobId);
} | @Test
// End-to-end check: run a job to completion, then assert the metrics JSON
// contains the expected source/sink counters.
public void testGetJobMetrics() {
Common.setDeployMode(DeployMode.CLIENT);
String filePath = TestUtils.getResource("/client_test.conf");
JobConfig jobConfig = new JobConfig();
jobConfig.setName("testGetJobMetrics");
SeaTunnelClient seaTunnelClient = createSeaTunnelClient();
JobClient jobClient = seaTunnelClient.getJobClient();
try {
ClientJobExecutionEnvironment jobExecutionEnv =
seaTunnelClient.createExecutionContext(filePath, jobConfig, SEATUNNEL_CONFIG);
final ClientJobProxy clientJobProxy = jobExecutionEnv.execute();
CompletableFuture<JobStatus> objectCompletableFuture =
CompletableFuture.supplyAsync(
() -> {
return clientJobProxy.waitForJobComplete();
});
long jobId = clientJobProxy.getJobId();
await().atMost(30000, TimeUnit.MILLISECONDS)
.untilAsserted(
() ->
Assertions.assertTrue(
jobClient.getJobDetailStatus(jobId).contains("FINISHED")
&& jobClient
.listJobStatus(true)
.contains("FINISHED")));
String jobMetrics = jobClient.getJobMetrics(jobId);
log.info(jobMetrics);
Assertions.assertTrue(jobMetrics.contains(SOURCE_RECEIVED_COUNT));
Assertions.assertTrue(jobMetrics.contains(SOURCE_RECEIVED_QPS));
Assertions.assertTrue(jobMetrics.contains(SINK_WRITE_COUNT));
Assertions.assertTrue(jobMetrics.contains(SINK_WRITE_QPS));
} catch (ExecutionException | InterruptedException e) {
throw new RuntimeException(e);
} finally {
seaTunnelClient.close();
}
} |
// Drops the locally tracked offset for the given queue (no-op for null),
// logging the remaining table size for diagnostics.
public void removeOffset(MessageQueue mq) {
if (mq != null) {
this.offsetTable.remove(mq);
log.info("remove unnecessary messageQueue offset. group={}, mq={}, offsetTableSize={}", this.groupName, mq,
offsetTable.size());
}
} | @Test
// After removal the in-memory read must report -1 (unknown offset).
public void testRemoveOffset() throws Exception {
OffsetStore offsetStore = new RemoteBrokerOffsetStore(mQClientFactory, group);
final MessageQueue messageQueue = new MessageQueue(topic, brokerName, 4);
offsetStore.updateOffset(messageQueue, 1024, false);
assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1024);
offsetStore.removeOffset(messageQueue);
assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(-1);
} |
// Swaps in the new replica array and notifies listeners of the change,
// passing both the new and the previous replicas.
void setReplicas(PartitionReplica[] newReplicas) {
PartitionReplica[] oldReplicas = replicas;
replicas = newReplicas;
onReplicasChange(newReplicas, oldReplicas);
} | @Test
public void testGetReplicaIndex() {
replicaOwners[0] = localReplica;
replicaOwners[1] = new PartitionReplica(newAddress(5001), UuidUtil.newUnsecureUUID());
partition.setReplicas(replicaOwners);
assertEquals(0, partition.getReplicaIndex(replicaOwners[0]));
assertEquals(1, partition.getReplicaIndex(replicaOwners[1]));
assertEquals(-1, partition.getReplicaIndex(new PartitionReplica(newAddress(6000), UuidUtil.newUnsecureUUID())));
} |
// Extracts the charset from the exchange's Content-Type header (if any) and
// stores the normalized encoding — defaulting to UTF-8 — as the exchange's
// CHARSET_NAME property.
protected void setCharsetWithContentType(Exchange camelExchange) {
// setup the charset from content-type header
String contentTypeHeader = ExchangeHelper.getContentType(camelExchange);
if (contentTypeHeader != null) {
String charset = HttpHeaderHelper.findCharset(contentTypeHeader);
// UTF-8 is used when the header carries no explicit charset.
String normalizedEncoding = HttpHeaderHelper.mapCharset(charset, StandardCharsets.UTF_8.name());
if (normalizedEncoding != null) {
camelExchange.setProperty(ExchangePropertyKey.CHARSET_NAME, normalizedEncoding);
}
}
} | @Test
public void testSetCharsetWithContentType() {
DefaultCxfBinding cxfBinding = new DefaultCxfBinding();
cxfBinding.setHeaderFilterStrategy(new DefaultHeaderFilterStrategy());
Exchange exchange = new DefaultExchange(context);
exchange.getIn().setHeader(Exchange.CONTENT_TYPE, "text/xml;charset=ISO-8859-1");
cxfBinding.setCharsetWithContentType(exchange);
String charset = ExchangeHelper.getCharsetName(exchange);
assertEquals("ISO-8859-1", charset, "Get a wrong charset");
exchange.getIn().setHeader(Exchange.CONTENT_TYPE, "text/xml");
cxfBinding.setCharsetWithContentType(exchange);
charset = ExchangeHelper.getCharsetName(exchange);
assertEquals("UTF-8", charset, "Get a worng charset name");
} |
/**
 * Maps the provider's template audit status code to the internal
 * SmsTemplateAuditStatusEnum value (1 = checking, 0 = success, -1 = fail).
 *
 * @throws IllegalArgumentException for any other status code
 */
@VisibleForTesting
Integer convertSmsTemplateAuditStatus(int templateStatus) {
    if (templateStatus == 1) {
        return SmsTemplateAuditStatusEnum.CHECKING.getStatus();
    }
    if (templateStatus == 0) {
        return SmsTemplateAuditStatusEnum.SUCCESS.getStatus();
    }
    if (templateStatus == -1) {
        return SmsTemplateAuditStatusEnum.FAIL.getStatus();
    }
    throw new IllegalArgumentException(String.format("未知审核状态(%d)", templateStatus));
} | @Test
public void testConvertSmsTemplateAuditStatus() {
assertEquals(SmsTemplateAuditStatusEnum.SUCCESS.getStatus(),
smsClient.convertSmsTemplateAuditStatus(0));
assertEquals(SmsTemplateAuditStatusEnum.CHECKING.getStatus(),
smsClient.convertSmsTemplateAuditStatus(1));
assertEquals(SmsTemplateAuditStatusEnum.FAIL.getStatus(),
smsClient.convertSmsTemplateAuditStatus(-1));
assertThrows(IllegalArgumentException.class, () -> smsClient.convertSmsTemplateAuditStatus(3),
"未知审核状态(3)");
} |
// Edit-distance dispatcher: uses weighted costs when a weight matrix is set;
// plain Damerau/Levenshtein when no FKP matrix is available or either string
// is a single character; otherwise the FKP-based variant via br().
// NOTE(review): br/FKP presumably implement a banded Berghel–Roach style
// algorithm — confirm against the class's other members.
@Override
public double d(String a, String b) {
if (weight != null)
return weightedEdit(a, b);
else if (FKP == null || a.length() == 1 || b.length() == 1)
return damerau ? damerau(a, b) : levenshtein(a, b);
else
return br(a, b);
} | @Test
// Exercises both plain Levenshtein (damerau=false) and Damerau-Levenshtein
// (damerau=true) with unit costs on a battery of name pairs.
public void testUnitCost() {
System.out.println("unit cost");
String x = "Levenshtein";
String y = "Laeveshxtin";
String z = "Laeveshetin";
EditDistance edit = new EditDistance(20, false);
assertEquals(0, edit.d(x, x), 1E-7);
assertEquals(4, edit.d(x, y), 1E-7);
assertEquals(4, edit.d(x, z), 1E-7);
assertEquals(2, edit.d("act", "cat"), 1E-7);
assertEquals(5, edit.d("adcroft", "addessi"), 1E-7);
assertEquals(3, edit.d("baird", "baisden"), 1E-7);
assertEquals(2, edit.d("boggan", "boggs"), 1E-7);
assertEquals(5, edit.d("clayton", "cleary"), 1E-7);
assertEquals(4, edit.d("dybas", "dyckman"), 1E-7);
assertEquals(4, edit.d("emineth", "emmert"), 1E-7);
assertEquals(4, edit.d("galante", "galicki"), 1E-7);
assertEquals(1, edit.d("hardin", "harding"), 1E-7);
assertEquals(2, edit.d("kehoe", "kehr"), 1E-7);
assertEquals(5, edit.d("lowry", "lubarsky"), 1E-7);
assertEquals(3, edit.d("magallan", "magana"), 1E-7);
assertEquals(1, edit.d("mayo", "mays"), 1E-7);
assertEquals(4, edit.d("moeny", "moffett"), 1E-7);
assertEquals(2, edit.d("pare", "parent"), 1E-7);
assertEquals(2, edit.d("ramey", "ramfrey"), 1E-7);
edit = new EditDistance(20, true);
assertEquals(0, edit.d(x, x), 1E-7);
assertEquals(4, edit.d(x, y), 1E-7);
assertEquals(3, edit.d(x, z), 1E-7);
assertEquals(1, edit.d("act", "cat"), 1E-7);
assertEquals(5, edit.d("adcroft", "addessi"), 1E-7);
assertEquals(3, edit.d("baird", "baisden"), 1E-7);
assertEquals(2, edit.d("boggan", "boggs"), 1E-7);
assertEquals(6, edit.d("lcayton", "cleary"), 1E-7);
assertEquals(5, edit.d("ydbas", "dyckman"), 1E-7);
assertEquals(4, edit.d("emineth", "emmert"), 1E-7);
assertEquals(4, edit.d("galante", "galicki"), 1E-7);
assertEquals(1, edit.d("hardin", "harding"), 1E-7);
assertEquals(2, edit.d("kehoe", "kehr"), 1E-7);
assertEquals(5, edit.d("lowry", "lubarsky"), 1E-7);
assertEquals(3, edit.d("magallan", "magana"), 1E-7);
assertEquals(1, edit.d("mayo", "mays"), 1E-7);
assertEquals(4, edit.d("moeny", "moffett"), 1E-7);
assertEquals(2, edit.d("pare", "parent"), 1E-7);
assertEquals(2, edit.d("ramey", "ramfrey"), 1E-7);
} |
/**
 * Offers the item to the given queue, spin-idling until it is accepted.
 * While waiting it fails fast if the drainer has gone, wakes the drainer so
 * it can make room, and honors thread interruption. After a successful
 * offer it idles as long as backpressure is flagged.
 */
public final void submit(Queue<E> queue, E item) throws ConcurrentConveyorException {
for (long idleCount = 0; !queue.offer(item); idleCount++) {
SUBMIT_IDLER.idle(idleCount);
checkDrainerGone();
unparkDrainer();
checkInterrupted();
}
// Backpressure: stall the submitter until the flag is cleared.
for (long idleCount = 0; backpressure; idleCount++) {
SUBMIT_IDLER.idle(idleCount);
checkInterrupted();
}
} | @Test
public void when_submitToGivenQueue_then_poll() {
// when
conveyor.submit(defaultQ, item1);
// then
assertSame(item1, defaultQ.poll());
} |
/**
 * Loads the posts with the given ids; yields an empty list when the id
 * collection is null or empty.
 */
@Override
public List<PostDO> getPostList(Collection<Long> ids) {
    return CollUtil.isEmpty(ids) ? Collections.emptyList() : postMapper.selectBatchIds(ids);
} | @Test
public void testGetPostList_idsAndStatus() {
// mock 数据
PostDO postDO01 = randomPojo(PostDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
postMapper.insert(postDO01);
// 测试 status 不匹配
PostDO postDO02 = randomPojo(PostDO.class, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus()));
postMapper.insert(postDO02);
// 准备参数
List<Long> ids = Arrays.asList(postDO01.getId(), postDO02.getId());
// 调用
List<PostDO> list = postService.getPostList(ids, singletonList(CommonStatusEnum.ENABLE.getStatus()));
// 断言
assertEquals(1, list.size());
assertPojoEquals(postDO01, list.get(0));
} |
// Identifies this data handler as belonging to the Casdoor plugin.
@Override
public String pluginNamed() {
return PluginEnum.CASDOOR.getName();
} | @Test
public void testPluginNamed() {
final String result = casdoorPluginDateHandlerTest.pluginNamed();
assertEquals(PluginEnum.CASDOOR.getName(), result);
} |
// Default string form: delegates to toString(true), i.e. includes quota columns.
@Override
public String toString() {
return toString(true);
} | @Test
// With qOption=false only directory/file counts and length are rendered.
public void testToStringNoShowQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary.Builder().length(length).
fileCount(fileCount).directoryCount(directoryCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(false));
} |
/**
 * Accepts null or String values; any other type fails validation with a
 * descriptive message.
 */
@Override
public ValidationResult validate(Object value) {
    if (value != null && !(value instanceof String)) {
        return new ValidationResult.ValidationFailed("Value \"" + value + "\" is not a valid string!");
    }
    return new ValidationResult.ValidationPassed();
} | @Test
public void validateNonString() {
assertThat(validator.validate(new Object())).isInstanceOf(ValidationResult.ValidationFailed.class);
} |
/**
 * Routes a message through all stream rules and returns the streams it
 * matches. AND-matching streams are removed (and their rules blacklisted) as
 * soon as one rule cannot match; OR-matching streams are blacklisted once
 * matched so later rules are skipped. Finally, any matched stream flagged
 * with removeMatchesFromDefaultStream detaches the message from the default
 * stream.
 */
public List<Stream> match(Message message) {
final Set<Stream> result = Sets.newHashSet();
final Set<String> blackList = Sets.newHashSet();
for (final Rule rule : rulesList) {
if (blackList.contains(rule.getStreamId())) {
continue;
}
final StreamRule streamRule = rule.getStreamRule();
final StreamRuleType streamRuleType = streamRule.getType();
final Stream.MatchingType matchingType = rule.getMatchingType();
// Rules that require the field to be present cannot match a message
// lacking that field; for AND streams that disqualifies the stream.
if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType)
&& !message.hasField(streamRule.getField())) {
if (matchingType == Stream.MatchingType.AND) {
result.remove(rule.getStream());
// blacklist stream because it can't match anymore
blackList.add(rule.getStreamId());
}
continue;
}
final Stream stream;
// Regex rules are evaluated with a timeout to guard against
// catastrophic backtracking.
if (streamRuleType != StreamRuleType.REGEX) {
stream = rule.match(message);
} else {
stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
}
if (stream == null) {
if (matchingType == Stream.MatchingType.AND) {
result.remove(rule.getStream());
// blacklist stream because it can't match anymore
blackList.add(rule.getStreamId());
}
} else {
result.add(stream);
if (matchingType == Stream.MatchingType.OR) {
// blacklist stream because it is already matched
blackList.add(rule.getStreamId());
}
}
}
final Stream defaultStream = defaultStreamProvider.get();
boolean alreadyRemovedDefaultStream = false;
for (Stream stream : result) {
if (stream.getRemoveMatchesFromDefaultStream()) {
if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
alreadyRemovedDefaultStream = true;
if (LOG.isTraceEnabled()) {
LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
}
} else {
// A previously executed message processor (or Illuminate) has likely already removed the
// default stream from the message. Now, the message has matched a stream in the Graylog
// MessageFilterChain, and the matching stream is also set to remove the default stream.
// This is usually from user-defined stream rules, and is generally not a problem.
cannotRemoveDefaultMeter.inc();
if (LOG.isTraceEnabled()) {
LOG.trace("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
}
}
}
}
return ImmutableList.copyOf(result);
} | @Test
// Neither rule matches, so an OR stream must not be returned at all.
public void testOrMatchingShouldNotMatch() {
final String dummyField = "dummyField";
final String dummyValue = "dummyValue";
final Stream stream = mock(Stream.class);
when(stream.getMatchingType()).thenReturn(Stream.MatchingType.OR);
final StreamRule streamRule1 = getStreamRuleMock("StreamRule1Id", StreamRuleType.EXACT, dummyField, "not" + dummyValue);
final StreamRule streamRule2 = getStreamRuleMock("StreamRule2Id", StreamRuleType.EXACT, dummyField, "alsoNot" + dummyValue);
when(stream.getStreamRules()).thenReturn(Lists.newArrayList(streamRule1, streamRule2));
final Message message = mock(Message.class);
when(message.getField(eq(dummyField))).thenReturn(dummyValue);
final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream));
final List<Stream> result = engine.match(message);
assertThat(result).isEmpty();
} |
/**
 * Returns a future that completes with the first successful result among
 * {@code futures}. It completes exceptionally only after every input future
 * has failed; the first failure becomes the cause and the remaining failures
 * are attached as suppressed exceptions.
 *
 * <p>Note: a future completed with a {@code null} value is treated like a
 * failure for the purpose of this combiner (value == null branch).
 *
 * @throws IllegalArgumentException if {@code futures} is empty
 */
public static <T> CompletableFuture<T> firstOf(List<CompletableFuture<T>> futures) {
class Combiner {
final Object monitor = new Object();
final CompletableFuture<T> combined = new CompletableFuture<>();
final int futuresCount;
Throwable error = null;
int exceptionCount = 0;
Combiner(int futuresCount) { this.futuresCount = futuresCount; }
void onCompletion(T value, Throwable error) {
// Cheap early exit; correctness does not depend on it because
// CompletableFuture.complete() is idempotent.
if (combined.isDone()) return;
T valueToComplete = null;
Throwable exceptionToComplete = null;
synchronized (monitor) {
if (value != null) {
valueToComplete = value;
} else {
// Keep the first failure as the primary cause; chain the rest.
if (this.error == null) {
this.error = error;
} else {
this.error.addSuppressed(error);
}
if (++exceptionCount == futuresCount) {
exceptionToComplete = this.error;
}
}
}
// Complete outside the monitor so dependent actions don't run under the lock.
if (valueToComplete != null) {
// Fix: use the guarded local rather than the raw parameter — the two
// happened to coincide, but the local is what the branch logic computed.
combined.complete(valueToComplete);
} else if (exceptionToComplete != null) {
combined.completeExceptionally(exceptionToComplete);
}
}
}
int size = futures.size();
if (size == 0) throw new IllegalArgumentException();
if (size == 1) return futures.get(0);
Combiner combiner = new Combiner(size);
futures.forEach(future -> future.whenComplete(combiner::onCompletion));
return combiner.combined;
} | @Test
public void firstof_completes_exceptionally_when_all_futures_have_complete_exceptionally() {
CompletableFuture<String> f1 = new CompletableFuture<>();
CompletableFuture<String> f2 = new CompletableFuture<>();
CompletableFuture<String> f3 = new CompletableFuture<>();
CompletableFuture<String> result = CompletableFutures.firstOf(List.of(f1, f2, f3));
f1.completeExceptionally(new Throwable("t1"));
f2.completeExceptionally(new Throwable("t2"));
f3.completeExceptionally(new Throwable("t3"));
assertTrue(result.isDone());
assertTrue(result.isCompletedExceptionally());
try {
result.join();
fail("Exception expected");
} catch (CompletionException e) {
Throwable cause = e.getCause();
assertEquals("t1", cause.getMessage());
assertEquals(2, cause.getSuppressed().length);
}
} |
// Snapshots the enumerator for the given checkpoint: the last enumerated
// position, the assigner's pending-split state, and the enumeration history.
@Override
public IcebergEnumeratorState snapshotState(long checkpointId) {
return new IcebergEnumeratorState(
enumeratorPosition.get(), assigner.state(), enumerationHistory.snapshot());
} | @Test
// A split discovered while the requesting reader is gone must stay pending in
// the snapshot, then be assigned once the reader re-registers.
public void testRequestingReaderUnavailableWhenSplitDiscovered() throws Exception {
TestingSplitEnumeratorContext<IcebergSourceSplit> enumeratorContext =
new TestingSplitEnumeratorContext<>(4);
ScanContext scanContext =
ScanContext.builder()
.streaming(true)
.startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL)
.build();
ManualContinuousSplitPlanner splitPlanner = new ManualContinuousSplitPlanner(scanContext, 0);
ContinuousIcebergEnumerator enumerator =
createEnumerator(enumeratorContext, scanContext, splitPlanner);
// register one reader, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
// remove the reader (like in a failure)
enumeratorContext.registeredReaders().remove(2);
// make one split available and trigger the periodic discovery
List<IcebergSourceSplit> splits =
SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
assertThat(splits).hasSize(1);
splitPlanner.addSplits(splits);
enumeratorContext.triggerAllActions();
assertThat(enumeratorContext.getSplitAssignments()).doesNotContainKey(2);
List<String> pendingSplitIds =
enumerator.snapshotState(1).pendingSplits().stream()
.map(IcebergSourceSplitState::split)
.map(IcebergSourceSplit::splitId)
.collect(Collectors.toList());
assertThat(pendingSplitIds).hasSameSizeAs(splits).first().isEqualTo(splits.get(0).splitId());
// register the reader again, and let it request a split
enumeratorContext.registerReader(2, "localhost");
enumerator.addReader(2);
enumerator.handleSourceEvent(2, new SplitRequestEvent());
assertThat(enumerator.snapshotState(2).pendingSplits()).isEmpty();
assertThat(enumeratorContext.getSplitAssignments().get(2).getAssignedSplits())
.contains(splits.get(0));
} |
// Validates both the host and the port entries of the plugin configuration,
// throwing IllegalArgumentException on the first violation.
public static void validateHostAndPort(final String type, final PluginConfiguration pluginConfig) {
validateHost(type, pluginConfig);
validatePort(type, pluginConfig);
} | @Test
void assertValidateHostAndPortWhenPortLessThanOne() {
assertThrows(IllegalArgumentException.class, () -> PluginConfigurationValidator.validateHostAndPort("foo_type", new PluginConfiguration("localhost", 0, "pwd", null)));
} |
// Wraps or rebinds every JNDI DataSource with a monitoring proxy.
// Returns true when the JNDI lookup succeeded (individual datasource
// failures are logged and skipped).
boolean rebindDataSources() {
boolean ok;
// look up datasources via InitialContext so we can display the DB name/version
// and the JDBC driver name/version (the dataSource name looked up in JNDI is
// typically of the form jdbc/Xxx, the standard name for a DataSource)
try {
final boolean rewrapDataSources = Parameter.REWRAP_DATASOURCES.getValueAsBoolean();
if (rewrapDataSources || Parameter.DATASOURCES.getValue() != null) {
// undo any rebinding or rewrapping possibly done earlier by SessionListener
// when datasources or rewrap-datasources is defined in the filter
stop();
}
final Map<String, DataSource> jndiDataSources = JdbcWrapperHelper.getJndiDataSources();
LOG.debug("datasources found in JNDI: " + jndiDataSources.keySet());
for (final Map.Entry<String, DataSource> entry : jndiDataSources.entrySet()) {
final String jndiName = entry.getKey();
final DataSource dataSource = entry.getValue();
try {
if (rewrapDataSources || isServerNeedsRewrap(jndiName)) {
rewrapDataSource(jndiName, dataSource);
JdbcWrapperHelper.registerRewrappedDataSource(jndiName, dataSource);
} else if (!isProxyAlready(dataSource)) {
// if dataSource is already a proxy, do not create a proxy of a proxy nor rebind it
final DataSource dataSourceProxy = createDataSourceProxy(jndiName,
dataSource);
JdbcWrapperHelper.rebindDataSource(servletContext, jndiName, dataSource,
dataSourceProxy);
LOG.debug("datasource rebinded: " + jndiName + " from class "
+ dataSource.getClass().getName() + " to class "
+ dataSourceProxy.getClass().getName());
}
} catch (final Throwable t) { // NOPMD
// it failed; too bad for this one, which seems invalid, but continue with the others
LOG.debug("rebinding datasource " + jndiName + " failed, skipping it", t);
}
}
ok = true;
} catch (final Throwable t) { // NOPMD
// it failed, never mind
LOG.debug("rebinding datasources failed, skipping", t);
ok = false;
}
return ok;
} | @Test
public void testRebindDataSources() {
// test rebind and stop (without a container)
jdbcWrapper.rebindDataSources();
Utils.setProperty(Parameter.REWRAP_DATASOURCES, "true");
jdbcWrapper.rebindDataSources();
jdbcWrapper.stop();
} |
// Sets the primary-key constraint name after validating its format; returns
// this builder for chaining.
public CreateTableBuilder withPkConstraintName(String pkConstraintName) {
this.pkConstraintName = validateConstraintName(pkConstraintName);
return this;
} | @Test
@UseDataProvider("digitCharsDataProvider")
public void withPkConstraintName_throws_IAE_if_name_starts_with_number(char number) {
assertThatThrownBy(() -> underTest.withPkConstraintName(number + "a"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Constraint name must not start by a number or '_', got '" + number + "a'");
} |
/**
 * Checks every dependency against the allowed rules. Throws an
 * EnforcerRuleException listing dependencies matched by no rule and, when
 * failOnUnmatched is set, rules that matched no dependency.
 */
static void validateDependencies(Set<Artifact> dependencies, Set<String> allowedRules, boolean failOnUnmatched)
throws EnforcerRuleException {
SortedSet<Artifact> unmatchedArtifacts = new TreeSet<>();
Set<String> matchedRules = new HashSet<>();
for (Artifact dependency : dependencies) {
boolean matches = false;
// First matching rule wins; record it so unused rules can be reported.
for (String rule : allowedRules) {
if (matches(dependency, rule)){
matchedRules.add(rule);
matches = true;
break;
}
}
if (!matches) {
unmatchedArtifacts.add(dependency);
}
}
SortedSet<String> unmatchedRules = new TreeSet<>(allowedRules);
unmatchedRules.removeAll(matchedRules);
if (!unmatchedArtifacts.isEmpty() || (failOnUnmatched && !unmatchedRules.isEmpty())) {
StringBuilder errorMessage = new StringBuilder("Vespa dependency enforcer failed:\n");
if (!unmatchedArtifacts.isEmpty()) {
errorMessage.append("Dependencies not matching any rule:\n");
unmatchedArtifacts.forEach(a -> errorMessage.append(" - ").append(a.toString()).append('\n'));
}
if (failOnUnmatched && !unmatchedRules.isEmpty()) {
errorMessage.append("Rules not matching any dependency:\n");
unmatchedRules.forEach(p -> errorMessage.append(" - ").append(p).append('\n'));
}
throw new EnforcerRuleException(errorMessage.toString());
}
} | @Test
void succeeds_when_all_dependencies_and_rules_match() {
Set<Artifact> dependencies = Set.of(
artifact("com.yahoo.vespa", "container-core", "8.0.0", "provided"),
artifact("com.yahoo.vespa", "testutils", "8.0.0", "test"));
Set<String> rules = Set.of(
"com.yahoo.vespa:container-core:jar:*:provided",
"com.yahoo.vespa:*:jar:*:test");
assertDoesNotThrow(() -> EnforceDependencies.validateDependencies(dependencies, rules, true));
} |
/**
 * Resolves the configured app root, falling back to the Jetty default for
 * WAR projects or the plain-Java default otherwise, and validates it as an
 * absolute Unix path.
 *
 * @throws InvalidAppRootException if the value is not a valid absolute Unix path
 */
@VisibleForTesting
static AbsoluteUnixPath getAppRootChecked(
    RawConfiguration rawConfiguration, ProjectProperties projectProperties)
    throws InvalidAppRootException {
  String appRoot = rawConfiguration.getAppRoot();
  if (appRoot.isEmpty()) {
    if (projectProperties.isWarProject()) {
      appRoot = DEFAULT_JETTY_APP_ROOT;
    } else {
      appRoot = JavaContainerBuilder.DEFAULT_APP_ROOT;
    }
  }
  try {
    return AbsoluteUnixPath.get(appRoot);
  } catch (IllegalArgumentException ex) {
    throw new InvalidAppRootException(appRoot, appRoot, ex);
  }
} | @Test
public void testGetAppRootChecked_errorOnWindowsPath() {
when(rawConfiguration.getAppRoot()).thenReturn("\\windows\\path");
Exception exception =
assertThrows(
InvalidAppRootException.class,
() ->
PluginConfigurationProcessor.getAppRootChecked(
rawConfiguration, projectProperties));
assertThat(exception).hasMessageThat().isEqualTo("\\windows\\path");
} |
/**
 * Serializes the configuration to the writer as either JSON or XML,
 * optionally restricted to a single property.
 *
 * @throws BadFormatException if {@code format} is neither JSON nor XML
 */
static void writeResponse(Configuration conf,
    Writer out, String format, String propertyName)
    throws IOException, IllegalArgumentException, BadFormatException {
  if (FORMAT_XML.equals(format)) {
    conf.writeXml(propertyName, out, conf);
  } else if (FORMAT_JSON.equals(format)) {
    Configuration.dumpConfiguration(conf, propertyName, out);
  } else {
    throw new BadFormatException("Bad format: " + format);
  }
} | @Test
public void testBadFormat() throws Exception {
StringWriter sw = new StringWriter();
try {
ConfServlet.writeResponse(getTestConf(), sw, "not a format");
fail("writeResponse with bad format didn't throw!");
} catch (ConfServlet.BadFormatException bfe) {
// expected
}
assertEquals("", sw.toString());
} |
// Creates an empty Read transform; configure it via its with* methods before use.
public static <V> Read<V> read() {
return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
} | @Test
// An unconfigured Read must fail validation (no receiver builder set).
public void testReadValidationFailsMissingReceiverBuilder() {
SparkReceiverIO.Read<String> read = SparkReceiverIO.read();
assertThrows(IllegalStateException.class, read::validateTransform);
} |
/**
 * Returns the next power-of-two capacity after {@code current}, enforcing
 * the minimum capacity and failing on int overflow. Requires (assert) a
 * positive power-of-two input.
 */
public static int nextCapacity(int current) {
assert current > 0 && Long.bitCount(current) == 1 : "Capacity must be a power of two.";
// Clamp small inputs so doubling lands on at least MIN_CAPACITY.
if (current < MIN_CAPACITY / 2) {
current = MIN_CAPACITY / 2;
}
current <<= 1;
// Doubling past Integer.MAX_VALUE wraps negative.
if (current < 0) {
throw new RuntimeException("Maximum capacity exceeded.");
}
return current;
} | @Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testNextCapacity_withLong_shouldThrowIfCapacityNoPowerOfTwo() {
long capacity = 23;
nextCapacity(capacity);
} |
/**
 * Parses the string as a double, rejecting NaN and infinite results.
 *
 * @throws NumberFormatException if the value is not a finite double
 */
public static double parseDouble(final String str) {
    final double parsed = Double.parseDouble(str);
    if (!Double.isFinite(parsed)) {
        throw new NumberFormatException("Invalid double value: " + str);
    }
    return parsed;
} | @Test
public void shouldParseDouble() {
assertThat(SqlDoubles.parseDouble("1.3"), is(1.3D));
} |
// Runs this computation step for the root (branch) component of the tree.
@Override
public void execute(ComputationStep.Context context) {
executeForBranch(treeRootHolder.getRoot());
} | @Test
// An unchanged quality profile must not generate an event.
public void no_event_if_qp_is_unchanged() {
QualityProfile qp = qp(QP_NAME_1, LANGUAGE_KEY_1, new Date());
qProfileStatusRepository.register(qp.getQpKey(), UNCHANGED);
mockQualityProfileMeasures(treeRootHolder.getRoot(), arrayOf(qp), arrayOf(qp));
underTest.execute(new TestComputationStepContext());
verify(eventRepository, never()).add(any(Event.class));
} |
// Convenience overload: builds a partition key assuming the HIVE table type.
public static PartitionKey createPartitionKey(List<String> values, List<Column> columns) throws AnalysisException {
return createPartitionKey(values, columns, Table.TableType.HIVE);
} | @Test
public void testPaimonPartitionKey() throws AnalysisException {
PartitionKey partitionKey = createPartitionKey(
Lists.newArrayList("1", "a", "3.0", "__DEFAULT_PARTITION__"), partColumns,
Table.TableType.PAIMON);
Assert.assertEquals("(\"1\", \"a\", \"3.0\", \"NULL\")", partitionKey.toSql());
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.