| focal_method | test_case |
|---|---|
public Applications(CuratorDb db) {
this.db = db;
// read and write all to make sure they are stored in the latest version of the serialized format
for (ApplicationId id : ids()) {
try (Mutex lock = db.lock(id)) {
get(id).ifPresent(application -> put(application, lock));
} catch (ApplicationLockException e) {
throw new ApplicationLockException(e);
}
}
}
|
@Test
public void testApplications() {
Applications applications = new NodeRepositoryTester().nodeRepository().applications();
ApplicationId app1 = ApplicationId.from("t1", "a1", "i1");
ApplicationId app2 = ApplicationId.from("t1", "a2", "i1");
ApplicationId app3 = ApplicationId.from("t1", "a2", "default");
assertTrue(applications.get(app1).isEmpty());
assertEquals(List.of(), applications.ids());
applications.put(Application.empty(app1), () -> {});
assertEquals(app1, applications.get(app1).get().id());
assertEquals(List.of(app1), applications.ids());
NestedTransaction t = new NestedTransaction();
applications.remove(new ApplicationTransaction(provisionLock(app1), t));
t.commit();
assertTrue(applications.get(app1).isEmpty());
assertEquals(List.of(), applications.ids());
applications.put(Application.empty(app1), () -> {});
applications.put(Application.empty(app2), () -> {});
t = new NestedTransaction();
applications.put(Application.empty(app3), new ApplicationTransaction(provisionLock(app1), t));
assertEquals(List.of(app1, app2), applications.ids());
t.commit();
assertEquals(List.of(app1, app2, app3), applications.ids());
t = new NestedTransaction();
applications.remove(new ApplicationTransaction(provisionLock(app1), t));
applications.remove(new ApplicationTransaction(provisionLock(app2), t));
applications.remove(new ApplicationTransaction(provisionLock(app3), t));
assertEquals(List.of(app1, app2, app3), applications.ids());
t.commit();
assertTrue(applications.get(app1).isEmpty());
assertEquals(List.of(), applications.ids());
}
|
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
if (!shouldHandle(instances)) {
return instances;
}
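// Narrow the instance list down to the rule-matched targets, then let the parent handler continue the chain.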
List<Object> result = getTargetInstancesByRules(targetName, instances);
return super.handle(targetName, result, requestData);
}
|
@Test
public void testGetTargetInstancesByConsumerTagRules() {
RuleInitializationUtils.initConsumerTagRules();
List<Object> instances = new ArrayList<>();
Map<String, String> metadata1 = new HashMap<>();
metadata1.put("group", "red");
Map<String, String> metadata2 = new HashMap<>();
metadata2.put("group", "green");
ServiceInstance instance1 = new TestDefaultServiceInstance(metadata1);
instances.add(instance1);
ServiceInstance instance2 = new TestDefaultServiceInstance(metadata2);
instances.add(instance2);
AppCache.INSTANCE.setMetadata(metadata1);
List<Object> targetInstances = tagRouteHandler.handle("foo", instances,
new RequestData(null, null, null));
Assert.assertEquals(1, targetInstances.size());
Assert.assertEquals(instance1, targetInstances.get(0));
ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
|
@Override
public Set<EntityExcerpt> listEntityExcerpts() {
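// Only event definitions whose config is marked content-pack exportable become excerpts.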
return eventDefinitionService.streamAll()
.filter(ed -> ed.config().isContentPackExportable())
.map(this::createExcerpt)
.collect(Collectors.toSet());
}
|
@Test
public void listExcerptsExcludesNonContentPackExportableEventDefinitions() {
EventDefinitionFacade testFacade = new EventDefinitionFacade(
objectMapper, eventDefinitionHandler, new HashSet<>(), jobDefinitionService, mockEventDefinitionService, userService);
EventDefinitionDto dto = validEventDefinitionDto(mockEventProcessorConfig);
when(mockEventProcessorConfig.isContentPackExportable()).thenReturn(false);
when(mockEventDefinitionService.streamAll()).thenReturn(Stream.of(dto));
final Set<EntityExcerpt> excerpts = testFacade.listEntityExcerpts();
assertThat(excerpts.size()).isEqualTo(0);
}
|
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
if(file.isVolume()) {
log.warn(String.format("Skip setting timestamp for %s", file));
return;
}
try {
if(null != status.getModified()) {
final String fileid = this.fileid.getFileId(file);
final File properties = new File();
properties.setModifiedTime(new DateTime(status.getModified()));
final File latest = session.getClient().files().update(fileid, properties).setFields(DriveAttributesFinderFeature.DEFAULT_FIELDS).
setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
status.setResponse(new DriveAttributesFinderFeature(session, this.fileid).toAttributes(latest));
}
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Failure to write attributes of {0}", e, file);
}
}
|
@Test
public void testSetTimestamp() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path home = DriveHomeFinderService.MYDRIVE_FOLDER;
final Path test = new DriveTouchFeature(session, fileid).touch(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
new DriveMetadataFeature(session, fileid).setMetadata(test, Collections.singletonMap("test", "t"));
final long modified = System.currentTimeMillis();
final TransferStatus status = new TransferStatus().withModified(modified);
new DriveTimestampFeature(session, fileid).setTimestamp(test, status);
assertEquals(modified, new DefaultAttributesFinderFeature(session).find(test).getModificationDate());
final PathAttributes attr = new DriveAttributesFinderFeature(session, fileid).find(test);
assertEquals(modified, attr.getModificationDate());
assertEquals(attr, status.getResponse());
assertEquals(Collections.singletonMap("test", "t"), new DriveMetadataFeature(session, fileid).getMetadata(test));
new DriveDeleteFeature(session, fileid).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selectorData, final RuleData rule) {
SignRuleHandler ruleHandler = SignPluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(rule));
if (ObjectUtils.isEmpty(ruleHandler) || !ruleHandler.getSignRequestBody()) {
VerifyResult result = signService.signatureVerify(exchange);
if (result.isFailed()) {
return WebFluxResultUtils.failedResult(ShenyuResultEnum.SIGN_IS_NOT_PASS.getCode(),
result.getReason(), exchange);
}
return chain.execute(exchange);
}
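// signRequestBody is enabled for this rule: buffer the body and verify the signature against it before continuing the chain.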
return ServerWebExchangeUtils.rewriteRequestBody(exchange, messageReaders, body -> {
VerifyResult result = signVerifyWithBody(body, exchange);
if (result.isSuccess()) {
return Mono.just(body);
}
throw new ResponsiveException(ShenyuResultEnum.SIGN_IS_NOT_PASS.getCode(), result.getReason(), exchange);
}).flatMap(chain::execute)
.onErrorResume(error -> {
if (error instanceof ResponsiveException) {
return WebFluxResultUtils.failedResult((ResponsiveException) error);
}
return Mono.error(error);
});
}
|
@Test
public void testSignPluginSignBody2() {
this.ruleData.setHandle("{\"signRequestBody\": true}");
String requestBody = "{\"data\": \"4\"}";
this.exchange = MockServerWebExchange.from(MockServerHttpRequest
.method(HttpMethod.POST, "/test?data2=3")
.header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
.body(requestBody));
when(signService.signatureVerify(exchange, requestBody)).thenReturn(VerifyResult.fail(""));
when(this.chain.execute(any())).thenReturn(Mono.empty());
SelectorData selectorData = mock(SelectorData.class);
signPluginDataHandler.handlerRule(ruleData);
StepVerifier.create(signPlugin.doExecute(this.exchange, this.chain, selectorData, this.ruleData)).expectSubscription().verifyComplete();
}
|
public static <T> T execute(Single<T> apiCall) {
try {
return apiCall.blockingGet();
} catch (HttpException e) {
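// Try to map the error body to a structured OpenAiHttpException; rethrow the original HttpException if parsing fails.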
try {
if (e.response() == null || e.response().errorBody() == null) {
throw e;
}
String errorBody = e.response().errorBody().string();
OpenAiError error = mapper.readValue(errorBody, OpenAiError.class);
throw new OpenAiHttpException(error, e, e.code());
} catch (IOException ex) {
// couldn't parse OpenAI error
throw e;
}
}
}
|
@Test
void executeNullErrorBodyThrowOriginalError() {
// exception with a successful response creates an error without an error body
HttpException httpException = new HttpException(Response.success(new CompletionResult()));
Single<CompletionResult> single = Single.error(httpException);
HttpException exception = assertThrows(HttpException.class, () -> OpenAiService.execute(single));
}
|
@Override
public void onLeaderInformationChange(String componentId, LeaderInformation leaderInformation) {
synchronized (lock) {
notifyLeaderInformationChangeInternal(
componentId,
leaderInformation,
confirmedLeaderInformation.forComponentIdOrEmpty(componentId));
}
}
|
@Test
void testAllLeaderInformationChangeEventWithPartialCorrection() throws Exception {
final AtomicReference<LeaderInformationRegister> storedLeaderInformation =
new AtomicReference<>();
new Context(storedLeaderInformation) {
{
runTestWithSynchronousEventHandling(
() -> {
final UUID leaderSessionID = UUID.randomUUID();
grantLeadership(leaderSessionID);
final LeaderInformationRegister correctLeaderInformationRegister =
storedLeaderInformation.get();
assertThat(correctLeaderInformationRegister.getRegisteredComponentIds())
.containsExactlyInAnyOrder(
contenderContext0.componentId,
contenderContext1.componentId);
// change LeaderInformation partially on external storage
final String componentIdWithChange = contenderContext0.componentId;
final String componentIdWithoutChange = contenderContext1.componentId;
final LeaderInformationRegister
partiallyChangedLeaderInformationRegister =
LeaderInformationRegister.clear(
correctLeaderInformationRegister,
componentIdWithChange);
storedLeaderInformation.set(partiallyChangedLeaderInformationRegister);
leaderElectionService.onLeaderInformationChange(
partiallyChangedLeaderInformationRegister);
assertThat(
storedLeaderInformation
.get()
.forComponentId(componentIdWithChange))
.as("Removed leader information should have been reset.")
.hasValue(
correctLeaderInformationRegister.forComponentIdOrEmpty(
componentIdWithChange));
assertThat(
storedLeaderInformation
.get()
.forComponentId(componentIdWithoutChange))
.hasValue(
correctLeaderInformationRegister.forComponentIdOrEmpty(
componentIdWithoutChange));
});
}
};
}
|
public static DatabaseType getStorageType(final DataSource dataSource) {
try (Connection connection = dataSource.getConnection()) {
return DatabaseTypeFactory.get(connection.getMetaData().getURL());
} catch (final SQLFeatureNotSupportedException sqlFeatureNotSupportedException) {
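// The driver cannot expose the URL through connection metadata; recover the JDBC URL from the data source instead.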
if (dataSource instanceof CatalogSwitchableDataSource) {
return DatabaseTypeFactory.get(((CatalogSwitchableDataSource) dataSource).getUrl());
}
if (dataSource.getClass().getName().equals(new HikariDataSourcePoolMetaData().getType())) {
HikariDataSourcePoolFieldMetaData dataSourcePoolFieldMetaData = new HikariDataSourcePoolFieldMetaData();
String jdbcUrlFieldName = ReflectionUtils.<String>getFieldValue(dataSource, dataSourcePoolFieldMetaData.getJdbcUrlFieldName())
.orElseThrow(() -> new SQLWrapperException(sqlFeatureNotSupportedException));
return DatabaseTypeFactory.get(jdbcUrlFieldName);
}
throw new SQLWrapperException(sqlFeatureNotSupportedException);
} catch (final SQLException ex) {
throw new SQLWrapperException(ex);
}
}
|
@Test
void assertGetStorageType() throws SQLException {
assertThat(DatabaseTypeEngine.getStorageType(mockDataSource(TypedSPILoader.getService(DatabaseType.class, "H2"))).getType(), is("H2"));
}
|
public static File copyFilesFromDir(File src, File dest, boolean isOverride) throws IORuntimeException {
return FileCopier.create(src, dest).setCopyContentIfDir(true).setOnlyCopyFile(true).setOverride(isOverride).copy();
}
|
@Test
@Disabled
public void copyFilesFromDirTest() {
final File srcFile = FileUtil.file("D:\\驱动");
final File destFile = FileUtil.file("d:\\驱动备份");
FileUtil.copyFilesFromDir(srcFile, destFile, true);
}
|
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (chatMessage.getType() != ChatMessageType.TRADE
&& chatMessage.getType() != ChatMessageType.GAMEMESSAGE
&& chatMessage.getType() != ChatMessageType.SPAM
&& chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
{
return;
}
String message = chatMessage.getMessage();
Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
if (matcher.find())
{
final String boss = matcher.group("boss");
final int kc = Integer.parseInt(matcher.group("kc"));
final String pre = matcher.group("pre");
final String post = matcher.group("post");
if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
{
unsetKc(boss);
return;
}
String renamedBoss = KILLCOUNT_RENAMES
.getOrDefault(boss, boss)
// The config service doesn't support keys with colons in them
.replace(":", "");
if (boss != renamedBoss)
{
// Unset old TOB kc
unsetKc(boss);
unsetPb(boss);
unsetKc(boss.replace(":", "."));
unsetPb(boss.replace(":", "."));
// Unset old story mode
unsetKc("Theatre of Blood Story Mode");
unsetPb("Theatre of Blood Story Mode");
}
setKc(renamedBoss, kc);
// We either already have the pb, or need to remember the boss for the upcoming pb
if (lastPb > -1)
{
log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
if (renamedBoss.contains("Theatre of Blood"))
{
// TOB team size isn't sent in the kill message, but can be computed from varbits
int tobTeamSize = tobTeamSize();
lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
}
else if (renamedBoss.contains("Tombs of Amascut"))
{
// TOA team size isn't sent in the kill message, but can be computed from varbits
int toaTeamSize = toaTeamSize();
lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
}
final double pb = getPb(renamedBoss);
// If a raid with a team size, only update the pb if it is lower than the existing pb
// so that the pb is the overall lowest of any team size
if (lastTeamSize == null || pb == 0 || lastPb < pb)
{
log.debug("Setting overall pb (old: {})", pb);
setPb(renamedBoss, lastPb);
}
if (lastTeamSize != null)
{
log.debug("Setting team size pb: {}", lastTeamSize);
setPb(renamedBoss + " " + lastTeamSize, lastPb);
}
lastPb = -1;
lastTeamSize = null;
}
else
{
lastBossKill = renamedBoss;
lastBossTime = client.getTickCount();
}
return;
}
matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
if (matcher.find())
{
final int oldWins = getKc("Duel Arena Wins");
final int wins = matcher.group(2).equals("one") ? 1 :
Integer.parseInt(matcher.group(2).replace(",", ""));
final String result = matcher.group(1);
int winningStreak = getKc("Duel Arena Win Streak");
int losingStreak = getKc("Duel Arena Lose Streak");
if (result.equals("won") && wins > oldWins)
{
losingStreak = 0;
winningStreak += 1;
}
else if (result.equals("were defeated"))
{
losingStreak += 1;
winningStreak = 0;
}
else
{
log.warn("unrecognized duel streak chat message: {}", message);
}
setKc("Duel Arena Wins", wins);
setKc("Duel Arena Win Streak", winningStreak);
setKc("Duel Arena Lose Streak", losingStreak);
}
matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
if (matcher.find())
{
int losses = matcher.group(1).equals("one") ? 1 :
Integer.parseInt(matcher.group(1).replace(",", ""));
setKc("Duel Arena Losses", losses);
}
matcher = KILL_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = NEW_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_PB_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = RAIDS_DURATION_PATTERN.matcher(message);
if (matcher.find())
{
matchPb(matcher);
}
matcher = HS_PB_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group("floor"));
String floortime = matcher.group("floortime");
String floorpb = matcher.group("floorpb");
String otime = matcher.group("otime");
String opb = matcher.group("opb");
String pb = MoreObjects.firstNonNull(floorpb, floortime);
setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
if (otime != null)
{
pb = MoreObjects.firstNonNull(opb, otime);
setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
}
}
matcher = HS_KC_FLOOR_PATTERN.matcher(message);
if (matcher.find())
{
int floor = Integer.parseInt(matcher.group(1));
int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
setKc("Hallowed Sepulchre Floor " + floor, kc);
}
matcher = HS_KC_GHC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hallowed Sepulchre", kc);
}
matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
setKc("Hunter Rumours", kc);
}
if (lastBossKill != null && lastBossTime != client.getTickCount())
{
lastBossKill = null;
lastBossTime = -1;
}
matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
if (matcher.find())
{
String item = matcher.group(1);
int petId = findPet(item);
if (petId != -1)
{
final List<Integer> petList = new ArrayList<>(getPetList());
if (!petList.contains(petId))
{
log.debug("New pet added: {}/{}", item, petId);
petList.add(petId);
setPetList(petList);
}
}
}
matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
if (matcher.find())
{
int kc = Integer.parseInt(matcher.group(1));
setKc("Guardians of the Rift", kc);
}
}
|
@Test
public void testNightmareNoPb()
{
ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Nightmare kill count is: <col=ff0000>1130</col>", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Team size: <col=ff0000>Solo</col> Fight duration: <col=ff0000>10:47</col>. Personal best: 8:44", null, 0);
chatCommandsPlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration("killcount", "nightmare", 1130);
verify(configManager).setRSProfileConfiguration("personalbest", "nightmare", 8 * 60 + 44.0);
}
|
@Override
public String getMethodName() {
return methodName;
}
|
@Test
void getMethodName() {
Assertions.assertEquals("sayHello", method.getMethodName());
}
|
public static Schema fromTableSchema(TableSchema tableSchema) {
return fromTableSchema(tableSchema, SchemaConversionOptions.builder().build());
}
|
@Test
public void testFromTableSchema_map_map() {
Schema beamSchema = BigQueryUtils.fromTableSchema(BQ_MAP_TYPE, INFER_MAPS_OPTIONS);
assertEquals(MAP_MAP_TYPE, beamSchema);
}
|
@Override
public void checkAuthorization(
final KsqlSecurityContext securityContext,
final MetaStore metaStore,
final Statement statement
) {
if (statement instanceof Query) {
validateQuery(securityContext, metaStore, (Query)statement);
} else if (statement instanceof InsertInto) {
validateInsertInto(securityContext, metaStore, (InsertInto)statement);
} else if (statement instanceof CreateAsSelect) {
validateCreateAsSelect(securityContext, metaStore, (CreateAsSelect)statement);
} else if (statement instanceof PrintTopic) {
validatePrintTopic(securityContext, (PrintTopic)statement);
} else if (statement instanceof CreateSource) {
validateCreateSource(securityContext, (CreateSource)statement);
}
}
|
@Test
public void shouldThrowWhenSingleSelectWithoutSubjectReadPermissionsDenied() {
// Given:
givenSubjectAccessDenied(AVRO_TOPIC + "-key", AclOperation.READ);
final Statement statement = givenStatement(String.format(
"SELECT * FROM %s;", AVRO_STREAM_TOPIC)
);
// When:
final Exception e = assertThrows(
KsqlSchemaAuthorizationException.class,
() -> authorizationValidator.checkAuthorization(securityContext, metaStore, statement)
);
// Then:
assertThat(e.getMessage(), containsString(String.format(
"Authorization denied to Read on Schema Registry subject: [%s-key]", AVRO_TOPIC
)));
}
|
public static <T> AsSingleton<T> asSingleton() {
return new AsSingleton<>();
}
|
@Test
@Category(ValidatesRunner.class)
public void testNonSingletonSideInput() throws Exception {
PCollection<Integer> oneTwoThree = pipeline.apply(Create.of(1, 2, 3));
final PCollectionView<Integer> view = oneTwoThree.apply(View.asSingleton());
oneTwoThree.apply(
"OutputSideInputs",
ParDo.of(
new DoFn<Integer, Integer>() {
@ProcessElement
public void processElement(ProcessContext c) {
c.output(c.sideInput(view));
}
})
.withSideInputs(view));
// As long as we get an error, be flexible with how a runner surfaces it
thrown.expect(Exception.class);
pipeline.run();
}
|
public static Base64URL getAccessTokenHash(JWSAlgorithm signingAlg, OAuth2AccessTokenEntity token) {
byte[] tokenBytes = token.getJwt().serialize().getBytes();
return getHash(signingAlg, tokenBytes);
}
|
@Test
public void getAccessTokenHash256() {
mockToken256.getJwt().serialize();
Base64URL expectedHash = new Base64URL("EP1gXNeESRH-n57baopfTQ");
Base64URL resultHash = IdTokenHashUtils.getAccessTokenHash(JWSAlgorithm.HS256, mockToken256);
assertEquals(expectedHash, resultHash);
}
|
static <T extends Comparable<? super T>> int compareListWithFillValue(
List<T> left, List<T> right, T fillValue) {
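// Walk both lists in lockstep, substituting fillValue for the shorter list's missing elements; the first non-zero comparison decides.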
int longest = Math.max(left.size(), right.size());
for (int i = 0; i < longest; i++) {
T leftElement = fillValue;
T rightElement = fillValue;
if (i < left.size()) {
leftElement = left.get(i);
}
if (i < right.size()) {
rightElement = right.get(i);
}
int compareResult = leftElement.compareTo(rightElement);
if (compareResult != 0) {
return compareResult;
}
}
return 0;
}
|
@Test
public void compareWithFillValue_nonEmptyListSameSizeGreaterValue_returnsPositive() {
assertThat(
ComparisonUtility.compareListWithFillValue(
Lists.newArrayList(1, 3, 4), Lists.newArrayList(1, 2, 3), 100))
.isGreaterThan(0);
}
|
@Udf(description = "Returns the hyperbolic cosine of an INT value")
public Double cosh(
@UdfParameter(
value = "value",
description = "The value in radians to get the hyperbolic cosine of."
) final Integer value
) {
return cosh(value == null ? null : value.doubleValue());
}
|
@Test
public void shouldHandleNegative() {
assertThat(udf.cosh(-0.43), closeTo(1.0938833091357991, 0.000000000000001));
assertThat(udf.cosh(-Math.PI), closeTo(11.591953275521519, 0.000000000000001));
assertThat(udf.cosh(-Math.PI * 2), closeTo(267.7467614837482, 0.000000000000001));
assertThat(udf.cosh(-6), closeTo(201.7156361224559, 0.000000000000001));
assertThat(udf.cosh(-6L), closeTo(201.7156361224559, 0.000000000000001));
}
|
@Override
public int getIteration() {
return variables.getIteration();
}
|
@Test
public void testGetIteration() {
assertThat(unmodifiables.getIteration(), CoreMatchers.is(vars.getIteration()));
}
|
public Result parse(final String string) throws DateNotParsableException {
return this.parse(string, new Date());
}
|
@Test
public void testParseFailsOnUnparsableDate() throws Exception {
assertThrows(NaturalDateParser.DateNotParsableException.class, () -> {
naturalDateParser.parse("LOLWUT");
});
}
|
public QueryPath getPath() {
return path;
}
|
@Test
public void testContent() {
MapTableField field = new MapTableField("name", QueryDataType.INT, false, QueryPath.KEY_PATH);
assertEquals("name", field.getName());
assertEquals(QueryDataType.INT, field.getType());
assertFalse(field.isHidden());
assertEquals(QueryPath.KEY_PATH, field.getPath());
}
|
@Override
public Long getTimeMillis(K name) {
return null;
}
|
@Test
public void testGetTimeMillisDefault() {
assertEquals(1, HEADERS.getTimeMillis("name1", 1));
}
|
public boolean canProcessTask(final Task task, final long now) {
final String topologyName = task.id().topologyName();
if (!hasNamedTopologies) {
// TODO implement error handling/backoff for non-named topologies (needs KIP)
return !pausedTopologies.contains(UNNAMED_TOPOLOGY);
} else {
if (pausedTopologies.contains(topologyName)) {
return false;
} else {
final NamedTopologyMetadata metadata = topologyNameToErrorMetadata.get(topologyName);
return metadata == null || (metadata.canProcess() && metadata.canProcessTask(task, now));
}
}
}
|
@Test
public void testNamedTopologiesCanBePausedIndependently() {
final Set<String> pausedTopologies = new HashSet<>();
final TaskExecutionMetadata metadata = new TaskExecutionMetadata(NAMED_TOPOLOGIES, pausedTopologies, ProcessingMode.AT_LEAST_ONCE);
final Task mockTask1 = createMockTask(TOPOLOGY1);
final Task mockTask2 = createMockTask(TOPOLOGY2);
assertTrue(metadata.canProcessTask(mockTask1, TIME_ZERO));
assertTrue(metadata.canProcessTask(mockTask2, TIME_ZERO));
pausedTopologies.add(TOPOLOGY1);
assertFalse(metadata.canProcessTask(mockTask1, TIME_ZERO));
assertTrue(metadata.canProcessTask(mockTask2, TIME_ZERO));
pausedTopologies.remove(TOPOLOGY1);
assertTrue(metadata.canProcessTask(mockTask1, TIME_ZERO));
assertTrue(metadata.canProcessTask(mockTask2, TIME_ZERO));
}
|
@Override
@Nullable
public Object convert(@Nullable String value) {
if (isNullOrEmpty(value)) {
return null;
}
LOG.debug("Trying to parse date <{}> with pattern <{}>, locale <{}>, and timezone <{}>.", value, dateFormat, locale, timeZone);
final DateTimeFormatter formatter;
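// If the pattern itself carries a time zone, keep the parsed zone; otherwise fall back to the configured default zone.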
if (containsTimeZone) {
formatter = DateTimeFormat
.forPattern(dateFormat)
.withDefaultYear(YearMonth.now(timeZone).getYear())
.withLocale(locale);
} else {
formatter = DateTimeFormat
.forPattern(dateFormat)
.withDefaultYear(YearMonth.now(timeZone).getYear())
.withLocale(locale)
.withZone(timeZone);
}
return DateTime.parse(value, formatter);
}
|
@Test
public void testEmptyInput() throws Exception {
final DateConverter converter = new DateConverter(config("yyyy-MM-dd'T'HH:mm:ss.SSSZ", null, null));
assertThat((DateTime) converter.convert("")).isNull();
}
|
@Override
@MethodNotAvailable
public void removeAll() {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testRemoveAllWithKeys() {
adapter.removeAll(singleton(42));
}
|
static ParseResult parse(final int javaMajorVersion, final BufferedReader br) throws IOException {
final ParseResult result = new ParseResult();
int lineNumber = 0;
while (true) {
final String line = br.readLine();
lineNumber++;
if (line == null) {
break;
}
try {
    jvmOptionFromLine(javaMajorVersion, line).ifPresent(result::appendOption);
} catch (IllegalArgumentException e) {
    result.appendError(lineNumber, line);
}
}
return result;
}
|
@Test
public void testParseOptionWithFixedVersion() throws IOException {
JvmOptionsParser.ParseResult res = JvmOptionsParser.parse(11, asReader("8:-XX:+UseConcMarkSweepGC"));
assertTrue("No option match for Java 11", res.getJvmOptions().isEmpty());
res = JvmOptionsParser.parse(8, asReader("8:-XX:+UseConcMarkSweepGC"));
verifyOptions("Option must be present for Java 8", "-XX:+UseConcMarkSweepGC", res);
}
|
public LoggerContext configure() {
LoggerContext ctx = helper.getRootContext();
ctx.reset();
helper.enableJulChangePropagation(ctx);
configureConsole(ctx);
configureWithLogbackWritingToFile(ctx);
helper.apply(
LogLevelConfig.newBuilder(helper.getRootLoggerName())
.rootLevelFor(ProcessId.APP)
.immutableLevel("com.hazelcast",
Level.toLevel("WARN"))
.build(),
appSettings.getProps());
return ctx;
}
|
@Test
public void root_logger_writes_to_console_with_formatting_and_to_sonar_log_file_when_running_from_ITs() {
emulateRunFromCommandLine(true);
LoggerContext ctx = underTest.configure();
Logger rootLogger = ctx.getLogger(ROOT_LOGGER_NAME);
verifyAppConsoleAppender(rootLogger.getAppender("APP_CONSOLE"));
verifySonarLogFileAppender(rootLogger.getAppender("file_sonar"));
assertThat(rootLogger.iteratorForAppenders()).toIterable().hasSize(2);
ctx.getLoggerList()
.stream()
.filter(logger -> !ROOT_LOGGER_NAME.equals(logger.getName()) && !LOGGER_STARTUP.equals(logger.getName()))
.forEach(AppLoggingTest::verifyNoFileAppender);
}
|
@Override
public final int readInt() throws EOFException {
final int i = readInt(pos);
pos += INT_SIZE_IN_BYTES;
return i;
}
|
@Test
public void testReadInt() throws Exception {
int readInt = in.readInt();
int theInt = Bits.readInt(INIT_DATA, 0, byteOrder == BIG_ENDIAN);
assertEquals(theInt, readInt);
}
|
public void compileToDestination(File src, File dst) throws IOException {
for (Schema schema : queue) {
OutputFile o = compile(schema);
o.writeToDestination(src, dst);
}
if (protocol != null) {
compileInterface(protocol).writeToDestination(src, dst);
}
}
|
@Test
void pojoWithOptionalTurnedOffByDefault() throws IOException {
SpecificCompiler compiler = createCompiler();
compiler.compileToDestination(this.src, OUTPUT_DIR);
assertTrue(this.outputFile.exists());
try (BufferedReader reader = new BufferedReader(new FileReader(this.outputFile))) {
String line;
while ((line = reader.readLine()) != null) {
line = line.trim();
assertFalse(line.contains("Optional"));
}
}
}
|
public static Function getFunctionOfRound(FunctionCallExpr node, Function fn, List<Type> argumentTypes) {
return getFunctionOfRound(node.getParams(), fn, argumentTypes);
}
|
@Test
public void testGetFnOfTruncateForDecimalAndConstantExpression() {
List<Expr> params = Lists.newArrayList();
params.add(new DecimalLiteral(new BigDecimal(new BigInteger("1845076"), 2)));
params.add(new ArithmeticExpr(ArithmeticExpr.Operator.ADD, new IntLiteral(1), new IntLiteral(1)));
FunctionCallExpr node = new FunctionCallExpr(FunctionSet.TRUNCATE, params);
List<Type> paramTypes = Lists.newArrayList();
paramTypes.add(ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL32, 7, 2));
paramTypes.add(Type.TINYINT);
Function function = Expr.getBuiltinFunction(FunctionSet.TRUNCATE, paramTypes.toArray(new Type[0]),
Function.CompareMode.IS_NONSTRICT_SUPERTYPE_OF);
Assert.assertNotNull(function);
Function newFn = DecimalV3FunctionAnalyzer.getFunctionOfRound(node, function, paramTypes);
Type returnType = newFn.getReturnType();
Assert.assertTrue(returnType.isDouble());
}
|
public synchronized void replayEraseTable(List<Long> tableIds) {
List<RecycleTableInfo> removedTableInfos = removeTableFromRecycleBin(tableIds);
for (RecycleTableInfo info : removedTableInfos) {
info.table.deleteFromRecycleBin(info.dbId, true);
}
LOG.info("Finished replay erase tables. table id list: {}", StringUtils.join(tableIds, ","));
}
|
@Test
public void testReplayEraseTable() {
CatalogRecycleBin bin = new CatalogRecycleBin();
Table table = new Table(1L, "tbl", Table.TableType.HIVE, Lists.newArrayList());
bin.recycleTable(11, table, true);
bin.recycleTable(12, table, true);
List<Table> tables = bin.getTables(11L);
Assert.assertEquals(1, tables.size());
bin.replayEraseTable(Collections.singletonList(2L));
tables = bin.getTables(11);
Assert.assertEquals(1, tables.size());
bin.replayEraseTable(Collections.singletonList(1L));
tables = bin.getTables(11);
Assert.assertEquals(0, tables.size());
}
|
public static String getPrefixedCacheName(String name, URI uri, ClassLoader classLoader) {
String cacheNamePrefix = getPrefix(uri, classLoader);
if (cacheNamePrefix != null) {
return cacheNamePrefix + name;
} else {
return name;
}
}
|
@Test
public void testGetPrefixedCacheName() {
String prefix = getPrefixedCacheName(CACHE_NAME, uri, classLoader);
assertEquals(expectedPrefixedCacheName, prefix);
}
|
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldThrowOnUnknownSelectQualifier() {
// Given:
final SingleStatementContext stmt = givenQuery("SELECT unknown.COL0 FROM TEST1;");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> builder.buildStatement(stmt)
);
// Then:
assertThat(e.getMessage(), containsString(
"'UNKNOWN' is not a valid stream/table name or alias."));
}
|
static void runDebuggingWordCount(WordCountOptions options) {
Pipeline p = Pipeline.create(options);
PCollection<KV<String, Long>> filteredWords =
p.apply("ReadLines", TextIO.read().from(options.getInputFile()))
.apply(new WordCount.CountWords())
.apply(ParDo.of(new FilterTextFn(options.getFilterPattern())));
/*
* Concept #3: PAssert is a set of convenient PTransforms in the style of
* Hamcrest's collection matchers that can be used when writing Pipeline level tests
* to validate the contents of PCollections. PAssert is best used in unit tests
* with small data sets but is demonstrated here as a teaching tool.
*
* <p>Below we verify that the set of filtered words matches our expected counts. Note
* that PAssert does not provide any output and that successful completion of the
* Pipeline implies that the expectations were met. Learn more at
* https://beam.apache.org/documentation/pipelines/test-your-pipeline/ on how to test
* your Pipeline and see {@link DebuggingWordCountTest} for an example unit test.
*/
List<KV<String, Long>> expectedResults =
Arrays.asList(KV.of("Flourish", 3L), KV.of("stomach", 1L));
PAssert.that(filteredWords).containsInAnyOrder(expectedResults);
p.run().waitUntilFinish();
}
|
@Test
public void testDebuggingWordCount() throws Exception {
File inputFile = tmpFolder.newFile();
File outputFile = tmpFolder.newFile();
Files.asCharSink(inputFile, StandardCharsets.UTF_8)
.write("stomach secret Flourish message Flourish here Flourish");
WordCountOptions options = TestPipeline.testingPipelineOptions().as(WordCountOptions.class);
options.setInputFile(getFilePath(inputFile.getAbsolutePath()));
options.setOutput(getFilePath(outputFile.getAbsolutePath()));
DebuggingWordCount.runDebuggingWordCount(options);
}
|
@Override
public void suspend(Throwable cause) {
suspend(cause, null);
}
|
@Test
void testSuspendToFinished() throws Exception {
try (MockStateWithExecutionGraphContext context =
new MockStateWithExecutionGraphContext()) {
final TestingStateWithExecutionGraph stateWithExecutionGraph =
createStateWithExecutionGraph(context);
context.setExpectFinished(
aeg -> assertThat(aeg.getState()).isEqualTo(JobStatus.SUSPENDED));
stateWithExecutionGraph.suspend(new RuntimeException());
}
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String sqlServerType = typeDefine.getDataType().toUpperCase();
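// Translate the SQL Server type into a SeaTunnel column, preserving precision, scale and length where the type has them.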
switch (sqlServerType) {
case SQLSERVER_BIT:
builder.sourceType(SQLSERVER_BIT);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case SQLSERVER_TINYINT:
case SQLSERVER_TINYINT_IDENTITY:
builder.sourceType(SQLSERVER_TINYINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case SQLSERVER_SMALLINT:
case SQLSERVER_SMALLINT_IDENTITY:
builder.sourceType(SQLSERVER_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case SQLSERVER_INTEGER:
case SQLSERVER_INTEGER_IDENTITY:
case SQLSERVER_INT:
case SQLSERVER_INT_IDENTITY:
builder.sourceType(SQLSERVER_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case SQLSERVER_BIGINT:
case SQLSERVER_BIGINT_IDENTITY:
builder.sourceType(SQLSERVER_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case SQLSERVER_REAL:
builder.sourceType(SQLSERVER_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case SQLSERVER_FLOAT:
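// SQL Server stores FLOAT(1..24) as a 4-byte REAL, so low-precision FLOAT maps to a float type.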
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() <= 24) {
builder.sourceType(SQLSERVER_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
} else {
builder.sourceType(SQLSERVER_FLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
}
break;
case SQLSERVER_DECIMAL:
case SQLSERVER_NUMERIC:
builder.sourceType(
String.format(
"%s(%s,%s)",
SQLSERVER_DECIMAL,
typeDefine.getPrecision(),
typeDefine.getScale()));
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_MONEY:
builder.sourceType(SQLSERVER_MONEY);
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_SMALLMONEY:
builder.sourceType(SQLSERVER_SMALLMONEY);
builder.dataType(
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_CHAR:
builder.sourceType(String.format("%s(%s)", SQLSERVER_CHAR, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
break;
case SQLSERVER_NCHAR:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_NCHAR, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
break;
case SQLSERVER_VARCHAR:
if (typeDefine.getLength() == -1) {
builder.sourceType(MAX_VARCHAR);
builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
} else {
builder.sourceType(
String.format("%s(%s)", SQLSERVER_VARCHAR, typeDefine.getLength()));
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
}
builder.dataType(BasicType.STRING_TYPE);
break;
case SQLSERVER_NVARCHAR:
if (typeDefine.getLength() == -1) {
builder.sourceType(MAX_NVARCHAR);
builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
} else {
builder.sourceType(
String.format("%s(%s)", SQLSERVER_NVARCHAR, typeDefine.getLength()));
builder.columnLength(
TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
}
builder.dataType(BasicType.STRING_TYPE);
break;
case SQLSERVER_TEXT:
builder.sourceType(SQLSERVER_TEXT);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_31 - 1);
break;
case SQLSERVER_NTEXT:
builder.sourceType(SQLSERVER_NTEXT);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_30 - 1);
break;
case SQLSERVER_XML:
builder.sourceType(SQLSERVER_XML);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(POWER_2_31 - 1);
break;
case SQLSERVER_UNIQUEIDENTIFIER:
builder.sourceType(SQLSERVER_UNIQUEIDENTIFIER);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case SQLSERVER_SQLVARIANT:
builder.sourceType(SQLSERVER_SQLVARIANT);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(typeDefine.getLength());
break;
case SQLSERVER_BINARY:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_BINARY, typeDefine.getLength()));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case SQLSERVER_VARBINARY:
if (typeDefine.getLength() == -1) {
builder.sourceType(MAX_VARBINARY);
builder.columnLength(POWER_2_31 - 1);
} else {
builder.sourceType(
String.format("%s(%s)", SQLSERVER_VARBINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
}
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case SQLSERVER_IMAGE:
builder.sourceType(SQLSERVER_IMAGE);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(POWER_2_31 - 1);
break;
case SQLSERVER_TIMESTAMP:
builder.sourceType(SQLSERVER_TIMESTAMP);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(8L);
break;
case SQLSERVER_DATE:
builder.sourceType(SQLSERVER_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case SQLSERVER_TIME:
builder.sourceType(String.format("%s(%s)", SQLSERVER_TIME, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_DATETIME:
builder.sourceType(SQLSERVER_DATETIME);
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(3);
break;
case SQLSERVER_DATETIME2:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_DATETIME2, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_DATETIMEOFFSET:
builder.sourceType(
String.format("%s(%s)", SQLSERVER_DATETIMEOFFSET, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case SQLSERVER_SMALLDATETIME:
builder.sourceType(SQLSERVER_SMALLDATETIME);
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.SQLSERVER, sqlServerType, typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertDate() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("date").dataType("date").build();
Column column = SqlServerTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
}
|
public <S extends Exception, T extends TException> ExceptionHandler
convertIfInstance(Class<S> source, Class<T> target) throws T {
T targetException = null;
if (source.isInstance(e)) {
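// Instantiate the target exception type reflectively, carrying over the original message.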
try {
targetException = JavaUtils.newInstance(target,
new Class[]{String.class}, new Object[]{e.getMessage()});
} catch (Exception ex) {
// This should not happen
throw new RuntimeException(ex);
}
}
if (targetException != null) {
throw targetException;
}
return this;
}
|
@Test
public void testConvertIfInstance() {
IOException ix = new IOException("IOException test");
try {
handleException(ix).convertIfInstance(NoSuchObjectException.class, MetaException.class);
} catch (Exception e) {
fail("Exception should not happen:" + e.getMessage());
}
try {
handleException(ix).convertIfInstance(IOException.class, MetaException.class);
fail("Should throw a exception here");
} catch (Exception e) {
assertTrue(e instanceof MetaException);
assertTrue(e.getMessage().equals(ix.getMessage()));
}
try {
handleException(ix).convertIfInstance(NoSuchObjectException.class, MetaException.class)
.convertIfInstance(IOException.class, MetaException.class);
fail("Should throw a exception here");
} catch (Exception e) {
assertTrue(e instanceof MetaException);
assertTrue(e.getMessage().equals(ix.getMessage()));
}
}
|
@InvokeOnHeader(Web3jConstants.ETH_GET_UNCLE_COUNT_BY_BLOCK_HASH)
void ethGetUncleCountByBlockHash(Message message) throws IOException {
String blockHash = message.getHeader(Web3jConstants.BLOCK_HASH, configuration::getBlockHash, String.class);
Request<?, EthGetUncleCountByBlockHash> request = web3j.ethGetUncleCountByBlockHash(blockHash);
setRequestId(message, request);
EthGetUncleCountByBlockHash response = request.send();
boolean hasError = checkForError(message, response);
if (!hasError) {
message.setBody(response.getUncleCount());
}
}
|
@Test
public void ethGetUncleCountByBlockHashTest() throws Exception {
EthGetUncleCountByBlockHash response = Mockito.mock(EthGetUncleCountByBlockHash.class);
Mockito.when(mockWeb3j.ethGetUncleCountByBlockHash(any())).thenReturn(request);
Mockito.when(request.send()).thenReturn(response);
Mockito.when(response.getUncleCount()).thenReturn(BigInteger.ONE);
Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_UNCLE_COUNT_BY_BLOCK_HASH);
template.send(exchange);
BigInteger body = exchange.getIn().getBody(BigInteger.class);
assertEquals(BigInteger.ONE, body);
}
|
public static int checkNotNegative(int value, String paramName) {
if (value < 0) {
throw new IllegalArgumentException(paramName + " is " + value + " but must be >= 0");
}
return value;
}
|
@Test
public void test_checkNotNegative_whenZero() {
checkNotNegative(0, "foo");
}
|
public static String mask(String str)
{
if (str != null && str.length() > MASK_REMAIN) {
char[] chars = str.toCharArray();
Arrays.fill(chars, 0, chars.length - MASK_REMAIN, '*');
return new String(chars);
}
return str;
}
|
@Test
public void testMask()
{
assertNull(mask(null));
assertEquals("", mask(""));
assertEquals("123", mask("123"));
assertEquals("123456", mask("123456"));
assertEquals("****567890", mask("1234567890"));
}
|
void runOnce() {
if (transactionManager != null) {
try {
transactionManager.maybeResolveSequences();
RuntimeException lastError = transactionManager.lastError();
// do not continue sending if the transaction manager is in a failed state
if (transactionManager.hasFatalError()) {
if (lastError != null)
maybeAbortBatches(lastError);
client.poll(retryBackoffMs, time.milliseconds());
return;
}
if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) {
return;
}
// Check whether we need a new producerId. If so, we will enqueue an InitProducerId
// request which will be sent below
transactionManager.bumpIdempotentEpochAndResetIdIfNeeded();
if (maybeSendAndPollTransactionalRequest()) {
return;
}
} catch (AuthenticationException e) {
// This is already logged as error, but propagated here to perform any clean ups.
log.trace("Authentication exception while processing transactional request", e);
transactionManager.authenticationFailed(e);
}
}
long currentTimeMs = time.milliseconds();
long pollTimeout = sendProducerData(currentTimeMs);
client.poll(pollTimeout, currentTimeMs);
}
|
@Test
public void testTransactionAbortedExceptionOnAbortWithoutError() throws InterruptedException {
ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
TransactionManager txnManager = new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions);
setupWithTransactionState(txnManager, false, null);
doInitTransactions(txnManager, producerIdAndEpoch);
// Begin the transaction
txnManager.beginTransaction();
txnManager.maybeAddPartition(tp0);
client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp0, Errors.NONE)));
// Run it once so that the partition is added to the transaction.
sender.runOnce();
// Append a record to the accumulator.
FutureRecordMetadata metadata = appendToAccumulator(tp0, time.milliseconds(), "key", "value");
// Now abort the transaction manually.
txnManager.beginAbort();
// Try to send.
// This should abort the existing transaction and
// drain all the unsent batches with a TransactionAbortedException.
sender.runOnce();
// Now attempt to fetch the result for the record.
TestUtils.assertFutureThrows(metadata, TransactionAbortedException.class);
}
|
@SqlNullable
@Description("Returns the 2-dimensional cartesian minimum distance (based on spatial ref) between two geometries in projected units")
@ScalarFunction("ST_Distance")
@SqlType(DOUBLE)
public static Double stDistance(@SqlType(GEOMETRY_TYPE_NAME) Slice left, @SqlType(GEOMETRY_TYPE_NAME) Slice right)
{
OGCGeometry leftGeometry = EsriGeometrySerde.deserialize(left);
OGCGeometry rightGeometry = EsriGeometrySerde.deserialize(right);
verifySameSpatialReference(leftGeometry, rightGeometry);
return leftGeometry.isEmpty() || rightGeometry.isEmpty() ? null : leftGeometry.distance(rightGeometry);
}
|
@Test
public void testSTDistance()
{
assertFunction("ST_Distance(ST_Point(50, 100), ST_Point(150, 150))", DOUBLE, 111.80339887498948);
assertFunction("ST_Distance(ST_Point(50, 100), ST_GeometryFromText('POINT (150 150)'))", DOUBLE, 111.80339887498948);
assertFunction("ST_Distance(ST_GeometryFromText('POINT (50 100)'), ST_GeometryFromText('POINT (150 150)'))", DOUBLE, 111.80339887498948);
assertFunction("ST_Distance(ST_GeometryFromText('MULTIPOINT (50 100, 50 200)'), ST_GeometryFromText('Point (50 100)'))", DOUBLE, 0.0);
assertFunction("ST_Distance(ST_GeometryFromText('LINESTRING (50 100, 50 200)'), ST_GeometryFromText('LINESTRING (10 10, 20 20)'))", DOUBLE, 85.44003745317531);
assertFunction("ST_Distance(ST_GeometryFromText('MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))'), ST_GeometryFromText('LINESTRING (10 20, 20 50)'))", DOUBLE, 17.08800749063506);
assertFunction("ST_Distance(ST_GeometryFromText('POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))'), ST_GeometryFromText('POLYGON ((4 4, 4 5, 5 5, 5 4, 4 4))'))", DOUBLE, 1.4142135623730951);
assertFunction("ST_Distance(ST_GeometryFromText('MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))'), ST_GeometryFromText('POLYGON ((10 100, 30 10, 30 100, 10 100))'))", DOUBLE, 27.892651361962706);
assertFunction("ST_Distance(ST_GeometryFromText('POINT EMPTY'), ST_Point(150, 150))", DOUBLE, null);
assertFunction("ST_Distance(ST_Point(50, 100), ST_GeometryFromText('POINT EMPTY'))", DOUBLE, null);
assertFunction("ST_Distance(ST_GeometryFromText('POINT EMPTY'), ST_GeometryFromText('POINT EMPTY'))", DOUBLE, null);
assertFunction("ST_Distance(ST_GeometryFromText('MULTIPOINT EMPTY'), ST_GeometryFromText('Point (50 100)'))", DOUBLE, null);
assertFunction("ST_Distance(ST_GeometryFromText('LINESTRING (50 100, 50 200)'), ST_GeometryFromText('LINESTRING EMPTY'))", DOUBLE, null);
assertFunction("ST_Distance(ST_GeometryFromText('MULTILINESTRING EMPTY'), ST_GeometryFromText('LINESTRING (10 20, 20 50)'))", DOUBLE, null);
assertFunction("ST_Distance(ST_GeometryFromText('POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))'), ST_GeometryFromText('POLYGON EMPTY'))", DOUBLE, null);
assertFunction("ST_Distance(ST_GeometryFromText('MULTIPOLYGON EMPTY'), ST_GeometryFromText('POLYGON ((10 100, 30 10, 30 100, 10 100))'))", DOUBLE, null);
}
|
void handleStart(Exchange exchange, MetricRegistry registry, String metricsName) {
String propertyName = getPropertyName(metricsName);
Timer.Context context = getTimerContextFromExchange(exchange, propertyName);
if (context == null) {
Timer timer = registry.timer(metricsName);
context = timer.time();
exchange.setProperty(propertyName, context);
} else {
LOG.warn("Timer \"{}\" already running", metricsName);
}
}
|
@Test
public void testHandleStart() {
when(exchange.getProperty(PROPERTY_NAME, Timer.Context.class)).thenReturn(null);
producer.handleStart(exchange, registry, METRICS_NAME);
inOrder.verify(exchange, times(1)).getProperty(PROPERTY_NAME, Timer.Context.class);
inOrder.verify(registry, times(1)).timer(METRICS_NAME);
inOrder.verify(timer, times(1)).time();
inOrder.verify(exchange, times(1)).setProperty(PROPERTY_NAME, context);
inOrder.verifyNoMoreInteractions();
}
|
@Nonnull
public static ToConverter getToConverter(QueryDataType type) {
if (type.getTypeFamily() == QueryDataTypeFamily.OBJECT) {
// User-defined types are subject to the same conversion rules as ordinary OBJECT.
type = QueryDataType.OBJECT;
}
return Objects.requireNonNull(CONVERTERS.get(type), "missing converter for " + type);
}
|
@Test
public void test_instantConversion() {
OffsetDateTime time = OffsetDateTime.of(2020, 9, 8, 11, 4, 0, 0, UTC);
Object converted = getToConverter(TIMESTAMP_WITH_TZ_INSTANT).convert(time);
assertThat(converted).isEqualTo(Instant.ofEpochMilli(1599563040000L));
}
|
@Override
public CompletableFuture<Void> cleanupAsync(JobID jobId) {
mainThreadExecutor.assertRunningInMainThread();
CompletableFuture<Void> cleanupFuture = FutureUtils.completedVoidFuture();
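// Prioritized cleanups are chained one after another; the regular cleanups below then run concurrently.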
for (CleanupWithLabel<T> cleanupWithLabel : prioritizedCleanup) {
cleanupFuture =
cleanupFuture.thenCompose(
ignoredValue ->
withRetry(
jobId,
cleanupWithLabel.getLabel(),
cleanupWithLabel.getCleanup()));
}
return cleanupFuture.thenCompose(
ignoredValue ->
FutureUtils.completeAll(
regularCleanup.stream()
.map(
cleanupWithLabel ->
withRetry(
jobId,
cleanupWithLabel.getLabel(),
cleanupWithLabel.getCleanup()))
.collect(Collectors.toList())));
}
|
@Test
void testMediumPriorityCleanupBlocksAllLowerPrioritizedCleanups() {
final SingleCallCleanup highPriorityCleanup = SingleCallCleanup.withCompletionOnCleanup();
final SingleCallCleanup lowerThanHighPriorityCleanup =
SingleCallCleanup.withoutCompletionOnCleanup();
final SingleCallCleanup noPriorityCleanup0 = SingleCallCleanup.withCompletionOnCleanup();
final SingleCallCleanup noPriorityCleanup1 = SingleCallCleanup.withCompletionOnCleanup();
final DefaultResourceCleaner<CleanupCallback> testInstance =
createTestInstanceBuilder()
.withPrioritizedCleanup("Prio #0", highPriorityCleanup)
.withPrioritizedCleanup("Prio #1", lowerThanHighPriorityCleanup)
.withRegularCleanup("Reg #0", noPriorityCleanup0)
.withRegularCleanup("Reg #1", noPriorityCleanup1)
.build();
assertThat(highPriorityCleanup.isDone()).isFalse();
final CompletableFuture<Void> overallCleanupResult = testInstance.cleanupAsync(JOB_ID);
assertThat(highPriorityCleanup.isDone()).isTrue();
assertThat(lowerThanHighPriorityCleanup.isDone()).isFalse();
assertThat(noPriorityCleanup0.isDone()).isFalse();
assertThat(noPriorityCleanup1.isDone()).isFalse();
assertThat(overallCleanupResult.isDone()).isFalse();
lowerThanHighPriorityCleanup.completeCleanup();
assertThat(overallCleanupResult).isCompleted();
assertThat(highPriorityCleanup.isDone()).isTrue();
assertThat(lowerThanHighPriorityCleanup.isDone()).isTrue();
assertThat(noPriorityCleanup0.isDone()).isTrue();
assertThat(noPriorityCleanup1.isDone()).isTrue();
}
|
@Override
public void removePlugin(final PluginData pluginData) {
super.getWasmExtern(REMOVE_PLUGIN_METHOD_NAME)
.ifPresent(handlerPlugin -> callWASI(pluginData, handlerPlugin));
}
|
@Test
public void removePluginTest() {
pluginDataHandler.removePlugin(pluginData);
testWasmPluginDataHandler.handlerPlugin(pluginData);
testWasmPluginDataHandler.removePlugin(pluginData);
}
|
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
return bean;
}
|
@Test
void beansWithMethodsAnnotatedWithRecurringAnnotationCronAndIntervalWillThrowException() {
// GIVEN
final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
// WHEN & THEN
assertThatThrownBy(() -> recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringJobWithCronAndInterval(), "not important"))
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName)
throws Exception {
Message in = exchange.getIn();
Counter counter = registry.counter(metricsName);
Long increment = endpoint.getIncrement();
Long decrement = endpoint.getDecrement();
Long finalIncrement = getLongHeader(in, HEADER_COUNTER_INCREMENT, increment);
Long finalDecrement = getLongHeader(in, HEADER_COUNTER_DECREMENT, decrement);
if (finalIncrement != null) {
counter.inc(finalIncrement);
} else if (finalDecrement != null) {
counter.dec(finalDecrement);
} else {
counter.inc();
}
}
|
@Test
public void testProcessWithIncrementOnly() throws Exception {
Object action = null;
when(endpoint.getIncrement()).thenReturn(INCREMENT);
when(endpoint.getDecrement()).thenReturn(null);
when(in.getHeader(HEADER_COUNTER_INCREMENT, INCREMENT, Long.class)).thenReturn(INCREMENT);
producer.doProcess(exchange, endpoint, registry, METRICS_NAME);
inOrder.verify(exchange, times(1)).getIn();
inOrder.verify(registry, times(1)).counter(METRICS_NAME);
inOrder.verify(endpoint, times(1)).getIncrement();
inOrder.verify(endpoint, times(1)).getDecrement();
inOrder.verify(in, times(1)).getHeader(HEADER_COUNTER_INCREMENT, INCREMENT, Long.class);
inOrder.verify(in, times(1)).getHeader(HEADER_COUNTER_DECREMENT, action, Long.class);
inOrder.verify(counter, times(1)).inc(INCREMENT);
inOrder.verifyNoMoreInteractions();
}
|
Double applyRescaleFactor(double predictionDouble) {
return predictionDouble * targetField.getRescaleFactor();
}
|
@Test
void applyRescaleFactor() {
TargetField targetField = new TargetField(Collections.emptyList(), null, "string", null, null, null, null,
null);
KiePMMLTarget kiePMMLTarget = getBuilder(targetField).build();
assertThat(kiePMMLTarget.applyRescaleFactor(4.0)).isCloseTo(4.0, Offset.offset(0.0));
targetField = new TargetField(Collections.emptyList(), null, "string", null, null, null, null, 2.0);
kiePMMLTarget = getBuilder(targetField).build();
assertThat(kiePMMLTarget.applyRescaleFactor(4.0)).isCloseTo(8.0, Offset.offset(0.0));
}
|
@Override
public Long dbSize(RedisClusterNode node) {
return execute(node, RedisCommands.DBSIZE);
}
|
@Test
public void testDbSize() {
RedisClusterNode master = getFirstMaster();
Long size = connection.dbSize(master);
assertThat(size).isZero();
}
|
public static boolean isDeprecated(PropertyKey key) {
return DEPRECATED_CHECKER.hasAnnotation(key);
}
|
@Test
public void isDeprecated() throws Exception {
assertFalse(PropertyKey.isDeprecated("alluxio.version"));
}
|
public SerializationContext(Config config) {
scopedMetaShareEnabled = config.isScopedMetaShareEnabled();
if (scopedMetaShareEnabled) {
metaContext = new MetaContext();
}
}
|
@Test
public void testSerializationContext() {
FuryBuilder builder = new FuryBuilder().withDeserializeNonexistentClass(false);
builder.build(); // trigger finish
SerializationContext context = new SerializationContext(new Config(builder));
assertFalse(context.containsKey("A"));
context.add("A", 1);
assertTrue(context.containsKey("A"));
assertEquals(context.get("A"), 1);
context.reset();
assertFalse(context.containsKey("A"));
}
|
public boolean expireEntryIfNotSet(K key, Duration ttl, Duration maxIdleTime) {
return get(expireEntryIfNotSetAsync(key, ttl, maxIdleTime));
}
|
@Test
public void testExpireEntryIfNotSet() {
RMapCache<String, String> testMap = redisson.getMapCache("map");
testMap.put("key", "value");
testMap.expireEntryIfNotSet("key", Duration.ofMillis(0), Duration.ofMillis(20000));
assertThat(testMap.remainTimeToLive("key")).isBetween(19800L, 20000L);
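// a follow-up sketch (assumption: a second call must not shorten the expiration already set)
testMap.expireEntryIfNotSet("key", Duration.ofMillis(0), Duration.ofMillis(5000));
assertThat(testMap.remainTimeToLive("key")).isGreaterThan(5000L);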
}
|
public Tenant getTenant(TenantName tenantName) {
return tenants.get(tenantName);
}
|
@Test
public void testStartUp() {
assertEquals(tenantRepository.getTenant(tenant1).getName(), tenant1);
assertEquals(tenantRepository.getTenant(tenant2).getName(), tenant2);
}
|
public static TypeBuilder<Schema> builder() {
return new TypeBuilder<>(new SchemaCompletion(), new NameContext());
}
|
@Test
void duble() { // intentionally misspelled: "double" is a Java reserved word
Schema.Type type = Schema.Type.DOUBLE;
Schema simple = SchemaBuilder.builder().doubleType();
Schema expected = primitive(type, simple);
Schema built1 = SchemaBuilder.builder().doubleBuilder().prop("p", "v").endDouble();
assertEquals(expected, built1);
}
|
public void unJar(File jarFile, File toDir) throws IOException {
unJar(jarFile, toDir, MATCH_ANY);
}
|
@Test
public void testUnJarWithPattern() throws Exception {
File unjarDir = getUnjarDir("unjar-pattern");
// Unjar only a regex
RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
unjarDir,
Pattern.compile(".*baz.*"));
assertFalse("foobar not unpacked",
new File(unjarDir, TestRunJar.FOOBAR_TXT).exists());
assertTrue("foobaz unpacked",
new File(unjarDir, FOOBAZ_TXT).exists());
}
|
public Result waitForCondition(Config config, Supplier<Boolean>... conditionCheck) {
return finishOrTimeout(
config,
conditionCheck,
() -> jobIsDoneOrFinishing(config.project(), config.region(), config.jobId()));
}
|
@Test
public void testWaitForConditionTimeout() throws IOException {
when(client.getJobStatus(any(), any(), any())).thenReturn(JobState.RUNNING);
Result result = new PipelineOperator(client).waitForCondition(DEFAULT_CONFIG, () -> false);
assertThat(result).isEqualTo(Result.TIMEOUT);
}
|
public static Labels fromMap(Map<String, String> labels) {
if (labels != null) {
return new Labels(labels);
}
return EMPTY;
}
|
@Test
public void testParseNullLabelsInFromMap() {
assertThat(Labels.fromMap(null), is(Labels.EMPTY));
}
|
public NetworkTopology getNetworkTopology() {
return networktopology;
}
|
@Test
public void testNetworkTopologyInstantiation() throws Exception {
// case 1, dfs.use.dfs.network.topology=true, use the default
// DFSNetworkTopology impl.
Configuration conf1 = new HdfsConfiguration();
FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
DatanodeManager dm1 = mockDatanodeManager(fsn, conf1);
assertEquals(DFSNetworkTopology.class, dm1.getNetworkTopology().getClass());
// case 2, dfs.use.dfs.network.topology=false, use the default
// NetworkTopology impl.
Configuration conf2 = new HdfsConfiguration();
conf2.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
DatanodeManager dm2 = mockDatanodeManager(fsn, conf2);
assertEquals(NetworkTopology.class, dm2.getNetworkTopology()
.getClass());
// case 3, dfs.use.dfs.network.topology=false, and specify the
// net.topology.impl property.
Configuration conf3 = new HdfsConfiguration();
conf3.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
MockDfsNetworkTopology.class, NetworkTopology.class);
conf3.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
DatanodeManager dm3 = mockDatanodeManager(fsn, conf3);
assertEquals(MockDfsNetworkTopology.class, dm3.getNetworkTopology()
.getClass());
// case 4, dfs.use.dfs.network.topology=true, and specify the
// dfs.net.topology.impl property.
Configuration conf4 = new HdfsConfiguration();
conf4.setClass(DFSConfigKeys.DFS_NET_TOPOLOGY_IMPL_KEY,
MockDfsNetworkTopology.class, NetworkTopology.class);
conf4.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, true);
DatanodeManager dm4 = mockDatanodeManager(fsn, conf4);
assertEquals(MockDfsNetworkTopology.class, dm4.getNetworkTopology()
.getClass());
}
|
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
if (!isValidIdentifier(identifier)) {
return false;
}
String database = identifier.namespace().level(0);
TableOperations ops = newTableOps(identifier);
TableMetadata lastMetadata = null;
if (purge) {
try {
lastMetadata = ops.current();
} catch (NotFoundException e) {
LOG.warn(
"Failed to load table metadata for table: {}, continuing drop without purge",
identifier,
e);
}
}
try {
clients.run(
client -> {
client.dropTable(
database,
identifier.name(),
false /* do not delete data */,
false /* throw NoSuchObjectException if the table doesn't exist */);
return null;
});
if (purge && lastMetadata != null) {
CatalogUtil.dropTableData(ops.io(), lastMetadata);
}
LOG.info("Dropped table: {}", identifier);
return true;
} catch (NoSuchTableException | NoSuchObjectException e) {
LOG.info("Skipping drop, table does not exist: {}", identifier, e);
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop " + identifier, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to dropTable", e);
}
}
|
@Test
public void testSetDefaultPartitionSpec() throws Exception {
Schema schema = getTestSchema();
TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
try {
Table table = catalog.buildTable(tableIdent, schema).create();
assertThat(hmsTableParameters())
.as("Must not have default partition spec")
.doesNotContainKey(TableProperties.DEFAULT_PARTITION_SPEC);
table.updateSpec().addField(bucket("data", 16)).commit();
assertThat(hmsTableParameters())
.containsEntry(
TableProperties.DEFAULT_PARTITION_SPEC, PartitionSpecParser.toJson(table.spec()));
} finally {
catalog.dropTable(tableIdent);
}
}
|
public URL[] getWatchResources() {
return convertToURL(getProperty("watchResources"));
}
|
@Test
public void testGetWatchResources() throws Exception {
PluginConfiguration pluginConfiguration = new PluginConfiguration(getClass().getClassLoader());
File tempFile = File.createTempFile("test", "test");
// find by URL
pluginConfiguration.properties.setProperty("watchResources", tempFile.toURI().toURL().toString());
assertEquals(tempFile.toURI().toURL(), pluginConfiguration.getWatchResources()[0]);
// find by file name
pluginConfiguration.properties.setProperty("watchResources", tempFile.getAbsolutePath());
// On Mac OS X 10.9.4, temp folders use a path like "/var/..." while the canonical path is "/private/var/...".
// getWatchResources() uses getCanonicalFile() internally, so it returns "/private/var/...". Use the
// canonical file as the expectation in the assertEquals so this test succeeds.
// Alternatively, getWatchResources() could use getAbsoluteFile() instead of getCanonicalFile().
File canonicalFile = tempFile.getCanonicalFile();
assertEquals(canonicalFile.toURI().toURL(), pluginConfiguration.getWatchResources()[0]);
}
|
public Set<String> keySet() {
return keys;
}
|
@Test(expected = UnsupportedOperationException.class)
public void testKeySet_isImmutable() {
HazelcastProperties properties = new HazelcastProperties(config);
properties.keySet().remove("foo");
}
|
public abstract Projection complement(int fieldsNumber);
|
@Test
void testComplement() {
assertThat(Projection.of(new int[] {4, 1, 2}).complement(5))
.isEqualTo(Projection.of(new int[] {0, 3}));
assertThat(
Projection.of(new int[][] {new int[] {4}, new int[] {1}, new int[] {2}})
.complement(5))
.isEqualTo(Projection.of(new int[] {0, 3}));
assertThatThrownBy(
() ->
Projection.of(
new int[][] {
new int[] {4}, new int[] {1, 3}, new int[] {2}
})
.complement(10))
.isInstanceOf(IllegalStateException.class);
}
|
public static URI parse(String featureIdentifier) {
requireNonNull(featureIdentifier, "featureIdentifier may not be null");
if (featureIdentifier.isEmpty()) {
throw new IllegalArgumentException("featureIdentifier may not be empty");
}
// Legacy from the Cucumber Eclipse plugin
// Older versions of Cucumber allowed it.
if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) {
return rootPackageUri();
}
if (nonStandardPathSeparatorInUse(featureIdentifier)) {
String standardized = replaceNonStandardPathSeparator(featureIdentifier);
return parseAssumeFileScheme(standardized);
}
if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) {
return parseAssumeFileScheme(featureIdentifier);
}
if (probablyURI(featureIdentifier)) {
return parseProbableURI(featureIdentifier);
}
return parseAssumeFileScheme(featureIdentifier);
}
|
@Test
void can_parse_relative_file_form() {
URI uri = FeaturePath.parse("file:path/to/file.feature");
assertAll(
() -> assertThat(uri.getScheme(), is("file")),
() -> assertThat(uri.getSchemeSpecificPart(), endsWith("path/to/file.feature")));
}
|
public static CsvWriter getWriter(String filePath, Charset charset) {
return new CsvWriter(filePath, charset);
}
|
@Test
@Disabled
public void writeDataTest(){
@Data
@AllArgsConstructor
class User {
Integer userId;
String username;
String mobile;
}
List<String> header = ListUtil.of("用户id", "用户名", "手机号");
List<CsvRow> row = new ArrayList<>();
List<User> datas = new ArrayList<>();
datas.add(new User(1, "张三", "18800001111"));
datas.add(new User(2, "李四", "18800001112"));
datas.add(new User(3, "王五", "18800001113"));
datas.add(new User(4, "赵六", "18800001114"));
// headMap may be null:
//Map<String, Integer> headMap = null;
Map<String, Integer> headMap = new HashMap<>();
headMap.put("userId", 0);
headMap.put("username", 1);
headMap.put("mobile", 2);
for (User user : datas) {
// row.size() + 1: data rows start at row 2; row 1 is the header row
row.add(new CsvRow(row.size() + 1, headMap,
BeanUtil.beanToMap(user).values().stream().map(Object::toString).collect(Collectors.toList())));
}
CsvData csvData = new CsvData(header, row);
String path = FileUtil.isWindows() ? "d:/test/csvWriteDataTest.csv" : "~/test/csvWriteDataTest.csv";
final CsvWriter writer = CsvUtil.getWriter(path, CharsetUtil.CHARSET_UTF_8);
writer.write(csvData);
}
|
@Override
protected String buildHandle(final List<URIRegisterDTO> uriList, final SelectorDO selectorDO) {
List<DivideUpstream> addList = buildDivideUpstreamList(uriList);
List<DivideUpstream> canAddList = new CopyOnWriteArrayList<>();
boolean isEventDeleted = uriList.size() == 1 && EventType.DELETED.equals(uriList.get(0).getEventType());
if (isEventDeleted) {
addList.get(0).setStatus(false);
}
SpringCloudSelectorHandle springCloudSelectorHandle = GsonUtils.getInstance().fromJson(selectorDO.getHandle(), SpringCloudSelectorHandle.class);
List<DivideUpstream> existList = springCloudSelectorHandle.getDivideUpstreams();
if (CollectionUtils.isEmpty(existList)) {
canAddList = addList;
} else {
List<DivideUpstream> diffList = addList.stream().filter(upstream -> !existList.contains(upstream)).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(diffList)) {
canAddList.addAll(diffList);
existList.addAll(diffList);
}
List<DivideUpstream> diffStatusList = addList.stream().filter(upstream -> !upstream.isStatus()
|| existList.stream().anyMatch(e -> e.equals(upstream) && e.isStatus() != upstream.isStatus())).collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(diffStatusList)) {
canAddList.addAll(diffStatusList);
}
}
if (doSubmit(selectorDO.getId(), canAddList)) {
return null;
}
springCloudSelectorHandle.setDivideUpstreams(CollectionUtils.isEmpty(existList) ? canAddList : existList);
return GsonUtils.getInstance().toJson(springCloudSelectorHandle);
}
|
@Test
public void testBuildHandle() {
shenyuClientRegisterSpringCloudService = spy(shenyuClientRegisterSpringCloudService);
final String returnStr = "{serviceId:'test1',gray:false,divideUpstreams:[{weight:50,warmup:10,protocol:"
+ "'http://',upstreamHost:'localhost',upstreamUrl:'localhost:8090',status:'true',timestamp:1637909490935}]}";
final String expected = "{\"serviceId\":\"test1\",\"gray\":false,\"divideUpstreams\":[{\"weight\":50,\"warmup\":10,\"protocol\":"
+ "\"http://\",\"upstreamHost\":\"localhost\",\"upstreamUrl\":\"localhost:8090\",\"status\":true,\"timestamp\":1637909490935}]}";
final URIRegisterDTO dto1 = URIRegisterDTO.builder().appName("test2")
.rpcType(RpcTypeEnum.SPRING_CLOUD.getName())
.host(HOST).port(8090).build();
final URIRegisterDTO dto2 = URIRegisterDTO.builder().appName("test2")
.rpcType(RpcTypeEnum.SPRING_CLOUD.getName())
.host(HOST).port(8091).build();
List<URIRegisterDTO> list = new ArrayList<>();
list.add(dto1);
SelectorDO selectorDO = mock(SelectorDO.class);
doReturn(false).when(shenyuClientRegisterSpringCloudService).doSubmit(any(), any());
when(selectorDO.getHandle()).thenReturn(returnStr);
String actual = shenyuClientRegisterSpringCloudService.buildHandle(list, selectorDO);
assertEquals(expected, actual);
SpringCloudSelectorHandle handle = GsonUtils.getInstance().fromJson(actual, SpringCloudSelectorHandle.class);
assertEquals(handle.getDivideUpstreams().size(), 1);
list.clear();
list.add(dto1);
list.add(dto2);
selectorDO = mock(SelectorDO.class);
doReturn(false).when(shenyuClientRegisterSpringCloudService).doSubmit(any(), any());
when(selectorDO.getHandle()).thenReturn(returnStr);
actual = shenyuClientRegisterSpringCloudService.buildHandle(list, selectorDO);
handle = GsonUtils.getInstance().fromJson(actual, SpringCloudSelectorHandle.class);
assertEquals(handle.getDivideUpstreams().size(), 2);
list.clear();
list.add(dto1);
selectorDO = mock(SelectorDO.class);
doReturn(false).when(shenyuClientRegisterSpringCloudService).doSubmit(any(), any());
when(selectorDO.getHandle()).thenReturn("{serviceId:'test1',gray:false,divideUpstreams:[]}");
actual = shenyuClientRegisterSpringCloudService.buildHandle(list, selectorDO);
handle = GsonUtils.getInstance().fromJson(actual, SpringCloudSelectorHandle.class);
assertEquals(handle.getDivideUpstreams().size(), 1);
}
|
@Udf(description = "Splits a string into an array of substrings based on a delimiter.")
public List<String> split(
@UdfParameter(
description = "The string to be split. If NULL, then function returns NULL.")
final String string,
@UdfParameter(
description = "The delimiter to split a string by. If NULL, then function returns NULL.")
final String delimiter) {
if (string == null || delimiter == null) {
return null;
}
// Java split() accepts regular expressions as a delimiter, but the behavior of this UDF split()
// is to accept only literal strings. This method uses Guava Splitter instead, which does not
// accept any regex pattern. This avoids confusing users when splitting by regex special
// characters, such as '.' and '|'.
try {
// Guava Splitter does not accept empty delimiters. Use the Java split() method instead.
if (delimiter.isEmpty()) {
return Arrays.asList(EMPTY_DELIMITER.split(string));
} else {
return Splitter.on(delimiter).splitToList(string);
}
} catch (final Exception e) {
throw new KsqlFunctionException(
String.format("Invalid delimiter '%s' in the split() function.", delimiter), e);
}
}
|
@Test
public void shouldSplitStringByGivenDelimiter() {
assertThat(splitUdf.split("x-y", "-"), contains("x", "y"));
assertThat(splitUdf.split("x-y", "x"), contains("", "-y"));
assertThat(splitUdf.split("x-y", "y"), contains("x-", ""));
assertThat(splitUdf.split("a.b.c.d", "."), contains("a", "b", "c", "d"));
}
|
public void addValueProviders(final String segmentName,
final RocksDB db,
final Cache cache,
final Statistics statistics) {
if (storeToValueProviders.isEmpty()) {
logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
} else if (storeToValueProviders.containsKey(segmentName)) {
throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
" has been already added. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
}
verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
}
|
@Test
public void shouldThrowIfDbToAddWasAlreadyAddedForOtherSegment() {
recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, statisticsToAdd1);
final Throwable exception = assertThrows(
IllegalStateException.class,
() -> recorder.addValueProviders(SEGMENT_STORE_NAME_2, dbToAdd1, cacheToAdd2, statisticsToAdd2)
);
assertThat(
exception.getMessage(),
is("DB instance for store " + SEGMENT_STORE_NAME_2 + " of task " + TASK_ID1 +
" was already added for another segment as a value provider. This is a bug in Kafka Streams. " +
"Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues")
);
}
|
@Override
public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
return fromConnectData(topic, schema, value);
}
|
@Test
public void testNonStringHeaderValueToBytes() {
assertArrayEquals(Utils.utf8("true"), converter.fromConnectHeader(TOPIC, "hdr", Schema.BOOLEAN_SCHEMA, true));
}
|
public ValidationResult validateRoleConfiguration(final String pluginId, final Map<String, String> roleConfiguration) {
return pluginRequestHelper.submitRequest(pluginId, REQUEST_VALIDATE_ROLE_CONFIG, new DefaultPluginInteractionCallback<>() {
@Override
public String requestBody(String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).validatePluginConfigurationRequestBody(roleConfiguration);
}
@Override
public ValidationResult onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
return getMessageConverter(resolvedExtensionVersion).getPluginConfigurationValidationResultFromResponseBody(responseBody);
}
});
}
|
@Test
void shouldTalkToPlugin_To_ValidateRoleConfiguration() {
String responseBody = "[{\"message\":\"memberOf must not be blank.\",\"key\":\"memberOf\"}]";
when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));
ValidationResult validationResult = authorizationExtension.validateRoleConfiguration(PLUGIN_ID, Collections.emptyMap());
assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_VALIDATE_ROLE_CONFIG, "{}");
assertThat(validationResult.isSuccessful()).isEqualTo(false);
assertThat(validationResult.getErrors()).contains(
new ValidationError("memberOf", "memberOf must not be blank.")
);
}
|
@NonNull
public String processShownotes() {
String shownotes = rawShownotes;
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>";
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
Document document = Jsoup.parse(shownotes);
cleanCss(document);
document.head().appendElement("style").attr("type", "text/css").text(webviewStyle);
addTimecodes(document);
return document.toString();
}
|
@Test
public void testProcessShownotesAddTimecodeHhmmNoChapters() {
final String timeStr = "10:11";
final long time = 3600 * 1000 * 10 + 60 * 1000 * 11;
String shownotes = "<p> Some test text with a timecode " + timeStr + " here.</p>";
ShownotesCleaner t = new ShownotesCleaner(context, shownotes, Integer.MAX_VALUE);
String res = t.processShownotes();
checkLinkCorrect(res, new long[]{time}, new String[]{timeStr});
}
|
public void log(final DriverEventCode code, final DirectBuffer buffer, final int offset, final int length)
{
if (DriverComponentLogger.ENABLED_EVENTS.contains(code))
{
final int captureLength = captureLength(length);
final int encodedLength = encodedLength(captureLength);
final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
final int index = ringBuffer.tryClaim(toEventCodeId(code), encodedLength);
if (index > 0)
{
try
{
encode((UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, buffer, offset);
}
finally
{
ringBuffer.commit(index);
}
}
}
}
|
@Test
void logIsNoOpIfEventIsNotEnabled()
{
buffer.setMemory(20, 100, (byte)5);
logger.log(CMD_OUT_ERROR, buffer, 20, 100);
assertEquals(0, logBuffer.getInt(lengthOffset(0), LITTLE_ENDIAN));
}
|
public static String removeStartingCharacters(String text, char ch) {
int idx = 0;
// guard against running past the end when the whole string consists of ch
while (idx < text.length() && text.charAt(idx) == ch) {
idx++;
}
if (idx > 0) {
return text.substring(idx);
}
return text;
}
|
@Test
public void testRemoveInitialCharacters() {
assertEquals("foo", StringHelper.removeStartingCharacters("foo", '/'));
assertEquals("foo", StringHelper.removeStartingCharacters("/foo", '/'));
assertEquals("foo", StringHelper.removeStartingCharacters("//foo", '/'));
}
|
public static int read(final AtomicBuffer buffer, final EntryConsumer entryConsumer)
{
final int capacity = buffer.capacity();
int recordsRead = 0;
int offset = 0;
while (offset < capacity)
{
final long observationCount = buffer.getLongVolatile(offset + OBSERVATION_COUNT_OFFSET);
if (observationCount <= 0)
{
break;
}
++recordsRead;
final String channel = buffer.getStringAscii(offset + CHANNEL_OFFSET);
final String source = buffer.getStringAscii(
offset + CHANNEL_OFFSET + BitUtil.align(SIZE_OF_INT + channel.length(), SIZE_OF_INT));
entryConsumer.accept(
observationCount,
buffer.getLongVolatile(offset + TOTAL_BYTES_LOST_OFFSET),
buffer.getLong(offset + FIRST_OBSERVATION_OFFSET),
buffer.getLongVolatile(offset + LAST_OBSERVATION_OFFSET),
buffer.getInt(offset + SESSION_ID_OFFSET),
buffer.getInt(offset + STREAM_ID_OFFSET),
channel,
source);
final int recordLength =
CHANNEL_OFFSET +
BitUtil.align(SIZE_OF_INT + channel.length(), SIZE_OF_INT) +
SIZE_OF_INT + source.length();
offset += BitUtil.align(recordLength, ENTRY_ALIGNMENT);
}
return recordsRead;
}
|
@Test
void shouldReadOneEntry()
{
final long initialBytesLost = 32;
final int timestampMs = 7;
final int sessionId = 3;
final int streamId = 1;
final String channel = "aeron:udp://stuff";
final String source = "127.0.0.1:8888";
lossReport.createEntry(initialBytesLost, timestampMs, sessionId, streamId, channel, source);
assertEquals(1, LossReportReader.read(buffer, entryConsumer));
verify(entryConsumer).accept(
1L, initialBytesLost, timestampMs, timestampMs, sessionId, streamId, channel, source);
verifyNoMoreInteractions(entryConsumer);
}
|
public void writeEncodedValue(EncodedValue encodedValue) throws IOException {
switch (encodedValue.getValueType()) {
case ValueType.BOOLEAN:
writer.write(Boolean.toString(((BooleanEncodedValue) encodedValue).getValue()));
break;
case ValueType.BYTE:
writer.write(
String.format("0x%x", ((ByteEncodedValue)encodedValue).getValue()));
break;
case ValueType.CHAR:
writer.write(
String.format("0x%x", (int)((CharEncodedValue)encodedValue).getValue()));
break;
case ValueType.SHORT:
writer.write(
String.format("0x%x", ((ShortEncodedValue)encodedValue).getValue()));
break;
case ValueType.INT:
writer.write(
String.format("0x%x", ((IntEncodedValue)encodedValue).getValue()));
break;
case ValueType.LONG:
writer.write(
String.format("0x%x", ((LongEncodedValue)encodedValue).getValue()));
break;
case ValueType.FLOAT:
writer.write(Float.toString(((FloatEncodedValue)encodedValue).getValue()));
break;
case ValueType.DOUBLE:
writer.write(Double.toString(((DoubleEncodedValue)encodedValue).getValue()));
break;
case ValueType.ANNOTATION:
writeAnnotation((AnnotationEncodedValue)encodedValue);
break;
case ValueType.ARRAY:
writeArray((ArrayEncodedValue)encodedValue);
break;
case ValueType.STRING:
writeQuotedString(((StringEncodedValue)encodedValue).getValue());
break;
case ValueType.FIELD:
writeFieldDescriptor(((FieldEncodedValue)encodedValue).getValue());
break;
case ValueType.ENUM:
writeFieldDescriptor(((EnumEncodedValue)encodedValue).getValue());
break;
case ValueType.METHOD:
writeMethodDescriptor(((MethodEncodedValue)encodedValue).getValue());
break;
case ValueType.TYPE:
writeType(((TypeEncodedValue)encodedValue).getValue());
break;
case ValueType.METHOD_TYPE:
writeMethodProtoDescriptor(((MethodTypeEncodedValue)encodedValue).getValue());
break;
case ValueType.METHOD_HANDLE:
writeMethodHandle(((MethodHandleEncodedValue)encodedValue).getValue());
break;
case ValueType.NULL:
writer.write("null");
break;
default:
throw new IllegalArgumentException("Unknown encoded value type");
}
}
|
@Test
public void testWriteEncodedValue_byte() throws IOException {
DexFormattedWriter writer = new DexFormattedWriter(output);
writer.writeEncodedValue(new ImmutableByteEncodedValue((byte)0x12));
Assert.assertEquals("0x12", output.toString());
}
|
void recordLatency(String node, long requestLatencyMs) {
fetchLatency.record(requestLatencyMs);
if (!node.isEmpty()) {
String nodeTimeName = "node-" + node + ".latency";
Sensor nodeRequestTime = this.metrics.getSensor(nodeTimeName);
if (nodeRequestTime != null)
nodeRequestTime.record(requestLatencyMs);
}
}
|
@Test
public void testLatency() {
metricsManager.recordLatency("", 123);
time.sleep(metrics.config().timeWindowMs() + 1);
metricsManager.recordLatency("", 456);
assertEquals(289.5, metricValue(metricsRegistry.fetchLatencyAvg), EPSILON);
assertEquals(456, metricValue(metricsRegistry.fetchLatencyMax), EPSILON);
}
|
@Override
protected void registerManagement() {
if (!this.registration.isRegisterEnabled()) {
return;
}
super.registerManagement();
}
|
@Test
public void testRegisterManagement() {
doReturn(false).when(registration).isRegisterEnabled();
assertThatCode(() -> {
polarisAutoServiceRegistration.registerManagement();
}).doesNotThrowAnyException();
doReturn(true).when(registration).isRegisterEnabled();
assertThatCode(() -> {
polarisAutoServiceRegistration.registerManagement();
}).doesNotThrowAnyException();
}
|
@Override
public MavenArtifact searchSha1(String sha1) throws IOException {
if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
throw new IllegalArgumentException("Invalid SHA1 format");
}
final URL url = new URL(rootURL, String.format("identify/sha1/%s",
sha1.toLowerCase()));
LOGGER.debug("Searching Nexus url {}", url);
// Determine if we need to use a proxy. The rules:
// 1) If the proxy is set, AND the setting is set to true, use the proxy
// 2) Otherwise, don't use the proxy (either the proxy isn't configured,
// or the proxy is specifically set to false)
final HttpURLConnection conn;
final URLConnectionFactory factory = new URLConnectionFactory(settings);
conn = factory.createHttpURLConnection(url, useProxy);
conn.setDoOutput(true);
final String authHeader = buildHttpAuthHeaderValue();
if (!authHeader.isEmpty()) {
conn.addRequestProperty("Authorization", authHeader);
}
// JSON would be more elegant, but there's not currently a dependency
// on JSON, so don't want to add one just for this
conn.addRequestProperty("Accept", "application/xml");
conn.connect();
switch (conn.getResponseCode()) {
case 200:
try {
final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
final Document doc = builder.parse(conn.getInputStream());
final XPath xpath = XPathFactory.newInstance().newXPath();
final String groupId = xpath
.evaluate(
"/org.sonatype.nexus.rest.model.NexusArtifact/groupId",
doc);
final String artifactId = xpath.evaluate(
"/org.sonatype.nexus.rest.model.NexusArtifact/artifactId",
doc);
final String version = xpath
.evaluate(
"/org.sonatype.nexus.rest.model.NexusArtifact/version",
doc);
final String link = xpath
.evaluate(
"/org.sonatype.nexus.rest.model.NexusArtifact/artifactLink",
doc);
final String pomLink = xpath
.evaluate(
"/org.sonatype.nexus.rest.model.NexusArtifact/pomLink",
doc);
final MavenArtifact ma = new MavenArtifact(groupId, artifactId, version);
if (link != null && !link.isEmpty()) {
ma.setArtifactUrl(link);
}
if (pomLink != null && !pomLink.isEmpty()) {
ma.setPomUrl(pomLink);
}
return ma;
} catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
// Anything else is malformed XML that we cannot reasonably recover from
throw new IOException(e.getMessage(), e);
}
case 404:
throw new FileNotFoundException("Artifact not found in Nexus");
default:
LOGGER.debug("Could not connect to Nexus received response code: {} {}",
conn.getResponseCode(), conn.getResponseMessage());
throw new IOException("Could not connect to Nexus");
}
}
|
@Test(expected = IllegalArgumentException.class)
@Ignore
public void testNullSha1() throws Exception {
searcher.searchSha1(null);
}
|
public static Script parse(byte[] program) throws ScriptException {
return parse(program, TimeUtils.currentTime());
}
|
@Test
public void testIp() {
byte[] bytes = ByteUtils.parseHex("41043e96222332ea7848323c08116dddafbfa917b8e37f0bdf63841628267148588a09a43540942d58d49717ad3fabfe14978cf4f0a8b84d2435dad16e9aa4d7f935ac");
Script s = Script.parse(bytes);
assertTrue(ScriptPattern.isP2PK(s));
}
|
public static FileEntriesLayer extraDirectoryLayerConfiguration(
Path sourceDirectory,
AbsoluteUnixPath targetDirectory,
List<String> includes,
List<String> excludes,
Map<String, FilePermissions> extraDirectoryPermissions,
ModificationTimeProvider modificationTimeProvider)
throws IOException {
FileEntriesLayer.Builder builder =
FileEntriesLayer.builder().setName(LayerType.EXTRA_FILES.getName());
Map<PathMatcher, FilePermissions> permissionsPathMatchers = new LinkedHashMap<>();
for (Map.Entry<String, FilePermissions> entry : extraDirectoryPermissions.entrySet()) {
permissionsPathMatchers.put(
FileSystems.getDefault().getPathMatcher(GLOB_PREFIX + entry.getKey()), entry.getValue());
}
DirectoryWalker walker = new DirectoryWalker(sourceDirectory).filterRoot();
// add exclusion filters
excludes.stream()
.map(pattern -> FileSystems.getDefault().getPathMatcher(GLOB_PREFIX + pattern))
.forEach(
pathMatcher ->
walker.filter(path -> !pathMatcher.matches(sourceDirectory.relativize(path))));
// add an inclusion filter
includes.stream()
.map(pattern -> FileSystems.getDefault().getPathMatcher(GLOB_PREFIX + pattern))
.map(
pathMatcher ->
(Predicate<Path>) path -> pathMatcher.matches(sourceDirectory.relativize(path)))
.reduce((matches1, matches2) -> matches1.or(matches2))
.ifPresent(walker::filter);
// walk the source tree and add layer entries
walker.walk(
localPath -> {
AbsoluteUnixPath pathOnContainer =
targetDirectory.resolve(sourceDirectory.relativize(localPath));
Instant modificationTime = modificationTimeProvider.get(localPath, pathOnContainer);
Optional<FilePermissions> permissions =
determinePermissions(
pathOnContainer, extraDirectoryPermissions, permissionsPathMatchers);
if (permissions.isPresent()) {
builder.addEntry(localPath, pathOnContainer, permissions.get(), modificationTime);
} else {
builder.addEntry(localPath, pathOnContainer, modificationTime);
}
});
return builder.build();
}
|
@Test
public void testExtraDirectoryLayerConfiguration_includesAndExcludesEverything()
throws URISyntaxException, IOException {
Path extraFilesDirectory = Paths.get(Resources.getResource("core/layer").toURI());
FileEntriesLayer layerConfiguration =
JavaContainerBuilderHelper.extraDirectoryLayerConfiguration(
extraFilesDirectory,
AbsoluteUnixPath.get("/"),
Arrays.asList("**/*"),
Arrays.asList("**/*"),
Collections.emptyMap(),
(ignored1, ignored2) -> Instant.EPOCH);
assertThat(layerConfiguration.getEntries()).isEmpty();
}
|
@Override
public boolean isActive() {
return task.isActive();
}
|
@Test
public void shouldDelegateIsActive() {
final ReadOnlyTask readOnlyTask = new ReadOnlyTask(task);
readOnlyTask.isActive();
verify(task).isActive();
}
|
@Override
public List<String> splitAndEvaluate() {
try (ReflectContext context = new ReflectContext(JAVA_CLASSPATH)) {
if (Strings.isNullOrEmpty(inlineExpression)) {
return Collections.emptyList();
}
return flatten(evaluate(context, GroovyUtils.split(handlePlaceHolder(inlineExpression))));
}
}
|
@Test
void assertEvaluateForExpressionIsNull() {
InlineExpressionParser parser = TypedSPILoader.getService(InlineExpressionParser.class, "ESPRESSO", new Properties());
List<String> expected = parser.splitAndEvaluate();
assertThat(expected, is(Collections.<String>emptyList()));
}
|
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testNoopWithAllVersions(VertxTestContext context) {
String kafkaVersion = VERSIONS.defaultVersion().version();
String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion();
String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion();
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
mockNewCluster(
null,
mockSps(kafkaVersion),
mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.defaultVersion()));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.interBrokerProtocolVersion(), nullValue());
assertThat(c.logMessageFormatVersion(), nullValue());
assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
async.flag();
})));
}
|
@Override
public Timestamp getTimestamp(final int columnIndex) throws SQLException {
return (Timestamp) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, Timestamp.class), Timestamp.class);
}
|
@Test
void assertGetTimestampAndCalendarWithColumnIndex() throws SQLException {
Calendar calendar = Calendar.getInstance();
when(mergeResultSet.getCalendarValue(1, Timestamp.class, calendar)).thenReturn(new Timestamp(0L));
assertThat(shardingSphereResultSet.getTimestamp(1, calendar), is(new Timestamp(0L)));
}
|
SlackMessage createSlackMessage(EventNotificationContext ctx, SlackEventNotificationConfig config) throws PermanentEventNotificationException {
String customMessage = null;
String template = config.customMessage();
if (!isNullOrEmpty(template)) {
// If the title is not included but the channel/here still needs to be notified, add a @channel tag to the custom message
if (!config.includeTitle() && (config.notifyChannel() || config.notifyHere())) {
String tag = config.notifyChannel() ? "channel" : "here";
template = StringUtils.f("@%s\n%s", tag, template);
}
customMessage = buildCustomMessage(ctx, config, template);
}
SlackMessage.Attachment attachment = SlackMessage.Attachment.builder()
.color(config.color())
.text(customMessage)
.build();
// Note: enable link names when notifying the channel or @here; otherwise the channel tag renders as plain text.
boolean linkNames = config.linkNames() || config.notifyChannel() || config.notifyHere();
String templatedChannel = buildTemplatedChannel(ctx, config, config.channel());
String emoji = config.iconEmoji() != null ? ensureEmojiSyntax(config.iconEmoji()) : "";
return SlackMessage.builder()
.iconEmoji(emoji)
.iconUrl(config.iconUrl())
.username(config.userName())
.text(config.includeTitle() ? buildDefaultMessage(ctx, config) : null)
.channel(templatedChannel)
.linkNames(linkNames)
.attachments(isNullOrEmpty(template) ? Collections.emptySet() : Collections.singleton(attachment))
.build();
}
|
@Test
public void createSlackMessage() throws EventNotificationException {
String expectedText = "@channel *Alert _Event Definition Test Title_* triggered:\n> Event Definition Test Description \n";
SlackMessage actual = slackEventNotification.createSlackMessage(eventNotificationContext, slackEventNotificationConfig);
assertThat(actual.linkNames()).isTrue();
assertThat(actual.channel()).isEqualTo(expectedChannel);
assertThat(actual.text()).isEqualTo(expectedText);
assertThat(actual.iconUrl()).isEqualTo(expectedIconUrl);
assertThat(actual.iconEmoji()).isEqualTo(expectedEmoji);
assertThat(actual.username()).isEqualTo(expectedUsername);
assertThat(actual.attachments().size()).isEqualTo(1);
SlackMessage.Attachment attachment = actual.attachments().iterator().next();
assertThat(attachment.color()).isEqualTo(expectedColor);
assertThat(attachment.text()).isEqualTo(expectedAttachmentText);
}
|
static void maybeReportHybridDiscoveryIssue(PluginDiscoveryMode discoveryMode, PluginScanResult serviceLoadingScanResult, PluginScanResult mergedResult) {
SortedSet<PluginDesc<?>> missingPlugins = new TreeSet<>();
mergedResult.forEach(missingPlugins::add);
serviceLoadingScanResult.forEach(missingPlugins::remove);
if (missingPlugins.isEmpty()) {
if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN || discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
log.warn("All plugins have ServiceLoader manifests, consider reconfiguring {}={}",
WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.SERVICE_LOAD);
}
} else {
String message = String.format(
"One or more plugins are missing ServiceLoader manifests may not be usable with %s=%s: %s%n" +
"Read the documentation at %s for instructions on migrating your plugins " +
"to take advantage of the performance improvements of %s mode.",
WorkerConfig.PLUGIN_DISCOVERY_CONFIG,
PluginDiscoveryMode.SERVICE_LOAD,
missingPlugins.stream()
.map(pluginDesc -> pluginDesc.location() + "\t" + pluginDesc.className() + "\t" + pluginDesc.type() + "\t" + pluginDesc.version())
.collect(Collectors.joining("\n", "[\n", "\n]")),
"https://kafka.apache.org/documentation.html#connect_plugindiscovery",
PluginDiscoveryMode.SERVICE_LOAD
);
if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN) {
log.warn("{} To silence this warning, set {}={} in the worker config.",
message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.ONLY_SCAN);
} else if (discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
throw new ConnectException(String.format("%s To silence this error, set %s=%s in the worker config.",
message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.HYBRID_WARN));
}
}
}
|
@Test
public void testOnlyScanWithPlugins() {
try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Plugins.class)) {
Plugins.maybeReportHybridDiscoveryIssue(PluginDiscoveryMode.ONLY_SCAN, empty, nonEmpty);
assertTrue(logCaptureAppender.getEvents().stream().noneMatch(e -> e.getLevel().contains("ERROR") || e.getLevel().equals("WARN")));
}
}
|
public static <InputT, OutputT> DoFnInvoker<InputT, OutputT> invokerFor(
DoFn<InputT, OutputT> fn) {
return ByteBuddyDoFnInvokerFactory.only().newByteBuddyInvoker(fn);
}
|
@Test
public void testDoFnInvokersReused() throws Exception {
// Ensures that we don't create a new Invoker class for every instance of the DoFn.
IdentityParent fn1 = new IdentityParent();
IdentityParent fn2 = new IdentityParent();
assertSame(
"Invoker classes should only be generated once for each type",
DoFnInvokers.invokerFor(fn1).getClass(),
DoFnInvokers.invokerFor(fn2).getClass());
}
|
public String encode(long... numbers) {
if (numbers.length == 0) {
return "";
}
for (final long number : numbers) {
if (number < 0) {
return "";
}
if (number > MAX_NUMBER) {
throw new IllegalArgumentException("number can not be greater than " + MAX_NUMBER + "L");
}
}
return this._encode(numbers);
}
|
@Test
public void test_for_values_greater_than_int_maxval() {
final Hashids a = new Hashids("this is my salt");
Assert.assertEquals("Y8r7W1kNN", a.encode(9876543210123L));
}
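A minimal roundtrip sketch to complement the test above, assuming Hashids' standard long[] decode(String) API:
@Test
public void test_encode_decode_roundtrip() {
final Hashids a = new Hashids("this is my salt");
final String hash = a.encode(9876543210123L);
// decode inverts encode for the same salt
Assert.assertArrayEquals(new long[] {9876543210123L}, a.decode(hash));
}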
|
public void startTaskExecutors() {
for (final TaskExecutor t: taskExecutors) {
t.start();
}
}
|
@Test
public void shouldStartTaskExecutors() {
taskManager.startTaskExecutors();
verify(taskExecutor).start();
}
|
@Override
public SQLRecognizer getSelectForUpdateRecognizer(String sql, SQLStatement ast) {
List<SQLHint> hints = ((SQLSelectStatement) ast).getSelect().getQueryBlock().getFrom().getHints();
if (CollectionUtils.isNotEmpty(hints)) {
List<String> hintsTexts = hints
.stream()
.map(hint -> {
if (hint instanceof SQLExprHint) {
SQLExpr expr = ((SQLExprHint) hint).getExpr();
return expr instanceof SQLIdentifierExpr ? ((SQLIdentifierExpr) expr).getName() : "";
} else if (hint instanceof SQLCommentHint) {
return ((SQLCommentHint) hint).getText();
}
return "";
}).collect(Collectors.toList());
if (hintsTexts.contains("UPDLOCK")) {
return new SqlServerSelectForUpdateRecognizer(sql, ast);
}
}
return null;
}
|
@Test
public void getSelectForUpdateTest() {
//test with lock
String sql = "SELECT name FROM t1 WITH (ROWLOCK, UPDLOCK) WHERE id = 'id1'";
SQLStatement sqlStatement = getSQLStatement(sql);
Assertions.assertNotNull(new SqlServerOperateRecognizerHolder().getSelectForUpdateRecognizer(sql, sqlStatement));
//test with no lock
sql = "SELECT name FROM t1 WHERE id = 'id1'";
sqlStatement = getSQLStatement(sql);
Assertions.assertNull(new SqlServerOperateRecognizerHolder().getSelectForUpdateRecognizer(sql, sqlStatement));
}
|
@Override
public String name() {
return name;
}
|
@Test
public void testSetSnapshotSummary() throws Exception {
Configuration conf = new Configuration();
conf.set("iceberg.hive.table-property-max-size", "4000");
HiveTableOperations ops =
new HiveTableOperations(conf, null, null, catalog.name(), DB_NAME, "tbl");
Snapshot snapshot = mock(Snapshot.class);
Map<String, String> summary = Maps.newHashMap();
when(snapshot.summary()).thenReturn(summary);
// create a snapshot summary whose json string size is less than the limit
for (int i = 0; i < 100; i++) {
summary.put(String.valueOf(i), "value");
}
assertThat(JsonUtil.mapper().writeValueAsString(summary).length()).isLessThan(4000);
Map<String, String> parameters = Maps.newHashMap();
ops.setSnapshotSummary(parameters, snapshot);
assertThat(parameters).as("The snapshot summary must be in parameters").hasSize(1);
// create a snapshot summary whose json string size exceeds the limit
for (int i = 0; i < 1000; i++) {
summary.put(String.valueOf(i), "value");
}
long summarySize = JsonUtil.mapper().writeValueAsString(summary).length();
// the limit has been updated to 4000 instead of the default value(32672)
assertThat(summarySize).isGreaterThan(4000).isLessThan(32672);
parameters.remove(CURRENT_SNAPSHOT_SUMMARY);
ops.setSnapshotSummary(parameters, snapshot);
assertThat(parameters)
.as("The snapshot summary must not be in parameters due to the size limit")
.isEmpty();
}
|
@Override
public byte[] compress(byte[] payloadByteArr) {
return payloadByteArr;
}
|
@Test
void compress() {
byte[] input = new byte[] {1, 2, 3, 4, 5};
final byte[] compressed = Identity.IDENTITY.compress(input);
// identity compression hands back the very same array, so assert reference equality explicitly
Assertions.assertSame(input, compressed);
}
|
public CacheSimpleEntryListenerConfig setCacheEntryListenerFactory(String cacheEntryListenerFactory) {
this.cacheEntryListenerFactory = cacheEntryListenerFactory;
return this;
}
|
@Test
public void testEqualsAndHashCode() {
assumeDifferentHashCodes();
CacheSimpleEntryListenerConfig redEntryListenerConfig = new CacheSimpleEntryListenerConfig();
redEntryListenerConfig.setCacheEntryListenerFactory("red");
CacheSimpleEntryListenerConfig blackEntryListenerConfig = new CacheSimpleEntryListenerConfig();
blackEntryListenerConfig.setCacheEntryListenerFactory("black");
EqualsVerifier.forClass(CacheSimpleEntryListenerConfig.class)
.suppress(Warning.NONFINAL_FIELDS)
.withPrefabValues(CacheSimpleEntryListenerConfigReadOnly.class,
new CacheSimpleEntryListenerConfigReadOnly(redEntryListenerConfig),
new CacheSimpleEntryListenerConfigReadOnly(blackEntryListenerConfig))
.verify();
}
|
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
ServerHttpResponse response = exchange.getResponse();
response.getHeaders().clear();
response.getHeaders().addAll(cachedResponse.headers());
}
|
@Test
void headersFromCacheOverrideHeadersFromResponse() {
SetResponseHeadersAfterCacheExchangeMutator toTest = new SetResponseHeadersAfterCacheExchangeMutator();
inputExchange.getResponse().getHeaders().set("X-Header-1", "Value-original");
CachedResponse cachedResponse = new CachedResponse.Builder(HttpStatus.OK).header("X-Header-1", "Value-cached")
.build();
toTest.accept(inputExchange, cachedResponse);
Assertions.assertThat(inputExchange.getResponse().getHeaders())
.containsEntry("X-Header-1", List.of("Value-cached"));
}
|
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
}
|
@Test
public void testMergeForeachRestartWithMutableOnStart() throws IOException {
DefaultParamManager defaultParamManager =
new DefaultParamManager(JsonHelper.objectMapperWithYaml());
defaultParamManager.init();
Map<String, ParamDefinition> allParams =
defaultParamManager.getDefaultParamsForType(StepType.FOREACH).get();
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap(
"{'loop_params': {'value': {'i' : { 'value': [1, 2, 3], 'type': 'long_array', "
+ "'validator': 'param!=null && param.size() > 2', 'mode': 'mutable'}, "
+ "'j' : {'expression': 'param1', 'type': 'STRING_ARRAY', "
+ "'validator': 'param!=null && param.size() > 2'}}, 'type': 'MAP'}}");
AssertHelper.assertThrows(
"throws exception when a foreach source restarts and tries to mutate params with MUTABLE_ON_START mode",
MaestroValidationException.class,
"Cannot modify param with mode [MUTABLE_ON_START] for parameter [loop_params]",
() -> ParamsMergeHelper.mergeParams(allParams, paramsToMerge, foreachRestartMergeContext));
}
|
public static String formatSql(final AstNode root) {
final StringBuilder builder = new StringBuilder();
new Formatter(builder).process(root, 0);
return StringUtils.stripEnd(builder.toString(), "\n");
}
|
@Test
public void shouldFormatInsertValuesStatement() {
final String statementString = "INSERT INTO ADDRESS (NUMBER, STREET, CITY) VALUES (2, 'high', 'palo alto');";
final Statement statement = parseSingle(statementString);
final String result = SqlFormatter.formatSql(statement);
assertThat(result, is("INSERT INTO ADDRESS (NUMBER, STREET, CITY) VALUES (2, 'high', 'palo alto')"));
}
|
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
// will throw UnsupportedOperationException; delegate anyway for testability
return underlying().compute(key, remappingFunction);
}
|
@Test
public void testDelegationOfUnsupportedFunctionCompute() {
final BiFunction<Object, Object, Object> mockBiFunction = mock(BiFunction.class);
new PCollectionsHashMapWrapperDelegationChecker<>()
.defineMockConfigurationForUnsupportedFunction(mock -> mock.compute(eq(this), eq(mockBiFunction)))
.defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.compute(this, mockBiFunction))
.doUnsupportedFunctionDelegationCheck();
}
|
public MergePolicyConfig getMergePolicyConfig() {
return mergePolicyConfig;
}
|
@Test
public void cacheConfigXmlTest_DefaultMergePolicy() throws IOException {
Config config = new XmlConfigBuilder(configUrl1).build();
CacheSimpleConfig cacheWithDefaultMergePolicyConfig = config.getCacheConfig("cacheWithDefaultMergePolicy");
assertNotNull(cacheWithDefaultMergePolicyConfig);
assertEquals(MergePolicyConfig.DEFAULT_MERGE_POLICY,
cacheWithDefaultMergePolicyConfig.getMergePolicyConfig().getPolicy());
}
|
public static String normalize(final String path) {
return normalize(path, true);
}
|
@Test
public void testDoubleDot() {
assertEquals("/", PathNormalizer.normalize("/.."));
assertEquals("/p", PathNormalizer.normalize("/p/n/.."));
assertEquals("/n", PathNormalizer.normalize("/p/../n"));
assertEquals("/", PathNormalizer.normalize(".."));
assertEquals("/", PathNormalizer.normalize("."));
}
|
@Override
public Set<Map.Entry<String, Object>> entrySet() {
return variables.entrySet();
}
|
@Test
public void testEntrySet() {
assertThat(unmodifiables.entrySet(), CoreMatchers.is(vars.entrySet()));
}
|
@Override
protected SemanticProperties getSemanticPropertiesForLocalPropertyFiltering() {
// Local properties for GroupReduce may only be preserved on key fields.
SingleInputSemanticProperties origProps = getOperator().getSemanticProperties();
SingleInputSemanticProperties filteredProps = new SingleInputSemanticProperties();
FieldSet readSet = origProps.getReadFields(0);
if (readSet != null) {
filteredProps.addReadFields(readSet);
}
// only add forward field information for key fields
if (this.keys != null) {
for (int f : this.keys) {
FieldSet targets = origProps.getForwardingTargetFields(0, f);
for (int t : targets) {
filteredProps.addForwardedField(f, t);
}
}
}
return filteredProps;
}
|
@Test
public void testGetSemanticProperties() {
SingleInputSemanticProperties origProps = new SingleInputSemanticProperties();
origProps.addForwardedField(0, 1);
origProps.addForwardedField(2, 2);
origProps.addForwardedField(3, 4);
origProps.addForwardedField(6, 0);
origProps.addReadFields(new FieldSet(0, 2, 4, 7));
GroupReduceOperatorBase<?, ?, ?> op = mock(GroupReduceOperatorBase.class);
when(op.getSemanticProperties()).thenReturn(origProps);
when(op.getKeyColumns(0)).thenReturn(new int[] {3, 2});
when(op.getParameters()).thenReturn(new Configuration());
GroupReduceNode node = new GroupReduceNode(op);
SemanticProperties filteredProps = node.getSemanticPropertiesForLocalPropertyFiltering();
assertTrue(filteredProps.getForwardingTargetFields(0, 0).size() == 0);
assertTrue(filteredProps.getForwardingTargetFields(0, 2).size() == 1);
assertTrue(filteredProps.getForwardingTargetFields(0, 2).contains(2));
assertTrue(filteredProps.getForwardingTargetFields(0, 3).size() == 1);
assertTrue(filteredProps.getForwardingTargetFields(0, 3).contains(4));
assertTrue(filteredProps.getForwardingTargetFields(0, 6).size() == 0);
assertTrue(filteredProps.getForwardingSourceField(0, 1) < 0);
assertTrue(filteredProps.getForwardingSourceField(0, 2) == 2);
assertTrue(filteredProps.getForwardingSourceField(0, 4) == 3);
assertTrue(filteredProps.getForwardingSourceField(0, 0) < 0);
assertTrue(filteredProps.getReadFields(0).size() == 4);
assertTrue(filteredProps.getReadFields(0).contains(0));
assertTrue(filteredProps.getReadFields(0).contains(2));
assertTrue(filteredProps.getReadFields(0).contains(4));
assertTrue(filteredProps.getReadFields(0).contains(7));
}
|