| focal_method | test_case |
|---|---|
public static void initRequestHeader(HttpRequestBase requestBase, Header header) {
Iterator<Map.Entry<String, String>> iterator = header.iterator();
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
requestBase.setHeader(entry.getKey(), entry.getValue());
}
}
|
@Test
void testInitRequestHeader() {
BaseHttpMethod.HttpGetWithEntity httpRequest = new BaseHttpMethod.HttpGetWithEntity("");
Header header = Header.newInstance();
header.addParam("k", "v");
HttpUtils.initRequestHeader(httpRequest, header);
org.apache.http.Header[] headers = httpRequest.getHeaders("k");
assertEquals(1, headers.length);
assertEquals("k", headers[0].getName());
assertEquals("v", headers[0].getValue());
}
|
public EndpointResponse isValidProperty(final String property) {
try {
final Map<String, Object> properties = new HashMap<>();
properties.put(property, "");
denyListPropertyValidator.validateAll(properties);
final KsqlConfigResolver resolver = new KsqlConfigResolver();
final Optional<ConfigItem> resolvedItem = resolver.resolve(property, false);
if (ksqlEngine.getKsqlConfig().getBoolean(KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED)
&& resolvedItem.isPresent()) {
if (!PropertiesList.QueryLevelProperties.contains(resolvedItem.get().getPropertyName())) {
throw new KsqlException(String.format("When shared runtimes are enabled, the"
+ " config %s can only be set for the entire cluster and all queries currently"
+ " running in it, and not configurable for individual queries."
+ " Please use ALTER SYSTEM to change this config for all queries.",
properties));
}
}
return EndpointResponse.ok(true);
} catch (final KsqlException e) {
LOG.info("Processed unsuccessfully, reason: ", e);
return errorHandler.generateResponse(e, Errors.badRequest(e));
} catch (final Exception e) {
LOG.info("Processed unsuccessfully, reason: ", e);
throw e;
}
}
|
@Test
public void shouldNotBadRequestWhenIsValidatorIsCalledWithNonQueryLevelProps() {
final Map<String, Object> properties = new HashMap<>();
properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "");
givenKsqlConfigWith(ImmutableMap.of(
KsqlConfig.KSQL_SHARED_RUNTIME_ENABLED, true
));
// When:
final EndpointResponse response = ksqlResource.isValidProperty("ksql.streams.auto.offset.reset");
// Then:
assertThat(response.getStatus(), equalTo(200));
}
|
public List<Pdu> getPdus() {
return mPdus;
}
|
@Test
public void testCanParsePdusFromAltBeacon() {
org.robolectric.shadows.ShadowLog.stream = System.err;
byte[] bytes = hexStringToByteArray("02011a1aff1801beac2f234454cf6d4a0fadf2f4911ba9ffa600010002c50900000000000000000000000000000000000000000000000000000000000000");
BleAdvertisement bleAdvert = new BleAdvertisement(bytes);
assertEquals("An AltBeacon advert should have two PDUs", 3, bleAdvert.getPdus().size());
}
|
@Transactional(readOnly = true)
public User readUserIfValid(String username, String password) {
Optional<User> user = userService.readUserByUsername(username);
if (!isExistUser(user)) {
log.warn("해당 유저가 존재하지 않습니다. username: {}", username);
throw new UserErrorException(UserErrorCode.INVALID_USERNAME_OR_PASSWORD);
}
if (!isValidPassword(password, user.get())) {
log.warn("비밀번호가 일치하지 않습니다. username: {}", username);
throw new UserErrorException(UserErrorCode.INVALID_USERNAME_OR_PASSWORD);
}
return user.get();
}
|
@DisplayName("로그인 시, 비밀번호가 일치하지 않으면 UserErrorException을 발생시킨다.")
@Test
void readUserIfNotMatchedPassword() {
// given
User user = UserFixture.GENERAL_USER.toUser();
given(userService.readUserByUsername(any())).willReturn(Optional.of(user));
given(passwordEncoder.matches("password", user.getPassword())).willReturn(false);
// when - then
UserErrorException exception = assertThrows(UserErrorException.class, () -> userGeneralSignService.readUserIfValid("pennyway", "password"));
System.out.println(exception.getExplainError());
}
|
public static String resolveRaw(String str) {
int len = str.length();
if (len <= 4) {
return null;
}
int endPos = len - 1;
char last = str.charAt(endPos);
// optimize to not create new objects
if (last == ')') {
char char1 = str.charAt(0);
char char2 = str.charAt(1);
char char3 = str.charAt(2);
char char4 = str.charAt(3);
if (char1 == 'R' && char2 == 'A' && char3 == 'W' && char4 == '(') {
return str.substring(4, endPos);
}
} else if (last == '}') {
char char1 = str.charAt(0);
char char2 = str.charAt(1);
char char3 = str.charAt(2);
char char4 = str.charAt(3);
if (char1 == 'R' && char2 == 'A' && char3 == 'W' && char4 == '{') {
return str.substring(4, endPos);
}
}
// not RAW value
return null;
}
|
@Test
void testURIScannerRawType1() {
final String resolvedRaw1 = URIScanner.resolveRaw("RAW(++?w0rd)");
Assertions.assertEquals("++?w0rd", resolvedRaw1);
}
|
public boolean start(Supplier<ManagedProcess> commandLauncher) {
if (!lifecycle.tryToMoveTo(ManagedProcessLifecycle.State.STARTING)) {
// has already been started
return false;
}
try {
this.process = commandLauncher.get();
} catch (RuntimeException e) {
LOG.error("Failed to launch process [{}]", processId.getHumanReadableName(), e);
lifecycle.tryToMoveTo(ManagedProcessLifecycle.State.STOPPING);
finalizeStop();
throw e;
}
this.stdOutGobbler = new StreamGobbler(process.getInputStream(), appSettings, processId.getKey());
this.stdOutGobbler.start();
this.stdErrGobbler = new StreamGobbler(process.getErrorStream(), appSettings, processId.getKey());
this.stdErrGobbler.start();
this.stopWatcher.start();
this.eventWatcher.start();
// Could be improved by checking the status "up" in shared memory.
// Not a problem so far as this state is not used by listeners.
lifecycle.tryToMoveTo(ManagedProcessLifecycle.State.STARTED);
return true;
}
|
@Test
public void process_requests_are_listened_on_regular_basis() {
ManagedProcessEventListener listener = mock(ManagedProcessEventListener.class);
ManagedProcessHandler underTest = newHanderBuilder(A_PROCESS_ID)
.addEventListener(listener)
.setWatcherDelayMs(1L)
.build();
try (TestManagedProcess testProcess = new TestManagedProcess()) {
underTest.start(() -> testProcess);
testProcess.operational = true;
verify(listener, timeout(1_000L)).onManagedProcessEvent(A_PROCESS_ID, ManagedProcessEventListener.Type.OPERATIONAL);
}
}
|
public void unlink(Name name) {
DirectoryEntry entry = remove(checkNotReserved(name, "unlink"));
entry.file().unlinked();
}
|
@Test
public void testUnlink_nonExistentNameFails() {
try {
dir.unlink(Name.simple("bar"));
fail();
} catch (IllegalArgumentException expected) {
}
}
|
@Override
public void start() {
fetchInitialPipelineGlobalConfig();
schedulePeriodicGlobalConfigRequests();
}
|
@Test
public void testStart_startsPeriodicConfigRequests() throws IOException, InterruptedException {
WorkItem firstConfig =
new WorkItem()
.setJobId("job")
.setStreamingConfigTask(new StreamingConfigTask().setMaxWorkItemCommitBytes(10L));
WorkItem secondConfig =
new WorkItem()
.setJobId("job")
.setStreamingConfigTask(new StreamingConfigTask().setMaxWorkItemCommitBytes(15L));
WorkItem thirdConfig =
new WorkItem()
.setJobId("job")
.setStreamingConfigTask(new StreamingConfigTask().setMaxWorkItemCommitBytes(100L));
CountDownLatch numExpectedRefreshes = new CountDownLatch(3);
Set<StreamingEnginePipelineConfig> receivedPipelineConfig = new HashSet<>();
when(mockDataflowServiceClient.getGlobalStreamingConfigWorkItem())
.thenReturn(Optional.of(firstConfig))
.thenReturn(Optional.of(secondConfig))
// ConfigFetcher should still fetch subsequent configs on error.
.thenThrow(new IOException("something bad happened."))
.thenReturn(Optional.of(thirdConfig))
// ConfigFetcher should not do anything with a config that doesn't contain a
// StreamingConfigTask.
.thenReturn(Optional.of(new WorkItem().setJobId("jobId")));
streamingEngineConfigFetcher =
createConfigFetcher(
/* waitForInitialConfig= */ true,
Duration.millis(100).getMillis(),
config -> {
receivedPipelineConfig.add(config);
numExpectedRefreshes.countDown();
});
Thread asyncStartConfigLoader = new Thread(streamingEngineConfigFetcher::start);
asyncStartConfigLoader.start();
numExpectedRefreshes.await();
asyncStartConfigLoader.join();
assertThat(receivedPipelineConfig)
.containsExactly(
StreamingEnginePipelineConfig.builder()
.setMaxWorkItemCommitBytes(
firstConfig.getStreamingConfigTask().getMaxWorkItemCommitBytes())
.build(),
StreamingEnginePipelineConfig.builder()
.setMaxWorkItemCommitBytes(
secondConfig.getStreamingConfigTask().getMaxWorkItemCommitBytes())
.build(),
StreamingEnginePipelineConfig.builder()
.setMaxWorkItemCommitBytes(
thirdConfig.getStreamingConfigTask().getMaxWorkItemCommitBytes())
.build());
}
|
public static void checkPositiveInteger(long value, String argName) {
checkArgument(value > 0, "'%s' must be a positive integer.", argName);
}
|
@Test
public void testCheckPositiveInteger() throws Exception {
int positiveArg = 1;
int zero = 0;
int negativeArg = -1;
// Should not throw.
checkPositiveInteger(positiveArg, "positiveArg");
// Verify it throws.
intercept(IllegalArgumentException.class,
"'negativeArg' must be a positive integer",
() -> checkPositiveInteger(negativeArg, "negativeArg"));
intercept(IllegalArgumentException.class,
"'zero' must be a positive integer",
() -> checkPositiveInteger(zero, "zero"));
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
.store(QueryableStoreTypes.timestampedWindowStore(), partition);
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= cacheBypassFetcher.fetch(store, key, lower, upper)) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
if (!windowStartBounds.contains(windowStart)) {
continue;
}
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
}
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
public void shouldFetchWithNoBounds_fetchAll() {
// When:
table.get(PARTITION, Range.all(), Range.all());
// Then:
verify(cacheBypassFetcherAll).fetchAll(
eq(tableStore),
eq(Instant.ofEpochMilli(0)),
eq(Instant.ofEpochMilli(Long.MAX_VALUE))
);
}
|
public static SqlAggregation from(QueryDataType operandType, boolean distinct) {
SqlAggregation aggregation = from(operandType);
return distinct ? new DistinctSqlAggregation(aggregation) : aggregation;
}
|
@Test
@Parameters(method = "values_distinct")
public void test_accumulateDistinct(QueryDataType operandType, List<Object> values, Object expected) {
SqlAggregation aggregation = AvgSqlAggregations.from(operandType, true);
aggregation.accumulate(null);
values.forEach(aggregation::accumulate);
assertThat(aggregation.collect()).isEqualTo(expected);
}
|
public LocalPredictionId get(String fileName, String name) {
return new LocalPredictionId(fileName, name);
}
|
@Test
void get() {
LocalPredictionId retrieved = new PredictionIds().get(fileName, name);
LocalPredictionId expected = new LocalPredictionId(fileName, name);
assertThat(retrieved).isEqualTo(expected);
}
|
public CruiseConfig deserializeConfig(String content) throws Exception {
String md5 = md5Hex(content);
Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
LOGGER.debug("[Config Save] Updating config cache with new XML");
CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
setMd5(configForEdit, md5);
configForEdit.setOrigins(new FileConfigOrigin());
return configForEdit;
}
|
@Test
void shouldLoadConfigurationFileWithComplexNonEmptyString() throws Exception {
String customerXML = loadWithMigration(Objects.requireNonNull(this.getClass().getResource("/data/p4_heavy_cruise_config.xml")).getFile());
assertThat(xmlLoader.deserializeConfig(customerXML)).isNotNull();
}
|
public boolean isSameAs(Component.Type otherType) {
if (otherType.isViewsType()) {
return otherType == this.viewsMaxDepth;
}
if (otherType.isReportType()) {
return otherType == this.reportMaxDepth;
}
throw new UnsupportedOperationException(UNSUPPORTED_TYPE_UOE_MSG);
}
|
@Test
public void LEAVES_is_same_as_FILE_and_PROJECT_VIEW() {
assertThat(CrawlerDepthLimit.LEAVES.isSameAs(Type.FILE)).isTrue();
assertThat(CrawlerDepthLimit.LEAVES.isSameAs(Type.PROJECT_VIEW)).isTrue();
for (Type type : from(asList(Type.values())).filter(not(in(ImmutableSet.of(Type.FILE, Type.PROJECT_VIEW))))) {
assertThat(CrawlerDepthLimit.LEAVES.isSameAs(type)).isFalse();
}
}
|
@Override
public RegisterRMResponseProto convert2Proto(RegisterRMResponse registerRMResponse) {
final short typeCode = registerRMResponse.getTypeCode();
final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType(
MessageTypeProto.forNumber(typeCode)).build();
final String msg = registerRMResponse.getMsg();
// default the result code from the identified flag when it is not explicitly set
if (registerRMResponse.getResultCode() == null) {
if (registerRMResponse.isIdentified()) {
registerRMResponse.setResultCode(ResultCode.Success);
} else {
registerRMResponse.setResultCode(ResultCode.Failed);
}
}
final AbstractResultMessageProto abstractResultMessageProto = AbstractResultMessageProto.newBuilder().setMsg(
msg == null ? "" : msg).setResultCode(ResultCodeProto.valueOf(registerRMResponse.getResultCode().name()))
.setAbstractMessage(abstractMessage).build();
final String extraData = registerRMResponse.getExtraData();
AbstractIdentifyResponseProto abstractIdentifyResponseProto = AbstractIdentifyResponseProto.newBuilder()
.setAbstractResultMessage(abstractResultMessageProto).setExtraData(extraData == null ? "" : extraData)
.setVersion(registerRMResponse.getVersion()).setIdentified(registerRMResponse.isIdentified()).build();
RegisterRMResponseProto result = RegisterRMResponseProto.newBuilder().setAbstractIdentifyResponse(
abstractIdentifyResponseProto).build();
return result;
}
|
@Test
public void convert2Proto() {
RegisterRMResponse registerRMResponse = new RegisterRMResponse();
registerRMResponse.setResultCode(ResultCode.Failed);
registerRMResponse.setMsg("msg");
registerRMResponse.setIdentified(true);
registerRMResponse.setVersion("11");
registerRMResponse.setExtraData("extraData");
RegisterRMResponseConvertor convertor = new RegisterRMResponseConvertor();
RegisterRMResponseProto proto = convertor.convert2Proto(registerRMResponse);
RegisterRMResponse real = convertor.convert2Model(proto);
assertThat((real.getTypeCode())).isEqualTo(registerRMResponse.getTypeCode());
assertThat((real.getMsg())).isEqualTo(registerRMResponse.getMsg());
assertThat((real.getResultCode())).isEqualTo(registerRMResponse.getResultCode());
assertThat((real.isIdentified())).isEqualTo(registerRMResponse.isIdentified());
assertThat((real.getVersion())).isEqualTo(registerRMResponse.getVersion());
assertThat((real.getExtraData())).isEqualTo(registerRMResponse.getExtraData());
}
|
public static String toString(boolean bool, String trueString, String falseString) {
return bool ? trueString : falseString;
}
|
@Test
public void toStringTest() {
assertEquals("true", BooleanUtil.toStringTrueFalse(true));
assertEquals("false", BooleanUtil.toStringTrueFalse(false));
assertEquals("yes", BooleanUtil.toStringYesNo(true));
assertEquals("no", BooleanUtil.toStringYesNo(false));
assertEquals("on", BooleanUtil.toStringOnOff(true));
assertEquals("off", BooleanUtil.toStringOnOff(false));
}
|
public static String writeValueAsString(Object value) {
try {
return OBJECT_MAPPER.writeValueAsString(value);
} catch (JsonProcessingException e) {
throw new IllegalArgumentException("The given Json object value: "
+ value + " cannot be transformed to a String", e);
}
}
|
@Test
public void optionalMappingJDK8ModuleTest() {
// To address the issue: Java 8 optional type `java.util.Optional` not supported by default: add Module "com.fasterxml.jackson.datatype:jackson-datatype-jdk8" to enable handling
assertThat(JacksonUtil.writeValueAsString(Optional.of("hello"))).isEqualTo("\"hello\"");
assertThat(JacksonUtil.writeValueAsString(List.of(Optional.of("abc")))).isEqualTo("[\"abc\"]");
assertThat(JacksonUtil.writeValueAsString(Set.of(Optional.empty()))).isEqualTo("[null]");
}
|
@Nonnull
@Override
public Optional<? extends Algorithm> parse(
@Nullable final String str, @Nonnull DetectionLocation detectionLocation) {
if (str == null) {
return Optional.empty();
}
String algorithmStr;
Optional<Mode> modeOptional = Optional.empty();
Optional<? extends Padding> paddingOptional = Optional.empty();
if (str.contains("/")) {
int slashIndex = str.indexOf("/");
algorithmStr = str.substring(0, slashIndex);
String rest = str.substring(slashIndex + 1);
if (rest.contains("/")) {
slashIndex = rest.indexOf("/");
// mode
final String modeStr = rest.substring(0, slashIndex);
final JcaModeMapper jcaModeMapper = new JcaModeMapper();
modeOptional = jcaModeMapper.parse(modeStr, detectionLocation);
// padding
String paddingStr = rest.substring(slashIndex + 1);
final JcaPaddingMapper jcaPaddingMapper = new JcaPaddingMapper();
paddingOptional = jcaPaddingMapper.parse(paddingStr, detectionLocation);
}
} else {
algorithmStr = str;
}
// check if it is pbe
JcaPasswordBasedEncryptionMapper pbeMapper = new JcaPasswordBasedEncryptionMapper();
Optional<PasswordBasedEncryption> pbeOptional =
pbeMapper.parse(algorithmStr, detectionLocation);
if (pbeOptional.isPresent()) {
// pbe
return pbeOptional;
}
Optional<? extends Algorithm> possibleCipher = map(algorithmStr, detectionLocation);
if (possibleCipher.isEmpty()) {
return Optional.empty();
}
final Algorithm algorithm = possibleCipher.get();
modeOptional.ifPresent(algorithm::put);
paddingOptional.ifPresent(algorithm::put);
return Optional.of(algorithm);
}
|
@Test
void blockSize() {
DetectionLocation testDetectionLocation =
new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
JcaCipherMapper jcaCipherMapper = new JcaCipherMapper();
Optional<? extends Algorithm> cipherOptional =
jcaCipherMapper.parse("AES/CFB8/NoPadding", testDetectionLocation);
assertThat(cipherOptional).isPresent();
assertThat(cipherOptional.get().is(BlockCipher.class)).isTrue();
Cipher cipher = (Cipher) cipherOptional.get();
assertThat(cipher).isInstanceOf(AES.class);
assertThat(cipher.getMode()).isPresent();
Mode mode = cipher.getMode().get();
assertThat(mode).isInstanceOf(CFB.class);
assertThat(mode.getBlockSize()).isPresent();
assertThat(mode.getBlockSize().get().getValue()).isEqualTo(8);
assertThat(cipher.getPadding()).isEmpty();
}
|
public static void addSortedParams(UriBuilder uriBuilder, DataMap params, ProtocolVersion version)
{
if(version.compareTo(AllProtocolVersions.RESTLI_PROTOCOL_2_0_0.getProtocolVersion()) >= 0)
{
addSortedParams(uriBuilder, params);
}
else
{
QueryParamsDataMap.addSortedParams(uriBuilder, params);
}
}
|
@Test
public void testProjectionMask()
{
DataMap queryParams = new DataMap();
DataMap fields = new DataMap();
fields.put("name", 1);
DataMap friends = new DataMap();
friends.put("$start", 1);
friends.put("$count", 2);
fields.put("friends", friends);
queryParams.put("fields", fields);
DataMap paramMap = new DataMap();
paramMap.put("foo", "bar");
paramMap.put("empty", new DataMap());
queryParams.put("aParam", paramMap);
DataList paramList = new DataList();
paramList.add("x");
paramList.add("y");
paramList.add("z");
queryParams.put("bParam", paramList);
UriBuilder uriBuilder = new UriBuilder();
URIParamUtils.addSortedParams(uriBuilder, queryParams);
URI uri = uriBuilder.build();
String query = uri.getQuery();
Assert.assertEquals(query, "aParam=(empty:(),foo:bar)&bParam=List(x,y,z)&fields=name,friends:($start:1,$count:2)");
String rawQuery = uri.getRawQuery();
Assert.assertEquals(rawQuery, "aParam=(empty:(),foo:bar)&bParam=List(x,y,z)&fields=name,friends:($start:1,$count:2)");
}
|
public DoubleArrayAsIterable usingTolerance(double tolerance) {
return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject());
}
|
@Test
public void usingTolerance_containsNoneOf_primitiveDoubleArray_failure() {
expectFailureWhenTestingThat(array(1.1, TOLERABLE_2POINT2, 3.3))
.usingTolerance(DEFAULT_TOLERANCE)
.containsNoneOf(array(99.99, 2.2));
assertFailureKeys(
"value of",
"expected not to contain any of",
"testing whether",
"but contained",
"corresponding to",
"---",
"full contents");
assertFailureValue("expected not to contain any of", "[99.99, 2.2]");
assertFailureValue("but contained", "[" + TOLERABLE_2POINT2 + "]");
assertFailureValue("corresponding to", "2.2");
}
|
@Override
public DataForm parse(XmlPullParser parser, int initialDepth, XmlEnvironment xmlEnvironment) throws XmlPullParserException, IOException, SmackParsingException {
DataForm.Type dataFormType = DataForm.Type.fromString(parser.getAttributeValue("", "type"));
DataForm.Builder dataForm = DataForm.builder(dataFormType);
String formType = null;
DataForm.ReportedData reportedData = null;
outerloop: while (true) {
XmlPullParser.Event eventType = parser.next();
switch (eventType) {
case START_ELEMENT:
String name = parser.getName();
String namespace = parser.getNamespace();
XmlEnvironment elementXmlEnvironment = XmlEnvironment.from(parser, xmlEnvironment);
switch (name) {
case "instructions":
dataForm.addInstruction(parser.nextText());
break;
case "title":
dataForm.setTitle(parser.nextText());
break;
case "field":
// Note that we parse this form field without any potential reportedData. We only use reportedData
// to lookup form field types of fields under <item/>.
FormField formField = parseField(parser, elementXmlEnvironment, formType);
TextSingleFormField hiddenFormTypeField = formField.asHiddenFormTypeFieldIfPossible();
if (hiddenFormTypeField != null) {
if (formType != null) {
throw new SmackParsingException("Multiple hidden form type fields");
}
formType = hiddenFormTypeField.getValue();
}
dataForm.addField(formField);
break;
case "item":
DataForm.Item item = parseItem(parser, elementXmlEnvironment, formType, reportedData);
dataForm.addItem(item);
break;
case "reported":
if (reportedData != null) {
throw new SmackParsingException("Data form with multiple <reported/> elements");
}
reportedData = parseReported(parser, elementXmlEnvironment, formType);
dataForm.setReportedData(reportedData);
break;
// See XEP-133 Example 32 for a corner case where the data form contains this extension.
case RosterPacket.ELEMENT:
if (namespace.equals(RosterPacket.NAMESPACE)) {
dataForm.addExtensionElement(RosterPacketProvider.INSTANCE.parse(parser, null));
}
break;
// See XEP-141 Data Forms Layout
case DataLayout.ELEMENT:
if (namespace.equals(DataLayout.NAMESPACE)) {
dataForm.addExtensionElement(DataLayoutProvider.parse(parser));
}
break;
}
break;
case END_ELEMENT:
if (parser.getDepth() == initialDepth) {
break outerloop;
}
break;
default:
// Catch all for incomplete switch (MissingCasesInEnumSwitch) statement.
break;
}
}
return dataForm.build();
}
|
@Test
public void testRetrieveFieldWithEmptyLabel() throws XmlPullParserException, IOException, SmackParsingException {
String form =
"<x xmlns='jabber:x:data' type='form'>" +
" <title>Advanced User Search</title>" +
" <instructions>The following fields are available for searching. Wildcard (*) characters are allowed as part of the query.</instructions>" +
" <field var='FORM_TYPE' label='' type='hidden'>" +
" <value>jabber:iq:search</value>" +
" </field>" +
" <field label='Search' var='search'>" +
" <required/>" +
" </field>" +
" <field label='Username' var='Username' type='boolean'>" +
" <value>true</value>" +
" </field>" +
" <field label='Name' var='Name' type='boolean'>" +
" <value>true</value>" +
" </field>" +
" <field label='Email' var='Email' type='boolean'>" +
" <value>true</value>" +
" </field>" +
"</x>";
XmlPullParser parser = PacketParserUtils.getParserFor(form);
DataForm dataForm = DataFormProvider.INSTANCE.parse(parser);
FormField usernameFormField = dataForm.getField("FORM_TYPE");
assertEquals(FormField.Type.hidden, usernameFormField.getType());
assertEquals("", usernameFormField.getLabel());
}
|
public void popAsync(MessageQueue mq, long invisibleTime, int maxNums, String consumerGroup,
long timeout, PopCallback popCallback, boolean poll, int initMode, boolean order, String expressionType, String expression)
throws MQClientException, RemotingException, InterruptedException {
FindBrokerResult findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(mq.getBrokerName(), MixAll.MASTER_ID, true);
if (null == findBrokerResult) {
this.mQClientFactory.updateTopicRouteInfoFromNameServer(mq.getTopic());
findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(mq.getBrokerName(), MixAll.MASTER_ID, true);
}
if (findBrokerResult != null) {
PopMessageRequestHeader requestHeader = new PopMessageRequestHeader();
requestHeader.setConsumerGroup(consumerGroup);
requestHeader.setTopic(mq.getTopic());
requestHeader.setQueueId(mq.getQueueId());
requestHeader.setMaxMsgNums(maxNums);
requestHeader.setInvisibleTime(invisibleTime);
requestHeader.setInitMode(initMode);
requestHeader.setExpType(expressionType);
requestHeader.setExp(expression);
requestHeader.setOrder(order);
requestHeader.setBrokerName(mq.getBrokerName());
//give 1000 ms for server response
if (poll) {
requestHeader.setPollTime(timeout);
requestHeader.setBornTime(System.currentTimeMillis());
// timeout + 10s, so the client does not time out too early when long polling.
timeout += 10 * 1000;
}
String brokerAddr = findBrokerResult.getBrokerAddr();
this.mQClientFactory.getMQClientAPIImpl().popMessageAsync(mq.getBrokerName(), brokerAddr, requestHeader, timeout, popCallback);
return;
}
throw new MQClientException("The broker[" + mq.getBrokerName() + "] not exist", null);
}
|
@Test
public void testPopAsync() throws RemotingException, InterruptedException, MQClientException {
PopCallback popCallback = mock(PopCallback.class);
when(mQClientFactory.getMQClientAPIImpl()).thenReturn(mqClientAPIImpl);
pullAPIWrapper.popAsync(createMessageQueue(),
System.currentTimeMillis(),
1,
defaultGroup,
defaultTimeout,
popCallback,
true,
1,
false,
"",
"");
verify(mqClientAPIImpl, times(1)).popMessageAsync(eq(defaultBroker),
eq(defaultBrokerAddr),
any(PopMessageRequestHeader.class),
eq(13000L),
any(PopCallback.class));
}
|
@Override
public NacosLoggingAdapter build() {
return new Log4J2NacosLoggingAdapter();
}
|
@Test
void build() {
Log4j2NacosLoggingAdapterBuilder builder = new Log4j2NacosLoggingAdapterBuilder();
NacosLoggingAdapter adapter = builder.build();
assertNotNull(adapter);
assertTrue(adapter instanceof Log4J2NacosLoggingAdapter);
}
|
@Override
public ResultSet getClientInfoProperties() throws SQLException {
return createDatabaseMetaDataResultSet(getDatabaseMetaData().getClientInfoProperties());
}
|
@Test
void assertGetClientInfoProperties() throws SQLException {
when(databaseMetaData.getClientInfoProperties()).thenReturn(resultSet);
assertThat(shardingSphereDatabaseMetaData.getClientInfoProperties(), instanceOf(DatabaseMetaDataResultSet.class));
}
|
public static PathOutputCommitter createCommitter(Path outputPath,
TaskAttemptContext context) throws IOException {
return getCommitterFactory(outputPath,
context.getConfiguration())
.createOutputCommitter(outputPath, context);
}
|
@Test
public void testNamedCommitterFactory() throws Throwable {
Configuration conf = new Configuration();
// set up for the schema factory
conf.set(COMMITTER_FACTORY_CLASS, NAMED_COMMITTER_FACTORY);
conf.set(NAMED_COMMITTER_CLASS, SimpleCommitter.class.getName());
SimpleCommitter sc = createCommitter(
NamedCommitterFactory.class,
SimpleCommitter.class, HDFS_PATH, conf);
assertEquals("Wrong output path from " + sc,
HDFS_PATH,
sc.getOutputPath());
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
void testMultipleWatermarks() {
assertThatThrownBy(
() ->
TableSchema.builder()
.field("f0", DataTypes.TIMESTAMP())
.field(
"f1",
DataTypes.ROW(
DataTypes.FIELD("q1", DataTypes.STRING()),
DataTypes.FIELD(
"q2", DataTypes.TIMESTAMP(3))))
.watermark(
"f1.q2", WATERMARK_EXPRESSION, WATERMARK_DATATYPE)
.watermark("f0", WATERMARK_EXPRESSION, WATERMARK_DATATYPE)
.build())
.isInstanceOf(IllegalStateException.class)
.hasMessage("Multiple watermark definition is not supported yet.");
}
|
@Override
public boolean shouldWait() {
RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
if (ringbuffer == null) {
return true;
}
if (ringbuffer.isTooLargeSequence(sequence) || ringbuffer.isStaleSequence(sequence)) {
//no need to wait, let the operation continue and fail in beforeRun
return false;
}
// the sequence is not readable
return sequence == ringbuffer.tailSequence() + 1;
}
|
@Test
public void whenOneAfterTail() {
ringbuffer.add("tail");
ReadOneOperation op = getReadOneOperation(ringbuffer.tailSequence() + 1);
// since there is no item, we should wait
boolean shouldWait = op.shouldWait();
assertTrue(shouldWait);
}
|
public static FlinkJobServerDriver fromConfig(FlinkServerConfiguration configuration) {
return create(
configuration,
createJobServerFactory(configuration),
createArtifactServerFactory(configuration),
() -> FlinkJobInvoker.create(configuration));
}
|
@Test
public void testConfigurationFromConfig() {
FlinkJobServerDriver.FlinkServerConfiguration config =
new FlinkJobServerDriver.FlinkServerConfiguration();
FlinkJobServerDriver driver = FlinkJobServerDriver.fromConfig(config);
assertThat(driver.configuration, is(config));
}
|
@Override
public void populateDisplayData(DisplayData.Builder builder) {
builder.delegate(delegate());
}
|
@Test
public void populateDisplayDataDelegates() {
doThrow(RuntimeException.class)
.when(delegate)
.populateDisplayData(any(DisplayData.Builder.class));
thrown.expect(RuntimeException.class);
DisplayData.from(forwarding);
}
|
@Override
public Health health() {
Map<String, Health> healths = rateLimiterRegistry.getAllRateLimiters().stream()
.filter(this::isRegisterHealthIndicator)
.collect(Collectors.toMap(RateLimiter::getName, this::mapRateLimiterHealth));
Status status = statusAggregator.getAggregateStatus(healths.values().stream().map(Health::getStatus).collect(Collectors.toSet()));
return Health.status(status).withDetails(healths).build();
}
|
@Test
public void health() throws Exception {
// given
RateLimiterConfig config = mock(RateLimiterConfig.class);
AtomicRateLimiter.AtomicRateLimiterMetrics metrics = mock(
AtomicRateLimiter.AtomicRateLimiterMetrics.class);
AtomicRateLimiter rateLimiter = mock(AtomicRateLimiter.class);
RateLimiterRegistry rateLimiterRegistry = mock(RateLimiterRegistry.class);
io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties instanceProperties =
mock(
io.github.resilience4j.common.ratelimiter.configuration.CommonRateLimiterConfigurationProperties.InstanceProperties.class);
RateLimiterConfigurationProperties rateLimiterProperties = mock(
RateLimiterConfigurationProperties.class);
//when
when(rateLimiter.getRateLimiterConfig()).thenReturn(config);
when(rateLimiter.getName()).thenReturn("test");
when(rateLimiterProperties.findRateLimiterProperties("test"))
.thenReturn(Optional.of(instanceProperties));
when(instanceProperties.getRegisterHealthIndicator()).thenReturn(true);
when(instanceProperties.getAllowHealthIndicatorToFail()).thenReturn(true);
when(rateLimiter.getMetrics()).thenReturn(metrics);
when(rateLimiter.getDetailedMetrics()).thenReturn(metrics);
when(rateLimiterRegistry.getAllRateLimiters()).thenReturn(Set.of(rateLimiter));
when(config.getTimeoutDuration()).thenReturn(Duration.ofNanos(30L));
when(metrics.getAvailablePermissions())
.thenReturn(5, -1, -2);
when(metrics.getNumberOfWaitingThreads())
.thenReturn(0, 1, 2);
when(metrics.getNanosToWait())
.thenReturn(20L, 40L);
// then
RateLimitersHealthIndicator healthIndicator =
new RateLimitersHealthIndicator(rateLimiterRegistry, rateLimiterProperties, new SimpleStatusAggregator());
Health health = healthIndicator.health();
then(health.getStatus()).isEqualTo(Status.UP);
health = healthIndicator.health();
then(health.getStatus()).isEqualTo(Status.UNKNOWN);
health = healthIndicator.health();
then(health.getStatus()).isEqualTo(Status.DOWN);
then(health.getDetails().get("test")).isInstanceOf(Health.class);
then(((Health) health.getDetails().get("test")).getDetails())
.contains(
entry("availablePermissions", -2),
entry("numberOfWaitingThreads", 2)
);
}
|
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
String targetObjectId = reader.readLine();
String methodName = reader.readLine();
List<Object> arguments = getArguments(reader);
ReturnObject returnObject = invokeMethod(methodName, targetObjectId, arguments);
String returnCommand = Protocol.getOutputCommand(returnObject);
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
writer.flush();
}
|
@Test
public void testStringMethodWithNull() {
String inputCommand = target + "\nmethod4\nn\ne\n";
try {
command.execute("c", new BufferedReader(new StringReader(inputCommand)), writer);
assertEquals("!yro1\n", sWriter.toString());
assertEquals(3, ((ExampleClass) gateway.getObject("o1")).getField1());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
|
@Override
public void run() {
logSubprocess();
}
|
@Test
public void shouldLogDefaultMessageWhenNoMessageGiven() {
logger = new SubprocessLogger(stubProcess());
String allLogs;
try (LogFixture log = logFixtureFor(SubprocessLogger.class, Level.ALL)) {
logger.run();
String result;
synchronized (log) {
result = log.getLog();
}
allLogs = result;
}
assertThat(allLogs, containsString("Logged all subprocesses."));
}
|
public static ConfigDefinitionKey parseConfigName(Element configE) {
if (!configE.getNodeName().equals("config")) {
throw new IllegalArgumentException("The root element must be 'config', but was '" + configE.getNodeName() + "'");
}
if (!configE.hasAttribute("name")) {
throw new IllegalArgumentException
("The 'config' element must have a 'name' attribute that matches the name of the config definition");
}
String elementString = configE.getAttribute("name");
if (!elementString.contains(".")) {
throw new IllegalArgumentException("The config name '" + elementString +
"' contains illegal characters. Only names with the pattern " +
namespacePattern.pattern() + "." + namePattern.pattern() + " are legal.");
}
Tuple2<String, String> t = ConfigUtils.getNameAndNamespaceFromString(elementString);
String xmlName = t.first;
String xmlNamespace = t.second;
if (!validName(xmlName)) {
throw new IllegalArgumentException("The config name '" + xmlName +
"' contains illegal characters. Only names with the pattern " +
namePattern.toString() + " are legal.");
}
if (!validNamespace(xmlNamespace)) {
throw new IllegalArgumentException("The config namespace '" + xmlNamespace +
"' contains illegal characters. Only namespaces with the pattern " +
namespacePattern.toString() + " are legal.");
}
return new ConfigDefinitionKey(xmlName, xmlNamespace);
}
|
@Test
void testNameParsingInvalidNamespace() {
assertThrows(IllegalArgumentException.class, () -> {
Element configRoot = getDocument(new StringReader("<config name=\"_foo.function-test\" version=\"1\">" +
"<int_val>1</int_val> +" +
"</config>"));
DomConfigPayloadBuilder.parseConfigName(configRoot);
});
}
|
public synchronized Map<String, ResourcePlugin> getNameToPlugins() {
return configuredPlugins;
}
|
@Test(timeout = 30000)
public void testNodeStatusUpdaterWithResourcePluginsEnabled()
throws Exception {
final ResourcePluginManager rpm = stubResourcePluginmanager();
nm = new ResourcePluginMockNM(rpm);
nm.init(conf);
nm.start();
NodeResourceUpdaterPlugin nodeResourceUpdaterPlugin =
rpm.getNameToPlugins().get("resource1")
.getNodeResourceHandlerInstance();
verify(nodeResourceUpdaterPlugin)
.updateConfiguredResource(any(Resource.class));
}
|
public void setProperty(String name, String value) {
if (value == null) {
return;
}
name = Introspector.decapitalize(name);
PropertyDescriptor prop = getPropertyDescriptor(name);
if (prop == null) {
addWarn("No such property [" + name + "] in " + objClass.getName() + ".");
} else {
try {
setProperty(prop, name, value);
} catch (PropertySetterException ex) {
addWarn("Failed to set property [" + name + "] to value \"" + value
+ "\". ", ex);
}
}
}
|
@Test
public void testFilterReply() {
// test case reproducing bug #52
setter.setProperty("filterReply", "ACCEPT");
assertEquals(FilterReply.ACCEPT, house.getFilterReply());
}
|
@Override
public TransportClient getClient(Map<String, ? extends Object> properties)
{
SSLContext sslContext;
SSLParameters sslParameters;
// Copy the properties map since we don't want to mutate the passed-in map by removing keys
properties = new HashMap<String,Object>(properties);
sslContext = coerceAndRemoveFromMap(HTTP_SSL_CONTEXT, properties, SSLContext.class);
sslParameters = coerceAndRemoveFromMap(HTTP_SSL_PARAMS, properties, SSLParameters.class);
return getClient(properties, sslContext, sslParameters);
}
|
@Test
public void testSSLParams() throws Exception
{
HttpClientFactory factory = new HttpClientFactory.Builder().build();
Map<String,Object> params = new HashMap<>();
SSLParameters sslParameters = new SSLParameters();
sslParameters.setProtocols(new String[]{ "Unsupported" });
params.put(HttpClientFactory.HTTP_SSL_CONTEXT, SSLContext.getDefault());
params.put(HttpClientFactory.HTTP_SSL_PARAMS, sslParameters);
try
{
factory.getClient(Collections.unmodifiableMap(params));
Assert.fail("Should have failed");
}
catch (IllegalArgumentException e)
{
Assert.assertTrue(e.getMessage().contains("None of the requested protocols: [Unsupported] are found in SSLContext"),
"Unexpected error message " + e.getMessage());
}
}
|
boolean matchesNonValueField(final Optional<SourceName> source, final ColumnName column) {
if (!source.isPresent()) {
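// no explicit source alias: the column matches if it is a pseudo column or a key column of any known source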
return sourceSchemas.values().stream()
.anyMatch(schema ->
SystemColumns.isPseudoColumn(column) || schema.isKeyColumn(column));
}
final SourceName sourceName = source.get();
final LogicalSchema sourceSchema = sourceSchemas.get(sourceName);
if (sourceSchema == null) {
throw new IllegalArgumentException("Unknown source: " + sourceName);
}
return sourceSchema.isKeyColumn(column) || SystemColumns.isPseudoColumn(column);
}
|
@Test
public void shouldMatchNonValueFieldNameIfAliasedKeyField() {
assertThat(sourceSchemas.matchesNonValueField(Optional.of(ALIAS_2), K1), is(true));
}
|
@Override
public DescriptiveUrl toUploadUrl(final Path file, final Sharee sharee, final Object options, final PasswordCallback callback) throws BackgroundException {
final Host bookmark = session.getHost();
final StringBuilder request = new StringBuilder(String.format("https://%s%s/apps/files_sharing/api/v1/shares?path=%s&shareType=%d&permissions=%d",
bookmark.getHostname(), new NextcloudHomeFeature(bookmark).find(NextcloudHomeFeature.Context.ocs).getAbsolute(),
URIEncoder.encode(PathRelativizer.relativize(NextcloudHomeFeature.Context.files.home(bookmark).find().getAbsolute(), file.getAbsolute())),
Sharee.world.equals(sharee) ? SHARE_TYPE_PUBLIC_LINK : SHARE_TYPE_USER,
SHARE_PERMISSIONS_CREATE
));
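// prompt for an optional passphrase protecting the created share link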
final Credentials password = callback.prompt(bookmark,
LocaleFactory.localizedString("Passphrase", "Cryptomator"),
MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()),
new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk()));
if(password.isPasswordAuthentication()) {
request.append(String.format("&password=%s", URIEncoder.encode(password.getPassword())));
}
final HttpPost resource = new HttpPost(request.toString());
resource.setHeader("OCS-APIRequest", "true");
resource.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
try {
return session.getClient().execute(resource, new OcsUploadShareResponseHandler() {
@Override
public DescriptiveUrl handleEntity(final HttpEntity entity) throws IOException {
final XmlMapper mapper = new XmlMapper();
final Share value = mapper.readValue(entity.getContent(), Share.class);
// Additional request, because permissions are ignored in POST
final StringBuilder request = new StringBuilder(String.format("https://%s/ocs/v1.php/apps/files_sharing/api/v1/shares/%s?permissions=%d",
bookmark.getHostname(),
value.data.id,
SHARE_PERMISSIONS_CREATE
));
final HttpPut put = new HttpPut(request.toString());
put.setHeader("OCS-APIRequest", "true");
put.setHeader(HttpHeaders.ACCEPT, ContentType.APPLICATION_XML.getMimeType());
session.getClient().execute(put, new VoidResponseHandler());
return super.handleEntity(entity);
}
});
}
catch(HttpResponseException e) {
throw new DefaultHttpResponseExceptionMappingService().map(e);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
}
|
@Test
public void testToUploadUrl() throws Exception {
final Path home = new NextcloudHomeFeature(session.getHost()).find();
final Path folder = new DAVDirectoryFeature(session, new NextcloudAttributesFinderFeature(session)).mkdir(new Path(home, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final DescriptiveUrl url = new NextcloudShareFeature(session).toUploadUrl(folder, Share.Sharee.world, null, new DisabledPasswordCallback() {
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new Credentials(null, new AlphanumericRandomStringService(10).random());
}
});
assertNotSame(DescriptiveUrl.EMPTY, url);
new DAVDeleteFeature(session).delete(Collections.singletonList(folder), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
|
public static SimpleFunction<byte[], Row> getJsonBytesToRowFunction(Schema beamSchema) {
return new JsonToRowFn<byte[]>(beamSchema) {
@Override
public Row apply(byte[] input) {
String jsonString = byteArrayToJsonString(input);
return RowJsonUtils.jsonToRow(objectMapper, jsonString);
}
};
}
|
@Test
public void testGetJsonBytesToRowFunction() {
for (TestCase<? extends RowEncodable> caze : testCases) {
Row expected = caze.row;
Row actual = JsonUtils.getJsonBytesToRowFunction(expected.getSchema()).apply(caze.jsonBytes);
assertEquals(caze.userT.toString(), expected, actual);
}
}
|
void start(Iterable<ShardCheckpoint> checkpoints) {
LOG.info(
"Pool {} - starting for stream {} consumer {}. Checkpoints = {}",
poolId,
read.getStreamName(),
consumerArn,
checkpoints);
for (ShardCheckpoint shardCheckpoint : checkpoints) {
checkState(
!state.containsKey(shardCheckpoint.getShardId()),
"Duplicate shard id %s",
shardCheckpoint.getShardId());
ShardState shardState =
new ShardState(
initShardSubscriber(shardCheckpoint), shardCheckpoint, watermarkPolicyFactory);
state.put(shardCheckpoint.getShardId(), shardState);
}
}
|
@Test
public void poolReSubscribesWhenNoRecordsCome() throws Exception {
kinesis = new EFOStubbedKinesisAsyncClient(10);
kinesis.stubSubscribeToShard("shard-000", eventsWithoutRecords(31, 3));
kinesis.stubSubscribeToShard("shard-001", eventsWithoutRecords(8, 3));
KinesisReaderCheckpoint initialCheckpoint =
initialLatestCheckpoint(ImmutableList.of("shard-000", "shard-001"));
pool = new EFOShardSubscribersPool(readSpec, consumerArn, kinesis);
pool.start(initialCheckpoint);
PoolAssertion.assertPool(pool)
.givesCheckPointedRecords(
ShardAssertion.shard("shard-000").gives().withLastCheckpointSequenceNumber(33),
ShardAssertion.shard("shard-001").gives().withLastCheckpointSequenceNumber(10));
assertThat(kinesis.subscribeRequestsSeen())
.containsExactlyInAnyOrder(
subscribeLatest("shard-000"),
subscribeLatest("shard-001"),
subscribeAfterSeqNumber("shard-000", "33"),
subscribeAfterSeqNumber("shard-001", "10"));
}
|
public String getD() {
return d;
}
|
@Test
public void testPublicJWKCreation()
throws InvalidAlgorithmParameterException,
NoSuchAlgorithmException,
NoSuchProviderException {
KeyPair keyPair = Keys.createSecp256k1KeyPair();
BCECPublicKey publicKey = (BCECPublicKey) keyPair.getPublic();
Secp256k1JWK jwk = new Secp256k1JWK.Builder(publicKey).build();
assertPublicJWK(jwk);
assertNull(jwk.getD());
}
|
@Override
public void trash(final Local file) throws LocalAccessDeniedException {
synchronized(NSWorkspace.class) {
if(log.isDebugEnabled()) {
log.debug(String.format("Move %s to Trash", file));
}
// Asynchronous operation. 0 if the operation is performed synchronously and succeeds, and a positive
// integer if the operation is performed asynchronously and succeeds
if(!workspace.performFileOperation(
NSWorkspace.RecycleOperation,
new NFDNormalizer().normalize(file.getParent().getAbsolute()).toString(), StringUtils.EMPTY,
NSArray.arrayWithObject(new NFDNormalizer().normalize(file.getName()).toString()))) {
throw new LocalAccessDeniedException(String.format("Failed to move %s to Trash", file.getName()));
}
}
}
|
@Test
public void testTrashRepeated() throws Exception {
final WorkspaceTrashFeature f = new WorkspaceTrashFeature();
Local l = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
new DefaultLocalTouchFeature().touch(l);
assertTrue(l.exists());
f.trash(l);
assertFalse(l.exists());
}
|
public void validate(ExternalIssueReport report, Path reportPath) {
if (report.rules != null && report.issues != null) {
Set<String> ruleIds = validateRules(report.rules, reportPath);
validateIssuesCctFormat(report.issues, ruleIds, reportPath);
} else if (report.rules == null && report.issues != null) {
String documentationLink = documentationLinkGenerator.getDocumentationLink(DOCUMENTATION_SUFFIX);
LOGGER.warn("External issues were imported with a deprecated format which will be removed soon. " +
"Please switch to the newest format to fully benefit from Clean Code: {}", documentationLink);
validateIssuesDeprecatedFormat(report.issues, reportPath);
} else {
throw new IllegalStateException(String.format("Failed to parse report '%s': invalid report detected.", reportPath));
}
}
|
@Test
public void validate_whenMissingStartLineFieldForPrimaryLocation_shouldThrowException() throws IOException {
ExternalIssueReport report = read(REPORTS_LOCATION);
report.issues[0].primaryLocation.textRange.startLine = null;
assertThatThrownBy(() -> validator.validate(report, reportPath))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Failed to parse report 'report-path': missing mandatory field 'startLine of the text range' in the primary location of the issue.");
}
|
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
return getAccessPrivilege(addr.getHostAddress(),
addr.getCanonicalHostName());
}
|
@Test
public void testRegexGrouping() {
NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
"192.168.0.(12|34)");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
// address1 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
matcher = new NfsExports(CacheSize, ExpirationPeriod, "\\w*.a.b.com");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege("1.2.3.4", "web.a.b.com"));
// address "1.2.3.4" will hit the cache
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege("1.2.3.4", "email.a.b.org"));
}
|
public static CronPattern of(String pattern) {
return new CronPattern(pattern);
}
|
@Test
public void matchDayOfWeekTest() {
// Thursday
CronPattern pattern = CronPattern.of("39 0 0 * * Thu");
assertMatch(pattern, "2017-02-09 00:00:39");
// Sunday, in its three accepted forms
pattern = CronPattern.of("39 0 0 * * Sun");
assertMatch(pattern, "2022-03-27 00:00:39");
pattern = CronPattern.of("39 0 0 * * 0");
assertMatch(pattern, "2022-03-27 00:00:39");
pattern = CronPattern.of("39 0 0 * * 7");
assertMatch(pattern, "2022-03-27 00:00:39");
}
|
public String getClientReturnId(String sessionId) {
Optional<OpenIdSession> session = openIdRepository.findById(sessionId);
if (session.isEmpty()) return null;
OpenIdSession openIdSession = session.get();
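// rebuild the client redirect URI, always propagating the original state parameter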
var returnUrl = openIdSession.getRedirectUri() + "?state=" + openIdSession.getState();
if (!"success".equals(openIdSession.getAuthenticationState())) {
return returnUrl + "&error=CANCELLED";
}
return returnUrl + "&code=" + openIdSession.getCode();
}
|
@Test
void getClientReturnNotSuccess() {
OpenIdSession openIdSession = new OpenIdSession();
when(httpServletRequest.getSession()).thenReturn(httpSession);
when(httpSession.getId()).thenReturn(null);
when(openIdRepository.findById(anyString())).thenReturn(Optional.of(openIdSession));
String response = openIdService.getClientReturnId("sessionId");
assertEquals("null?state=null&error=CANCELLED", response);
}
|
public boolean isAfterFlink114() {
return flinkInterpreter.getFlinkVersion().isAfterFlink114();
}
|
@Test
void testBatchPyFlink() throws InterpreterException, IOException {
if (!flinkInnerInterpreter.getFlinkVersion().isAfterFlink114()){
IPyFlinkInterpreterTest.testBatchPyFlink(interpreter, flinkScalaInterpreter);
}
}
|
@Override
public void doRun() {
final Instant mustBeOlderThan = Instant.now().minus(maximumSearchAge);
searchDbService.getExpiredSearches(findReferencedSearchIds(),
mustBeOlderThan).forEach(searchDbService::delete);
}
|
@Test
public void testForEmptySearches() {
final ViewSummaryDTO view = mock(ViewSummaryDTO.class);
when(viewService.streamAll()).thenReturn(Stream.of(view));
when(searchDbService.streamAll()).thenReturn(Stream.empty());
this.searchesCleanUpJob.doRun();
verify(searchDbService, never()).delete(any());
}
|
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
int version = currentVersion() + 1;
CommitStatus commitStatus = CommitStatus.FAILURE;
/* This method adds no fs scheme, and it persists in HTS that way. */
final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
HouseTable houseTable = HouseTable.builder().build();
try {
// Now that we have metadataLocation we stamp it in metadata property.
Map<String, String> properties = new HashMap<>(metadata.properties());
failIfRetryUpdate(properties);
String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
if (base == null) {
properties.put(getCanonicalFieldName("creationTime"), currentTsString);
}
properties.put(
getCanonicalFieldName("tableVersion"),
properties.getOrDefault(
getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
boolean isStageCreate =
Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
logPropertiesMap(properties);
TableMetadata updatedMetadata = metadata.replaceProperties(properties);
if (serializedSnapshotsToPut != null) {
List<Snapshot> snapshotsToPut =
SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
snapshotInspector.validateSnapshotsUpdate(
updatedMetadata, appendedSnapshots, deletedSnapshots);
Map<String, SnapshotRef> snapshotRefs =
serializedSnapshotRefs == null
? new HashMap<>()
: SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
updatedMetadata =
maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
}
final TableMetadata updatedMtDataRef = updatedMetadata;
metricsReporter.executeWithStats(
() ->
TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
houseTable = houseTableMapper.toHouseTable(updatedMetadata);
if (!isStageCreate) {
houseTableRepository.save(houseTable);
} else {
/**
* Refresh current metadata for staged tables from newly created metadata file and disable
* "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
* TableMetadata)}
*/
refreshFromMetadataLocation(newMetadataLocation);
}
commitStatus = CommitStatus.SUCCESS;
} catch (InvalidIcebergSnapshotException e) {
throw new BadRequestException(e, e.getMessage());
} catch (CommitFailedException e) {
throw e;
} catch (HouseTableCallerException
| HouseTableNotFoundException
| HouseTableConcurrentUpdateException e) {
throw new CommitFailedException(e);
} catch (Throwable persistFailure) {
// Try to reconnect and determine the commit status for unknown exception
log.error(
"Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
persistFailure);
commitStatus = checkCommitStatus(newMetadataLocation, metadata);
switch (commitStatus) {
case SUCCESS:
log.debug("Calling doCommit succeeded");
break;
case FAILURE:
// logging the error and throwing the exception must co-exist, given the exception handler in
// org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create swallows the
// nested exception information.
log.error("Exception details:", persistFailure);
throw new CommitFailedException(
persistFailure,
String.format(
"Persisting metadata file %s at version %s for table %s failed while persisting to house table",
newMetadataLocation, version, GSON.toJson(houseTable)));
case UNKNOWN:
throw new CommitStateUnknownException(persistFailure);
}
} finally {
switch (commitStatus) {
case FAILURE:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
break;
case UNKNOWN:
metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
break;
default:
break; /*should never happen, kept to silence SpotBugs*/
}
}
}
|
@Test
void testDoCommitAppendAndDeleteSnapshots() throws IOException {
List<Snapshot> testSnapshots = IcebergTestUtil.getSnapshots();
List<Snapshot> extraTestSnapshots = IcebergTestUtil.getExtraSnapshots();
// add all snapshots to the base metadata
TableMetadata base = BASE_TABLE_METADATA;
for (Snapshot snapshot : testSnapshots) {
base =
TableMetadata.buildFrom(base)
.setBranchSnapshot(snapshot, SnapshotRef.MAIN_BRANCH)
.build();
}
Map<String, String> properties = new HashMap<>(base.properties());
try (MockedStatic<TableMetadataParser> ignoreWriteMock =
Mockito.mockStatic(TableMetadataParser.class)) {
// add only the last 2 snapshots to the new metadata
List<Snapshot> newSnapshots = new ArrayList<>();
newSnapshots.addAll(testSnapshots.subList(2, 4));
newSnapshots.addAll(extraTestSnapshots);
properties.put(
CatalogConstants.SNAPSHOTS_JSON_KEY, SnapshotsUtil.serializedSnapshots(newSnapshots));
properties.put(
CatalogConstants.SNAPSHOTS_REFS_KEY,
SnapshotsUtil.serializeMap(
IcebergTestUtil.obtainSnapshotRefsFromSnapshot(
newSnapshots.get(newSnapshots.size() - 1))));
properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
TableMetadata metadata = base.replaceProperties(properties);
openHouseInternalTableOperations.doCommit(base, metadata);
Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
Assertions.assertEquals(
5,
updatedProperties.size()); /*tableLocation, lastModifiedTime, tableVersion, appended_snapshots and deleted_snapshots*/
Assertions.assertEquals(
TEST_LOCATION, updatedProperties.get(getCanonicalFieldName("tableVersion")));
// verify only 4 snapshots are added
Assertions.assertEquals(
extraTestSnapshots.stream()
.map(s -> Long.toString(s.snapshotId()))
.collect(Collectors.joining(",")),
updatedProperties.get(getCanonicalFieldName("appended_snapshots")));
// verify 2 snapshots are deleted
Assertions.assertEquals(
testSnapshots.subList(0, 2).stream()
.map(s -> Long.toString(s.snapshotId()))
.collect(Collectors.joining(",")),
updatedProperties.get(getCanonicalFieldName("deleted_snapshots")));
Assertions.assertTrue(updatedProperties.containsKey(getCanonicalFieldName("tableLocation")));
Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
}
}
|
public abstract boolean compare(A actual, E expected);
|
@Test
public void testTransforming_actual_compare_nullTransformedValues() {
assertThat(HYPHEN_INDEXES.compare("mailing-list", null)).isFalse();
assertThat(HYPHEN_INDEXES.compare("forum", 7)).isFalse();
assertThat(HYPHEN_INDEXES.compare("forum", null)).isTrue();
}
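The HYPHEN_INDEXES constant itself is not shown in this pair; a hypothetical definition consistent with the three assertions above, built on Truth's Correspondence.transforming, might look like this (an assumption, not the library's actual constant).
// Hypothetical: transforms a string to the index of its first hyphen, or null when there is none.
private static final Correspondence<String, Integer> HYPHEN_INDEXES =
    Correspondence.transforming(
        str -> {
          int index = str.indexOf('-');
          return index >= 0 ? index : null;
        },
        "has a hyphen at index");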
|
public static String getNamenodeServiceAddr(final Configuration conf,
String nsId, String nnId) {
if (nsId == null) {
nsId = getOnlyNameServiceIdOrNull(conf);
}
String serviceAddrKey = DFSUtilClient.concatSuffixes(
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
String addrKey = DFSUtilClient.concatSuffixes(
DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
String serviceRpcAddr = conf.get(serviceAddrKey);
if (serviceRpcAddr == null) {
serviceRpcAddr = conf.get(addrKey);
}
return serviceRpcAddr;
}
|
@Test
public void getNameNodeServiceAddr() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
// One nameservice with two NNs
    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
    final String NS1_NN1_HOST_SVC = "ns1-nn1.example.com:9821";
    final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
    final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
conf.set(DFS_NAMESERVICES, "ns1");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
// The rpc address is used if no service address is defined
assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
// A nameservice is specified explicitly
assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
// The service addrs are used when they are defined
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST_SVC);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST_SVC);
assertEquals(NS1_NN1_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
assertEquals(NS1_NN2_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
// We can determine the nameservice ID, there's only one listed
assertEquals("ns1", DFSUtil.getNamenodeNameServiceId(conf));
assertEquals("ns1", DFSUtil.getSecondaryNameServiceId(conf));
}
|
public SimpleRabbitListenerContainerFactory decorateSimpleRabbitListenerContainerFactory(
SimpleRabbitListenerContainerFactory factory
) {
return decorateRabbitListenerContainerFactory(factory);
}
|
@Test void decorateSimpleRabbitListenerContainerFactory_prepends_as_first_when_absent() {
SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory();
factory.setAdviceChain(new CacheInterceptor());
// the order of advices is important for the downstream interceptor to see the tracing context
assertThat(rabbitTracing.decorateSimpleRabbitListenerContainerFactory(factory).getAdviceChain())
.hasSize(2)
.matches(adviceArray -> adviceArray[0] instanceof TracingRabbitListenerAdvice);
}
|
protected String getConnectionName( VFSFile vfsFile ) {
String connectionName = null;
if ( vfsFile != null ) {
try {
connectionName = getConnectionFileName( vfsFile ).getConnection();
} catch ( NullPointerException | FileException e ) {
// DO NOTHING
}
}
return connectionName;
}
|
@Test
public void testGetConnectionName() throws Exception {
assertNull( vfsFileProvider.getConnectionName( createTestFile( null ) ) );
assertNull( vfsFileProvider.getConnectionName( createTestFile( "" ) ) );
assertNull( vfsFileProvider.getConnectionName( createTestFile( " " ) ) );
assertNull( vfsFileProvider.getConnectionName( createTestFile( "someGarbage" ) ) );
assertNull( vfsFileProvider.getConnectionName( createTestFile( "pvfs:/123" ) ) ); // missing slash "/"
assertNull( vfsFileProvider.getConnectionName( createTestFile( "pvfs://" ) ) );
assertNull( vfsFileProvider.getConnectionName( createTestFile( "xyz://" ) ) );
assertEquals( "abc", vfsFileProvider.getConnectionName( createTestFile( "pvfs://abc" ) ) );
assertEquals( "abc", vfsFileProvider.getConnectionName( createTestFile( "pvfs://abc/" ) ) );
assertEquals( "abc", vfsFileProvider.getConnectionName( createTestFile( "pvfs://abc/def/ghi/jkl/mno.csv" ) ) );
assertEquals( "Special Character name &#! <>", vfsFileProvider.getConnectionName(
createTestFile( "pvfs://Special Character name &#! <>/def/ghi/jkl/mno.csv" ) ) );
}
|
public static String mainName(File file) {
if (file.isDirectory()) {
return file.getName();
}
return mainName(file.getName());
}
|
@Test
public void mainNameTest() {
final String s = FileNameUtil.mainName("abc.tar.gz");
assertEquals("abc", s);
}
|
@Nonnull
public static <V> Set<V> findDuplicates(@Nonnull final Collection<V>... collections)
{
final Set<V> merged = new HashSet<>();
final Set<V> duplicates = new HashSet<>();
for (Collection<V> collection : collections) {
for (V o : collection) {
if (!merged.add(o)) {
duplicates.add(o);
}
}
}
return duplicates;
}
|
@Test
public void testSingleCollectionWithoutDuplicates() throws Exception
{
// Setup test fixture.
final List<String> input = Arrays.asList("a", "b", "c");
// Execute system under test.
@SuppressWarnings("unchecked")
final Set<String> result = CollectionUtils.findDuplicates(input);
// Verify results.
assertTrue(result.isEmpty());
}
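A minimal additional sketch (not part of the original pair), assuming the findDuplicates method shown above, exercising the varargs path across two collections:
@Test
public void testDuplicatesAcrossTwoCollections_sketch() throws Exception
{
    // "b" occurs in both inputs and "c" occurs twice in the second input
    final List<String> first = Arrays.asList("a", "b");
    final List<String> second = Arrays.asList("b", "c", "c");
    @SuppressWarnings("unchecked")
    final Set<String> result = CollectionUtils.findDuplicates(first, second);
    assertEquals(new HashSet<>(Arrays.asList("b", "c")), result);
}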
|
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
}
|
@Test
public void matchIPv6DstTest() {
Criterion criterion = Criteria.matchIPv6Dst(ipPrefix6);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
}
|
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final EnumSet<OpenMode> flags;
if(status.isAppend()) {
if(status.isExists()) {
// No append flag. Otherwise the offset field of SSH_FXP_WRITE requests is ignored.
flags = EnumSet.of(OpenMode.WRITE);
}
else {
// Allocate offset
flags = EnumSet.of(OpenMode.CREAT, OpenMode.WRITE);
}
}
else {
// A new file is created; if the file already exists, it is opened and truncated to preserve ownership of file.
if(status.isExists()) {
if(file.isSymbolicLink()) {
// Workaround for #7327
session.sftp().remove(file.getAbsolute());
flags = EnumSet.of(OpenMode.CREAT, OpenMode.TRUNC, OpenMode.WRITE);
}
else {
flags = EnumSet.of(OpenMode.TRUNC, OpenMode.WRITE);
}
}
else {
flags = EnumSet.of(OpenMode.CREAT, OpenMode.TRUNC, OpenMode.WRITE);
}
}
final RemoteFile handle = session.sftp().open(file.getAbsolute(), flags);
final int maxUnconfirmedWrites = this.getMaxUnconfirmedWrites(status);
if(log.isInfoEnabled()) {
log.info(String.format("Using %d unconfirmed writes", maxUnconfirmedWrites));
}
if(log.isInfoEnabled()) {
log.info(String.format("Skipping %d bytes", status.getOffset()));
}
// Open stream at offset
return new VoidStatusOutputStream(new ChunkedOutputStream(handle.new RemoteFileOutputStream(status.getOffset(), maxUnconfirmedWrites) {
private final AtomicBoolean close = new AtomicBoolean();
@Override
public void close() throws IOException {
if(close.get()) {
log.warn(String.format("Skip double close of stream %s", this));
return;
}
try {
super.close();
}
finally {
handle.close();
close.set(true);
}
}
}, preferences.getInteger("sftp.write.chunksize")));
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map("Upload {0} failed", e, file);
}
}
|
@Test
public void testWriteRangeEndFirst() throws Exception {
final SFTPWriteFeature feature = new SFTPWriteFeature(session);
final Path test = new Path(new SFTPHomeDirectoryService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final byte[] content = RandomUtils.nextBytes(2048);
{
// Write end of file first
final TransferStatus status = new TransferStatus();
status.setLength(1024L);
status.setOffset(1024L);
status.setAppend(true);
final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
out.flush();
out.close();
}
assertEquals(2048, new DefaultAttributesFinderFeature(session).find(test).getSize());
{
// Write beginning of file up to the last chunk
final TransferStatus status = new TransferStatus().exists(true);
status.setExists(true);
status.setOffset(0L);
status.setLength(1024L);
status.setAppend(true);
final OutputStream out = feature.write(test, status, new DisabledConnectionCallback());
new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
out.flush();
out.close();
}
assertEquals(2048, new DefaultAttributesFinderFeature(session).find(test).getSize());
final ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
IOUtils.copy(new SFTPReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), out);
assertArrayEquals(content, out.toByteArray());
assertTrue(new DefaultFindFeature(session).find(test));
assertEquals(content.length, new DefaultAttributesFinderFeature(session).find(test).getSize());
new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public final String getName() {
return getEnvironment().getTaskInfo().getTaskNameWithSubtasks();
}
|
@Test
void testStateBackendLoadingAndClosing() throws Exception {
Configuration taskManagerConfig = new Configuration();
taskManagerConfig.set(STATE_BACKEND, TestMemoryStateBackendFactory.class.getName());
StreamConfig cfg = new StreamConfig(new Configuration());
cfg.setStateKeySerializer(mock(TypeSerializer.class));
cfg.setOperatorID(new OperatorID(4711L, 42L));
TestStreamSource<Long, MockSourceFunction> streamSource =
new TestStreamSource<>(new MockSourceFunction());
cfg.setStreamOperator(streamSource);
cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
Task task =
createTask(
StateBackendTestSource.class,
shuffleEnvironment,
cfg,
taskManagerConfig,
EXECUTOR_EXTENSION.getExecutor());
StateBackendTestSource.fail = false;
task.startTaskThread();
// wait for clean termination
task.getExecutingThread().join();
// ensure that the state backends and stream iterables are closed ...
verify(TestStreamSource.operatorStateBackend).close();
verify(TestStreamSource.keyedStateBackend).close();
verify(TestStreamSource.rawOperatorStateInputs).close();
verify(TestStreamSource.rawKeyedStateInputs).close();
// ... and disposed
verify(TestStreamSource.operatorStateBackend).dispose();
verify(TestStreamSource.keyedStateBackend).dispose();
assertThat(task.getExecutionState()).isEqualTo(ExecutionState.FINISHED);
}
}
|
@Bean
public ShenyuPlugin grpcPlugin() {
return new GrpcPlugin();
}
|
@Test
public void testGrpcPlugin() {
applicationContextRunner.run(context -> {
ShenyuPlugin plugin = context.getBean("grpcPlugin", ShenyuPlugin.class);
assertNotNull(plugin);
}
);
}
|
@Override
public void deleteById(HouseTablePrimaryKey houseTablePrimaryKey) {
getHtsRetryTemplate(Arrays.asList(IllegalStateException.class))
.execute(
context ->
apiInstance
.deleteTable(
houseTablePrimaryKey.getDatabaseId(), houseTablePrimaryKey.getTableId())
.onErrorResume(e -> handleHtsHttpError(e).then())
.block());
}
|
@Test
public void testRepoDelete() {
mockHtsServer.enqueue(
new MockResponse()
.setResponseCode(204)
.setBody("")
.addHeader("Content-Type", "application/json"));
Assertions.assertDoesNotThrow(
() ->
htsRepo.deleteById(
HouseTablePrimaryKey.builder()
.tableId(HOUSE_TABLE.getTableId())
.databaseId(HOUSE_TABLE.getDatabaseId())
.build()));
}
|
public Type getType() {
return token.getType();
}
|
@Test
public void testEnclosing2() throws Exception {
// If we don't override, the best we can get is List<Set<T>>
// TypeRememberer<List<Set<String>>> rememberer =
// new GenericMaker2<String>(){}.getGenericMaker().getRememberer();
// assertNotEquals(
// new TypeToken<List<Set<String>>>() {}.getType(),
// rememberer.descriptorByInstance.getType());
// If we've overridden the getGenericMaker we can determine the types.
TypeRememberer<List<Set<String>>> rememberer =
new GenericMaker2<String>() {
@Override
public GenericMaker<Set<String>> getGenericMaker() {
return new GenericMaker<Set<String>>() {};
}
}.getGenericMaker().getRememberer();
assertEquals(
new TypeToken<List<Set<String>>>() {}.getType(), rememberer.descriptorByInstance.getType());
}
|
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
final String param = exchange.getAttribute(Constants.PARAM_TRANSFORM);
ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT);
assert shenyuContext != null;
MetaData metaData = exchange.getAttribute(Constants.META_DATA);
if (!checkMetaData(metaData)) {
LOG.error(" path is :{}, meta data have error.... {}", shenyuContext.getPath(), metaData);
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.META_DATA_ERROR);
return WebFluxResultUtils.result(exchange, error);
}
assert metaData != null;
if (StringUtils.isNoneBlank(metaData.getParameterTypes()) && StringUtils.isBlank(param)) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.GRPC_HAVE_BODY_PARAM);
return WebFluxResultUtils.result(exchange, error);
}
final ShenyuGrpcClient client = GrpcClientCache.getGrpcClient(selector.getId());
if (Objects.isNull(client)) {
exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.GRPC_CLIENT_NULL);
return WebFluxResultUtils.result(exchange, error);
}
// load balance context
Context.current().withValue(GrpcConstants.GRPC_SELECTOR_ID, selector.getId()).attach();
Context.current().withValue(GrpcConstants.GRPC_RULE_ID, rule.getId()).attach();
Context.current().withValue(GrpcConstants.GRPC_REMOTE_ADDRESS,
Objects.requireNonNull(exchange.getRequest().getRemoteAddress()).getAddress().getHostAddress()).attach();
GrpcExtInfo extInfo = GsonUtils.getGson().fromJson(metaData.getRpcExt(), GrpcExtInfo.class);
CallOptions callOptions = CallOptions.DEFAULT.withDeadlineAfter(extInfo.timeout, TimeUnit.MILLISECONDS);
Map<String, Map<String, String>> rpcContext = exchange.getAttribute(Constants.GENERAL_CONTEXT);
Optional.ofNullable(rpcContext).map(context -> context.get(PluginEnum.GRPC.getName())).ifPresent(
context -> Context.current().withValue(RPC_CONTEXT_KEY, context).attach());
CompletableFuture<ShenyuGrpcResponse> result = client.call(metaData, callOptions, param, extInfo.methodType);
Context.current().detach(Context.ROOT);
return Mono.fromFuture(result.thenApply(ret -> {
exchange.getAttributes().put(Constants.RPC_RESULT, ret.getResults());
exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.SUCCESS.getName());
return ret;
})).onErrorMap(ShenyuException::new).then(chain.execute(exchange));
}
|
@Test
public void testDoExecuteMetaDataError() {
ServerWebExchange exchange = getServerWebExchange();
exchange.getAttributes().put(Constants.META_DATA, getMetaData());
RuleData data = mock(RuleData.class);
StepVerifier.create(grpcPlugin.doExecute(exchange, chain, selector, data)).expectSubscription().verifyComplete();
}
|
public static Object replace(Object root, DataIterator it, Object value)
{
return transform(root, it, Transforms.constantValue(value));
}
|
@Test
public void testReplaceRoot() throws Exception
{
SimpleTestData data = IteratorTestData.createSimpleTestData();
Object result = Builder.create(data.getDataElement(), IterationOrder.PRE_ORDER)
.filterBy(Predicates.dataSchemaNameEquals("Foo"))
.replace(new DataMap());
assertTrue(result instanceof DataMap);
assertEquals(((DataMap)result).size(), 0);
}
|
public static String join(Object[] a, char delimiter) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < a.length; i++) {
sb.append(a[i]);
if (i != a.length - 1) {
sb.append(delimiter);
}
}
return sb.toString();
}
|
@Test
void testJoin() {
String[] foo = {"a", "b"};
assertEquals("a,b", StringUtils.join(foo, ','));
assertEquals("a,b", StringUtils.join(Arrays.asList(foo), ","));
}
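The second assertion above passes a List and a String delimiter, an overload not shown in this pair; a plausible sketch of such an overload (an assumption, not the library's actual code) is:
public static String join(Collection<?> collection, String delimiter) {
    // Append each element, separating consecutive elements with the delimiter
    StringBuilder sb = new StringBuilder();
    Iterator<?> it = collection.iterator();
    while (it.hasNext()) {
        sb.append(it.next());
        if (it.hasNext()) {
            sb.append(delimiter);
        }
    }
    return sb.toString();
}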
|
private CompletionStage<RestResponse> putInCache(NettyRestResponse.Builder responseBuilder,
AdvancedCache<Object, Object> cache, Object key, byte[] data, Long ttl,
Long idleTime) {
Configuration config = SecurityActions.getCacheConfiguration(cache);
final Metadata metadata = CacheOperationsHelper.createMetadata(config, ttl, idleTime);
responseBuilder.header("etag", calcETAG(data));
CompletionStage<Object> stage;
// Indexing is still blocking - can be removed when https://issues.redhat.com/browse/ISPN-11731 is complete
if (config.indexing().enabled()) {
stage = CompletableFuture.supplyAsync(() -> cache.putAsync(key, data, metadata), invocationHelper.getExecutor())
.thenCompose(Function.identity());
} else {
stage = cache.putAsync(key, data, metadata);
}
return stage.thenApply(o -> responseBuilder.build());
}
|
@Test
public void testIntKeysAndJSONToTextValues() {
Integer key = 1234;
String keyContentType = "application/x-java-object;type=java.lang.Integer";
String value = "{\"a\": 1}";
putInCache("default", key, keyContentType, value, APPLICATION_JSON_TYPE);
RestResponse response = get("default", key, keyContentType, TEXT_PLAIN_TYPE);
ResponseAssertion.assertThat(response).hasReturnedText(value);
}
|
@Override
public AuthRuleGroup getServiceAuthRule(String service) {
        // TODO: not supported yet
return null;
}
|
@Test
public void getServiceAuthRule() {
}
|
public void pruneColumns(Configuration conf, Path inputFile, Path outputFile, List<String> cols)
throws IOException {
RewriteOptions options = new RewriteOptions.Builder(conf, inputFile, outputFile)
.prune(cols)
.build();
ParquetRewriter rewriter = new ParquetRewriter(options);
rewriter.processBlocks();
rewriter.close();
}
|
@Test
public void testPruneNestedColumn() throws Exception {
// Create Parquet file
String inputFile = createParquetFile("input");
String outputFile = createTempFile("output");
// Remove nested column
List<String> cols = Arrays.asList("Links.Backward");
columnPruner.pruneColumns(conf, new Path(inputFile), new Path(outputFile), cols);
// Verify the schema are not changed for the columns not pruned
ParquetMetadata pmd =
ParquetFileReader.readFooter(conf, new Path(outputFile), ParquetMetadataConverter.NO_FILTER);
MessageType schema = pmd.getFileMetaData().getSchema();
List<Type> fields = schema.getFields();
assertEquals(fields.size(), 4);
assertEquals(fields.get(0).getName(), "DocId");
assertEquals(fields.get(1).getName(), "Name");
assertEquals(fields.get(2).getName(), "Gender");
assertEquals(fields.get(3).getName(), "Links");
List<Type> subFields = fields.get(3).asGroupType().getFields();
assertEquals(subFields.size(), 1);
assertEquals(subFields.get(0).getName(), "Forward");
// Verify the data are not changed for the columns not pruned
List<String> prunePaths = Arrays.asList("Links.Backward");
validateColumns(inputFile, prunePaths);
}
|
public static ReportMetricsRequest fromJson(String json) {
return JsonUtil.parse(json, ReportMetricsRequestParser::fromJson);
}
|
@Test
public void missingFields() {
assertThatThrownBy(() -> ReportMetricsRequestParser.fromJson("{}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing string: report-type");
assertThatThrownBy(
() -> ReportMetricsRequestParser.fromJson("{\"report-type\":\"scan-report\"}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing string: table-name");
assertThatThrownBy(
() ->
ReportMetricsRequestParser.fromJson(
"{\"report-type\":\"scan-report\", \"table-name\" : \"x\"}"))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Cannot parse missing long: snapshot-id");
}
|
@Override
@Transactional(rollbackFor = Exception.class)
public List<Long> createCodegenList(Long userId, CodegenCreateListReqVO reqVO) {
List<Long> ids = new ArrayList<>(reqVO.getTableNames().size());
        // Insert one by one. Slightly less efficient, but full batching is unnecessary because the volume will never be that large
reqVO.getTableNames().forEach(tableName -> ids.add(createCodegen(userId, reqVO.getDataSourceConfigId(), tableName)));
return ids;
}
|
@Test
public void testCreateCodegenList() {
        // Prepare parameters
Long userId = randomLongId();
CodegenCreateListReqVO reqVO = randomPojo(CodegenCreateListReqVO.class,
o -> o.setDataSourceConfigId(1L).setTableNames(Collections.singletonList("t_yunai")));
        // Mock method (TableInfo)
TableInfo tableInfo = mock(TableInfo.class);
when(databaseTableService.getTable(eq(1L), eq("t_yunai")))
.thenReturn(tableInfo);
when(tableInfo.getComment()).thenReturn("芋艿");
        // Mock method (TableInfo fields)
TableField field01 = mock(TableField.class);
when(field01.getComment()).thenReturn("主键");
TableField field02 = mock(TableField.class);
when(field02.getComment()).thenReturn("名字");
List<TableField> fields = Arrays.asList(field01, field02);
when(tableInfo.getFields()).thenReturn(fields);
        // Mock method (CodegenTableDO)
CodegenTableDO table = randomPojo(CodegenTableDO.class);
when(codegenBuilder.buildTable(same(tableInfo))).thenReturn(table);
        // Mock method (AdminUserRespDTO)
AdminUserRespDTO user = randomPojo(AdminUserRespDTO.class, o -> o.setNickname("芋头"));
when(userApi.getUser(eq(userId))).thenReturn(user);
        // Mock method (CodegenColumnDO)
List<CodegenColumnDO> columns = randomPojoList(CodegenColumnDO.class);
when(codegenBuilder.buildColumns(eq(table.getId()), same(fields)))
.thenReturn(columns);
        // Mock method (CodegenProperties)
when(codegenProperties.getFrontType()).thenReturn(CodegenFrontTypeEnum.VUE3.getType());
        // Invoke
List<Long> result = codegenService.createCodegenList(userId, reqVO);
        // Assert
assertEquals(1, result.size());
        // Assert (CodegenTableDO)
CodegenTableDO dbTable = codegenTableMapper.selectList().get(0);
assertPojoEquals(table, dbTable);
assertEquals(1L, dbTable.getDataSourceConfigId());
assertEquals(CodegenSceneEnum.ADMIN.getScene(), dbTable.getScene());
assertEquals(CodegenFrontTypeEnum.VUE3.getType(), dbTable.getFrontType());
assertEquals("芋头", dbTable.getAuthor());
        // Assert (CodegenColumnDO)
List<CodegenColumnDO> dbColumns = codegenColumnMapper.selectList();
assertEquals(columns.size(), dbColumns.size());
assertTrue(dbColumns.get(0).getPrimaryKey());
for (int i = 0; i < dbColumns.size(); i++) {
assertPojoEquals(columns.get(i), dbColumns.get(i));
}
}
|
public String getName() {
final String path = uri.getPath();
final int slash = path.lastIndexOf(SEPARATOR);
return path.substring(slash + 1);
}
|
@Test
void testGetName() {
Path p = new Path("/my/fancy/path");
assertThat(p.getName()).isEqualTo("path");
p = new Path("/my/fancy/path/");
assertThat(p.getName()).isEqualTo("path");
p = new Path("hdfs:///my/path");
assertThat(p.getName()).isEqualTo("path");
p = new Path("hdfs:///myPath/");
assertThat(p.getName()).isEqualTo("myPath");
p = new Path("/");
assertThat(p.getName()).isEmpty();
p = new Path("C:/my/windows/path");
assertThat(p.getName()).isEqualTo("path");
p = new Path("file:/C:/my/windows/path");
assertThat(p.getName()).isEqualTo("path");
}
|
public QueueConfiguration getConfiguration() {
return configuration;
}
|
@Test
public void testCreateEndpointWithMaxConfig() {
context.getRegistry().bind("creds", new StorageSharedKeyCredential("fake", "fake"));
final String uri = "azure-storage-queue://camelazure/testqueue"
+ "?credentials=#creds&operation=deleteQueue&timeToLive=PT100s&visibilityTimeout=PT10s&maxMessages=1";
final QueueEndpoint endpoint = (QueueEndpoint) context.getEndpoint(uri);
assertEquals("camelazure", endpoint.getConfiguration().getAccountName());
assertEquals("testqueue", endpoint.getConfiguration().getQueueName());
assertNull(endpoint.getConfiguration().getServiceClient());
assertEquals(QueueOperationDefinition.deleteQueue, endpoint.getConfiguration().getOperation());
assertEquals(Duration.ofSeconds(100), endpoint.getConfiguration().getTimeToLive());
assertEquals(Duration.ofSeconds(10), endpoint.getConfiguration().getVisibilityTimeout());
assertEquals(1, endpoint.getConfiguration().getMaxMessages());
}
|
public Optional<VersionedProfile> get(UUID uuid, String version) {
Optional<VersionedProfile> profile = redisGet(uuid, version);
if (profile.isEmpty()) {
profile = profiles.get(uuid, version);
profile.ifPresent(versionedProfile -> redisSet(uuid, versionedProfile));
}
return profile;
}
|
@Test
public void testGetProfileBrokenCache() {
final UUID uuid = UUID.randomUUID();
final byte[] name = TestRandomUtil.nextBytes(81);
final VersionedProfile profile = new VersionedProfile("someversion", name, "someavatar", null, null,
null, null, "somecommitment".getBytes());
when(commands.hget(eq("profiles::" + uuid), eq("someversion"))).thenThrow(new RedisException("Connection lost"));
when(profiles.get(eq(uuid), eq("someversion"))).thenReturn(Optional.of(profile));
Optional<VersionedProfile> retrieved = profilesManager.get(uuid, "someversion");
assertTrue(retrieved.isPresent());
assertSame(retrieved.get(), profile);
verify(commands, times(1)).hget(eq("profiles::" + uuid), eq("someversion"));
verify(commands, times(1)).hset(eq("profiles::" + uuid), eq("someversion"), anyString());
verifyNoMoreInteractions(commands);
verify(profiles, times(1)).get(eq(uuid), eq("someversion"));
verifyNoMoreInteractions(profiles);
}
|
public synchronized void synchronizeClusterSchemas( ClusterSchema clusterSchema ) {
synchronizeClusterSchemas( clusterSchema, clusterSchema.getName() );
}
|
@Test
public void synchronizeClusterSchemas() throws Exception {
final String clusterSchemaName = "SharedClusterSchema";
TransMeta transformarion1 = createTransMeta();
ClusterSchema clusterSchema1 = createClusterSchema( clusterSchemaName, true );
transformarion1.setClusterSchemas( Collections.singletonList( clusterSchema1 ) );
spoonDelegates.trans.addTransformation( transformarion1 );
TransMeta transformarion2 = createTransMeta();
ClusterSchema clusterSchema2 = createClusterSchema( clusterSchemaName, true );
transformarion2.setClusterSchemas( Collections.singletonList( clusterSchema2 ) );
spoonDelegates.trans.addTransformation( transformarion2 );
clusterSchema2.setDynamic( true );
sharedUtil.synchronizeClusterSchemas( clusterSchema2 );
assertThat( clusterSchema1.isDynamic(), equalTo( true ) );
}
|
@Override
public void trackChannelEvent(String eventName) {
}
|
@Test
public void trackChannelEvent() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.trackChannelEvent("TestChannelEvent");
}
|
@Override
public String getDestination() {
return StringUtils.isBlank(destination) ? DEFAULT_ROOT.getPath() : FilenameUtils.separatorsToUnix(destination);
}
|
@Test
public void shouldNotOverrideDefaultArtifactDestinationWhenNotSpecified() {
BuildArtifactConfig artifactConfig = new BuildArtifactConfig("src", null);
assertThat(artifactConfig.getDestination(), is(""));
TestArtifactConfig testArtifactConfig = new TestArtifactConfig("src", null);
assertThat(testArtifactConfig.getDestination(), is("testoutput"));
}
|
public static CompilationUnit getFromFileName(String fileName) {
try {
final InputStream resource = Thread.currentThread().getContextClassLoader().getResourceAsStream(fileName);
return StaticJavaParser.parse(resource);
} catch (ParseProblemException e) {
throw new KiePMMLInternalException(String.format("Failed to parse %s due to %s", fileName,
e.getMessage()), e);
} catch (Exception e) {
throw new ExternalException(String.format("Failed to read %s due to %s", fileName, e.getMessage()), e);
}
}
|
@Test
void getFromFileName() {
CompilationUnit retrieved = JavaParserUtils.getFromFileName(TEMPLATE_FILE);
assertThat(retrieved).isNotNull();
}
|
public Schema addToSchema(Schema schema) {
validate(schema);
schema.addProp(LOGICAL_TYPE_PROP, name);
schema.setLogicalType(this);
return schema;
}
|
@Test
void decimalWithNonByteArrayTypes() {
final LogicalType decimal = LogicalTypes.decimal(5, 2);
// test simple types
Schema[] nonBytes = new Schema[] { Schema.createRecord("Record", null, null, false),
Schema.createArray(Schema.create(Schema.Type.BYTES)), Schema.createMap(Schema.create(Schema.Type.BYTES)),
Schema.createEnum("Enum", null, null, Arrays.asList("a", "b")),
Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.BYTES), Schema.createFixed("fixed", null, null, 4))),
Schema.create(Schema.Type.BOOLEAN), Schema.create(Schema.Type.INT), Schema.create(Schema.Type.LONG),
Schema.create(Schema.Type.FLOAT), Schema.create(Schema.Type.DOUBLE), Schema.create(Schema.Type.NULL),
Schema.create(Schema.Type.STRING) };
for (final Schema schema : nonBytes) {
assertThrows("Should reject type: " + schema.getType(), IllegalArgumentException.class,
"Logical type decimal must be backed by fixed or bytes", () -> {
decimal.addToSchema(schema);
return null;
});
}
}
|
public ProviderBuilder transporter(String transporter) {
this.transporter = transporter;
return getThis();
}
|
@Test
void transporter() {
ProviderBuilder builder = ProviderBuilder.newBuilder();
builder.transporter("mocktransporter");
Assertions.assertEquals("mocktransporter", builder.build().getTransporter());
}
|
public static OP_TYPE getOpType(final List<Field<?>> fields, final Model model, final String targetFieldName) {
return Stream.of(getOpTypeFromTargets(model.getTargets(), targetFieldName),
getOpTypeFromMiningFields(model.getMiningSchema(), targetFieldName),
getOpTypeFromFields(fields, targetFieldName))
.filter(Optional::isPresent)
.map(Optional::get)
.findFirst()
.orElseThrow(() -> new KiePMMLInternalException(String.format("Failed to find OpType for field" +
" %s", targetFieldName)));
}
|
@Test
void getOpTypeByMiningFields() {
final Model model = new RegressionModel();
final DataDictionary dataDictionary = new DataDictionary();
final MiningSchema miningSchema = new MiningSchema();
IntStream.range(0, 3).forEach(i -> {
final DataField dataField = getRandomDataField();
dataDictionary.addDataFields(dataField);
final MiningField miningField = getRandomMiningField();
miningField.setName(dataField.getName());
miningSchema.addMiningFields(miningField);
});
model.setMiningSchema(miningSchema);
miningSchema.getMiningFields().forEach(miningField -> {
OP_TYPE retrieved =
org.kie.pmml.compiler.api.utils.ModelUtils.getOpType(getFieldsFromDataDictionary(dataDictionary),
model,miningField.getName());
assertThat(retrieved).isNotNull();
OP_TYPE expected = OP_TYPE.byName(miningField.getOpType().value());
assertThat(retrieved).isEqualTo(expected);
});
}
|
@Override
public void pickAddress() throws Exception {
if (publicAddress != null || bindAddress != null) {
return;
}
try {
AddressDefinition publicAddressDef = getPublicAddressByPortSearch();
if (publicAddressDef != null) {
publicAddress = createAddress(publicAddressDef, publicAddressDef.port);
logger.info("Using public address: " + publicAddress);
} else {
publicAddress = bindAddress;
logger.finest("Using public address the same as the bind address: " + publicAddress);
}
} catch (Exception e) {
ServerSocketChannel serverSocketChannel = getServerSocketChannel(endpointQualifier);
if (serverSocketChannel != null) {
serverSocketChannel.close();
}
logger.severe(e);
throw e;
}
}
|
@Test
public void testPublicAddress_withBlankAddress() {
config.getNetworkConfig().setPublicAddress(" ");
addressPicker = new DefaultAddressPicker(config, logger);
assertThrows(IllegalArgumentException.class, () -> addressPicker.pickAddress());
}
|
public List<BingTile> findChildren()
{
return findChildren(zoomLevel + 1);
}
|
@Test
public void testFindChildren()
{
assertEquals(
toSortedQuadkeys(BingTile.fromQuadKey("").findChildren()),
ImmutableList.of("0", "1", "2", "3"));
assertEquals(
toSortedQuadkeys(BingTile.fromQuadKey("0123").findChildren()),
ImmutableList.of("01230", "01231", "01232", "01233"));
assertEquals(
toSortedQuadkeys(BingTile.fromQuadKey("").findChildren(2)),
ImmutableList.of("00", "01", "02", "03", "10", "11", "12", "13", "20", "21", "22", "23", "30", "31", "32", "33"));
assertThatThrownBy(() -> BingTile.fromCoordinates(0, 0, MAX_ZOOM_LEVEL).findChildren())
.hasMessage(format("newZoom must be less than or equal to %s: %s", MAX_ZOOM_LEVEL, MAX_ZOOM_LEVEL + 1));
assertThatThrownBy(() -> BingTile.fromCoordinates(0, 0, 13).findChildren(MAX_ZOOM_LEVEL + 1))
.hasMessage(format("newZoom must be less than or equal to %s: %s", MAX_ZOOM_LEVEL, MAX_ZOOM_LEVEL + 1));
assertThatThrownBy(() -> BingTile.fromCoordinates(0, 0, 13).findChildren(12))
.hasMessage(format("newZoom must be greater than or equal to current zoom %s: %s", 13, 12));
}
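A short usage sketch, assuming the BingTile API exercised above: every step down one zoom level multiplies the tile count by 4, which is what the quadkey assertions encode.
BingTile tile = BingTile.fromQuadKey("0123");         // quadkey of length 4 -> zoom level 4
List<BingTile> children = tile.findChildren();        // zoom 5: 4 tiles
List<BingTile> grandChildren = tile.findChildren(6);  // zoom 6: 4^2 = 16 tiles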
|
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
if (schema == null && value == null) {
return null;
}
JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value);
try {
return serializer.serialize(topic, jsonValue);
} catch (SerializationException e) {
throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e);
}
}
|
@Test
public void byteToJson() {
JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT8_SCHEMA, (byte) 12));
validateEnvelope(converted);
assertEquals(parse("{ \"type\": \"int8\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue());
}
|
public static <T> ListenableFuture<T> submit(final RequestBuilder<T> requestBuilder) {
return transformFromTargetAndResult(submitInternal(requestBuilder));
}
|
@Test
public void testBaseLoad() throws Exception {
ColorDrawable expected = new ColorDrawable(Color.RED);
ListenableFuture<Drawable> future = GlideFutures.submit(Glide.with(app).load(expected));
assertThat(((ColorDrawable) Futures.getDone(future)).getColor()).isEqualTo(expected.getColor());
}
|
@Draft
public ZMsg msgBinaryPicture(String picture, Object... args)
{
if (!BINARY_FORMAT.matcher(picture).matches()) {
throw new ZMQException(picture + " is not in expected binary format " + BINARY_FORMAT.pattern(),
ZError.EPROTO);
}
ZMsg msg = new ZMsg();
// Pass 1: calculate total size of data frame
int frameSize = 0;
for (int index = 0; index < picture.length(); index++) {
char pattern = picture.charAt(index);
switch (pattern) {
case '1': {
frameSize += 1;
break;
}
case '2': {
frameSize += 2;
break;
}
case '4': {
frameSize += 4;
break;
}
case '8': {
frameSize += 8;
break;
}
case 's': {
String string = (String) args[index];
frameSize += 1 + (string != null ? string.getBytes(ZMQ.CHARSET).length : 0);
break;
}
case 'S': {
String string = (String) args[index];
frameSize += 4 + (string != null ? string.getBytes(ZMQ.CHARSET).length : 0);
break;
}
case 'b':
case 'c': {
byte[] block = (byte[]) args[index];
frameSize += 4 + block.length;
break;
}
case 'f': {
ZFrame frame = (ZFrame) args[index];
msg.add(frame);
break;
}
case 'm': {
ZMsg other = (ZMsg) args[index];
if (other == null) {
msg.add(new ZFrame((byte[]) null));
}
else {
msg.addAll(other);
}
break;
}
default:
assert (false) : "invalid picture element '" + pattern + "'";
}
}
// Pass 2: encode data into data frame
ZFrame frame = new ZFrame(new byte[frameSize]);
ZNeedle needle = new ZNeedle(frame);
for (int index = 0; index < picture.length(); index++) {
char pattern = picture.charAt(index);
switch (pattern) {
case '1': {
needle.putNumber1((int) args[index]);
break;
}
case '2': {
needle.putNumber2((int) args[index]);
break;
}
case '4': {
needle.putNumber4((int) args[index]);
break;
}
case '8': {
needle.putNumber8((long) args[index]);
break;
}
case 's': {
needle.putString((String) args[index]);
break;
}
case 'S': {
needle.putLongString((String) args[index]);
break;
}
case 'b':
case 'c': {
byte[] block = (byte[]) args[index];
needle.putNumber4(block.length);
needle.putBlock(block, block.length);
break;
}
case 'f':
case 'm':
break;
default:
assert (false) : "invalid picture element '" + pattern + "'";
}
}
msg.addFirst(frame);
return msg;
}
|
@Test(expected = ZMQException.class)
public void testInvalidBinaryPictureFormat()
{
String picture = "a";
pic.msgBinaryPicture(picture, 255);
}
|
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
ThreadContext.unbindSubject();
final boolean secure = requestContext.getSecurityContext().isSecure();
final MultivaluedMap<String, String> headers = requestContext.getHeaders();
final Map<String, Cookie> cookies = requestContext.getCookies();
final Request grizzlyRequest = grizzlyRequestProvider.get();
final String host = RestTools.getRemoteAddrFromRequest(grizzlyRequest, trustedProxies);
final String authHeader = headers.getFirst(HttpHeaders.AUTHORIZATION);
final Set<Class<?>> matchedResources = requestContext.getUriInfo().getMatchedResources().stream()
.map(Object::getClass).collect(Collectors.toSet());
final SecurityContext securityContext;
if (authHeader != null && authHeader.startsWith("Basic")) {
final String base64UserPass = authHeader.substring(authHeader.indexOf(' ') + 1);
final String userPass = decodeBase64(base64UserPass);
final String[] split = userPass.split(":", 2);
if (split.length != 2) {
throw new BadRequestException("Invalid credentials in Authorization header");
}
securityContext = createSecurityContext(split[0],
split[1],
secure,
SecurityContext.BASIC_AUTH,
host,
grizzlyRequest.getRemoteAddr(),
headers,
cookies,
matchedResources);
} else {
securityContext = createSecurityContext(null, null, secure, null, host,
grizzlyRequest.getRemoteAddr(),
headers,
cookies,
matchedResources);
}
requestContext.setSecurityContext(securityContext);
}
|
@Test
public void filterWithoutAuthorizationHeaderShouldDoNothing() throws Exception {
final MultivaluedHashMap<String, String> headers = new MultivaluedHashMap<>();
when(requestContext.getHeaders()).thenReturn(headers);
filter.filter(requestContext);
final ArgumentCaptor<SecurityContext> argument = ArgumentCaptor.forClass(SecurityContext.class);
verify(requestContext).setSecurityContext(argument.capture());
assertThat(argument.getValue()).isExactlyInstanceOf(ShiroSecurityContext.class);
assertThat(argument.getValue().getAuthenticationScheme()).isNull();
}
|
@Override
public Flux<BooleanResponse<ExpireCommand>> expire(Publisher<ExpireCommand> commands) {
return execute(commands, command -> {
Assert.notNull(command.getKey(), "Key must not be null!");
byte[] keyBuf = toByteArray(command.getKey());
Mono<Boolean> m = write(keyBuf, StringCodec.INSTANCE, EXPIRE, keyBuf, command.getTimeout().getSeconds());
return m.map(v -> new BooleanResponse<>(command, v));
});
}
|
@Test
public void testExpiration() {
RedissonConnectionFactory factory = new RedissonConnectionFactory(redisson);
ReactiveStringRedisTemplate t = new ReactiveStringRedisTemplate(factory);
t.opsForValue().set("123", "4343").block();
t.expire("123", Duration.ofMillis(1001)).block();
assertThat(t.getExpire("123").block().toMillis()).isBetween(900L, 1000L);
}
|
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public AppInfo get() {
return getAppInfo();
}
|
@Test
public void testAMXML() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("mapreduce")
.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
response.getType().toString());
String xml = response.getEntity(String.class);
verifyAMInfoXML(xml, appContext);
}
|
@Override
public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException {
try (InputStream input = provider.open(requireNonNull(path))) {
final JsonNode node = mapper.readTree(createParser(input));
if (node == null) {
throw ConfigurationParsingException
.builder("Configuration at " + path + " must not be empty")
.build(path);
}
return build(node, path);
} catch (JsonParseException e) {
throw ConfigurationParsingException
.builder("Malformed " + formatName)
.setCause(e)
.setLocation(e.getLocation())
.setDetail(e.getMessage())
.build(path);
}
}
|
@Test
void handlesExistingOverrideWithPeriod() throws Exception {
System.setProperty("dw.my\\.logger.level", "debug");
final Example example = factory.build(configurationSourceProvider, validFile);
assertThat(example.getLogger())
.containsEntry("level", "debug");
}
|
public String getCommandTopicName() {
return commandTopic.getCommandTopicName();
}
|
@Test
public void shouldGetCommandTopicName() {
assertThat(commandStore.getCommandTopicName(), equalTo(COMMAND_TOPIC_NAME));
}
|
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getRestoreConsumerConfigs(final String clientId) {
final Map<String, Object> baseConsumerProps = getCommonConsumerConfigs();
// Get restore consumer override configs
final Map<String, Object> restoreConsumerProps = originalsWithPrefix(RESTORE_CONSUMER_PREFIX);
baseConsumerProps.putAll(restoreConsumerProps);
// no need to set group id for a restore consumer
baseConsumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG);
// no need to set instance id for a restore consumer
baseConsumerProps.remove(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
// add client id with stream client id prefix
baseConsumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
baseConsumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
return baseConsumerProps;
}
|
@Test
public void shouldBeSupportNonPrefixedRestoreConsumerConfigs() {
props.put(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG, 1);
final StreamsConfig streamsConfig = new StreamsConfig(props);
final Map<String, Object> consumerConfigs = streamsConfig.getRestoreConsumerConfigs(groupId);
assertEquals(1, consumerConfigs.get(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG));
}
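For contrast with the non-prefixed case above, a hedged sketch of the prefixed-override path, assuming Kafka Streams' restore.consumer. prefix helper (StreamsConfig.restoreConsumerPrefix) and the same props/groupId fixtures:
@Test
public void shouldPreferPrefixedRestoreConsumerConfigs_sketch() {
    // The "restore.consumer."-prefixed value should override the plain consumer setting
    props.put(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG, 1);
    props.put(StreamsConfig.restoreConsumerPrefix(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG), 5);
    final StreamsConfig streamsConfig = new StreamsConfig(props);
    final Map<String, Object> consumerConfigs = streamsConfig.getRestoreConsumerConfigs(groupId);
    assertEquals(5, consumerConfigs.get(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG));
}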
|
@Override
public <S, C extends Config<S>> C createConfig(S subject, Class<C> configClass) {
ConfigFactory<S, C> factory = getConfigFactory(configClass);
Versioned<JsonNode> json = configs.computeIfAbsent(key(subject, configClass),
k -> factory.isList() ?
mapper.createArrayNode() :
mapper.createObjectNode());
return createConfig(subject, configClass, json.value());
}
|
@Test
public void testCreateConfig() {
configStore.addConfigFactory(new MockConfigFactory(BasicConfig.class, "config1"));
configStore.createConfig("config1", BasicConfig.class);
assertThat(configStore.getConfigClasses("config1"), hasSize(1));
assertThat(configStore.getSubjects(String.class, BasicConfig.class), hasSize(1));
assertThat(configStore.getSubjects(String.class), hasSize(1));
BasicConfig queried = configStore.getConfig("config1", BasicConfig.class);
assertThat(queried, notNullValue());
configStore.clearConfig("config1", BasicConfig.class);
assertThat(configStore.getConfigClasses("config1"), hasSize(0));
assertThat(configStore.getSubjects(String.class, BasicConfig.class), hasSize(0));
assertThat(configStore.getSubjects(String.class), hasSize(0));
BasicConfig queriedAfterClear = configStore.getConfig("config1", BasicConfig.class);
assertThat(queriedAfterClear, nullValue());
}
|
public void remove(ConnectorTaskId id) {
final ScheduledFuture<?> task = committers.remove(id);
if (task == null)
return;
try (LoggingContext loggingContext = LoggingContext.forTask(id)) {
task.cancel(false);
if (!task.isDone())
task.get();
} catch (CancellationException e) {
// ignore
log.trace("Offset commit thread was cancelled by another thread while removing connector task with id: {}", id);
} catch (ExecutionException | InterruptedException e) {
throw new ConnectException("Unexpected interruption in SourceTaskOffsetCommitter while removing task with id: " + id, e);
}
}
|
@Test
public void testRemoveSuccess() {
expectRemove();
committers.put(taskId, taskFuture);
committer.remove(taskId);
assertTrue(committers.isEmpty());
}
|
public static boolean isTrue(String b) {
return b != null && StringUtils.TRUE.equalsIgnoreCase(b);
}
|
@Test
public void testIsTrue() {
Assert.assertTrue(CommonUtils.isTrue("true"));
Assert.assertTrue(CommonUtils.isTrue("True"));
Assert.assertFalse(CommonUtils.isTrue("111"));
Assert.assertFalse(CommonUtils.isTrue((String) null));
Assert.assertFalse(CommonUtils.isTrue(""));
Assert.assertFalse(CommonUtils.isTrue((Boolean) null));
Assert.assertTrue(CommonUtils.isTrue(Boolean.TRUE));
Assert.assertFalse(CommonUtils.isTrue(Boolean.FALSE));
}
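The final three assertions call a Boolean overload that this pair does not show; a minimal sketch consistent with those assertions (an assumption about the actual implementation) is:
public static boolean isTrue(Boolean b) {
    // null is treated as false
    return b != null && b;
}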
|
@Nullable protected TagsExtractor getQuickTextTagsSearcher() {
return mTagsExtractor;
}
|
@Test
public void testQuickTextEnabledPluginsPrefsChangedDoesNotCauseReloadIfTagsSearchIsDisabled()
throws Exception {
SharedPrefsHelper.setPrefsValue(R.string.settings_key_search_quick_text_tags, false);
Assert.assertSame(
TagsExtractorImpl.NO_OP, mAnySoftKeyboardUnderTest.getQuickTextTagsSearcher());
SharedPrefsHelper.setPrefsValue(QuickTextKeyFactory.PREF_ID_PREFIX + "ddddd", "sdfsdfsd");
Assert.assertSame(
TagsExtractorImpl.NO_OP, mAnySoftKeyboardUnderTest.getQuickTextTagsSearcher());
}
|
public int getPrecision() {
return precision;
}
|
@Test
public void default_precision_is_38() {
DecimalColumnDef def = new DecimalColumnDef.Builder()
.setColumnName("issues")
.setScale(20)
.setIsNullable(true)
.build();
assertThat(def.getPrecision()).isEqualTo(38);
}
|
public static RowCoder of(Schema schema) {
return new RowCoder(schema);
}
|
@Test
public void testArrays() throws Exception {
Schema schema = Schema.builder().addArrayField("f_array", FieldType.STRING).build();
Row row = Row.withSchema(schema).addArray("one", "two", "three", "four").build();
CoderProperties.coderDecodeEncodeEqual(RowCoder.of(schema), row);
}
|
public static String getTypeName(final int type) {
switch (type) {
case START_EVENT_V3:
return "Start_v3";
case STOP_EVENT:
return "Stop";
case QUERY_EVENT:
return "Query";
case ROTATE_EVENT:
return "Rotate";
case INTVAR_EVENT:
return "Intvar";
case LOAD_EVENT:
return "Load";
case NEW_LOAD_EVENT:
return "New_load";
case SLAVE_EVENT:
return "Slave";
case CREATE_FILE_EVENT:
return "Create_file";
case APPEND_BLOCK_EVENT:
return "Append_block";
case DELETE_FILE_EVENT:
return "Delete_file";
case EXEC_LOAD_EVENT:
return "Exec_load";
case RAND_EVENT:
return "RAND";
case XID_EVENT:
return "Xid";
case USER_VAR_EVENT:
return "User var";
case FORMAT_DESCRIPTION_EVENT:
return "Format_desc";
case TABLE_MAP_EVENT:
return "Table_map";
case PRE_GA_WRITE_ROWS_EVENT:
return "Write_rows_event_old";
case PRE_GA_UPDATE_ROWS_EVENT:
return "Update_rows_event_old";
case PRE_GA_DELETE_ROWS_EVENT:
return "Delete_rows_event_old";
case WRITE_ROWS_EVENT_V1:
return "Write_rows_v1";
case UPDATE_ROWS_EVENT_V1:
return "Update_rows_v1";
case DELETE_ROWS_EVENT_V1:
return "Delete_rows_v1";
case BEGIN_LOAD_QUERY_EVENT:
return "Begin_load_query";
case EXECUTE_LOAD_QUERY_EVENT:
return "Execute_load_query";
case INCIDENT_EVENT:
return "Incident";
case HEARTBEAT_LOG_EVENT:
case HEARTBEAT_LOG_EVENT_V2:
return "Heartbeat";
case IGNORABLE_LOG_EVENT:
return "Ignorable";
case ROWS_QUERY_LOG_EVENT:
return "Rows_query";
case WRITE_ROWS_EVENT:
return "Write_rows";
case UPDATE_ROWS_EVENT:
return "Update_rows";
case DELETE_ROWS_EVENT:
return "Delete_rows";
case GTID_LOG_EVENT:
return "Gtid";
case ANONYMOUS_GTID_LOG_EVENT:
return "Anonymous_Gtid";
case PREVIOUS_GTIDS_LOG_EVENT:
return "Previous_gtids";
case PARTIAL_UPDATE_ROWS_EVENT:
return "Update_rows_partial";
case TRANSACTION_CONTEXT_EVENT :
return "Transaction_context";
case VIEW_CHANGE_EVENT :
return "view_change";
case XA_PREPARE_LOG_EVENT :
return "Xa_prepare";
case TRANSACTION_PAYLOAD_EVENT :
return "transaction_payload";
default:
return "Unknown type:" + type;
}
}
|
@Test
public void getTypeNameInputPositiveOutputNotNull19() {
// Arrange
final int type = 19;
// Act
final String actual = LogEvent.getTypeName(type);
// Assert result
Assert.assertEquals("Table_map", actual);
}
|
public static <T> T parseObject(String text, Class<T> clazz) {
if (StringUtil.isBlank(text)) {
return null;
}
return JSON_FACADE.parseObject(text, clazz);
}
|
@Test
public void assertParseObject() {
Assert.assertNull(JSONUtil.parseObject(null, Foo.class));
Assert.assertNull(JSONUtil.parseObject(" ", Foo.class));
Assert.assertEquals(EXPECTED_FOO, JSONUtil.parseObject(EXPECTED_FOO_JSON, Foo.class));
}
|
public static boolean isValidOrigin(String sourceHost, ZeppelinConfiguration zConf)
throws UnknownHostException, URISyntaxException {
String sourceUriHost = "";
if (sourceHost != null && !sourceHost.isEmpty()) {
sourceUriHost = new URI(sourceHost).getHost();
sourceUriHost = (sourceUriHost == null) ? "" : sourceUriHost.toLowerCase();
}
sourceUriHost = sourceUriHost.toLowerCase();
String currentHost = InetAddress.getLocalHost().getHostName().toLowerCase();
return zConf.getAllowedOrigins().contains("*")
|| currentHost.equals(sourceUriHost)
|| "localhost".equals(sourceUriHost)
|| zConf.getAllowedOrigins().contains(sourceHost);
}
|
@Test
void notAURIOrigin()
throws URISyntaxException, UnknownHostException {
assertFalse(CorsUtils.isValidOrigin("test123",
ZeppelinConfiguration.load("zeppelin-site.xml")));
}
|
public WorkflowActionResponse deactivate(String workflowId, User caller) {
Checks.notNull(caller, "caller cannot be null to deactivate workflow [%s]", workflowId);
String timeline = workflowDao.deactivate(workflowId, caller);
LOG.info(timeline);
TimelineEvent event =
TimelineActionEvent.builder()
.action(Actions.WorkflowAction.DEACTIVATE)
.author(caller)
.message(timeline)
.build();
return WorkflowActionResponse.from(workflowId, event);
}
|
@Test
public void testDeactivate() {
when(workflowDao.deactivate("sample-minimal-wf", tester)).thenReturn("foo");
actionHandler.deactivate("sample-minimal-wf", tester);
verify(workflowDao, times(1)).deactivate("sample-minimal-wf", tester);
}
|