| focal_method (string, 13–60.9k chars) | test_case (string, 25–109k chars) |
|---|---|
@Override
public PulsarAdmin build() throws PulsarClientException {
return new PulsarAdminImpl(conf.getServiceUrl(), conf, clientBuilderClassLoader, acceptGzipCompression);
}
|
@Test
public void testBuildFailsWhenServiceUrlNotSet() {
assertThatIllegalArgumentException().isThrownBy(() -> PulsarAdmin.builder().build())
.withMessageContaining("Service URL needs to be specified");
}
|
@Override
public void recordMisses(int count) {
missCount.inc(count);
}
|
@Test
public void miss() {
stats.recordMisses(2);
assertThat(registry.counter(PREFIX + ".misses").getCount()).isEqualTo(2);
}
|
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
//FIXME this is a total hack but it works around a number of issues related to vertical map
//replication and horizontal replication that can cause polygons to completely disappear when
//panning
if (pZoom < 3)
return true;
boolean latMatch = false;
boolean lonMatch = false;
//vertical wrapping detection
if (pBoundingBox.mLatSouth <= mLatNorth &&
pBoundingBox.mLatSouth >= mLatSouth)
latMatch = true;
//normal case, non overlapping
if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
lonMatch = true;
//normal case, non overlapping
if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
lonMatch = true;
//special case for when *this completely surrounds the pBoundingBox
if (mLonWest <= pBoundingBox.mLonWest &&
mLonEast >= pBoundingBox.mLonEast &&
mLatNorth >= pBoundingBox.mLatNorth &&
mLatSouth <= pBoundingBox.mLatSouth)
return true;
//normal case, non overlapping
if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= mLatSouth)
latMatch = true;
//normal case, non overlapping
if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= mLatSouth)
latMatch = true;
if (mLonWest > mLonEast) {
//the date line is included in the bounding box
//we want to match lon from the dateline to the eastern bounds of the box
//and the dateline to the western bounds of the box
if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
lonMatch = true;
if (mLonWest >= pBoundingBox.mLonEast &&
mLonEast <= pBoundingBox.mLonEast) {
lonMatch = true;
if (pBoundingBox.mLonEast < mLonWest &&
pBoundingBox.mLonWest < mLonWest)
lonMatch = false;
if (pBoundingBox.mLonEast > mLonEast &&
pBoundingBox.mLonWest > mLonEast)
lonMatch = false;
}
if (mLonWest >= pBoundingBox.mLonEast &&
mLonEast >= pBoundingBox.mLonEast) {
lonMatch = true;
}
/*
//that is completely within this
if (mLonWest>= pBoundingBox.mLonEast &&
mLonEast<= pBoundingBox.mLonEast) {
lonMatch = true;
if (pBoundingBox.mLonEast < mLonWest &&
pBoundingBox.mLonWest < mLonWest)
lonMatch = false;
if (pBoundingBox.mLonEast > mLonEast &&
pBoundingBox.mLonWest > mLonEast )
lonMatch = false;
}
if (mLonWest>= pBoundingBox.mLonEast &&
mLonEast>= pBoundingBox.mLonEast) {
lonMatch = true;
}*/
}
return latMatch && lonMatch;
}
|
@Test
public void testOverlapsDateLine2() {
// ________________
// | | |
// |** ?? | **|
// |-*----+-----*-|
// |** | **|
// | | |
// ----------------
//box is notated as *
//test area is notated as ?
BoundingBox box = new BoundingBox(45, -178, -45, -1);
Assert.assertTrue(box.overlaps(box, 4));
BoundingBox farAway = new BoundingBox(45, -74, 44, -72);
Assert.assertFalse(box.overlaps(farAway, 4));
// ________________
// |******| **|
// | ?? *| * |
// |-----*+-----*-|
// | *| * |
// |******| **|
// ----------------
//box is notated as *
//test area is notated as ?
box = new BoundingBox(45, 0, -45, 170);
Assert.assertTrue(box.overlaps(box, 4));
farAway = new BoundingBox(40, -72, 38, -74);
Assert.assertTrue(box.overlaps(farAway, 4));
farAway = new BoundingBox(40, 5, 38, 4);
Assert.assertFalse(box.overlaps(farAway, 4));
farAway = new BoundingBox(-40, 5, -42, 4);
Assert.assertFalse(box.overlaps(farAway, 4));
}
|
@Override
public void trackAppInstall(JSONObject properties, boolean disableCallback) {
}
|
@Test
public void testTrackAppInstall() {
mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
@Override
public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
Assert.fail();
return false;
}
});
mSensorsAPI.trackAppInstall(new JSONObject());
}
|
public static ExpansionServer create(ExpansionService service, String host, int port)
throws IOException {
return new ExpansionServer(service, host, port);
}
|
@Test
public void testNonEmptyFilesToStage() {
String[] args = {"--filesToStage=nonExistent1.jar,nonExistent2.jar"};
ExpansionService service = new ExpansionService(args);
assertThat(
service
.createPipeline(PipelineOptionsFactory.create())
.getOptions()
.as(PortablePipelineOptions.class)
.getFilesToStage(),
equalTo(Arrays.asList("nonExistent1.jar", "nonExistent2.jar")));
}
|
@Override
public void commitJob(JobContext originalContext) throws IOException {
commitJobs(Collections.singletonList(originalContext), Operation.OTHER);
}
|
@Test
public void testRetryTask() throws IOException {
HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter();
Table table = table(temp.getRoot().getPath(), false);
JobConf conf = jobConf(table, 2);
// Write records and abort the tasks
writeRecords(table.name(), 2, 0, false, true, conf);
HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 0);
HiveIcebergTestUtils.validateData(table, Collections.emptyList(), 0);
// Write records but do not abort the tasks
// The data files remain since we cannot identify them, but they should not be read
writeRecords(table.name(), 2, 1, false, false, conf);
HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 2);
HiveIcebergTestUtils.validateData(table, Collections.emptyList(), 0);
// Write and commit the records
List<Record> expected = writeRecords(table.name(), 2, 2, true, false, conf);
committer.commitJob(new JobContextImpl(conf, JOB_ID));
HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 4);
HiveIcebergTestUtils.validateData(table, expected, 1);
}
|
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuilder buf = new StringBuilder();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case START_STATE:
handleStartState(c, tokenList, buf);
break;
case DEFAULT_VAL_STATE:
handleDefaultValueState(c, tokenList, buf);
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addLiteralToken(tokenList, buf);
break;
case DEFAULT_VAL_STATE:
// trailing colon. see also LOGBACK-1140
buf.append(CoreConstants.COLON_CHAR);
addLiteralToken(tokenList, buf);
break;
case START_STATE:
// trailing $. see also LOGBACK-1149
buf.append(CoreConstants.DOLLAR);
addLiteralToken(tokenList, buf);
break;
}
return tokenList;
}
|
@Test
public void colon() throws ScanException {
String input = "a:b";
Tokenizer tokenizer = new Tokenizer(input);
List<Token> tokenList = tokenizer.tokenize();
witnessList.add(new Token(Token.Type.LITERAL, "a"));
witnessList.add(new Token(Token.Type.LITERAL, ":b"));
assertEquals(witnessList, tokenList);
}
|
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
return fromXmlPartial(toInputStream(partial, UTF_8), o);
}
|
@Test
void shouldLoadStageArtifactPurgeSettingsFromXmlPartial() throws Exception {
String stageXmlPartial =
"""
<stage name="mingle" artifactCleanupProhibited="true">
<jobs>
<job name="functional">
<artifacts>
<log src="artifact1.xml" dest="cruise-output" />
</artifacts>
</job>
</jobs>
</stage>
""";
StageConfig stage = xmlLoader.fromXmlPartial(stageXmlPartial, StageConfig.class);
assertThat(stage.isArtifactCleanupProhibited()).isTrue();
stageXmlPartial =
"""
<stage name="mingle" artifactCleanupProhibited="false">
<jobs>
<job name="functional">
<artifacts>
<log src="artifact1.xml" dest="cruise-output" />
</artifacts>
</job>
</jobs>
</stage>
""";
stage = xmlLoader.fromXmlPartial(stageXmlPartial, StageConfig.class);
assertThat(stage.isArtifactCleanupProhibited()).isFalse();
stageXmlPartial =
"""
<stage name="mingle">
<jobs>
<job name="functional">
<artifacts>
<log src="artifact1.xml" dest="cruise-output" />
</artifacts>
</job>
</jobs>
</stage>
""";
stage = xmlLoader.fromXmlPartial(stageXmlPartial, StageConfig.class);
assertThat(stage.isArtifactCleanupProhibited()).isFalse();
}
|
public String getCeTaskId() {
verifyInitialized();
return ceTaskId;
}
|
@Test
public void getCeTaskId_should_fail_if_not_initialized() {
assertThatThrownBy(() -> underTest.getCeTaskId())
.isInstanceOf(IllegalStateException.class);
}
|
public static RateLimiterRegistry of(Configuration configuration, CompositeCustomizer<RateLimiterConfigCustomizer> customizer){
CommonRateLimiterConfigurationProperties rateLimiterProperties = CommonsConfigurationRateLimiterConfiguration.of(configuration);
Map<String, RateLimiterConfig> rateLimiterConfigMap = rateLimiterProperties.getInstances()
.entrySet().stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
entry -> rateLimiterProperties.createRateLimiterConfig(entry.getValue(), customizer, entry.getKey())));
return RateLimiterRegistry.of(rateLimiterConfigMap);
}
|
@Test
public void testRateLimiterRegistryFromPropertiesFile() throws ConfigurationException {
Configuration config = CommonsConfigurationUtil.getConfiguration(PropertiesConfiguration.class, TestConstants.RESILIENCE_CONFIG_PROPERTIES_FILE_NAME);
RateLimiterRegistry registry = CommonsConfigurationRateLimiterRegistry.of(config, new CompositeCustomizer<>(List.of()));
Assertions.assertThat(registry.rateLimiter(TestConstants.BACKEND_A).getName()).isEqualTo(TestConstants.BACKEND_A);
Assertions.assertThat(registry.rateLimiter(TestConstants.BACKEND_B).getName()).isEqualTo(TestConstants.BACKEND_B);
}
|
public static void checkDrivingLicenceMrz(String mrz) {
if (mrz.charAt(0) != 'D') {
throw new VerificationException("MRZ should start with D");
}
if (mrz.charAt(1) != '1') {
throw new VerificationException("Only BAP configuration is supported (1)");
}
if (!mrz.substring(2, 5).equals("NLD")) {
throw new VerificationException("Only Dutch driving licence supported");
}
if (mrz.length() != 30) {
throw new VerificationException("Dutch MRZ should have length of 30");
}
checkMrzCheckDigit(mrz);
}
|
@Test
public void checkDrivingLicenceMrzCountryWrong() {
assertThrows(VerificationException.class, () -> {
MrzUtils.checkDrivingLicenceMrz("PPPPPPPPPPPPPPPPPPPPPPPPPPPPPP");
});
}
|
public static <T> List<List<T>> splitAvg(List<T> list, int limit) {
if (CollUtil.isEmpty(list)) {
return empty();
}
return (list instanceof RandomAccess)
? new RandomAccessAvgPartition<>(list, limit)
: new AvgPartition<>(list, limit);
}
|
@Test
public void splitAvgTest() {
List<List<Object>> lists = ListUtil.splitAvg(null, 3);
assertEquals(ListUtil.empty(), lists);
lists = ListUtil.splitAvg(Arrays.asList(1, 2, 3, 4), 1);
assertEquals("[[1, 2, 3, 4]]", lists.toString());
lists = ListUtil.splitAvg(Arrays.asList(1, 2, 3, 4), 2);
assertEquals("[[1, 2], [3, 4]]", lists.toString());
lists = ListUtil.splitAvg(Arrays.asList(1, 2, 3, 4), 3);
assertEquals("[[1, 2], [3], [4]]", lists.toString());
lists = ListUtil.splitAvg(Arrays.asList(1, 2, 3, 4), 4);
assertEquals("[[1], [2], [3], [4]]", lists.toString());
lists = ListUtil.splitAvg(Arrays.asList(1, 2, 3), 2);
assertEquals("[[1, 2], [3]]", lists.toString());
}
|
@Override
public void handle(final RoutingContext routingContext) {
routingContext.addEndHandler(ar -> {
// After the response is complete, log results here.
final int status = routingContext.request().response().getStatusCode();
if (!loggingRateLimiter.shouldLog(logger, routingContext.request().path(), status)) {
return;
}
final long contentLength = routingContext.request().response().bytesWritten();
final HttpVersion version = routingContext.request().version();
final HttpMethod method = routingContext.request().method();
final String uri = enableQueryLogging
? routingContext.request().uri()
: routingContext.request().path();
if (endpointFilter.isPresent() && endpointFilter.get().matcher(uri).matches()) {
return;
}
final long requestBodyLength = routingContext.request().bytesRead();
final String versionFormatted;
switch (version) {
case HTTP_1_0:
versionFormatted = "HTTP/1.0";
break;
case HTTP_1_1:
versionFormatted = "HTTP/1.1";
break;
case HTTP_2:
versionFormatted = "HTTP/2.0";
break;
default:
versionFormatted = "-";
}
final String name = Optional.ofNullable((ApiUser) routingContext.user())
.map(u -> u.getPrincipal().getName())
.orElse("-");
final String userAgent = Optional.ofNullable(
routingContext.request().getHeader(HTTP_HEADER_USER_AGENT)).orElse("-");
final String timestamp = Utils.formatRFC1123DateTime(clock.millis());
final SocketAddress socketAddress = routingContext.request().remoteAddress();
final String message = String.format(
"%s - %s [%s] \"%s %s %s\" %d %d \"-\" \"%s\" %d",
socketAddress == null ? "null" : socketAddress.host(),
name,
timestamp,
method,
uri,
versionFormatted,
status,
contentLength,
userAgent,
requestBodyLength);
doLog(status, message);
});
routingContext.next();
}
|
@Test
public void shouldProduceLog() {
// Given:
when(response.getStatusCode()).thenReturn(200);
// When:
loggingHandler.handle(routingContext);
verify(routingContext).addEndHandler(endCallback.capture());
endCallback.getValue().handle(null);
// Then:
verify(logger).info(logStringCaptor.capture());
assertThat(logStringCaptor.getValue(),
is("123.111.222.333 - - [Sun, 12 Nov 2023 18:23:54 GMT] "
+ "\"POST /query HTTP/1.1\" 200 5678 \"-\" \"bot\" 3456"));
}
|
@CheckForNull
public Duration calculate(DefaultIssue issue) {
if (issue.isFromExternalRuleEngine()) {
return issue.effort();
}
Rule rule = ruleRepository.getByKey(issue.ruleKey());
DebtRemediationFunction fn = rule.getRemediationFunction();
if (fn != null) {
verifyEffortToFix(issue, fn);
Duration debt = Duration.create(0);
String gapMultiplier = fn.gapMultiplier();
if (fn.type().usesGapMultiplier() && !Strings.isNullOrEmpty(gapMultiplier)) {
int effortToFixValue = MoreObjects.firstNonNull(issue.gap(), 1).intValue();
// TODO convert to Duration directly in Rule#remediationFunction -> better performance + error handling
debt = durations.decode(gapMultiplier).multiply(effortToFixValue);
}
String baseEffort = fn.baseEffort();
if (fn.type().usesBaseEffort() && !Strings.isNullOrEmpty(baseEffort)) {
// TODO convert to Duration directly in Rule#remediationFunction -> better performance + error handling
debt = debt.add(durations.decode(baseEffort));
}
return debt;
}
return null;
}
|
@Test
public void linear_with_offset_function() {
double effortToFix = 3.0;
int coefficient = 2;
int offset = 5;
issue.setGap(effortToFix);
rule.setFunction(new DefaultDebtRemediationFunction(
DebtRemediationFunction.Type.LINEAR_OFFSET, coefficient + "min", offset + "min"));
assertThat(underTest.calculate(issue).toMinutes()).isEqualTo((int) ((coefficient * effortToFix) + offset));
}
|
public ModelAndView() {
}
|
@Test
public void testModelAndView(){
ModelAndView modelAndView = new ModelAndView();
Assert.assertEquals(0, modelAndView.getModel().size());
}
|
public boolean isUnknown() {
return Double.isInfinite(getRowCount())
|| Double.isInfinite(getRate())
|| Double.isInfinite(getWindow());
}
|
@Test
public void testKnownRel() {
String sql = " select * from ORDER_DETAILS1 ";
RelNode root = env.parseQuery(sql);
NodeStats nodeStats =
root.metadata(NodeStatsMetadata.class, root.getCluster().getMetadataQuery()).getNodeStats();
Assert.assertFalse(nodeStats.isUnknown());
}
|
public List<String> toList(boolean trim) {
return toList((str) -> trim ? StrUtil.trim(str) : str);
}
|
@Test
public void splitByLengthTest(){
String text = "1234123412341234";
SplitIter splitIter = new SplitIter(text,
new LengthFinder(4),
Integer.MAX_VALUE,
false
);
final List<String> strings = splitIter.toList(false);
assertEquals(4, strings.size());
}
|
public List<Service> getServices() {
synchronized (serviceList) {
return Collections.unmodifiableList(new ArrayList<>(serviceList));
}
}
|
@Test
public void testServiceStartup() {
ServiceManager serviceManager = new ServiceManager("ServiceManager");
// Add services
for (int i = 0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service = new CompositeServiceImpl(i);
if (i == FAILED_SERVICE_SEQ_NUMBER) {
service.setThrowExceptionOnStart(true);
}
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services = serviceManager.getServices().toArray(
new CompositeServiceImpl[0]);
Configuration conf = new Configuration();
// Initialise the composite service
serviceManager.init(conf);
// Start the composite service
try {
serviceManager.start();
fail("Exception should have been thrown due to startup failure of last service");
} catch (ServiceTestRuntimeException e) {
for (int i = 0; i < NUM_OF_SERVICES - 1; i++) {
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
// Failed service state should be INITED
assertEquals("Service state should have been ", STATE.INITED,
services[NUM_OF_SERVICES - 1].getServiceState());
} else {
assertEquals("Service state should have been ", STATE.STOPPED,
services[i].getServiceState());
}
}
}
}
|
@Override
public V getNow(V valueIfAbsent) {
V value = super.getNow(valueIfAbsent);
return (deserialize && value instanceof Data) ? serializationService.toObject(value)
: value;
}
|
@Test
public void test_getNow_Data() {
Object value = "value";
DeserializingCompletableFuture<Object> future = new DeserializingCompletableFuture<>(serializationService, deserialize);
future.complete(serializationService.toData(value));
if (deserialize) {
assertEquals(value, future.getNow("default"));
} else {
assertEquals(serializationService.toData(value), future.getNow("default"));
}
}
|
public void extractTablesFromSelect(final SelectStatement selectStatement) {
if (selectStatement.getCombine().isPresent()) {
CombineSegment combineSegment = selectStatement.getCombine().get();
extractTablesFromSelect(combineSegment.getLeft().getSelect());
extractTablesFromSelect(combineSegment.getRight().getSelect());
}
if (selectStatement.getFrom().isPresent() && !selectStatement.getCombine().isPresent()) {
extractTablesFromTableSegment(selectStatement.getFrom().get());
}
selectStatement.getWhere().ifPresent(optional -> extractTablesFromExpression(optional.getExpr()));
if (null != selectStatement.getProjections() && !selectStatement.getCombine().isPresent()) {
extractTablesFromProjections(selectStatement.getProjections());
}
selectStatement.getGroupBy().ifPresent(optional -> extractTablesFromOrderByItems(optional.getGroupByItems()));
selectStatement.getOrderBy().ifPresent(optional -> extractTablesFromOrderByItems(optional.getOrderByItems()));
selectStatement.getHaving().ifPresent(optional -> extractTablesFromExpression(optional.getExpr()));
selectStatement.getWithSegment().ifPresent(optional -> extractTablesFromCTEs(optional.getCommonTableExpressions()));
selectStatement.getLock().ifPresent(this::extractTablesFromLock);
}
|
@Test
void assertExtractJoinTableSegmentsFromSelect() {
JoinTableSegment joinTableSegment = new JoinTableSegment();
joinTableSegment.setLeft(new SimpleTableSegment(new TableNameSegment(16, 22, new IdentifierValue("t_order"))));
joinTableSegment.setRight(new SimpleTableSegment(new TableNameSegment(37, 48, new IdentifierValue("t_order_item"))));
joinTableSegment.setJoinType("INNER");
joinTableSegment.setCondition(new BinaryOperationExpression(56, 79, new ColumnSegment(56, 65, new IdentifierValue("order_id")),
new ColumnSegment(69, 79, new IdentifierValue("order_id")), "=", "oi.order_id = o.order_id"));
SelectStatement selectStatement = mock(SelectStatement.class);
when(selectStatement.getFrom()).thenReturn(Optional.of(joinTableSegment));
tableExtractor.extractTablesFromSelect(selectStatement);
assertThat(tableExtractor.getJoinTables().size(), is(1));
assertThat(tableExtractor.getJoinTables().iterator().next(), is(joinTableSegment));
}
|
@Override
public Serde<GenericKey> create(
final FormatInfo format,
final PersistenceSchema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
final String loggerNamePrefix,
final ProcessingLogContext processingLogContext,
final Optional<TrackedCallback> tracker
) {
return createInner(
format,
schema,
ksqlConfig,
schemaRegistryClientFactory,
loggerNamePrefix,
processingLogContext,
tracker
);
}
|
@Test
public void shouldThrowOnNestedMapKeyColumn() {
// Given:
schema = PersistenceSchema.from(
ImmutableList.of(column(SqlTypes.struct()
.field("F", SqlTypes.map(SqlTypes.STRING, SqlTypes.STRING))
.build())),
SerdeFeatures.of()
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> factory
.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
Optional.empty())
);
// Then:
assertThat(e.getMessage(), containsString(
"Map keys, including types that contain maps, are not supported as they may lead "
+ "to unexpected behavior due to inconsistent serialization. Key column name: `foo`. "
+ "Column type: STRUCT<`F` MAP<STRING, STRING>>. "
+ "See https://github.com/confluentinc/ksql/issues/6621 for more."));
}
|
@Override
public BigDecimal getBigNumber( Object object ) throws KettleValueException {
Long timestampAsInteger = getInteger( object );
if ( null != timestampAsInteger ) {
return BigDecimal.valueOf( timestampAsInteger );
} else {
return null;
}
}
|
@Test
public void testConvertTimestampToBigNumber_DefaultMode() throws KettleValueException {
System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE,
Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_LEGACY );
ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp();
BigDecimal result = valueMetaTimestamp.getBigNumber( TIMESTAMP_WITH_NANOSECONDS );
assertEquals( BigDecimal.valueOf( 1567308896123L ), result );
System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, "Something invalid!" );
valueMetaTimestamp = new ValueMetaTimestamp();
result = valueMetaTimestamp.getBigNumber( TIMESTAMP_WITH_NANOSECONDS );
assertEquals( BigDecimal.valueOf( 1567308896123L ), result );
}
|
public List<String> getAllTableNames(String dbName) {
return get(tableNamesCache, dbName);
}
|
@Test
public void testGetAllTableNames() {
CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
List<String> databaseNames = cachingHiveMetastore.getAllTableNames("xxx");
Assert.assertEquals(Lists.newArrayList("table1", "table2"), databaseNames);
}
|
public void readRep( Repository rep, IMetaStore metaStore, ObjectId id_step, List<DatabaseMeta> databases )
throws KettleException {
try {
encoding = rep.getStepAttributeString( id_step, "encoding" );
nameSpace = rep.getStepAttributeString( id_step, "name_space" );
mainElement = rep.getStepAttributeString( id_step, "xml_main_element" );
repeatElement = rep.getStepAttributeString( id_step, "xml_repeat_element" );
fileName = rep.getStepAttributeString( id_step, "file_name" );
extension = rep.getStepAttributeString( id_step, "file_extention" );
servletOutput = rep.getStepAttributeBoolean( id_step, "file_servlet_output" );
doNotOpenNewFileInit = rep.getStepAttributeBoolean( id_step, "do_not_open_newfile_init" );
splitEvery = (int) rep.getStepAttributeInteger( id_step, "file_split" );
stepNrInFilename = rep.getStepAttributeBoolean( id_step, "file_add_stepnr" );
dateInFilename = rep.getStepAttributeBoolean( id_step, "file_add_date" );
timeInFilename = rep.getStepAttributeBoolean( id_step, "file_add_time" );
SpecifyFormat = rep.getStepAttributeBoolean( id_step, "SpecifyFormat" );
omitNullValues = rep.getStepAttributeBoolean( id_step, "omit_null_values" );
date_time_format = rep.getStepAttributeString( id_step, "date_time_format" );
addToResultFilenames = rep.getStepAttributeBoolean( id_step, "add_to_result_filenames" );
zipped = rep.getStepAttributeBoolean( id_step, "file_zipped" );
int nrfields = rep.countNrStepAttributes( id_step, "field_name" );
allocate( nrfields );
for ( int i = 0; i < nrfields; i++ ) {
outputFields[i] = new XMLField();
outputFields[i].setContentType( ContentType.valueOf( Const.NVL( rep.getStepAttributeString( id_step, i,
"field_content_type" ), ContentType.Element.name() ) ) );
outputFields[i].setFieldName( rep.getStepAttributeString( id_step, i, "field_name" ) );
outputFields[i].setElementName( rep.getStepAttributeString( id_step, i, "field_element" ) );
outputFields[i].setType( rep.getStepAttributeString( id_step, i, "field_type" ) );
outputFields[i].setFormat( rep.getStepAttributeString( id_step, i, "field_format" ) );
outputFields[i].setCurrencySymbol( rep.getStepAttributeString( id_step, i, "field_currency" ) );
outputFields[i].setDecimalSymbol( rep.getStepAttributeString( id_step, i, "field_decimal" ) );
outputFields[i].setGroupingSymbol( rep.getStepAttributeString( id_step, i, "field_group" ) );
outputFields[i].setNullString( rep.getStepAttributeString( id_step, i, "field_nullif" ) );
outputFields[i].setLength( (int) rep.getStepAttributeInteger( id_step, i, "field_length" ) );
outputFields[i].setPrecision( (int) rep.getStepAttributeInteger( id_step, i, "field_precision" ) );
}
} catch ( Exception e ) {
throw new KettleException( "Unexpected error reading step information from the repository", e );
}
}
|
@SuppressWarnings( "ConstantConditions" )
@Test
public void testReadRep() throws Exception {
XMLOutputMeta xmlOutputMeta = new XMLOutputMeta();
Repository rep = mock( Repository.class );
IMetaStore metastore = mock( IMetaStore.class );
DatabaseMeta dbMeta = mock( DatabaseMeta.class );
String encoding = "UTF-8";
String namespace = "";
String mainElement = "rows";
String repeatElement = "row";
String fileName = "repFileName";
StringObjectId oid = new StringObjectId( "oid" );
String fileExtension = "repxml";
boolean servletOutput = true;
boolean newFile = true;
long split = 100L;
boolean addStepNbr = false;
boolean addDate = false;
boolean addTime = true;
boolean specifyFormat = true;
boolean omitNull = false;
String dateTimeFormat = "yyyyMMdd";
boolean addToResult = true;
boolean zipped = true;
String contentType = "Element";
String fieldName = "aField";
String fieldElement = "field";
String fieldType = "String";
long fieldLength = 20L;
long fieldPrecision = 0L;
when( rep.getStepAttributeString( oid, "encoding" ) ).thenReturn( encoding );
when( rep.getStepAttributeString( oid, "name_space" ) ).thenReturn( namespace );
when( rep.getStepAttributeString( oid, "xml_main_element" ) ).thenReturn( mainElement );
when( rep.getStepAttributeString( oid, "xml_repeat_element" ) ).thenReturn( repeatElement );
when( rep.getStepAttributeString( oid, "file_name" ) ).thenReturn( fileName );
when( rep.getStepAttributeString( oid, "file_extention" ) ).thenReturn( fileExtension );
when( rep.getStepAttributeBoolean( oid, "file_servlet_output" ) ).thenReturn( servletOutput );
when( rep.getStepAttributeBoolean( oid, "do_not_open_newfile_init" ) ).thenReturn( newFile );
when( rep.getStepAttributeInteger( oid, "file_split" ) ).thenReturn( split );
when( rep.getStepAttributeBoolean( oid, "file_add_stepnr" ) ).thenReturn( addStepNbr );
when( rep.getStepAttributeBoolean( oid, "file_add_date" ) ).thenReturn( addDate );
when( rep.getStepAttributeBoolean( oid, "file_add_time" ) ).thenReturn( addTime );
when( rep.getStepAttributeBoolean( oid, "SpecifyFormat" ) ).thenReturn( specifyFormat );
when( rep.getStepAttributeBoolean( oid, "omit_null_values" ) ).thenReturn( omitNull );
when( rep.getStepAttributeString( oid, "date_time_format" ) ).thenReturn( dateTimeFormat );
when( rep.getStepAttributeBoolean( oid, "add_to_result_filenames" ) ).thenReturn( addToResult );
when( rep.getStepAttributeBoolean( oid, "file_zipped" ) ).thenReturn( zipped );
when( rep.countNrStepAttributes( oid, "field_name" ) ).thenReturn( 1 );
when( rep.getStepAttributeString( oid, 0, "field_content_type" ) ).thenReturn( contentType );
when( rep.getStepAttributeString( oid, 0, "field_name" ) ).thenReturn( fieldName );
when( rep.getStepAttributeString( oid, 0, "field_element" ) ).thenReturn( fieldElement );
when( rep.getStepAttributeString( oid, 0, "field_type" ) ).thenReturn( fieldType );
when( rep.getStepAttributeString( oid, 0, "field_format" ) ).thenReturn( null );
when( rep.getStepAttributeString( oid, 0, "field_currency" ) ).thenReturn( null );
when( rep.getStepAttributeString( oid, 0, "field_decimal" ) ).thenReturn( null );
when( rep.getStepAttributeString( oid, 0, "field_group" ) ).thenReturn( null );
when( rep.getStepAttributeString( oid, 0, "field_nullif" ) ).thenReturn( null );
when( rep.getStepAttributeInteger( oid, 0, "field_length" ) ).thenReturn( fieldLength );
when( rep.getStepAttributeInteger( oid, 0, "field_precision" ) ).thenReturn( fieldPrecision );
xmlOutputMeta.readRep( rep, metastore, oid, Collections.singletonList( dbMeta ) );
assertEquals( fileName, xmlOutputMeta.getFileName() );
assertTrue( xmlOutputMeta.isDoNotOpenNewFileInit() );
assertTrue( xmlOutputMeta.isServletOutput() );
assertEquals( fileExtension, xmlOutputMeta.getExtension() );
assertFalse( xmlOutputMeta.isStepNrInFilename() );
assertFalse( xmlOutputMeta.isDateInFilename() );
assertTrue( xmlOutputMeta.isTimeInFilename() );
assertTrue( xmlOutputMeta.isSpecifyFormat() );
assertEquals( dateTimeFormat, xmlOutputMeta.getDateTimeFormat() );
assertTrue( xmlOutputMeta.isAddToResultFiles() );
assertTrue( xmlOutputMeta.isZipped() );
assertEquals( encoding, xmlOutputMeta.getEncoding() );
assertTrue( StringUtil.isEmpty( xmlOutputMeta.getNameSpace() ) );
assertEquals( mainElement, xmlOutputMeta.getMainElement() );
assertEquals( repeatElement, xmlOutputMeta.getRepeatElement() );
assertEquals( split, xmlOutputMeta.getSplitEvery() );
assertFalse( xmlOutputMeta.isOmitNullValues() );
XMLField[] outputFields = xmlOutputMeta.getOutputFields();
assertEquals( 1, outputFields.length );
assertEquals( fieldName, outputFields[0].getFieldName() );
assertEquals( XMLField.ContentType.Element, outputFields[0].getContentType() );
assertEquals( fieldElement, outputFields[0].getElementName() );
assertEquals( fieldLength, outputFields[0].getLength() );
assertEquals( fieldPrecision, outputFields[0].getPrecision() );
Mockito.reset( rep, metastore );
StringObjectId transid = new StringObjectId( "transid" );
xmlOutputMeta.saveRep( rep, metastore, transid, oid );
verify( rep ).saveStepAttribute( transid, oid, "encoding", encoding );
verify( rep ).saveStepAttribute( transid, oid, "name_space", namespace );
verify( rep ).saveStepAttribute( transid, oid, "xml_main_element", mainElement );
verify( rep ).saveStepAttribute( transid, oid, "xml_repeat_element", repeatElement );
verify( rep ).saveStepAttribute( transid, oid, "file_name", fileName );
verify( rep ).saveStepAttribute( transid, oid, "file_extention", fileExtension );
verify( rep ).saveStepAttribute( transid, oid, "file_servlet_output", servletOutput );
verify( rep ).saveStepAttribute( transid, oid, "do_not_open_newfile_init", newFile );
verify( rep ).saveStepAttribute( transid, oid, "file_split", split );
verify( rep ).saveStepAttribute( transid, oid, "file_add_stepnr", addStepNbr );
verify( rep ).saveStepAttribute( transid, oid, "file_add_date", addDate );
verify( rep ).saveStepAttribute( transid, oid, "file_add_time", addTime );
verify( rep ).saveStepAttribute( transid, oid, "SpecifyFormat", specifyFormat );
verify( rep ).saveStepAttribute( transid, oid, "omit_null_values", omitNull );
verify( rep ).saveStepAttribute( transid, oid, "date_time_format", dateTimeFormat );
verify( rep ).saveStepAttribute( transid, oid, "add_to_result_filenames", addToResult );
verify( rep ).saveStepAttribute( transid, oid, "file_zipped", zipped );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_content_type", contentType );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_name", fieldName );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_element", fieldElement );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_type", fieldType );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_format", null );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_currency", null );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_decimal", null );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_group", null );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_nullif", null );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_length", fieldLength );
verify( rep ).saveStepAttribute( transid, oid, 0, "field_precision", fieldPrecision );
Mockito.verifyNoMoreInteractions( rep, metastore );
}
|
public static String getSimpleQuery(Map<String, Object> params, String script, boolean noteForm) {
String replaced = script;
Pattern pattern = noteForm ? VAR_NOTE_PTN : VAR_PTN;
Matcher match = pattern.matcher(replaced);
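// Replace each ${...} dynamic form in the script with its supplied parameter value, or with the form's default when no value is provided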
while (match.find()) {
int first = match.start();
if (!noteForm && first > 0 && replaced.charAt(first - 1) == '$') {
continue;
}
Input input = getInputForm(match);
Object value;
if (params.containsKey(input.name)) {
value = params.get(input.name);
} else {
value = input.getDefaultValue();
}
String expanded;
if (value instanceof Object[] || value instanceof Collection) { // multi-selection
OptionInput optionInput = (OptionInput) input;
String delimiter = input.argument;
if (delimiter == null) {
delimiter = DEFAULT_DELIMITER;
}
Collection<Object> checked = value instanceof Collection ? (Collection<Object>) value
: Arrays.asList((Object[]) value);
List<Object> validChecked = new LinkedList<>();
for (Object o : checked) {
// filter out obsolete checked values
if (optionInput.getOptions() != null) {
for (ParamOption option : optionInput.getOptions()) {
if (option.getValue().equals(o)) {
validChecked.add(o);
break;
}
}
}
}
if (validChecked.isEmpty()) {
expanded = StringUtils.join(checked, delimiter);
} else {
params.put(input.name, validChecked);
expanded = StringUtils.join(validChecked, delimiter);
}
} else {
// single-selection
expanded = StringUtils.defaultString((String) value, "");
}
replaced = match.replaceFirst(expanded);
match = pattern.matcher(replaced);
}
return replaced;
}
|
@Test
void testFormSubstitution() {
// test form substitution without new forms
String script = "INPUT=${input_form=}SELECTED=${select_form(Selection Form)=" +
",s_op1|s_op2|s_op3}\nCHECKED=${checkbox:checkbox_form=c_op1|c_op2,c_op1|c_op2|c_op3}";
Map<String, Object> params = new HashMap<>();
params.put("input_form", "some_input");
params.put("select_form", "s_op2");
params.put("checkbox_form", new String[]{"c_op1", "c_op3"});
String replaced = Input.getSimpleQuery(params, script, false);
assertEquals("INPUT=some_inputSELECTED=s_op2\nCHECKED=c_op1,c_op3", replaced);
// test form substitution with new forms
script = "INPUT=${input_form=}SELECTED=${select_form(Selection Form)=,s_op1|s_op2|s_op3}\n" +
"CHECKED=${checkbox:checkbox_form=c_op1|c_op2,c_op1|c_op2|c_op3}\n" +
"NEW_CHECKED=${checkbox( and ):new_check=nc_a|nc_c,nc_a|nc_b|nc_c}";
replaced = Input.getSimpleQuery(params, script, false);
assertEquals("INPUT=some_inputSELECTED=s_op2\nCHECKED=c_op1,c_op3\n" +
"NEW_CHECKED=nc_a and nc_c", replaced);
// test form substitution with obsolete values
script = "INPUT=${input_form=}SELECTED=${select_form(Selection Form)=,s_op1|s_op2|s_op3}\n" +
"CHECKED=${checkbox:checkbox_form=c_op1|c_op2,c_op1|c_op2|c_op3_new}\n" +
"NEW_CHECKED=${checkbox( and ):new_check=nc_a|nc_c,nc_a|nc_b|nc_c}";
replaced = Input.getSimpleQuery(params, script, false);
assertEquals("INPUT=some_inputSELECTED=s_op2\nCHECKED=c_op1\n" +
"NEW_CHECKED=nc_a and nc_c", replaced);
// textbox without param value provided
script = "INPUT='${input_form}'";
params = new HashMap<>();
replaced = Input.getSimpleQuery(params, script, false);
assertEquals("INPUT=''", replaced);
}
|
public static HintValueContext extractHint(final String sql) {
if (!containsSQLHint(sql)) {
return new HintValueContext();
}
HintValueContext result = new HintValueContext();
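// Extract the key=value text between the hint prefix and the closing comment marker, then parse it into individual hint entries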
int hintKeyValueBeginIndex = getHintKeyValueBeginIndex(sql);
String hintKeyValueText = sql.substring(hintKeyValueBeginIndex, sql.indexOf(SQL_COMMENT_SUFFIX, hintKeyValueBeginIndex));
Map<String, String> hintKeyValues = getSQLHintKeyValues(hintKeyValueText);
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY)) {
result.setDataSourceName(getHintValue(hintKeyValues, SQLHintPropertiesKey.DATASOURCE_NAME_KEY));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)) {
result.setWriteRouteOnly(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.WRITE_ROUTE_ONLY_KEY)));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)) {
result.setSkipSQLRewrite(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SKIP_SQL_REWRITE_KEY)));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY)) {
String property = getHintValue(hintKeyValues, SQLHintPropertiesKey.DISABLE_AUDIT_NAMES_KEY);
result.getDisableAuditNames().addAll(getSplitterSQLHintValue(property));
}
if (containsHintKey(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)) {
result.setShadow(Boolean.parseBoolean(getHintValue(hintKeyValues, SQLHintPropertiesKey.SHADOW_KEY)));
}
for (Entry<String, String> entry : hintKeyValues.entrySet()) {
Object value = convert(entry.getValue());
Comparable<?> comparable = value instanceof Comparable ? (Comparable<?>) value : Objects.toString(value);
if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_DATABASE_VALUE_KEY)) {
result.getShardingDatabaseValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
}
if (containsHintKey(Objects.toString(entry.getKey()), SQLHintPropertiesKey.SHARDING_TABLE_VALUE_KEY)) {
result.getShardingTableValues().put(Objects.toString(entry.getKey()).toUpperCase(), comparable);
}
}
return result;
}
|
@Test
void assertSQLHintShardingDatabaseValueWithStringHintValue() {
HintValueContext actual = SQLHintUtils.extractHint("/* SHARDINGSPHERE_HINT: t_order.SHARDING_DATABASE_VALUE=a */");
assertThat(actual.getHintShardingDatabaseValue("t_order"), is(Collections.singletonList("a")));
}
|
public double p90() {
return getLinearInterpolation(0.90);
}
|
@Test
public void testP90() {
HistogramData histogramData1 = HistogramData.linear(0, 0.2, 50);
histogramData1.record(0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
assertThat(String.format("%.3f", histogramData1.p90()), equalTo("8.200"));
HistogramData histogramData2 = HistogramData.linear(0, 0.02, 50);
histogramData2.record(0, 0, 0);
assertThat(String.format("%.3f", histogramData2.p90()), equalTo("0.018"));
}
|
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
if (left == null && right == null) {
return true;
}
if (left == null || right == null) {
return false;
}
if (!left.getSchema().getName().equals(right.getSchema().getName())) {
return false;
}
extractCommonObjectSchema(left, right);
return compare(left, right);
}
|
@Test
public void shouldUseExactFlagToConsiderExtraFieldsInEquality_usingPrimitives() {
TypeState1 typeState1 = new TypeState1();
typeState1.longField = 1L;
typeState1.stringField = "A";
typeState1.doubleField = 1.0;
typeState1.basicIntField = 1;
typeState1.valueOnlyInTypeState1 = "A"; // This field being set should make the records unequal.
writer1.reset();
mapper1.writeFlat(typeState1, writer1);
FlatRecord rec1 = writer1.generateFlatRecord();
TypeState2 typeState2 = new TypeState2();
typeState2.longField = 1L;
typeState2.stringField = "A";
typeState2.doubleField = 1.0;
typeState2.basicIntField = 1;
writer2.reset();
mapper2.writeFlat(typeState2, writer2);
FlatRecord rec2 = writer2.generateFlatRecord();
FlatRecordTraversalObjectNode leftNode = new FlatRecordTraversalObjectNode(rec1);
FlatRecordTraversalObjectNode rightNode = new FlatRecordTraversalObjectNode(rec2);
assertThat(FlatRecordTraversalObjectNodeEquality.equals(leftNode, rightNode)).isTrue();
assertThat(FlatRecordTraversalObjectNodeEquality.equals(rightNode, leftNode)).isTrue();
}
|
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
if (list == null) {
return FEELFnResult.ofResult(false);
}
boolean result = false;
for (final Object element : list) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
" a Boolean"));
} else {
if (element != null) {
result |= (Boolean) element;
}
}
}
return FEELFnResult.ofResult(result);
}
|
@Test
void invokeListParamNull() {
FunctionTestUtil.assertResult(anyFunction.invoke((List) null), false);
}
|
@Override
public double interpolate(double x1, double x2) {
int i = x1terp.search(x1);
int j = x2terp.search(x2);
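// t and u are the fractional positions of (x1, x2) within the bracketing grid cell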
double t = (x1-x1terp.xx[i])/(x1terp.xx[i+1]-x1terp.xx[i]);
double u = (x2-x2terp.xx[j])/(x2terp.xx[j+1]-x2terp.xx[j]);
return (1.-t)*(1.-u)*y[i][j] + t*(1.-u)*y[i+1][j] + (1.-t)*u*y[i][j+1] + t*u*y[i+1][j+1];
}
|
@Test
public void testInterpolate() {
System.out.println("interpolate");
double[] x1 = {1950, 1960, 1970, 1980, 1990};
double[] x2 = {10, 20, 30};
double[][] y = {
{150.697, 199.592, 187.625},
{179.323, 195.072, 250.287},
{203.212, 179.092, 322.767},
{226.505, 153.706, 426.730},
{249.633, 120.281, 598.243}
};
BilinearInterpolation instance = new BilinearInterpolation(x1, x2, y);
assertEquals(190.6287, instance.interpolate(1975, 15), 1E-4);
}
|
public static void free(final DirectBuffer buffer)
{
if (null != buffer)
{
free(buffer.byteBuffer());
}
}
|
@Test
void freeIsANoOpIfDirectBufferContainsNonDirectByteBuffer()
{
final DirectBuffer buffer = mock(DirectBuffer.class);
final ByteBuffer byteBuffer = ByteBuffer.allocate(4);
when(buffer.byteBuffer()).thenReturn(byteBuffer);
BufferUtil.free(buffer);
byteBuffer.put(1, (byte)5);
assertEquals(5, byteBuffer.get(1));
}
|
@Override
public Path resolveSibling(Path other) {
throw new UnsupportedOperationException();
}
|
@Test
public void testResolveSibling() {
assertEquals(
"gs://bucket/bar/moo",
GcsPath.fromUri("gs://bucket/bar/foo").resolveSibling("moo").toString());
assertEquals(
"gs://bucket/moo", GcsPath.fromUri("gs://bucket/foo").resolveSibling("moo").toString());
thrown.expect(UnsupportedOperationException.class);
GcsPath.fromUri("gs://bucket/").resolveSibling("moo");
}
|
@Override
public String rpcType() {
return RpcTypeEnum.WEB_SOCKET.getName();
}
|
@Test
public void testRpcType() {
Assertions.assertEquals(webSocketShenyuContextDecorator.rpcType(), "websocket");
}
|
@ConstantFunction(name = "subtract", argTypes = {BIGINT, BIGINT}, returnType = BIGINT, isMonotonic = true)
public static ConstantOperator subtractBigInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createBigint(Math.subtractExact(first.getBigint(), second.getBigint()));
}
|
@Test
public void subtractBigInt() {
assertEquals(0, ScalarOperatorFunctions.subtractBigInt(O_BI_100, O_BI_100).getBigint());
}
|
@Override
public LogicalSchema getSchema() {
return schema;
}
|
@Test
public void shouldHaveFullyQualifiedSchema() {
// When:
final LogicalSchema schema = node.getSchema();
// Then:
assertThat(schema, is(REAL_SCHEMA.withPseudoAndKeyColsInValue(false)));
}
|
@Override
public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException {
readData( stepnode );
}
|
@Test
public void testLoadXML() throws Exception {
SystemDataMeta systemDataMeta = new SystemDataMeta();
DocumentBuilderFactory documentBuilderFactory = DocumentBuilderFactory.newInstance();
DocumentBuilder documentBuilder = documentBuilderFactory.newDocumentBuilder();
Document document = documentBuilder.parse( new InputSource( new StringReader( expectedXML ) ) );
Node node = document;
IMetaStore store = null;
systemDataMeta.loadXML( node, null, store );
assertEquals( expectedSystemDataMeta, systemDataMeta );
}
|
public static DeploymentDescriptor merge(List<DeploymentDescriptor> descriptorHierarchy, MergeMode mode) {
if (descriptorHierarchy == null || descriptorHierarchy.isEmpty()) {
throw new IllegalArgumentException("Descriptor hierarchy list cannot be empty");
}
if (descriptorHierarchy.size() == 1) {
return descriptorHierarchy.get(0);
}
Deque<DeploymentDescriptor> stack = new ArrayDeque<>();
descriptorHierarchy.forEach(stack::push);
while (stack.size() > 1) {
stack.push(merge(stack.pop(), stack.pop(), mode));
}
// last element from the stack is the one that contains all merged descriptors
return stack.pop();
}
|
@Test
public void testDeploymentDesciptorMergeMergeCollectionsAvoidDuplicates() {
DeploymentDescriptor primary = new DeploymentDescriptorImpl("org.jbpm.domain");
primary.getBuilder()
.addMarshalingStrategy(new ObjectModel("org.jbpm.test.CustomStrategy", new Object[]{"param2"}));
assertThat(primary).isNotNull();
assertThat(primary.getPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(primary.getAuditPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(primary.getAuditMode()).isEqualTo(AuditMode.JPA);
assertThat(primary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
assertThat(primary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
assertThat(primary.getMarshallingStrategies().size()).isEqualTo(1);
assertThat(primary.getConfiguration().size()).isEqualTo(0);
assertThat(primary.getEnvironmentEntries().size()).isEqualTo(0);
assertThat(primary.getEventListeners().size()).isEqualTo(0);
assertThat(primary.getGlobals().size()).isEqualTo(0);
assertThat(primary.getTaskEventListeners().size()).isEqualTo(0);
assertThat(primary.getWorkItemHandlers().size()).isEqualTo(0);
DeploymentDescriptor secondary = new DeploymentDescriptorImpl("org.jbpm.domain");
secondary.getBuilder()
.auditMode(AuditMode.JMS)
.persistenceMode(PersistenceMode.JPA)
.persistenceUnit(null)
.auditPersistenceUnit("")
.addMarshalingStrategy(new ObjectModel("org.jbpm.test.CustomStrategy", new Object[]{"param2"}));
assertThat(secondary).isNotNull();
assertThat(secondary.getPersistenceUnit()).isEqualTo(null);
assertThat(secondary.getAuditPersistenceUnit()).isEqualTo("");
assertThat(secondary.getAuditMode()).isEqualTo(AuditMode.JMS);
assertThat(secondary.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
assertThat(secondary.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
assertThat(secondary.getMarshallingStrategies().size()).isEqualTo(1);
assertThat(secondary.getConfiguration().size()).isEqualTo(0);
assertThat(secondary.getEnvironmentEntries().size()).isEqualTo(0);
assertThat(secondary.getEventListeners().size()).isEqualTo(0);
assertThat(secondary.getGlobals().size()).isEqualTo(0);
assertThat(secondary.getTaskEventListeners().size()).isEqualTo(0);
assertThat(secondary.getWorkItemHandlers().size()).isEqualTo(0);
// and now let's merge them
DeploymentDescriptor outcome = DeploymentDescriptorMerger.merge(primary, secondary,
MergeMode.MERGE_COLLECTIONS);
assertThat(outcome).isNotNull();
assertThat(outcome.getPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(outcome.getAuditPersistenceUnit()).isEqualTo("org.jbpm.domain");
assertThat(outcome.getAuditMode()).isEqualTo(AuditMode.JMS);
assertThat(outcome.getPersistenceMode()).isEqualTo(PersistenceMode.JPA);
assertThat(outcome.getRuntimeStrategy()).isEqualTo(RuntimeStrategy.SINGLETON);
assertThat(outcome.getMarshallingStrategies().size()).isEqualTo(1);
assertThat(outcome.getConfiguration().size()).isEqualTo(0);
assertThat(outcome.getEnvironmentEntries().size()).isEqualTo(0);
assertThat(outcome.getEventListeners().size()).isEqualTo(0);
assertThat(outcome.getGlobals().size()).isEqualTo(0);
assertThat(outcome.getTaskEventListeners().size()).isEqualTo(0);
assertThat(outcome.getWorkItemHandlers().size()).isEqualTo(0);
}
|
@Override
public void updateMember(ShareGroupMember newMember) {
if (newMember == null) {
throw new IllegalArgumentException("newMember cannot be null.");
}
ShareGroupMember oldMember = members.put(newMember.memberId(), newMember);
maybeUpdateSubscribedTopicNamesAndGroupSubscriptionType(oldMember, newMember);
maybeUpdateGroupState();
}
|
@Test
public void testUpdateSubscriptionMetadata() {
Uuid fooTopicId = Uuid.randomUuid();
Uuid barTopicId = Uuid.randomUuid();
Uuid zarTopicId = Uuid.randomUuid();
MetadataImage image = new MetadataImageBuilder()
.addTopic(fooTopicId, "foo", 1)
.addTopic(barTopicId, "bar", 2)
.addTopic(zarTopicId, "zar", 3)
.addRacks()
.build();
ShareGroupMember member1 = new ShareGroupMember.Builder("member1")
.setSubscribedTopicNames(Collections.singletonList("foo"))
.build();
ShareGroupMember member2 = new ShareGroupMember.Builder("member2")
.setSubscribedTopicNames(Collections.singletonList("bar"))
.build();
ShareGroupMember member3 = new ShareGroupMember.Builder("member3")
.setSubscribedTopicNames(Collections.singletonList("zar"))
.build();
ShareGroup shareGroup = createShareGroup("group-foo");
// It should be empty by default.
assertEquals(
Collections.emptyMap(),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account member 1.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, member1),
image.topics(),
image.cluster()
)
);
// Updating the group with member1.
shareGroup.updateMember(member1);
// It should return foo now.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 1.
assertEquals(
Collections.emptyMap(),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(member1, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account member 2.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, member2),
image.topics(),
image.cluster()
)
);
// Updating the group with member2.
shareGroup.updateMember(member2);
// It should return foo and bar.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 2.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(member2, null),
image.topics(),
image.cluster()
)
);
// Removing member1 results in returning bar.
assertEquals(
mkMap(
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(member1, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account member 3.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, member3),
image.topics(),
image.cluster()
)
);
// Updating group with member3.
shareGroup.updateMember(member3);
// It should return foo, bar and zar.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(null, null),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 1, member 2 and member 3
assertEquals(
Collections.emptyMap(),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(new HashSet<>(Arrays.asList(member1, member2, member3))),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 2 and member 3.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(new HashSet<>(Arrays.asList(member2, member3))),
image.topics(),
image.cluster()
)
);
// Compute while taking into account removal of member 1.
assertEquals(
mkMap(
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(Collections.singleton(member1)),
image.topics(),
image.cluster()
)
);
// It should return foo, bar and zar.
assertEquals(
mkMap(
mkEntry("foo", new TopicMetadata(fooTopicId, "foo", 1, mkMapOfPartitionRacks(1))),
mkEntry("bar", new TopicMetadata(barTopicId, "bar", 2, mkMapOfPartitionRacks(2))),
mkEntry("zar", new TopicMetadata(zarTopicId, "zar", 3, mkMapOfPartitionRacks(3)))
),
shareGroup.computeSubscriptionMetadata(
shareGroup.computeSubscribedTopicNames(Collections.emptySet()),
image.topics(),
image.cluster()
)
);
}
|
@Udf
public List<Long> generateSeriesLong(
@UdfParameter(description = "The beginning of the series") final long start,
@UdfParameter(description = "Marks the end of the series (inclusive)") final long end
) {
return generateSeriesLong(start, end, end - start > 0 ? 1 : -1);
}
|
@Test
public void shouldComputeNegativeLongRange() {
final List<Long> range = rangeUdf.generateSeriesLong(9, 0);
assertThat(range, hasSize(10));
long val = 9;
for (final Long i : range) {
assertThat(val--, is(i));
}
}
|
@Override
public Optional<HealthStatus> deflectorHealth(Collection<String> indices) {
if (indices.isEmpty()) {
return Optional.of(HealthStatus.Green);
}
final Map<String, String> aliasMapping = catApi.aliases();
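// Resolve alias names to the concrete indices behind them; plain index names pass through unchanged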
final Set<String> mappedIndices = indices
.stream()
.map(index -> aliasMapping.getOrDefault(index, index))
.collect(Collectors.toSet());
final Set<IndexSummaryResponse> indexSummaries = catApi.indices()
.stream()
.filter(indexSummary -> mappedIndices.contains(indexSummary.index()))
.collect(Collectors.toSet());
if (indexSummaries.size() < mappedIndices.size()) {
return Optional.empty();
}
return indexSummaries.stream()
.map(IndexSummaryResponse::health)
.map(HealthStatus::fromString)
.min(HealthStatus::compareTo);
}
|
@Test
void testDeflectorHealth() {
when(catApi.aliases()).thenReturn(Map.of(
"foo_deflector", "foo_42",
"bar_deflector", "bar_17",
"baz_deflector", "baz_23"
));
when(catApi.indices()).thenReturn(List.of(
new IndexSummaryResponse("foo_42", "", "RED"),
new IndexSummaryResponse("bar_17", "", "YELLOW"),
new IndexSummaryResponse("baz_23", "", "GREEN")
));
assertThat(clusterAdapter.deflectorHealth(Set.of("foo_deflector", "bar_deflector", "baz_deflector"))).contains(HealthStatus.Red);
}
|
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException{
PluginRiskConsent riskConsent = PluginRiskConsent.valueOf(config.get(PLUGINS_RISK_CONSENT).orElse(NOT_ACCEPTED.name()));
if (userSession.hasSession() && userSession.isLoggedIn()
&& userSession.isSystemAdministrator() && riskConsent == REQUIRED) {
redirectTo(response, request.getContextPath() + PLUGINS_RISK_CONSENT_PATH);
}
chain.doFilter(request, response);
}
|
@Test
public void doFilter_givenNotLoggedInAndRequired_dontRedirect() throws Exception {
PluginsRiskConsentFilter consentFilter = new PluginsRiskConsentFilter(configuration, userSession);
when(userSession.hasSession()).thenReturn(true);
when(userSession.isLoggedIn()).thenReturn(false);
when(configuration.get(PLUGINS_RISK_CONSENT)).thenReturn(Optional.of(PluginRiskConsent.REQUIRED.name()));
consentFilter.doFilter(request, response, chain);
verify(response, times(0)).sendRedirect(Mockito.anyString());
}
|
public LinkedList<LinkedList<Node>> computeWeaklyConnectedComponents(Graph graph, HashMap<Node, Integer> indices) {
int N = graph.getNodeCount();
//Keep track of which nodes have been seen
int[] color = new int[N];
Progress.start(progress, N);
int seenCount = 0;
LinkedList<LinkedList<Node>> components = new LinkedList<>();
while (seenCount < N) {
//The search Q
LinkedList<Node> Q = new LinkedList<>();
//The component-list
LinkedList<Node> component = new LinkedList<>();
//Seed the search Q
NodeIterable iter = graph.getNodes();
for (Node next : iter) {
if (color[indices.get(next)] == 0) {
Q.add(next);
iter.doBreak();
break;
}
}
//While there are more nodes to search
while (!Q.isEmpty()) {
if (isCanceled) {
return new LinkedList<>();
}
//Get the next Node and add it to the component list
Node u = Q.removeFirst();
component.add(u);
color[indices.get(u)] = 2;
//Iterate over all of u's neighbors
EdgeIterable edgeIter = graph.getEdges(u);
//For each neighbor
for (Edge edge : edgeIter) {
Node reachable = graph.getOpposite(u, edge);
int id = indices.get(reachable);
//If this neighbor is unvisited
if (color[id] == 0) {
//Mark it as used
color[id] = 1;
//Add it to the search Q
Q.addLast(reachable);
}
}
seenCount++;
Progress.progress(progress, seenCount);
}
components.add(component);
}
return components;
}
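A hypothetical caller sketch (not from the original source) showing how the node-to-index map expected by the method can be built before invoking it:
HashMap<Node, Integer> nodeIndices = new HashMap<>();
int nextIndex = 0;
for (Node node : graph.getNodes()) {
    nodeIndices.put(node, nextIndex++);
}
LinkedList<LinkedList<Node>> components = computeWeaklyConnectedComponents(graph, nodeIndices);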
|
@Test
public void testNullGraphWeaklyConnectedComponents() {
GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(5);
UndirectedGraph graph = graphModel.getUndirectedGraph();
Node n0 = graph.getNode("0");
Node n1 = graph.getNode("1");
Node n2 = graph.getNode("2");
Node n3 = graph.getNode("3");
Node n4 = graph.getNode("4");
ConnectedComponents c = new ConnectedComponents();
HashMap<Node, Integer> indices = new HashMap<>();
indices.put(n0, 0);
indices.put(n1, 1);
indices.put(n2, 2);
indices.put(n3, 3);
indices.put(n4, 4);
LinkedList<LinkedList<Node>> components = c.computeWeaklyConnectedComponents(graph, indices);
assertEquals(components.size(), 5);
}
|
@Override
public Class<? extends MaxLabeledStorageBuilder> builder() {
return MaxLabeledStorageBuilder.class;
}
|
@Test
public void testBuilder() throws IllegalAccessException, InstantiationException {
function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1);
function.calculate();
StorageBuilder<MaxLabeledFunction> storageBuilder = function.builder().newInstance();
final HashMapConverter.ToStorage toStorage = new HashMapConverter.ToStorage();
storageBuilder.entity2Storage(function, toStorage);
final Map<String, Object> map = toStorage.obtain();
map.put(MaxLabeledFunction.VALUE, ((DataTable) map.get(MaxLabeledFunction.VALUE)).toStorageData());
MaxLabeledFunction function2 = storageBuilder.storage2Entity(new HashMapConverter.ToEntity(map));
assertThat(function2.getValue()).isEqualTo(function.getValue());
}
|
public Record convert(final AbstractWALEvent event) {
if (filter(event)) {
return createPlaceholderRecord(event);
}
if (!(event instanceof AbstractRowEvent)) {
return createPlaceholderRecord(event);
}
PipelineTableMetaData tableMetaData = getPipelineTableMetaData(((AbstractRowEvent) event).getTableName());
if (event instanceof WriteRowEvent) {
return handleWriteRowEvent((WriteRowEvent) event, tableMetaData);
}
if (event instanceof UpdateRowEvent) {
return handleUpdateRowEvent((UpdateRowEvent) event, tableMetaData);
}
if (event instanceof DeleteRowEvent) {
return handleDeleteRowEvent((DeleteRowEvent) event, tableMetaData);
}
throw new UnsupportedSQLOperationException("");
}
|
@Test
void assertConvertPlaceholderEvent() {
Record record = walEventConverter.convert(new PlaceholderEvent());
assertThat(record, instanceOf(PlaceholderRecord.class));
}
|
@Override
public ReadWriteBuffer onCall(Command command, ReadWriteBuffer parameter) throws IOException {
Path p = null;
if (null == command) {
return null;
}
if (command.equals(GET_OUTPUT_PATH)) {
p = output.getOutputFileForWrite(-1);
} else if (command.equals(GET_OUTPUT_INDEX_PATH)) {
p = output.getOutputIndexFileForWrite(-1);
} else if (command.equals(GET_SPILL_PATH)) {
p = output.getSpillFileForWrite(spillNumber++, -1);
} else if (command.equals(GET_COMBINE_HANDLER)) {
if (null == combinerHandler) {
return null;
}
final ReadWriteBuffer result = new ReadWriteBuffer(8);
result.writeLong(combinerHandler.getId());
return result;
} else {
throw new IOException("Illegal command: " + command.toString());
}
if (p != null) {
final ReadWriteBuffer result = new ReadWriteBuffer();
result.writeString(p.toUri().getPath());
return result;
} else {
throw new IOException("MapOutputFile can't allocate spill/output file");
}
}
|
@Test
public void testOnCall() throws IOException {
this.handler = new NativeCollectorOnlyHandler(taskContext, nativeHandler, pusher, combiner);
boolean thrown = false;
try {
handler.onCall(new Command(-1), null);
} catch(final IOException e) {
thrown = true;
}
Assert.assertTrue("exception thrown", thrown);
final String expectedOutputPath = StringUtils.join(File.separator,
new String[] {LOCAL_DIR, "output", "file.out"});
final String expectedOutputIndexPath = StringUtils.join(File.separator,
new String[] {LOCAL_DIR, "output", "file.out.index"});
final String expectedSpillPath = StringUtils.join(File.separator,
new String[] {LOCAL_DIR, "output", "spill0.out"});
final String outputPath = handler.onCall(
NativeCollectorOnlyHandler.GET_OUTPUT_PATH, null).readString();
Assert.assertEquals(expectedOutputPath, outputPath);
final String outputIndexPath = handler.onCall(
NativeCollectorOnlyHandler.GET_OUTPUT_INDEX_PATH, null).readString();
Assert.assertEquals(expectedOutputIndexPath, outputIndexPath);
final String spillPath = handler.onCall(
NativeCollectorOnlyHandler.GET_SPILL_PATH, null).readString();
Assert.assertEquals(expectedSpillPath, spillPath);
}
|
public void add(String element) {
addAsync(element).toCompletableFuture().join();
}
|
@Test
@Timeout(5)
public void testEventuallyExpires() throws InterruptedException {
final FaultTolerantRedisCluster redisCluster = REDIS_CLUSTER_EXTENSION.getRedisCluster();
final CardinalityEstimator estimator = new CardinalityEstimator(redisCluster, "test", Duration.ofMillis(100));
estimator.add("1");
long count;
do {
count = redisCluster.withCluster(conn -> conn.sync().pfcount("cardinality_estimator::test"));
Thread.sleep(1);
} while (count != 0);
}
|
@ConstantFunction(name = "timediff", argTypes = {DATETIME, DATETIME}, returnType = TIME, isMonotonic = true)
public static ConstantOperator timeDiff(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createTime(Duration.between(second.getDatetime(), first.getDatetime()).getSeconds());
}
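A sign-convention sketch for the function above (illustrative only, using plain java.time): Duration.between(second, first) is first minus second, so the result is negative when the first datetime is earlier than the second.
java.time.LocalDateTime first = java.time.LocalDateTime.of(2010, 11, 2, 18, 30, 10);
java.time.LocalDateTime second = java.time.LocalDateTime.of(2010, 12, 2, 2, 30, 10);
long seconds = java.time.Duration.between(second, first).getSeconds(); // -2534400, matching the test below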
|
@Test
public void timeDiff() {
assertEquals(-2534400.0, ScalarOperatorFunctions.timeDiff(O_DT_20101102_183010, O_DT_20101202_023010).getTime(),
1);
}
|
@Override
public int run(String[] argv) {
if (argv.length < 1) {
printUsage("");
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = argv[i++];
//
// verify that we have enough command line parameters
//
if ("-safemode".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-report".equals(cmd)) {
if (argv.length > DFS_REPORT_ARGS.length + 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-saveNamespace".equals(cmd)) {
if (argv.length != 1 && argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-rollEdits".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-restoreFailedStorage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNodes".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-finalizeUpgrade".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if (RollingUpgradeCommand.matches(cmd)) {
if (argv.length > 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-upgrade".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshServiceAcl".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refresh".equals(cmd)) {
if (argv.length < 3) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-printTopology".equals(cmd)) {
if(argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNamenodes".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-getVolumeReport".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-reconfig".equals(cmd)) {
if (argv.length != 4) {
printUsage(cmd);
return exitCode;
}
} else if ("-deleteBlockPool".equals(cmd)) {
if ((argv.length != 3) && (argv.length != 4)) {
printUsage(cmd);
return exitCode;
}
} else if ("-setBalancerBandwidth".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-getBalancerBandwidth".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-fetchImage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-shutdownDatanode".equals(cmd)) {
if ((argv.length != 2) && (argv.length != 3)) {
printUsage(cmd);
return exitCode;
}
} else if ("-getDatanodeInfo".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-triggerBlockReport".equals(cmd)) {
if ((argv.length < 2) || (argv.length > 5)) {
printUsage(cmd);
return exitCode;
}
} else if ("-listOpenFiles".equals(cmd)) {
if ((argv.length > 4)) {
printUsage(cmd);
return exitCode;
}
}
// initialize DFSAdmin
init();
Exception debugException = null;
exitCode = 0;
try {
if ("-report".equals(cmd)) {
report(argv, i);
} else if ("-safemode".equals(cmd)) {
setSafeMode(argv, i);
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
allowSnapshot(argv);
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
disallowSnapshot(argv);
} else if ("-provisionSnapshotTrash".equalsIgnoreCase(cmd)) {
provisionSnapshotTrash(argv);
} else if ("-saveNamespace".equals(cmd)) {
exitCode = saveNamespace(argv);
} else if ("-rollEdits".equals(cmd)) {
exitCode = rollEdits();
} else if ("-restoreFailedStorage".equals(cmd)) {
exitCode = restoreFailedStorage(argv[i]);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = refreshNodes();
} else if ("-finalizeUpgrade".equals(cmd)) {
exitCode = finalizeUpgrade();
} else if (RollingUpgradeCommand.matches(cmd)) {
exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
} else if ("-upgrade".equals(cmd)) {
exitCode = upgrade(argv[i]);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
exitCode = new ClearQuotaCommand(argv, i, getConf()).runAll();
} else if (SetQuotaCommand.matches(cmd)) {
exitCode = new SetQuotaCommand(argv, i, getConf()).runAll();
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
exitCode = new ClearSpaceQuotaCommand(argv, i, getConf()).runAll();
} else if (SetSpaceQuotaCommand.matches(cmd)) {
exitCode = new SetSpaceQuotaCommand(argv, i, getConf()).runAll();
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcl();
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings();
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration();
} else if ("-refreshCallQueue".equals(cmd)) {
exitCode = refreshCallQueue();
} else if ("-refresh".equals(cmd)) {
exitCode = genericRefresh(argv, i);
} else if ("-printTopology".equals(cmd)) {
exitCode = printTopology();
} else if ("-refreshNamenodes".equals(cmd)) {
exitCode = refreshNamenodes(argv, i);
} else if ("-getVolumeReport".equals(cmd)) {
exitCode = getVolumeReport(argv, i);
} else if ("-deleteBlockPool".equals(cmd)) {
exitCode = deleteBlockPool(argv, i);
} else if ("-setBalancerBandwidth".equals(cmd)) {
exitCode = setBalancerBandwidth(argv, i);
} else if ("-getBalancerBandwidth".equals(cmd)) {
exitCode = getBalancerBandwidth(argv, i);
} else if ("-fetchImage".equals(cmd)) {
exitCode = fetchImage(argv, i);
} else if ("-shutdownDatanode".equals(cmd)) {
exitCode = shutdownDatanode(argv, i);
} else if ("-evictWriters".equals(cmd)) {
exitCode = evictWriters(argv, i);
} else if ("-getDatanodeInfo".equals(cmd)) {
exitCode = getDatanodeInfo(argv, i);
} else if ("-reconfig".equals(cmd)) {
exitCode = reconfig(argv, i);
} else if ("-triggerBlockReport".equals(cmd)) {
exitCode = triggerBlockReport(argv);
} else if ("-listOpenFiles".equals(cmd)) {
exitCode = listOpenFiles(argv);
} else if ("-help".equals(cmd)) {
if (i < argv.length) {
printHelp(argv[i]);
} else {
printHelp("");
}
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("");
}
} catch (IllegalArgumentException arge) {
debugException = arge;
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd);
} catch (RemoteException e) {
//
      // This is an error returned by the hadoop server. Print
      // out the first line of the error message and ignore the stack trace.
exitCode = -1;
debugException = e;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
debugException = ex;
}
} catch (Exception e) {
exitCode = -1;
debugException = e;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (LOG.isDebugEnabled() && debugException != null) {
LOG.debug("Exception encountered:", debugException);
}
return exitCode;
}
|
@Test
public void testRefreshProxyUser() throws Exception {
Path dirPath = new Path("/testdir1");
Path subDirPath = new Path("/testdir1/subdir1");
UserGroupInformation loginUserUgi = UserGroupInformation.getLoginUser();
String proxyUser = "fakeuser";
String realUser = loginUserUgi.getShortUserName();
UserGroupInformation proxyUgi =
UserGroupInformation.createProxyUserForTesting(proxyUser,
loginUserUgi, loginUserUgi.getGroupNames());
// create a directory as login user and re-assign it to proxy user
loginUserUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws Exception {
cluster.getFileSystem().mkdirs(dirPath);
cluster.getFileSystem().setOwner(dirPath, proxyUser,
proxyUgi.getPrimaryGroupName());
return 0;
}
});
    // try creating a subdirectory inside the directory as the proxy user;
    // this should fail because the real user is not yet allowed to impersonate the proxy user
try {
proxyUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override public Integer run() throws Exception {
cluster.getFileSystem().mkdirs(subDirPath);
return 0;
}
});
} catch (RemoteException re) {
Assert.assertTrue(re.unwrapRemoteException()
instanceof AccessControlException);
Assert.assertTrue(re.unwrapRemoteException().getMessage()
.equals("User: " + realUser +
" is not allowed to impersonate " + proxyUser));
}
// refresh will look at configuration on the server side
// add additional resource with the new value
// so the server side will pick it up
String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(realUser);
String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(realUser);
String rsrc = "testGroupMappingRefresh_rsrc.xml";
tempResource = TestRefreshUserMappings.addNewConfigResource(rsrc,
userKeyGroups, "*", userKeyHosts, "*");
String[] args = new String[]{"-refreshSuperUserGroupsConfiguration"};
admin.run(args);
// After proxying the fakeuser, the mkdir should work
proxyUgi.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws Exception {
cluster.getFileSystem().mkdirs(dirPath);
return 0;
}
});
}
|
public Analysis analyze(Statement statement)
{
return analyze(statement, false);
}
|
@Test
public void testExplainAnalyzeFormatJson()
{
analyze("EXPLAIN ANALYZE (format JSON) SELECT * FROM t1");
}
|
@Override
public Set<String> getAssociatedRoles() {
Subject subject = org.apache.shiro.SecurityUtils.getSubject();
Set<String> roles = new HashSet<>();
Map<String, String> allRoles = null;
if (subject.isAuthenticated()) {
Collection<Realm> realmsList = getRealmsList();
for (Realm realm : realmsList) {
String name = realm.getClass().getName();
if (INI_REALM.equals(name)) {
allRoles = ((IniRealm) realm).getIni().get("roles");
break;
} else if (LDAP_REALM.equals(name)) {
try {
AuthorizationInfo auth =
((LdapRealm) realm)
.queryForAuthorizationInfo(
new SimplePrincipalCollection(subject.getPrincipal(), realm.getName()),
((LdapRealm) realm).getContextFactory());
if (auth != null) {
roles = new HashSet<>(auth.getRoles());
}
} catch (NamingException e) {
LOGGER.error("Can't fetch roles", e);
}
break;
} else if (ACTIVE_DIRECTORY_GROUP_REALM.equals(name)) {
allRoles = ((ActiveDirectoryGroupRealm) realm).getListRoles();
break;
} else if (realm instanceof KnoxJwtRealm) {
roles = ((KnoxJwtRealm) realm).mapGroupPrincipals(getPrincipal());
break;
}
}
if (allRoles != null) {
for (Map.Entry<String, String> pair : allRoles.entrySet()) {
if (subject.hasRole(pair.getKey())) {
roles.add(pair.getKey());
}
}
}
}
return roles;
}
|
@Test
void testKnoxGetRoles() {
setupPrincipalName("test");
KnoxJwtRealm realm = spy(new KnoxJwtRealm());
LifecycleUtils.init(realm);
Set<String> testRoles = new HashSet<String>();
testRoles.add("role1");
testRoles.add("role2");
when(realm.mapGroupPrincipals("test")).thenReturn(testRoles);
DefaultSecurityManager securityManager = new DefaultSecurityManager(realm);
ThreadContext.bind(securityManager);
Set<String> roles = shiroSecurityService.getAssociatedRoles();
assertEquals(testRoles, roles);
}
|
@Nonnull
@Override
public Result addChunk(ByteBuf buffer) {
final byte[] readable = new byte[buffer.readableBytes()];
buffer.readBytes(readable, buffer.readerIndex(), buffer.readableBytes());
final GELFMessage msg = new GELFMessage(readable);
final ByteBuf aggregatedBuffer;
switch (msg.getGELFType()) {
case CHUNKED:
try {
chunkCounter.inc();
aggregatedBuffer = checkForCompletion(msg);
if (aggregatedBuffer == null) {
return VALID_EMPTY_RESULT;
}
} catch (IllegalArgumentException | IllegalStateException | IndexOutOfBoundsException e) {
log.debug("Invalid gelf message chunk, dropping message.", e);
return INVALID_RESULT;
}
break;
case ZLIB:
case GZIP:
case UNCOMPRESSED:
aggregatedBuffer = Unpooled.wrappedBuffer(readable);
break;
case UNSUPPORTED:
return INVALID_RESULT;
default:
return INVALID_RESULT;
}
return new Result(aggregatedBuffer, true);
}
|
@Test
public void addSingleChunk() {
final ByteBuf[] singleChunk = createChunkedMessage(512, 1024);
final CodecAggregator.Result result = aggregator.addChunk(singleChunk[0]);
assertNotNull("message should be complete", result.getMessage());
assertEquals(1, counterValueNamed(metricRegistry, COMPLETE_MESSAGES));
assertEquals(1, counterValueNamed(metricRegistry, CHUNK_COUNTER));
assertEquals(0, counterValueNamed(metricRegistry, WAITING_MESSAGES));
assertEquals(0, counterValueNamed(metricRegistry, EXPIRED_CHUNKS));
assertEquals(0, counterValueNamed(metricRegistry, EXPIRED_MESSAGES));
assertEquals(0, counterValueNamed(metricRegistry, DUPLICATE_CHUNKS));
}
|
@VisibleForTesting
S3Client getS3Client() {
return this.s3Client.get();
}
|
@Test
public void testGetPathStyleAccessEnabledWithS3Options() throws URISyntaxException {
S3FileSystem s3FileSystem = new S3FileSystem(s3OptionsWithPathStyleAccessEnabled());
URL s3Url =
s3FileSystem
.getS3Client()
.utilities()
.getUrl(GetUrlRequest.builder().bucket("bucket").key("file").build());
assertEquals("https://s3.us-west-1.amazonaws.com/bucket/file", s3Url.toURI().toString());
}
|
@Override
public Database getDb(String dbName) {
org.apache.hadoop.hive.metastore.api.Database db = client.getDb(dbName);
return HiveMetastoreApiConverter.toDatabase(db, dbName);
}
|
@Test
public void testGetDb() {
HiveMetaClient client = new MockedHiveMetaClient();
HiveMetastore metastore = new HiveMetastore(client, "xxx", MetastoreType.HMS);
Database database = metastore.getDb("db1");
Assert.assertEquals("db1", database.getFullName());
try {
metastore.getDb("db2");
Assert.fail();
} catch (Exception e) {
Assert.assertTrue(e instanceof StarRocksConnectorException);
}
}
|
@ScalarOperator(EQUAL)
@SqlType(StandardTypes.BOOLEAN)
@SqlNullable
public static Boolean equal(@SqlType(StandardTypes.BIGINT) long left, @SqlType(StandardTypes.BIGINT) long right)
{
return left == right;
}
|
@Test
public void testEqual()
{
assertFunction("100000000037 = 100000000037", BOOLEAN, true);
assertFunction("37 = 100000000017", BOOLEAN, false);
assertFunction("100000000017 = 37", BOOLEAN, false);
assertFunction("100000000017 = 100000000017", BOOLEAN, true);
}
|
static boolean calculateActualParam(NamedParameter np, List<String> names, Object[] actualParams,
boolean isVariableParameters, String variableParamPrefix,
List<Object> variableParams) {
logger.trace("calculateActualParam {} {} {} {} {} {}", np, names, actualParams, isVariableParameters, variableParamPrefix, variableParams);
if (names.contains(np.getName())) {
actualParams[names.indexOf(np.getName())] = np.getValue();
return true;
} else if (isVariableParameters) {
return calculateActualParamVariableParameters(np, variableParamPrefix, variableParams);
} else {
// invalid parameter, method is incompatible
return false;
}
}
|
@Test
void calculateActualParam() {
// populate by NamedParameter value
NamedParameter np = new NamedParameter("n", BigDecimal.valueOf(1.5));
List<String> names = Collections.singletonList("n");
Object[] actualParams = new Object[1];
boolean isVariableParameters = false;
String variableParamPrefix = null;
List<Object> variableParams = null;
assertTrue(BaseFEELFunctionHelper.calculateActualParam(np, names, actualParams, isVariableParameters,
variableParamPrefix, variableParams));
assertEquals(np.getValue(), actualParams[0]);
np = new NamedParameter("undefined", BigDecimal.valueOf(1.5));
actualParams = new Object[1];
assertFalse(BaseFEELFunctionHelper.calculateActualParam(np, names, actualParams, isVariableParameters,
variableParamPrefix, variableParams));
// populate by variableparameters
variableParamPrefix = "varPref";
int varIndex = 12;
np = new NamedParameter(variableParamPrefix + varIndex, BigDecimal.valueOf(1.5));
names = Collections.singletonList("n");
actualParams = new Object[1];
isVariableParameters = true;
    variableParams = new ArrayList<>();
assertTrue(BaseFEELFunctionHelper.calculateActualParam(np, names, actualParams, isVariableParameters,
variableParamPrefix, variableParams));
assertEquals(varIndex, variableParams.size());
for (int i = 0; i < varIndex - 1; i++) {
assertNull(variableParams.get(i));
}
assertEquals(np.getValue(), variableParams.get(varIndex - 1));
}
|
@Override
public void close() throws Exception {
Exception exception = null;
for (CompletableFuture<CloseableFnDataReceiver<BeamFnApi.Elements>> receiver :
ImmutableList.copyOf(receivers.values())) {
// Cancel any observer waiting for the client to complete. If the receiver has already been
// completed or cancelled, this call will be ignored.
receiver.cancel(true);
if (!receiver.isCompletedExceptionally()) {
try {
receiver.get().close();
} catch (Exception e) {
if (exception == null) {
exception = e;
} else {
exception.addSuppressed(e);
}
}
}
}
// Cancel any outbound calls and complete any inbound calls, as this multiplexer is hanging up
outboundObserver.onError(
Status.CANCELLED.withDescription("Multiplexer hanging up").asException());
inboundObserver.onCompleted();
if (exception != null) {
throw exception;
}
}
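The close() above follows a common pattern: attempt every close, keep the first failure, and attach later failures as suppressed exceptions. A minimal standalone sketch of that pattern (illustrative, not part of the multiplexer):
static void closeAll(Iterable<? extends AutoCloseable> resources) throws Exception {
    Exception first = null;
    for (AutoCloseable resource : resources) {
        try {
            resource.close();
        } catch (Exception e) {
            if (first == null) {
                first = e;
            } else {
                first.addSuppressed(e);
            }
        }
    }
    if (first != null) {
        throw first;
    }
}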
|
@Test
public void testClose() throws Exception {
Collection<BeamFnApi.Elements> outboundValues = new ArrayList<>();
Collection<Throwable> errorWasReturned = new ArrayList<>();
AtomicBoolean wasClosed = new AtomicBoolean();
final BeamFnDataGrpcMultiplexer multiplexer =
new BeamFnDataGrpcMultiplexer(
DESCRIPTOR,
OutboundObserverFactory.clientDirect(),
inboundObserver ->
TestStreams.withOnNext(outboundValues::add)
.withOnError(errorWasReturned::add)
.build());
multiplexer.registerConsumer(
DATA_INSTRUCTION_ID,
new CloseableFnDataReceiver<BeamFnApi.Elements>() {
@Override
public void flush() throws Exception {
fail("Unexpected call");
}
@Override
public void close() throws Exception {
wasClosed.set(true);
}
@Override
public void accept(BeamFnApi.Elements input) throws Exception {
fail("Unexpected call");
}
});
multiplexer.close();
assertTrue(wasClosed.get());
assertThat(
Iterables.getOnlyElement(errorWasReturned).getMessage(),
containsString("Multiplexer hanging up"));
}
|
@Bean
@Order(Ordered.HIGHEST_PRECEDENCE + 1)
public ErrorWebExceptionHandler errorWebExceptionHandler() {
return new GlobalErrorHandler();
}
|
@Test
public void testErrorWebExceptionHandler() {
applicationContextRunner.run(context -> {
ErrorWebExceptionHandler globalErrorHandler = context.getBean("errorWebExceptionHandler", ErrorWebExceptionHandler.class);
assertNotNull(globalErrorHandler);
});
}
|
public static Schema reassignOrRefreshIds(Schema schema, Schema idSourceSchema) {
return reassignOrRefreshIds(schema, idSourceSchema, true);
}
|
@Test
public void testReassignOrRefreshIds() {
Schema schema =
new Schema(
Lists.newArrayList(
required(10, "a", Types.IntegerType.get()),
required(11, "c", Types.IntegerType.get()),
required(12, "B", Types.IntegerType.get())),
Sets.newHashSet(10));
Schema sourceSchema =
new Schema(
Lists.newArrayList(
required(1, "a", Types.IntegerType.get()),
required(15, "B", Types.IntegerType.get())));
final Schema actualSchema = TypeUtil.reassignOrRefreshIds(schema, sourceSchema);
final Schema expectedSchema =
new Schema(
Lists.newArrayList(
required(1, "a", Types.IntegerType.get()),
required(16, "c", Types.IntegerType.get()),
required(15, "B", Types.IntegerType.get())));
assertThat(actualSchema.asStruct()).isEqualTo(expectedSchema.asStruct());
}
|
@POST
@Path(KMSRESTConstants.KEYS_RESOURCE)
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
@SuppressWarnings("unchecked")
public Response createKey(Map jsonKey) throws Exception {
try{
LOG.trace("Entering createKey Method.");
KMSWebApp.getAdminCallsMeter().mark();
UserGroupInformation user = HttpUserGroupInformation.get();
final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
final String material;
material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
String description = (String)
jsonKey.get(KMSRESTConstants.DESCRIPTION_FIELD);
LOG.debug("Creating key with name {}, cipher being used{}, " +
"length of key {}, description of key {}", name, cipher,
length, description);
Map<String, String> attributes = (Map<String, String>)
jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
KMSOp.CREATE_KEY, name);
}
final KeyProvider.Options options = new KeyProvider.Options(
KMSWebApp.getConfiguration());
if (cipher != null) {
options.setCipher(cipher);
}
if (length != 0) {
options.setBitLength(length);
}
options.setDescription(description);
options.setAttributes(attributes);
KeyProvider.KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
KeyProvider.KeyVersion keyVersion = (material != null)
? provider.createKey(name, Base64.decodeBase64(material),
options)
: provider.createKey(name, options);
provider.flush();
return keyVersion;
}
}
);
kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
(material != null) + " Description:" + description);
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
keyVersion = removeKeyMaterial(keyVersion);
}
Map json = KMSUtil.toJSON(keyVersion);
String requestURL = KMSMDCFilter.getURL();
int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
requestURL = requestURL.substring(0, idx);
LOG.trace("Exiting createKey Method.");
return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
.type(MediaType.APPLICATION_JSON)
.header("Location", getKeyURI(requestURL, name)).entity(json).build();
} catch (Exception e) {
LOG.debug("Exception in createKey.", e);
throw e;
}
}
|
@Test
public void testKMSAuthFailureRetry() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.authentication.token.validity", "1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k4.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k0", new byte[16],
new KeyProvider.Options(conf));
// This happens before rollover
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
                    // At least 2 rollovers, so it should induce a signer exception
Thread.sleep(3500);
kp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
// Test retry count
runServer(null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
conf.setInt(KMSClientProvider.AUTH_RETRY, 0);
final URI uri = createKMSUri(getKMSUrl());
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
                  // At least 2 rollovers, so it should induce a signer exception
Thread.sleep(3500);
try {
kp.createKey("k4", new byte[16],
new KeyProvider.Options(conf));
Assert.fail("This should not succeed..");
} catch (IOException e) {
Assert.assertTrue(
"HTTP exception must be a 401 : " + e.getMessage(), e
.getMessage().contains("401"));
}
return null;
}
});
return null;
}
});
}
|
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
String topic,
ConsumerPosition consumerPosition,
@Nullable String containsStringFilter,
@Nullable String filterId,
@Nullable Integer limit,
@Nullable String keySerde,
@Nullable String valueSerde) {
return loadMessages(
cluster,
topic,
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
consumerPosition,
getMsgFilter(containsStringFilter, filterId),
fixPageSize(limit)
);
}
|
@Test
void loadMessagesReturnsExceptionWhenTopicNotFound() {
StepVerifier.create(messagesService
.loadMessages(cluster, NON_EXISTING_TOPIC,
new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null),
null, null, 1, "String", "String"))
.expectError(TopicNotFoundException.class)
.verify();
}
|
@NotNull
@Override
public List<InetAddress> lookup(@NotNull String host) throws UnknownHostException {
InetAddress address = InetAddress.getByName(host);
if (configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY).orElse(SONAR_VALIDATE_WEBHOOKS_DEFAULT_VALUE)
&& (address.isLoopbackAddress() || address.isAnyLocalAddress() || isLocalAddress(address))) {
throw new IllegalArgumentException("Invalid URL: loopback and wildcard addresses are not allowed for webhooks.");
}
return Collections.singletonList(address);
}
|
@Test
public void lookup_fail_on_ipv6_local_case_insensitive() throws UnknownHostException, SocketException {
Optional<InetAddress> inet6Address = Collections.list(NetworkInterface.getNetworkInterfaces())
.stream()
.flatMap(ni -> Collections.list(ni.getInetAddresses()).stream())
.filter(i -> i instanceof Inet6Address).findAny();
if (!inet6Address.isPresent()) {
return;
}
String differentCaseAddress = getDifferentCaseInetAddress(inet6Address.get());
when(configuration.getBoolean(SONAR_VALIDATE_WEBHOOKS_PROPERTY))
.thenReturn(Optional.of(true));
when(networkInterfaceProvider.getNetworkInterfaceAddresses())
.thenReturn(ImmutableList.of(inet6Address.get()));
Assertions.assertThatThrownBy(() -> underTest.lookup(differentCaseAddress))
.hasMessageContaining(INVALID_URL)
.isInstanceOf(IllegalArgumentException.class);
}
|
FleetControllerOptions getOptions() { return options; }
|
@Test
void testSimple() throws Exception {
ClusterController controller = new ClusterController();
StorDistributionConfig.Builder distributionConfig = new StorDistributionConfig.Builder();
StorDistributionConfig.Group.Builder group = new StorDistributionConfig.Group.Builder();
group.index("0").name("foo");
StorDistributionConfig.Group.Nodes.Builder node = new StorDistributionConfig.Group.Nodes.Builder();
node.index(0);
group.nodes.add(node);
distributionConfig.group.add(group);
FleetcontrollerConfig.Builder fleetcontrollerConfig = new FleetcontrollerConfig.Builder();
fleetcontrollerConfig
.cluster_name("storage")
.index(0)
.zookeeper_server("zoo")
.min_node_ratio_per_group(0.123)
.enable_cluster_feed_block(true)
.cluster_feed_block_limit("foo", 0.5)
.cluster_feed_block_limit("bar", 0.7)
.cluster_feed_block_noise_level(0.05)
.include_distribution_config_in_cluster_state_bundle(true);
SlobroksConfig.Builder slobroksConfig = new SlobroksConfig.Builder();
SlobroksConfig.Slobrok.Builder slobrok = new SlobroksConfig.Slobrok.Builder();
slobrok.connectionspec("foo");
slobroksConfig.slobrok.add(slobrok);
ZookeepersConfig.Builder zookeepersConfig = new ZookeepersConfig.Builder();
zookeepersConfig.zookeeperserverlist("foo");
Metric metric = new Metric() {
@Override
public void set(String s, Number number, Context context) {
}
@Override
public void add(String s, Number number, Context context) {
}
@Override
public Context createContext(Map<String, ?> stringMap) {
return null;
}
};
// Used in standalone mode to get config without a cluster controller instance
ClusterControllerClusterConfigurer configurer = new ClusterControllerClusterConfigurer(
null,
new StorDistributionConfig(distributionConfig),
new FleetcontrollerConfig(fleetcontrollerConfig),
new SlobroksConfig(slobroksConfig),
new ZookeepersConfig(zookeepersConfig),
metric,
null
);
assertNotNull(configurer.getOptions());
assertEquals(0.123, configurer.getOptions().minNodeRatioPerGroup(), 0.01);
assertTrue(configurer.getOptions().clusterFeedBlockEnabled());
assertEquals(0.5, configurer.getOptions().clusterFeedBlockLimit().get("foo"), 0.01);
assertEquals(0.7, configurer.getOptions().clusterFeedBlockLimit().get("bar"), 0.01);
assertEquals(0.05, configurer.getOptions().clusterFeedBlockNoiseLevel(), 0.001);
assertTrue(configurer.getOptions().includeDistributionConfigInClusterStateBundles());
try {
zookeepersConfig.zookeeperserverlist("");
new ClusterControllerClusterConfigurer(
controller,
new StorDistributionConfig(distributionConfig),
new FleetcontrollerConfig(fleetcontrollerConfig),
new SlobroksConfig(slobroksConfig),
new ZookeepersConfig(zookeepersConfig),
metric,
null
);
fail("Should not get here");
} catch (Exception e) {
assertEquals("zookeeper server address must be set, was ''", e.getMessage());
}
}
|
@Override
public Optional<ErrorResponse> filter(DiscFilterRequest request) {
try {
Optional<AthenzPrincipal> certificatePrincipal = getClientCertificate(request)
.map(AthenzIdentities::from)
.map(AthenzPrincipal::new);
if (certificatePrincipal.isEmpty()) {
String errorMessage = "Unable to authenticate Athenz identity. " +
"Either client certificate or principal token is required.";
return createResponse(request, Response.Status.UNAUTHORIZED, errorMessage);
}
AthenzPrincipal principal = certificatePrincipal.get();
request.setUserPrincipal(principal);
request.setRemoteUser(principal.getName());
request.setAttribute(RESULT_PRINCIPAL, principal);
return Optional.empty();
} catch (Exception e) {
return createResponse(request, Response.Status.UNAUTHORIZED, e.getMessage());
}
}
|
@Test
void certificate_is_accepted() {
DiscFilterRequest request = FilterTestUtils.newRequestBuilder().withClientCertificate(CERTIFICATE).build();
ResponseHandlerMock responseHandler = new ResponseHandlerMock();
AthenzPrincipalFilter filter = createFilter(false);
filter.filter(request, responseHandler);
AthenzPrincipal expectedPrincipal = new AthenzPrincipal(IDENTITY);
assertAuthenticated(request, expectedPrincipal);
}
|
@Override
public ProcessingResult process(ReplicationTask task) {
try {
EurekaHttpResponse<?> httpResponse = task.execute();
int statusCode = httpResponse.getStatusCode();
Object entity = httpResponse.getEntity();
if (logger.isDebugEnabled()) {
logger.debug("Replication task {} completed with status {}, (includes entity {})", task.getTaskName(), statusCode, entity != null);
}
if (isSuccess(statusCode)) {
task.handleSuccess();
} else if (statusCode == 503) {
logger.debug("Server busy (503) reply for task {}", task.getTaskName());
return ProcessingResult.Congestion;
} else {
task.handleFailure(statusCode, entity);
return ProcessingResult.PermanentError;
}
} catch (Throwable e) {
if (maybeReadTimeOut(e)) {
logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e);
        // a read timeout is closer to Congestion than TransientError; return Congestion for a longer retry delay
return ProcessingResult.Congestion;
} else if (isNetworkConnectException(e)) {
logNetworkErrorSample(task, e);
return ProcessingResult.TransientError;
} else {
logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception",
peerId, task.getTaskName(), e);
return ProcessingResult.PermanentError;
}
}
return ProcessingResult.Success;
}
|
@Test
public void testNonBatchableTaskExecution() throws Exception {
TestableInstanceReplicationTask task = aReplicationTask().withAction(Action.Heartbeat).withReplyStatusCode(200).build();
ProcessingResult status = replicationTaskProcessor.process(task);
assertThat(status, is(ProcessingResult.Success));
}
|
public synchronized ClientInstanceIds clientInstanceIds(final Duration timeout) {
if (timeout.isNegative()) {
throw new IllegalArgumentException("The timeout cannot be negative.");
}
if (state().hasNotStarted()) {
throw new IllegalStateException("KafkaStreams has not been started, you can retry after calling start().");
}
if (state().isShuttingDown() || state.hasCompletedShutdown()) {
throw new IllegalStateException("KafkaStreams has been stopped (" + state + ").");
}
final Timer remainingTime = time.timer(timeout.toMillis());
final ClientInstanceIdsImpl clientInstanceIds = new ClientInstanceIdsImpl();
// (1) fan-out calls to threads
// StreamThread for main/restore consumers and producer(s)
final Map<String, KafkaFuture<Uuid>> consumerFutures = new HashMap<>();
final Map<String, KafkaFuture<Map<String, KafkaFuture<Uuid>>>> producerFutures = new HashMap<>();
synchronized (changeThreadCount) {
for (final StreamThread streamThread : threads) {
consumerFutures.putAll(streamThread.consumerClientInstanceIds(timeout));
producerFutures.put(streamThread.getName(), streamThread.producersClientInstanceIds(timeout));
}
}
// GlobalThread
KafkaFuture<Uuid> globalThreadFuture = null;
if (globalStreamThread != null) {
globalThreadFuture = globalStreamThread.globalConsumerInstanceId(timeout);
}
// (2) get admin client instance id in a blocking fashion, while Stream/GlobalThreads work in parallel
try {
clientInstanceIds.setAdminInstanceId(adminClient.clientInstanceId(timeout));
remainingTime.update(time.milliseconds());
} catch (final IllegalStateException telemetryDisabledError) {
// swallow
log.debug("Telemetry is disabled on the admin client.");
} catch (final TimeoutException timeoutException) {
throw timeoutException;
} catch (final Exception error) {
throw new StreamsException("Could not retrieve admin client instance id.", error);
}
// (3) collect client instance ids from threads
// (3a) collect consumers from StreamsThread
for (final Map.Entry<String, KafkaFuture<Uuid>> consumerFuture : consumerFutures.entrySet()) {
final Uuid instanceId = getOrThrowException(
consumerFuture.getValue(),
remainingTime.remainingMs(),
() -> String.format(
"Could not retrieve consumer instance id for %s.",
consumerFuture.getKey()
)
);
remainingTime.update(time.milliseconds());
// could be `null` if telemetry is disabled on the consumer itself
if (instanceId != null) {
clientInstanceIds.addConsumerInstanceId(
consumerFuture.getKey(),
instanceId
);
} else {
log.debug(String.format("Telemetry is disabled for %s.", consumerFuture.getKey()));
}
}
// (3b) collect producers from StreamsThread
for (final Map.Entry<String, KafkaFuture<Map<String, KafkaFuture<Uuid>>>> threadProducerFuture : producerFutures.entrySet()) {
final Map<String, KafkaFuture<Uuid>> streamThreadProducerFutures = getOrThrowException(
threadProducerFuture.getValue(),
remainingTime.remainingMs(),
() -> String.format(
"Could not retrieve producer instance id for %s.",
threadProducerFuture.getKey()
)
);
remainingTime.update(time.milliseconds());
for (final Map.Entry<String, KafkaFuture<Uuid>> producerFuture : streamThreadProducerFutures.entrySet()) {
final Uuid instanceId = getOrThrowException(
producerFuture.getValue(),
remainingTime.remainingMs(),
() -> String.format(
"Could not retrieve producer instance id for %s.",
producerFuture.getKey()
)
);
remainingTime.update(time.milliseconds());
// could be `null` if telemetry is disabled on the producer itself
if (instanceId != null) {
clientInstanceIds.addProducerInstanceId(
producerFuture.getKey(),
instanceId
);
} else {
log.debug(String.format("Telemetry is disabled for %s.", producerFuture.getKey()));
}
}
}
// (3c) collect from GlobalThread
if (globalThreadFuture != null) {
final Uuid instanceId = getOrThrowException(
globalThreadFuture,
remainingTime.remainingMs(),
() -> "Could not retrieve global consumer client instance id."
);
remainingTime.update(time.milliseconds());
// could be `null` if telemetry is disabled on the client itself
if (instanceId != null) {
clientInstanceIds.addConsumerInstanceId(
globalStreamThread.getName(),
instanceId
);
} else {
log.debug("Telemetry is disabled for the global consumer.");
}
}
return clientInstanceIds;
}
|
@Test
public void shouldThrowOnClientInstanceIdsWithNegativeTimeout() {
prepareStreams();
prepareStreamThread(streamThreadOne, 1);
prepareStreamThread(streamThreadTwo, 2);
try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier, time)) {
final IllegalArgumentException error = assertThrows(
IllegalArgumentException.class,
() -> streams.clientInstanceIds(Duration.ofMillis(-1L))
);
assertThat(
error.getMessage(),
equalTo("The timeout cannot be negative.")
);
}
}
|
public List<Attendee> availableAttendeesOf(List<Schedule> schedules) {
Map<Attendee, Long> groupAttendeeByScheduleCount = schedules.stream()
.filter(this::isScheduleWithinDateTimeRange)
.collect(groupingBy(Schedule::getAttendee, counting()));
long confirmedTimeSlotCount = countTimeSlotOfConfirmedMeeting();
return groupAttendeeByScheduleCount.keySet().stream()
.filter(key -> groupAttendeeByScheduleCount.get(key) == confirmedTimeSlotCount)
.toList();
}
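A stream sketch of the counting idea used above (illustrative, over plain strings rather than the domain types): group elements, count per key, and keep only keys whose count reaches the required number.
Map<String, Long> countsByKey = Stream.of("a", "a", "a", "b")
        .collect(Collectors.groupingBy(s -> s, Collectors.counting()));
long requiredCount = 3;
List<String> fullyAvailable = countsByKey.entrySet().stream()
        .filter(entry -> entry.getValue() == requiredCount)
        .map(Map.Entry::getKey)
        .toList(); // ["a"]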
|
@DisplayName("확정된 약속의 범위에 포함되는 스케줄들 중 참석 가능한 참석자들을 반환한다.")
@Test
void availableAttendeesOf() {
Meeting meeting = MeetingFixture.MOVIE.create();
Attendee attendee1 = AttendeeFixture.GUEST_MARK.create(meeting);
Attendee attendee2 = AttendeeFixture.HOST_JAZZ.create(meeting);
LocalDate today = LocalDate.now();
ConfirmedMeeting confirmedMeeting = new ConfirmedMeeting(
meeting
, LocalDateTime.of(today, LocalTime.of(0, 0))
, LocalDateTime.of(today, LocalTime.of(1, 0))
);
List<Schedule> schedules = List.of(
new Schedule(attendee1, new AvailableDate(today, meeting), Timeslot.TIME_0000),
new Schedule(attendee1, new AvailableDate(today, meeting), Timeslot.TIME_0030),
new Schedule(attendee1, new AvailableDate(today, meeting), Timeslot.TIME_0100),
new Schedule(attendee2, new AvailableDate(today, meeting), Timeslot.TIME_0000)
);
List<Attendee> attendees = confirmedMeeting.availableAttendeesOf(schedules);
assertAll(
() -> assertThat(attendees).hasSize(1),
() -> assertThat(attendees).containsExactly(attendee1)
);
}
|
@Override
public String buildContext() {
final ResourceDO after = (ResourceDO) getAfter();
if (Objects.isNull(getBefore())) {
return String.format("the resource [%s] is %s", after.getTitle(), StringUtils.lowerCase(getType().getType().toString()));
}
return String.format("the resource [%s] is %s : %s", after.getTitle(), StringUtils.lowerCase(getType().getType().toString()), contrast());
}
|
@Test
public void resourceDeleteBuildContextTest() {
ResourceChangedEvent resourceDeleteEvent = new ResourceChangedEvent(after, after, EventTypeEnum.RESOURCE_DELETE, "test-operator");
String typeStr = StringUtils.lowerCase(EventTypeEnum.RESOURCE_DELETE.getType().toString());
String context = String.format("the resource [%s] is %s : %s", after.getTitle(), typeStr, "it no change");
assertEquals(context, resourceDeleteEvent.buildContext());
}
|
@Override
public GetResourceProfileResponse getResourceProfile(
GetResourceProfileRequest request) throws YarnException, IOException {
if (request == null || request.getProfileName() == null) {
routerMetrics.incrGetResourceProfileFailedRetrieved();
String msg = "Missing getResourceProfile request or profileName.";
RouterAuditLogger.logFailure(user.getShortUserName(), GET_RESOURCEPROFILE, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, null);
}
long startTime = clock.getTime();
ClientMethod remoteMethod = new ClientMethod("getResourceProfile",
new Class[] {GetResourceProfileRequest.class}, new Object[] {request});
Collection<GetResourceProfileResponse> resourceProfile = null;
try {
resourceProfile = invokeConcurrent(remoteMethod, GetResourceProfileResponse.class);
} catch (Exception ex) {
routerMetrics.incrGetResourceProfileFailedRetrieved();
String msg = "Unable to get resource profile due to exception.";
RouterAuditLogger.logFailure(user.getShortUserName(), GET_RESOURCEPROFILE, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, ex);
}
long stopTime = clock.getTime();
routerMetrics.succeededGetResourceProfileRetrieved(stopTime - startTime);
RouterAuditLogger.logSuccess(user.getShortUserName(), GET_RESOURCEPROFILE,
TARGET_CLIENT_RM_SERVICE);
return RouterYarnClientUtils.mergeClusterResourceProfileResponse(resourceProfile);
}
|
@Test
public void testGetResourceProfile() throws Exception {
LOG.info("Test FederationClientInterceptor : Get Resource Profile request.");
// null request
LambdaTestUtils.intercept(YarnException.class,
"Missing getResourceProfile request or profileName.",
() -> interceptor.getResourceProfile(null));
// normal request
GetResourceProfileRequest request = GetResourceProfileRequest.newInstance("maximum");
GetResourceProfileResponse response = interceptor.getResourceProfile(request);
Assert.assertNotNull(response);
Assert.assertEquals(32768, response.getResource().getMemorySize());
Assert.assertEquals(16, response.getResource().getVirtualCores());
GetResourceProfileRequest request2 = GetResourceProfileRequest.newInstance("default");
GetResourceProfileResponse response2 = interceptor.getResourceProfile(request2);
Assert.assertNotNull(response2);
Assert.assertEquals(8192, response2.getResource().getMemorySize());
Assert.assertEquals(8, response2.getResource().getVirtualCores());
GetResourceProfileRequest request3 = GetResourceProfileRequest.newInstance("minimum");
GetResourceProfileResponse response3 = interceptor.getResourceProfile(request3);
Assert.assertNotNull(response3);
Assert.assertEquals(4096, response3.getResource().getMemorySize());
Assert.assertEquals(4, response3.getResource().getVirtualCores());
}
|
UuidGenerator loadUuidGenerator() {
Class<? extends UuidGenerator> objectFactoryClass = options.getUuidGeneratorClass();
ClassLoader classLoader = classLoaderSupplier.get();
ServiceLoader<UuidGenerator> loader = ServiceLoader.load(UuidGenerator.class, classLoader);
if (objectFactoryClass == null) {
return loadSingleUuidGeneratorOrDefault(loader);
}
return loadSelectedUuidGenerator(loader, objectFactoryClass);
}
|
@Test
void test_case_13() {
Options options = () -> null;
UuidGeneratorServiceLoader loader = new UuidGeneratorServiceLoader(
() -> new ServiceLoaderTestClassLoader(UuidGenerator.class,
IncrementingUuidGenerator.class),
options);
assertThat(loader.loadUuidGenerator(), instanceOf(IncrementingUuidGenerator.class));
}
|
public static String loadText(InputStream in) throws IOException {
return new String(in.readAllBytes());
}
|
@Test
public void testFileToString() throws Exception {
File file = ResourceUtils.getResourceAsFile("filecontent/a.txt");
assertEquals("dk19i21)@+#(OR", PackageHelper.loadText(file));
}
|
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
if(proxy.isSupported(source, target)) {
return proxy.copy(source, target, status, callback, listener);
}
// Copy between encrypted and unencrypted data room
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(target))) {
// File key must be set for new upload
status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
}
final Path result = copy.copy(source, target, status, callback, listener);
nodeid.cache(target, null);
return result.withAttributes(new SDSAttributesFinderFeature(session, nodeid).find(result));
}
|
@Test
public void testCopyToEncryptedDataRoom() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final Path room1 = new SDSDirectoryFeature(session, nodeid).createRoom(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
final Path room2 = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final byte[] content = RandomUtils.nextBytes(32769);
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final Path test = new Path(room2, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid);
final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
final Path target = new Path(room1, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final SDSCopyFeature proxy = new SDSCopyFeature(session, nodeid);
final SDSDelegatingCopyFeature feature = new SDSDelegatingCopyFeature(session, nodeid, proxy);
assertNotNull(feature.copy(test, target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback(), new DisabledStreamListener()).attributes().getVersionId());
assertFalse(proxy.isSupported(test, target));
assertTrue(feature.isSupported(test, target));
assertTrue(new SDSFindFeature(session, nodeid).find(test));
assertTrue(new SDSFindFeature(session, nodeid).find(target));
final byte[] compare = new byte[content.length];
final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(target, new TransferStatus().withLength(content.length), new DisabledConnectionCallback() {
@Override
public void warn(final Host bookmark, final String title, final String message, final String defaultButton, final String cancelButton, final String preference) {
//
}
@Override
public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
return new VaultCredentials("eth[oh8uv4Eesij");
}
});
IOUtils.readFully(stream, compare);
stream.close();
assertArrayEquals(content, compare);
new SDSDeleteFeature(session, nodeid).delete(Arrays.asList(room1, room2), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
@Nonnull
public <T> Future<T> submit(@Nonnull Callable<T> task) {
throwRejectedExecutionExceptionIfShutdown();
try {
T result = task.call();
return new CompletedFuture<>(result, null);
} catch (Exception e) {
return new CompletedFuture<>(null, e);
}
}
|
@Test
void testSubmitCallableWithNoopShutdown() {
final CompletableFuture<Thread> future = new CompletableFuture<>();
testWithNoopShutdown(testInstance -> testInstance.submit(callableFromFuture(future)));
assertThat(future).isCompletedWithValue(Thread.currentThread());
}
|
public static TimeUnit getMetricsRateUnit(Map<String, Object> daemonConf) {
return getTimeUnitForConfig(daemonConf, Config.STORM_DAEMON_METRICS_REPORTER_PLUGIN_RATE_UNIT);
}
|
@Test
public void getMetricsRateUnit() {
Map<String, Object> daemonConf = new HashMap<>();
assertNull(MetricsUtils.getMetricsRateUnit(daemonConf));
daemonConf.put(Config.STORM_DAEMON_METRICS_REPORTER_PLUGIN_RATE_UNIT, "SECONDS");
assertEquals(TimeUnit.SECONDS, MetricsUtils.getMetricsRateUnit(daemonConf));
daemonConf.put(Config.STORM_DAEMON_METRICS_REPORTER_PLUGIN_RATE_UNIT, "MINUTES");
assertEquals(TimeUnit.MINUTES, MetricsUtils.getMetricsRateUnit(daemonConf));
}
|
public Set<Long> getSkipTokens() {
return this.skipTokens;
}
|
@Test
public void testSkipTokens() {
Set<Long> skipTokens = embedder.getSkipTokens();
assertTrue(skipTokens.contains(999L));
assertTrue(skipTokens.contains(1000L));
assertTrue(skipTokens.contains(1001L));
assertTrue(skipTokens.contains(1002L));
assertTrue(skipTokens.contains(1003L));
assertTrue(skipTokens.contains(1031L));
}
|
@Override
public void updateSubnet(Subnet osSubnet) {
checkNotNull(osSubnet, ERR_NULL_SUBNET);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getId()), ERR_NULL_SUBNET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getNetworkId()), ERR_NULL_SUBNET_NET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getCidr()), ERR_NULL_SUBNET_CIDR);
osNetworkStore.updateSubnet(osSubnet);
log.info(String.format(MSG_SUBNET, osSubnet.getCidr(), MSG_UPDATED));
}
|
@Test(expected = NullPointerException.class)
public void testUpdateSubnetWithNull() {
target.updateSubnet(null);
}
|
@Override
public PTransform<PCollection<Row>, PCollectionTuple> buildTransform(
FileWriteSchemaTransformConfiguration configuration, Schema schema) {
return new PTransform<PCollection<Row>, PCollectionTuple>() {
@Override
public PCollectionTuple expand(PCollection<Row> input) {
FileWriteSchemaTransformConfiguration.CsvConfiguration csvConfiguration =
getCSVConfiguration(configuration);
CSVFormat csvFormat =
CSVFormat.Predefined.valueOf(csvConfiguration.getPredefinedCsvFormat()).getFormat();
CsvIO.Write<Row> write =
CsvIO.writeRows(configuration.getFilenamePrefix(), csvFormat).withSuffix(suffix);
if (configuration.getCompression() != null) {
write = write.withCompression(getCompression(configuration));
}
if (configuration.getNumShards() != null) {
int numShards = getNumShards(configuration);
          // Python SDK external transforms do not support null values, requiring an additional check.
if (numShards > 0) {
write = write.withNumShards(numShards);
}
}
if (!Strings.isNullOrEmpty(configuration.getShardNameTemplate())) {
write = write.withShardTemplate(getShardNameTemplate(configuration));
}
if (!Strings.isNullOrEmpty(configuration.getFilenameSuffix())) {
write = write.withSuffix(getFilenameSuffix(configuration));
}
WriteFilesResult<String> result = input.apply("Row to CSV", write);
PCollection<String> output =
result
.getPerDestinationOutputFilenames()
.apply("perDestinationOutputFilenames", Values.create());
return PCollectionTuple.of(RESULT_TAG, output);
}
};
}
|
@Test
public void timeContainingSchemaWithListRemovedShouldWriteCSV() {
String prefix =
folder(TimeContaining.class, "timeContainingSchemaWithListRemovedShouldWriteCSV");
String validField = "instant";
PCollection<Row> input =
writePipeline.apply(
Create.of(DATA.timeContainingRows).withRowSchema(TIME_CONTAINING_SCHEMA));
PCollection<Row> modifiedInput = input.apply(Select.fieldNames(validField));
Schema modifiedSchema = modifiedInput.getSchema();
FileWriteSchemaTransformConfiguration configuration = buildConfiguration(prefix);
PCollection<String> result =
modifiedInput
.apply(getProvider().buildTransform(configuration, modifiedSchema))
.get(RESULT_TAG);
PCollection<Long> numFiles = result.apply(Count.globally());
PAssert.thatSingleton(numFiles).isEqualTo(1L);
writePipeline.run().waitUntilFinish();
PCollection<String> csv =
readPipeline.apply(TextIO.read().from(configuration.getFilenamePrefix() + "*"));
List<String> expected = new ArrayList<>();
expected.add(validField);
DateTimeFormatter formatter = ISODateTimeFormat.dateTime();
for (Row row : DATA.timeContainingRows) {
expected.add(formatter.print(row.getDateTime(validField)));
}
PAssert.that(csv).containsInAnyOrder(expected);
readPipeline.run();
}
|
public static Configuration unix() {
return UnixHolder.UNIX;
}
|
@Test
public void testDefaultUnixConfiguration() {
Configuration config = Configuration.unix();
assertThat(config.pathType).isEqualTo(PathType.unix());
assertThat(config.roots).containsExactly("/");
assertThat(config.workingDirectory).isEqualTo("/work");
assertThat(config.nameCanonicalNormalization).isEmpty();
assertThat(config.nameDisplayNormalization).isEmpty();
assertThat(config.pathEqualityUsesCanonicalForm).isFalse();
assertThat(config.blockSize).isEqualTo(8192);
assertThat(config.maxSize).isEqualTo(4L * 1024 * 1024 * 1024);
assertThat(config.maxCacheSize).isEqualTo(-1);
assertThat(config.attributeViews).containsExactly("basic");
assertThat(config.attributeProviders).isEmpty();
assertThat(config.defaultAttributeValues).isEmpty();
assertThat(config.fileTimeSource).isEqualTo(SystemFileTimeSource.INSTANCE);
}
|
void importPlaylistItems(
List<MusicPlaylistItem> playlistItems,
IdempotentImportExecutor executor,
UUID jobId,
TokensAndUrlAuthData authData)
throws Exception {
if (playlistItems != null && !playlistItems.isEmpty()) {
Map<String, List<MusicPlaylistItem>> playlistItemsByPlaylist =
playlistItems.stream()
.filter(playlistItem -> !executor.isKeyCached(playlistItem.toString()))
.collect(Collectors.groupingBy(MusicPlaylistItem::getPlaylistId));
for (Entry<String, List<MusicPlaylistItem>> playlistEntry :
playlistItemsByPlaylist.entrySet()) {
String originalPlaylistId = playlistEntry.getKey();
UnmodifiableIterator<List<MusicPlaylistItem>> batches =
Iterators.partition(playlistEntry.getValue().iterator(), PLAYLIST_ITEM_BATCH_SIZE);
while (batches.hasNext()) {
importPlaylistItemBatch(jobId, authData, batches.next(), executor, originalPlaylistId);
}
}
}
}
|
@Test
public void importPlaylistItemsCreatePlaylistFailure() throws Exception {
MusicPlaylistItem playlistItem1 =
new MusicPlaylistItem(
new MusicRecording(
"item1_isrc", null, 180000L, new MusicRelease("r1_icpn", null, null), null, false),
"p1_id",
1);
MusicPlaylistItem playlistItem2 =
new MusicPlaylistItem(
new MusicRecording(
"item2_isrc", null, 180000L, new MusicRelease("r1_icpn", null, null), null, false),
"p1_id",
1);
// Run test
googleMusicImporter.importPlaylistItems(
Lists.newArrayList(playlistItem1, playlistItem2), executor, uuid, null);
// Expected executor to have two errors
assertThat(executor.getErrors()).hasSize(2);
ErrorDetail errorDetail = executor.getErrors().iterator().next();
assertEquals(String.valueOf(playlistItem1), errorDetail.id());
assertThat(errorDetail.exception()).contains("Fail to create Playlist p1_id");
}
|
public static void checkNotNullAndNotEmpty(@Nullable String value, String propertyName) {
Preconditions.checkNotNull(value, "Property '" + propertyName + "' cannot be null");
Preconditions.checkArgument(
!value.trim().isEmpty(), "Property '" + propertyName + "' cannot be an empty string");
}
|
@Test
public void testCheckNotEmpty_collectionPass() {
Validator.checkNotNullAndNotEmpty(ImmutableList.of("value"), "ignored");
// pass
}
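A complementary negative case for the String overload shown above could be sketched as follows; the assertThrows helper and the property name are assumptions, not part of the original test class:
@Test
public void testCheckNotNullAndNotEmpty_blankStringFails() {
  // a sketch: the String overload above should reject a blank (whitespace-only) value
  assertThrows(IllegalArgumentException.class,
      () -> Validator.checkNotNullAndNotEmpty("   ", "someProperty"));
}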
|
public String getFilepath() {
return filepath;
}
|
@Test
public void testConstructorMessageAndFilepath() {
try {
throw new KettleFileNotFoundException( errorMessage, filepath );
} catch ( KettleFileNotFoundException e ) {
assertEquals( null, e.getCause() );
assertTrue( e.getMessage().contains( errorMessage ) );
assertEquals( filepath, e.getFilepath() );
}
}
|
public RepeatRegistrationException(String message) {
super(message);
}
|
@Test
void testRepeatRegistrationException() {
assertAll(
() -> assertThrowsExactly(RepeatRegistrationException.class, () -> {
throw new RepeatRegistrationException("error");
}),
() -> assertThrowsExactly(RepeatRegistrationException.class, () -> {
throw new RepeatRegistrationException("error", new Throwable("error"));
}),
() -> assertThrowsExactly(RepeatRegistrationException.class, () -> {
throw new RepeatRegistrationException(new Throwable("error"));
})
);
}
|
public static Packet ensureUniqueAndStableStanzaID( final Packet packet, final JID self )
{
if ( !JiveGlobals.getBooleanProperty( "xmpp.sid.enabled", true ) )
{
return packet;
}
if ( packet instanceof IQ && !JiveGlobals.getBooleanProperty( "xmpp.sid.iq.enabled", false ) )
{
return packet;
}
if ( packet instanceof Message && !JiveGlobals.getBooleanProperty( "xmpp.sid.message.enabled", true ) )
{
return packet;
}
if ( packet instanceof Presence && !JiveGlobals.getBooleanProperty( "xmpp.sid.presence.enabled", false ) )
{
return packet;
}
final Element parentElement;
if ( packet instanceof IQ ) {
parentElement = ((IQ) packet).getChildElement();
} else {
parentElement = packet.getElement();
}
// The packet is likely an IQ result or error, which can, but is not required to, have a child element.
// To keep the behavior consistent for these, we do not add a stanza-id here.
if ( parentElement == null )
{
Log.debug( "Unable to find appropriate element. Not adding stanza-id to packet: {}", packet );
return packet;
}
// Stanza ID generating entities, which encounter a <stanza-id/> element where the 'by' attribute matches the 'by'
// attribute they would otherwise set, MUST delete that element even if they are not adding their own stanza ID.
final Iterator<Element> existingElementIterator = parentElement.elementIterator( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
while (existingElementIterator.hasNext()) {
final Element element = existingElementIterator.next();
if (self.toString().equals( element.attributeValue( "by" ) ) ) {
Log.warn( "Removing a 'stanza-id' element from an inbound stanza, as its 'by' attribute value matches the value that we would set. Offending stanza: {}", packet );
existingElementIterator.remove();
}
}
final String id = UUID.randomUUID().toString();
Log.debug( "Using newly generated value '{}' for stanza that has id '{}'.", id, packet.getID() );
final Element stanzaIdElement = parentElement.addElement( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
stanzaIdElement.addAttribute( "id", id );
stanzaIdElement.addAttribute( "by", self.toString() );
return packet;
}
|
@Test
public void testGeneratesStanzaIDElement() throws Exception
{
// Setup fixture.
final Packet input = new Message();
final JID self = new JID( "foobar" );
// Execute system under test.
final Packet result = StanzaIDUtil.ensureUniqueAndStableStanzaID( input, self );
// Verify results.
assertNotNull( result );
final Element stanzaIDElement = result.getElement().element( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
assertNotNull( stanzaIDElement );
assertDoesNotThrow(() -> UUID.fromString( stanzaIDElement.attributeValue( "id" ) ));
assertEquals( self.toString(), stanzaIDElement.attributeValue( "by" ) );
}
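The removal branch of ensureUniqueAndStableStanzaID (a pre-existing stanza-id whose 'by' value matches our own) could be exercised with a sketch like the one below; the java.util.List and assertNotEquals imports are assumed:
@Test
public void testReplacesStanzaIDElementWithMatchingBy() throws Exception
{
    // Setup fixture: a stanza that already carries a stanza-id with our own 'by' value.
    final Message input = new Message();
    final JID self = new JID( "foobar" );
    final Element preExisting = input.getElement().addElement( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    preExisting.addAttribute( "id", "not-a-uuid" );
    preExisting.addAttribute( "by", self.toString() );
    // Execute system under test.
    final Packet result = StanzaIDUtil.ensureUniqueAndStableStanzaID( input, self );
    // Verify results: the old element is gone and exactly one freshly generated id remains.
    final List<Element> elements = result.getElement().elements( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    assertEquals( 1, elements.size() );
    assertNotEquals( "not-a-uuid", elements.get( 0 ).attributeValue( "id" ) );
}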
|
public String getType() {
return type;
}
|
@Test
void getType_shouldReturnTypePassedInConstructor() {
Notification notification = new Notification("type");
assertThat(notification.getType()).isEqualTo("type");
}
|
public EndpointResponse streamQuery(
final KsqlSecurityContext securityContext,
final KsqlRequest request,
final CompletableFuture<Void> connectionClosedFuture,
final Optional<Boolean> isInternalRequest,
final MetricsCallbackHolder metricsCallbackHolder,
final Context context
) {
throwIfNotConfigured();
activenessRegistrar.updateLastRequestTime();
final PreparedStatement<?> statement = parseStatement(request);
CommandStoreUtil.httpWaitForCommandSequenceNumber(
commandQueue, request, commandQueueCatchupTimeout);
return handleStatement(securityContext, request, statement, connectionClosedFuture,
isInternalRequest, metricsCallbackHolder, context);
}
|
@Test
public void shouldThrowOnHandleStatementIfNotConfigured() {
// Given:
testResource = new StreamedQueryResource(
mockKsqlEngine,
ksqlRestConfig,
mockStatementParser,
commandQueue,
DISCONNECT_CHECK_INTERVAL,
COMMAND_QUEUE_CATCHUP_TIMOEUT,
activenessRegistrar,
Optional.of(authorizationValidator),
errorsHandler,
denyListPropertyValidator,
queryExecutor
);
when(mockKsqlEngine.getKsqlConfig()).thenReturn(KsqlConfig.empty());
// When:
final KsqlRestException e = assertThrows(
KsqlRestException.class,
() -> testResource.streamQuery(
securityContext,
new KsqlRequest("query", Collections.emptyMap(), Collections.emptyMap(), null),
new CompletableFuture<>(),
Optional.empty(),
new MetricsCallbackHolder(),
context
)
);
// Then:
assertThat(e, exceptionStatusCode(is(SERVICE_UNAVAILABLE.code())));
assertThat(e, exceptionErrorMessage(errorMessage(Matchers.is("Server initializing"))));
}
|
@Override
public ObjectName createName(String type, String domain, String name) {
try {
ObjectName objectName;
Hashtable<String, String> properties = new Hashtable<>();
properties.put("name", name);
properties.put("type", type);
objectName = new ObjectName(domain, properties);
/*
* The only way we can find out if we need to quote the properties is by
* checking an ObjectName that we've constructed.
*/
if (objectName.isDomainPattern()) {
domain = ObjectName.quote(domain);
}
if (objectName.isPropertyValuePattern("name") || shouldQuote(objectName.getKeyProperty("name"))) {
properties.put("name", ObjectName.quote(name));
}
if (objectName.isPropertyValuePattern("type") || shouldQuote(objectName.getKeyProperty("type"))) {
properties.put("type", ObjectName.quote(type));
}
objectName = new ObjectName(domain, properties);
return objectName;
} catch (MalformedObjectNameException e) {
try {
return new ObjectName(domain, "name", ObjectName.quote(name));
} catch (MalformedObjectNameException e1) {
LOGGER.warn("Unable to register {} {}", type, name, e1);
throw new RuntimeException(e1);
}
}
}
|
@Test
public void createsObjectNameWithNameAsKeyPropertyName() {
DefaultObjectNameFactory f = new DefaultObjectNameFactory();
ObjectName on = f.createName("type", "com.domain", "something.with.dots");
assertThat(on.getKeyProperty("name")).isEqualTo("something.with.dots");
}
|
public static String canonicalNameToJvmName(String canonicalName) {
boolean isArray = canonicalName.endsWith("[]");
if (isArray) {
String t = ""; // 计数,看上几维数组
while (isArray) {
canonicalName = canonicalName.substring(0, canonicalName.length() - 2);
t += "[";
isArray = canonicalName.endsWith("[]");
}
if ("boolean".equals(canonicalName)) {
canonicalName = t + "Z";
} else if ("byte".equals(canonicalName)) {
canonicalName = t + "B";
} else if ("char".equals(canonicalName)) {
canonicalName = t + "C";
} else if ("double".equals(canonicalName)) {
canonicalName = t + "D";
} else if ("float".equals(canonicalName)) {
canonicalName = t + "F";
} else if ("int".equals(canonicalName)) {
canonicalName = t + "I";
} else if ("long".equals(canonicalName)) {
canonicalName = t + "J";
} else if ("short".equals(canonicalName)) {
canonicalName = t + "S";
} else {
canonicalName = t + "L" + canonicalName + ";";
}
}
return canonicalName;
}
|
@Test
public void canonicalNameToJvmName() throws Exception {
    // a sketch, assuming the enclosing utility class under test is ClassTypeUtils
    assertEquals("[I", ClassTypeUtils.canonicalNameToJvmName("int[]"));
    assertEquals("[[Ljava.lang.String;", ClassTypeUtils.canonicalNameToJvmName("java.lang.String[][]"));
}
|
public CounterProducer(MetricsEndpoint endpoint) {
super(endpoint);
}
|
@Test
public void testCounterProducer() {
assertThat(producer.getEndpoint(), is(endpoint));
}
|
public static boolean isOriginalOrder(List<Integer> joinOrder)
{
for (int i = 0; i < joinOrder.size(); i++) {
if (joinOrder.get(i) != i) {
return false;
}
}
return true;
}
|
@Test
public void testIsOriginalOrder()
{
assertTrue(isOriginalOrder(ImmutableList.of(0, 1, 2, 3, 4)));
assertFalse(isOriginalOrder(ImmutableList.of(0, 2, 1, 3, 4)));
}
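As an edge case, the loop never executes for an empty list, so an empty join order counts as original; a minimal sketch reusing the same static imports:
@Test
public void testIsOriginalOrderEmptyList()
{
    assertTrue(isOriginalOrder(ImmutableList.of()));
}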
|
public String azimuth2compassPoint(double azimuth) {
String cp;
double slice = 360.0 / 16;
if (azimuth < slice) {
cp = "N";
} else if (azimuth < slice * 3) {
cp = "NE";
} else if (azimuth < slice * 5) {
cp = "E";
} else if (azimuth < slice * 7) {
cp = "SE";
} else if (azimuth < slice * 9) {
cp = "S";
} else if (azimuth < slice * 11) {
cp = "SW";
} else if (azimuth < slice * 13) {
cp = "W";
} else if (azimuth < slice * 15) {
cp = "NW";
} else {
cp = "N";
}
return cp;
}
|
@Test
public void testAzimuthCompassPoint() {
assertEquals("S", AC.azimuth2compassPoint(199));
}
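azimuth2compassPoint wraps back to "N" above 337.5 degrees; a few boundary checks could be sketched as follows, reusing the AC fixture from the test above:
@Test
public void testAzimuthCompassPointBoundaries() {
    assertEquals("N", AC.azimuth2compassPoint(0));
    assertEquals("NE", AC.azimuth2compassPoint(45));
    assertEquals("N", AC.azimuth2compassPoint(350));
}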
|
public GrpcChannel acquireChannel(GrpcNetworkGroup networkGroup,
GrpcServerAddress serverAddress, AlluxioConfiguration conf, boolean alwaysEnableTLS) {
GrpcChannelKey channelKey = getChannelKey(networkGroup, serverAddress, conf);
CountingReference<ManagedChannel> channelRef =
mChannels.compute(channelKey, (key, ref) -> {
boolean shutdownExistingConnection = false;
int existingRefCount = 0;
if (ref != null) {
// Connection exists, wait for health check.
if (waitForConnectionReady(ref.get(), conf)) {
LOG.debug("Acquiring an existing connection. ConnectionKey: {}. Ref-count: {}", key,
ref.getRefCount());
return ref.reference();
} else {
// Health check failed.
shutdownExistingConnection = true;
}
}
// Existing connection should be shutdown.
if (shutdownExistingConnection) {
existingRefCount = ref.getRefCount();
LOG.debug("Shutting down an existing unhealthy connection. "
+ "ConnectionKey: {}. Ref-count: {}", key, existingRefCount);
// Shutdown the channel forcefully as it's already unhealthy.
shutdownManagedChannel(ref.get());
}
// Create a new managed channel.
LOG.debug("Creating a new managed channel. ConnectionKey: {}. Ref-count:{},"
+ " alwaysEnableTLS:{} config TLS:{}", key, existingRefCount, alwaysEnableTLS,
conf.getBoolean(alluxio.conf.PropertyKey.NETWORK_TLS_ENABLED));
ManagedChannel managedChannel = createManagedChannel(channelKey, conf, alwaysEnableTLS);
// Set map reference.
return new CountingReference<>(managedChannel, existingRefCount).reference();
});
return new GrpcChannel(channelKey, channelRef.get());
}
|
@Test
public void testRoundRobin() throws Exception {
int streamingGroupSize =
sConf.getInt(PropertyKey.USER_NETWORK_STREAMING_MAX_CONNECTIONS);
try (CloseableTestServer server = createServer()) {
List<GrpcServerAddress> addresses = new ArrayList<>(streamingGroupSize);
// Create channel keys.
for (int i = 0; i < streamingGroupSize; i++) {
addresses.add(server.getConnectAddress());
}
// Acquire connections.
List<GrpcChannel> connections =
addresses.stream()
.map(address -> GrpcChannelPool.INSTANCE.acquireChannel(
GrpcNetworkGroup.STREAMING, address, sConf, false))
.collect(Collectors.toList());
// Validate all are different.
Assert.assertEquals(streamingGroupSize, connections.stream().distinct().count());
}
}
|
@ApiOperation(value = "Parse a processing pipeline without saving it")
@POST
@Path("/parse")
@NoAuditEvent("only used to parse a pipeline, no changes made in the system")
public PipelineSource parse(@ApiParam(name = "pipeline", required = true) @NotNull PipelineSource pipelineSource) throws ParseException {
final Pipeline pipeline;
try {
pipeline = pipelineRuleParser.parsePipeline(pipelineSource.id(), pipelineSource.source());
} catch (ParseException e) {
throw new BadRequestException(Response.status(Response.Status.BAD_REQUEST).entity(e.getErrors()).build());
}
final DateTime now = DateTime.now(DateTimeZone.UTC);
return PipelineSource.builder()
.title(pipeline.name())
.description(pipelineSource.description())
.source(pipelineSource.source())
.stages(pipeline.stages().stream()
.map(stage -> StageSource.create(
stage.stage(),
stage.match(),
stage.ruleReferences()))
.collect(Collectors.toList()))
.createdAt(now)
.modifiedAt(now)
.build();
}
|
@Test
public void shouldParseAPipelineSuccessfully() {
final PipelineSource pipelineSource = PipelineSource.builder()
.source("pipeline \"Graylog Git Pipline\"\nstage 0 match either\n" +
"rule \"geo loc of dev\"\nrule \"open source dev\"\nend")
.stages(Collections.emptyList())
.title("Graylog Git Pipeline")
.build();
final SortedSet<Stage> stages = ImmutableSortedSet.of(
Stage.builder()
.stage(0)
.ruleReferences(ImmutableList.of("geo loc of dev", "open source dev"))
.match(Stage.Match.EITHER)
.build()
);
final List<StageSource> expectedStages = ImmutableList.of(
StageSource.create(0, Stage.Match.EITHER, ImmutableList.of(
"geo loc of dev", "open source dev"
))
);
final Pipeline pipeline = Pipeline.builder()
.name("Graylog Git Pipeline")
.stages(stages)
.build();
when(pipelineRuleParser.parsePipeline(pipelineSource.id(), pipelineSource.source()))
.thenReturn(pipeline);
final PipelineSource result = this.pipelineResource.parse(pipelineSource);
verify(pipelineRuleParser).parsePipeline(pipelineSource.id(), pipelineSource.source());
assertThat(result.source()).isEqualTo(pipelineSource.source());
assertThat(result.stages()).isEqualTo(expectedStages);
}
|
@Override
public CompletableFuture<JobID> submitJob(@Nonnull JobGraph jobGraph) {
CompletableFuture<java.nio.file.Path> jobGraphFileFuture =
CompletableFuture.supplyAsync(
() -> {
try {
final java.nio.file.Path jobGraphFile =
Files.createTempFile(
"flink-jobgraph-" + jobGraph.getJobID(), ".bin");
try (ObjectOutputStream objectOut =
new ObjectOutputStream(
Files.newOutputStream(jobGraphFile))) {
objectOut.writeObject(jobGraph);
}
return jobGraphFile;
} catch (IOException e) {
throw new CompletionException(
new FlinkException("Failed to serialize JobGraph.", e));
}
},
executorService);
CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture =
jobGraphFileFuture.thenApply(
jobGraphFile -> {
List<String> jarFileNames = new ArrayList<>(8);
List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames =
new ArrayList<>(8);
Collection<FileUpload> filesToUpload = new ArrayList<>(8);
filesToUpload.add(
new FileUpload(
jobGraphFile, RestConstants.CONTENT_TYPE_BINARY));
for (Path jar : jobGraph.getUserJars()) {
jarFileNames.add(jar.getName());
filesToUpload.add(
new FileUpload(
Paths.get(jar.toUri()),
RestConstants.CONTENT_TYPE_JAR));
}
for (Map.Entry<String, DistributedCache.DistributedCacheEntry>
artifacts : jobGraph.getUserArtifacts().entrySet()) {
final Path artifactFilePath =
new Path(artifacts.getValue().filePath);
try {
// Only local artifacts need to be uploaded.
if (!artifactFilePath.getFileSystem().isDistributedFS()) {
artifactFileNames.add(
new JobSubmitRequestBody.DistributedCacheFile(
artifacts.getKey(),
artifactFilePath.getName()));
filesToUpload.add(
new FileUpload(
Paths.get(artifactFilePath.getPath()),
RestConstants.CONTENT_TYPE_BINARY));
}
} catch (IOException e) {
throw new CompletionException(
new FlinkException(
"Failed to get the FileSystem of artifact "
+ artifactFilePath
+ ".",
e));
}
}
final JobSubmitRequestBody requestBody =
new JobSubmitRequestBody(
jobGraphFile.getFileName().toString(),
jarFileNames,
artifactFileNames);
return Tuple2.of(
requestBody, Collections.unmodifiableCollection(filesToUpload));
});
final CompletableFuture<JobSubmitResponseBody> submissionFuture =
requestFuture.thenCompose(
requestAndFileUploads -> {
LOG.info(
"Submitting job '{}' ({}).",
jobGraph.getName(),
jobGraph.getJobID());
return sendRetriableRequest(
JobSubmitHeaders.getInstance(),
EmptyMessageParameters.getInstance(),
requestAndFileUploads.f0,
requestAndFileUploads.f1,
isConnectionProblemOrServiceUnavailable(),
(receiver, error) -> {
if (error != null) {
LOG.warn(
"Attempt to submit job '{}' ({}) to '{}' has failed.",
jobGraph.getName(),
jobGraph.getJobID(),
receiver,
error);
} else {
LOG.info(
"Successfully submitted job '{}' ({}) to '{}'.",
jobGraph.getName(),
jobGraph.getJobID(),
receiver);
}
});
});
submissionFuture
.exceptionally(ignored -> null) // ignore errors
.thenCompose(ignored -> jobGraphFileFuture)
.thenAccept(
jobGraphFile -> {
try {
Files.delete(jobGraphFile);
} catch (IOException e) {
LOG.warn("Could not delete temporary file {}.", jobGraphFile, e);
}
});
return submissionFuture
.thenApply(ignore -> jobGraph.getJobID())
.exceptionally(
(Throwable throwable) -> {
throw new CompletionException(
new JobSubmissionException(
jobGraph.getJobID(),
"Failed to submit JobGraph.",
ExceptionUtils.stripCompletionException(throwable)));
});
}
|
@Test
@Timeout(value = 120_000, unit = TimeUnit.MILLISECONDS)
void testJobSubmissionWithUserArtifact(@TempDir java.nio.file.Path temporaryPath)
throws Exception {
try (final TestRestServerEndpoint restServerEndpoint =
createRestServerEndpoint(new TestJobSubmitHandler())) {
try (RestClusterClient<?> restClusterClient =
createRestClusterClient(restServerEndpoint.getServerAddress().getPort())) {
File file = temporaryPath.resolve("hello.txt").toFile();
Files.write(file.toPath(), "hello world".getBytes(ConfigConstants.DEFAULT_CHARSET));
// Add file path with scheme
jobGraph.addUserArtifact(
"file",
new DistributedCache.DistributedCacheEntry(file.toURI().toString(), false));
// Add file path without scheme
jobGraph.addUserArtifact(
"file2",
new DistributedCache.DistributedCacheEntry(file.toURI().getPath(), false));
restClusterClient.submitJob(jobGraph).get();
}
}
}
|
@Override
public void getErrors(ErrorCollection errors, String parentLocation) {
String location = this.getLocation(parentLocation);
errors.checkMissing(location, "spec", spec);
}
|
@Test
public void shouldDeserializeFromAPILikeObject() {
String json = """
{
"spec": "0 0 22 ? * MON-FRI",
"only_on_changes": true
}""";
CRTimer deserializedValue = gson.fromJson(json, CRTimer.class);
assertThat(deserializedValue.getSpec(), is("0 0 22 ? * MON-FRI"));
assertThat(deserializedValue.isOnlyOnChanges(), is(true));
ErrorCollection errors = deserializedValue.getErrors();
assertTrue(errors.isEmpty());
}
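Because getErrors checks for a missing spec, a negative case could be sketched as below; it assumes that the no-arg getErrors() used above delegates to the override shown, and that an assertFalse import is available:
@Test
public void shouldReportErrorWhenSpecIsMissing() {
    String json = """
        {
          "only_on_changes": true
        }""";
    CRTimer deserializedValue = gson.fromJson(json, CRTimer.class);
    ErrorCollection errors = deserializedValue.getErrors();
    assertFalse(errors.isEmpty());
}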
|
@Override
public int fieldMetaIndex( int index ) {
return ( index >= fieldsCount || index < 0 ) ? FieldsMapping.FIELD_DOES_NOT_EXIST : index;
}
|
@Test
public void fieldMetaIndex() {
assertEquals( 1, fieldsMapping.fieldMetaIndex( 1 ) );
}
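Out-of-range indexes fall through to FIELD_DOES_NOT_EXIST; a sketch covering that branch, assuming the same fieldsMapping fixture and the FieldsMapping constant on the classpath:
@Test
public void fieldMetaIndexOutOfRange() {
  assertEquals( FieldsMapping.FIELD_DOES_NOT_EXIST, fieldsMapping.fieldMetaIndex( -1 ) );
  assertEquals( FieldsMapping.FIELD_DOES_NOT_EXIST, fieldsMapping.fieldMetaIndex( Integer.MAX_VALUE ) );
}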
|
@GetSize
public double getSize(
@Element SubscriptionPartition subscriptionPartition,
@Restriction OffsetByteRange restriction) {
if (restriction.getRange().getTo() != Long.MAX_VALUE) {
return restriction.getByteCount();
}
return newTracker(subscriptionPartition, restriction).getProgress().getWorkRemaining();
}
|
@Test
public void getProgressBoundedReturnsBytes() {
assertTrue(
DoubleMath.fuzzyEquals(
123.0,
sdf.getSize(PARTITION, OffsetByteRange.of(new OffsetRange(87, 8000), 123)),
.0001));
verifyNoInteractions(tracker);
}
|
static Properties loadPropertiesFile(File homeDir) {
Properties p = new Properties();
File propsFile = new File(new File(homeDir, "conf"), "sonar.properties");
if (propsFile.exists()) {
try (Reader reader = new InputStreamReader(new FileInputStream(propsFile), UTF_8)) {
p.load(reader);
return p;
} catch (IOException e) {
throw new IllegalStateException("Cannot open file " + propsFile, e);
}
} else {
throw new IllegalStateException("Configuration file not found: " + propsFile);
}
}
|
@Test
public void loadPropertiesFile_fails_with_ISE_if_sonar_properties_not_in_conf_dir() throws IOException {
File homeDir = temporaryFolder.newFolder();
assertThatThrownBy(() -> Shutdowner.loadPropertiesFile(homeDir))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Configuration file not found: " + new File(new File(homeDir, "conf"), "sonar.properties").getAbsolutePath());
}
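A happy-path counterpart could be sketched as follows; it reuses the temporaryFolder rule from the test above and assumes the Properties and AssertJ assertThat imports are available:
@Test
public void loadPropertiesFile_reads_existing_sonar_properties() throws IOException {
  File homeDir = temporaryFolder.newFolder();
  File confDir = new File(homeDir, "conf");
  assertThat(confDir.mkdirs()).isTrue();
  java.nio.file.Files.write(new File(confDir, "sonar.properties").toPath(),
      "sonar.web.port=9000".getBytes(java.nio.charset.StandardCharsets.UTF_8));
  Properties props = Shutdowner.loadPropertiesFile(homeDir);
  assertThat(props.getProperty("sonar.web.port")).isEqualTo("9000");
}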
|
@Override
public void open(Configuration parameters) throws Exception {
this.rateLimiterTriggeredCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
this.concurrentRunThrottledCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
this.nothingToTriggerCounter =
getRuntimeContext()
.getMetricGroup()
.addGroup(
TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
.counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
this.triggerCounters =
taskNames.stream()
.map(
name ->
getRuntimeContext()
.getMetricGroup()
.addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
.counter(TableMaintenanceMetrics.TRIGGERED))
.collect(Collectors.toList());
this.nextEvaluationTimeState =
getRuntimeContext()
.getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
this.accumulatedChangesState =
getRuntimeContext()
.getListState(
new ListStateDescriptor<>(
"triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
this.lastTriggerTimesState =
getRuntimeContext()
.getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
tableLoader.open();
}
|
@Test
void testDataFileCount() throws Exception {
TriggerManager manager =
manager(
sql.tableLoader(TABLE_NAME), new TriggerEvaluator.Builder().dataFileCount(3).build());
try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
harness(manager)) {
testHarness.open();
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(1).build(), 0);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(2).build(), 1);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(3).build(), 2);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(5).build(), 3);
// No trigger in this case
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(1).build(), 3);
addEventAndCheckResult(testHarness, TableChange.builder().dataFileCount(2).build(), 4);
}
}
|
@Override
public JavaKeyStore load(SecureConfig config) {
if (!exists(config)) {
throw new SecretStoreException.LoadException(
String.format("Can not find Logstash keystore at %s. Please verify this file exists and is a valid Logstash keystore.",
config.getPlainText("keystore.file") == null ? "<undefined>" : new String(config.getPlainText("keystore.file"))));
}
try {
init(config);
lock.lock();
try (final InputStream is = Files.newInputStream(keyStorePath)) {
try {
keyStore.load(is, this.keyStorePass);
} catch (IOException ioe) {
if (ioe.getCause() instanceof UnrecoverableKeyException) {
throw new SecretStoreException.AccessException(
String.format("Can not access Logstash keystore at %s. Please verify correct file permissions and keystore password.",
keyStorePath.toAbsolutePath()), ioe);
} else {
throw new SecretStoreException.LoadException(String.format("Found a file at %s, but it is not a valid Logstash keystore.",
keyStorePath.toAbsolutePath().toString()), ioe);
}
}
byte[] marker = retrieveSecret(LOGSTASH_MARKER);
if (marker == null) {
throw new SecretStoreException.LoadException(String.format("Found a keystore at %s, but it is not a Logstash keystore.",
keyStorePath.toAbsolutePath().toString()));
}
LOGGER.debug("Using existing keystore at {}", keyStorePath.toAbsolutePath());
return this;
}
} catch (SecretStoreException sse) {
throw sse;
} catch (Exception e) { //should never happen
throw new SecretStoreException.UnknownException("Error while trying to load the Logstash keystore", e);
} finally {
releaseLock(lock);
config.clearValues();
}
}
|
@Test
public void testNoPathDefined() {
assertThrows(SecretStoreException.LoadException.class, () -> {
new JavaKeyStore().load(new SecureConfig());
});
}
|
public RowExpression extract(PlanNode node)
{
return node.accept(new Visitor(domainTranslator, functionAndTypeManager), null);
}
|
@Test
public void testTopN()
{
PlanNode node = new TopNNode(
Optional.empty(),
newId(),
filter(baseTableScan,
and(
equals(AV, BV),
equals(BV, CV),
lessThan(CV, bigintLiteral(10)))),
1, new OrderingScheme(ImmutableList.of(new Ordering(AV, SortOrder.ASC_NULLS_FIRST))), TopNNode.Step.PARTIAL);
RowExpression effectivePredicate = effectivePredicateExtractor.extract(node);
// Pass through
assertEquals(normalizeConjuncts(effectivePredicate),
normalizeConjuncts(
equals(AV, BV),
equals(BV, CV),
lessThan(CV, bigintLiteral(10))));
}
|
public static String encodeBytes(byte[] bytes) {
return Base64.encodeToString(bytes, false);
}
|
@Test
public void testBytes() {
byte[] bytes = { 1, 100, 127, 0, 60, 15, -128, -1, 14, -55 };
String bytesString = Protocol.encodeBytes(bytes);
byte[] bytes2 = Base64.decode(bytesString);
assertArrayEquals(bytes, bytes2);
Gateway g = new Gateway(null);
ReturnObject rObject = g.getReturnObject(bytes);
assertNotNull(rObject.getPrimitiveObject());
}
|