| focal_method (string, 13-60.9k chars) | test_case (string, 25-109k chars) |
|---|---|
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
}
|
@Test
public void getMyDefaultAdministratorRights() {
bot.execute(new SetMyDefaultAdministratorRights()
.forChannels(false)
.rights(new ChatAdministratorRights()
.canManageChat(false)
.canDeleteMessages(false)
.canManageVideoChats(false)
.canRestrictMembers(false)
.canPromoteMembers(false)
.canChangeInfo(false)
.canInviteUsers(false)
.canPostMessages(false)
.canEditMessages(false)
.canPinMessages(false)
.canManageTopics(false)
.canPostStories(false)
.canEditStories(false)
.canDeleteStories(false)
));
ChatAdministratorRights rights = bot.execute(new GetMyDefaultAdministratorRights().forChannels(false)).result();
assertFalse(rights.isAnonymous());
assertFalse(rights.canManageChat());
assertFalse(rights.canDeleteMessages());
assertFalse(rights.canManageVideoChats());
assertFalse(rights.canRestrictMembers());
assertFalse(rights.canPromoteMembers());
assertFalse(rights.canChangeInfo());
assertFalse(rights.canInviteUsers());
assertFalse(rights.canPostMessages()); // channels only
assertFalse(rights.canEditMessages()); // channels only
assertFalse(rights.canPinMessages());
assertFalse(rights.canManageTopics());
assertFalse(rights.canPostStories());
assertFalse(rights.canEditStories());
assertFalse(rights.canDeleteStories());
}
|
public static Collection<MdbValidityStatus> assertEjbClassValidity(final ClassInfo mdbClass) {
Collection<MdbValidityStatus> mdbComplianceIssueList = new ArrayList<>(MdbValidityStatus.values().length);
final String className = mdbClass.name().toString();
verifyModifiers(className, mdbClass.flags(), mdbComplianceIssueList);
for (MethodInfo method : mdbClass.methods()) {
if ("onMessage".equals(method.name())) {
verifyOnMessageMethod(className, method.flags(), mdbComplianceIssueList);
}
if ("finalize".equals(method.name())) {
EjbLogger.DEPLOYMENT_LOGGER.mdbCantHaveFinalizeMethod(className);
mdbComplianceIssueList.add(MdbValidityStatus.MDB_SHOULD_NOT_HAVE_FINALIZE_METHOD);
}
}
return mdbComplianceIssueList;
}
|
@Test
public void mdbWithPrivateOnMessageMethod() {
assertTrue(assertEjbClassValidity(buildClassInfoForClass(InvalidMdbOnMessageCantBePrivate.class.getName())).contains(
MdbValidityStatus.MDB_ON_MESSAGE_METHOD_CANT_BE_PRIVATE));
}
|
public void validate(List<String> values, String type, List<String> options) {
TypeValidation typeValidation = findByKey(type);
for (String value : values) {
typeValidation.validate(value, options);
}
}
|
@Test
public void validate() {
TypeValidation fakeTypeValidation = mock(TypeValidation.class);
when(fakeTypeValidation.key()).thenReturn("Fake");
TypeValidations typeValidations = new TypeValidations(newArrayList(fakeTypeValidation));
typeValidations.validate("10", "Fake", newArrayList("a"));
verify(fakeTypeValidation).validate("10", newArrayList("a"));
}
|
@GET
@Produces(MediaType.APPLICATION_JSON)
@Path("{networkId}/devices")
public Response getVirtualDevices(@PathParam("networkId") long networkId) {
NetworkId nid = NetworkId.networkId(networkId);
Set<VirtualDevice> vdevs = vnetService.getVirtualDevices(nid);
return ok(encodeArray(VirtualDevice.class, "devices", vdevs)).build();
}
|
@Test
public void testGetVirtualDevicesEmptyArray() {
NetworkId networkId = networkId4;
expect(mockVnetService.getVirtualDevices(networkId)).andReturn(ImmutableSet.of()).anyTimes();
replay(mockVnetService);
WebTarget wt = target();
String location = "vnets/" + networkId.toString() + "/devices";
String response = wt.path(location).request().get(String.class);
assertThat(response, is("{\"devices\":[]}"));
verify(mockVnetService);
}
|
@Override
public BeamSqlTable buildBeamSqlTable(Table table) {
Schema schema = table.getSchema();
ObjectNode properties = table.getProperties();
Optional<ParsedLocation> parsedLocation = Optional.empty();
if (!Strings.isNullOrEmpty(table.getLocation())) {
parsedLocation = Optional.of(parseLocation(checkArgumentNotNull(table.getLocation())));
}
List<String> topics =
mergeParam(parsedLocation.map(loc -> loc.topic), (ArrayNode) properties.get("topics"));
List<String> allBootstrapServers =
mergeParam(
parsedLocation.map(loc -> loc.brokerLocation),
(ArrayNode) properties.get("bootstrap_servers"));
String bootstrapServers = String.join(",", allBootstrapServers);
Optional<String> payloadFormat =
properties.has("format")
? Optional.of(properties.get("format").asText())
: Optional.empty();
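// A nested schema gets a NestedPayloadKafkaTable; otherwise fall back to CSV or a serializer-backed table.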
if (Schemas.isNestedSchema(schema)) {
Optional<PayloadSerializer> serializer =
payloadFormat.map(
format ->
PayloadSerializers.getSerializer(
format,
checkArgumentNotNull(schema.getField(PAYLOAD_FIELD).getType().getRowSchema()),
TableUtils.convertNode2Map(properties)));
return new NestedPayloadKafkaTable(schema, bootstrapServers, topics, serializer);
} else {
/*
* CSV is handled separately because multiple rows can be produced from a single message, which
* adds complexity to payload extraction. It remains here and as the default because it is the
* historical default, but it will not be extended to support attaching extended attributes to
* rows.
*/
if (payloadFormat.orElse("csv").equals("csv")) {
return new BeamKafkaCSVTable(schema, bootstrapServers, topics);
}
PayloadSerializer serializer =
PayloadSerializers.getSerializer(
payloadFormat.get(), schema, TableUtils.convertNode2Map(properties));
return new PayloadSerializerKafkaTable(schema, bootstrapServers, topics, serializer);
}
}
|
@Test
public void testBuildWithExtraServers() {
Table table =
mockTableWithExtraServers("hello", ImmutableList.of("localhost:1111", "localhost:2222"));
BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
assertNotNull(sqlTable);
assertTrue(sqlTable instanceof BeamKafkaCSVTable);
BeamKafkaCSVTable kafkaTable = (BeamKafkaCSVTable) sqlTable;
assertEquals(
LOCATION_BROKER + ",localhost:1111,localhost:2222", kafkaTable.getBootstrapServers());
assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
|
public static Optional<Object> invokeMethod(Object target, String methodName, Class<?>[] paramsType,
Object[] params) {
if (methodName == null || target == null) {
return Optional.empty();
}
final Optional<Method> method = findMethod(target.getClass(), methodName, paramsType);
if (method.isPresent()) {
return invokeMethod(target, method.get(), params);
}
return Optional.empty();
}
|
@Test
public void invokeMethod() {
final TestReflect testReflect = new TestReflect();
String name = "Mike";
final Optional<Object> hasParams = ReflectUtils
.invokeMethod(testReflect, "hasParams", new Class[]{String.class}, new Object[]{name});
Assert.assertTrue(hasParams.isPresent() && hasParams.get() instanceof String);
Assert.assertEquals(hasParams.get(), testReflect.hasParams(name));
}
|
static <T extends CompoundPredicate> T flattenCompound(Predicate predicateLeft, Predicate predicateRight, Class<T> klass) {
// The following could have been achieved with {@link com.hazelcast.query.impl.predicates.FlatteningVisitor},
// however since we only care for 2-argument flattening, we can avoid constructing a visitor and its internals
// for each token pass at the cost of the following explicit code.
Predicate[] predicates;
if (klass.isInstance(predicateLeft) || klass.isInstance(predicateRight)) {
Predicate[] left = getSubPredicatesIfClass(predicateLeft, klass);
Predicate[] right = getSubPredicatesIfClass(predicateRight, klass);
predicates = new Predicate[left.length + right.length];
ArrayUtils.concat(left, right, predicates);
} else {
predicates = new Predicate[]{predicateLeft, predicateRight};
}
try {
T compoundPredicate = klass.getDeclaredConstructor().newInstance();
compoundPredicate.setPredicates(predicates);
return compoundPredicate;
} catch (ReflectiveOperationException e) {
throw new IllegalArgumentException(String.format("%s must have a public default constructor", klass.getName()));
}
}
|
@Test
public void testOr_whenNoPredicateOr() {
Predicate<Object, Object> predicate1 = Predicates.alwaysTrue();
Predicate<Object, Object> predicate2 = Predicates.alwaysTrue();
OrPredicate concatenatedOr = SqlPredicate.flattenCompound(predicate1, predicate2, OrPredicate.class);
assertEquals(2, concatenatedOr.getPredicates().length);
assertSame(predicate1, concatenatedOr.getPredicates()[0]);
assertSame(predicate2, concatenatedOr.getPredicates()[1]);
}
|
@Override
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
|
@Test
void isInfoEnabled() {
jobRunrDashboardLogger.isInfoEnabled();
verify(slfLogger).isInfoEnabled();
}
|
@Override
public StatusOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
try {
final EnumSet<OpenMode> flags;
if(status.isAppend()) {
if(status.isExists()) {
// No append flag. Otherwise the offset field of SSH_FXP_WRITE requests is ignored.
flags = EnumSet.of(OpenMode.WRITE);
}
else {
// Allocate offset
flags = EnumSet.of(OpenMode.CREAT, OpenMode.WRITE);
}
}
else {
// A new file is created; if the file already exists, it is opened and truncated to preserve the ownership of the file.
if(status.isExists()) {
if(file.isSymbolicLink()) {
// Workaround for #7327
session.sftp().remove(file.getAbsolute());
flags = EnumSet.of(OpenMode.CREAT, OpenMode.TRUNC, OpenMode.WRITE);
}
else {
flags = EnumSet.of(OpenMode.TRUNC, OpenMode.WRITE);
}
}
else {
flags = EnumSet.of(OpenMode.CREAT, OpenMode.TRUNC, OpenMode.WRITE);
}
}
final RemoteFile handle = session.sftp().open(file.getAbsolute(), flags);
final int maxUnconfirmedWrites = this.getMaxUnconfirmedWrites(status);
if(log.isInfoEnabled()) {
log.info(String.format("Using %d unconfirmed writes", maxUnconfirmedWrites));
}
if(log.isInfoEnabled()) {
log.info(String.format("Skipping %d bytes", status.getOffset()));
}
// Open stream at offset
return new VoidStatusOutputStream(new ChunkedOutputStream(handle.new RemoteFileOutputStream(status.getOffset(), maxUnconfirmedWrites) {
private final AtomicBoolean close = new AtomicBoolean();
@Override
public void close() throws IOException {
if(close.get()) {
log.warn(String.format("Skip double close of stream %s", this));
return;
}
try {
super.close();
}
finally {
handle.close();
close.set(true);
}
}
}, preferences.getInteger("sftp.write.chunksize")));
}
catch(IOException e) {
throw new SFTPExceptionMappingService().map("Upload {0} failed", e, file);
}
}
|
@Test(expected = NotfoundException.class)
public void testWriteNotFound() throws Exception {
final Path test = new Path(new SFTPHomeDirectoryService(session).find().getAbsolute() + "/nosuchdirectory/" + UUID.randomUUID(), EnumSet.of(Path.Type.file));
new SFTPWriteFeature(session).write(test, new TransferStatus(), new DisabledConnectionCallback());
}
|
public ResT receive(long timeoutMs) throws IOException {
if (mCompleted) {
return null;
}
if (mCanceled) {
throw new CancelledException(formatErrorMessage("Stream is already canceled."));
}
long startMs = System.currentTimeMillis();
while (true) {
long waitedForMs = System.currentTimeMillis() - startMs;
if (waitedForMs >= timeoutMs) {
throw new DeadlineExceededException(formatErrorMessage(
"Timeout waiting for response after %dms. clientClosed: %s clientCancelled: %s "
+ "serverClosed: %s", timeoutMs, mClosed, mCanceled, mClosedFromRemote));
}
// Wait for a minute max
long waitMs = Math.min(timeoutMs - waitedForMs, Constants.MINUTE_MS);
try {
Object response = mResponses.poll(waitMs, TimeUnit.MILLISECONDS);
if (response == null) {
checkError(); // The stream could have errored while we were waiting
// Log a warning before looping again
LOG.warn("Client did not receive message from stream, will wait again. totalWaitMs: {} "
+ "clientClosed: {} clientCancelled: {} serverClosed: {} description: {}",
System.currentTimeMillis() - startMs, mClosed, mCanceled, mClosedFromRemote,
mDescription);
continue;
}
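// The response observer instance is enqueued as an end-of-stream sentinel.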
if (response == mResponseObserver) {
mCompleted = true;
return null;
}
checkError();
return (ResT) response;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CancelledException(
formatErrorMessage("Interrupted while waiting for response."), e);
}
}
}
|
@Test
public void receiveMoreThanBufferSize() throws Exception {
WriteResponse[] responses = Stream.generate(() -> WriteResponse.newBuilder().build())
.limit(BUFFER_SIZE * 2).toArray(WriteResponse[]::new);
EXECUTOR.submit(() -> {
for (WriteResponse response : responses) {
mResponseObserver.onNext(response);
}
});
Thread.sleep(SHORT_TIMEOUT);
for (WriteResponse response : responses) {
WriteResponse actualResponse = mStream.receive(TIMEOUT);
assertEquals(response, actualResponse);
}
}
|
@Override
public <T extends Response> CompletableFuture<T> sendAsync(
Request request, Class<T> responseType) {
CompletableFuture<T> result = new CompletableFuture<>();
long requestId = request.getId();
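// Register the pending request before sending so the incoming response can complete the future by id.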
requestForId.put(requestId, new WebSocketRequest<>(result, responseType));
try {
sendRequest(request, requestId);
} catch (IOException e) {
closeRequest(requestId, e);
}
return result;
}
|
@Test
public void testReceiveError() throws Exception {
CompletableFuture<Web3ClientVersion> reply =
service.sendAsync(request, Web3ClientVersion.class);
sendErrorReply();
assertTrue(reply.isDone());
Web3ClientVersion version = reply.get();
assertTrue(version.hasError());
assertEquals(new Response.Error(-1, "Error message"), version.getError());
}
|
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
boolean paramCheckEnabled = ServerParamCheckConfig.getInstance().isParamCheckEnabled();
if (!paramCheckEnabled) {
chain.doFilter(request, response);
return;
}
HttpServletRequest req = (HttpServletRequest) request;
HttpServletResponse resp = (HttpServletResponse) response;
try {
Method method = methodsCache.getMethod(req);
if (method == null) {
chain.doFilter(req, resp);
return;
}
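// A method-level @Extractor annotation takes precedence over the class-level one.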
ExtractorManager.Extractor extractor = method.getAnnotation(ExtractorManager.Extractor.class);
if (extractor == null) {
extractor = method.getDeclaringClass().getAnnotation(ExtractorManager.Extractor.class);
if (extractor == null) {
chain.doFilter(request, response);
return;
}
}
AbstractHttpParamExtractor httpParamExtractor = ExtractorManager.getHttpExtractor(extractor);
List<ParamInfo> paramInfoList = httpParamExtractor.extractParam(req);
ParamCheckerManager paramCheckerManager = ParamCheckerManager.getInstance();
AbstractParamChecker paramChecker = paramCheckerManager.getParamChecker(ServerParamCheckConfig.getInstance().getActiveParamChecker());
ParamCheckResponse paramCheckResponse = paramChecker.checkParamInfoList(paramInfoList);
if (paramCheckResponse.isSuccess()) {
chain.doFilter(req, resp);
} else {
Loggers.CONTROL.info("Param check invalid,{},url:{}", paramCheckResponse.getMessage(), req.getRequestURI());
generate400Response(resp, paramCheckResponse.getMessage());
}
} catch (NacosException e) {
Loggers.CONTROL.error("exception: {}", e.getMessage());
throw new NacosRuntimeException(ErrorCode.UnKnowError.getCode(), e);
}
}
|
@Test
void testDoFilterMethodNotFound() throws IOException, ServletException {
when(methodsCache.getMethod(request)).thenReturn(null);
filter.doFilter(request, response, chain);
verify(chain).doFilter(request, response);
}
|
@Override
public long size() {
return mSize.longValue();
}
|
@Test
public void longRunningIterAndRestore() throws Exception {
// Manually set this flag, otherwise an exception will be thrown when the exclusive lock
// is forced.
Configuration.set(PropertyKey.TEST_MODE, false);
prepareBlocks(FILE_NUMBER);
// Prepare a checkpoint file
File checkpointFile = File.createTempFile("checkpoint-for-recovery", "");
try (BufferedOutputStream out =
new BufferedOutputStream(new FileOutputStream(checkpointFile))) {
mStore.writeToCheckpoint(out);
}
// Create a bunch of long running iterators on the InodeStore
CountDownLatch readerLatch = new CountDownLatch(THREAD_NUMBER);
CountDownLatch restoreLatch = new CountDownLatch(1);
ArrayBlockingQueue<Exception> errors = new ArrayBlockingQueue<>(THREAD_NUMBER);
ArrayBlockingQueue<Integer> results = new ArrayBlockingQueue<>(THREAD_NUMBER);
List<Future<Void>> futures =
submitIterJob(THREAD_NUMBER, errors, results, readerLatch, restoreLatch);
// Wait for the 20 reader threads to be mid-iteration, then trigger the restore
readerLatch.await();
try (CheckpointInputStream in = new CheckpointInputStream(
(new DataInputStream(new FileInputStream(checkpointFile))))) {
mStore.restoreFromCheckpoint(in);
}
// Verify that the iterators can still run
restoreLatch.countDown();
waitForReaders(futures);
// All iterators should abort because the RocksDB contents have changed
assertEquals(THREAD_NUMBER, errors.size());
long completed = results.stream().filter(n -> n == FILE_NUMBER).count();
assertEquals(0, completed);
long aborted = results.stream().filter(n -> n == 10).count();
assertEquals(THREAD_NUMBER, aborted);
}
|
public static FilterPredicate rewrite(FilterPredicate pred) {
Objects.requireNonNull(pred, "pred cannot be null");
return pred.accept(INSTANCE);
}
|
@Test
public void testBaseCases() {
UserDefined<Integer, DummyUdp> ud = userDefined(intColumn, DummyUdp.class);
assertNoOp(eq(intColumn, 17));
assertNoOp(notEq(intColumn, 17));
assertNoOp(lt(intColumn, 17));
assertNoOp(ltEq(intColumn, 17));
assertNoOp(gt(intColumn, 17));
assertNoOp(gtEq(intColumn, 17));
assertNoOp(and(eq(intColumn, 17), eq(doubleColumn, 12.0)));
assertNoOp(or(eq(intColumn, 17), eq(doubleColumn, 12.0)));
assertNoOp(ud);
Contains<Integer> containsLhs = contains(eq(intColumn, 17));
Contains<Integer> containsRhs = contains(eq(intColumn, 7));
assertNoOp(containsLhs);
assertEquals(containsLhs.and(containsRhs), rewrite(and(containsLhs, containsRhs)));
assertEquals(containsLhs.or(containsRhs), rewrite(or(containsLhs, containsRhs)));
}
|
@VisibleForTesting
public static void validateIngestionConfig(TableConfig tableConfig, @Nullable Schema schema) {
IngestionConfig ingestionConfig = tableConfig.getIngestionConfig();
if (ingestionConfig != null) {
String tableNameWithType = tableConfig.getTableName();
// Batch
if (ingestionConfig.getBatchIngestionConfig() != null) {
BatchIngestionConfig cfg = ingestionConfig.getBatchIngestionConfig();
List<Map<String, String>> batchConfigMaps = cfg.getBatchConfigMaps();
try {
if (CollectionUtils.isNotEmpty(batchConfigMaps)) {
// Validate that BatchConfig can be created
batchConfigMaps.forEach(b -> new BatchConfig(tableNameWithType, b));
}
} catch (Exception e) {
throw new IllegalStateException("Could not create BatchConfig using the batchConfig map", e);
}
if (tableConfig.isDimTable()) {
Preconditions.checkState(cfg.getSegmentIngestionType().equalsIgnoreCase("REFRESH"),
"Dimension tables must have segment ingestion type REFRESH");
}
}
if (tableConfig.isDimTable()) {
Preconditions.checkState(ingestionConfig.getBatchIngestionConfig() != null,
"Dimension tables must have batch ingestion configuration");
}
// Stream
// The stream config map can be in either the ingestion config or the indexing config, but not in both
if (ingestionConfig.getStreamIngestionConfig() != null) {
IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
Preconditions.checkState(indexingConfig == null || MapUtils.isEmpty(indexingConfig.getStreamConfigs()),
"Should not use indexingConfig#getStreamConfigs if ingestionConfig#StreamIngestionConfig is provided");
List<Map<String, String>> streamConfigMaps = ingestionConfig.getStreamIngestionConfig().getStreamConfigMaps();
Preconditions.checkState(streamConfigMaps.size() == 1, "Only 1 stream is supported in REALTIME table");
}
// Filter config
FilterConfig filterConfig = ingestionConfig.getFilterConfig();
if (filterConfig != null) {
String filterFunction = filterConfig.getFilterFunction();
if (filterFunction != null) {
if (_disableGroovy && FunctionEvaluatorFactory.isGroovyExpression(filterFunction)) {
throw new IllegalStateException(
"Groovy filter functions are disabled for table config. Found '" + filterFunction + "'");
}
try {
FunctionEvaluatorFactory.getExpressionEvaluator(filterFunction);
} catch (Exception e) {
throw new IllegalStateException("Invalid filter function " + filterFunction, e);
}
}
}
// Aggregation configs
List<AggregationConfig> aggregationConfigs = ingestionConfig.getAggregationConfigs();
Set<String> aggregationSourceColumns = new HashSet<>();
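// Source columns referenced by aggregation functions; transform configs may target these even if they are not in the schema.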
if (!CollectionUtils.isEmpty(aggregationConfigs)) {
Preconditions.checkState(!tableConfig.getIndexingConfig().isAggregateMetrics(),
"aggregateMetrics cannot be set with AggregationConfig");
Set<String> aggregationColumns = new HashSet<>();
for (AggregationConfig aggregationConfig : aggregationConfigs) {
String columnName = aggregationConfig.getColumnName();
String aggregationFunction = aggregationConfig.getAggregationFunction();
if (columnName == null || aggregationFunction == null) {
throw new IllegalStateException(
"columnName/aggregationFunction cannot be null in AggregationConfig " + aggregationConfig);
}
FieldSpec fieldSpec = null;
if (schema != null) {
fieldSpec = schema.getFieldSpecFor(columnName);
Preconditions.checkState(fieldSpec != null, "The destination column '" + columnName
+ "' of the aggregation function must be present in the schema");
Preconditions.checkState(fieldSpec.getFieldType() == FieldSpec.FieldType.METRIC,
"The destination column '" + columnName + "' of the aggregation function must be a metric column");
}
if (!aggregationColumns.add(columnName)) {
throw new IllegalStateException("Duplicate aggregation config found for column '" + columnName + "'");
}
ExpressionContext expressionContext;
try {
expressionContext = RequestContextUtils.getExpression(aggregationConfig.getAggregationFunction());
} catch (Exception e) {
throw new IllegalStateException(
"Invalid aggregation function '" + aggregationFunction + "' for column '" + columnName + "'", e);
}
Preconditions.checkState(expressionContext.getType() == ExpressionContext.Type.FUNCTION,
"aggregation function must be a function for: %s", aggregationConfig);
FunctionContext functionContext = expressionContext.getFunction();
AggregationFunctionType functionType =
AggregationFunctionType.getAggregationFunctionType(functionContext.getFunctionName());
validateIngestionAggregation(functionType);
List<ExpressionContext> arguments = functionContext.getArguments();
int numArguments = arguments.size();
if (functionType == DISTINCTCOUNTHLL) {
Preconditions.checkState(numArguments >= 1 && numArguments <= 2,
"DISTINCT_COUNT_HLL can have at most two arguments: %s", aggregationConfig);
if (numArguments == 2) {
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of DISTINCT_COUNT_HLL must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of DISTINCT_COUNT_HLL must be a number: %s", aggregationConfig);
}
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL must be BYTES: %s", aggregationConfig);
}
} else if (functionType == DISTINCTCOUNTHLLPLUS) {
Preconditions.checkState(numArguments >= 1 && numArguments <= 3,
"DISTINCT_COUNT_HLL_PLUS can have at most three arguments: %s", aggregationConfig);
if (numArguments == 2) {
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of DISTINCT_COUNT_HLL_PLUS must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of DISTINCT_COUNT_HLL_PLUS must be a number: %s", aggregationConfig);
}
if (numArguments == 3) {
ExpressionContext thirdArgument = arguments.get(2);
Preconditions.checkState(thirdArgument.getType() == ExpressionContext.Type.LITERAL,
"Third argument of DISTINCT_COUNT_HLL_PLUS must be literal: %s", aggregationConfig);
String literal = thirdArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Third argument of DISTINCT_COUNT_HLL_PLUS must be a number: %s", aggregationConfig);
}
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL_PLUS must be BYTES: %s", aggregationConfig);
}
} else if (functionType == SUMPRECISION) {
Preconditions.checkState(numArguments >= 2 && numArguments <= 3,
"SUM_PRECISION must specify precision (required), scale (optional): %s", aggregationConfig);
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of SUM_PRECISION must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of SUM_PRECISION must be a number: %s", aggregationConfig);
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BIG_DECIMAL || dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL must be BIG_DECIMAL or BYTES: %s", aggregationConfig);
}
} else {
Preconditions.checkState(numArguments == 1, "%s can only have one argument: %s", functionType,
aggregationConfig);
}
ExpressionContext firstArgument = arguments.get(0);
Preconditions.checkState(firstArgument.getType() == ExpressionContext.Type.IDENTIFIER,
"First argument of aggregation function: %s must be identifier, got: %s", functionType,
firstArgument.getType());
aggregationSourceColumns.add(firstArgument.getIdentifier());
}
if (schema != null) {
Preconditions.checkState(new HashSet<>(schema.getMetricNames()).equals(aggregationColumns),
"all metric columns must be aggregated");
}
// This is required by MutableSegmentImpl.enableMetricsAggregationIfPossible().
// That code will disable ingestion aggregation if all metrics aren't noDictionaryColumns.
// But if you do that after the table is already created, all future aggregations will
// just be the default value.
Map<String, DictionaryIndexConfig> configPerCol = StandardIndexes.dictionary().getConfig(tableConfig, schema);
aggregationColumns.forEach(column -> {
DictionaryIndexConfig dictConfig = configPerCol.get(column);
Preconditions.checkState(dictConfig != null && dictConfig.isDisabled(),
"Aggregated column: %s must be a no-dictionary column", column);
});
}
// Enrichment configs
List<EnrichmentConfig> enrichmentConfigs = ingestionConfig.getEnrichmentConfigs();
if (enrichmentConfigs != null) {
for (EnrichmentConfig enrichmentConfig : enrichmentConfigs) {
RecordEnricherRegistry.validateEnrichmentConfig(enrichmentConfig,
new RecordEnricherValidationConfig(_disableGroovy));
}
}
// Transform configs
List<TransformConfig> transformConfigs = ingestionConfig.getTransformConfigs();
if (transformConfigs != null) {
Set<String> transformColumns = new HashSet<>();
for (TransformConfig transformConfig : transformConfigs) {
String columnName = transformConfig.getColumnName();
String transformFunction = transformConfig.getTransformFunction();
if (columnName == null || transformFunction == null) {
throw new IllegalStateException(
"columnName/transformFunction cannot be null in TransformConfig " + transformConfig);
}
if (!transformColumns.add(columnName)) {
throw new IllegalStateException("Duplicate transform config found for column '" + columnName + "'");
}
if (schema != null) {
Preconditions.checkState(
schema.getFieldSpecFor(columnName) != null || aggregationSourceColumns.contains(columnName),
"The destination column '" + columnName
+ "' of the transform function must be present in the schema or as a source column for "
+ "aggregations");
}
FunctionEvaluator expressionEvaluator;
if (_disableGroovy && FunctionEvaluatorFactory.isGroovyExpression(transformFunction)) {
throw new IllegalStateException(
"Groovy transform functions are disabled for table config. Found '" + transformFunction
+ "' for column '" + columnName + "'");
}
try {
expressionEvaluator = FunctionEvaluatorFactory.getExpressionEvaluator(transformFunction);
} catch (Exception e) {
throw new IllegalStateException(
"Invalid transform function '" + transformFunction + "' for column '" + columnName + "'", e);
}
List<String> arguments = expressionEvaluator.getArguments();
if (arguments.contains(columnName)) {
throw new IllegalStateException(
"Arguments of a transform function '" + arguments + "' cannot contain the destination column '"
+ columnName + "'");
}
}
}
// Complex configs
ComplexTypeConfig complexTypeConfig = ingestionConfig.getComplexTypeConfig();
if (complexTypeConfig != null && schema != null) {
Map<String, String> prefixesToRename = complexTypeConfig.getPrefixesToRename();
if (MapUtils.isNotEmpty(prefixesToRename)) {
Set<String> fieldNames = schema.getColumnNames();
for (String prefix : prefixesToRename.keySet()) {
for (String field : fieldNames) {
Preconditions.checkState(!field.startsWith(prefix),
"Fields in the schema may not begin with any prefix specified in the prefixesToRename"
+ " config. Name conflict with field: " + field + " and prefix: " + prefix);
}
}
}
}
SchemaConformingTransformerConfig schemaConformingTransformerConfig =
ingestionConfig.getSchemaConformingTransformerConfig();
if (null != schemaConformingTransformerConfig && null != schema) {
SchemaConformingTransformer.validateSchema(schema, schemaConformingTransformerConfig);
}
SchemaConformingTransformerV2Config schemaConformingTransformerV2Config =
ingestionConfig.getSchemaConformingTransformerV2Config();
if (null != schemaConformingTransformerV2Config && null != schema) {
SchemaConformingTransformerV2.validateSchema(schema, schemaConformingTransformerV2Config);
}
}
}
|
@Test
public void ingestionConfigForDimensionTableTest() {
Map<String, String> batchConfigMap = new HashMap<>();
batchConfigMap.put(BatchConfigProperties.INPUT_DIR_URI, "s3://foo");
batchConfigMap.put(BatchConfigProperties.OUTPUT_DIR_URI, "gs://bar");
batchConfigMap.put(BatchConfigProperties.INPUT_FS_CLASS, "org.foo.S3FS");
batchConfigMap.put(BatchConfigProperties.OUTPUT_FS_CLASS, "org.foo.GcsFS");
batchConfigMap.put(BatchConfigProperties.INPUT_FORMAT, "avro");
batchConfigMap.put(BatchConfigProperties.RECORD_READER_CLASS, "org.foo.Reader");
// valid dimension table ingestion config
IngestionConfig ingestionConfig = new IngestionConfig();
ingestionConfig.setBatchIngestionConfig(
new BatchIngestionConfig(Collections.singletonList(batchConfigMap), "REFRESH", null));
TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIsDimTable(true)
.setIngestionConfig(ingestionConfig).build();
TableConfigUtils.validateIngestionConfig(tableConfig, null);
// dimension tables should have batch ingestion config
ingestionConfig.setBatchIngestionConfig(null);
try {
TableConfigUtils.validateIngestionConfig(tableConfig, null);
Assert.fail("Should fail for Dimension table without batch ingestion config");
} catch (IllegalStateException e) {
// expected
}
// dimension tables should have batch ingestion config of type REFRESH
ingestionConfig.setBatchIngestionConfig(
new BatchIngestionConfig(Collections.singletonList(batchConfigMap), "APPEND", null));
try {
TableConfigUtils.validateIngestionConfig(tableConfig, null);
Assert.fail("Should fail for Dimension table with ingestion type APPEND (should be REFRESH)");
} catch (IllegalStateException e) {
// expected
}
}
|
@Override
public void onBlocked(BlockException ex, Context context, ResourceWrapper resourceWrapper, DefaultNode param,
int count, Object... args) {
for (MetricExtension m : MetricExtensionProvider.getMetricExtensions()) {
if (m instanceof AdvancedMetricExtension) {
((AdvancedMetricExtension) m).onBlocked(resourceWrapper, count, context.getOrigin(), ex, args);
} else {
m.addBlock(resourceWrapper.getName(), count, context.getOrigin(), ex, args);
}
}
}
|
@Test
public void onBlocked() throws Exception {
FakeMetricExtension extension = new FakeMetricExtension();
FakeAdvancedMetricExtension advancedExtension = new FakeAdvancedMetricExtension();
MetricExtensionProvider.addMetricExtension(extension);
MetricExtensionProvider.addMetricExtension(advancedExtension);
MetricEntryCallback entryCallback = new MetricEntryCallback();
StringResourceWrapper resourceWrapper = new StringResourceWrapper("resource", EntryType.OUT);
Context context = mock(Context.class);
when(context.getOrigin()).thenReturn("origin1");
int count = 2;
Object[] args = {"args1", "args2"};
entryCallback.onBlocked(new FlowException("xx"), context, resourceWrapper, null, count, args);
// assert extension
Assert.assertEquals(extension.block, count);
// assert advancedExtension
Assert.assertEquals(advancedExtension.block, count);
}
|
@Override
public void handle(final RoutingContext routingContext) {
if (routingContext.request().isSSL()) {
final String indicatedServerName = routingContext.request().connection()
.indicatedServerName();
final String requestHost = routingContext.request().host();
if (indicatedServerName != null && requestHost != null) {
// sometimes the port is present in the host header, remove it
final String requestHostNoPort = requestHost.replaceFirst(":\\d+", "");
if (!requestHostNoPort.equals(indicatedServerName)) {
log.error(String.format(
"Sni check failed, host header: %s, sni value %s",
requestHostNoPort,
indicatedServerName)
);
routingContext.fail(MISDIRECTED_REQUEST.code(),
new KsqlApiException("This request was incorrectly sent to this ksqlDB server",
Errors.ERROR_CODE_MISDIRECTED_REQUEST));
return;
}
}
}
routingContext.next();
}
|
@Test
public void shouldNotReturnMisdirectedResponseIfMatchHostPort() {
// Given:
when(serverRequest.host()).thenReturn("localhost:80");
when(httpConnection.indicatedServerName()).thenReturn("localhost");
// When:
sniHandler.handle(routingContext);
// Then:
verify(routingContext, never()).fail(anyInt(), any());
verify(routingContext, times(1)).next();
}
|
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
try {
final IRODSFileSystemAO fs = session.getClient();
final IRODSFile s = fs.getIRODSFileFactory().instanceIRODSFile(file.getAbsolute());
if(!s.exists()) {
throw new NotfoundException(String.format("%s doesn't exist", file.getAbsolute()));
}
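// If the target is flagged as existing, delete it before renaming.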
if(status.isExists()) {
delete.delete(Collections.singletonMap(renamed, status), connectionCallback, callback);
}
final IRODSFile d = fs.getIRODSFileFactory().instanceIRODSFile(renamed.getAbsolute());
s.renameTo(d);
return renamed;
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map("Cannot rename {0}", e, file);
}
}
|
@Test
public void testMoveDirectory() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path source = new Path(new IRODSHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
final Path destination = new Path(new IRODSHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new IRODSDirectoryFeature(session).mkdir(source, new TransferStatus());
final String filename = new AlphanumericRandomStringService().random();
new IRODSTouchFeature(session).touch(new Path(source, filename, EnumSet.of(Path.Type.file)), new TransferStatus());
assertTrue(session.getFeature(Find.class).find(new Path(source, filename, EnumSet.of(Path.Type.file))));
new IRODSDirectoryFeature(session).mkdir(destination, new TransferStatus());
new IRODSMoveFeature(session).move(source, destination, new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback());
assertFalse(session.getFeature(Find.class).find(source));
assertFalse(session.getFeature(Find.class).find(new Path(source, filename, EnumSet.of(Path.Type.file))));
assertTrue(session.getFeature(Find.class).find(destination));
assertTrue(session.getFeature(Find.class).find(new Path(destination, filename, EnumSet.of(Path.Type.file))));
session.getFeature(Delete.class).delete(Collections.singletonList(destination), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(session.getFeature(Find.class).find(destination));
session.close();
}
|
@Override
public Health check(Set<NodeHealth> nodeHealths) {
Set<NodeHealth> appNodes = nodeHealths.stream()
.filter(s -> s.getDetails().getType() == NodeDetails.Type.APPLICATION)
.collect(Collectors.toSet());
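// Evaluate every application-node sub-check against the filtered nodes and merge the results into a single Health.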
return Arrays.stream(AppNodeClusterHealthSubChecks.values())
.map(s -> s.check(appNodes))
.reduce(Health.GREEN, HealthReducer::merge);
}
|
@Test
public void status_YELLOW_when_two_GREEN_application_node_and_any_number_of_other_is_RED_or_GREEN() {
Set<NodeHealth> nodeHealths = of(
// at least 1 RED
of(appNodeHealth(RED)),
// 0 to 10 RED/GREEN
randomNumberOfAppNodeHealthOfAnyStatus(GREEN, RED),
// 2 GREEN
nodeHealths(GREEN, GREEN))
.flatMap(s -> s)
.collect(toSet());
Health check = underTest.check(nodeHealths);
assertThat(check)
.forInput(nodeHealths)
.hasStatus(Health.Status.YELLOW)
.andCauses("At least one application node is RED");
}
|
static String getModel(String fileName) {
return FileNameUtils.getSuffix(fileName).replace(FINAL_SUFFIX, "");
}
|
@Test
void getModel() {
String fileName = "file_name.model_json";
String expected = "model";
String source = fileName;
assertThat(IndexFile.getModel(source)).isEqualTo(expected);
source = File.separator + "dir" + File.separator + fileName;
assertThat(IndexFile.getModel(source)).isEqualTo(expected);
}
|
@Override
@Nonnull
public List<Sdk> selectSdks(Configuration configuration, UsesSdk usesSdk) {
Config config = configuration.get(Config.class);
Set<Sdk> sdks = new TreeSet<>(configuredSdks(config, usesSdk));
if (enabledSdks != null) {
sdks = Sets.intersection(sdks, enabledSdks);
}
return Lists.newArrayList(sdks);
}
|
@Test
public void withAllSdksConfigAndNoMinSdkVersion_shouldUseFullSdkRangeFromAndroidManifest()
throws Exception {
when(usesSdk.getTargetSdkVersion()).thenReturn(22);
when(usesSdk.getMinSdkVersion()).thenReturn(1);
when(usesSdk.getMaxSdkVersion()).thenReturn(22);
assertThat(
sdkPicker.selectSdks(
buildConfig(new Config.Builder().setSdk(Config.ALL_SDKS)), usesSdk))
.containsExactly(
sdkCollection.getSdk(16),
sdkCollection.getSdk(17),
sdkCollection.getSdk(18),
sdkCollection.getSdk(19),
sdkCollection.getSdk(21),
sdkCollection.getSdk(22));
}
|
public RegisterTypeCommand create(final RegisterType statement) {
final String name = statement.getName();
final boolean ifNotExists = statement.getIfNotExists();
final SqlType type = statement.getType().getSqlType();
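// Fail only when the type is already registered and IF NOT EXISTS was not specified.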
if (!ifNotExists && metaStore.resolveType(name).isPresent()) {
throw new KsqlException(
"Cannot register custom type '" + name + "' "
+ "since it is already registered with type: " + metaStore.resolveType(name).get()
);
}
return new RegisterTypeCommand(type, name);
}
|
@Test
public void shouldCreateCommandForRegisterTypeWhenIfNotExistsSet() {
// Given:
final RegisterType ddlStatement = new RegisterType(
Optional.empty(),
NOT_EXISTING_TYPE,
new Type(SqlStruct.builder().field("foo", SqlPrimitiveType.of(SqlBaseType.STRING)).build()),
true
);
// When:
final RegisterTypeCommand result = factory.create(ddlStatement);
// Then:
assertThat(result.getType(), equalTo(ddlStatement.getType().getSqlType()));
assertThat(result.getTypeName(), equalTo(NOT_EXISTING_TYPE));
}
|
@Override
public void upgrade() {
MongoCollection<Document> roles = mongoDatabase.getCollection("roles");
Document viewsUserRole = roles.findOneAndDelete(eq("name", "Views User"));
if (viewsUserRole != null) {
removeRoleFromUsers(viewsUserRole);
}
}
|
@Test
public void doesntFailIfOldPermissionsNotPresent() {
Document role = insertRole("Dancing Monkey");
Document user = insertUserWithRoles(role);
migration.upgrade();
assertThat(rolesCollection.find()).containsOnly(role);
assertThat(usersCollection.find()).containsOnly(user);
}
|
@Override
public void clear(long fromIndex, long toIndex) {
get(clearAsync(fromIndex, toIndex));
}
|
@Test
public void testClear() {
RBitSet bs = redisson.getBitSet("testbitset");
bs.set(0, 8);
bs.clear(0, 3);
assertThat(bs.toString()).isEqualTo("{3, 4, 5, 6, 7}");
}
|
public void resetOffsetsFromResetPlan(final Consumer<byte[], byte[]> client,
final Set<TopicPartition> inputTopicPartitions,
final Map<TopicPartition, Long> topicPartitionsAndOffset) {
final Map<TopicPartition, Long> endOffsets = client.endOffsets(inputTopicPartitions);
final Map<TopicPartition, Long> beginningOffsets = client.beginningOffsets(inputTopicPartitions);
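// Offsets outside the [beginning, end] range are adjusted by checkOffsetRange before seeking.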
final Map<TopicPartition, Long> validatedTopicPartitionsAndOffset =
checkOffsetRange(topicPartitionsAndOffset, beginningOffsets, endOffsets);
for (final TopicPartition topicPartition : inputTopicPartitions) {
client.seek(topicPartition, validatedTopicPartitionsAndOffset.get(topicPartition));
}
}
|
@Test
public void testResetUsingPlanWhenBeforeBeginningOffset() {
final Map<TopicPartition, Long> endOffsets = new HashMap<>();
endOffsets.put(topicPartition, 4L);
consumer.updateEndOffsets(endOffsets);
final Map<TopicPartition, Long> beginningOffsets = new HashMap<>();
beginningOffsets.put(topicPartition, 3L);
consumer.updateBeginningOffsets(beginningOffsets);
final Map<TopicPartition, Long> topicPartitionsAndOffset = new HashMap<>();
topicPartitionsAndOffset.put(topicPartition, 1L);
streamsResetter.resetOffsetsFromResetPlan(consumer, inputTopicPartitions, topicPartitionsAndOffset);
final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
assertEquals(2, records.count());
}
|
public WriteLock writeLock() {
return writeMutex;
}
|
@Test
public void testSetNodeData() throws Exception {
CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
try {
client.start();
final byte[] nodeData = new byte[] {1, 2, 3, 4};
InterProcessReadWriteLock lock = new InterProcessReadWriteLock(client, "/lock", nodeData);
// mutate passed-in node data, lock has made copy
nodeData[0] = 5;
lock.writeLock().acquire();
List<String> children = client.getChildren().forPath("/lock");
assertEquals(1, children.size());
byte[] dataInZk = client.getData().forPath("/lock/" + children.get(0));
assertNotNull(dataInZk);
assertArrayEquals(new byte[] {1, 2, 3, 4}, dataInZk);
lock.writeLock().release();
} finally {
TestCleanState.closeAndTestClean(client);
}
}
|
@Override
public PayloadSerializer getSerializer(Schema schema, Map<String, Object> tableParams) {
Class<? extends Message> protoClass = getClass(tableParams);
inferAndVerifySchema(protoClass, schema);
SimpleFunction<byte[], Row> toRowFn = ProtoMessageSchema.getProtoBytesToRowFn(protoClass);
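// Writes serialize Rows to proto bytes; reads parse bytes into a Row and cast it onto the declared table schema.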
return PayloadSerializer.of(
ProtoMessageSchema.getRowToProtoBytesFn(protoClass),
bytes -> {
Row rawRow = toRowFn.apply(bytes);
return castRow(rawRow, rawRow.getSchema(), schema);
});
}
|
@Test
public void invalidArgs() {
assertThrows(
IllegalArgumentException.class,
() -> provider.getSerializer(SHUFFLED_SCHEMA, ImmutableMap.of()));
assertThrows(
IllegalArgumentException.class,
() -> provider.getSerializer(SHUFFLED_SCHEMA, ImmutableMap.of("protoClass", "")));
assertThrows(
ClassCastException.class,
() ->
provider.getSerializer(
SHUFFLED_SCHEMA, ImmutableMap.of("protoClass", ImmutableList.class.getName())));
assertThrows(
IllegalArgumentException.class,
() ->
provider.getSerializer(
Schema.builder()
.addStringField("f_NOTACTUALLYINMESSAGE")
.addInt32Field("f_int")
.addArrayField("f_float_array", FieldType.FLOAT)
.addDoubleField("f_double")
.addInt64Field("f_long")
.build(),
ImmutableMap.of("protoClass", PayloadMessages.TestMessage.class.getName())));
}
|
@VisibleForTesting
GenericData getDataModel() {
return dataModelSupplier.get();
}
|
@Test
void getDataModel() {
assertThat(
((AvroParquetRecordFormat) AvroParquetReaders.forGenericRecord(schema))
.getDataModel()
.getClass())
.isEqualTo(GenericData.class);
assertThat(
((AvroParquetRecordFormat)
AvroParquetReaders.forSpecificRecord(Address.class))
.getDataModel()
.getClass())
.isEqualTo(SpecificData.class);
assertThat(
((AvroParquetRecordFormat) AvroParquetReaders.forReflectRecord(Datum.class))
.getDataModel()
.getClass())
.isEqualTo(ReflectData.class);
}
|
public static void saveContractInfo(Contract contract) {
if (!CONTRACT_MAP.containsKey(contract.getServiceKey())) {
CONTRACT_MAP.putIfAbsent(contract.getServiceKey(), contract);
} else if (Objects.equals(contract.getServiceType(), ServiceType.DUBBO.getType())) {
Contract oldContract = CONTRACT_MAP.get(contract.getServiceKey());
oldContract.setMethodInfoList(contract.getMethodInfoList());
} else {
Contract oldContract = CONTRACT_MAP.get(contract.getServiceKey());
oldContract.getMethodInfoList().addAll(contract.getMethodInfoList());
}
}
|
@Test
public void saveContractInfo() {
Contract contract = new Contract();
contract.setServiceKey(CollectorCacheTest.class.getName());
contract.setServiceType(ServiceType.DUBBO.getType());
contract.setInterfaceName(CollectorCacheTest.class.getName());
List<MethodInfo> methodInfoList = new ArrayList<>();
for (Method method : CollectorCacheTest.class.getMethods()) {
MethodInfo methodInfo = new MethodInfo();
methodInfo.setName(method.getName());
methodInfoList.add(methodInfo);
}
CollectorCache.saveContractInfo(contract);
Assert.assertFalse(CollectorCache.CONTRACT_MAP.isEmpty());
CollectorCache.saveContractInfo(contract);
Assert.assertTrue(CollectorCache.CONTRACT_MAP.size() == 1);
Assert.assertTrue(CollectorCache.CONTRACT_MAP.containsKey(CollectorCacheTest.class.getName()));
}
|
public <T> List<T> apply(T[] a) {
return apply(Arrays.asList(a));
}
|
@Test
public void normalRange() {
Range r = new Range(2,4);
assertEquals("[c, d]", toS(r.apply(array)));
assertEquals("[c, d]", toS(r.apply(list)));
assertEquals("[c, d]", toS(r.apply(set)));
}
|
private static void nativeMemory(XmlGenerator gen, NativeMemoryConfig nativeMemory) {
gen.open("native-memory", "enabled", nativeMemory.isEnabled(),
"allocator-type", nativeMemory.getAllocatorType())
.node("capacity", null, "value", nativeMemory.getCapacity().getValue(),
"unit", nativeMemory.getCapacity().getUnit())
.node("min-block-size", nativeMemory.getMinBlockSize())
.node("page-size", nativeMemory.getPageSize())
.node("metadata-space-percentage", nativeMemory.getMetadataSpacePercentage());
PersistentMemoryConfig pmemConfig = nativeMemory.getPersistentMemoryConfig();
List<PersistentMemoryDirectoryConfig> directoryConfigs = pmemConfig.getDirectoryConfigs();
gen.open("persistent-memory",
"enabled", pmemConfig.isEnabled(),
"mode", pmemConfig.getMode().name());
if (!directoryConfigs.isEmpty()) {
gen.open("directories");
for (PersistentMemoryDirectoryConfig dirConfig : directoryConfigs) {
if (dirConfig.isNumaNodeSet()) {
gen.node("directory", dirConfig.getDirectory(),
"numa-node", dirConfig.getNumaNode());
} else {
gen.node("directory", dirConfig.getDirectory());
}
}
gen.close();
}
gen.close().close();
}
|
@Test
public void nativeMemory() {
NativeMemoryConfig expected = new NativeMemoryConfig();
expected.setEnabled(true)
.setAllocatorType(MemoryAllocatorType.STANDARD)
.setMetadataSpacePercentage(70)
.setMinBlockSize(randomInt())
.setPageSize(randomInt())
.setCapacity(new Capacity(randomInt(), MemoryUnit.BYTES));
clientConfig.setNativeMemoryConfig(expected);
NativeMemoryConfig actual = newConfigViaGenerator().getNativeMemoryConfig();
assertEquals(clientConfig.getNativeMemoryConfig(), actual);
}
|
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
return newGetter(object, parent, modifier, field.getType(), field::get,
(t, et) -> new FieldGetter(parent, field, modifier, t, et));
}
|
@Test
public void newFieldGetter_whenExtractingFromNull_Array_AndReducerSuffixInNotEmpty_thenReturnNullGetter()
throws Exception {
OuterObject object = OuterObject.nullInner("name");
Getter getter = GetterFactory.newFieldGetter(object, null, innersArrayField, "[any]");
Class<?> returnType = getter.getReturnType();
assertEquals(InnerObject.class, returnType);
}
|
protected static SimpleDateFormat getLog4j2Appender() {
Optional<Appender> log4j2xmlAppender =
configuration.getAppenders().values().stream()
.filter( a -> a.getName().equalsIgnoreCase( log4J2Appender ) ).findFirst();
if ( log4j2xmlAppender.isPresent() ) {
ArrayList<String> matchesArray = new ArrayList<>();
String dateFormatFromLog4j2xml = log4j2xmlAppender.get().getLayout().getContentFormat().get( "format" );
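// Extract each brace-delimited token (e.g. the date pattern in %d{...}) from the layout's content format.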
Pattern pattern = Pattern.compile( "(\\{(.*?)})" );
Matcher matcher = pattern.matcher( dateFormatFromLog4j2xml );
while ( matcher.find() ) {
matchesArray.add( matcher.group( 2 ) );
}
if ( !matchesArray.isEmpty() ) {
return processMatches( matchesArray );
}
}
return new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss" );
}
|
@Test
public void testGetLog4j2UsingAppender12() {
// No matching appender name, so the default pattern "yyyy/MM/dd HH:mm:ss" should be used
KettleLogLayout.log4J2Appender = "pdi-execution-appender-test-twelve";
Assert.assertEquals( "yyyy/MM/dd HH:mm:ss",
KettleLogLayout.getLog4j2Appender().toPattern() );
}
|
@Override
public Optional<ScmInfo> getScmInfo(Component component) {
requireNonNull(component, "Component cannot be null");
if (component.getType() != Component.Type.FILE) {
return Optional.empty();
}
return scmInfoCache.computeIfAbsent(component, this::getScmInfoForComponent);
}
|
@Test
public void read_from_DB_if_no_report_and_file_unchanged_and_copyFromPrevious_is_true() {
createDbScmInfoWithOneLine();
when(fileStatuses.isUnchanged(FILE_SAME)).thenReturn(true);
addFileSourceInReport(1);
addCopyFromPrevious();
ScmInfo scmInfo = underTest.getScmInfo(FILE_SAME).get();
assertThat(scmInfo.getAllChangesets()).hasSize(1);
assertChangeset(scmInfo.getChangesetForLine(1), "rev1", "author1", 10L);
verify(fileStatuses).isUnchanged(FILE_SAME);
verify(dbLoader).getScmInfo(FILE_SAME);
verifyNoMoreInteractions(dbLoader);
verifyNoMoreInteractions(fileStatuses);
verifyNoInteractions(diff);
}
|
@Override
public ConsumerBuilder<T> topic(String... topicNames) {
checkArgument(topicNames != null && topicNames.length > 0,
"Passed in topicNames should not be null or empty.");
return topics(Arrays.stream(topicNames).collect(Collectors.toList()));
}
|
@Test(expectedExceptions = IllegalArgumentException.class)
public void testConsumerBuilderImplWhenTopicNamesVarargsIsNull() {
consumerBuilderImpl.topic(null);
}
|
public Map<String, ServiceInfo> getServiceInfoMap() {
return serviceInfoMap;
}
|
@Test
void testGetServiceInfoMap() throws NoSuchFieldException, IllegalAccessException {
assertEquals(0, holder.getServiceInfoMap().size());
Field fieldNotifierEventScope = ServiceInfoHolder.class.getDeclaredField("notifierEventScope");
fieldNotifierEventScope.setAccessible(true);
assertEquals("scope-001", fieldNotifierEventScope.get(holder));
}
|
@Operation(summary = "Read the answers of the 3 apdus and read the pip/pp to send to digid x")
@PostMapping(value = { Constants.URL_OLD_RDW_POLYMORPHICDATA,
Constants.URL_RDW_POLYMORPHICDATA }, consumes = "application/json", produces = "application/json")
public PolyDataResponse getPolymorphicDataRestService(@Valid @RequestBody PolyDataRequest request,
@RequestHeader(value = "X-FORWARDED-FOR") String clientIp) {
return rdwService.getPolymorphicDataRestService(request, clientIp);
}
|
@Test
public void getPolymorphicDataRestServiceTest() {
PolyDataResponse expectedResponse = new PolyDataResponse();
when(rdwServiceMock.getPolymorphicDataRestService(any(PolyDataRequest.class), anyString())).thenReturn(expectedResponse);
PolyDataResponse actualResponse = rdwController.getPolymorphicDataRestService(new PolyDataRequest(), "");
assertEquals(expectedResponse, actualResponse);
}
|
String upload(File report) {
LOG.debug("Upload report");
long startTime = System.currentTimeMillis();
Part filePart = new Part(MediaTypes.ZIP, report);
PostRequest post = new PostRequest("api/ce/submit")
.setMediaType(MediaTypes.PROTOBUF)
.setParam("projectKey", moduleHierarchy.root().key())
.setParam("projectName", moduleHierarchy.root().getOriginalName())
.setPart("report", filePart);
ciConfiguration.getDevOpsPlatformInfo().ifPresent(devOpsPlatformInfo -> {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(DEVOPS_PLATFORM_URL, devOpsPlatformInfo.getUrl()));
post.setParam(CHARACTERISTIC, buildCharacteristicParam(DEVOPS_PLATFORM_PROJECT_IDENTIFIER, devOpsPlatformInfo.getProjectIdentifier()));
});
String branchName = branchConfiguration.branchName();
if (branchName != null) {
if (branchConfiguration.branchType() != PULL_REQUEST) {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(CeTaskCharacteristics.BRANCH, branchName));
post.setParam(CHARACTERISTIC, buildCharacteristicParam(BRANCH_TYPE, branchConfiguration.branchType().name()));
} else {
post.setParam(CHARACTERISTIC, buildCharacteristicParam(CeTaskCharacteristics.PULL_REQUEST, branchConfiguration.pullRequestKey()));
}
}
WsResponse response;
try {
post.setWriteTimeOutInMs(properties.reportPublishTimeout() * 1000);
response = wsClient.call(post);
} catch (Exception e) {
throw new IllegalStateException("Failed to upload report: " + e.getMessage(), e);
}
try {
response.failIfNotSuccessful();
} catch (HttpException e) {
throw MessageException.of(String.format("Server failed to process report. Please check server logs: %s", DefaultScannerWsClient.createErrorMessage(e)));
}
try (InputStream protobuf = response.contentStream()) {
return Ce.SubmitResponse.parser().parseFrom(protobuf).getTaskId();
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
long stopTime = System.currentTimeMillis();
LOG.info("Analysis report uploaded in " + (stopTime - startTime) + "ms");
}
}
|
@Test
public void test_send_branches_characteristics() throws Exception {
String branchName = "feature";
when(branchConfiguration.branchName()).thenReturn(branchName);
when(branchConfiguration.branchType()).thenReturn(BRANCH);
WsResponse response = mock(WsResponse.class);
PipedOutputStream out = new PipedOutputStream();
PipedInputStream in = new PipedInputStream(out);
Ce.SubmitResponse.newBuilder().build().writeTo(out);
out.close();
when(response.failIfNotSuccessful()).thenReturn(response);
when(response.contentStream()).thenReturn(in);
when(wsClient.call(any(WsRequest.class))).thenReturn(response);
underTest.upload(reportTempFolder.newFile());
ArgumentCaptor<WsRequest> capture = ArgumentCaptor.forClass(WsRequest.class);
verify(wsClient).call(capture.capture());
WsRequest wsRequest = capture.getValue();
assertThat(wsRequest.getParameters().getKeys()).hasSize(2);
assertThat(wsRequest.getParameters().getValues("projectKey")).containsExactly("org.sonarsource.sonarqube:sonarqube");
assertThat(wsRequest.getParameters().getValues("characteristic"))
.containsExactlyInAnyOrder("branch=" + branchName, "branchType=" + BRANCH.name());
}
|
public void lockClusterState(ClusterStateChange stateChange, Address initiator, UUID txnId, long leaseTime,
int memberListVersion, long partitionStateStamp) {
Preconditions.checkNotNull(stateChange);
clusterServiceLock.lock();
try {
if (!node.getNodeExtension().isStartCompleted()) {
throw new IllegalStateException("Can not lock cluster state! Startup is not completed yet!");
}
if (node.getClusterService().getClusterJoinManager().isMastershipClaimInProgress()) {
throw new IllegalStateException("Can not lock cluster state! Mastership claim is in progress!");
}
if (stateChange.isOfType(Version.class)) {
validateNodeCompatibleWith((Version) stateChange.getNewState());
validateClusterVersionChange((Version) stateChange.getNewState());
}
checkMemberListVersion(memberListVersion);
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
lockOrExtendClusterState(initiator, txnId, leaseTime);
try {
// check migration status and partition-state version again
// if partition state is changed then release the lock and fail.
checkMigrationsAndPartitionStateStamp(stateChange, partitionStateStamp);
} catch (IllegalStateException e) {
stateLockRef.set(LockGuard.NOT_LOCKED);
throw e;
}
} finally {
clusterServiceLock.unlock();
}
}
|
@Test(expected = IllegalArgumentException.class)
public void test_lockClusterState_nonPositiveLeaseTime() throws Exception {
Address initiator = newAddress();
clusterStateManager.lockClusterState(ClusterStateChange.from(FROZEN), initiator, TXN, -1000, MEMBERLIST_VERSION,
PARTITION_STAMP);
}
|
@Override
public BackgroundException map(final ApiException failure) {
final StringBuilder buffer = new StringBuilder();
if(StringUtils.isNotBlank(failure.getMessage())) {
for(String s : StringUtils.split(failure.getMessage(), ",")) {
this.append(buffer, LocaleFactory.localizedString(s, "EUE"));
}
}
if(null != failure.getResponseHeaders()) {
final List<List<String>> headers = failure.getResponseHeaders().entrySet().stream()
.filter(e -> "X-UI-ENHANCED-STATUS".equalsIgnoreCase(e.getKey())).map(Map.Entry::getValue).collect(Collectors.toList());
for(List<String> header : headers) {
for(String s : header) {
this.append(buffer, LocaleFactory.localizedString(s, "EUE"));
}
}
}
for(Throwable cause : ExceptionUtils.getThrowableList(failure)) {
if(cause instanceof ProcessingException) {
return new InteroperabilityException(cause.getMessage(), cause);
}
if(cause instanceof SocketException) {
// Map Connection has been shutdown: javax.net.ssl.SSLException: java.net.SocketException: Broken pipe
return new DefaultSocketExceptionMappingService().map((SocketException) cause);
}
if(cause instanceof HttpResponseException) {
return new DefaultHttpResponseExceptionMappingService().map((HttpResponseException) cause);
}
if(cause instanceof IOException) {
return new DefaultIOExceptionMappingService().map((IOException) cause);
}
if(cause instanceof IllegalStateException) {
// Caused by: ApiException: javax.ws.rs.ProcessingException: java.lang.IllegalStateException: Connection pool shut down
return new ConnectionCanceledException(cause);
}
}
switch(failure.getCode()) {
case HttpStatus.SC_UNPROCESSABLE_ENTITY:
return new LockedException(buffer.toString(), failure);
case HttpStatus.SC_TOO_MANY_REQUESTS:
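// If the server sent a Retry-After header (in seconds), propagate it as the retry delay for the throttled request.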
final Optional<Map.Entry<String, List<String>>> header
= failure.getResponseHeaders().entrySet().stream().filter(e -> HttpHeaders.RETRY_AFTER.equals(e.getKey())).findAny();
if(header.isPresent()) {
final Optional<String> value = header.get().getValue().stream().findAny();
return value.map(s -> new RetriableAccessDeniedException(buffer.toString(),
Duration.ofSeconds(Long.parseLong(s)), failure)).orElseGet(() -> new RetriableAccessDeniedException(buffer.toString(), failure));
}
}
return new DefaultHttpResponseExceptionMappingService().map(failure, buffer, failure.getCode());
}
|
@Test
public void testParseError() {
assertEquals("LIMIT_MAX_FOLDER_COUNT. LIMIT_MAX_RESOURCE_COUNT. Please contact your web hosting service provider for assistance.",
new EueExceptionMappingService().map(new ApiException("LIMIT_MAX_FOLDER_COUNT,LIMIT_MAX_RESOURCE_COUNT", null, 500, null)).getDetail());
}
|
@Override
public void execute(final ConnectionSession connectionSession) {
String name = showStatement.getName().orElse("").toLowerCase(Locale.ROOT);
if ("ALL".equalsIgnoreCase(name)) {
executeShowAll(connectionSession);
return;
}
queryResultMetaData = new RawQueryResultMetaData(Collections.singletonList(new RawQueryResultColumnMetaData("", "", name, Types.VARCHAR, "VARCHAR", -1, 0)));
VariableRowDataGenerator variableRowDataGenerator = VARIABLE_ROW_DATA_GENERATORS.getOrDefault(name, unused -> new String[]{"", "", ""});
mergedResult = new LocalDataMergedResult(Collections.singletonList(new LocalDataQueryResultRow(variableRowDataGenerator.getVariable(connectionSession)[1])));
}
|
@Test
void assertExecuteShowAll() throws SQLException {
ConnectionSession connectionSession = mock(ConnectionSession.class);
PostgreSQLShowVariableExecutor executor = new PostgreSQLShowVariableExecutor(new PostgreSQLShowStatement("ALL"));
executor.execute(connectionSession);
QueryResultMetaData actualMetaData = executor.getQueryResultMetaData();
assertThat(actualMetaData.getColumnCount(), is(3));
assertThat(actualMetaData.getColumnLabel(1), is("name"));
assertThat(actualMetaData.getColumnLabel(2), is("setting"));
assertThat(actualMetaData.getColumnLabel(3), is("description"));
MergedResult actualResult = executor.getMergedResult();
Map<String, String> expected = new LinkedHashMap<>(7, 1F);
expected.put("application_name", "PostgreSQL");
expected.put("client_encoding", "UTF8");
expected.put("integer_datetimes", "on");
expected.put("TimeZone", "Etc/UTC");
expected.put("transaction_isolation", "read committed");
expected.put("transaction_read_only", "off");
expected.put("server_version", ShardingSphereVersion.VERSION);
for (Entry<String, String> entry : expected.entrySet()) {
assertTrue(actualResult.next());
assertThat(actualResult.getValue(1, String.class), is(entry.getKey()));
assertThat(actualResult.getValue(2, String.class), is(entry.getValue()));
}
assertFalse(actualResult.next());
}
|
public TimelineWriteResponse putEntities(TimelineEntities entities,
UserGroupInformation callerUgi) throws IOException {
LOG.debug("putEntities(entities={}, callerUgi={})", entities, callerUgi);
TimelineWriteResponse response = null;
try {
boolean isStorageUp = checkRetryWithSleep();
if (isStorageUp) {
// synchronize on the writer object so that no other threads can
// flush the writer buffer concurrently and swallow any exception
// caused by the timeline entities that are being put here.
synchronized (writer) {
response = writeTimelineEntities(entities, callerUgi);
flushBufferedTimelineEntities();
}
} else {
String msg = String.format("Failed to putEntities(" +
"entities=%s, callerUgi=%s) as Timeline Storage is Down",
entities, callerUgi);
throw new IOException(msg);
}
} catch (InterruptedException ex) {
String msg = String.format("Interrupted while retrying to putEntities(" +
"entities=%s, callerUgi=%s)", entities, callerUgi);
throw new IOException(msg);
}
return response;
}
|
@Test
void testPutEntity() throws IOException {
TimelineWriter writer = mock(TimelineWriter.class);
TimelineHealth timelineHealth = new TimelineHealth(TimelineHealth.
TimelineHealthStatus.RUNNING, "");
when(writer.getHealthStatus()).thenReturn(timelineHealth);
Configuration conf = new Configuration();
conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 5);
conf.setLong(YarnConfiguration.TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS,
500L);
TimelineCollector collector = new TimelineCollectorForTest(writer);
collector.init(conf);
TimelineEntities entities = generateTestEntities(1, 1);
collector.putEntities(
entities, UserGroupInformation.createRemoteUser("test-user"));
verify(writer, times(1)).write(any(TimelineCollectorContext.class),
any(TimelineEntities.class), any(UserGroupInformation.class));
verify(writer, times(1)).flush();
}
|
@Override
@Deprecated
public void process(final org.apache.kafka.streams.processor.ProcessorSupplier<? super K, ? super V> processorSupplier,
final String... stateStoreNames) {
process(processorSupplier, Named.as(builder.newProcessorName(PROCESSOR_NAME)), stateStoreNames);
}
|
@Test
public void shouldNotAllowNullStoreNamesOnProcessWithNamed() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.process(processorSupplier, Named.as("processor"), (String[]) null));
assertThat(exception.getMessage(), equalTo("stateStoreNames can't be a null array"));
}
|
public static void verifyPrecondition(boolean assertionResult, String errorMessage) {
if (!assertionResult) {
throw new RuntimeException(errorMessage);
}
}
|
@Test
public void testVerifyPreconditionFailure() {
assertThrows(RuntimeException.class, () -> verifyPrecondition(false, ""));
}
|
public static DynamicMessage messageFromTableRow(
SchemaInformation schemaInformation,
Descriptor descriptor,
TableRow tableRow,
boolean ignoreUnknownValues,
boolean allowMissingRequiredFields,
final @Nullable TableRow unknownFields,
@Nullable String changeType,
long changeSequenceNum)
throws SchemaConversionException {
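// Convenience overload: renders the numeric change sequence number as a hex string before delegating.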
return messageFromTableRow(
schemaInformation,
descriptor,
tableRow,
ignoreUnknownValues,
allowMissingRequiredFields,
unknownFields,
changeType,
Long.toHexString(changeSequenceNum));
}
|
@Test
public void testMessageFromTableRow() throws Exception {
TableRow tableRow =
new TableRow()
.set("nestedValue1", BASE_TABLE_ROW)
.set("nestedValue2", BASE_TABLE_ROW)
.set("nestedValueNoF1", BASE_TABLE_ROW_NO_F)
.set("nestedValueNoF2", BASE_TABLE_ROW_NO_F);
Descriptor descriptor =
TableRowToStorageApiProto.getDescriptorFromTableSchema(NESTED_TABLE_SCHEMA, true, false);
TableRowToStorageApiProto.SchemaInformation schemaInformation =
TableRowToStorageApiProto.SchemaInformation.fromTableSchema(NESTED_TABLE_SCHEMA);
DynamicMessage msg =
TableRowToStorageApiProto.messageFromTableRow(
schemaInformation, descriptor, tableRow, false, false, null, null, -1);
assertEquals(4, msg.getAllFields().size());
Map<String, FieldDescriptor> fieldDescriptors =
descriptor.getFields().stream()
.collect(Collectors.toMap(FieldDescriptor::getName, Functions.identity()));
assertBaseRecord((DynamicMessage) msg.getField(fieldDescriptors.get("nestedvalue1")), true);
assertBaseRecord((DynamicMessage) msg.getField(fieldDescriptors.get("nestedvalue2")), true);
assertBaseRecord((DynamicMessage) msg.getField(fieldDescriptors.get("nestedvaluenof1")), false);
assertBaseRecord((DynamicMessage) msg.getField(fieldDescriptors.get("nestedvaluenof2")), false);
}
|
public void replayAddKey(EncryptionKeyPB keyPB) {
EncryptionKey key = create(keyPB);
keysLock.writeLock().lock();
try {
idToKey.put(key.id, key);
} finally {
keysLock.writeLock().unlock();
}
}
|
@Test
public void testReplayAddKey() {
KeyMgr keyMgr = new KeyMgr();
EncryptionKeyPB pb = new EncryptionKeyPB();
pb.id = 1L;
pb.algorithm = EncryptionAlgorithmPB.AES_128;
pb.encryptedKey = new byte[16];
pb.type = EncryptionKeyTypePB.NORMAL_KEY;
pb.createTime = 3L;
keyMgr.replayAddKey(pb);
}
|
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
final SMBSession.DiskShareWrapper share = session.openShare(folder);
try {
share.get().mkdir(new SMBPathContainerService(session).getKey(folder));
}
catch(SMBRuntimeException e) {
throw new SMBExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
finally {
session.releaseShare(share);
}
return folder;
}
|
@Test
public void testMakeDirectory() throws Exception {
final Path test = new SMBDirectoryFeature(session).mkdir(
new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
assertTrue(new SMBFindFeature(session).find(test));
assertThrows(ConflictException.class, () -> new SMBDirectoryFeature(session).mkdir(test, new TransferStatus()));
new SMBDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static Result find(List<Path> files, Consumer<LogEvent> logger) {
List<String> mainClasses = new ArrayList<>();
for (Path file : files) {
// Makes sure classFile is valid.
if (!Files.exists(file)) {
logger.accept(LogEvent.debug("MainClassFinder: " + file + " does not exist; ignoring"));
continue;
}
if (!Files.isRegularFile(file)) {
logger.accept(
LogEvent.debug("MainClassFinder: " + file + " is not a regular file; skipping"));
continue;
}
if (!file.toString().endsWith(".class")) {
logger.accept(
LogEvent.debug("MainClassFinder: " + file + " is not a class file; skipping"));
continue;
}
MainClassVisitor mainClassVisitor = new MainClassVisitor();
try (InputStream classFileInputStream = Files.newInputStream(file)) {
ClassReader reader = new ClassReader(classFileInputStream);
reader.accept(mainClassVisitor, 0);
if (mainClassVisitor.visitedMainClass) {
mainClasses.add(reader.getClassName().replace('/', '.'));
}
} catch (IllegalArgumentException ex) {
throw new UnsupportedOperationException(
"Check the full stace trace, and if the root cause is from ASM ClassReader about "
+ "unsupported class file version, see "
+ "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md"
+ "#i-am-seeing-unsupported-class-file-major-version-when-building",
ex);
} catch (ArrayIndexOutOfBoundsException ignored) {
// Not a valid class file (thrown by ClassReader if it reads an invalid format)
logger.accept(LogEvent.warn("Invalid class file found: " + file));
} catch (IOException ignored) {
// Could not read class file.
logger.accept(LogEvent.warn("Could not read file: " + file));
}
}
if (mainClasses.size() == 1) {
// Valid class found.
return Result.success(mainClasses.get(0));
}
if (mainClasses.isEmpty()) {
// No main class found anywhere.
return Result.mainClassNotFound();
}
// More than one main class found.
return Result.multipleMainClasses(mainClasses);
}
|
@Test
public void testFindMainClass_innerClasses() throws URISyntaxException, IOException {
Path rootDirectory =
Paths.get(Resources.getResource("core/class-finder-tests/inner-classes").toURI());
MainClassFinder.Result mainClassFinderResult =
MainClassFinder.find(new DirectoryWalker(rootDirectory).walk(), logEventConsumer);
Assert.assertSame(Result.Type.MAIN_CLASS_FOUND, mainClassFinderResult.getType());
MatcherAssert.assertThat(
mainClassFinderResult.getFoundMainClass(),
CoreMatchers.containsString("HelloWorld$InnerClass"));
}
|
public static SonarRuntime forSonarQube(Version apiVersion, SonarQubeSide side, SonarEdition edition) {
return new SonarRuntimeImpl(apiVersion, SonarProduct.SONARQUBE, side, edition);
}
|
@Test(expected = IllegalArgumentException.class)
public void sonarqube_requires_side() {
SonarRuntimeImpl.forSonarQube(A_VERSION, null, null);
}
|
public static TimeLimiterMetrics ofTimeLimiter(TimeLimiter timeLimiter) {
return new TimeLimiterMetrics(List.of(timeLimiter));
}
|
@Test
public void shouldRecordErrors() {
TimeLimiter timeLimiter = TimeLimiter.of(TimeLimiterConfig.ofDefaults());
metricRegistry.registerAll(TimeLimiterMetrics.ofTimeLimiter(timeLimiter));
timeLimiter.onError(new RuntimeException());
timeLimiter.onError(new RuntimeException());
assertThat(metricRegistry).hasMetricsSize(3);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + SUCCESSFUL)
.hasValue(0L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + FAILED)
.hasValue(2L);
assertThat(metricRegistry).counter(DEFAULT_PREFIX + TIMEOUT)
.hasValue(0L);
}
|
@Override
public ThreadPoolBulkheadConfig getBulkheadConfig() {
return config;
}
|
@Test
public void testCustomSettings() {
assertThat(bulkhead.getBulkheadConfig().getMaxThreadPoolSize()).isEqualTo(2);
assertThat(bulkhead.getBulkheadConfig().getQueueCapacity()).isEqualTo(10);
assertThat(bulkhead.getBulkheadConfig().getCoreThreadPoolSize()).isEqualTo(1);
assertThat(bulkhead.getBulkheadConfig().getKeepAliveDuration())
.isEqualTo(Duration.ofMillis(10));
}
|
public int numBufferedStreams() {
return pendingStreams.size();
}
|
@Test
public void receivingGoAwayFailsBufferedStreams() throws Http2Exception {
encoder.writeSettingsAck(ctx, newPromise());
setMaxConcurrentStreams(5);
int streamId = 3;
List<ChannelFuture> futures = new ArrayList<ChannelFuture>();
for (int i = 0; i < 9; i++) {
futures.add(encoderWriteHeaders(streamId, newPromise()));
streamId += 2;
}
assertEquals(5, connection.numActiveStreams());
assertEquals(4, encoder.numBufferedStreams());
connection.goAwayReceived(11, 8, EMPTY_BUFFER);
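// GOAWAY with lastStreamId 11 leaves the 5 active streams (ids 3-11) untouched and fails the 4 buffered streams (ids 13-19).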
assertEquals(5, connection.numActiveStreams());
assertEquals(0, encoder.numBufferedStreams());
int failCount = 0;
for (ChannelFuture f : futures) {
if (f.cause() != null) {
assertTrue(f.cause() instanceof Http2GoAwayException);
failCount++;
}
}
assertEquals(4, failCount);
}
|
@VisibleForTesting
int log2Floor(long n) {
checkArgument(n >= 0);
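// Floor of the base-2 logarithm, e.g. log2Floor(1) == 0 and log2Floor(9) == 3; 0 is mapped to the sentinel -1.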
return n == 0 ? -1 : LongMath.log2(n, RoundingMode.FLOOR);
}
|
@Test
public void testLog2Floor_zero() {
OrderedCode orderedCode = new OrderedCode();
assertEquals(-1, orderedCode.log2Floor(0));
}
|
public void endTransactionOneway(
final String addr,
final EndTransactionRequestHeader requestHeader,
final String remark,
final long timeoutMillis
) throws RemotingException, InterruptedException {
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.END_TRANSACTION, requestHeader);
request.setRemark(remark);
this.remotingClient.invokeOneway(addr, request, timeoutMillis);
}
|
@Test
public void testEndTransactionOneway() throws RemotingException, InterruptedException {
mockInvokeSync();
EndTransactionRequestHeader requestHeader = mock(EndTransactionRequestHeader.class);
mqClientAPI.endTransactionOneway(defaultBrokerAddr, requestHeader, "", defaultTimeout);
}
|
public Schema mergeTables(
Map<FeatureOption, MergingStrategy> mergingStrategies,
Schema sourceSchema,
List<SqlNode> derivedColumns,
List<SqlWatermark> derivedWatermarkSpecs,
SqlTableConstraint derivedPrimaryKey) {
SchemaBuilder schemaBuilder =
new SchemaBuilder(
mergingStrategies,
sourceSchema,
(FlinkTypeFactory) validator.getTypeFactory(),
dataTypeFactory,
validator,
escapeExpression);
schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns);
schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey);
return schemaBuilder.build();
}
|
@Test
void mergeOverwritingComputedColumnWithMetadataColumn() {
Schema sourceSchema =
Schema.newBuilder()
.column("one", DataTypes.INT())
.columnByExpression("two", "one + 3")
.build();
List<SqlNode> derivedColumns =
Collections.singletonList(metadataColumn("two", DataTypes.BOOLEAN(), false));
Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
mergingStrategies.put(FeatureOption.METADATA, MergingStrategy.OVERWRITING);
assertThatThrownBy(
() ->
util.mergeTables(
mergingStrategies,
sourceSchema,
derivedColumns,
Collections.emptyList(),
null))
.isInstanceOf(ValidationException.class)
.hasMessage(
"A column named 'two' already exists in the base table."
+ " Metadata columns can only overwrite other metadata columns.");
}
|
public static List<ACL> parseACLs(String aclString) throws
BadAclFormatException {
List<ACL> acl = Lists.newArrayList();
if (aclString == null) {
return acl;
}
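// Illustrative input: "sasl:alice:cdrwa,world:anyone:r" - each comma-separated entry must be of the form scheme:id:perm.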
List<String> aclComps = Lists.newArrayList(
Splitter.on(',').omitEmptyStrings().trimResults()
.split(aclString));
for (String a : aclComps) {
// from ZooKeeperMain private method
int firstColon = a.indexOf(':');
int lastColon = a.lastIndexOf(':');
if (firstColon == -1 || lastColon == -1 || firstColon == lastColon) {
throw new BadAclFormatException(
"ACL '" + a + "' not of expected form scheme:id:perm");
}
ACL newAcl = new ACL();
newAcl.setId(new Id(a.substring(0, firstColon), a.substring(
firstColon + 1, lastColon)));
newAcl.setPerms(getPermFromString(a.substring(lastColon + 1)));
acl.add(newAcl);
}
return acl;
}
|
@Test
public void testEmptyACL() {
List<ACL> result = ZKUtil.parseACLs("");
assertTrue(result.isEmpty());
}
|
@Override
public void debug(String msg) {
logger.debug(msg);
}
|
@Test
void testDebug() {
jobRunrDashboardLogger.debug("Debug");
verify(slfLogger).debug("Debug");
}
|
public static JavaBeanDescriptor serialize(Object obj) {
return serialize(obj, JavaBeanAccessor.FIELD);
}
|
@Test
void testSerialize_Primitive_NUll() {
JavaBeanDescriptor descriptor;
descriptor = JavaBeanSerializeUtil.serialize(null);
Assertions.assertNull(descriptor);
}
|
@Override
public void doHealthCheck() {
try {
initIfNecessary();
for (Service each : client.getAllPublishedService()) {
if (switchDomain.isHealthCheckEnabled(each.getGroupedServiceName())) {
InstancePublishInfo instancePublishInfo = client.getInstancePublishInfo(each);
ClusterMetadata metadata = getClusterMetadata(each, instancePublishInfo);
ApplicationUtils.getBean(HealthCheckProcessorV2Delegate.class).process(this, each, metadata);
if (Loggers.EVT_LOG.isDebugEnabled()) {
Loggers.EVT_LOG.debug("[HEALTH-CHECK] schedule health check task: {}", client.getClientId());
}
}
}
} catch (Throwable e) {
Loggers.SRV_LOG.error("[HEALTH-CHECK] error while process health check for {}", client.getClientId(), e);
} finally {
if (!cancelled) {
initCheckRT();
HealthCheckReactor.scheduleCheck(this);
// worst == 0 means never checked
if (this.getCheckRtWorst() > 0) {
// TLog doesn't support float so we must convert it into long
long checkRtLastLast = getCheckRtLastLast();
this.setCheckRtLastLast(this.getCheckRtLast());
if (checkRtLastLast > 0) {
long diff = ((this.getCheckRtLast() - this.getCheckRtLastLast()) * 10000) / checkRtLastLast;
if (Loggers.CHECK_RT.isDebugEnabled()) {
Loggers.CHECK_RT.debug("{}->normalized: {}, worst: {}, best: {}, last: {}, diff: {}",
client.getClientId(), this.getCheckRtNormalized(), this.getCheckRtWorst(),
this.getCheckRtBest(), this.getCheckRtLast(), diff);
}
}
}
}
}
}
|
@Test
void testDoHealthCheck() {
when(ipPortBasedClient.getAllPublishedService()).thenReturn(returnService());
healthCheckTaskV2.setCheckRtWorst(1);
healthCheckTaskV2.setCheckRtLastLast(1);
assertEquals(1, healthCheckTaskV2.getCheckRtWorst());
assertEquals(1, healthCheckTaskV2.getCheckRtLastLast());
healthCheckTaskV2.run();
healthCheckTaskV2.passIntercept();
healthCheckTaskV2.doHealthCheck();
verify(ipPortBasedClient, times(3)).getAllPublishedService();
verify(switchDomain, times(3)).isHealthCheckEnabled(service.getGroupedServiceName());
}
|
@SuppressFBWarnings(value = "DMI_RANDOM_USED_ONLY_ONCE")
public static LocalCommands open(
final KsqlEngine ksqlEngine,
final File directory
) {
if (!directory.exists()) {
if (!directory.mkdirs()) {
throw new KsqlServerException("Couldn't create the local commands directory: "
+ directory.getPath()
+ "\n Make sure the directory exists and is readable/writable for KSQL server "
+ "\n or its parent directory is readable/writable by KSQL server"
+ "\n or change it to a readable/writable directory by setting '"
+ KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
+ "' config in the properties file."
);
}
try {
Files.setPosixFilePermissions(directory.toPath(),
PosixFilePermissions.fromString("rwx------"));
} catch (final IOException e) {
throw new KsqlServerException(String.format(
"Couldn't set POSIX permissions on the backups directory: %s. Error = %s",
directory.getPath(), e.getMessage()));
}
}
if (!directory.isDirectory()) {
throw new KsqlServerException(directory.getPath()
+ " is not a directory."
+ "\n Make sure the directory exists and is readable/writable for KSQL server "
+ "\n or its parent directory is readable/writable by KSQL server"
+ "\n or change it to a readable/writable directory by setting '"
+ KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
+ "' config in the properties file."
);
}
if (!directory.canWrite() || !directory.canRead() || !directory.canExecute()) {
throw new KsqlServerException("The local commands directory is not readable/writable "
+ "for KSQL server: "
+ directory.getPath()
+ "\n Make sure the directory exists and is readable/writable for KSQL server "
+ "\n or change it to a readable/writable directory by setting '"
+ KsqlRestConfig.KSQL_LOCAL_COMMANDS_LOCATION_CONFIG
+ "' config in the properties file."
);
}
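// Note (inferred rationale): the file name embeds the current time and a random hex suffix, presumably so multiple server instances writing to the same directory don't collide.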
final File file = new File(directory, String.format("local_commands_%d_%s%s",
System.currentTimeMillis(), Integer.toHexString(RANDOM.nextInt()),
LOCAL_COMMANDS_FILE_SUFFIX));
return new LocalCommands(directory, ksqlEngine, LocalCommandsFile.createWriteable(file));
}
|
@Test
public void shouldThrowWhenCommandLocationIsNotWritable() throws IOException {
// Given
final File file = commandsDir.newFolder();
Files.setPosixFilePermissions(file.toPath(), PosixFilePermissions.fromString("r-x------"));
// When
final Exception e = assertThrows(
KsqlServerException.class,
() -> LocalCommands.open(ksqlEngine, file)
);
// Then
assertThat(e.getMessage(), containsString(String.format(
"The local commands directory is not readable/writable for KSQL server: %s",
file.getAbsolutePath()
)));
}
|
@Override
public List<String> splitAndEvaluate() {
return Strings.isNullOrEmpty(inlineExpression) ? Collections.emptyList() : split(inlineExpression);
}
|
@Test
void assertEvaluateForLong() {
StringBuilder expression = new StringBuilder();
for (int i = 0; i < 1024; i++) {
expression.append("ds_");
expression.append(i / 64);
expression.append(".t_user_");
expression.append(i);
if (i != 1023) {
expression.append(",");
}
}
List<String> expected = TypedSPILoader.getService(InlineExpressionParser.class, "LITERAL", PropertiesBuilder.build(
new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, expression.toString()))).splitAndEvaluate();
assertThat(expected.size(), is(1024));
assertThat(expected, hasItems("ds_0.t_user_0", "ds_15.t_user_1023"));
}
|
public static <P> String getMethodName(Func1<P, ?> func) {
return resolve(func).getImplMethodName();
}
|
@Test
public void getMethodNameTest() {
final String methodName = LambdaUtil.getMethodName(MyTeacher::getAge);
assertEquals("getAge", methodName);
}
|
@HighFrequencyInvocation
public Optional<ShardingSphereUser> findUser(final Grantee grantee) {
return configuration.getUsers().stream().filter(each -> each.getGrantee().accept(grantee)).findFirst();
}
|
@Test
void assertFindUser() {
AuthorityRule rule = createAuthorityRule();
Optional<ShardingSphereUser> actual = rule.findUser(new Grantee("admin", "localhost"));
assertTrue(actual.isPresent());
assertThat(actual.get().getGrantee().getUsername(), is("admin"));
assertThat(actual.get().getGrantee().getHostname(), is("localhost"));
}
|
@Override
public URI getCallBackUri() {
return callBackUri;
}
|
@Test
public void testGetCallBackUri() {
assertEquals("callback", hystrixCommand.getCallBackUri().getHost());
}
|
@Override
public int size() {
return count(members, selector);
}
|
@Test
public void testSizeWhenAllSelected() {
MemberSelectingCollection<MemberImpl> collection = new MemberSelectingCollection<>(members,
NO_OP_MEMBER_SELECTOR);
assertEquals(3, collection.size());
}
|
public DataSource createDataSourceProxy(DataSource dataSource) {
return createDataSourceProxy(null, dataSource);
}
|
@Test
public void testCreateDataSourceProxy() throws Exception {
// clean up in case TestMonitoringSpringInterceptor was executed just before
cleanUp();
assertTrue("getBasicDataSourceProperties0",
JdbcWrapper.getBasicDataSourceProperties().isEmpty());
assertEquals("getMaxConnectionCount0", -1, JdbcWrapper.getMaxConnectionCount());
final org.apache.tomcat.jdbc.pool.DataSource tomcatJdbcDataSource = new org.apache.tomcat.jdbc.pool.DataSource();
tomcatJdbcDataSource.setUrl(H2_DATABASE_URL);
tomcatJdbcDataSource.setDriverClassName("org.h2.Driver");
tomcatJdbcDataSource.setMaxActive(123);
final DataSource tomcatJdbcProxy = jdbcWrapper.createDataSourceProxy("test2",
tomcatJdbcDataSource);
assertNotNull("createDataSourceProxy1", tomcatJdbcProxy);
tomcatJdbcProxy.getConnection().close();
assertFalse("getBasicDataSourceProperties1",
JdbcWrapper.getBasicDataSourceProperties().isEmpty());
assertEquals("getMaxConnectionCount1", 123, JdbcWrapper.getMaxConnectionCount());
final org.apache.commons.dbcp2.BasicDataSource dbcp2DataSource = new org.apache.commons.dbcp2.BasicDataSource();
dbcp2DataSource.setUrl(H2_DATABASE_URL);
dbcp2DataSource.setMaxTotal(456);
final DataSource dbcp2Proxy = jdbcWrapper.createDataSourceProxy(dbcp2DataSource);
assertNotNull("createDataSourceProxy2b", dbcp2Proxy);
final org.apache.tomcat.dbcp.dbcp2.BasicDataSource tomcat2DataSource = new org.apache.tomcat.dbcp.dbcp2.BasicDataSource();
tomcat2DataSource.setUrl(H2_DATABASE_URL);
tomcat2DataSource.setMaxTotal(789);
final DataSource tomcat2Proxy = jdbcWrapper.createDataSourceProxy("test",
tomcat2DataSource);
assertNotNull("createDataSourceProxy3b", tomcat2Proxy);
}
|
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
SourceConfig mergedConfig = clone(existingConfig);
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
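// The identity fields above must match; below, only provided (non-empty / non-null) values overwrite the merged config.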
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getTopicName())) {
mergedConfig.setTopicName(newConfig.getTopicName());
}
if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
}
if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
mergedConfig.setSchemaType(newConfig.getSchemaType());
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
}
if (newConfig.getBatchSourceConfig() != null) {
validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
}
|
@Test
public void testMergeDifferentResources() {
SourceConfig sourceConfig = createSourceConfig();
Resources resources = new Resources();
resources.setCpu(0.3);
resources.setRam(1232L);
resources.setDisk(123456L);
SourceConfig newSourceConfig = createUpdatedSourceConfig("resources", resources);
SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
assertEquals(
mergedConfig.getResources(),
resources
);
mergedConfig.setResources(sourceConfig.getResources());
assertEquals(
new Gson().toJson(sourceConfig),
new Gson().toJson(mergedConfig)
);
}
|
@Override
public LogicalSchema getSchema() {
return schema;
}
|
@Test
public void shouldHaveFullyQualifiedJoinSchemaWithSyntheticKey() {
// Given:
when(joinKey.resolveKeyName(any(), any())).thenReturn(SYNTH_KEY);
// When:
final JoinNode joinNode = new JoinNode(nodeId, OUTER, joinKey, true, left,
right, empty(),"KAFKA");
// Then:
assertThat(joinNode.getSchema(), is(LogicalSchema.builder()
.keyColumn(SYNTH_KEY, SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_C0"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_L1"), SqlTypes.STRING)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_ROWTIME"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_ROWPARTITION"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_ROWOFFSET"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(LEFT_ALIAS.text() + "_leftKey"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_C0"), SqlTypes.STRING)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_R1"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_ROWTIME"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_ROWPARTITION"), SqlTypes.INTEGER)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_ROWOFFSET"), SqlTypes.BIGINT)
.valueColumn(ColumnName.of(RIGHT_ALIAS.text() + "_rightKey"), SqlTypes.BIGINT)
.valueColumn(SYNTH_KEY, SqlTypes.BIGINT)
.build()
));
}
|
@Override
public String convert(final ReadwriteSplittingRuleConfiguration ruleConfig) {
if (ruleConfig.getDataSourceGroups().isEmpty()) {
return "";
}
StringBuilder result = new StringBuilder(ReadwriteSplittingDistSQLConstants.CREATE_READWRITE_SPLITTING_RULE);
Iterator<ReadwriteSplittingDataSourceGroupRuleConfiguration> iterator = ruleConfig.getDataSourceGroups().iterator();
while (iterator.hasNext()) {
appendStaticReadWriteSplittingRule(ruleConfig.getLoadBalancers(), iterator.next(), result);
if (iterator.hasNext()) {
result.append(DistSQLConstants.COMMA);
}
}
result.append(DistSQLConstants.SEMI);
return result.toString();
}
|
@Test
void assertConvert() {
ReadwriteSplittingDataSourceGroupRuleConfiguration dataSourceGroupConfig =
new ReadwriteSplittingDataSourceGroupRuleConfiguration("readwrite_ds", "ds_primary", Arrays.asList("ds_slave_0", "ds_slave_1"), "test");
ReadwriteSplittingRuleConfiguration readwriteSplittingRuleConfig = new ReadwriteSplittingRuleConfiguration(Collections.singleton(dataSourceGroupConfig),
Collections.singletonMap("test", new AlgorithmConfiguration("random", PropertiesBuilder.build(new PropertiesBuilder.Property("read_weight", "2:1")))));
ReadwriteSplittingRuleConfigurationToDistSQLConverter readwriteSplittingRuleConfigurationToDistSQLConverter = new ReadwriteSplittingRuleConfigurationToDistSQLConverter();
assertThat(readwriteSplittingRuleConfigurationToDistSQLConverter.convert(readwriteSplittingRuleConfig),
is("CREATE READWRITE_SPLITTING RULE readwrite_ds (" + System.lineSeparator() + "WRITE_STORAGE_UNIT=ds_primary," + System.lineSeparator() + "READ_STORAGE_UNITS(ds_slave_0,ds_slave_1),"
+ System.lineSeparator() + "TRANSACTIONAL_READ_QUERY_STRATEGY='DYNAMIC'," + System.lineSeparator() + "TYPE(NAME='random', PROPERTIES('read_weight'='2:1'))"
+ System.lineSeparator() + ");"));
}
|
public static AbstractPredictor getZooPredictor(DJLEndpoint endpoint)
throws ModelNotFoundException, MalformedModelException, IOException {
String applicationPath = endpoint.getApplication();
// CV
if (IMAGE_CLASSIFICATION.getPath().equals(applicationPath)) {
return new ZooImageClassificationPredictor(endpoint);
} else if (OBJECT_DETECTION.getPath().equals(applicationPath)) {
return new ZooObjectDetectionPredictor(endpoint);
} else if (SEMANTIC_SEGMENTATION.getPath().equals(applicationPath)) {
return new ZooSemanticSegmentationPredictor(endpoint);
} else if (INSTANCE_SEGMENTATION.getPath().equals(applicationPath)) {
return new ZooInstanceSegmentationPredictor(endpoint);
} else if (POSE_ESTIMATION.getPath().equals(applicationPath)) {
return new ZooPoseEstimationPredictor(endpoint);
} else if (ACTION_RECOGNITION.getPath().equals(applicationPath)) {
return new ZooActionRecognitionPredictor(endpoint);
} else if (WORD_RECOGNITION.getPath().equals(applicationPath)) {
return new ZooWordRecognitionPredictor(endpoint);
} else if (IMAGE_GENERATION.getPath().equals(applicationPath)) {
return new ZooImageGenerationPredictor(endpoint);
} else if (IMAGE_ENHANCEMENT.getPath().equals(applicationPath)) {
return new ZooImageEnhancementPredictor(endpoint);
}
// NLP
if (FILL_MASK.getPath().equals(applicationPath)) {
return new ZooFillMaskPredictor(endpoint);
} else if (QUESTION_ANSWER.getPath().equals(applicationPath)) {
return new ZooQuestionAnswerPredictor(endpoint);
} else if (TEXT_CLASSIFICATION.getPath().equals(applicationPath)) {
return new ZooTextClassificationPredictor(endpoint);
} else if (SENTIMENT_ANALYSIS.getPath().equals(applicationPath)) {
return new ZooSentimentAnalysisPredictor(endpoint);
} else if (TOKEN_CLASSIFICATION.getPath().equals(applicationPath)) {
return new ZooTokenClassificationPredictor(endpoint);
} else if (WORD_EMBEDDING.getPath().equals(applicationPath)) {
return new ZooWordEmbeddingPredictor(endpoint);
} else if (TEXT_GENERATION.getPath().equals(applicationPath)) {
return new ZooTextGenerationPredictor(endpoint);
} else if (MACHINE_TRANSLATION.getPath().equals(applicationPath)) {
return new ZooMachineTranslationPredictor(endpoint);
} else if (MULTIPLE_CHOICE.getPath().equals(applicationPath)) {
return new ZooMultipleChoicePredictor(endpoint);
} else if (TEXT_EMBEDDING.getPath().equals(applicationPath)) {
return new ZooTextEmbeddingPredictor(endpoint);
}
// Tabular
if (LINEAR_REGRESSION.getPath().equals(applicationPath)) {
return new ZooLinearRegressionPredictor(endpoint);
} else if (SOFTMAX_REGRESSION.getPath().equals(applicationPath)) {
return new ZooSoftmaxRegressionPredictor(endpoint);
}
// Audio
if (Application.Audio.ANY.getPath().equals(applicationPath)) {
return new ZooAudioPredictor(endpoint);
}
// Time Series
if (FORECASTING.getPath().equals(applicationPath)) {
return new ZooForecastingPredictor(endpoint);
}
throw new RuntimeCamelException("Application not supported: " + applicationPath);
}
|
@Test
void testGetZooPredictor() throws ModelNotFoundException, MalformedModelException, IOException {
// CV
assertInstanceOf(ZooImageClassificationPredictor.class,
getZooPredictor(zooEndpoint("cv/image_classification", "ai.djl.zoo:mlp:0.0.3")));
assertInstanceOf(ZooObjectDetectionPredictor.class,
getZooPredictor(zooEndpoint("cv/object_detection", "ai.djl.zoo:ssd:0.0.2")));
assertInstanceOf(ZooSemanticSegmentationPredictor.class,
getZooPredictor(zooEndpoint("cv/semantic_segmentation", "ai.djl.pytorch:deeplabv3:0.0.1")));
assertInstanceOf(ZooInstanceSegmentationPredictor.class,
getZooPredictor(zooEndpoint("cv/instance_segmentation", "ai.djl.mxnet:mask_rcnn:0.0.1")));
assertInstanceOf(ZooPoseEstimationPredictor.class,
getZooPredictor(zooEndpoint("cv/pose_estimation", "ai.djl.mxnet:simple_pose:0.0.1")));
assertInstanceOf(ZooActionRecognitionPredictor.class,
getZooPredictor(zooEndpoint("cv/action_recognition", "ai.djl.mxnet:action_recognition:0.0.1")));
// No builtin zoo model available for "cv/word_recognition"
assertInstanceOf(ZooImageGenerationPredictor.class,
getZooPredictor(zooEndpoint("cv/image_generation", "ai.djl.pytorch:biggan-deep:0.0.1")));
// No builtin zoo model available for "cv/image_enhancement"
// NLP
// No builtin zoo model available for "nlp/fill_mask"
assertInstanceOf(ZooQuestionAnswerPredictor.class,
getZooPredictor(zooEndpoint("nlp/question_answer", "ai.djl.pytorch:bertqa:0.0.1")));
// No builtin zoo model available for "nlp/text_classification"
assertInstanceOf(ZooSentimentAnalysisPredictor.class,
getZooPredictor(zooEndpoint("nlp/sentiment_analysis", "ai.djl.pytorch:distilbert:0.0.1")));
// No builtin zoo model available for "nlp/token_classification"
assertInstanceOf(ZooWordEmbeddingPredictor.class,
getZooPredictor(zooEndpoint("nlp/word_embedding", "ai.djl.mxnet:glove:0.0.2")));
// No builtin zoo model available for "nlp/text_generation"
// No builtin zoo model available for "nlp/machine_translation"
// No builtin zoo model available for "nlp/multiple_choice"
// No builtin zoo model available for "nlp/text_embedding"
// Tabular
// No builtin zoo model available for "tabular/linear_regression"
// No builtin zoo model available for "tabular/softmax_regression"
// Audio
// No builtin zoo model available for "audio"
// Time Series
assertInstanceOf(ZooForecastingPredictor.class,
getZooPredictor(zooEndpoint("timeseries/forecasting", "ai.djl.pytorch:deepar:0.0.1")));
}
|
public static <T extends DatabaseTypedSPI> T getService(final Class<T> spiClass, final DatabaseType databaseType) {
return findService(spiClass, databaseType).orElseGet(() -> TypedSPILoader.getService(spiClass, null));
}
|
@Test
void assertGetServiceWithRegisteredDatabaseType() {
assertDoesNotThrow(() -> DatabaseTypedSPILoader.getService(DatabaseTypedSPIFixture.class, TypedSPILoader.getService(DatabaseType.class, "TRUNK")));
}
|
public static final File createLocalTempFile(final File basefile,
final String prefix,
final boolean isDeleteOnExit)
throws IOException {
File tmp = File.createTempFile(prefix + basefile.getName(),
"", basefile.getParentFile());
if (isDeleteOnExit) {
tmp.deleteOnExit();
}
return tmp;
}
|
@Test (timeout = 30000)
public void testCreateLocalTempFile() throws IOException {
final File baseFile = new File(tmp, "base");
File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertTrue(tmp1.exists() && tmp2.exists());
assertTrue(tmp1.canWrite() && tmp2.canWrite());
assertTrue(tmp1.canRead() && tmp2.canRead());
Verify.delete(tmp1);
Verify.delete(tmp2);
assertTrue(!tmp1.exists() && !tmp2.exists());
}
|
@Override
public YamlAuthorityRuleConfiguration swapToYamlConfiguration(final AuthorityRuleConfiguration data) {
YamlAuthorityRuleConfiguration result = new YamlAuthorityRuleConfiguration();
result.setPrivilege(algorithmSwapper.swapToYamlConfiguration(data.getPrivilegeProvider()));
result.setUsers(data.getUsers().stream().map(userSwapper::swapToYamlConfiguration).collect(Collectors.toList()));
result.setDefaultAuthenticator(data.getDefaultAuthenticator());
data.getAuthenticators().forEach((key, value) -> result.getAuthenticators().put(key, algorithmSwapper.swapToYamlConfiguration(value)));
return result;
}
|
@Test
void assertSwapToYamlConfiguration() {
AuthorityRuleConfiguration authorityRuleConfig = new AuthorityRuleConfiguration(Collections.emptyList(), new AlgorithmConfiguration("ALL_PERMITTED", new Properties()),
Collections.singletonMap("md5", createAlgorithmConfiguration()), "scram_sha256");
YamlAuthorityRuleConfiguration actual = swapper.swapToYamlConfiguration(authorityRuleConfig);
assertTrue(actual.getUsers().isEmpty());
assertNotNull(actual.getPrivilege());
assertThat(actual.getDefaultAuthenticator(), is("scram_sha256"));
assertThat(actual.getAuthenticators().size(), is(1));
}
|
@Override
public CompletableFuture<Void> deleteOffloaded(long ledgerId, UUID uid,
Map<String, String> offloadDriverMetadata) {
BlobStoreLocation bsKey = getBlobStoreLocation(offloadDriverMetadata);
String readBucket = bsKey.getBucket(offloadDriverMetadata);
CompletableFuture<Void> promise = new CompletableFuture<>();
scheduler.chooseThread(ledgerId).execute(() -> {
try {
BlobStore readBlobstore = getBlobStore(config.getBlobStoreLocation());
readBlobstore.removeBlobs(readBucket,
ImmutableList.of(DataBlockUtils.dataBlockOffloadKey(ledgerId, uid),
DataBlockUtils.indexBlockOffloadKey(ledgerId, uid)));
promise.complete(null);
} catch (Throwable t) {
log.error("Failed delete Blob", t);
promise.completeExceptionally(t);
}
});
return promise.whenComplete((__, t) -> {
if (null != this.ml) {
this.offloaderStats.recordDeleteOffloadOps(
TopicName.fromPersistenceNamingEncoding(this.ml.getName()), t == null);
}
});
}
|
@Test
public void testDeleteOffloaded() throws Exception {
ReadHandle readHandle = buildReadHandle(DEFAULT_BLOCK_SIZE, 1);
UUID uuid = UUID.randomUUID();
BlobStoreManagedLedgerOffloader offloader = getOffloader();
// verify object exist after offload
offloader.offload(readHandle, uuid, new HashMap<>()).get();
Assert.assertTrue(blobStore.blobExists(BUCKET, DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid)));
Assert.assertTrue(blobStore.blobExists(BUCKET, DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid)));
// verify object deleted after delete
offloader.deleteOffloaded(readHandle.getId(), uuid, config.getOffloadDriverMetadata()).get();
Assert.assertFalse(blobStore.blobExists(BUCKET, DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid)));
Assert.assertFalse(blobStore.blobExists(BUCKET, DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid)));
}
|
public List<ConnectionProvider<? extends ConnectionDetails>> getProviders() {
return Collections.list( this.connectionProviders.elements() );
}
|
@Test
public void testGetProviders() {
addProvider();
assertEquals( 1, connectionManager.getProviders().size() );
assertEquals( TestConnectionWithBucketsProvider.SCHEME, connectionManager.getProviders().get( 0 ).getKey() );
}
|
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final String msg = new String(rawMessage.getPayload(), charset);
try (Timer.Context ignored = this.decodeTime.time()) {
final ResolvableInetSocketAddress address = rawMessage.getRemoteAddress();
final InetSocketAddress remoteAddress;
if (address == null) {
remoteAddress = null;
} else {
remoteAddress = address.getInetSocketAddress();
}
return parse(msg, remoteAddress == null ? null : remoteAddress.getAddress(), rawMessage.getTimestamp());
}
}
|
@Test
public void testDecodeStructuredIssue549() throws Exception {
final Message message = codec.decode(buildRawMessage(STRUCTURED_ISSUE_549));
assertNotNull(message);
assertEquals("RT_FLOW_SESSION_DENY [junos@2636.1.1.1.2.39 source-address=\"1.2.3.4\" source-port=\"56639\" destination-address=\"5.6.7.8\" destination-port=\"2003\" service-name=\"None\" protocol-id=\"6\" icmp-type=\"0\" policy-name=\"log-all-else\" source-zone-name=\"campus\" destination-zone-name=\"mngmt\" application=\"UNKNOWN\" nested-application=\"UNKNOWN\" username=\"N/A\" roles=\"N/A\" packet-incoming-interface=\"reth6.0\" encrypted=\"No\"]", message.getMessage());
assertEquals(new DateTime("2014-05-01T08:26:51.179Z", DateTimeZone.UTC), ((DateTime) message.getField("timestamp")).withZone(DateTimeZone.UTC));
assertEquals("1.2.3.4", message.getField("source-address"));
assertEquals("56639", message.getField("source-port"));
assertEquals("5.6.7.8", message.getField("destination-address"));
assertEquals("2003", message.getField("destination-port"));
assertEquals("None", message.getField("service-name"));
assertEquals("6", message.getField("protocol-id"));
assertEquals("0", message.getField("icmp-type"));
assertEquals("log-all-else", message.getField("policy-name"));
assertEquals("campus", message.getField("source-zone-name"));
assertEquals("mngmt", message.getField("destination-zone-name"));
assertEquals("UNKNOWN", message.getField("application"));
assertEquals("UNKNOWN", message.getField("nested-application"));
assertEquals("N/A", message.getField("username"));
assertEquals("N/A", message.getField("roles"));
assertEquals("reth6.0", message.getField("packet-incoming-interface"));
assertEquals("No", message.getField("encrypted"));
assertEquals(1, message.getField("facility_num"));
}
|
@Deprecated
public List<IndexSegment> prune(List<IndexSegment> segments, QueryContext query) {
return prune(segments, query, new SegmentPrunerStatistics());
}
|
@Test
public void notEmptyValidSegmentsAreNotPruned() {
SegmentPrunerService service = new SegmentPrunerService(_emptyPrunerConf);
IndexSegment indexSegment = mockIndexSegment(10, "col1", "col2");
SegmentPrunerStatistics stats = new SegmentPrunerStatistics();
List<IndexSegment> indexes = new ArrayList<>();
indexes.add(indexSegment);
String query = "select col1 from t1";
QueryContext queryContext = QueryContextConverterUtils.getQueryContext(query);
List<IndexSegment> actual = service.prune(indexes, queryContext, stats);
Assert.assertEquals(actual, indexes);
Assert.assertEquals(stats.getInvalidSegments(), 0);
}
|
public boolean authenticate(LDAPConnection connection, String bindDn, EncryptedValue password) throws LDAPException {
checkArgument(!isNullOrEmpty(bindDn), "Binding with empty principal is forbidden.");
checkArgument(password != null, "Binding with null credentials is forbidden.");
checkArgument(password.isSet(), "Binding with empty credentials is forbidden.");
final SimpleBindRequest bindRequest = new SimpleBindRequest(bindDn, encryptedValueService.decrypt(password));
LOG.trace("Re-binding with DN <{}> using password", bindDn);
try {
final BindResult bind = connection.bind(bindRequest);
if (!bind.getResultCode().equals(ResultCode.SUCCESS)) {
LOG.trace("Re-binding DN <{}> failed", bindDn);
throw new RuntimeException(bind.toString());
}
final boolean authenticated = connection.getLastBindRequest().equals(bindRequest);
LOG.trace("Binding DN <{}> did not throw, connection authenticated: {}", bindDn, authenticated);
return authenticated;
} catch (LDAPBindException e) {
LOG.trace("Re-binding DN <{}> failed", bindDn);
return false;
}
}
|
@Test
public void testAuthenticateFail() throws LDAPException {
final boolean authenticated = connector.authenticate(connection, "cn=John Doe,ou=users,dc=example,dc=com", encryptedValueService.encrypt("wrongpass"));
assertThat(authenticated).isFalse();
}
|
@Override
public String getPrefix() {
return String.format("%s.%s", IRODSProtocol.class.getPackage().getName(), StringUtils.upperCase(this.getType().name()));
}
|
@Test
public void testGetPrefix() {
assertEquals("ch.cyberduck.core.irods.IRODS", new IRODSProtocol().getPrefix());
}
|
public static MDS of(double[][] proximity) {
return of(proximity, new Properties());
}
|
@Test
public void test() {
System.out.println("MDS");
double[] eig = {19538377.0895, 11856555.3340};
double[][] points = {
{ 2290.274680, 1798.80293},
{ -825.382790, 546.81148},
{ 59.183341, -367.08135},
{ -82.845973, -429.91466},
{ -352.499435, -290.90843},
{ 293.689633, -405.31194},
{ 681.931545, -1108.64478},
{ -9.423364, 240.40600},
{-2048.449113, 642.45854},
{ 561.108970, -773.36929},
{ 164.921799, -549.36704},
{-1935.040811, 49.12514},
{ -226.423236, 187.08779},
{-1423.353697, 305.87513},
{ -299.498710, 388.80726},
{ 260.878046, 416.67381},
{ 587.675679, 81.18224},
{ -156.836257, -211.13911},
{ 709.413282, 1109.36665},
{ 839.445911, -1836.79055},
{ 911.230500, 205.93020}
};
MDS mds = MDS.of(Eurodist.x);
assertArrayEquals(eig, mds.scores, 1E-4);
double sign0 = Math.signum(points[0][0] * mds.coordinates[0][0]);
double sign1 = Math.signum(points[0][1] * mds.coordinates[0][1]);
for (int i = 0; i < points.length; i++) {
points[i][0] *= sign0;
points[i][1] *= sign1;
assertArrayEquals(points[i], mds.coordinates[i], 1E-4);
}
}
|
public static void validate(final String table, final String column, final Comparable<?> shadowValue) {
for (Class<?> each : UNSUPPORTED_TYPES) {
ShardingSpherePreconditions.checkState(!each.isAssignableFrom(shadowValue.getClass()), () -> new UnsupportedShadowColumnTypeException(table, column, each));
}
}
|
@Test
void assertValidateEnumType() {
assertThrows(UnsupportedShadowColumnTypeException.class, () -> ShadowValueValidator.validate("tbl", "col", mock(Enum.class)));
}
|
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
Object result = FlowableUtils.getExpressionValue(execution, param);
return Convert.toSet(Long.class, result);
}
|
@Test
public void testCalculateUsers() {
try (MockedStatic<FlowableUtils> flowableUtilMockedStatic = mockStatic(FlowableUtils.class)) {
// prepare parameters
String param = "1,2";
DelegateExecution execution = mock(DelegateExecution.class);
// mock the method
flowableUtilMockedStatic.when(() -> FlowableUtils.getExpressionValue(same(execution), eq(param)))
.thenReturn(asSet(1L, 2L));
// invoke
Set<Long> results = strategy.calculateUsers(execution, param);
// assert
assertEquals(asSet(1L, 2L), results);
}
}
|
@Override
public void isNotEqualTo(@Nullable Object expected) {
super.isNotEqualTo(expected);
}
|
@Test
public void isNotEqualTo_WithoutToleranceParameter_FailEquals() {
expectFailureWhenTestingThat(array(2.2d, 5.4d, POSITIVE_INFINITY, NEGATIVE_INFINITY))
.isNotEqualTo(array(2.2d, 5.4d, POSITIVE_INFINITY, NEGATIVE_INFINITY));
}
|
public JetSqlRow project(Object object) {
target.setTarget(object, null);
return ExpressionUtil.projection(predicate, projection, this, evalContext);
}
|
@Test
@SuppressWarnings("unchecked")
public void when_filteredByPredicate_then_returnsNull() {
RowProjector projector = new RowProjector(
new String[]{"target"},
new QueryDataType[]{INT},
new IdentityTarget(),
(Expression<Boolean>) ConstantExpression.create(Boolean.FALSE, BOOLEAN),
emptyList(),
mock(ExpressionEvalContext.class)
);
JetSqlRow row = projector.project(1);
assertThat(row).isNull();
}
|
@Override
public Object deserialize(Writable writable) {
return ((Container<?>) writable).get();
}
|
@Test
public void testDeserialize() {
HiveIcebergSerDe serDe = new HiveIcebergSerDe();
Record record = RandomGenericData.generate(SCHEMA, 1, 0).get(0);
Container<Record> container = new Container<>();
container.set(record);
assertThat(serDe.deserialize(container)).isEqualTo(record);
}
|
public ShardingSphereDatabase getDatabase(final String name) {
ShardingSpherePreconditions.checkNotEmpty(name, NoDatabaseSelectedException::new);
ShardingSphereMetaData metaData = getMetaDataContexts().getMetaData();
ShardingSpherePreconditions.checkState(metaData.containsDatabase(name), () -> new UnknownDatabaseException(name));
return metaData.getDatabase(name);
}
|
@Test
void assertAddSchema() {
contextManager.getMetaDataContextManager().getSchemaMetaDataManager().addSchema("foo_db", "bar_schema");
verify(metaDataContexts.getMetaData().getDatabase("foo_db")).addSchema(anyString(), any(ShardingSphereSchema.class));
}
|
public static PostgreSQLCommandPacket newInstance(final PostgreSQLCommandPacketType commandPacketType, final PostgreSQLPacketPayload payload) {
if (!PostgreSQLCommandPacketType.isExtendedProtocolPacketType(commandPacketType)) {
payload.getByteBuf().skipBytes(1);
return getPostgreSQLCommandPacket(commandPacketType, payload);
}
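// Extended-protocol messages (e.g. Parse/Bind/Execute/Sync) may arrive batched in one payload; each complete packet is sliced out and aggregated.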
List<PostgreSQLCommandPacket> result = new ArrayList<>();
while (payload.hasCompletePacket()) {
PostgreSQLCommandPacketType type = PostgreSQLCommandPacketType.valueOf(payload.readInt1());
int length = payload.getByteBuf().getInt(payload.getByteBuf().readerIndex());
PostgreSQLPacketPayload slicedPayload = new PostgreSQLPacketPayload(payload.getByteBuf().readSlice(length), payload.getCharset());
result.add(getPostgreSQLCommandPacket(type, slicedPayload));
}
return new PostgreSQLAggregatedCommandPacket(result);
}
|
@Test
void assertNewInstanceWithFlushComPacket() {
assertThat(PostgreSQLCommandPacketFactory.newInstance(PostgreSQLCommandPacketType.FLUSH_COMMAND, payload), instanceOf(PostgreSQLAggregatedCommandPacket.class));
}
|
public Plan validateReservationUpdateRequest(
ReservationSystem reservationSystem, ReservationUpdateRequest request)
throws YarnException {
ReservationId reservationId = request.getReservationId();
Plan plan = validateReservation(reservationSystem, reservationId,
AuditConstants.UPDATE_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.UPDATE_RESERVATION_REQUEST);
return plan;
}
|
@Test
public void testUpdateReservationInvalidDeadline() {
ReservationUpdateRequest request =
createSimpleReservationUpdateRequest(1, 1, 1, 0, 3);
Plan plan = null;
try {
plan = rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message
.startsWith("The specified deadline: 0 is the past"));
LOG.info(message);
}
}
|
@Override
public CompletableFuture<KubernetesWatch> watchPodsAndDoCallback(
Map<String, String> labels, WatchCallbackHandler<KubernetesPod> podCallbackHandler) {
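// Retry creating the watch with exponential backoff, but only when the failure is a KubernetesClientException.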
return FutureUtils.retryWithDelay(
() ->
CompletableFuture.supplyAsync(
() ->
new KubernetesWatch(
this.internalClient
.pods()
.withLabels(labels)
.withResourceVersion(
KUBERNETES_ZERO_RESOURCE_VERSION)
.watch(
new KubernetesPodsWatcher(
podCallbackHandler))),
kubeClientExecutorService),
new ExponentialBackoffRetryStrategy(
maxRetryAttempts, initialRetryInterval, maxRetryInterval),
t -> ExceptionUtils.findThrowable(t, KubernetesClientException.class).isPresent(),
new ScheduledExecutorServiceAdapter(kubeClientExecutorService));
}
|
@Test
void testWatchPodsAndDoCallback() throws Exception {
mockPodEventWithLabels(
NAMESPACE, TASKMANAGER_POD_NAME, KUBERNETES_ZERO_RESOURCE_VERSION, TESTING_LABELS);
// futures completed when the corresponding pod events are received.
CompletableFuture<Action> podAddedAction = new CompletableFuture<>();
CompletableFuture<Action> podDeletedAction = new CompletableFuture<>();
CompletableFuture<Action> podModifiedAction = new CompletableFuture<>();
TestingWatchCallbackHandler<KubernetesPod> watchCallbackHandler =
TestingWatchCallbackHandler.<KubernetesPod>builder()
.setOnAddedConsumer((ignore) -> podAddedAction.complete(Action.ADDED))
.setOnDeletedConsumer((ignore) -> podDeletedAction.complete(Action.DELETED))
.setOnModifiedConsumer(
(ignore) -> podModifiedAction.complete(Action.MODIFIED))
.build();
this.flinkKubeClient.watchPodsAndDoCallback(TESTING_LABELS, watchCallbackHandler);
assertThat(podAddedAction.get()).isEqualTo(Action.ADDED);
assertThat(podDeletedAction.get()).isEqualTo(Action.DELETED);
assertThat(podModifiedAction.get()).isEqualTo(Action.MODIFIED);
}
|
@VisibleForTesting
static String parseBAHighGambleWidget(final String text)
{
final Matcher highGambleMatch = BA_HIGH_GAMBLE_REWARD_PATTERN.matcher(text);
if (highGambleMatch.find())
{
String gambleCount = highGambleMatch.group("gambleCount");
return String.format("High Gamble(%s)", gambleCount);
}
return "High Gamble(count not found)";
}
|
@Test
public void testBAHighGambleRewardParsing()
{
assertEquals("High Gamble(100)", ScreenshotPlugin.parseBAHighGambleWidget(BA_HIGH_GAMBLE_REWARD));
}
|
@Override
public CompletableFuture<KubernetesWorkerNode> requestResource(
TaskExecutorProcessSpec taskExecutorProcessSpec) {
final KubernetesTaskManagerParameters parameters =
createKubernetesTaskManagerParameters(
taskExecutorProcessSpec, getBlockedNodeRetriever().getAllBlockedNodeIds());
final KubernetesPod taskManagerPod =
KubernetesTaskManagerFactory.buildTaskManagerKubernetesPod(
taskManagerPodTemplate, parameters);
final String podName = taskManagerPod.getName();
final CompletableFuture<KubernetesWorkerNode> requestResourceFuture =
new CompletableFuture<>();
requestResourceFutures.put(podName, requestResourceFuture);
log.info(
"Creating new TaskManager pod with name {} and resource <{},{}>.",
podName,
parameters.getTaskManagerMemoryMB(),
parameters.getTaskManagerCPU());
final CompletableFuture<Void> createPodFuture =
flinkKubeClient.createTaskManagerPod(taskManagerPod);
FutureUtils.assertNoException(
createPodFuture.handleAsync(
(ignore, exception) -> {
if (exception != null) {
log.warn(
"Could not create pod {}, exception: {}",
podName,
exception);
CompletableFuture<KubernetesWorkerNode> future =
requestResourceFutures.remove(taskManagerPod.getName());
if (future != null) {
future.completeExceptionally(exception);
}
} else {
if (requestResourceFuture.isCancelled()) {
stopPod(podName);
log.info(
"Pod {} was cancelled before its creation finished; stopping it.",
podName);
} else {
log.info("Pod {} is created.", podName);
}
}
return null;
},
getMainThreadExecutor()));
FutureUtils.assertNoException(
requestResourceFuture.handle(
(ignore, t) -> {
if (t == null) {
return null;
}
// Unwrap CompletionException cause if any
if (t instanceof CompletionException && t.getCause() != null) {
t = t.getCause();
}
if (t instanceof CancellationException) {
requestResourceFutures.remove(taskManagerPod.getName());
if (createPodFuture.isDone()) {
log.info(
"Pod {} was cancelled before being scheduled; stopping it.",
podName);
stopPod(taskManagerPod.getName());
}
} else if (t instanceof RetryableException
|| t instanceof KubernetesClientException) {
// ignore transient / retriable errors
} else {
log.error("Error completing resource request.", t);
ExceptionUtils.rethrow(t);
}
return null;
}));
return requestResourceFuture;
}
|
@Test
void testKubernetesExceptionHandling() throws Exception {
new Context() {
{
runTest(
() ->
FlinkAssertions.assertThatFuture(
runInMainThread(
() -> {
getDriver()
.requestResource(
TASK_EXECUTOR_PROCESS_SPEC)
.completeExceptionally(
new CompletionException(
new KubernetesClientException(
"test")));
}))
.eventuallySucceeds());
}
};
}
|
@Override
public MailAccountDO getMailAccount(Long id) {
return mailAccountMapper.selectById(id);
}
|
@Test
public void testGetMailAccount() {
// mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount); // @Sql: insert an existing record first
// prepare parameters
Long id = dbMailAccount.getId();
// invoke
MailAccountDO mailAccount = mailAccountService.getMailAccount(id);
// assert
assertPojoEquals(dbMailAccount, mailAccount);
}
|
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testUpgradeWithIbpv(VertxTestContext context) {
String kafkaVersion = VERSIONS.defaultVersion().version();
String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION;
String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION;
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(kafkaVersion, oldInterBrokerProtocolVersion, null),
mockNewCluster(
null,
mockSps(kafkaVersion),
mockUniformPods(kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.defaultVersion()));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.interBrokerProtocolVersion(), is(nullValue())); // Is null because it is set in the Kafka CR
assertThat(c.logMessageFormatVersion(), is(oldInterBrokerProtocolVersion)); // Mirrors the inter.broker.protocol.version
async.flag();
})));
}
|
@Override
public ListenableFuture<?> execute(Rollback statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters)
{
Session session = stateMachine.getSession();
if (!session.getTransactionId().isPresent()) {
throw new PrestoException(NOT_IN_TRANSACTION, "No transaction in progress");
}
TransactionId transactionId = session.getTransactionId().get();
stateMachine.clearTransactionId();
transactionManager.asyncAbort(transactionId);
return immediateFuture(null);
}
|
@Test
public void testNoTransactionRollback()
{
TransactionManager transactionManager = createTestTransactionManager();
Session session = sessionBuilder()
.build();
QueryStateMachine stateMachine = createQueryStateMachine("ROLLBACK", session, true, transactionManager, executor, metadata);
RollbackTask rollbackTask = new RollbackTask();
try {
getFutureValue(rollbackTask.execute(new Rollback(), transactionManager, metadata, new AllowAllAccessControl(), stateMachine, emptyList()));
fail();
}
catch (PrestoException e) {
assertEquals(e.getErrorCode(), NOT_IN_TRANSACTION.toErrorCode());
}
assertFalse(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId());
assertFalse(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().isPresent());
assertTrue(transactionManager.getAllTransactionInfos().isEmpty());
}
|
public WorkflowDefinition addWorkflowDefinition(
WorkflowDefinition workflowDef, Properties changes) {
LOG.info("Adding a new workflow definition with an id [{}]", workflowDef.getWorkflow().getId());
final Workflow workflow = workflowDef.getWorkflow();
final Metadata metadata = workflowDef.getMetadata();
return withMetricLogError(
() ->
withRetryableTransaction(
conn -> {
WorkflowInfo workflowInfo = getWorkflowInfoForUpdate(conn, workflow.getId());
final long nextVersionId = workflowInfo.getLatestVersionId() + 1;
// update the metadata with version info and then metadata is complete.
metadata.setWorkflowVersionId(nextVersionId);
TriggerUuids triggerUuids =
insertMaestroWorkflowVersion(conn, metadata, workflow);
PropertiesSnapshot snapshot =
updateWorkflowProps(
conn,
workflow.getId(),
metadata.getVersionAuthor(),
metadata.getCreateTime(),
workflowInfo.getPrevPropertiesSnapshot(),
changes,
new PropertiesUpdate(Type.ADD_WORKFLOW_DEFINITION));
// add new snapshot to workflowDef
if (snapshot != null) {
workflowDef.setPropertiesSnapshot(snapshot);
} else {
workflowDef.setPropertiesSnapshot(workflowInfo.getPrevPropertiesSnapshot());
}
final long[] upsertRes = upsertMaestroWorkflow(conn, workflowDef);
Checks.notNull(
upsertRes,
"the upsert result should not be null for workflow [%s]",
workflow.getId());
workflowDef.setIsLatest(true); // a new version will always be latest
// add default flag and modified_time and then workflowDef is complete
workflowDef.setIsDefault(
workflowInfo.getPrevActiveVersionId() == Constants.INACTIVE_VERSION_ID
|| workflowDef.getIsActive());
workflowDef.setModifyTime(upsertRes[0]);
workflowDef.setInternalId(upsertRes[1]);
if (workflowDef.getIsActive()) {
workflowInfo.setNextActiveWorkflow(
MaestroWorkflowVersion.builder()
.definition(workflow)
.triggerUuids(triggerUuids)
.metadata(metadata)
.build(),
workflowDef.getPropertiesSnapshot());
} else if (workflowInfo.getPrevActiveVersionId()
!= Constants.INACTIVE_VERSION_ID) {
// getting an inactive new version but having an active old version
updateWorkflowInfoForNextActiveWorkflow(
conn,
workflow.getId(),
workflowInfo.getPrevActiveVersionId(),
workflowInfo,
workflowDef.getPropertiesSnapshot());
}
if (workflowInfo.withWorkflow()) {
addWorkflowTriggersIfNeeded(conn, workflowInfo);
}
MaestroJobEvent jobEvent =
logToTimeline(
conn, workflowDef, snapshot, workflowInfo.getPrevActiveVersionId());
publisher.publishOrThrow(
jobEvent, "Failed to publish maestro definition change job event.");
return workflowDef;
}),
"addWorkflowDefinition",
"Failed creating a new workflow definition {}",
workflow.getId());
}
|
@Test
public void testAddWorkflowDefinition() throws Exception {
WorkflowDefinition wfd = loadWorkflow(TEST_WORKFLOW_ID1);
WorkflowDefinition definition =
workflowDao.addWorkflowDefinition(wfd, wfd.getPropertiesSnapshot().extractProperties());
assertEquals(wfd, definition);
verify(publisher, times(1)).publishOrThrow(any(), any());
verify(triggerClient, times(0)).upsertTriggerSubscription(any(), any(), any());
}
|
private MqttMessage getMessage( Object[] row ) throws KettleStepException {
MqttMessage mqttMessage = new MqttMessage();
try {
mqttMessage.setQos( Integer.parseInt( meta.qos ) );
} catch ( NumberFormatException e ) {
throw new KettleStepException(
getString( PKG, "MQTTProducer.Error.QOS", meta.qos ) );
}
//noinspection ConstantConditions
mqttMessage.setPayload( getFieldData( row, meta.messageField )
.map( this::dataAsBytes )
.orElse( null ) ); //allow nulls to pass through
return mqttMessage;
}
|
@Test
public void testErrorOnPublishStopsAll() throws Exception {
handleAsSecondRow( trans );
MqttException mqttException = mock( MqttException.class );
when( mqttException.getMessage() ).thenReturn( "publish failed" );
when( mqttClient.isConnected() ).thenReturn( true, false );
doThrow( mqttException ).when( mqttClient ).publish( any(), any() );
trans.startThreads();
trans.waitUntilFinished();
verify( mqttClient ).disconnect();
verify( logChannel ).logError(
"MQTT Producer - Received an exception publishing the message."
+ " Check that Quality of Service level 0 is supported by your MQTT Broker" );
verify( logChannel ).logError( "publish failed", mqttException );
assertEquals( 0, trans.getSteps().get( 1 ).step.getLinesOutput() );
}
|
@EventListener(ApplicationEvent.class)
void onApplicationEvent(ApplicationEvent event) {
if (AnnotationUtils.findAnnotation(event.getClass(), SharedEvent.class) == null) {
return;
}
// we should copy the plugins list to avoid ConcurrentModificationException
var startedPlugins = new ArrayList<>(pluginManager.getStartedPlugins());
// broadcast event to all started plugins except the publisher
for (var startedPlugin : startedPlugins) {
var plugin = startedPlugin.getPlugin();
if (!(plugin instanceof SpringPlugin springPlugin)) {
continue;
}
var context = springPlugin.getApplicationContext();
// make sure the context is running before publishing the event
if (context instanceof Lifecycle lifecycle && lifecycle.isRunning()) {
context.publishEvent(new HaloSharedEventDelegator(this, event));
}
}
}
|
@Test
void shouldNotDispatchEventToAllStartedPluginsWhilePluginContextIsNotLifecycle() {
var pw = mock(PluginWrapper.class);
var plugin = mock(SpringPlugin.class);
var context = mock(ApplicationContext.class);
when(plugin.getApplicationContext()).thenReturn(context);
when(pw.getPlugin()).thenReturn(plugin);
when(pluginManager.getStartedPlugins()).thenReturn(List.of(pw));
var event = new FakeSharedEvent(this);
dispatcher.onApplicationEvent(event);
verify(context, never()).publishEvent(event);
}
|
public void copyToWithPermission(FilePath target) throws IOException, InterruptedException {
// Use NIO copy with StandardCopyOption.COPY_ATTRIBUTES when copying on the same machine.
if (this.channel == target.channel) {
act(new CopyToWithPermission(target));
return;
}
copyTo(target);
// copy file permission
target.chmod(mode());
target.setLastModifiedIfPossible(lastModified());
}
|
@Test public void copyToWithPermission() throws IOException, InterruptedException {
File tmp = temp.getRoot();
File child = new File(tmp, "child");
FilePath childP = new FilePath(child);
childP.touch(4711);
Chmod chmodTask = new Chmod();
chmodTask.setProject(new Project());
chmodTask.setFile(child);
chmodTask.setPerm("0400");
chmodTask.execute();
FilePath copy = new FilePath(channels.british, tmp.getPath()).child("copy");
childP.copyToWithPermission(copy);
assertEquals(childP.mode(), copy.mode());
if (!Functions.isWindows()) {
assertEquals(childP.lastModified(), copy.lastModified());
}
// JENKINS-11073:
// Windows seems to have random failures when setting the timestamp on newly generated
// files. So test that:
for (int i = 0; i < 100; i++) {
copy = new FilePath(channels.british, tmp.getPath()).child("copy" + i);
childP.copyToWithPermission(copy);
}
}
|
@Override
public Stream<MappingField> resolveAndValidateFields(
boolean isKey,
List<MappingField> userFields,
Map<String, String> options,
InternalSerializationService serializationService
) {
Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
Class<?> typeClass = getMetadata(fieldsByPath)
.<Class<?>>map(KvMetadataJavaResolver::loadClass)
.orElseGet(() -> loadClass(options, isKey));
QueryDataType type = QueryDataTypeUtils.resolveTypeForClass(typeClass);
if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
return userFields.isEmpty()
? resolvePrimitiveField(isKey, type)
: resolveAndValidatePrimitiveField(isKey, fieldsByPath, type);
} else {
return userFields.isEmpty()
? resolveObjectFields(isKey, typeClass)
: resolveAndValidateObjectFields(isKey, fieldsByPath, typeClass);
}
}
|
@Test
@Parameters({
"true, __key",
"false, this"
})
public void when_userDeclaresObjectField_then_itsNameHasPrecedenceOverResolvedOne(boolean key, String prefix) {
Map<String, String> options = Map.of(
(key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
(key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), Type.class.getName()
);
Stream<MappingField> fields = INSTANCE.resolveAndValidateFields(
key,
singletonList(field("renamed_field", QueryDataType.INT, prefix + ".field")),
options,
null
);
assertThat(fields).containsExactly(field("renamed_field", QueryDataType.INT, prefix + ".field"));
}
|
@Override
public void visit(Entry target) {
final FreeplaneMenuBar menuBar = userInputListenerFactory.getMenuBar();
addMnemonicsBeforeShowing(menuBar);
new EntryAccessor().setComponent(target, menuBar);
}
|
@Test
public void createsEmptyToolbarComponent() {
Entry toolbarEntry = new Entry();
final IUserInputListenerFactory userInputListenerFactory = mock(IUserInputListenerFactory.class);
final FreeplaneMenuBar menubar = TestMenuBarFactory.createFreeplaneMenuBar();
when(userInputListenerFactory.getMenuBar()).thenReturn(menubar);
final JMenubarBuilder toolbarBuilder = new JMenubarBuilder(userInputListenerFactory);
toolbarBuilder.visit(toolbarEntry);
assertThat(new EntryAccessor().getComponent(toolbarEntry), CoreMatchers.<Object> is(menubar));
}
|