focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Wraps a {@link RemoteIterator} so that only elements accepted by
 * {@code filter} are yielded to the caller.
 *
 * @param <S> element type of the iterator
 * @param iterator source iterator to wrap
 * @param filter predicate (may raise an IOException) deciding which
 *     elements pass through
 * @return a filtering view over {@code iterator}
 */
public static <S> RemoteIterator<S> filteringRemoteIterator(
    RemoteIterator<S> iterator,
    FunctionRaisingIOE<? super S, Boolean> filter) {
  FilteringRemoteIterator<S> filtering =
      new FilteringRemoteIterator<>(iterator, filter);
  return filtering;
}
|
@Test
public void testFilterAllAccepted() throws Throwable {
  // the filter accepts every element, so all 100 entries pass through
  RemoteIterator<Integer> it = filteringRemoteIterator(
      new CountdownRemoteIterator(100),
      i -> true);
  verifyInvoked(it, 100, c -> counter++);
  // toString() must surface the wrapped iterator for diagnostics
  assertStringValueContains(it, "CountdownRemoteIterator");
}
|
/**
 * Equality for JSON primitives. Numbers are compared by value rather than by
 * Java type: integral values compare as longs (or BigIntegers when either side
 * is one), and other numbers compare as doubles, with NaN == NaN and
 * -0.0 == +0.0 treated as equal per JSON semantics.
 */
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  if (obj == null || getClass() != obj.getClass()) {
    return false;
  }
  JsonPrimitive other = (JsonPrimitive) obj;
  if (value == null) {
    return other.value == null;
  }
  if (isIntegral(this) && isIntegral(other)) {
    // BigInteger may exceed long range, so widen both sides when either is one
    return (this.value instanceof BigInteger || other.value instanceof BigInteger)
        ? this.getAsBigInteger().equals(other.getAsBigInteger())
        : this.getAsNumber().longValue() == other.getAsNumber().longValue();
  }
  if (value instanceof Number && other.value instanceof Number) {
    if (value instanceof BigDecimal && other.value instanceof BigDecimal) {
      // Uses compareTo to ignore scale of values, e.g. `0` and `0.00` should be considered equal
      return this.getAsBigDecimal().compareTo(other.getAsBigDecimal()) == 0;
    }
    double thisAsDouble = this.getAsDouble();
    double otherAsDouble = other.getAsDouble();
    // Don't use Double.compare(double, double) because that considers -0.0 and +0.0 not equal
    return (thisAsDouble == otherAsDouble)
        || (Double.isNaN(thisAsDouble) && Double.isNaN(otherAsDouble));
  }
  // non-numeric primitives (String, Boolean, Character) compare directly
  return value.equals(other.value);
}
|
@Test
public void testEquals() {
  // Equal values of the same type must be equal and share a hash code.
  MoreAsserts.assertEqualsAndHashCode(new JsonPrimitive("A"), new JsonPrimitive("A"));
  MoreAsserts.assertEqualsAndHashCode(new JsonPrimitive(true), new JsonPrimitive(true));
  MoreAsserts.assertEqualsAndHashCode(new JsonPrimitive(5L), new JsonPrimitive(5L));
  MoreAsserts.assertEqualsAndHashCode(new JsonPrimitive('a'), new JsonPrimitive('a'));
  // Special float values must still be self-equal (NaN included).
  for (float f : new float[] {Float.NaN, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY}) {
    MoreAsserts.assertEqualsAndHashCode(new JsonPrimitive(f), new JsonPrimitive(f));
  }
  // Same for the double special values.
  for (double d : new double[] {Double.NaN, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY}) {
    MoreAsserts.assertEqualsAndHashCode(new JsonPrimitive(d), new JsonPrimitive(d));
  }
  // Distinct values must not compare equal.
  assertThat(new JsonPrimitive("a").equals(new JsonPrimitive("b"))).isFalse();
  assertThat(new JsonPrimitive(true).equals(new JsonPrimitive(false))).isFalse();
  assertThat(new JsonPrimitive(0).equals(new JsonPrimitive(1))).isFalse();
}
|
/**
 * Intentionally a no-op: an unsupported command packet carries no payload,
 * so nothing is written to {@code payload}.
 */
@Override
protected void write(final PostgreSQLPacketPayload payload) {
}
|
@Test
void assertWrite() {
  PostgreSQLUnsupportedCommandPacket rowPacket = new PostgreSQLUnsupportedCommandPacket(PostgreSQLMessagePacketType.AUTHENTICATION_REQUEST);
  rowPacket.write(new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8));
  // write() is a no-op for unsupported packets, so nothing may be written
  assertThat(byteBuf.writerIndex(), is(0));
}
|
/**
 * Stores {@code value} under {@code key} only if no mapping exists yet.
 *
 * NOTE(review): unlike {@link java.util.Map#putIfAbsent}, which returns the
 * previous value (or null when absent), this delegates to computeIfAbsent and
 * therefore returns the value now present in the store — the freshly inserted
 * one when the key was absent. Confirm callers expect that contract.
 */
@Override
public E putIfAbsent(String key, E value) {
  return computeIfAbsent(key, k -> value);
}
|
@Test
public void putIfAbsent_cacheThrowsException_throwsUnwrappedEntryProcessorException() {
  // mapping function simulates a cache-side failure wrapped by JCache
  Function<String, Integer> mappingFunction = s -> {
    throw new EntryProcessorException(new IllegalStateException());
  };
  doReturn(null).when(mutableEntryMock).getValue();
  entryProcessorMock = new CacheRegistryStore.AtomicComputeProcessor<>();
  entryProcessorArgMock = mappingFunction;
  try {
    classUnderTest.putIfAbsent(CACHE_KEY, 54);
    fail("Test should've thrown EntryProcessorException");
  } catch (RuntimeException e) {
    // the EntryProcessorException wrapper must be unwrapped to its cause
    assertTrue(e.getCause() instanceof IllegalStateException);
  }
}
|
/**
 * Builds a fully configured FEEL 1.1 ANTLR parser for the given source text:
 * wires the event listeners, registers additional functions into the built-in
 * scope, installs custom error handling, and pre-loads input variable symbols.
 *
 * NOTE(review): the {@code profiles} parameter is not referenced in this body
 * — confirm whether it is consumed elsewhere or dead here.
 *
 * @param eventsManager manager receiving parser events
 * @param source the FEEL expression to parse
 * @param inputVariableTypes declared types for input variables
 * @param inputVariables input variable values
 * @param additionalFunctions extra functions to define in the built-in scope (must be non-null)
 * @param profiles FEEL profiles (currently unused here)
 * @param typeRegistry optional type registry; only set when non-null
 * @return the configured parser, ready to parse {@code source}
 */
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
  CharStream input = CharStreams.fromString(source);
  FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
  CommonTokenStream tokens = new CommonTokenStream( lexer );
  FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
  ParserHelper parserHelper = new ParserHelper(eventsManager);
  additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
  parser.setHelper(parserHelper);
  parser.setErrorHandler( new FEELErrorHandler() );
  parser.removeErrorListeners(); // removes the error listener that prints to the console
  parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
  // pre-loads the parser with symbols
  defineVariables( inputVariableTypes, inputVariables, parser );
  // NOTE(review): registry is installed after defineVariables — confirm
  // variable definition does not depend on the type registry being set.
  if (typeRegistry != null) {
    parserHelper.setTypeRegistry(typeRegistry);
  }
  return parser;
}
|
@Test
void power3() {
  // '**' binds tighter than '*', so this must parse as (y ** 5) * 3
  String inputExpression = "y ** 5 * 3";
  BaseNode infix = parse( inputExpression, mapOf(entry("y", BuiltInType.NUMBER)) );
  assertThat( infix).isInstanceOf(InfixOpNode.class);
  assertThat( infix.getResultType()).isEqualTo(BuiltInType.NUMBER);
  assertThat( infix.getText()).isEqualTo(inputExpression);
  // top-level node is the multiplication
  InfixOpNode mult = (InfixOpNode) infix;
  assertThat( mult.getLeft()).isInstanceOf(InfixOpNode.class);
  assertThat( mult.getLeft().getText()).isEqualTo( "y ** 5");
  assertThat( mult.getOperator()).isEqualTo(InfixOperator.MULT);
  assertThat( mult.getRight()).isInstanceOf(NumberNode.class);
  assertThat( mult.getRight().getText()).isEqualTo("3");
  // left child is the power expression y ** 5
  InfixOpNode exp = (InfixOpNode) mult.getLeft();
  assertThat( exp.getLeft()).isInstanceOf(NameRefNode.class);
  assertThat( exp.getLeft().getText()).isEqualTo("y");
  assertThat( exp.getOperator()).isEqualTo(InfixOperator.POW);
  assertThat( exp.getRight()).isInstanceOf(NumberNode.class);
  assertThat( exp.getRight().getText()).isEqualTo("5");
}
|
/**
 * Starts an asynchronous fetch of {@code url}; the result (or failure) is
 * delivered later through {@code callback}.
 */
@Override
public void loadData(Priority priority, DataCallback<? super T> callback) {
  // retained so the async response handlers can notify the caller
  this.callback = callback;
  serializer.startRequest(priority, url, this);
}
|
@Test
public void testRequestComplete_with200AndCancelled_callsCallbackWithNullException()
    throws Exception {
  UrlResponseInfo info = getInfo(0, 200);
  fetcher.loadData(Priority.LOW, callback);
  Callback urlCallback = urlRequestListenerCaptor.getValue();
  urlCallback.onResponseStarted(request, info);
  // cancellation after a 200 response must report failure with a null exception
  urlCallback.onCanceled(request, info);
  verify(callback, timeout(1000)).onLoadFailed(ArgumentMatchers.<Exception>isNull());
}
|
/**
 * Returns the portion of {@code text} located between the first occurrence of
 * {@code after} and the following occurrence of {@code before}, or
 * {@code null} when {@code after} is not found.
 */
public static String between(String text, String after, String before) {
    String tail = after(text, after);
    return tail == null ? null : before(tail, before);
}
|
@Test
public void testBetween() {
  // extracts the generic type argument between '<' and '>'
  assertEquals("org.apache.camel.model.OnCompletionDefinition",
      between("java.util.List<org.apache.camel.model.OnCompletionDefinition>", "<", ">"));
}
|
/**
 * Issues an HTTP PUT with a JSON body.
 *
 * NOTE(review): this mutates the caller's {@code header} by forcing its
 * Content-Type to application/json — confirm callers expect that side effect.
 *
 * @param url target URL
 * @param header request headers (Content-Type is overwritten to JSON)
 * @param query query parameters
 * @param body JSON request body
 * @param responseType type to deserialize the response into
 * @return the parsed REST result
 * @throws Exception if the request fails
 */
public <T> HttpRestResult<T> putJson(String url, Header header, Query query, String body, Type responseType)
    throws Exception {
  RequestHttpEntity requestHttpEntity = new RequestHttpEntity(header.setContentType(MediaType.APPLICATION_JSON),
      query, body);
  return execute(url, HttpMethod.PUT, requestHttpEntity, responseType);
}
|
@Test
void testPutJson() throws Exception {
  when(requestClient.execute(any(), eq("PUT"), any())).thenReturn(mockResponse);
  when(mockResponse.getStatusCode()).thenReturn(200);
  when(mockResponse.getBody()).thenReturn(new ByteArrayInputStream("test".getBytes()));
  // start with XML so we can verify putJson overwrites the content type
  Header header = Header.newInstance().setContentType(MediaType.APPLICATION_XML);
  HttpRestResult<String> result = restTemplate.putJson("http://127.0.0.1:8848/nacos/test", header, "body", String.class);
  assertTrue(result.ok());
  assertEquals(Header.EMPTY, result.getHeader());
  assertEquals("test", result.getData());
  // putJson must have forced the request Content-Type to JSON
  assertEquals(MediaType.APPLICATION_JSON, header.getValue(HttpHeaderConsts.CONTENT_TYPE));
}
|
/**
 * Entry point for every HTTP GET operation of the HttpFS server (the
 * WebHDFS-compatible REST gateway). Dispatches on the {@code op} query
 * parameter and returns either a JSON document or, for OPEN, an octet
 * stream backed by the file contents.
 *
 * @param path filesystem path extracted from the request URL
 * @param uriInfo request URI context, used to build redirection URLs
 * @param op the GET operation to execute
 * @param params typed accessor for the remaining query parameters
 * @param request raw servlet request (remote address is audit-logged)
 * @return the HTTP response for the requested operation
 * @throws IOException on an invalid operation or filesystem failure
 * @throws FileSystemAccessException on filesystem access errors
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
    @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      // client asked for the data-node location instead of the bytes
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      // NOTE(review): if the doAs was interrupted, `is` remains null and is
      // passed to InputStreamEntity below — confirm that is handled downstream.
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // instrumentation snapshots are admin-only
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command =
        new FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    // non-positive or absent offset/len fall back to [0, Long.MAX_VALUE)
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    // startAfter is the pagination cursor: the last name of the previous page
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
        HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    // NOTE(review): audit logs "/" although the listing targets `path` —
    // looks like a copy-paste from the root-scoped cases; confirm intent.
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    // access check throws on denial; reaching here means access is granted
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // legacy variant of GETFILEBLOCKLOCATIONS returning "LocatedBlocks" JSON
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
  }
  }
  return response;
}
|
/**
 * Verifies that GETFILEBLOCKLOCATIONS over HttpFS returns the same block
 * locations as a direct DistributedFileSystem call.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGetFileBlockLocations() throws Exception {
  createHttpFSServer(false, false);
  // Create a test directory
  String pathStr = "/tmp/tmp-get-block-location-test";
  createDirWithHttp(pathStr, "700", null);
  Path path = new Path(pathStr);
  DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
      .get(path.toUri(), TestHdfsHelper.getHdfsConf());
  String file1 = pathStr + "/file1";
  createWithHttp(file1, null);
  // Fixed: the offset parameter was malformed ("offset10", missing '='),
  // so the server silently ignored it and used the default offset.
  HttpURLConnection conn = sendRequestToHttpFSServer(file1,
      "GETFILEBLOCKLOCATIONS", "length=10&offset=10");
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BlockLocation[] locations1 = dfs.getFileBlockLocations(new Path(file1), 0, 1);
  Assert.assertNotNull(locations1);
  Map<?, ?> jsonMap = JsonSerialization.mapReader().readValue(conn.getInputStream());
  BlockLocation[] httpfsBlockLocations = JsonUtilClient.toBlockLocationArray(jsonMap);
  // HttpFS must agree with the direct DFS answer, location by location
  assertEquals(locations1.length, httpfsBlockLocations.length);
  for (int i = 0; i < locations1.length; i++) {
    assertEquals(locations1[i].toString(), httpfsBlockLocations[i].toString());
  }
  conn.getInputStream().close();
}
|
/**
 * Down-samples a track by dropping points that are both too close in space
 * AND too close in time to the last retained ("anchor") point. The first
 * point always survives and seeds the anchor; whenever a point is kept the
 * anchor advances to it.
 *
 * @param track the track to clean (its points are copied, not mutated)
 * @return a track built from the surviving points, always present
 */
@Override
public Optional<Track<T>> clean(Track<T> track) {
  // TreeSet gives time-ordered iteration and supports Iterator.remove
  TreeSet<Point<T>> points = new TreeSet<>(track.points());
  Iterator<Point<T>> iter = points.iterator();
  LatLong anchor = null;
  Instant anchorTime = null;
  while (iter.hasNext()) {
    Point<T> point = iter.next();
    //the 1st time through this loop set the anchor information
    if (anchor == null) {
      anchor = point.latLong();
      anchorTime = point.time();
      continue;
    }
    if (tooCloseInSpace(anchor, point) && tooCloseInTime(anchorTime, point.time())) {
      // redundant point: near the anchor in both space and time
      iter.remove();
    } else {
      // keep the point and make it the new anchor
      anchor = point.latLong();
      anchorTime = point.time();
    }
  }
  return Optional.of(Track.of(points));
}
|
@Test
public void testTrackWithPause() {
  Track<String> track = trackWithPause();
  Track<String> cleanedTrack = (new DistanceDownSampler<String>()).clean(track).get();
  // points dropped by the down-sampler
  int numRemovedPoints = track.size() - cleanedTrack.size();
  assertEquals(
      901, track.size(), "1 seed point + 300 moving + 300 stagnant + 300 moving"
  );
  assertEquals(
      290, numRemovedPoints, "Of the 300 stagnant points only the 10 heartbeat points should remain"
  );
}
|
/**
 * Writes an 8-byte long at the current writer index and advances it by 8,
 * growing the buffer first if needed.
 */
@Override
public ByteBuf writeLong(long value) {
  // ensure capacity (and reference-count validity) before the raw write
  ensureWritable0(8);
  _setLong(writerIndex, value);
  writerIndex += 8;
  return this;
}
|
/**
 * Writing to a released buffer must fail fast with
 * {@link IllegalReferenceCountException}.
 */
@Test
public void testWriteLongAfterRelease() {
    // lambda replaces the verbose anonymous Executable (JUnit 5 idiom)
    assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeLong(1));
}
|
/**
 * Executes a SQL query against Pinot, enforcing the statement's max-row
 * limit and the connection's query options.
 *
 * NOTE(review): a LIMIT clause is appended whenever
 * queryContainsLimitStatement() reports none — confirm that helper handles
 * queries with LIMIT only inside subqueries correctly.
 *
 * @param sql the query to execute
 * @return the first result set of the response, or an empty result set
 * @throws SQLException if the statement is closed or execution fails
 */
@Override
public ResultSet executeQuery(String sql)
    throws SQLException {
  validateState();
  try {
    // cap the result size when the caller did not specify a LIMIT
    if (!DriverUtils.queryContainsLimitStatement(sql)) {
      sql += " " + LIMIT_STATEMENT + " " + _maxRows;
    }
    String enabledSql = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
    ResultSetGroup resultSetGroup = _session.execute(enabledSql);
    if (resultSetGroup.getResultSetCount() == 0) {
      _resultSet = PinotResultSet.empty();
      return _resultSet;
    }
    _resultSet = new PinotResultSet(resultSetGroup.getResultSet(0));
    return _resultSet;
  } catch (PinotClientException e) {
    throw new SQLException(String.format("Failed to execute query : %s", sql), e);
  }
}
|
@Test
public void testSetOptionAsInteger()
    throws Exception {
  // option supplied as the string "2" must surface as an integer SET option
  Properties props = new Properties();
  props.put(QueryOptionKey.USE_MULTISTAGE_ENGINE, "2");
  PinotConnection pinotConnection =
      new PinotConnection(props, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
  Statement statement = pinotConnection.createStatement();
  Assert.assertNotNull(statement);
  statement.executeQuery(BASIC_TEST_QUERY);
  String expectedSql =
      DriverUtils.createSetQueryOptionString(QueryOptionKey.USE_MULTISTAGE_ENGINE, 2) + BASIC_TEST_QUERY;
  // the dispatched query must be prefixed with the SET option string
  Assert.assertEquals(_dummyPinotClientTransport.getLastQuery().substring(0, expectedSql.length()), expectedSql);
}
|
/**
 * Resets the consume offset of a consumer group on a topic to the offset at
 * the given timestamp. The timestamp option accepts either epoch millis or a
 * yyyy-MM-dd#HH:mm:ss:SSS formatted date.
 *
 * @param commandLine parsed command line (-g group, -t topic, -s timestamp, -f force)
 * @param options the option definitions (unused here)
 * @param rpcHook optional RPC hook for the admin client
 * @throws SubCommandException wrapping any failure during the reset
 */
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    try {
        String consumerGroup = commandLine.getOptionValue("g").trim();
        String topic = commandLine.getOptionValue("t").trim();
        String timeStampStr = commandLine.getOptionValue("s").trim();
        long timestamp = 0;
        try {
            // first try raw epoch millis
            timestamp = Long.parseLong(timeStampStr);
        } catch (NumberFormatException e) {
            // fall back to the formatted-date form; parse once and reuse the
            // result (previously the string was parsed twice)
            Date date = UtilAll.parseDate(timeStampStr, UtilAll.YYYY_MM_DD_HH_MM_SS_SSS);
            if (date == null) {
                System.out.printf("specified timestamp invalid.%n");
                return;
            }
            timestamp = date.getTime();
        }
        // -f defaults to true when absent
        boolean force = true;
        if (commandLine.hasOption('f')) {
            force = Boolean.parseBoolean(commandLine.getOptionValue("f").trim());
        }
        defaultMQAdminExt.start();
        resetOffset(defaultMQAdminExt, consumerGroup, topic, timestamp, force, timeStampStr);
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
|
@Test
public void testExecute() {
  ResetOffsetByTimeOldCommand cmd = new ResetOffsetByTimeOldCommand();
  Options options = ServerUtil.buildCommandlineOptions(new Options());
  // note: each value carries a leading space from the "-g value" form,
  // which is why the assertions below trim before comparing
  String[] subargs = new String[] {"-g default-group", "-t unit-test", "-s 1412131213231", "-f false"};
  final CommandLine commandLine =
      ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
          cmd.buildCommandlineOptions(options), new DefaultParser());
  assertThat(commandLine.getOptionValue('g').trim()).isEqualTo("default-group");
  assertThat(commandLine.getOptionValue('t').trim()).isEqualTo("unit-test");
  assertThat(commandLine.getOptionValue('s').trim()).isEqualTo("1412131213231");
}
|
/**
 * Checks whether this connection acts as a super user. When both
 * authentication and authorization are enabled, the authenticated role must
 * be a super user, and — if a proxied original principal is present — that
 * principal must be a super user as well. Otherwise access is trivially
 * granted.
 */
private CompletableFuture<Boolean> isSuperUser() {
    assert ctx.executor().inEventLoop();
    // With auth disabled there is nothing to check.
    if (!service.isAuthenticationEnabled() || !service.isAuthorizationEnabled()) {
        return CompletableFuture.completedFuture(true);
    }
    CompletableFuture<Boolean> authRoleCheck = service.getAuthorizationService().isSuperUser(
            authRole, authenticationData);
    if (originalPrincipal == null) {
        return authRoleCheck;
    }
    // Proxied request: the original principal must also pass the check.
    CompletableFuture<Boolean> originalPrincipalCheck = service.getAuthorizationService()
            .isSuperUser(originalPrincipal,
                    originalAuthData != null ? originalAuthData : authenticationData);
    return originalPrincipalCheck.thenCombine(authRoleCheck,
            (originalOk, roleOk) -> originalOk && roleOk);
}
|
/**
 * With super-user and tenant-admin checks denied but topic-level produce
 * permission granted, producing to a local-cluster topic must succeed while
 * producing to a topic owned by a non-local cluster must be rejected.
 */
@Test(timeOut = 30000)
public void testClusterAccess() throws Exception {
    svcConfig.setAuthorizationEnabled(true);
    AuthorizationService authorizationService =
            spyWithClassAndConstructorArgs(AuthorizationService.class, svcConfig, pulsar.getPulsarResources());
    Field providerField = AuthorizationService.class.getDeclaredField("provider");
    providerField.setAccessible(true);
    PulsarAuthorizationProvider authorizationProvider =
            spyWithClassAndConstructorArgs(PulsarAuthorizationProvider.class, svcConfig,
                    pulsar.getPulsarResources());
    providerField.set(authorizationService, authorizationProvider);
    doReturn(authorizationService).when(brokerService).getAuthorizationService();
    // (removed a duplicate svcConfig.setAuthorizationEnabled(true) call —
    // it is already set at the top of the test)
    doReturn(CompletableFuture.completedFuture(false)).when(authorizationProvider)
            .isSuperUser(Mockito.anyString(), Mockito.any(), Mockito.any());
    doReturn(CompletableFuture.completedFuture(false)).when(authorizationProvider)
            .validateTenantAdminAccess(Mockito.anyString(), Mockito.any(), Mockito.any());
    doReturn(CompletableFuture.completedFuture(true)).when(authorizationProvider)
            .checkPermission(any(TopicName.class), Mockito.anyString(),
                    any(AuthAction.class));
    resetChannel();
    setChannelConnected();
    // local-cluster topic: producer creation must succeed
    ByteBuf clientCommand = Commands.newProducer(successTopicName, 1 /* producer id */, 1 /* request id */,
            "prod-name", Collections.emptyMap(), false);
    channel.writeInbound(clientCommand);
    assertTrue(getResponse() instanceof CommandProducerSuccess);
    resetChannel();
    setChannelConnected();
    // topic owned by another cluster: producer creation must fail
    clientCommand = Commands.newProducer(topicWithNonLocalCluster, 1 /* producer id */, 1 /* request id */,
            "prod-name", Collections.emptyMap(), false);
    channel.writeInbound(clientCommand);
    assertTrue(getResponse() instanceof CommandError);
    channel.finish();
}
|
/** Returns the route info manager owned by this controller. */
public RouteInfoManager getRouteInfoManager() {
  return routeInfoManager;
}
|
@Test
public void getRouteInfoManager() {
  // the controller must expose a non-null route info manager after init
  RouteInfoManager manager = namesrvController.getRouteInfoManager();
  Assert.assertNotNull(manager);
}
|
/**
 * Two AlluxioURIs are equal exactly when their underlying URIs are equal.
 */
@Override
public boolean equals(Object o) {
  if (o == this) {
    return true;
  }
  // reject non-AlluxioURI (including null) in one step, then compare URIs
  return o instanceof AlluxioURI && mUri.equals(((AlluxioURI) o).mUri);
}
|
@Test
public void equalsTests() {
  // different hosts must not compare equal, even with identical paths
  assertFalse(new AlluxioURI("alluxio://127.0.0.1:8080/a/b/c.txt").equals(new AlluxioURI(
      "alluxio://localhost:8080/a/b/c.txt")));
  // the same URI built through different constructors must compare equal
  AlluxioURI[] uriFromDifferentConstructor =
      new AlluxioURI[] {new AlluxioURI("alluxio://127.0.0.1:8080/a/b/c.txt"),
          new AlluxioURI("alluxio", Authority.fromString("127.0.0.1:8080"), "/a/b/c.txt"),
          new AlluxioURI(
              new AlluxioURI("alluxio://127.0.0.1:8080/a"), new AlluxioURI("b/c.txt"))};
  for (int i = 0; i < uriFromDifferentConstructor.length - 1; i++) {
    assertTrue(uriFromDifferentConstructor[i].equals(uriFromDifferentConstructor[i + 1]));
  }
}
|
/**
 * Initializes the interpreter: groups "prefix.key" properties into
 * per-prefix Properties buckets, drops prefixes missing the mandatory
 * driver/url settings, and configures result limits and the SQL splitter.
 */
@Override
public void open() {
  super.open();
  for (String propertyKey : properties.stringPropertyNames()) {
    LOGGER.debug("propertyKey: {}", propertyKey);
    String[] keyValue = propertyKey.split("\\.", 2);
    if (2 == keyValue.length) {
      LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]);
      // Fixed: use the trimmed prefix for BOTH lookup and insertion. The
      // previous code looked up the untrimmed prefix but stored the trimmed
      // one, which could create duplicate buckets for prefixes written with
      // surrounding whitespace.
      String prefix = keyValue[0].trim();
      Properties prefixProperties;
      if (basePropertiesMap.containsKey(prefix)) {
        prefixProperties = basePropertiesMap.get(prefix);
      } else {
        prefixProperties = new Properties();
        basePropertiesMap.put(prefix, prefixProperties);
      }
      prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey));
    }
  }
  // Collect prefixes (other than the common bucket) missing mandatory keys;
  // removal is deferred to avoid mutating the map while iterating it.
  Set<String> removeKeySet = new HashSet<>();
  for (String key : basePropertiesMap.keySet()) {
    if (!COMMON_KEY.equals(key)) {
      // renamed from `properties` to avoid shadowing the field of that name
      Properties prefixProperties = basePropertiesMap.get(key);
      if (!prefixProperties.containsKey(DRIVER_KEY) || !prefixProperties.containsKey(URL_KEY)) {
        LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.",
            key, DRIVER_KEY, key, key, URL_KEY);
        removeKeySet.add(key);
      }
    }
  }
  for (String key : removeKeySet) {
    basePropertiesMap.remove(key);
  }
  LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap);
  setMaxLineResults();
  setMaxRows();
  //TODO(zjffdu) Set different sql splitter for different sql dialects.
  this.sqlSplitter = new SqlSplitter();
}
|
@Test
void testStatementPrecode() throws IOException, InterpreterException {
  Properties properties = new Properties();
  properties.setProperty("default.driver", "org.h2.Driver");
  properties.setProperty("default.url", getJdbcConnection());
  properties.setProperty("default.user", "");
  properties.setProperty("default.password", "");
  // precode runs before the user statement, here defining the @v variable
  properties.setProperty(DEFAULT_STATEMENT_PRECODE, "set @v='statement'");
  JDBCInterpreter jdbcInterpreter = new JDBCInterpreter(properties);
  jdbcInterpreter.open();
  String sqlQuery = "select @v";
  InterpreterResult interpreterResult = jdbcInterpreter.interpret(sqlQuery, context);
  assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
  List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
  assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
  // the select must observe the value set by the precode
  assertEquals("@V\nstatement\n", resultMessages.get(0).getData());
}
|
/**
 * Records compensation subscription data for the given consumer group and topic.
 * The group entry is created on demand; an existing subscription for the same
 * topic is overwritten.
 */
public void compensateSubscribeData(String group, String topic, SubscriptionData subscriptionData) {
    final ConsumerGroupInfo groupInfo =
            consumerCompensationTable.computeIfAbsent(group, missingGroup -> new ConsumerGroupInfo(missingGroup));
    groupInfo.getSubscriptionTable().put(topic, subscriptionData);
}
|
@Test
public void compensateSubscribeDataTest() {
// The group must not exist before compensation; afterwards, the compensated
// subscription (topic + SUB_ALL expression) must be visible in its table.
ConsumerGroupInfo consumerGroupInfo = consumerManager.getConsumerGroupInfo(GROUP, true);
Assertions.assertThat(consumerGroupInfo).isNull();
consumerManager.compensateSubscribeData(GROUP, TOPIC, new SubscriptionData(TOPIC, SubscriptionData.SUB_ALL));
consumerGroupInfo = consumerManager.getConsumerGroupInfo(GROUP, true);
Assertions.assertThat(consumerGroupInfo).isNotNull();
Assertions.assertThat(consumerGroupInfo.getSubscriptionTable().size()).isEqualTo(1);
SubscriptionData subscriptionData = consumerGroupInfo.getSubscriptionTable().get(TOPIC);
Assertions.assertThat(subscriptionData).isNotNull();
Assertions.assertThat(subscriptionData.getTopic()).isEqualTo(TOPIC);
Assertions.assertThat(subscriptionData.getSubString()).isEqualTo(SubscriptionData.SUB_ALL);
}
|
// Selects bundles to unload from overloaded brokers.
// For each broker whose weighted max resource usage exceeds the configured
// overload threshold, picks its highest-throughput bundles (by short-term
// in+out message throughput, skipping recently unloaded ones) until at least
// the computed throughput margin is covered. Single-bundle brokers are only
// warned about; no shedding is possible for them.
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
selectedBundlesCache.clear();
final double overloadThreshold = conf.getLoadBalancerBrokerOverloadedThresholdPercentage() / 100.0;
final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
// Check every broker and select
loadData.getBrokerData().forEach((broker, brokerData) -> {
final LocalBrokerData localData = brokerData.getLocalData();
final double currentUsage = localData.getMaxResourceUsageWithWeight(
conf.getLoadBalancerCPUResourceWeight(),
conf.getLoadBalancerDirectMemoryResourceWeight(),
conf.getLoadBalancerBandwidthInResourceWeight(),
conf.getLoadBalancerBandwidthOutResourceWeight());
if (currentUsage < overloadThreshold) {
if (log.isDebugEnabled()) {
log.debug("[{}] Broker is not overloaded, ignoring at this point ({})", broker,
localData.printResourceUsage());
}
return;
}
// We want to offload enough traffic such that this broker will go below the overload threshold
// Also, add a small margin so that this broker won't be very close to the threshold edge.
double percentOfTrafficToOffload = currentUsage - overloadThreshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
log.info(
"Attempting to shed load on {}, which has resource usage {}% above threshold {}%"
+ " -- Offloading at least {} MByte/s of traffic ({})",
broker, 100 * currentUsage, 100 * overloadThreshold, minimumThroughputToOffload / 1024 / 1024,
localData.printResourceUsage());
MutableDouble trafficMarkedToOffload = new MutableDouble(0);
MutableBoolean atLeastOneBundleSelected = new MutableBoolean(false);
if (localData.getBundles().size() > 1) {
// Sort bundles by throughput, then pick the biggest N which combined
// make up for at least the minimum throughput to offload
loadData.getBundleDataForLoadShedding().entrySet().stream()
.filter(e -> localData.getBundles().contains(e.getKey()))
.map((e) -> {
// Map to throughput value
// Consider short-term byte rate to address system resource burden
String bundle = e.getKey();
BundleData bundleData = e.getValue();
TimeAverageMessageData shortTermData = bundleData.getShortTermData();
double throughput = shortTermData.getMsgThroughputIn() + shortTermData
.getMsgThroughputOut();
return Pair.of(bundle, throughput);
}).filter(e -> {
// Only consider bundles that were not already unloaded recently
return !recentlyUnloadedBundles.containsKey(e.getLeft());
}).sorted((e1, e2) -> {
// Sort by throughput in reverse order
return Double.compare(e2.getRight(), e1.getRight());
}).forEach(e -> {
// Keep selecting until the offload target is met; the second clause
// guarantees at least one bundle is picked even if the first candidate
// alone already exceeds the target.
if (trafficMarkedToOffload.doubleValue() < minimumThroughputToOffload
|| atLeastOneBundleSelected.isFalse()) {
selectedBundlesCache.put(broker, e.getLeft());
trafficMarkedToOffload.add(e.getRight());
atLeastOneBundleSelected.setTrue();
}
});
} else if (localData.getBundles().size() == 1) {
log.warn(
"HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
+ "No Load Shedding will be done on this broker",
localData.getBundles().iterator().next(), broker);
} else {
log.warn("Broker {} is overloaded despite having no bundles", broker);
}
});
return selectedBundlesCache;
}
|
@Test
public void testBrokerWithSingleBundle() {
// An overloaded broker that owns only one bundle must not have anything
// selected for unloading (sole-bundle brokers are exempt from shedding).
LoadData loadData = new LoadData();
LocalBrokerData broker1 = new LocalBrokerData();
broker1.setBandwidthIn(new ResourceUsage(999, 1000));
broker1.setBandwidthOut(new ResourceUsage(999, 1000));
broker1.setBundles(Sets.newHashSet("bundle-1"));
BundleData bundle1 = new BundleData();
TimeAverageMessageData db1 = new TimeAverageMessageData();
db1.setMsgThroughputIn(1000);
db1.setMsgThroughputOut(1000);
bundle1.setShortTermData(db1);
loadData.getBundleData().put("bundle-1", bundle1);
loadData.getBrokerData().put("broker-1", new BrokerData(broker1));
assertTrue(os.findBundlesForUnloading(loadData, conf).isEmpty());
}
|
/**
 * Plans the next batch of splits. Always refreshes the table first so planning
 * sees the latest metadata; a {@code null} position means this is the initial
 * plan, otherwise planning continues incrementally from {@code lastPosition}.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    return lastPosition == null ? discoverInitialSplits() : discoverIncrementalSplits(lastPosition);
}
|
@Test
public void testIncrementalFromSnapshotId() throws Exception {
// Incremental planning starting from an explicit snapshot id must be inclusive
// of that snapshot: the initial (empty) result positions at snapshot2's parent
// so the next cycle discovers snapshot2's files.
appendTwoSnapshots();
ScanContext scanContext =
ScanContext.builder()
.startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
.startSnapshotId(snapshot2.snapshotId())
.build();
ContinuousSplitPlannerImpl splitPlanner =
new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
assertThat(initialResult.fromPosition()).isNull();
// For inclusive behavior of snapshot2, the initial result should point to snapshot1 (as
// snapshot2's parent)
assertThat(initialResult.toPosition().snapshotId().longValue())
.isEqualTo(snapshot1.snapshotId());
assertThat(initialResult.toPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot1.timestampMillis());
assertThat(initialResult.splits()).isEmpty();
ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition());
assertThat(secondResult.fromPosition().snapshotId().longValue())
.isEqualTo(snapshot1.snapshotId());
assertThat(secondResult.fromPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot1.timestampMillis());
assertThat(secondResult.toPosition().snapshotId().longValue())
.isEqualTo(snapshot2.snapshotId());
assertThat(secondResult.toPosition().snapshotTimestampMs().longValue())
.isEqualTo(snapshot2.timestampMillis());
IcebergSourceSplit split = Iterables.getOnlyElement(secondResult.splits());
assertThat(split.task().files()).hasSize(1);
Set<String> discoveredFiles =
split.task().files().stream()
.map(fileScanTask -> fileScanTask.file().path().toString())
.collect(Collectors.toSet());
// should discover dataFile2 appended in snapshot2
Set<String> expectedFiles = ImmutableSet.of(dataFile2.path().toString());
assertThat(discoveredFiles).containsExactlyElementsOf(expectedFiles);
IcebergEnumeratorPosition lastPosition = secondResult.toPosition();
// Subsequent empty cycles must keep advancing the position consistently.
for (int i = 0; i < 3; ++i) {
lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
}
}
|
// Trailing stop-loss rule: with an open long position the stop trails the
// highest reference price since entry minus the threshold; with a short it
// trails the lowest price since entry plus the threshold. Returns false when
// there is no trading record or no open position.
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
if (tradingRecord != null && !tradingRecord.isClosed()) {
Num entryPrice = tradingRecord.getCurrentPosition().getEntry().getNetPrice();
Num currentPrice = this.referencePrice.getValue(index);
Num threshold = this.stopLossThreshold.getValue(index);
// Number of bars covering the position, inclusive of the entry bar.
int barsSinceEntry = index - tradingRecord.getCurrentPosition().getEntry().getIndex() + 1;
if (tradingRecord.getCurrentPosition().getEntry().isBuy()) {
// Long: stop = max(entry, highest since entry) - threshold.
HighestValueIndicator highestPrice = new HighestValueIndicator(this.referencePrice, barsSinceEntry);
Num thresholdPrice = entryPrice.max(highestPrice.getValue(index)).minus(threshold);
return currentPrice.isLessThan(thresholdPrice);
} else {
// Short: stop = min(entry, lowest since entry) + threshold.
LowestValueIndicator lowestPrice = new LowestValueIndicator(this.referencePrice, barsSinceEntry);
Num thresholdPrice = entryPrice.min(lowestPrice.getValue(index)).plus(threshold);
return currentPrice.isGreaterThan(thresholdPrice);
}
}
return false;
}
|
@Test
public void testEdgeCaseNoTrade() {
// With an empty trading record (no open position) the rule must never fire.
AverageTrueRangeTrailingStopLossRule rule = new AverageTrueRangeTrailingStopLossRule(series, 3, 1.0);
TradingRecord tradingRecord = new BaseTradingRecord();
// No trade, so the rule should never be satisfied
assertFalse(rule.isSatisfied(0, tradingRecord));
assertFalse(rule.isSatisfied(1, tradingRecord));
assertFalse(rule.isSatisfied(2, tradingRecord));
}
|
/**
 * Entry point of the fluent query builder: starts a SELECT clause for the
 * given field name (e.g. {@code "*"} or a concrete field).
 *
 * @param fieldName the field to select
 * @return a new {@link Select} builder
 */
public static Select select(String fieldName) {
    return new Select(fieldName);
}
|
@Test
void float_numeric_operations() {
// Each floating-point comparison operator and inRange must render into the
// expected YQL string, with range(...) used for the interval condition.
String q = Q.select("*")
.from("sd1")
.where("f1").le(1.1)
.and("f2").lt(2.2)
.and("f3").ge(3.3)
.and("f4").gt(4.4)
.and("f5").eq(5.5)
.and("f6").inRange(6.6, 7.7)
.build();
assertEquals(q, "yql=select * from sd1 where f1 <= 1.1 and f2 < 2.2 and f3 >= 3.3 and f4 > 4.4 and f5 = 5.5 and range(f6, 6.6, 7.7)");
}
|
// Returns one page of products for callers with ADMIN or USER authority.
// NOTE(review): a GET endpoint taking a @RequestBody is unusual — some HTTP
// clients/proxies drop bodies on GET; confirm all consumers can send it.
@GetMapping
@PreAuthorize("hasAnyAuthority('ADMIN', 'USER')")
public CustomResponse<CustomPagingResponse<ProductResponse>> getProducts(
@RequestBody @Valid final ProductPagingRequest productPagingRequest) {
final CustomPage<Product> productPage = productReadService.getProducts(productPagingRequest);
final CustomPagingResponse<ProductResponse> productPagingResponse =
customPageToCustomPagingResponseMapper.toPagingResponse(productPage);
return CustomResponse.successOf(productPagingResponse);
}
|
@Test
void givenProductPagingRequest_whenGetProductsFromAdmin_thenReturnCustomPageProduct() throws Exception {
// End-to-end controller test: an admin-token GET with a paging body must
// return the stubbed product page with success envelope fields set.
// Given
ProductPagingRequest pagingRequest = ProductPagingRequest.builder()
.pagination(
CustomPaging.builder()
.pageSize(1)
.pageNumber(1)
.build()
).build();
String productId = UUID.randomUUID().toString();
ProductEntity expected = ProductEntity.builder()
.id(productId)
.name("Test Product")
.unitPrice(BigDecimal.valueOf(12))
.amount(BigDecimal.valueOf(5))
.build();
List<ProductEntity> productEntities = new ArrayList<>();
productEntities.addAll(Collections.singletonList(expected));
Page<ProductEntity> productEntityPage = new PageImpl<>(productEntities, PageRequest.of(1, 1), productEntities.size());
List<Product> productDomainModels = productEntities.stream()
.map(entity -> new Product(entity.getId(), entity.getName(), entity.getAmount(),entity.getUnitPrice()))
.collect(Collectors.toList());
CustomPage<Product> productPage = CustomPage.of(productDomainModels, productEntityPage);
// When
when(productReadService.getProducts(any(ProductPagingRequest.class))).thenReturn(productPage);
// Then
mockMvc.perform(MockMvcRequestBuilders.get("/api/v1/products")
.contentType(MediaType.APPLICATION_JSON)
.content(objectMapper.writeValueAsString(pagingRequest))
.header(HttpHeaders.AUTHORIZATION, "Bearer " + mockAdminToken.getAccessToken()))
.andDo(MockMvcResultHandlers.print())
.andExpect(MockMvcResultMatchers.status().isOk())
.andExpect(MockMvcResultMatchers.jsonPath("$.httpStatus").value("OK"))
.andExpect(MockMvcResultMatchers.jsonPath("$.isSuccess").value(true))
.andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].id").value(expected.getId()))
.andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].name").value(expected.getName()))
.andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].amount").value(expected.getAmount()))
.andExpect(MockMvcResultMatchers.jsonPath("$.response.content[0].unitPrice").value(expected.getUnitPrice()));
// Verify
verify(productReadService, times(1)).getProducts(any(ProductPagingRequest.class));
}
|
// Builds the "completed_by_ts" long parameter from whichever TCT time field is
// set, checked in priority order: explicit completedByTs, completedByHour
// (evaluated in the workflow cron timezone unless tz overrides it), then
// durationMinutes. Throws if none of the three fields is present.
@JsonIgnore
public LongParamDefinition getCompletedByTsParam() {
if (completedByTs != null) {
return ParamDefinition.buildParamDefinition(PARAM_NAME, completedByTs);
}
if (completedByHour != null) {
// With no explicit tz, the expression references the WORKFLOW_CRON_TIMEZONE
// parameter; otherwise the tz is inlined as a quoted literal.
String timeZone = tz == null ? "WORKFLOW_CRON_TIMEZONE" : String.format("'%s'", tz);
return LongParamDefinition.builder()
.name(PARAM_NAME)
.expression(String.format(COMPLETED_HOUR_TCT_TS, timeZone, completedByHour))
.build();
}
if (durationMinutes != null) {
return LongParamDefinition.builder()
.name(PARAM_NAME)
.expression(String.format(DURATION_MINUTES_TCT_TS, durationMinutes))
.build();
}
throw new MaestroInternalError(
"Invalid TCT definition, neither of time fields is set: %s", this);
}
|
@Test
public void testGetCompletedByTsParamWithCompletedByHourWithoutTz() {
// Without an explicit tz, the generated expression must reference the
// WORKFLOW_CRON_TIMEZONE parameter rather than a quoted timezone literal.
Tct tct = new Tct();
tct.setCompletedByHour(1);
LongParamDefinition expected =
LongParamDefinition.builder()
.name("completed_by_ts")
.expression(
"tz_dateint_formatter = DateTimeFormat.forPattern('yyyyMMdd').withZone(DateTimeZone.forID(WORKFLOW_CRON_TIMEZONE));"
+ "dt = tz_dateint_formatter.parseDateTime(TARGET_RUN_DATE).plusHours(1).minusSeconds(1);"
+ "return dt.getMillis();")
.build();
LongParamDefinition actual = tct.getCompletedByTsParam();
assertEquals(expected, actual);
}
|
// Lists the contents of a B2 directory, paging with a marker until exhausted.
// With versioning enabled, file versions are listed (alphabetical by name,
// newest-first per name); otherwise only current file names. A directory for
// which neither a placeholder nor any child is found raises NotfoundException.
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
try {
final AttributedList<Path> objects = new AttributedList<>();
Marker marker = new Marker(null, null);
final String containerId = fileid.getVersionId(containerService.getContainer(directory));
// Seen placeholders
final Map<String, Long> revisions = new HashMap<>();
// A bucket itself always counts as an existing "directory".
boolean hasDirectoryPlaceholder = containerService.isContainer(directory);
do {
if(log.isDebugEnabled()) {
log.debug(String.format("List directory %s with marker %s", directory, marker));
}
final B2ListFilesResponse response;
if(versioning.isEnabled()) {
// In alphabetical order by file name, and by reverse of date/time uploaded for
// versions of files with the same name.
response = session.getClient().listFileVersions(containerId,
marker.nextFilename, marker.nextFileId, chunksize,
this.createPrefix(directory),
String.valueOf(Path.DELIMITER));
}
else {
response = session.getClient().listFileNames(containerId,
marker.nextFilename, chunksize,
this.createPrefix(directory),
String.valueOf(Path.DELIMITER));
}
marker = this.parse(directory, objects, response, revisions);
// NOTE(review): a non-empty name-listing page without a file id marker is
// taken as evidence the directory exists — presumably covering implicit
// directories; confirm against parse() semantics.
if(null == marker.nextFileId) {
if(!response.getFiles().isEmpty()) {
hasDirectoryPlaceholder = true;
}
}
// Report partial progress after every page.
listener.chunk(directory, objects);
}
while(marker.hasNext());
if(!hasDirectoryPlaceholder && objects.isEmpty()) {
if(log.isWarnEnabled()) {
log.warn(String.format("No placeholder found for directory %s", directory));
}
throw new NotfoundException(directory.getAbsolute());
}
return objects;
}
catch(B2ApiException e) {
throw new B2ExceptionMappingService(fileid).map("Listing directory {0} failed", e, directory);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
}
|
@Test
public void testListEmptyFolder() throws Exception {
// A freshly created folder must list as empty (not NotfoundException), and
// the progress listener must still receive at least one chunk callback.
final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path folder = new B2DirectoryFeature(session, fileid).mkdir(new Path(bucket, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
final AtomicBoolean callback = new AtomicBoolean();
assertTrue(new B2ObjectListService(session, fileid).list(folder, new DisabledListProgressListener() {
@Override
public void chunk(final Path parent, final AttributedList<Path> list) {
assertNotSame(AttributedList.EMPTY, list);
callback.set(true);
}
}).isEmpty());
assertTrue(callback.get());
new B2DeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
// Deletes the ZooKeeper node at the given key (including children) if it
// exists. Curator exceptions are routed to the shared handler instead of
// propagating; the CHECKSTYLE markers suppress the broad-catch rule required
// by Curator's checked Exception signature.
@Override
public void delete(final String key) {
try {
if (isExisted(key)) {
client.delete().deletingChildrenIfNeeded().forPath(key);
}
// CHECKSTYLE:OFF
} catch (final Exception ex) {
// CHECKSTYLE:ON
ZookeeperExceptionHandler.handleException(ex);
}
}
|
@Test
void assertDeleteExistKey() throws Exception {
// When the node exists, delete() must issue a children-included delete
// against exactly that path.
when(existsBuilder.forPath("/test/children/1")).thenReturn(new Stat());
when(deleteBuilder.deletingChildrenIfNeeded()).thenReturn(backgroundVersionable);
REPOSITORY.delete("/test/children/1");
verify(backgroundVersionable).forPath("/test/children/1");
}
|
/**
 * Renders the given AST back into SQL text, trimming trailing newlines
 * produced by the formatter.
 *
 * @param root root node of the statement AST
 * @return the formatted SQL string
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    new Formatter(sql).process(root, 0);
    return StringUtils.stripEnd(sql.toString(), "\n");
}
|
@Test
public void shouldFormatOuterJoinWithoutJoinWindow() {
// An OUTER join with no WITHIN window must format as FULL OUTER JOIN with
// only the ON criteria.
final Join join = new Join(leftAlias, ImmutableList.of(new JoinedSource(
Optional.empty(),
rightAlias,
JoinedSource.Type.OUTER,
criteria,
Optional.empty())));
final String expected = "`left` L\nFULL OUTER JOIN `right` R ON (('left.col0' = 'right.col0'))";
assertEquals(expected, SqlFormatter.formatSql(join));
}
|
/**
 * Resolves the function parameter type for a varargs Java type, using the
 * varargs-specific Java-to-argument-type mapping.
 *
 * @param type the reflected Java type of the vararg parameter
 * @return the resolved {@link ParamType}
 */
public static ParamType getVarArgsSchemaFromType(final Type type) {
    final ParamType resolved = getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
    return resolved;
}
|
@Test
public void shouldGetGenericFunctionVariadic() throws NoSuchMethodException {
// A generic lambda-typed return must map to LambdaType with its type
// variables preserved as GenericType placeholders.
// Given:
final Type genericType = getClass().getMethod("genericFunctionType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getVarArgsSchemaFromType(genericType);
// Then:
assertThat(returnType, is(LambdaType.of(ImmutableList.of(GenericType.of("T")), GenericType.of("U"))));
}
|
// Atomically sets the counter to updateValue iff it currently equals
// expectedValue, blocking until the underlying async operation completes.
@Override
public boolean compareAndSet(long expectedValue, long updateValue) {
return complete(asyncCounter.compareAndSet(expectedValue, updateValue));
}
|
@Test
public void testCompareAndSet() {
// First CAS from the initial value succeeds; a second CAS with the stale
// expected value must fail.
boolean compareTrue = atomicCounter.compareAndSet(INITIAL_VALUE, ADDED_VALUE);
assertThat(compareTrue, is(true));
boolean compareFalse = atomicCounter.compareAndSet(INITIAL_VALUE, ADDED_VALUE);
assertThat(compareFalse, is(false));
}
|
// Instrumentation target: the TTL agent rewrites this method's body at class
// load time (see test below); without the agent it is a plain identity method.
// Keep the body trivial so the transformation is observable.
public int toBeTransformedMethod(int input) {
return input;
}
|
@Test
public void test_method1() {
// Behavior depends on whether the TTL agent is loaded: with the agent the
// method is transformed (21 -> 42); without it, it returns its input as-is.
final ToBeTransformedClass instance = new ToBeTransformedClass();
System.out.println("========================================");
if (TtlAgent.isTtlAgentLoaded()) {
System.out.println("Test **WITH** TTL Agent");
assertEquals(42, instance.toBeTransformedMethod(21));
} else {
System.out.println("Test WITHOUT TTL Agent");
assertEquals(21, instance.toBeTransformedMethod(21));
}
System.out.println("========================================");
}
|
/**
 * Expands every name-template segment against the variable map and chains the
 * results into a hierarchical {@code ResourceGroupId}: the first segment forms
 * the root, each following segment a child of the previous id.
 */
public ResourceGroupId expandTemplate(VariableMap context)
{
    ResourceGroupId expanded = null;
    for (ResourceGroupNameTemplate segment : segments) {
        String name = segment.expandTemplate(context);
        expanded = (expanded == null) ? new ResourceGroupId(name) : new ResourceGroupId(expanded, name);
    }
    return expanded;
}
|
@Test
public void testExpansion()
{
// Each ${VAR} segment expands from the map into one level of the id
// hierarchy; an expanded value containing a dot stays a single segment.
ResourceGroupIdTemplate template = new ResourceGroupIdTemplate("test.${SCHEMA}.${USER}.${SOURCE}");
ResourceGroupId expected = new ResourceGroupId(new ResourceGroupId(new ResourceGroupId(new ResourceGroupId("test"), "schema"), "u"), "s");
assertEquals(template.expandTemplate(new VariableMap(ImmutableMap.of("SCHEMA", "schema", "USER", "u", "SOURCE", "s"))), expected);
template = new ResourceGroupIdTemplate("test.${USER}");
assertEquals(template.expandTemplate(new VariableMap(ImmutableMap.of("USER", "alice.smith", "SOURCE", "s"))), new ResourceGroupId(new ResourceGroupId("test"), "alice.smith"));
}
|
/**
 * Looks up the protocol version advertised for the given database, falling
 * back to the database type's default when the database name is {@code null}
 * or has no registered entry.
 */
public static String getProtocolVersion(final String databaseName, final DatabaseType databaseType) {
    if (null == databaseName) {
        return getDefaultProtocolVersion(databaseType);
    }
    return SERVER_INFORMATION_MAP.getOrDefault(databaseName, getDefaultProtocolVersion(databaseType));
}
|
@Test
void assertGetServerVersionWithoutDatabase() {
// With a null database name, the default protocol version for the database
// type must be returned, embedding the configured proxy version.
CommonConstants.PROXY_VERSION.set("5.0.0");
assertThat(DatabaseProtocolServerInfo.getProtocolVersion(null, TypedSPILoader.getService(DatabaseType.class, "FIXTURE")), is("1.0-ShardingSphere-Proxy 5.0.0"));
}
|
/**
 * Returns a URI equivalent to {@code uri} but with an explicit port.
 * A URI that already carries a port is returned unchanged. Otherwise the
 * well-known default for http (80) or https (443) is used, falling back to
 * the supplied {@code port} for any other — or missing — scheme.
 *
 * @param uri  the URI to normalize; may be {@code null}
 * @param port fallback port for schemes without a well-known default
 * @return the URI with a port set, or {@code null} if {@code uri} was {@code null}
 * @throws RuntimeException if the rebuilt URI is syntactically invalid
 */
@Nullable
public static URI getUriWithPort(@Nullable final URI uri, final int port) {
    if (uri == null) {
        return null;
    }
    if (uri.getPort() != -1) {
        return uri;
    }
    // Scheme comparison must be null-safe (relative URIs have no scheme; the
    // previous switch threw NPE there) and case-insensitive per RFC 3986 §3.1.
    final String scheme = uri.getScheme();
    final int realPort;
    if ("http".equalsIgnoreCase(scheme)) {
        realPort = 80;
    } else if ("https".equalsIgnoreCase(scheme)) {
        realPort = 443;
    } else {
        realPort = port;
    }
    try {
        return new URI(
                scheme,
                uri.getUserInfo(),
                uri.getHost(),
                realPort,
                uri.getPath(),
                uri.getQuery(),
                uri.getFragment());
    } catch (URISyntaxException e) {
        throw new RuntimeException("Could not parse URI.", e);
    }
}
|
@Test
public void testGetUriWithPort() throws Exception {
// Existing ports are preserved; http/https get their well-known defaults;
// unknown schemes fall back to the supplied port.
final URI uriWithPort = new URI("http://example.com:12345");
final URI httpUriWithoutPort = new URI("http://example.com");
final URI httpsUriWithoutPort = new URI("https://example.com");
final URI uriWithUnknownSchemeAndWithoutPort = new URI("foobar://example.com");
assertEquals(12345, Tools.getUriWithPort(uriWithPort, 1).getPort());
assertEquals(80, Tools.getUriWithPort(httpUriWithoutPort, 1).getPort());
assertEquals(443, Tools.getUriWithPort(httpsUriWithoutPort, 1).getPort());
assertEquals(1, Tools.getUriWithPort(uriWithUnknownSchemeAndWithoutPort, 1).getPort());
}
|
/**
 * Dispatches a Curator connection-state event to the handler matching the
 * circuit breaker's current state. Synchronized so state transitions are
 * processed one at a time.
 */
@Override
public synchronized void stateChanged(CuratorFramework client, ConnectionState newState) {
    if (!circuitBreaker.isOpen()) {
        handleClosedStateChange(newState);
    } else {
        handleOpenStateChange(newState);
    }
}
|
@Test
public void testBasic() throws Exception {
// While the breaker is closed, state changes pass through (duplicate
// SUSPENDED suppressed). After LOST opens the breaker, further events are
// swallowed until the retry policy gives up, at which point the last
// suppressed state (SUSPENDED) is replayed to the delegate listener.
RecordingListener recordingListener = new RecordingListener();
TestRetryPolicy retryPolicy = new TestRetryPolicy();
CircuitBreakingConnectionStateListener listener =
new CircuitBreakingConnectionStateListener(dummyClient, recordingListener, retryPolicy, service);
listener.stateChanged(dummyClient, ConnectionState.RECONNECTED);
assertEquals(timing.takeFromQueue(recordingListener.stateChanges), ConnectionState.RECONNECTED);
listener.stateChanged(dummyClient, ConnectionState.SUSPENDED);
assertEquals(timing.takeFromQueue(recordingListener.stateChanges), ConnectionState.SUSPENDED);
listener.stateChanged(dummyClient, ConnectionState.SUSPENDED); // 2nd suspended is ignored
assertTrue(recordingListener.stateChanges.isEmpty());
listener.stateChanged(dummyClient, ConnectionState.LOST);
assertEquals(timing.takeFromQueue(recordingListener.stateChanges), ConnectionState.LOST);
synchronized (listener) // don't let retry policy run while we're pushing state changes
{
listener.stateChanged(dummyClient, ConnectionState.READ_ONLY); // all further events are ignored
listener.stateChanged(dummyClient, ConnectionState.RECONNECTED); // all further events are ignored
listener.stateChanged(dummyClient, ConnectionState.SUSPENDED); // all further events are ignored
listener.stateChanged(dummyClient, ConnectionState.LOST); // all further events are ignored
listener.stateChanged(
dummyClient,
ConnectionState.SUSPENDED); // all further events are ignored - this will be the last event
}
retryTiming.multiple(2).sleep();
assertTrue(recordingListener.stateChanges.isEmpty());
retryPolicy.isRetrying = false; // retry policy will return false
assertEquals(timing.takeFromQueue(recordingListener.stateChanges), ConnectionState.SUSPENDED);
}
|
/**
 * Returns all current members of the set as an array, reading the whole set
 * in one synchronous round trip.
 */
@Override
public Object[] toArray() {
    return get(readAllAsync()).toArray();
}
|
@Test
public void testToArray() throws InterruptedException {
// An entry added with a 1s TTL must be absent from toArray() after expiry,
// for both the Object[] and the typed-array overloads.
RSetCache<String> set = redisson.getSetCache("set");
set.add("1");
set.add("4");
set.add("2", 1, TimeUnit.SECONDS);
set.add("5");
set.add("3");
Thread.sleep(1500);
assertThat(set.toArray()).containsOnly("1", "4", "5", "3");
String[] strs = set.toArray(new String[0]);
assertThat(strs).containsOnly("1", "4", "5", "3");
set.destroy();
}
|
// Starts a new activation session via username/password and triggers the
// SEND_SMS action of the activation flow; the flow service produces the
// response returned to the app.
@Operation(summary = "start new activation session with username/password", tags = { SwaggerConfig.ACTIVATE_WEBSITE, SwaggerConfig.ACTIVATE_SMS }, operationId = "sms",
parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@PostMapping(value = "sms", produces = "application/json")
@ResponseBody
public AppResponse sendSms(@Valid @RequestBody AppSessionRequest request) throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
return service.processAction(ActivationFlowFactory.TYPE, Action.SEND_SMS, request);
}
|
@Test
void validateIfCorrectProcessesAreCalledSendSms() throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException, SharedServiceClientException {
// The controller must delegate exactly once to the flow service's
// processAction for the SEND_SMS request.
ResendSmsRequest request = new ResendSmsRequest();
activationController.sendSms(request);
verify(flowService, times(1)).processAction(anyString(), any(Action.class), any(AppSessionRequest.class));
}
|
/**
 * Resolves the cluster node responsible for the given key by hashing the key
 * to its slot and delegating to the slot lookup.
 */
@Override
public RedisClusterNode clusterGetNodeForKey(byte[] key) {
    return clusterGetNodeForSlot(executorService.getConnectionManager().calcSlot(key));
}
|
@Test
public void testClusterGetNodeForKey() {
// Any key must resolve to some owning cluster node.
RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes());
assertThat(node).isNotNull();
}
|
// Schedules and performs movements of active tasks that sit on clients not
// caught up on them while a more caught-up client exists. Movements are
// processed in order of fewest caught-up clients (most constrained first) and
// each is resolved by, in priority order: swapping with a caught-up standby,
// moving to a caught-up client while warming up the original owner, or moving
// to the most caught-up client. Returns the number of movements performed;
// throws if a scheduled movement cannot be satisfied.
static int assignActiveTaskMovements(final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients,
final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag,
final Map<ProcessId, ClientState> clientStates,
final Map<ProcessId, Set<TaskId>> warmups,
final AtomicInteger remainingWarmupReplicas) {
final BiFunction<ProcessId, TaskId, Boolean> caughtUpPredicate =
(client, task) -> taskIsCaughtUpOnClient(task, client, tasksToCaughtUpClients);
// Clients ordered by assigned task load, constrained to caught-up candidates.
final ConstrainedPrioritySet caughtUpClientsByTaskLoad = new ConstrainedPrioritySet(
caughtUpPredicate,
client -> clientStates.get(client).assignedTaskLoad()
);
final Queue<TaskMovement> taskMovements = new PriorityQueue<>(
Comparator.comparing(TaskMovement::numCaughtUpClients).thenComparing(TaskMovement::task)
);
for (final Map.Entry<ProcessId, ClientState> clientStateEntry : clientStates.entrySet()) {
final ProcessId client = clientStateEntry.getKey();
final ClientState state = clientStateEntry.getValue();
for (final TaskId task : state.activeTasks()) {
// if the desired client is not caught up, and there is another client that _is_ more caught up, then
// we schedule a movement, so we can move the active task to a more caught-up client. We'll try to
// assign a warm-up to the desired client so that we can move it later on.
if (taskIsNotCaughtUpOnClientAndOtherMoreCaughtUpClientsExist(task, client, clientStates, tasksToCaughtUpClients, tasksToClientByLag)) {
taskMovements.add(new TaskMovement(task, client, tasksToCaughtUpClients.get(task)));
}
}
caughtUpClientsByTaskLoad.offer(client);
}
final int movementsNeeded = taskMovements.size();
while (!taskMovements.isEmpty()) {
final TaskMovement movement = taskMovements.poll();
// Attempt to find a caught up standby, otherwise find any caught up client, failing that use the most
// caught up client.
final boolean moved = tryToSwapStandbyAndActiveOnCaughtUpClient(clientStates, caughtUpClientsByTaskLoad, movement) ||
tryToMoveActiveToCaughtUpClientAndTryToWarmUp(clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement) ||
tryToMoveActiveToMostCaughtUpClient(tasksToClientByLag, clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement);
if (!moved) {
throw new IllegalStateException("Tried to move task to more caught-up client as scheduled before but none exist");
}
}
return movementsNeeded;
}
|
@Test
public void shouldAssignTasksToClientsAndReturnFalseWhenAllClientsCaughtUp() {
// When every client is caught up on every task, no movements are scheduled
// and the returned movement count must be 0.
final int maxWarmupReplicas = Integer.MAX_VALUE;
final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2);
final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients = new HashMap<>();
final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag = new HashMap<>();
for (final TaskId task : allTasks) {
tasksToCaughtUpClients.put(task, mkSortedSet(PID_1, PID_2, PID_3));
tasksToClientByLag.put(task, mkOrderedSet(PID_1, PID_2, PID_3));
}
final ClientState client1 = getClientStateWithActiveAssignment(mkSet(TASK_0_0, TASK_1_0), allTasks, allTasks);
final ClientState client2 = getClientStateWithActiveAssignment(mkSet(TASK_0_1, TASK_1_1), allTasks, allTasks);
final ClientState client3 = getClientStateWithActiveAssignment(mkSet(TASK_0_2, TASK_1_2), allTasks, allTasks);
assertThat(
assignActiveTaskMovements(
tasksToCaughtUpClients,
tasksToClientByLag,
getClientStatesMap(client1, client2, client3),
new TreeMap<>(),
new AtomicInteger(maxWarmupReplicas)
),
is(0)
);
}
|
// Samples REACHABLE_CHECK_NUMBER members (cycling through a shuffled copy of
// the member list if it is smaller) and returns true only if every sampled
// member has a public address, its internal address is NOT reachable, and its
// public address IS reachable — i.e. the cluster is only accessible to this
// client through public addresses. Any counter-example short-circuits false.
private boolean membersReachableOnlyViaPublicAddress(Collection<Member> members) {
List<Member> shuffledList = new ArrayList<>(members);
Collections.shuffle(shuffledList);
Iterator<Member> iter = shuffledList.iterator();
for (int i = 0; i < REACHABLE_CHECK_NUMBER; i++) {
// Restart iteration when the member list is shorter than the sample count.
if (!iter.hasNext()) {
iter = shuffledList.iterator();
}
Member member = iter.next();
Address publicAddress = member.getAddressMap().get(CLIENT_PUBLIC_ENDPOINT_QUALIFIER);
Address internalAddress = member.getAddress();
if (publicAddress == null) {
logger.fine("The public address is not available on the member. The client will use internal addresses");
return false;
}
if (isReachable(internalAddress, REACHABLE_ADDRESS_TIMEOUT_MILLIS)) {
logger.fine("The internal address is reachable. The client will use the internal addresses");
return false;
}
if (!isReachable(publicAddress, NON_REACHABLE_ADDRESS_TIMEOUT_MILLIS)) {
logger.fine("Public address %s is not reachable. The client will use internal addresses", publicAddress);
return false;
}
}
logger.fine("Members are accessible via only public addresses. The client will use public addresses");
return true;
}
|
@Test
public void membersReachableOnlyViaPublicAddress() {
// A member whose internal host is unreachable but whose public host is
// reachable must make the provider decide to translate to public addresses.
// given
Hazelcast.newHazelcastInstance();
TranslateToPublicAddressProvider translateProvider = createTranslateProvider();
// when
translateProvider.init(new InitialMembershipEvent(mock(Cluster.class),
new HashSet<>(Collections.singletonList(member(UNREACHABLE_HOST, REACHABLE_HOST)))));
boolean result = translateProvider.getAsBoolean();
// then
assertTrue(result);
}
|
/**
 * Appends the given bars to the series in chronological order. Bars are sorted
 * first; a bar is only added when the series is empty or the bar ends strictly
 * after the series' current last bar, so duplicates and older data are skipped.
 * A {@code null} or empty list is a no-op.
 */
public static void addBars(BarSeries barSeries, List<Bar> newBars) {
    if (newBars == null || newBars.isEmpty()) {
        return;
    }
    sortBars(newBars);
    for (Bar candidate : newBars) {
        boolean isNewer = barSeries.isEmpty()
                || candidate.getEndTime().isAfter(barSeries.getLastBar().getEndTime());
        if (isNewer) {
            barSeries.addBar(candidate);
        }
    }
}
|
@Test
public void addBars() {
// Out-of-order bars must be sorted on insert, and re-adding the same list
// with one new bar must only append the new bar.
BarSeries barSeries = new BaseBarSeries("1day", numFunction.apply(0));
List<Bar> bars = new ArrayList<>();
time = ZonedDateTime.of(2019, 6, 1, 1, 1, 0, 0, ZoneId.systemDefault());
final Bar bar0 = new MockBar(time, 1d, 2d, 3d, 4d, 5d, 0d, 7, numFunction);
final Bar bar1 = new MockBar(time.plusDays(1), 1d, 1d, 1d, 1d, 1d, 1d, 1, numFunction);
final Bar bar2 = new MockBar(time.plusDays(2), 1d, 1d, 1d, 1d, 1d, 1d, 1, numFunction);
bars.add(bar2);
bars.add(bar0);
bars.add(bar1);
// add 3 bars to empty barSeries
BarSeriesUtils.addBars(barSeries, bars);
assertEquals(bar0.getEndTime(), barSeries.getFirstBar().getEndTime());
assertEquals(bar2.getEndTime(), barSeries.getLastBar().getEndTime());
final Bar bar3 = new MockBar(time.plusDays(3), 1d, 1d, 1d, 1d, 1d, 1d, 1, numFunction);
bars.add(bar3);
// add 1 bar to non empty barSeries
BarSeriesUtils.addBars(barSeries, bars);
assertEquals(bar3.getEndTime(), barSeries.getLastBar().getEndTime());
}
|
/**
 * Strips the excluded quote/bracket characters from the value and unwraps
 * backtick-quoted content; returns {@code null} for {@code null} input.
 */
public static String getExactlyValue(final String value) {
    if (null == value) {
        return null;
    }
    return tryGetRealContentInBackticks(CharMatcher.anyOf(EXCLUDED_CHARACTERS).removeFrom(value));
}
|
@Test
void assertGetExactlyValue() {
// Quote characters of all dialects are stripped; backtick content is
// unwrapped, with unbalanced/nested backticks handled per the helper.
assertThat(SQLUtils.getExactlyValue("`xxx`"), is("xxx"));
assertThat(SQLUtils.getExactlyValue("[xxx]"), is("xxx"));
assertThat(SQLUtils.getExactlyValue("\"xxx\""), is("xxx"));
assertThat(SQLUtils.getExactlyValue("'xxx'"), is("xxx"));
assertThat(SQLUtils.getExactlyValue("`[xxx`"), is("xxx"));
assertThat(SQLUtils.getExactlyValue("```[xxx```"), is("`xxx`"));
}
|
// Serializes a FlowEntry to JSON: entry-level state/statistics fields plus all
// FlowRule fields merged in via the FlowRule codec; treatment and selector are
// encoded with their own codecs when present.
@Override
public ObjectNode encode(FlowEntry flowEntry, CodecContext context) {
checkNotNull(flowEntry, "Flow entry cannot be null");
ObjectNode result = context.mapper().createObjectNode()
.put(GROUP_ID, flowEntry.groupId().id())
.put(STATE, flowEntry.state().toString())
//FIXME life is destroying precision (seconds granularity is default)
.put(LIFE, flowEntry.life())
.put(LIVE_TYPE, flowEntry.liveType().toString())
.put(LAST_SEEN, flowEntry.lastSeen())
.put(PACKETS, flowEntry.packets())
.put(BYTES, flowEntry.bytes())
// encode FlowRule-specific fields using the FlowRule codec
.setAll(context.codec(FlowRule.class).encode((FlowRule) flowEntry, context));
if (flowEntry.treatment() != null) {
final JsonCodec<TrafficTreatment> treatmentCodec =
context.codec(TrafficTreatment.class);
result.set(TREATMENT, treatmentCodec.encode(flowEntry.treatment(), context));
}
if (flowEntry.selector() != null) {
final JsonCodec<TrafficSelector> selectorCodec =
context.codec(TrafficSelector.class);
result.set(SELECTOR, selectorCodec.encode(flowEntry.selector(), context));
}
return result;
}
|
@Test
public void testEncode() throws IOException {
    // Compares the codec's output against a reference JSON file, after removing
    // fields that legitimately differ between the two representations.
    InputStream jsonStream = FlowEntryCodec.class.getResourceAsStream(JSON_FILE);
    ObjectNode jsonString = (ObjectNode) context.mapper().readTree(jsonStream);
    ObjectNode expected = flowEntryCodec.encode(FLOW_ENTRY, context);
    // only set by the internal FlowRule encoder, so should not appear in the JSON string
    expected.remove("id");
    expected.remove("appId");
    expected.remove("tableName");
    // only set by the FlowEntry encoder but not used for the decoder
    // so should not appear in the JSON, or a decoding error occurs
    expected.remove(FlowEntryCodec.GROUP_ID);
    expected.remove(FlowEntryCodec.LAST_SEEN);
    // assert equality of those values separately. see below
    assertEquals(expected.get(FlowEntryCodec.LIFE).asLong(), jsonString.get(FlowEntryCodec.LIFE).asLong());
    assertEquals(expected.get(FlowEntryCodec.PACKETS).asLong(), jsonString.get(FlowEntryCodec.PACKETS).asLong());
    assertEquals(expected.get(FlowEntryCodec.BYTES).asLong(), jsonString.get(FlowEntryCodec.BYTES).asLong());
    // if those numeric values are included in expected as a result of the encoding,
    // AssertionError occurs even though both expected and jsonString are semantically identical
    expected.remove(FlowEntryCodec.LIFE);
    expected.remove(FlowEntryCodec.PACKETS);
    expected.remove(FlowEntryCodec.BYTES);
    jsonString.remove(FlowEntryCodec.LIFE);
    jsonString.remove(FlowEntryCodec.PACKETS);
    jsonString.remove(FlowEntryCodec.BYTES);
    assertEquals(expected, jsonString);
}
|
/**
 * Convenience overload that starts evaluation with an empty visited set.
 * NOTE(review): the fourth argument presumably tracks already-visited parameter
 * names for cycle detection — confirm against the four-argument overload.
 */
@VisibleForTesting
void parseWorkflowParameter(
    Map<String, Parameter> workflowParams, Parameter param, String workflowId) {
    parseWorkflowParameter(workflowParams, param, workflowId, new HashSet<>());
}
|
@Test
public void testParseWorkflowParameterWithImplicitToString() {
    // A string parameter referencing a long-typed parameter should have its
    // evaluated result implicitly converted to a string, with a note in the meta map.
    StringParameter bar = StringParameter.builder().name("bar").expression("foo - 1;").build();
    // Case 1: the referenced parameter is itself evaluated from an expression.
    paramEvaluator.parseWorkflowParameter(
        Collections.singletonMap("foo", LongParameter.builder().expression("1+2+3;").build()),
        bar,
        "test-workflow");
    assertEquals("5", bar.getEvaluatedResult());
    assertEquals(
        "Implicitly converted the evaluated result to a string for type class java.lang.Long",
        bar.getMeta().get("info"));
    bar = StringParameter.builder().name("bar").expression("foo - 1;").build();
    // Case 2: the referenced parameter already carries an evaluated result.
    paramEvaluator.parseWorkflowParameter(
        Collections.singletonMap(
            "foo", LongParameter.builder().evaluatedResult(6L).evaluatedTime(123L).build()),
        bar,
        "test-workflow");
    assertEquals("5", bar.getEvaluatedResult());
    assertEquals(
        "Implicitly converted the evaluated result to a string for type class java.lang.Long",
        bar.getMeta().get("info"));
}
|
/**
 * Creates the thread-level sensor recording punctuate invocation rate, total
 * count, and average/max latency at INFO recording level.
 *
 * @param threadId       id of the stream thread the sensor belongs to
 * @param streamsMetrics metrics registry used to create and register the sensor
 * @return the configured punctuate sensor
 */
public static Sensor punctuateSensor(final String threadId,
                                     final StreamsMetricsImpl streamsMetrics) {
    return invocationRateAndCountAndAvgAndMaxLatencySensor(
        threadId,
        PUNCTUATE,
        PUNCTUATE_RATE_DESCRIPTION,
        PUNCTUATE_TOTAL_DESCRIPTION,
        PUNCTUATE_AVG_LATENCY_DESCRIPTION,
        PUNCTUATE_MAX_LATENCY_DESCRIPTION,
        Sensor.RecordingLevel.INFO,
        streamsMetrics
    );
}
|
@Test
public void shouldGetPunctuateSensor() {
    // Verifies the punctuate sensor is built with the expected rate/count and
    // avg/max-latency metrics by checking the static helper invocations.
    final String operation = "punctuate";
    final String operationLatency = operation + StreamsMetricsImpl.LATENCY_SUFFIX;
    final String totalDescription = "The total number of calls to punctuate";
    final String rateDescription = "The average per-second number of calls to punctuate";
    final String avgLatencyDescription = "The average punctuate latency";
    final String maxLatencyDescription = "The maximum punctuate latency";
    when(streamsMetrics.threadLevelSensor(THREAD_ID, operation, RecordingLevel.INFO)).thenReturn(expectedSensor);
    when(streamsMetrics.threadLevelTagMap(THREAD_ID)).thenReturn(tagMap);
    try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
        final Sensor sensor = ThreadMetrics.punctuateSensor(THREAD_ID, streamsMetrics);
        streamsMetricsStaticMock.verify(
            () -> StreamsMetricsImpl.addInvocationRateAndCountToSensor(
                expectedSensor,
                THREAD_LEVEL_GROUP,
                tagMap,
                operation,
                rateDescription,
                totalDescription
            )
        );
        streamsMetricsStaticMock.verify(
            () -> StreamsMetricsImpl.addAvgAndMaxToSensor(
                expectedSensor,
                THREAD_LEVEL_GROUP,
                tagMap,
                operationLatency,
                avgLatencyDescription,
                maxLatencyDescription
            )
        );
        assertThat(sensor, is(expectedSensor));
    }
}
|
/**
 * Determines how many unconfirmed SFTP write requests may be in flight.
 * For a known transfer length the value is capped by the number of chunks
 * required; otherwise the configured maximum applies.
 */
protected int getMaxUnconfirmedWrites(final TransferStatus status) {
    final int maxUnconfirmed = preferences.getInteger("sftp.write.maxunconfirmed");
    if (TransferStatus.UNKNOWN_LENGTH == status.getLength()) {
        return maxUnconfirmed;
    }
    // Number of chunks needed for the transfer, rounded up by one.
    final int chunks = (int) (status.getLength() / preferences.getInteger("connection.chunksize")) + 1;
    return Integer.min(chunks, maxUnconfirmed);
}
|
@Test
public void testUnconfirmedReadsNumber() {
    final SFTPWriteFeature feature = new SFTPWriteFeature(session);
    // 1 MB: capped by the chunk count (33), below the configured maximum.
    assertEquals(33, feature.getMaxUnconfirmedWrites(new TransferStatus().withLength(TransferStatus.MEGA * 1L)));
    // 1.3 GB: chunk count exceeds the configured maximum (64), so the maximum wins.
    assertEquals(64, feature.getMaxUnconfirmedWrites(new TransferStatus().withLength((long) (TransferStatus.GIGA * 1.3))));
}
|
/**
 * Lazily builds and caches the string representation, sanitizing the endpoint
 * URI so sensitive parts are not exposed.
 */
@Override
public String toString() {
    String answer = translateProducerToString;
    if (answer == null) {
        answer = "TranslateProducer[" + URISupport.sanitizeUri(getEndpoint().getEndpointUri()) + "]";
        translateProducerToString = answer;
    }
    return answer;
}
|
@Test
public void translateTextPojoTest() throws Exception {
    // Sends a TranslateTextRequest POJO (Italian -> English) through the route
    // and expects the translated text back in the exchange body.
    mock.expectedMessageCount(1);
    Exchange exchange = template.request("direct:translatePojoText", new Processor() {
        @Override
        public void process(Exchange exchange) {
            exchange.getIn()
                .setBody(TranslateTextRequest.builder().sourceLanguageCode(Translate2LanguageEnum.ITALIAN.toString())
                    .targetLanguageCode(Translate2LanguageEnum.ENGLISH.toString()).text("ciao").build());
        }
    });
    MockEndpoint.assertIsSatisfied(context);
    String resultGet = exchange.getIn().getBody(String.class);
    assertEquals("Hello", resultGet);
}
|
/**
 * Builds a work queue of the given capacity.
 * NOTE(review): the second argument presumably toggles a priority-queue variant
 * — confirm against the two-argument overload.
 *
 * @param size queue capacity; 0 yields a SynchronousQueue, negative an unbounded queue
 * @return the created queue
 */
public static BlockingQueue<Runnable> buildQueue(int size) {
    return buildQueue(size, false);
}
|
@Test
public void buildQueue() throws Exception {
    // size == 0 -> direct handoff queue
    BlockingQueue<Runnable> queue = ThreadPoolUtils.buildQueue(0);
    Assert.assertEquals(queue.getClass(), SynchronousQueue.class);
    // negative size -> unbounded linked queue
    queue = ThreadPoolUtils.buildQueue(-1);
    Assert.assertEquals(queue.getClass(), LinkedBlockingQueue.class);
    // positive size -> bounded linked queue
    queue = ThreadPoolUtils.buildQueue(10);
    Assert.assertEquals(queue.getClass(), LinkedBlockingQueue.class);
}
|
/**
 * Parses a date and/or time string into a {@link Date}, normalizing common
 * formats to ISO-8601 before delegating to Joda-Time.
 *
 * @param datetimeStr e.g. "2020-01-01", "2020-01-01 10:00:00" or "10:00:00"
 * @return the parsed date, or {@code null} for an empty input
 */
public static Date parseDate(String datetimeStr) {
    if (StringUtils.isEmpty(datetimeStr)) {
        return null;
    }
    String normalized = datetimeStr.trim();
    final boolean hasDatePart = normalized.contains("-");
    final boolean hasTimePart = normalized.contains(":");
    if (hasDatePart) {
        if (hasTimePart) {
            // "yyyy-MM-dd HH:mm:ss" -> ISO-8601 "yyyy-MM-ddTHH:mm:ss"
            normalized = normalized.replace(" ", "T");
        }
    } else if (hasTimePart) {
        // time-only input: prefix "T" so the ISO parser treats it as a time
        normalized = "T" + normalized;
    }
    return new DateTime(normalized, dateTimeZone).toDate();
}
|
@PrepareForTest(StringUtils.class)
@Test
public void parseDateInputNotNullOutputNull() throws Exception {
    // Forces StringUtils.isEmpty() to report true so parseDate() takes the
    // early-return path and yields null even for a non-null input.
    // Setup mocks
    PowerMockito.mockStatic(StringUtils.class);
    // Arrange
    final String datetimeStr = "a/b/c";
    final Method isEmptyMethod =
        DTUMemberMatcher.method(StringUtils.class, "isEmpty", String.class);
    PowerMockito.doReturn(true)
        .when(StringUtils.class, isEmptyMethod)
        .withArguments(or(isA(String.class), isNull(String.class)));
    // Act
    final Date actual = Util.parseDate(datetimeStr);
    // Assert result
    Assert.assertNull(actual);
}
|
/**
 * Resolves the actual (concrete) types bound to the given type variables
 * within {@code type}, delegating to the shared mapper pool.
 *
 * @param type          context type carrying the generic bindings
 * @param typeVariables type variables to resolve
 * @return resolved types, in the same order as {@code typeVariables}
 */
public static Type[] getActualTypes(Type type, Type... typeVariables) {
    return ActualTypeMapperPool.getActualTypes(type, typeVariables);
}
|
@Test
public void getActualTypesTest() {
    // Verifies that a generic type parameter flowing through multiple
    // inheritance levels resolves to the concrete type (Long).
    Type idType = TypeUtil.getActualType(Level3.class, ReflectUtil.getField(Level3.class, "id"));
    assertEquals(Long.class, idType);
}
|
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
// Set of Visited Schemas
IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
// Stack that contains the Schams to process and afterVisitNonTerminal
// functions.
// Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
// Using either has a cost which we want to avoid...
Deque<Object> dq = new ArrayDeque<>();
dq.addLast(start);
Object current;
while ((current = dq.pollLast()) != null) {
if (current instanceof Supplier) {
// we are executing a non terminal post visit.
SchemaVisitorAction action = ((Supplier<SchemaVisitorAction>) current).get();
switch (action) {
case CONTINUE:
break;
case SKIP_SUBTREE:
throw new UnsupportedOperationException();
case SKIP_SIBLINGS:
while (dq.getLast() instanceof Schema) {
dq.removeLast();
}
break;
case TERMINATE:
return visitor.get();
default:
throw new UnsupportedOperationException("Invalid action " + action);
}
} else {
Schema schema = (Schema) current;
boolean terminate;
if (!visited.containsKey(schema)) {
Schema.Type type = schema.getType();
switch (type) {
case ARRAY:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
visited.put(schema, schema);
break;
case RECORD:
terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
.collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
visited.put(schema, schema);
break;
case UNION:
terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
visited.put(schema, schema);
break;
case MAP:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
visited.put(schema, schema);
break;
case NULL:
case BOOLEAN:
case BYTES:
case DOUBLE:
case ENUM:
case FIXED:
case FLOAT:
case INT:
case LONG:
case STRING:
terminate = visitTerminal(visitor, schema, dq);
break;
default:
throw new UnsupportedOperationException("Invalid type " + type);
}
} else {
terminate = visitTerminal(visitor, schema, dq);
}
if (terminate) {
return visitor.get();
}
}
}
return visitor.get();
}
|
@Test
void visit7() {
    // Nested record: the visitor should see the outer record, the inner record,
    // and the terminal fields in depth-first order.
    String s7 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
        + "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"css2\", \"fields\": "
        + "[{\"name\": \"f11\", \"type\": \"int\"}]}}," + "{\"name\": \"f2\", \"type\": \"long\"}" + "]}";
    assertEquals("c1.css2.\"int\"!!", Schemas.visit(new Schema.Parser().parse(s7), new TestVisitor()));
}
|
/**
 * Always delegates to the underlying immutable collection.
 *
 * @throws UnsupportedOperationException always, since the underlying
 *         collection is immutable; delegation is kept so tests can verify it
 */
@Override
public boolean remove(Object o) {
    // will throw UnsupportedOperationException; delegate anyway for testability
    return underlying().remove(o);
}
|
@Test
public void testDelegationOfUnsupportedFunctionRemove() {
    // Confirms remove() is forwarded to the underlying collection even though
    // the underlying collection will reject it.
    new PCollectionsHashSetWrapperDelegationChecker<>()
        .defineMockConfigurationForUnsupportedFunction(mock -> mock.remove(eq(this)))
        .defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.remove(this))
        .doUnsupportedFunctionDelegationCheck();
}
|
/**
 * Executes an eth_call against the given block and returns its result.
 * When no value is returned, falls back to the error data (if present).
 *
 * @throws IOException if the RPC request fails
 */
@Override
public String sendCall(String to, String data, DefaultBlockParameter defaultBlockParameter)
        throws IOException {
    EthCall ethCall =
            web3j.ethCall(
                            Transaction.createEthCallTransaction(getFromAddress(), to, data),
                            defaultBlockParameter)
                    .send();
    assertCallNotReverted(ethCall);
    if (ethCall.getValue() != null) {
        return ethCall.getValue();
    }
    if (ethCall.getError() != null) {
        return ethCall.getError().getData();
    }
    return null;
}
|
@Test
void sendCallErrorResponseNotRevert() throws IOException {
    // When the node returns an error response that is not treated as a revert,
    // sendCall() should surface the error's data payload.
    EthCall lookupDataHex = new EthCall();
    Response.Error error = new Response.Error();
    error.setCode(3);
    error.setMessage("execution reverted");
    error.setData(responseData);
    lookupDataHex.setError(error);
    Request request = mock(Request.class);
    when(request.send()).thenReturn(lookupDataHex);
    when(web3j.ethCall(any(Transaction.class), any(DefaultBlockParameter.class)))
        .thenReturn(request);
    String result =
        clientTransactionManager.sendCall(
            "0xAdress", "data", DefaultBlockParameter.valueOf("latest"));
    assertEquals(responseData, result);
}
|
/**
 * Encodes the given criterion as JSON by delegating to the per-type
 * encoding helper.
 */
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
    EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
    return encoder.encode();
}
|
@Test
public void matchIPProtocolTest() {
    // 250 exercises a protocol value above the signed-byte range boundary.
    Criterion criterion = Criteria.matchIPProtocol((byte) 250);
    ObjectNode result = criterionCodec.encode(criterion, context);
    assertThat(result, matchesCriterion(criterion));
}
|
/**
 * Converts an Iceberg schema into the equivalent list of Hive field schemas,
 * preserving column order, names, type strings and docs.
 */
public static List<FieldSchema> convert(Schema schema) {
    List<FieldSchema> fields = schema.columns().stream()
        .map(column -> new FieldSchema(column.name(), convertToTypeString(column.type()), column.doc()))
        .collect(Collectors.toList());
    return fields;
}
|
@Test
public void testSchemaConvertToIcebergSchemaForEveryPrimitiveType() {
    // Round-trips every supported Hive primitive type into an Iceberg schema.
    Schema schemaWithEveryType = HiveSchemaUtil.convert(getSupportedFieldSchemas());
    assertThat(schemaWithEveryType.asStruct()).isEqualTo(getSchemaWithSupportedTypes().asStruct());
}
|
/**
 * Locates the local Hadoop configuration directory.
 * Resolution order: explicit HADOOP_CONF_DIR, then conventional locations
 * under HADOOP_HOME ("/etc/hadoop" for Hadoop 2.2+, "/conf" for Hadoop 1.x).
 *
 * @return the configuration directory, or empty if none could be found
 */
@Override
public Optional<String> getLocalHadoopConfigurationDirectory() {
    final String hadoopConfDirEnv = System.getenv(Constants.ENV_HADOOP_CONF_DIR);
    if (StringUtils.isNotBlank(hadoopConfDirEnv)) {
        return Optional.of(hadoopConfDirEnv);
    }
    final String hadoopHomeEnv = System.getenv(Constants.ENV_HADOOP_HOME);
    if (StringUtils.isNotBlank(hadoopHomeEnv)) {
        // "/etc/hadoop" is the Hadoop 2.2+ layout, "/conf" the Hadoop 1.x layout.
        for (final String candidate : new String[] {"/etc/hadoop", "/conf"}) {
            final File confDir = new File(hadoopHomeEnv, candidate);
            if (confDir.exists()) {
                return Optional.of(confDir.getAbsolutePath());
            }
        }
    }
    return Optional.empty();
}
|
@Test
void testGetLocalHadoopConfigurationDirectoryFromHadoopConfDirEnv() throws Exception {
    // HADOOP_CONF_DIR, when set, should be returned verbatim.
    runTestWithEmptyEnv(
        () -> {
            final String hadoopConfDir = "/etc/hadoop/conf";
            setEnv(Constants.ENV_HADOOP_CONF_DIR, hadoopConfDir);
            final Optional<String> optional =
                testingKubernetesParameters.getLocalHadoopConfigurationDirectory();
            assertThat(optional).isPresent();
            assertThat(optional.get()).isEqualTo(hadoopConfDir);
        });
}
|
/**
 * Atomically clears the bit at the given absolute position.
 * Lock-free: retries a compare-and-set loop until the cleared value is stored.
 */
public void clear(int position) {
    int segmentPosition = position >>> log2SegmentSize; /// which segment -- div by num bits per segment
    int longPosition = (position >>> 6) & segmentMask; /// which long in the segment -- (position / 64) masked to the segment's long index range
    int bitPosition = position & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
    AtomicLongArray segment = getSegment(segmentPosition);
    long mask = ~(1L << bitPosition); // all bits set except the one being cleared
    // Thread safety: we need to loop until we win the race to set the long value.
    while(true) {
        // determine what the new long value will be after we set the appropriate bit.
        long currentLongValue = segment.get(longPosition);
        long newLongValue = currentLongValue & mask;
        // if no other thread has modified the value since we read it, we won the race and we are done.
        if(segment.compareAndSet(longPosition, currentLongValue, newLongValue))
            break;
    }
}
|
@Test
public void testClear() {
    // Set four bits, clear one, and verify the remaining cardinality.
    ThreadSafeBitSet set1 = new ThreadSafeBitSet();
    set1.set(10);
    set1.set(20);
    set1.set(21);
    set1.set(22);
    set1.clear(21);
    Assert.assertEquals(3, set1.cardinality());
}
|
/**
 * Trims the given character sequence and normalizes an empty result to
 * {@code null}.
 *
 * @param str sequence to trim; may be {@code null}
 * @return the trimmed string, or {@code null} if the result is empty or the input was {@code null}
 */
public static String trimToNull(CharSequence str) {
    final String trimmed = trim(str);
    if (EMPTY.equals(trimmed)) {
        return null;
    }
    return trimmed;
}
|
@Test
public void trimToNullTest(){
    // Whitespace-only, empty and null inputs should all normalize to null.
    String a = "  ";
    assertNull(CharSequenceUtil.trimToNull(a));
    a = "";
    assertNull(CharSequenceUtil.trimToNull(a));
    a = null;
    assertNull(CharSequenceUtil.trimToNull(a));
}
|
/**
 * Returns the current count of failed getAppActivities retrievals.
 */
public int getAppActivitiesFailedRetrieved() {
    return numGetAppActivitiesFailedRetrieved.value();
}
|
@Test
public void testGetAppActivitiesRetrievedFailed() {
    // A failed retrieval should bump the failure counter by exactly one.
    long totalBadBefore = metrics.getAppActivitiesFailedRetrieved();
    badSubCluster.getAppActivitiesFailed();
    Assert.assertEquals(totalBadBefore + 1,
        metrics.getAppActivitiesFailedRetrieved());
}
|
/**
 * One iteration of the DetectNewPartitions (DNP) splittable DoFn.
 * First run (restriction from == 0): claims the initial restriction, seeds the
 * watermark and either resumes a previous pipeline or generates the initial
 * partitions. Subsequent runs: restore the watermark persisted in the metadata
 * table, claim the restriction, process new partitions, and periodically update
 * the watermark and reconcile missing/orphaned partitions.
 *
 * @return resume (with delay on steady-state runs) or stop once endTime is reached
 */
@VisibleForTesting
public ProcessContinuation run(
    RestrictionTracker<OffsetRange, Long> tracker,
    OutputReceiver<PartitionRecord> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator,
    InitialPipelineState initialPipelineState)
    throws Exception {
  LOG.debug("DNP: Watermark: " + watermarkEstimator.getState());
  LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom());
  if (tracker.currentRestriction().getFrom() == 0L) {
    if (!tracker.tryClaim(0L)) {
      LOG.error(
          "Could not claim initial DetectNewPartition restriction. No partitions are outputted.");
      return ProcessContinuation.stop();
    }
    watermarkEstimator.setWatermark(initialPipelineState.getStartTime());
    if (initialPipelineState.isResume()) {
      resumeFromPreviousPipelineAction.run(receiver);
    } else {
      generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime());
    }
    return ProcessContinuation.resume();
  }
  // Create a new partition reconciler every run to reset the state each time.
  partitionReconciler = new PartitionReconciler(metadataTableDao, metrics);
  orphanedMetadataCleaner = new OrphanedMetadataCleaner();
  // Calculating the new value of watermark is a resource intensive process. We have to do a full
  // scan of the metadata table and then ensure we're not missing partitions and then calculate
  // the low watermark. This is usually a fairly fast process even with thousands of partitions.
  // However, sometimes this may take so long that the runner checkpoints before the watermark is
  // calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to
  // restart, wasting the resources spent calculating the watermark. On restart, we will try to
  // calculate the watermark again. The problem causing the slow watermark calculation can persist
  // leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate
  // the watermark after successful tryClaim. Then we write to the metadata table the new
  // watermark. On the start of each run we read the watermark and update the DoFn's watermark.
  DetectNewPartitionsState detectNewPartitionsState =
      metadataTableDao.readDetectNewPartitionsState();
  if (detectNewPartitionsState != null) {
    watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark());
  }
  // Terminate if endTime <= watermark that means all partitions have read up to or beyond
  // watermark. We no longer need to manage splits and merges, we can terminate.
  if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) {
    tracker.tryClaim(tracker.currentRestriction().getTo());
    return ProcessContinuation.stop();
  }
  if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) {
    LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction());
    return ProcessContinuation.stop();
  }
  // Read StreamPartitions to calculate watermark.
  List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null;
  if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) {
    streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark();
  }
  // Process NewPartitions and track the ones successfully outputted.
  List<NewPartition> newPartitions = metadataTableDao.readNewPartitions();
  List<ByteStringRange> outputtedNewPartitions = new ArrayList<>();
  for (NewPartition newPartition : newPartitions) {
    if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) {
      outputtedNewPartitions.add(newPartition.getPartition());
    } else if (streamPartitionsWithWatermark != null) {
      // streamPartitionsWithWatermark is not null on runs that we update watermark. We only run
      // reconciliation when we update watermark. Only add incompleteNewPartitions if
      // reconciliation is being run
      partitionReconciler.addIncompleteNewPartitions(newPartition);
      orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition);
    }
  }
  // Process the watermark using read StreamPartitions and NewPartitions.
  if (streamPartitionsWithWatermark != null) {
    Optional<Instant> maybeWatermark =
        getNewWatermark(streamPartitionsWithWatermark, newPartitions);
    maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark);
    // Only start reconciling after the pipeline has been running for a while.
    if (tracker.currentRestriction().getFrom() > 50) {
      // Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being
      // streamed. This isn't perfect because there may be partitions moving between
      // StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not
      // include NewPartitions marked as deleted from a previous DNP run not yet processed by
      // RCSP.
      List<ByteStringRange> existingPartitions =
          streamPartitionsWithWatermark.stream()
              .map(StreamPartitionWithWatermark::getPartition)
              .collect(Collectors.toList());
      existingPartitions.addAll(outputtedNewPartitions);
      List<ByteStringRange> missingStreamPartitions =
          getMissingPartitionsFromEntireKeySpace(existingPartitions);
      orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions);
      partitionReconciler.addMissingPartitions(missingStreamPartitions);
      processReconcilerPartitions(
          receiver, watermarkEstimator, initialPipelineState.getStartTime());
      cleanUpOrphanedMetadata();
    }
  }
  return ProcessContinuation.resume().withResumeDelay(Duration.millis(100));
}
|
@Test
public void testUpdateWatermarkAfterCheckpoint() throws Exception {
    // Verifies that the watermark written to the metadata table is picked up on
    // the next run, and that reaching endTime terminates the DoFn.
    Instant watermark = endTime;
    OffsetRange offsetRange = new OffsetRange(1, Long.MAX_VALUE);
    when(tracker.currentRestriction()).thenReturn(offsetRange);
    when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
    // Watermark estimator is not updated before a checkpoint.
    assertEquals(
        DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
        action.run(
            tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
    assertEquals(startTime, watermarkEstimator.currentWatermark());
    assertNull(metadataTableDao.readDetectNewPartitionsState());
    // Update the watermark in the metadata table. This run terminates because watermark == endTime.
    metadataTableDao.updateDetectNewPartitionWatermark(watermark);
    // Watermark estimator will be updated with the watermark from the metadata table.
    assertEquals(
        DoFn.ProcessContinuation.stop(),
        action.run(
            tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
    assertEquals(watermark, watermarkEstimator.currentWatermark());
    assertEquals(watermark, metadataTableDao.readDetectNewPartitionsState().getWatermark());
}
|
/**
 * Renders the given AST node as SQL text, trimming any trailing newlines the
 * formatter appends.
 */
public static String formatSql(final AstNode root) {
    final StringBuilder builder = new StringBuilder();
    new Formatter(builder).process(root, 0);
    final String rendered = builder.toString();
    return StringUtils.stripEnd(rendered, "\n");
}
|
@Test
public void shouldFormatLeftJoinWithoutJoinWindow() {
    // A LEFT join without a WITHIN clause should format as LEFT OUTER JOIN with
    // no window specification.
    final Join join = new Join(leftAlias, ImmutableList.of(new JoinedSource(
        Optional.empty(),
        rightAlias,
        JoinedSource.Type.LEFT,
        criteria,
        Optional.empty())));
    final String result = SqlFormatter.formatSql(join);
    final String expected = "`left` L\nLEFT OUTER JOIN `right` R ON (('left.col0' = 'right.col0'))";
    assertEquals(expected, result);
}
|
/**
 * Stores the herder and then initializes the REST resources defined by the
 * superclass (which read the herder set here).
 */
public void initializeResources(Herder herder) {
    this.herder = herder;
    super.initializeResources();
}
|
@Test
public void testStandaloneConfig() throws IOException {
    // Boots the REST server with a standalone-style config and verifies that
    // GET /connectors succeeds against the mocked herder.
    Map<String, String> configMap = baseServerProps();
    configMap.put("offset.storage.file.filename", "/tmp");
    doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId();
    doReturn(plugins).when(herder).plugins();
    expectEmptyRestExtensions();
    doReturn(Arrays.asList("a", "b")).when(herder).connectors();
    server = new ConnectRestServer(null, restClient, configMap);
    server.initializeServer();
    server.initializeResources(herder);
    HttpRequest request = new HttpGet("/connectors");
    HttpResponse response = executeRequest(server.advertisedUrl(), request);
    assertEquals(200, response.getStatusLine().getStatusCode());
}
|
/**
 * Disables the given plugin, but only if it is the wrapper's own plugin;
 * any attempt to disable a foreign plugin is rejected.
 *
 * @throws IllegalAccessError if {@code pluginId} belongs to another plugin
 */
@Override
public boolean disablePlugin(String pluginId) {
    // Guard: a plugin may only act on itself.
    if (!currentPluginId.equals(pluginId)) {
        throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute disablePlugin for foreign pluginId!");
    }
    return original.disablePlugin(pluginId);
}
|
@Test
public void disablePlugin() {
    pluginManager.loadPlugins();
    // Disabling a foreign plugin must be rejected; disabling our own must succeed.
    assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.disablePlugin(OTHER_PLUGIN_ID));
    assertTrue(wrappedPluginManager.disablePlugin(THIS_PLUGIN_ID));
}
|
/**
 * Attempts to split the tracked (possibly unbounded) offset range at the given
 * fraction of the remaining work, using the range-end estimator to size the
 * remainder when the range is still growable (to == Long.MAX_VALUE).
 *
 * @param fractionOfRemainder fraction of remaining work kept by the primary
 * @return the primary/residual split, or {@code null} if no split is possible
 */
@Override
public SplitResult<OffsetRange> trySplit(double fractionOfRemainder) {
    // If current tracking range is no longer growable, split it as a normal range.
    if (range.getTo() != Long.MAX_VALUE || range.getTo() == range.getFrom()) {
        return super.trySplit(fractionOfRemainder);
    }
    // If current range has been done, there is no more space to split.
    if (lastAttemptedOffset != null && lastAttemptedOffset == Long.MAX_VALUE) {
        return null;
    }
    // "cur" is the last processed position: the last attempted offset, or one
    // before the range start when nothing has been attempted yet.
    BigDecimal cur =
        (lastAttemptedOffset == null)
            ? BigDecimal.valueOf(range.getFrom()).subtract(BigDecimal.ONE, MathContext.DECIMAL128)
            : BigDecimal.valueOf(lastAttemptedOffset);
    // Fetch the estimated end offset. If the estimated end is smaller than the next offset, use
    // the next offset as end.
    BigDecimal estimateRangeEnd =
        BigDecimal.valueOf(rangeEndEstimator.estimate())
            .max(cur.add(BigDecimal.ONE, MathContext.DECIMAL128));
    // Convert to BigDecimal in computation to prevent overflow, which may result in loss of
    // precision.
    // split = cur + max(1, (estimateRangeEnd - cur) * fractionOfRemainder)
    BigDecimal splitPos =
        cur.add(
            estimateRangeEnd
                .subtract(cur, MathContext.DECIMAL128)
                .multiply(BigDecimal.valueOf(fractionOfRemainder), MathContext.DECIMAL128)
                .max(BigDecimal.ONE),
            MathContext.DECIMAL128);
    long split = splitPos.longValue();
    // A split point beyond the estimated end would leave an empty primary; refuse.
    if (split > estimateRangeEnd.longValue()) {
        return null;
    }
    // Primary keeps [from, split); residual takes [split, MAX_VALUE) and stays growable.
    OffsetRange res = new OffsetRange(split, range.getTo());
    this.range = new OffsetRange(range.getFrom(), split);
    return SplitResult.of(range, res);
}
|
@Test
public void testCheckpointJustStarted() throws Exception {
    // trySplit(0) checkpoints everything after the last attempted offset.
    SimpleEstimator simpleEstimator = new SimpleEstimator();
    GrowableOffsetRangeTracker tracker = new GrowableOffsetRangeTracker(0L, simpleEstimator);
    assertTrue(tracker.tryClaim(5L));
    // Even with an estimate of 0, the split point is clamped past the claimed offset.
    simpleEstimator.setEstimateRangeEnd(0L);
    SplitResult res = tracker.trySplit(0);
    tracker.checkDone();
    assertEquals(new OffsetRange(0, 6), res.getPrimary());
    assertEquals(new OffsetRange(0, 6), tracker.currentRestriction());
    assertEquals(new OffsetRange(6, Long.MAX_VALUE), res.getResidual());
    // A larger estimate does not change a fraction-0 checkpoint split.
    tracker = new GrowableOffsetRangeTracker(0L, simpleEstimator);
    assertTrue(tracker.tryClaim(5L));
    simpleEstimator.setEstimateRangeEnd(20L);
    res = tracker.trySplit(0);
    tracker.checkDone();
    assertEquals(new OffsetRange(0, 6), res.getPrimary());
    assertEquals(new OffsetRange(6, Long.MAX_VALUE), res.getResidual());
}
|
/**
 * Populates the given line builder with symbol data, unless a previous read
 * error occurred. On a range-conversion failure the error is recorded once,
 * logged, and returned for this and all subsequent lines of the file.
 *
 * @return the recorded read error, or empty if symbols were processed successfully
 */
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
    if (readError == null) {
        try {
            processSymbols(lineBuilder);
        } catch (RangeOffsetConverter.RangeOffsetConverterException e) {
            // Remember the failure so symbols are skipped for the rest of this file.
            readError = new ReadError(Data.SYMBOLS, lineBuilder.getLine());
            LOG.warn(format("Inconsistency detected in Symbols data. Symbols will be ignored for file '%s'", file.getKey()), e);
        }
    }
    return Optional.ofNullable(readError);
}
|
@Test
public void read_symbols_defined_on_many_lines() {
    // A symbol whose declaration and reference each span two lines should emit
    // a range label on every affected line, all tagged with the same symbol id.
    TextRange declaration = newTextRange(LINE_1, LINE_2, OFFSET_1, OFFSET_3);
    when(rangeOffsetConverter.offsetToString(declaration, LINE_1, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_1);
    when(rangeOffsetConverter.offsetToString(declaration, LINE_2, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_2);
    TextRange reference = newTextRange(LINE_3, LINE_4, OFFSET_1, OFFSET_3);
    when(rangeOffsetConverter.offsetToString(reference, LINE_3, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_1);
    when(rangeOffsetConverter.offsetToString(reference, LINE_4, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_2);
    SymbolsLineReader symbolsLineReader = newReader(newSymbol(declaration, reference));
    assertThat(symbolsLineReader.read(line1)).isEmpty();
    assertThat(symbolsLineReader.read(line2)).isEmpty();
    assertThat(symbolsLineReader.read(line3)).isEmpty();
    assertThat(symbolsLineReader.read(line4)).isEmpty();
    assertThat(line1.getSymbols()).isEqualTo(RANGE_LABEL_1 + ",1");
    assertThat(line2.getSymbols()).isEqualTo(RANGE_LABEL_2 + ",1");
    assertThat(line3.getSymbols()).isEqualTo(RANGE_LABEL_1 + ",1");
    assertThat(line4.getSymbols()).isEqualTo(RANGE_LABEL_2 + ",1");
}
|
/**
 * Returns the instances for the given service. For the decorator's own service,
 * guarantees the self instance is present (prepended when the delegate's result
 * does not already contain it); other services pass through untouched.
 */
@Override
public List<ServiceInstance> getInstances(String serviceName) {
    if (!Objects.equals(serviceName, this.selfInstance.getServiceName())) {
        // Foreign service: delegate without modification.
        return this.delegate.getInstances(serviceName);
    }
    List<ServiceInstance> serviceInstances = this.delegate.getInstances(serviceName);
    if (containSelf(serviceInstances, this.selfInstance)) {
        // contains self instance already
        return serviceInstances;
    }
    // add self instance to result
    List<ServiceInstance> result = new ArrayList<>(serviceInstances.size() + 1);
    result.add(this.selfInstance);
    result.addAll(serviceInstances);
    return result;
}
|
@Test
void getInstances_other_service_name() {
    // Queries for a foreign service must pass through to the delegate untouched
    // (no self instance injected, no extra delegate calls).
    final String otherServiceName = "other-service";
    DatabaseDiscoveryClient client = Mockito.mock(DatabaseDiscoveryClient.class);
    Mockito.when(client.getInstances(otherServiceName))
        .thenReturn(
            Collections.singletonList(
                newServiceInstance(otherServiceName, "http://10.240.34.56:8081/", "beijing")
            )
        );
    final String selfServiceName = "self-service";
    ServiceInstance selfInstance = newServiceInstance(
        selfServiceName, "http://10.240.34.56:8081/", "beijing"
    );
    DatabaseDiscoveryClient decorator = new DatabaseDiscoveryClientAlwaysAddSelfInstanceDecoratorImpl(
        client, selfInstance
    );
    List<ServiceInstance> serviceInstances = decorator.getInstances(otherServiceName);
    assertEquals(1, serviceInstances.size());
    ServiceInstance otherServiceNameInstance = serviceInstances.get(0);
    assertEquals(otherServiceName, otherServiceNameInstance.getServiceName());
    Mockito.verify(client, Mockito.times(1))
        .getInstances(Mockito.eq(otherServiceName));
    Mockito.verify(client, Mockito.never())
        .getInstances(Mockito.eq(selfServiceName));
}
|
/**
 * Not supported in standalone mode.
 *
 * @throws UnsupportedOperationException always; task configurations cannot be
 *         set externally on a standalone herder
 */
@Override
public void putTaskConfigs(String connName, List<Map<String, String>> configs, Callback<Void> callback, InternalRequestSignature requestSignature) {
    throw new UnsupportedOperationException("Kafka Connect in standalone mode does not support externally setting task configurations.");
}
|
@Test
public void testPutTaskConfigs() {
    // Standalone herder must reject externally supplied task configurations.
    initialize(false);
    Callback<Void> cb = mock(Callback.class);
    assertThrows(UnsupportedOperationException.class, () -> herder.putTaskConfigs(CONNECTOR_NAME,
        singletonList(singletonMap("config", "value")), cb, null));
}
|
/**
 * Creates a named FastThreadLocalThread: "prefix_counter" (with a "_totalSize"
 * suffix when the factory manages more than one thread), applying the
 * configured daemon flag and normal priority.
 */
@Override
public Thread newThread(Runnable r) {
    final StringBuilder threadName = new StringBuilder(prefix)
            .append('_')
            .append(counter.incrementAndGet());
    if (totalSize > 1) {
        threadName.append('_').append(totalSize);
    }
    Thread thread = new FastThreadLocalThread(group, r, threadName.toString());
    thread.setDaemon(makeDaemons);
    if (Thread.NORM_PRIORITY != thread.getPriority()) {
        thread.setPriority(Thread.NORM_PRIORITY);
    }
    return thread;
}
|
@Test
public void testNewThread() {
    // Threads from the factory carry the configured name prefix and are daemons.
    NamedThreadFactory namedThreadFactory = new NamedThreadFactory("testNameThread", 5);
    Thread testNameThread = namedThreadFactory
        .newThread(() -> {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        });
    assertThat(testNameThread.getName()).startsWith("testNameThread");
    assertThat(testNameThread.isDaemon()).isTrue();
}
|
/**
 * Reconciles a KafkaBridge custom resource: builds the bridge model from the CR,
 * then (in order) reconciles the service account, init cluster role binding,
 * scale-down, service, metrics/logging config map, pod disruption budget,
 * auth/TLS hash, deployment, scale-up, and readiness before filling in the status.
 *
 * <p>Returns a failed future wrapping a {@code ReconciliationException} (carrying
 * the partially-populated status) on any error, including a CR that cannot be
 * converted to the model.
 */
@Override
protected Future<KafkaBridgeStatus> createOrUpdate(Reconciliation reconciliation, KafkaBridge assemblyResource) {
    KafkaBridgeStatus kafkaBridgeStatus = new KafkaBridgeStatus();
    String namespace = reconciliation.namespace();
    KafkaBridgeCluster bridge;
    try {
        bridge = KafkaBridgeCluster.fromCrd(reconciliation, assemblyResource, sharedEnvironmentProvider);
    } catch (Exception e) {
        // Invalid CR: record the failure in the status and abort the reconciliation.
        LOGGER.warnCr(reconciliation, e);
        StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, e);
        return Future.failedFuture(new ReconciliationException(kafkaBridgeStatus, e));
    }
    KafkaClientAuthentication auth = assemblyResource.getSpec().getAuthentication();
    List<CertSecretSource> trustedCertificates = assemblyResource.getSpec().getTls() == null ? Collections.emptyList() : assemblyResource.getSpec().getTls().getTrustedCertificates();
    Promise<KafkaBridgeStatus> createOrUpdatePromise = Promise.promise();
    boolean bridgeHasZeroReplicas = bridge.getReplicas() == 0;
    String initCrbName = KafkaBridgeResources.initContainerClusterRoleBindingName(bridge.getCluster(), namespace);
    ClusterRoleBinding initCrb = bridge.generateClusterRoleBinding();
    LOGGER.debugCr(reconciliation, "Updating Kafka Bridge cluster");
    kafkaBridgeServiceAccount(reconciliation, namespace, bridge)
        .compose(i -> bridgeInitClusterRoleBinding(reconciliation, initCrbName, initCrb))
        .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, bridge.getComponentName(), bridge.getReplicas(), operationTimeoutMs))
        .compose(scale -> serviceOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.serviceName(bridge.getCluster()), bridge.generateService()))
        .compose(i -> MetricsAndLoggingUtils.metricsAndLogging(reconciliation, configMapOperations, bridge.logging(), null))
        .compose(metricsAndLogging -> configMapOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.metricsAndLogConfigMapName(reconciliation.name()), bridge.generateMetricsAndLogConfigMap(metricsAndLogging)))
        .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, bridge.getComponentName(), bridge.generatePodDisruptionBudget()))
        // Hash of auth/TLS material so the deployment rolls when secrets change.
        .compose(i -> VertxUtil.authTlsHash(secretOperations, namespace, auth, trustedCertificates))
        .compose(hash -> deploymentOperations.reconcile(reconciliation, namespace, bridge.getComponentName(), bridge.generateDeployment(Collections.singletonMap(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString(hash)), pfa.isOpenshift(), imagePullPolicy, imagePullSecrets)))
        .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, bridge.getComponentName(), bridge.getReplicas(), operationTimeoutMs))
        .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, bridge.getComponentName(), 1_000, operationTimeoutMs))
        // Skip the readiness wait when the bridge is scaled to zero replicas.
        .compose(i -> bridgeHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, bridge.getComponentName(), 1_000, operationTimeoutMs))
        .onComplete(reconciliationResult -> {
            StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, reconciliationResult.mapEmpty().cause());
            if (!bridgeHasZeroReplicas) {
                int port = KafkaBridgeCluster.DEFAULT_REST_API_PORT;
                if (bridge.getHttp() != null) {
                    port = bridge.getHttp().getPort();
                }
                // The REST endpoint URL is only meaningful when at least one replica runs.
                kafkaBridgeStatus.setUrl(KafkaBridgeResources.url(bridge.getCluster(), namespace, port));
            }
            kafkaBridgeStatus.setReplicas(bridge.getReplicas());
            kafkaBridgeStatus.setLabelSelector(bridge.getSelectorLabels().toSelectorString());
            if (reconciliationResult.succeeded()) {
                createOrUpdatePromise.complete(kafkaBridgeStatus);
            } else {
                createOrUpdatePromise.fail(new ReconciliationException(kafkaBridgeStatus, reconciliationResult.cause()));
            }
        });
    return createOrUpdatePromise.future();
}
|
// Reconciling a KafkaBridge whose deployed resources already match the desired
// state should issue exactly one reconcile per resource type and no extra
// scale-up/scale-down operations beyond the single scheduled calls.
@Test
public void testCreateOrUpdateWithNoDiffCausesNoChanges(VertxTestContext context) {
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    var mockBridgeOps = supplier.kafkaBridgeOperator;
    DeploymentOperator mockDcOps = supplier.deploymentOperations;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    ServiceOperator mockServiceOps = supplier.serviceOperations;
    String kbName = "foo";
    String kbNamespace = "test";
    KafkaBridge kb = ResourceUtils.createKafkaBridge(kbNamespace, kbName, image, 1,
        BOOTSTRAP_SERVERS, KAFKA_BRIDGE_PRODUCER_SPEC, KAFKA_BRIDGE_CONSUMER_SPEC, KAFKA_BRIDGE_HTTP_SPEC, true);
    KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, SHARED_ENV_PROVIDER);
    // Existing cluster state mirrors the desired state, so reconciliation is a no-op diff.
    when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb);
    when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb));
    when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture());
    when(mockServiceOps.get(kbNamespace, bridge.getComponentName())).thenReturn(bridge.generateService());
    when(mockDcOps.get(kbNamespace, bridge.getComponentName())).thenReturn(bridge.generateDeployment(Map.of(), true, null, null));
    when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> serviceNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Service> serviceCaptor = ArgumentCaptor.forClass(Service.class);
    when(mockServiceOps.reconcile(any(), eq(kbNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
    when(mockDcOps.reconcile(any(), eq(kbNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockDcOps.scaleUp(any(), eq(kbNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockDcOps.scaleDown(any(), eq(kbNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture(), anyLong())).thenReturn(Future.succeededFuture());
    ArgumentCaptor<PodDisruptionBudget> pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class);
    when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture());
    when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
    KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx,
        new PlatformFeaturesAvailability(true, kubernetesVersion),
        new MockCertManager(), new PasswordGenerator(10, "a", "a"),
        supplier,
        ResourceUtils.dummyClusterOperatorConfig(VERSIONS));
    Checkpoint async = context.checkpoint();
    ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName), kb)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            // Verify service
            List<Service> capturedServices = serviceCaptor.getAllValues();
            assertThat(capturedServices, hasSize(1));
            // Verify Deployment Config
            List<Deployment> capturedDc = dcCaptor.getAllValues();
            assertThat(capturedDc, hasSize(1));
            // Verify PodDisruptionBudget
            List<PodDisruptionBudget> capturedPdb = pdbCaptor.getAllValues();
            assertThat(capturedPdb, hasSize(1));
            PodDisruptionBudget pdb = capturedPdb.get(0);
            assertThat(pdb.getMetadata().getName(), is(bridge.getComponentName()));
            assertThat(pdb, is(bridge.generatePodDisruptionBudget()));
            // Verify scaleDown / scaleUp were not called
            assertThat(dcScaleDownNameCaptor.getAllValues(), hasSize(1));
            assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1));
            async.flag();
        })));
}
|
/**
 * Builds a REMOVE action for the given task.
 *
 * @param taskId ID of the task to remove; must not be {@code null}
 * @param future completed once the state updater has removed the task; must not be {@code null}
 * @throws NullPointerException if either argument is {@code null} (task ID checked first)
 */
public static TaskAndAction createRemoveTask(final TaskId taskId,
                                             final CompletableFuture<StateUpdater.RemovedTaskResult> future) {
    // Arguments are evaluated left to right, preserving the original check order
    // (task ID first, then future) and the exact exception messages.
    return new TaskAndAction(
        null,
        Objects.requireNonNull(taskId, "Task ID of task to remove is null!"),
        Action.REMOVE,
        Objects.requireNonNull(future, "Future for task to remove is null!"));
}
|
// A null task ID must be rejected eagerly with a descriptive NullPointerException.
@Test
public void shouldThrowIfRemoveTaskActionIsCreatedWithNullTaskId() {
    final Exception exception = assertThrows(
        NullPointerException.class,
        () -> createRemoveTask(null, new CompletableFuture<>())
    );
    assertTrue(exception.getMessage().contains("Task ID of task to remove is null!"));
}
|
/**
 * Looks up (and lazily loads) the referer config cached under the given path.
 *
 * @param path cache key identifying the referer config
 * @return the cached {@code RefererConfig}, cast to the caller's expected type
 * @throws ShenyuException if loading the cache entry fails
 */
@SuppressWarnings("unchecked")
public <T> RefererConfig<T> get(final String path) {
    try {
        final Object cached = cache.get(path);
        return (RefererConfig<T>) cached;
    } catch (ExecutionException e) {
        // Loading failed; surface the cause as a Shenyu runtime exception.
        throw new ShenyuException(e);
    }
}
|
// The cache should lazily build a referer config for the requested path.
@Test
public void testGet() {
    ApplicationConfigCache applicationConfigCache = ApplicationConfigCache.getInstance();
    Assertions.assertEquals(applicationConfigCache.get("/motan").toString(), "<motan:referer />");
}
|
/**
 * Tells whether this depth limit goes deeper than the given component type,
 * dispatching on whether the type belongs to the views tree or the report tree.
 *
 * <p>Returns {@code false} when the corresponding max-depth for that tree is
 * not configured ({@code null}).
 *
 * @throws UnsupportedOperationException if the type is neither a views nor a report type
 */
public boolean isDeeperThan(Component.Type otherType) {
    if (otherType.isViewsType()) {
        return this.viewsMaxDepth != null && this.viewsMaxDepth.isDeeperThan(otherType);
    }
    if (otherType.isReportType()) {
        return this.reportMaxDepth != null && this.reportMaxDepth.isDeeperThan(otherType);
    }
    throw new UnsupportedOperationException(UNSUPPORTED_TYPE_UOE_MSG);
}
|
// The PROJECT limit sits at the bottom of the hierarchy: it is deeper than no type.
@Test
public void PROJECT_isDeeper_than_no_type() {
    for (Type type : Type.values()) {
        assertThat(CrawlerDepthLimit.PROJECT.isDeeperThan(type)).as("isHigherThan(%s)", type).isFalse();
    }
}
|
/**
 * Parses the stream in a forked JVM via a pooled {@code ForkClient}.
 *
 * <p>Exceptions raised on the remote side come back as the {@code Throwable}
 * returned by {@code client.call} and are rethrown here after the client has
 * been released; a local {@code IOException} on the wire is treated as a crash
 * of the forked process and the client is discarded (not returned to the pool).
 *
 * @throws NullPointerException if {@code stream} is null
 * @throws TikaException on parse errors or loss of the forked process
 */
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
                  ParseContext context) throws IOException, SAXException, TikaException {
    if (stream == null) {
        throw new NullPointerException("null stream");
    }
    Throwable t;
    // "alive" tracks whether the client is still usable and may be returned to the pool.
    boolean alive = false;
    ForkClient client = acquireClient();
    try {
        // Recursive-parser handlers manage their own metadata; otherwise tee the
        // events so metadata updates reach the caller's Metadata object too.
        ContentHandler tee =
                (handler instanceof AbstractRecursiveParserWrapperHandler) ? handler :
                new TeeContentHandler(handler, new MetadataContentHandler(metadata));
        t = client.call("parse", stream, tee, metadata, context);
        alive = true;
    } catch (TikaException te) {
        // Problem occurred on our side
        alive = true;
        throw te;
    } catch (IOException e) {
        // Problem occurred on the other side
        throw new TikaException("Failed to communicate with a forked parser process." +
                " The process has most likely crashed due to some error" +
                " like running out of memory. A new process will be" +
                " started for the next parsing request.", e);
    } finally {
        releaseClient(client, alive);
    }
    // Re-throw the remote-side throwable with its original static type where possible.
    if (t instanceof IOException) {
        throw (IOException) t;
    } else if (t instanceof SAXException) {
        throw (SAXException) t;
    } else if (t instanceof TikaException) {
        throw (TikaException) t;
    } else if (t != null) {
        throw new TikaException("Unexpected error in forked server process", t);
    }
}
|
// A non-serializable content handler used with RecursiveParserWrapper should still
// surface the remote NPE as a TikaException, and the metadata gathered before the
// failure (main document plus first embedded doc) must remain accessible.
@Test
public void testRPWWithNonSerializableContentHandler() throws Exception {
    Parser parser = new AutoDetectParser();
    RecursiveParserWrapper wrapper = new RecursiveParserWrapper(parser);
    RecursiveParserWrapperHandler handler =
            new RecursiveParserWrapperHandler(new NonSerializableHandlerFactory());
    try (ForkParser fork = new ForkParser(ForkParserTest.class.getClassLoader(), wrapper);
         InputStream is = getResourceAsStream("/test-documents/embedded_then_npe.xml")) {
        Metadata metadata = new Metadata();
        ParseContext context = new ParseContext();
        fork.parse(is, handler, metadata, context);
        fail();
    } catch (TikaException e) {
        // The forked process hit an NPE while parsing; its cause is propagated.
        assertTrue(e.getCause() instanceof NullPointerException);
        assertContains("another", e.getCause().getMessage());
    }
    List<Metadata> metadataList = handler.getMetadataList();
    Metadata m0 = metadataList.get(0);
    assertEquals("Nikolai Lobachevsky", m0.get(TikaCoreProperties.CREATOR));
    assertContains("main_content", m0.get(TikaCoreProperties.TIKA_CONTENT));
    assertContains("embed1.xml", m0.get(TikaCoreProperties.TIKA_CONTENT));
    Metadata m1 = metadataList.get(1);
    assertEquals("embeddedAuthor", m1.get(TikaCoreProperties.CREATOR));
    assertContains("some_embedded_content", m1.get(TikaCoreProperties.TIKA_CONTENT));
    assertEquals("/embed1.xml", m1.get(TikaCoreProperties.EMBEDDED_RESOURCE_PATH));
}
/**
 * Skips up to {@code n} bytes of the stream.
 *
 * <p>Non-positive requests skip nothing. Once the stream is finished or when
 * the position is already at or beyond the file size, zero is returned.
 *
 * @return the number of bytes actually skipped
 * @throws IOException if the stream has been closed
 */
@Override
public long skip(long n) throws IOException {
    if (n <= 0) {
        return 0;
    }
    synchronized (this) {
        checkNotClosed();
        if (finished) {
            return 0;
        }
        // Bytes left before end of file; never negative even if pos ran past size.
        long remaining = Math.max(file.size() - pos, 0);
        // available() must be an int, so the min must be also
        int skipped = (int) Math.min(remaining, n);
        pos += skipped;
        return skipped;
    }
}
|
// Exercises skip(): non-positive requests are no-ops, partial skips near EOF
// return the truncated count, and skipping at EOF returns 0.
@Test
public void testSkip() throws IOException {
    JimfsInputStream in = newInputStream(1, 2, 3, 4, 5, 6, 7, 8);
    assertThat(in.skip(0)).isEqualTo(0);
    assertThat(in.skip(-10)).isEqualTo(0);
    assertThat(in.skip(2)).isEqualTo(2);
    assertThat(in.read()).isEqualTo(3);
    assertThat(in.skip(3)).isEqualTo(3);
    assertThat(in.read()).isEqualTo(7);
    assertThat(in.skip(10)).isEqualTo(1);
    assertEmpty(in);
    assertThat(in.skip(10)).isEqualTo(0);
    assertEmpty(in);
}
|
/**
 * Releases all native handles tracked by this container, then the shared
 * resources (if any), and finally cleans up relocated DB log files.
 *
 * <p>Individual handle close failures are swallowed via {@code closeQuietly}
 * so that the remaining resources are still released.
 */
@Override
public void close() throws Exception {
    handlesToClose.forEach(IOUtils::closeQuietly);
    handlesToClose.clear();
    if (sharedResources != null) {
        sharedResources.close();
    }
    cleanRelocatedDbLogs();
}
|
// Closing the resource container must release ownership of the shared cache and
// write-buffer-manager handles.
@Test
public void testSharedResourcesAfterClose() throws Exception {
    OpaqueMemoryResource<ForStSharedResources> sharedResources = getSharedResources();
    ForStResourceContainer container = new ForStResourceContainer(null, sharedResources);
    container.close();
    ForStSharedResources forStSharedResources = sharedResources.getResourceHandle();
    assertThat(forStSharedResources.getCache().isOwningHandle(), is(false));
    assertThat(forStSharedResources.getWriteBufferManager().isOwningHandle(), is(false));
}
|
// Constant hash code. Legal per the hashCode contract (all equal objects share it),
// and the paired unit test pins this exact value, so it appears intentional —
// NOTE(review): a constant hash degrades hash-based collections to linear scans;
// confirm this is deliberate before "fixing" it.
@Override
public int hashCode() {
    return 0;
}
|
// Pins the documented constant hash code of Permission.
@Test
public void testHashCode() {
    Permission permission = new Permission("classname", "name", "actions");
    assertEquals(0, permission.hashCode());
}
|
/**
 * Sends the event notification to the configured Teams webhook.
 *
 * <p>Temporary failures are rethrown unchanged so the scheduler retries them;
 * permanent failures additionally publish an urgent system notification before
 * rethrowing; any other exception is wrapped in an {@code EventNotificationException}.
 */
@Override
public void execute(EventNotificationContext ctx) throws EventNotificationException {
    final TeamsEventNotificationConfigV2 config = (TeamsEventNotificationConfigV2) ctx.notificationConfig();
    LOG.debug("TeamsEventNotificationV2 backlog size in method execute is [{}]", config.backlogSize());
    try {
        final String requestBody = generateBody(ctx, config);
        requestClient.send(requestBody, config.webhookUrl());
    } catch (TemporaryEventNotificationException exp) {
        // Scheduler needs to retry a TemporaryEventNotificationException
        throw exp;
    } catch (PermanentEventNotificationException exp) {
        // Permanent failure: alert operators via a system notification, then rethrow.
        String errorMessage = StringUtils.f("Error sending Teams Notification ID: %s. %s", ctx.notificationId(), exp.getMessage());
        final Notification systemNotification = notificationService.buildNow()
                .addNode(nodeId.getNodeId())
                .addType(Notification.Type.GENERIC)
                .addSeverity(Notification.Severity.URGENT)
                .addDetail("title", "TeamsEventNotificationV2 Failed")
                .addDetail("description", errorMessage);
        notificationService.publishIfFirst(systemNotification);
        throw exp;
    } catch (Exception exp) {
        throw new EventNotificationException("There was an exception triggering the TeamsEventNotification", exp);
    }
}
|
// A permanent client failure must bubble up as an EventNotificationException
// (the permanent exception type extends it).
@Test(expected = EventNotificationException.class)
public void executeWithInvalidWebhookUrl() throws EventNotificationException {
    givenGoodNotificationService();
    givenTeamsClientThrowsPermException();
    // When execute is called with an invalid webhook URL, we expect an event notification exception.
    teamsEventNotification.execute(eventNotificationContext);
}
|
// Delegates to the underlying raw representation so equal wrappers hash equally.
@Override
public int hashCode() {
    return raw.hashCode();
}
|
// Two independently built but identical tables must be equal and share a hash code.
@Test
void two_identical_tables_are_considered_equal() {
    assertEquals(createSimpleTable(), createSimpleTable());
    assertEquals(createSimpleTable().hashCode(), createSimpleTable().hashCode());
}
|
@Override
public boolean createReservation(ReservationId reservationId, String user,
Plan plan, ReservationDefinition contract) throws PlanningException {
LOG.info("placing the following ReservationRequest: " + contract);
try {
boolean res =
planner.createReservation(reservationId, user, plan, contract);
if (res) {
LOG.info("OUTCOME: SUCCESS, Reservation ID: "
+ reservationId.toString() + ", Contract: " + contract.toString());
} else {
LOG.info("OUTCOME: FAILURE, Reservation ID: "
+ reservationId.toString() + ", Contract: " + contract.toString());
}
return res;
} catch (PlanningException e) {
LOG.info("OUTCOME: FAILURE, Reservation ID: " + reservationId.toString()
+ ", Contract: " + contract.toString());
throw e;
}
}
|
// Submits an R_ALL reservation (every request must be satisfied) and checks the
// resulting allocation landed in the expected 10-step windows, which differ
// depending on whether the agent allocates from the left or the right.
@Test
public void testAll() throws PlanningException {
    prepareBasicPlan();
    // create an ALL request
    ReservationDefinition rr = new ReservationDefinitionPBImpl();
    rr.setArrival(100 * step);
    rr.setDeadline(120 * step);
    rr.setRecurrenceExpression(recurrenceExpression);
    ReservationRequests reqs = new ReservationRequestsPBImpl();
    reqs.setInterpreter(ReservationRequestInterpreter.R_ALL);
    ReservationRequest r = ReservationRequest.newInstance(
        Resource.newInstance(1024, 1), 5, 5, 10 * step);
    ReservationRequest r2 = ReservationRequest.newInstance(
        Resource.newInstance(2048, 2), 10, 10, 20 * step);
    List<ReservationRequest> list = new ArrayList<ReservationRequest>();
    list.add(r);
    list.add(r2);
    reqs.setReservationResources(list);
    rr.setReservationRequests(reqs);
    // submit to agent
    ReservationId reservationID = ReservationSystemTestUtil
        .getNewReservationId();
    agent.createReservation(reservationID, "u1", plan, rr);
    // validate results, we expect the second one to be accepted
    assertTrue("Agent-based allocation failed", reservationID != null);
    assertTrue("Agent-based allocation failed", plan.getAllReservations()
        .size() == 3);
    ReservationAllocation cs = plan.getReservationById(reservationID);
    // Allocation direction flips which window carries the heavier load.
    if (allocateLeft) {
        assertTrue(cs.toString(), check(cs, 100 * step, 110 * step, 25, 1024, 1));
        assertTrue(cs.toString(), check(cs, 110 * step, 120 * step, 20, 1024, 1));
    } else {
        assertTrue(cs.toString(), check(cs, 100 * step, 110 * step, 20, 1024, 1));
        assertTrue(cs.toString(), check(cs, 110 * step, 120 * step, 25, 1024, 1));
    }
    System.out.println("--------AFTER ALL ALLOCATION (queue: " + reservationID
        + ")----------");
    System.out.println(plan.toString());
    System.out.println(plan.toCumulativeString());
}
|
/**
 * Renders the given AST node as SQL text, with any trailing newlines removed.
 *
 * @param root the AST node to format
 * @return the formatted SQL string
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    new Formatter(sql).process(root, 0);
    // The formatter may leave trailing newlines; strip them from the result.
    return StringUtils.stripEnd(sql.toString(), "\n");
}
|
// DESCRIBE STREAMS (non-extended) should format to the bare statement text.
@Test
public void shouldFormatDescribeStreams() {
    // Given:
    final DescribeStreams describeStreams = new DescribeStreams(Optional.empty(), false);
    // When:
    final String formatted = SqlFormatter.formatSql(describeStreams);
    // Then:
    assertThat(formatted, is("DESCRIBE STREAMS"));
}
|
/**
 * Advances the Kafka metadata state machine by one step based on the current
 * state and the cluster status, updating the internal {@code metadataState}
 * field as a side effect.
 *
 * <p>A transition is logged at INFO level; staying in the same state is logged
 * at DEBUG. The per-state logic lives in the {@code on*} handlers.
 *
 * @return the (possibly unchanged) metadata state after this step
 */
public KafkaMetadataState computeNextMetadataState(KafkaStatus kafkaStatus) {
    KafkaMetadataState currentState = metadataState;
    metadataState = switch (currentState) {
        case KRaft -> onKRaft(kafkaStatus);
        case ZooKeeper -> onZooKeeper(kafkaStatus);
        case KRaftMigration -> onKRaftMigration(kafkaStatus);
        case KRaftDualWriting -> onKRaftDualWriting(kafkaStatus);
        case KRaftPostMigration -> onKRaftPostMigration(kafkaStatus);
        case PreKRaft -> onPreKRaft(kafkaStatus);
    };
    if (metadataState != currentState) {
        LOGGER.infoCr(reconciliation, "Transitioning metadata state from [{}] to [{}] with strimzi.io/kraft annotation [{}]", currentState, metadataState, kraftAnno);
    } else {
        LOGGER.debugCr(reconciliation, "Metadata state [{}] with strimzi.io/kraft annotation [{}]", metadataState, kraftAnno);
    }
    return metadataState;
}
|
// With the kraft annotation enabled, a cluster in KRaftPostMigration should
// advance to the PreKRaft state.
@Test
public void testFromKRaftPostMigrationToPreKRaft() {
    Kafka kafka = new KafkaBuilder(KAFKA)
            .editMetadata()
                .addToAnnotations(Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled")
            .endMetadata()
            .withNewStatus()
                .withKafkaMetadataState(KRaftPostMigration)
            .endStatus()
            .build();
    KafkaMetadataStateManager kafkaMetadataStateManager = new KafkaMetadataStateManager(Reconciliation.DUMMY_RECONCILIATION, kafka);
    assertEquals(kafkaMetadataStateManager.computeNextMetadataState(kafka.getStatus()), PreKRaft);
}
|
/**
 * Records a single cache eviction with an implicit weight of {@code 1}.
 *
 * @deprecated scheduled for removal in version 3.0; use the weighted
 *     {@code recordEviction(weight)} overload instead.
 */
@Deprecated
@SuppressWarnings("deprecation")
public void recordEviction() {
    // This method is scheduled for removal in version 3.0 in favor of recordEviction(weight)
    recordEviction(1);
}
|
// A weighted eviction bumps the eviction histogram once and the weight counter
// by the supplied weight.
@Test
public void evictionWithWeight() {
    stats.recordEviction(3);
    assertThat(registry.histogram(PREFIX + ".evictions").getCount()).isEqualTo(1);
    assertThat(registry.counter(PREFIX + ".evictions-weight").getCount()).isEqualTo(3);
}
|
/**
 * Parses the submitted pipeline source without persisting anything.
 *
 * <p>On a parser failure the collected parse errors are returned to the client
 * as the body of a 400 response. On success a fresh {@code PipelineSource} is
 * built from the parsed pipeline with both timestamps set to "now" (UTC).
 */
@ApiOperation(value = "Parse a processing pipeline without saving it")
@POST
@Path("/parse")
@NoAuditEvent("only used to parse a pipeline, no changes made in the system")
public PipelineSource parse(@ApiParam(name = "pipeline", required = true) @NotNull PipelineSource pipelineSource) throws ParseException {
    final Pipeline pipeline;
    try {
        pipeline = pipelineRuleParser.parsePipeline(pipelineSource.id(), pipelineSource.source());
    } catch (ParseException e) {
        // Ship the structured parse errors back in the 400 response body.
        throw new BadRequestException(Response.status(Response.Status.BAD_REQUEST).entity(e.getErrors()).build());
    }
    final DateTime now = DateTime.now(DateTimeZone.UTC);
    return PipelineSource.builder()
            .title(pipeline.name())
            .description(pipelineSource.description())
            .source(pipelineSource.source())
            .stages(pipeline.stages().stream()
                    .map(stage -> StageSource.create(
                            stage.stage(),
                            stage.match(),
                            stage.ruleReferences()))
                    .collect(Collectors.toList()))
            .createdAt(now)
            .modifiedAt(now)
            .build();
}
|
// A ParseException from the rule parser must surface as a 400 BadRequestException.
@Test
public void shouldNotParseAPipelineSuccessfullyIfRaisingAnError() {
    final PipelineSource pipelineSource = PipelineSource.builder()
            .source("foo")
            .stages(Collections.emptyList())
            .title("Graylog Git Pipeline")
            .build();
    when(pipelineRuleParser.parsePipeline(pipelineSource.id(), pipelineSource.source()))
            .thenThrow(new ParseException(Collections.emptySet()));
    assertThatExceptionOfType(BadRequestException.class)
            .isThrownBy(() -> this.pipelineResource.parse(pipelineSource));
}
|
/**
 * Decodes a single DNS resource record from the buffer.
 *
 * <p>If the buffer does not yet contain a full record (fixed 10-byte header
 * after the name, plus the RDATA of the advertised length), the reader index is
 * rolled back to where it started and {@code null} is returned so the caller
 * can wait for more data.
 */
@Override
public final <T extends DnsRecord> T decodeRecord(ByteBuf in) throws Exception {
    final int startOffset = in.readerIndex();
    final String name = decodeName(in);
    // NOTE(review): "endOffset" is the buffer's writer index, i.e. the end of
    // readable data, used below as the upper bound for availability checks.
    final int endOffset = in.writerIndex();
    if (endOffset - in.readerIndex() < 10) {
        // Not enough data
        in.readerIndex(startOffset);
        return null;
    }
    final DnsRecordType type = DnsRecordType.valueOf(in.readUnsignedShort());
    final int aClass = in.readUnsignedShort();
    final long ttl = in.readUnsignedInt();
    final int length = in.readUnsignedShort();
    final int offset = in.readerIndex();
    if (endOffset - offset < length) {
        // Not enough data
        in.readerIndex(startOffset);
        return null;
    }
    @SuppressWarnings("unchecked")
    T record = (T) decodeRecord(name, type, aClass, ttl, in, offset, length);
    // Advance past the RDATA regardless of how much the sub-decoder consumed.
    in.readerIndex(offset + length);
    return record;
}
|
// Decoding a PTR record must populate name/class/TTL/type from the supplied
// values and must not move the buffer's reader or writer index.
@Test
public void testDecodePtrRecord() throws Exception {
    DefaultDnsRecordDecoder decoder = new DefaultDnsRecordDecoder();
    ByteBuf buffer = Unpooled.buffer().writeByte(0);
    int readerIndex = buffer.readerIndex();
    int writerIndex = buffer.writerIndex();
    try {
        DnsPtrRecord record = (DnsPtrRecord) decoder.decodeRecord(
                "netty.io", DnsRecordType.PTR, DnsRecord.CLASS_IN, 60, buffer, 0, 1);
        assertEquals("netty.io.", record.name());
        assertEquals(DnsRecord.CLASS_IN, record.dnsClass());
        assertEquals(60, record.timeToLive());
        assertEquals(DnsRecordType.PTR, record.type());
        assertEquals(readerIndex, buffer.readerIndex());
        assertEquals(writerIndex, buffer.writerIndex());
    } finally {
        buffer.release();
    }
}
|
/**
 * Converts a schema field name into a generated-code function name:
 * lower snake case, then sanitized for reserved method/property words.
 * The empty string is returned unchanged.
 */
static String formatFunctionName(final String value)
{
    return value.isEmpty() ? value : sanitizeMethodOrProperty(toLowerSnakeCase(value));
}
|
// Exhaustive casing matrix for function-name generation: camel/Pascal/upper
// snake inputs all collapse to lower snake case, digits get separated, and
// reserved words are escaped (e.g. "type" -> "r#type").
@Test
void functionNameCasing()
{
    assertEquals("", formatFunctionName(""));
    assertEquals("a", formatFunctionName("a"));
    assertEquals("a", formatFunctionName("A"));
    assertEquals("car", formatFunctionName("Car"));
    assertEquals("car", formatFunctionName("car"));
    assertEquals("decode_car", formatFunctionName("DecodeCar"));
    assertEquals("decode_car", formatFunctionName("decodeCar"));
    assertEquals("decode_car", formatFunctionName("decode_car"));
    assertEquals("decode_car", formatFunctionName("Decode_car"));
    assertEquals("decode_car", formatFunctionName("decode_Car"));
    assertEquals("decode_car", formatFunctionName("Decode_Car"));
    assertEquals("decode_car", formatFunctionName("DECODE_Car"));
    assertEquals("decode_car", formatFunctionName("DECODE_car"));
    assertEquals("decode_car", formatFunctionName("DECODECar"));
    assertEquals("decode_car", formatFunctionName("DECODE_CAR"));
    assertEquals("decode_ca_r", formatFunctionName("DECODE_caR"));
    // special cases
    assertEquals("pricenull_9", formatFunctionName("PRICENULL9"));
    assertEquals("price_9_book", formatFunctionName("PRICE9Book"));
    assertEquals("issue_435", formatFunctionName("issue435"));
    assertEquals("r#type", formatFunctionName("type"));
    assertEquals("upper_case", formatFunctionName("UPPERCase"));
    assertEquals("no_md_entries", formatFunctionName("NoMDEntries"));
    assertEquals("md_entry_type_book", formatFunctionName("MD_EntryTYPEBook"));
    assertEquals("cl_ord_id", formatFunctionName("ClOrdID"));
    assertEquals("ab_c", formatFunctionName("aBc"));
    assertEquals("ab_cd", formatFunctionName("aBcD"));
    assertEquals("ab_cd", formatFunctionName("aB_cD"));
    assertEquals("ab_cd", formatFunctionName("AbCd"));
}
|
/**
 * Lists all metadata grouped by group name.
 * Requires the {@code system:meta:list} permission.
 */
@GetMapping("/findAllGroup")
@RequiresPermissions("system:meta:list")
public ShenyuAdminResult findAllGroup() {
    return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, metaDataService.findAllGroup());
}
|
// The grouped-metadata endpoint should return the service's map keyed by group
// name, with the success message set.
@Test
public void testFindAllGroup() throws Exception {
    final Map<String, List<MetaDataVO>> result = new HashMap<>();
    String groupName = "groupName-1";
    List<MetaDataVO> metaDataVOS = new ArrayList<>();
    metaDataVOS.add(metaDataVO);
    result.put(groupName, metaDataVOS);
    given(this.metaDataService.findAllGroup()).willReturn(result);
    this.mockMvc.perform(MockMvcRequestBuilders.get("/meta-data/findAllGroup"))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.QUERY_SUCCESS)))
            .andExpect(jsonPath("$.data." + groupName + "[0].appName", is(metaDataVO.getAppName())))
            .andReturn();
}
|
// The string form of this object is its key.
@Override
public String toString() {
    return key;
}
|
// A bundle's string form is "<namespace>/<lowerHex>_<upperHex>" for its key range.
@Test
public void testToString() {
    NamespaceBundle bundle0 = factory.getBundle(NamespaceName.get("pulsar/use/ns1"),
            Range.range(0l, BoundType.CLOSED, 0x10000000L, BoundType.OPEN));
    assertEquals(bundle0.toString(), "pulsar/use/ns1/0x00000000_0x10000000");
    bundle0 = factory.getBundle(NamespaceName.get("pulsar/use/ns1"),
            Range.range(0x10000000l, BoundType.CLOSED, NamespaceBundles.FULL_UPPER_BOUND, BoundType.CLOSED));
    assertEquals(bundle0.toString(), "pulsar/use/ns1/0x10000000_0xffffffff");
}
|
/**
 * Reads the merge type from the task config, case-insensitively.
 *
 * @param taskConfig task configuration map
 * @return the configured {@code MergeType}, or {@code null} if the key is absent
 * @throws IllegalArgumentException if the configured value matches no {@code MergeType}
 */
@Nullable
public static MergeType getMergeType(Map<String, String> taskConfig) {
    String mergeType = taskConfig.get(MergeTask.MERGE_TYPE_KEY);
    if (mergeType == null) {
        return null;
    }
    // Upper-case with a fixed locale so enum lookup is independent of the JVM's
    // default locale (e.g. the Turkish dotless-i rule can break valueOf otherwise).
    return MergeType.valueOf(mergeType.toUpperCase(java.util.Locale.ROOT));
}
|
// Merge type lookup is case-insensitive, returns null for a missing key, and
// rejects unknown values with IllegalArgumentException.
@Test
public void testGetMergeType() {
    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "concat")),
        MergeType.CONCAT);
    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "Rollup")),
        MergeType.ROLLUP);
    assertEquals(MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "DeDuP")),
        MergeType.DEDUP);
    assertNull(MergeTaskUtils.getMergeType(Collections.emptyMap()));
    try {
        MergeTaskUtils.getMergeType(Collections.singletonMap(MergeTask.MERGE_TYPE_KEY, "unsupported"));
        fail();
    } catch (IllegalArgumentException e) {
        // Expected
    }
}
|
/**
 * Builds a {@code Statement} AST from the parse tree, first resolving the
 * data sources referenced by the tree.
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
|
// A function call carrying two lambda arguments (one unary, one binary) should
// produce a FunctionCall AST with the matching LambdaFunctionCall nodes.
@Test
public void shouldBuildLambdaFunctionWithMultipleLambdas() {
    // Given:
    final SingleStatementContext stmt = givenQuery("SELECT TRANSFORM_ARRAY(Col4, X => X + 5, (X,Y) => X + Y) FROM TEST1;");
    // When:
    final Query result = (Query) builder.buildStatement(stmt);
    // Then:
    assertThat(result.getSelect(), is(new Select(ImmutableList.of(
        new SingleColumn(
            new FunctionCall(
                FunctionName.of("TRANSFORM_ARRAY"),
                ImmutableList.of(
                    column("COL4"),
                    // X => X + 5
                    new LambdaFunctionCall(
                        ImmutableList.of("X"),
                        new ArithmeticBinaryExpression(
                            Operator.ADD,
                            new LambdaVariable("X"),
                            new IntegerLiteral(5))
                    ),
                    // (X, Y) => X + Y
                    new LambdaFunctionCall(
                        ImmutableList.of("X", "Y"),
                        new ArithmeticBinaryExpression(
                            Operator.ADD,
                            new LambdaVariable("X"),
                            new LambdaVariable("Y")
                        )
                    )
                )
            ),
            Optional.empty())
    ))));
}
|
/**
 * Sends the given Bot API request synchronously and returns its typed response.
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
|
// Round-trips a venue message: first with Foursquare identifiers, then with
// Google Place identifiers, checking each field comes back as sent.
@Test
public void sendVenue() {
    float lat = 21.999998f, lng = 105.2f;
    String title = "title", address = "addr", frsqrId = "asdfasdf", frsqrType = "frType";
    Venue venue = bot.execute(new SendVenue(chatId, lat, lng, title, address)
        .foursquareId(frsqrId).foursquareType(frsqrType)
    ).message().venue();
    assertEquals(lat, venue.location().latitude(), 0f);
    assertEquals(lng, venue.location().longitude(), 0f);
    assertEquals(address, venue.address());
    assertEquals(title, venue.title());
    assertEquals(frsqrId, venue.foursquareId());
    assertEquals(frsqrType, venue.foursquareType());
    String ggId = "ggId", ggType = "library";
    venue = bot.execute(new SendVenue(chatId, lat, lng, title, address)
        .googlePlaceId(ggId).googlePlaceType(ggType)
    ).message().venue();
    assertEquals(ggId, venue.googlePlaceId());
    assertEquals(ggType, venue.googlePlaceType());
}
|
/**
 * Returns the transform function name: IS_DISTINCT_FROM when this instance was
 * built for the distinct comparison ({@code _distinctResult == 1}), otherwise
 * IS_NOT_DISTINCT_FROM.
 */
@Override
public String getName() {
    return _distinctResult == 1
        ? TransformFunctionType.IS_DISTINCT_FROM.getName()
        : TransformFunctionType.IS_NOT_DISTINCT_FROM.getName();
}
|
// Comparing a null column with itself: two nulls are NOT distinct, so
// IS DISTINCT FROM yields false and IS NOT DISTINCT FROM yields true for every row.
@Test
public void testDistinctFromBothNull()
    throws Exception {
    ExpressionContext expression =
        RequestContextUtils.getExpression(String.format(_expression, INT_SV_NULL_COLUMN, INT_SV_NULL_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertEquals(transformFunction.getName(), _isDistinctFrom ? "is_distinct_from" : "is_not_distinct_from");
    boolean[] expectedIntValues = new boolean[NUM_ROWS];
    for (int i = 0; i < NUM_ROWS; i++) {
        expectedIntValues[i] = !_isDistinctFrom;
    }
    testTransformFunction(expression, expectedIntValues, _projectionBlock, _dataSourceMap);
}
|
/**
 * Returns {@code true} if at least one data directory failed while adding the
 * block pool (i.e. the unhealthy-dir map is non-empty).
 */
public boolean hasExceptions() {
    return !unhealthyDataDirs.isEmpty();
}
|
// hasExceptions() is false for an empty exception set and true once a volume
// failure has been recorded.
@Test
public void testHasExceptionsReturnsCorrectValue() {
    AddBlockPoolException e = new AddBlockPoolException();
    assertFalse(e.hasExceptions());
    FsVolumeImpl fakeVol = mock(FsVolumeImpl.class);
    ConcurrentHashMap<FsVolumeSpi, IOException> vols =
        new ConcurrentHashMap<FsVolumeSpi, IOException>();
    vols.put(fakeVol, new IOException("Error 1"));
    e = new AddBlockPoolException(vols);
    assertTrue(e.hasExceptions());
}
|
/**
 * Returns the current default nodes path (mutable via the matching setter).
 */
public static String getDefaultNodesPath() {
    return defaultNodesPath;
}
|
// The default nodes path starts as "serverlist" and reflects subsequent setter calls.
@Test
void testGetDefaultNodesPath() {
    String defaultVal = ParamUtil.getDefaultNodesPath();
    assertEquals("serverlist", defaultVal);
    String expect = "test";
    ParamUtil.setDefaultNodesPath(expect);
    assertEquals(expect, ParamUtil.getDefaultNodesPath());
}
|
/**
 * Returns the first non-null argument, scanning {@code first} and then
 * {@code others} in order; {@code null} if every argument (or the varargs
 * array itself) is null.
 */
@SuppressWarnings("varargs")
@SafeVarargs
@Udf
public final <T> T coalesce(final T first, final T... others) {
    if (first != null) {
        return first;
    }
    if (others == null) {
        return null;
    }
    for (final T candidate : others) {
        if (candidate != null) {
            return candidate;
        }
    }
    return null;
}
|
// A null first argument with an empty varargs array coalesces to null.
@Test
public void shouldReturnNullForEmptyOthers() {
    assertThat(udf.coalesce(null, new Double[]{}), is(nullValue()));
}
|
/**
 * Registers a per-topic translation built from the given extraction function
 * and output fields; convenience overload wrapping a {@code SimpleRecordTranslator}.
 */
public ByTopicRecordTranslator<K, V> forTopic(String topic, Func<ConsumerRecord<K, V>, List<Object>> func, Fields fields) {
    return forTopic(topic, new SimpleRecordTranslator<>(func, fields));
}
|
// Registering two translators for the same topic must fail with IllegalStateException.
@Test
public void testTopicCollision() {
    assertThrows(IllegalStateException.class, () -> {
        ByTopicRecordTranslator<String, String> trans =
            new ByTopicRecordTranslator<>((r) -> new Values(r.key()), new Fields("key"));
        trans.forTopic("foo", (r) -> new Values(r.value()), new Fields("value"), "foo1");
        trans.forTopic("foo", (r) -> new Values(r.key(), r.value()), new Fields("key", "value"), "foo2");
    });
}
|
/**
 * Queries metric values from the Prometheus HTTP API over a time range.
 *
 * <p>Issues a POST to the {@code query_range} endpoint. Prometheus expects
 * "start", "end" and "step" as (possibly fractional) seconds, so the
 * millisecond inputs are converted to double-valued seconds; samples at the
 * "end" timestamp are included in the response.
 *
 * @param queryString the Prometheus query expression to evaluate
 * @param startTimeMs range start, in milliseconds since the Unix epoch
 * @param endTimeMs range end (inclusive), in milliseconds since the Unix epoch
 * @return the list of query results parsed from the response body
 * @throws IOException if the HTTP status is not 200, the body cannot be parsed,
 *         the Prometheus status is not "success", or the payload is missing its
 *         data/result sections
 */
public List<PrometheusQueryResult> queryMetric(String queryString,
                                               long startTimeMs,
                                               long endTimeMs) throws IOException {
  URI queryUri = URI.create(_prometheusEndpoint.toURI() + QUERY_RANGE_API_PATH);
  HttpPost httpPost = new HttpPost(queryUri);
  List<NameValuePair> data = new ArrayList<>();
  data.add(new BasicNameValuePair(QUERY, queryString));
  /* "start" and "end" are expected to be unix timestamp in seconds (number of seconds since the Unix epoch).
     They accept values with a decimal point (up to 64 bits). The samples returned are inclusive of the "end"
     timestamp provided.
   */
  data.add(new BasicNameValuePair(START, String.valueOf((double) startTimeMs / SEC_TO_MS)));
  data.add(new BasicNameValuePair(END, String.valueOf((double) endTimeMs / SEC_TO_MS)));
  // step is expected to be in seconds, and accepts values with a decimal point (up to 64 bits).
  data.add(new BasicNameValuePair(STEP, String.valueOf((double) _samplingIntervalMs / SEC_TO_MS)));
  // Encode the form explicitly as UTF-8: the no-charset constructor falls back
  // to the HttpClient default charset and can mangle non-ASCII query strings.
  httpPost.setEntity(new UrlEncodedFormEntity(data, StandardCharsets.UTF_8));
  // try-with-resources guarantees the response (and its connection) is released
  // on every exit path, including the IOException throws below.
  try (CloseableHttpResponse response = _httpClient.execute(httpPost)) {
    int responseCode = response.getStatusLine().getStatusCode();
    HttpEntity entity = response.getEntity();
    InputStream content = entity.getContent();
    String responseString = IOUtils.toString(content, StandardCharsets.UTF_8);
    if (responseCode != HttpServletResponse.SC_OK) {
      throw new IOException(String.format("Received non-success response code on Prometheus API HTTP call,"
                                              + " response code = %d, response body = %s",
                                          responseCode, responseString));
    }
    PrometheusResponse prometheusResponse = GSON.fromJson(responseString, PrometheusResponse.class);
    if (prometheusResponse == null) {
      throw new IOException(String.format(
          "No response received from Prometheus API query, response body = %s", responseString));
    }
    if (!SUCCESS.equals(prometheusResponse.status())) {
      throw new IOException(String.format(
          "Prometheus API query was not successful, response body = %s", responseString));
    }
    if (prometheusResponse.data() == null
        || prometheusResponse.data().result() == null) {
      throw new IOException(String.format(
          "Response from Prometheus HTTP API is malformed, response body = %s", responseString));
    }
    // Fully consume the entity so the connection can be reused by the pool.
    EntityUtils.consume(entity);
    return prometheusResponse.data().result();
  }
}
|
@Test(expected = IOException.class)
public void testFailureResponseWith403Code() throws Exception {
  // A 403 from the query_range endpoint must surface as an IOException,
  // even though the body is a syntactically valid Prometheus response.
  this.serverBootstrap.registerHandler("/api/v1/query_range", new HttpRequestHandler() {
    @Override
    public void handle(HttpRequest request, HttpResponse response, HttpContext context) {
      response.setStatusCode(HttpServletResponse.SC_FORBIDDEN);
      response.setEntity(new StringEntity(
          "{\"status\": \"failure\", \"data\": {\"result\": []}}", StandardCharsets.UTF_8));
    }
  });
  HttpHost host = this.start();
  PrometheusAdapter adapter =
      new PrometheusAdapter(this.httpclient, host, SAMPLING_INTERVAL_MS);
  adapter.queryMetric(
      "kafka_server_BrokerTopicMetrics_OneMinuteRate{name=\"BytesOutPerSec\",topic=\"\"}",
      START_TIME_MS, END_TIME_MS);
}
|
/**
 * Offsets a {@link LocalDateTime} by the given amount of the given unit,
 * delegating to {@code TemporalUtil.offset}. A negative {@code number}
 * shifts backwards; the input is not mutated ({@code LocalDateTime} is immutable).
 *
 * @param time the base date-time
 * @param number the amount to offset by (may be negative)
 * @param field the unit of {@code number}, e.g. {@code ChronoUnit.DAYS}
 * @return a new, offset {@code LocalDateTime}
 */
public static LocalDateTime offset(LocalDateTime time, long number, TemporalUnit field) {
    return TemporalUtil.offset(time, number, field);
}
|
@Test
public void offset() {
    final LocalDateTime base = LocalDateTimeUtil.parse("2020-01-23T12:23:56");

    // Shifting forward by one day yields a new instance, not the same object.
    LocalDateTime shifted = LocalDateTimeUtil.offset(base, 1, ChronoUnit.DAYS);
    assertNotSame(base, shifted);
    assertEquals("2020-01-24T12:23:56", shifted.toString());

    // A negative amount shifts backwards.
    shifted = LocalDateTimeUtil.offset(base, -1, ChronoUnit.DAYS);
    assertEquals("2020-01-22T12:23:56", shifted.toString());
}
|
/**
 * Reads the next buffer for the given subpartition from the remote-storage
 * partition file.
 *
 * <p>Tracks per (partition, subpartition) the next buffer index and current
 * segment id. When the caller asks for a different segment than the tracked
 * one, the remote storage scanner is told to watch that segment. A single
 * unpooled memory segment of {@code bufferSizeBytes} is allocated per call
 * and freed on every path that does not hand a buffer to the caller.
 *
 * @param partitionId the partition to read from
 * @param subpartitionId the subpartition to read from
 * @param segmentId the segment the caller wants to consume
 * @return the next buffer, or {@link Optional#empty()} when nothing is
 *         currently readable (in which case the subpartition is also removed
 *         from the available-subpartitions queue)
 */
@Override
public Optional<Buffer> getNextBuffer(
        TieredStoragePartitionId partitionId,
        TieredStorageSubpartitionId subpartitionId,
        int segmentId) {
    // Get current segment id and buffer index; (0, 0) is the initial state
    // for a subpartition that has never been read.
    Tuple2<Integer, Integer> bufferIndexAndSegmentId =
            currentBufferIndexAndSegmentIds
                    .computeIfAbsent(partitionId, ignore -> new HashMap<>())
                    .getOrDefault(subpartitionId, Tuple2.of(0, 0));
    int currentBufferIndex = bufferIndexAndSegmentId.f0;
    int currentSegmentId = bufferIndexAndSegmentId.f1;
    if (segmentId != currentSegmentId) {
        // The requested segment is not the tracked one; ask the scanner to
        // watch it so availability can be signalled later.
        remoteStorageScanner.watchSegment(partitionId, subpartitionId, segmentId);
    }
    // Read buffer from the partition file in remote storage.
    MemorySegment memorySegment = MemorySegmentFactory.allocateUnpooledSegment(bufferSizeBytes);
    PartitionFileReader.ReadBufferResult readBufferResult = null;
    try {
        readBufferResult =
                partitionFileReader.readBuffer(
                        partitionId,
                        subpartitionId,
                        segmentId,
                        currentBufferIndex,
                        memorySegment,
                        FreeingBufferRecycler.INSTANCE,
                        null,
                        null);
    } catch (IOException e) {
        // Free the segment before rethrowing so the read failure does not leak memory.
        memorySegment.free();
        ExceptionUtils.rethrow(e, "Failed to read buffer from partition file.");
    }
    if (readBufferResult != null && !readBufferResult.getReadBuffers().isEmpty()) {
        List<Buffer> readBuffers = readBufferResult.getReadBuffers();
        // Exactly one buffer is expected per read here.
        checkState(readBuffers.size() == 1);
        Buffer buffer = readBuffers.get(0);
        // Advance the tracked buffer index and pin the tracked segment to the
        // one just read from.
        currentBufferIndexAndSegmentIds
                .get(partitionId)
                .put(subpartitionId, Tuple2.of(++currentBufferIndex, segmentId));
        return Optional.of(buffer);
    } else {
        // Nothing read: release the unused segment.
        memorySegment.free();
    }
    // No data available — drop this subpartition from the available queue
    // until the scanner signals it again.
    synchronized (availableSubpartitionsQueues) {
        availableSubpartitionsQueues.get(partitionId).remove(subpartitionId);
    }
    return Optional.empty();
}
|
@Test
void testGetEmptyBuffer() {
    // An agent backed by a reader that returns nothing must report "no buffer".
    TieredStoragePartitionId partition =
            new TieredStoragePartitionId(new ResultPartitionID());
    RemoteTierConsumerAgent agent =
            new RemoteTierConsumerAgent(
                    Collections.singletonList(
                            new TieredStorageConsumerSpec(
                                    0,
                                    partition,
                                    new TieredStorageInputChannelId(0),
                                    new ResultSubpartitionIndexSet(0))),
                    new RemoteStorageScanner(remoteStoragePath),
                    new TestingPartitionFileReader.Builder().build(),
                    1024);
    assertThat(agent.getNextBuffer(partition, new TieredStorageSubpartitionId(0), 0))
            .isEmpty();
}
|
/**
 * Checks whether an existing application is registered for the current session.
 *
 * @param flow the active flow (unused here, required by the interface)
 * @param request the incoming app session request (unused here)
 * @return {@code OkResponse} when the status is "OK", a pending
 *         {@code StatusResponse} (and a state switch to
 *         {@code EXISTING_APPLICATION_FOUND}) when it is "PENDING",
 *         otherwise {@code NokResponse}
 */
@Override
public AppResponse process(Flow flow, AppSessionRequest request) {
    // Without a registration id there is no application to look up.
    if (appSession.getRegistrationId() == null) {
        return new NokResponse();
    }
    Map<String, String> result = digidClient.getExistingApplication(appSession.getRegistrationId());
    // Read the status once; comparing with the literal first also guards
    // against a missing/null status entry (previously an NPE risk).
    String status = result.get(lowerUnderscore(STATUS));
    if ("OK".equals(status)) {
        return new OkResponse();
    } else if ("PENDING".equals(status)) {
        // An application already exists; switch state to require replace action.
        appSession.setState(State.EXISTING_APPLICATION_FOUND.name());
        return new StatusResponse("PENDING");
    } else {
        return new NokResponse();
    }
}
|
@Test
void processOKTest(){
    // The client reports an existing application whose status is "OK".
    when(digidClientMock.getExistingApplication(1337L))
        .thenReturn(Map.of(lowerUnderscore(STATUS), "OK"));

    final AppResponse response = checkExistingApplication.process(flowMock, null);

    // The step must answer with an OkResponse carrying that status.
    assertTrue(response instanceof OkResponse);
    assertEquals("OK", ((OkResponse) response).getStatus());
}
|
/**
 * Compares two flat-record object nodes for equality.
 *
 * Two nulls are equal; a single null never is. Nodes of differently named
 * schemas are unequal. Otherwise the nodes are aligned on their common
 * schema and compared field-wise.
 */
public static boolean equals(FlatRecordTraversalObjectNode left, FlatRecordTraversalObjectNode right) {
    if (left == null || right == null) {
        // Equal only when both are null.
        return left == right;
    }
    // Records of differently named schemas can never match.
    if (!left.getSchema().getName().equals(right.getSchema().getName())) {
        return false;
    }
    // Align both nodes on their shared schema before the field-wise comparison.
    extractCommonObjectSchema(left, right);
    return compare(left, right);
}
|
@Test
public void shouldFindTwoPrimitiveSetsToBeDifferentIfContentIsDifferent() {
    // First record holds the set {15, 5}.
    IntSetTypeState1 first = new IntSetTypeState1();
    first.id = "ID";
    first.intSet = new HashSet<>(Arrays.asList(15, 5));
    writer1.reset();
    mapper1.writeFlat(first, writer1);
    FlatRecord firstRecord = writer1.generateFlatRecord();

    // Second record holds {5, 20} — only one element in common with the first.
    IntSetTypeState2 second = new IntSetTypeState2();
    second.id = "ID";
    second.intSet = new HashSet<>(Arrays.asList(5, 20));
    writer2.reset();
    mapper2.writeFlat(second, writer2);
    FlatRecord secondRecord = writer2.generateFlatRecord();

    FlatRecordTraversalObjectNode left = new FlatRecordTraversalObjectNode(firstRecord);
    FlatRecordTraversalObjectNode right = new FlatRecordTraversalObjectNode(secondRecord);

    // Differing contents must compare unequal in both argument orders
    // (set element order itself must not matter).
    assertThat(FlatRecordTraversalObjectNodeEquality.equals(left, right)).isFalse();
    assertThat(FlatRecordTraversalObjectNodeEquality.equals(right, left)).isFalse();
}
|
/**
 * Handles a chat message, recording compost usage on a nearby patch.
 *
 * <p>Only GAMEMESSAGE and SPAM typed messages are considered. If the message
 * text maps to a compost type, expired pending actions are pruned and the
 * first pending action whose patch the player stands beside has its compost
 * state recorded and is removed from the pending map.
 *
 * @param e the chat message event
 */
@Subscribe
public void onChatMessage(ChatMessage e)
{
	if (e.getType() != ChatMessageType.GAMEMESSAGE && e.getType() != ChatMessageType.SPAM)
	{
		return;
	}

	// Null means the message text did not describe a compost action.
	CompostState compostUsed = determineCompostUsed(e.getMessage());
	if (compostUsed == null)
	{
		return;
	}

	// Drop stale pending actions before matching against the current message.
	this.expirePendingActions();

	// Attribute the compost to the first pending action whose patch the
	// player is standing beside, then clear that pending entry.
	pendingCompostActions.values()
		.stream()
		.filter(this::playerIsBesidePatch)
		.findFirst()
		.ifPresent(pc ->
		{
			setCompostState(pc.getFarmingPatch(), compostUsed);
			pendingCompostActions.remove(pc.getFarmingPatch());
		});
}
|
@Test
public void onChatMessage_handlesBucketUseMessages()
{
	// A SPAM-typed compost message, while a pending action exists for the
	// patch, should persist the compost state to the RS profile config.
	ChatMessage message = mock(ChatMessage.class);
	when(message.getType()).thenReturn(ChatMessageType.SPAM);
	when(message.getMessage()).thenReturn("You treat the herb patch with compost.");

	compostTracker.pendingCompostActions.put(farmingPatch,
		new CompostTracker.PendingCompost(Instant.MAX, worldPoint, farmingPatch));

	compostTracker.onChatMessage(message);

	verify(configManager).setRSProfileConfiguration("timetracking", "MOCK.compost", CompostState.COMPOST);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.