focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
@Override
public Num calculate(BarSeries series, Position position) {
Num profitLossRatio = profitLossRatioCriterion.calculate(series, position);
Num numberOfPositions = numberOfPositionsCriterion.calculate(series, position);
Num numberOfWinningPositions = numberOfWinningPositionsCriterion.calculate(series, position);
return calculate(series, profitLossRatio, numberOfWinningPositions, numberOfPositions);
} | @Test
public void calculateProfitWithMixedShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 160, 200, 120, 100, 80, 60);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
Trade.sellAt(2, series), Trade.buyAt(5, series));
AnalysisCriterion avgLoss = getCriterion();
assertNumEquals(0.25, avgLoss.calculate(series, tradingRecord));
} |
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
} | @Test
void assertGetBinaryProtocolValueWithMySQLTypeDate() {
assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.DATE), instanceOf(MySQLDateBinaryProtocolValue.class));
} |
synchronized void markSuspectBlock(String storageId, ExtendedBlock block) {
if (!isEnabled()) {
LOG.debug("Not scanning suspicious block {} on {}, because the block " +
"scanner is disabled.", block, storageId);
return;
}
VolumeScanner scanner = scanners.get(storageId);
if (scanner == null) {
// This could happen if the volume is in the process of being removed.
// The removal process shuts down the VolumeScanner, but the volume
// object stays around as long as there are references to it (which
// should not be that long.)
LOG.info("Not scanning suspicious block {} on {}, because there is no " +
"volume scanner for that storageId.", block, storageId);
return;
}
scanner.markSuspectBlock(block);
} | @Test(timeout=120000)
public void testMarkSuspectBlock() throws Exception {
Configuration conf = new Configuration();
// Set a really long scan period.
conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
conf.set(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
TestScanResultHandler.class.getName());
conf.setLong(INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS, 0L);
final TestContext ctx = new TestContext(conf, 1);
final int NUM_EXPECTED_BLOCKS = 10;
ctx.createFiles(0, NUM_EXPECTED_BLOCKS, 1);
final TestScanResultHandler.Info info =
TestScanResultHandler.getInfo(ctx.volumes.get(0));
String storageID = ctx.volumes.get(0).getStorageID();
synchronized (info) {
info.sem = new Semaphore(4);
info.shouldRun = true;
info.notify();
}
// Scan the first 4 blocks
LOG.info("Waiting for the first 4 blocks to be scanned.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned >= 4) {
LOG.info("info = {}. blockScanned has now reached 4.", info);
return true;
} else {
LOG.info("info = {}. Waiting for blockScanned to reach 4.", info);
return false;
}
}
}
}, 50, 30000);
// We should have scanned 4 blocks
synchronized (info) {
assertEquals("Expected 4 good blocks.", 4, info.goodBlocks.size());
info.goodBlocks.clear();
assertEquals("Expected 4 blocksScanned", 4, info.blocksScanned);
assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
info.blocksScanned = 0;
}
ExtendedBlock first = ctx.getFileBlock(0, 0);
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
// When we increment the semaphore, the TestScanResultHandler will finish
// adding the block that it was scanning previously (the 5th block).
// We increment the semaphore twice so that the handler will also
// get a chance to see the suspect block which we just requested the
// VolumeScanner to process.
info.sem.release(2);
LOG.info("Waiting for 2 more blocks to be scanned.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned >= 2) {
LOG.info("info = {}. blockScanned has now reached 2.", info);
return true;
} else {
LOG.info("info = {}. Waiting for blockScanned to reach 2.", info);
return false;
}
}
}
}, 50, 30000);
synchronized (info) {
assertTrue("Expected block " + first + " to have been scanned.",
info.goodBlocks.contains(first));
assertEquals(2, info.goodBlocks.size());
info.goodBlocks.clear();
assertEquals("Did not expect bad blocks.", 0, info.badBlocks.size());
assertEquals(2, info.blocksScanned);
info.blocksScanned = 0;
}
// Re-mark the same block as suspect.
ctx.datanode.getBlockScanner().markSuspectBlock(storageID, first);
info.sem.release(10);
LOG.info("Waiting for 5 more blocks to be scanned.");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
synchronized (info) {
if (info.blocksScanned >= 5) {
LOG.info("info = {}. blockScanned has now reached 5.", info);
return true;
} else {
LOG.info("info = {}. Waiting for blockScanned to reach 5.", info);
return false;
}
}
}
}, 50, 30000);
synchronized (info) {
assertEquals(5, info.goodBlocks.size());
assertEquals(0, info.badBlocks.size());
assertEquals(5, info.blocksScanned);
// We should not have rescanned the "suspect block",
// because it was recently rescanned by the suspect block system.
// This is a test of the "suspect block" rate limiting.
Assert.assertFalse("We should not " +
"have rescanned block " + first + ", because it should have been " +
"in recentSuspectBlocks.", info.goodBlocks.contains(first));
info.blocksScanned = 0;
}
ctx.close();
} |
@Override
public void upgrade() {
if (clusterConfigService.get(MigrationCompleted.class) != null) {
LOG.debug("Migration already completed.");
return;
}
final Set<String> viewIds = new HashSet<>();
final FindIterable<Document> documents = viewsCollection.find();
boolean viewMigrated;
for (final Document view : documents) {
viewMigrated = false;
final Document states = view.get("state", Document.class);
for (Map.Entry<String, Object> obj : states.entrySet()) {
final Document state = (Document) obj.getValue();
if (state.get("widgets") instanceof List) {
@SuppressWarnings("unchecked")
final List<Document> widgets = (List) state.get("widgets");
for (final Document widget : widgets) {
final String type = widget.getString("type");
if (type.equals("aggregation")) {
final Document config = widget.get("config", Document.class);
final Document formatSettings = config.get("formatting_settings", Document.class);
if (formatSettings == null) {
continue;
}
final Object charColorsObj = formatSettings.get("chart_colors");
if (charColorsObj == null) {
continue;
}
viewMigrated = true;
@SuppressWarnings({"unchecked", "rawtypes"})
final Map<String, String> chartColors =
(Map) charColorsObj;
List<Document> chartColorSettings = chartColors.entrySet().stream().map(entry -> {
final Document chartColorFieldSetting = new Document();
chartColorFieldSetting.put("field_name", entry.getKey());
chartColorFieldSetting.put("chart_color", entry.getValue());
return chartColorFieldSetting;
}).collect(Collectors.toList());
formatSettings.put("chart_colors", chartColorSettings);
config.put("formatting_settings", formatSettings);
widget.put("config", config);
}
}
if (viewMigrated) {
state.put("widgets", widgets);
}
}
}
if (viewMigrated) {
viewsCollection.updateOne(new BasicDBObject("_id", view.getObjectId("_id")), new Document("$set", view));
final String viewId = view.getObjectId("_id").toString();
viewIds.add(viewId);
}
}
LOG.info("Migration completed. {} views where migrated.", viewIds.size());
clusterConfigService.write(V20190127111728_MigrateWidgetFormatSettings.MigrationCompleted.create(
viewIds.size(), viewIds));
} | @Test
@MongoDBFixtures("V20190127111728_MigrateWidgetFormatSettings_without_color_mapping.json")
public void testMigrationWithoutChartColorMapping() {
final BasicDBObject dbQuery1 = new BasicDBObject();
dbQuery1.put("_id", new ObjectId("5e2ee372b22d7970576b2eb3"));
final MongoCollection<Document> collection = mongoDB.mongoConnection()
.getMongoDatabase()
.getCollection("views");
migration.upgrade();
final FindIterable<Document> views = collection.find(dbQuery1);
final Document view1 = views.first();
@SuppressWarnings("unchecked")
final List<Document> widgets1 = (List) view1.get("state", Document.class).get("2c67cc0f-c62e-47c1-8b70-e3198925e6bc", Document.class).get("widgets");
assertThat(widgets1.size()).isEqualTo(2);
Set<Document> aggregationWidgets =widgets1.stream().filter(w -> w.getString("type")
.equals("aggregation")).collect(Collectors.toSet());
assertThat(aggregationWidgets.size()).isEqualTo(1);
final Document aggregationWidget = aggregationWidgets.iterator().next();
final Document config = aggregationWidget.get("config", Document.class);
final Document formattingSettings = config.get("formatting_settings", Document.class);
assertThat(formattingSettings.get("chart_colors")).isNull();
} |
@Override
public Optional<ClusterHealthStatus> getClusterHealthStatus() {
try {
ClusterHealthResponse healthResponse = getRestHighLevelClient().cluster()
.health(new ClusterHealthRequest().waitForYellowStatus().timeout(timeValueSeconds(30)), RequestOptions.DEFAULT);
return Optional.of(healthResponse.getStatus());
} catch (IOException e) {
LOG.trace("Failed to check health status ", e);
return Optional.empty();
}
} | @Test
public void should_return_status() {
mockServerResponse(200, JSON_SUCCESS_RESPONSE);
assertThat(underTest.getClusterHealthStatus())
.hasValue(ClusterHealthStatus.YELLOW);
} |
@Override
public void execute() throws MojoExecutionException {
if (skip) {
getLog().info("Skipped execution of ActiveMQ Broker");
return;
}
addActiveMQSystemProperties();
getLog().info("Loading broker configUri: " + configUri);
if (this.xBeanFileResolver.isXBeanFile(configUri)) {
getLog().debug("configUri before transformation: " + configUri);
configUri = this.xBeanFileResolver.toUrlCompliantAbsolutePath(configUri);
getLog().debug("configUri after transformation: " + configUri);
}
this.useBrokerManager().start(fork, configUri);
//
// Register the transport connector URIs in the Maven project.
//
this.registerTransportConnectorUris();
getLog().info("Started the ActiveMQ Broker");
} | @Test
public void testExceptionOnGetPublishableConnectString () throws Exception {
TransportConnector mockTransportConnector = Mockito.mock(TransportConnector.class);
Mockito.when(mockTransportConnector.toString()).thenReturn("x-conn-x");
Mockito.when(mockTransportConnector.getPublishableConnectString()).thenThrow(testException);
this.transportConnectorList.add(mockTransportConnector);
this.startBrokerMojo.execute();
Mockito.verify(this.mockMavenLog).warn("error on obtaining broker connector uri; connector=x-conn-x",
this.testException);
} |
public final void hasSize(int expectedSize) {
checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize);
check("size()").that(checkNotNull(actual).size()).isEqualTo(expectedSize);
} | @Test
public void hasSizeNegative() {
try {
assertThat(ImmutableTable.of(1, 2, 3)).hasSize(-1);
fail();
} catch (IllegalArgumentException expected) {
}
} |
private Mono<ServerResponse> search(ServerRequest request) {
return Mono.fromSupplier(
() -> new SearchParam(request.queryParams()))
.map(param -> {
var option = new SearchOption();
option.setIncludeTypes(List.of(PostHaloDocumentsProvider.POST_DOCUMENT_TYPE));
option.setKeyword(param.getKeyword());
option.setLimit(param.getLimit());
option.setHighlightPreTag(param.getHighlightPreTag());
option.setHighlightPostTag(param.getHighlightPostTag());
return option;
})
.flatMap(this::performSearch)
.flatMap(result -> ServerResponse.ok().bodyValue(result));
} | @Test
void shouldBeCompatibleWithOldSearchApi() {
var searchResult = new SearchResult();
when(searchService.search(any(SearchOption.class)))
.thenReturn(Mono.just(searchResult));
client.get().uri(uriBuilder -> uriBuilder.path("/indices/post")
.queryParam("keyword", "halo")
.build())
.exchange()
.expectStatus().isOk()
.expectBody(SearchResult.class)
.isEqualTo(searchResult);
verify(searchService).search(assertArg(o -> {
assertEquals("halo", o.getKeyword());
// make sure the filters are overwritten
assertTrue(o.getFilterExposed());
assertTrue(o.getFilterPublished());
assertFalse(o.getFilterRecycled());
}));
} |
public void afterDelivery() throws ResourceException {
LOG.trace("Invoking MessageEndpoint.afterDelivery()");
state.afterDelivery(this);
} | @Test(timeout = 60000)
public void testAfterDeliveryFailure() throws Exception {
setupBeforeDeliverySuccessful();
setupOnMessageSuccessful();
context.checking(new Expectations() {{
oneOf (mockEndpointAndListener).afterDelivery(); will(throwException(new ResourceException()));
}});
setupExpectRelease();
doBeforeDeliveryExpectSuccess();
doOnMessageExpectSuccess();
try {
endpointProxy.afterDelivery();
fail("An exception should have been thrown");
} catch (Exception e) {
assertTrue(true);
}
doFullyDeadCheck();
} |
@Override
public int getStartIndex() {
if (null == owner) {
return tableName.getStartIndex();
}
return owner.getOwner().isPresent() ? owner.getOwner().get().getStartIndex() : owner.getStartIndex();
} | @Test
void assertGetStartIndexWithoutOwner() {
SimpleTableSegment tableSegment = new SimpleTableSegment(new TableNameSegment(10, 13, new IdentifierValue("tbl")));
assertThat(tableSegment.getStartIndex(), is(10));
} |
public static <R> R callInstanceMethod(
final Object instance, final String methodName, ClassParameter<?>... classParameters) {
perfStatsCollector.incrementCount(
String.format(
"ReflectionHelpers.callInstanceMethod-%s_%s",
instance.getClass().getName(), methodName));
try {
final Class<?>[] classes = ClassParameter.getClasses(classParameters);
final Object[] values = ClassParameter.getValues(classParameters);
return traverseClassHierarchy(
instance.getClass(),
NoSuchMethodException.class,
traversalClass -> {
Method declaredMethod = traversalClass.getDeclaredMethod(methodName, classes);
declaredMethod.setAccessible(true);
return (R) declaredMethod.invoke(instance, values);
});
} catch (InvocationTargetException e) {
if (e.getTargetException() instanceof RuntimeException) {
throw (RuntimeException) e.getTargetException();
}
if (e.getTargetException() instanceof Error) {
throw (Error) e.getTargetException();
}
throw new RuntimeException(e.getTargetException());
} catch (Exception e) {
throw new RuntimeException(e);
}
} | @Test
public void callInstanceMethodReflectively_rethrowsError() {
ExampleDescendant example = new ExampleDescendant();
try {
ReflectionHelpers.callInstanceMethod(example, "throwError");
fail("Expected exception not thrown");
} catch (RuntimeException e) {
throw new RuntimeException("Incorrect exception thrown", e);
} catch (TestError e) {
}
} |
@NonNull
public List<FilePath> list() throws IOException, InterruptedException {
return list((FileFilter) null);
} | @Test public void listWithDefaultExcludes() throws Exception {
File baseDir = temp.getRoot();
final Set<FilePath> expected = new HashSet<>();
expected.add(createFilePath(baseDir, "top", "sub", "backup~"));
expected.add(createFilePath(baseDir, "top", "CVS", "somefile,v"));
expected.add(createFilePath(baseDir, "top", ".git", "config"));
// none of the files are included by default (default includes true)
assertEquals(0, new FilePath(baseDir).list("**", "").length);
final FilePath[] result = new FilePath(baseDir).list("**", "", false);
assertEquals(expected, new HashSet<>(Arrays.asList(result)));
} |
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
} | @Test
public void testSpdySynReplyFrameHeaderBlock() throws Exception {
short type = 2;
byte flags = 0;
int length = 4;
int headerBlockLength = 1024;
int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length + headerBlockLength);
encodeControlFrameHeader(buf, type, flags, length + headerBlockLength);
buf.writeInt(streamId);
ByteBuf headerBlock = Unpooled.buffer(headerBlockLength);
for (int i = 0; i < 256; i ++) {
headerBlock.writeInt(RANDOM.nextInt());
}
decoder.decode(buf);
decoder.decode(headerBlock);
verify(delegate).readSynReplyFrame(streamId, false);
verify(delegate).readHeaderBlock(headerBlock.slice(0, headerBlock.writerIndex()));
verify(delegate).readHeaderBlockEnd();
assertFalse(buf.isReadable());
assertFalse(headerBlock.isReadable());
buf.release();
headerBlock.release();
} |
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
try {
final Set<? extends AuthenticationRequest> options = new SwiftAuthenticationService().getRequest(host, prompt);
for(Iterator<? extends AuthenticationRequest> iter = options.iterator(); iter.hasNext(); ) {
try {
final AuthenticationRequest auth = iter.next();
if(log.isInfoEnabled()) {
log.info(String.format("Attempt authentication with %s", auth));
}
client.authenticate(auth);
break;
}
catch(GenericException failure) {
final BackgroundException reason = new SwiftExceptionMappingService().map(failure);
if(reason instanceof LoginFailureException
|| reason instanceof AccessDeniedException
|| reason instanceof InteroperabilityException) {
if(!iter.hasNext()) {
throw failure;
}
}
else {
throw failure;
}
}
cancel.verify();
}
}
catch(GenericException e) {
throw new SwiftExceptionMappingService().map(e);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map(e);
}
} | @Test(expected = LoginFailureException.class)
public void testLoginFailure() throws Exception {
final Host host = new Host(new SwiftProtocol(), "identity.api.rackspacecloud.com", new Credentials(
"a", "s"
));
assertNotNull(session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()));
assertTrue(session.isConnected());
assertNotNull(session.getClient());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() != 2) {
onInvalidDataReceived(device, data);
return;
}
final int value = data.getIntValue(Data.FORMAT_UINT16_LE, 0);
final GlucoseFeatures features = new GlucoseFeatures(value);
onGlucoseFeaturesReceived(device, features);
} | @Test
public void onInvalidDataReceived() {
final Data data = new Data();
callback.onDataReceived(null, data);
assertTrue(invalidData);
} |
@Override
@SneakyThrows(TransactionException.class)
public void commit(final boolean rollbackOnly) {
checkSeataATEnabled();
try {
SeataTransactionHolder.get().commit();
} finally {
SeataTransactionHolder.clear();
RootContext.unbind();
SeataXIDContext.remove();
}
} | @Test
void assertCommit() {
SeataTransactionHolder.set(GlobalTransactionContext.getCurrentOrCreate());
setXID("testXID");
seataTransactionManager.commit(false);
assertResult(GlobalCommitRequest.class, GlobalCommitResponse.class);
} |
public ResT receive(long timeoutMs) throws IOException {
if (mCompleted) {
return null;
}
if (mCanceled) {
throw new CancelledException(formatErrorMessage("Stream is already canceled."));
}
long startMs = System.currentTimeMillis();
while (true) {
long waitedForMs = System.currentTimeMillis() - startMs;
if (waitedForMs >= timeoutMs) {
throw new DeadlineExceededException(formatErrorMessage(
"Timeout waiting for response after %dms. clientClosed: %s clientCancelled: %s "
+ "serverClosed: %s", timeoutMs, mClosed, mCanceled, mClosedFromRemote));
}
// Wait for a minute max
long waitMs = Math.min(timeoutMs - waitedForMs, Constants.MINUTE_MS);
try {
Object response = mResponses.poll(waitMs, TimeUnit.MILLISECONDS);
if (response == null) {
checkError(); // The stream could have errored while we were waiting
// Log a warning before looping again
LOG.warn("Client did not receive message from stream, will wait again. totalWaitMs: {} "
+ "clientClosed: {} clientCancelled: {} serverClosed: {} description: {}",
System.currentTimeMillis() - startMs, mClosed, mCanceled, mClosedFromRemote,
mDescription);
continue;
}
if (response == mResponseObserver) {
mCompleted = true;
return null;
}
checkError();
return (ResT) response;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new CancelledException(
formatErrorMessage("Interrupted while waiting for response."), e);
}
}
} | @Test
public void onCompleted() throws Exception {
mResponseObserver.onCompleted();
WriteResponse actualResponse = mStream.receive(TIMEOUT);
assertNull(actualResponse);
} |
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
} | @Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
buildFetcher();
MemoryRecords records = MemoryRecords.withRecords(Compression.NONE,
new SimpleRecord("0".getBytes(), "v".getBytes()),
new SimpleRecord("1".getBytes(), "v".getBytes()),
new SimpleRecord("2".getBytes(), "v".getBytes()),
new SimpleRecord(null, "value".getBytes()));
// Remove the last record to simulate compaction
MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) {
@Override
protected BatchRetentionResult checkBatchRetention(RecordBatch batch) {
return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false);
}
@Override
protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
return record.key() != null;
}
}, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
result.outputBuffer().flip();
MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer());
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, compactedRecords, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetchRecords();
assertTrue(allFetchedRecords.containsKey(tp0));
List<ConsumerRecord<byte[], byte[]>> fetchedRecords = allFetchedRecords.get(tp0);
assertEquals(3, fetchedRecords.size());
for (int i = 0; i < 3; i++) {
assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key()));
}
// The next offset should point to the next batch
assertEquals(4L, subscriptions.position(tp0).offset);
} |
@Override
public void updateLevel(MemberLevelUpdateReqVO updateReqVO) {
// 校验存在
validateLevelExists(updateReqVO.getId());
// 校验配置是否有效
validateConfigValid(updateReqVO.getId(), updateReqVO.getName(), updateReqVO.getLevel(), updateReqVO.getExperience());
// 更新
MemberLevelDO updateObj = MemberLevelConvert.INSTANCE.convert(updateReqVO);
memberLevelMapper.updateById(updateObj);
} | @Test
public void testUpdateLevel_notExists() {
// 准备参数
MemberLevelUpdateReqVO reqVO = randomPojo(MemberLevelUpdateReqVO.class);
// 调用, 并断言异常
assertServiceException(() -> levelService.updateLevel(reqVO), LEVEL_NOT_EXISTS);
} |
public static HttpAction buildUnauthenticatedAction(final WebContext context) {
val hasHeader = context.getResponseHeader(HttpConstants.AUTHENTICATE_HEADER).isPresent();
if (alwaysUse401ForUnauthenticated) {
// add the WWW-Authenticate header to be compliant with the HTTP spec if it does not already exist
if (!hasHeader) {
context.setResponseHeader(HttpConstants.AUTHENTICATE_HEADER, HttpConstants.BEARER_HEADER_PREFIX + "realm=\"pac4j\"");
}
return new UnauthorizedAction();
} else {
if (hasHeader) {
return new UnauthorizedAction();
} else {
return new ForbiddenAction();
}
}
} | @Test
public void testBuildUnauthenticated401WithoutHeader() {
final WebContext context = MockWebContext.create();
val action = HttpActionHelper.buildUnauthenticatedAction(context);
assertTrue(action instanceof UnauthorizedAction);
assertEquals("Bearer realm=\"pac4j\"", context.getResponseHeader(HttpConstants.AUTHENTICATE_HEADER).get());
} |
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return join(otherStream, toValueJoinerWithKey(joiner), windows);
} | @Test
public void shouldNotAllowNullValueJoinerOnTableJoin() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.join(testTable, (ValueJoiner<? super String, ? super String, ?>) null));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
} |
@Override
public void collectLong(MetricDescriptor descriptor, long value) {
for (MetricsCollector collector : collectors) {
collector.collectLong(descriptor, value);
}
} | @Test
public void testCollectLong() {
compositeCollector.collectLong(metricsDescriptor, 42);
verify(collectorMock1).collectLong(metricsDescriptor, 42);
verify(collectorMock2).collectLong(metricsDescriptor, 42);
} |
/**
 * Parses the {@code CUSTOM_SIGNERS} configuration entry and registers each custom signer.
 * Accepted per-entry formats: "name" (use a pre-defined signer), "name:SignerClass",
 * or "name:SignerClass:SignerInitializerClass".
 *
 * @throws IllegalArgumentException if an entry does not match one of the three formats
 * @throws RuntimeException if a named SignerInitializer class cannot be loaded
 */
public void initCustomSigners() {
    String[] customSigners = ownerConf.getTrimmedStrings(CUSTOM_SIGNERS);
    if (customSigners == null || customSigners.length == 0) {
        // No custom signers specified, nothing to do.
        LOG.debug("No custom signers specified");
        return;
    }
    for (String customSigner : customSigners) {
        String[] parts = customSigner.split(":");
        if (!(parts.length == 1 || parts.length == 2 || parts.length == 3)) {
            String message = "Invalid format (Expected name, name:SignerClass,"
                + " name:SignerClass:SignerInitializerClass)"
                + " for CustomSigner: [" + customSigner + "]";
            LOG.error(message);
            throw new IllegalArgumentException(message);
        }
        if (parts.length == 1) {
            // Nothing to do. Trying to use a pre-defined Signer
        } else {
            // Register any custom Signer
            maybeRegisterSigner(parts[0], parts[1], ownerConf);
            // If an initializer is specified, take care of instantiating it and
            // setting it up
            if (parts.length == 3) {
                Class<? extends AwsSignerInitializer> clazz = null;
                try {
                    // Unchecked cast: the configured class is assumed to implement
                    // AwsSignerInitializer — NOTE(review): a mis-typed class would only
                    // fail later; confirm that is acceptable.
                    clazz = (Class<? extends AwsSignerInitializer>) ownerConf
                        .getClassByName(parts[2]);
                } catch (ClassNotFoundException e) {
                    throw new RuntimeException(String.format(
                        "SignerInitializer class" + " [%s] not found for signer [%s]",
                        parts[2], parts[0]), e);
                }
                LOG.debug("Creating signer initializer: [{}] for signer: [{}]",
                    parts[2], parts[0]);
                AwsSignerInitializer signerInitializer = ReflectionUtils
                    .newInstance(clazz, null);
                // Keep a reference to every created initializer.
                initializers.add(signerInitializer);
                signerInitializer
                    .registerStore(bucketName, ownerConf, delegationTokenProvider,
                        ownerUgi);
            }
        }
    }
} | @Test
public void testMultipleCustomSignerInitialization() throws IOException {
Configuration config = new Configuration();
config.set(CUSTOM_SIGNERS,
"testsigner1:" + SignerForTest1.class.getName() + "," + "testsigner2:"
+ SignerForTest2.class.getName());
SignerManager signerManager = new SignerManager("dontcare", null, config,
UserGroupInformation.getCurrentUser());
signerManager.initCustomSigners();
Signer s1 = SignerFactory.createSigner("testsigner1", null);
s1.sign(null, null);
Assertions.assertThat(SignerForTest1.initialized)
.as(SignerForTest1.class.getName() + " not initialized")
.isEqualTo(true);
Signer s2 = SignerFactory.createSigner("testsigner2", null);
s2.sign(null, null);
Assertions.assertThat(SignerForTest2.initialized)
.as(SignerForTest2.class.getName() + " not initialized")
.isEqualTo(true);
} |
// Convenience overload: validates a value against a schema without a field name
// for error context (delegates with a null name).
public static void validateValue(Schema schema, Object value) {
    validateValue(null, schema, value);
} | @Test
public void testValidateValueMismatchFloat() {
assertThrows(DataException.class,
() -> ConnectSchema.validateValue(Schema.FLOAT32_SCHEMA, 1.0));
} |
/**
 * Returns the values of the given map as a fresh mutable list, or null for a null map
 * (SQL-style NULL propagation for the UDF).
 *
 * @param input the source map; may be null
 * @return a new list containing the map's values, or null
 */
@Udf
public <T> List<T> mapValues(final Map<String, T> input) {
    if (input == null) {
        return null;
    }
    // java.util suffices here — same behaviour as Guava's Lists.newArrayList(values).
    return new java.util.ArrayList<>(input.values());
} | @Test
public void shouldGetKeys() {
final Map<String, String> input = new HashMap<>();
input.put("foo", "spam");
input.put("bar", "baloney");
assertThat(udf.mapValues(input), containsInAnyOrder("spam", "baloney"));
} |
/**
 * Evaluates every output field of the processing context and stores each non-null
 * result both on the PMML4Result and back into the ProcessingDTO, so later output
 * fields can reference earlier results.
 */
static void populateOutputFields(final PMML4Result toUpdate,
                                 final ProcessingDTO processingDTO) {
    logger.debug("populateOutputFields {} {}", toUpdate, processingDTO);
    for (KiePMMLOutputField outputField : processingDTO.getOutputFields()) {
        Object variableValue = outputField.evaluate(processingDTO);
        // Null evaluations are silently skipped — only concrete results are recorded.
        if (variableValue != null) {
            String variableName = outputField.getName();
            toUpdate.addResultVariable(variableName, variableValue);
            processingDTO.addKiePMMLNameValue(new KiePMMLNameValue(variableName, variableValue));
        }
    }
} | @Test
void populateTransformedOutputFieldWithFieldRef() {
final String mapMissingTo = "mapMissingTo";
final String variableName = "variableName";
final Object variableValue = 543.65434;
KiePMMLFieldRef kiePMMLFieldRef = new KiePMMLFieldRef(variableName, Collections.emptyList(), mapMissingTo);
KiePMMLOutputField outputField = KiePMMLOutputField.builder(OUTPUT_NAME, Collections.emptyList())
.withResultFeature(RESULT_FEATURE.TRANSFORMED_VALUE)
.withKiePMMLExpression(kiePMMLFieldRef)
.build();
KiePMMLTestingModel kiePMMLModel = testingModelBuilder(outputField).build();
ProcessingDTO processingDTO = buildProcessingDTOWithNameValues(kiePMMLModel, new KiePMMLNameValue(variableName, variableValue));
PMML4Result toUpdate = new PMML4Result();
PostProcess.populateOutputFields(toUpdate, processingDTO);
assertThat(toUpdate.getResultVariables()).isNotEmpty();
assertThat(toUpdate.getResultVariables()).containsKey(OUTPUT_NAME);
assertThat(toUpdate.getResultVariables().get(OUTPUT_NAME)).isEqualTo(variableValue);
} |
@Override
public void startCaching() {
    // From this point on, lookups go through a caching delegate.
    this.delegate = new CachingRuleFinder(dbClient, ruleDescriptionFormatter);
} | @Test
public void startCaching_sets_caching_delegate() {
underTest.startCaching();
assertThat(underTest.delegate).isInstanceOf(CachingRuleFinder.class);
} |
/**
 * Resolves the destination topic: an explicit override header wins, otherwise the
 * topic is derived from the endpoint URI with scheme and options stripped.
 */
@Override
public String getDestination(Exchange exchange, Endpoint endpoint) {
    final String overrideTopic = exchange.getIn().getHeader(OVERRIDE_TOPIC, String.class);
    return overrideTopic != null ? overrideTopic : stripSchemeAndOptions(endpoint);
} | @Test
public void testGetDestinationNoHeaderTopic() {
Exchange exchange = Mockito.mock(Exchange.class);
Message message = Mockito.mock(Message.class);
Endpoint endpoint = Mockito.mock(Endpoint.class);
Mockito.when(exchange.getIn()).thenReturn(message);
Mockito.when(endpoint.getEndpointUri())
.thenReturn("kafka:test?brokers=localhost:9092&consumersCount=1");
AbstractMessagingSpanDecorator decorator = new KafkaSpanDecorator();
assertEquals("test", decorator.getDestination(exchange, endpoint));
} |
/**
 * Returns the value of {@code paramToFind} in the query string of {@code input},
 * the empty string when the parameter appears without a value, or null when the URL
 * has no query string or the parameter is absent.
 */
@Udf
public String extractParam(
    @UdfParameter(description = "a valid URL") final String input,
    @UdfParameter(description = "the parameter key") final String paramToFind) {
    final String query = UrlParser.extract(input, URI::getQuery);
    if (query == null) {
        return null;
    }
    // Walk the query's key[=value] pairs (PARAM_SPLITTER/KV_SPLITTER are presumably
    // '&' and '=' splitters — defined elsewhere, confirm against their declarations).
    for (final String param : PARAM_SPLITTER.split(query)) {
        final List<String> kvParam = KV_SPLITTER.splitToList(param);
        if (kvParam.size() == 1 && kvParam.get(0).equals(paramToFind)) {
            // Key present with no value at all.
            return "";
        } else if (kvParam.size() == 2 && kvParam.get(0).equals(paramToFind)) {
            return kvParam.get(1);
        }
    }
    return null;
} | @Test
public void shouldThrowExceptionForMalformedURL() {
// When:
final KsqlException e = assertThrows(
KsqlException.class,
() -> extractUdf.extractParam("http://257.1/bogus/[url", "foo bar")
);
// Given:
assertThat(e.getMessage(), containsString("URL input has invalid syntax: http://257.1/bogus/[url"));
} |
/**
 * Parses an XHTML extension element, collecting every body sub-element until the
 * parser closes the element at {@code initialDepth}.
 */
@Override
public XHTMLExtension parse(XmlPullParser parser, int initialDepth, XmlEnvironment xmlEnvironment) throws IOException, XmlPullParserException {
    XHTMLExtension xhtmlExtension = new XHTMLExtension();
    while (true) {
        XmlPullParser.Event eventType = parser.getEventType();
        if (eventType == XmlPullParser.Event.START_ELEMENT) {
            String name = parser.getName();
            if (name.equals(Message.BODY)) {
                // Each body element is captured whole as its raw element text.
                xhtmlExtension.addBody(PacketParserUtils.parseElement(parser));
            }
        } else if (eventType == XmlPullParser.Event.END_ELEMENT) {
            // Back at the depth we started on means the extension element is closed.
            if (parser.getDepth() == initialDepth) {
                return xhtmlExtension;
            }
        }
        parser.next();
    }
} | @Test
public void parsesWell() throws IOException, XmlPullParserException {
InputStream inputStream = getClass().getResourceAsStream(XHTML_EXTENSION_SAMPLE_RESOURCE_NAME);
XmlPullParser parser = PacketParserUtils.getParserFor(inputStream);
XHTMLExtensionProvider provider = new XHTMLExtensionProvider();
ExtensionElement extension = provider.parse(parser, parser.getDepth(), null);
assertThat(extension, instanceOf(XHTMLExtension.class));
XHTMLExtension attachmentsInfo = (XHTMLExtension) extension;
assertThat(sampleXhtml(), equalsCharSequence(attachmentsInfo.getBodies().get(0)));
} |
/**
 * Runs any pipeline-breaker operators found in the stage plan ahead of the main OpChain.
 * Returns null when the plan contains no pipeline breakers; on failure, returns a result
 * whose error block carries the exception instead of throwing.
 */
@Nullable
public static PipelineBreakerResult executePipelineBreakers(OpChainSchedulerService scheduler,
    MailboxService mailboxService, WorkerMetadata workerMetadata, StagePlan stagePlan,
    Map<String, String> opChainMetadata, long requestId, long deadlineMs) {
    PipelineBreakerContext pipelineBreakerContext = new PipelineBreakerContext();
    PipelineBreakerVisitor.visitPlanRoot(stagePlan.getRootNode(), pipelineBreakerContext);
    if (!pipelineBreakerContext.getPipelineBreakerMap().isEmpty()) {
        try {
            // TODO: This PlanRequestContext needs to indicate it is a pre-stage opChain and only listens to pre-stage
            // OpChain receive-mail callbacks.
            // see also: MailboxIdUtils TODOs, de-couple mailbox id from query information
            OpChainExecutionContext opChainExecutionContext =
                new OpChainExecutionContext(mailboxService, requestId, deadlineMs, opChainMetadata,
                    stagePlan.getStageMetadata(), workerMetadata, null);
            return execute(scheduler, pipelineBreakerContext, opChainExecutionContext);
        } catch (Exception e) {
            // Errors are reported through the result's error block, never thrown to the caller.
            LOGGER.error("Caught exception executing pipeline breaker for request: {}, stage: {}", requestId,
                stagePlan.getStageMetadata().getStageId(), e);
            return new PipelineBreakerResult(pipelineBreakerContext.getNodeIdMap(), Collections.emptyMap(),
                TransferableBlockUtils.getErrorTransferableBlock(e), null);
        }
    } else {
        return null;
    }
} | @Test
public void shouldReturnEmptyBlockWhenPBExecuteWithIncorrectMailboxNode() {
MailboxReceiveNode incorrectlyConfiguredMailboxNode = getPBReceiveNode(3);
StagePlan stagePlan = new StagePlan(incorrectlyConfiguredMailboxNode, _stageMetadata);
// when
PipelineBreakerResult pipelineBreakerResult =
PipelineBreakerExecutor.executePipelineBreakers(_scheduler, _mailboxService, _workerMetadata, stagePlan,
ImmutableMap.of(), 0, Long.MAX_VALUE);
// then
// should return empty block list
Assert.assertNotNull(pipelineBreakerResult);
Assert.assertNull(pipelineBreakerResult.getErrorBlock());
Assert.assertEquals(pipelineBreakerResult.getResultMap().size(), 1);
List<TransferableBlock> resultBlocks = pipelineBreakerResult.getResultMap().values().iterator().next();
Assert.assertEquals(resultBlocks.size(), 0);
Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats());
} |
@Override
public LogicalSchema getSchema() {
    // The schema is inherited unchanged from the upstream source node.
    return getSource().getSchema();
} | @Test
public void shouldExtractConstraintForSpecialCol_tableScan() {
// Given:
when(plannerOptions.getTableScansEnabled()).thenReturn(true);
when(source.getSchema()).thenReturn(INPUT_SCHEMA);
final Expression expression = new ComparisonExpression(
Type.EQUAL,
new UnqualifiedColumnReferenceExp(ColumnName.of("WINDOWSTART")),
new IntegerLiteral(1234)
);
// Then:
expectTableScan(expression, true);
} |
/**
 * Creates an archiver.
 *
 * @param id identifier for this archiver instance
 * @param maxWorkQueueSize maximum work queue size; must be a positive integer
 * @param maxPurgeInterval purge interval; must be at least as long as gracePeriod
 * @param gracePeriod grace period; must not exceed maxPurgeInterval
 * @throws IllegalArgumentException if the size is non-positive or the periods are inconsistent
 */
protected Archiver( String id, int maxWorkQueueSize, Duration maxPurgeInterval, Duration gracePeriod )
{
    if ( maxWorkQueueSize < 1 )
    {
        throw new IllegalArgumentException( "Argument 'maxWorkQueueSize' must be a positive integer." );
    }
    if ( gracePeriod.compareTo( maxPurgeInterval ) > 0 )
    {
        throw new IllegalArgumentException( "Value for argument 'gracePeriod' cannot be larger than 'maxPurgeInterval'." );
    }
    this.id = id;
    this.maxWorkQueueSize = maxWorkQueueSize;
    this.maxPurgeInterval = maxPurgeInterval;
    this.gracePeriod = gracePeriod;
} | @Test
public void testArchiver() throws Exception
{
// Setup fixture.
final int maxWorkQueueSize = 100;
final Duration maxPurgeInterval = Duration.ofMillis( 5000 );
final Duration gracePeriod = Duration.ofMillis( 50 );
final DummyArchiver archiver = new DummyArchiver( "test", maxWorkQueueSize, maxPurgeInterval, gracePeriod );
final Thread thread = new Thread( archiver );
try
{
// Execute system under test.
thread.start();
archiver.archive( 1 );
// Verify result.
waitUntilArchivingIsDone( archiver, 1 );
assertFalse( archiver.store.isEmpty() );
final Duration timeUntilLogged = Duration.between( archiver.getStarted(), archiver.store.get( 1 ) );
assertTrue( timeUntilLogged.compareTo( gracePeriod ) >= 0 );
assertTrue( timeUntilLogged.compareTo( maxPurgeInterval ) < 0 ); // this needs not be entirely true (due to garbage collection, etc), but is a fair assumption.
}
finally
{
// Teardown fixture.
archiver.stop();
}
} |
@Nonnull
@Override
public Sketch<IntegerSummary> getResult() {
    // Delegates to unionAll() to produce the combined sketch.
    return unionAll();
} | @Test
public void testUnionWithEmptyInput() {
TupleIntSketchAccumulator accumulator = new TupleIntSketchAccumulator(_setOps, _nominalEntries, 3);
TupleIntSketchAccumulator emptyAccumulator = new TupleIntSketchAccumulator(_setOps, _nominalEntries, 3);
accumulator.merge(emptyAccumulator);
Assert.assertTrue(accumulator.isEmpty());
Assert.assertEquals(accumulator.getResult().getEstimate(), 0.0);
} |
// Wraps the given journal entry; no copying or validation is performed.
public StarMgrJournal(Journal journal) {
    this.journal = journal;
} | @Test
public void testStarMgrJournal() {
new MockUp<Journal>() {
@Mock
public void write(DataOutput out) throws IOException {
}
};
new MockUp<EditLog>() {
@Mock
public void logStarMgrOperation(StarMgrJournal journal) {
}
};
StarMgrJournal starMgrJournal = new StarMgrJournal(journal);
try {
starMgrJournal.write(null);
} catch (IOException e) {
}
Assert.assertEquals(journal, starMgrJournal.getJournal());
editLog.logStarMgrOperation(starMgrJournal);
} |
// Sets the cluster name carried by this request.
public void setCluster(String cluster) {
    this.cluster = cluster;
} | @Test
void testSerialize() throws JsonProcessingException {
ServiceQueryRequest request = new ServiceQueryRequest(NAMESPACE, SERVICE, GROUP);
request.setCluster(Constants.DEFAULT_CLUSTER_NAME);
String json = mapper.writeValueAsString(request);
checkSerializeBasedInfo(json);
assertTrue(json.contains("\"cluster\":\"" + Constants.DEFAULT_CLUSTER_NAME + "\""));
assertTrue(json.contains("\"healthyOnly\":false"));
assertTrue(json.contains("\"udpPort\":0"));
} |
// Deprecated no-op: this overload intentionally does nothing — callers should use
// the non-deprecated showUpX5WebView variant instead.
@Override
@Deprecated
public void showUpX5WebView(Object x5WebView, JSONObject properties, boolean isSupportJellyBean, boolean enableVerify) {
} | @Test
public void testShowUpX5WebView() {
WebView webView = new WebView(mApplication);
mSensorsAPI.showUpX5WebView(webView, false);
} |
/**
 * Validates the subnet's id, network id and CIDR, then persists the update and logs it.
 *
 * @throws NullPointerException if osSubnet is null
 * @throws IllegalArgumentException if id, network id or CIDR is null or empty
 */
@Override
public void updateSubnet(Subnet osSubnet) {
    checkNotNull(osSubnet, ERR_NULL_SUBNET);
    checkArgument(!Strings.isNullOrEmpty(osSubnet.getId()), ERR_NULL_SUBNET_ID);
    checkArgument(!Strings.isNullOrEmpty(osSubnet.getNetworkId()), ERR_NULL_SUBNET_NET_ID);
    checkArgument(!Strings.isNullOrEmpty(osSubnet.getCidr()), ERR_NULL_SUBNET_CIDR);
    osNetworkStore.updateSubnet(osSubnet);
    log.info(String.format(MSG_SUBNET, osSubnet.getCidr(), MSG_UPDATED));
} | @Test(expected = IllegalArgumentException.class)
public void testUpdateSubnetWithNullCidr() {
final Subnet testSubnet = NeutronSubnet.builder()
.networkId(NETWORK_ID)
.build();
testSubnet.setId(SUBNET_ID);
target.updateSubnet(testSubnet);
} |
// Compiled once instead of on every call; matches ${name} placeholders
// (non-greedy, so "${a} ${b}" yields two separate groups).
private static final Pattern VARIABLE_PATTERN = Pattern.compile("\\$\\{(.+?)\\}");

/**
 * Replaces ${name} placeholders in {@code template} with values from {@code variables}.
 * Placeholders whose name is absent from the map are left untouched; a key mapped to
 * a null value is rendered as the literal string "null".
 */
public static String substituteVariables(String template, Map<String, String> variables) {
    Matcher matcher = VARIABLE_PATTERN.matcher(template);
    // StringBuilder cannot be used here because Matcher expects StringBuffer
    StringBuffer buffer = new StringBuffer();
    while (matcher.find()) {
        if (variables.containsKey(matcher.group(1))) {
            String replacement = variables.get(matcher.group(1));
            // quote to work properly with $ and {,} signs
            matcher.appendReplacement(buffer, replacement != null ? Matcher.quoteReplacement(replacement) : "null");
        }
    }
    matcher.appendTail(buffer);
    return buffer.toString();
} | @Test
public void testSubVariables() {
Map<String, String> variables = new HashMap<>();
variables.put("v1", "abc");
variables.put("v2", "def");
String text = "This is a test for ${v1} and ${v2}";
String expect = "This is a test for abc and def";
Assert.assertEquals(expect, Util.substituteVariables(text, variables));
} |
/**
 * Filters the discovered nodes down to those matching the configured attribute/value
 * pair; with no filter configured, the input list is returned unchanged.
 */
@Override
public List<Node> sniff(List<Node> nodes) {
    if (attribute == null || value == null) {
        return nodes;
    }
    final List<Node> matching = new java.util.ArrayList<>(nodes.size());
    for (Node node : nodes) {
        if (nodeMatchesFilter(node, attribute, value)) {
            matching.add(node);
        }
    }
    return matching;
} | @Test
void returnsMatchingNodesIfGivenAttributeIsInList() throws Exception {
final Node matchingNode = mockNode(ImmutableMap.of(
"something", ImmutableList.of("somevalue", "42", "pi")
));
final List<Node> nodes = Collections.singletonList(matchingNode);
final NodesSniffer nodesSniffer = new FilteredOpenSearchNodesSniffer("something", "42");
assertThat(nodesSniffer.sniff(nodes)).isEqualTo(nodes);
} |
/**
 * Checks {@code otherPassword} against a stored prefixed hash-plus-salt value by
 * extracting the salt after the last SALT_PREFIX marker, re-hashing, and comparing
 * the full strings.
 *
 * @throws IllegalArgumentException if the stored value is not in the supported format
 */
@Override
public boolean matches(String hashedPasswordAndSalt, String otherPassword) {
    checkArgument(supports(hashedPasswordAndSalt), "Supplied hashed password is not supported, it does not start with "
        + PREFIX + " or does not contain a salt.");
    // Everything after the last salt marker is the salt used for re-hashing.
    final int saltIndex = hashedPasswordAndSalt.lastIndexOf(SALT_PREFIX);
    final String salt = hashedPasswordAndSalt.substring(saltIndex + SALT_PREFIX.length());
    return hash(otherPassword, salt).equals(hashedPasswordAndSalt);
} | @Test
public void testMatches() throws Exception {
assertThat(bCryptPasswordAlgorithm.matches("{bcrypt}$2a$12$8lRgZZTqRWO2.Mk37Gl7re7uD0QoDkdSF/UtFfVx0BqqgI23/jtkO{salt}$2a$12$8lRgZZTqRWO2.Mk37Gl7re", "foobar")).isTrue();
} |
/**
 * Registers an email address for the account bound to the request's app session and
 * converts the service result to the deprecated DTO form.
 */
@PostMapping("/register")
@Operation(summary = "Register an email to an account, email needs to be verified to become active")
public DEmailRegisterResult registerEmail(@RequestBody DEmailRegisterRequest deprecatedRequest) {
    // validate() resolves the app session referenced by the request.
    AppSession appSession = validate(deprecatedRequest);
    var request = deprecatedRequest.getRequest();
    var result = accountService.registerEmail(appSession.getAccountId(), request);
    return DEmailRegisterResult.copyFrom(result);
} | @Test
public void validEmailRegister() {
DEmailRegisterRequest request = new DEmailRegisterRequest();
request.setAppSessionId("id");
request.setEmail("email");
EmailRegisterResult result = new EmailRegisterResult();
result.setStatus(Status.OK);
result.setError("error");
result.setEmailAddress("address");
result.setMaxAmountEmails(3);
when(accountService.registerEmail(eq(1L), any())).thenReturn(result);
DEmailRegisterResult registerResult = emailController.registerEmail(request);
assertEquals(Status.OK, registerResult.getStatus());
assertEquals("error", registerResult.getError());
assertEquals(3, registerResult.getMaxAmountEmails());
assertEquals("address", registerResult.getEmailAddress());
} |
@Override
public void shutdown() {
    // Pure delegation; shutdown semantics are those of the wrapped delegate.
    delegate.shutdown();
} | @Test
public void shutdown_delegates_to_executorService() {
underTest.shutdown();
inOrder.verify(executorService).shutdown();
inOrder.verifyNoMoreInteractions();
} |
// Static factory for a fresh Builder instance.
public static Builder builder() {
    return new Builder();
} | @Test
// Test cases that can't be constructed with our Builder class but that will parse correctly
public void testCanDeserializeWithoutDefaultValues() throws JsonProcessingException {
GetNamespaceResponse withoutProps =
GetNamespaceResponse.builder().withNamespace(NAMESPACE).build();
String jsonWithNullProperties = "{\"namespace\":[\"accounting\",\"tax\"],\"properties\":null}";
assertEquals(deserialize(jsonWithNullProperties), withoutProps);
} |
/**
 * Fetches the configs of the given topics from the source cluster's admin client,
 * keyed by topic name; adminCall() wraps the call with a descriptive error context.
 */
Map<String, Config> describeTopicConfigs(Set<String> topics)
    throws InterruptedException, ExecutionException {
    Set<ConfigResource> resources = topics.stream()
        .map(x -> new ConfigResource(ConfigResource.Type.TOPIC, x))
        .collect(Collectors.toSet());
    return adminCall(
        () -> sourceAdminClient.describeConfigs(resources).all().get().entrySet().stream()
            .collect(Collectors.toMap(x -> x.getKey().name(), Entry::getValue)),
        () -> String.format("describe configs for topics %s on %s cluster", topics, config.sourceClusterAlias())
    );
} | @Test
public void testMissingDescribeConfigsAcl() throws Exception {
Admin sourceAdmin = mock(Admin.class);
MirrorSourceConnector connector = new MirrorSourceConnector(
sourceAdmin,
mock(Admin.class),
new MirrorSourceConfig(makeProps())
);
ExecutionException describeConfigsFailure = new ExecutionException(
"Failed to describe topic configs",
new TopicAuthorizationException("Topic authorization failed")
);
@SuppressWarnings("unchecked")
KafkaFuture<Map<ConfigResource, Config>> describeConfigsFuture = mock(KafkaFuture.class);
when(describeConfigsFuture.get()).thenThrow(describeConfigsFailure);
DescribeConfigsResult describeConfigsResult = mock(DescribeConfigsResult.class);
when(describeConfigsResult.all()).thenReturn(describeConfigsFuture);
when(sourceAdmin.describeConfigs(any())).thenReturn(describeConfigsResult);
try (LogCaptureAppender connectorLogs = LogCaptureAppender.createAndRegister(MirrorUtils.class)) {
connectorLogs.setClassLogger(MirrorUtils.class, Level.TRACE);
Set<String> topics = new HashSet<>();
topics.add("topic1");
topics.add("topic2");
ExecutionException exception = assertThrows(ExecutionException.class, () -> connector.describeTopicConfigs(topics));
assertEquals(
exception.getCause().getClass().getSimpleName() + " occurred while trying to describe configs for topics [topic1, topic2] on source1 cluster",
connectorLogs.getMessages().get(0)
);
}
} |
/**
 * Finalizes the lookup map into the top records: resizes/sorts when an ORDER BY is
 * present, and optionally converts intermediate aggregation results into final
 * results in place (mutating both the schema column types and the record values).
 */
@Override
public void finish(boolean sort, boolean storeFinalResult) {
    if (_hasOrderBy) {
        // Track how long the resize/trim takes for stats.
        long startTimeNs = System.nanoTime();
        _topRecords = _tableResizer.getTopRecords(_lookupMap, _resultSize, sort);
        long resizeTimeNs = System.nanoTime() - startTimeNs;
        _numResizes++;
        _resizeTimeNs += resizeTimeNs;
    } else {
        _topRecords = _lookupMap.values();
    }
    // TODO: Directly return final result in _tableResizer.getTopRecords to avoid extracting final result multiple times
    // Inputs that are already final must also be stored as final.
    assert !(_hasFinalInput && !storeFinalResult);
    if (storeFinalResult && !_hasFinalInput) {
        ColumnDataType[] columnDataTypes = _dataSchema.getColumnDataTypes();
        int numAggregationFunctions = _aggregationFunctions.length;
        for (int i = 0; i < numAggregationFunctions; i++) {
            columnDataTypes[i + _numKeyColumns] = _aggregationFunctions[i].getFinalResultColumnType();
        }
        // Replace each aggregation column's intermediate value with its final result.
        for (Record record : _topRecords) {
            Object[] values = record.getValues();
            for (int i = 0; i < numAggregationFunctions; i++) {
                int colId = i + _numKeyColumns;
                values[colId] = _aggregationFunctions[i].extractFinalResult(values[colId]);
            }
        }
    }
} | @Test(dataProvider = "initDataProvider")
public void testNonConcurrentIndexedTable(String orderBy, List<String> survivors) {
QueryContext queryContext = QueryContextConverterUtils.getQueryContext(
"SELECT SUM(m1), MAX(m2) FROM testTable GROUP BY d1, d2, d3, d4 ORDER BY " + orderBy);
DataSchema dataSchema =
new DataSchema(new String[]{"d1", "d2", "d3", "d4", "sum(m1)", "max(m2)"}, new ColumnDataType[]{
ColumnDataType.STRING, ColumnDataType.INT, ColumnDataType.DOUBLE, ColumnDataType.INT,
ColumnDataType.DOUBLE, ColumnDataType.DOUBLE
});
// Test SimpleIndexedTable
IndexedTable indexedTable = new SimpleIndexedTable(dataSchema, queryContext, 5, TRIM_SIZE, TRIM_THRESHOLD);
IndexedTable mergeTable = new SimpleIndexedTable(dataSchema, queryContext, 10, TRIM_SIZE, TRIM_THRESHOLD);
testNonConcurrent(indexedTable, mergeTable);
indexedTable.finish(true);
checkSurvivors(indexedTable, survivors);
// Test ConcurrentIndexedTable
indexedTable = new ConcurrentIndexedTable(dataSchema, queryContext, 5, TRIM_SIZE, TRIM_THRESHOLD);
mergeTable = new SimpleIndexedTable(dataSchema, queryContext, 10, TRIM_SIZE, TRIM_THRESHOLD);
testNonConcurrent(indexedTable, mergeTable);
indexedTable.finish(true);
checkSurvivors(indexedTable, survivors);
} |
// Convenience overload: delegates with a null second argument.
Record convert(Object data) {
    return convert(data, null);
} | @Test
public void testStructValueInMapConvert() {
Table table = mock(Table.class);
when(table.schema()).thenReturn(STRUCT_IN_MAP_SCHEMA);
RecordConverter converter = new RecordConverter(table, config);
Struct data = createNestedStructData();
Struct struct =
new Struct(CONNECT_STRUCT_IN_MAP_SCHEMA)
.put("stma", ImmutableMap.of("key1", data, "key2", data));
Record record = converter.convert(struct);
Map<?, ?> fieldVal = (Map<?, ?>) record.getField("stma");
Record mapVal = (Record) fieldVal.get("key1");
assertNestedRecordValues(mapVal);
} |
// URL-decodes a query parameter using UTF-8; malformed %-escapes make
// URLDecoder.decode throw IllegalArgumentException.
public static String decodeQueryParam(final String value) {
    return URLDecoder.decode(value, StandardCharsets.UTF_8);
} | @Test
public void testDecodeQueryParam() {
assertEquals("a=1&b=2", HttpParamConverter.decodeQueryParam("a%3d1%26b%3d2"));
assertThrows(IllegalArgumentException.class, () -> HttpParamConverter.decodeQueryParam("a%3d1%26b%3d2%%"));
} |
/**
 * Returns a deep copy of {@code input} with include directives expanded relative to
 * the application directory; the input document itself is left untouched.
 */
public Document process(Document input) throws IOException, TransformerException {
    Document doc = Xml.copyDocument(input);
    includeFile(application, doc.getDocumentElement());
    return doc;
} | @Test(expected = IllegalArgumentException.class)
public void testIllegalParent2() throws ParserConfigurationException, IOException, SAXException, TransformerException {
File app = new File("src/test/resources/multienvapp_fail_parent2");
DocumentBuilder docBuilder = Xml.getPreprocessDocumentBuilder();
new IncludeProcessor(app).process(docBuilder.parse(getServices(app)));
fail("absolute include path should not be allowed");
} |
/**
 * Appends the dotted external name to {@code sb} with each part quoted by the
 * dialect's identifier quoting; dots inside quoted parts do not split, and existing
 * quotes/escapes in the input are stripped before re-quoting.
 */
static void quoteExternalName(StringBuilder sb, String externalName) {
    List<String> parts = splitByNonQuotedDots(externalName);
    for (int i = 0; i < parts.size(); i++) {
        String unescaped = unescapeQuotes(parts.get(i));
        String unquoted = unquoteIfQuoted(unescaped);
        DIALECT.quoteIdentifier(sb, unquoted);
        // Re-join the parts with dots between (but not after) them.
        if (i < parts.size() - 1) {
            sb.append(".");
        }
    }
} | @Test
public void quoteExternalName_simple() {
String externalName = "my_table";
StringBuilder sb = new StringBuilder();
MappingHelper.quoteExternalName(sb, externalName);
assertThat(sb.toString()).isEqualTo("\"my_table\"");
} |
/**
 * Database-backed search: filters, sorts and pages all active extensions in memory,
 * returning hits that carry only the ExtensionSearch content (no ids or scores).
 * NOTE(review): @Cacheable and @CacheEvict are combined on this one method — confirm
 * that evicting CACHE_AVERAGE_REVIEW_RATING on every uncached search is intentional.
 */
@Transactional
@Cacheable(CACHE_DATABASE_SEARCH)
@CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true)
public SearchHits<ExtensionSearch> search(ISearchService.Options options) {
    // grab all extensions
    var matchingExtensions = repositories.findAllActiveExtensions();
    // no extensions in the database
    if (matchingExtensions.isEmpty()) {
        return new SearchHitsImpl<>(0,TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null);
    }
    // exclude namespaces
    if(options.namespacesToExclude != null) {
        for(var namespaceToExclude : options.namespacesToExclude) {
            matchingExtensions = matchingExtensions.filter(extension -> !extension.getNamespace().getName().equals(namespaceToExclude));
        }
    }
    // filter target platform
    if(TargetPlatform.isValid(options.targetPlatform)) {
        matchingExtensions = matchingExtensions.filter(extension -> extension.getVersions().stream().anyMatch(ev -> ev.getTargetPlatform().equals(options.targetPlatform)));
    }
    // filter category
    if (options.category != null) {
        matchingExtensions = matchingExtensions.filter(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            return latest.getCategories().stream().anyMatch(category -> category.equalsIgnoreCase(options.category));
        });
    }
    // filter text: match name, namespace, description or display name (case-insensitive)
    if (options.queryString != null) {
        matchingExtensions = matchingExtensions.filter(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            return extension.getName().toLowerCase().contains(options.queryString.toLowerCase())
                || extension.getNamespace().getName().contains(options.queryString.toLowerCase())
                || (latest.getDescription() != null && latest.getDescription()
                    .toLowerCase().contains(options.queryString.toLowerCase()))
                || (latest.getDisplayName() != null && latest.getDisplayName()
                    .toLowerCase().contains(options.queryString.toLowerCase()));
        });
    }
    // need to perform the sortBy ()
    // 'relevance' | 'timestamp' | 'rating' | 'downloadCount';
    Stream<ExtensionSearch> searchEntries;
    if("relevance".equals(options.sortBy) || "rating".equals(options.sortBy)) {
        var searchStats = new SearchStats(repositories);
        searchEntries = matchingExtensions.stream().map(extension -> relevanceService.toSearchEntry(extension, searchStats));
    } else {
        searchEntries = matchingExtensions.stream().map(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            var targetPlatforms = repositories.findExtensionTargetPlatforms(extension);
            return extension.toSearch(latest, targetPlatforms);
        });
    }
    var comparators = new HashMap<>(Map.of(
        "relevance", new RelevanceComparator(),
        "timestamp", new TimestampComparator(),
        "rating", new RatingComparator(),
        "downloadCount", new DownloadedCountComparator()
    ));
    var comparator = comparators.get(options.sortBy);
    if(comparator != null) {
        searchEntries = searchEntries.sorted(comparator);
    }
    var sortedExtensions = searchEntries.collect(Collectors.toList());
    // need to do sortOrder
    // 'asc' | 'desc';
    if ("desc".equals(options.sortOrder)) {
        // reverse the order
        Collections.reverse(sortedExtensions);
    }
    // Paging: clamp the window to the available results.
    var totalHits = sortedExtensions.size();
    var endIndex = Math.min(sortedExtensions.size(), options.requestedOffset + options.requestedSize);
    var startIndex = Math.min(endIndex, options.requestedOffset);
    sortedExtensions = sortedExtensions.subList(startIndex, endIndex);
    List<SearchHit<ExtensionSearch>> searchHits;
    if (sortedExtensions.isEmpty()) {
        searchHits = Collections.emptyList();
    } else {
        // client is interested only in the extension IDs
        searchHits = sortedExtensions.stream().map(extensionSearch -> new SearchHit<>(null, null, null, 0.0f, null, null, null, null, null, null, extensionSearch)).collect(Collectors.toList());
    }
    return new SearchHitsImpl<>(totalHits, TotalHitsRelation.OFF, 0f, null, null, searchHits, null, null);
} | @Test
public void testRelevance() {
var ext1 = mockExtension("yaml", 1.0, 100, 100, "redhat", List.of("Snippets", "Programming Languages"));
var ext2 = mockExtension("java", 4.0, 100, 10000, "redhat", List.of("Snippets", "Programming Languages"));
var ext3 = mockExtension("openshift", 1.0, 100, 10, "redhat", List.of("Snippets", "Other"));
Mockito.when(repositories.findAllActiveExtensions()).thenReturn(Streamable.of(List.of(ext1, ext2, ext3)));
var searchOptions = new ISearchService.Options(null, null, TargetPlatform.NAME_UNIVERSAL, 50, 0, null, "relevance", false);
var result = search.search(searchOptions);
// should find all extensions but order should be different
assertThat(result.getTotalHits()).isEqualTo(3);
var hits = result.getSearchHits();
// java should have the most relevance
assertThat(getIdFromExtensionHits(hits, 0)).isEqualTo(getIdFromExtensionName("openshift"));
assertThat(getIdFromExtensionHits(hits, 1)).isEqualTo(getIdFromExtensionName("yaml"));
assertThat(getIdFromExtensionHits(hits, 2)).isEqualTo(getIdFromExtensionName("java"));
} |
/**
 * Decides whether S3 Select pushdown can be used for the given table and path:
 * requires the session flag, a non-null path, and delegates the storage checks to
 * the schema-based overload.
 */
public static boolean shouldEnablePushdownForTable(ConnectorSession session, Table table, String path, Optional<Partition> optionalPartition)
{
    if (!isS3SelectPushdownEnabled(session)) {
        return false;
    }
    if (path == null) {
        return false;
    }
    // Hive table partitions could be on different storages,
    // as a result, we have to check each individual optionalPartition
    Properties schema = optionalPartition
        .map(partition -> getHiveSchema(partition, table))
        .orElseGet(() -> getHiveSchema(table));
    return shouldEnablePushdownForTable(table, path, schema);
} | @Test
public void testShouldNotEnableSelectPushdownWhenIsNotS3StoragePath()
{
assertFalse(shouldEnablePushdownForTable(session, table, null, Optional.empty()));
assertFalse(shouldEnablePushdownForTable(session, table, "", Optional.empty()));
assertFalse(shouldEnablePushdownForTable(session, table, "s3:/invalid", Optional.empty()));
assertFalse(shouldEnablePushdownForTable(session, table, "s3:/invalid", Optional.of(partition)));
} |
/**
 * Unmarshals the GPU script's XML output into a GpuDeviceInformation bean.
 * Synchronized because the shared unmarshaller instance is reused across calls.
 *
 * @throws YarnException wrapping any JAXBException raised during parsing
 */
public synchronized GpuDeviceInformation parseXml(String xmlContent)
    throws YarnException {
    InputSource inputSource = new InputSource(new StringReader(xmlContent));
    SAXSource source = new SAXSource(xmlReader, inputSource);
    try {
        return (GpuDeviceInformation) unmarshaller.unmarshal(source);
    } catch (JAXBException e) {
        String msg = "Failed to parse XML output of " +
            GPU_SCRIPT_REFERENCE + "!";
        LOG.error(msg, e);
        throw new YarnException(msg, e);
    }
} | @Test
public void testParseMissingTags() throws IOException, YarnException {
File f = new File("src/test/resources/nvidia-smi-output-missing-tags.xml");
String s = FileUtils.readFileToString(f, StandardCharsets.UTF_8);
GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
GpuDeviceInformation info = parser.parseXml(s);
assertEquals("375.66", info.getDriverVersion());
assertEquals(1, info.getGpus().size());
PerGpuDeviceInformation gpu = info.getGpus().get(0);
assertEquals("N/A", gpu.getProductName());
assertEquals("N/A", gpu.getUuid());
assertEquals(-1, gpu.getMinorNumber());
assertNull(gpu.getGpuMemoryUsage());
assertNull(gpu.getTemperature());
assertNull(gpu.getGpuUtilizations());
} |
/**
 * Reports the step's XML source field as used, but only when the step is configured
 * to read XML from an incoming field; otherwise no fields are used.
 */
@Override
protected Set<StepField> getUsedFields( GetXMLDataMeta meta ) {
    Set<StepField> usedFields = new HashSet<>();
    if ( meta.isInFields() ) {
        Set<StepField> stepFields = createStepFields( meta.getXMLField(), getInputs() );
        usedFields.addAll( stepFields );
    }
    return usedFields;
} | @Test
public void testGetUsedFields() throws Exception {
when( meta.isInFields() ).thenReturn( true );
when( meta.getXMLField() ).thenReturn( "xml" );
StepNodes inputs = new StepNodes();
inputs.addNode( "previousStep", "xml", node );
inputs.addNode( "previousStep", "otherField", node );
doReturn( inputs ).when( analyzer ).getInputs();
Set<StepField> usedFields = analyzer.getUsedFields( meta );
assertEquals( 1, usedFields.size() );
assertEquals( "xml", usedFields.iterator().next().getFieldName() );
} |
/**
 * Copies an object within the bucket.
 *
 * @param src source object key
 * @param dst destination object key
 * @return {@code true} on success, {@code false} if the service call fails
 */
@Override
protected boolean copyObject(String src, String dst) {
LOG.debug("Copying {} to {}", src, dst);
try {
mClient.copyObject(mBucketName, src, mBucketName, dst);
return true;
} catch (ServiceException e) {
// Fix: message previously said "rename" although this method performs a copy.
LOG.error("Failed to copy file {} to {}", src, dst, e);
return false;
}
} | @Test
public void testCopyObject() {
// test successful copy object
Mockito.when(mClient.copyObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.anyString(), ArgumentMatchers.anyString())).thenReturn(null);
boolean result = mOSSUnderFileSystem.copyObject(SRC, DST);
Assert.assertTrue(result);
// test copy object exception
Mockito.when(mClient.copyObject(ArgumentMatchers.anyString(),
ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.anyString())).thenThrow(ServiceException.class);
try {
mOSSUnderFileSystem.copyObject(SRC, DST);
} catch (Exception e) {
Assert.assertTrue(e instanceof ServiceException);
}
} |
/**
 * Returns a snapshot of the currently registered event listeners (the
 * toArray call copies them) wrapped in a {@code ProducerListeners}.
 */
ProducerListeners listeners() {
return new ProducerListeners(eventListeners.toArray(new HollowProducerEventListener[0]));
} | @Test
public void fireAnnouncementStartDontStopWhenOneFails() {
long version = 31337;
HollowProducer.ReadState readState = Mockito.mock(HollowProducer.ReadState.class);
Mockito.when(readState.getVersion()).thenReturn(version);
Mockito.doThrow(RuntimeException.class).when(listener).onAnnouncementStart(version);
listenerSupport.listeners().fireAnnouncementStart(readState);
Mockito.verify(listener).onAnnouncementStart(version);
Mockito.verify(listener).onAnnouncementStart(readState);
} |
/**
 * Truncates a BIGINT value. A long has no fractional part, so this overload
 * is the identity function; truncation of fractional values is presumably
 * handled by the floating-point overloads of this UDF (see paired test).
 */
@Udf
public Long trunc(@UdfParameter final Long val) {
return val;
} | @Test
public void shouldTruncateSimpleDoublePositive() {
assertThat(udf.trunc(0.0d), is(0L));
assertThat(udf.trunc(1.23d), is(1L));
assertThat(udf.trunc(1.0d), is(1L));
assertThat(udf.trunc(1.5d), is(1L));
assertThat(udf.trunc(1.75d), is(1L));
assertThat(udf.trunc(1.53e6d), is(1530000L));
assertThat(udf.trunc(10.01d), is(10L));
assertThat(udf.trunc(12345.5d), is(12345L));
assertThat(udf.trunc(9.99d), is(9L));
assertThat(udf.trunc(110.1), is(110L));
assertThat(udf.trunc(1530000.01d), is(1530000L));
assertThat(udf.trunc(9999999.99d), is(9999999L));
} |
/**
 * Deserializes an {@code OffsetCommitResponse} from its wire format.
 *
 * @param buffer  buffer positioned at the start of the encoded response
 * @param version the API version the buffer was encoded with
 */
public static OffsetCommitResponse parse(ByteBuffer buffer, short version) {
return new OffsetCommitResponse(new OffsetCommitResponseData(new ByteBufferAccessor(buffer), version));
} | @Test
public void testParse() {
OffsetCommitResponseData data = new OffsetCommitResponseData()
.setTopics(Arrays.asList(
new OffsetCommitResponseTopic().setPartitions(
Collections.singletonList(new OffsetCommitResponsePartition()
.setPartitionIndex(partitionOne)
.setErrorCode(errorOne.code()))),
new OffsetCommitResponseTopic().setPartitions(
Collections.singletonList(new OffsetCommitResponsePartition()
.setPartitionIndex(partitionTwo)
.setErrorCode(errorTwo.code())))
))
.setThrottleTimeMs(throttleTimeMs);
for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) {
ByteBuffer buffer = MessageUtil.toByteBuffer(data, version);
OffsetCommitResponse response = OffsetCommitResponse.parse(buffer, version);
assertEquals(expectedErrorCounts, response.errorCounts());
if (version >= 3) {
assertEquals(throttleTimeMs, response.throttleTimeMs());
} else {
assertEquals(DEFAULT_THROTTLE_TIME, response.throttleTimeMs());
}
assertEquals(version >= 4, response.shouldClientThrottle(version));
}
} |
/**
 * Adjusts the cluster-wide usage counter (increment or decrement per
 * {@code counterMode}) against the configured default cluster quota.
 *
 * @return whether the underlying persist operation reported success
 */
public boolean updateClusterUsage(CounterMode counterMode) {
return updateGroupUsage(counterMode, GroupCapacityPersistService.CLUSTER, PropertyUtil.getDefaultClusterQuota(),
false);
} | @Test
void testUpdateClusterUsage() {
when(groupCapacityPersistService.incrementUsageWithDefaultQuotaLimit(any())).thenReturn(true);
when(groupCapacityPersistService.decrementUsage(any())).thenReturn(true);
service.updateClusterUsage(CounterMode.INCREMENT);
Mockito.verify(groupCapacityPersistService, times(1)).incrementUsageWithDefaultQuotaLimit(any());
service.updateClusterUsage(CounterMode.DECREMENT);
Mockito.verify(groupCapacityPersistService, times(1)).decrementUsage(any());
} |
/**
 * Shifts a wall-clock TIMESTAMP from one time zone to another by applying the
 * difference between the two zones' UTC offsets at that instant.
 *
 * @return the shifted timestamp, or {@code null} if any argument is null
 * @throws KsqlFunctionException if either zone ID is invalid
 */
@Udf(description = "Converts a TIMESTAMP value from one timezone to another")
public Timestamp convertTz(
@UdfParameter(
description = "The TIMESTAMP value.") final Timestamp timestamp,
@UdfParameter(
description = "The fromTimeZone in java.util.TimeZone ID format. For example: \"UTC\","
+ " \"America/Los_Angeles\", \"PST\", \"Europe/London\"") final String fromTimeZone,
@UdfParameter(
description = "The toTimeZone in java.util.TimeZone ID format. For example: \"UTC\","
+ " \"America/Los_Angeles\", \"PST\", \"Europe/London\"") final String toTimeZone
) {
if (timestamp == null || fromTimeZone == null || toTimeZone == null) {
return null;
}
try {
// Offsets are evaluated at the timestamp's own instant, so DST transitions
// on either side are taken into account.
final long offset = TimeZone.getTimeZone(ZoneId.of(toTimeZone)).getOffset(timestamp.getTime())
- TimeZone.getTimeZone(ZoneId.of(fromTimeZone)).getOffset(timestamp.getTime());
return new Timestamp(timestamp.getTime() + offset);
} catch (DateTimeException e) {
throw new KsqlFunctionException("Invalid time zone: " + e.getMessage());
}
} | @Test
public void shouldReturnNullForNullToTimeZone() {
// When:
final Object result = udf.convertTz(Timestamp.valueOf("2000-01-01 00:00:00"), "America/Los_Angeles", null);
// Then:
assertNull(result);
} |
/** @return the complete segment name string held by this instance. */
public String getSegmentName() {
return _segmentName;
} | @Test
public void testSegmentNameGeneration() {
UploadedRealtimeSegmentName uploadedRealtimeSegmentName =
new UploadedRealtimeSegmentName("tableName", 1, 1717027200000L, "uploaded", "2");
String expectedSegmentName = "uploaded__tableName__1__20240530T0000Z__2";
Assert.assertEquals(uploadedRealtimeSegmentName.getSegmentName(), expectedSegmentName);
} |
/**
 * Decides the canary distribution for the given strategy. DISABLED and any
 * unrecognized strategy type fall back to STABLE (fail-safe default).
 */
@Override
public Distribution distribute(D2CanaryDistributionStrategy strategy)
{
switch (strategy.getStrategy()) {
case TARGET_HOSTS:
return distributeByTargetHosts(strategy);
case TARGET_APPLICATIONS:
return distributeByTargetApplications(strategy);
case PERCENTAGE:
return distributeByPercentage(strategy);
case DISABLED:
return Distribution.STABLE;
default:
// Fail safe: warn and treat unknown strategy types as stable.
_log.warn("Invalid distribution strategy type: " + strategy.getStrategy().name());
return Distribution.STABLE;
}
} | @Test(dataProvider = "getEdgeCaseStrategies")
public void testEdgeCases(D2CanaryDistributionStrategy strategy)
{
Assert.assertEquals(new CanaryDistributionProviderImplFixture().getSpiedImpl().distribute(strategy),
CanaryDistributionProvider.Distribution.STABLE, "Invalid strategies should return stable");
} |
/**
 * Resolves the state cell for the given namespace and tag from this work
 * item's state table, using a null state context.
 */
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
return workItemState.get(namespace, address, StateContexts.nullContext());
} | @Test
public void testNewBagNoFetch() throws Exception {
StateTag<BagState<String>> addr = StateTags.bag("bag", StringUtf8Coder.of());
BagState<String> bag = underTestNewKey.state(NAMESPACE, addr);
assertThat(bag.read(), Matchers.emptyIterable());
// Shouldn't need to read from windmill for this.
Mockito.verifyZeroInteractions(mockReader);
} |
/**
 * Executes Redis CLUSTER INFO and adapts the key/value reply into a Spring
 * Data {@code ClusterInfo} via a {@code Properties} bridge.
 */
@Override
public ClusterInfo clusterGetClusterInfo() {
RFuture<Map<String, String>> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
// Block until the async CLUSTER INFO reply is available.
Map<String, String> entries = syncFuture(f);
Properties props = new Properties();
for (Entry<String, String> entry : entries.entrySet()) {
props.setProperty(entry.getKey(), entry.getValue());
}
return new ClusterInfo(props);
} | @Test
public void testClusterGetClusterInfo() {
ClusterInfo info = connection.clusterGetClusterInfo();
assertThat(info.getSlotsFail()).isEqualTo(0);
assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
} |
/**
 * Incremental SPDY frame decoder. Consumes as many complete frames (or frame
 * fragments) as {@code buffer} holds, invoking callbacks on {@code delegate}.
 * Decoding progress lives in fields ({@code state}, {@code streamId},
 * {@code flags}, {@code length}, {@code numSettings}), so a frame may arrive
 * split across multiple calls: the method returns whenever more bytes are
 * needed and resumes from the saved state on the next invocation.
 */
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
// High bit of the first byte distinguishes control from data frames.
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
// An empty data frame is delivered immediately with an empty buffer.
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
// FIN is only reported on the final chunk of the frame.
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
// Entries are delivered one at a time until numSettings reaches zero.
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
// Compressed header block follows SYN_STREAM / SYN_REPLY / HEADERS;
// delivered in chunks as bytes arrive.
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
// Skip the payload of a frame being ignored, possibly across calls.
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
// After a framing error everything remaining is discarded.
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
} | @Test
public void testProgressivelyDiscardUnknownEmptyFrame() throws Exception {
short type = 5;
byte flags = (byte) 0xFF;
int segment = 4;
int length = 2 * segment;
ByteBuf header = Unpooled.buffer(SPDY_HEADER_SIZE);
ByteBuf segment1 = Unpooled.buffer(segment);
ByteBuf segment2 = Unpooled.buffer(segment);
encodeControlFrameHeader(header, type, flags, length);
segment1.writeInt(RANDOM.nextInt());
segment2.writeInt(RANDOM.nextInt());
decoder.decode(header);
decoder.decode(segment1);
decoder.decode(segment2);
verifyZeroInteractions(delegate);
assertFalse(header.isReadable());
assertFalse(segment1.isReadable());
assertFalse(segment2.isReadable());
header.release();
segment1.release();
segment2.release();
} |
/**
 * Obtains {@code connectionSize} connections for the given data source
 * starting at {@code connectionOffset}, honoring the connection mode.
 * Delegates to the internal {@code getConnections0} implementation.
 *
 * @throws SQLException if connections cannot be obtained
 */
@Override
public List<Connection> getConnections(final String databaseName, final String dataSourceName, final int connectionOffset, final int connectionSize,
final ConnectionMode connectionMode) throws SQLException {
return getConnections0(databaseName, dataSourceName, connectionOffset, connectionSize, connectionMode);
} | @Test
void assertGetConnectionsWhenPartInCacheWithConnectionStrictlyMode() throws SQLException {
databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 1, ConnectionMode.MEMORY_STRICTLY);
List<Connection> actual = databaseConnectionManager.getConnections(DefaultDatabase.LOGIC_NAME, "ds", 0, 3, ConnectionMode.CONNECTION_STRICTLY);
assertThat(actual.size(), is(3));
} |
/**
 * Formats checklist completion as {@code "<completed> (<pct>%)"},
 * e.g. {@code "12 (29%)"}.
 *
 * @param infos note statistics carrying item counts
 * @return completed-item count followed by the rounded completion percentage
 */
static String getChecklistCompletionState(StatsSingleNote infos) {
// Guard against division by zero: (float) x / 0 yields NaN or Infinity,
// which Math.round turns into 0 or Integer.MAX_VALUE respectively.
int itemsNumber = infos.getChecklistItemsNumber();
int percentage = itemsNumber == 0 ? 0
: Math.round((float) infos.getChecklistCompletedItemsNumber() / itemsNumber * 100);
return infos.getChecklistCompletedItemsNumber() + " (" + percentage + "%)";
} | @Test
public void getChecklistCompletionState() {
StatsSingleNote infos = new StatsSingleNote();
infos.setChecklistItemsNumber(42);
infos.setChecklistCompletedItemsNumber(12);
String completionState = NoteInfosActivity.getChecklistCompletionState(infos);
assertTrue(completionState.contains(infos.getChecklistCompletedItemsNumber() + ""));
assertTrue(completionState.contains("29%"));
} |
/**
 * Returns a read-only view of this buffer: {@code this} when the buffer is
 * already read-only, otherwise an unmodifiable wrapper sharing its content.
 */
@SuppressWarnings("deprecation")
@Override
public ByteBuf asReadOnly() {
if (isReadOnly()) {
return this;
}
return Unpooled.unmodifiableBuffer(this);
} | @Test
public void testReadOnlyRelease() {
ByteBuf buf = newBuffer(8);
assertEquals(1, buf.refCnt());
assertTrue(buf.asReadOnly().release());
assertEquals(0, buf.refCnt());
} |
/**
 * Returns {@code true} when the target collection does NOT contain every
 * element of {@code value}. The value may be a parseable collection string,
 * a Java collection, a JSON ArrayNode, or a scalar (compared as a single
 * formatted element); parsing is delegated to {@code DMNParseUtil}.
 *
 * @throws IllegalArgumentException if either argument is null
 */
public static boolean notAllOf(Object collection, Object value) {
if (collection == null) {
throw new IllegalArgumentException("collection cannot be null");
}
if (value == null) {
throw new IllegalArgumentException("value cannot be null");
}
// collection to check against
Collection targetCollection = getTargetCollection(collection, value);
// elements to check
if (DMNParseUtil.isParseableCollection(value)) {
// A value that fails to parse counts as "not all contained".
Collection valueCollection = DMNParseUtil.parseCollection(value, targetCollection);
return valueCollection == null || !targetCollection.containsAll(valueCollection);
} else if (DMNParseUtil.isJavaCollection(value)) {
return !targetCollection.containsAll((Collection) value);
} else if (DMNParseUtil.isArrayNode(value)) {
Collection valueCollection = DMNParseUtil.getCollectionFromArrayNode((ArrayNode) value);
return valueCollection == null || !targetCollection.containsAll(valueCollection);
} else {
// Scalar fallback: check membership of the single formatted value.
Object formattedValue = DMNParseUtil.getFormattedValue(value, targetCollection);
return !targetCollection.contains(formattedValue);
}
} | @Test
public void notAllOf() {
assertThat(CollectionUtil.notAllOf(Arrays.asList("group1", "group2"), Arrays.asList("group3", "group4"))).isTrue();
assertThat(CollectionUtil.notAllOf(Arrays.asList("group1", "group2"), Arrays.asList("group1", "group2"))).isFalse();
assertThat(CollectionUtil.notAllOf(Arrays.asList("group1", "group2"), Arrays.asList("group2", "group3"))).isTrue();
assertThat(CollectionUtil.notAllOf(Arrays.asList("group1", "group2"), "group3")).isTrue();
assertThat(CollectionUtil.notAllOf(Arrays.asList("group1", "group2"), "group2")).isFalse();
assertThat(CollectionUtil.notAllOf("group1, group2", "group3, group4")).isTrue();
assertThat(CollectionUtil.notAllOf("group1, group2", "group1, group2")).isFalse();
assertThat(CollectionUtil.notAllOf("group1, group2", "group2, group3")).isTrue();
ObjectMapper mapper = new ObjectMapper();
assertThat(CollectionUtil.notAllOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group3", "group4"))))
.isTrue();
assertThat(CollectionUtil.notAllOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group1", "group2"))))
.isFalse();
assertThat(CollectionUtil.notAllOf(mapper.valueToTree(Arrays.asList("group1", "group2")), mapper.valueToTree(Arrays.asList("group2", "group3"))))
.isTrue();
} |
/**
 * Asserts that the subject iterable contains at least one element of
 * {@code expected}. On failure, if some elements match only by their
 * {@code toString()} representation (e.g. differing types), the failure
 * message calls those out with added type information.
 */
public final void containsAnyIn(@Nullable Iterable<?> expected) {
checkNotNull(expected);
Collection<?> actual = iterableToCollection(checkNotNull(this.actual));
// Success on the first overlapping element.
for (Object item : expected) {
if (actual.contains(item)) {
return;
}
}
if (hasMatchingToStringPair(actual, expected)) {
failWithoutActual(
fact("expected to contain any of", countDuplicatesAndAddTypeInfo(expected)),
simpleFact("but did not"),
fact(
"though it did contain",
countDuplicatesAndAddTypeInfo(
retainMatchingToString(checkNotNull(this.actual), /* itemsToCheck= */ expected))),
fullContents());
} else {
failWithActual("expected to contain any of", expected);
}
} | @Test
public void iterableContainsAnyInArray() {
assertThat(asList(1, 2, 3)).containsAnyIn(new Integer[] {1, 10, 100});
expectFailureWhenTestingThat(asList(1, 2, 3)).containsAnyIn(new Integer[] {5, 6, 0});
assertFailureKeys("expected to contain any of", "but was");
assertFailureValue("expected to contain any of", "[5, 6, 0]");
} |
/**
 * Sends one SMS message: resolves the channel's {@code SmsClient}, invokes
 * it, and records the outcome on the corresponding SMS log entry. Any
 * throwable is caught and persisted as a failed send (code "EXCEPTION")
 * rather than propagated.
 */
@Override
public void doSendSms(SmsSendMessage message) {
// Obtain the SmsClient for the message's channel
SmsClient smsClient = smsChannelService.getSmsClient(message.getChannelId());
Assert.notNull(smsClient, "短信客户端({}) 不存在", message.getChannelId());
// Send the SMS and persist the provider's response on the log entry
try {
SmsSendRespDTO sendResponse = smsClient.sendSms(message.getLogId(), message.getMobile(),
message.getApiTemplateId(), message.getTemplateParams());
smsLogService.updateSmsSendResult(message.getLogId(), sendResponse.getSuccess(),
sendResponse.getApiCode(), sendResponse.getApiMsg(),
sendResponse.getApiRequestId(), sendResponse.getSerialNo());
} catch (Throwable ex) {
log.error("[doSendSms][发送短信异常,日志编号({})]", message.getLogId(), ex);
// Record the failure so the log reflects the exception outcome.
smsLogService.updateSmsSendResult(message.getLogId(), false,
"EXCEPTION", ExceptionUtil.getRootCauseMessage(ex), null, null);
}
} | @Test
@SuppressWarnings("unchecked")
public void testDoSendSms() throws Throwable {
// 准备参数
SmsSendMessage message = randomPojo(SmsSendMessage.class);
// mock SmsClientFactory 的方法
SmsClient smsClient = spy(SmsClient.class);
when(smsChannelService.getSmsClient(eq(message.getChannelId()))).thenReturn(smsClient);
// mock SmsClient 的方法
SmsSendRespDTO sendResult = randomPojo(SmsSendRespDTO.class);
when(smsClient.sendSms(eq(message.getLogId()), eq(message.getMobile()), eq(message.getApiTemplateId()),
eq(message.getTemplateParams()))).thenReturn(sendResult);
// 调用
smsSendService.doSendSms(message);
// 断言
verify(smsLogService).updateSmsSendResult(eq(message.getLogId()),
eq(sendResult.getSuccess()), eq(sendResult.getApiCode()),
eq(sendResult.getApiMsg()), eq(sendResult.getApiRequestId()), eq(sendResult.getSerialNo()));
} |
/**
 * Decodes and validates a SAML LogoutRequest from the HTTP request, resolves
 * the associated SAML session, verifies issuer and signature, performs the
 * logout, and returns the populated model.
 *
 * @throws SamlValidationException if validation, issuer, or signature checks fail
 * @throws SamlParseException if the message cannot be decoded
 * @throws SamlSessionException if no SAML session matches the request
 */
public LogoutRequestModel parseLogoutRequest(HttpServletRequest request) throws SamlValidationException, SamlParseException, SamlSessionException, DienstencatalogusException {
final LogoutRequestModel logoutRequestModel = new LogoutRequestModel();
try {
final BaseHttpServletRequestXMLMessageDecoder decoder = decodeRequest(request);
var logoutRequest = (LogoutRequest) decoder.getMessageContext().getMessage();
final SAMLBindingContext bindingContext = decoder.getMessageContext().getSubcontext(SAMLBindingContext.class);
logoutRequestModel.setLogoutRequest(logoutRequest);
logoutRequestModel.setRequest(request);
validateRequest(logoutRequestModel);
// Session lookup key: NameID when present, otherwise the first session index.
var id = logoutRequest.getNameID() != null ? logoutRequest.getNameID().getValue() : logoutRequest.getSessionIndexes().get(0).getValue();
var samlSession = samlSessionRepository.findById(id)
.orElseThrow(() -> new SamlSessionException("LogoutRequest no saml session found for nameID: " + id));
logoutRequestModel.setConnectionEntityId(samlSession.getConnectionEntityId());
logoutRequestModel.setServiceEntityId(samlSession.getServiceEntityId());
logoutRequestModel.setServiceUuid(samlSession.getServiceUuid());
logoutRequestModel.setRelayState(bindingContext.getRelayState());
logoutRequestModel.setEntranceSession(samlSession.getProtocolType().equals(ProtocolType.SAML_COMBICONNECT));
dcMetadataService.resolveDcMetadata(logoutRequestModel);
// The request issuer must match the session's connection entity.
if (!logoutRequestModel.getConnectionEntityId().equals(logoutRequestModel.getLogoutRequest().getIssuer().getValue())) {
throw new SamlValidationException("Issuer not equal to connectorEntityId");
}
verifySignature(logoutRequestModel, logoutRequestModel.getLogoutRequest().getSignature());
logout(samlSession);
if (logger.isDebugEnabled())
OpenSAMLUtils.logSAMLObject((LogoutRequest) decoder.getMessageContext().getMessage());
} catch (MessageDecodingException e) {
throw new SamlParseException("Authentication deflate decode exception", e);
} catch (ComponentInitializationException e) {
throw new SamlParseException("Authentication deflate initialization exception", e);
}
return logoutRequestModel;
} | @Test
public void parseLogoutRequestNoNameID() {
httpRequestMock.setParameter("SAMLRequest", "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
Exception exception = assertThrows(SamlValidationException.class,
() -> logoutService.parseLogoutRequest(httpRequestMock));
assertEquals("LogoutRequest validation error", exception.getMessage());
} |
/**
 * Builds the row metadata for error-handling output. A value meta is added
 * for each configured field name (number of errors, error descriptions,
 * error fields, error codes), after variable substitution; unset names are
 * skipped. The actual argument values are not used to build the metadata.
 *
 * @return row metadata with up to four value metas, in the order listed above
 */
public RowMetaInterface getErrorRowMeta( long nrErrors, String errorDescriptions, String fieldNames,
String errorCodes ) {
RowMetaInterface row = new RowMeta();
// Error-count column (integer, length 3), if configured.
String nrErr = variables.environmentSubstitute( getNrErrorsValuename() );
if ( !Utils.isEmpty( nrErr ) ) {
ValueMetaInterface v = new ValueMetaInteger( nrErr );
v.setLength( 3 );
row.addValueMeta( v );
}
// Error-description column, if configured.
String errDesc = variables.environmentSubstitute( getErrorDescriptionsValuename() );
if ( !Utils.isEmpty( errDesc ) ) {
ValueMetaInterface v = new ValueMetaString( errDesc );
row.addValueMeta( v );
}
// Error-fields column, if configured.
String errFields = variables.environmentSubstitute( getErrorFieldsValuename() );
if ( !Utils.isEmpty( errFields ) ) {
ValueMetaInterface v = new ValueMetaString( errFields );
row.addValueMeta( v );
}
// Error-codes column, if configured.
String errCodes = variables.environmentSubstitute( getErrorCodesValuename() );
if ( !Utils.isEmpty( errCodes ) ) {
ValueMetaInterface v = new ValueMetaString( errCodes );
row.addValueMeta( v );
}
return row;
} | @Test
public void testGetErrorRowMeta() {
VariableSpace vars = new Variables();
vars.setVariable( "VarNumberErrors", "nbrErrors" );
vars.setVariable( "VarErrorDescription", "errorDescription" );
vars.setVariable( "VarErrorFields", "errorFields" );
vars.setVariable( "VarErrorCodes", "errorCodes" );
StepErrorMeta testObject = new StepErrorMeta( vars, new StepMeta(), new StepMeta(),
"${VarNumberErrors}", "${VarErrorDescription}", "${VarErrorFields}", "${VarErrorCodes}" );
RowMetaInterface result = testObject.getErrorRowMeta( 10, "some data was bad", "factId", "BAD131" );
assertNotNull( result );
assertEquals( 4, result.size() );
assertEquals( ValueMetaInterface.TYPE_INTEGER, result.getValueMeta( 0 ).getType() );
assertEquals( "nbrErrors", result.getValueMeta( 0 ).getName() );
assertEquals( ValueMetaInterface.TYPE_STRING, result.getValueMeta( 1 ).getType() );
assertEquals( "errorDescription", result.getValueMeta( 1 ).getName() );
assertEquals( ValueMetaInterface.TYPE_STRING, result.getValueMeta( 2 ).getType() );
assertEquals( "errorFields", result.getValueMeta( 2 ).getName() );
assertEquals( ValueMetaInterface.TYPE_STRING, result.getValueMeta( 3 ).getType() );
assertEquals( "errorCodes", result.getValueMeta( 3 ).getName() );
} |
/**
 * Sets multiple key/value pairs. In queueing or pipelining mode each SET is
 * written through the current transaction/pipeline; otherwise all SETs are
 * issued together via a command batch. Always reports {@code true}.
 */
@Override
public Boolean mSet(Map<byte[], byte[]> tuple) {
if (isQueueing() || isPipelined()) {
for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
write(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
}
return true;
}
// Batch all SET commands and execute them in one round trip.
CommandBatchService es = new CommandBatchService(executorService);
for (Entry<byte[], byte[]> entry: tuple.entrySet()) {
es.writeAsync(entry.getKey(), StringCodec.INSTANCE, RedisCommands.SET, entry.getKey(), entry.getValue());
}
es.execute();
return true;
} | @Test
public void testMSet() {
Map<byte[], byte[]> map = new HashMap<>();
for (int i = 0; i < 10; i++) {
map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
}
connection.mSet(map);
for (Map.Entry<byte[], byte[]> entry : map.entrySet()) {
assertThat(connection.get(entry.getKey())).isEqualTo(entry.getValue());
}
} |
/**
 * Queries Elasticsearch cluster health, waiting up to 30 seconds for at
 * least YELLOW status. Returns an empty Optional if the request fails
 * (failure is logged at TRACE level only).
 */
@Override
public Optional<ClusterHealthStatus> getClusterHealthStatus() {
try {
ClusterHealthResponse healthResponse = getRestHighLevelClient().cluster()
.health(new ClusterHealthRequest().waitForYellowStatus().timeout(timeValueSeconds(30)), RequestOptions.DEFAULT);
return Optional.of(healthResponse.getStatus());
} catch (IOException e) {
LOG.trace("Failed to check health status ", e);
return Optional.empty();
}
} | @Test
public void should_add_authentication_header() throws InterruptedException {
mockServerResponse(200, JSON_SUCCESS_RESPONSE);
String password = "test-password";
EsConnectorImpl underTest = new EsConnectorImpl(Sets.newHashSet(HostAndPort.fromParts(mockWebServer.getHostName(), mockWebServer.getPort())),
password, null, null);
assertThat(underTest.getClusterHealthStatus())
.hasValue(ClusterHealthStatus.YELLOW);
assertThat(mockWebServer.takeRequest().getHeader("Authorization")).isEqualTo("Basic ZWxhc3RpYzp0ZXN0LXBhc3N3b3Jk");
} |
public Object getCell(final int columnIndex) {
Preconditions.checkArgument(columnIndex > 0 && columnIndex < data.size() + 1);
return data.get(columnIndex - 1);
} | @Test
void assertGetCellWithZeroIndex() {
assertThrows(IllegalArgumentException.class, () -> new LocalDataQueryResultRow().getCell(0));
} |
@Override
public Object get(Object key) {
return null;
} | @Test
public void testGet() throws Exception {
assertNull(NULL_QUERY_CACHE.get(1));
} |
@Override
public void appendDataInfluence(String entityName, String entityId, String fieldName,
String fieldCurrentValue) {
// might be
if (traceContext.tracer() == null) {
return;
}
if (traceContext.tracer().getActiveSpan() == null) {
return;
}
String spanId = traceContext.tracer().getActiveSpan().spanId();
OpType type = traceContext.tracer().getActiveSpan().getOpType();
ApolloAuditLogDataInfluence.Builder builder = ApolloAuditLogDataInfluence.builder().spanId(spanId)
.entityName(entityName).entityId(entityId).fieldName(fieldName);
if (type == null) {
return;
}
switch (type) {
case CREATE:
case UPDATE:
builder.newVal(fieldCurrentValue);
break;
case DELETE:
builder.oldVal(fieldCurrentValue);
}
dataInfluenceService.save(builder.build());
} | @Test
public void testAppendDataInfluenceCaseTracerIsNull() {
Mockito.when(traceContext.tracer()).thenReturn(null);
api.appendDataInfluence(entityName, entityId, fieldName, fieldCurrentValue);
Mockito.verify(traceContext, Mockito.times(1)).tracer();
} |
@Override
public String toString() {
return toString(false);
} | @Test
void emptyRecordSchema() {
Schema schema = createDefaultRecord();
String schemaString = schema.toString();
assertNotNull(schemaString);
} |
@VisibleForTesting
void setTokensFor(ContainerLaunchContext containerLaunchContext, boolean fetchToken)
throws Exception {
Credentials credentials = new Credentials();
LOG.info("Loading delegation tokens available locally to add to the AM container");
// for user
UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();
Collection<Token<? extends TokenIdentifier>> usrTok =
currUsr.getCredentials().getAllTokens();
for (Token<? extends TokenIdentifier> token : usrTok) {
LOG.info("Adding user token " + token.getService() + " with " + token);
credentials.addToken(token.getService(), token);
}
if (fetchToken) {
LOG.info("Fetching delegation tokens to add to the AM container.");
DelegationTokenManager delegationTokenManager =
new DefaultDelegationTokenManager(flinkConfiguration, null, null, null);
DelegationTokenContainer container = new DelegationTokenContainer();
delegationTokenManager.obtainDelegationTokens(container);
// This is here for backward compatibility to make log aggregation work
for (Map.Entry<String, byte[]> e : container.getTokens().entrySet()) {
if (flinkConfiguration.get(APP_MASTER_TOKEN_SERVICES).contains(e.getKey())) {
credentials.addAll(HadoopDelegationTokenConverter.deserialize(e.getValue()));
}
}
}
ByteBuffer tokens = ByteBuffer.wrap(HadoopDelegationTokenConverter.serialize(credentials));
containerLaunchContext.setTokens(tokens);
LOG.info("Delegation tokens added to the AM container.");
} | @Test
public void testSetTokensForYarnAppMaster() {
final Configuration flinkConfig = new Configuration();
flinkConfig.set(
APP_MASTER_TOKEN_SERVICES,
Arrays.asList(TestYarnAMDelegationTokenProvider.SERVICE_NAME));
YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(flinkConfig);
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
try {
yarnClusterDescriptor.setTokensFor(amContainer, true);
Credentials credentials = new Credentials();
try (DataInputStream dis =
new DataInputStream(
new ByteArrayInputStream(amContainer.getTokens().array()))) {
credentials.readTokenStorageStream(dis);
}
assertThat(credentials.getAllTokens())
.hasSize(1)
.contains(TestYarnAMDelegationTokenProvider.TEST_YARN_AM_TOKEN);
} catch (Exception e) {
fail("Should not throw exception when setting tokens for AM container.");
}
} |
@Override
public void cycle() {
if (!getConfig().isWritable()) {
LOG.debug("Not cycling non-writable index set <{}> ({})", getConfig().id(), getConfig().title());
return;
}
int oldTargetNumber;
try {
oldTargetNumber = getNewestIndexNumber();
} catch (NoTargetIndexException ex) {
oldTargetNumber = -1;
}
final int newTargetNumber = oldTargetNumber + 1;
final String newTarget = buildIndexName(newTargetNumber);
final String oldTarget = buildIndexName(oldTargetNumber);
if (oldTargetNumber == -1) {
LOG.info("Cycling from <none> to <{}>.", newTarget);
} else {
LOG.info("Cycling from <{}> to <{}>.", oldTarget, newTarget);
}
// Create new index.
LOG.info("Creating target index <{}>.", newTarget);
if (!indices.create(newTarget, this)) {
throw new RuntimeException("Could not create new target index <" + newTarget + ">.");
}
LOG.info("Waiting for allocation of index <{}>.", newTarget);
final HealthStatus healthStatus = indices.waitForRecovery(newTarget);
checkIfHealthy(healthStatus, (status) -> new RuntimeException("New target index did not become healthy (target index: <" + newTarget + ">)"));
LOG.debug("Health status of index <{}>: {}", newTarget, healthStatus);
addDeflectorIndexRange(newTarget);
LOG.info("Index <{}> has been successfully allocated.", newTarget);
// Point deflector to new index.
final String indexAlias = getWriteIndexAlias();
LOG.info("Pointing index alias <{}> to new index <{}>.", indexAlias, newTarget);
final Activity activity = new Activity(IndexSet.class);
if (oldTargetNumber == -1) {
// Only pointing, not cycling.
pointTo(newTarget);
activity.setMessage("Cycled index alias <" + indexAlias + "> from <none> to <" + newTarget + ">.");
} else {
// Re-pointing from existing old index to the new one.
LOG.debug("Switching over index alias <{}>.", indexAlias);
pointTo(newTarget, oldTarget);
setIndexReadOnlyAndCalculateRange(oldTarget);
activity.setMessage("Cycled index alias <" + indexAlias + "> from <" + oldTarget + "> to <" + newTarget + ">.");
}
LOG.info("Successfully pointed index alias <{}> to index <{}>.", indexAlias, newTarget);
activityWriter.write(activity);
auditEventSender.success(AuditActor.system(nodeId), ES_WRITE_INDEX_UPDATE, ImmutableMap.of("indexName", newTarget));
} | @Test
public void cycleAddsUnknownDeflectorRange() {
final String newIndexName = "graylog_1";
final Map<String, Set<String>> indexNameAliases = ImmutableMap.of(
"graylog_0", Collections.singleton("graylog_deflector"));
when(indices.getIndexNamesAndAliases(anyString())).thenReturn(indexNameAliases);
when(indices.create(newIndexName, mongoIndexSet)).thenReturn(true);
when(indices.waitForRecovery(newIndexName)).thenReturn(HealthStatus.Green);
final MongoIndexSet mongoIndexSet = createIndexSet(config);
mongoIndexSet.cycle();
verify(indexRangeService, times(1)).createUnknownRange(newIndexName);
} |
public <KIn, VIn, KOut, VOut> ProcessorSupplier<KIn, VIn, KOut, VOut> process(String spanName,
ProcessorSupplier<KIn, VIn, KOut, VOut> processorSupplier) {
return new TracingProcessorSupplier<>(this, spanName, processorSupplier);
} | @Test void newProcessorSupplier_should_tag_app_id_and_task_id() {
Processor<String, String, String, String> processor =
fakeV2ProcessorSupplier.get();
processor.init(processorV2ContextSupplier.get());
processor.process(new Record<>(TEST_KEY, TEST_VALUE, new Date().getTime()));
assertThat(spans.get(0).tags()).containsOnly(
entry("kafka.streams.application.id", TEST_APPLICATION_ID),
entry("kafka.streams.task.id", TEST_TASK_ID));
} |
public static FormattingTuple format(String messagePattern, Object arg) {
return arrayFormat(messagePattern, new Object[]{arg});
} | @Test
public void testExceptionIn_toString() {
Object o = new Object() {
@Override
public String toString() {
throw new IllegalStateException("a");
}
};
String result = MessageFormatter.format("Troublesome object {}", o).getMessage();
assertEquals("Troublesome object [FAILED toString()]", result);
} |
@Override
public Connection getConnection(Properties properties, String connectionString, SSLContextSettings sslContextSettings) {
try {
RestClientConfigurationBuilder builder = new RestClientConfigurationBuilder().withProperties(properties);
if (connectionString == null || connectionString.isEmpty() || "-".equals(connectionString)) {
builder.addServer().host("localhost").port(11222);
} else {
Matcher matcher = HOST_PORT.matcher(connectionString);
if (matcher.matches()) {
String host = matcher.group(1);
String port = matcher.group(2);
builder.addServer().host(host).port(port != null ? Integer.parseInt(port) : 11222);
} else {
URL url = new URL(connectionString);
if (!url.getProtocol().equals("http") && !url.getProtocol().equals("https")) {
throw new IllegalArgumentException();
}
int port = url.getPort();
builder.addServer().host(url.getHost()).port(port > 0 ? port : url.getDefaultPort());
String userInfo = url.getUserInfo();
if (userInfo != null) {
String[] split = userInfo.split(":");
builder.security().authentication().username(URLDecoder.decode(split[0], StandardCharsets.UTF_8));
if (split.length == 2) {
builder.security().authentication().password(URLDecoder.decode(split[1], StandardCharsets.UTF_8));
}
}
if (url.getProtocol().equals("https")) {
SslConfigurationBuilder ssl = builder.security().ssl().enable();
if (sslContextSettings != null) {
ssl.sslContext(sslContextSettings.getSslContext())
.trustManagers(sslContextSettings.getTrustManagers())
.hostnameVerifier(sslContextSettings.getHostnameVerifier());
}
}
}
}
builder.header("User-Agent", Version.getBrandName() + " CLI " + Version.getBrandVersion());
return new RestConnection(builder);
} catch (Throwable e) {
return null;
}
} | @Test
public void testUrlWithoutCredentials() {
RestConnector connector = new RestConnector();
RestConnection connection = (RestConnection) connector.getConnection(new Properties(),"http://localhost:11222", null);
RestClientConfigurationBuilder builder = connection.getBuilder();
RestClientConfiguration configuration = builder.build();
assertEquals(11222, configuration.servers().get(0).port());
assertEquals("localhost", configuration.servers().get(0).host());
assertFalse(configuration.security().authentication().enabled());
} |
public void refreshJobRetentionSettings() {
if (getServiceState() == STATE.STARTED) {
conf = createConf();
long maxHistoryAge = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);
hsManager.setMaxHistoryAge(maxHistoryAge);
if (futureHistoryCleaner != null) {
futureHistoryCleaner.cancel(false);
}
futureHistoryCleaner = null;
scheduleHistoryCleaner();
} else {
LOG.warn("Failed to execute refreshJobRetentionSettings : Job History service is not started");
}
} | @Test
public void testRefreshJobRetentionSettings() throws IOException,
InterruptedException {
String root = "mockfs://foo/";
String historyDoneDir = root + "mapred/history/done";
long now = System.currentTimeMillis();
long someTimeYesterday = now - (25l * 3600 * 1000);
long timeBefore200Secs = now - (200l * 1000);
// Get yesterday's date in YY/MM/DD format
String timestampComponent = JobHistoryUtils
.timestampDirectoryComponent(someTimeYesterday);
// Create a folder under yesterday's done dir
Path donePathYesterday = new Path(historyDoneDir, timestampComponent + "/"
+ "000000");
FileStatus dirCreatedYesterdayStatus = new FileStatus(0, true, 0, 0,
someTimeYesterday, donePathYesterday);
// Get today's date in YY/MM/DD format
timestampComponent = JobHistoryUtils
.timestampDirectoryComponent(timeBefore200Secs);
// Create a folder under today's done dir
Path donePathToday = new Path(historyDoneDir, timestampComponent + "/"
+ "000000");
FileStatus dirCreatedTodayStatus = new FileStatus(0, true, 0, 0,
timeBefore200Secs, donePathToday);
// Create a jhist file with yesterday's timestamp under yesterday's done dir
Path fileUnderYesterdayDir = new Path(donePathYesterday.toString(),
"job_1372363578825_0015-" + someTimeYesterday + "-user-Sleep+job-"
+ someTimeYesterday + "-1-1-SUCCEEDED-default.jhist");
FileStatus fileUnderYesterdayDirStatus = new FileStatus(10, false, 0, 0,
someTimeYesterday, fileUnderYesterdayDir);
// Create a jhist file with today's timestamp under today's done dir
Path fileUnderTodayDir = new Path(donePathYesterday.toString(),
"job_1372363578825_0016-" + timeBefore200Secs + "-user-Sleep+job-"
+ timeBefore200Secs + "-1-1-SUCCEEDED-default.jhist");
FileStatus fileUnderTodayDirStatus = new FileStatus(10, false, 0, 0,
timeBefore200Secs, fileUnderTodayDir);
HistoryFileManager historyManager = spy(new HistoryFileManager());
jobHistory = spy(new JobHistory());
List<FileStatus> fileStatusList = new LinkedList<FileStatus>();
fileStatusList.add(dirCreatedYesterdayStatus);
fileStatusList.add(dirCreatedTodayStatus);
// Make the initial delay of history job cleaner as 4 secs
doReturn(4).when(jobHistory).getInitDelaySecs();
doReturn(historyManager).when(jobHistory).createHistoryFileManager();
List<FileStatus> list1 = new LinkedList<FileStatus>();
list1.add(fileUnderYesterdayDirStatus);
doReturn(list1).when(historyManager).scanDirectoryForHistoryFiles(
eq(donePathYesterday), any(FileContext.class));
List<FileStatus> list2 = new LinkedList<FileStatus>();
list2.add(fileUnderTodayDirStatus);
doReturn(list2).when(historyManager).scanDirectoryForHistoryFiles(
eq(donePathToday), any(FileContext.class));
doReturn(fileStatusList).when(historyManager)
.getHistoryDirsForCleaning(Mockito.anyLong());
doReturn(true).when(historyManager).deleteDir(any(FileStatus.class));
JobListCache jobListCache = mock(JobListCache.class);
HistoryFileInfo fileInfo = mock(HistoryFileInfo.class);
doReturn(jobListCache).when(historyManager).createJobListCache();
when(jobListCache.get(any(JobId.class))).thenReturn(fileInfo);
doNothing().when(fileInfo).delete();
// Set job retention time to 24 hrs and cleaner interval to 2 secs
Configuration conf = new Configuration();
conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, 24l * 3600 * 1000);
conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS, 2 * 1000);
jobHistory.init(conf);
jobHistory.start();
assertEquals(2 * 1000l, jobHistory.getCleanerInterval());
// Only yesterday's jhist file should get deleted
verify(fileInfo, timeout(20000).times(1)).delete();
fileStatusList.remove(dirCreatedYesterdayStatus);
// Now reset job retention time to 10 secs
conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS, 10 * 1000);
// Set cleaner interval to 1 sec
conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS, 1 * 1000);
doReturn(conf).when(jobHistory).createConf();
// Do refresh job retention settings
jobHistory.refreshJobRetentionSettings();
// Cleaner interval should be updated
assertEquals(1 * 1000l, jobHistory.getCleanerInterval());
// Today's jhist file will also be deleted now since it falls below the
// retention threshold
verify(fileInfo, timeout(20000).times(2)).delete();
} |
@Override
public void setEvaluation(EvaluatedQualityGate g) {
// fail fast
requireNonNull(g);
checkState(evaluation == null, "QualityGateHolder evaluation can be initialized only once");
this.evaluation = g;
} | @Test
public void setEvaluation_throws_NPE_if_argument_is_null() {
assertThatThrownBy(() -> new QualityGateHolderImpl().setEvaluation(null))
.isInstanceOf(NullPointerException.class);
} |
@Override
public boolean test(final Path test) {
return this.equals(new DefaultPathPredicate(test));
} | @Test
public void testPredicateFileIdFile() {
final Path t = new Path("/f", EnumSet.of(Path.Type.file), new PathAttributes().withFileId("1"));
assertTrue(new DefaultPathPredicate(t).test(t));
assertTrue(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.file), new PathAttributes().withFileId("1"))));
assertFalse(new DefaultPathPredicate(t).test(new Path("/f", EnumSet.of(Path.Type.file), new PathAttributes().withFileId("2"))));
} |
public static long getContentLength(HttpMessage message) {
String value = message.headers().get(HttpHeaderNames.CONTENT_LENGTH);
if (value != null) {
return Long.parseLong(value);
}
// We know the content length if it's a Web Socket message even if
// Content-Length header is missing.
long webSocketContentLength = getWebSocketContentLength(message);
if (webSocketContentLength >= 0) {
return webSocketContentLength;
}
// Otherwise we don't.
throw new NumberFormatException("header not found: " + HttpHeaderNames.CONTENT_LENGTH);
} | @Test
public void testGetContentLengthIntDefaultValueThrowsNumberFormatException() {
final HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
message.headers().set(HttpHeaderNames.CONTENT_LENGTH, "bar");
try {
HttpUtil.getContentLength(message, 1);
fail();
} catch (final NumberFormatException e) {
// a number format exception is expected here
}
} |
public MemoryLRUCacheBytesIterator reverseRange(final String namespace, final Bytes from, final Bytes to) {
final NamedCache cache = getCache(namespace);
if (cache == null) {
return new MemoryLRUCacheBytesIterator(Collections.emptyIterator(), new NamedCache(namespace, this.metrics));
}
return new MemoryLRUCacheBytesIterator(cache.reverseKeyRange(from, to), cache);
} | @Test
public void shouldPeekNextKeyReverseRange() {
final ThreadCache cache = setupThreadCache(1, 1, 10000L, true);
final Bytes theByte = Bytes.wrap(new byte[]{1});
final ThreadCache.MemoryLRUCacheBytesIterator iterator = cache.reverseRange(namespace, Bytes.wrap(new byte[]{0}), theByte);
assertThat(iterator.peekNextKey(), is(theByte));
assertThat(iterator.peekNextKey(), is(theByte));
} |
@Override
public void updateSensitiveWord(SensitiveWordSaveVO updateReqVO) {
// 校验唯一性
validateSensitiveWordExists(updateReqVO.getId());
validateSensitiveWordNameUnique(updateReqVO.getId(), updateReqVO.getName());
// 更新
SensitiveWordDO updateObj = BeanUtils.toBean(updateReqVO, SensitiveWordDO.class);
sensitiveWordMapper.updateById(updateObj);
// 刷新缓存
initLocalCache();
} | @Test
public void testUpdateSensitiveWord_success() {
// mock 数据
SensitiveWordDO dbSensitiveWord = randomPojo(SensitiveWordDO.class);
sensitiveWordMapper.insert(dbSensitiveWord);// @Sql: 先插入出一条存在的数据
// 准备参数
SensitiveWordSaveVO reqVO = randomPojo(SensitiveWordSaveVO.class, o -> {
o.setId(dbSensitiveWord.getId()); // 设置更新的 ID
});
// 调用
sensitiveWordService.updateSensitiveWord(reqVO);
// 校验是否更新正确
SensitiveWordDO sensitiveWord = sensitiveWordMapper.selectById(reqVO.getId()); // 获取最新的
assertPojoEquals(reqVO, sensitiveWord);
} |
public static Thread newThread(Runnable runnable, String name, boolean isDaemon) {
Thread t = new Thread(null, runnable, name);
t.setDaemon(isDaemon);
return t;
} | @Test
public void testNewThread() {
// Setup
final Runnable runnable = null;
// Run the test
final Thread result = ThreadUtil.newThread(runnable, "name", false);
// Verify the results
Assert.assertNotNull(result);
} |
public MaintenanceAssociation decode(ObjectNode json, CodecContext context, int mdNameLen) {
if (json == null || !json.isObject()) {
return null;
}
JsonNode maNode = json.get(MA);
String maName = nullIsIllegal(maNode.get(MA_NAME), "maName is required").asText();
String maNameType = MaIdShort.MaIdType.CHARACTERSTRING.name();
if (maNode.get(MA_NAME_TYPE) != null) {
maNameType = maNode.get(MA_NAME_TYPE).asText();
}
try {
MaIdShort maId = MdMaNameUtil.parseMaName(maNameType, maName);
MaBuilder builder =
DefaultMaintenanceAssociation.builder(maId, mdNameLen);
JsonNode maNumericIdNode = maNode.get(MA_NUMERIC_ID);
if (maNumericIdNode != null) {
short mdNumericId = (short) maNumericIdNode.asInt();
builder = builder.maNumericId(mdNumericId);
}
if (maNode.get(CCM_INTERVAL) != null) {
builder.ccmInterval(CcmInterval.valueOf(maNode.get(CCM_INTERVAL).asText()));
}
List<Component> componentList = (new ComponentCodec()).decode((ArrayNode)
nullIsIllegal(maNode.get(COMPONENT_LIST),
"component-list is required"), context);
for (Component component:componentList) {
builder = builder.addToComponentList(component);
}
JsonNode rmepListJson = maNode.get(RMEP_LIST);
if (rmepListJson != null) {
List<MepId> remoteMeps = (new RMepCodec()).decode(
(ArrayNode) rmepListJson, context);
for (MepId remoteMep:remoteMeps) {
builder = builder.addToRemoteMepIdList(remoteMep);
}
}
return builder.build();
} catch (CfmConfigException e) {
throw new IllegalArgumentException(e);
}
} | @Test
public void testDecodeMa3() throws IOException {
String mdString = "{\"ma\": { \"maName\": 12467," +
"\"maNameType\": \"TWOOCTET\"," +
"\"component-list\": [], " +
"\"rmep-list\": [], " +
"\"maNumericId\": 3}}";
InputStream input = new ByteArrayInputStream(
mdString.getBytes(StandardCharsets.UTF_8));
JsonNode cfg = mapper.readTree(input);
MaintenanceAssociation maDecode3 = ((MaintenanceAssociationCodec) context
.codec(MaintenanceAssociation.class))
.decode((ObjectNode) cfg, context, 10);
assertEquals(MAID3_OCTET, maDecode3.maId());
} |
UUID getRandomIdFromList(ArrayList<Member> memberList) {
if (memberList.isEmpty()) {
throw new JetException("Cluster member list is empty");
}
Collections.shuffle(memberList);
Member member = memberList.get(0);
return member.getUuid();
} | @Test
public void testGetRandomIdFromList() throws UnknownHostException {
UUID uuid = UUID.randomUUID();
MemberImpl member = new MemberImpl(
new Address("hostname", InetAddress.getByName("192.168.8.112"), 0),
new MemberVersion(0, 0, 0),
true,
uuid);
ArrayList<Member> memberList = new ArrayList<>();
memberList.add(member);
SubmitJobTargetMemberFinder submitJobTargetMemberFinder = new SubmitJobTargetMemberFinder();
UUID result = submitJobTargetMemberFinder.getRandomIdFromList(memberList);
Assert.assertEquals(uuid, result);
} |
public static <T> T newInstanceOrNull(Class<? extends T> clazz, Object... params) {
Constructor<T> constructor = selectMatchingConstructor(clazz, params);
if (constructor == null) {
return null;
}
try {
return constructor.newInstance(params);
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
return null;
}
} | @Test
public void newInstanceOrNull_createInstanceWithSubclass() {
ClassWithObjectConstructor instance = InstantiationUtils.newInstanceOrNull(ClassWithObjectConstructor.class,
"foo");
assertNotNull(instance);
} |
@Override
public void stopTrackThread() {
} | @Test
public void stopTrackThread() {
mSensorsAPI.stopTrackThread();
} |
public static URL urlForResource(String location)
throws MalformedURLException, FileNotFoundException {
if (location == null) {
throw new NullPointerException("location is required");
}
URL url = null;
if (!location.matches(SCHEME_PATTERN)) {
url = Loader.getResourceBySelfClassLoader(location);
}
else if (location.startsWith(CLASSPATH_SCHEME)) {
String path = location.substring(CLASSPATH_SCHEME.length());
if (path.startsWith("/")) {
path = path.substring(1);
}
if (path.length() == 0) {
throw new MalformedURLException("path is required");
}
url = Loader.getResourceBySelfClassLoader(path);
}
else {
url = new URL(location);
}
if (url == null) {
throw new FileNotFoundException(location);
}
return url;
} | @Test
public void testImplicitClasspathUrl() throws Exception {
URL url = LocationUtil.urlForResource(TEST_CLASSPATH_RESOURCE);
validateResource(url);
} |
public static Optional<Expression> convert(
org.apache.flink.table.expressions.Expression flinkExpression) {
if (!(flinkExpression instanceof CallExpression)) {
return Optional.empty();
}
CallExpression call = (CallExpression) flinkExpression;
Operation op = FILTERS.get(call.getFunctionDefinition());
if (op != null) {
switch (op) {
case IS_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::isNull);
case NOT_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::notNull);
case LT:
return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
case LT_EQ:
return convertFieldAndLiteral(
Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
case GT:
return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
case GT_EQ:
return convertFieldAndLiteral(
Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
case EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.isNaN(ref);
} else {
return Expressions.equal(ref, lit);
}
},
call);
case NOT_EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.notNaN(ref);
} else {
return Expressions.notEqual(ref, lit);
}
},
call);
case NOT:
return onlyChildAs(call, CallExpression.class)
.flatMap(FlinkFilters::convert)
.map(Expressions::not);
case AND:
return convertLogicExpression(Expressions::and, call);
case OR:
return convertLogicExpression(Expressions::or, call);
case STARTS_WITH:
return convertLike(call);
}
}
return Optional.empty();
} | @Test
public void testLessThan() {
UnboundPredicate<Integer> expected =
org.apache.iceberg.expressions.Expressions.lessThan("field1", 1);
Optional<org.apache.iceberg.expressions.Expression> actual =
FlinkFilters.convert(resolve(Expressions.$("field1").isLess(Expressions.lit(1))));
assertThat(actual).isPresent();
assertPredicatesMatch(expected, actual.get());
Optional<org.apache.iceberg.expressions.Expression> actual1 =
FlinkFilters.convert(resolve(Expressions.lit(1).isGreater(Expressions.$("field1"))));
assertThat(actual1).isPresent();
assertPredicatesMatch(expected, actual1.get());
} |
@Override
public void lock() {
try {
lock(-1, null, false);
} catch (InterruptedException e) {
throw new IllegalStateException();
}
} | @Test
public void testConcurrencyLoop_MultiInstance() throws InterruptedException {
final int iterations = 100;
final AtomicInteger lockedCounter = new AtomicInteger();
testMultiInstanceConcurrency(16, r -> {
for (int i = 0; i < iterations; i++) {
r.getLock("testConcurrency_MultiInstance1").lock();
try {
Thread.sleep(10);
} catch (InterruptedException e) {
e.printStackTrace();
}
lockedCounter.incrementAndGet();
r.getLock("testConcurrency_MultiInstance1").unlock();
}
});
Assertions.assertEquals(16 * iterations, lockedCounter.get());
} |
@Override
public List<RoleDO> getRoleListFromCache(Collection<Long> ids) {
if (CollectionUtil.isEmpty(ids)) {
return Collections.emptyList();
}
// 这里采用 for 循环从缓存中获取,主要考虑 Spring CacheManager 无法批量操作的问题
RoleServiceImpl self = getSelf();
return CollectionUtils.convertList(ids, self::getRoleFromCache);
} | @Test
public void testGetRoleListFromCache() {
try (MockedStatic<SpringUtil> springUtilMockedStatic = mockStatic(SpringUtil.class)) {
springUtilMockedStatic.when(() -> SpringUtil.getBean(eq(RoleServiceImpl.class)))
.thenReturn(roleService);
// mock 数据
RoleDO dbRole = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
roleMapper.insert(dbRole);
// 测试 id 不匹配
roleMapper.insert(cloneIgnoreId(dbRole, o -> {}));
// 准备参数
Collection<Long> ids = singleton(dbRole.getId());
// 调用
List<RoleDO> list = roleService.getRoleListFromCache(ids);
// 断言
assertEquals(1, list.size());
assertPojoEquals(dbRole, list.get(0));
}
} |
@VisibleForTesting
public ProcessContinuation run(
RestrictionTracker<OffsetRange, Long> tracker,
OutputReceiver<PartitionRecord> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator,
InitialPipelineState initialPipelineState)
throws Exception {
LOG.debug("DNP: Watermark: " + watermarkEstimator.getState());
LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom());
if (tracker.currentRestriction().getFrom() == 0L) {
if (!tracker.tryClaim(0L)) {
LOG.error(
"Could not claim initial DetectNewPartition restriction. No partitions are outputted.");
return ProcessContinuation.stop();
}
watermarkEstimator.setWatermark(initialPipelineState.getStartTime());
if (initialPipelineState.isResume()) {
resumeFromPreviousPipelineAction.run(receiver);
} else {
generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime());
}
return ProcessContinuation.resume();
}
// Create a new partition reconciler every run to reset the state each time.
partitionReconciler = new PartitionReconciler(metadataTableDao, metrics);
orphanedMetadataCleaner = new OrphanedMetadataCleaner();
// Calculating the new value of watermark is a resource intensive process. We have to do a full
// scan of the metadata table and then ensure we're not missing partitions and then calculate
// the low watermark. This is usually a fairly fast process even with thousands of partitions.
// However, sometimes this may take so long that the runner checkpoints before the watermark is
// calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to
// restart, wasting the resources spent calculating the watermark. On restart, we will try to
// calculate the watermark again. The problem causing the slow watermark calculation can persist
// leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate
// the watermark after successful tryClaim. Then we write to the metadata table the new
// watermark. On the start of each run we read the watermark and update the DoFn's watermark.
DetectNewPartitionsState detectNewPartitionsState =
metadataTableDao.readDetectNewPartitionsState();
if (detectNewPartitionsState != null) {
watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark());
}
// Terminate if endTime <= watermark that means all partitions have read up to or beyond
// watermark. We no longer need to manage splits and merges, we can terminate.
if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) {
tracker.tryClaim(tracker.currentRestriction().getTo());
return ProcessContinuation.stop();
}
if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) {
LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction());
return ProcessContinuation.stop();
}
// Read StreamPartitions to calculate watermark.
List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null;
if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) {
streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark();
}
// Process NewPartitions and track the ones successfully outputted.
List<NewPartition> newPartitions = metadataTableDao.readNewPartitions();
List<ByteStringRange> outputtedNewPartitions = new ArrayList<>();
for (NewPartition newPartition : newPartitions) {
if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) {
outputtedNewPartitions.add(newPartition.getPartition());
} else if (streamPartitionsWithWatermark != null) {
// streamPartitionsWithWatermark is not null on runs that we update watermark. We only run
// reconciliation when we update watermark. Only add incompleteNewPartitions if
// reconciliation is being run
partitionReconciler.addIncompleteNewPartitions(newPartition);
orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition);
}
}
// Process the watermark using read StreamPartitions and NewPartitions.
if (streamPartitionsWithWatermark != null) {
Optional<Instant> maybeWatermark =
getNewWatermark(streamPartitionsWithWatermark, newPartitions);
maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark);
// Only start reconciling after the pipeline has been running for a while.
if (tracker.currentRestriction().getFrom() > 50) {
// Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being
// streamed. This isn't perfect because there may be partitions moving between
// StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not
// include NewPartitions marked as deleted from a previous DNP run not yet processed by
// RCSP.
List<ByteStringRange> existingPartitions =
streamPartitionsWithWatermark.stream()
.map(StreamPartitionWithWatermark::getPartition)
.collect(Collectors.toList());
existingPartitions.addAll(outputtedNewPartitions);
List<ByteStringRange> missingStreamPartitions =
getMissingPartitionsFromEntireKeySpace(existingPartitions);
orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions);
partitionReconciler.addMissingPartitions(missingStreamPartitions);
processReconcilerPartitions(
receiver, watermarkEstimator, initialPipelineState.getStartTime());
cleanUpOrphanedMetadata();
}
}
return ProcessContinuation.resume().withResumeDelay(Duration.millis(100));
} | @Test
public void testAdvanceWatermarkWithNewPartitions() throws Exception {
// We advance watermark on every 2 restriction tracker advancement
OffsetRange offsetRange = new OffsetRange(2, Long.MAX_VALUE);
when(tracker.currentRestriction()).thenReturn(offsetRange);
when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
when(tracker.tryClaim(offsetRange.getTo())).thenReturn(true);
assertEquals(startTime, watermarkEstimator.currentWatermark());
assertNull(metadataTableDao.readDetectNewPartitionsState());
// Write 2 partitions to the table that DO NOT cover the entire keyspace.
ByteStringRange partition1 = ByteStringRange.create("", "b");
Instant watermark1 = endTime.plus(Duration.millis(100));
PartitionRecord partitionRecord1 =
new PartitionRecord(
partition1,
watermark1,
UniqueIdGenerator.getNextId(),
watermark1,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(partitionRecord1);
ByteStringRange partition2 = ByteStringRange.create("b", "c");
Instant watermark2 = endTime.plus(Duration.millis(1));
PartitionRecord partitionRecord2 =
new PartitionRecord(
partition2,
watermark2,
UniqueIdGenerator.getNextId(),
watermark2,
Collections.emptyList(),
null);
metadataTableDao.lockAndRecordPartition(partitionRecord2);
ByteStringRange partition3 = ByteStringRange.create("c", "");
ChangeStreamContinuationToken token = ChangeStreamContinuationToken.create(partition3, "token");
NewPartition newPartition =
new NewPartition(partition3, Collections.singletonList(token), watermark2);
metadataTableDao.writeNewPartition(newPartition);
// Updating watermark does not affect the result of this run.
assertEquals(
DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
verify(tracker, times(1)).tryClaim(offsetRange.getFrom());
// Because the StreamPartition and NewPartition cover the entire keyspace, the watermark should
// have advanced. Also note the watermark is watermark2 which is the lowest watermark. Watermark
// estimator isn't updated because we update the watermark estimator on the start of the run. We
// do update the metadata table with the new watermark value.
assertEquals(startTime, watermarkEstimator.currentWatermark());
assertEquals(watermark2, metadataTableDao.readDetectNewPartitionsState().getWatermark());
// On the 2nd run, watermark estimator is updated which is beyond endTime and terminates.
assertEquals(
DoFn.ProcessContinuation.stop(),
action.run(
tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
verify(tracker, times(1)).tryClaim(offsetRange.getTo());
assertEquals(watermark2, watermarkEstimator.currentWatermark());
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.