focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
try {
final AttributedList<Path> children = new AttributedList<>();
String page = null;
do {
final TeamDriveList list = session.getClient().teamdrives().list()
.setPageToken(page)
.setPageSize(pagesize)
.execute();
for(TeamDrive f : list.getTeamDrives()) {
final Path child = new Path(directory, f.getName(), EnumSet.of(Path.Type.directory, Path.Type.volume),
new PathAttributes().withFileId(f.getId()));
children.add(child);
}
listener.chunk(directory, children);
page = list.getNextPageToken();
if(log.isDebugEnabled()) {
log.debug(String.format("Continue with next page token %s", page));
}
}
while(page != null);
return children;
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Listing directory failed", e, directory);
}
} | @Test
public void list() throws Exception {
final AttributedList<Path> list = new DriveTeamDrivesListService(session, new DriveFileIdProvider(session)).list(
DriveHomeFinderService.SHARED_DRIVES_NAME, new DisabledListProgressListener());
assertNotSame(AttributedList.emptyList(), list);
} |
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay readerWay, IntsRef relationFlags) {
if (readerWay.hasTag("hazmat:adr_tunnel_cat", TUNNEL_CATEGORY_NAMES)) {
HazmatTunnel code = HazmatTunnel.valueOf(readerWay.getTag("hazmat:adr_tunnel_cat"));
hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, code);
} else if (readerWay.hasTag("hazmat:tunnel_cat", TUNNEL_CATEGORY_NAMES)) {
HazmatTunnel code = HazmatTunnel.valueOf(readerWay.getTag("hazmat:tunnel_cat"));
hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, code);
} else if (readerWay.hasTag("tunnel", "yes")) {
HazmatTunnel[] codes = HazmatTunnel.values();
for (int i = codes.length - 1; i >= 0; i--) {
if (readerWay.hasTag("hazmat:" + codes[i].name(), "no")) {
hazTunnelEnc.setEnum(false, edgeId, edgeIntAccess, codes[i]);
break;
}
}
}
} | @Test
public void testHazmatSubtags() {
EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
int edgeId = 0;
ReaderWay readerWay = new ReaderWay(1);
readerWay.setTag("tunnel", "yes");
readerWay.setTag("hazmat:A", "no");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.A, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("tunnel", "yes");
readerWay.setTag("hazmat:B", "no");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.B, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("tunnel", "yes");
readerWay.setTag("hazmat:C", "no");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.C, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("tunnel", "yes");
readerWay.setTag("hazmat:D", "no");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.D, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
edgeIntAccess = new ArrayEdgeIntAccess(1);
readerWay = new ReaderWay(1);
readerWay.setTag("tunnel", "yes");
readerWay.setTag("hazmat:E", "no");
parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
assertEquals(HazmatTunnel.E, hazTunnelEnc.getEnum(false, edgeId, edgeIntAccess));
} |
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
if (!(statement.getStatement() instanceof CreateSource)
&& !(statement.getStatement() instanceof CreateAsSelect)) {
return statement;
}
try {
if (statement.getStatement() instanceof CreateSource) {
final ConfiguredStatement<CreateSource> createStatement =
(ConfiguredStatement<CreateSource>) statement;
return (ConfiguredStatement<T>) forCreateStatement(createStatement).orElse(createStatement);
} else {
final ConfiguredStatement<CreateAsSelect> createStatement =
(ConfiguredStatement<CreateAsSelect>) statement;
return (ConfiguredStatement<T>) forCreateAsStatement(createStatement).orElse(
createStatement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
} | @Test
public void shouldThrowIfCsasKeyTableElementsNotCompatibleExtraKey() {
// Given:
givenFormatsAndProps("protobuf", null,
ImmutableMap.of("KEY_SCHEMA_ID", new IntegerLiteral(42)));
givenDDLSchemaAndFormats(LOGICAL_SCHEMA_EXTRA_KEY, "protobuf", "avro",
SerdeFeature.WRAP_SINGLES, SerdeFeature.UNWRAP_SINGLES);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> injector.inject(csasStatement)
);
// Then:
assertThat(e.getMessage(),
containsString("The following key columns are changed, missing or reordered: "
+ "[`key1` STRING KEY]. Schema from schema registry is [`key` STRING KEY]"));
} |
// Maps any uncaught RuntimeException to a 404 response carrying a CustomError body.
// NOTE(review): returning NOT_FOUND for every RuntimeException is unusual —
// 500 INTERNAL_SERVER_ERROR is the conventional fallback; confirm this is intentional.
@ExceptionHandler(RuntimeException.class)
protected ResponseEntity<?> handleRuntimeException(final RuntimeException runtimeException) {
CustomError customError = CustomError.builder()
.httpStatus(HttpStatus.NOT_FOUND)
.header(CustomError.Header.API_ERROR.getName())
.message(runtimeException.getMessage())
.build();
return new ResponseEntity<>(customError, HttpStatus.NOT_FOUND);
} | @Test
void givenRuntimeException_whenHandleRuntimeException_thenRespondWithNotFound() {
// Given
RuntimeException ex = new RuntimeException("Runtime exception message");
CustomError expectedError = CustomError.builder()
.httpStatus(HttpStatus.NOT_FOUND)
.header(CustomError.Header.API_ERROR.getName())
.message("Runtime exception message")
.build();
// When
ResponseEntity<?> responseEntity = globalExceptionHandler.handleRuntimeException(ex);
// Then
assertThat(responseEntity.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
CustomError actualError = (CustomError) responseEntity.getBody();
checkCustomError(expectedError, actualError);
} |
// Submits the given task to the shared DUMP_EXECUTOR for asynchronous execution;
// the caller does not wait for completion.
public static void executeEmbeddedDump(Runnable runnable) {
DUMP_EXECUTOR.execute(runnable);
} | @Test
void testExecuteEmbeddedDump() throws InterruptedException {
AtomicInteger atomicInteger = new AtomicInteger();
Runnable runnable = atomicInteger::incrementAndGet;
PersistenceExecutor.executeEmbeddedDump(runnable);
// NOTE(review): a fixed 20ms sleep makes this test timing-dependent and
// potentially flaky under load; a latch/await would be more robust.
TimeUnit.MILLISECONDS.sleep(20);
assertEquals(1, atomicInteger.get());
} |
// Attaches a user-defined property to this message after validating that the
// name is not reserved by the system and that neither name nor value is blank.
public void putUserProperty(final String name, final String value) {
// Reject property names reserved for internal/system use.
if (MessageConst.STRING_HASH_SET.contains(name)) {
throw new RuntimeException(String.format(
"The Property<%s> is used by system, input another please", name));
}
// NOTE(review): the blank/null validation runs AFTER the reserved-name check
// and throws IllegalArgumentException, while the check above throws a generic
// RuntimeException — the inconsistency looks unintentional; confirm before changing.
if (value == null || value.trim().isEmpty()
|| name == null || name.trim().isEmpty()) {
throw new IllegalArgumentException(
"The name or value of property can not be null or blank string!"
);
}
this.putProperty(name, value);
} | @Test
public void putUserProperty() throws Exception {
Message m = new Message();
m.putUserProperty("prop1", "val1");
Assert.assertEquals("val1", m.getUserProperty("prop1"));
} |
// Validates the given subnet (non-null, with id, network id and CIDR all
// present) and persists it to the network store, logging the creation.
@Override
public void createSubnet(Subnet osSubnet) {
checkNotNull(osSubnet, ERR_NULL_SUBNET);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getId()), ERR_NULL_SUBNET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getNetworkId()), ERR_NULL_SUBNET_NET_ID);
checkArgument(!Strings.isNullOrEmpty(osSubnet.getCidr()), ERR_NULL_SUBNET_CIDR);
osNetworkStore.createSubnet(osSubnet);
log.info(String.format(MSG_SUBNET, osSubnet.getCidr(), MSG_CREATED));
} | @Test(expected = IllegalArgumentException.class)
public void testCreateSubnetWithNullCidr() {
// A subnet built without a CIDR must be rejected by the precondition check.
final Subnet testSubnet = NeutronSubnet.builder()
.networkId(NETWORK_ID)
.build();
testSubnet.setId(SUBNET_ID);
target.createSubnet(testSubnet);
} |
// Writes the float as its IEEE-754 single-precision bit pattern via writeInt.
@Override
public void writeFloat(final float v) throws IOException {
writeInt(Float.floatToIntBits(v));
} | @Test
public void testWriteFloatForPositionV() throws Exception {
float v = 1.1f;
out.writeFloat(1, v);
int expected = Float.floatToIntBits(v);
int actual = Bits.readIntB(out.buffer, 1);
// NOTE(review): JUnit's convention is assertEquals(expected, actual); the
// arguments here are swapped, which only affects the failure message.
assertEquals(actual, expected);
} |
protected BigDecimal convertDecimal(Object value, DecimalType type) {
BigDecimal bigDecimal;
if (value instanceof BigDecimal) {
bigDecimal = (BigDecimal) value;
} else if (value instanceof Number) {
Number num = (Number) value;
Double dbl = num.doubleValue();
if (dbl.equals(Math.floor(dbl))) {
bigDecimal = BigDecimal.valueOf(num.longValue());
} else {
bigDecimal = BigDecimal.valueOf(dbl);
}
} else if (value instanceof String) {
bigDecimal = new BigDecimal((String) value);
} else {
throw new IllegalArgumentException(
"Cannot convert to BigDecimal: " + value.getClass().getName());
}
return bigDecimal.setScale(type.scale(), RoundingMode.HALF_UP);
} | @Test
public void testDecimalConversion() {
Table table = mock(Table.class);
when(table.schema()).thenReturn(SIMPLE_SCHEMA);
RecordConverter converter = new RecordConverter(table, config);
BigDecimal expected = new BigDecimal("123.45");
ImmutableList.of("123.45", 123.45d, expected)
.forEach(
input -> {
BigDecimal decimal = converter.convertDecimal(input, DecimalType.of(10, 2));
assertThat(decimal).isEqualTo(expected);
});
BigDecimal expected2 = new BigDecimal(123);
ImmutableList.of("123", 123, expected2)
.forEach(
input -> {
BigDecimal decimal = converter.convertDecimal(input, DecimalType.of(10, 0));
assertThat(decimal).isEqualTo(expected2);
});
} |
public static void createTopics(
Logger log, String bootstrapServers, Map<String, String> commonClientConf,
Map<String, String> adminClientConf,
Map<String, NewTopic> topics, boolean failOnExisting) throws Throwable {
// this method wraps the call to createTopics() that takes admin client, so that we can
// unit test the functionality with MockAdminClient. The exception is caught and
// re-thrown so that admin client is closed when the method returns.
try (Admin adminClient
= createAdminClient(bootstrapServers, commonClientConf, adminClientConf)) {
createTopics(log, adminClient, topics, failOnExisting);
} catch (Exception e) {
log.warn("Failed to create or verify topics {}", topics, e);
throw e;
}
} | @Test
public void testCreateNonExistingTopicsWithZeroTopicsDoesNothing() throws Throwable {
WorkerUtils.createTopics(
log, adminClient, Collections.emptyMap(), false);
assertEquals(0, adminClient.listTopics().names().get().size());
} |
// Re-arms the session, poll and heartbeat timers from the current time, so any
// previously-expired deadlines are cleared.
void resetTimeouts() {
update(time.milliseconds());
sessionTimer.reset(rebalanceConfig.sessionTimeoutMs);
pollTimer.reset(maxPollIntervalMs);
heartbeatTimer.reset(rebalanceConfig.heartbeatIntervalMs);
} | @Test
public void testResetTimeouts() {
// Let every timer expire first, then verify resetTimeouts() clears all of them.
time.sleep(maxPollIntervalMs);
assertTrue(heartbeat.sessionTimeoutExpired(time.milliseconds()));
assertEquals(0, heartbeat.timeToNextHeartbeat(time.milliseconds()));
assertTrue(heartbeat.pollTimeoutExpired(time.milliseconds()));
heartbeat.resetTimeouts();
assertFalse(heartbeat.sessionTimeoutExpired(time.milliseconds()));
assertEquals(heartbeatIntervalMs, heartbeat.timeToNextHeartbeat(time.milliseconds()));
assertFalse(heartbeat.pollTimeoutExpired(time.milliseconds()));
} |
// Convenience overload: deletes the named service under the default group.
@Override
public boolean deleteService(String serviceName) throws NacosException {
return deleteService(serviceName, Constants.DEFAULT_GROUP);
} | @Test
void testDeleteService2() throws NacosException {
//given
String serviceName = "service1";
String groupName = "groupName";
//when
nacosNamingMaintainService.deleteService(serviceName, groupName);
//then
verify(serverProxy, times(1)).deleteService(serviceName, groupName);
} |
// Entry point of the parser: E() parses the top-level expression and returns
// the root Node of the resulting tree.
public Node parse() throws ScanException {
return E();
} | @Test
public void empty() {
try {
// NOTE(review): raw Parser construction (no type argument) — raw-type warning.
Parser<Object> p = new Parser("");
p.parse();
fail("");
} catch (ScanException e) {
// expected: empty input is not a parsable expression
}
} |
// Creates an empty Struct for the given schema; rejects non-STRUCT schemas and
// pre-sizes the value slots to the schema's field count (all initially null).
public Struct(Schema schema) {
if (schema.type() != Schema.Type.STRUCT)
throw new DataException("Not a struct schema: " + schema);
this.schema = schema;
this.values = new Object[schema.fields().size()];
} | @Test
public void testValidateStructWithNullValue() {
// A freshly-constructed Struct has null for every field, so validate() must
// fail on the first required field.
Schema schema = SchemaBuilder.struct()
.field("one", Schema.STRING_SCHEMA)
.field("two", Schema.STRING_SCHEMA)
.field("three", Schema.STRING_SCHEMA)
.build();
Struct struct = new Struct(schema);
Exception e = assertThrows(DataException.class, struct::validate);
assertEquals("Invalid value: null used for required field: \"one\", schema type: STRING",
e.getMessage());
} |
List<DataflowPackage> stageClasspathElements(
Collection<StagedFile> classpathElements, String stagingPath, CreateOptions createOptions) {
return stageClasspathElements(classpathElements, stagingPath, DEFAULT_SLEEPER, createOptions);
} | @Test
public void testPackageUploadWithFileSucceeds() throws Exception {
Pipe pipe = Pipe.open();
String contents = "This is a test!";
File tmpFile = makeFileWithContents("file.txt", contents);
when(mockGcsUtil.getObjects(anyListOf(GcsPath.class)))
.thenReturn(
ImmutableList.of(
StorageObjectOrIOException.create(new FileNotFoundException("some/path"))));
when(mockGcsUtil.create(any(GcsPath.class), any(GcsUtil.CreateOptions.class)))
.thenReturn(pipe.sink());
List<DataflowPackage> targets =
defaultPackageUtil.stageClasspathElements(
ImmutableList.of(makeStagedFile(tmpFile.getAbsolutePath())),
STAGING_PATH,
createOptions);
DataflowPackage target = Iterables.getOnlyElement(targets);
verify(mockGcsUtil).getObjects(anyListOf(GcsPath.class));
verify(mockGcsUtil).create(any(GcsPath.class), any(GcsUtil.CreateOptions.class));
verifyNoMoreInteractions(mockGcsUtil);
assertThat(target.getName(), endsWith(".txt"));
assertThat(target.getLocation(), equalTo(STAGING_PATH + target.getName()));
assertThat(
new LineReader(Channels.newReader(pipe.source(), StandardCharsets.UTF_8.name())).readLine(),
equalTo(contents));
} |
// No-op registry behavior: every meter lookup returns the shared NoopMeter
// singleton, so metric calls are accepted but discarded.
@Override
public Meter meter(String name) {
return NoopMeter.INSTANCE;
} | @Test
public void accessingACustomMeterRegistersAndReusesIt() {
final MetricRegistry.MetricSupplier<Meter> supplier = () -> meter;
final Meter meter1 = registry.meter("thing", supplier);
final Meter meter2 = registry.meter("thing", supplier);
assertThat(meter1).isExactlyInstanceOf(NoopMetricRegistry.NoopMeter.class);
assertThat(meter2).isExactlyInstanceOf(NoopMetricRegistry.NoopMeter.class);
assertThat(meter1).isSameAs(meter2);
verify(listener, never()).onMeterAdded("thing", meter1);
} |
@Override
public LoggingConfiguration getConfiguration(final Path file) throws BackgroundException {
final Path bucket = containerService.getContainer(file);
if(bucket.isRoot()) {
return LoggingConfiguration.empty();
}
try {
final Storage.Buckets.Get request = session.getClient().buckets().get(bucket.getName());
if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
request.setUserProject(session.getHost().getCredentials().getUsername());
}
final Bucket.Logging status = request.execute().getLogging();
if(null == status) {
return LoggingConfiguration.empty();
}
final LoggingConfiguration configuration = new LoggingConfiguration(
status.getLogObjectPrefix() != null, status.getLogBucket());
try {
configuration.setContainers(new GoogleStorageBucketListService(session).list(
new Path(String.valueOf(Path.DELIMITER), EnumSet.of(Path.Type.volume, Path.Type.directory)),
new DisabledListProgressListener()).toList());
}
catch(AccessDeniedException | InteroperabilityException e) {
log.warn(String.format("Failure listing buckets. %s", e.getMessage()));
}
return configuration;
}
catch(IOException e) {
try {
throw new GoogleStorageExceptionMappingService().map("Failure to read attributes of {0}", e, bucket);
}
catch(AccessDeniedException | InteroperabilityException l) {
log.warn(String.format("Missing permission to read logging configuration for %s %s", bucket.getName(), e.getMessage()));
return LoggingConfiguration.empty();
}
}
} | @Test
public void testGetConfiguration() throws Exception {
final GoogleStorageLoggingFeature feature = new GoogleStorageLoggingFeature(session);
final Path bucket = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
feature.setConfiguration(bucket, new LoggingConfiguration(true, "cyberduck-test-eu"));
final LoggingConfiguration configuration = feature.getConfiguration(bucket);
assertNotNull(configuration);
assertEquals("cyberduck-test-eu", configuration.getLoggingTarget());
assertTrue(configuration.isEnabled());
} |
@Deprecated
public String javaUnbox(Schema schema) {
return javaUnbox(schema, false);
} | @Test
void javaUnbox() throws Exception {
SpecificCompiler compiler = createCompiler();
compiler.setEnableDecimalLogicalType(false);
Schema intSchema = Schema.create(Schema.Type.INT);
Schema longSchema = Schema.create(Schema.Type.LONG);
Schema floatSchema = Schema.create(Schema.Type.FLOAT);
Schema doubleSchema = Schema.create(Schema.Type.DOUBLE);
Schema boolSchema = Schema.create(Schema.Type.BOOLEAN);
assertEquals("int", compiler.javaUnbox(intSchema, false), "Should use int for Type.INT");
assertEquals("long", compiler.javaUnbox(longSchema, false), "Should use long for Type.LONG");
assertEquals("float", compiler.javaUnbox(floatSchema, false), "Should use float for Type.FLOAT");
assertEquals("double", compiler.javaUnbox(doubleSchema, false), "Should use double for Type.DOUBLE");
assertEquals("boolean", compiler.javaUnbox(boolSchema, false), "Should use boolean for Type.BOOLEAN");
// see AVRO-2569
Schema nullSchema = Schema.create(Schema.Type.NULL);
assertEquals("void", compiler.javaUnbox(nullSchema, true), "Should use void for Type.NULL");
Schema dateSchema = LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT));
Schema timeSchema = LogicalTypes.timeMillis().addToSchema(Schema.create(Schema.Type.INT));
Schema timestampSchema = LogicalTypes.timestampMillis().addToSchema(Schema.create(Schema.Type.LONG));
// Date/time types should always use upper level java classes, even though
// their underlying representations are primitive types
assertEquals("java.time.LocalDate", compiler.javaUnbox(dateSchema, false), "Should use LocalDate for date type");
assertEquals("java.time.LocalTime", compiler.javaUnbox(timeSchema, false),
"Should use LocalTime for time-millis type");
assertEquals("java.time.Instant", compiler.javaUnbox(timestampSchema, false),
"Should use DateTime for timestamp-millis type");
} |
// Delegates the state-cell lookup for the given namespace/tag to this work
// item's state table, using a null StateContext.
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
return workItemState.get(namespace, address, StateContexts.nullContext());
} | @Test
public void testBagIsEmptyAfterClear() throws Exception {
StateTag<BagState<String>> addr = StateTags.bag("bag", StringUtf8Coder.of());
BagState<String> bag = underTest.state(NAMESPACE, addr);
bag.clear();
ReadableState<Boolean> result = bag.isEmpty();
// After clear(), emptiness is known locally — no backend read should occur.
Mockito.verify(mockReader, never())
.bagFuture(key(NAMESPACE, "bag"), STATE_FAMILY, StringUtf8Coder.of());
assertThat(result.read(), Matchers.is(true));
bag.add("hello");
assertThat(result.read(), Matchers.is(false));
} |
// Resolves the parameter type for a Java reflection Type using the default
// Java-to-argument-type mapping (JAVA_TO_ARG_TYPE).
public static ParamType getSchemaFromType(final Type type) {
return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
} | @Test
public void shouldGetIntSchemaForIntegerClass() {
assertThat(
UdfUtil.getSchemaFromType(Integer.class),
equalTo(ParamTypes.INTEGER)
);
} |
@Override
public void write(final int b) throws IOException {
throw new IOException(new UnsupportedOperationException());
} | @Test
public void testSmallChunksToWrite() throws Exception {
final CryptoVault vault = this.getVault();
final ByteArrayOutputStream cipherText = new ByteArrayOutputStream();
final FileHeader header = vault.getFileHeaderCryptor().create();
final CryptoOutputStream stream = new CryptoOutputStream(
new ProxyOutputStream(cipherText), vault.getFileContentCryptor(), header, new RandomNonceGenerator(vault.getNonceSize()), 0);
final byte[] part1 = RandomUtils.nextBytes(1024);
final byte[] part2 = RandomUtils.nextBytes(1024);
stream.write(part1, 0, part1.length);
stream.write(part2, 0, part2.length);
stream.close();
final byte[] read = new byte[part1.length + part2.length];
final byte[] expected = ByteBuffer.allocate(part1.length + part2.length).put(part1).put(part2).array();
final CryptoInputStream cryptoInputStream = new CryptoInputStream(new ByteArrayInputStream(cipherText.toByteArray()), vault.getFileContentCryptor(), header, 0);
assertEquals(expected.length, cryptoInputStream.read(read));
cryptoInputStream.close();
assertArrayEquals(expected, read);
} |
// Orders keys by their packed object-number/generation value.
@Override
public int compareTo(COSObjectKey other)
{
return Long.compare(numberAndGeneration, other.numberAndGeneration);
} | @Test
void compareToInputNotNullOutputNotNull()
{
// Arrange
final COSObjectKey objectUnderTest = new COSObjectKey(1L, 0);
final COSObjectKey other = new COSObjectKey(9_999_999L, 0);
// Act
final int retvalNegative = objectUnderTest.compareTo(other);
final int retvalPositive = other.compareTo(objectUnderTest);
// Assert results
// NOTE(review): compareTo/Long.compare only guarantee the SIGN of the result;
// asserting exactly -1/+1 over-specifies the contract (sign checks would be safer).
assertEquals(-1, retvalNegative);
assertEquals(1, retvalPositive);
} |
// Formats the template by substituting {key} placeholders with values from the
// map. The hard-coded third argument presumably controls null/missing-key
// handling (the test shows a null value leaves its placeholder intact) —
// confirm against the three-argument overload.
public static String format(CharSequence template, Map<?, ?> map) {
return format(template, map, true);
} | @Test
public void formatTest() {
final String template = "你好,我是{name},我的电话是:{phone}";
final String result = StrUtil.format(template, Dict.create().set("name", "张三").set("phone", "13888881111"));
assertEquals("你好,我是张三,我的电话是:13888881111", result);
// A null value leaves its placeholder unreplaced.
final String result2 = StrUtil.format(template, Dict.create().set("name", "张三").set("phone", null));
assertEquals("你好,我是张三,我的电话是:{phone}", result2);
} |
// Commits the in-progress recoverable write after a recovery cycle: writes the
// final blob only if it does not already exist (making re-commit idempotent),
// then removes the temporary component blobs.
@Override
public void commitAfterRecovery() throws IOException {
LOGGER.trace(
"Committing recoverable after recovery with options {}: {}", options, recoverable);
// see discussion: https://github.com/apache/flink/pull/15599#discussion_r623127365
// only write the final blob if it doesn't already exist
Optional<GSBlobStorage.BlobMetadata> blobMetadata =
storage.getMetadata(recoverable.finalBlobIdentifier);
if (!blobMetadata.isPresent()) {
writeFinalBlob();
}
// clean up after successful commit
cleanupTemporaryBlobs();
} | @Test
public void commitWithRecoveryOverwriteShouldSucceedTest() throws IOException {
// Pre-creating the final blob exercises the "already exists" (skip-write) path.
blobStorage.createBlob(blobIdentifier);
GSRecoverableWriterCommitter committer = commitTestInternal();
committer.commitAfterRecovery();
} |
@Override
public void eventReceived(Action action, Pod pod) {
logger.debug(
"Received {} event for pod {}, details: {}{}",
action,
pod.getMetadata().getName(),
System.lineSeparator(),
KubernetesUtils.tryToGetPrettyPrintYaml(pod.getStatus()));
final List<KubernetesPod> pods = Collections.singletonList(new KubernetesPod(pod));
switch (action) {
case ADDED:
callbackHandler.onAdded(pods);
break;
case MODIFIED:
callbackHandler.onModified(pods);
break;
case ERROR:
callbackHandler.onError(pods);
break;
case DELETED:
callbackHandler.onDeleted(pods);
break;
default:
logger.debug(
"Ignore handling {} event for pod {}", action, pod.getMetadata().getName());
break;
}
} | @Test
void testCallbackHandler() {
FlinkPod pod = new FlinkPod.Builder().build();
final KubernetesPodsWatcher podsWatcher =
new KubernetesPodsWatcher(
TestingWatchCallbackHandler.<KubernetesPod>builder()
.setOnAddedConsumer(pods -> podAddedList.addAll(pods))
.setOnModifiedConsumer(pods -> podModifiedList.addAll(pods))
.setOnDeletedConsumer(pods -> podDeletedList.addAll(pods))
.setOnErrorConsumer(pods -> podErrorList.addAll(pods))
.build());
podsWatcher.eventReceived(Watcher.Action.ADDED, pod.getPodWithoutMainContainer());
podsWatcher.eventReceived(Watcher.Action.MODIFIED, pod.getPodWithoutMainContainer());
podsWatcher.eventReceived(Watcher.Action.DELETED, pod.getPodWithoutMainContainer());
podsWatcher.eventReceived(Watcher.Action.ERROR, pod.getPodWithoutMainContainer());
assertThat(podAddedList).hasSize(1);
assertThat(podModifiedList).hasSize(1);
assertThat(podDeletedList).hasSize(1);
assertThat(podErrorList).hasSize(1);
} |
// Looks up a Serializer implementation by its alias.
public static Serializer getSerializer(String alias) {
// Factory pattern: instantiation is delegated to the ExtensionLoader SPI.
return EXTENSION_LOADER.getExtension(alias);
} | @Test
public void getSerializer1() {
Serializer serializer = SerializerFactory.getSerializer("test");
Assert.assertNotNull(serializer);
Assert.assertEquals(TestSerializer.class, serializer.getClass());
} |
// Returns the 1-based quarter (1..4) of this date.
// NOTE(review): month()/3 + 1 is only correct if month() is 0-based (Jan == 0),
// Calendar-style — confirm month()'s base before reusing this formula.
public int quarter() {
return month() / 3 + 1;
} | @Test
public void quarterTest() {
DateTime dateTime = new DateTime("2017-01-05 12:34:23", DatePattern.NORM_DATETIME_FORMAT);
Quarter quarter = dateTime.quarterEnum();
assertEquals(Quarter.Q1, quarter);
dateTime = new DateTime("2017-04-05 12:34:23", DatePattern.NORM_DATETIME_FORMAT);
quarter = dateTime.quarterEnum();
assertEquals(Quarter.Q2, quarter);
dateTime = new DateTime("2017-07-05 12:34:23", DatePattern.NORM_DATETIME_FORMAT);
quarter = dateTime.quarterEnum();
assertEquals(Quarter.Q3, quarter);
dateTime = new DateTime("2017-10-05 12:34:23", DatePattern.NORM_DATETIME_FORMAT);
quarter = dateTime.quarterEnum();
assertEquals(Quarter.Q4, quarter);
// precise to the millisecond
DateTime beginTime = new DateTime("2017-10-01 00:00:00.000", DatePattern.NORM_DATETIME_MS_FORMAT);
dateTime = DateUtil.beginOfQuarter(dateTime);
assertEquals(beginTime, dateTime);
// precise to the millisecond
DateTime endTime = new DateTime("2017-12-31 23:59:59.999", DatePattern.NORM_DATETIME_MS_FORMAT);
dateTime = DateUtil.endOfQuarter(dateTime);
assertEquals(endTime, dateTime);
} |
@Override
@SuppressWarnings("unchecked")
public void onApplicationEvent(@NotNull final DataChangedEvent event) {
for (DataChangedListener listener : listeners) {
if ((!(listener instanceof AbstractDataChangedListener))
&& clusterProperties.isEnabled()
&& Objects.nonNull(shenyuClusterSelectMasterService)
&& !shenyuClusterSelectMasterService.isMaster()) {
LOG.info("received DataChangedEvent, not master, pass");
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug("received DataChangedEvent, dispatching, event:{}", JsonUtils.toJson(event));
}
switch (event.getGroupKey()) {
case APP_AUTH:
listener.onAppAuthChanged((List<AppAuthData>) event.getSource(), event.getEventType());
break;
case PLUGIN:
listener.onPluginChanged((List<PluginData>) event.getSource(), event.getEventType());
break;
case RULE:
listener.onRuleChanged((List<RuleData>) event.getSource(), event.getEventType());
break;
case SELECTOR:
listener.onSelectorChanged((List<SelectorData>) event.getSource(), event.getEventType());
break;
case META_DATA:
listener.onMetaDataChanged((List<MetaData>) event.getSource(), event.getEventType());
break;
case PROXY_SELECTOR:
listener.onProxySelectorChanged((List<ProxySelectorData>) event.getSource(), event.getEventType());
break;
case DISCOVER_UPSTREAM:
listener.onDiscoveryUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
applicationContext.getBean(LoadServiceDocEntry.class).loadDocOnUpstreamChanged((List<DiscoverySyncData>) event.getSource(), event.getEventType());
break;
default:
throw new IllegalStateException("Unexpected value: " + event.getGroupKey());
}
}
} | @Test
public void onApplicationEventWithMetaDataConfigGroupTest() {
when(clusterProperties.isEnabled()).thenReturn(true);
when(shenyuClusterSelectMasterService.isMaster()).thenReturn(true);
ConfigGroupEnum configGroupEnum = ConfigGroupEnum.META_DATA;
DataChangedEvent dataChangedEvent = new DataChangedEvent(configGroupEnum, null, new ArrayList<>());
dataChangedEventDispatcher.onApplicationEvent(dataChangedEvent);
verify(httpLongPollingDataChangedListener, times(1)).onMetaDataChanged(anyList(), any());
verify(nacosDataChangedListener, times(1)).onMetaDataChanged(anyList(), any());
verify(websocketDataChangedListener, times(1)).onMetaDataChanged(anyList(), any());
verify(zookeeperDataChangedListener, times(1)).onMetaDataChanged(anyList(), any());
} |
public boolean shouldLog(final Logger logger, final String path, final int responseCode) {
if (rateLimitersByPath.containsKey(path)) {
final RateLimiter rateLimiter = rateLimitersByPath.get(path);
if (!rateLimiter.tryAcquire()) {
if (pathLimitHit.tryAcquire()) {
logger.info("Hit rate limit for path " + path + " with limit " + rateLimiter.getRate());
}
return false;
}
}
if (rateLimitersByResponseCode.containsKey(responseCode)) {
final RateLimiter rateLimiter = rateLimitersByResponseCode.get(responseCode);
if (!rateLimiter.tryAcquire()) {
if (responseCodeLimitHit.tryAcquire()) {
logger.info("Hit rate limit for response code " + responseCode + " with limit "
+ rateLimiter.getRate());
}
return false;
}
}
return true;
} | @Test
public void shouldSkipRateLimited_responseCode() {
// Given:
when(rateLimiter.tryAcquire()).thenReturn(true, false, false, false);
when(rateLimiter.getRate()).thenReturn(1d);
// When:
assertThat(loggingRateLimiter.shouldLog(logger, "/foo", RESPONSE_CODE), is(true));
assertThat(loggingRateLimiter.shouldLog(logger, "/foo", RESPONSE_CODE), is(false));
assertThat(loggingRateLimiter.shouldLog(logger, "/foo", RESPONSE_CODE), is(false));
assertThat(loggingRateLimiter.shouldLog(logger, "/foo", RESPONSE_CODE), is(false));
// Then:
verify(rateLimiter, times(4)).tryAcquire();
verify(logger, times(3)).info("Hit rate limit for response code 401 with limit 1.0");
} |
public String toString(String name) {
return toString(name, "");
} | @Test
public void testToString_String() {
System.out.println("toString");
String expResult;
String result;
Properties props = new Properties();
props.put("value1", "sTr1");
props.put("value2", "str_2");
props.put("empty", "");
props.put("str", "abc");
props.put("boolean", "true");
props.put("float", "24.98");
props.put("int", "12");
props.put("char", "a");
PropertyParser instance = new PropertyParser(props);
expResult = "sTr1";
result = instance.toString("value1");
assertEquals(expResult, result);
expResult = "str_2";
result = instance.toString("value2");
assertEquals(expResult, result);
expResult = "";
result = instance.toString("empty");
assertEquals(expResult, result);
expResult = "abc";
result = instance.toString("str");
assertEquals(expResult, result);
expResult = "true";
result = instance.toString("boolean");
assertEquals(expResult, result);
expResult = "24.98";
result = instance.toString("float");
assertEquals(expResult, result);
expResult = "12";
result = instance.toString("int");
assertEquals(expResult, result);
expResult = "a";
result = instance.toString("char");
assertEquals(expResult, result);
expResult = "";
result = instance.toString("nonexistent");
assertEquals(expResult, result);
} |
// Converts a serde row object into a Record via the configured field
// deserializer; the cast assumes the deserializer yields a Record for rows.
Record deserialize(Object data) {
return (Record) fieldDeserializer.value(data);
} | @Test
public void testStructDeserialize() {
Deserializer deserializer = new Deserializer.Builder()
.schema(CUSTOMER_SCHEMA)
.writerInspector((StructObjectInspector) IcebergObjectInspector.create(CUSTOMER_SCHEMA))
.sourceInspector(CUSTOMER_OBJECT_INSPECTOR)
.build();
Record expected = GenericRecord.create(CUSTOMER_SCHEMA);
expected.set(0, 1L);
expected.set(1, "Bob");
Record actual = deserializer.deserialize(new Object[] { new LongWritable(1L), new Text("Bob") });
Assert.assertEquals(expected, actual);
} |
// Returns the channel's current position. Guards with checkOpen(), then reads
// the position under the channel lock inside the begin()/end() interruptibility
// protocol (non-blocking variant).
@Override
public long position() throws IOException {
checkOpen();
long pos;
synchronized (this) {
boolean completed = false;
try {
begin(); // don't call beginBlocking() because this method doesn't block
if (!isOpen()) {
return 0; // AsynchronousCloseException will be thrown
}
pos = this.position;
completed = true;
} finally {
end(completed);
}
}
return pos;
} | @Test
public void testPositionNegative() throws IOException {
FileChannel channel = channel(regularFile(0), READ, WRITE);
try {
channel.position(-1);
fail();
} catch (IllegalArgumentException expected) {
// negative positions are rejected per the FileChannel contract
}
} |
@Override
public AwsProxyResponse handle(Throwable ex) {
// Maps an unhandled Throwable to a proxy response: 500 for request/servlet
// errors we recognize, 502 for everything else.
log.error("Called exception handler for:", ex);
// Also print the stack trace in case there is no log appender, or we are
// running inside SAM local, where the output needs to go to stderr.
ex.printStackTrace();
if (ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException) {
return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR));
} else {
return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
}
} | @Test
// The streamed error response must carry an application/json Content-Type header.
void streamHandle_InvalidRequestEventException_jsonContentTypeHeader()
throws IOException {
ByteArrayOutputStream respStream = new ByteArrayOutputStream();
exceptionHandler.handle(new InvalidRequestEventException(INVALID_REQUEST_MESSAGE, null), respStream);
assertNotNull(respStream);
assertTrue(respStream.size() > 0);
AwsProxyResponse resp = objectMapper.readValue(new ByteArrayInputStream(respStream.toByteArray()), AwsProxyResponse.class);
assertNotNull(resp);
assertTrue(resp.getMultiValueHeaders().containsKey(HttpHeaders.CONTENT_TYPE));
assertEquals(MediaType.APPLICATION_JSON, resp.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE));
}
// Parses a date string using the default formatter (null delegates format
// detection to the overload).
public static LocalDate parseDate(CharSequence text) {
return parseDate(text, (DateTimeFormatter) null);
} | @Test
// Single-digit month/day patterns ("yyyy-M-d") must parse correctly.
public void parseSingleMonthAndDayTest() {
final LocalDate localDate = LocalDateTimeUtil.parseDate("2020-1-1", "yyyy-M-d");
assertEquals("2020-01-01", localDate.toString());
}
@Override
public Optional<String> getContentHash() {
// mContentHash may be unset until the upload completes; wrap the nullable field.
return Optional.ofNullable(mContentHash);
} | @Test
// Writing one byte past the partition size must trigger a multipart upload
// split across two part writes, and close() completes the upload.
public void writeByteArrayForLargeFile() throws Exception {
int partSize = (int) FormatUtils.parseSpaceSize(PARTITION_SIZE);
byte[] b = new byte[partSize + 1];
assertEquals(mStream.getPartNumber(), 1);
mStream.write(b, 0, b.length);
assertEquals(mStream.getPartNumber(), 2);
Mockito.verify(mMockS3Client)
.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
Mockito.verify(mMockOutputStream).write(b, 0, b.length - 1);
Mockito.verify(mMockOutputStream).write(b, b.length - 1, 1);
Mockito.verify(mMockExecutor).submit(any(Callable.class));
mStream.close();
assertEquals(mStream.getPartNumber(), 3);
Mockito.verify(mMockS3Client)
.completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
assertTrue(mStream.getContentHash().isPresent());
assertEquals("multiTag", mStream.getContentHash().get());
}
// Builds the registry path "<SELECTOR_PARENT>/<pluginName>/<selectorId>"
// using the configured path separator.
public static String buildSelectorRealPath(final String pluginName, final String selectorId) {
return SELECTOR_PARENT + PATH_SEPARATOR + pluginName + PATH_SEPARATOR + selectorId;
} | @Test
// The built path must join parent, plugin name and selector id with the separator.
public void testBuildSelectorRealPath() {
String pluginName = RandomStringUtils.randomAlphanumeric(10);
String selectorId = RandomStringUtils.randomAlphanumeric(10);
String selectorRealPath = DefaultPathConstants.buildSelectorRealPath(pluginName, selectorId);
assertThat(selectorRealPath, notNullValue());
assertThat(String.join(SEPARATOR, SELECTOR_PARENT, pluginName, selectorId), equalTo(selectorRealPath));
}
@Override
public Space get() throws BackgroundException {
// Fetches quota information from the EUE user-info endpoint and converts it
// into a Space of (current, max - current).
// NOTE(review): verify the Space constructor's argument order (used vs.
// available) matches what is passed here.
try {
final EueApiClient client = new EueApiClient(session);
final UserInfoApi userInfoApi = new UserInfoApi(client);
final UserInfoResponseModel userInfoResponseModel = userInfoApi.userinfoGet(null, null);
if(log.isDebugEnabled()) {
log.debug(String.format("Received user info %s", userInfoResponseModel));
}
return new Space(userInfoResponseModel.getQuotas().getContentSize().getCurrent(),
userInfoResponseModel.getQuotas().getContentSize().getMax() - userInfoResponseModel.getQuotas().getContentSize().getCurrent());
}
catch(ApiException e) {
// Map API failures to a BackgroundException anchored at the home folder.
throw new EueExceptionMappingService().map("Failure to read attributes of {0}", e,
new DefaultHomeFinderService(session).find());
}
} | @Test
public void testGetQuota() throws Exception {
final Quota.Space quota = new EueQuotaFeature(session).get();
assertNotNull(quota.available);
assertNotNull(quota.used);
assertNotEquals(0L, quota.available, 0L);
assertNotEquals(0L, quota.used, 0L);
assertTrue(quota.available < quota.available + quota.used);
} |
// Returns the configured branch name, if any.
public Optional<String> branch() {
return configuration.get(BRANCH_NAME);
} | @Test
// The "sonar.branch.name" property must be surfaced through branch().
public void should_define_branch_name() {
settings.setProperty("sonar.branch.name", "name");
assertThat(underTest.branch()).isEqualTo(Optional.of("name"));
}
@VisibleForTesting
// True when the collector manager already tracks a timeline collector for appId.
boolean hasApplication(ApplicationId appId) {
return collectorManager.containsTimelineCollector(appId);
} | @Test
// Adding a single application must make it visible via hasApplication().
void testAddApplication() throws Exception {
auxService = createCollectorAndAddApplication();
// auxService should have a single app
assertTrue(auxService.hasApplication(appAttemptId.getApplicationId()));
auxService.close();
}
// Wraps a delegate listener so metrics are recorded alongside delegation.
ConnectorStatus.Listener wrapStatusListener(ConnectorStatus.Listener delegateListener) {
return new ConnectorStatusListener(delegateListener);
} | @Test
// onFailure must both delegate and record a startup-failure metric.
public void testConnectorFailureBeforeStartupRecordedMetrics() {
WorkerMetricsGroup workerMetricsGroup = new WorkerMetricsGroup(new HashMap<>(), new HashMap<>(), connectMetrics);
final ConnectorStatus.Listener connectorListener = workerMetricsGroup.wrapStatusListener(delegateConnectorListener);
connectorListener.onFailure(connector, exception);
verify(delegateConnectorListener).onFailure(connector, exception);
verifyRecordConnectorStartupFailure();
}
// Translates a structured output node into a CreateStreamCommand; the final
// Optional.of(false) marks the stream as not a source stream.
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
return new CreateStreamCommand(
outputNode.getSinkName().get(),
outputNode.getSchema(),
outputNode.getTimestampColumn(),
outputNode.getKsqlTopic().getKafkaTopicName(),
Formats.from(outputNode.getKsqlTopic()),
outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
Optional.of(outputNode.getOrReplace()),
Optional.of(false)
);
} | @Test
// A CREATE STREAM with no column definitions must be rejected.
public void shouldThrowOnNoElementsInCreateStream() {
// Given:
final CreateStream statement
= new CreateStream(SOME_NAME, TableElements.of(), false, true, withProperties, false);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> createSourceFactory.createStreamCommand(statement, ksqlConfig)
);
// Then:
assertThat(e.getMessage(), containsString(
"The statement does not define any columns."));
}
// Returns the values of the transform's main inputs: every input whose tag is
// not declared by getAdditionalInputs() (i.e. is not a side input).
// Throws IllegalArgumentException when inputs exist but none are main inputs.
public static Collection<PValue> nonAdditionalInputs(AppliedPTransform<?, ?, ?> application) {
ImmutableList.Builder<PValue> mainInputsBuilder = ImmutableList.builder();
PTransform<?, ?> transform = application.getTransform();
for (Map.Entry<TupleTag<?>, PCollection<?>> input : application.getInputs().entrySet()) {
if (!transform.getAdditionalInputs().containsKey(input.getKey())) {
mainInputsBuilder.add(input.getValue());
}
}
// Build the immutable list once and reuse it; the original called build()
// three times (for the check and again for the return value).
ImmutableList<PValue> mainInputs = mainInputsBuilder.build();
checkArgument(
!mainInputs.isEmpty() || application.getInputs().isEmpty(),
"Expected at least one main input if any inputs exist");
return mainInputs;
} | @Test
// Only the non-additional (main) inputs must be returned when side inputs exist.
public void nonAdditionalInputsWithAdditionalInputsSucceeds() {
Map<TupleTag<?>, PValue> additionalInputs = new HashMap<>();
additionalInputs.put(new TupleTag<String>() {}, pipeline.apply(Create.of("1, 2", "3")));
additionalInputs.put(new TupleTag<Long>() {}, pipeline.apply(GenerateSequence.from(3L)));
Map<TupleTag<?>, PCollection<?>> allInputs = new HashMap<>();
PCollection<Integer> mainInts = pipeline.apply("MainInput", Create.of(12, 3));
allInputs.put(new TupleTag<Integer>() {}, mainInts);
PCollection<Void> voids = pipeline.apply("VoidInput", Create.empty(VoidCoder.of()));
allInputs.put(new TupleTag<Void>() {}, voids);
allInputs.putAll((Map) additionalInputs);
AppliedPTransform<PInput, POutput, TestTransform> transform =
AppliedPTransform.of(
"additional",
allInputs,
Collections.emptyMap(),
new TestTransform(additionalInputs),
ResourceHints.create(),
pipeline);
assertThat(
TransformInputs.nonAdditionalInputs(transform),
Matchers.containsInAnyOrder(mainInts, voids));
}
@Override
public void onAdd(Request request) {
// Ignore events after disposal; otherwise enqueue without delay.
if (isDisposed()) {
return;
}
queue.addImmediately(request);
} | @Test
// When the add-matcher rejects the extension, nothing may be enqueued.
void shouldNotAddExtensionWhenAddPredicateAlwaysFalse() {
var type = GroupVersionKind.fromAPIVersionAndKind("v1alpha1", "User");
when(matchers.onAddMatcher()).thenReturn(
DefaultExtensionMatcher.builder(client, type).build());
watcher.onAdd(createFake("fake-name"));
verify(matchers, times(1)).onAddMatcher();
verify(queue, times(0)).add(any());
verify(queue, times(0)).addImmediately(any());
}
// Convenience overload: prefix lookup without stripping the prefix from keys.
public Map<String, String> getPropertiesWithPrefix(String prefix) {
return getPropertiesWithPrefix(prefix, false);
} | @Test
// Empty, overly-long and non-matching prefixes must all yield empty maps.
public void testGetPropertiesWithPrefixEmptyResult() {
ConfigurationProperties configurationProperties =
new ConfigurationProperties(PROPERTIES);
Map<String, String> propsEmptyPrefix = configurationProperties.getPropertiesWithPrefix("");
Map<String, String> propsLongPrefix = configurationProperties
.getPropertiesWithPrefix("root.1.2.4.5.6");
Map<String, String> propsNonExistingRootPrefix = configurationProperties
.getPropertiesWithPrefix("3");
Assert.assertEquals(0, propsEmptyPrefix.size());
Assert.assertEquals(0, propsLongPrefix.size());
Assert.assertEquals(0, propsNonExistingRootPrefix.size());
}
@Override
public void batchRegisterInstance(String serviceName, String groupName, List<Instance> instances)
throws NacosException {
// Validate instances and strip/verify any group-name prefix before delegating
// the batch registration to the proxy.
NamingUtils.batchCheckInstanceIsLegal(instances);
batchCheckAndStripGroupNamePrefix(instances, groupName);
clientProxy.batchRegisterService(serviceName, groupName, instances);
} | @Test
// A mismatched group-name prefix in the instance service name must be rejected.
// NOTE(review): there is no fail() after the try block — if no exception is
// thrown the test silently passes; confirm this is intended.
void testBatchRegisterInstanceWithWrongGroupNamePrefix() throws NacosException {
Instance instance = new Instance();
String serviceName = "service1";
String ip = "1.1.1.1";
int port = 10000;
instance.setServiceName("WrongGroup" + "@@" + serviceName);
instance.setEphemeral(true);
instance.setPort(port);
instance.setIp(ip);
List<Instance> instanceList = new ArrayList<>();
instanceList.add(instance);
//when
try {
client.batchRegisterInstance(serviceName, Constants.DEFAULT_GROUP, instanceList);
} catch (Exception e) {
assertTrue(e instanceof NacosException);
assertTrue(e.getMessage().contains("wrong group name prefix of instance service name"));
}
}
// Transforms, converts and dispatches every record in toSend to the producer.
// Returns true when the whole batch was dispatched (toSend is reset to null);
// returns false on a retriable send failure, leaving the unsent tail in toSend
// so the caller can retry.
boolean sendRecords() {
int processed = 0;
recordBatch(toSend.size());
// counter is only null when toSend is empty, in which case the loop below
// never runs and counter is never dereferenced.
final SourceRecordWriteCounter counter =
toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
for (final SourceRecord preTransformRecord : toSend) {
ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
final SourceRecord record = transformationChain.apply(context, preTransformRecord);
final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
// Records dropped by a transform or failed conversion are skipped, not sent.
if (producerRecord == null || context.failed()) {
counter.skipRecord();
recordDropped(preTransformRecord);
processed++;
continue;
}
log.trace("{} Appending record to the topic {} with key {}, value {}", this, record.topic(), record.key(), record.value());
Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord);
try {
final String topic = producerRecord.topic();
maybeCreateTopic(topic);
producer.send(
producerRecord,
(recordMetadata, e) -> {
if (e != null) {
if (producerClosed) {
log.trace("{} failed to send record to {}; this is expected as the producer has already been closed", AbstractWorkerSourceTask.this, topic, e);
} else {
log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e);
}
log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
producerSendFailed(context, false, producerRecord, preTransformRecord, e);
// With errors.tolerance=all the failure is swallowed and the
// record acknowledged so offsets can still be committed.
if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
counter.skipRecord();
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
}
} else {
counter.completeRecord();
log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
AbstractWorkerSourceTask.this,
recordMetadata.topic(), recordMetadata.partition(),
recordMetadata.offset());
recordSent(preTransformRecord, producerRecord, recordMetadata);
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
if (topicTrackingEnabled) {
recordActiveTopic(producerRecord.topic());
}
}
});
// Note that this will cause retries to take place within a transaction
} catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
// Retriable failure: keep only the unprocessed tail for the next attempt.
log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
this, producerRecord.topic(), producerRecord.partition(), e);
toSend = toSend.subList(processed, toSend.size());
submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
counter.retryRemaining();
return false;
} catch (ConnectException e) {
log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
this, producerRecord.topic(), producerRecord.partition(), e);
log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
throw e;
} catch (KafkaException e) {
producerSendFailed(context, true, producerRecord, preTransformRecord, e);
}
processed++;
recordDispatched(preTransformRecord);
}
toSend = null;
batchDispatched();
return true;
} | @Test
// A retriable topic-creation failure must keep the batch in toSend; the next
// sendRecords() call succeeds and clears it.
public void testSendRecordsTopicCreateRetries() {
createWorkerTask();
SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
expectPreliminaryCalls(TOPIC);
when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap());
when(admin.createOrFindTopics(any(NewTopic.class)))
// First call to create the topic times out
.thenThrow(new RetriableException(new TimeoutException("timeout")))
// Next attempt succeeds
.thenReturn(createdTopic(TOPIC));
workerTask.toSend = Arrays.asList(record1, record2);
workerTask.sendRecords();
assertEquals(Arrays.asList(record1, record2), workerTask.toSend);
// Next they all succeed
workerTask.sendRecords();
assertNull(workerTask.toSend);
// First attempt failed, second succeeded
verifyTopicCreation(2, TOPIC, TOPIC);
}
@Override
public boolean hasSameTypeAs(Task task) {
// Two pluggable tasks share a type only when they are the same concrete class
// and are backed by an equal plugin configuration.
return getClass().equals(task.getClass())
&& this.pluginConfiguration.equals(((PluggableTask) task).pluginConfiguration);
} | @Test
// Identical plugin configurations mean identical task types.
public void shouldReturnTrueWhenPluginConfigurationForTwoPluggableTasksIsExactlyTheSame() {
PluginConfiguration pluginConfiguration = new PluginConfiguration("test-plugin-1", "1.0");
PluggableTask pluggableTask1 = new PluggableTask(pluginConfiguration, new Configuration());
PluggableTask pluggableTask2 = new PluggableTask(pluginConfiguration, new Configuration());
assertTrue(pluggableTask1.hasSameTypeAs(pluggableTask2));
}
@Override
public Column convert(BasicTypeDefine typeDefine) {
// Try the generic conversion first; fall back to Kingbase-specific mappings
// for types the superclass does not recognize.
try {
return super.convert(typeDefine);
} catch (SeaTunnelRuntimeException e) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String kingbaseDataType = typeDefine.getDataType().toUpperCase();
switch (kingbaseDataType) {
case KB_TINYINT:
builder.dataType(BasicType.BYTE_TYPE);
break;
case KB_MONEY:
// MONEY maps to a wide decimal(38, 18).
builder.dataType(new DecimalType(38, 18));
builder.columnLength(38L);
builder.scale(18);
break;
case KB_BLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength((long) (1024 * 1024 * 1024));
break;
case KB_CLOB:
builder.dataType(BasicType.STRING_TYPE);
// Fix: removed a dead store — the original first set
// columnLength(typeDefine.getLength()) and then immediately
// overwrote it with the fixed 1 GiB length below.
builder.columnLength((long) (1024 * 1024 * 1024));
break;
case KB_BIT:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
// BIT(M) -> BYTE(M/8), rounding up for a partial trailing byte
long byteLength = typeDefine.getLength() / 8;
byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0;
builder.columnLength(byteLength);
break;
default:
// No mapping known: surface a typed conversion error.
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.KINGBASE,
typeDefine.getDataType(),
typeDefine.getName());
}
return builder.build();
}
} | @Test
// float8 must be handled by the generic path and map to DOUBLE.
public void testConvertDouble() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("float8")
.dataType("float8")
.build();
Column column = KingbaseTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(BasicType.DOUBLE_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
}
@Override
public boolean retryRequest(HttpResponse response, int executionCount, HttpContext ctx) {
// Decides whether to retry: the predicate must accept the response, and the
// execution count must not exceed maxRetries; on retry the computed delay is
// recorded and the retry consumer notified.
log.fine(() -> String.format("retryRequest(responseCode='%s', executionCount='%d', ctx='%s'",
response.getStatusLine().getStatusCode(), executionCount, ctx));
HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
if (!predicate.test(response, clientCtx)) {
log.fine(() -> String.format("Not retrying for '%s'", ctx));
return false;
}
if (executionCount > maxRetries) {
log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
retryFailedConsumer.onRetryFailed(response, executionCount, clientCtx);
return false;
}
Duration delay = delaySupplier.getDelay(executionCount);
log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
retryInterval.set(delay.toMillis());
retryConsumer.onRetry(response, delay, executionCount, clientCtx);
return true;
} | @Test
// Retries are granted for listed status codes until executionCount exceeds maxRetries.
void retries_for_listed_exceptions_until_max_retries_exceeded() {
int maxRetries = 2;
DelayedResponseLevelRetryHandler handler = DelayedResponseLevelRetryHandler.Builder
.withFixedDelay(Duration.ofSeconds(2), maxRetries)
.retryForStatusCodes(List.of(HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_BAD_GATEWAY))
.build();
HttpResponse response = createResponse(HttpStatus.SC_SERVICE_UNAVAILABLE);
HttpClientContext ctx = new HttpClientContext();
int lastExecutionCount = maxRetries + 1;
for (int i = 1; i < lastExecutionCount; i++) {
assertTrue(handler.retryRequest(response, i, ctx));
}
assertFalse(handler.retryRequest(response, lastExecutionCount, ctx));
}
@Override
public HttpRestResult<String> httpPost(String path, Map<String, String> headers, Map<String, String> paramValues,
String encode, long readTimeoutMs) throws Exception {
// POSTs a form to the current server, failing over across the server list on
// connect/timeout errors until readTimeoutMs elapses or maxRetry reconnection
// rounds are exhausted.
final long endTime = System.currentTimeMillis() + readTimeoutMs;
String currentServerAddr = serverListMgr.getCurrentServerAddr();
int maxRetry = this.maxRetry;
HttpClientConfig httpConfig = HttpClientConfig.builder()
.setReadTimeOutMillis(Long.valueOf(readTimeoutMs).intValue())
.setConTimeOutMillis(ConfigHttpClientManager.getInstance().getConnectTimeoutOrDefault(3000)).build();
do {
try {
Header newHeaders = Header.newInstance();
if (headers != null) {
newHeaders.addAll(headers);
}
HttpRestResult<String> result = nacosRestTemplate.postForm(getUrl(currentServerAddr, path), httpConfig,
newHeaders, paramValues, String.class);
if (isFail(result)) {
LOGGER.error("[NACOS ConnectException] currentServerAddr: {}, httpCode: {}", currentServerAddr,
result.getCode());
} else {
// Update the currently available server addr
serverListMgr.updateCurrentServerAddr(currentServerAddr);
return result;
}
} catch (ConnectException connectException) {
LOGGER.error("[NACOS ConnectException httpPost] currentServerAddr: {}, err : {}", currentServerAddr,
connectException.getMessage());
} catch (SocketTimeoutException socketTimeoutException) {
LOGGER.error("[NACOS SocketTimeoutException httpPost] currentServerAddr: {}, err : {}",
currentServerAddr, socketTimeoutException.getMessage());
} catch (Exception ex) {
// Unexpected failures are not retried; propagate to the caller.
LOGGER.error("[NACOS Exception httpPost] currentServerAddr: " + currentServerAddr, ex);
throw ex;
}
// Advance to the next server; once the list is exhausted, spend one retry
// round and refresh back to the start of the list.
if (serverListMgr.getIterator().hasNext()) {
currentServerAddr = serverListMgr.getIterator().next();
} else {
maxRetry--;
if (maxRetry < 0) {
throw new ConnectException(
"[NACOS HTTP-POST] The maximum number of tolerable server reconnection errors has been reached");
}
serverListMgr.refreshCurrentServerAddr();
}
} while (System.currentTimeMillis() <= endTime);
LOGGER.error("no available server, currentServerAddr : {}", currentServerAddr);
throw new ConnectException("no available server, currentServerAddr : " + currentServerAddr);
} | @Test
// A 200 response from the first server must be returned without failover.
void testHttpPostSuccess() throws Exception {
when(nacosRestTemplate.<String>postForm(eq(SERVER_ADDRESS_1 + "/test"), any(HttpClientConfig.class),
any(Header.class), anyMap(), eq(String.class))).thenReturn(mockResult);
when(mockResult.getCode()).thenReturn(HttpURLConnection.HTTP_OK);
HttpRestResult<String> actual = serverHttpAgent.httpPost("/test", Collections.emptyMap(),
Collections.emptyMap(), "UTF-8", 1000);
assertEquals(mockResult, actual);
}
// Swaps repository tuples into a YAML rule configuration. Returns empty when the
// target type carries no RepositoryTupleEntity annotation; otherwise dispatches
// on whether the entity is a leaf node or a composite with annotated fields.
public Optional<YamlRuleConfiguration> swapToYamlRuleConfiguration(final Collection<RepositoryTuple> repositoryTuples, final Class<? extends YamlRuleConfiguration> toBeSwappedType) {
RepositoryTupleEntity tupleEntity = toBeSwappedType.getAnnotation(RepositoryTupleEntity.class);
if (null == tupleEntity) {
return Optional.empty();
}
return tupleEntity.leaf()
? swapToYamlRuleConfiguration(repositoryTuples, toBeSwappedType, tupleEntity)
: swapToYamlRuleConfiguration(repositoryTuples, toBeSwappedType, getFields(toBeSwappedType));
} | @Test
// A single leaf tuple must be swapped into the leaf YAML configuration's value.
void assertSwapToYamlRuleConfigurationWithGlobalLeafYamlRuleConfiguration() {
Optional<YamlRuleConfiguration> actual = new RepositoryTupleSwapperEngine().swapToYamlRuleConfiguration(
Collections.singleton(new RepositoryTuple("/rules/leaf/versions/0", "value: foo")), GlobalLeafYamlRuleConfiguration.class);
assertTrue(actual.isPresent());
GlobalLeafYamlRuleConfiguration actualYamlConfig = (GlobalLeafYamlRuleConfiguration) actual.get();
assertThat(actualYamlConfig.getValue(), is("foo"));
}
// Builds the audit actor URN for a system node; nodeId must be non-null.
public static AuditActor system(@Nonnull NodeId nodeId) {
return new AutoValue_AuditActor(URN_GRAYLOG_NODE + requireNonNull(nodeId, "nodeId must not be null").getNodeId());
} | @Test
// The node id must be embedded into the "urn:graylog:node:" URN.
public void testSystem() {
final NodeId nodeId = new SimpleNodeId("28164cbe-4ad9-4c9c-a76e-088655aa78892");
final AuditActor actor = AuditActor.system(nodeId);
assertThat(actor.urn()).isEqualTo("urn:graylog:node:28164cbe-4ad9-4c9c-a76e-088655aa78892");
}
// Simple accessor for the event name.
public String getName() {
return name;
} | @Test
// The constructor's code argument must be exposed via getName().
public void testGetName() {
// Create an ExceptionEvent with a code
ExceptionEvent exceptionEvent = new ExceptionEvent("CODE123");
// Test the getName method
assertEquals("CODE123", exceptionEvent.getName());
}
@Override public @Nullable Boolean trySample(@Nullable M method) {
// Memoized per-method sampling decision: resolve (and cache) one Sampler per
// method. NULL_SENTINEL records "no sampler for this method" so the lookup via
// samplerOfMethod happens at most once per method.
if (method == null) return null;
Sampler sampler = methodToSamplers.get(method);
if (sampler == NULL_SENTINEL) return null;
if (sampler != null) return sampler.isSampled(0L); // counting sampler ignores the input
sampler = samplerOfMethod(method);
if (sampler == null) {
methodToSamplers.put(method, NULL_SENTINEL);
return null;
}
// putIfAbsent guards the race with a concurrent resolver; keep the winner so
// the rate counter stays per-annotation rather than per-invocation.
Sampler previousSampler = methodToSamplers.putIfAbsent(method, sampler);
if (previousSampler != null) sampler = previousSampler; // lost race, use the existing counter
return sampler.isSampled(0L); // counting sampler ignores the input
} | @Test void cardinalityIsPerAnnotationNotInvocation() {
// Three invocations with the same annotation share a single cached sampler.
Traced traced = traced(1.0f, 0, true);
declarativeSampler.trySample(traced);
declarativeSampler.trySample(traced);
declarativeSampler.trySample(traced);
assertThat(declarativeSampler.methodToSamplers)
.hasSize(1);
}
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
// Delegate to the work-item state table with a null state context.
return workItemState.get(namespace, address, StateContexts.nullContext());
} | @Test
public void testOrderedListAddBeforeRangeRead() throws Exception {
StateTag<OrderedListState<String>> addr =
StateTags.orderedList("orderedList", StringUtf8Coder.of());
OrderedListState<String> orderedList = underTest.state(NAMESPACE, addr);
SettableFuture<Iterable<TimestampedValue<String>>> future = SettableFuture.create();
Range<Long> readSubrange = Range.closedOpen(70 * 1000L, 100 * 1000L);
when(mockReader.orderedListFuture(
readSubrange, key(NAMESPACE, "orderedList"), STATE_FAMILY, StringUtf8Coder.of()))
.thenReturn(future);
orderedList.readRangeLater(Instant.ofEpochMilli(70), Instant.ofEpochMilli(100));
final TimestampedValue<String> helloValue =
TimestampedValue.of("hello", Instant.ofEpochMilli(100));
final TimestampedValue<String> worldValue =
TimestampedValue.of("world", Instant.ofEpochMilli(75));
final TimestampedValue<String> goodbyeValue =
TimestampedValue.of("goodbye", Instant.ofEpochMilli(50));
orderedList.add(helloValue);
waitAndSet(future, Collections.singletonList(worldValue), 200);
orderedList.add(goodbyeValue);
assertThat(
orderedList.readRange(Instant.ofEpochMilli(70), Instant.ofEpochMilli(100)),
Matchers.contains(worldValue));
} |
@Override
public List<URL> lookup(URL url) {
// Returns the currently notified provider URLs for url, filtering out
// empty-protocol placeholders. If nothing has been notified yet, subscribes
// first (the subscribe contract guarantees an initial notify).
List<URL> result = new ArrayList<>();
Map<String, List<URL>> notifiedUrls = getNotified().get(url);
if (CollectionUtils.isNotEmptyMap(notifiedUrls)) {
for (List<URL> urls : notifiedUrls.values()) {
for (URL u : urls) {
if (!EMPTY_PROTOCOL.equals(u.getProtocol())) {
result.add(u);
}
}
}
} else {
// Capture the first notification synchronously via an AtomicReference.
final AtomicReference<List<URL>> reference = new AtomicReference<>();
NotifyListener listener = reference::set;
subscribe(url, listener); // Subscribe logic guarantees the first notify to return
List<URL> urls = reference.get();
if (CollectionUtils.isNotEmpty(urls)) {
for (URL u : urls) {
if (!EMPTY_PROTOCOL.equals(u.getProtocol())) {
result.add(u);
}
}
}
}
return result;
} | @Test
// lookup(null) must NPE; after notify, the registered URL is returned.
void lookupTest() {
// loop up before registry
try {
abstractRegistry.lookup(null);
Assertions.fail();
} catch (Exception e) {
Assertions.assertTrue(e instanceof NullPointerException);
}
List<URL> urlList1 = abstractRegistry.lookup(testUrl);
Assertions.assertFalse(urlList1.contains(testUrl));
// loop up after registry
List<URL> urls = new ArrayList<>();
urls.add(testUrl);
abstractRegistry.notify(urls);
List<URL> urlList2 = abstractRegistry.lookup(testUrl);
Assertions.assertTrue(urlList2.contains(testUrl));
}
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
// Decodes a raw JSON payload into a Message, either by flattening the whole
// document or by extracting fields via the configured JSON path. Flatten
// failures are logged and yield a message with no extracted fields.
Map<String, Object> fields = new HashMap<>();
if (flatten) {
final String json = new String(rawMessage.getPayload(), charset);
try {
fields = flatten(json);
} catch (JsonFlattenException e) {
LOG.warn("JSON contains type not supported by flatten method.", e);
} catch (JsonProcessingException e) {
LOG.warn("Could not parse JSON.", e);
}
} else {
// Without a JSON path there is nothing to extract; drop the message.
if (jsonPath == null) {
return null;
}
final String json = new String(rawMessage.getPayload(), charset);
fields = read(json);
}
final Message message = messageFactory.createMessage(buildShortMessage(fields),
configuration.getString(CK_SOURCE),
rawMessage.getTimestamp());
message.addFields(fields);
return message;
} | @Test
public void testReadResultingInSingleStringFullJson() throws Exception {
RawMessage json = new RawMessage("{\"url\":\"https://api.github.com/repos/Graylog2/graylog2-server/releases/assets/22660\",\"download_count\":76185,\"id\":22660,\"name\":\"graylog2-server-0.20.0-preview.1.tgz\",\"label\":\"graylog2-server-0.20.0-preview.1.tgz\",\"content_type\":\"application/octet-stream\",\"state\":\"uploaded\",\"size\":38179285,\"updated_at\":\"2013-09-30T20:05:46Z\"}".getBytes(StandardCharsets.UTF_8));
String path = "$.state";
Message result = new JsonPathCodec(configOf(CK_PATH, path, CK_FLATTEN, true), objectMapperProvider.get(), messageFactory).decode(json);
assertThat(result.getField("state")).isEqualTo("\"uploaded\"");
} |
@Override
public Map<String, NoteInfo> list(AuthenticationInfo subject) throws IOException {
// Must to create rootNotebookFileObject each time when call method list, otherwise we can not
// get the updated data under this folder.
this.rootNotebookFileObject = fsManager.resolveFile(this.rootNotebookFolder);
return listFolder(rootNotebookFileObject);
} | @Test
// Notes under dot-prefixed (hidden) directories must be skipped by list().
void testSkipInvalidDirectoryName() throws IOException {
createNewDirectory(".hidden_dir");
createNewNote("{}", "hidden_note", "my_project/.hidden_dir/note");
Map<String, NoteInfo> noteInfos = notebookRepo.list(AuthenticationInfo.ANONYMOUS);
assertEquals(0, noteInfos.size());
}
@Override
public Collection<Long> generateKeys(final AlgorithmSQLContext context, final int keyGenerateCount) {
// Produces keyGenerateCount keys; the SQL context is not consulted here.
Collection<Long> result = new LinkedList<>();
int remaining = keyGenerateCount;
while (remaining > 0) {
result.add(generateKey());
remaining--;
}
return result;
} | @Test
// A negative tolerance property must fail algorithm initialization.
void assertMaxTolerateTimeDifferenceMillisecondsWhenNegative() {
assertThrows(AlgorithmInitializationException.class,
() -> TypedSPILoader.getService(KeyGenerateAlgorithm.class, "SNOWFLAKE", PropertiesBuilder.build(new Property("max-tolerate-time-difference-milliseconds", "-1")))
.generateKeys(mock(AlgorithmSQLContext.class), 1));
}
@Override
public boolean equals(final Object o) {
// Reflexive shortcut.
if(this == o) {
return true;
}
// Strict class comparison (not instanceof) preserves symmetry with subclasses.
if(o == null || getClass() != o.getClass()) {
return false;
}
final LifecycleConfiguration that = (LifecycleConfiguration) o;
// Equal when both null-safe field comparisons hold.
return Objects.equals(expiration, that.expiration)
&& Objects.equals(transition, that.transition);
} | @Test
// Equality must be field-wise over (expiration, transition).
public void testEquals() {
assertEquals(LifecycleConfiguration.empty(), new LifecycleConfiguration());
assertEquals(new LifecycleConfiguration(1, 1), new LifecycleConfiguration(1, 1));
assertEquals(new LifecycleConfiguration(1, 2), new LifecycleConfiguration(1, 2));
assertNotEquals(new LifecycleConfiguration(1, 2), new LifecycleConfiguration(2, 1));
}
@VisibleForTesting
static List<TopicPartition> getAllTopicPartitions(
SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>> kafkaConsumerFactoryFn,
Map<String, Object> kafkaConsumerConfig,
Set<String> topics,
@Nullable Pattern topicPattern) {
// Enumerates partitions either for the explicit topic set, or — when no topics
// are given — for every broker topic matching topicPattern (all topics when
// the pattern is null). The consumer is closed via try-with-resources.
List<TopicPartition> current = new ArrayList<>();
try (Consumer<byte[], byte[]> kafkaConsumer =
kafkaConsumerFactoryFn.apply(kafkaConsumerConfig)) {
if (topics != null && !topics.isEmpty()) {
for (String topic : topics) {
for (PartitionInfo partition : kafkaConsumer.partitionsFor(topic)) {
current.add(new TopicPartition(topic, partition.partition()));
}
}
} else {
for (Map.Entry<String, List<PartitionInfo>> topicInfo :
kafkaConsumer.listTopics().entrySet()) {
if (topicPattern == null || topicPattern.matcher(topicInfo.getKey()).matches()) {
for (PartitionInfo partition : topicInfo.getValue()) {
current.add(new TopicPartition(partition.topic(), partition.partition()));
}
}
}
}
}
return current;
} | @Test
// Pattern-based discovery must return only partitions of matching topics.
public void testGetAllTopicPartitionsWithGivenPattern() throws Exception {
Consumer<byte[], byte[]> mockConsumer = Mockito.mock(Consumer.class);
when(mockConsumer.listTopics())
.thenReturn(
ImmutableMap.of(
"topic1",
ImmutableList.of(
new PartitionInfo("topic1", 0, null, null, null),
new PartitionInfo("topic1", 1, null, null, null)),
"topic2",
ImmutableList.of(
new PartitionInfo("topic2", 0, null, null, null),
new PartitionInfo("topic2", 1, null, null, null)),
"topicA",
ImmutableList.of(
new PartitionInfo("topicA", 0, null, null, null),
new PartitionInfo("topicA", 1, null, null, null)),
"topicB",
ImmutableList.of(
new PartitionInfo("topicB", 0, null, null, null),
new PartitionInfo("topicB", 1, null, null, null))));
assertEquals(
ImmutableList.of(
new TopicPartition("topic1", 0),
new TopicPartition("topic1", 1),
new TopicPartition("topic2", 0),
new TopicPartition("topic2", 1)),
WatchForKafkaTopicPartitions.getAllTopicPartitions(
(input) -> mockConsumer, null, null, Pattern.compile("topic[0-9]")));
assertEquals(
ImmutableList.of(
new TopicPartition("topicA", 0),
new TopicPartition("topicA", 1),
new TopicPartition("topicB", 0),
new TopicPartition("topicB", 1)),
WatchForKafkaTopicPartitions.getAllTopicPartitions(
(input) -> mockConsumer, null, null, Pattern.compile("topic[A-Z]")));
}
@Scheduled(initialDelay = 5000, fixedDelay = 15000)
private void reload() {
// Periodically rebuilds the role/permission caches from persistence into
// temporary structures, then swaps them in atomically (per-field assignment)
// so readers never observe a partially built cache.
try {
Page<RoleInfo> roleInfoPage = rolePersistService.getRolesByUserNameAndRoleName(StringUtils.EMPTY,
StringUtils.EMPTY, DEFAULT_PAGE_NO, Integer.MAX_VALUE);
if (roleInfoPage == null) {
return;
}
Set<String> tmpRoleSet = new HashSet<>(16);
Map<String, List<RoleInfo>> tmpRoleInfoMap = new ConcurrentHashMap<>(16);
for (RoleInfo roleInfo : roleInfoPage.getPageItems()) {
if (!tmpRoleInfoMap.containsKey(roleInfo.getUsername())) {
tmpRoleInfoMap.put(roleInfo.getUsername(), new ArrayList<>());
}
tmpRoleInfoMap.get(roleInfo.getUsername()).add(roleInfo);
tmpRoleSet.add(roleInfo.getRole());
}
Map<String, List<PermissionInfo>> tmpPermissionInfoMap = new ConcurrentHashMap<>(16);
for (String role : tmpRoleSet) {
Page<PermissionInfo> permissionInfoPage = permissionPersistService.getPermissions(role, DEFAULT_PAGE_NO,
Integer.MAX_VALUE);
tmpPermissionInfoMap.put(role, permissionInfoPage.getPageItems());
}
roleSet = tmpRoleSet;
roleInfoMap = tmpRoleInfoMap;
permissionInfoMap = tmpPermissionInfoMap;
} catch (Exception e) {
// Best-effort refresh: keep the previous caches on any failure.
Loggers.AUTH.warn("[LOAD-ROLES] load failed", e);
}
} | @Test
// Smoke test: invokes the private @Scheduled reload() via reflection and
// passes as long as no exception escapes (cache contents are not asserted).
void reload() throws Exception {
    Method reload = nacosRoleServiceClass.getDeclaredMethod("reload");
    reload.setAccessible(true);
    reload.invoke(nacosRoleService);
} |
/**
 * Builds a Beam {@code Schema} for the named message by first resolving its
 * protobuf {@code Descriptor} from the textual schema, then deriving the Beam
 * schema through {@code ProtoDynamicMessageSchema}.
 */
public static Schema getBeamSchemaFromProtoSchema(String schemaString, String messageName) {
    Descriptors.Descriptor descriptor = getDescriptorFromProtoSchema(schemaString, messageName);
    return ProtoDynamicMessageSchema.forDescriptor(ProtoDomain.buildFrom(descriptor), descriptor)
        .getSchema();
} | @Test
// Verifies that a package-qualified proto message name resolves to a Beam
// schema with the expected field names (field order/types not asserted here).
public void testProtoSchemaWitPackageStringToBeamSchema() {
    Schema schema =
        ProtoByteUtils.getBeamSchemaFromProtoSchema(
            PROTO_STRING_PACKAGE_SCHEMA, "com.test.proto.MyMessage");
    Assert.assertEquals(schema.getFieldNames(), SCHEMA.getFieldNames());
} |
/**
 * Fetches the bucket listing for the given bucket id and bucket space by
 * sending a {@code GetBucketListMessage} and unwrapping the typed reply.
 *
 * @throws BucketStatsException propagated from {@code sendMessage} on failure
 */
public List<GetBucketListReply.BucketInfo> retrieveBucketList(BucketId bucketId, String bucketSpace) throws BucketStatsException {
    GetBucketListMessage msg = new GetBucketListMessage(bucketId, bucketSpace);
    GetBucketListReply bucketListReply = sendMessage(msg, GetBucketListReply.class);
    return bucketListReply.getBuckets();
} | @Test
// Verifies that the configured route is applied to the messagebus session
// (session-level), not set on each individual message.
void testRoute() throws BucketStatsException {
    String route = "default";
    BucketId bucketId = bucketIdFactory.getBucketId(new DocumentId("id:ns:type::another"));
    GetBucketListReply reply = new GetBucketListReply();
    reply.getBuckets().add(new GetBucketListReply.BucketInfo(bucketId, "I like turtles!"));
    when(mockedSession.syncSend(any())).thenReturn(reply);
    BucketStatsRetriever retriever = new BucketStatsRetriever(mockedFactory, route, t -> {
    });
    retriever.retrieveBucketList(new BucketId(0), bucketSpace);
    // Route is set at session-level, not per message sent.
    verify(mockedSession).setRoute(eq(route));
} |
/**
 * Maps a provider file name (e.g. "s3://...") to the corresponding
 * "pvfs://connection-name/..." file name for the given connection details.
 *
 * @param providerFileName normalized provider-scheme file name to convert
 * @param details connection details used to derive the connection root
 * @return the pvfs file name for the path under the connection root
 * @throws IllegalArgumentException if the provider uri is not under the
 *         connection's root provider uri
 * @throws KettleException propagated from root-uri/file-name construction
 */
@NonNull
@Override
public ConnectionFileName toPvfsFileName( @NonNull FileName providerFileName, @NonNull T details )
    throws KettleException {
    // Determine the part of provider file name following the connection "root".
    // Use the transformer to generate the connection root provider uri.
    // Both uris are assumed to be normalized.
    // Examples:
    // - connectionRootProviderUri: "hcp://domain.my:443/root/path/" | "s3://" | "local://"
    // - providerUri: "hcp://domain.my:443/root/path/rest/path" | "s3://rest/path"
    // Example: "pvfs://my-connection"
    String connectionRootProviderUri = getConnectionRootProviderUriPrefix( details );
    String providerUri = providerFileName.getURI();
    if ( !connectionFileNameUtils.isDescendantOrSelf( providerUri, connectionRootProviderUri ) ) {
        throw new IllegalArgumentException(
            String.format(
                "Provider file name '%s' is not a descendant of the connection root '%s'.",
                providerUri,
                connectionRootProviderUri ) );
    }
    // Strip the root prefix; what remains is the connection-relative path.
    String restUriPath = providerUri.substring( connectionRootProviderUri.length() );
    // Examples: "/rest/path" or "rest/path"
    return buildPvfsFileName( details, restUriPath, providerFileName.getType() );
} | @Test
// Verifies that a provider uri exactly equal to the connection root maps to
// the bare "pvfs://<connection>/" name, and that the root uri is normalized.
public void testToPvfsFileNameHandlesTheConnectionRoot() throws Exception {
    // Example: SMB with root path
    mockDetailsWithDomain( details1, "my-domain:8080" );
    when( details1.hasBuckets() ).thenReturn( true );
    mockDetailsWithRootPath( details1, "my/root/path" );
    String connectionRootProviderUriPrefix = "scheme1://my-domain:8080/my/root/path";
    FileName providerFileName = mockFileNameWithUri( FileName.class, connectionRootProviderUriPrefix );
    ConnectionFileName pvfsFileName = transformer.toPvfsFileName( providerFileName, details1 );
    assertEquals( "pvfs://connection-name1/", pvfsFileName.getURI() );
    // Should do connection root provider uri normalization.
    verify( kettleVFS, times( 1 ) ).resolveURI( connectionRootProviderUriPrefix );
} |
/**
 * Generates a numeric captcha string of the requested length, each character
 * being a random decimal digit '0'-'9'.
 *
 * @param length number of digits to produce; 0 yields the empty string
 * @return a string of {@code length} random digits
 */
public static String captchaNumber(int length) {
    StringBuilder sb = new StringBuilder(length);
    // ThreadLocalRandom avoids constructing (and seeding) a new Random per
    // call and is contention-free across threads. Fully qualified so no new
    // import is required.
    java.util.concurrent.ThreadLocalRandom rand = java.util.concurrent.ThreadLocalRandom.current();
    for (int i = 0; i < length; i++) {
        sb.append(rand.nextInt(10));
    }
    return sb.toString();
} | @Test
// Verifies the generated captcha strings have exactly the requested length
// for both the numeric and character variants (content is random, not asserted).
public void testCaptchaNumber() throws Exception {
    Assert.assertEquals(0, UUID.captchaNumber(0).length());
    Assert.assertEquals(2, UUID.captchaNumber(2).length());
    Assert.assertEquals(4, UUID.captchaNumber(4).length());
    Assert.assertEquals(10, UUID.captchaNumber(10).length());
    Assert.assertEquals(2, UUID.captchaChar(2).length());
    Assert.assertEquals(4, UUID.captchaChar(4).length());
    Assert.assertEquals(10, UUID.captchaChar(10).length());
} |
/**
 * Resolves the initial consume offset for the given queue according to the
 * consumer's ConsumeFromWhere policy. A non-negative stored offset always
 * wins; -1 (never consumed) falls back to the per-policy default; any other
 * negative stored offset is treated as a store-query failure.
 *
 * @param mq message queue whose starting offset is being computed
 * @return the non-negative offset to start pulling from
 * @throws MQClientException if the offset store returned an invalid value,
 *         a broker lookup failed, or the result ends up negative
 */
@Override
public long computePullFromWhereWithException(MessageQueue mq) throws MQClientException {
    long result = -1;
    final ConsumeFromWhere consumeFromWhere = this.defaultMQPushConsumerImpl.getDefaultMQPushConsumer().getConsumeFromWhere();
    final OffsetStore offsetStore = this.defaultMQPushConsumerImpl.getOffsetStore();
    switch (consumeFromWhere) {
        // The three deprecated policies fall through to LAST_OFFSET handling.
        case CONSUME_FROM_LAST_OFFSET_AND_FROM_MIN_WHEN_BOOT_FIRST:
        case CONSUME_FROM_MIN_OFFSET:
        case CONSUME_FROM_MAX_OFFSET:
        case CONSUME_FROM_LAST_OFFSET: {
            long lastOffset = offsetStore.readOffset(mq, ReadOffsetType.READ_FROM_STORE);
            if (lastOffset >= 0) {
                result = lastOffset;
            }
            // First start,no offset
            else if (-1 == lastOffset) {
                if (mq.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
                    // Retry topics start from the beginning.
                    result = 0L;
                } else {
                    try {
                        // Normal topics start at the broker's current max offset.
                        result = this.mQClientFactory.getMQAdminImpl().maxOffset(mq);
                    } catch (MQClientException e) {
                        log.warn("Compute consume offset from last offset exception, mq={}, exception={}", mq, e);
                        throw e;
                    }
                }
            } else {
                throw new MQClientException(ResponseCode.QUERY_NOT_FOUND, "Failed to query consume offset from " +
                    "offset store");
            }
            break;
        }
        case CONSUME_FROM_FIRST_OFFSET: {
            long lastOffset = offsetStore.readOffset(mq, ReadOffsetType.READ_FROM_STORE);
            if (lastOffset >= 0) {
                result = lastOffset;
            } else if (-1 == lastOffset) {
                //the offset will be fixed by the OFFSET_ILLEGAL process
                result = 0L;
            } else {
                throw new MQClientException(ResponseCode.QUERY_NOT_FOUND, "Failed to query offset from offset " +
                    "store");
            }
            break;
        }
        case CONSUME_FROM_TIMESTAMP: {
            long lastOffset = offsetStore.readOffset(mq, ReadOffsetType.READ_FROM_STORE);
            if (lastOffset >= 0) {
                result = lastOffset;
            } else if (-1 == lastOffset) {
                if (mq.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
                    try {
                        // Retry topics ignore the timestamp and jump to max offset.
                        result = this.mQClientFactory.getMQAdminImpl().maxOffset(mq);
                    } catch (MQClientException e) {
                        log.warn("Compute consume offset from last offset exception, mq={}, exception={}", mq, e);
                        throw e;
                    }
                } else {
                    try {
                        // Translate the configured consume timestamp into an offset.
                        long timestamp = UtilAll.parseDate(this.defaultMQPushConsumerImpl.getDefaultMQPushConsumer().getConsumeTimestamp(),
                            UtilAll.YYYYMMDDHHMMSS).getTime();
                        result = this.mQClientFactory.getMQAdminImpl().searchOffset(mq, timestamp);
                    } catch (MQClientException e) {
                        log.warn("Compute consume offset from last offset exception, mq={}, exception={}", mq, e);
                        throw e;
                    }
                }
            } else {
                throw new MQClientException(ResponseCode.QUERY_NOT_FOUND, "Failed to query offset from offset " +
                    "store");
            }
            break;
        }
        default:
            break;
    }
    // An unmatched policy or a failed resolution leaves result negative.
    if (result < 0) {
        throw new MQClientException(ResponseCode.SYSTEM_ERROR, "Found unexpected result " + result);
    }
    return result;
} | @Test
// With CONSUME_FROM_TIMESTAMP and no stored offset (-1): normal queues resolve
// via searchOffset (12345), retry queues jump to maxOffset (23456).
public void testComputePullFromWhereWithException_eq_minus1_timestamp() throws MQClientException {
    when(offsetStore.readOffset(any(MessageQueue.class), any(ReadOffsetType.class))).thenReturn(-1L);
    consumer.setConsumeFromWhere(ConsumeFromWhere.CONSUME_FROM_TIMESTAMP);
    when(admin.searchOffset(any(MessageQueue.class), anyLong())).thenReturn(12345L);
    when(admin.maxOffset(any(MessageQueue.class))).thenReturn(23456L);
    assertEquals(12345L, rebalanceImpl.computePullFromWhereWithException(mq));
    assertEquals(23456L, rebalanceImpl.computePullFromWhereWithException(retryMq));
} |
/**
 * Endpoint for app-to-app SAML AuthnRequests: validates the request type,
 * starts the authentication process and returns the parameters the calling
 * app needs to continue authentication.
 *
 * @param requestType value of the "Type" form parameter; validated first
 * @param relayState SAML RelayState passed through to the app parameters
 * @return authentication parameters for the calling app, rendered as JSON
 */
@Operation(summary = "Receive app to app SAML AuthnRequest")
@PostMapping(value = {"/frontchannel/saml/v4/entrance/request_authentication", "/frontchannel/saml/v4/idp/request_authentication"}, produces = "application/json", consumes = "application/x-www-form-urlencoded", params = "Type")
@ResponseBody
public Map<String, Object> requestAuthenticationApp(HttpServletRequest request,
                                                    @RequestParam(name = "Type") String requestType,
                                                    @RequestParam(name = "RelayState") String relayState) throws SamlValidationException, DienstencatalogusException, SharedServiceClientException, ComponentInitializationException, MessageDecodingException, AdException, SamlSessionException {
    validateRequestType(requestType, relayState);
    AuthenticationRequest authenticationRequest = authenticationService.startAuthenticationProcess(request);
    return authenticationAppToAppService.createAuthenticationParameters(relayState, authenticationRequest);
} | @Test
// Happy path: the controller delegates to the authentication service and the
// app-to-app service exactly once each and returns the produced parameter map.
public void requestAuthenticationAppTest() throws DienstencatalogusException, ComponentInitializationException, SamlSessionException, AdException, SamlValidationException, SharedServiceClientException, MessageDecodingException {
    Map<String, Object> authenticationParameters = new HashMap<>();
    authenticationParameters.put("parameter1", "valueParameter1");
    AuthenticationRequest authenticationRequest = new AuthenticationRequest();
    when(authenticationServiceMock.startAuthenticationProcess(any(HttpServletRequest.class))).thenReturn(authenticationRequest);
    when(authenticationAppToAppServiceMock.createAuthenticationParameters(anyString(), any(AuthenticationRequest.class))).thenReturn(authenticationParameters);
    Map<String, Object> result = authenticationControllerMock.requestAuthenticationApp(request, APP_TO_APP.type, "relayState");
    assertNotNull(result);
    assertEquals(authenticationParameters.size(), result.size());
    verify(authenticationServiceMock, times(1)).startAuthenticationProcess(any(HttpServletRequest.class));
    verify(authenticationAppToAppServiceMock, times(1)).createAuthenticationParameters(anyString(), any(AuthenticationRequest.class));
} |
/**
 * Checks whether a PUK request is allowed for the given bsn/document,
 * based on the most recent pen request's expiration date.
 *
 * @return the shared OK status map when allowed
 * @throws PukRequestException when the pen request validity check fails
 */
public Map<String, String> pukRequestAllowed(PukRequest request) throws PukRequestException {
    // NOTE(review): result may be null if no matching pen request exists;
    // checkExpirationDatePen is presumably expected to handle that — confirm.
    final PenRequestStatus result = repository.findFirstByBsnAndDocTypeAndSequenceNoOrderByRequestDatetimeDesc(request.getBsn(), request.getDocType(), request.getSequenceNo());
    checkExpirationDatePen(result);
    return statusOK;
} | @Test
// A pen request still within its validity window yields status "OK".
public void pukRequestAllowedIsAllowedWithin21Days() throws PukRequestException {
    // set valid date of penrequest in repo
    status.setPinResetValidDate(LocalDateTime.of(2019, 1, 2, 12, 33));
    Map<String, String> result = service.pukRequestAllowed(request);
    assertEquals("OK", result.get("status"));
} |
/**
 * Creates a Hamlet HTML builder writing to {@code out}, delegating nesting
 * level and inline state straight to the superclass.
 */
public Hamlet(PrintWriter out, int nestLevel, boolean wasInline) {
    super(out, nestLevel, wasInline);
} | @Test
// Builds a small HTML document with the fluent Hamlet API and verifies the
// emitted tags/attributes; the implicit </p> must NOT be printed (optional
// end tag), and nesting must return to level 0.
void testHamlet() {
    Hamlet h = newHamlet().
        title("test").
        h1("heading 1").
        p("#id.class").
        b("hello").
        em("world!").__().
        div("#footer").
        __("Brought to you by").
        a("https://hostname/", "Somebody").__();
    PrintWriter out = h.getWriter();
    out.flush();
    assertEquals(0, h.nestLevel);
    verify(out).print("<title");
    verify(out).print("test");
    verify(out).print("</title>");
    verify(out).print("<h1");
    verify(out).print("heading 1");
    verify(out).print("</h1>");
    verify(out).print("<p");
    verify(out).print(" id=\"id\"");
    verify(out).print(" class=\"class\"");
    verify(out).print("<b");
    verify(out).print("hello");
    verify(out).print("</b>");
    verify(out).print("<em");
    verify(out).print("world!");
    verify(out).print("</em>");
    verify(out).print("<div");
    verify(out).print(" id=\"footer\"");
    verify(out).print("Brought to you by");
    verify(out).print("<a");
    verify(out).print(" href=\"https://hostname/\"");
    verify(out).print("Somebody");
    verify(out).print("</a>");
    verify(out).print("</div>");
    verify(out, never()).print("</p>");
} |
/**
 * Adapter pass-through: delegates the asynchronous put-if-absent directly to
 * the wrapped cache and returns its completion stage unchanged.
 */
@Override
public CompletionStage<Boolean> putIfAbsentAsync(K key, V value) {
    return cache.putIfAbsentAsync(key, value);
} | @Test
// putIfAbsentAsync returns true only for absent keys and never overwrites
// an existing mapping.
public void testPutIfAbsentAsync() throws Exception {
    cache.put(42, "oldValue");
    assertTrue(adapter.putIfAbsentAsync(23, "newValue").toCompletableFuture().get());
    assertFalse(adapter.putIfAbsentAsync(42, "newValue").toCompletableFuture().get());
    assertEquals("newValue", cache.get(23));
    assertEquals("oldValue", cache.get(42));
} |
/**
 * Boxed add: stores {@code element}, mapping {@code null} to the list's
 * configured sentinel {@code nullValue} before delegating to the primitive
 * {@code addInt}.
 */
public boolean add(final Integer element)
{
    return addInt(null == element ? nullValue : element);
} | @Test
// The primitive-backed list must equal a java.util.List with the same boxed
// contents, including a trailing null element.
void shouldEqualGenericList()
{
    final int count = 7;
    final List<Integer> genericList = new ArrayList<>();
    for (int i = 0; i < count; i++)
    {
        list.add(i);
        genericList.add(i);
    }
    list.add(null);
    genericList.add(null);
    assertEquals(list, genericList);
} |
/**
 * Convenience wrapper: performs the upload via the WithHttpInfo variant and
 * returns only the response body, discarding status and headers.
 */
public ModelApiResponse uploadFile(Long petId, String additionalMetadata, File file) throws RestClientException {
    return uploadFileWithHttpInfo(petId, additionalMetadata, file).getBody();
} | @Test
// Generated stub: calls uploadFile with all-null arguments and makes no
// assertions (see TODO) — it only checks the call does not throw.
public void uploadFileTest() {
    Long petId = null;
    String additionalMetadata = null;
    File file = null;
    ModelApiResponse response = api.uploadFile(petId, additionalMetadata, file);
    // TODO: test validations
} |
/**
 * Bully election step: sends an ELECTION_INVOKE message to every candidate
 * with higher priority than {@code currentId}.
 *
 * @return {@code true} when there are no such candidates (the caller wins
 *         the election), {@code false} when the election was forwarded
 */
@Override
public boolean sendElectionMessage(int currentId, String content) {
    var candidateList = findElectionCandidateInstanceList(currentId);
    if (candidateList.isEmpty()) {
        return true;
    } else {
        var electionMessage = new Message(MessageType.ELECTION_INVOKE, "");
        candidateList.forEach((i) -> instanceMap.get(i).onMessage(electionMessage));
        return false;
    }
} | @Test
// With the only higher-priority instance (id 1) dead, instance 2 has no
// candidates left and sendElectionMessage reports the election as won (true).
void testElectionMessageAccepted() {
    var instance1 = new BullyInstance(null, 1, 1);
    var instance2 = new BullyInstance(null, 1, 2);
    var instance3 = new BullyInstance(null, 1, 3);
    var instance4 = new BullyInstance(null, 1, 4);
    Map<Integer, Instance> instanceMap = Map.of(1, instance1, 2, instance2, 3, instance3, 4, instance4);
    instance1.setAlive(false);
    var messageManager = new BullyMessageManager(instanceMap);
    var result = messageManager.sendElectionMessage(2, "2");
    assertTrue(result);
} |
/**
 * Runs {@code operation} via {@code once}, returning its result wrapped in
 * an Optional; any exception is swallowed (logged at debug) and mapped to
 * {@code Optional.empty()} — "quiet" best-effort evaluation.
 *
 * @param action action name used for logging/exception context
 * @param path path used for exception context
 * @param operation the IO-raising callable to evaluate
 */
public static <T> Optional<T> quietlyEval(String action,
                                          String path,
                                          CallableRaisingIOE<T> operation) {
    try {
        return Optional.of(once(action, path, operation));
    } catch (Exception e) {
        LOG.debug("Action {} failed", action, e);
        return Optional.empty();
    }
} | @Test
// A throwing operation (division by zero) must yield an empty Optional
// rather than propagate the ArithmeticException.
public void testQuietlyEvalReturnValueFail() {
    // use a variable so IDEs don't warn of numeric overflows
    int d = 0;
    assertOptionalUnset("quietly",
        quietlyEval("", "", () -> 3 / d));
} |
/**
 * UDF entry point: sorts the array in the default ascending order by
 * delegating to the direction-aware overload with "ASC".
 */
@Udf
public <T extends Comparable<? super T>> List<T> arraySortDefault(@UdfParameter(
    description = "The array to sort") final List<T> input) {
    return arraySortWithDirection(input, "ASC");
} | @Test
// Null input array sorts to null, not an exception or empty list.
public void shouldReturnNullForNullInput() {
    assertThat(udf.arraySortDefault((List<String>) null), is(nullValue()));
} |
// Returns the generator's word pool as a list.
// NOTE(review): Arrays.asList is a fixed-size view backed by the internal
// array — callers can overwrite elements; copy if isolation is needed.
List<String> getRandomWords() {
    return Arrays.asList(words);
} | @Test
// Different seeds (1L vs 0L) must produce different word pools.
// NOTE(review): raw-typed `new HashSet(...)` — diamond `new HashSet<>(...)`
// would avoid the unchecked warning.
public void testRandomTextDataGeneratorUniqueness() {
    RandomTextDataGenerator rtdg1 = new RandomTextDataGenerator(10, 1L, 5);
    Set<String> words1 = new HashSet(rtdg1.getRandomWords());
    RandomTextDataGenerator rtdg2 = new RandomTextDataGenerator(10, 0L, 5);
    Set<String> words2 = new HashSet(rtdg2.getRandomWords());
    assertFalse("List size mismatch across lists", words1.equals(words2));
} |
/**
 * Great-circle distance from this point to {@code other}; delegates to the
 * static {@code LatLongUtils.sphericalDistance}.
 */
public double sphericalDistance(LatLong other) {
    return LatLongUtils.sphericalDistance(this, other);
} | @Test
// Two equator points 90 degrees of longitude apart must be exactly a quarter
// of the equator circumference apart on the sphere.
public void sphericalDistance_originToIslaGenovesa_returnQuarterOfEarthEquatorCircumference() {
    // This is the origin of the WGS-84 reference system
    LatLong zeroZero = new LatLong(0d, 0d);
    // These coordinates are 1/4 Earth circumference from zero on the equator
    LatLong islaGenovesa = new LatLong(0d, -90d);
    double spherical = LatLongUtils.sphericalDistance(zeroZero, islaGenovesa);
    assertEquals(EARTH_EQUATOR_CIRCUMFERENCE / 4, spherical, 0d);
} |
/**
 * Processes an NM-reported container status on node (re)registration. Only a
 * COMPLETED status for the application's master (AM) container triggers an
 * event; unknown apps, unmanaged AMs and unknown attempts are skipped.
 *
 * @param containerStatus container status reported by the node manager
 * @param nodeId node the status originates from
 */
@SuppressWarnings("unchecked")
@VisibleForTesting
void handleNMContainerStatus(NMContainerStatus containerStatus, NodeId nodeId) {
    ApplicationAttemptId appAttemptId =
        containerStatus.getContainerId().getApplicationAttemptId();
    RMApp rmApp =
        rmContext.getRMApps().get(appAttemptId.getApplicationId());
    if (rmApp == null) {
        LOG.error("Received finished container : "
            + containerStatus.getContainerId()
            + " for unknown application " + appAttemptId.getApplicationId()
            + " Skipping.");
        return;
    }
    if (rmApp.getApplicationSubmissionContext().getUnmanagedAM()) {
        LOG.debug("Ignoring container completion status for unmanaged AM {}",
            rmApp.getApplicationId());
        return;
    }
    RMAppAttempt rmAppAttempt = rmApp.getRMAppAttempt(appAttemptId);
    if (rmAppAttempt == null) {
        LOG.info("Ignoring not found attempt " + appAttemptId);
        return;
    }
    // NOTE(review): masterContainer may be null here (the companion test sets
    // it null and expects the resulting exception to be caught by the caller).
    Container masterContainer = rmAppAttempt.getMasterContainer();
    if (masterContainer.getId().equals(containerStatus.getContainerId())
        && containerStatus.getContainerState() == ContainerState.COMPLETE) {
        ContainerStatus status =
            ContainerStatus.newInstance(containerStatus.getContainerId(),
                containerStatus.getContainerState(), containerStatus.getDiagnostics(),
                containerStatus.getContainerExitStatus());
        // sending master container finished event.
        RMAppAttemptContainerFinishedEvent evt =
            new RMAppAttemptContainerFinishedEvent(appAttemptId, status,
                nodeId);
        rmContext.getDispatcher().getEventHandler().handle(evt);
    }
} | @SuppressWarnings({ "unchecked", "rawtypes" })
// For unmanaged and managed AMs alike, container statuses with an unknown
// attempt id or a null master container must never produce a dispatcher event.
@Test
public void testHandleContainerStatusInvalidCompletions() throws Exception {
    rm = new MockRM(new YarnConfiguration());
    rm.start();
    EventHandler handler =
        spy(rm.getRMContext().getDispatcher().getEventHandler());
    // Case 1: Unmanaged AM
    MockRMAppSubmissionData data =
        MockRMAppSubmissionData.Builder.createWithMemory(1024, rm)
            .withUnmanagedAM(true)
            .build();
    RMApp app = MockRMAppSubmitter.submit(rm, data);
    // Case 1.1: AppAttemptId is null
    NMContainerStatus report =
        NMContainerStatus.newInstance(
            ContainerId.newContainerId(
                ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), 0,
            ContainerState.COMPLETE, Resource.newInstance(1024, 1),
            "Dummy Completed", 0, Priority.newInstance(10), 1234);
    rm.getResourceTrackerService().handleNMContainerStatus(report, null);
    verify(handler, never()).handle((Event) any());
    // Case 1.2: Master container is null
    RMAppAttemptImpl currentAttempt =
        (RMAppAttemptImpl) app.getCurrentAppAttempt();
    currentAttempt.setMasterContainer(null);
    report = NMContainerStatus.newInstance(
        ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), 0,
        ContainerState.COMPLETE, Resource.newInstance(1024, 1),
        "Dummy Completed", 0, Priority.newInstance(10), 1234);
    rm.getResourceTrackerService().handleNMContainerStatus(report, null);
    verify(handler, never()).handle((Event)any());
    // Case 2: Managed AM
    app = MockRMAppSubmitter.submitWithMemory(1024, rm);
    // Case 2.1: AppAttemptId is null
    report = NMContainerStatus.newInstance(
        ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1), 0,
        ContainerState.COMPLETE, Resource.newInstance(1024, 1),
        "Dummy Completed", 0, Priority.newInstance(10), 1234);
    try {
        rm.getResourceTrackerService().handleNMContainerStatus(report, null);
    } catch (Exception e) {
        // expected - ignore
    }
    verify(handler, never()).handle((Event)any());
    // Case 2.2: Master container is null
    currentAttempt =
        (RMAppAttemptImpl) app.getCurrentAppAttempt();
    currentAttempt.setMasterContainer(null);
    report = NMContainerStatus.newInstance(
        ContainerId.newContainerId(currentAttempt.getAppAttemptId(), 0), 0,
        ContainerState.COMPLETE, Resource.newInstance(1024, 1),
        "Dummy Completed", 0, Priority.newInstance(10), 1234);
    try {
        rm.getResourceTrackerService().handleNMContainerStatus(report, null);
    } catch (Exception e) {
        // expected - ignore
    }
    verify(handler, never()).handle((Event)any());
} |
/**
 * All Void values share one structural value: the coder returns the same
 * shared sentinel object for every (necessarily null) input.
 */
@Override
public Object structuralValue(Void value) {
    return STRUCTURAL_VOID_VALUE;
} | @Test
// The structural value must be not just equal but the identical object
// across calls (avoids encoding/comparing empty byte arrays).
public void testStructuralValueSharesSameObject() {
    assertEquals(TEST_CODER.structuralValue(null), TEST_CODER.structuralValue(null));
    // This is a minor performance optimization to not encode and compare empty byte
    // arrays.
    assertSame(TEST_CODER.structuralValue(null), TEST_CODER.structuralValue(null));
} |
/**
 * Creates a LocalResourceId for a path that may not exist yet. Directory
 * specs get a trailing separator appended if missing; file specs must not
 * end with a separator.
 *
 * @throws IllegalArgumentException if a file spec ends with the separator
 */
@Override
protected LocalResourceId matchNewResource(String singleResourceSpec, boolean isDirectory) {
    if (isDirectory) {
        if (!singleResourceSpec.endsWith(File.separator)) {
            singleResourceSpec += File.separator;
        }
    } else {
        checkArgument(
            !singleResourceSpec.endsWith(File.separator),
            "Expected file path but received directory path [%s].",
            singleResourceSpec);
    }
    Path path = Paths.get(singleResourceSpec);
    return LocalResourceId.fromPath(path, isDirectory);
} | @Test
// File and directory resources for the same spec must differ, share the same
// parent directory, and a trailing separator on a file spec must be rejected.
public void testMatchNewResource() {
    // TODO: Java core test failing on windows, https://github.com/apache/beam/issues/20461
    assumeFalse(SystemUtils.IS_OS_WINDOWS);
    LocalResourceId fileResource =
        localFileSystem.matchNewResource("/some/test/resource/path", false /* isDirectory */);
    LocalResourceId dirResource =
        localFileSystem.matchNewResource("/some/test/resource/path", true /* isDirectory */);
    assertNotEquals(fileResource, dirResource);
    assertThat(
        fileResource
            .getCurrentDirectory()
            .resolve("path", StandardResolveOptions.RESOLVE_DIRECTORY),
        equalTo(dirResource.getCurrentDirectory()));
    assertThat(
        fileResource
            .getCurrentDirectory()
            .resolve("path", StandardResolveOptions.RESOLVE_DIRECTORY),
        equalTo(dirResource.getCurrentDirectory()));
    assertThat(dirResource.toString(), equalTo("/some/test/resource/path/"));
    IllegalArgumentException exception =
        assertThrows(
            IllegalArgumentException.class,
            () -> localFileSystem.matchNewResource("/some/test/resource/path/", false));
    assertTrue(exception.getMessage().startsWith("Expected file path but received directory path"));
} |
/**
 * Looks up a classpath resource file by its exact name, searching all
 * resources that share the file's extension.
 *
 * @param fileName name including extension, e.g. "model.dmn"
 * @return the matching {@code File}
 * @throws IllegalArgumentException if no resource with that name is found
 */
public static File getFile(String fileName) {
    // Extension is everything after the last dot; with no dot, lastIndexOf
    // returns -1 and the whole name is used as the "extension".
    String extension = fileName.substring(fileName.lastIndexOf('.') + 1);
    // orElseThrow replaces the original orElse(null) + explicit null check.
    return ResourceHelper.getFileResourcesByExtension(extension)
            .stream()
            .filter(file -> file.getName().equals(fileName))
            .findFirst()
            .orElseThrow(() -> new IllegalArgumentException("Failed to find file " + fileName));
} | @Test
// A lookup inside a non-existing directory must fail with
// IllegalArgumentException (exercises the two-argument getFile overload).
public void getFileNotExistingDirectory() {
    assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> FileUtils.getFile(TEST_FILE, NOT_EXISTING_DIRECTORY));
} |
/**
 * Reads the configuration value straight from JVM system properties;
 * returns null when the property is not set.
 */
@Override
public Object getInternalProperty(String key) {
    return System.getProperty(key);
} | @Test
// Before the system property is set every accessor returns null/false;
// after System.setProperty all accessors reflect the new value.
void testGetSysProperty() {
    Assertions.assertNull(sysConfig.getInternalProperty(MOCK_KEY));
    Assertions.assertFalse(sysConfig.containsKey(MOCK_KEY));
    Assertions.assertNull(sysConfig.getString(MOCK_KEY));
    Assertions.assertNull(sysConfig.getProperty(MOCK_KEY));
    System.setProperty(MOCK_KEY, MOCK_STRING_VALUE);
    Assertions.assertTrue(sysConfig.containsKey(MOCK_KEY));
    Assertions.assertEquals(MOCK_STRING_VALUE, sysConfig.getInternalProperty(MOCK_KEY));
    Assertions.assertEquals(MOCK_STRING_VALUE, sysConfig.getString(MOCK_KEY, MOCK_STRING_VALUE));
    Assertions.assertEquals(MOCK_STRING_VALUE, sysConfig.getProperty(MOCK_KEY, MOCK_STRING_VALUE));
} |
/**
 * Picks an eviction candidate using the page store directory's evictor;
 * returns null when nothing is evictable. Caller must hold the lock
 * (see {@code @GuardedBy}).
 */
@Override
@Nullable
@GuardedBy("getLock()")
public PageInfo evict(CacheScope scope, PageStoreDir pageStoreDir) {
    return evictInternal(pageStoreDir.getEvictor());
} | @Test
// An added page is returned as the eviction candidate; once removed, evict
// yields null and the cached-page gauge drops to zero.
public void evict() throws Exception {
    mMetaStore.addPage(mPage, mPageInfo);
    assertEquals(mPageInfo, mMetaStore.evict(mPageStoreDir));
    mMetaStore.removePage(mPageInfo.getPageId());
    Assert.assertNull(mMetaStore.evict(mPageStoreDir));
    assertEquals(0, mCachedPageGauge.getValue());
} |
/**
 * Returns the value previously cached under {@code idempotentId}.
 *
 * @param idempotentId key of a previously stored value
 * @return the cached value, cast to the caller's expected type
 * @throws IllegalArgumentException if the id was never cached; the message
 *         lists all known keys to ease debugging
 */
@Override
// Erasure makes the cast uncheckable at runtime; the caller asserts the
// stored type via T. Suppression is scoped to this method only.
@SuppressWarnings("unchecked")
public <T extends Serializable> T getCachedValue(String idempotentId)
    throws IllegalArgumentException {
    if (!knownValues.containsKey(idempotentId)) {
        throw new IllegalArgumentException(
            idempotentId
                + " is not a known key, known keys: "
                + Joiner.on(", ").join(knownValues.keySet()));
    }
    return (T) knownValues.get(idempotentId);
} | @Test
// A value stored through executeAndSwallowIOExceptions is retrievable by key.
public void getCachedValue() throws Exception {
    googleExecutor.setJobId(JOB_ID);
    googleExecutor.executeAndSwallowIOExceptions("id1", ITEM_NAME, () -> "idempotentId1");
    assertEquals(googleExecutor.getCachedValue("id1"), "idempotentId1");
} |
/**
 * Parses {@code name} into a JMX {@link ObjectName}, converting the checked
 * {@link MalformedObjectNameException} into the cache's unchecked
 * {@code CacheException} (with the offending name in the message).
 */
static ObjectName newObjectName(String name) {
    try {
        return new ObjectName(name);
    } catch (MalformedObjectNameException e) {
        throw new CacheException("Illegal ObjectName: " + name, e);
    }
} | @Test
// An invalid ObjectName string ("a=b" lacks a domain) must surface as
// CacheException, not the raw MalformedObjectNameException.
public void newObjectName_malformed() {
    assertThrows(CacheException.class, () -> JmxRegistration.newObjectName("a=b"));
} |
/** Whether this batch contains no events; delegates to the backing list. */
public boolean isEmpty() {
    return events.isEmpty();
} | @Test
// Only the fixture constructed with no events reports empty.
public void testIsEmpty() {
    assertFalse(batchEventData.isEmpty());
    assertFalse(batchEventDataSameAttribute.isEmpty());
    assertFalse(batchEventDataOtherSource.isEmpty());
    assertFalse(batchEventDataOtherPartitionId.isEmpty());
    assertFalse(batchEventDataOtherEvent.isEmpty());
    assertTrue(batchEventDataNoEvent.isEmpty());
} |
/**
 * Writes all 8 bytes of {@code data} to {@code out} in little-endian order,
 * i.e. least-significant byte first.
 */
public static void write8ByteUnsignedIntLittleEndian(long data, ByteArrayOutputStream out) {
    // Shift out each byte starting with the lowest; write(int) keeps only
    // the low 8 bits, so the cast/shift sequence matches the unrolled form.
    for (int shift = 0; shift < 64; shift += 8) {
        out.write((byte) (data >>> shift));
    }
} | @Test
// Known vector: 72,340,168,547,287,295 serializes little-endian as
// {-1, -64, 64, 1, 0, 1, 1, 1}.
public void testWrite8ByteUnsignedIntLittleEndian() {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ByteHelper.write8ByteUnsignedIntLittleEndian(72_340_168_547_287_295L, out);
    Assert.assertArrayEquals(new byte[] {-1, -64, 64, 1, 0, 1, 1, 1}, out.toByteArray());
} |
/**
 * Convenience overload: inflates zlib-compressed data with an effectively
 * unlimited output size cap (Long.MAX_VALUE).
 */
public static String decompressZlib(byte[] compressedData) throws IOException {
    return decompressZlib(compressedData, Long.MAX_VALUE);
} | @Test
// Zlib-bomb defense: a payload inflating to 64 MB is truncated at the
// 1024-byte limit instead of exhausting memory.
public void testDecompressZlibBomb() throws URISyntaxException, IOException {
    final URL url = Resources.getResource("org/graylog2/plugin/zlib64mb.raw");
    final byte[] testData = Files.readAllBytes(Paths.get(url.toURI()));
    assertThat(Tools.decompressZlib(testData, 1024)).hasSize(1024);
} |
/**
 * Compares two collections by first rendering each to its canonical string
 * form via {@code string(...)} and then delegating to the string comparator.
 */
@Override
public int compare(Collection<? extends Comparable<T>> one, Collection<? extends Comparable<T>> other) {
    return comparator.compare(string(one), string(other));
} | @Test
// Collections compare by content independent of element order: greater,
// lesser, and order-insensitive equality cases.
public void shouldCompareSortedCollections() {
    AlphaAsciiCollectionComparator<Foo> comparator = new AlphaAsciiCollectionComparator<>();
    assertThat(comparator.compare(List.of(new Foo("foo"), new Foo("quux")), List.of(new Foo("foo"), new Foo("bar"))), greaterThan(0));
    assertThat(comparator.compare(List.of(new Foo("foo"), new Foo("abc")), List.of(new Foo("foo"), new Foo("bar"))), lessThan(0));
    assertThat(comparator.compare(List.of(new Foo("foo"), new Foo("bar")), List.of(new Foo("bar"), new Foo("foo"))), is(0));
} |
/**
 * Tries every candidate delimiter against the same reader (via mark/reset)
 * and returns the per-delimiter results sorted by confidence.
 *
 * @param reader input to sniff; wrapped in a BufferedReader if it does not
 *               support mark/reset
 */
List<CSVResult> sniff(Reader reader) throws IOException {
    if (!reader.markSupported()) {
        reader = new BufferedReader(reader);
    }
    List<CSVResult> ret = new ArrayList<>();
    for (char delimiter : delimiters) {
        // Mark before each attempt so the next delimiter re-reads the same data.
        reader.mark(markLimit);
        try {
            CSVResult result = new Snifflet(delimiter).sniff(reader);
            ret.add(result);
        } finally {
            reader.reset();
        }
    }
    Collections.sort(ret);
    return ret;
} | @Test
// For comma-, semicolon-, pipe- and tab-delimited samples, the sniffer must
// rank the true delimiter first out of the four candidates.
public void testCSVBasic() throws Exception {
    List<CSVResult> results = sniff(DELIMITERS, CSV_BASIC, StandardCharsets.UTF_8);
    assertEquals(4, results.size());
    assertEquals(Character.valueOf(','), results.get(0).getDelimiter());
    results = sniff(DELIMITERS, CSV_BASIC2, StandardCharsets.UTF_8);
    assertEquals(4, results.size());
    assertEquals(Character.valueOf(';'), results.get(0).getDelimiter());
    results = sniff(DELIMITERS, CSV_BASIC3, StandardCharsets.UTF_8);
    assertEquals(4, results.size());
    assertEquals(Character.valueOf('|'), results.get(0).getDelimiter());
    results = sniff(DELIMITERS, TSV_BASIC, StandardCharsets.UTF_8);
    assertEquals(4, results.size());
    assertEquals(Character.valueOf('\t'), results.get(0).getDelimiter());
} |
/** Returns the user's session catalog name. */
public String getCatalog() {
    return catalog;
} | @Test
// A freshly constructed UserProperty defaults to the internal catalog.
public void testGetDefaultSessionCatalog() {
    UserProperty userProperty = new UserProperty();
    String defaultSessionCatalog = userProperty.getCatalog();
    Assert.assertEquals(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME, defaultSessionCatalog);
} |
/**
 * Google Drive authenticates via OAuth, so no password field is offered;
 * only the account email is collected.
 */
@Override
public boolean isPasswordConfigurable() {
    // Only provide account email
    return false;
} | @Test
// The Drive protocol must report passwords as not configurable.
public void testPassword() {
    assertFalse(new DriveProtocol().isPasswordConfigurable());
} |
/**
 * Registers {@code dir} as a static-content context at {@code contextPath},
 * creating the directory (or emptying it if it already exists) first.
 *
 * @throws IllegalStateException if the directory cannot be created/cleaned
 */
@VisibleForTesting
StandardContext addStaticDir(Tomcat tomcat, String contextPath, File dir) {
    try {
        fs.createOrCleanupDir(dir);
    } catch (IOException e) {
        throw new IllegalStateException(format("Fail to create or clean-up directory %s", dir.getAbsolutePath()), e);
    }
    return addContext(tomcat, contextPath, dir);
} | @Test
// A pre-existing static dir containing a file must be emptied (not deleted)
// when registered.
public void cleanup_static_directory_if_already_exists() throws Exception {
    File dir = temp.newFolder();
    FileUtils.touch(new File(dir, "foo.txt"));
    underTest.addStaticDir(tomcat, "/deploy", dir);
    assertThat(dir).isDirectory()
        .exists()
        .isEmptyDirectory();
} |
/**
 * Records a failed cache load: updates the failure timer with the load
 * duration (nanoseconds) and adds it to the total load time counter.
 */
@Override
public void recordLoadFailure(long loadTime) {
    loadFailure.update(loadTime, TimeUnit.NANOSECONDS);
    totalLoadTime.add(loadTime);
} | @Test
// One recorded failure increments the loads-failure timer count to 1.
public void loadFailure() {
    stats.recordLoadFailure(256);
    assertThat(registry.timer(PREFIX + ".loads-failure").getCount()).isEqualTo(1);
} |
/**
 * Withdraws a like: decrements the like counter, but only while it is
 * positive, so the count cannot be driven below its current floor.
 */
public void cancelLike() {
    if (this.likesCount <= 0) {
        return; // nothing to withdraw
    }
    this.likesCount--;
} | @Test
// Cancelling a like on a review with zero likes leaves the count at zero.
void review_공감수가_0이라면_공감수를_감소하지_않는다() {
    // given
    Review review = Review.builder().likesCount(0).build();
    // when
    review.cancelLike();
    // then
    assertEquals(review.getLikesCount(), 0);
} |
/**
 * Executes one background job on this worker thread: marks the thread
 * occupied, restores the job's MDC context, performs the job, and maps any
 * failure to the appropriate FAILED-state transition. Always releases the
 * thread and clears MDC in the finally block.
 */
@Override
public void run() {
    try {
        backgroundJobServer.getJobSteward().notifyThreadOccupied();
        MDCMapper.loadMDCContextFromJob(job);
        performJob();
    } catch (Exception e) {
        if (isJobDeletedWhileProcessing(e)) {
            // nothing to do anymore as Job is deleted
            return;
        } else if (isJobServerStopped(e)) {
            updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
            // restore the interrupt flag so callers can observe the stop
            Thread.currentThread().interrupt();
        } else if (isJobNotFoundException(e)) {
            updateJobStateToFailedAndRunJobFilters("Job method not found", e);
        } else {
            updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
        }
    } finally {
        backgroundJobServer.getJobSteward().notifyThreadIdle();
        MDC.clear();
    }
} | @Test
// A job already in PROCESSING (same server id) runs to completion with a
// single PROCESSING->SUCCEEDED transition and no error logs.
void onStartIfJobIsProcessingByStorageProviderItStaysInProcessingAndThenSucceeded() throws Exception {
    Job job = anEnqueuedJob()
        .withProcessingState(backgroundJobServer.getConfiguration().getId())
        .build();
    mockBackgroundJobRunner(job, jobFromStorage -> {
    });
    BackgroundJobPerformer backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
    final ListAppender<ILoggingEvent> logger = LoggerAssert.initFor(backgroundJobPerformer);
    backgroundJobPerformer.run();
    assertThat(logAllStateChangesFilter.getStateChanges(job)).containsExactly("PROCESSING->SUCCEEDED");
    assertThat(logAllStateChangesFilter.onProcessingIsCalled(job)).isTrue();
    assertThat(logAllStateChangesFilter.onProcessingSucceededIsCalled(job)).isTrue();
    assertThat(logger)
        .hasNoErrorLogMessages();
} |
/**
 * Splits {@code str} on {@code delim} with trimming and empty-part removal
 * both disabled; delegates to the four-argument overload.
 */
public static List<String> split( String str, char delim ) {
    return split( str, delim, false, false );
} | @Test
// Exercises split(str, delim, trim=true, excludeEmpty=true): empty input,
// empty parts dropped, whitespace trimmed, and space/newline delimiters.
void splitTrimAndExcludeEmpty() {
    // empty
    assertEquals(
        Arrays.asList(),
        StringUtils.split( "", ',', true, true ) );
    // not empty
    assertEquals(
        Arrays.asList( "a" ),
        StringUtils.split( "a", ',', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b" ),
        StringUtils.split( "a,b", ',', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a,b,c", ',', true, true ) );
    // empty parts
    assertEquals(
        Arrays.asList( "b", "c" ),
        StringUtils.split( ",b,c", ',', true, true ) );
    assertEquals(
        Arrays.asList( "a", "c" ),
        StringUtils.split( "a,,c", ',', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b" ),
        StringUtils.split( "a,b,", ',', true, true ) );
    // parts with leading/trailing spaces
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a, b, c", ',', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( " a,b,c ", ',', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( " a, b ,c ", ',', true, true ) );
    // space delimiter
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a b c", ' ', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a  b  c", ' ', true, true ) );
    // new line delimiter
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a\nb\nc", '\n', true, true ) );
    assertEquals(
        Arrays.asList( "a", "b", "c" ),
        StringUtils.split( "a\n\n\nb\n\nc", '\n', true, true ) );
} |
/**
 * Prepares and sends fetch requests for all fetchable partitions. Success
 * and failure callbacks re-synchronize on this Fetcher before mutating
 * shared state.
 *
 * @return the number of fetch requests sent (one per target node)
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
        fetchRequests,
        (fetchTarget, data, clientResponse) -> {
            synchronized (Fetcher.this) {
                handleFetchSuccess(fetchTarget, data, clientResponse);
            }
        },
        (fetchTarget, data, error) -> {
            synchronized (Fetcher.this) {
                handleFetchFailure(fetchTarget, data, error);
            }
        });
    return fetchRequests.size();
} | @Test
// Verifies that when buffered records are not aligned to max.poll.records
// (here 2), successive polls never exceed the limit, that no new fetch is
// sent while records remain buffered, and that a new fetch is sent as soon
// as one partition's buffered records are fully drained.
public void testFetchMaxPollRecordsUnaligned() {
    final int maxPollRecords = 2;
    buildFetcher(maxPollRecords);
    Set<TopicPartition> tps = new HashSet<>();
    tps.add(tp0);
    tps.add(tp1);
    assignFromUser(tps);
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 6);
    client.prepareResponse(fetchResponse2(tidp0, records, 100L, tidp1, moreRecords, 100L));
    client.prepareResponse(fullFetchResponse(tidp0, emptyRecords, Errors.NONE, 100L, 0));
    // Send fetch request because we do not have pending fetch responses to process.
    // The first fetch response will return 3 records for tp0 and 3 more for tp1.
    assertEquals(1, sendFetches());
    // The poll returns 2 records from one of the topic-partitions (non-deterministic).
    // This leaves 1 record pending from that topic-partition, and the remaining 3 from the other.
    pollAndValidateMaxPollRecordsNotExceeded(maxPollRecords);
    // See if we need to send another fetch, which we do not because we have records in hand.
    assertEquals(0, sendFetches());
    // The poll returns 2 more records, 1 from the topic-partition we've already been
    // processing, and 1 more from the other topic-partition. This means we have processed
    // all records from the former, and 2 remain from the latter.
    pollAndValidateMaxPollRecordsNotExceeded(maxPollRecords);
    // See if we need to send another fetch, which we do because we've processed all of the records
    // from one of the topic-partitions. The fetch response does not contain any more records.
    assertEquals(1, sendFetches());
    // The poll returns the final 2 records.
    pollAndValidateMaxPollRecordsNotExceeded(maxPollRecords);
} |
/**
 * Maps a MySQL binlog event type code to a short human-readable name,
 * mirroring the naming used by MySQL's own event-type-to-string mapping
 * (NOTE(review): the numeric values behind these constants are declared
 * elsewhere in this class -- not visible here).
 *
 * @param type the binlog event type code
 * @return the event name, or {@code "Unknown type:" + type} for codes
 *         without a dedicated case
 */
public static String getTypeName(final int type) {
    switch (type) {
        case START_EVENT_V3:
            return "Start_v3";
        case STOP_EVENT:
            return "Stop";
        case QUERY_EVENT:
            return "Query";
        case ROTATE_EVENT:
            return "Rotate";
        case INTVAR_EVENT:
            return "Intvar";
        case LOAD_EVENT:
            return "Load";
        case NEW_LOAD_EVENT:
            return "New_load";
        case SLAVE_EVENT:
            return "Slave";
        case CREATE_FILE_EVENT:
            return "Create_file";
        case APPEND_BLOCK_EVENT:
            return "Append_block";
        case DELETE_FILE_EVENT:
            return "Delete_file";
        case EXEC_LOAD_EVENT:
            return "Exec_load";
        case RAND_EVENT:
            return "RAND";
        case XID_EVENT:
            return "Xid";
        case USER_VAR_EVENT:
            return "User var";
        case FORMAT_DESCRIPTION_EVENT:
            return "Format_desc";
        case TABLE_MAP_EVENT:
            return "Table_map";
        case PRE_GA_WRITE_ROWS_EVENT:
            return "Write_rows_event_old";
        case PRE_GA_UPDATE_ROWS_EVENT:
            return "Update_rows_event_old";
        case PRE_GA_DELETE_ROWS_EVENT:
            return "Delete_rows_event_old";
        case WRITE_ROWS_EVENT_V1:
            return "Write_rows_v1";
        case UPDATE_ROWS_EVENT_V1:
            return "Update_rows_v1";
        case DELETE_ROWS_EVENT_V1:
            return "Delete_rows_v1";
        case BEGIN_LOAD_QUERY_EVENT:
            return "Begin_load_query";
        case EXECUTE_LOAD_QUERY_EVENT:
            return "Execute_load_query";
        case INCIDENT_EVENT:
            return "Incident";
        // Both heartbeat variants share one display name.
        case HEARTBEAT_LOG_EVENT:
        case HEARTBEAT_LOG_EVENT_V2:
            return "Heartbeat";
        case IGNORABLE_LOG_EVENT:
            return "Ignorable";
        case ROWS_QUERY_LOG_EVENT:
            return "Rows_query";
        case WRITE_ROWS_EVENT:
            return "Write_rows";
        case UPDATE_ROWS_EVENT:
            return "Update_rows";
        case DELETE_ROWS_EVENT:
            return "Delete_rows";
        case GTID_LOG_EVENT:
            return "Gtid";
        case ANONYMOUS_GTID_LOG_EVENT:
            return "Anonymous_Gtid";
        case PREVIOUS_GTIDS_LOG_EVENT:
            return "Previous_gtids";
        case PARTIAL_UPDATE_ROWS_EVENT:
            return "Update_rows_partial";
        case TRANSACTION_CONTEXT_EVENT :
            return "Transaction_context";
        case VIEW_CHANGE_EVENT :
            return "view_change";
        case XA_PREPARE_LOG_EVENT :
            return "Xa_prepare";
        case TRANSACTION_PAYLOAD_EVENT :
            return "transaction_payload";
        default:
            // Unrecognized codes are reported rather than throwing, so callers
            // can safely log events from newer server versions.
            return "Unknown type:" + type;
    }
} | @Test
// Type code 24 is presumably UPDATE_ROWS_EVENT_V1 in this class's constant
// declarations (matches the expected name below) -- verify against the
// LogEvent constants, which are not visible here.
public void getTypeNameInputPositiveOutputNotNull26() {
    // Arrange
    final int type = 24;
    // Act
    final String actual = LogEvent.getTypeName(type);
    // Assert result
    Assert.assertEquals("Update_rows_v1", actual);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.