focal_method stringlengths 13 60.9k | test_case stringlengths 25 109k |
|---|---|
@Override
public int start(int from) {
Assert.notNull(this.text, "Text to find must be not null!");
final int limit = getValidEndIndex();
if(negative){
for (int i = from; i > limit; i--) {
if (NumberUtil.equals(c, text.charAt(i), caseInsensitive)) {
return i;
}
}
} else{
for (int i = from; i < limit; i++) {
if (NumberUtil.equals(c, text.charAt(i), caseInsensitive)) {
return i;
}
}
}
return -1;
} | @Test
public void startTest(){
int start = new CharFinder('a').setText("cba123").start(2);
assertEquals(2, start);
start = new CharFinder('c').setText("cba123").start(2);
assertEquals(-1, start);
start = new CharFinder('3').setText("cba123").start(2);
assertEquals(5, start);
} |
@Override
public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers,
ServiceUnitId serviceUnit,
LoadManagerContext context) {
ServiceConfiguration conf = context.brokerConfiguration();
if (!conf.isPreferLaterVersions() || brokers.isEmpty()) {
return CompletableFuture.completedFuture(brokers);
}
Version latestVersion;
try {
latestVersion = getLatestVersionNumber(brokers);
if (log.isDebugEnabled()) {
log.debug("Latest broker version found was [{}]", latestVersion);
}
} catch (Exception ex) {
log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage());
return FutureUtil.failedFuture(
new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage()));
}
int numBrokersLatestVersion = 0;
int numBrokersOlderVersion = 0;
Iterator<Map.Entry<String, BrokerLookupData>> brokerIterator = brokers.entrySet().iterator();
while (brokerIterator.hasNext()) {
Map.Entry<String, BrokerLookupData> next = brokerIterator.next();
String brokerId = next.getKey();
String version = next.getValue().brokerVersion();
Version brokerVersionVersion = Version.valueOf(version);
if (brokerVersionVersion.equals(latestVersion)) {
log.debug("Broker [{}] is running the latest version ([{}])", brokerId, version);
numBrokersLatestVersion++;
} else {
log.info("Broker [{}] is running an older version ([{}]); latest version is [{}]",
brokerId, version, latestVersion);
numBrokersOlderVersion++;
brokerIterator.remove();
}
}
if (numBrokersOlderVersion == 0) {
log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion);
}
return CompletableFuture.completedFuture(brokers);
} | @Test
public void testInvalidVersionString() {
Map<String, BrokerLookupData> originalBrokers = Map.of(
"localhost:6650", getLookupData("xxx")
);
BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter();
try {
brokerVersionFilter.filterAsync(new HashMap<>(originalBrokers), null, getContext()).get();
fail();
} catch (Exception ex) {
assertEquals(ex.getCause().getClass(), BrokerFilterBadVersionException.class);
}
} |
@GetMapping(
path = "/admin/extension/{namespaceName}/{extensionName}",
produces = MediaType.APPLICATION_JSON_VALUE
)
public ResponseEntity<ExtensionJson> getExtension(@PathVariable String namespaceName,
@PathVariable String extensionName) {
try {
admins.checkAdminUser();
ExtensionJson json;
var latest = repositories.findLatestVersion(namespaceName, extensionName, null, false, false);
if (latest != null) {
json = local.toExtensionVersionJson(latest, null, false);
json.allTargetPlatformVersions = repositories.findTargetPlatformsGroupedByVersion(latest.getExtension());
json.active = latest.getExtension().isActive();
} else {
var extension = repositories.findExtension(extensionName, namespaceName);
if (extension == null) {
var error = "Extension not found: " + NamingUtil.toExtensionId(namespaceName, extensionName);
throw new ErrorResultException(error, HttpStatus.NOT_FOUND);
}
json = new ExtensionJson();
json.namespace = extension.getNamespace().getName();
json.name = extension.getName();
json.allVersions = Collections.emptyMap();
json.allTargetPlatformVersions = Collections.emptyList();
json.active = extension.isActive();
}
return ResponseEntity.ok(json);
} catch (ErrorResultException exc) {
return exc.toResponseEntity(ExtensionJson.class);
}
} | @Test
public void testGetExtension() throws Exception {
mockAdminUser();
mockExtension(2, 0, 0);
mockMvc.perform(get("/admin/extension/{namespace}/{extension}", "foobar", "baz")
.with(user("admin_user").authorities(new SimpleGrantedAuthority(("ROLE_ADMIN"))))
.with(csrf().asHeader()))
.andExpect(status().isOk())
.andExpect(content().json(extensionJson(e -> {
e.namespace = "foobar";
e.name = "baz";
e.version = "2.0.0";
e.active = true;
})));
} |
@Nullable public static String ipOrNull(@Nullable String ip) {
if (ip == null || ip.isEmpty()) return null;
if ("::1".equals(ip) || "127.0.0.1".equals(ip)) return ip; // special-case localhost
IpFamily format = detectFamily(ip);
if (format == IpFamily.IPv4Embedded) {
ip = ip.substring(ip.lastIndexOf(':') + 1);
} else if (format == IpFamily.Unknown) {
ip = null;
}
return ip;
} | @Test void ipOrNullv4_localhost() {
assertThat(IpLiteral.ipOrNull("127.0.0.1")).isEqualTo("127.0.0.1");
} |
public static ArrayNode createEmptyArrayNode() {
return new ArrayNode(mapper.getNodeFactory());
} | @Test
void testCreateEmptyArrayNode() {
assertEquals("", JacksonUtils.createEmptyJsonNode().asText());
assertEquals(0, JacksonUtils.createEmptyArrayNode().size());
assertTrue(JacksonUtils.createEmptyArrayNode().isEmpty());
} |
public Message createMessage(String messageString) {
Message message;
try {
Map<String, Object> map = objectMapper.readValue(messageString, Map.class);
if (!map.containsKey("_id")) {
map.put("_id", UUID.randomUUID().toString());
}
final String messageField = "message"; // message field must be of type string
if (map.containsKey(messageField) && !(map.get(messageField) instanceof String)) {
map.put(messageField, String.valueOf(map.get(messageField)));
}
message = messageFactory.createMessage(map);
} catch (JacksonException e) {
message = messageFactory.createMessage(messageString, "127.0.0.1", DateTime.now(DateTimeZone.UTC));
if (StringUtils.startsWith(StringUtils.trim(messageString), "{")) {
message.addField("gl2_simulator_json_error",
"Cannot parse simulation message as JSON. Using as raw message field instead: " + e.getMessage());
}
}
return message;
} | @Test
void createMessageFromJson() {
String jsonMessage = """
{
"message": "This is a test message",
"additionalField": "this is an additional field passed"
}
""";
Message result = ruleSimulator.createMessage(jsonMessage);
Map<String, Object> fields = result.getFields();
Assertions.assertEquals("This is a test message", fields.get("message"));
Assertions.assertEquals("this is an additional field passed", fields.get("additionalField"));
} |
@Override
public void run() {
try { // make sure we call afterRun() even on crashes
// and operate countdown latches, else we may hang the parallel runner
if (steps == null) {
beforeRun();
}
if (skipped) {
return;
}
int count = steps.size();
int index = 0;
while ((index = nextStepIndex()) < count) {
currentStep = steps.get(index);
execute(currentStep);
if (currentStepResult != null) { // can be null if debug step-back or hook skip
result.addStepResult(currentStepResult);
}
}
} catch (Exception e) {
if (currentStepResult != null) {
result.addStepResult(currentStepResult);
}
logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
} finally {
if (!skipped) {
afterRun();
if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
featureRuntime.suite.abort();
}
}
if (caller.isNone()) {
logAppender.close(); // reclaim memory
}
}
} | @Test
void testSizeOfForByteArrays() {
run(
"bytes data = 'foo'",
"def arrSize = karate.sizeOf(data)"
);
assertEquals(3, get("arrSize"));
} |
public static SchemaBuilder builder(int scale) {
return SchemaBuilder.bytes()
.name(LOGICAL_NAME)
.parameter(SCALE_FIELD, Integer.toString(scale))
.version(1);
} | @Test
public void testBuilder() {
Schema plain = Decimal.builder(2).build();
assertEquals(Decimal.LOGICAL_NAME, plain.name());
assertEquals(Collections.singletonMap(Decimal.SCALE_FIELD, "2"), plain.parameters());
assertEquals(1, (Object) plain.version());
} |
public void dump(OutputStream out) {
dump(true, true, out);
} | @Test
public void dumpsAllThreads() {
final ByteArrayOutputStream output = new ByteArrayOutputStream();
threadDump.dump(output);
assertThat(output.toString())
.isEqualTo(String.format("\"runnable\" id=100 state=RUNNABLE%n" +
" at Blah.blee(Blah.java:100)%n" +
"%n" +
"%n"));
} |
@Override
@Nonnull
public <T> List<Future<T>> invokeAll(@Nonnull Collection<? extends Callable<T>> tasks) {
throwRejectedExecutionExceptionIfShutdown();
ArrayList<Future<T>> result = new ArrayList<>();
for (Callable<T> task : tasks) {
try {
result.add(new CompletedFuture<>(task.call(), null));
} catch (Exception e) {
result.add(new CompletedFuture<>(null, e));
}
}
return result;
} | @Test
void testInvokeAllWithTimeout() {
final CompletableFuture<Thread> future = new CompletableFuture<>();
testTaskSubmissionBeforeShutdown(
testInstance ->
testInstance.invokeAll(
callableCollectionFromFuture(future), 1, TimeUnit.DAYS));
assertThat(future).isCompletedWithValue(Thread.currentThread());
} |
@SuppressWarnings("unchecked")
public <T> T get(Class<T> serviceKlass) {
ensureOperational();
Check.notNull(serviceKlass, "serviceKlass");
return (T) services.get(serviceKlass);
} | @Test(expected = IllegalStateException.class)
@TestDir
public void illegalState2() throws Exception {
Server server = new Server("server", TestDirHelper.getTestDir().getAbsolutePath(), new Configuration(false));
server.get(Object.class);
} |
public static void mergeParams(
Map<String, ParamDefinition> params,
Map<String, ParamDefinition> paramsToMerge,
MergeContext context) {
if (paramsToMerge == null) {
return;
}
Stream.concat(params.keySet().stream(), paramsToMerge.keySet().stream())
.forEach(
name -> {
ParamDefinition paramToMerge = paramsToMerge.get(name);
if (paramToMerge == null) {
return;
}
if (paramToMerge.getType() == ParamType.MAP && paramToMerge.isLiteral()) {
Map<String, ParamDefinition> baseMap = mapValueOrEmpty(params, name);
Map<String, ParamDefinition> toMergeMap = mapValueOrEmpty(paramsToMerge, name);
mergeParams(
baseMap,
toMergeMap,
MergeContext.copyWithParentMode(
context, params.getOrDefault(name, paramToMerge).getMode()));
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else if (paramToMerge.getType() == ParamType.STRING_MAP
&& paramToMerge.isLiteral()) {
Map<String, String> baseMap = stringMapValueOrEmpty(params, name);
Map<String, String> toMergeMap = stringMapValueOrEmpty(paramsToMerge, name);
baseMap.putAll(toMergeMap);
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, baseMap));
} else {
params.put(
name,
buildMergedParamDefinition(
name, paramToMerge, params.get(name), context, paramToMerge.getValue()));
}
});
} | @Test
public void testMergeDisallowInvalidRestartChanged() {
for (ParamMode mode :
Arrays.asList(ParamMode.MUTABLE_ON_START, ParamMode.CONSTANT, ParamMode.IMMUTABLE)) {
AssertHelper.assertThrows(
String.format("Should not allow modifying reserved modes, mode [%s]", mode),
MaestroValidationException.class,
String.format("Cannot modify param with mode [%s] for parameter [tomerge]", mode),
new Runnable() {
@SneakyThrows
@Override
public void run() {
Map<String, ParamDefinition> allParams =
parseParamDefMap(
String.format(
"{'tomerge': {'type': 'STRING','value': 'hello', 'mode': '%s'}}", mode));
Map<String, ParamDefinition> paramsToMerge =
parseParamDefMap("{'tomerge': {'type': 'STRING', 'value': 'goodbye'}}");
ParamsMergeHelper.mergeParams(allParams, paramsToMerge, restartContext);
}
});
}
} |
public CreateTableBuilder withPkConstraintName(String pkConstraintName) {
this.pkConstraintName = validateConstraintName(pkConstraintName);
return this;
} | @Test
public void withPkConstraintName_does_not_fail_if_name_contains_underscore() {
underTest.withPkConstraintName("a_");
} |
public boolean containsPK(List<String> cols) {
if (cols == null) {
return false;
}
List<String> pk = getPrimaryKeyOnlyName();
if (pk.isEmpty()) {
return false;
}
//at least contain one pk
if (cols.containsAll(pk)) {
return true;
} else {
return CollectionUtils.toUpperList(cols).containsAll(CollectionUtils.toUpperList(pk));
}
} | @Test
public void testContainsPKWithCaseInsensitiveMatch() {
List<String> cols = Arrays.asList("COL1", "COL2");
assertTrue(tableMeta.containsPK(cols));
} |
@SuppressWarnings("unchecked")
public static String encode(Type parameter) {
if (parameter instanceof NumericType) {
return encodeNumeric(((NumericType) parameter));
} else if (parameter instanceof Address) {
return encodeAddress((Address) parameter);
} else if (parameter instanceof Bool) {
return encodeBool((Bool) parameter);
} else if (parameter instanceof Bytes) {
return encodeBytes((Bytes) parameter);
} else if (parameter instanceof DynamicBytes) {
return encodeDynamicBytes((DynamicBytes) parameter);
} else if (parameter instanceof Utf8String) {
return encodeString((Utf8String) parameter);
} else if (parameter instanceof StaticArray) {
if (DynamicStruct.class.isAssignableFrom(
((StaticArray) parameter).getComponentType())) {
return encodeStaticArrayWithDynamicStruct((StaticArray) parameter);
} else {
return encodeArrayValues((StaticArray) parameter);
}
} else if (parameter instanceof DynamicStruct) {
return encodeDynamicStruct((DynamicStruct) parameter);
} else if (parameter instanceof DynamicArray) {
return encodeDynamicArray((DynamicArray) parameter);
} else if (parameter instanceof PrimitiveType) {
return encode(((PrimitiveType) parameter).toSolidityType());
} else {
throw new UnsupportedOperationException(
"Type cannot be encoded: " + parameter.getClass());
}
} | @Test
public void testPrimitiveFloat() {
assertThrows(
UnsupportedOperationException.class,
() -> encode(new org.web3j.abi.datatypes.primitive.Float(0)));
} |
@Override
public PartitionQuickStats buildQuickStats(ConnectorSession session, SemiTransactionalHiveMetastore metastore,
SchemaTableName table, MetastoreContext metastoreContext, String partitionId, Iterator<HiveFileInfo> files)
{
requireNonNull(session);
requireNonNull(metastore);
requireNonNull(table);
requireNonNull(metastoreContext);
requireNonNull(partitionId);
requireNonNull(files);
if (!files.hasNext()) {
return PartitionQuickStats.EMPTY;
}
// TODO: Consider refactoring storage and/or table format to the interface when we implement an ORC/Iceberg quick stats builder
StorageFormat storageFormat;
if (UNPARTITIONED_ID.getPartitionName().equals(partitionId)) {
Table resolvedTable = metastore.getTable(metastoreContext, table.getSchemaName(), table.getTableName()).get();
storageFormat = resolvedTable.getStorage().getStorageFormat();
}
else {
Partition partition = metastore.getPartitionsByNames(metastoreContext, table.getSchemaName(), table.getTableName(),
ImmutableList.of(new PartitionNameWithVersion(partitionId, Optional.empty()))).get(partitionId).get();
storageFormat = partition.getStorage().getStorageFormat();
}
if (!PARQUET_SERDE_CLASS_NAMES.contains(storageFormat.getSerDe())) {
// Not a parquet table/partition
return PartitionQuickStats.EMPTY;
}
// We want to keep the number of files we use to build quick stats bounded, so that
// 1. We can control total file IO overhead in a measurable way
// 2. Planning time remains bounded
// Future work here is to sample the file list, read their stats only and extrapolate the overall stats (TODO)
List<CompletableFuture<ParquetMetadata>> footerFetchCompletableFutures = new ArrayList<>();
int filesCount = 0;
while (files.hasNext()) {
HiveFileInfo file = files.next();
filesCount++;
Path path = file.getPath();
long fileSize = file.getLength();
HiveFileContext hiveFileContext = new HiveFileContext(
true,
NO_CACHE_CONSTRAINTS,
Optional.empty(),
OptionalLong.of(fileSize),
OptionalLong.empty(),
OptionalLong.empty(),
file.getFileModifiedTime(),
false);
HdfsContext hdfsContext = new HdfsContext(session, table.getSchemaName(), table.getTableName());
Configuration configuration = hdfsEnvironment.getConfiguration(hdfsContext, path);
footerFetchCompletableFutures.add(supplyAsync(() -> {
Stopwatch footerFetchDuration = Stopwatch.createStarted();
try (FSDataInputStream inputStream = hdfsEnvironment.getFileSystem(hdfsContext, path).openFile(path, hiveFileContext);
ParquetDataSource parquetDataSource = buildHdfsParquetDataSource(inputStream, path, stats)) {
ParquetFileMetadata parquetFileMetadata = readFooter(parquetDataSource,
fileSize,
createDecryptor(configuration, path),
getReadNullMaskedParquetEncryptedValue(session));
footerByteSizeDistribution.add(parquetFileMetadata.getMetadataSize());
return parquetFileMetadata.getParquetMetadata();
}
catch (Exception e) {
log.error(e);
throw new RuntimeException(e);
}
finally {
this.footerFetchDuration.add(footerFetchDuration.elapsed(MILLISECONDS), MILLISECONDS);
}
}, footerFetchExecutor));
}
// Record a metric about how many files were seen
session.getRuntimeStats().addMetricValue(String.format("ParquetQuickStatsBuilder/FileCount/%s/%s", table.getTableName(), partitionId), RuntimeUnit.NONE, filesCount);
fileCountPerPartition.add(filesCount);
HashMap<ColumnPath, ColumnQuickStats<?>> rolledUpColStats = new HashMap<>();
try {
// Wait for footer reads to finish
CompletableFuture<Void> overallCompletableFuture = CompletableFuture.allOf(footerFetchCompletableFutures.toArray(new CompletableFuture[0]));
overallCompletableFuture.get(footerFetchTimeoutMillis, MILLISECONDS);
for (CompletableFuture<ParquetMetadata> future : footerFetchCompletableFutures) {
ParquetMetadata parquetMetadata = future.get();
processColumnMetadata(parquetMetadata, rolledUpColStats);
}
}
catch (InterruptedException | ExecutionException | TimeoutException e) {
log.error(e, "Failed to read/build stats from parquet footer");
throw new RuntimeException(e);
}
if (rolledUpColStats.isEmpty()) {
return PartitionQuickStats.EMPTY;
}
return new PartitionQuickStats(partitionId, rolledUpColStats.values(), filesCount);
} | @Test
public void testStatsBuildTimeIsBoundedUsingFooterFetchTimeout()
{
HiveClientConfig customHiveClientConfig = new HiveClientConfig().setParquetQuickStatsFileMetadataFetchTimeout(new Duration(10, TimeUnit.MILLISECONDS));
HdfsEnvironment mockHdfsEnvironment = new DelayingHdfsEnvironment(hdfsEnvironment, hiveClientConfig, metastoreClientConfig);
String resourceDir = TestParquetQuickStatsBuilder.class.getClassLoader().getResource("quick_stats").toString();
ParquetQuickStatsBuilder customParquetQuickStatsBuilder = new ParquetQuickStatsBuilder(new FileFormatDataSourceStats(), mockHdfsEnvironment, customHiveClientConfig);
ImmutableList<HiveFileInfo> hiveFileInfos = buildHiveFileInfos(resourceDir, "tpcds_store_sales_sf_point_01", 1);
try {
customParquetQuickStatsBuilder.buildQuickStats(SESSION, metastore, new SchemaTableName(TEST_SCHEMA, TEST_TABLE),
metastoreContext, UNPARTITIONED_ID.getPartitionName(), hiveFileInfos.iterator());
}
catch (RuntimeException ex) {
assertEquals(TimeoutException.class, ex.getCause().getClass());
}
} |
public boolean contains(double lat, double lon) {
return prepPolygon.contains(factory.createPoint(new Coordinate(lon, lat)));
} | @Test
public void testContains(){
/*
* |----|
* | |
* |----|
*/
Polygon square = new Polygon(new double[]{0,0,20,20}, new double[]{0,20,20,0});
assertTrue(square.contains(10,10));
assertTrue(square.contains(16,10));
assertFalse(square.contains(10,-20));
assertTrue(square.contains(10,0.1));
assertFalse(square.contains(10,20));
assertTrue(square.contains(10,16));
assertFalse(square.contains(20,20));
/*
* \-----|
* --| |
* --| |
* /----|
*/
Polygon squareHole = new Polygon(new double[]{0,0,20,20,15,15,5,5}, new double[]{0,20,20,0,5,15,15,5});
assertFalse(squareHole.contains(10,10));
assertTrue(squareHole.contains(16,10));
assertFalse(squareHole.contains(10,-20));
assertFalse(squareHole.contains(10,0));
assertFalse(squareHole.contains(10,20));
assertTrue(squareHole.contains(10,16));
assertFalse(squareHole.contains(20,20));
/*
* |----|
* | |
* |----|
*/
square = new Polygon(new double[]{1, 1, 2, 2}, new double[]{1, 2, 2, 1});
assertTrue(square.contains(1.5,1.5));
assertFalse(square.contains(0.5,1.5));
/*
* |----|
* | /\ |
* |/ \|
*/
squareHole = new Polygon(new double[]{1, 1, 2, 1.1, 2}, new double[]{1, 2, 2, 1.5, 1});
assertTrue(squareHole.contains(1.1,1.1));
assertFalse(squareHole.contains(1.5,1.5));
assertFalse(squareHole.contains(0.5,1.5));
} |
@Override
public QueryTarget create(InternalSerializationService serializationService, Extractors extractors, boolean isKey) {
return new AvroQueryTarget();
} | @Test
@Parameters({
"true",
"false"
})
public void test_create(boolean key) {
Extractors extractors = Extractors.newBuilder(SERIALIZATION_SERVICE).build();
AvroQueryTargetDescriptor descriptor = AvroQueryTargetDescriptor.INSTANCE;
// when
QueryTarget target = descriptor.create(SERIALIZATION_SERVICE, extractors, key);
// then
assertThat(target).isInstanceOf(AvroQueryTarget.class);
} |
public void addTotalTime(long time) {
totalTime.addAndGet(time);
} | @Test
void testAddTotalTime() {
TimeoutUtils timeoutUtils = new TimeoutUtils(10, 1);
timeoutUtils.initLastResetTime();
timeoutUtils.addTotalTime(1);
assertEquals(1L, timeoutUtils.getTotalTime().get());
} |
@Override
public ImportResult importItem(
UUID jobId,
IdempotentImportExecutor idempotentImportExecutor,
TokensAndUrlAuthData authData,
PhotosContainerResource data)
throws Exception {
if (data == null) {
return ImportResult.OK;
}
AppleMediaInterface mediaInterface = factory
.getOrCreateMediaInterface(jobId, authData, appCredentials, exportingService, monitor);
// Uploads album metadata
final int albumCount =
mediaInterface.importAlbums(
jobId,
idempotentImportExecutor,
data.getAlbums().stream()
.map(MediaAlbum::photoToMediaAlbum)
.collect(Collectors.toList()),
DataVertical.PHOTOS.getDataType());
final Map<String, Long> importPhotosResult =
mediaInterface.importAllMedia(
jobId,
idempotentImportExecutor,
data.getPhotos(),
DataVertical.PHOTOS.getDataType());
// generate import result
final ImportResult result = ImportResult.OK;
final Map<String, Integer> counts =
new ImmutableMap.Builder<String, Integer>()
.put(PhotosContainerResource.ALBUMS_COUNT_DATA_NAME, albumCount)
.put(
PhotosContainerResource.PHOTOS_COUNT_DATA_NAME,
importPhotosResult.get(ApplePhotosConstants.COUNT_KEY).intValue())
.build();
return result
.copyWithBytes(importPhotosResult.get(ApplePhotosConstants.BYTES_KEY))
.copyWithCounts(counts);
} | @Test
public void importSinglePhoto() throws Exception {
// set up
final int photoCount = 1;
final List<PhotoModel> photos = createTestPhotos(photoCount);
final Map<String, Integer> dataIdToStatus =
photos.stream()
.collect(
Collectors.toMap(
PhotoModel::getDataId,
photoModel -> SC_OK));
setUpGetUploadUrlResponse(dataIdToStatus);
setUpUploadContentResponse(dataIdToStatus);
setUpCreateMediaResponse(dataIdToStatus);
// run test
PhotosContainerResource data = new PhotosContainerResource(null, photos);
final ImportResult importResult =
applePhotosImporter.importItem(uuid, executor, authData, data);
// verify correct methods were called
final List<String> dataIds =
photos.stream().map(PhotoModel::getDataId).collect(Collectors.toList());
verify(mediaInterface)
.getUploadUrl(
uuid.toString(), DataVertical.PHOTOS.getDataType(), dataIds);
verify(mediaInterface).uploadContent(anyMap(), anyList());
verify(mediaInterface).createMedia(anyString(), anyString(), anyList());
// check the result
assertThat(importResult.getCounts().isPresent()).isTrue();
assertThat(importResult.getCounts().get().get(ALBUMS_COUNT_DATA_NAME) == 0).isTrue();
assertThat(importResult.getCounts().get().get(PHOTOS_COUNT_DATA_NAME) == photoCount).isTrue();
assertThat(importResult.getBytes().get() == photoCount * PHOTOS_FILE_SIZE).isTrue();
final Map<String, Serializable> expectedKnownValue =
photos.stream()
.collect(
Collectors.toMap(
photoModel -> photoModel.getAlbumId() + "-" + photoModel.getDataId(),
photoModel -> MEDIA_RECORDID_BASE + photoModel.getDataId()));
checkKnownValues(expectedKnownValue);
} |
@Override
public void close() {
commandTopic.close();
} | @Test
public void shouldClose() {
// When:
commandStore.close();
// Then:
verify(commandTopic).close();
} |
public Set<Device.Type> deviceTypes() {
return ImmutableSet.copyOf(getList(DEVICE_TYPES, Device.Type::valueOf, DEFAULT_DEVICE_TYPES));
} | @Test
public void testDeviceTypes() {
Set<Device.Type> inputTypes = new HashSet<Device.Type>() { {
add(DEVICE_TYPE_1);
add(DEVICE_TYPE_2);
} };
assertNotNull(cfg.deviceTypes(inputTypes));
Set<Device.Type> outputTypes = cfg.deviceTypes();
assertTrue(outputTypes.contains(DEVICE_TYPE_1));
assertTrue(outputTypes.contains(DEVICE_TYPE_2));
assertEquals(outputTypes.size(), 2);
} |
@Override
public void upgrade() {
if (hasBeenRunSuccessfully()) {
LOG.debug("Migration already completed.");
return;
}
final Set<String> dashboardIdToViewId = new HashSet<>();
final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add;
final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>();
final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll;
final Map<View, Search> newViews = this.dashboardsService.streamAll()
.sorted(Comparator.comparing(Dashboard::id))
.map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
writeViews(newViews);
final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping);
writeMigrationCompleted(migrationCompleted);
} | @Test
@MongoDBFixtures("quickvalues_widget_with_sort_order.json")
public void migratesAQuickValuesWidgetWithSortOrder() throws Exception {
this.migration.upgrade();
final MigrationCompleted migrationCompleted = captureMigrationCompleted();
assertThat(migrationCompleted.migratedDashboardIds()).containsExactly("5b3b76caadbe1d0001417041");
assertThat(migrationCompleted.widgetMigrationIds()).hasSize(2);
assertViewsWritten(1, resourceFile("quickvalues_widget_with_sort_order-expected_views.json"));
assertSearchesWritten(1, resourceFile("quickvalues_widget_with_sort_order-expected_searches.json"));
} |
@Override
public boolean tryLock(String name) {
return tryLock(name, DEFAULT_LOCK_DURATION_SECONDS);
} | @Test
public void tryLock_accepts_name_with_allowed_length() {
for (int i = 1; i <= LOCK_NAME_MAX_LENGTH; i++) {
String lockName = RandomStringUtils.random(i);
assertThatNoException().isThrownBy(() -> underTest.tryLock(lockName));
}
} |
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
super.onDataReceived(device, data);
if (data.size() < 3) {
onInvalidDataReceived(device, data);
return;
}
int offset = 0;
final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++);
final boolean carbohydratePresent = (flags & 0x01) != 0;
final boolean mealPresent = (flags & 0x02) != 0;
final boolean testerHealthPresent = (flags & 0x04) != 0;
final boolean exercisePresent = (flags & 0x08) != 0;
final boolean medicationPresent = (flags & 0x10) != 0;
final boolean medicationUnitLiter = (flags & 0x20) != 0;
final boolean HbA1cPresent = (flags & 0x40) != 0;
final boolean extendedFlagsPresent = (flags & 0x80) != 0;
if (data.size() < 3 + (carbohydratePresent ? 3 : 0) + (mealPresent ? 1 : 0) + (testerHealthPresent ? 1 : 0)
+ (exercisePresent ? 3 : 0) + (medicationPresent ? 3 : 0) + (HbA1cPresent ? 2 : 0)
+ (extendedFlagsPresent ? 1 : 0)) {
onInvalidDataReceived(device, data);
return;
}
final int sequenceNumber = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
offset += 2;
// Optional fields
if (extendedFlagsPresent) {
// ignore extended flags
offset += 1;
}
Carbohydrate carbohydrate = null;
Float carbohydrateAmount = null;
if (carbohydratePresent) {
final int carbohydrateId = data.getIntValue(Data.FORMAT_UINT8, offset);
carbohydrate = Carbohydrate.from(carbohydrateId);
carbohydrateAmount = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 1); // in grams
offset += 3;
}
Meal meal = null;
if (mealPresent) {
final int mealId = data.getIntValue(Data.FORMAT_UINT8, offset);
meal = Meal.from(mealId);
offset += 1;
}
Tester tester = null;
Health health = null;
if (testerHealthPresent) {
final int testerAndHealth = data.getIntValue(Data.FORMAT_UINT8, offset);
tester = Tester.from(testerAndHealth & 0x0F);
health = Health.from(testerAndHealth >> 4);
offset += 1;
}
Integer exerciseDuration = null;
Integer exerciseIntensity = null;
if (exercisePresent) {
exerciseDuration = data.getIntValue(Data.FORMAT_UINT16_LE, offset); // in seconds
exerciseIntensity = data.getIntValue(Data.FORMAT_UINT8, offset + 2); // in percentage
offset += 3;
}
Medication medication = null;
Float medicationAmount = null;
Integer medicationUnit = null;
if (medicationPresent) {
final int medicationId = data.getIntValue(Data.FORMAT_UINT8, offset);
medication = Medication.from(medicationId);
medicationAmount = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 1); // mg or ml
medicationUnit = medicationUnitLiter ? UNIT_ml : UNIT_mg;
offset += 3;
}
Float HbA1c = null;
if (HbA1cPresent) {
HbA1c = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
// offset += 2;
}
onGlucoseMeasurementContextReceived(device, sequenceNumber, carbohydrate, carbohydrateAmount,
meal, tester, health, exerciseDuration, exerciseIntensity,
medication, medicationAmount, medicationUnit, HbA1c);
} | @Test
public void onInvalidDataReceived() {
final MutableData data = new MutableData(new byte[5]);
data.setValue(0xFF, Data.FORMAT_UINT8, 0); // Flags
data.setValue(0, Data.FORMAT_UINT16_LE, 1); // Sequence number
data.setValue(0xb3, Data.FORMAT_UINT8, 3); // Extended flags - ignored
data.setValue(3, Data.FORMAT_UINT8, 4); // Carbohydrate: dinner
data.setValue(100.0f, Data.FORMAT_SFLOAT, 5); // Carbohydrate Amount
data.setValue(4, Data.FORMAT_UINT8, 7); // Meal: casual
data.setValue(0x12, Data.FORMAT_UINT8, 8); // Tester and Health (health care practitioner, minor issues)
data.setValue(60, Data.FORMAT_UINT16_LE, 9); // 1 minute of exercise
data.setValue(50, Data.FORMAT_UINT8, 11); // 50%
data.setValue(4, Data.FORMAT_UINT8, 12); // Long acting insulin
data.setValue(123.45f, Data.FORMAT_SFLOAT, 13); // 123.45 ml
callback.onDataReceived(null, data);
assertTrue(invalidData);
} |
public static void main(String[] args) {
// create some weapons
var enchantedHammer = new Weapon(1, "enchanted hammer");
var brokenGreatSword = new Weapon(2, "broken great sword");
var silverTrident = new Weapon(3, "silver trident");
// create repository
var weaponRepository = new ArmsDealer(new HashMap<>(),
new WeaponDatabase());
// perform operations on the weapons
weaponRepository.registerNew(enchantedHammer);
weaponRepository.registerModified(silverTrident);
weaponRepository.registerDeleted(brokenGreatSword);
weaponRepository.commit();
} | @Test
void shouldExecuteWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
} |
public int getBytesSize(SeaTunnelRowType rowType) {
if (size == 0) {
int s = 0;
for (int i = 0; i < fields.length; i++) {
s += getBytesForValue(fields[i], rowType.getFieldType(i));
}
size = s;
}
return size;
} | @Test
void testWithMapInterface() {
Map<String, String> map = Collections.singletonMap("key", "value");
SeaTunnelRow row = new SeaTunnelRow(new Object[] {map});
Assertions.assertEquals(8, row.getBytesSize());
} |
    /**
     * Runs the single-entity operation for every id in the request, collecting
     * per-entity failures instead of aborting the whole batch. Audit events are
     * emitted for each success and failure, but audit-log errors never change
     * the outcome of the bulk operation itself.
     */
    @Override
    public BulkOperationResponse executeBulkOperation(final BulkOperationRequest bulkOperationRequest, final C userContext, final AuditParams params) {
        if (bulkOperationRequest.entityIds() == null || bulkOperationRequest.entityIds().isEmpty()) {
            throw new BadRequestException(NO_ENTITY_IDS_ERROR);
        }
        List<BulkOperationFailure> capturedFailures = new LinkedList<>();
        for (String entityId : bulkOperationRequest.entityIds()) {
            try {
                T entityModel = singleEntityOperationExecutor.execute(entityId, userContext);
                try {
                    if (params != null) {
                        auditEventSender.success(getAuditActor(userContext), params.eventType(), successAuditLogContextCreator.create(entityModel, params.entityClass()));
                    }
                } catch (Exception auditLogStoreException) {
                    //exception on audit log storing should not result in failure report, as the operation itself is successful
                    LOG.error("Failed to store in the audit log information about successful entity removal via bulk action ", auditLogStoreException);
                }
            } catch (Exception ex) {
                // The operation failed for this entity; record it and continue with the rest.
                capturedFailures.add(new BulkOperationFailure(entityId, ex.getMessage()));
                try {
                    if (params != null) {
                        auditEventSender.failure(getAuditActor(userContext), params.eventType(), failureAuditLogContextCreator.create(params.entityIdInPathParam(), entityId));
                    }
                } catch (Exception auditLogStoreException) {
                    //exception on audit log storing should not mask the already captured operation failure
                    LOG.error("Failed to store in the audit log information about failed entity removal via bulk action ", auditLogStoreException);
                }
            }
        }
        // Successes are inferred: total requested minus the captured failures.
        return new BulkOperationResponse(
                bulkOperationRequest.entityIds().size() - capturedFailures.size(),
                capturedFailures);
    } | @Test
    void returnsProperResponseOnSuccessfulBulkRemoval() throws Exception {
        // Three entities, all succeeding: expect 3 successes, no failures, and
        // one success audit event per entity.
        mockUserContext();
        Object entity1 = new Object();
        doReturn(entity1).when(singleEntityOperationExecutor).execute("1", context);
        Object entity2 = new Object();
        doReturn(entity2).when(singleEntityOperationExecutor).execute("2", context);
        Object entity3 = new Object();
        doReturn(entity3).when(singleEntityOperationExecutor).execute("3", context);
        final BulkOperationResponse bulkOperationResponse = toTest.executeBulkOperation(new BulkOperationRequest(List.of("1", "2", "3")), context, params);
        assertThat(bulkOperationResponse.successfullyPerformed()).isEqualTo(3);
        assertThat(bulkOperationResponse.failures()).isEmpty();
        verify(singleEntityOperationExecutor).execute("1", context);
        verify(singleEntityOperationExecutor).execute("2", context);
        verify(singleEntityOperationExecutor).execute("3", context);
        verifyNoMoreInteractions(singleEntityOperationExecutor);
        verify(auditEventSender, times(3)).success(any(), eq(eventType), any());
        verifyNoInteractions(failureAuditLogContextCreator);
        verify(successAuditLogContextCreator).create(entity1, Object.class);
        verify(successAuditLogContextCreator).create(entity2, Object.class);
        verify(successAuditLogContextCreator).create(entity3, Object.class);
        verifyNoMoreInteractions(successAuditLogContextCreator);
    }
  /** Returns the progress tracker for the sort phase of this task. */
  public Progress getSortPhase() {
    return sortPhase;
  } | @Test
  public void testSpillFilesCountLimitInvalidValue() throws Exception {
    // A negative spill-file limit must be rejected at init time with a
    // descriptive IOException mentioning the offending value.
    JobConf conf = new JobConf();
    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
    conf.set(MRConfig.LOCAL_DIR, testRootDir.getAbsolutePath());
    conf.setInt(MRJobConfig.SPILL_FILES_COUNT_LIMIT, -2);
    MapOutputFile mof = new MROutputFiles();
    mof.setConf(conf);
    TaskAttemptID attemptId = new TaskAttemptID("12345", 1, TaskType.MAP, 1, 1);
    MapTask mockTask = mock(MapTask.class);
    doReturn(mof).when(mockTask).getMapOutputFile();
    doReturn(attemptId).when(mockTask).getTaskID();
    doReturn(new Progress()).when(mockTask).getSortPhase();
    TaskReporter mockReporter = mock(TaskReporter.class);
    doReturn(new Counter()).when(mockReporter).getCounter(any(TaskCounter.class));
    MapOutputCollector.Context ctx = new MapOutputCollector.Context(mockTask, conf, mockReporter);
    MapOutputBuffer<Object, Object> mob = new MapOutputBuffer<>();
    exception.expect(IOException.class);
    exception.expectMessage("Invalid value for \"mapreduce.task.spill.files.count.limit\", " +
        "current value: -2");
    mob.init(ctx);
    mob.close();
  }
public static SofaRpcRuntimeException buildRuntime(String configKey, String configValue) {
String msg = "The value of config " + configKey + " [" + configValue + "] is illegal, please check it";
return new SofaRpcRuntimeException(msg);
} | @Test
    public void buildRuntime() throws Exception {
        // The generated message must mention both the config key and the value.
        SofaRpcRuntimeException exception = ExceptionUtils.buildRuntime("xxx111", "222");
        Assert.assertTrue(exception.getMessage().contains("xxx111"));
        Assert.assertTrue(exception.getMessage().contains("222"));
    }
    /**
     * Sends an InstallSnapshot RPC to the follower. Opens a snapshot reader,
     * publishes a copy URI, and hands the in-flight request to the pipeline.
     * Lock discipline: the caller holds the replicator id lock; every early
     * return must either unlock explicitly or delegate to the finally block
     * via {@code doUnlock}.
     */
    void installSnapshot() {
        if (getState() == State.Snapshot) {
            // Another snapshot transfer is already in progress; drop this request.
            LOG.warn("Replicator {} is installing snapshot, ignore the new request.", this);
            unlockId();
            return;
        }
        boolean doUnlock = true;
        if (!this.rpcService.connect(this.options.getPeerId().getEndpoint())) {
            LOG.error("Fail to check install snapshot connection to peer={}, give up to send install snapshot request, group id {}.",
                this.options.getPeerId().getEndpoint(), this.options.getGroupId());
            block(Utils.nowMs(), RaftError.EHOSTDOWN.getNumber());
            return;
        }
        try {
            Requires.requireTrue(this.reader == null,
                "Replicator [%s, %s, %s] already has a snapshot reader, current state is %s",
                this.options.getGroupId(), this.options.getPeerId(), this.options.getReplicatorType(), getState());
            this.reader = this.options.getSnapshotStorage().open();
            if (this.reader == null) {
                // Could not open snapshot storage: report an I/O error to the node.
                final NodeImpl node = this.options.getNode();
                final RaftException error = new RaftException(EnumOutter.ErrorType.ERROR_TYPE_SNAPSHOT);
                error.setStatus(new Status(RaftError.EIO, "Fail to open snapshot"));
                unlockId();
                doUnlock = false;
                node.onError(error);
                return;
            }
            final String uri = this.reader.generateURIForCopy();
            if (uri == null) {
                final NodeImpl node = this.options.getNode();
                final RaftException error = new RaftException(EnumOutter.ErrorType.ERROR_TYPE_SNAPSHOT);
                error.setStatus(new Status(RaftError.EIO, "Fail to generate uri for snapshot reader"));
                releaseReader();
                unlockId();
                doUnlock = false;
                node.onError(error);
                return;
            }
            final RaftOutter.SnapshotMeta meta = this.reader.load();
            if (meta == null) {
                final String snapshotPath = this.reader.getPath();
                final NodeImpl node = this.options.getNode();
                final RaftException error = new RaftException(EnumOutter.ErrorType.ERROR_TYPE_SNAPSHOT);
                error.setStatus(new Status(RaftError.EIO, "Fail to load meta from %s", snapshotPath));
                releaseReader();
                unlockId();
                doUnlock = false;
                node.onError(error);
                return;
            }
            // Build the request from the loaded snapshot metadata and copy URI.
            final InstallSnapshotRequest.Builder rb = InstallSnapshotRequest.newBuilder();
            rb.setTerm(this.options.getTerm());
            rb.setGroupId(this.options.getGroupId());
            rb.setServerId(this.options.getServerId().toString());
            rb.setPeerId(this.options.getPeerId().toString());
            rb.setMeta(meta);
            rb.setUri(uri);
            this.statInfo.runningState = RunningState.INSTALLING_SNAPSHOT;
            this.statInfo.lastLogIncluded = meta.getLastIncludedIndex();
            this.statInfo.lastTermIncluded = meta.getLastIncludedTerm();
            final InstallSnapshotRequest request = rb.build();
            setState(State.Snapshot);
            // noinspection NonAtomicOperationOnVolatileField
            this.installSnapshotCounter++;
            final long monotonicSendTimeMs = Utils.monotonicMs();
            // Capture version/seq so stale responses can be detected on return.
            final int stateVersion = this.version;
            final int seq = getAndIncrementReqSeq();
            final Future<Message> rpcFuture = this.rpcService.installSnapshot(this.options.getPeerId().getEndpoint(),
                request, new RpcResponseClosureAdapter<InstallSnapshotResponse>() {
                    @Override
                    public void run(final Status status) {
                        onRpcReturned(Replicator.this.id, RequestType.Snapshot, status, request, getResponse(), seq,
                            stateVersion, monotonicSendTimeMs);
                    }
                });
            addInflight(RequestType.Snapshot, this.nextIndex, 0, 0, seq, rpcFuture);
        } finally {
            if (doUnlock) {
                unlockId();
            }
        }
    } | @Test
    public void testInstallSnapshot() {
        // Stubs the snapshot storage/reader and verifies the request fields,
        // running state and stat counters after an installSnapshot() call.
        final Replicator r = getReplicator();
        this.id.unlock();
        final Future<Message> rpcInFly = r.getRpcInFly();
        assertNotNull(rpcInFly);
        final SnapshotReader reader = Mockito.mock(SnapshotReader.class);
        Mockito.when(this.snapshotStorage.open()).thenReturn(reader);
        final String uri = "remote://localhost:8081/99";
        Mockito.when(reader.generateURIForCopy()).thenReturn(uri);
        final RaftOutter.SnapshotMeta meta = RaftOutter.SnapshotMeta.newBuilder() //
            .setLastIncludedIndex(11) //
            .setLastIncludedTerm(1) //
            .build();
        Mockito.when(reader.load()).thenReturn(meta);
        assertEquals(0, r.statInfo.lastLogIncluded);
        assertEquals(0, r.statInfo.lastTermIncluded);
        final RpcRequests.InstallSnapshotRequest.Builder rb = RpcRequests.InstallSnapshotRequest.newBuilder();
        rb.setTerm(this.opts.getTerm());
        rb.setGroupId(this.opts.getGroupId());
        rb.setServerId(this.opts.getServerId().toString());
        rb.setPeerId(this.opts.getPeerId().toString());
        rb.setMeta(meta);
        rb.setUri(uri);
        Mockito.when(
            this.rpcService.installSnapshot(Matchers.eq(this.opts.getPeerId().getEndpoint()), eq(rb.build()),
                Mockito.any())).thenReturn(new FutureImpl<>());
        r.installSnapshot();
        assertNotNull(r.getRpcInFly());
        assertNotSame(r.getRpcInFly(), rpcInFly);
        Assert.assertEquals(Replicator.RunningState.INSTALLING_SNAPSHOT, r.statInfo.runningState);
        assertEquals(11, r.statInfo.lastLogIncluded);
        assertEquals(1, r.statInfo.lastTermIncluded);
    }
    /**
     * Looks up a device configuration by name, or returns {@code null} when no
     * config with that name is registered.
     *
     * NOTE(review): the cast is unchecked — if the stored config is not of the
     * caller's expected type, the ClassCastException surfaces at the call site.
     */
    @Nullable
    public <T extends DeviceConfig> T getDeviceConfig(String name) {
        return (T) deviceConfigs.get(name);
    } | @Test
    public void testGetDeviceConfig() {
        // Unknown names return null; known names return the stored config, and
        // a type-mismatched typed lookup throws ClassCastException.
        String deviceName = randomName();
        DeviceConfig deviceConfig = new LocalDeviceConfig().setName(deviceName);
        config.addDeviceConfig(deviceConfig);
        assertNull(config.getDeviceConfig(randomName()));
        assertEquals(deviceConfig, config.getDeviceConfig(deviceName));
        assertEquals(deviceConfig, config.getDeviceConfig(LocalDeviceConfig.class, deviceName));
        deviceConfig = new DeviceConfig() {
            @Override
            public boolean isLocal() {
                return false;
            }
            @Override
            public Capacity getCapacity() {
                return LocalDeviceConfig.DEFAULT_CAPACITY;
            }
            @Override
            public NamedConfig setName(String name) {
                return this;
            }
            @Override
            public String getName() {
                return deviceName;
            }
        };
        config.addDeviceConfig(deviceConfig);
        assertEquals(deviceConfig, config.getDeviceConfig(deviceName));
        assertEquals(deviceConfig, config.getDeviceConfig(DeviceConfig.class, deviceName));
        assertThrows(ClassCastException.class, () -> config.getDeviceConfig(LocalDeviceConfig.class, deviceName));
    }
    /**
     * Resets this buffer to its initial empty state: drops the data reference,
     * zeroes size/position/mark, shrinks an oversized char buffer back to the
     * default capacity, and clears the cached protocol versions.
     */
    @Override
    public void clear() {
        data = null;
        size = 0;
        pos = 0;
        mark = 0;
        if (charBuffer != null && charBuffer.length > UTF_BUFFER_SIZE * 8) {
            // Avoid pinning a very large scratch buffer after a big read.
            charBuffer = new char[UTF_BUFFER_SIZE * 8];
        }
        version = UNKNOWN;
        wanProtocolVersion = UNKNOWN;
    } | @Test
    public void testClear() {
        // After clear() the buffer must expose no data and zeroed cursors.
        in.clear();
        assertNull(in.data);
        assertEquals(0, in.size);
        assertEquals(0, in.pos);
        assertEquals(0, in.mark);
    }
  /**
   * Blocks until a client connects to the server socket, then wraps the
   * accepted socket in a client object keyed by its remote address.
   *
   * @return the client created for the accepted connection
   * @throws IOException if the accept fails or times out
   */
  public T acceptClient() throws IOException {
    Socket socket = serverSocket.accept();
    return createClient(
        socketAddressToString(socket.getRemoteSocketAddress()), socket);
  } | @Test
  public void testAcceptClient() throws Exception {
    // Starts a client thread, waits (bounded) for it to connect, then verifies
    // the listener hands back a non-null client for the accepted socket.
    RunnableClient localClient = new RunnableClient(
        InetAddress.getLocalHost(), serverSocket.getLocalPort());
    Thread thread = new Thread(localClient);
    thread.start();
    synchronized (localClient) {
      int retries = 200;
      while (retries-- > 0 && !localClient.isConnected()) {
        localClient.wait(10);
      }
    }
    assertTrue(localClient.isConnected());
    localClient.close();
    serverSocket.setSoTimeout(5000);
    Client client = listener.acceptClient();
    assertNotNull(client);
    client.close();
  }
public InputStream fetch(URL url) throws DownloadFailedException, TooManyRequestsException, ResourceNotFoundException {
if ("file".equalsIgnoreCase(url.getProtocol())) {
final File file;
try {
file = new File(url.toURI());
} catch (URISyntaxException ex) {
final String msg = format("Download failed, unable to locate '%s'", url);
throw new DownloadFailedException(msg);
}
if (file.exists()) {
try {
return new FileInputStream(file);
} catch (IOException ex) {
final String msg = format("Download failed, unable to rerieve '%s'", url);
throw new DownloadFailedException(msg, ex);
}
} else {
final String msg = format("Download failed, file ('%s') does not exist", url);
throw new DownloadFailedException(msg);
}
} else {
if (connection != null) {
LOGGER.warn("HTTP URL Connection was not properly closed");
connection.disconnect();
connection = null;
}
connection = obtainConnection(url);
final String encoding = connection.getContentEncoding();
try {
if ("gzip".equalsIgnoreCase(encoding)) {
return new GZIPInputStream(connection.getInputStream());
} else if ("deflate".equalsIgnoreCase(encoding)) {
return new InflaterInputStream(connection.getInputStream());
} else {
return connection.getInputStream();
}
} catch (IOException ex) {
checkForCommonExceptionTypes(ex);
final String msg = format("Error retrieving '%s'%nConnection Timeout: %d%nEncoding: %s%n",
url, connection.getConnectTimeout(), encoding);
throw new DownloadFailedException(msg, ex);
} catch (Exception ex) {
final String msg = format("Unexpected exception retrieving '%s'%nConnection Timeout: %d%nEncoding: %s%n",
url, connection.getConnectTimeout(), encoding);
throw new DownloadFailedException(msg, ex);
}
}
} | @Test
    public void testFetch() throws Exception {
        // Fetches the version-check URL and checks the payload starts with a
        // semantic version and the connection is still open inside the block.
        URL url = new URL(getSettings().getString(Settings.KEYS.ENGINE_VERSION_CHECK_URL));
        try (HttpResourceConnection resource = new HttpResourceConnection(getSettings())) {
            InputStream in = resource.fetch(url);
            byte[] read = new byte[90];
            in.read(read);
            String text = new String(read, UTF_8);
            assertTrue(text.matches("^\\d+\\.\\d+\\.\\d+.*"));
            assertFalse(resource.isClosed());
        }
    }
  /**
   * Returns the cached plugin JAR when present, otherwise downloads it and
   * caches it. An empty Optional means the plugin could not be obtained.
   */
  public Optional<File> get(InstalledPlugin plugin) {
    // Does not fail if another process tries to create the directory at the same time.
    Path jarInCache = jarInCache(plugin.key, plugin.hash);
    if (Files.isRegularFile(jarInCache)) {
      return Optional.of(jarInCache.toFile());
    }
    return download(plugin).map(Path::toFile);
  } | @Test
  void fail_if_integrity_of_download_is_not_valid() throws IOException {
    // A checksum mismatch between the expected and downloaded JAR must fail.
    FileAndMd5 tempJar = new FileAndMd5();
    stubDownload(tempJar.file, "invalid_hash");
    InstalledPlugin plugin = newInstalledPlugin("foo", "abc");
    expectISE("foo", "was expected to have checksum invalid_hash but had " + tempJar.md5,
      () -> underTest.get(plugin));
  }
    /**
     * Ensures the dynamic temporary AWS credentials provider is first in the
     * s3a provider chain and, when configured, sets the endpoint region.
     *
     * NOTE(review): the duplicate check uses substring contains(); a provider
     * name that merely embeds this provider's name would be treated as already
     * present — confirm that cannot occur with the configured names.
     */
    public static void updateHadoopConfig(org.apache.hadoop.conf.Configuration hadoopConfig) {
        LOG.info("Updating Hadoop configuration");
        String providers = hadoopConfig.get(PROVIDER_CONFIG_NAME, "");
        if (!providers.contains(DynamicTemporaryAWSCredentialsProvider.NAME)) {
            if (providers.isEmpty()) {
                LOG.debug("Setting provider");
                providers = DynamicTemporaryAWSCredentialsProvider.NAME;
            } else {
                // Prepend so the dynamic provider takes precedence.
                providers = DynamicTemporaryAWSCredentialsProvider.NAME + "," + providers;
                LOG.debug("Prepending provider, new providers value: {}", providers);
            }
            hadoopConfig.set(PROVIDER_CONFIG_NAME, providers);
        } else {
            LOG.debug("Provider already exists");
        }
        if (!StringUtils.isNullOrWhitespaceOnly(region)) {
            LOG.debug("Setting region");
            hadoopConfig.set("fs.s3a.endpoint.region", region);
        }
        LOG.info("Updated Hadoop configuration successfully");
    } | @Test
    public void updateHadoopConfigShouldNotAddProviderWhenAlreadyExists() {
        // When the provider is already configured, the value must stay unchanged.
        org.apache.hadoop.conf.Configuration hadoopConfiguration =
                new org.apache.hadoop.conf.Configuration();
        hadoopConfiguration.set(PROVIDER_CONFIG_NAME, DynamicTemporaryAWSCredentialsProvider.NAME);
        AbstractS3DelegationTokenReceiver.updateHadoopConfig(hadoopConfiguration);
        assertEquals(
                DynamicTemporaryAWSCredentialsProvider.NAME,
                hadoopConfiguration.get(PROVIDER_CONFIG_NAME));
    }
    /**
     * Returns one ReaderInfo per subtask: for each subtask the reader with the
     * smallest (earliest) attempt number is selected.
     */
    @Override
    public Map<Integer, ReaderInfo> registeredReaders() {
        final Map<Integer, ReaderInfo> readers = new HashMap<>();
        for (Map.Entry<Integer, ConcurrentMap<Integer, ReaderInfo>> entry :
                registeredReaders.entrySet()) {
            final int subtaskIndex = entry.getKey();
            final Map<Integer, ReaderInfo> attemptReaders = entry.getValue();
            // Pick the earliest attempt registered for this subtask.
            int earliestAttempt = Integer.MAX_VALUE;
            for (int attemptNumber : attemptReaders.keySet()) {
                if (attemptNumber < earliestAttempt) {
                    earliestAttempt = attemptNumber;
                }
            }
            readers.put(subtaskIndex, attemptReaders.get(earliestAttempt));
        }
        return Collections.unmodifiableMap(readers);
    } | @Test
    void testRegisterReader() throws Exception {
        // Registered readers must be visible per subtask and mirrored in the enumerator.
        sourceReady();
        List<ReaderInfo> readerInfo = registerReaders();
        assertThat(context.registeredReaders()).containsKey(0);
        assertThat(context.registeredReaders()).containsKey(1);
        assertThat(context.registeredReaders().get(0)).isEqualTo(readerInfo.get(0));
        assertThat(context.registeredReaders().get(1)).isEqualTo(readerInfo.get(1));
        final TestingSplitEnumerator<?> enumerator = getEnumerator();
        assertThat(enumerator.getRegisteredReaders()).containsExactlyInAnyOrder(0, 1, 2);
    }
  /** Returns the first configured server address. */
  public URI getServerAddress() {
    return serverAddresses.get(0);
  } | @Test
  public void shouldParseMultipleServerAddresses() throws Exception {
    // Given: a comma-separated address list.
    final String firstServerAddress = "http://firstServer:8088";
    final String multipleServerAddresses = firstServerAddress + ",http://secondServer:8088";
    final URI firstServerURI = new URI(firstServerAddress);
    // When:
    try (KsqlRestClient ksqlRestClient = clientWithServerAddresses(multipleServerAddresses)) {
      // Then: the first entry is exposed as the server address.
      assertThat(ksqlRestClient.getServerAddress(), is(firstServerURI));
    }
  }
@Override
public ListView<String> getServiceList(int pageNo, int pageSize, String groupName, AbstractSelector selector)
throws NacosException {
ServiceListRequest request = new ServiceListRequest(namespaceId, groupName, pageNo, pageSize);
if (selector != null) {
if (SelectorType.valueOf(selector.getType()) == SelectorType.label) {
request.setSelector(JacksonUtils.toJson(selector));
}
}
ServiceListResponse response = requestToServer(request, ServiceListResponse.class);
ListView<String> result = new ListView<>();
result.setCount(response.getCount());
result.setData(response.getServiceNames());
return result;
} | @Test
    void testGetServiceList() throws Exception {
        // Stubs the RPC layer and checks count/data are copied into the ListView.
        ServiceListResponse res = new ServiceListResponse();
        List<String> services = Arrays.asList("service1", "service2");
        res.setServiceNames(services);
        res.setCount(5);
        when(this.rpcClient.request(any())).thenReturn(res);
        AbstractSelector selector = new NoneSelector();
        ListView<String> serviceList = client.getServiceList(1, 10, GROUP_NAME, selector);
        assertEquals(5, serviceList.getCount());
        assertEquals(services, serviceList.getData());
    }
    /**
     * Writes a DATA frame for the given stream. The stream must exist and be
     * in OPEN or HALF_CLOSED_REMOTE state; otherwise the data is released and
     * the promise is failed. On success the frame is handed to the flow
     * controller rather than written directly.
     */
    @Override
    public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding,
                                   final boolean endOfStream, ChannelPromise promise) {
        promise = promise.unvoid();
        final Http2Stream stream;
        try {
            stream = requireStream(streamId);
            // Verify that the stream is in the appropriate state for sending DATA frames.
            switch (stream.state()) {
                case OPEN:
                case HALF_CLOSED_REMOTE:
                    // Allowed sending DATA frames in these states.
                    break;
                default:
                    throw new IllegalStateException("Stream " + stream.id() + " in unexpected state " + stream.state());
            }
        } catch (Throwable e) {
            // Release the buffer on failure to avoid a leak.
            data.release();
            return promise.setFailure(e);
        }
        // Hand control of the frame to the flow controller.
        flowController().addFlowControlled(stream,
                new FlowControlledData(stream, data, padding, endOfStream, promise));
        return promise;
    } | @Test
    public void canWriteDataFrameAfterGoAwayReceived() throws Exception {
        // A GOAWAY does not forbid DATA on streams that were already created.
        Http2Stream stream = createStream(STREAM_ID, false);
        goAwayReceived(STREAM_ID);
        ByteBuf data = mock(ByteBuf.class);
        encoder.writeData(ctx, STREAM_ID, data, 0, false, newPromise());
        verify(remoteFlow).addFlowControlled(eq(stream), any(FlowControlled.class));
    }
    /**
     * Uploads a file to Box via a multipart POST. Existing files are updated
     * through the versioned content endpoint (guarded by If-Match when an ETag
     * is known); new files go through the generic content endpoint. Returns a
     * delayed output stream whose completion yields the created File entry.
     */
    @Override
    public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
        final DelayedHttpEntityCallable<File> command = new DelayedHttpEntityCallable<File>(file) {
            @Override
            public File call(final HttpEntity entity) throws BackgroundException {
                try {
                    final HttpPost request;
                    if(status.isExists()) {
                        request = new HttpPost(String.format("%s/files/%s/content?fields=%s", client.getBasePath(),
                                fileid.getFileId(file),
                                String.join(",", BoxAttributesFinderFeature.DEFAULT_FIELDS)));
                    }
                    else {
                        request = new HttpPost(String.format("%s/files/content?fields=%s", client.getBasePath(),
                                String.join(",", BoxAttributesFinderFeature.DEFAULT_FIELDS)));
                    }
                    final Checksum checksum = status.getChecksum();
                    if(Checksum.NONE != checksum) {
                        switch(checksum.algorithm) {
                            case sha1:
                                // NOTE(review): a SHA-1 digest is sent in the Content-MD5
                                // header here — presumably a Box API convention; confirm.
                                request.addHeader(HttpHeaders.CONTENT_MD5, checksum.hash);
                        }
                    }
                    // Serialize the file attributes (name, parent, timestamps) as the
                    // "attributes" multipart body part.
                    final ByteArrayOutputStream content = new ByteArrayOutputStream();
                    new JSON().getContext(null).writeValue(content, new FilescontentAttributes()
                            .name(file.getName())
                            .parent(new FilescontentAttributesParent().id(fileid.getFileId(file.getParent())))
                            .contentCreatedAt(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
                            .contentModifiedAt(status.getModified() != null ? new DateTime(status.getModified()) : null)
                    );
                    final MultipartEntityBuilder multipart = MultipartEntityBuilder.create();
                    multipart.addBinaryBody("attributes", content.toByteArray());
                    final ByteArrayOutputStream out = new ByteArrayOutputStream();
                    entity.writeTo(out);
                    multipart.addBinaryBody("file", out.toByteArray(),
                            null == status.getMime() ? ContentType.APPLICATION_OCTET_STREAM : ContentType.create(status.getMime()), file.getName());
                    request.setEntity(multipart.build());
                    if(status.isExists()) {
                        // Optimistic locking: only overwrite the expected version.
                        if(StringUtils.isNotBlank(status.getRemote().getETag())) {
                            request.addHeader(new BasicHeader(HttpHeaders.IF_MATCH, status.getRemote().getETag()));
                        }
                        else {
                            log.warn(String.format("Missing remote attributes in transfer status to read current ETag for %s", file));
                        }
                    }
                    final Files files = session.getClient().execute(request, new BoxClientErrorResponseHandler<Files>() {
                        @Override
                        public Files handleEntity(final HttpEntity entity) throws IOException {
                            return new JSON().getContext(null).readValue(entity.getContent(), Files.class);
                        }
                    });
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Received response %s for upload of %s", files, file));
                    }
                    if(files.getEntries().stream().findFirst().isPresent()) {
                        return files.getEntries().stream().findFirst().get();
                    }
                    throw new NotfoundException(file.getAbsolute());
                }
                catch(HttpResponseException e) {
                    throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
                }
                catch(IOException e) {
                    throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
                }
            }
            @Override
            public long getContentLength() {
                // Length is unknown up front; the multipart body is buffered.
                return -1L;
            }
        };
        return this.write(file, status, command);
    } | @Test
    public void testWrite() throws Exception {
        // Round-trips an overwrite upload: writes random bytes, reads them back,
        // and verifies size, timestamps (truncated to seconds by Box) and that
        // the parent folder's modification date is untouched.
        final BoxFileidProvider fileid = new BoxFileidProvider(session);
        final BoxWriteFeature feature = new BoxWriteFeature(session, fileid);
        final Path folder = new BoxDirectoryFeature(session, fileid).mkdir(
                new Path(Home.ROOT, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
        final long folderModification = new BoxAttributesFinderFeature(session, fileid).find(folder).getModificationDate();
        assertEquals(folderModification, folder.attributes().getModificationDate());
        // Makes sure to test overwrite
        final Path file = new BoxTouchFeature(session, fileid).touch(
                new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withCreated(1503654615002L));
        final byte[] content = RandomUtils.nextBytes(2047);
        final TransferStatus status = new TransferStatus();
        status.setModified(1503654614004L); //GMT: Friday, 25. August 2017 09:50:14.004
        status.setLength(content.length);
        status.setExists(true);
        status.setRemote(file.attributes());
        status.setMime(MimeTypeService.DEFAULT_CONTENT_TYPE);
        status.setChecksum(feature.checksum(file, status).compute(new ByteArrayInputStream(content), status));
        final HttpResponseOutputStream<File> out = feature.write(file, status, new DisabledConnectionCallback());
        final ByteArrayInputStream in = new ByteArrayInputStream(content);
        final TransferStatus progress = new TransferStatus();
        final BytecountStreamListener count = new BytecountStreamListener();
        new StreamCopier(progress, progress).withListener(count).transfer(in, out);
        assertEquals(content.length, count.getSent());
        in.close();
        out.close();
        assertNotNull(out.getStatus());
        assertTrue(new DefaultFindFeature(session).find(file));
        assertTrue(new BoxFindFeature(session, fileid).find(file));
        final PathAttributes attributes = new BoxAttributesFinderFeature(session, fileid).find(file);
        assertEquals(content.length, attributes.getSize());
        final byte[] compare = new byte[content.length];
        final InputStream stream = new BoxReadFeature(session, fileid).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
        IOUtils.readFully(stream, compare);
        stream.close();
        assertArrayEquals(content, compare);
        // Check folder attributes after write
        assertEquals(folderModification, new BoxAttributesFinderFeature(session, fileid).find(folder).getModificationDate(), 0L);
        final PathAttributes fileAttr = new BoxAttributesFinderFeature(session, fileid).find(file);
        assertNotEquals(file.attributes(), fileAttr);
        assertEquals(file.attributes().getCreationDate(), fileAttr.getCreationDate());
        assertNotEquals(file.attributes().getModificationDate(), fileAttr.getModificationDate());
        assertEquals(1503654615000L, fileAttr.getCreationDate()); //milliseconds are ignored by the Box - GMT: Friday, 25. August 2017 09:50:14
        assertEquals(1503654614000L, fileAttr.getModificationDate()); //milliseconds are ignored by the Box - GMT: Friday, 25. August 2017 09:50:14
        new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    }
    /**
     * Scans the pattern string into a token list by driving a per-character
     * state machine, then flushes whatever the final state left in the buffer.
     *
     * @return the tokens recognized in the pattern
     * @throws ScanException if the pattern ends inside a format modifier or option
     */
    List<Token> tokenize() throws ScanException {
        List<Token> tokenList = new ArrayList<Token>();
        StringBuffer buf = new StringBuffer();
        while (pointer < patternLength) {
            char c = pattern.charAt(pointer);
            pointer++;
            // Dispatch to the handler for the current scanner state.
            switch (state) {
            case LITERAL_STATE:
                handleLiteralState(c, tokenList, buf);
                break;
            case FORMAT_MODIFIER_STATE:
                handleFormatModifierState(c, tokenList, buf);
                break;
            case OPTION_STATE:
                processOption(c, tokenList, buf);
                break;
            case KEYWORD_STATE:
                handleKeywordState(c, tokenList, buf);
                break;
            case RIGHT_PARENTHESIS_STATE:
                handleRightParenthesisState(c, tokenList, buf);
                break;
            default:
            }
        }
        // EOS: emit the token implied by the terminal state, or fail when the
        // pattern was cut off mid-modifier/mid-option.
        switch (state) {
        case LITERAL_STATE:
            addValuedToken(Token.LITERAL, buf, tokenList);
            break;
        case KEYWORD_STATE:
            tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
            break;
        case RIGHT_PARENTHESIS_STATE:
            tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
            break;
        case FORMAT_MODIFIER_STATE:
        case OPTION_STATE:
            throw new ScanException("Unexpected end of pattern string");
        }
        return tokenList;
    } | @Test
    public void testSimpleP() throws ScanException {
        // Composite keyword with an option list must tokenize into the expected
        // percent / keyword / literal / option / parenthesis sequence.
        List<Token> tl = new TokenStream("%(hello %class{.4?})").tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(Token.PERCENT_TOKEN);
        witness.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
        witness.add(new Token(Token.LITERAL, "hello "));
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.SIMPLE_KEYWORD, "class"));
        List<String> ol = new ArrayList<String>();
        ol.add(".4?");
        witness.add(new Token(Token.OPTION, ol));
        witness.add(Token.RIGHT_PARENTHESIS_TOKEN);
        assertEquals(witness, tl);
    }
@Override
public void init(InitContext context) {
String state = context.generateCsrfState();
OAuth20Service scribe = newScribeBuilder(context)
.defaultScope(getScope())
.build(scribeApi);
String url = scribe.getAuthorizationUrl(state);
context.redirectTo(url);
} | @Test
  public void fail_to_init_when_disabled() {
    // With GitHub auth disabled, init must fail fast with a clear message.
    setSettings(false);
    InitContext context = mock(InitContext.class);
    assertThatThrownBy(() -> underTest.init(context))
      .isInstanceOf(IllegalStateException.class)
      .hasMessage("GitHub authentication is disabled");
  }
    /** Static factory for the map builder. */
    public static Builder newBuilder() {
        return new Builder();
    } | @Test
    public void testConstructor() {
        // Invalid builder combinations must be rejected: zero expected items,
        // zero concurrency level, and concurrency level exceeding capacity.
        try {
            ConcurrentLongLongPairHashMap.newBuilder()
                    .expectedItems(0)
                    .build();
            fail("should have thrown exception");
        } catch (IllegalArgumentException e) {
            // ok
        }
        try {
            ConcurrentLongLongPairHashMap.newBuilder()
                    .expectedItems(16)
                    .concurrencyLevel(0)
                    .build();
            fail("should have thrown exception");
        } catch (IllegalArgumentException e) {
            // ok
        }
        try {
            ConcurrentLongLongPairHashMap.newBuilder()
                    .expectedItems(4)
                    .concurrencyLevel(8)
                    .build();
            fail("should have thrown exception");
        } catch (IllegalArgumentException e) {
            // ok
        }
    }
    /**
     * Resolves the candidate users for the given department ids. The parameter
     * is a comma-separated list of department ids; the result is the set of
     * ids of all users belonging to those departments.
     */
    @Override
    public Set<Long> calculateUsers(DelegateExecution execution, String param) {
        Set<Long> deptIds = StrUtils.splitToLongSet(param);
        List<AdminUserRespDTO> users = adminUserApi.getUserListByDeptIds(deptIds);
        return convertSet(users, AdminUserRespDTO::getId);
    } | @Test
    public void testCalculateUsers() {
        // Prepare parameters
        String param = "11,22";
        // Mock the user API
        List<AdminUserRespDTO> users = convertList(asSet(11L, 22L),
                id -> new AdminUserRespDTO().setId(id));
        when(adminUserApi.getUserListByDeptIds(eq(asSet(11L, 22L)))).thenReturn(users);
        // Invoke
        Set<Long> results = strategy.calculateUsers(null, param);
        // Assert
        assertEquals(asSet(11L, 22L), results);
    }
static String getHomeDir(String name) {
String homeDir = HOME_DIR_TL.get();
if (homeDir == null) {
String sysProp = name + HOME_DIR;
homeDir = System.getProperty(sysProp);
if (homeDir == null) {
throw new IllegalArgumentException(MessageFormat.format(
"System property [{0}] not defined", sysProp));
}
}
return homeDir;
} | @Test(expected = IllegalArgumentException.class)
  public void getHomeDirNotDef() {
    // Without the system property set, resolution must fail.
    ServerWebApp.getHomeDir("TestServerWebApp00");
  }
  /**
   * Maps a Kafka record to a Beam Row following this table's schema: optional
   * message-key, event-timestamp and headers fields are populated only when the
   * schema declares them; the payload is either passed through as raw bytes or
   * deserialized via the configured payload serializer.
   */
  @VisibleForTesting
  Row transformInput(KafkaRecord<byte[], byte[]> record) {
    Row.FieldValueBuilder builder = Row.withSchema(getSchema()).withFieldValues(ImmutableMap.of());
    if (schema.hasField(Schemas.MESSAGE_KEY_FIELD)) {
      builder.withFieldValue(Schemas.MESSAGE_KEY_FIELD, record.getKV().getKey());
    }
    if (schema.hasField(Schemas.EVENT_TIMESTAMP_FIELD)) {
      builder.withFieldValue(
          Schemas.EVENT_TIMESTAMP_FIELD, Instant.ofEpochMilli(record.getTimestamp()));
    }
    if (schema.hasField(Schemas.HEADERS_FIELD)) {
      @Nullable Headers recordHeaders = record.getHeaders();
      if (recordHeaders != null) {
        // Group repeated header keys into one entry row per key with all values.
        ImmutableListMultimap.Builder<String, byte[]> headersBuilder =
            ImmutableListMultimap.builder();
        recordHeaders.forEach(header -> headersBuilder.put(header.key(), header.value()));
        ImmutableList.Builder<Row> listBuilder = ImmutableList.builder();
        headersBuilder
            .build()
            .asMap()
            .forEach(
                (key, values) -> {
                  Row entry =
                      Row.withSchema(Schemas.HEADERS_ENTRY_SCHEMA)
                          .withFieldValue(Schemas.HEADERS_KEY_FIELD, key)
                          .withFieldValue(Schemas.HEADERS_VALUES_FIELD, values)
                          .build();
                  listBuilder.add(entry);
                });
        builder.withFieldValue(Schemas.HEADERS_FIELD, listBuilder.build());
      }
    }
    if (payloadSerializer == null) {
      builder.withFieldValue(Schemas.PAYLOAD_FIELD, record.getKV().getValue());
    } else {
      byte[] payload = record.getKV().getValue();
      if (payload != null) {
        builder.withFieldValue(
            Schemas.PAYLOAD_FIELD, payloadSerializer.deserialize(record.getKV().getValue()));
      }
    }
    return builder.build();
  } | @Test
  public void fullRecordToRow() {
    // A record with key, value, timestamp and multi-valued headers must map
    // onto every field of the full read schema.
    NestedPayloadKafkaTable table = newTable(FULL_READ_SCHEMA, Optional.empty());
    Instant event = Instant.now();
    KafkaRecord<byte[], byte[]> record =
        readRecord(
            "key".getBytes(UTF_8),
            "value".getBytes(UTF_8),
            event.getMillis(),
            ImmutableListMultimap.of(
                "key1", "attr1".getBytes(UTF_8),
                "key1", "attr2".getBytes(UTF_8),
                "key2", "attr3".getBytes(UTF_8)));
    Row expected =
        Row.withSchema(FULL_READ_SCHEMA)
            .withFieldValue(Schemas.MESSAGE_KEY_FIELD, "key".getBytes(UTF_8))
            .withFieldValue(Schemas.PAYLOAD_FIELD, "value".getBytes(UTF_8))
            .withFieldValue(Schemas.EVENT_TIMESTAMP_FIELD, event)
            .withFieldValue(
                Schemas.HEADERS_FIELD,
                ImmutableList.of(
                    Row.withSchema(Schemas.HEADERS_ENTRY_SCHEMA)
                        .attachValues(
                            "key1",
                            ImmutableList.of("attr1".getBytes(UTF_8), "attr2".getBytes(UTF_8))),
                    Row.withSchema(Schemas.HEADERS_ENTRY_SCHEMA)
                        .attachValues("key2", ImmutableList.of("attr3".getBytes(UTF_8)))))
            .build();
    assertEquals(expected, table.transformInput(record));
  }
    /** Returns the singleton counter tracking the total number of pushes. */
    public static AtomicInteger getTotalPushMonitor() {
        return INSTANCE.totalPush;
    } | @Test
    void testGetTotalPush() {
        // Counter starts at zero and is shared, so an increment is observable.
        assertEquals(0, MetricsMonitor.getTotalPushMonitor().get());
        assertEquals(1, MetricsMonitor.getTotalPushMonitor().incrementAndGet());
    }
public static ReadOnlyHttp2Headers trailers(boolean validateHeaders, AsciiString... otherHeaders) {
return new ReadOnlyHttp2Headers(validateHeaders, EMPTY_ASCII_STRINGS, otherHeaders);
} | @Test
public void nullHeaderNameValidated() {
assertThrows(Http2Exception.class, new Executable() {
@Override
public void execute() {
ReadOnlyHttp2Headers.trailers(true, null, new AsciiString("foo"));
}
});
} |
Queue<String> prepareRollingOrder(List<String> podNamesToConsider, List<Pod> pods) {
Deque<String> rollingOrder = new ArrayDeque<>();
for (String podName : podNamesToConsider) {
Pod matchingPod = pods.stream().filter(pod -> podName.equals(pod.getMetadata().getName())).findFirst().orElse(null);
if (matchingPod == null || !Readiness.isPodReady(matchingPod)) {
// Non-existing or unready pods are handled first
// This helps to avoid rolling all pods into some situation where they would be all failing
rollingOrder.addFirst(podName);
} else {
// Ready pods are rolled only at the end
rollingOrder.addLast(podName);
}
}
return rollingOrder;
} | @Test
public void testRollingOrderWithUnreadyPod() {
List<Pod> pods = List.of(
renamePod(READY_POD, "my-connect-connect-0"),
renamePod(UNREADY_POD, "my-connect-connect-1"),
renamePod(READY_POD, "my-connect-connect-2")
);
KafkaConnectRoller roller = new KafkaConnectRoller(RECONCILIATION, CLUSTER, 1_000L, null);
Queue<String> rollingOrder = roller.prepareRollingOrder(POD_NAMES, pods);
assertThat(rollingOrder.size(), is(3));
assertThat(rollingOrder.poll(), is("my-connect-connect-1"));
assertThat(rollingOrder.poll(), is("my-connect-connect-0"));
assertThat(rollingOrder.poll(), is("my-connect-connect-2"));
} |
public static HazelcastInstance getOrCreateHazelcastClient() {
return getOrCreateClientInternal(null);
} | @Test
public void testGetOrCreateHazelcastClient_returnsSame_withNoConfig() {
String hzConfigProperty = System.getProperty(CLIENT_CONFIG_PROP_NAME);
try {
System.setProperty(CLIENT_CONFIG_PROP_NAME, "classpath:hazelcast-client-test.xml");
HazelcastInstance client1 = HazelcastClient.getOrCreateHazelcastClient();
HazelcastInstance client2 = HazelcastClient.getOrCreateHazelcastClient();
assertEquals("Calling two times getOrCreateHazelcastClient should return same client", client1, client2);
} finally {
if (hzConfigProperty == null) {
System.clearProperty(CLIENT_CONFIG_PROP_NAME);
} else {
System.setProperty(CLIENT_CONFIG_PROP_NAME, hzConfigProperty);
}
}
} |
public void destroy() {
mGeneratingDisposable.dispose();
mGenerateStateSubject.onNext(LoadingState.NOT_LOADED);
mGenerateStateSubject.onComplete();
} | @Test
public void testCalculatesCornersInBackground() {
TestRxSchedulers.drainAllTasks();
Assert.assertEquals(GestureTypingDetector.LoadingState.LOADED, mCurrentState.get());
mDetectorUnderTest.destroy();
TestRxSchedulers.drainAllTasks();
Assert.assertEquals(GestureTypingDetector.LoadingState.NOT_LOADED, mCurrentState.get());
} |
public ProcessContinuation run(
PartitionRecord partitionRecord,
RestrictionTracker<StreamProgress, StreamProgress> tracker,
OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
ManualWatermarkEstimator<Instant> watermarkEstimator)
throws IOException {
BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator =
new BytesThroughputEstimator<>(sizeEstimator, Instant.now());
// Lock the partition
if (tracker.currentRestriction().isEmpty()) {
boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord);
// Clean up NewPartition on the first run regardless of locking result. If locking fails it
// means this partition is being streamed, then cleaning up NewPartitions avoids lingering
// NewPartitions.
for (NewPartition newPartition : partitionRecord.getParentPartitions()) {
metadataTableDao.deleteNewPartition(newPartition);
}
if (!lockedPartition) {
LOG.info(
"RCSP {} : Could not acquire lock with uid: {}, because this is a "
+ "duplicate and another worker is working on this partition already.",
formatByteStringRange(partitionRecord.getPartition()),
partitionRecord.getUuid());
StreamProgress streamProgress = new StreamProgress();
streamProgress.setFailToLock(true);
metrics.decPartitionStreamCount();
tracker.tryClaim(streamProgress);
return ProcessContinuation.stop();
}
} else if (tracker.currentRestriction().getCloseStream() == null
&& !metadataTableDao.doHoldLock(
partitionRecord.getPartition(), partitionRecord.getUuid())) {
// We only verify the lock if we are not holding CloseStream because if this is a retry of
// CloseStream we might have already cleaned up the lock in a previous attempt.
// Failed correctness check on this worker holds the lock on this partition. This shouldn't
// fail because there's a restriction tracker which means this worker has already acquired the
// lock and once it has acquired the lock it shouldn't fail the lock check.
LOG.warn(
"RCSP {} : Subsequent run that doesn't hold the lock {}. This is not unexpected and "
+ "should probably be reviewed.",
formatByteStringRange(partitionRecord.getPartition()),
partitionRecord.getUuid());
StreamProgress streamProgress = new StreamProgress();
streamProgress.setFailToLock(true);
metrics.decPartitionStreamCount();
tracker.tryClaim(streamProgress);
return ProcessContinuation.stop();
}
// Process CloseStream if it exists
CloseStream closeStream = tracker.currentRestriction().getCloseStream();
if (closeStream != null) {
LOG.debug("RCSP: Processing CloseStream");
metrics.decPartitionStreamCount();
if (closeStream.getStatus().getCode() == Status.Code.OK) {
// We need to update watermark here. We're terminating this stream because we have reached
// endTime. Instant.now is greater or equal to endTime. The goal here is
// DNP will need to know this stream has passed the endTime so DNP can eventually terminate.
Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE);
Instant endTime = partitionRecord.getEndTime();
if (endTime != null) {
terminatingWatermark = endTime;
}
watermarkEstimator.setWatermark(terminatingWatermark);
metadataTableDao.updateWatermark(
partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null);
LOG.info(
"RCSP {}: Reached end time, terminating...",
formatByteStringRange(partitionRecord.getPartition()));
return ProcessContinuation.stop();
}
if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) {
LOG.error(
"RCSP {}: Reached unexpected terminal state: {}",
formatByteStringRange(partitionRecord.getPartition()),
closeStream.getStatus());
return ProcessContinuation.stop();
}
// Release the lock only if the uuid matches. In normal operation this doesn't change
// anything. However, it's possible for this RCSP to crash while processing CloseStream but
// after the side effects of writing the new partitions to the metadata table. New partitions
// can be created while this RCSP restarts from the previous checkpoint and processes the
// CloseStream again. In certain race scenarios the child partitions may merge back to this
// partition, but as a new RCSP. The new partition (same as this partition) would write the
// exact same content to the metadata table but with a different uuid. We don't want to
// accidentally delete the StreamPartition because it now belongs to the new RCSP.
// If the uuid is the same (meaning this race scenario did not take place) we release the lock
// and mark the StreamPartition to be deleted, so we can delete it after we have written the
// NewPartitions.
metadataTableDao.releaseStreamPartitionLockForDeletion(
partitionRecord.getPartition(), partitionRecord.getUuid());
// The partitions in the continuation tokens must cover the same key space as this partition.
// If there's only 1 token, then the token's partition is equals to this partition.
// If there are more than 1 tokens, then the tokens form a continuous row range equals to this
// partition.
List<ByteStringRange> childPartitions = new ArrayList<>();
List<ByteStringRange> tokenPartitions = new ArrayList<>();
// Check if NewPartitions field exists, if not we default to using just the
// ChangeStreamContinuationTokens.
boolean useNewPartitionsField =
closeStream.getNewPartitions().size()
== closeStream.getChangeStreamContinuationTokens().size();
for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) {
ByteStringRange childPartition;
if (useNewPartitionsField) {
childPartition = closeStream.getNewPartitions().get(i);
} else {
childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition();
}
childPartitions.add(childPartition);
ChangeStreamContinuationToken token =
getTokenWithCorrectPartition(
partitionRecord.getPartition(),
closeStream.getChangeStreamContinuationTokens().get(i));
tokenPartitions.add(token.getPartition());
metadataTableDao.writeNewPartition(
new NewPartition(
childPartition, Collections.singletonList(token), watermarkEstimator.getState()));
}
LOG.info(
"RCSP {}: Split/Merge into {}",
formatByteStringRange(partitionRecord.getPartition()),
partitionsToString(childPartitions));
if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) {
LOG.warn(
"RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace",
formatByteStringRange(partitionRecord.getPartition()),
partitionsToString(tokenPartitions));
}
// Perform the real cleanup. This step is no op if the race mentioned above occurs (splits and
// merges results back to this partition again) because when we register the "new" partition,
// we unset the deletion bit.
metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition());
return ProcessContinuation.stop();
}
// Update the metadata table with the watermark
metadataTableDao.updateWatermark(
partitionRecord.getPartition(),
watermarkEstimator.getState(),
tracker.currentRestriction().getCurrentToken());
// Start to stream the partition.
ServerStream<ChangeStreamRecord> stream = null;
try {
stream =
changeStreamDao.readChangeStreamPartition(
partitionRecord,
tracker.currentRestriction(),
partitionRecord.getEndTime(),
heartbeatDuration);
for (ChangeStreamRecord record : stream) {
Optional<ProcessContinuation> result =
changeStreamAction.run(
partitionRecord,
record,
tracker,
receiver,
watermarkEstimator,
throughputEstimator);
// changeStreamAction will usually return Optional.empty() except for when a checkpoint
// (either runner or pipeline initiated) is required.
if (result.isPresent()) {
return result.get();
}
}
} catch (Exception e) {
throw e;
} finally {
if (stream != null) {
stream.cancel();
}
}
return ProcessContinuation.resume();
} | @Test
public void testCloseStreamTerminateNotOutOfRangeStatus() throws IOException {
// Force lock fail because CloseStream should not depend on locking
when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(false);
// Out of Range code is 11.
CloseStream mockCloseStream = Mockito.mock(CloseStream.class);
Status statusProto = Status.newBuilder().setCode(10).build();
Mockito.when(mockCloseStream.getStatus())
.thenReturn(com.google.cloud.bigtable.common.Status.fromProto(statusProto));
when(restriction.getCloseStream()).thenReturn(mockCloseStream);
when(restriction.isEmpty()).thenReturn(false);
final DoFn.ProcessContinuation result =
action.run(partitionRecord, tracker, receiver, watermarkEstimator);
assertEquals(DoFn.ProcessContinuation.stop(), result);
// Should terminate before reaching processing stream partition responses.
verify(changeStreamAction, never()).run(any(), any(), any(), any(), any(), any());
// Should not try claim any restriction when processing CloseStream
verify(tracker, (never())).tryClaim(any());
// Should decrement the metric on termination.
verify(metrics).decPartitionStreamCount();
// Should not try to write any new partition to the metadata table.
verify(metadataTableDao, never()).writeNewPartition(any());
verify(metadataTableDao, never()).releaseStreamPartitionLockForDeletion(any(), any());
verify(metadataTableDao, never()).deleteStreamPartitionRow(any());
} |
@ConstantFunction(name = "subtract", argTypes = {DOUBLE, DOUBLE}, returnType = DOUBLE)
public static ConstantOperator subtractDouble(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createDouble(first.getDouble() - second.getDouble());
} | @Test
public void subtractDouble() {
assertEquals(0.0,
ScalarOperatorFunctions.subtractDouble(O_DOUBLE_100, O_DOUBLE_100).getDouble(), 1);
} |
@Override
public Distribution read(final Path file, final Distribution.Method method, final LoginCallback prompt) throws BackgroundException {
return this.connected(new Connected<Distribution>() {
@Override
public Distribution call() throws BackgroundException {
return CustomOriginCloudFrontDistributionConfiguration.super.read(file, method, prompt);
}
}, prompt);
} | @Test
public void testReadNoConfiguredDistributionForOrigin() throws Exception {
final Host origin = new Host(new TestProtocol(), "myhost.localdomain");
origin.getCdnCredentials().setUsername(PROPERTIES.get("s3.key"));
origin.getCdnCredentials().setPassword(PROPERTIES.get("s3.secret"));
final CustomOriginCloudFrontDistributionConfiguration configuration
= new CustomOriginCloudFrontDistributionConfiguration(origin, new DefaultX509TrustManager() {
@Override
public void checkServerTrusted(final X509Certificate[] certs, final String cipher) {
//
}
}, new DefaultX509KeyManager());
final Path container = new Path("unknown.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Distribution distribution = configuration.read(container, Distribution.CUSTOM, new DisabledLoginCallback() {
@Override
public Credentials prompt(final Host bookmark, final String username, final String title, final String reason, final LoginOptions options) {
return new Credentials(PROPERTIES.get("s3.key"), PROPERTIES.get("s3.secret"));
}
});
assertFalse(distribution.isEnabled());
assertEquals("Amazon CloudFront", distribution.getName());
} |
public static double getPerTaskConfigSize() {
return perTaskConfigSize;
} | @Test
void testGetPerTaskConfigSize() {
double defaultVal = ParamUtil.getPerTaskConfigSize();
assertEquals(defaultPerTaskConfigSize, defaultVal, 0.01);
double expect = 50.0;
ParamUtil.setPerTaskConfigSize(expect);
assertEquals(expect, ParamUtil.getPerTaskConfigSize(), 0.01);
} |
@Override
public boolean equals(@Nullable Object object) {
if (object instanceof GaugeCell) {
GaugeCell gaugeCell = (GaugeCell) object;
return Objects.equals(dirty, gaugeCell.dirty)
&& Objects.equals(gaugeValue.get(), gaugeCell.gaugeValue.get())
&& Objects.equals(name, gaugeCell.name);
}
return false;
} | @Test
public void testEquals() {
GaugeCell gaugeCell = new GaugeCell(MetricName.named("namespace", "name"));
GaugeCell equal = new GaugeCell(MetricName.named("namespace", "name"));
Assert.assertEquals(gaugeCell, equal);
Assert.assertEquals(gaugeCell.hashCode(), equal.hashCode());
} |
public static Set<String> splitToSet(String value, char separatorChar) {
return splitToSet(value, separatorChar, false);
} | @Test
void testSplitToSet() {
String value = "1# 2#3 #4#3";
Set<String> values = splitToSet(value, '#', false);
assertEquals(ofSet("1", " 2", "3 ", "4", "3"), values);
values = splitToSet(value, '#', true);
assertEquals(ofSet("1", "2", "3", "4"), values);
} |
public static void tripSuggestions(
List<CharSequence> suggestions, final int maxSuggestions, List<CharSequence> stringsPool) {
while (suggestions.size() > maxSuggestions) {
removeSuggestion(suggestions, maxSuggestions, stringsPool);
}
} | @Test
public void testTrimSuggestionsWhenOneNeeded() {
ArrayList<CharSequence> list =
new ArrayList<>(
Arrays.<CharSequence>asList("typed", "something", "duped", "duped", "something"));
IMEUtil.tripSuggestions(list, 4, mStringPool);
Assert.assertEquals(4, list.size());
Assert.assertEquals("typed", list.get(0));
Assert.assertEquals("something", list.get(1));
Assert.assertEquals("duped", list.get(2));
Assert.assertEquals("duped", list.get(3));
} |
@Override
public int getServerPort() {
if (port == 0) {
VertxPlatformHttpServer server = CamelContextHelper.findSingleByType(camelContext, VertxPlatformHttpServer.class);
if (server != null && server.getServer() != null) {
port = server.getServer().actualPort();
}
if (port == 0) {
VertxPlatformHttpServerConfiguration config
= CamelContextHelper.findSingleByType(camelContext, VertxPlatformHttpServerConfiguration.class);
if (config != null) {
port = config.getBindPort();
}
}
if (port == 0) {
VertxPlatformHttpRouter router = VertxPlatformHttpRouter.lookup(camelContext);
if (router != null && router.getServer() != null && router.getServer().getServer() != null) {
port = router.getServer().getServer().actualPort();
}
}
}
return port;
} | @Test
public void testEngine() throws Exception {
final CamelContext context = createCamelContext();
try {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("platform-http:/get")
.routeId("get")
.setBody().constant("get");
from("platform-http:/post")
.routeId("post")
.transform().body(String.class, b -> b.toUpperCase());
}
});
context.start();
given()
.when()
.get("/get")
.then()
.statusCode(200)
.body(equalTo("get"));
given()
.body("post")
.when()
.post("/post")
.then()
.statusCode(200)
.body(equalTo("POST"));
PlatformHttpComponent phc = context.getComponent("platform-http", PlatformHttpComponent.class);
assertEquals(2, phc.getHttpEndpoints().size());
Iterator<HttpEndpointModel> it = phc.getHttpEndpoints().iterator();
assertEquals("/get", it.next().getUri());
assertEquals("/post", it.next().getUri());
// should find engine in registry
assertNotNull(context.getRegistry().findSingleByType(PlatformHttpEngine.class));
EmbeddedHttpService server = context.getRegistry().findSingleByType(EmbeddedHttpService.class);
assertNotNull(server);
assertEquals("http", server.getScheme());
assertEquals(RestAssured.port, server.getServerPort());
} finally {
context.stop();
}
} |
public static void verifyJvmRequirements()
{
verifyJavaVersion();
verify64BitJvm();
verifyOsArchitecture();
verifyByteOrder();
verifyUsingG1Gc();
verifyFileDescriptor();
verifySlice();
} | @Test
public void testVerifyJvmRequirements()
{
verifyJvmRequirements();
} |
@Override
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
meta = (GetRepositoryNamesMeta) smi;
data = (GetRepositoryNamesData) sdi;
if ( super.init( smi, sdi ) ) {
try {
// Get the repository objects from the repository...
//
data.list = getRepositoryObjects();
} catch ( Exception e ) {
logError( "Error initializing step: ", e );
return false;
}
data.rownr = 1L;
data.filenr = 0;
return true;
}
return false;
} | @Test
public void testGetRepoList_excludeSubfolders_Extended() throws KettleException {
init( repoExtended, "/", false, ".*", "", All, 0 );
} |
@Override
public CompletableFuture<Void> offload(ReadHandle readHandle,
UUID uuid,
Map<String, String> extraMetadata) {
final String managedLedgerName = extraMetadata.get(MANAGED_LEDGER_NAME);
final String topicName = TopicName.fromPersistenceNamingEncoding(managedLedgerName);
CompletableFuture<Void> promise = new CompletableFuture<>();
scheduler.chooseThread(readHandle.getId()).execute(() -> {
final BlobStore writeBlobStore = getBlobStore(config.getBlobStoreLocation());
log.info("offload {} uuid {} extraMetadata {} to {} {}", readHandle.getId(), uuid, extraMetadata,
config.getBlobStoreLocation(), writeBlobStore);
if (readHandle.getLength() == 0 || !readHandle.isClosed() || readHandle.getLastAddConfirmed() < 0) {
promise.completeExceptionally(
new IllegalArgumentException("An empty or open ledger should never be offloaded"));
return;
}
OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create()
.withLedgerMetadata(readHandle.getLedgerMetadata())
.withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize());
String dataBlockKey = DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid);
String indexBlockKey = DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid);
log.info("ledger {} dataBlockKey {} indexBlockKey {}", readHandle.getId(), dataBlockKey, indexBlockKey);
MultipartUpload mpu = null;
List<MultipartPart> parts = Lists.newArrayList();
// init multi part upload for data block.
try {
BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey);
Map<String, String> objectMetadata = new HashMap<>(userMetadata);
objectMetadata.put("role", "data");
if (extraMetadata != null) {
objectMetadata.putAll(extraMetadata);
}
DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata);
Blob blob = blobBuilder.build();
log.info("initiateMultipartUpload bucket {}, metadata {} ", config.getBucket(), blob.getMetadata());
mpu = writeBlobStore.initiateMultipartUpload(config.getBucket(), blob.getMetadata(), new PutOptions());
} catch (Throwable t) {
promise.completeExceptionally(t);
return;
}
long dataObjectLength = 0;
// start multi part upload for data block.
try {
long startEntry = 0;
int partId = 1;
long start = System.nanoTime();
long entryBytesWritten = 0;
while (startEntry <= readHandle.getLastAddConfirmed()) {
int blockSize = BlockAwareSegmentInputStreamImpl
.calculateBlockSize(config.getMaxBlockSizeInBytes(), readHandle, startEntry, entryBytesWritten);
try (BlockAwareSegmentInputStream blockStream = new BlockAwareSegmentInputStreamImpl(
readHandle, startEntry, blockSize, this.offloaderStats, managedLedgerName)) {
Payload partPayload = Payloads.newInputStreamPayload(blockStream);
partPayload.getContentMetadata().setContentLength((long) blockSize);
partPayload.getContentMetadata().setContentType("application/octet-stream");
parts.add(writeBlobStore.uploadMultipartPart(mpu, partId, partPayload));
log.debug("UploadMultipartPart. container: {}, blobName: {}, partId: {}, mpu: {}",
config.getBucket(), dataBlockKey, partId, mpu.id());
indexBuilder.addBlock(startEntry, partId, blockSize);
if (blockStream.getEndEntryId() != -1) {
startEntry = blockStream.getEndEntryId() + 1;
} else {
// could not read entry from ledger.
break;
}
entryBytesWritten += blockStream.getBlockEntryBytesCount();
partId++;
this.offloaderStats.recordOffloadBytes(topicName, blockStream.getBlockEntryBytesCount());
}
dataObjectLength += blockSize;
}
String etag = writeBlobStore.completeMultipartUpload(mpu, parts);
log.info("Ledger {}, upload finished, etag {}", readHandle.getId(), etag);
mpu = null;
} catch (Throwable t) {
try {
if (mpu != null) {
writeBlobStore.abortMultipartUpload(mpu);
}
} catch (Throwable throwable) {
log.error("Failed abortMultipartUpload in bucket - {} with key - {}, uploadId - {}.",
config.getBucket(), dataBlockKey, mpu.id(), throwable);
}
this.offloaderStats.recordWriteToStorageError(topicName);
this.offloaderStats.recordOffloadError(topicName);
promise.completeExceptionally(t);
return;
}
// upload index block
try (OffloadIndexBlock index = indexBuilder.withDataObjectLength(dataObjectLength).build();
IndexInputStream indexStream = index.toStream()) {
// write the index block
BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey);
Map<String, String> objectMetadata = new HashMap<>(userMetadata);
objectMetadata.put("role", "index");
if (extraMetadata != null) {
objectMetadata.putAll(extraMetadata);
}
DataBlockUtils.addVersionInfo(blobBuilder, objectMetadata);
Payload indexPayload = Payloads.newInputStreamPayload(indexStream);
indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize());
indexPayload.getContentMetadata().setContentType("application/octet-stream");
Blob blob = blobBuilder
.payload(indexPayload)
.contentLength((long) indexStream.getStreamSize())
.build();
writeBlobStore.putBlob(config.getBucket(), blob);
promise.complete(null);
} catch (Throwable t) {
try {
writeBlobStore.removeBlob(config.getBucket(), dataBlockKey);
} catch (Throwable throwable) {
log.error("Failed deleteObject in bucket - {} with key - {}.",
config.getBucket(), dataBlockKey, throwable);
}
this.offloaderStats.recordWriteToStorageError(topicName);
this.offloaderStats.recordOffloadError(topicName);
promise.completeExceptionally(t);
return;
}
});
return promise;
} | @Test
public void testOffloadFailInitDataBlockUpload() throws Exception {
ReadHandle readHandle = buildReadHandle();
UUID uuid = UUID.randomUUID();
String failureString = "fail InitDataBlockUpload";
// mock throw exception when initiateMultipartUpload
try {
BlobStore spiedBlobStore = mock(BlobStore.class, delegatesTo(blobStore));
Mockito
.doThrow(new RuntimeException(failureString))
.when(spiedBlobStore).initiateMultipartUpload(any(), any(), any());
BlobStoreManagedLedgerOffloader offloader = getOffloader(spiedBlobStore);
offloader.offload(readHandle, uuid, new HashMap<>()).get();
Assert.fail("Should throw exception when initiateMultipartUpload");
} catch (Exception e) {
Assert.assertTrue(e.getCause() instanceof RuntimeException);
Assert.assertTrue(e.getCause().getMessage().contains(failureString));
Assert.assertFalse(blobStore.blobExists(BUCKET, DataBlockUtils.dataBlockOffloadKey(readHandle.getId(), uuid)));
Assert.assertFalse(blobStore.blobExists(BUCKET, DataBlockUtils.indexBlockOffloadKey(readHandle.getId(), uuid)));
}
} |
@Override
protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
cf.parse(args);
pathOnly = cf.getOpt(OPTION_PATHONLY);
dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse);
humanReadable = cf.getOpt(OPTION_HUMAN);
hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE);
orderReverse = cf.getOpt(OPTION_REVERSE);
orderTime = cf.getOpt(OPTION_MTIME);
orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
useAtime = cf.getOpt(OPTION_ATIME);
displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
if (args.isEmpty()) args.add(Path.CUR_DIR);
initialiseOrderComparator();
} | @Test
public void processPathDirOrderMtimeReverse() throws IOException {
TestFile testfile01 = new TestFile("testDirectory", "testFile01");
TestFile testfile02 = new TestFile("testDirectory", "testFile02");
TestFile testfile03 = new TestFile("testDirectory", "testFile03");
TestFile testfile04 = new TestFile("testDirectory", "testFile04");
TestFile testfile05 = new TestFile("testDirectory", "testFile05");
TestFile testfile06 = new TestFile("testDirectory", "testFile06");
// set file mtime in different order to file names
testfile01.setMtime(NOW.getTime() + 10);
testfile02.setMtime(NOW.getTime() + 30);
testfile03.setMtime(NOW.getTime() + 20);
testfile04.setMtime(NOW.getTime() + 60);
testfile05.setMtime(NOW.getTime() + 50);
testfile06.setMtime(NOW.getTime() + 40);
TestFile testDir = new TestFile("", "testDirectory");
testDir.setIsDir(true);
testDir.addContents(testfile01, testfile02, testfile03, testfile04,
testfile05, testfile06);
LinkedList<PathData> pathData = new LinkedList<PathData>();
pathData.add(testDir.getPathData());
PrintStream out = mock(PrintStream.class);
Ls ls = new Ls();
ls.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-t");
options.add("-r");
ls.processOptions(options);
String lineFormat = TestFile.computeLineFormat(pathData);
ls.processArguments(pathData);
InOrder inOrder = inOrder(out);
inOrder.verify(out).println("Found 6 items");
inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat));
inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat));
verifyNoMoreInteractions(out);
} |
@Implementation
protected HttpResponse execute(
HttpHost httpHost, HttpRequest httpRequest, HttpContext httpContext)
throws HttpException, IOException {
if (FakeHttp.getFakeHttpLayer().isInterceptingHttpRequests()) {
return FakeHttp.getFakeHttpLayer()
.emulateRequest(httpHost, httpRequest, httpContext, realObject);
} else {
FakeHttp.getFakeHttpLayer()
.addRequestInfo(new HttpRequestInfo(httpRequest, httpHost, httpContext, redirector));
HttpResponse response = redirector.execute(httpHost, httpRequest, httpContext);
if (FakeHttp.getFakeHttpLayer().isInterceptingResponseContent()) {
interceptResponseContent(response);
}
FakeHttp.getFakeHttpLayer().addHttpResponse(response);
return response;
}
} | @Test
public void shouldReturnRequestsByRule_WithTextResponse() throws Exception {
FakeHttp.addHttpResponseRule("http://some.uri", "a cheery response body");
HttpResponse response = requestDirector.execute(null, new HttpGet("http://some.uri"), null);
assertNotNull(response);
assertThat(response.getStatusLine().getStatusCode()).isEqualTo(200);
assertThat(getStringContent(response)).isEqualTo("a cheery response body");
} |
protected static String getOpenSSLVersion(long openSSLVersionConstant) {
final long major = openSSLVersionConstant >>> MAJOR_OFFSET;
final long minor = (openSSLVersionConstant & MINOR_MASK) >>> MINOR_OFFSET;
final long fix = (openSSLVersionConstant & FIX_MASK) >>> FIX_OFFSET;
final long patchLevel = (openSSLVersionConstant & PATCH_MASK) >>> PATCH_OFFSET;
final String patch = 0 == patchLevel || patchLevel > NUM_LETTERS ? "" : String.valueOf((char) (patchLevel + 'a' - 1));
final int statusCode = (int) (openSSLVersionConstant & STATUS_MASK);
final String status = 0xf == statusCode ? "" : (0 == statusCode ? "-dev" : "-beta" + statusCode);
return String.format("%d.%d.%d%s%s", major, minor, fix, patch, status);
} | @Test
public void testVersionConstantExamples() {
final long[] constants = {0x1000203fL, 0x00903000L, 0x00903001L, 0x00903002L, 0x0090300fL, 0x0090301fL, 0x0090400fL, 0x102031afL};
final String[] versions = {"1.0.2c",
"0.9.3-dev",
"0.9.3-beta1",
"0.9.3-beta2",
"0.9.3",
"0.9.3a",
"0.9.4",
"1.2.3z"};
assertEquals(constants.length, versions.length);
for (int i = 0; i < constants.length; i++) {
assertEquals(versions[i], OpenSSLAnalyzer.getOpenSSLVersion(constants[i]));
}
} |
@Override
public void isNotEqualTo(@Nullable Object expected) {
super.isNotEqualTo(expected);
} | @Test
public void isNotEqualTo_WithoutToleranceParameter_Success_NotEqual() {
assertThat(array(2.2d)).isNotEqualTo(array(OVER_2POINT2));
} |
public static Builder builder() {
return new Builder();
} | @Test
@SuppressWarnings("JdkObsolete") // for hashtable
public void testBuilder_environmentMapTypes() {
// Can accept empty environment.
Assert.assertNotNull(
ContainerConfiguration.builder().setEnvironment(ImmutableMap.of()).build());
// Can handle other map types (https://github.com/GoogleContainerTools/jib/issues/632)
Assert.assertNotNull(ContainerConfiguration.builder().setEnvironment(new TreeMap<>()));
Assert.assertNotNull(ContainerConfiguration.builder().setEnvironment(new Hashtable<>()));
} |
public static Serializable decode(final ByteBuf byteBuf) {
int valueType = byteBuf.readUnsignedByte() & 0xff;
StringBuilder result = new StringBuilder();
decodeValue(valueType, 1, byteBuf, result);
return result.toString();
} | @Test
void assertDecodeLargeJsonObjectWithLiteral() {
List<JsonEntry> jsonEntries = new LinkedList<>();
jsonEntries.add(new JsonEntry(JsonValueTypes.LITERAL, "key1", JsonValueTypes.LITERAL_NULL));
jsonEntries.add(new JsonEntry(JsonValueTypes.LITERAL, "key2", JsonValueTypes.LITERAL_TRUE));
jsonEntries.add(new JsonEntry(JsonValueTypes.LITERAL, "key3", JsonValueTypes.LITERAL_FALSE));
ByteBuf payload = mockJsonObjectByteBuf(jsonEntries, false);
String actual = (String) MySQLJsonValueDecoder.decode(payload);
assertThat(actual, is("{\"key1\":null,\"key2\":true,\"key3\":false}"));
} |
public List<String> getGlobalWhiteAddrs() {
return globalWhiteAddrs;
} | @Test
public void testGetGlobalWhiteAddrsWhenNull() {
AclConfig aclConfig = new AclConfig();
Assert.assertNull("The globalWhiteAddrs should return null", aclConfig.getGlobalWhiteAddrs());
} |
@Operation(summary = "Receive SAML AuthnRequest")
@PostMapping(value = {"/frontchannel/saml/v4/entrance/request_authentication", "/frontchannel/saml/v4/idp/request_authentication"})
public RedirectView requestAuthenticationService(HttpServletRequest request) throws SamlValidationException, SharedServiceClientException, DienstencatalogusException, UnsupportedEncodingException, ComponentInitializationException, MessageDecodingException, SamlSessionException, SamlParseException {
logger.info("Receive SAML AuthnRequest");
if (request.getParameter("SAMLRequest") != null) {
AuthenticationRequest authenticationRequest = authenticationService.startAuthenticationProcess(request);
return new RedirectView(authenticationRequest.getProtocolType().equals(ProtocolType.SAML_ROUTERINGSDIENST) ?
authenticationIdpService.redirectWithCorrectAttributesForAd(request, authenticationRequest) :
authenticationEntranceService.redirectWithCorrectAttributesForAd(request, authenticationRequest)
);
} else {
RedirectView redirectView = new RedirectView("/saml/v4/idp/redirect_with_artifact");
redirectView.setStatusCode(HttpStatus.BAD_REQUEST);
return redirectView;
}
} | @Test
public void successfulRequestAuthenticationEntranceServiceTest() throws UnsupportedEncodingException, SamlSessionException, DienstencatalogusException, SharedServiceClientException, SamlValidationException, MessageDecodingException, ComponentInitializationException, SamlParseException {
AuthenticationRequest authenticationRequest = new AuthenticationRequest();
authenticationRequest.setProtocolType(ProtocolType.SAML_COMBICONNECT);
when(request.getParameter("SAMLRequest")).thenReturn("test");
when(authenticationServiceMock.startAuthenticationProcess(any(HttpServletRequest.class))).thenReturn(authenticationRequest);
RedirectView result = authenticationControllerMock.requestAuthenticationService(request);
assertNotNull(result);
verify(authenticationServiceMock, times(1)).startAuthenticationProcess(any(HttpServletRequest.class));
verify(authenticationEntranceServiceMock, times(1)).redirectWithCorrectAttributesForAd(any(HttpServletRequest.class), any(AuthenticationRequest.class));
} |
public void cancelRoomBooking(int roomNumber) throws Exception {
var room = hotelDao.getById(roomNumber);
if (room.isEmpty()) {
throw new Exception("Room number: " + roomNumber + " does not exist");
} else {
if (room.get().isBooked()) {
var updateRoomBooking = room.get();
updateRoomBooking.setBooked(false);
int refundAmount = updateRoomBooking.getPrice();
hotelDao.update(updateRoomBooking);
LOGGER.info("Booking cancelled for room number: " + roomNumber);
LOGGER.info(refundAmount + " is refunded");
} else {
throw new Exception("No booking for the room exists");
}
}
} | @Test
void cancelRoomBookingForUnbookedRoomShouldRaiseException() {
assertThrows(Exception.class, () -> hotel.cancelRoomBooking(1));
} |
protected boolean isSystemAccount(Subject subject) {
PrincipalCollection pc = subject.getPrincipals();
return pc != null && matches(pc, systemAccountUsername, systemAccountRealmName);
} | @Test
public void testIsSystemAccountWithNullPrincipals() {
assertFalse(policy.isSystemAccount(new SubjectAdapter()));
} |
public static DataSource getDataSource( LogChannelInterface log, DatabaseMeta dbMeta, String partitionId ) throws KettleDatabaseException {
int initialSize = dbMeta.getInitialPoolSize();
int maximumSize = dbMeta.getMaximumPoolSize();
lock.lock();
try {
if ( !isDataSourceRegistered( dbMeta, partitionId ) ) {
addPoolableDataSource( log, dbMeta, partitionId, initialSize, maximumSize );
}
} finally {
lock.unlock();
}
return dataSources.get( getDataSourceName( dbMeta, partitionId ) );
} | @Test
public void testGetConnectionEncrypted() throws Exception {
when( dbMeta.getName() ).thenReturn( "CP2" );
when( dbMeta.getPassword() ).thenReturn( ENCR_PASSWORD );
when( dbMeta.getInitialPoolSize() ).thenReturn( 1 );
when( dbMeta.getMaximumPoolSize() ).thenReturn( 2 );
DataSource conn = ConnectionPoolUtil.getDataSource( logChannelInterface, dbMeta, "" );
assertNotNull( conn );
} |
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
} | @Test
void shouldKeepSubscriptionMediaEndpointUponRemovalOfAllButOneSubscriber()
{
final long id1 = driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_1);
final long id2 = driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_2);
driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_3);
while (true)
{
if (0 == driverConductor.doWork())
{
break;
}
}
final ArgumentCaptor<ReceiveChannelEndpoint> captor = ArgumentCaptor.forClass(ReceiveChannelEndpoint.class);
verify(receiverProxy).registerReceiveChannelEndpoint(captor.capture());
receiveChannelEndpoint = captor.getValue();
assertNotNull(receiveChannelEndpoint);
assertEquals(3, receiveChannelEndpoint.distinctSubscriptionCount());
driverProxy.removeSubscription(id1);
driverProxy.removeSubscription(id2);
driverConductor.doWork();
driverConductor.doWork();
assertEquals(1, receiveChannelEndpoint.distinctSubscriptionCount());
} |
@JsonProperty
public URI getUri()
{
return uri;
} | @Test
public void testQueryDividedIntoSplitsLastSplitHasRightTime()
throws URISyntaxException
{
Instant now = LocalDateTime.of(2019, 10, 2, 7, 26, 56, 0).toInstant(UTC);
PrometheusConnectorConfig config = getCommonConfig(prometheusHttpServer.resolve("/prometheus-data/prometheus-metrics.json"));
PrometheusClient client = new PrometheusClient(config, METRIC_CODEC, TYPE_MANAGER);
PrometheusTable table = client.getTable("default", "up");
PrometheusTableHandle tableHandle = new PrometheusTableHandle("default", table.getName());
TupleDomain<ColumnHandle> columnConstraints = TupleDomain.withColumnDomains(
ImmutableMap.of(
new PrometheusColumnHandle("value", BIGINT, 1), Domain.all(VARCHAR),
new PrometheusColumnHandle("text", createUnboundedVarcharType(), 0), Domain.all(VARCHAR)));
PrometheusTableLayoutHandle tableLayoutHandle = new PrometheusTableLayoutHandle(tableHandle, columnConstraints);
PrometheusSplitManager splitManager = new PrometheusSplitManager(client, fixedClockAt(now), config);
ConnectorSplitSource splitsMaybe = splitManager.getSplits(
null,
null,
tableLayoutHandle,
null);
List<ConnectorSplit> splits = splitsMaybe.getNextBatch(NOT_PARTITIONED, NUMBER_MORE_THAN_EXPECTED_NUMBER_SPLITS).getNow(null).getSplits();
int lastSplitIndex = splits.size() - 1;
PrometheusSplit lastSplit = (PrometheusSplit) splits.get(lastSplitIndex);
String queryInSplit = lastSplit.getUri().getQuery();
String timeShouldBe = decimalSecondString(now.toEpochMilli());
URI uriAsFormed = new URI("http://doesnotmatter.example:9090/api/v1/query?query=up[" +
getQueryChunkSizeDurationAsPrometheusCompatibleDurationString(config) + "]" +
"&time=" + timeShouldBe);
assertEquals(queryInSplit, uriAsFormed.getQuery());
} |
public static UMemberSelect create(UExpression expression, CharSequence identifier, UType type) {
return new AutoValue_UMemberSelect(expression, StringName.of(identifier), type);
} | @Test
public void serialization() {
SerializableTester.reserializeAndAssert(
UMemberSelect.create(
ULiteral.stringLit("foo"),
"indexOf",
UMethodType.create(UPrimitiveType.INT, UPrimitiveType.INT)));
} |
public static StatementExecutorResponse execute(
final ConfiguredStatement<AssertSchema> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
return AssertExecutor.execute(
statement.getMaskedStatementText(),
statement.getStatement(),
executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
serviceContext,
(stmt, sc) -> assertSchema(
sc.getSchemaRegistryClient(),
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists()),
(str, stmt) -> new AssertSchemaEntity(
str,
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists())
);
} | @Test
public void shouldFailToAssertSchemaById() {
// Given
final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.empty(), Optional.of(100), Optional.empty(), true);
final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
.of(KsqlParser.PreparedStatement.of("", assertSchema),
SessionConfig.of(ksqlConfig, ImmutableMap.of()));
// When:
final KsqlRestException e = assertThrows(KsqlRestException.class, () ->
AssertSchemaExecutor.execute(statement, mock(SessionProperties.class), engine, serviceContext));
// Then:
assertThat(e.getResponse().getStatus(), is(417));
assertThat(((KsqlErrorMessage) e.getResponse().getEntity()).getMessage(), is("Schema with id 100 does not exist"));
} |
public static String[] parseKey(String groupKey) {
StringBuilder sb = new StringBuilder();
String dataId = null;
String group = null;
String tenant = null;
for (int i = 0; i < groupKey.length(); ++i) {
char c = groupKey.charAt(i);
if ('+' == c) {
if (null == dataId) {
dataId = sb.toString();
sb.setLength(0);
} else if (null == group) {
group = sb.toString();
sb.setLength(0);
} else {
throw new IllegalArgumentException("invalid groupkey:" + groupKey);
}
} else if ('%' == c) {
char next = groupKey.charAt(++i);
char nextnext = groupKey.charAt(++i);
if ('2' == next && 'B' == nextnext) {
sb.append('+');
} else if ('2' == next && '5' == nextnext) {
sb.append('%');
} else {
throw new IllegalArgumentException("invalid groupkey:" + groupKey);
}
} else {
sb.append(c);
}
}
if (StringUtils.isBlank(group)) {
group = sb.toString();
} else {
tenant = sb.toString();
}
if (group.length() == 0) {
throw new IllegalArgumentException("invalid groupkey:" + groupKey);
}
return new String[] {dataId, group, tenant};
} | @Test
void testParseKeyBySingleCharacter() {
// Act
final String[] actual = GroupKey.parseKey("/");
// Assert result
assertArrayEquals(new String[] {null, "/", null}, actual);
} |
@Override
public byte[] getBytes() {
return bytes;
} | @Test
public void testCompare() throws Exception {
byte[][] values = new byte[][]{"abc".getBytes(),
"ad".getBytes(),
"abcd".getBytes(),
"".getBytes(),
"b".getBytes()};
BytesWritable[] buf = new BytesWritable[values.length];
for(int i=0; i < values.length; ++i) {
buf[i] = new BytesWritable(values[i]);
}
    // check to make sure the compare function is symmetric and reflexive
for(int i=0; i < values.length; ++i) {
for(int j=0; j < values.length; ++j) {
assertTrue(buf[i].compareTo(buf[j]) == -buf[j].compareTo(buf[i]));
assertTrue((i == j) == (buf[i].compareTo(buf[j]) == 0));
}
}
assertTrue(buf[0].compareTo(buf[1]) < 0);
assertTrue(buf[1].compareTo(buf[2]) > 0);
assertTrue(buf[2].compareTo(buf[3]) > 0);
assertTrue(buf[3].compareTo(buf[4]) < 0);
} |
public static long toLong(String value) {
String[] octets = value.split(":");
if (octets.length > 8) {
throw new NumberFormatException("Input string is too big to fit in long: " + value);
}
long l = 0;
for (String octet: octets) {
if (octet.length() > 2) {
throw new NumberFormatException(
"Each colon-separated byte component must consist of 1 or 2 hex digits: " + value);
}
short s = Short.parseShort(octet, 16);
l = (l << 8) + s;
}
return l;
} | @Test
public void testToLongMsb() {
String dpidStr = "ca:7c:5e:d1:64:7a:95:9b";
long valid = -3856102927509056101L;
long testLong = HexString.toLong(dpidStr);
assertEquals(valid, testLong);
} |
@SuppressWarnings("unchecked")
public Object clone() {
try {
IntHashMap<V> m = (IntHashMap<V>) super.clone();
m.states = states.clone();
m.keys = keys.clone();
m.values = values.clone();
return m;
} catch (CloneNotSupportedException e) {
throw new InternalError();
}
} | @SuppressWarnings("unchecked")
@Test
public void testClone() {
removeOdd();
IntHashMap<Integer> clone = (IntHashMap<Integer>) map.clone();
map.clear();
testGet(clone);
} |
public Optional<EventProcessorStateDto> setState(EventProcessorStateDto dto) {
return setState(dto.eventDefinitionId(), dto.minProcessedTimestamp(), dto.maxProcessedTimestamp());
} | @Test
public void setState() {
final DateTime now = DateTime.now(DateTimeZone.UTC);
// Before we set the state, there should be no record
assertThat(stateService.findByEventDefinitionId("yolo")).isNotPresent();
assertThat(stateService.setState("yolo", now.minusHours(1), now))
.isPresent()
.get()
.satisfies(dto1 -> {
assertThat(dto1.minProcessedTimestamp()).isEqualTo(now.minusHours(1));
assertThat(dto1.maxProcessedTimestamp()).isEqualTo(now);
assertThat(dto1.eventDefinitionId()).isEqualTo("yolo");
assertThat(stateService.setState("yolo", now, now.plusHours(1)))
.isPresent()
.get()
.satisfies(dto2 -> {
// The second setState call should update the existing one
assertThat(dto2.id()).isEqualTo(dto1.id());
assertThat(dto2.eventDefinitionId()).isEqualTo("yolo");
assertThat(dto2.minProcessedTimestamp()).isEqualTo(dto1.minProcessedTimestamp());
assertThat(dto2.maxProcessedTimestamp()).isEqualTo(dto1.maxProcessedTimestamp().plusHours(1));
});
});
} |
public boolean getAfterEndOfLifeDate() {
return afterEndOfLifeDate;
} | @Test
public void testGetAfterEndOfLifeDate() {
assertFalse(monitor.getAfterEndOfLifeDate());
} |
public static Optional<String> findFirstManifestAttribute(File jarFile, String... attributes)
throws IOException {
if (attributes.length == 0) {
return Optional.empty();
}
try (JarFile f = new JarFile(jarFile)) {
return findFirstManifestAttribute(f, attributes);
}
} | @Test
void testFindFirstManifestAttributeWithAttributes() throws IOException {
Optional<String> optionalValue =
JarManifestParser.findFirstManifestAttribute(
TestJob.getTestJobJar(), PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS);
assertThat(optionalValue).get().isEqualTo("org.apache.flink.client.testjar.TestJob");
} |
public SearchResults<GroupInformation> search(DbSession dbSession, GroupSearchRequest groupSearchRequest) {
GroupDto defaultGroup = defaultGroupFinder.findDefaultGroup(dbSession);
GroupQuery query = toGroupQuery(groupSearchRequest);
int limit = dbClient.groupDao().countByQuery(dbSession, query);
if (groupSearchRequest.page() == 0) {
return new SearchResults<>(List.of(), limit);
}
List<GroupDto> groups = dbClient.groupDao().selectByQuery(dbSession, query, groupSearchRequest.page(), groupSearchRequest.pageSize());
List<String> groupUuids = extractGroupUuids(groups);
Map<String, Boolean> groupUuidToIsManaged = managedInstanceService.getGroupUuidToManaged(dbSession, new HashSet<>(groupUuids));
List<GroupInformation> results = groups.stream()
.map(groupDto -> toGroupInformation(groupDto, defaultGroup.getUuid(), groupUuidToIsManaged))
.toList();
return new SearchResults<>(results, limit);
} | @Test
public void search_whenInstanceNotManagedAndManagedIsTrue_throws() {
assertThatExceptionOfType(BadRequestException.class)
.isThrownBy(() -> groupService.search(dbSession, new GroupSearchRequest("query", true, 5, 24)))
.withMessage("The 'managed' parameter is only available for managed instances.");
} |
public static List<TypedExpression> coerceCorrectConstructorArguments(
final Class<?> type,
List<TypedExpression> arguments,
List<Integer> emptyCollectionArgumentsIndexes) {
Objects.requireNonNull(type, "Type parameter cannot be null as the method searches constructors from that class!");
Objects.requireNonNull(arguments, "Arguments parameter cannot be null! Use an empty list instance if needed instead.");
Objects.requireNonNull(emptyCollectionArgumentsIndexes, "EmptyListArgumentIndexes parameter cannot be null! Use an empty list instance if needed instead.");
if (emptyCollectionArgumentsIndexes.size() > arguments.size()) {
throw new IllegalArgumentException("There cannot be more empty collection arguments than all arguments! emptyCollectionArgumentsIndexes parameter has more items than arguments parameter. "
+ "(" + emptyCollectionArgumentsIndexes.size() + " > " + arguments.size() + ")");
}
        // Work only with the argument types; once a matching method is resolved, adjust the arguments list based on it.
final List<TypedExpression> coercedArgumentsTypesList = new ArrayList<>(arguments);
Constructor<?> constructor = resolveConstructor(type, coercedArgumentsTypesList);
if (constructor != null) {
return coercedArgumentsTypesList;
} else {
// This needs to go through all possible combinations.
final int indexesListSize = emptyCollectionArgumentsIndexes.size();
for (int numberOfProcessedIndexes = 0; numberOfProcessedIndexes < indexesListSize; numberOfProcessedIndexes++) {
for (int indexOfEmptyListIndex = numberOfProcessedIndexes; indexOfEmptyListIndex < indexesListSize; indexOfEmptyListIndex++) {
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
constructor = resolveConstructor(type, coercedArgumentsTypesList);
if (constructor != null) {
return coercedArgumentsTypesList;
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(indexOfEmptyListIndex));
}
switchCollectionClassInArgumentsByIndex(coercedArgumentsTypesList, emptyCollectionArgumentsIndexes.get(numberOfProcessedIndexes));
}
// No constructor found, return the original arguments.
return arguments;
}
} | @Test
public void coerceCorrectConstructorArgumentsTypeIsNull() {
Assertions.assertThatThrownBy(
() -> MethodResolutionUtils.coerceCorrectConstructorArguments(
null,
null,
null))
.isInstanceOf(NullPointerException.class);
} |
@Override
public List<Map<String, String>> query(String queryStr) {
if (null == indexDirectory) {
throw new IllegalStateException(
"Something went wrong on instance creation time, index dir is null");
}
List<Map<String, String>> result = Collections.emptyList();
try (IndexReader indexReader = DirectoryReader.open(indexDirectory)) {
IndexSearcher indexSearcher = new IndexSearcher(indexReader);
Analyzer analyzer = new StandardAnalyzer();
MultiFieldQueryParser parser =
new MultiFieldQueryParser(new String[] {SEARCH_FIELD_TEXT, SEARCH_FIELD_TITLE}, analyzer);
Query query = parser.parse(queryStr);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Searching for: {}", query.toString(SEARCH_FIELD_TEXT));
}
SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter();
Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query));
result = doSearch(indexSearcher, query, analyzer, highlighter);
} catch (IOException e) {
LOGGER.error("Failed to open index dir {}, make sure indexing finished OK", indexDirectory, e);
} catch (ParseException e) {
LOGGER.error("Failed to parse query {}", queryStr, e);
}
return result;
} | @Test
void canIndexAndQueryByNotebookName() throws IOException, InterruptedException {
// given
String note1Id = newNoteWithParagraph("Notebook1", "test");
newNoteWithParagraphs("Notebook2", "not test", "not test at all");
drainSearchEvents();
// when
List<Map<String, String>> results = noteSearchService.query("Notebook1");
// then
assertFalse(results.isEmpty());
assertEquals(1, results.size());
assertEquals(note1Id, results.get(0).get("id"));
} |
public static String getMinimalHashTag(final int slot) {
return HASHES_BY_SLOT[slot];
} | @Test
void testGetMinimalHashTag() {
for (int slot = 0; slot < SlotHash.SLOT_COUNT; slot++) {
assertEquals(slot, SlotHash.getSlot(RedisClusterUtil.getMinimalHashTag(slot)));
}
} |
@Nonnull
public static <T> AggregateOperation1<T, MutableReference<T>, T> maxBy(
@Nonnull ComparatorEx<? super T> comparator
) {
checkSerializable(comparator, "comparator");
return AggregateOperation
.withCreate(MutableReference<T>::new)
.andAccumulate((MutableReference<T> a, T i) -> {
if (a.isNull() || comparator.compare(i, a.get()) > 0) {
a.set(i);
}
})
.andCombine((a1, a2) -> {
if (a1.isNull() || (!a2.isNull() && comparator.compare(a1.get(), a2.get()) < 0)) {
a1.set(a2.get());
}
})
.andExportFinish(MutableReference::get);
} | @Test
public void when_maxBy() {
validateOpWithoutDeduct(maxBy(naturalOrder()), MutableReference::get,
10L, 11L, 10L, 11L, 11L);
} |
public Optional<ContentPack> insert(final ContentPack pack) {
if (findByIdAndRevision(pack.id(), pack.revision()).isPresent()) {
LOG.debug("Content pack already found: id: {} revision: {}. Did not insert!", pack.id(), pack.revision());
return Optional.empty();
}
final WriteResult<ContentPack, ObjectId> writeResult = dbCollection.insert(pack);
return Optional.of(writeResult.getSavedObject());
} | @Test
public void insert() {
final ContentPackV1 contentPack = ContentPackV1.builder()
.id(ModelId.of("id"))
.revision(1)
.name("name")
.description("description")
.summary("summary")
.vendor("vendor")
.url(URI.create("https://www.graylog.org/"))
.entities(ImmutableSet.of())
.build();
final Optional<ContentPack> savedContentPack = contentPackPersistenceService.insert(contentPack);
assertThat(savedContentPack)
.isPresent()
.get()
.isEqualToIgnoringGivenFields(contentPack, "_id");
} |
@Override
public boolean imbalanceDetected(LoadImbalance imbalance) {
long min = imbalance.minimumLoad;
long max = imbalance.maximumLoad;
if (min == Long.MIN_VALUE || max == Long.MAX_VALUE) {
return false;
}
long lowerBound = (long) (MIN_MAX_RATIO_MIGRATION_THRESHOLD * max);
return min < lowerBound;
} | @Test
public void testImbalanceDetected_shouldReturnTrueWhenNotBalanced() {
imbalance.maximumLoad = 1000;
imbalance.minimumLoad = (long) (1000 * 0.8) - 1;
boolean imbalanceDetected = strategy.imbalanceDetected(imbalance);
assertTrue(imbalanceDetected);
} |
public static List<String> generateProgressDisplay(
double progress, List<String> unfinishedLeafTasks) {
List<String> lines = new ArrayList<>();
lines.add(HEADER);
lines.add(generateProgressBar(progress));
for (String task : unfinishedLeafTasks) {
lines.add("> " + task);
}
return lines;
} | @Test
public void testGenerateProgressDisplay_progressBar_0() {
Assert.assertEquals(
Arrays.asList("Executing tasks:", getBar("[ ]", 0.0)),
ProgressDisplayGenerator.generateProgressDisplay(0, Collections.emptyList()));
} |
@Override
public AclFileAttributeView view(
FileLookup lookup, ImmutableMap<String, FileAttributeView> inheritedViews) {
return new View(lookup, (FileOwnerAttributeView) inheritedViews.get("owner"));
} | @Test
public void testView() throws IOException {
AclFileAttributeView view =
provider.view(
fileLookup(),
ImmutableMap.<String, FileAttributeView>of(
"owner", new OwnerAttributeProvider().view(fileLookup(), NO_INHERITED_VIEWS)));
assertNotNull(view);
assertThat(view.name()).isEqualTo("acl");
assertThat(view.getAcl()).isEqualTo(defaultAcl);
view.setAcl(ImmutableList.<AclEntry>of());
view.setOwner(FOO);
assertThat(view.getAcl()).isEqualTo(ImmutableList.<AclEntry>of());
assertThat(view.getOwner()).isEqualTo(FOO);
assertThat(file.getAttribute("acl", "acl")).isEqualTo(ImmutableList.<AclEntry>of());
} |
@Override
public boolean add(ResourceConfig resourceConfig) {
if (this.contains(resourceConfig) || isBlank(resourceConfig.getName())) {
return false;
}
super.add(resourceConfig);
return true;
} | @Test
public void shouldNotAddDuplicateResources() {
ResourceConfigs expected = new ResourceConfigs();
expected.add(new ResourceConfig("jdk1.4"));
expected.add(new ResourceConfig("jdk1.5"));
ResourceConfigs actual = new ResourceConfigs();
actual.add(new ResourceConfig("jdk1.4"));
actual.add(new ResourceConfig("jdk1.5"));
actual.add(new ResourceConfig("Jdk1.5"));
assertThat(expected, is(actual));
} |
public static Instant garbageCollectionTime(
BoundedWindow window, WindowingStrategy windowingStrategy) {
return garbageCollectionTime(window, windowingStrategy.getAllowedLateness());
} | @Test
public void beforeEndOfGlobalWindowSame() {
FixedWindows windowFn = FixedWindows.of(Duration.standardMinutes(5));
Duration allowedLateness = Duration.standardMinutes(2);
WindowingStrategy<?, ?> strategy =
WindowingStrategy.globalDefault()
.withWindowFn(windowFn)
.withAllowedLateness(allowedLateness);
IntervalWindow window = windowFn.assignWindow(new Instant(10));
assertThat(
LateDataUtils.garbageCollectionTime(window, strategy),
equalTo(window.maxTimestamp().plus(allowedLateness)));
} |
public BrokerFileSystem getFileSystem(String path, Map<String, String> properties) {
WildcardURI pathUri = new WildcardURI(path);
String scheme = pathUri.getUri().getScheme();
if (Strings.isNullOrEmpty(scheme)) {
throw new BrokerException(TBrokerOperationStatusCode.INVALID_INPUT_FILE_PATH,
"invalid path. scheme is null");
}
BrokerFileSystem brokerFileSystem = null;
if (scheme.equals(HDFS_SCHEME) || scheme.equals(VIEWFS_SCHEME)) {
brokerFileSystem = getDistributedFileSystem(scheme, path, properties);
} else if (scheme.equals(S3A_SCHEME)) {
brokerFileSystem = getS3AFileSystem(path, properties);
} else if (scheme.equals(OSS_SCHEME)) {
brokerFileSystem = getOSSFileSystem(path, properties);
} else if (scheme.equals(COS_SCHEME)) {
brokerFileSystem = getCOSFileSystem(path, properties);
} else if (scheme.equals(KS3_SCHEME)) {
brokerFileSystem = getKS3FileSystem(path, properties);
} else if (scheme.equals(OBS_SCHEME)) {
brokerFileSystem = getOBSFileSystem(path, properties);
} else if (scheme.equals(TOS_SCHEME)) {
brokerFileSystem = getTOSFileSystem(path, properties);
} else {
            // If all of the above matches fail, we read the settings from the FE's hdfs-site.xml and core-site.xml,
            // and try to create a universal file system. We can do this because the hadoop/s3
            // SDK is compatible with nearly all file/object storage systems
brokerFileSystem = getUniversalFileSystem(path, properties);
}
return brokerFileSystem;
} | @Test
public void testGetFileSystemForHAWithNoNames() throws IOException {
Map<String, String> properties = new HashMap<String, String>();
properties.put("username", "user");
properties.put("password", "passwd");
properties.put("fs.defaultFS", "hdfs://starrocks");
properties.put("dfs.nameservices", "starrocks");
properties.put("dfs.client.failover.proxy.provider.bdos",
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
boolean haveException = false;
try {
BrokerFileSystem fs = fileSystemManager.getFileSystem(testHdfsHost + "/data/abc/logs", properties);
} catch (BrokerException be) {
haveException = true;
}
assertEquals(true, haveException);
} |
@ConstantFunction(name = "multiply", argTypes = {INT, INT}, returnType = INT)
public static ConstantOperator multiplyInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createInt(Math.multiplyExact(first.getInt(), second.getInt()));
} | @Test
public void multiplyInt() {
assertEquals(100,
ScalarOperatorFunctions.multiplyInt(O_INT_10, O_INT_10).getInt());
} |
@Override
public Long run(final Session<?> session) throws BackgroundException {
for(Path next : files) {
next.attributes().setSize(this.calculateSize(session, next));
}
return total;
} | @Test
public void testRun() throws Exception {
final List<Path> files = new ArrayList<>();
final Path a = new Path("a", EnumSet.of(Path.Type.file));
a.attributes().setSize(1L);
files.add(a);
final Path b = new Path("a", EnumSet.of(Path.Type.file));
b.attributes().setSize(3L);
files.add(b);
assertEquals(4L, new CalculateSizeWorker(files,
new DisabledProgressListener()) {
int i = 0;
@Override
public void cleanup(final Long result) {
assertEquals(4L, result, 0L);
}
@Override
protected void update(final long size) {
if(0 == i) {
assertEquals(1L, size, 0L);
}
if(1 == i) {
assertEquals(4L, size, 0L);
}
i++;
}
}.run(new NullSession(new Host(new TestProtocol()))), 0L);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.