| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
// Carries the current Service's healthCheckNodePort over to the desired spec when the
// desired spec leaves it unset, so a previously assigned node port survives patching.
protected void patchHealthCheckPorts(Service current, Service desired) {
if (current.getSpec().getHealthCheckNodePort() != null
&& desired.getSpec().getHealthCheckNodePort() == null) {
desired.getSpec().setHealthCheckNodePort(current.getSpec().getHealthCheckNodePort());
}
} | @Test
public void testHealthCheckPortPatching() {
KubernetesClient client = mock(KubernetesClient.class);
Service current = new ServiceBuilder()
.withNewMetadata()
.withNamespace(NAMESPACE)
.withName(RESOURCE_NAME)
.endMetadata()
.withNewSpec()
.withType("LoadBalancer")
.withHealthCheckNodePort(34321)
.endSpec()
.build();
Service desired = new ServiceBuilder()
.withNewMetadata()
.withNamespace(NAMESPACE)
.withName(RESOURCE_NAME)
.endMetadata()
.withNewSpec()
.withType("LoadBalancer")
.endSpec()
.build();
ServiceOperator op = new ServiceOperator(vertx, client);
op.patchHealthCheckPorts(current, desired);
assertThat(current.getSpec().getHealthCheckNodePort(), is(desired.getSpec().getHealthCheckNodePort()));
} |
// Transfers up to buf.writableBytes() bytes (capped by remaining file length) from the
// position reader into the Netty buffer. Returns -1 at EOF (mPos >= mFileSize), otherwise
// the number of bytes read; mPos only advances when bytesRead > 0.
@Override
public int transferTo(ByteBuf buf) throws IOException {
if (mFileSize <= mPos) {
return -1;
}
int bytesToTransfer =
(int) Math.min(buf.writableBytes(), mFileSize - mPos);
ReadTargetBuffer targetBuffer = new NettyBufTargetBuffer(buf);
int bytesRead = mPositionReader.read(mPos, targetBuffer, bytesToTransfer);
if (bytesRead > 0) {
mPos += bytesRead;
}
return bytesRead;
} | @Test
public void transferTo() throws IOException {
int testNum = Math.min(mFileLen, mMinTestNum);
for (int i = 0; i < testNum; i++) {
int offset = mRandom.nextInt(mFileLen);
mPagedFileReader.setPosition(offset);
ByteBuf byteBuf = Unpooled.buffer(mFileLen);
int bytesRead = mPagedFileReader.transferTo(byteBuf);
byte[] realBytesArray = new byte[bytesRead];
System.arraycopy(mTestData, offset, realBytesArray, 0, bytesRead);
byte[] bytesArray = new byte[byteBuf.readableBytes()];
byteBuf.readBytes(bytesArray);
assertTrue(bytesRead > 0);
assertArrayEquals(realBytesArray, bytesArray);
}
} |
// Validates cross-field invariants of a CPSubsystemConfig; each checkTrue throws
// IllegalArgumentException with the given message when its condition is violated.
public static void checkCPSubsystemConfig(CPSubsystemConfig config) {
checkTrue(config.getGroupSize() <= config.getCPMemberCount(),
"The group size parameter cannot be bigger than the number of the CP member count");
// strict inequality: TTL == heartbeat interval is rejected
checkTrue(config.getSessionTimeToLiveSeconds() > config.getSessionHeartbeatIntervalSeconds(),
"Session TTL must be greater than session heartbeat interval!");
// 0 means auto-removal is disabled, so the TTL bound is skipped in that case
checkTrue(config.getMissingCPMemberAutoRemovalSeconds() == 0
|| config.getSessionTimeToLiveSeconds() <= config.getMissingCPMemberAutoRemovalSeconds(),
"Session TTL must be smaller than or equal to missing CP member auto-removal seconds!")
;
checkTrue(!config.isPersistenceEnabled() || config.getCPMemberCount() > 0,
"CP member count must be greater than 0 to use CP persistence feature!");
} | @Test(expected = IllegalArgumentException.class)
public void testValidationFails_whenSessionHeartbeatIntervalGreaterThanSessionTTL() {
CPSubsystemConfig config = new CPSubsystemConfig();
config.setSessionTimeToLiveSeconds(5);
config.setSessionHeartbeatIntervalSeconds(10);
checkCPSubsystemConfig(config);
} |
// Blocking poll over several queues: delegates to the async variant and waits
// (interruptibly) for the first available element, up to the given timeout.
@Override
public V pollFromAny(long timeout, TimeUnit unit, String... queueNames) throws InterruptedException {
return commandExecutor.getInterrupted(pollFromAnyAsync(timeout, unit, queueNames));
} | @Test
public void testPollFromAny() throws InterruptedException {
final RBlockingQueue<Integer> queue1 = redisson.getBlockingQueue("queue:pollany");
Executors.newSingleThreadScheduledExecutor().schedule(() -> {
RBlockingQueue<Integer> queue2 = redisson.getBlockingQueue("queue:pollany1");
RBlockingQueue<Integer> queue3 = redisson.getBlockingQueue("queue:pollany2");
Assertions.assertDoesNotThrow(() -> {
queue3.put(2);
queue1.put(1);
queue2.put(3);
});
}, 3, TimeUnit.SECONDS);
long s = System.currentTimeMillis();
int l = queue1.pollFromAny(4, TimeUnit.SECONDS, "queue:pollany1", "queue:pollany2");
Assertions.assertEquals(2, l);
Assertions.assertTrue(System.currentTimeMillis() - s > 2000);
} |
// Builds the externally visible base URI: takes the first acceptable value of the
// override header if present, else defaultUri, always normalized to end with "/".
public static URI buildExternalUri(@NotNull MultivaluedMap<String, String> httpHeaders, @NotNull URI defaultUri) {
Optional<URI> externalUri = Optional.empty();
final List<String> headers = httpHeaders.get(HttpConfiguration.OVERRIDE_HEADER);
if (headers != null && !headers.isEmpty()) {
externalUri = headers.stream()
.filter(s -> {
try {
if (Strings.isNullOrEmpty(s)) {
return false;
}
final URI uri = new URI(s);
// NOTE(review): relative URIs are deliberately accepted here (return true);
// only absolute URIs are restricted to http/https — confirm this is intended.
if (!uri.isAbsolute()) {
return true;
}
switch (uri.getScheme()) {
case "http":
case "https":
return true;
}
return false;
} catch (URISyntaxException e) {
// unparsable header values are silently skipped
return false;
}
})
.map(URI::create)
.findFirst();
}
final URI uri = externalUri.orElse(defaultUri);
// Make sure we return an URI object with a trailing slash
if (!uri.toString().endsWith("/")) {
return URI.create(uri.toString() + "/");
}
return uri;
} | @Test
public void buildEndpointUriReturnsFirstHeaderValueIfMultipleHeadersArePresent() throws Exception {
final MultivaluedMap<String, String> httpHeaders = new MultivaluedHashMap<>();
httpHeaders.put(HttpConfiguration.OVERRIDE_HEADER, ImmutableList.of("http://header1.example.com", "http://header2.example.com"));
final URI endpointUri = URI.create("http://graylog.example.com");
assertThat(RestTools.buildExternalUri(httpHeaders, endpointUri)).isEqualTo(URI.create("http://header1.example.com/"));
} |
// Creates an output stream at the entropy-injected version of the given path and
// returns both the stream and the processed path.
public static OutputStreamAndPath createEntropyAware(
FileSystem fs, Path path, WriteMode writeMode) throws IOException {
final Path processedPath = addEntropy(fs, path);
// create the stream on the original file system to let the safety net
// take its effect
final FSDataOutputStream out = fs.create(processedPath, writeMode);
return new OutputStreamAndPath(out, processedPath);
} | @Test
void testClassLoaderFixingFsWithoutSafeyNet() throws Exception {
final String entropyKey = "__ekey__";
final String entropyValue = "abc";
final File folder = TempDirUtils.newFolder(tempFolder);
final Path path = new Path(Path.fromLocalFile(folder), entropyKey + "/path/");
final Path pathWithEntropy = new Path(Path.fromLocalFile(folder), entropyValue + "/path/");
PluginFileSystemFactory pluginFsFactory =
PluginFileSystemFactory.of(new TestFileSystemFactory(entropyKey, entropyValue));
FileSystem testFs = pluginFsFactory.create(URI.create("test"));
OutputStreamAndPath streamAndPath =
EntropyInjector.createEntropyAware(testFs, path, WriteMode.NO_OVERWRITE);
assertThat(streamAndPath.path()).isEqualTo(pathWithEntropy);
} |
// Applies each configured regex in list order, replacing all matches of regexes[i]
// with replacements[i]; later patterns operate on the output of earlier ones.
@Override
public String processDoc(String doc) {
for (int i=0 ; i<regexes.size(); i++) {
doc = regexes.get(i).matcher(doc).replaceAll(replacements.get(i));
}
return doc;
} | @Test
public void testOrder() {
String doc = "sample document";
ArrayList<String> replacements = new ArrayList<String>();
replacements.add("*");
replacements.add("");
ArrayList<String> regexStrings = new ArrayList<String>();
regexStrings.add("[!A-Za-z]");
regexStrings.add("[\\p{Punct}]");
RegexPreprocessor regexPreprocessor = new RegexPreprocessor(regexStrings, replacements);
assertEquals(" ", regexPreprocessor.processDoc(doc));
} |
// Builds the query string for a token request: just the service/scope parameters for
// basic auth, plus OAuth2 refresh-token parameters when the credential is OAuth2.
@VisibleForTesting
String getAuthRequestParameters(
@Nullable Credential credential, Map<String, String> repositoryScopes) {
String serviceScope = getServiceScopeRequestParameters(repositoryScopes);
return isOAuth2Auth(credential)
? serviceScope
// https://github.com/GoogleContainerTools/jib/pull/1545
+ "&client_id=jib.da031fe481a93ac107a95a96462358f9"
+ "&grant_type=refresh_token&refresh_token="
// If OAuth2, credential.getPassword() is a refresh token.
+ Verify.verifyNotNull(credential).getPassword()
: serviceScope;
} | @Test
public void testAuthRequestParameters_oauth2() {
Credential credential = Credential.from("<token>", "oauth2_access_token");
Assert.assertEquals(
"service=someservice&scope=repository:someimage:scope"
+ "&client_id=jib.da031fe481a93ac107a95a96462358f9"
+ "&grant_type=refresh_token&refresh_token=oauth2_access_token",
registryAuthenticator.getAuthRequestParameters(
credential, Collections.singletonMap("someimage", "scope")));
} |
// Simple accessor for the isActive flag.
@Override
public boolean isActive() {
return isActive;
} | @Test(timeOut = 30000)
public void testCreateProducerTimeoutThenCreateSameNamedProducerShouldFail() throws Exception {
resetChannel();
setChannelConnected();
// Delay the topic creation in a deterministic way
CompletableFuture<Runnable> openTopicFuture = new CompletableFuture<>();
doAnswer(invocationOnMock -> {
openTopicFuture.complete(
() -> ((OpenLedgerCallback) invocationOnMock.getArguments()[2]).openLedgerComplete(ledgerMock,
null));
return null;
}).when(pulsarTestContext.getManagedLedgerFactory())
.asyncOpen(matches(".*success.*"), any(ManagedLedgerConfig.class),
any(OpenLedgerCallback.class), any(Supplier.class), any());
// In a create producer timeout from client side we expect to see this sequence of commands :
// 1. create producer
// 2. close producer (when the timeout is triggered, which may be before the producer was created on the broker
// 3. create producer (triggered by reconnection logic)
// Then, when another producer is created with the same name, it should fail. Because we only have one
// channel here, we just use a different producer id
// These operations need to be serialized, to allow the last create producer to finally succeed
// (There can be more create/close pairs in the sequence, depending on the client timeout
String producerName = "my-producer";
ByteBuf createProducer1 = Commands.newProducer(successTopicName, 1 /* producer id */, 1 /* request id */,
producerName, Collections.emptyMap(), false);
channel.writeInbound(createProducer1);
ByteBuf closeProducer = Commands.newCloseProducer(1 /* producer id */, 2 /* request id */);
channel.writeInbound(closeProducer);
ByteBuf createProducer2 = Commands.newProducer(successTopicName, 1 /* producer id */, 3 /* request id */,
producerName, Collections.emptyMap(), false);
channel.writeInbound(createProducer2);
// Complete the topic opening: It will make 2nd producer creation successful
openTopicFuture.get().run();
// Close succeeds
Object response = getResponse();
assertEquals(response.getClass(), CommandSuccess.class);
assertEquals(((CommandSuccess) response).getRequestId(), 2);
// 2nd producer will be successfully created as topic is open by then
response = getResponse();
assertEquals(response.getClass(), CommandProducerSuccess.class);
assertEquals(((CommandProducerSuccess) response).getRequestId(), 3);
// Send create command after getting the CommandProducerSuccess to ensure correct ordering
ByteBuf createProducer3 = Commands.newProducer(successTopicName, 2 /* producer id */, 4 /* request id */,
producerName, Collections.emptyMap(), false);
channel.writeInbound(createProducer3);
// 3nd producer will fail
response = getResponse();
assertEquals(response.getClass(), CommandError.class);
assertEquals(((CommandError) response).getRequestId(), 4);
assertTrue(channel.isActive());
channel.finish();
} |
// Matches the crash log against every known Rule and collects a Result for each rule
// whose pattern is found. NOTE(review): method name "anaylze" is a typo for "analyze";
// kept as-is because renaming would break existing callers.
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
} | @Test
public void outOfMemoryMC() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/out_of_memory.txt")),
CrashReportAnalyzer.Rule.OUT_OF_MEMORY);
} |
// Decodes a Bigtable cell's raw bytes into a Java value according to the Beam schema
// field type. Fixed-width numeric types validate the exact byte length first
// (checkArgument throws IllegalArgumentException with a type-specific message).
Object getCellValue(Cell cell, Schema.FieldType type) {
ByteString cellValue = cell.getValue();
int valueSize = cellValue.size();
switch (type.getTypeName()) {
case BOOLEAN:
checkArgument(valueSize == 1, message("Boolean", 1));
// any non-zero byte is true
return cellValue.toByteArray()[0] != 0;
case BYTE:
checkArgument(valueSize == 1, message("Byte", 1));
return cellValue.toByteArray()[0];
case INT16:
checkArgument(valueSize == 2, message("Int16", 2));
return Shorts.fromByteArray(cellValue.toByteArray());
case INT32:
checkArgument(valueSize == 4, message("Int32", 4));
return Ints.fromByteArray(cellValue.toByteArray());
case INT64:
checkArgument(valueSize == 8, message("Int64", 8));
return Longs.fromByteArray(cellValue.toByteArray());
case FLOAT:
checkArgument(valueSize == 4, message("Float", 4));
return Float.intBitsToFloat(Ints.fromByteArray(cellValue.toByteArray()));
case DOUBLE:
checkArgument(valueSize == 8, message("Double", 8));
return Double.longBitsToDouble(Longs.fromByteArray(cellValue.toByteArray()));
case DATETIME:
// datetime is stored as an ISO-8601 UTF-8 string, not as binary
return DateTime.parse(cellValue.toStringUtf8());
case STRING:
return cellValue.toStringUtf8();
case BYTES:
return cellValue.toByteArray();
case LOGICAL_TYPE:
String identifier = checkArgumentNotNull(type.getLogicalType()).getIdentifier();
throw new IllegalStateException("Unsupported logical type: " + identifier);
default:
throw new IllegalArgumentException(
String.format("Unsupported cell value type '%s'.", type.getTypeName()));
}
} | @Test
public void shouldFailParseDoubleTypeTooLong() {
byte[] value = new byte[10];
IllegalArgumentException exception =
assertThrows(
IllegalArgumentException.class, () -> PARSER.getCellValue(cell(value), DOUBLE));
checkMessage(exception.getMessage(), "Double has to be 8-bytes long bytearray");
} |
// Loads an instance's labels from the registry repository; returns an empty list when
// the node has no content. Unmarshals YAML into a raw Collection, hence the suppression.
@SuppressWarnings("unchecked")
public Collection<String> loadInstanceLabels(final String instanceId) {
String yamlContent = repository.query(ComputeNode.getInstanceLabelsNodePath(instanceId));
return Strings.isNullOrEmpty(yamlContent) ? new LinkedList<>() : YamlEngine.unmarshal(yamlContent, Collection.class);
} | @Test
void assertLoadInstanceLabels() {
InstanceMetaData instanceMetaData = new ProxyInstanceMetaData("foo_instance_id", 3307);
final String instanceId = instanceMetaData.getId();
new ComputeNodePersistService(repository).loadInstanceLabels(instanceId);
verify(repository).query(ComputeNode.getInstanceLabelsNodePath(instanceId));
} |
// Returns build info, bypassing the cached value whenever overrides are enabled so
// override properties are re-read on every call.
public static BuildInfo getBuildInfo() {
if (Overrides.isEnabled()) {
// never use cache when override is enabled -> we need to re-parse everything
Overrides overrides = Overrides.fromProperties();
return getBuildInfoInternalVersion(overrides);
}
return BUILD_INFO_CACHE;
} | @Test
public void testOverrideEdition() {
System.setProperty(HAZELCAST_INTERNAL_OVERRIDE_ENTERPRISE, "true");
BuildInfo buildInfo = BuildInfoProvider.getBuildInfo();
assertTrue(buildInfo.isEnterprise());
System.clearProperty(HAZELCAST_INTERNAL_OVERRIDE_ENTERPRISE);
} |
// Resolves a host via the pluggable HostResolver, filters to preferred address
// families, and logs the result at debug level. Propagates UnknownHostException.
static List<InetAddress> resolve(String host, HostResolver hostResolver) throws UnknownHostException {
InetAddress[] addresses = hostResolver.resolve(host);
List<InetAddress> result = filterPreferredAddresses(addresses);
if (log.isDebugEnabled())
log.debug("Resolved host {} as {}", host, result.stream().map(InetAddress::getHostAddress).collect(Collectors.joining(",")));
return result;
} | @Test
public void testResolveUnknownHostException() {
assertThrows(UnknownHostException.class,
() -> ClientUtils.resolve("some.invalid.hostname.foo.bar.local", hostResolver));
} |
// Converts the string value to the requested type via the static converters registry.
// Throws IllegalArgumentException for empty input or an unregistered target class.
// NOTE(review): converters.get(klass) is looked up twice; a single lookup into a local
// would be cleaner — left unchanged here.
@SuppressWarnings("unchecked")
public static <T> T convert(Class<T> klass, String value) {
if (Strings.isNullOrEmpty(value)) {
throw new IllegalArgumentException("Value must not be empty.");
}
if (Objects.isNull(converters.get(klass))) {
throw new IllegalArgumentException("No conversion supported for given class.");
}
return (T)converters.get(klass).apply(value);
} | @Test
public void testConvert() {
int intResult = PasswordParamConverter.convert(Integer.class, "8");
Assert.assertEquals(8, intResult);
String stringResult = PasswordParamConverter.convert(String.class, "test");
Assert.assertEquals("test", stringResult);
boolean booleanResult = PasswordParamConverter.convert(Boolean.class, "false");
Assert.assertEquals(false, booleanResult);
double doubleResult = PasswordParamConverter.convert(Double.class, "0.0012");
Assert.assertEquals(0.0012, doubleResult, 0);
} |
// Maps a lat/lon coordinate to its SRTM tile name "srtm_<lon>_<lat>" with two-digit
// zero-padded indices. The latInt decrement pulls coordinates that fall within
// invPrecision of a tile border back into the previous (southern-border) tile.
protected String getFileName(double lat, double lon) {
lon = 1 + (180 + lon) / LAT_DEGREE;
int lonInt = (int) lon;
lat = 1 + (60 - lat) / LAT_DEGREE;
int latInt = (int) lat;
if (Math.abs(latInt - lat) < invPrecision / LAT_DEGREE)
latInt--;
// replace String.format as it seems to be slow
// String.format("srtm_%02d_%02d", lonInt, latInt);
String str = "srtm_";
str += lonInt < 10 ? "0" : "";
str += lonInt;
str += latInt < 10 ? "_0" : "_";
str += latInt;
return str;
} | @Disabled
@Test
public void testGetEleVerticalBorder() {
// Border between the tiles srtm_39_04 and srtm_39_03
assertEquals("srtm_39_04", instance.getFileName(44.999999, 11.5));
assertEquals(5, instance.getEle(44.999999, 11.5), precision);
assertEquals("srtm_39_03", instance.getFileName(45.000001, 11.5));
assertEquals(6, instance.getEle(45.000001, 11.5), precision);
} |
// Factory for the Long sample-stddev UDAF. The aggregate struct tracks SUM, COUNT and
// M2 (sum of squared deviations); the lambdas supply the type-specific pieces
// (initial sum, schema, and the SUM/M2 update terms) to the shared implementation.
// NOTE(review): the merge lambda (agg1, agg2) uses integer division of SUM/COUNT —
// verify against getStdDevImplementation that this matches the intended parallel
// variance merge formula; cannot be confirmed from this block alone.
@UdafFactory(description = "Compute sample standard deviation of column with type Long.",
aggregateSchema = "STRUCT<SUM bigint, COUNT bigint, M2 double>")
public static TableUdaf<Long, Struct, Double> stdDevLong() {
return getStdDevImplementation(
0L,
STRUCT_LONG,
(agg, newValue) -> newValue + agg.getInt64(SUM),
(agg, newValue) ->
Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt64(SUM) + newValue)),
(agg1, agg2) ->
Double.valueOf(
agg1.getInt64(SUM) / agg1.getInt64(COUNT)
- agg2.getInt64(SUM) / agg2.getInt64(COUNT)),
(agg1, agg2) -> agg1.getInt64(SUM) + agg2.getInt64(SUM),
(agg, valueToRemove) -> agg.getInt64(SUM) - valueToRemove);
} | @Test
public void shouldUndoSummedLongs() {
final TableUdaf<Long, Struct, Double> udaf = stdDevLong();
Struct agg = udaf.initialize();
final Long[] values = new Long[] {1L, 2L, 3L, 4L, 5L};
for (final Long thisValue : values) {
agg = udaf.aggregate(thisValue, agg);
}
assertThat(agg.getInt64(COUNT), equalTo(5L));
assertThat(agg.getInt64(SUM), equalTo(15L));
assertThat(agg.getFloat64(M2), equalTo(10.0));
double standardDev = udaf.map(agg);
assertThat(standardDev, equalTo(2.5));
agg = udaf.undo(2L, agg);
assertThat(agg.getInt64(COUNT), equalTo(4L));
assertThat(agg.getInt64(SUM), equalTo(13L));
assertThat(agg.getFloat64(M2), equalTo(8.75));
standardDev = udaf.map(agg);
assertThat(standardDev, equalTo(2.9166666666666665));
} |
// Substring stream-rule matcher: true when the message field's string form contains
// the rule value, XOR-ed with the rule's inverted flag. A missing field matches only
// when the rule is inverted.
@Override
public boolean match(Message msg, StreamRule rule) {
final boolean inverted = rule.getInverted();
final Object field = msg.getField(rule.getField());
if (field != null) {
final String value = field.toString();
return inverted ^ value.contains(rule.getValue());
} else {
return inverted;
}
} | @Test
public void testSuccessfulMatch() {
msg.addField("something", "foobar");
StreamRuleMatcher matcher = getMatcher(rule);
assertTrue(matcher.match(msg, rule));
} |
// Appends a model to the end of the list and notifies the adapter of a single-item
// insertion. List notifications are paused around the add so only the explicit
// notifyItemRangeInserted fires.
protected void addModel(EpoxyModel<?> modelToAdd) {
int initialSize = models.size();
pauseModelListNotifications();
models.add(modelToAdd);
resumeModelListNotifications();
notifyItemRangeInserted(initialSize, 1);
} | @Test
public void testAllowSetSameModelIdAfterNotify() {
TestModel testModel = new TestModel();
testModel.id(100);
testAdapter.addModel(testModel);
testModel.id(100);
} |
// Convenience overload: parses the config-description string with defaults applied.
public static boolean parse(final String str, ResTable_config out) {
return parse(str, out, true);
} | @Test
public void parse_uiModeType_appliance() {
ResTable_config config = new ResTable_config();
ConfigDescription.parse("appliance", config);
assertThat(config.uiMode).isEqualTo(UI_MODE_TYPE_APPLIANCE);
} |
// Delegates snapshot creation to the underlying reservoir.
@Override
public Snapshot getSnapshot() {
return reservoir.getSnapshot();
} | @Test
public void returnsTheSnapshotFromTheReservoir() throws Exception {
final Snapshot snapshot = mock(Snapshot.class);
when(reservoir.getSnapshot()).thenReturn(snapshot);
assertThat(histogram.getSnapshot())
.isEqualTo(snapshot);
} |
// Extracts the offset-id bytes from the message buffer and renders them as a string.
public static String getOffsetId(ByteBuffer message) {
return UtilAll.bytes2string(getOffsetIdBuffer(message).array());
} | @Test
public void getOffsetIdTest() {
ByteBuffer buffer = buildMockedMessageBuffer();
InetSocketAddress inetSocketAddress = new InetSocketAddress("127.0.0.1", 65535);
ByteBuffer address = ByteBuffer.allocate(Long.BYTES);
address.put(inetSocketAddress.getAddress().getAddress(), 0, 4);
address.putInt(inetSocketAddress.getPort());
address.flip();
for (int i = 0; i < address.remaining(); i++) {
buffer.put(MessageFormatUtil.STORE_HOST_POSITION + i, address.get(i));
}
String excepted = MessageDecoder.createMessageId(
ByteBuffer.allocate(MessageFormatUtil.MSG_ID_LENGTH), address, 7);
String offsetId = MessageFormatUtil.getOffsetId(buffer);
Assert.assertEquals(excepted, offsetId);
} |
// Sanitizes every entry of a connector config. Uses HashMap (not Map.of/ImmutableMap)
// deliberately because connector configs may contain null values; a null input map
// yields an empty result.
public Map<String, Object> sanitizeConnectorConfig(@Nullable Map<String, Object> original) {
var result = new HashMap<String, Object>(); //null-values supporting map!
if (original != null) {
original.forEach((k, v) -> result.put(k, sanitize(k, v)));
}
return result;
} | @Test
void sanitizeConnectorConfigDoNotFailOnNullableValues() {
Map<String, Object> originalConfig = new HashMap<>();
originalConfig.put("password", "secret");
originalConfig.put("asIs", "normal");
originalConfig.put("nullVal", null);
var sanitizedConfig = new KafkaConfigSanitizer(true, List.of())
.sanitizeConnectorConfig(originalConfig);
assertThat(sanitizedConfig)
.hasSize(3)
.containsEntry("password", "******")
.containsEntry("asIs", "normal")
.containsEntry("nullVal", null);
} |
// Asynchronously resolves the customer id that owns the originator entity. A CUSTOMER
// originator is returned directly; USER/ASSET/DEVICE are looked up and mapped to their
// customer id; any other entity type fails the future with a TbNodeException.
public static ListenableFuture<CustomerId> findEntityIdAsync(TbContext ctx, EntityId originator) {
switch (originator.getEntityType()) {
case CUSTOMER:
return Futures.immediateFuture((CustomerId) originator);
case USER:
return toCustomerIdAsync(ctx, ctx.getUserService().findUserByIdAsync(ctx.getTenantId(), (UserId) originator));
case ASSET:
return toCustomerIdAsync(ctx, ctx.getAssetService().findAssetByIdAsync(ctx.getTenantId(), (AssetId) originator));
case DEVICE:
// device lookup is synchronous, so it is wrapped in an immediate future
return toCustomerIdAsync(ctx, Futures.immediateFuture(ctx.getDeviceService().findDeviceById(ctx.getTenantId(), (DeviceId) originator)));
default:
return Futures.immediateFailedFuture(new TbNodeException("Unexpected originator EntityType: " + originator.getEntityType()));
}
} | @Test
public void givenAssetEntityType_whenFindEntityIdAsync_thenOK() throws ExecutionException, InterruptedException {
// GIVEN
var asset = new Asset(new AssetId(UUID.randomUUID()));
var expectedCustomerId = new CustomerId(UUID.randomUUID());
asset.setCustomerId(expectedCustomerId);
when(ctxMock.getAssetService()).thenReturn(assetServiceMock);
doReturn(Futures.immediateFuture(asset)).when(assetServiceMock).findAssetByIdAsync(any(), any());
when(ctxMock.getDbCallbackExecutor()).thenReturn(DB_EXECUTOR);
// WHEN
var actualCustomerId = EntitiesCustomerIdAsyncLoader.findEntityIdAsync(ctxMock, asset.getId()).get();
// THEN
assertEquals(expectedCustomerId, actualCustomerId);
} |
// Runs authentication and writes a JSON body {"valid": <boolean>} with the outcome.
// Note the chain parameter is not invoked: this filter terminates the request.
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain filterChain) throws IOException {
boolean isAuthenticated = authenticate(request, response);
response.setContentType(MediaTypes.JSON);
try (JsonWriter jsonWriter = JsonWriter.of(response.getWriter())) {
jsonWriter.beginObject();
jsonWriter.prop("valid", isAuthenticated);
jsonWriter.endObject();
}
} | @Test
public void doFilter_whenNoForceAuthentication_shoudlReturnTrue() throws Exception {
settings.setProperty("sonar.forceAuthentication", "false");
underTest.doFilter(request, response, chain);
verifyResponseIsTrue();
} |
// True only when the job master service future completed successfully AND the process
// is still flagged as running; guarded by the shared lock for a consistent view.
@Override
public boolean isInitializedAndRunning() {
synchronized (lock) {
return jobMasterServiceFuture.isDone()
&& !jobMasterServiceFuture.isCompletedExceptionally()
&& isRunning;
}
} | @Test
void testIsNotInitialized() {
DefaultJobMasterServiceProcess serviceProcess =
createTestInstance(new CompletableFuture<>());
assertThat(serviceProcess.isInitializedAndRunning()).isFalse();
} |
// Records a fetch size sample into the bytesFetched sensor.
void recordBytesFetched(int bytes) {
bytesFetched.record(bytes);
} | @Test
public void testBytesFetched() {
shareFetchMetricsManager.recordBytesFetched(2);
time.sleep(metrics.config().timeWindowMs() + 1);
shareFetchMetricsManager.recordBytesFetched(10);
assertEquals(10, (double) getMetric(shareFetchMetricsRegistry.fetchSizeMax).metricValue());
assertEquals(6, (double) getMetric(shareFetchMetricsRegistry.fetchSizeAvg).metricValue(), EPSILON);
} |
// Provider identifier used for registry/lookup purposes.
@Override
public String name() {
return "consul";
} | @Test
public void name() {
assertEquals("consul", provider.name());
} |
// Validates a component/service name: non-empty, no underscores (invalid in DNS
// hostnames), length-limited when registry DNS is enabled, and finally checked
// against the shared namePattern. Throws IllegalArgumentException on any violation.
public static void validateNameFormat(String name,
org.apache.hadoop.conf.Configuration conf) {
if (StringUtils.isEmpty(name)) {
throw new IllegalArgumentException("Name can not be empty!");
}
// validate component name
if (name.contains("_")) {
throw new IllegalArgumentException(
"Invalid format: " + name
+ ", can not use '_', as DNS hostname does not allow '_'. Use '-' Instead. ");
}
boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED,
RegistryConstants.DEFAULT_DNS_ENABLED);
// DNS labels are capped at 63 characters (MAX_FQDN_LABEL_LENGTH)
if (dnsEnabled && name.length() > RegistryConstants.MAX_FQDN_LABEL_LENGTH) {
throw new IllegalArgumentException(String
.format("Invalid format %s, must be no more than 63 characters ",
name));
}
namePattern.validate(name);
} | @Test
public void testValidateCompName() {
String[] invalidNames = {
"EXAMPLE", // UPPER case not allowed
"example_app" // underscore not allowed.
};
for (String name : invalidNames) {
try {
ServiceApiUtil.validateNameFormat(name, new Configuration());
Assert.fail();
} catch (IllegalArgumentException ex) {
ex.printStackTrace();
}
}
} |
// Computes preferred executor locations per task group, sharing a partition-hash
// cache (keyed by spec id) across groups so repeated partitions hash consistently.
public static String[][] assignExecutors(
List<? extends ScanTaskGroup<?>> taskGroups, List<String> executorLocations) {
Map<Integer, JavaHash<StructLike>> partitionHashes = Maps.newHashMap();
String[][] locations = new String[taskGroups.size()][];
for (int index = 0; index < taskGroups.size(); index++) {
locations[index] = assign(taskGroups.get(index), executorLocations, partitionHashes);
}
return locations;
} | @Test
public void testFileScanTaskWithUnpartitionedDeletes() {
List<ScanTask> tasks1 =
ImmutableList.of(
new MockFileScanTask(
mockDataFile(Row.of()),
mockDeleteFiles(2, Row.of()),
SCHEMA,
PartitionSpec.unpartitioned()),
new MockFileScanTask(
mockDataFile(Row.of()),
mockDeleteFiles(2, Row.of()),
SCHEMA,
PartitionSpec.unpartitioned()),
new MockFileScanTask(
mockDataFile(Row.of()),
mockDeleteFiles(2, Row.of()),
SCHEMA,
PartitionSpec.unpartitioned()));
ScanTaskGroup<ScanTask> taskGroup1 = new BaseScanTaskGroup<>(tasks1);
List<ScanTask> tasks2 =
ImmutableList.of(
new MockFileScanTask(
mockDataFile(null),
mockDeleteFiles(2, null),
SCHEMA,
PartitionSpec.unpartitioned()),
new MockFileScanTask(
mockDataFile(null),
mockDeleteFiles(2, null),
SCHEMA,
PartitionSpec.unpartitioned()),
new MockFileScanTask(
mockDataFile(null),
mockDeleteFiles(2, null),
SCHEMA,
PartitionSpec.unpartitioned()));
ScanTaskGroup<ScanTask> taskGroup2 = new BaseScanTaskGroup<>(tasks2);
List<ScanTaskGroup<ScanTask>> taskGroups = ImmutableList.of(taskGroup1, taskGroup2);
String[][] locations = SparkPlanningUtil.assignExecutors(taskGroups, EXECUTOR_LOCATIONS);
// should not assign executors if the table is unpartitioned
assertThat(locations.length).isEqualTo(2);
assertThat(locations[0]).isEmpty();
assertThat(locations[1]).isEmpty();
} |
// Headless-mode startup sequence: load UDFs, ensure the processing-log topic exists,
// configure RocksDB, execute the queries file, then start the version checker with
// the KSQL config flattened into string Properties. Any failure is logged with the
// query file path and rethrown.
public void startAsync() {
try {
udfLoader.load();
ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
serviceContext.getTopicClient(),
processingLogConfig,
ksqlConfig);
// stream auto-create is a no-op in headless mode; warn instead of creating
if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
log.warn("processing log auto-create is enabled, but this is not supported "
+ "for headless mode.");
}
rocksDBConfigSetterHandler.accept(ksqlConfig);
processesQueryFile(readQueriesFile(queriesFile));
showWelcomeMessage();
final Properties properties = new Properties();
// Properties rejects nulls, so null-valued originals are skipped
ksqlConfig.originals().forEach((key, value) -> {
if (nonNull(value)) {
properties.put(key, value.toString());
}
});
versionChecker.start(KsqlModuleType.SERVER, properties);
} catch (final Exception e) {
log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
throw e;
}
} | @Test
public void shouldRunCsasStatements() {
// Given:
final PreparedStatement<?> csas = PreparedStatement.of("CSAS1",
new CreateStreamAsSelect(SOME_NAME, query, false, false, CreateSourceAsProperties.none()));
final ConfiguredStatement<?> configured = ConfiguredStatement
.of(csas, SessionConfig.of(ksqlConfig, emptyMap()));
givenQueryFileParsesTo(csas);
when(sandBox.execute(sandBoxServiceContext, configured))
.thenReturn(ExecuteResult.of(persistentQuery));
// When:
standaloneExecutor.startAsync();
// Then:
verify(ksqlEngine).execute(serviceContext, configured);
} |
// True when either the topic is a system topic or the group is a system group.
public static boolean isSystem(String topic, String group) {
return TopicValidator.isSystemTopic(topic) || isSystemGroup(group);
} | @Test
public void testIsSystem_EmptyTopicAndGroup_ReturnsFalse() {
String topic = "";
String group = "";
boolean result = BrokerMetricsManager.isSystem(topic, group);
assertThat(result).isFalse();
} |
// Best-effort cleanup: closes the producer if one was created; close failures are
// logged but never propagated from destroy().
@Override
public void destroy() {
if (this.producer != null) {
try {
this.producer.close();
} catch (Exception e) {
log.error("Failed to close producer during destroy()", e);
}
}
} | @Test
public void givenProducerIsNull_whenDestroy_thenDoNothing() {
node.destroy();
then(producerMock).shouldHaveNoInteractions();
} |
// Tracks newly created sessions for monitoring: increments the session counter and
// registers the session, while de-duplicating the double sessionCreated events seen
// with Tomcat authentication and cluster activation (see comments below).
@Override
public void sessionCreated(HttpSessionEvent event) {
if (!instanceEnabled) {
return;
}
// to be notified of passivations and activations, we register an HttpSessionActivationListener (this)
final HttpSession session = event.getSession();
// Since tomcat 6.0.21, because of https://issues.apache.org/bugzilla/show_bug.cgi?id=45255
// when tomcat authentication is used, sessionCreated is called twice for 1 session
// and each time with different ids, then sessionDestroyed is called once.
// So we do not count the 2nd sessionCreated event and we remove the id of the first event.
// And (issue #795), in Tomcat's cluster after one instance restart
// sessions are synced with sessionDidActivate+sessionCreated
// so do not increment count for sessionCreated when session.getAttribute(SESSION_ACTIVATION_KEY) != null
// (but not == this because of deserialization)
if (session.getAttribute(SESSION_ACTIVATION_KEY) != null) {
// if the map of sessions by id contains a session whose key
// no longer equals its current id, remove it from the map
// (it will be put back into the map with its new id below)
removeSessionsWithChangedId();
} else {
session.setAttribute(SESSION_ACTIVATION_KEY, this);
// for getSessionCount
SESSION_COUNT.incrementAndGet();
}
// for invalidateAllSession
addSession(session);
} | @Test
public void testSessionCreated() {
final HttpSessionEvent sessionEvent = createSessionEvent();
sessionListener.sessionCreated(sessionEvent);
if (SessionListener.getSessionCount() != 1) {
fail("sessionCreated");
}
if (SessionListener.getAllSessionsInformations().isEmpty()) {
fail("sessionCreated");
}
sessionListener.sessionCreated(sessionEvent);
if (SessionListener.getSessionCount() != 1) {
fail("sessionCreated");
}
} |
// Single duty-cycle iteration of the driver conductor: process timers, receive client
// commands (unless an async command is already in flight), drain the command queue,
// track stream positions, run name resolution, and free end-of-life resources.
// Returns the total work count so the idle strategy can back off when zero.
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
} | @Test
void shouldBeAbleToAddAndRemoveSingleSubscription()
{
final long id = driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_1);
driverProxy.removeSubscription(id);
driverConductor.doWork();
driverConductor.doWork();
verify(receiverProxy).registerReceiveChannelEndpoint(any());
verify(receiverProxy).closeReceiveChannelEndpoint(any());
} |
// Admin endpoint: creates a namespace after verifying the caller is an admin.
// Returns 201 Created with a Location header pointing at the new namespace resource;
// ErrorResultException (including failed admin check) is mapped to its error response.
@PostMapping(
path = "/admin/create-namespace",
consumes = MediaType.APPLICATION_JSON_VALUE,
produces = MediaType.APPLICATION_JSON_VALUE
)
public ResponseEntity<ResultJson> createNamespace(@RequestBody NamespaceJson namespace) {
try {
admins.checkAdminUser();
var json = admins.createNamespace(namespace);
var serverUrl = UrlUtil.getBaseUrl();
var url = UrlUtil.createApiUrl(serverUrl, "admin", "namespace", namespace.name);
return ResponseEntity.status(HttpStatus.CREATED)
.location(URI.create(url))
.body(json);
} catch (ErrorResultException exc) {
return exc.toResponseEntity();
}
} | @Test
public void testCreateNamespace() throws Exception {
mockAdminUser();
mockMvc.perform(post("/admin/create-namespace")
.contentType(MediaType.APPLICATION_JSON)
.content(namespaceJson(n -> { n.name = "foobar"; }))
.with(user("admin_user").authorities(new SimpleGrantedAuthority(("ROLE_ADMIN"))))
.with(csrf().asHeader()))
.andExpect(status().isCreated())
.andExpect(redirectedUrl("http://localhost/admin/namespace/foobar"))
.andExpect(content().json(successJson("Created namespace foobar")));
} |
/**
 * Decides whether this read operation must be parked until more items arrive.
 * <p>
 * Lazily initializes the result set on the first invocation, reads as many items as are
 * currently available in the ring buffer, and returns {@code true} only while fewer than
 * {@code minSize} items have been accumulated.
 *
 * @return {@code true} if the operation should wait for more items, {@code false} if it can complete
 */
@Override
public boolean shouldWait() {
    RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
    // First call: lazily create the result set and position at the requested start sequence.
    if (resultSet == null) {
        resultSet = new ReadResultSetImpl<>(minSize, maxSize, getNodeEngine().getSerializationService(), filter);
        sequence = startSequence;
    }
    // Ring buffer container does not exist (yet): wait only if a minimum number of items is required.
    if (ringbuffer == null) {
        return minSize > 0;
    }
    sequence = ringbuffer.clampReadSequenceToBounds(sequence);
    // No minimum requested: read whatever is available and complete immediately.
    if (minSize == 0) {
        if (sequence < ringbuffer.tailSequence() + 1) {
            readMany(ringbuffer);
        }
        return false;
    }
    if (resultSet.isMinSizeReached()) {
        // enough items have been read, we are done.
        return false;
    }
    if (sequence == ringbuffer.tailSequence() + 1) {
        // the sequence is not readable
        return true;
    }
    readMany(ringbuffer);
    // Keep waiting only if the minimum has still not been reached after this read.
    return !resultSet.isMinSizeReached();
} | @Test
public void whenMinimumNumberOfItemsNotAvailable() {
long startSequence = ringbuffer.tailSequence() + 1;
ReadManyOperation op = getReadManyOperation(startSequence, 3, 3, null);
assertTrue(op.shouldWait());
assertEquals(startSequence, op.sequence);
assertTrue(getReadResultSet(op).isEmpty());
ringbuffer.add("item1");
assertTrue(op.shouldWait());
ReadResultSetImpl response = getReadResultSet(op);
assertEquals(startSequence + 1, op.sequence);
assertEquals(asList("item1"), response);
assertEquals(1, response.getNextSequenceToReadFrom());
ringbuffer.add("item2");
assertTrue(op.shouldWait());
assertEquals(startSequence + 2, op.sequence);
assertEquals(asList("item1", "item2"), response);
assertEquals(2, response.getNextSequenceToReadFrom());
ringbuffer.add("item3");
assertFalse(op.shouldWait());
assertEquals(startSequence + 3, op.sequence);
assertEquals(asList("item1", "item2", "item3"), response);
assertEquals(3, response.getNextSequenceToReadFrom());
} |
/**
 * Registers the configured HTTP and servlet filters on the {@code MasterServletFilter},
 * if it has already been instantiated by the servlet container.
 */
@Override
public void start() {
    final MasterServletFilter filter = MasterServletFilter.getInstance();
    if (filter == null) {
        return;
    }
    // Probably a database upgrade. MasterSlaveFilter was instantiated by the servlet container
    // while spring was not completely up.
    // See https://jira.sonarsource.com/browse/SONAR-3612
    filter.initHttpFilters(Arrays.asList(httpFilters));
    filter.initServletFilters(Arrays.asList(servletFilters));
} | @Test
public void filters_should_be_optional() {
    MasterServletFilter.setInstance(mock(MasterServletFilter.class));
    // Starting with no filters configured must not throw.
    new RegisterServletFilters().start();
    // do not fail
    verify(MasterServletFilter.getInstance()).initHttpFilters(anyList());
    verify(MasterServletFilter.getInstance()).initServletFilters(anyList());
}
/**
 * When {@code true}, configures the parser to extract only the XFA content; applied to
 * this parser's default configuration.
 *
 * @param ifXFAExtractOnlyXFA whether to restrict extraction to XFA content
 */
@Field
public void setIfXFAExtractOnlyXFA(boolean ifXFAExtractOnlyXFA) {
    defaultConfig.setIfXFAExtractOnlyXFA(ifXFAExtractOnlyXFA);
} | @Test
public void testXFAOnly() throws Exception {
ParseContext context = new ParseContext();
PDFParserConfig config = new PDFParserConfig();
config.setIfXFAExtractOnlyXFA(true);
context.set(PDFParserConfig.class, config);
String xml = getXML("testPDF_XFA_govdocs1_258578.pdf", context).xml;
assertContains("<body><div class=\"xfa_content\">", xml);
assertContains("<li fieldName=\"Room_1\">Room [1]: my_room1</li>", xml);
assertNotContained("Mount Rushmore National Memorial", xml);
} |
/**
 * Returns the connector metadata for the given catalog, or empty if it cannot be resolved.
 *
 * @param catalogName name of the catalog to look up
 * @return the connector metadata, if present
 */
public Optional<ConnectorMetadata> getOptionalMetadata(String catalogName) {
    // Delegate to the two-argument overload, resolving the current query id first.
    return getOptionalMetadata(getOptionalQueryID(), catalogName);
} | @Test
public void testGetOptionalMetadata() {
MetadataMgr metadataMgr = GlobalStateMgr.getCurrentState().getMetadataMgr();
Optional<ConnectorMetadata> metadata = metadataMgr.getOptionalMetadata("hive_catalog");
Assert.assertTrue(metadata.isPresent());
metadata = metadataMgr.getOptionalMetadata("hive_catalog_not_exist");
Assert.assertFalse(metadata.isPresent());
} |
/**
 * Returns the proxy at the current index, lazily creating it if it has not been created
 * yet. Synchronized so concurrent callers observe a consistent current index and at most
 * one proxy is created per entry.
 */
@Override
public synchronized ProxyInfo<T> getProxy() {
    NNProxyInfo<T> current = proxies.get(currentProxyIndex);
    return createProxyIfNeeded(current);
} | @Test
public void testRandomGetProxy() throws Exception {
final AtomicInteger nn1Count = new AtomicInteger(0);
final AtomicInteger nn2Count = new AtomicInteger(0);
final AtomicInteger nn3Count = new AtomicInteger(0);
Map<InetSocketAddress, ClientProtocol> proxyMap = new HashMap<>();
final ClientProtocol nn1Mock = mock(ClientProtocol.class);
when(nn1Mock.getStats()).thenAnswer(createAnswer(nn1Count, 1));
proxyMap.put(ns2nn1, nn1Mock);
final ClientProtocol nn2Mock = mock(ClientProtocol.class);
when(nn2Mock.getStats()).thenAnswer(createAnswer(nn2Count, 2));
proxyMap.put(ns2nn2, nn2Mock);
final ClientProtocol nn3Mock = mock(ClientProtocol.class);
when(nn3Mock.getStats()).thenAnswer(createAnswer(nn3Count, 3));
proxyMap.put(ns2nn3, nn3Mock);
for (int i = 0; i < NUM_ITERATIONS; i++) {
ConfiguredFailoverProxyProvider<ClientProtocol> provider =
new ConfiguredFailoverProxyProvider<>(conf, ns2Uri,
ClientProtocol.class, createFactory(proxyMap));
ClientProtocol proxy = provider.getProxy().proxy;
proxy.getStats();
}
assertTrue(nn1Count.get() < NUM_ITERATIONS && nn1Count.get() > 0);
assertTrue(nn2Count.get() < NUM_ITERATIONS && nn2Count.get() > 0);
assertTrue(nn3Count.get() < NUM_ITERATIONS && nn3Count.get() > 0);
assertEquals(NUM_ITERATIONS,
nn1Count.get() + nn2Count.get() + nn3Count.get());
} |
/**
 * Returns all descendant departments of the given department id, traversed level by
 * level (breadth-first). The root department itself is not included.
 *
 * @param id id of the root department
 * @return all child departments, in level order
 */
@Override
public List<DeptDO> getChildDeptList(Long id) {
    List<DeptDO> children = new LinkedList<>();
    // Traverse one hierarchy level per iteration.
    Collection<Long> parentIds = Collections.singleton(id);
    for (int i = 0; i < Short.MAX_VALUE; i++) { // Short.MAX_VALUE bounds the loop to avoid an infinite loop on buggy (e.g. cyclic) data
        // Query all child departments of the current level.
        List<DeptDO> depts = deptMapper.selectListByParentId(parentIds);
        // 1. If there are no children, stop traversing.
        if (CollUtil.isEmpty(depts)) {
            break;
        }
        // 2. Otherwise collect them and descend to the next level.
        children.addAll(depts);
        parentIds = convertSet(depts, DeptDO::getId);
    }
    return children;
} | @Test
public void testGetChildDeptList() {
// mock 数据(1 级别子节点)
DeptDO dept1 = randomPojo(DeptDO.class, o -> o.setName("1"));
deptMapper.insert(dept1);
DeptDO dept2 = randomPojo(DeptDO.class, o -> o.setName("2"));
deptMapper.insert(dept2);
// mock 数据(2 级子节点)
DeptDO dept1a = randomPojo(DeptDO.class, o -> o.setName("1-a").setParentId(dept1.getId()));
deptMapper.insert(dept1a);
DeptDO dept2a = randomPojo(DeptDO.class, o -> o.setName("2-a").setParentId(dept2.getId()));
deptMapper.insert(dept2a);
// 准备参数
Long id = dept1.getParentId();
// 调用
List<DeptDO> result = deptService.getChildDeptList(id);
// 断言
assertEquals(result.size(), 2);
assertPojoEquals(dept1, result.get(0));
assertPojoEquals(dept1a, result.get(1));
} |
/**
 * Ensures this quantity's value is not negative.
 *
 * @throws IllegalStateException if the value is below zero
 */
public void assertNonNegative() {
    if (value < 0) {
        throw new IllegalStateException("non negative value required");
    }
} | @Test()
// Boundary case: a quantity of exactly zero must be accepted as non-negative.
public void zeroQuantity_passesOnCheckForNonNegative() throws Exception {
    Quantity<Metrics> zero = new Quantity<Metrics>(0, Metrics.cm);
    zero.assertNonNegative(); // must not throw
}
/**
 * Uploads the given local file in chunks of {@code chunksize} bytes, running up to
 * {@code concurrency} chunk uploads concurrently on a dedicated thread pool.
 * <p>
 * Creates (or, for existing files, updates) the remote resource for a chunked upload,
 * submits one task per chunk, computes the cdash64 checksum (SHA-256 over each chunk's
 * hash and length, in part order), finalizes the upload, and verifies the checksum
 * returned by the server. For client-side encrypted files a checksum mismatch is only
 * logged, not raised.
 *
 * @return chunk metadata for the uploaded file (resource id, total size, cdash64)
 * @throws ChecksumException on checksum mismatch or when SHA-256 is unavailable
 * @throws BackgroundException on any other failure
 */
@Override
public EueWriteFeature.Chunk upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                                    final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        final List<Future<EueWriteFeature.Chunk>> parts = new ArrayList<>();
        long offset = 0;
        long remaining = status.getLength();
        final String resourceId;
        final String uploadUri;
        // Reuse the existing resource for overwrites; otherwise create a new chunked-upload resource.
        if(status.isExists()) {
            resourceId = fileid.getFileId(file);
            uploadUri = EueUploadHelper.updateResource(session, resourceId, status, UploadType.CHUNKED).getUploadURI();
        }
        else {
            final ResourceCreationResponseEntry uploadResourceCreationResponseEntry = EueUploadHelper.
                    createResource(session, fileid.getFileId(file.getParent()), file.getName(),
                            status, UploadType.CHUNKED);
            resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(uploadResourceCreationResponseEntry.getHeaders().getLocation());
            uploadUri = uploadResourceCreationResponseEntry.getEntity().getUploadURI();
        }
        // Submit one upload task per chunk; part numbers start at 1.
        for(int partNumber = 1; remaining > 0; partNumber++) {
            final long length = Math.min(chunksize, remaining);
            parts.add(this.submit(pool, file, local, throttle, listener, status,
                    uploadUri, resourceId, partNumber, offset, length, callback));
            remaining -= length;
            offset += length;
        }
        // Checksums for uploaded segments
        final List<EueWriteFeature.Chunk> chunks = Interruptibles.awaitAll(parts);
        // Full size of file
        final long size = status.getOffset() + status.getLength();
        final MessageDigest messageDigest = MessageDigest.getInstance("SHA-256");
        // Fold each chunk's hash and length into the digest, ordered by part number.
        chunks.stream().sorted(Comparator.comparing(EueWriteFeature.Chunk::getPartnumber)).forEach(chunk -> {
            try {
                messageDigest.update(Hex.decodeHex(chunk.getChecksum().hash));
            }
            catch(DecoderException e) {
                log.error(String.format("Failure %s decoding hash %s", e, chunk.getChecksum()));
            }
            messageDigest.update(ChunkListSHA256ChecksumCompute.intToBytes(chunk.getLength().intValue()));
        });
        final String cdash64 = Base64.encodeBase64URLSafeString(messageDigest.digest());
        final EueUploadHelper.UploadResponse completedUploadResponse = new EueMultipartUploadCompleter(session)
                .getCompletedUploadResponse(uploadUri, size, cdash64);
        // Verify the locally computed checksum against the one echoed back by the server.
        if(!StringUtils.equals(cdash64, completedUploadResponse.getCdash64())) {
            if(file.getType().contains(Path.Type.encrypted)) {
                log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
            }
            else {
                throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
                        MessageFormat.format("Mismatch between {0} hash {1} of uploaded data and ETag {2} returned by the server",
                                HashAlgorithm.cdash64, cdash64, completedUploadResponse.getCdash64()));
            }
        }
        final EueWriteFeature.Chunk object = new EueWriteFeature.Chunk(resourceId, size, cdash64);
        // Mark parent status as complete
        status.withResponse(new EueAttributesAdapter().toAttributes(object)).setComplete();
        return object;
    }
    catch(NoSuchAlgorithmException e) {
        throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e);
    }
    finally {
        // Cancel future tasks
        pool.shutdown(false);
    }
} | @Test
public void testUploadLargeFileInChunks() throws Exception {
final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
final EueLargeUploadService s = new EueLargeUploadService(session, fileid, new EueWriteFeature(session, fileid));
final Path container = new EueDirectoryFeature(session, fileid).mkdir(new Path(
new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
final String name = new AlphanumericRandomStringService().random();
final Path file = new Path(container, name, EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), name);
final byte[] content = RandomUtils.nextBytes(5242881);
IOUtils.write(content, local.getOutputStream(false));
final TransferStatus status = new TransferStatus();
status.setLength(content.length);
final BytecountStreamListener count = new BytecountStreamListener();
final EueWriteFeature.Chunk uploadResponse = s.upload(file, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), count, status, new DisabledConnectionCallback());
assertNotNull(uploadResponse.getCdash64());
assertEquals(content.length, count.getSent());
assertEquals(PathAttributes.EMPTY, status.getResponse());
assertTrue(status.isComplete());
assertTrue(new EueFindFeature(session, fileid).find(file));
assertEquals(content.length, new EueAttributesFinderFeature(session, fileid).find(file).getSize());
final byte[] compare = new byte[content.length];
IOUtils.readFully(new EueReadFeature(session, fileid).read(file, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), compare);
assertArrayEquals(content, compare);
new EueDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
} |
/**
 * Sends a request over the stream, blocking until the request observer is ready or the
 * given timeout elapses.
 *
 * @param request the request to send
 * @param timeoutMs maximum time in milliseconds to wait for the stream to become ready
 * @throws CancelledException if the stream is already closed/cancelled or the wait is interrupted
 * @throws DeadlineExceededException if the stream does not become ready within the timeout
 * @throws IOException if an error occurred on the stream
 */
public void send(ReqT request, long timeoutMs) throws IOException {
    // Fail fast if the stream has already been closed or cancelled by either side.
    if (mClosed || mCanceled || mClosedFromRemote) {
        throw new CancelledException(formatErrorMessage(
            "Failed to send request %s: stream is already closed or cancelled. clientClosed: %s "
                + "clientCancelled: %s serverClosed: %s",
            LogUtils.truncateMessageLineLength(request), mClosed, mCanceled, mClosedFromRemote));
    }
    try (LockResource ignored = new LockResource(mLock)) {
        long startMs = System.currentTimeMillis();
        // Spin/wait under the lock until the observer is ready, the deadline passes, or an error is raised.
        while (true) {
            checkError();
            if (mRequestObserver.isReady()) {
                break;
            }
            long waitedForMs = System.currentTimeMillis() - startMs;
            if (waitedForMs >= timeoutMs) {
                throw new DeadlineExceededException(formatErrorMessage(
                    "Timeout sending request %s after %dms. clientClosed: %s clientCancelled: %s "
                        + "serverClosed: %s",
                    LogUtils.truncateMessageLineLength(request), timeoutMs, mClosed, mCanceled,
                    mClosedFromRemote));
            }
            try {
                // Wait for a minute max
                long awaitMs = Math.min(timeoutMs - waitedForMs, Constants.MINUTE_MS);
                if (!mReadyOrFailed.await(awaitMs, TimeUnit.MILLISECONDS)) {
                    // Log a warning before looping again
                    LOG.warn(
                        "Stream is not ready for client to send request, will wait again. totalWaitMs: {} "
                            + "clientClosed: {} clientCancelled: {} serverClosed: {} description: {}",
                        System.currentTimeMillis() - startMs, mClosed, mCanceled, mClosedFromRemote,
                        mDescription);
                }
            } catch (InterruptedException e) {
                // Preserve the interrupt flag before surfacing the cancellation.
                Thread.currentThread().interrupt();
                throw new CancelledException(
                    formatErrorMessage("Failed to send request %s: interrupted while waiting for server.",
                        LogUtils.truncateMessageLineLength(request)), e);
            }
        }
    }
    // Readiness was confirmed under the lock; the actual send happens after releasing it.
    mRequestObserver.onNext(request);
} | @Test
public void send() throws Exception {
    WriteRequest request = WriteRequest.newBuilder().build();
    mStream.send(request, TIMEOUT);
    // The request must be forwarded to the underlying request observer.
    verify(mRequestObserver).onNext(request);
}
/**
 * @return {@code true} if the given namespace exists in this JDBC catalog
 */
@Override
public boolean namespaceExists(Namespace namespace) {
    return JdbcUtil.namespaceExists(catalogName, connections, namespace);
} | @Test
public void testCreateTableInNonExistingNamespace() {
try (JdbcCatalog jdbcCatalog = initCatalog("non_strict_jdbc_catalog", ImmutableMap.of())) {
Namespace namespace = Namespace.of("test\\D_b%", "ns1", "ns2");
TableIdentifier identifier = TableIdentifier.of(namespace, "someTable");
assertThat(jdbcCatalog.namespaceExists(namespace)).isFalse();
assertThat(jdbcCatalog.tableExists(identifier)).isFalse();
// default=non-strict mode allows creating a table in a non-existing namespace
jdbcCatalog.createTable(identifier, SCHEMA, PARTITION_SPEC);
assertThat(jdbcCatalog.loadTable(identifier)).isNotNull();
}
} |
/**
 * Deserializes the given XML content into the config-for-edit model, invokes the optional
 * callback with it, then preprocesses and validates it to produce the effective config.
 *
 * @param content  XML configuration content
 * @param callback optional hook invoked with the raw deserialized config before validation; may be null
 * @return holder carrying both the validated config and the config-for-edit
 * @throws Exception if deserialization or validation fails
 */
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
    CruiseConfig configForEdit;
    CruiseConfig config;
    LOGGER.debug("[Config Save] Loading config holder");
    configForEdit = deserializeConfig(content);
    if (callback != null) callback.call(configForEdit);
    config = preprocessAndValidate(configForEdit);
    return new GoConfigHolder(config, configForEdit);
} | @Test
void shouldLoadRulesConfigWhereActionAndTypeHasWildcardForSchemaVersion124() throws Exception {
String content = config(
"""
<secretConfigs>
<secretConfig id="example" pluginId="vault_based_plugin">
<description>All secrets for env1</description>
<configuration>
<property>
<key>path</key>
<value>secret/dev/teamA</value>
</property>
</configuration>
<rules>
<deny action="*" type="environment">up42</deny> \s
<deny action="refer" type="*">up43</deny> \s
</rules>
</secretConfig>
</secretConfigs>""", CONFIG_SCHEMA_VERSION);
CruiseConfig config = xmlLoader.loadConfigHolder(content).config;
SecretConfig secretConfig = config.getSecretConfigs().find("example");
assertThat(secretConfig.getRules().first().action()).isEqualTo("*");
assertThat(secretConfig.getRules().get(1).type()).isEqualTo("*");
} |
/**
 * Increments this review's like counter by one.
 */
public void addLike() {
    likesCount = likesCount + 1;
} | @Test
void review_공감수를_증가할_수_있다() {
    // given: a review starting with zero likes
    Review review = Review.builder().likesCount(0).build();
    // when
    review.addLike();
    // then: the counter is incremented exactly once
    assertEquals(review.getLikesCount(), 1);
}
/**
 * Returns the given host and port as a normalized "host:port" string without resolving
 * the host (the host part is normalized via {@code unresolvedHostToNormalizedString}).
 *
 * @param host host name or IP address
 * @param port port number; must be within the valid port range
 * @return normalized host:port string
 * @throws IllegalArgumentException if the port is outside the valid range
 */
public static String unresolvedHostAndPortToNormalizedString(String host, int port) {
    // Fix: the precondition message previously ended with a stray comma.
    Preconditions.checkArgument(isValidHostPort(port), "Port is not within the valid range.");
    return unresolvedHostToNormalizedString(host) + ":" + port;
} | @Test
void testFormatAddress() {
assertThat(NetUtils.unresolvedHostAndPortToNormalizedString(null, 42))
.isEqualTo("127.0.0.1" + ":" + 42);
// IPv4
assertThat(NetUtils.unresolvedHostAndPortToNormalizedString("1.2.3.4", 42))
.isEqualTo("1.2.3.4" + ":" + 42);
// IPv6
assertThat(
NetUtils.unresolvedHostAndPortToNormalizedString(
"2001:0db8:85a3:0000:0000:8a2e:0370:7334", 42))
.isEqualTo("[2001:db8:85a3::8a2e:370:7334]:" + 42);
// [IPv6]
assertThat(
NetUtils.unresolvedHostAndPortToNormalizedString(
"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]", 42))
.isEqualTo("[2001:db8:85a3::8a2e:370:7334]:" + 42);
// Hostnames
assertThat(NetUtils.unresolvedHostAndPortToNormalizedString("somerandomhostname", 99))
.isEqualTo("somerandomhostname" + ":" + 99);
// Whitespace
assertThat(NetUtils.unresolvedHostAndPortToNormalizedString(" somerandomhostname ", 99))
.isEqualTo("somerandomhostname" + ":" + 99);
// Illegal hostnames
assertThatThrownBy(
() -> NetUtils.unresolvedHostAndPortToNormalizedString("illegalhost.", 42))
.isInstanceOf(IllegalConfigurationException.class);
assertThatThrownBy(
() ->
NetUtils.unresolvedHostAndPortToNormalizedString(
"illegalhost:fasf", 42))
.isInstanceOf(IllegalConfigurationException.class);
// Illegal port ranges
assertThatThrownBy(() -> NetUtils.unresolvedHostAndPortToNormalizedString("1.2.3.4", -1))
.isInstanceOf(Exception.class);
// lower case conversion of hostnames
assertThat(NetUtils.unresolvedHostAndPortToNormalizedString("CamelCaseHostName", 99))
.isEqualTo("camelcasehostname" + ":" + 99);
} |
/**
 * Builds a JTS {@code MultiLineString} from the outer way followed by any inner ways.
 *
 * @param outerWay  way providing the first line string
 * @param innerWays optional additional ways; may be null
 * @return the assembled multi-line string
 */
static MultiLineString buildMultiLineString(TDWay outerWay, List<TDWay> innerWays) {
    // Collect the outer ring first, then every inner ring in order.
    final List<LineString> rings = new ArrayList<>();
    rings.add(buildLineString(outerWay));
    if (innerWays != null) {
        for (TDWay innerWay : innerWays) {
            rings.add(buildLineString(innerWay));
        }
    }
    final LineString[] ringArray = rings.toArray(new LineString[0]);
    return GEOMETRY_FACTORY.createMultiLineString(ringArray);
} | @Test
public void testBuildValidMultiLineString() {
String testfile = "valid-multilinestring.wkt";
List<TDWay> ways = MockingUtils.wktMultiLineStringToWays(testfile);
MultiLineString mls = JTSUtils.buildMultiLineString(ways.get(0), ways.subList(1, ways.size()));
Geometry expected = MockingUtils.readWKTFile(testfile);
Assert.isTrue(mls.isValid());
Assert.equals(expected, mls);
} |
/**
 * Finds and executes all pipeline-breaker sub-trees of the given stage plan.
 *
 * @return the pipeline breaker results; an error result (with the exception wrapped in an
 *         error block) if execution failed; or {@code null} when the plan contains no
 *         pipeline breakers
 */
@Nullable
public static PipelineBreakerResult executePipelineBreakers(OpChainSchedulerService scheduler,
    MailboxService mailboxService, WorkerMetadata workerMetadata, StagePlan stagePlan,
    Map<String, String> opChainMetadata, long requestId, long deadlineMs) {
  PipelineBreakerContext pipelineBreakerContext = new PipelineBreakerContext();
  // Collect all pipeline-breaker nodes reachable from the plan root.
  PipelineBreakerVisitor.visitPlanRoot(stagePlan.getRootNode(), pipelineBreakerContext);
  if (!pipelineBreakerContext.getPipelineBreakerMap().isEmpty()) {
    try {
      // TODO: This PlanRequestContext needs to indicate it is a pre-stage opChain and only listens to pre-stage
      //     OpChain receive-mail callbacks.
      // see also: MailboxIdUtils TODOs, de-couple mailbox id from query information
      OpChainExecutionContext opChainExecutionContext =
          new OpChainExecutionContext(mailboxService, requestId, deadlineMs, opChainMetadata,
              stagePlan.getStageMetadata(), workerMetadata, null);
      return execute(scheduler, pipelineBreakerContext, opChainExecutionContext);
    } catch (Exception e) {
      LOGGER.error("Caught exception executing pipeline breaker for request: {}, stage: {}", requestId,
          stagePlan.getStageMetadata().getStageId(), e);
      // Surface the failure to the caller as an error block instead of rethrowing.
      return new PipelineBreakerResult(pipelineBreakerContext.getNodeIdMap(), Collections.emptyMap(),
          TransferableBlockUtils.getErrorTransferableBlock(e), null);
    }
  } else {
    return null;
  }
} | @Test
public void shouldReturnWhenAnyPBReturnsEmpty() {
MailboxReceiveNode mailboxReceiveNode1 = getPBReceiveNode(1);
MailboxReceiveNode incorrectlyConfiguredMailboxNode = getPBReceiveNode(3);
JoinNode joinNode = new JoinNode(0, DATA_SCHEMA, PlanNode.NodeHint.EMPTY,
List.of(mailboxReceiveNode1, incorrectlyConfiguredMailboxNode), JoinRelType.INNER, List.of(0), List.of(0),
List.of());
StagePlan stagePlan = new StagePlan(joinNode, _stageMetadata);
// when
when(_mailboxService.getReceivingMailbox(MAILBOX_ID_1)).thenReturn(_mailbox1);
when(_mailboxService.getReceivingMailbox(MAILBOX_ID_2)).thenReturn(_mailbox2);
Object[] row1 = new Object[]{1, 1};
Object[] row2 = new Object[]{2, 3};
when(_mailbox1.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row1),
TransferableBlockTestUtils.getEndOfStreamTransferableBlock(1));
when(_mailbox2.poll()).thenReturn(OperatorTestUtil.block(DATA_SCHEMA, row2),
TransferableBlockTestUtils.getEndOfStreamTransferableBlock(1));
PipelineBreakerResult pipelineBreakerResult =
PipelineBreakerExecutor.executePipelineBreakers(_scheduler, _mailboxService, _workerMetadata, stagePlan,
ImmutableMap.of(), 0, Long.MAX_VALUE);
// then
// should pass when one PB returns result, the other returns empty.
Assert.assertNotNull(pipelineBreakerResult);
Assert.assertEquals(pipelineBreakerResult.getResultMap().size(), 2);
Assert.assertEquals(pipelineBreakerResult.getResultMap().get(0).size(), 1);
Assert.assertEquals(pipelineBreakerResult.getResultMap().get(1).size(), 0);
Assert.assertNotNull(pipelineBreakerResult.getStageQueryStats());
} |
/**
 * Returns a handle to the metric with the given name, using {@code Unit.COUNT} as its unit.
 *
 * @param name the metric name
 * @return the metric handle
 */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
} | @Test
public void when_jetMetricNameIsUsed_then_itIsNotOverwritten() {
Long[] items = {0L, 1L, 2L, 3L, 4L};
pipeline.readFrom(TestSources.items(items))
.filter(l -> {
Metrics.metric("emittedCount").increment(1000);
return true;
})
.writeTo(Sinks.noop());
Job job = runPipeline(pipeline.toDag());
new JobMetricsChecker(job, MeasurementPredicates.tagValueEquals("user", "true").negate())
.assertSummedMetricValue("emittedCount", 10);
new JobMetricsChecker(job)
.assertSummedMetricValue("emittedCount", 10 + items.length * 1000);
} |
/**
 * Creates a failed {@code Result} carrying the default service error code and message
 * from {@code ErrorCodeEnum.SERVICE_ERROR}.
 *
 * @return the failure result
 */
public static Result<Void> failure() {
    return failure(ErrorCodeEnum.SERVICE_ERROR.getCode(), ErrorCodeEnum.SERVICE_ERROR.getMessage());
} | @Test
public void testFailure2() {
String code = "500";
String msg = "message";
ErrorCode errorCode = new ErrorCode() {
@Override
public String getCode() {
return code;
}
@Override
public String getMessage() {
return msg;
}
};
Assert.isTrue(code.equals(Results.failure(errorCode).getCode()));
Assert.isTrue(msg.equals(Results.failure(errorCode).getMessage()));
} |
/**
 * Indexes all authorizations for the given uninitialized index types at startup.
 * <p>
 * NOTE(review): currently loads every authorization row into memory at once; see the
 * TODO below about scrolling database rows instead.
 */
@Override
public void indexOnStartup(Set<IndexType> uninitializedIndexTypes) {
    // TODO do not load everything in memory. Db rows should be scrolled.
    List<IndexPermissions> authorizations = getAllAuthorizations();
    Stream<AuthorizationScope> scopes = getScopes(uninitializedIndexTypes);
    index(authorizations, scopes, Size.LARGE);
} | @Test
public void indexOnStartup_grants_access_to_group_on_private_project() {
ProjectDto project = createAndIndexPrivateProject();
UserDto user1 = db.users().insertUser();
UserDto user2 = db.users().insertUser();
UserDto user3 = db.users().insertUser();
GroupDto group1 = db.users().insertGroup();
GroupDto group2 = db.users().insertGroup();
db.users().insertEntityPermissionOnGroup(group1, USER, project);
db.users().insertEntityPermissionOnGroup(group2, ADMIN, project);
indexOnStartup();
// anonymous
verifyAnyoneNotAuthorized(project);
// group1 has access
verifyAuthorized(project, user1, group1);
// group2 has not access (only USER permission is accepted)
verifyNotAuthorized(project, user2, group2);
// user3 is not in any group
verifyNotAuthorized(project, user3);
} |
/**
 * Returns the property overrides supplied with the request (serialized as
 * "streamsProperties"), with values coerced to their expected types.
 */
@JsonProperty("streamsProperties")
public Map<String, Object> getConfigOverrides() {
    return PropertiesUtil.coerceTypes(configOverrides, false);
} | @Test
public void shouldHandleShortProperties() {
// Given:
final String jsonRequest = "{"
+ "\"ksql\":\"sql\","
+ "\"streamsProperties\":{"
+ "\"" + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG + "\":\"earliest\""
+ "}}";
// When:
final KsqlRequest request = deserialize(jsonRequest);
// Then:
assertThat(request.getConfigOverrides().get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), equalTo("earliest"));
} |
/**
 * Finds the typed SPI implementation matching the given type, using empty properties.
 *
 * @param serviceInterface SPI interface to load
 * @param type             implementation type key
 * @param <T>              SPI type
 * @return matched service instance
 */
public static <T extends TypedSPI> T getService(final Class<T> serviceInterface, final Object type) {
    return getService(serviceInterface, type, new Properties());
} | @Test
void assertGetServiceWithProperties() {
    // Loading by type key "TYPED.FIXTURE" with property key=1 must propagate the property value.
    assertThat(((TypedSPIFixtureImpl) TypedSPILoader.getService(TypedSPIFixture.class, "TYPED.FIXTURE", PropertiesBuilder.build(new Property("key", "1")))).getValue(), is("1"));
}
/**
 * @return whether prediction is currently enabled
 */
protected boolean isPredictionOn() {
    return mPredictionOn;
} | @Test
@LooperMode(LooperMode.Mode.LEGACY) /*sensitive to animations*/
public void testClickingCancelPredicationHappyPath() {
TestRxSchedulers.drainAllTasks();
TestRxSchedulers.foregroundAdvanceBy(10000);
final KeyboardViewContainerView.StripActionProvider provider =
((AnySoftKeyboardSuggestions) mAnySoftKeyboardUnderTest).mCancelSuggestionsAction;
View rootActionView =
provider
.inflateActionView(mAnySoftKeyboardUnderTest.getInputViewContainer())
.findViewById(R.id.close_suggestions_strip_root);
final View.OnClickListener onClickListener =
Shadows.shadowOf(rootActionView).getOnClickListener();
final View image = rootActionView.findViewById(R.id.close_suggestions_strip_icon);
final View text = rootActionView.findViewById(R.id.close_suggestions_strip_text);
Assert.assertEquals(View.VISIBLE, image.getVisibility());
Assert.assertEquals(View.GONE, text.getVisibility());
Shadows.shadowOf(Looper.getMainLooper()).pause();
onClickListener.onClick(rootActionView);
// should be shown for some time
Assert.assertEquals(View.VISIBLE, text.getVisibility());
// strip is not removed
Assert.assertNotNull(
mAnySoftKeyboardUnderTest
.getInputViewContainer()
.findViewById(R.id.close_suggestions_strip_text));
Assert.assertTrue(mAnySoftKeyboardUnderTest.isPredictionOn());
Shadows.shadowOf(Looper.getMainLooper()).unPause();
Assert.assertEquals(View.GONE, text.getVisibility());
Shadows.shadowOf(Looper.getMainLooper()).pause();
onClickListener.onClick(rootActionView);
Assert.assertEquals(View.VISIBLE, text.getVisibility());
Assert.assertNotNull(
mAnySoftKeyboardUnderTest
.getInputViewContainer()
.findViewById(R.id.close_suggestions_strip_text));
// removing
onClickListener.onClick(rootActionView);
Shadows.shadowOf(Looper.getMainLooper()).unPause();
Assert.assertNull(
mAnySoftKeyboardUnderTest
.getInputViewContainer()
.findViewById(R.id.close_suggestions_strip_text));
Assert.assertFalse(mAnySoftKeyboardUnderTest.isPredictionOn());
} |
/**
 * Deploys a Flink application cluster on Kubernetes.
 * <p>
 * Fails if a cluster with the same id already exists or if the configured deployment
 * target is not the application target. For non-PyFlink jobs, exactly one pipeline jar
 * must be configured. Local artifacts are uploaded before the cluster entrypoint is
 * started.
 *
 * @param clusterSpecification resources for the cluster to deploy
 * @param applicationConfiguration application settings applied to the Flink configuration
 * @return provider for clients of the deployed cluster
 * @throws ClusterDeploymentException if validation, artifact upload, or deployment fails
 */
@Override
public ClusterClientProvider<String> deployApplicationCluster(
        final ClusterSpecification clusterSpecification,
        final ApplicationConfiguration applicationConfiguration)
        throws ClusterDeploymentException {
    // A cluster is considered existing when its external service is already present.
    if (client.getService(ExternalServiceDecorator.getExternalServiceName(clusterId))
            .isPresent()) {
        throw new ClusterDeploymentException(
                "The Flink cluster " + clusterId + " already exists.");
    }

    checkNotNull(clusterSpecification);
    checkNotNull(applicationConfiguration);

    final KubernetesDeploymentTarget deploymentTarget =
            KubernetesDeploymentTarget.fromConfig(flinkConfig);
    if (KubernetesDeploymentTarget.APPLICATION != deploymentTarget) {
        throw new ClusterDeploymentException(
                "Couldn't deploy Kubernetes Application Cluster."
                        + " Expected deployment.target="
                        + KubernetesDeploymentTarget.APPLICATION.getName()
                        + " but actual one was \""
                        + deploymentTarget
                        + "\"");
    }

    applicationConfiguration.applyToConfiguration(flinkConfig);

    // No need to do pipelineJars validation if it is a PyFlink job.
    if (!(PackagedProgramUtils.isPython(applicationConfiguration.getApplicationClassName())
            || PackagedProgramUtils.isPython(applicationConfiguration.getProgramArguments()))) {
        final List<URI> pipelineJars =
                KubernetesUtils.checkJarFileForApplicationMode(flinkConfig);
        Preconditions.checkArgument(pipelineJars.size() == 1, "Should only have one jar");
    }

    // Ship local artifacts before starting the cluster entrypoint.
    try {
        artifactUploader.uploadAll(flinkConfig);
    } catch (Exception ex) {
        throw new ClusterDeploymentException(ex);
    }

    final ClusterClientProvider<String> clusterClientProvider =
            deployClusterInternal(
                    KubernetesApplicationClusterEntrypoint.class.getName(),
                    clusterSpecification,
                    false);
    try (ClusterClient<String> clusterClient = clusterClientProvider.getClusterClient()) {
        LOG.info(
                "Create flink application cluster {} successfully, JobManager Web Interface: {}",
                clusterId,
                clusterClient.getWebInterfaceURL());
    }
    return clusterClientProvider;
} | @Test
void testDeployApplicationClusterWithMultipleJarsSet() {
flinkConfig.set(
PipelineOptions.JARS,
Arrays.asList("local:///path/of/user.jar", "local:///user2.jar"));
flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.APPLICATION.getName());
assertThatThrownBy(
() -> descriptor.deployApplicationCluster(clusterSpecification, appConfig))
.satisfies(
cause ->
assertThat(cause)
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Should only have one jar"));
} |
/**
 * Resolves the job configuration path based on the deploy mode. Client-side modes
 * (RUN, CLIENT) use the configured path as-is; cluster-side modes (RUN_APPLICATION,
 * CLUSTER) use only the file name — presumably because the config file is shipped with
 * the job and resolved by name on the cluster (TODO confirm against the submit logic).
 *
 * @param args command arguments carrying the deploy mode and config file path
 * @return resolved config path
 * @throws IllegalArgumentException for unsupported deploy modes
 */
public static Path getConfigPath(@NonNull AbstractCommandArgs args) {
    switch (args.getDeployMode()) {
        case RUN:
        case CLIENT:
            return Paths.get(args.getConfigFile());
        case RUN_APPLICATION:
        case CLUSTER:
            return Paths.get(getFileName(args.getConfigFile()));
        default:
            throw new IllegalArgumentException(
                    "Unsupported deploy mode: " + args.getDeployMode());
    }
} | @Test
public void getConfigPath() throws URISyntaxException {
// test client mode.
SparkCommandArgs sparkCommandArgs = new SparkCommandArgs();
sparkCommandArgs.setDeployMode(DeployMode.CLIENT);
Path expectConfPath =
Paths.get(FileUtilsTest.class.getResource("/flink.batch.conf").toURI());
sparkCommandArgs.setConfigFile(expectConfPath.toString());
Assertions.assertEquals(expectConfPath, FileUtils.getConfigPath(sparkCommandArgs));
// test cluster mode
sparkCommandArgs.setDeployMode(DeployMode.CLUSTER);
Assertions.assertEquals(
"flink.batch.conf", FileUtils.getConfigPath(sparkCommandArgs).toString());
} |
/**
 * @return the database id
 */
public long getDbId() {
    return dbId;
} | @Test
public void testSerialization() throws IOException {
Database db = GlobalStateMgr.getCurrentState().getMetadataMgr().getDb("default_catalog", "test");
Table tbl = GlobalStateMgr.getCurrentState().getMetadataMgr().getTable("default_catalog", "test", "region");
{
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
String s = "{\"dbId\":10001,\"tableId\":10177,\"columns\":[],\"type\":\"FULL\",\"updateTime\":1721650800," +
"\"properties\":{},\"updateRows\":10000}";
Text.writeString(dataOutputStream, s);
byte[] bytes = byteArrayOutputStream.toByteArray();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes);
DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream);
String deserializedString = Text.readString(dataInputStream);
BasicStatsMeta deserializedMeta = GSON.fromJson(deserializedString, BasicStatsMeta.class);
Assert.assertEquals(db.getId(), deserializedMeta.getDbId());
}
{
BasicStatsMeta basicStatsMeta = new BasicStatsMeta(db.getId(), tbl.getId(), List.of(),
StatsConstants.AnalyzeType.FULL,
LocalDateTime.of(2024, 07, 22, 12, 20), Map.of(), 10000);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
String s = GSON.toJson(basicStatsMeta);
Text.writeString(dataOutputStream, s);
dataOutputStream.close();
byte[] bytes = byteArrayOutputStream.toByteArray();
ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes);
DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream);
String deserializedString = Text.readString(dataInputStream);
BasicStatsMetaDemo deserializedMeta = GSON.fromJson(deserializedString, BasicStatsMetaDemo.class);
Assert.assertEquals(db.getId(), deserializedMeta.dbId);
}
} |
// Builds a ClientSession from this options object's fields, using JVM
// defaults for time zone and locale and empty maps for the unset
// session-property/credential slots.
public ClientSession toClientSession()
{
    return new ClientSession(
            parseServer(server),
            user,
            source,
            Optional.empty(),
            parseClientTags(clientTags),
            clientInfo,
            catalog,
            schema,
            TimeZone.getDefault().getID(),
            Locale.getDefault(),
            toResourceEstimates(resourceEstimates),
            toProperties(sessionProperties),
            emptyMap(),
            emptyMap(),
            toExtraCredentials(extraCredentials),
            null,
            clientRequestTimeout,
            disableCompression,
            emptyMap(),
            emptyMap(),
            validateNextUriSource);
} | @Test
// Verifies an http server URI is parsed and preserved verbatim in the session.
public void testServerHttpUri()
{
    ClientOptions options = new ClientOptions();
    options.server = "http://localhost/foo";
    ClientSession session = options.toClientSession();
    assertEquals(session.getServer().toString(), "http://localhost/foo");
}
@Override
// Reads the whole stream as UTF-8 text, runs the configured classifier on it,
// and records the prediction under the "Sentiment" metadata key. Bails out
// (with a warning) if no classifier was configured.
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
                  ParseContext context) throws IOException, SAXException, TikaException {
    if (classifier == null) {
        LOG.warn(getClass().getSimpleName() + " is not configured properly.");
        return;
    }
    String inputString = IOUtils.toString(stream, "UTF-8");
    String sentiment = classifier.predict(inputString);
    metadata.add("Sentiment", sentiment);
} | @Test
// End-to-end check with a categorical sentiment model config: parsing sample
// text must yield the "angry" label. Skips silently if the config can't load.
public void testCategorical() throws Exception {
    Tika tika = getTika("tika-config-sentiment-opennlp-cat.xml");
    if (tika == null) {
        return;
    }
    String text = "Whatever, I need some cooling off time!";
    ByteArrayInputStream stream =
            new ByteArrayInputStream(text.getBytes(StandardCharsets.UTF_8));
    Metadata md = new Metadata();
    tika.parse(stream, md);
    String sentiment = md.get("Sentiment");
    assertNotNull(sentiment);
    assertEquals("angry", sentiment);
}
@SuppressWarnings("unchecked")
// Executes the configured rate-limiter Lua script in Redis for the given id
// and maps the [allowed, tokensLeft] result into a RateLimiterResponse.
// On Redis errors the request is allowed (fail-open: [1, -1]) and the
// algorithm's callback is invoked for cleanup.
public Mono<RateLimiterResponse> isAllowed(final String id, final RateLimiterHandle limiterHandle) {
    double replenishRate = limiterHandle.getReplenishRate();
    double burstCapacity = limiterHandle.getBurstCapacity();
    double requestCount = limiterHandle.getRequestCount();
    RateLimiterAlgorithm<?> rateLimiterAlgorithm = RateLimiterAlgorithmFactory.newInstance(limiterHandle.getAlgorithmName());
    RedisScript<?> script = rateLimiterAlgorithm.getScript();
    List<String> keys = rateLimiterAlgorithm.getKeys(id);
    List<String> scriptArgs = Stream.of(replenishRate, burstCapacity, Instant.now().getEpochSecond(), requestCount).map(String::valueOf).collect(Collectors.toList());
    Flux<List<Long>> resultFlux = Singleton.INST.get(ReactiveRedisTemplate.class).execute(script, keys, scriptArgs);
    return resultFlux.onErrorResume(throwable -> Flux.just(Arrays.asList(1L, -1L)))
            .reduce(new ArrayList<Long>(), (longs, l) -> {
                longs.addAll(l);
                return longs;
            }).map(results -> {
                // results[0] == 1 means allowed; results[1] is tokens remaining.
                boolean allowed = results.get(0) == 1L;
                Long tokensLeft = results.get(1);
                return new RateLimiterResponse(allowed, tokensLeft, keys);
            })
            .doOnError(throwable -> {
                rateLimiterAlgorithm.callback(rateLimiterAlgorithm.getScript(), keys, scriptArgs);
                LOG.error("Error occurred while judging if user is allowed by RedisRateLimiter:{}", throwable.getMessage());
            });
} | @Test
// Token-bucket happy path: a single request is allowed with one token left.
public void allowedTest() {
    isAllowedPreInit(1L, 1L, false);
    rateLimiterHandle.setAlgorithmName("tokenBucket");
    Mono<RateLimiterResponse> responseMono = redisRateLimiter.isAllowed(DEFAULT_TEST_ID, rateLimiterHandle);
    StepVerifier.create(responseMono).assertNext(r -> {
        assertEquals(1L, r.getTokensRemaining());
        assertTrue(r.isAllowed());
    }).verifyComplete();
}
@Override
// Delegates the subtype check to the wrapped resource id after a null guard.
public boolean isSubTypeOf(Class<?> ancestor) {
    checkNotNull(ancestor);
    return id.isSubTypeOf(ancestor);
} | @Test
// Every discrete resource must be a subtype of Object.
public void testSubTypeOfObject() {
    DiscreteResource discrete = Resources.discrete(D1, P1, VLAN1).resource();
    assertThat(discrete.isSubTypeOf(Object.class), is(true));
}
// Finds the first index of subArray inside array, searching from index 0.
// Convenience overload delegating to indexOfSub(array, fromIndex, subArray);
// null handling (returns -1) is presumably done by the delegate — defined
// elsewhere in the class.
public static <T> int indexOfSub(T[] array, T[] subArray) {
    return indexOfSub(array, 0, subArray);
} | @Test
// Covers a found prefix/suffix match, non-contiguous mismatch, overrunning
// sub-array, and all null-argument combinations (all expected to yield -1).
public void indexOfSubTest() {
    Integer[] a = {0x12, 0x34, 0x56, 0x78, 0x9A};
    Integer[] b = {0x56, 0x78};
    Integer[] c = {0x12, 0x56};
    Integer[] d = {0x78, 0x9A};
    Integer[] e = {0x78, 0x9A, 0x10};
    int i = ArrayUtil.indexOfSub(a, b);
    assertEquals(2, i);
    i = ArrayUtil.indexOfSub(a, c);
    assertEquals(-1, i);
    i = ArrayUtil.indexOfSub(a, d);
    assertEquals(3, i);
    i = ArrayUtil.indexOfSub(a, e);
    assertEquals(-1, i);
    i = ArrayUtil.indexOfSub(a, null);
    assertEquals(-1, i);
    i = ArrayUtil.indexOfSub(null, null);
    assertEquals(-1, i);
    i = ArrayUtil.indexOfSub(null, b);
    assertEquals(-1, i);
}
@Override
// Atomically computes-if-absent via a JCache entry processor, unwrapping the
// EntryProcessorException so callers see the underlying cause.
public E computeIfAbsent(String key, Function<? super String, ? extends E> mappingFunction) {
    try {
        return cacheStore.invoke(key, new AtomicComputeProcessor<>(), mappingFunction);
    } catch (EntryProcessorException e) {
        throw new RuntimeException(e.getCause());
    }
} | @Test
// Cache miss: the mapping function runs and its value is stored and returned.
public void computeIfAbsent_cacheMiss_mappingFunctionInvoked() {
    Function<String, Integer> mappingFunction = k -> 7;
    doReturn(null).when(mutableEntryMock).getValue();
    entryProcessorMock = new CacheRegistryStore.AtomicComputeProcessor<>();
    entryProcessorArgMock = mappingFunction;
    Integer cacheResult = classUnderTest.computeIfAbsent(CACHE_KEY, mappingFunction);
    verify(mutableEntryMock, times(1)).setValue(7);
    assertEquals(Integer.valueOf(7), cacheResult);
}
@PutMapping
@Secured(resource = AuthConstants.UPDATE_PASSWORD_ENTRY_POINT, action = ActionTypes.WRITE)
// Updates a user's password. Only an admin or the user themself may update;
// authorization failures map to 403, an expired session to 401, and a
// missing user to IllegalArgumentException.
public Object updateUser(@RequestParam String username, @RequestParam String newPassword,
                         HttpServletResponse response, HttpServletRequest request) throws IOException {
    // admin or same user
    try {
        if (!hasPermission(username, request)) {
            response.sendError(HttpServletResponse.SC_FORBIDDEN, "authorization failed!");
            return null;
        }
    } catch (HttpSessionRequiredException e) {
        response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "session expired!");
        return null;
    } catch (AccessException exception) {
        response.sendError(HttpServletResponse.SC_FORBIDDEN, "authorization failed!");
        return null;
    }
    User user = userDetailsService.getUserFromDatabase(username);
    if (user == null) {
        throw new IllegalArgumentException("user " + username + " not exist!");
    }
    userDetailsService.updateUserPassword(username, PasswordEncoderUtil.encode(newPassword));
    return RestResultUtils.success("update user ok!");
} | @Test
// With auth enabled and the authentication manager throwing AccessException,
// the endpoint must return null and set a 403 on the response.
void testUpdateUser7() throws IOException, AccessException {
    RequestContextHolder.getContext().getAuthContext().getIdentityContext()
            .setParameter(AuthConstants.NACOS_USER_KEY, null);
    when(authConfigs.isAuthEnabled()).thenReturn(true);
    when(authenticationManager.authenticate(any(MockHttpServletRequest.class))).thenThrow(
            new AccessException("test"));
    MockHttpServletRequest mockHttpServletRequest = new MockHttpServletRequest();
    MockHttpServletResponse mockHttpServletResponse = new MockHttpServletResponse();
    Object result = userController.updateUser("nacos", "test", mockHttpServletResponse, mockHttpServletRequest);
    assertNull(result);
    assertEquals(HttpServletResponse.SC_FORBIDDEN, mockHttpServletResponse.getStatus());
}
// Removes a note whose stored file can no longer be parsed: drops it from the
// note manager and clears its authorization entries.
public void removeCorruptedNote(String noteId, AuthenticationInfo subject) throws IOException {
    LOGGER.info("Remove corrupted note: {}", noteId);
    noteManager.removeNote(noteId, subject);
    authorizationService.removeNoteAuth(noteId);
} | @Test
// Creates a note, overwrites its file with invalid content, then removes it
// and asserts the note count drops by one.
void testRemoveCorruptedNote() throws IOException {
    LOGGER.info("--------------- Test testRemoveCorruptedNote ---------------");
    // create a note and a paragraph
    String corruptedNoteId = notebook.createNote("note1", anonymous);
    String corruptedNotePath = notebook.processNote(corruptedNoteId,
        corruptedNote -> {
          return notebookDir.getAbsolutePath() + corruptedNote.getPath() + "_" + corruptedNote.getId() + ".zpln";
        });
    // corrupt note
    FileWriter myWriter = new FileWriter(corruptedNotePath);
    myWriter.write("{{{I'm corrupted;;;");
    myWriter.close();
    // NOTE(review): "Finish Test" is logged here mid-test and again at the end —
    // looks like a copy-paste duplicate; confirm intended.
    LOGGER.info("--------------- Finish Test testRemoveCorruptedNote ---------------");
    int numberOfNotes = notebook.getNotesInfo().size();
    notebook.removeNote(corruptedNoteId, anonymous);
    assertEquals(numberOfNotes - 1, notebook.getNotesInfo().size());
    LOGGER.info("--------------- Finish Test testRemoveCorruptedNote ---------------");
}
// Sorts child profiles in place, descending by their total-time counter
// (profile2 vs profile1 order in the comparator yields the reversal).
public void sortChildren() {
    this.childList.sort((profile1, profile2) ->
            Long.compare(profile2.first.getCounterTotalTime().getValue(),
                    profile1.first.getCounterTotalTime().getValue()));
} | @Test
// Children with times 1, 3, 2 must come back ordered 3, 2, 1.
public void testSortChildren() {
    RuntimeProfile profile = new RuntimeProfile("profile");
    // init profile
    RuntimeProfile profile1 = new RuntimeProfile("profile1");
    RuntimeProfile profile2 = new RuntimeProfile("profile2");
    RuntimeProfile profile3 = new RuntimeProfile("profile3");
    profile1.getCounterTotalTime().setValue(1);
    profile2.getCounterTotalTime().setValue(3);
    profile3.getCounterTotalTime().setValue(2);
    profile.addChild(profile1);
    profile.addChild(profile2);
    profile.addChild(profile3);
    // compare
    profile.sortChildren();
    // check result
    long time0 = profile.getChildList().get(0).first.getCounterTotalTime().getValue();
    long time1 = profile.getChildList().get(1).first.getCounterTotalTime().getValue();
    long time2 = profile.getChildList().get(2).first.getCounterTotalTime().getValue();
    Assert.assertEquals(3, time0);
    Assert.assertEquals(2, time1);
    Assert.assertEquals(1, time2);
}
// Parses a queue capacity string into a QueueCapacityVector. Root queue is
// always 100% regardless of input; null input or no matching parser pattern
// yields an empty vector. Spaces are stripped before matching.
public QueueCapacityVector parse(String capacityString, QueuePath queuePath) {
    if (queuePath.isRoot()) {
        return QueueCapacityVector.of(100f, ResourceUnitCapacityType.PERCENTAGE);
    }
    if (capacityString == null) {
        return new QueueCapacityVector();
    }
    // Trim all spaces from capacity string
    capacityString = capacityString.replaceAll(" ", "");
    // First parser whose regex matches wins.
    for (Parser parser : parsers) {
        Matcher matcher = parser.regex.matcher(capacityString);
        if (matcher.find()) {
            return parser.parser.apply(matcher);
        }
    }
    return new QueueCapacityVector();
} | @Test
// Zero absolute memory/vcores must still parse as ABSOLUTE entries with value 0.
public void testZeroAbsoluteCapacityConfig() {
    QueueCapacityVector weightCapacityVector =
        capacityConfigParser.parse(String.format(MEMORY_VCORE_TEMPLATE, 0, 0), QUEUE_PATH);
    QueueCapacityVectorEntry memory = weightCapacityVector.getResource(MEMORY_URI);
    QueueCapacityVectorEntry vcore = weightCapacityVector.getResource(VCORES_URI);
    Assert.assertEquals(ResourceUnitCapacityType.ABSOLUTE, memory.getVectorResourceType());
    Assert.assertEquals(0, memory.getResourceValue(), EPSILON);
    Assert.assertEquals(ResourceUnitCapacityType.ABSOLUTE, vcore.getVectorResourceType());
    Assert.assertEquals(0, vcore.getResourceValue(), EPSILON);
}
// FEEL any(list): ORs together the Boolean elements of the list, skipping
// nulls. A null list yields false; any non-Boolean, non-null element is an
// invalid-parameter error.
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(false);
    }
    boolean result = false;
    for (final Object element : list) {
        if (element != null && !(element instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" +
                    " a Boolean"));
        } else {
            if (element != null) {
                result |= (Boolean) element;
            }
        }
    }
    return FEELFnResult.ofResult(result);
} | @Test
// Nulls among falses are ignored: overall result stays false.
void invokeArrayParamReturnNull() {
    FunctionTestUtil.assertResult(anyFunction.invoke(new Object[]{Boolean.FALSE, null, Boolean.FALSE}), false);
}
@Override
// Two DelegatingConfigurations are equal iff their key prefix and backing
// configuration are both equal; anything else (including null) is unequal.
public boolean equals(Object obj) {
    if (obj instanceof DelegatingConfiguration) {
        DelegatingConfiguration other = (DelegatingConfiguration) obj;
        return this.prefix.equals(other.prefix)
                && this.backingConfig.equals(other.backingConfig);
    } else {
        return false;
    }
} | @Test
// Reflection check: every public non-static Configuration method must have a
// matching (same name + parameter types) override in DelegatingConfiguration.
void testIfDelegatesImplementAllMethods() throws IllegalArgumentException {
    // For each method in the Configuration class...
    Method[] confMethods = Configuration.class.getDeclaredMethods();
    Method[] delegateMethods = DelegatingConfiguration.class.getDeclaredMethods();
    for (Method configurationMethod : confMethods) {
        final int mod = configurationMethod.getModifiers();
        if (!Modifier.isPublic(mod) || Modifier.isStatic(mod)) {
            continue;
        }
        boolean hasMethod = false;
        // Find matching method in wrapper class and call it
        lookForWrapper:
        for (Method wrapperMethod : delegateMethods) {
            if (configurationMethod.getName().equals(wrapperMethod.getName())) {
                // Get parameters for method
                Class<?>[] wrapperMethodParams = wrapperMethod.getParameterTypes();
                Class<?>[] configMethodParams = configurationMethod.getParameterTypes();
                if (wrapperMethodParams.length != configMethodParams.length) {
                    continue;
                }
                for (int i = 0; i < wrapperMethodParams.length; i++) {
                    if (wrapperMethodParams[i] != configMethodParams[i]) {
                        continue lookForWrapper;
                    }
                }
                hasMethod = true;
                break;
            }
        }
        assertThat(hasMethod)
                .as(
                        "Configuration method '"
                                + configurationMethod.getName()
                                + "' has not been wrapped correctly in DelegatingConfiguration wrapper")
                .isTrue();
    }
}
// Logs a flow-scoped message via SLF4J's fluent API, prefixing it with either
// {tenant, namespace, id} or {namespace, id} placeholders depending on
// whether multi-tenancy is enabled, then appending the caller's args.
public void logFlow(Flow flow, Logger logger, Level level, String message, Object... args) {
    String finalMsg = tenantEnabled ? FLOW_PREFIX_WITH_TENANT + message : FLOW_PREFIX_NO_TENANT + message;
    Object[] executionArgs = tenantEnabled ?
        new Object[] { flow.getTenantId(), flow.getNamespace(), flow.getId() } :
        new Object[] { flow.getNamespace(), flow.getId() };
    Object[] finalArgs = ArrayUtils.addAll(executionArgs, args);
    logger.atLevel(level).log(finalMsg, finalArgs);
} | @Test
// Smoke test: plain message, message with a placeholder, and message with a
// placeholder plus a throwable must all log without error.
void logFlow() {
    var flow = Flow.builder().namespace("namespace").id("flow").build();
    logService.logFlow(flow, log, Level.INFO, "Some log");
    logService.logFlow(flow, log, Level.INFO, "Some log with an {}", "attribute");
    logService.logFlow(flow, log, Level.ERROR, "Some log with an {} and an error", "attribute", new RuntimeException("Test Exception"));
}
@Override
// Translates a workflow instance into its runtime DAG (step -> transitions).
// Works on a deep copy of the instance. For RESTART_FROM_INCOMPLETE /
// RESTART_FROM_SPECIFIC, start steps are derived from the aggregated step
// statuses (incomplete terminal or NOT_CREATED steps); RESTART_FROM_SPECIFIC
// additionally forces the requested restart step in. Other policies copy the
// caller-provided start/end step lists as-is.
public Map<String, StepTransition> translate(WorkflowInstance workflowInstance) {
    // Deep copy via Jackson so the caller's instance is never mutated.
    WorkflowInstance instance = objectMapper.convertValue(workflowInstance, WorkflowInstance.class);
    if (instance.getRunConfig() != null) {
        if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_INCOMPLETE
            || instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
            Map<String, StepInstance.Status> statusMap =
                instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
                    .collect(
                        Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()));
            if (!statusMap.isEmpty()) {
                instance
                    .getRunConfig()
                    .setStartStepIds(
                        statusMap.entrySet().stream()
                            .filter(
                                entry ->
                                    !entry.getValue().isComplete()
                                        && (entry.getValue().isTerminal()
                                        || entry.getValue() == StepInstance.Status.NOT_CREATED))
                            .map(Map.Entry::getKey)
                            .collect(Collectors.toList()));
            }
            // handle the special case of restarting from a completed step
            if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
                String restartStepId =
                    RunRequest.getCurrentNode(instance.getRunConfig().getRestartConfig()).getStepId();
                if (!instance.getRunConfig().getStartStepIds().contains(restartStepId)) {
                    instance.getRunConfig().getStartStepIds().add(restartStepId);
                }
            }
        } else {
            // Non-restart policies: copy caller-supplied start/end lists defensively.
            if (workflowInstance.getRunConfig().getStartStepIds() != null) {
                instance
                    .getRunConfig()
                    .setStartStepIds(new ArrayList<>(workflowInstance.getRunConfig().getStartStepIds()));
            }
            if (workflowInstance.getRunConfig().getEndStepIds() != null) {
                instance
                    .getRunConfig()
                    .setEndStepIds(new ArrayList<>(workflowInstance.getRunConfig().getEndStepIds()));
            }
        }
    }
    List<String> startStepIds =
        instance.getRunConfig() != null && instance.getRunConfig().getStartStepIds() != null
            ? instance.getRunConfig().getStartStepIds()
            : null;
    List<String> endStepIds =
        instance.getRunConfig() != null && instance.getRunConfig().getEndStepIds() != null
            ? instance.getRunConfig().getEndStepIds()
            : null;
    return WorkflowGraph.computeDag(instance.getRuntimeWorkflow(), startStepIds, endStepIds);
} | @Test
// RESTART_CUSTOMIZED_RUN starting at job3: DAG must contain job3 and its
// downstream steps with the expected predecessor/successor wiring.
public void testTranslateForRestartCustomizedRun() {
    instance.getRunConfig().setPolicy(RunPolicy.RESTART_CUSTOMIZED_RUN);
    instance.getRunConfig().setStartStepIds(Collections.singletonList("job3"));
    Map<String, StepTransition> dag = translator.translate(instance);
    Assert.assertEquals(new HashSet<>(Arrays.asList("job.2", "job3", "job4")), dag.keySet());
    StepTransition jobTransition = new StepTransition();
    jobTransition.setPredecessors(Collections.singletonList("job3"));
    jobTransition.setSuccessors(Collections.singletonMap("job4", "true"));
    Assert.assertEquals(jobTransition, dag.get("job.2"));
    jobTransition.setPredecessors(Collections.emptyList());
    jobTransition.setSuccessors(new HashMap<>());
    jobTransition.getSuccessors().put("job.2", "true");
    jobTransition.getSuccessors().put("job4", "true");
    Assert.assertEquals(jobTransition, dag.get("job3"));
    jobTransition.setPredecessors(Arrays.asList("job3", "job.2"));
    jobTransition.setSuccessors(Collections.emptyMap());
    Assert.assertEquals(jobTransition, dag.get("job4"));
}
@Override
// Lazily flattens all message bundles of the Windmill work item into decoded
// WindowedValues: timestamp from Windmill time, windows/pane from the message
// metadata, and the payload via the value coder. Decoding errors surface as
// RuntimeException (IOException is wrapped).
public Iterable<WindowedValue<ElemT>> elementsIterable() {
    return FluentIterable.from(workItem.getMessageBundlesList())
        .transformAndConcat(Windmill.InputMessageBundle::getMessagesList)
        .transform(
            message -> {
                try {
                    Instant timestamp =
                        WindmillTimeUtils.windmillToHarnessTimestamp(message.getTimestamp());
                    Collection<? extends BoundedWindow> windows =
                        WindmillSink.decodeMetadataWindows(windowsCoder, message.getMetadata());
                    PaneInfo pane = WindmillSink.decodeMetadataPane(message.getMetadata());
                    InputStream inputStream = message.getData().newInput();
                    ElemT value = valueCoder.decode(inputStream, Coder.Context.OUTER);
                    return WindowedValue.of(value, timestamp, windows, pane);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
} | @Test
// Elements from two bundles must be iterated in bundle order with their
// timestamps, windows, and panes preserved.
public void testElementIteration() throws Exception {
    Windmill.WorkItem.Builder workItem =
        Windmill.WorkItem.newBuilder().setKey(SERIALIZED_KEY).setWorkToken(17);
    Windmill.InputMessageBundle.Builder chunk1 = workItem.addMessageBundlesBuilder();
    chunk1.setSourceComputationId("computation");
    addElement(chunk1, 5, "hello", WINDOW_1, paneInfo(0));
    addElement(chunk1, 7, "world", WINDOW_2, paneInfo(2));
    Windmill.InputMessageBundle.Builder chunk2 = workItem.addMessageBundlesBuilder();
    chunk2.setSourceComputationId("computation");
    addElement(chunk2, 6, "earth", WINDOW_1, paneInfo(1));
    KeyedWorkItem<String, String> keyedWorkItem =
        new WindmillKeyedWorkItem<>(
            KEY, workItem.build(), WINDOW_CODER, WINDOWS_CODER, VALUE_CODER);
    assertThat(
        keyedWorkItem.elementsIterable(),
        Matchers.contains(
            WindowedValue.of("hello", new Instant(5), WINDOW_1, paneInfo(0)),
            WindowedValue.of("world", new Instant(7), WINDOW_2, paneInfo(2)),
            WindowedValue.of("earth", new Instant(6), WINDOW_1, paneInfo(1))));
}
@Override
// Athenz authorization filter: maps the request to a resource/action, asks
// ZPE for a decision, and either lets the request through (no mapping, or
// ALLOW) or rejects with 403 (denied) / 401 (IllegalArgumentException while
// resolving credentials). Metrics are incremented on every outcome.
public Optional<ErrorResponse> filter(DiscFilterRequest request) {
    try {
        Optional<ResourceNameAndAction> resourceMapping =
            requestResourceMapper.getResourceNameAndAction(request);
        log.log(Level.FINE, () -> String.format("Resource mapping for '%s': %s", request, resourceMapping));
        if (resourceMapping.isEmpty()) {
            // No resource mapping means this filter has no opinion — pass through.
            incrementAcceptedMetrics(request, false, Optional.empty());
            return Optional.empty();
        }
        Result result = checkAccessAllowed(request, resourceMapping.get());
        AuthorizationResult.Type resultType = result.zpeResult.type();
        setAttribute(request, RESULT_ATTRIBUTE, resultType.name());
        if (resultType == AuthorizationResult.Type.ALLOW) {
            populateRequestWithResult(request, result);
            incrementAcceptedMetrics(request, true, Optional.of(result));
            return Optional.empty();
        }
        log.log(Level.FINE, () -> String.format("Forbidden (403) for '%s': %s", request, resultType.name()));
        incrementRejectedMetrics(request, FORBIDDEN, resultType.name(), Optional.of(result));
        return Optional.of(new ErrorResponse(FORBIDDEN, "Access forbidden: " + resultType.getDescription()));
    } catch (IllegalArgumentException e) {
        log.log(Level.FINE, () -> String.format("Unauthorized (401) for '%s': %s", request, e.getMessage()));
        incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized", Optional.empty());
        return Optional.of(new ErrorResponse(UNAUTHORIZED, e.getMessage()));
    }
} | @Test
// A role-certificate credential with an allowing ZPE must pass the filter and
// record the matched credential type and role on the request.
void accepts_request_with_role_certificate() {
    AthenzAuthorizationFilter filter = createFilter(new AllowingZpe(), List.of());
    MockResponseHandler responseHandler = new MockResponseHandler();
    DiscFilterRequest request = createRequest(null, null, ROLE_CERTIFICATE);
    filter.filter(request, responseHandler);
    assertAuthorizationResult(request, Type.ALLOW);
    assertRequestNotFiltered(responseHandler);
    assertMatchedCredentialType(request, EnabledCredentials.ROLE_CERTIFICATE);
    assertMatchedRole(request, ROLE);
}
// Asks the authorization plugin for its role-config metadata and converts the
// JSON response into PluginConfiguration objects using the version-appropriate
// message converter.
public List<PluginConfiguration> getRoleConfigurationMetadata(String pluginId) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_GET_ROLE_CONFIG_METADATA, new DefaultPluginInteractionCallback<>() {
        @Override
        public List<PluginConfiguration> onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            return getMessageConverter(resolvedExtensionVersion).getRoleConfigMetadataResponseFromBody(responseBody);
        }
    });
} | @Test
// Verifies the request shape sent to the plugin and that the JSON body maps
// to a single PluginConfiguration("memberOf", required, not secure).
void shouldTalkToPlugin_To_GetRoleConfigurationMetadata() {
    String responseBody = "[{\"key\":\"memberOf\",\"metadata\":{\"required\":true,\"secure\":false}}]";
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));
    List<PluginConfiguration> roleConfigurationMetadata = authorizationExtension.getRoleConfigurationMetadata(PLUGIN_ID);
    assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_GET_ROLE_CONFIG_METADATA, null);
    assertThat(roleConfigurationMetadata.size()).isEqualTo(1);
    assertThat(roleConfigurationMetadata).contains(
            new PluginConfiguration("memberOf", new Metadata(true, false))
    );
}
// Converts a Connect schema into a KSQL row schema. The result must be a
// STRUCT with at least one supported column; violations and unsupported root
// types are reported as KsqlException.
public Schema toKsqlSchema(final Schema schema) {
    try {
        final Schema rowSchema = toKsqlFieldSchema(schema);
        if (rowSchema.type() != Schema.Type.STRUCT) {
            throw new KsqlException("KSQL stream/table schema must be structured");
        }
        if (rowSchema.fields().isEmpty()) {
            throw new KsqlException("Schema does not include any columns with "
                + "types that ksqlDB supports."
                + System.lineSeparator()
                + "schema: " + FORMATTER.format(schema));
        }
        return rowSchema;
    } catch (final UnsupportedTypeException e) {
        throw new KsqlException("Unsupported type at root of schema: " + e.getMessage(), e);
    }
} | @Test
// A map with a non-string key translates to an optional-string-keyed map with
// optional int32 values.
public void shouldTranslateMapWithNonStringKey() {
    final Schema connectSchema = SchemaBuilder
        .struct()
        .field("mapfield", SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA))
        .build();
    final Schema ksqlSchema = translator.toKsqlSchema(connectSchema);
    assertThat(ksqlSchema.field(nameTranslator.apply("mapfield")), notNullValue());
    final Schema mapSchema = ksqlSchema.field(nameTranslator.apply("mapfield")).schema();
    assertThat(mapSchema.type(), equalTo(Schema.Type.MAP));
    assertThat(mapSchema.keySchema(), equalTo(Schema.OPTIONAL_STRING_SCHEMA));
    assertThat(mapSchema.valueSchema(), equalTo(Schema.OPTIONAL_INT32_SCHEMA));
}
// Fetches the EC2 placement group name from the instance metadata endpoint;
// empty if not available.
Optional<String> placementGroupEc2() {
    return getOptionalMetadata(ec2MetadataEndpoint.concat("/placement/group-name/"), "placement group");
} | @Test
// Stubbed metadata endpoint returning a group name must be fetched exactly once.
public void placementGroupEc2() {
    // given
    String placementGroup = "placement-group-1";
    stubFor(get(urlEqualTo(GROUP_NAME_URL))
        .willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody(placementGroup)));
    // when
    Optional<String> result = awsMetadataApi.placementGroupEc2();
    // then
    assertEquals(placementGroup, result.orElse("N/A"));
    verify(exactly(1), getRequestedFor(urlEqualTo(GROUP_NAME_URL)));
}
// Resolves the screen name/title for a view: locates its Activity (falling
// back to the current foreground activity), then returns fragment page info
// if the view belongs to a fragment, otherwise activity page info merged with
// React Native page info. Returns null when no active activity can be found.
public static JSONObject getScreenNameAndTitle(View view) {
    if (view == null) {
        return null;
    }
    JSONObject object = null;
    Activity activity = SAViewUtils.getActivityOfView(view.getContext(), view);
    if (activity == null) {
        activity = AppStateTools.getInstance().getForegroundActivity();
    }
    if (activity != null && activity.getWindow() != null && activity.getWindow().isActive()) {
        Object fragment = SAFragmentUtils.getFragmentFromView(view, activity);
        if (fragment != null) {
            object = SAPageInfoUtils.getFragmentPageInfo(activity, fragment);
        } else {
            object = SAPageInfoUtils.getActivityPageInfo(activity);
            JSONObject rnJson = SAPageInfoUtils.getRNPageInfo();
            JSONUtils.mergeDuplicateProperty(rnJson, object);
        }
    }
    return object;
} | @Test
// A detached TextView has no hosting activity, so the result must be null.
public void getScreenNameAndTitle() {
    TextView textView1 = new TextView(mApplication);
    textView1.setText("child1");
    Assert.assertNull(SAViewUtils.getScreenNameAndTitle(textView1));
}
// Builds the OpenStack/Swift authentication request(s) for a bookmark based
// on the auth context version embedded in the protocol context path:
// Rackspace identity hosts get the RAX username/key flow; "1.0"/"1.1" use the
// legacy username/key requests; "2.0" prompts for tenant (unless encoded as
// "tenant:user" in the username) and issues three candidate v2 requests;
// "3" prompts for project/domain as needed and issues a v3 request; anything
// else falls back to v1. Prompt answers are persisted back into the
// bookmark's username for reuse.
public Set<? extends AuthenticationRequest> getRequest(final Host bookmark, final LoginCallback prompt)
    throws LoginCanceledException {
    final StringBuilder url = new StringBuilder();
    url.append(bookmark.getProtocol().getScheme().toString()).append("://");
    url.append(bookmark.getHostname());
    if(!(bookmark.getProtocol().getScheme().getPort() == bookmark.getPort())) {
        url.append(":").append(bookmark.getPort());
    }
    final String context = PathNormalizer.normalize(bookmark.getProtocol().getContext());
    // Custom authentication context
    url.append(context);
    if(bookmark.getProtocol().getDefaultHostname().endsWith("identity.api.rackspacecloud.com")
        || bookmark.getHostname().endsWith("identity.api.rackspacecloud.com")) {
        return Collections.singleton(new Authentication20RAXUsernameKeyRequest(
            URI.create(url.toString()),
            bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword(), null)
        );
    }
    final LoginOptions options = new LoginOptions(bookmark.getProtocol()).password(false).anonymous(false).publickey(false);
    if(context.contains("1.0")) {
        return Collections.singleton(new Authentication10UsernameKeyRequest(URI.create(url.toString()),
            bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword()));
    }
    else if(context.contains("1.1")) {
        return Collections.singleton(new Authentication11UsernameKeyRequest(URI.create(url.toString()),
            bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword()));
    }
    else if(context.contains("2.0")) {
        // Prompt for tenant
        final String user;
        final String tenant;
        if(StringUtils.contains(bookmark.getCredentials().getUsername(), ':')) {
            // Username already encodes "tenant:user".
            final String[] parts = StringUtils.splitPreserveAllTokens(bookmark.getCredentials().getUsername(), ':');
            tenant = parts[0];
            user = parts[1];
        }
        else {
            user = bookmark.getCredentials().getUsername();
            tenant = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
                LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
                LocaleFactory.localizedString("Tenant Name", "Mosso"), options
                    .usernamePlaceholder(LocaleFactory.localizedString("Tenant Name", "Mosso"))).getUsername();
            // Save tenant in username
            bookmark.getCredentials().setUsername(String.format("%s:%s", tenant, bookmark.getCredentials().getUsername()));
        }
        // Try password, tenant-id, and access-key variants in order.
        final Set<AuthenticationRequest> requests = new LinkedHashSet<>();
        requests.add(new Authentication20UsernamePasswordRequest(
            URI.create(url.toString()),
            user, bookmark.getCredentials().getPassword(), tenant)
        );
        requests.add(new Authentication20UsernamePasswordTenantIdRequest(
            URI.create(url.toString()),
            user, bookmark.getCredentials().getPassword(), tenant)
        );
        requests.add(new Authentication20AccessKeySecretKeyRequest(
            URI.create(url.toString()),
            user, bookmark.getCredentials().getPassword(), tenant));
        return requests;
    }
    else if(context.contains("3")) {
        // Prompt for project
        final String user;
        final String project;
        final String domain;
        if(StringUtils.contains(bookmark.getCredentials().getUsername(), ':')) {
            final String[] parts = StringUtils.splitPreserveAllTokens(bookmark.getCredentials().getUsername(), ':');
            if(parts.length == 3) {
                // Username fully encodes "project:domain:user".
                project = parts[0];
                domain = parts[1];
                user = parts[2];
            }
            else {
                // Only "project:user" present — prompt for the domain.
                project = parts[0];
                user = parts[1];
                domain = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
                    LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
                    LocaleFactory.localizedString("Project Domain Name", "Mosso"), options
                        .usernamePlaceholder(LocaleFactory.localizedString("Project Domain Name", "Mosso"))).getUsername();
                // Save project name and domain in username
                bookmark.getCredentials().setUsername(String.format("%s:%s:%s", project, domain, bookmark.getCredentials().getUsername()));
            }
        }
        else {
            user = bookmark.getCredentials().getUsername();
            final Credentials projectName = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
                LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
                LocaleFactory.localizedString("Project Name", "Mosso"), options
                    .usernamePlaceholder(LocaleFactory.localizedString("Project Name", "Mosso")));
            if(StringUtils.contains(bookmark.getCredentials().getUsername(), ':')) {
                final String[] parts = StringUtils.splitPreserveAllTokens(projectName.getUsername(), ':');
                project = parts[0];
                domain = parts[1];
            }
            else {
                project = projectName.getUsername();
                domain = prompt.prompt(bookmark, bookmark.getCredentials().getUsername(),
                    LocaleFactory.localizedString("Provide additional login credentials", "Credentials"),
                    LocaleFactory.localizedString("Project Domain Name", "Mosso"), options
                        .usernamePlaceholder(LocaleFactory.localizedString("Project Domain Name", "Mosso"))).getUsername();
            }
            // Save project name and domain in username
            bookmark.getCredentials().setUsername(String.format("%s:%s:%s", project, domain, bookmark.getCredentials().getUsername()));
        }
        final Set<AuthenticationRequest> requests = new LinkedHashSet<>();
        requests.add(new Authentication3UsernamePasswordProjectRequest(
            URI.create(url.toString()),
            user, bookmark.getCredentials().getPassword(), project, domain)
        );
        return requests;
    }
    else {
        log.warn(String.format("Unknown context version in %s. Default to v1 authentication.", context));
        // Default to 1.0
        return Collections.singleton(new Authentication10UsernameKeyRequest(URI.create(url.toString()),
            bookmark.getCredentials().getUsername(), bookmark.getCredentials().getPassword()));
    }
} | @Test
// Rackspace UK profile (v2.0 tokens context) must yield the RAX v2.0 request
// against the London identity endpoint.
public void testProfileLondon() throws Exception {
    final SwiftProtocol protocol = new SwiftProtocol() {
        @Override
        public String getContext() {
            return "/v2.0/tokens";
        }
    };
    final SwiftAuthenticationService s = new SwiftAuthenticationService();
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new SwiftProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/Rackspace UK.cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname());
    assertEquals("/v2.0/tokens", profile.getContext());
    assertEquals(URI.create("https://lon.identity.api.rackspacecloud.com/v2.0/tokens"), s.getRequest(host, new DisabledLoginCallback()).iterator().next().getURI());
    assertEquals(Client.AuthVersion.v20, s.getRequest(host, new DisabledLoginCallback()).iterator().next().getVersion());
    assertEquals(Authentication20RAXUsernameKeyRequest.class, s.getRequest(host, new DisabledLoginCallback()).iterator().next().getClass());
}
@PostMapping(value = "/task", consumes = "application/json", produces = "application/json")
@ResponseBody
@Operation(summary = "start a perform scheduled task")
// Triggers the resend-unsent-messages job when the task payload names the
// resend task; always answers {"status": "OK"}.
public ResponseEntity<Map<String, Object>> performScheduleTask(final @RequestBody ScheduledTask task) {
    if (RESENT_TASK_NAME.equals(task.getIapi().get("name"))) {
        afnemersindicatieService.resendUnsentMessages();
    }
    return ResponseEntity.ok(Map.of("status", "OK"));
} | @Test
// A SEND_FAILED Av01 message must be re-sent to DGL and then deleted.
void testResendWithApv01(){
    when(afnemersbericht.getType()).thenReturn(Afnemersbericht.Type.Av01);
    List<Afnemersbericht> afnemersberichten = Arrays.asList(afnemersbericht);
    when(afnemersberichtRepository.findByStatus(Afnemersbericht.Status.SEND_FAILED)).thenReturn(afnemersberichten);
    classUnderTest.performScheduleTask(new ScheduledTask(RESENT_TASK_NAME));
    verify(dglSendServiceMock, times(1)).sendAfnemersBerichtAanDGL(any(), any());
    verify(afnemersberichtRepository, times(1)).delete(afnemersbericht);
}
// Matches a WebSocket target path against a Vert.x host route path. A trailing
// '*' on the host path turns the check into a prefix match on the normalized
// paths; otherwise paths must match exactly (same segment count and content).
// Empty/null inputs never match.
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) {
    boolean exactPathMatch = true;
    if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) {
        // This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI
        return false;
    }
    // Paths ending with '*' are Vert.x wildcard routes so match on the path prefix
    if (hostPath.endsWith("*")) {
        exactPathMatch = false;
        hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
    }
    // Trailing "/" before normalization makes prefix comparison segment-safe.
    String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/");
    String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/");
    String[] hostPathElements = normalizedHostPath.split("/");
    String[] targetPathElements = normalizedTargetPath.split("/");
    if (exactPathMatch && hostPathElements.length != targetPathElements.length) {
        return false;
    }
    if (exactPathMatch) {
        return normalizedHostPath.equals(normalizedTargetPath);
    } else {
        return normalizedTargetPath.startsWith(normalizedHostPath);
    }
} | @Test
// Wildcard host path with params must not match a target whose literal
// segments diverge before the wildcard.
void webSocketHostWildcardPathWithParamsNotMatches() {
    String hostPath = "/foo/{bar}/cheese/{wine}*";
    String targetPath = "/foo/bar/invalid/wine/beer/additional/path";
    assertFalse(VertxWebsocketHelper.webSocketHostPathMatches(hostPath, targetPath));
}
@Override
public int run(String[] args) throws Exception {
handleOpts(args);
Integer exitCode = 1;
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
// If we're running as the user, then no need to impersonate
// (which might fail if user is not a proxyuser for themselves)
// Also if !proxy is set
if (!proxy || loginUser.getShortUserName().equals(user)) {
LOG.info("Running as " + user);
exitCode = runInternal();
} else {
// Otherwise impersonate user. If we're not allowed to, then this will
// fail with an Exception
LOG.info("Running as " + loginUser.getShortUserName() + " but will " +
"impersonate " + user);
UserGroupInformation proxyUser =
UserGroupInformation.createProxyUser(user, loginUser);
exitCode = proxyUser.doAs(new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws Exception {
return runInternal();
}
});
}
return exitCode;
} | @Test
public void testHadoopArchiveLogsWithArchiveError() throws Exception {
String[] args = getArgs();
final HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(conf);
HadoopArchives mockHadoopArchives = mock(HadoopArchives.class);
when(mockHadoopArchives.run(Mockito.<String[]>any())).thenReturn(-1);
halr.hadoopArchives = mockHadoopArchives;
assertNotEquals(0, ToolRunner.run(halr, args));
// Make sure the original log files are intact
FileStatus[] app1Files = fs.listStatus(app1Path);
assertEquals(FILE_COUNT, app1Files.length);
for (int i = 0; i < FILE_COUNT; i++) {
Assert.assertEquals(FILE_SIZES[i] * FILE_SIZE_INCREMENT,
app1Files[i].getLen());
}
} |
public User createUser(String username, final String password, final String name, final String email)
throws UserAlreadyExistsException
{
if (provider.isReadOnly()) {
throw new UnsupportedOperationException("User provider is read-only.");
}
if (username == null || username.isEmpty()) {
throw new IllegalArgumentException("Null or empty username.");
}
if (password == null || password.isEmpty()) {
throw new IllegalArgumentException("Null or empty password.");
}
// Make sure that the username is valid.
try {
username = Stringprep.nodeprep(username);
}
catch (final StringprepException se) {
throw new IllegalArgumentException("Invalid username: " + username, se);
}
if (provider.isNameRequired() && (name == null || name.matches("\\s*"))) {
throw new IllegalArgumentException("Invalid or empty name specified with provider that requires name. User: "
+ username + " Name: " + name);
}
if (provider.isEmailRequired() && !StringUtils.isValidEmailAddress(email)) {
throw new IllegalArgumentException("Invalid or empty email address specified with provider that requires email address. User: "
+ username + " Email: " + email);
}
final User user = provider.createUser(username, password, name, email);
userCache.put(username, user);
// Fire event.
final Map<String,Object> params = Collections.emptyMap();
UserEventDispatcher.dispatchEvent(user, UserEventDispatcher.EventType.user_created, params);
return user;
} | @Test
public void createExistingUserWillGetError() throws Exception{
assertThrows(UserAlreadyExistsException.class, () -> userManager.createUser(USER_ID, "change me", "Test User Name", "test-email@example.com"));
} |
@Override
public boolean add(FilteredBlock block) throws VerificationException, PrunedException {
boolean success = super.add(block);
if (success) {
trackFilteredTransactions(block.getTransactionCount());
}
return success;
} | @Test
public void testBasicChaining() throws Exception {
// Check that we can plug a few blocks together and the futures work.
CompletableFuture<StoredBlock> future = testNetChain.getHeightFuture(2);
// Block 1 from the testnet.
Block b1 = getBlock1();
assertTrue(testNetChain.add(b1));
assertFalse(future.isDone());
// Block 2 from the testnet.
Block b2 = getBlock2();
// Let's try adding an invalid block.
long n = b2.getNonce();
try {
b2.setNonce(12345);
testNetChain.add(b2);
fail();
} catch (VerificationException e) {
b2.setNonce(n);
}
// Now it works because we reset the nonce.
assertTrue(testNetChain.add(b2));
assertTrue(future.isDone());
assertEquals(2, future.get().getHeight());
} |
@Override
public List<DeterministicKey> getKeys(KeyPurpose purpose, int numberOfKeys) {
checkArgument(numberOfKeys > 0);
lock.lock();
try {
DeterministicKey parentKey;
int index;
switch (purpose) {
// Map both REFUND and RECEIVE_KEYS to the same branch for now. Refunds are a feature of the BIP 70
// payment protocol. Later we may wish to map it to a different branch (in a new wallet version?).
// This would allow a watching wallet to only be able to see inbound payments, but not change
// (i.e. spends) or refunds. Might be useful for auditing ...
case RECEIVE_FUNDS:
case REFUND:
issuedExternalKeys += numberOfKeys;
index = issuedExternalKeys;
parentKey = externalParentKey;
break;
case AUTHENTICATION:
case CHANGE:
issuedInternalKeys += numberOfKeys;
index = issuedInternalKeys;
parentKey = internalParentKey;
break;
default:
throw new UnsupportedOperationException();
}
// Optimization: potentially do a very quick key generation for just the number of keys we need if we
// didn't already create them, ignoring the configured lookahead size. This ensures we'll be able to
// retrieve the keys in the following loop, but if we're totally fresh and didn't get a chance to
// calculate the lookahead keys yet, this will not block waiting to calculate 100+ EC point multiplies.
// On slow/crappy Android phones looking ahead 100 keys can take ~5 seconds but the OS will kill us
// if we block for just one second on the UI thread. Because UI threads may need an address in order
// to render the screen, we need getKeys to be fast even if the wallet is totally brand new and lookahead
// didn't happen yet.
//
// It's safe to do this because when a network thread tries to calculate a Bloom filter, we'll go ahead
// and calculate the full lookahead zone there, so network requests will always use the right amount.
List<DeterministicKey> lookahead = maybeLookAhead(parentKey, index, 0, 0);
putKeys(lookahead);
List<DeterministicKey> keys = new ArrayList<>(numberOfKeys);
for (int i = 0; i < numberOfKeys; i++) {
HDPath path = parentKey.getPath().extend(new ChildNumber(index - numberOfKeys + i, false));
DeterministicKey k = hierarchy.get(path, false, false);
// Just a last minute sanity check before we hand the key out to the app for usage. This isn't inspired
// by any real problem reports from bitcoinj users, but I've heard of cases via the grapevine of
// places that lost money due to bitflips causing addresses to not match keys. Of course in an
// environment with flaky RAM there's no real way to always win: bitflips could be introduced at any
// other layer. But as we're potentially retrieving from long term storage here, check anyway.
checkForBitFlip(k);
keys.add(k);
}
return keys;
} finally {
lock.unlock();
}
} | @Test
public void getKeys() {
chain.getKey(KeyChain.KeyPurpose.RECEIVE_FUNDS);
chain.getKey(KeyChain.KeyPurpose.CHANGE);
chain.maybeLookAhead();
assertEquals(2, chain.getKeys(false, false).size());
} |
@Override
public void onNewActivity(Activity activity) {
} | @Test
public void onNewActivity_activityLaunchedFromPushNotification_dontClearInitialNotification() throws Exception {
Activity activity = mock(Activity.class);
Intent intent = mock(Intent.class);
when(activity.getIntent()).thenReturn(intent);
when(mAppLaunchHelper.isLaunchIntentsActivity(activity)).thenReturn(true);
when(mAppLaunchHelper.isLaunchIntentOfNotification(eq(intent))).thenReturn(true);
createUUT().onNewActivity(activity);
verify(InitialNotificationHolder.getInstance(), never()).clear();
} |
public static Date removeTimeFromDate( Date input ) {
if ( input == null ) {
return null;
}
// Get an instance of the Calendar.
Calendar calendar = Calendar.getInstance();
// Make sure the calendar will not perform automatic correction.
calendar.setLenient( false );
// Set the time of the calendar to the given date.
calendar.setTime( input );
// Remove the hours, minutes, seconds and milliseconds.
calendar.set( Calendar.HOUR_OF_DAY, 0 );
calendar.set( Calendar.MINUTE, 0 );
calendar.set( Calendar.SECOND, 0 );
calendar.set( Calendar.MILLISECOND, 0 );
// Return the date again.
return calendar.getTime();
} | @Test
public void testRemoveTimeFromDate() {
final Date date = Const.removeTimeFromDate( new Date() );
assertEquals( 0, date.getHours() );
assertEquals( 0, date.getMinutes() );
assertEquals( 0, date.getSeconds() );
} |
@Override
public void deleteNoteIndex(String noteId) {
try {
deleteDoc(noteId, null);
deleteParagraphIndex(noteId, null);
} catch (IOException e) {
LOGGER.error("Unable to delete note {}", noteId, e);
}
} | @Test
void canDeleteNull() {
// give
// looks like a bug in web UI: it tries to delete a note twice (after it has just been deleted)
// when
noteSearchService.deleteNoteIndex(null);
} |
@Override
public TbPair<Boolean, JsonNode> upgrade(int fromVersion, JsonNode oldConfiguration) throws TbNodeException {
return fromVersion == 0 ? upgradeToUseFetchToAndDataToFetch(oldConfiguration) : new TbPair<>(false, oldConfiguration);
} | @Test
public void givenOldConfig_whenUpgrade_thenShouldReturnTrueResultWithNewConfig() throws Exception {
var defaultConfig = new TbGetEntityDataNodeConfiguration().defaultConfiguration();
var node = new TbGetTenantAttributeNode();
String oldConfig = "{\"attrMapping\":{\"alarmThreshold\":\"threshold\"},\"telemetry\":false}";
JsonNode configJson = JacksonUtil.toJsonNode(oldConfig);
TbPair<Boolean, JsonNode> upgrade = node.upgrade(0, configJson);
Assertions.assertTrue(upgrade.getFirst());
Assertions.assertEquals(defaultConfig, JacksonUtil.treeToValue(upgrade.getSecond(), defaultConfig.getClass()));
} |
@Override
public CloudConfiguration getCloudConfiguration() {
AliyunCloudConfiguration configuration = new AliyunCloudConfiguration(aliyunCloudCredential);
configuration.loadCommonFields(new HashMap<>(0));
return configuration;
} | @Test
public void testGetCloudConfiguration() {
AliyunCloudConfiguration cloudConfiguration = (AliyunCloudConfiguration) odpsMetadata.getCloudConfiguration();
Assert.assertEquals(CloudType.ALIYUN, cloudConfiguration.getCloudType());
Assert.assertEquals("ak", cloudConfiguration.getAliyunCloudCredential().getAccessKey());
Assert.assertEquals("sk", cloudConfiguration.getAliyunCloudCredential().getSecretKey());
Assert.assertEquals("http://127.0.0.1", cloudConfiguration.getAliyunCloudCredential().getEndpoint());
} |
private static ClientAuthenticationMethod getClientAuthenticationMethod(
List<com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod> metadataAuthMethods) {
if (metadataAuthMethods == null || metadataAuthMethods
.contains(com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod.CLIENT_SECRET_BASIC)) {
// If null, the default includes client_secret_basic
return ClientAuthenticationMethod.CLIENT_SECRET_BASIC;
}
if (metadataAuthMethods.contains(com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod.CLIENT_SECRET_POST)) {
return ClientAuthenticationMethod.CLIENT_SECRET_POST;
}
if (metadataAuthMethods.contains(com.nimbusds.oauth2.sdk.auth.ClientAuthenticationMethod.NONE)) {
return ClientAuthenticationMethod.NONE;
}
return null;
} | @Test
public void buildWhenClientRegistrationProvidedThenEachPropertyMatches() {
ClientRegistration clientRegistration = TestClientRegistrations.clientRegistration().build();
ClientRegistration updated = ClientRegistration.withClientRegistration(clientRegistration).build();
assertThat(clientRegistration.getRegistrationId()).isEqualTo(updated.getRegistrationId());
assertThat(clientRegistration.getClientId()).isEqualTo(updated.getClientId());
assertThat(clientRegistration.getClientSecret()).isEqualTo(updated.getClientSecret());
assertThat(clientRegistration.getClientAuthenticationMethod())
.isEqualTo(updated.getClientAuthenticationMethod());
assertThat(clientRegistration.getAuthorizationGrantType()).isEqualTo(updated.getAuthorizationGrantType());
assertThat(clientRegistration.getRedirectUri()).isEqualTo(updated.getRedirectUri());
assertThat(clientRegistration.getScopes()).isEqualTo(updated.getScopes());
ClientRegistration.ProviderDetails providerDetails = clientRegistration.getProviderDetails();
ClientRegistration.ProviderDetails updatedProviderDetails = updated.getProviderDetails();
assertThat(providerDetails.getAuthorizationUri()).isEqualTo(updatedProviderDetails.getAuthorizationUri());
assertThat(providerDetails.getTokenUri()).isEqualTo(updatedProviderDetails.getTokenUri());
ClientRegistration.ProviderDetails.UserInfoEndpoint userInfoEndpoint = providerDetails.getUserInfoEndpoint();
ClientRegistration.ProviderDetails.UserInfoEndpoint updatedUserInfoEndpoint = updatedProviderDetails
.getUserInfoEndpoint();
assertThat(userInfoEndpoint.getUri()).isEqualTo(updatedUserInfoEndpoint.getUri());
assertThat(userInfoEndpoint.getAuthenticationMethod())
.isEqualTo(updatedUserInfoEndpoint.getAuthenticationMethod());
assertThat(userInfoEndpoint.getUserNameAttributeName())
.isEqualTo(updatedUserInfoEndpoint.getUserNameAttributeName());
assertThat(providerDetails.getJwkSetUri()).isEqualTo(updatedProviderDetails.getJwkSetUri());
assertThat(providerDetails.getIssuerUri()).isEqualTo(updatedProviderDetails.getIssuerUri());
assertThat(providerDetails.getConfigurationMetadata())
.isEqualTo(updatedProviderDetails.getConfigurationMetadata());
assertThat(clientRegistration.getClientName()).isEqualTo(updated.getClientName());
} |
public static EventLoopGroup eventLoopGroup(int threads, String threadFactoryName) {
ThreadFactory threadFactory = new DefaultThreadFactory(threadFactoryName, true);
return shouldEpoll()
? new EpollEventLoopGroup(threads, threadFactory)
: new NioEventLoopGroup(threads, threadFactory);
} | @Test
void eventLoopGroup() {
if (isEpoll()) {
EventLoopGroup eventLoopGroup = NettyEventLoopFactory.eventLoopGroup(1, "test");
Assertions.assertTrue(eventLoopGroup instanceof EpollEventLoopGroup);
Class<? extends SocketChannel> socketChannelClass = NettyEventLoopFactory.socketChannelClass();
Assertions.assertEquals(socketChannelClass, EpollSocketChannel.class);
Class<? extends ServerSocketChannel> serverSocketChannelClass =
NettyEventLoopFactory.serverSocketChannelClass();
Assertions.assertEquals(serverSocketChannelClass, EpollServerSocketChannel.class);
} else {
EventLoopGroup eventLoopGroup = NettyEventLoopFactory.eventLoopGroup(1, "test");
Assertions.assertTrue(eventLoopGroup instanceof NioEventLoopGroup);
Class<? extends SocketChannel> socketChannelClass = NettyEventLoopFactory.socketChannelClass();
Assertions.assertEquals(socketChannelClass, NioSocketChannel.class);
Class<? extends ServerSocketChannel> serverSocketChannelClass =
NettyEventLoopFactory.serverSocketChannelClass();
Assertions.assertEquals(serverSocketChannelClass, NioServerSocketChannel.class);
}
} |
public static void checkArgument(final boolean condition, final String errorMessage) {
if (!condition) {
throw new IllegalArgumentException(errorMessage);
}
} | @Test
void assertCheckArgumentSuccess() {
assertDoesNotThrow(() -> PluginPreconditions.checkArgument(true, "Port `-3306` of MySQL Service must be a positive number."));
} |
@Override
public List<IncomingMessage> pull(
long requestTimeMsSinceEpoch,
SubscriptionPath subscription,
int batchSize,
boolean returnImmediately)
throws IOException {
PullRequest request =
PullRequest.newBuilder()
.setSubscription(subscription.getPath())
.setReturnImmediately(returnImmediately)
.setMaxMessages(batchSize)
.build();
PullResponse response = subscriberStub().pull(request);
if (response.getReceivedMessagesCount() == 0) {
return ImmutableList.of();
}
List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessagesCount());
for (ReceivedMessage message : response.getReceivedMessagesList()) {
PubsubMessage pubsubMessage = message.getMessage();
@Nullable Map<String, String> attributes = pubsubMessage.getAttributes();
// Timestamp.
long timestampMsSinceEpoch;
if (Strings.isNullOrEmpty(timestampAttribute)) {
Timestamp timestampProto = pubsubMessage.getPublishTime();
checkArgument(timestampProto != null, "Pubsub message is missing timestamp proto");
timestampMsSinceEpoch =
timestampProto.getSeconds() * 1000 + timestampProto.getNanos() / 1000L / 1000L;
} else {
timestampMsSinceEpoch = extractTimestampAttribute(timestampAttribute, attributes);
}
// Ack id.
String ackId = message.getAckId();
checkState(!Strings.isNullOrEmpty(ackId));
// Record id, if any.
@Nullable String recordId = null;
if (idAttribute != null && attributes != null) {
recordId = attributes.get(idAttribute);
}
if (Strings.isNullOrEmpty(recordId)) {
// Fall back to the Pubsub provided message id.
recordId = pubsubMessage.getMessageId();
}
incomingMessages.add(
IncomingMessage.of(
pubsubMessage, timestampMsSinceEpoch, requestTimeMsSinceEpoch, ackId, recordId));
}
return incomingMessages;
} | @Test
public void pullOneMessageUsingAttributes() throws IOException {
initializeClient(TIMESTAMP_ATTRIBUTE, ID_ATTRIBUTE);
String expectedSubscription = SUBSCRIPTION.getPath();
final PullRequest expectedRequest =
PullRequest.newBuilder()
.setSubscription(expectedSubscription)
.setReturnImmediately(true)
.setMaxMessages(10)
.build();
Timestamp timestamp =
Timestamp.newBuilder()
.setSeconds(PUB_TIME_MS / 1000)
.setNanos((int) (PUB_TIME_MS % 1000) * 1000 * 1000)
.build();
PubsubMessage expectedPubsubMessage =
PubsubMessage.newBuilder()
.setMessageId(MESSAGE_ID)
.setData(ByteString.copyFrom(DATA.getBytes(StandardCharsets.UTF_8)))
.setPublishTime(timestamp)
.putAllAttributes(ATTRIBUTES)
.putAllAttributes(
ImmutableMap.of(
TIMESTAMP_ATTRIBUTE, String.valueOf(MESSAGE_TIME_MS), ID_ATTRIBUTE, RECORD_ID))
.build();
ReceivedMessage expectedReceivedMessage =
ReceivedMessage.newBuilder().setMessage(expectedPubsubMessage).setAckId(ACK_ID).build();
final PullResponse response =
PullResponse.newBuilder()
.addAllReceivedMessages(ImmutableList.of(expectedReceivedMessage))
.build();
final List<PullRequest> requestsReceived = new ArrayList<>();
SubscriberImplBase subscriberImplBase =
new SubscriberImplBase() {
@Override
public void pull(PullRequest request, StreamObserver<PullResponse> responseObserver) {
requestsReceived.add(request);
responseObserver.onNext(response);
responseObserver.onCompleted();
}
};
Server server =
InProcessServerBuilder.forName(channelName).addService(subscriberImplBase).build().start();
try {
List<IncomingMessage> actualMessages = client.pull(REQ_TIME_MS, SUBSCRIPTION, 10, true);
assertEquals(1, actualMessages.size());
IncomingMessage actualMessage = actualMessages.get(0);
assertEquals(ACK_ID, actualMessage.ackId());
assertEquals(DATA, actualMessage.message().getData().toStringUtf8());
assertEquals(RECORD_ID, actualMessage.recordId());
assertEquals(REQ_TIME_MS, actualMessage.requestTimeMsSinceEpoch());
assertEquals(MESSAGE_TIME_MS, actualMessage.timestampMsSinceEpoch());
assertEquals(expectedRequest, Iterables.getOnlyElement(requestsReceived));
} finally {
server.shutdownNow();
}
} |
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
synchronized (getClassLoadingLock(name)) {
Class<?> loadedClass = findLoadedClass(name);
if (loadedClass != null) {
return loadedClass;
}
if (isClosed) {
throw new ClassNotFoundException("This ClassLoader is closed");
}
if (config.shouldAcquire(name)) {
loadedClass =
PerfStatsCollector.getInstance()
.measure("load sandboxed class", () -> maybeInstrumentClass(name));
} else {
loadedClass = getParent().loadClass(name);
}
if (resolve) {
resolveClass(loadedClass);
}
return loadedClass;
}
} | @Test
public void callingStaticMethodShouldInvokeClassHandler() throws Exception {
Class<?> exampleClass = loadClass(AClassWithStaticMethod.class);
Method normalMethod = exampleClass.getMethod("staticMethod", String.class);
assertEquals(
"response from methodInvoked: AClassWithStaticMethod.staticMethod(java.lang.String value1)",
normalMethod.invoke(null, "value1"));
assertThat(transcript)
.containsExactly(
"methodInvoked: AClassWithStaticMethod.staticMethod(java.lang.String value1)");
} |
@Override
public void delete(Long key) {
long startNanos = Timer.nanos();
try {
delegate.delete(key);
} finally {
deleteProbe.recordValue(Timer.nanosElapsed(startNanos));
}
} | @Test
public void delete() {
Long key = 1L;
queueStore.delete(key);
verify(delegate).delete(key);
assertProbeCalledOnce("delete");
} |
@Override
public void execute(ComputationStep.Context context) {
DuplicationVisitor visitor = new DuplicationVisitor();
new DepthTraversalTypeAwareCrawler(visitor).visit(treeRootHolder.getReportTreeRoot());
context.getStatistics().add("duplications", visitor.count);
} | @Test
public void loads_duplication_with_otherFileRef_as_InExtendedProject_duplication() {
Branch branch = mock(Branch.class);
when(branch.getType()).thenReturn(BranchType.PULL_REQUEST);
analysisMetadataHolder.setBranch(branch);
reportReader.putDuplications(FILE_1_REF, createDuplication(singleLineTextRange(LINE), createInProjectDuplicate(FILE_2_REF, LINE + 1)));
TestComputationStepContext context = new TestComputationStepContext();
underTest.execute(context);
assertDuplications(FILE_1_REF, singleLineDetailedTextBlock(1, LINE),
new InExtendedProjectDuplicate(treeRootHolder.getComponentByRef(FILE_2_REF), singleLineTextBlock(LINE + 1)));
assertNoDuplication(FILE_2_REF);
assertNbOfDuplications(context, 1);
} |
public static String delAll(String regex, CharSequence content) {
if (StrUtil.hasEmpty(regex, content)) {
return StrUtil.str(content);
}
final Pattern pattern = PatternPool.get(regex, Pattern.DOTALL);
return delAll(pattern, content);
} | @Test
public void delAllTest() {
// 删除所有匹配到的内容
final String content = "发东方大厦eee![images]http://abc.com/2.gpg]好机会eee![images]http://abc.com/2.gpg]好机会";
final String resultDelAll = ReUtil.delAll("!\\[images\\][^\\u4e00-\\u9fa5\\\\s]*", content);
assertEquals("发东方大厦eee好机会eee好机会", resultDelAll);
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.