focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
@Override
public Future<RestResponse> restRequest(RestRequest request)
{
  // Convenience overload: delegate with a fresh, empty RequestContext.
  final RequestContext emptyContext = new RequestContext();
  return restRequest(request, emptyContext);
}
|
@Test(groups = {"small"}, dataProvider = "allCombinations3x")
@SuppressWarnings("deprecation")
public void testRequestTimeoutAllowed(boolean isHigherThanDefault, boolean ignoreTimeoutIfHigher, int expectedTimeout) throws Exception
{
// Simulated clock so timeout firing can be checked deterministically.
LoadBalancerSimulator.ClockedExecutor clockedExecutor = new LoadBalancerSimulator.ClockedExecutor();
LoadBalancerMock balancer = new LoadBalancerMock(false, true, clockedExecutor);
DirectoryProvider dirProvider = new DirectoryProviderMock();
KeyMapperProvider keyMapperProvider = new KeyMapperProviderMock();
ClientFactoryProvider clientFactoryProvider = Mockito.mock(ClientFactoryProvider.class);
Facilities facilities = new DelegatingFacilities(dirProvider, keyMapperProvider, clientFactoryProvider);
D2Client client = new DynamicClient(balancer, facilities, true);
URI uri = URI.create("d2://test");
RestRequest restRequest = new RestRequestBuilder(uri).build();
// Wrap with the timeout-enforcing client under test.
client = new RequestTimeoutClient(client, balancer, clockedExecutor);
RequestContext requestContext = new RequestContext();
// Per-request timeout either above or below the default, per data provider.
int requestTimeout = isHigherThanDefault ? DEFAULT_REQUEST_TIMEOUT + 100 : DEFAULT_REQUEST_TIMEOUT - 100;
DegraderTrackerClientTest.TestCallback<RestResponse> restCallback = new DegraderTrackerClientTest.TestCallback<>();
requestContext.putLocalAttr(R2Constants.REQUEST_TIMEOUT, requestTimeout);
if (ignoreTimeoutIfHigher)
{
requestContext.putLocalAttr(R2Constants.REQUEST_TIMEOUT_IGNORE_IF_HIGHER_THAN_DEFAULT, ignoreTimeoutIfHigher);
}
client.restRequest(restRequest, requestContext, restCallback);
// Just before the expected timeout: callback must not have fired yet.
clockedExecutor.run(expectedTimeout - 10).get();
Assert.assertFalse(checkTimeoutFired(restCallback));
checkRequestTimeoutOrViewSet(requestContext);
// Just after the expected timeout: the timeout must have fired.
clockedExecutor.run(expectedTimeout + 10).get();
Assert.assertTrue(checkTimeoutFired(restCallback));
checkRequestTimeoutOrViewSet(requestContext);
}
|
@Override
public V put(K key, V value, Duration ttl) {
// Synchronous facade: issue the async put with TTL and block for its result
// (the previous value associated with the key, per the Map contract).
return get(putAsync(key, value, ttl));
}
|
@Test
public void testReplaceOldValueSuccess() {
// Seed the cache: key "1" -> value "2".
RMapCacheNative<SimpleKey, SimpleValue> map = redisson.getMapCacheNative("simple");
map.put(new SimpleKey("1"), new SimpleValue("2"));
// Replace succeeds because the stored value matches the expected old value "2".
boolean res = map.replace(new SimpleKey("1"), new SimpleValue("2"), new SimpleValue("3"));
Assertions.assertTrue(res);
// Same replace again fails: the stored value is now "3", not "2".
boolean res1 = map.replace(new SimpleKey("1"), new SimpleValue("2"), new SimpleValue("3"));
Assertions.assertFalse(res1);
SimpleValue val1 = map.get(new SimpleKey("1"));
Assertions.assertEquals("3", val1.getValue());
map.destroy();
}
|
@Override
public MapperResult getTenantIdList(MapperContext context) {
    // Distinct tenant ids excluding the default namespace id, paged with
    // Derby's OFFSET ... ROWS FETCH NEXT ... ROWS ONLY syntax. Paging values
    // come from MapperContext and are concatenated into the SQL text, so the
    // statement carries no bind parameters.
    String sql = "SELECT tenant_id FROM config_info WHERE tenant_id != '"
            + NamespaceUtil.getNamespaceDefaultId()
            + "' GROUP BY tenant_id OFFSET " + context.getStartRow()
            + " ROWS FETCH NEXT " + context.getPageSize() + " ROWS ONLY";
    return new MapperResult(sql, Collections.emptyList());
}
|
@Test
void testGetTenantIdList() {
MapperResult mapperResult = configInfoMapperByDerby.getTenantIdList(context);
// Expect Derby paging syntax; the default namespace id renders as ''.
assertEquals(mapperResult.getSql(),
"SELECT tenant_id FROM config_info WHERE tenant_id != '' GROUP BY tenant_id OFFSET " + startRow + " ROWS FETCH NEXT "
+ pageSize + " ROWS ONLY");
// Paging values are inlined into the SQL, so no bind parameters are produced.
assertArrayEquals(mapperResult.getParamList().toArray(), emptyObjs);
}
|
public String workersStatus() {
    // One status line per worker, each terminated by a newline.
    StringBuilder report = new StringBuilder();
    for (TaskExecuteWorker executeWorker : executeWorkers) {
        report.append(executeWorker.status());
        report.append('\n');
    }
    return report.toString();
}
|
@Test
void testWorkersStatus() {
// Single worker engine: exactly one newline-terminated status line expected.
assertEquals("TEST_0%1, pending tasks: 0\n", executeTaskExecuteEngine.workersStatus());
}
|
@Override
@Deprecated
@SuppressWarnings("unchecked")
public <T extends Number> Counter<T> counter(String name, Class<T> type, Unit unit) {
    // Only Integer and Long counters are supported. The DefaultCounter is
    // constructed inside each branch so an unsupported type is reported
    // before the unit is validated.
    if (Long.class.equals(type)) {
        return (Counter<T>) new DefaultCounter(unit).asLongCounter();
    } else if (Integer.class.equals(type)) {
        return (Counter<T>) new DefaultCounter(unit).asIntCounter();
    } else {
        throw new IllegalArgumentException(
            String.format("Counter for type %s is not supported", type.getName()));
    }
}
|
@Test
public void longCounterNullCheck() {
// A null unit is rejected; the message suggests the check happens inside
// DefaultCounter's constructor, before the counter is materialized
// (message text is not in the focal method itself — presumably from
// DefaultCounter(unit)).
assertThatThrownBy(() -> new DefaultMetricsContext().counter("name", Long.class, null))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid count unit: null");
}
|
public static Map<TopicPartition, ListOffsetsResultInfo> fetchEndOffsets(final Collection<TopicPartition> partitions,
                                                                         final Admin adminClient) {
    // No partitions requested: avoid the admin round-trip entirely.
    if (!partitions.isEmpty()) {
        return getEndOffsets(fetchEndOffsetsFuture(partitions, adminClient));
    }
    return Collections.emptyMap();
}
|
@Test
public void fetchEndOffsetsShouldReturnEmptyMapIfPartitionsAreEmpty() {
// The admin client is a bare mock: with empty partitions it must never be
// consulted, and the result is the empty map.
final Admin adminClient = mock(AdminClient.class);
assertTrue(fetchEndOffsets(emptySet(), adminClient).isEmpty());
}
|
@Override
public CheckpointStateToolset createTaskOwnedCheckpointStateToolset() {
    // File systems that cannot copy paths natively get the no-op toolset.
    if (!(fileSystem instanceof PathsCopyingFileSystem)) {
        return new NotDuplicatingCheckpointStateToolset();
    }
    return new FsCheckpointStateToolset(
            taskOwnedStateDirectory, (PathsCopyingFileSystem) fileSystem);
}
|
@Test
void testNotDuplicationCheckpointStateToolset() throws Exception {
// Storage backed by a plain temp path (not a PathsCopyingFileSystem),
// so the fallback NotDuplicating toolset must be returned.
CheckpointStorageAccess checkpointStorage = createCheckpointStorage(randomTempPath(), true);
assertThat(checkpointStorage.createTaskOwnedCheckpointStateToolset())
.isInstanceOf(NotDuplicatingCheckpointStateToolset.class);
}
|
@Override
public ConfigOperateResult insertOrUpdateBetaCas(final ConfigInfo configInfo, final String betaIps,
        final String srcIp, final String srcUser) {
    // Upsert: add a new beta record when none exists, otherwise do a CAS update.
    final boolean betaExists =
            findConfigInfo4BetaState(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant()) != null;
    if (betaExists) {
        return updateConfigInfo4BetaCas(configInfo, betaIps, srcIp, srcUser);
    }
    return addConfigInfo4Beta(configInfo, betaIps, srcIp, srcUser);
}
|
@Test
void testInsertOrUpdateBetaCasOfUpdate() {
String dataId = "betaDataId113";
String group = "group";
String tenant = "tenant";
//mock exist beta
ConfigInfoStateWrapper mockedConfigInfoStateWrapper = new ConfigInfoStateWrapper();
mockedConfigInfoStateWrapper.setDataId(dataId);
mockedConfigInfoStateWrapper.setGroup(group);
mockedConfigInfoStateWrapper.setTenant(tenant);
mockedConfigInfoStateWrapper.setId(123456L);
mockedConfigInfoStateWrapper.setLastModified(System.currentTimeMillis());
// Two stubbed returns: one for the existence check in insertOrUpdateBetaCas,
// one for the state re-read that builds the ConfigOperateResult.
Mockito.when(
databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
.thenReturn(mockedConfigInfoStateWrapper, mockedConfigInfoStateWrapper);
//execute
String appName = "appname";
String content = "content111";
ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
configInfo.setEncryptedDataKey("key34567");
configInfo.setMd5("casMd5");
//mock cas update
Mockito.when(databaseOperate.blockUpdate()).thenReturn(true);
String betaIps = "betaips...";
String srcIp = "srcUp...";
String srcUser = "srcUser...";
// Beta record exists, so the CAS-update branch must be taken.
ConfigOperateResult configOperateResult = embeddedConfigInfoBetaPersistService.insertOrUpdateBetaCas(configInfo, betaIps, srcIp,
srcUser);
//expect return obj
assertEquals(mockedConfigInfoStateWrapper.getId(), configOperateResult.getId());
assertEquals(mockedConfigInfoStateWrapper.getLastModified(), configOperateResult.getLastModified());
//verify cas update to be invoked
embeddedStorageContextHolderMockedStatic.verify(
() -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(configInfo.getContent()),
eq(MD5Utils.md5Hex(content, Constants.PERSIST_ENCODE)), eq(betaIps), eq(srcIp), eq(srcUser),
eq(appName), eq(dataId), eq(group), eq(tenant), eq(configInfo.getMd5())), times(1));
}
|
/**
 * Returns whether the request carries a {@code multipart/form-data} Content-Type.
 *
 * <p>The header may carry parameters after the media type
 * (e.g. {@code multipart/form-data; boundary=...}), so each {@code ;}-separated
 * segment is inspected. Media type names are case-insensitive (RFC 2045) and may
 * be padded with optional whitespace, so segments are trimmed and compared
 * ignoring case — the previous exact {@code equals} missed e.g.
 * {@code "Multipart/Form-Data"} or {@code "multipart/form-data "}.
 *
 * @param request the request to inspect; may be null
 * @return true if any Content-Type segment is multipart/form-data
 */
public static boolean isMultipart(DiscFilterRequest request) {
    if (request == null) {
        return false;
    }
    String contentType = request.getContentType();
    if (contentType == null) {
        return false;
    }
    // String.split(";") uses the single-character fast path — no need to
    // compile a Pattern on every call. An empty split result simply skips
    // the loop and falls through to false.
    for (String part : contentType.split(";")) {
        if ("multipart/form-data".equalsIgnoreCase(part.trim())) {
            return true;
        }
    }
    return false;
}
|
@Test
void testIsMultipart() {
URI uri = URI.create("http://localhost:8080/test");
// Exact multipart content type -> true.
HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
httpReq.headers().add(HttpHeaders.Names.CONTENT_TYPE, "multipart/form-data");
DiscFilterRequest request = new DiscFilterRequest(httpReq);
assertTrue(DiscFilterRequest.isMultipart(request));
// Non-multipart type with a parameter -> false.
httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
httpReq.headers().add(HttpHeaders.Names.CONTENT_TYPE, "text/html;charset=UTF-8");
request = new DiscFilterRequest(httpReq);
assertFalse(DiscFilterRequest.isMultipart(request));
// Null request -> false.
assertFalse(DiscFilterRequest.isMultipart(null));
// Missing Content-Type header -> false.
httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
request = new DiscFilterRequest(httpReq);
assertFalse(DiscFilterRequest.isMultipart(request));
}
|
@Override
public String[] getHelp() {
    // No wrapped expression means there is no help text to delegate to.
    if (expression == null) {
        return null;
    }
    return expression.getHelp();
}
|
@Test
public void getHelp() {
String[] help = new String[] { "Help 1", "Help 2", "Help 3" };
when(expr.getHelp()).thenReturn(help);
// The wrapper must delegate straight to the inner expression's help.
assertArrayEquals(help, test.getHelp());
verify(expr).getHelp();
// Exactly one delegated call, nothing else touched on the mock.
verifyNoMoreInteractions(expr);
}
|
public static Ip4Prefix valueOf(int address, int prefixLength) {
    // Wrap the raw 32-bit address before constructing the prefix.
    Ip4Address ip4Address = Ip4Address.valueOf(address);
    return new Ip4Prefix(ip4Address, prefixLength);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfEmptyString() {
// Parsing the empty string is expected to throw; the locals exist only to
// host the parsing call.
Ip4Prefix ipPrefix;
String fromString;
fromString = "";
ipPrefix = Ip4Prefix.valueOf(fromString);
}
|
public static String fix(final String raw) {
    // Null or whitespace-only input is returned unchanged.
    if (raw == null || raw.trim().isEmpty()) {
        return raw;
    }
    // Run the class-level macro table over the text.
    final MacroProcessor processor = new MacroProcessor();
    processor.setMacros(macros);
    return processor.parse(raw);
}
|
@Test
public void testLeaveLargeAlone() {
// Text containing no recognized macros must pass through semantically
// unchanged (whitespace differences are tolerated by the comparison).
final String original = "yeah yeah yeah minsert( xxx ) this is a long() thing Person (name=='drools') modify a thing";
final String result = KnowledgeHelperFixerTest.fixer.fix( original );
assertEqualsIgnoreWhitespace( original,
result );
}
|
public int compareNodePositions() {
    // An empty path means "the node itself"; it orders before any descendant.
    final boolean beginIsEmpty = beginPath.length == 0;
    final boolean endIsEmpty = endPath.length == 0;
    if (beginIsEmpty) {
        return endIsEmpty ? 0 : -1;
    }
    if (endIsEmpty) {
        return 1;
    }
    // Both non-empty: order by the first diverging child index.
    return Integer.compare(beginPath[0], endPath[0]);
}
|
@Test
public void compareParentToDescendantNode(){
final NodeModel parent = root();
final NodeModel node1 = new NodeModel("node1", map);
parent.insert(node1);
// A parent has an empty relative path, so it must order before its child.
final int compared = new NodeRelativePath(parent, node1).compareNodePositions();
assertTrue(compared < 0);
}
|
/**
 * Iteratively walks a (possibly cyclic) schema graph, invoking the visitor's
 * terminal/non-terminal callbacks, and returns {@code visitor.get()} when the
 * walk finishes or the visitor requests termination.
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
// Set of Visited Schemas (identity semantics: the same Schema object is
// only expanded once, which also breaks cycles in recursive schemas).
IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
// Stack that contains the Schemas to process and afterVisitNonTerminal
// functions.
// Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
// Using Either<...> has a cost we want to avoid...
Deque<Object> dq = new ArrayDeque<>();
dq.push(start);
Object current;
while ((current = dq.poll()) != null) {
if (current instanceof Supplier) {
// We are executing a non-terminal post visit.
SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
switch (action) {
case CONTINUE:
break;
case SKIP_SIBLINGS:
// Drop pending sibling Schemas until the next post-visit marker.
while (dq.peek() instanceof Schema) {
dq.remove();
}
break;
case TERMINATE:
return visitor.get();
case SKIP_SUBTREE:
// SKIP_SUBTREE makes no sense after the subtree was already visited.
default:
throw new UnsupportedOperationException("Invalid action " + action);
}
} else {
Schema schema = (Schema) current;
boolean terminate;
if (visited.containsKey(schema)) {
// Already expanded: treat a revisit as a terminal node.
terminate = visitTerminal(visitor, schema, dq);
} else {
Schema.Type type = schema.getType();
switch (type) {
case ARRAY:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
visited.put(schema, schema);
break;
case RECORD:
// descendingIterator so fields are pushed in reverse and therefore
// popped (visited) in declaration order.
terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
.collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
visited.put(schema, schema);
break;
case UNION:
terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
visited.put(schema, schema);
break;
case MAP:
terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
visited.put(schema, schema);
break;
default:
// Primitive / fixed / enum etc. are terminals.
terminate = visitTerminal(visitor, schema, dq);
break;
}
}
if (terminate) {
return visitor.get();
}
}
}
return visitor.get();
}
|
@Test
public void testVisit12() {
// Nested record: c1 { f1: record ct2 { f11: int }, f2: long }.
String s12 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": ["
+ "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"ct2\", \"fields\": "
+ "[{\"name\": \"f11\", \"type\": \"int\"}]}}," + "{\"name\": \"f2\", \"type\": \"long\"}" + "]}";
// TERMINATE from the first terminal visit ("int") stops the walk before f2;
// the "c1." and "ct2." prefixes are presumably appended by TestVisitor's
// non-terminal callbacks (TestVisitor not shown here — verify in its source).
Assert.assertEquals("c1.ct2.\"int\".", Schemas.visit(new Schema.Parser().parse(s12), new TestVisitor() {
@Override
public SchemaVisitorAction visitTerminal(Schema terminal) {
sb.append(terminal).append('.');
return SchemaVisitorAction.TERMINATE;
}
}));
}
|
public SpanInScope withSpanInScope(@Nullable Span span) {
  // A null span opens a scope with no current trace context.
  if (span == null) {
    return new SpanInScope(currentTraceContext.newScope(null));
  }
  return new SpanInScope(currentTraceContext.newScope(span.context()));
}
|
@Test void withSpanInScope() {
Span current = tracer.newTrace();
// Inside the scope the span is current; the customizer is a distinct object.
try (SpanInScope scope = tracer.withSpanInScope(current)) {
assertThat(tracer.currentSpan())
.isEqualTo(current);
assertThat(tracer.currentSpanCustomizer())
.isNotEqualTo(current)
.isNotEqualTo(NoopSpanCustomizer.INSTANCE);
}
// context was cleared
assertThat(tracer.currentSpan()).isNull();
}
|
@Override
public IndexedFieldProvider<Class<?>> getIndexedFieldProvider() {
    // Lazily resolve indexing metadata per entity type; types without an
    // index descriptor fall back to the no-indexing sentinel.
    return entityType -> {
        IndexDescriptor descriptor = getIndexDescriptor(entityType);
        return descriptor == null
                ? CLASS_NO_INDEXING
                : new SearchFieldIndexingMetadata(descriptor);
    };
}
|
@Test
public void testRecognizeAnalyzedField() {
// TestEntity's "description" field must be reported as analyzed by the provider.
assertThat(propertyHelper.getIndexedFieldProvider().get(TestEntity.class).isAnalyzed(new String[]{"description"})).isTrue();
}
|
/**
 * Opens an input stream over the byte range
 * [position, position + bytesToRead - 1] of the backing GCS object.
 *
 * @param position first byte offset to read (inclusive)
 * @param bytesToRead number of bytes the caller intends to read
 * @return the object's data stream positioned at the requested range
 * @throws RuntimeException wrapping the ServiceException if the object
 *         cannot be fetched or its stream cannot be opened
 */
@Override
protected InputStream openObjectInputStream(
    long position, int bytesToRead) {
  GSObject object;
  try {
    // Range request: last byte index is inclusive, hence the -1.
    object = mClient.getObject(mBucketName, mPath, null,
        null, null, null, position, position + bytesToRead - 1);
  } catch (ServiceException e) {
    String errorMessage = String
        .format("Failed to get object: %s bucket: %s", mPath, mBucketName);
    throw new RuntimeException(errorMessage, e);
  }
  try {
    return object.getDataInputStream();
  } catch (ServiceException e) {
    // Fixed message — the original used String.format with no format
    // arguments, which was pointless and treated the text as a pattern.
    throw new RuntimeException("Failed to open GCS InputStream", e);
  }
}
|
@Test
public void openObjectInputStream() throws Exception {
GSObject object = Mockito.mock(GSObject.class);
BufferedInputStream objectInputStream = Mockito.mock(BufferedInputStream.class);
Mockito.when(mClient.getObject(Mockito.anyString(), Mockito.anyString(),
Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(),
Mockito.any(), Mockito.any())).thenReturn(object);
Mockito.when(object.getDataInputStream()).thenReturn(objectInputStream);
// test successful open object input stream
long position = 0L;
int bytesToRead = 10;
InputStream inputStream = mPositionReader.openObjectInputStream(position, bytesToRead);
Assert.assertTrue(inputStream instanceof BufferedInputStream);
// test open object input stream with exception
// NOTE(review): this stubs the 2-arg getObject overload, not the 8-arg one
// the focal method calls — confirm the stub actually takes effect.
Mockito.when(mClient.getObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString()))
.thenThrow(ServiceException.class);
try {
mPositionReader.openObjectInputStream(position, bytesToRead);
} catch (Exception e) {
// NOTE(review): the focal method wraps failures in RuntimeException, not
// IOException, and if no exception is thrown the test passes silently
// (missing a fail() after the call) — verify intent.
Assert.assertTrue(e instanceof IOException);
String errorMessage = String
.format("Failed to get object: %s bucket: %s", mPath, mBucketName);
Assert.assertEquals(errorMessage, e.getMessage());
}
}
|
public static FuryBuilder builder() {
    // Fresh builder per call; no state is shared between builders.
    final FuryBuilder furyBuilder = new FuryBuilder();
    return furyBuilder;
}
|
@Test
public void testSerializePackageLevelBean() {
// Codegen disabled and class registration not required, so a
// package-private bean can round-trip through the generic Object path.
Fury fury =
Fury.builder()
.withLanguage(Language.JAVA)
.withCodegen(false)
.requireClassRegistration(false)
.build();
PackageLevelBean o = new PackageLevelBean();
o.f1 = 10;
o.f2 = 1;
serDeCheckSerializer(fury, o, "Object");
}
|
@Override
@SuccessResponse(statuses = { HttpStatus.S_204_NO_CONTENT })
@ServiceErrors(INVALID_PERMISSIONS)
@ParamError(code = INVALID_ID, parameterNames = { "albumEntryId" })
public UpdateResponse update(CompoundKey key, AlbumEntry entity)
{
  final long photoId = (Long) key.getPart("photoId");
  final long albumId = (Long) key.getPart("albumId");
  // make sure photo and album exist
  if (!_photoDb.getData().containsKey(photoId))
  {
    throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
                                     "Nonexistent photo ID: " + photoId);
  }
  if (!_albumDb.getData().containsKey(albumId))
  {
    throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
                                     "Nonexistent album ID: " + albumId);
  }
  // disallow changing entity ID
  if (entity.hasAlbumId() || entity.hasPhotoId())
  {
    throw new RestLiServiceException(HttpStatus.S_400_BAD_REQUEST,
                                     "Photo/album ID are not acceptable in request");
  }
  // make sure the ID in the entity is consistent with the key in the database
  entity.setPhotoId(photoId);
  entity.setAlbumId(albumId);
  _db.getData().put(key, entity);
  return new UpdateResponse(HttpStatus.S_204_NO_CONTENT);
}
|
@Test(expectedExceptions = RestLiServiceException.class)
public void testBadUpdatePhotoId()
{
// photo 100 doesn't exist
// Update against a missing photo must be rejected with a service exception.
CompoundKey key = new CompoundKey().append("photoId", 100L).append("albumId", 1L);
AlbumEntry entry = new AlbumEntry().setAddTime(4);
_entryRes.update(key, entry);
}
|
public static List<String> extractLinks(String content) {
    // Nothing to scan in a null or empty document.
    if (content == null || content.isEmpty()) {
        return Collections.emptyList();
    }
    // Collect every match of the precompiled link pattern, in order.
    final List<String> links = new ArrayList<>();
    final Matcher linkMatcher = LINKS_PATTERN.matcher(content);
    while (linkMatcher.find()) {
        links.add(linkMatcher.group());
    }
    return links;
}
|
@Test
public void testExtractLinksFtp() {
// Only scheme-prefixed URLs are extracted; the bare "www.google.com" is not.
List<String> links = RegexUtils.extractLinks("Test with ftp://www.nutch.org is it found? " +
"What about www.google.com at ftp://www.google.de");
assertTrue(links.size() == 2, "Url not found!");
assertEquals("ftp://www.nutch.org", links.get(0), "Wrong URL");
assertEquals("ftp://www.google.de", links.get(1), "Wrong URL");
}
|
public void removeState(HttpRequest request, HttpResponse response) {
    // Expire the CSRF state cookie: null value, not http-only, max-age 0.
    response.addCookie(
        newCookieBuilder(request)
            .setName(CSRF_STATE_COOKIE)
            .setValue(null)
            .setHttpOnly(false)
            .setExpiry(0)
            .build());
}
|
@Test
public void remove_state() {
underTest.removeState(request, response);
// The cookie added to the response must be the expiring one: no value,
// max-age zero, which instructs the browser to delete it.
verify(response).addCookie(cookieArgumentCaptor.capture());
Cookie cookie = cookieArgumentCaptor.getValue();
assertThat(cookie.getValue()).isNull();
assertThat(cookie.getMaxAge()).isZero();
}
|
// Convenience overload: delegates to the four-arg variant with the last two
// arguments null (their semantics are defined by that overload).
List<String> decorateTextWithHtml(String text, DecorationDataHolder decorationDataHolder) {
return decorateTextWithHtml(text, decorationDataHolder, null, null);
}
|
@Test
public void should_support_crlf_line_breaks() {
// Each source line ends in CR+LF; offsets in the highlighting ranges below
// are byte offsets into this exact sample.
String crlfCodeSample =
"/**" + CR_END_OF_LINE + LF_END_OF_LINE +
"* @return metric generated by the decorator" + CR_END_OF_LINE + LF_END_OF_LINE +
"*/" + CR_END_OF_LINE + LF_END_OF_LINE +
"@DependedUpon" + CR_END_OF_LINE + LF_END_OF_LINE +
"public Metric generatesMetric() {" + CR_END_OF_LINE + LF_END_OF_LINE +
" return metric;" + CR_END_OF_LINE + LF_END_OF_LINE +
"}" + CR_END_OF_LINE + LF_END_OF_LINE;
DecorationDataHolder decorationData = new DecorationDataHolder();
// Ranges: comment block (cppd), annotation (a), and two keywords (k).
decorationData.loadSyntaxHighlightingData("0,52,cppd;54,67,a;69,75,k;106,112,k;");
HtmlTextDecorator htmlTextDecorator = new HtmlTextDecorator();
List<String> htmlOutput = htmlTextDecorator.decorateTextWithHtml(crlfCodeSample, decorationData);
// CRLF must split lines exactly once (no phantom empty lines between CR and LF);
// the trailing CRLF yields one final empty line.
assertThat(htmlOutput).containsExactly(
"<span class=\"cppd\">/**</span>",
"<span class=\"cppd\">* @return metric generated by the decorator</span>",
"<span class=\"cppd\">*/</span>",
"<span class=\"a\">@DependedUpon</span>",
"<span class=\"k\">public</span> Metric generatesMetric() {",
" <span class=\"k\">return</span> metric;",
"}",
""
);
}
|
/**
 * Validates a single quota (key, value) pair against the set of allowed keys,
 * returning ApiError.NONE on success or an INVALID_REQUEST error describing
 * the problem.
 */
static ApiError validateQuotaKeyValue(
    Map<String, ConfigDef.ConfigKey> validKeys,
    String key,
    double value
) {
    // Ensure we have an allowed quota key
    ConfigDef.ConfigKey configKey = validKeys.get(key);
    if (configKey == null) {
        return new ApiError(Errors.INVALID_REQUEST, "Invalid configuration key " + key);
    }
    // Quotas must be strictly positive regardless of type.
    if (value <= 0.0) {
        return new ApiError(Errors.INVALID_REQUEST, "Quota " + key + " must be greater than 0");
    }
    // Ensure the quota value is valid
    switch (configKey.type()) {
        case DOUBLE:
            return ApiError.NONE;
        case SHORT:
            if (value > Short.MAX_VALUE) {
                return new ApiError(Errors.INVALID_REQUEST,
                    "Proposed value for " + key + " is too large for a SHORT.");
            }
            // Integral types additionally require a whole-number value
            // (presumably checked by getErrorForIntegralQuotaValue — confirm).
            return getErrorForIntegralQuotaValue(value, key);
        case INT:
            if (value > Integer.MAX_VALUE) {
                return new ApiError(Errors.INVALID_REQUEST,
                    "Proposed value for " + key + " is too large for an INT.");
            }
            return getErrorForIntegralQuotaValue(value, key);
        case LONG: {
            // NOTE(review): (double) comparison against Long.MAX_VALUE cannot
            // represent the boundary exactly — a value of exactly 2^63 passes
            // this check; confirm whether that edge case matters here.
            if (value > Long.MAX_VALUE) {
                return new ApiError(Errors.INVALID_REQUEST,
                    "Proposed value for " + key + " is too large for a LONG.");
            }
            return getErrorForIntegralQuotaValue(value, key);
        }
        default:
            return new ApiError(Errors.UNKNOWN_SERVER_ERROR,
                "Unexpected config type " + configKey.type() + " should be Long or Double");
    }
}
|
@Test
public void testValidateQuotaKeyValueForValidRequestPercentage() {
// request_percentage is a DOUBLE quota: any positive value is accepted as-is.
assertEquals(ApiError.NONE, ClientQuotaControlManager.validateQuotaKeyValue(
VALID_CLIENT_ID_QUOTA_KEYS, "request_percentage", 56.62367));
}
|
public static MemorySegment wrapCopy(byte[] bytes, int start, int end)
        throws IllegalArgumentException {
    // The [start, end) range must be well-formed and lie inside the array.
    checkArgument(end >= start);
    checkArgument(end <= bytes.length);
    final int length = end - start;
    final MemorySegment copy = allocateUnpooledSegment(length);
    copy.put(0, bytes, start, length);
    return copy;
}
|
@Test
void testWrapPartialCopy() {
byte[] data = {1, 2, 3, 5, 6};
// Copy only the first half of the array into a fresh segment.
MemorySegment segment = MemorySegmentFactory.wrapCopy(data, 0, data.length / 2);
byte[] exp = new byte[segment.size()];
arraycopy(data, 0, exp, 0, exp.length);
assertThat(segment.getHeapMemory()).containsExactly(exp);
}
|
@Override
public HttpServletRequest readRequest(AwsProxyRequest request, SecurityContext securityContext, Context lambdaContext, ContainerConfig config)
        throws InvalidRequestEventException {
    // Expect the HTTP method and context to be populated. If they are not, we are handling an
    // unsupported event type.
    if (request.getHttpMethod() == null || request.getHttpMethod().isEmpty() || request.getRequestContext() == null) {
        throw new InvalidRequestEventException(INVALID_REQUEST_ERROR);
    }
    request.setPath(stripBasePath(request.getPath(), config));
    if (request.getMultiValueHeaders() != null) {
        String rawContentType = request.getMultiValueHeaders().getFirst(HttpHeaders.CONTENT_TYPE);
        if (rawContentType != null) {
            // put single as we always expect to have one and only one content type in a request.
            request.getMultiValueHeaders().putSingle(HttpHeaders.CONTENT_TYPE, getContentTypeWithCharset(rawContentType, config));
        }
    }
    // Wrap the proxy event and expose the Lambda/API Gateway context objects
    // as servlet request attributes.
    AwsProxyHttpServletRequest servletRequest = new AwsProxyHttpServletRequest(request, lambdaContext, securityContext, config);
    servletRequest.setServletContext(servletContext);
    servletRequest.setAttribute(API_GATEWAY_CONTEXT_PROPERTY, request.getRequestContext());
    servletRequest.setAttribute(API_GATEWAY_STAGE_VARS_PROPERTY, request.getStageVariables());
    servletRequest.setAttribute(API_GATEWAY_EVENT_PROPERTY, request);
    servletRequest.setAttribute(ALB_CONTEXT_PROPERTY, request.getRequestContext().getElb());
    servletRequest.setAttribute(LAMBDA_CONTEXT_PROPERTY, lambdaContext);
    servletRequest.setAttribute(JAX_SECURITY_CONTEXT_PROPERTY, securityContext);
    return servletRequest;
}
|
@Test
void readRequest_validAwsProxy_populatedRequest() {
// A well-formed proxy event with a custom header must surface that header
// on the resulting servlet request.
AwsProxyRequest request = new AwsProxyRequestBuilder("/path", "GET").header(TEST_HEADER_KEY, TEST_HEADER_VALUE).build();
try {
HttpServletRequest servletRequest = reader.readRequest(request, null, null, ContainerConfig.defaultConfig());
assertNotNull(servletRequest.getHeader(TEST_HEADER_KEY));
assertEquals(TEST_HEADER_VALUE, servletRequest.getHeader(TEST_HEADER_KEY));
} catch (InvalidRequestEventException e) {
e.printStackTrace();
fail("Could not read request");
}
}
|
public void persistDatabaseNameListenerAssisted(final ListenerAssisted listenerAssisted) {
    // Ephemeral node keyed by database name, holding the YAML-serialized entry.
    final String nodePath = ListenerAssistedNodePath.getDatabaseNameNodePath(listenerAssisted.getDatabaseName());
    final String yamlContent = YamlEngine.marshal(listenerAssisted);
    repository.persistEphemeral(nodePath, yamlContent);
}
|
@Test
void assertPersistDatabaseNameListenerAssisted() {
new ListenerAssistedPersistService(repository).persistDatabaseNameListenerAssisted(new ListenerAssisted("foo_db", ListenerAssistedType.CREATE_DATABASE));
// Node path is derived from the database name; the payload is the
// YAML-marshaled entry with platform-specific line separators.
verify(repository).persistEphemeral("/listener_assisted/foo_db", "databaseName: foo_db" + System.lineSeparator() + "listenerAssistedType: CREATE_DATABASE" + System.lineSeparator());
}
|
/**
 * Builds container layers from an exploded JAR: dependency layers first,
 * then (non-empty) resources and classes layers extracted from the JAR's
 * unzipped contents.
 */
@Override
public List<FileEntriesLayer> createLayers() throws IOException {
// Clear the exploded-artifact root first
if (Files.exists(targetExplodedJarRoot)) {
MoreFiles.deleteRecursively(targetExplodedJarRoot, RecursiveDeleteOption.ALLOW_INSECURE);
}
// Add dependencies layers.
List<FileEntriesLayer> layers =
JarLayers.getDependenciesLayers(jarPath, ProcessingMode.exploded);
// Determine class and resource files in the directory containing jar contents and create
// FileEntriesLayer for each type of layer (classes or resources).
ZipUtil.unzip(jarPath, targetExplodedJarRoot, true);
// Classes are .class files; everything else that is a regular file is a resource.
Predicate<Path> isClassFile = path -> path.getFileName().toString().endsWith(".class");
Predicate<Path> isResourceFile = isClassFile.negate().and(Files::isRegularFile);
FileEntriesLayer classesLayer =
ArtifactLayers.getDirectoryContentsAsLayer(
ArtifactLayers.CLASSES,
targetExplodedJarRoot,
isClassFile,
JarLayers.APP_ROOT.resolve("explodedJar"));
FileEntriesLayer resourcesLayer =
ArtifactLayers.getDirectoryContentsAsLayer(
ArtifactLayers.RESOURCES,
targetExplodedJarRoot,
isResourceFile,
JarLayers.APP_ROOT.resolve("explodedJar"));
// Resources are added before classes; empty layers are omitted entirely.
if (!resourcesLayer.getEntries().isEmpty()) {
layers.add(resourcesLayer);
}
if (!classesLayer.getEntries().isEmpty()) {
layers.add(classesLayer);
}
return layers;
}
|
@Test
public void testCreateLayers_dependencyDoesNotExist() throws URISyntaxException {
// The JAR's manifest Class-Path names dependency.jar, which is absent next
// to the JAR, so layer creation must fail with a descriptive message.
Path standardJar = Paths.get(Resources.getResource(STANDARD_SINGLE_DEPENDENCY_JAR).toURI());
Path destDir = temporaryFolder.getRoot().toPath();
StandardExplodedProcessor standardExplodedModeProcessor =
new StandardExplodedProcessor(standardJar, destDir, JAR_JAVA_VERSION);
IllegalArgumentException exception =
assertThrows(
IllegalArgumentException.class, () -> standardExplodedModeProcessor.createLayers());
assertThat(exception)
.hasMessageThat()
.isEqualTo(
"Dependency required by the JAR (as specified in `Class-Path` in the JAR manifest) doesn't exist: "
+ standardJar.getParent().resolve("dependency.jar"));
}
|
/**
 * Deserializes JSON bytes from the given topic into the target type,
 * returning null for null input and wrapping any failure in a
 * SerializationException with the topic name in the message.
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
if (bytes == null) {
return null;
}
// don't use the JsonSchemaConverter to read this data because
// we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
// which is not currently available in the standard converters
final JsonNode value = isJsonSchema
? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
: MAPPER.readTree(bytes);
// Coerce the parsed tree to the declared schema, starting at the root path "$".
final Object coerced = enforceFieldType(
"$",
new JsonValueContext(value, schema)
);
if (LOG.isTraceEnabled()) {
LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
}
return SerdeUtils.castToTargetType(coerced, targetType);
} catch (final Exception e) {
// Clear location in order to avoid logging data, for security reasons
if (e instanceof JsonParseException) {
((JsonParseException) e).clearLocation();
}
throw new SerializationException(
"Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
}
}
|
@Test
public void shouldIncludePathForErrorsInObjectFieldsValue() {
// Given: an otherwise valid order whose "ordertime" field has the wrong type.
final Map<String, Object> value = new HashMap<>(AN_ORDER);
value.put("ordertime", true);
final byte[] bytes = serializeJson(value);
// When:
final Exception e = assertThrows(
SerializationException.class,
() -> deserializer.deserialize(SOME_TOPIC, bytes)
);
// Then: the cause message pinpoints the offending field path.
assertThat(e.getCause(), (hasMessage(endsWith(", path: $.ORDERTIME"))));
}
|
public static <T> int indexOfSub(T[] array, T[] subArray) {
    // Search for the sub-array starting from the beginning of the array.
    final int searchFrom = 0;
    return indexOfSub(array, searchFrom, subArray);
}
|
@Test
public void indexOfSubTest2() {
// 0x56 also appears at index 1, but only the occurrence at index 3 is
// followed by 0x78 — so the first full match starts at 3.
Integer[] a = {0x12, 0x56, 0x34, 0x56, 0x78, 0x9A};
Integer[] b = {0x56, 0x78};
int i = ArrayUtil.indexOfSub(a, b);
assertEquals(3, i);
}
|
/**
 * Merges an update (newConfig) into a clone of the existing sink config,
 * rejecting changes to immutable properties (tenant, namespace, name,
 * subscription, input topics, processing guarantees, ordering, autoAck).
 * NOTE(review): this method also mutates newConfig in place (it fills in
 * and rewrites newConfig's inputSpecs) — callers should not reuse newConfig
 * afterward; confirm whether that side effect is intended.
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
SinkConfig mergedConfig = clone(existingConfig);
// --- Identity fields must match exactly. ---
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Sink Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
// Subscription name may be supplied, but only if unchanged.
if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
.equals(existingConfig.getSourceSubscriptionName())) {
throw new IllegalArgumentException("Subscription Name cannot be altered");
}
// Normalize inputSpecs maps so the merging code below can assume non-null.
if (newConfig.getInputSpecs() == null) {
newConfig.setInputSpecs(new HashMap<>());
}
if (mergedConfig.getInputSpecs() == null) {
mergedConfig.setInputSpecs(new HashMap<>());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
// --- Fold the various topic-declaration styles into newConfig.inputSpecs. ---
if (newConfig.getInputs() != null) {
newConfig.getInputs().forEach((topicName -> {
newConfig.getInputSpecs().putIfAbsent(topicName,
ConsumerConfig.builder().isRegexPattern(false).build());
}));
}
if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
ConsumerConfig.builder()
.isRegexPattern(true)
.build());
}
if (newConfig.getTopicToSerdeClassName() != null) {
newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.serdeClassName(serdeClassName)
.isRegexPattern(false)
.build());
});
}
if (newConfig.getTopicToSchemaType() != null) {
newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
newConfig.getInputSpecs().put(topicName,
ConsumerConfig.builder()
.schemaType(schemaClassname)
.isRegexPattern(false)
.build());
});
}
// Every referenced topic must already exist with the same regex-ness;
// accepted consumer configs overwrite the merged ones.
if (!newConfig.getInputSpecs().isEmpty()) {
SinkConfig finalMergedConfig = mergedConfig;
newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
if (!existingConfig.getInputSpecs().containsKey(topicName)) {
throw new IllegalArgumentException("Input Topics cannot be altered");
}
if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
throw new IllegalArgumentException(
"isRegexPattern for input topic " + topicName + " cannot be altered");
}
finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
});
}
// --- Immutable behavioral flags: present values must equal existing ones. ---
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
.equals(existingConfig.getRetainOrdering())) {
throw new IllegalArgumentException("Retain Ordering cannot be altered");
}
if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
.equals(existingConfig.getRetainKeyOrdering())) {
throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
}
if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
throw new IllegalArgumentException("AutoAck cannot be altered");
}
// --- Mutable fields: copy over when provided. ---
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (newConfig.getTimeoutMs() != null) {
mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
}
if (newConfig.getCleanupSubscription() != null) {
mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (newConfig.getTransformFunction() != null) {
mergedConfig.setTransformFunction(newConfig.getTransformFunction());
}
if (newConfig.getTransformFunctionClassName() != null) {
mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
}
if (newConfig.getTransformFunctionConfig() != null) {
mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
}
return mergedConfig;
}
|
/**
 * Updating a sink with a different namespace must be rejected by validateUpdate.
 */
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Namespaces differ")
public void testMergeDifferentNamespace() {
    SinkConfig existing = createSinkConfig();
    SinkConfig updated = createUpdatedSinkConfig("namespace", "Different");
    SinkConfigUtils.validateUpdate(existing, updated);
}
|
/**
 * Appends a voter set to the history at the given log offset.
 *
 * @param offset the log offset at which this voter set takes effect
 * @param voters the new voter set
 * @throws IllegalArgumentException if the last replicated voter set does not
 *         share an overlapping majority with {@code voters}
 */
public void addAt(long offset, VoterSet voters) {
    votersHistory.lastEntry().ifPresent(entry -> {
        // Only entries that came from the replicated log (offset >= 0) are checked
        // for an overlapping majority. The static voter set and the bootstrapped
        // voter set are skipped because they come from the configuration and the
        // KRaft leader never guaranteed that they are the same across all replicas.
        if (entry.offset() >= 0 && !entry.value().hasOverlappingMajority(voters)) {
            throw new IllegalArgumentException(
                String.format(
                    "Last voter set %s doesn't have an overlapping majority with the new voter set %s",
                    entry.value(),
                    voters
                )
            );
        }
    });
    votersHistory.addAt(offset, voters);
}
|
/**
 * addAt must reject an offset of -2 without altering the history, and
 * valueAtOrBefore must resolve each recorded offset to the correct voter set.
 */
@Test
void testAddAt() {
    Map<Integer, VoterSet.VoterNode> nodes = VoterSetTest.voterMap(IntStream.of(1, 2, 3), true);
    VoterSet bootstrapSet = VoterSet.fromMap(new HashMap<>(nodes));
    VoterSetHistory history = new VoterSetHistory(bootstrapSet);

    // addAt(-2, ...) must be rejected and leave the history unchanged.
    assertThrows(
        IllegalArgumentException.class,
        () -> history.addAt(-2, VoterSet.fromMap(VoterSetTest.voterMap(IntStream.of(1, 2, 3), true)))
    );
    assertEquals(bootstrapSet, history.lastValue());

    // Add voter 4 at offset 100; the set is visible from offset 100 onwards.
    nodes.put(4, VoterSetTest.voterNode(4, true));
    VoterSet withVoterFour = VoterSet.fromMap(new HashMap<>(nodes));
    history.addAt(100, withVoterFour);
    assertEquals(withVoterFour, history.lastValue());
    assertEquals(Optional.empty(), history.valueAtOrBefore(99));
    assertEquals(Optional.of(withVoterFour), history.valueAtOrBefore(100));

    // Remove voter 4 at offset 200; earlier offsets still resolve to the prior set.
    nodes.remove(4);
    VoterSet withoutVoterFour = VoterSet.fromMap(new HashMap<>(nodes));
    history.addAt(200, withoutVoterFour);
    assertEquals(withoutVoterFour, history.lastValue());
    assertEquals(Optional.empty(), history.valueAtOrBefore(99));
    assertEquals(Optional.of(withVoterFour), history.valueAtOrBefore(199));
    assertEquals(Optional.of(withoutVoterFour), history.valueAtOrBefore(200));
}
|
/**
 * Returns an immutable list with the same contents as {@code list}: the empty
 * singleton for no elements, a singleton list for one element, the list itself
 * when already immutable, and an unmodifiable copy otherwise.
 *
 * @param list the source list; never mutated
 * @return an immutable list equal to {@code list}
 */
public static <E> List<E> ensureImmutable(List<E> list) {
    switch (list.size()) {
        case 0:
            return Collections.emptyList();
        case 1:
            // Faster to make a copy than check the type to see if it is already a singleton list.
            return Collections.singletonList(list.get(0));
        default:
            return isImmutable(list) ? list : Collections.unmodifiableList(new ArrayList<E>(list));
    }
}
|
/**
 * A mutable multi-element list must come back wrapped in an unmodifiable
 * view, which the JDK exposes as an "Unmodifiable*" class.
 */
@Test void ensureImmutable_convertsToUnmodifiableList() {
    List<Long> source = new ArrayList<>();
    source.add(1L);
    source.add(2L);
    String wrapperName = Lists.ensureImmutable(source).getClass().getSimpleName();
    assertThat(wrapperName).startsWith("Unmodifiable");
}
|
/**
 * Returns a {@link Permutor} that reorders a list of feed items according to
 * the requested sort order.
 *
 * @param sortOrder the ordering to apply
 * @return a permutor implementing the requested ordering
 * @throws IllegalArgumentException if no permutor exists for the sort order
 */
@NonNull
public static Permutor<FeedItem> getPermutor(@NonNull SortOrder sortOrder) {
    Comparator<FeedItem> itemOrder = null;
    Permutor<FeedItem> permutor = null;
    switch (sortOrder) {
        case EPISODE_TITLE_A_Z:
            itemOrder = (a, b) -> itemTitle(a).compareTo(itemTitle(b));
            break;
        case EPISODE_TITLE_Z_A:
            itemOrder = (a, b) -> itemTitle(b).compareTo(itemTitle(a));
            break;
        case DATE_OLD_NEW:
            itemOrder = (a, b) -> pubDate(a).compareTo(pubDate(b));
            break;
        case DATE_NEW_OLD:
            itemOrder = (a, b) -> pubDate(b).compareTo(pubDate(a));
            break;
        case DURATION_SHORT_LONG:
            itemOrder = (a, b) -> Integer.compare(duration(a), duration(b));
            break;
        case DURATION_LONG_SHORT:
            itemOrder = (a, b) -> Integer.compare(duration(b), duration(a));
            break;
        case EPISODE_FILENAME_A_Z:
            itemOrder = (a, b) -> itemLink(a).compareTo(itemLink(b));
            break;
        case EPISODE_FILENAME_Z_A:
            itemOrder = (a, b) -> itemLink(b).compareTo(itemLink(a));
            break;
        case FEED_TITLE_A_Z:
            itemOrder = (a, b) -> feedTitle(a).compareTo(feedTitle(b));
            break;
        case FEED_TITLE_Z_A:
            itemOrder = (a, b) -> feedTitle(b).compareTo(feedTitle(a));
            break;
        case RANDOM:
            permutor = Collections::shuffle;
            break;
        case SMART_SHUFFLE_OLD_NEW:
            permutor = queue -> smartShuffle(queue, true);
            break;
        case SMART_SHUFFLE_NEW_OLD:
            permutor = queue -> smartShuffle(queue, false);
            break;
        case SIZE_SMALL_LARGE:
            itemOrder = (a, b) -> Long.compare(size(a), size(b));
            break;
        case SIZE_LARGE_SMALL:
            itemOrder = (a, b) -> Long.compare(size(b), size(a));
            break;
        case COMPLETION_DATE_NEW_OLD:
            // NOTE(review): assumes every item has media with a completion date — confirm callers guarantee this.
            itemOrder = (a, b) -> b.getMedia().getPlaybackCompletionDate()
                    .compareTo(a.getMedia().getPlaybackCompletionDate());
            break;
        default:
            throw new IllegalArgumentException("Permutor not implemented");
    }
    if (itemOrder != null) {
        // Comparator-based orders become a plain in-place sort of the queue.
        final Comparator<FeedItem> order = itemOrder;
        permutor = queue -> queue.sort(order);
    }
    return permutor;
}
|
/**
 * SIZE_SMALL_LARGE must reorder the fixture queue into ascending size order.
 */
@Test
public void testPermutorForRule_size_asc() {
    Permutor<FeedItem> permutor = FeedItemPermutors.getPermutor(SortOrder.SIZE_SMALL_LARGE);
    List<FeedItem> itemList = getTestList();
    assertTrue(checkIdOrder(itemList, 1, 3, 2)); // before sorting
    permutor.reorder(itemList);
    assertTrue(checkIdOrder(itemList, 1, 2, 3)); // after sorting
}
|
/**
 * Parses a received Time Zone characteristic value and dispatches it to the
 * matching callback: unknown (-128), invalid (out of the -48..56 range or
 * unparsable), or valid, in which case the offset is converted to minutes
 * (the raw value is in 15-minute increments).
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    final Integer rawOffset = readTimeZone(data, 0);
    if (rawOffset == null) {
        // Characteristic value could not be parsed.
        onInvalidDataReceived(device, data);
        return;
    }
    final int offset = rawOffset;
    if (offset == -128) {
        // -128 is the spec's sentinel for "time zone not known"; check it
        // before the range check, as it also falls below -48.
        onUnknownTimeZoneReceived(device);
        return;
    }
    if (offset < -48 || offset > 56) {
        onInvalidDataReceived(device, data);
        return;
    }
    onTimeZoneReceived(device, offset * 15);
}
|
/**
 * A raw value of 60 lies outside the valid -48..56 range, so the callback
 * must report the data as invalid.
 */
@Test
public void onTimeZoneReceived_invalid() {
    final Data data = new Data(new byte[] { 60 });
    callback.onDataReceived(null, data);
    assertTrue(invalidData);
}
|
/**
 * Matches the certificate metadata against the user's Keycloak attributes:
 * both the BIN and the tax code must appear in the corresponding attribute
 * value lists for the verification to succeed.
 *
 * @param currentUser the user whose attributes are checked
 * @param edsMetadata certificate metadata carrying bin and taxCode
 * @return true only when both bin and taxCode match the user's attributes
 */
public static boolean verify(@Nonnull UserModel currentUser, @Nonnull CertificateMetadata edsMetadata) {
    logger.info("Trying to match via user attributes and bin & taxCode...");
    Map<String, List<String>> attrs = currentUser.getAttributes();
    final String bin = edsMetadata.getBin();
    final String taxCode = edsMetadata.getTaxCode();
    // With neither value present there is nothing to match against.
    if (bin == null && taxCode == null) {
        logger.warn("Input bin {} or taxCode {} is null", bin, taxCode);
        return false;
    }
    if (attrs == null || attrs.isEmpty()) {
        return false;
    }
    final List<String> taxCodeValues = attrs.get(KeycloakAttributes.TAX_CODE);
    final List<String> binValues = attrs.get(KeycloakAttributes.BIN);
    logger.info("Trying to match taxCode {} in values {}," +
        " bin {} in values {}", taxCode, taxCodeValues, bin, binValues);
    final boolean taxCodeMatches = taxCodeValues != null && taxCodeValues.contains(taxCode);
    final boolean binMatches = binValues != null && binValues.contains(bin);
    if (taxCodeMatches && binMatches) {
        return true;
    }
    logger.info("Not matched by this verifier.");
    return false;
}
|
/**
 * When the user carries both a matching TAX_CODE and a matching BIN
 * attribute, the verifier must report a successful match.
 */
@Test
public void testVerifyValidData() {
    CertificateMetadata metadata = new CertificateMetadata();
    metadata.withBin("BIN_VALUE");
    metadata.withTaxCode("TAX_CODE_VALUE");
    UserModel user = Mockito.mock(UserModel.class);
    // Attributes contain exactly the values the metadata carries.
    HashMap<String, List<String>> attributes = new HashMap<>();
    List<String> taxCodeList = new ArrayList<>();
    taxCodeList.add("TAX_CODE_VALUE");
    attributes.put(KeycloakAttributes.TAX_CODE, taxCodeList);
    List<String> binList = new ArrayList<>();
    binList.add("BIN_VALUE");
    attributes.put(KeycloakAttributes.BIN, binList);
    Mockito.when(user.getAttributes()).thenReturn(attributes);
    var res = CorporateUserVerifierImpl.verify(user, metadata);
    assertThat(res).isTrue();
}
|
/**
 * Resolves a primitive type name (e.g. "int") to its {@link Class}, or
 * returns {@code null} when the name does not denote a known primitive.
 *
 * @param name the candidate type name; may be null
 * @return the primitive class, or null if not a primitive name
 */
public static Class<?> resolvePrimitiveClassName(String name) {
    // Most class names are long, since they SHOULD sit in a package; names
    // longer than 8 characters can never be a key of the primitive map, so
    // the length check cheaply skips the lookup.
    if (name == null || name.length() > 8) {
        return null;
    }
    return (Class<?>) PRIMITIVE_TYPE_NAME_MAP.get(name);
}
|
/**
 * Every primitive keyword and every JVM descriptor for a one-dimensional
 * primitive array must resolve to the matching Class object.
 */
@Test
void testResolvePrimitiveClassName() {
    // Primitive keywords.
    assertThat(ClassUtils.resolvePrimitiveClassName("boolean") == boolean.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("byte") == byte.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("char") == char.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("double") == double.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("float") == float.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("int") == int.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("long") == long.class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("short") == short.class, is(true));
    // JVM array descriptors ("[Z" is boolean[], etc.).
    assertThat(ClassUtils.resolvePrimitiveClassName("[Z") == boolean[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[B") == byte[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[C") == char[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[D") == double[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[F") == float[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[I") == int[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[J") == long[].class, is(true));
    assertThat(ClassUtils.resolvePrimitiveClassName("[S") == short[].class, is(true));
}
|
/**
 * Publishes a log message: filters it by level and by the optional text
 * filter, then appends it to the central log store and — when a file writer
 * buffer is registered for this channel — to that buffer as well.
 *
 * @param logMessage      the message to log
 * @param channelLogLevel the channel's visibility threshold
 */
public void println( LogMessageInterface logMessage, LogLevel channelLogLevel ) {
    String subject = logMessage.getSubject();
    // Drop messages below the channel's visibility threshold.
    if ( !logMessage.getLevel().isVisible( channelLogLevel ) ) {
        return; // not for our eyes.
    }
    if ( subject == null ) {
        subject = DEFAULT_LOG_SUBJECT;
    }
    // Are the message filtered?
    // apply filter if defined, and not Error level
    boolean applyFilter = !logMessage.getLevel().isError() && !Utils.isEmpty( filter );
    if ( applyFilter && !subject.contains( filter ) && !logMessage.toString().contains( filter ) ) {
        return; // "filter" not found in row: don't show!
    }
    callBeforeLog();
    KettleLoggingEvent loggingEvent = new KettleLoggingEvent( logMessage, System.currentTimeMillis(),
        logMessage.getLevel() );
    KettleLogStore.getAppender().addLogggingEvent( loggingEvent );
    // Lazily resolve the file writer buffer for this log channel.
    if ( this.fileWriter == null ) {
        this.fileWriter = LoggingRegistry.getInstance().getLogChannelFileWriterBuffer( logChannelId );
    }
    // add to buffer
    if ( this.fileWriter != null ) {
        this.fileWriter.addEvent( loggingEvent );
    }
    callAfterLog();
}
|
/**
 * println must deliver the event both to the central appender and to the
 * channel's file writer buffer, each exactly once.
 */
@Test
public void testPrintlnWithNullLogChannelFileWriterBuffer() {
    LoggingBuffer loggingBuffer = mock( LoggingBuffer.class );
    // Route the static KettleLogStore.getAppender() call to our mock.
    kettleLogStoreMockedStatic.when( KettleLogStore::getAppender ).thenReturn( loggingBuffer );
    logChannel.println( logMsgInterface, LogLevel.BASIC );
    verify( logChFileWriterBuffer, times( 1 ) ).addEvent( any( KettleLoggingEvent.class ) );
    verify( loggingBuffer, times( 1 ) ).addLogggingEvent( any( KettleLoggingEvent.class ) );
}
|
/**
 * Returns the mail configuration, lazily creating a default one bound to the
 * current Camel context on first access.
 *
 * @return the (possibly freshly created) configuration, never null
 */
public MailConfiguration getConfiguration() {
    if (configuration != null) {
        return configuration;
    }
    configuration = new MailConfiguration(getCamelContext());
    return configuration;
}
|
/**
 * An SMTP URI with credentials, recipient, and folder options must be parsed
 * into the matching MailConfiguration fields, with documented defaults for
 * everything not specified (from, delete, fetchSize, debug...).
 */
@Test
public void testTo() {
    MailEndpoint endpoint
        = checkEndpoint("smtp://james@myhost:25/?password=secret&to=someone@outthere.com&folderName=XXX");
    MailConfiguration config = endpoint.getConfiguration();
    assertEquals("smtp", config.getProtocol(), "getProtocol()");
    assertEquals("myhost", config.getHost(), "getHost()");
    assertEquals(25, config.getPort(), "getPort()");
    assertEquals("james", config.getUsername(), "getUsername()");
    assertEquals("someone@outthere.com", config.getRecipients().get(Message.RecipientType.TO),
        "getRecipients().get(Message.RecipientType.TO)");
    assertEquals("XXX", config.getFolderName(), "folder");
    // Defaults below come from the endpoint, not the URI.
    assertEquals("camel@localhost", config.getFrom(), "from");
    assertEquals("secret", config.getPassword(), "password");
    assertFalse(config.isDelete());
    assertFalse(config.isIgnoreUriScheme());
    assertEquals(-1, config.getFetchSize(), "fetchSize");
    assertFalse(config.isDebugMode());
}
|
/**
 * Merges table options according to the merging strategy: EXCLUDING drops all
 * source options, OVERWRITING lets derived options replace source ones, and
 * the default strategy fails on any duplicate key.
 *
 * @param mergingStrategy how duplicate keys are handled
 * @param sourceOptions   options of the base table
 * @param derivedOptions  options declared on the derived table
 * @return the merged options
 * @throws ValidationException on a duplicate key under the default strategy
 */
public Map<String, String> mergeOptions(
        MergingStrategy mergingStrategy,
        Map<String, String> sourceOptions,
        Map<String, String> derivedOptions) {
    final Map<String, String> merged = new HashMap<>();
    if (mergingStrategy != MergingStrategy.EXCLUDING) {
        merged.putAll(sourceOptions);
    }
    for (Map.Entry<String, String> entry : derivedOptions.entrySet()) {
        final String key = entry.getKey();
        if (mergingStrategy != MergingStrategy.OVERWRITING && merged.containsKey(key)) {
            throw new ValidationException(
                String.format(
                    "There already exists an option ['%s' -> '%s'] in the "
                        + "base table. You might want to specify EXCLUDING OPTIONS or OVERWRITING OPTIONS.",
                    key, merged.get(key)));
        }
        merged.put(key, entry.getValue());
    }
    return merged;
}
|
/**
 * Under the default OPTIONS merging strategy, disjoint source and derived
 * options must simply be unioned.
 */
@Test
void mergeOptions() {
    Map<String, String> sourceOptions = new HashMap<>();
    sourceOptions.put("offset", "1");
    sourceOptions.put("format", "json");
    Map<String, String> derivedOptions = new HashMap<>();
    derivedOptions.put("format.ignore-errors", "true");
    Map<String, String> mergedOptions =
        util.mergeOptions(
            getDefaultMergingStrategies().get(FeatureOption.OPTIONS),
            sourceOptions,
            derivedOptions);
    Map<String, String> expectedOptions = new HashMap<>();
    expectedOptions.put("offset", "1");
    expectedOptions.put("format", "json");
    expectedOptions.put("format.ignore-errors", "true");
    assertThat(mergedOptions).isEqualTo(expectedOptions);
}
|
/**
 * Static factory creating a mount-point setting for the given target URI.
 *
 * @param target the URI to mount at; must be non-null and non-empty
 * @return the mount-to setting
 */
public static MountTo to(final String target) {
    // Fail fast on null/empty before constructing the setting.
    final String checked = checkNotNullOrEmpty(target, "Target should not be null");
    return new MountTo(checked);
}
|
/**
 * Files under a mounted directory must be served below the mount URI.
 */
@Test
public void should_mount_dir_to_uri() throws Exception {
    server.mount(MOUNT_DIR, to("/dir"));
    running(server, () -> assertThat(helper.get(remoteUrl("/dir/dir.response")), is("response from dir")));
}
|
/**
 * Returns a read-only live view of all registered plan coordinators; the
 * view reflects later changes to the underlying map and must not be modified
 * by callers.
 */
public Collection<PlanCoordinator> coordinators() {
    return Collections.unmodifiableCollection(mCoordinators.values());
}
|
/**
 * Once the tracker is full of finished jobs, adding one more must purge down
 * to the retention threshold: 5 finished jobs are kept plus the new one.
 */
@Test
public void testPurgeCount() throws Exception {
    PlanTracker tracker = new PlanTracker(10, 0, 5, mMockWorkflowTracker);
    assertEquals("tracker should be empty", 0, tracker.coordinators().size());
    fillJobTracker(tracker, 10);
    finishAllJobs(tracker);
    addJob(tracker, 100);
    // 5 retained finished jobs + the newly added job 100.
    assertEquals(6, tracker.coordinators().size());
}
|
/**
 * Returns a Fury instance to the idle pool, decrements the active count, and
 * wakes any threads waiting for an instance.
 *
 * @param fury the instance to return; must not be null
 * @throws RuntimeException wrapping any failure while updating the pool state
 */
public void returnFury(Fury fury) {
    Objects.requireNonNull(fury);
    // Acquire the lock BEFORE the try block: if lock() were inside try and
    // failed, the finally clause would call unlock() without holding the lock
    // and raise IllegalMonitorStateException, masking the original error.
    lock.lock();
    try {
        idleCacheQueue.add(fury);
        activeCacheNumber.decrementAndGet();
        // Wake consumers blocked waiting for an idle instance.
        furyCondition.signalAll();
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
        throw new RuntimeException(e);
    } finally {
        lock.unlock();
    }
}
|
/**
 * Returning a factory-created instance to the pool must complete without
 * throwing.
 */
@Test
public void testReturnFury() {
    Function<ClassLoader, Fury> furyFactory = getFuryFactory();
    Fury fury = furyFactory.apply(getClass().getClassLoader());
    ClassLoaderFuryPooled pooled = getPooled(4, 8, furyFactory);
    pooled.returnFury(fury);
}
|
/**
 * Validates that the given dict type string is unique.
 *
 * @param id   id of the record being updated, or null when creating
 * @param type the dict type to check; empty types are skipped
 */
@VisibleForTesting
void validateDictTypeUnique(Long id, String type) {
    if (StrUtil.isEmpty(type)) {
        return;
    }
    DictTypeDO existing = dictTypeMapper.selectByType(type);
    if (existing == null) {
        return;
    }
    // A null id means we are creating a new record, so any existing record
    // with this type is a duplicate; on update, only a record with a
    // different id counts as a duplicate.
    if (id == null || !existing.getId().equals(id)) {
        throw exception(DICT_TYPE_TYPE_DUPLICATE);
    }
}
|
/**
 * A random id with a random (unused) type must pass the uniqueness check
 * without throwing.
 */
@Test
public void testValidateDictTypeUnique_success() {
    // Invoke; expected to succeed.
    dictTypeService.validateDictTypeUnique(randomLongId(), randomString());
}
|
/**
 * Reads the whole file denoted by the given path string into a String.
 *
 * @param file path to the file to read
 * @return the file's content
 * @throws IOException if the file cannot be read
 */
public static String readToString(String file) throws IOException {
    return readToString(Paths.get(file));
}
|
/**
 * Both the stream-based and the path-string overloads of readToString must
 * return non-blank content for an existing classpath resource.
 */
@Test
public void testReadToString() throws IOException, URISyntaxException {
    String content = IOKit.readToString(IOKitTest.class.getResourceAsStream("/application.properties"));
    Assert.assertTrue(StringKit.isNotBlank(content));
    content = IOKit.readToString(Paths.get(IOKitTest.class.getResource("/application.properties").toURI()).toString());
    Assert.assertTrue(StringKit.isNotBlank(content));
}
|
/**
 * Saves the application carried by the given stream — either a plain XML
 * descriptor or a ZIP archive — into the application store and installs its
 * artifacts.
 *
 * @param stream application payload; closed by this method
 * @return the parsed application description
 * @throws ApplicationException if the payload cannot be read or saved
 */
public synchronized ApplicationDescription saveApplication(InputStream stream) {
    try (InputStream ais = stream) {
        // Buffer the whole payload so it can be re-read several times below.
        byte[] cache = toByteArray(ais);
        InputStream bis = new ByteArrayInputStream(cache);
        boolean plainXml = isPlainXml(cache);
        ApplicationDescription desc = plainXml ?
            parsePlainAppDescription(bis) : parseZippedAppDescription(bis);
        // Refuse to overwrite an already installed application.
        checkState(!appFile(desc.name(), APP_XML).exists(),
            "Application %s already installed", desc.name());
        if (plainXml) {
            expandPlainApplication(cache, desc);
        } else {
            bis.reset();
            boolean isSelfContainedJar = expandZippedApplication(bis, desc);
            if (isSelfContainedJar) {
                bis.reset();
                stageSelfContainedJar(bis, desc);
            }
            /*
             * Reset the ZIP file and reparse the app description now
             * that the ZIP is expanded onto the filesystem. This way any
             * file referenced as part of the description (i.e. app.png)
             * can be loaded into the app description.
             */
            bis.reset();
            desc = parseZippedAppDescription(bis);
            bis.reset();
            saveApplication(bis, desc, isSelfContainedJar);
        }
        installArtifacts(desc);
        return desc;
    } catch (IOException e) {
        throw new ApplicationException("Unable to save application", e);
    }
}
|
/**
 * Saving a plain XML app descriptor must yield a valid application
 * description. try-with-resources guarantees the stream is closed on every
 * path, including when saveApplication or validate throws.
 */
@Test
public void savePlainApp() throws IOException {
    try (InputStream stream = getClass().getResourceAsStream("app.xml")) {
        ApplicationDescription app = aar.saveApplication(stream);
        validate(app);
    }
}
|
/**
 * Reports whether the point lies inside this bounding box; all four edges
 * are inclusive.
 */
@Override
public boolean contains(double lat, double lon) {
    final boolean latInside = minLat <= lat && lat <= maxLat;
    final boolean lonInside = minLon <= lon && lon <= maxLon;
    return latInside && lonInside;
}
|
/**
 * A box contains itself and any box fully inside it, but not a box that
 * extends past its borders.
 */
@Test
public void testContains() {
    assertTrue(new BBox(1, 2, 0, 1).contains(new BBox(1, 2, 0, 1)));
    assertTrue(new BBox(1, 2, 0, 1).contains(new BBox(1.5, 2, 0.5, 1)));
    assertFalse(new BBox(1, 2, 0, 0.5).contains(new BBox(1.5, 2, 0.5, 1)));
}
|
/**
 * Opens a download stream for the given file via the Box content endpoint.
 * For resumed (append) transfers a Range header is added; the returned
 * stream releases the underlying HTTP connection when closed.
 *
 * @param file     file to download
 * @param status   transfer status; append mode triggers a ranged request
 * @param callback connection callback (unused here)
 * @return stream over the response body
 * @throws BackgroundException mapped from any I/O failure
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final BoxApiClient client = new BoxApiClient(session.getClient());
        final HttpGet request = new HttpGet(String.format("%s/files/%s/content", client.getBasePath(), fileid.getFileId(file)));
        if(status.isAppend()) {
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            // An open-ended range (-1 end) requests everything from start onwards.
            if(-1 == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
        }
        final CloseableHttpResponse response = session.getClient().execute(request);
        // Wrapping ties the connection lifetime to the returned stream.
        return new HttpMethodReleaseInputStream(response, status);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map("Download {0} failed", e, file);
    }
}
|
/**
 * Reading a file that does not exist remotely must surface as a
 * NotfoundException.
 */
@Test(expected = NotfoundException.class)
public void testReadNotFound() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final TransferStatus status = new TransferStatus();
    new BoxReadFeature(session, fileid).read(new Path(new DefaultHomeFinderService(session).find(), "nosuchname", EnumSet.of(Path.Type.file)), status, new DisabledConnectionCallback());
}
|
/**
 * Dispatches a named command to a declared public method with a matching
 * (case-insensitive) name and matching arity.
 *
 * @param command command name, matched case-insensitively against methods
 * @param args    raw arguments, one per method parameter
 * @return the method's result; String results are converted to bytes
 * @throws RedisPipelineException             on argument errors while pipelined
 * @throws InvalidDataAccessApiUsageException on argument errors otherwise
 * @throws UnsupportedOperationException      when no matching method exists
 */
@Override
public Object execute(String command, byte[]... args) {
    for (Method method : this.getClass().getDeclaredMethods()) {
        // Match by case-insensitive name, public visibility, and arity.
        if (method.getName().equalsIgnoreCase(command)
                && Modifier.isPublic(method.getModifiers())
                && (method.getParameterTypes().length == args.length)) {
            try {
                Object result = execute(method, args);
                // Normalize String results to bytes, mirroring the raw Redis protocol.
                if (result instanceof String) {
                    return ((String) result).getBytes();
                }
                return result;
            } catch (IllegalArgumentException e) {
                if (isPipelined()) {
                    throw new RedisPipelineException(e);
                }
                throw new InvalidDataAccessApiUsageException(e.getMessage(), e);
            }
        }
    }
    // Include the command and arity so callers can tell what was unsupported,
    // instead of a bare exception with no message.
    throw new UnsupportedOperationException(
        String.format("Command '%s' with %d argument(s) is not supported", command, args.length));
}
|
/**
 * execute must dispatch by name: "ttl" on a missing key yields -2, and a
 * zero-argument command like "flushDb" also resolves.
 */
@Test
public void testExecute() {
    Long s = (Long) connection.execute("ttl", "key".getBytes());
    assertThat(s).isEqualTo(-2);
    connection.execute("flushDb");
}
|
/**
 * Returns the singleton log collector instance.
 */
public static LogCollector<ShenyuRequestLog> getInstance() {
    return INSTANCE;
}
|
/**
 * start() must flip the collector's internal "started" flag to true, collect
 * must accept a log while running, and close() must flip the flag back.
 * The private flag is read via reflection.
 */
@Test
public void testAbstractLogCollector() throws Exception {
    KafkaLogCollector.getInstance().start();
    Field field1 = AbstractLogCollector.class.getDeclaredField("started");
    field1.setAccessible(true);
    Assertions.assertEquals(field1.get(KafkaLogCollector.getInstance()).toString(), "true");
    KafkaLogCollector.getInstance().collect(shenyuRequestLog);
    KafkaLogCollector.getInstance().close();
    Field field2 = AbstractLogCollector.class.getDeclaredField("started");
    field2.setAccessible(true);
    Assertions.assertEquals(field2.get(KafkaLogCollector.getInstance()).toString(), "false");
}
|
/**
 * Describes the given consumer groups through the group coordinator.
 *
 * @param groupIds group ids to describe
 * @param options  request options (authorized operations flag, timeout)
 * @return per-group futures keyed by the plain group id
 */
@Override
public DescribeConsumerGroupsResult describeConsumerGroups(final Collection<String> groupIds,
                                                           final DescribeConsumerGroupsOptions options) {
    SimpleAdminApiFuture<CoordinatorKey, ConsumerGroupDescription> future =
        DescribeConsumerGroupsHandler.newFuture(groupIds);
    DescribeConsumerGroupsHandler handler = new DescribeConsumerGroupsHandler(options.includeAuthorizedOperations(), logContext);
    invokeDriver(handler, future, options.timeoutMs);
    // Re-key the results from CoordinatorKey to the plain group id expected by callers.
    return new DescribeConsumerGroupsResult(future.all().entrySet().stream()
        .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue)));
}
|
/**
 * describeConsumerGroups must retry through retriable FindCoordinator and
 * DescribeGroups errors (COORDINATOR_NOT_AVAILABLE, COORDINATOR_LOAD_IN_PROGRESS,
 * NOT_COORDINATOR), fall back to the classic DescribeGroups API when
 * ConsumerGroupDescribe is unsupported, and finally surface the group's
 * members with their decoded assignments.
 */
@Test
public void testDescribeConsumerGroups() throws Exception {
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Retriable FindCoordinatorResponse errors should be retried
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode()));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        // The first request sent will be a ConsumerGroupDescribe request. Let's
        // fail it in order to fail back to using the classic version.
        env.kafkaClient().prepareUnsupportedVersionResponse(
            request -> request instanceof ConsumerGroupDescribeRequest);
        DescribeGroupsResponseData data = new DescribeGroupsResponseData();
        // Retriable errors should be retried
        data.groups().add(DescribeGroupsResponse.groupMetadata(
            GROUP_ID,
            Errors.COORDINATOR_LOAD_IN_PROGRESS,
            "",
            "",
            "",
            Collections.emptyList(),
            Collections.emptySet()));
        env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
        /*
         * We need to return two responses here, one with NOT_COORDINATOR error when calling describe consumer group
         * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a
         * FindCoordinatorResponse.
         *
         * And the same reason for COORDINATOR_NOT_AVAILABLE error response
         */
        data = new DescribeGroupsResponseData();
        data.groups().add(DescribeGroupsResponse.groupMetadata(
            GROUP_ID,
            Errors.NOT_COORDINATOR,
            "",
            "",
            "",
            Collections.emptyList(),
            Collections.emptySet()));
        env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        data = new DescribeGroupsResponseData();
        data.groups().add(DescribeGroupsResponse.groupMetadata(
            GROUP_ID,
            Errors.COORDINATOR_NOT_AVAILABLE,
            "",
            "",
            "",
            Collections.emptyList(),
            Collections.emptySet()));
        env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
        env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        // Finally, a successful response carrying two members with identical assignments.
        data = new DescribeGroupsResponseData();
        TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
        TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
        TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
        final List<TopicPartition> topicPartitions = new ArrayList<>();
        topicPartitions.add(0, myTopicPartition0);
        topicPartitions.add(1, myTopicPartition1);
        topicPartitions.add(2, myTopicPartition2);
        final ByteBuffer memberAssignment = ConsumerProtocol.serializeAssignment(new ConsumerPartitionAssignor.Assignment(topicPartitions));
        byte[] memberAssignmentBytes = new byte[memberAssignment.remaining()];
        memberAssignment.get(memberAssignmentBytes);
        DescribedGroupMember memberOne = DescribeGroupsResponse.groupMember("0", "instance1", "clientId0", "clientHost", memberAssignmentBytes, null);
        DescribedGroupMember memberTwo = DescribeGroupsResponse.groupMember("1", "instance2", "clientId1", "clientHost", memberAssignmentBytes, null);
        List<MemberDescription> expectedMemberDescriptions = new ArrayList<>();
        expectedMemberDescriptions.add(convertToMemberDescriptions(memberOne,
            new MemberAssignment(new HashSet<>(topicPartitions))));
        expectedMemberDescriptions.add(convertToMemberDescriptions(memberTwo,
            new MemberAssignment(new HashSet<>(topicPartitions))));
        data.groups().add(DescribeGroupsResponse.groupMetadata(
            GROUP_ID,
            Errors.NONE,
            "",
            ConsumerProtocol.PROTOCOL_TYPE,
            "",
            asList(memberOne, memberTwo),
            Collections.emptySet()));
        env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data));
        final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID));
        final ConsumerGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get();
        assertEquals(1, result.describedGroups().size());
        assertEquals(GROUP_ID, groupDescription.groupId());
        assertEquals(2, groupDescription.members().size());
        assertEquals(expectedMemberDescriptions, groupDescription.members());
    }
}
|
/**
 * Tests whether the pickle's tags satisfy every configured tag expression.
 * With no expressions configured, every pickle matches.
 */
@Override
public boolean test(Pickle pickle) {
    if (expressions.isEmpty()) {
        return true;
    }
    // All expressions must match (logical AND): there is a match exactly
    // when no expression fails to evaluate against the tags.
    List<String> tags = pickle.getTags();
    return !expressions.stream()
        .anyMatch(expression -> !expression.evaluate(tags));
}
|
/**
 * A predicate for a single tag must match a pickle carrying exactly that tag.
 */
@Test
void single_tag_predicate_matches_pickle_with_same_single_tag() {
    Pickle pickle = createPickleWithTags("@FOO");
    TagPredicate predicate = createPredicate("@FOO");
    assertTrue(predicate.test(pickle));
}
|
/**
 * Executes an HTTP request over {@link HttpURLConnection}: applies headers
 * and timeouts, serializes the body (form-encoding it for
 * application/x-www-form-urlencoded), and wraps the connection in a
 * JdkHttpClientResponse.
 *
 * @param httpMethod        HTTP method name
 * @param requestHttpEntity headers, config, and optional body
 * @return the wrapped response
 * @throws Exception on connection or serialization failure
 */
@Override
public HttpClientResponse execute(URI uri, String httpMethod, RequestHttpEntity requestHttpEntity)
    throws Exception {
    final Object body = requestHttpEntity.getBody();
    final Header headers = requestHttpEntity.getHeaders();
    replaceDefaultConfig(requestHttpEntity.getHttpClientConfig());
    HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
    Map<String, String> headerMap = headers.getHeader();
    if (headerMap != null && headerMap.size() > 0) {
        for (Map.Entry<String, String> entry : headerMap.entrySet()) {
            conn.setRequestProperty(entry.getKey(), entry.getValue());
        }
    }
    conn.setConnectTimeout(this.httpClientConfig.getConTimeOutMillis());
    conn.setReadTimeout(this.httpClientConfig.getReadTimeOutMillis());
    conn.setRequestMethod(httpMethod);
    if (body != null && !"".equals(body)) {
        String contentType = headers.getValue(HttpHeaderConsts.CONTENT_TYPE);
        // Non-String bodies are serialized to JSON first.
        String bodyStr = body instanceof String ? (String) body : JacksonUtils.toJson(body);
        if (MediaType.APPLICATION_FORM_URLENCODED.equals(contentType)) {
            // Form bodies are re-encoded as key=value pairs using the header charset.
            Map<String, String> map = JacksonUtils.toObj(bodyStr, HashMap.class);
            bodyStr = HttpUtils.encodingParams(map, headers.getCharset());
        }
        if (bodyStr != null) {
            conn.setDoOutput(true);
            // NOTE(review): getBytes() uses the platform default charset here —
            // confirm whether headers.getCharset() should be used instead.
            byte[] b = bodyStr.getBytes();
            conn.setRequestProperty(CONTENT_LENGTH, String.valueOf(b.length));
            OutputStream outputStream = conn.getOutputStream();
            outputStream.write(b, 0, b.length);
            outputStream.flush();
            // NOTE(review): the stream is only closed on the happy path; a write
            // failure would leak it — consider a try-finally.
            IoUtils.closeQuietly(outputStream);
        }
    }
    conn.connect();
    return new JdkHttpClientResponse(conn);
}
|
/**
 * A form-encoded body must be URL-encoded with the header charset and
 * written to the connection's output stream byte-for-byte.
 */
@Test
void testExecuteForm() throws Exception {
    Header header = Header.newInstance();
    header.setContentType(MediaType.APPLICATION_FORM_URLENCODED);
    HttpClientConfig config = HttpClientConfig.builder().build();
    Map<String, String> body = new HashMap<>();
    // The '&' inside the value must survive encoding.
    body.put("a", "bo&dy");
    RequestHttpEntity httpEntity = new RequestHttpEntity(config, header, Query.EMPTY, body);
    HttpClientResponse response = httpClientRequest.execute(uri, "GET", httpEntity);
    byte[] writeBytes = HttpUtils.encodingParams(body, StandardCharsets.UTF_8.name()).getBytes(StandardCharsets.UTF_8);
    verify(outputStream).write(writeBytes, 0, writeBytes.length);
    assertEquals(connection, getActualConnection(response));
}
|
/**
 * Reports whether this aspect extension supports the given return type:
 * only Reactor's Flux and Mono are handled.
 */
@Override
public boolean canHandleReturnType(Class<?> returnType) {
    final boolean isFlux = Flux.class.isAssignableFrom(returnType);
    final boolean isMono = Mono.class.isAssignableFrom(returnType);
    return isFlux || isMono;
}
|
/**
 * Both Reactor return types, Mono and Flux, must be reported as handled.
 */
@Test
public void testCheckTypes() {
    assertThat(reactorTimeLimiterAspectExt.canHandleReturnType(Mono.class)).isTrue();
    assertThat(reactorTimeLimiterAspectExt.canHandleReturnType(Flux.class)).isTrue();
}
|
/**
 * Creates an IPv4 address from its 32-bit integer representation.
 *
 * @param value the address as a big-endian int
 * @return the IPv4 address
 */
public static IpAddress valueOf(int value) {
    // Big-endian byte order, equivalent to ByteBuffer.putInt.
    final byte[] bytes = new byte[INET_BYTE_LENGTH];
    bytes[0] = (byte) (value >>> 24);
    bytes[1] = (byte) (value >>> 16);
    bytes[2] = (byte) (value >>> 8);
    bytes[3] = (byte) value;
    return new IpAddress(Version.INET, bytes);
}
|
/**
 * A string that is not a valid IP address representation must be rejected
 * with an IllegalArgumentException.
 */
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfIncorrectString() {
    IpAddress ipAddress;
    String fromString = "NoSuchIpAddress";
    ipAddress = IpAddress.valueOf(fromString);
}
|
/**
 * Publishes this data node's current state to the cluster: builds a
 * DataNodeDto snapshot from the configured suppliers and pings the node
 * service with it.
 */
@Override
public void doRun() {
    final DataNodeDto dto = DataNodeDto.Builder.builder()
        .setId(nodeId.getNodeId())
        .setTransportAddress(opensearchBaseUri.get().toString())
        .setClusterAddress(opensearchClusterUri.get())
        .setDataNodeStatus(processState.get().getDataNodeStatus())
        .setHostname(configuration.getHostname())
        .setRestApiAddress(datanodeRestApiUri.get())
        .setCertValidUntil(certValidUntil.get())
        .setDatanodeVersion(version.getVersion().toString())
        .build();
    nodeService.ping(dto);
}
|
/**
 * A single run of the periodical must register exactly one active node whose
 * fields reflect the supplied addresses, status, node id, and certificate
 * validity.
 */
@Test
void doRun() {
    final SimpleNodeId nodeID = new SimpleNodeId("5ca1ab1e-0000-4000-a000-000000000000");
    final URI uri = URI.create("http://localhost:9200");
    final String cluster = "localhost:9300";
    final String datanodeRestApi = "http://localhost:8999";
    @SuppressWarnings("unchecked")
    final NodePingPeriodical task = new NodePingPeriodical(
        nodeService,
        nodeID,
        new Configuration(),
        () -> uri,
        () -> cluster,
        () -> datanodeRestApi,
        () -> OpensearchState.AVAILABLE,
        Date::new
    );
    task.doRun();
    Assertions.assertThat(nodeService.allActive().values())
        .hasSize(1)
        .allSatisfy(nodeDto -> {
            Assertions.assertThat(nodeDto.getTransportAddress()).isEqualTo("http://localhost:9200");
            Assertions.assertThat(nodeDto.getClusterAddress()).isEqualTo("localhost:9300");
            Assertions.assertThat(nodeDto.getDataNodeStatus()).isEqualTo(DataNodeStatus.AVAILABLE);
            Assertions.assertThat(nodeDto.getNodeId()).isEqualTo("5ca1ab1e-0000-4000-a000-000000000000");
            Assertions.assertThat(nodeDto.getLastSeen()).isNotNull();
            Assertions.assertThat(nodeDto.getProvisioningInformation().certValidUntil()).isNotNull();
        });
}
|
/**
 * Maps a OneDrive API failure to a BackgroundException; on a 404 the cached
 * file id is evicted first, since it is evidently stale.
 */
@Override
public BackgroundException map(final String message, final OneDriveAPIException failure, final Path file) {
    if(failure.getResponseCode() == HttpStatus.SC_NOT_FOUND) {
        fileid.cache(file, null);
    }
    return super.map(message, failure, file);
}
|
/**
 * The mapping service must translate API failures by kind: redirect errors
 * to InteroperabilityException, 404s to NotfoundException, and network
 * errors to ConnectionRefusedException.
 */
@Test
public void map() {
    assertTrue(new GraphExceptionMappingService(new GraphFileIdProvider(new OneDriveSession(new Host(new OneDriveProtocol()), new DisabledX509TrustManager(), new DefaultX509KeyManager()))).map(
        new OneDriveAPIException("The OneDrive API responded with too many redirects.")) instanceof InteroperabilityException);
    assertTrue(new GraphExceptionMappingService(new GraphFileIdProvider(new OneDriveSession(new Host(new OneDriveProtocol()), new DisabledX509TrustManager(), new DefaultX509KeyManager()))).map(
        new OneDriveAPIException("m", 404)) instanceof NotfoundException);
    assertTrue(new GraphExceptionMappingService(new GraphFileIdProvider(new OneDriveSession(new Host(new OneDriveProtocol()), new DisabledX509TrustManager(), new DefaultX509KeyManager()))).map(
        new OneDriveAPIException("Couldn't connect to the OneDrive API due to a network error.", new SocketException())) instanceof ConnectionRefusedException);
}
|
/**
 * Loads (or optionally creates) a persistent topic, bounding the load by a
 * timeout and by a semaphore limiting concurrent topic loads; requests that
 * exceed the limit are queued and processed as permits free up.
 *
 * @param topic           topic name
 * @param createIfMissing whether to create the topic when absent
 * @param properties      topic properties, may be null
 * @param topicPolicies   topic policies, may be null
 * @return future completing with the topic, empty, or exceptionally
 */
protected CompletableFuture<Optional<Topic>> loadOrCreatePersistentTopic(final String topic,
    boolean createIfMissing, Map<String, String> properties, @Nullable TopicPolicies topicPolicies) {
    // Future fails with a timeout exception if the load takes too long.
    final CompletableFuture<Optional<Topic>> topicFuture = FutureUtil.createFutureWithTimeout(
        Duration.ofSeconds(pulsar.getConfiguration().getTopicLoadTimeoutSeconds()), executor(),
        () -> FAILED_TO_LOAD_TOPIC_TIMEOUT_EXCEPTION);
    topicFuture.exceptionally(t -> {
        pulsarStats.recordTopicLoadFailed();
        return null;
    });
    checkTopicNsOwnership(topic)
        .thenRun(() -> {
            final Semaphore topicLoadSemaphore = topicLoadRequestSemaphore.get();
            // Only a bounded number of topic loads may run concurrently.
            if (topicLoadSemaphore.tryAcquire()) {
                checkOwnershipAndCreatePersistentTopic(topic, createIfMissing, topicFuture,
                    properties, topicPolicies);
                topicFuture.handle((persistentTopic, ex) -> {
                    // release permit and process pending topic
                    topicLoadSemaphore.release();
                    // do not recreate topic if topic is already migrated and deleted by broker
                    // so, avoid creating a new topic if migration is already started
                    if (ex != null && (ex.getCause() instanceof TopicMigratedException)) {
                        pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
                        topicFuture.completeExceptionally(ex.getCause());
                        return null;
                    }
                    createPendingLoadTopic();
                    return null;
                });
            } else {
                // No permit available: queue the request for later processing.
                pendingTopicLoadingQueue.add(new TopicLoadingContext(topic,
                    createIfMissing, topicFuture, properties, topicPolicies));
                if (log.isDebugEnabled()) {
                    log.debug("topic-loading for {} added into pending queue", topic);
                }
            }
        }).exceptionally(ex -> {
            // Ownership check failed: drop the cached future and propagate the cause.
            pulsar.getExecutor().execute(() -> topics.remove(topic, topicFuture));
            topicFuture.completeExceptionally(ex.getCause());
            return null;
        });
    return topicFuture;
}
|
@Test
public void testConcurrentLoadTopicExceedLimitShouldNotBeAutoCreated() throws Exception {
    // Verifies that topics beyond the concurrent-load limit are queued but never
    // auto-created when auto topic creation is disabled.
    boolean needDeleteTopic = false;
    final String namespace = "prop/concurrentLoad";
    try {
        // set up broker disable auto create and set concurrent load to 1 qps.
        cleanup();
        conf.setMaxConcurrentTopicLoadRequest(1);
        conf.setAllowAutoTopicCreation(false);
        setup();
        try {
            admin.namespaces().createNamespace(namespace);
        } catch (PulsarAdminException.ConflictException e) {
            // Ok.. (if test fails intermittently and namespace is already created)
        }
        // create 3 topic
        String topicName = "persistent://" + namespace + "/my-topic";
        for (int i = 0; i < 3; i++) {
            admin.topics().createNonPartitionedTopic(topicName + "_" + i);
        }
        needDeleteTopic = true;
        // try to load 10 topic
        ArrayList<CompletableFuture<Optional<Topic>>> loadFutures = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            // try to create topic which should fail as bundle is disable
            CompletableFuture<Optional<Topic>> futureResult = pulsar.getBrokerService()
                    .loadOrCreatePersistentTopic(topicName + "_" + i, false, null, null);
            loadFutures.add(futureResult);
        }
        // Plain array creation instead of reflective Array.newInstance: simpler and type-safe.
        CompletableFuture<?>[] completableFutures = loadFutures.toArray(new CompletableFuture<?>[0]);
        CompletableFuture.allOf(completableFutures).get();
        // check topic load CompletableFuture. only first three topic should be success.
        for (int i = 0; i < 10; i++) {
            CompletableFuture<Optional<Topic>> load = loadFutures.get(i);
            if (i < 3) {
                Assert.assertTrue(load.isDone());
                Assert.assertFalse(load.isCompletedExceptionally());
            } else {
                // check topic should not be created if disable autoCreateTopic.
                Assert.assertTrue(load.isDone());
                Assert.assertTrue(load.get().isEmpty());
            }
        }
    } finally {
        // Clean up the topics we created so later tests start from a clean namespace.
        if (needDeleteTopic) {
            String topicName = "persistent://" + namespace + "/my-topic";
            for (int i = 0; i < 3; i++) {
                admin.topics().delete(topicName + "_" + i);
            }
        }
    }
}
|
/**
 * Returns a re-iterable view over the shuffle entries between the two positions.
 * Each call creates a fresh iterator; a null position means an open-ended bound.
 */
@Override
public Reiterator<ShuffleEntry> read(
    @Nullable ShufflePosition startPosition, @Nullable ShufflePosition endPosition) {
  return new ShuffleReadIterator(startPosition, endPosition);
}
|
@Test
public void readerCanRead() throws Exception {
  // Two entries returned in a single batch must come back, in order, from read().
  ShuffleEntry e1 = newShuffleEntry(KEY, SKEY, VALUE);
  ShuffleEntry e2 = newShuffleEntry(KEY, SKEY, VALUE);
  ArrayList<ShuffleEntry> entries = new ArrayList<>();
  entries.add(e1);
  entries.add(e2);
  // null next-batch position marks the end of the shuffle range.
  when(batchReader.read(START_POSITION, END_POSITION))
      .thenReturn(new ShuffleBatchReader.Batch(entries, null));
  List<ShuffleEntry> results = newArrayList(reader.read(START_POSITION, END_POSITION));
  assertThat(results, contains(e1, e2));
}
|
/**
 * Converts the Strimzi API deployment strategy enum into the corresponding
 * Kubernetes {@code DeploymentStrategy}. The switch is exhaustive over the enum,
 * so no default branch is needed.
 */
public static DeploymentStrategy deploymentStrategy(io.strimzi.api.kafka.model.common.template.DeploymentStrategy strategy) {
    return switch (strategy) {
        case ROLLING_UPDATE -> rollingUpdateStrategy();
        case RECREATE -> recreateStrategy();
    };
}
|
@Test
public void testDeploymentStrategyRollingUpdate() {
    // ROLLING_UPDATE must translate to a Kubernetes RollingUpdate strategy with
    // maxSurge=1 and maxUnavailable=0 (i.e. one extra pod, no downtime).
    DeploymentStrategy strategy = WorkloadUtils.deploymentStrategy(io.strimzi.api.kafka.model.common.template.DeploymentStrategy.ROLLING_UPDATE);
    assertThat(strategy.getType(), is("RollingUpdate"));
    assertThat(strategy.getRollingUpdate().getMaxSurge(), is(new IntOrString(1)));
    assertThat(strategy.getRollingUpdate().getMaxUnavailable(), is(new IntOrString(0)));
}
|
/**
 * Groups the elements of {@code iterable} by {@code groupBy} and mutates a per-group
 * accumulator (created by {@code zeroValueFactory}) with each element.
 * Convenience overload that delegates with the default fork-size batch threshold.
 *
 * @return map from group key to its mutated accumulator
 */
public static <T, K, V> MutableMap<K, V> aggregateInPlaceBy(
        Iterable<T> iterable,
        Function<? super T, ? extends K> groupBy,
        Function0<? extends V> zeroValueFactory,
        Procedure2<? super V, ? super T> mutatingAggregator)
{
    return FJIterate.aggregateInPlaceBy(
            iterable,
            groupBy,
            zeroValueFactory,
            mutatingAggregator,
            FJIterate.DEFAULT_MIN_FORK_SIZE);
}
|
@Test
public void aggregateInPlaceByWithBatchSize()
{
    // Sum 100 ones, 200 twos and 300 threes (shuffled) grouped by their string value,
    // forcing parallel batches of 50 to exercise the fork/join path.
    Procedure2<AtomicInteger, Integer> sumAggregator = AtomicInteger::addAndGet;
    MutableList<Integer> list = LazyIterate.adapt(Collections.nCopies(100, 1))
            .concatenate(Collections.nCopies(200, 2))
            .concatenate(Collections.nCopies(300, 3))
            .toList()
            .shuffleThis();
    MapIterable<String, AtomicInteger> aggregation =
            FJIterate.aggregateInPlaceBy(list, Functions.getToString(), ATOMIC_INTEGER_NEW, sumAggregator, 50);
    // Expected sums: 100*1, 200*2, 300*3.
    assertEquals(100, aggregation.get("1").intValue());
    assertEquals(400, aggregation.get("2").intValue());
    assertEquals(900, aggregation.get("3").intValue());
}
|
/**
 * Builds runtime options from {@code @CucumberOptions} annotations found on the given
 * class and its superclasses (walking up until the superclass chain ends).
 * Subclass options are collected first, so they take precedence where applicable.
 *
 * @param clazz the (possibly annotated) test class to inspect
 * @return builder populated with all discovered options plus defaults
 */
public RuntimeOptionsBuilder parse(Class<?> clazz) {
    RuntimeOptionsBuilder args = new RuntimeOptionsBuilder();
    for (Class<?> classWithOptions = clazz; hasSuperClass(
        classWithOptions); classWithOptions = classWithOptions.getSuperclass()) {
        CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions);
        if (options != null) {
            // Each helper feeds one option category into the builder.
            addDryRun(options, args);
            addMonochrome(options, args);
            addTags(classWithOptions, options, args);
            addPlugins(options, args);
            addPublish(options, args);
            addName(options, args);
            addSnippets(options, args);
            addGlue(options, args);
            addFeatures(options, args);
            addObjectFactory(options, args);
            addUuidGenerator(options, args);
        }
    }
    // Fall back to conventions derived from the class when nothing was specified.
    addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz);
    addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz);
    return args;
}
|
@Test
void create_with_no_filters() {
    // A class without filter annotations must yield empty tag, name and line filters.
    RuntimeOptions runtimeOptions = parser().parse(NoName.class).build();
    assertAll(
        () -> assertTrue(runtimeOptions.getTagExpressions().isEmpty()),
        () -> assertTrue(runtimeOptions.getNameFilters().isEmpty()),
        () -> assertTrue(runtimeOptions.getLineFilters().isEmpty()));
}
|
/**
 * Creates a filter that matches the given JID exactly, including the resource part
 * (the {@code false} flag disables bare-JID matching).
 */
public static FromMatchesFilter createFull(Jid address) {
    return new FromMatchesFilter(address, false);
}
|
@Test
public void fullCompareMatchingServiceJid() {
    // A full-compare filter on a service JID must accept only that exact JID.
    FromMatchesFilter filter = FromMatchesFilter.createFull(SERVICE_JID1);
    Stanza packet = StanzaBuilder.buildMessage().build();
    packet.setFrom(SERVICE_JID1);
    assertTrue(filter.accept(packet));
    // Any other sender — different service, bare or full user JIDs — is rejected.
    packet.setFrom(SERVICE_JID2);
    assertFalse(filter.accept(packet));
    packet.setFrom(BASE_JID1);
    assertFalse(filter.accept(packet));
    packet.setFrom(FULL_JID1_R1);
    assertFalse(filter.accept(packet));
    packet.setFrom(BASE_JID3);
    assertFalse(filter.accept(packet));
}
|
/**
 * Builds the metadata path of one concrete view version: the version segment
 * appended beneath the view's versions node.
 */
public static String getViewVersionNode(final String databaseName, final String schemaName, final String viewName, final String version) {
    return getViewVersionsNode(databaseName, schemaName, viewName) + "/" + version;
}
|
@Test
void assertGetViewVersionNode() {
    // Version "0" of foo_view must resolve to the full metadata path ending in /versions/0.
    assertThat(ViewMetaDataNode.getViewVersionNode("foo_db", "foo_schema", "foo_view", "0"), is("/metadata/foo_db/schemas/foo_schema/views/foo_view/versions/0"));
}
|
/**
 * Adds every element of {@code c}, rejecting the entire batch if it would push the
 * list past its maximum length. The check happens before any mutation, so the list
 * is never left partially filled.
 *
 * @throws BoundedListTooLongException if the batch would exceed {@code maxLength}
 */
@Override
public boolean addAll(Collection<? extends E> c) {
    final int incoming = c.size();
    // Written as size > maxLength - incoming to avoid any possibility of overflow.
    if (underlying.size() > maxLength - incoming) {
        throw new BoundedListTooLongException("Cannot add another " + incoming +
            " element(s) to the list because it would exceed the maximum length of " +
            maxLength);
    }
    return underlying.addAll(c);
}
|
@Test
public void testAddAll() {
    // List capped at 5; 3 elements already in it.
    BoundedList<String> list = BoundedList.newArrayBacked(5);
    list.add("a");
    list.add("b");
    list.add("c");
    // Adding 3 more would exceed the cap — both addAll overloads must reject the batch.
    assertEquals("Cannot add another 3 element(s) to the list because it would exceed the " +
        "maximum length of 5",
            assertThrows(BoundedListTooLongException.class,
                () -> list.addAll(Arrays.asList("d", "e", "f"))).getMessage());
    assertEquals("Cannot add another 3 element(s) to the list because it would exceed the " +
        "maximum length of 5",
            assertThrows(BoundedListTooLongException.class,
                () -> list.addAll(0, Arrays.asList("d", "e", "f"))).getMessage());
    // Adding exactly up to the cap succeeds and nothing from the failed batches leaked in.
    list.addAll(Arrays.asList("d", "e"));
    assertEquals(Arrays.asList("a", "b", "c", "d", "e"), list);
}
|
/**
 * Runs the visitor over every visitable predicate, using copy-on-write semantics:
 * the input array is returned untouched unless at least one predicate is transformed,
 * in which case a copy with the replacements is returned.
 */
public static Predicate[] acceptVisitor(Predicate[] predicates, Visitor visitor, IndexRegistry indexes) {
    Predicate[] result = predicates;
    for (int index = 0; index < predicates.length; index++) {
        Predicate original = predicates[index];
        if (!(original instanceof VisitablePredicate visitable)) {
            continue;
        }
        Predicate replacement = visitable.accept(visitor, indexes);
        if (replacement == original) {
            continue;
        }
        // First transformation: switch to a private copy so callers keep the input intact.
        if (result == predicates) {
            result = createCopy(result);
        }
        result[index] = replacement;
    }
    return result;
}
|
@Test
public void acceptVisitor_whenNoChange_thenReturnOriginalArray() {
    // If the visitor transforms nothing, the very same array instance must be returned
    // (no defensive copy).
    Visitor mockVisitor = mock(Visitor.class);
    Predicate[] predicates = new Predicate[1];
    Predicate predicate = createMockVisitablePredicate();
    predicates[0] = predicate;
    Predicate[] result = VisitorUtils.acceptVisitor(predicates, mockVisitor, mockIndexes);
    assertThat(result).isSameAs(predicates);
}
|
/**
 * Returns the namespace name for the given tenant/namespace pair after validating
 * both parts, delegating to the cached single-string factory.
 *
 * @throws IllegalArgumentException if either part is invalid (per validateNamespaceName)
 */
public static NamespaceName get(String tenant, String namespace) {
    validateNamespaceName(tenant, namespace);
    return get(tenant + '/' + namespace);
}
|
// A null tenant must be rejected with IllegalArgumentException (uses the 3-part overload).
@Test(expectedExceptions = IllegalArgumentException.class)
public void namespace_nullTenant() {
    NamespaceName.get(null, "use", "ns1");
}
|
/**
 * Starts a native SQL query with positional arguments; rows are materialized as
 * {@code DefaultRecord}. The trailing {@code false} flag is passed straight to the
 * spec implementation — semantics defined there (TODO confirm: likely "not nested").
 */
@Override
public NativeQuerySpec<Record> select(String sql, Object... args) {
    return new NativeQuerySpecImpl<>(this, sql, args, DefaultRecord::new, false);
}
|
@Test
public void testLoadTable() {
    // Create a table mixing quoted (case-sensitive) and unquoted column names.
    database
        .sql()
        .reactive()
        .execute(SqlRequests.of("create table \"NATIVE_TEST\"( " +
                                    "\"id\" varchar(32) primary key" +
                                    ",name varchar(32)" +
                                    ",\"testName\" varchar(32)" +
                                    ")"))
        .as(StepVerifier::create)
        .expectComplete()
        .verify();
    DefaultQueryHelper helper = new DefaultQueryHelper(database);
    // Insert one row using differently-cased identifiers than the DDL.
    database
        .dml()
        .insert("native_test")
        .value("id", "test")
        .value("NAME", "test")
        .value("testName", "test")
        .execute()
        .sync();
    // A native select must resolve all three columns and return exactly the one row.
    helper.select("select id,name,testName from native_test")
          .fetch()
          .doOnNext(System.out::println)
          .as(StepVerifier::create)
          .expectNextCount(1)
          .verifyComplete();
}
|
/**
 * Handles the ETH_COMPILE_SOLIDITY operation: compiles the Solidity source taken
 * from the message header (falling back to the endpoint configuration) and puts
 * the compilation result on the message body unless the node reported an error.
 */
@InvokeOnHeader(Web3jConstants.ETH_COMPILE_SOLIDITY)
void ethCompileSolidity(Message message) throws IOException {
    final String sourceCode = message.getHeader(Web3jConstants.SOURCE_CODE, configuration::getSourceCode, String.class);
    final Request<?, EthCompileSolidity> request = web3j.ethCompileSolidity(sourceCode);
    setRequestId(message, request);
    final EthCompileSolidity response = request.send();
    // checkForError populates the message on failure; only set the body on success.
    if (!checkForError(message, response)) {
        message.setBody(response.getCompiledSolidity());
    }
}
|
@Test
public void ethCompileSolidityTest() throws Exception {
    // A successful compile with no contracts must surface as an empty map body.
    EthCompileSolidity response = Mockito.mock(EthCompileSolidity.class);
    Mockito.when(mockWeb3j.ethCompileSolidity(any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    // emptyMap() instead of the raw EMPTY_MAP constant: type-safe, no rawtypes warning.
    Mockito.when(response.getCompiledSolidity()).thenReturn(Collections.emptyMap());
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_COMPILE_SOLIDITY);
    template.send(exchange);
    Map body = exchange.getIn().getBody(Map.class);
    assertTrue(body.isEmpty());
}
|
/**
 * Extracts the split's watermark as the minimum lower-bound value of the event-time
 * column across all files in the split, converted to milliseconds.
 *
 * @throws IllegalArgumentException if any file lacks lower-bound statistics for the column
 */
@Override
public long extractWatermark(IcebergSourceSplit split) {
  return split.task().files().stream()
      .map(
          scanTask -> {
            // Bug fix: the template previously had two %s placeholders for three
            // arguments, so eventTimeFieldId was dumped as trailing "[...]" noise.
            Preconditions.checkArgument(
                scanTask.file().lowerBounds() != null
                    && scanTask.file().lowerBounds().get(eventTimeFieldId) != null,
                "Missing statistics for column name = %s (id = %s) in file = %s",
                eventTimeFieldName,
                eventTimeFieldId,
                scanTask.file());
            return timeUnit.toMillis(
                Conversions.fromByteBuffer(
                    Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId)));
          })
      // Watermark is the minimum event time over the split; files() is non-empty here,
      // so Optional.get() cannot throw — TODO confirm splits always carry >= 1 file.
      .min(Comparator.comparingLong(l -> l))
      .get();
}
|
@TestTemplate
public void testSingle() throws IOException {
    // For a single-file split the watermark equals that file's minimum column value.
    ColumnStatsWatermarkExtractor extractor =
        new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MILLISECONDS);
    assertThat(extractor.extractWatermark(split(0)))
        .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue());
}
|
/**
 * Polls the result of an app application started at a request station.
 * <p>
 * If the account already has the maximum number of apps and the caller did not ask to
 * remove the oldest one, a "too many apps" response describing the least recently used
 * app is returned. Otherwise the current activation status is returned, marking the
 * session invalid for any non-OK status.
 *
 * @throws SharedServiceClientException if the shared-service config lookup fails
 */
@Override
public AppResponse process(Flow flow, RsPollAppApplicationResultRequest request) throws SharedServiceClientException {
    checkSwitchesEnabled();
    final String activationStatus = appSession.getActivationStatus();
    final Long accountId = appSession.getAccountId();
    final String userAppId = appSession.getUserAppId();
    // Bug fix: compare constant-first so a request without the removeOldApp field
    // counts as "false" instead of throwing a NullPointerException.
    final boolean removeOldApp = "true".equals(request.getRemoveOldApp());
    String status;
    int maxAppsPerUser = sharedServiceClient.getSSConfigInt("Maximum_aantal_DigiD_apps_eindgebruiker");
    appSession.setRemoveOldApp(removeOldApp);
    if (TOO_MANY_APPS.equals(activationStatus) && !removeOldApp) {
        // Tell the user which app would have to go: the least recently used one.
        AppAuthenticator leastRecentApp = appAuthenticatorService.findLeastRecentApp(accountId);
        return new TooManyAppsResponse("too_many_active", maxAppsPerUser, leastRecentApp.getDeviceName(),
                leastRecentApp.getLastSignInOrActivatedAtOrCreatedAt().toLocalDate().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")));
    }
    // Removing the old app resolves the TOO_MANY_APPS condition.
    status = TOO_MANY_APPS.equals(activationStatus) && removeOldApp ? OK : activationStatus;
    if (!status.equals(OK)) {
        setValid(false);
    }
    return new RsPollAppApplicationResultResponse(status, userAppId);
}
|
@Test
public void processRsPollAppApplicationResultTooManyAppsTest() throws SharedServiceClientException {
    // Session in TOO_MANY_APPS state, caller declines to remove the old app:
    // expect a TooManyAppsResponse describing the least recently used app.
    when(sharedServiceClient.getSSConfigInt("Maximum_aantal_DigiD_apps_eindgebruiker")).thenReturn(5);
    when(switchService.digidAppSwitchEnabled()).thenReturn(true);
    when(switchService.digidRequestStationEnabled()).thenReturn(true);
    rsPollAppApplicationResult.setAppSession(createAppSession(ApplyForAppAtRequestStationFlow.NAME, State.RS_APP_APPLICATION_STARTED, "TOO_MANY_APPS"));
    AppAuthenticator mockedAppAuthenticator = new AppAuthenticator();
    mockedAppAuthenticator.setAccountId(ACCOUNT_1);
    mockedAppAuthenticator.setDeviceName(DEVICENAME);
    ZonedDateTime lastSignInAt = ZonedDateTime.now();
    mockedAppAuthenticator.setLastSignInAt(lastSignInAt);
    when(mockAppAuthenticatorService.findLeastRecentApp(ACCOUNT_1)).thenReturn(mockedAppAuthenticator);
    mockedRsPollAppApplicationResultRequest = new RsPollAppApplicationResultRequest();
    mockedRsPollAppApplicationResultRequest.setActivationCode(APP_ACTIVATION_CODE);
    mockedRsPollAppApplicationResultRequest.setRemoveOldApp("false");
    AppResponse appResponse = rsPollAppApplicationResult.process(mockedApplyForAppAtRequestStationFlow, mockedRsPollAppApplicationResultRequest);
    // Session must record the user's choice and keep the TOO_MANY_APPS status.
    assertEquals(false, rsPollAppApplicationResult.getAppSession().isRemoveOldApp());
    assertEquals(TOO_MANY_APPS, rsPollAppApplicationResult.getAppSession().getActivationStatus());
    assertTrue(appResponse instanceof TooManyAppsResponse);
    // Response carries status, error, the blocking app's device name, the cap and its last-use date.
    assertEquals(NOK,((TooManyAppsResponse) appResponse).getStatus());
    assertEquals(TOO_MANY_ACTIVE,((TooManyAppsResponse) appResponse).getError());
    assertEquals(DEVICENAME,((TooManyAppsResponse) appResponse).getDeviceName());
    assertEquals(5,((TooManyAppsResponse) appResponse).getMaxAmount());
    assertEquals(lastSignInAt.toLocalDate().format(DateTimeFormatter.ofPattern("dd-MM-yyyy")), ((TooManyAppsResponse) appResponse).getLatestDate());
}
|
/**
 * Returns the column value, applying the configured mask algorithm when the column
 * belongs to a masked table. If no projection, mask table or algorithm applies, the
 * value is passed through from the underlying merged result unchanged. A null
 * original value is never masked.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
    Optional<ColumnProjection> projection = selectStatementContext.getProjectionsContext().findColumnProjection(columnIndex);
    if (projection.isPresent()) {
        Optional<MaskTable> table = maskRule.findMaskTable(projection.get().getOriginalTable().getValue());
        if (table.isPresent()) {
            Optional<MaskAlgorithm> algorithm = table.get().findAlgorithm(projection.get().getName().getValue());
            if (algorithm.isPresent()) {
                Object originalValue = mergedResult.getValue(columnIndex, Object.class);
                return null == originalValue ? null : algorithm.get().mask(originalValue);
            }
        }
    }
    // No masking configured for this column: delegate with the caller's requested type.
    return mergedResult.getValue(columnIndex, type);
}
|
@Test
void assertGetValueWhenOriginalValueIsNull() throws SQLException {
    // A null underlying value must stay null — the mask algorithm is never applied to it.
    when(mergedResult.getValue(1, Object.class)).thenReturn(null);
    assertNull(new MaskMergedResult(mockMaskAlgorithmAbsent(), mockSelectStatementContext(), mergedResult).getValue(1, Object.class));
}
|
/**
 * Closes a task's state manager under the state directory lock, wiping the task's
 * state directory when the close is unclean and EOS is enabled (the local state is
 * then invalid and must be restored from scratch).
 * <p>
 * The first exception encountered (close failure or lock I/O failure) is rethrown
 * after the lock has been released; the wipe still happens even if close() threw.
 *
 * @throws ProcessorStateException if closing or locking failed
 */
static void closeStateManager(final Logger log,
                              final String logPrefix,
                              final boolean closeClean,
                              final boolean eosEnabled,
                              final ProcessorStateManager stateMgr,
                              final StateDirectory stateDirectory,
                              final TaskType taskType) {
    // if EOS is enabled, wipe out the whole state store for unclean close since it is now invalid
    final boolean wipeStateStore = !closeClean && eosEnabled;
    final TaskId id = stateMgr.taskId();
    log.trace("Closing state manager for {} task {}", taskType, id);
    // Remember only the first failure; later failures must not mask it.
    final AtomicReference<ProcessorStateException> firstException = new AtomicReference<>(null);
    try {
        if (stateDirectory.lock(id)) {
            try {
                stateMgr.close();
            } catch (final ProcessorStateException e) {
                firstException.compareAndSet(null, e);
            } finally {
                try {
                    // Wipe runs even when close() threw — nested finally guarantees unlock.
                    if (wipeStateStore) {
                        log.debug("Wiping state stores for {} task {}", taskType, id);
                        // we can just delete the whole dir of the task, including the state store images and the checkpoint files,
                        // and then we write an empty checkpoint file indicating that the previous close is graceful and we just
                        // need to re-bootstrap the restoration from the beginning
                        Utils.delete(stateMgr.baseDir());
                    }
                } finally {
                    stateDirectory.unlock(id);
                }
            }
        } else {
            log.error("Failed to acquire lock while closing the state store for {} task {}", taskType, id);
        }
    } catch (final IOException e) {
        // Lock acquisition/release or directory deletion failed at the I/O level.
        final ProcessorStateException exception = new ProcessorStateException(
            String.format("%sFatal error while trying to close the state manager for task %s", logPrefix, id), e
        );
        firstException.compareAndSet(null, exception);
    }
    final ProcessorStateException exception = firstException.get();
    if (exception != null) {
        throw exception;
    }
}
|
@Test
public void shouldStillWipeStateStoresIfCloseThrowsException() {
    // Unclean close with EOS: even though close() throws, the state dir must still be
    // wiped (Utils is static-mocked so no real deletion happens) and the lock released.
    final File randomFile = new File("/random/path");
    when(stateManager.taskId()).thenReturn(taskId);
    when(stateDirectory.lock(taskId)).thenReturn(true);
    doThrow(new ProcessorStateException("Close failed")).when(stateManager).close();
    when(stateManager.baseDir()).thenReturn(randomFile);
    try (MockedStatic<Utils> utils = mockStatic(Utils.class)) {
        // The close failure must be rethrown to the caller.
        assertThrows(ProcessorStateException.class, () ->
            StateManagerUtil.closeStateManager(logger, "logPrefix:", false, true, stateManager, stateDirectory, TaskType.ACTIVE));
    }
    verify(stateDirectory).unlock(taskId);
}
|
/**
 * Prints a transient progress message on the current terminal line. Blank messages
 * are ignored. The message may be ellipsized by the appender (truncation char '…').
 */
@Override
public void message(final String message) {
    if(StringUtils.isBlank(message)) {
        return;
    }
    final StringAppender appender = new StringAppender('…');
    appender.append(message);
    // Clear the line and append message. Used instead of \r because the line width may vary
    console.printf("\r%s%s%s", Ansi.ansi()
        .fg(Ansi.Color.CYAN)
        .saveCursorPosition()
        .eraseLine(Ansi.Erase.ALL)
        .restoreCursorPosition(), appender.toString(),
        Ansi.ansi().reset());
}
|
@Test
public void testNullMessage() {
    // A null message must be silently ignored — no output, no NPE.
    new TerminalProgressListener().message(null);
}
|
/**
 * Splits the rule's HTML description into description sections; returns an empty
 * set when the rule has no HTML description at all.
 */
@Override
public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) {
    var htmlDescription = getDescriptionInHtml(rule);
    if (htmlDescription.isEmpty()) {
        return emptySet();
    }
    return generateSections(htmlDescription.get());
}
|
@Test
public void parse_with_noncompliant_section_not_removed() {
    // A description with noncompliant + compliant code blocks must be split into four
    // sections, while the default section keeps the full original HTML.
    when(rule.htmlDescription()).thenReturn(DESCRIPTION + NONCOMPLIANTCODE + COMPLIANTCODE);
    Set<RuleDescriptionSectionDto> results = generator.generateSections(rule);
    Map<String, String> sectionKeyToContent = results.stream().collect(toMap(RuleDescriptionSectionDto::getKey, RuleDescriptionSectionDto::getContent));
    assertThat(sectionKeyToContent).hasSize(4)
        .containsEntry(DEFAULT_SECTION_KEY, rule.htmlDescription())
        .containsEntry(ROOT_CAUSE_SECTION_KEY, DESCRIPTION)
        .containsEntry(ASSESS_THE_PROBLEM_SECTION_KEY, NONCOMPLIANTCODE)
        .containsEntry(HOW_TO_FIX_SECTION_KEY, COMPLIANTCODE);
}
|
/**
 * Test-oriented convenience: builds a JobGraph from the stream graph using the
 * caller's context class loader, no parent job ID, and inline (same-thread)
 * execution of serialization work.
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    final ClassLoader userClassLoader = Thread.currentThread().getContextClassLoader();
    final StreamingJobGraphGenerator generator =
            new StreamingJobGraphGenerator(userClassLoader, streamGraph, null, Runnable::run);
    return generator.createJobGraph();
}
|
@Test
void generatorForwardsSavepointRestoreSettings() {
    // Savepoint restore settings on the StreamGraph must survive job-graph generation.
    StreamGraph streamGraph =
            new StreamGraph(
                    new Configuration(),
                    new ExecutionConfig(),
                    new CheckpointConfig(),
                    SavepointRestoreSettings.forPath("hello"));
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    SavepointRestoreSettings savepointRestoreSettings = jobGraph.getSavepointRestoreSettings();
    assertThat(savepointRestoreSettings.getRestorePath()).isEqualTo("hello");
}
|
/**
 * Returns the latency bucket boundaries used by the fault-tolerance strategy,
 * delegating to the wrapped producer implementation.
 */
public long[] getLatencyMax() {
    return this.defaultMQProducerImpl.getLatencyMax();
}
|
@Test
public void assertGetLatencyMax() {
    // The delegated latency thresholds must never be null.
    assertNotNull(producer.getLatencyMax());
}
|
/**
 * Creates a binary-expression template node for a supported operator.
 *
 * @throws IllegalArgumentException if {@code binaryOp} has no registered op code
 */
public static UBinary create(Kind binaryOp, UExpression lhs, UExpression rhs) {
    checkArgument(
        OP_CODES.containsKey(binaryOp), "%s is not a supported binary operation", binaryOp);
    return new AutoValue_UBinary(binaryOp, lhs, rhs);
}
|
@Test
public void lessThan() {
    // "4 < 17" must both unify with and inline back from a LESS_THAN template node.
    assertUnifiesAndInlines(
        "4 < 17", UBinary.create(Kind.LESS_THAN, ULiteral.intLit(4), ULiteral.intLit(17)));
}
|
/**
 * Extracts the service interface from a Dubbo-generated proxy by reflecting through
 * its internals: proxy "handler" field -> InvokerInvocationHandler "invoker" ->
 * cluster invoker's nested "invoker" -> getInterface().
 * <p>
 * NOTE(review): tightly coupled to Dubbo's private field names — breaks if Dubbo
 * renames them. Returns null for null input or non-Dubbo proxies.
 */
public static Class<?> getAssistInterface(Object proxyBean)
    throws NoSuchFieldException, SecurityException, IllegalArgumentException, IllegalAccessException,
    NoSuchMethodException, InvocationTargetException {
    if (proxyBean == null) {
        return null;
    }
    // Only Dubbo-generated proxy classes carry the fields accessed below.
    if (!isDubboProxyName(proxyBean.getClass().getName())) {
        return null;
    }
    Field handlerField = proxyBean.getClass().getDeclaredField("handler");
    handlerField.setAccessible(true);
    Object invokerInvocationHandler = handlerField.get(proxyBean);
    Field invokerField = invokerInvocationHandler.getClass().getDeclaredField("invoker");
    invokerField.setAccessible(true);
    Object invoker = invokerField.get(invokerInvocationHandler);
    Field failoverClusterInvokerField = invoker.getClass().getDeclaredField("invoker");
    failoverClusterInvokerField.setAccessible(true);
    Object failoverClusterInvoker = failoverClusterInvokerField.get(invoker);
    return (Class<?>)ReflectionUtil.invokeMethod(failoverClusterInvoker,
        "getInterface");
}
|
@Test
public void testGetAssistInterfaceForNotDubboProxy() throws NoSuchFieldException, InvocationTargetException, IllegalAccessException, NoSuchMethodException {
    // A plain object that is not a Dubbo proxy must yield null, not an exception.
    assertNull(DubboUtil.getAssistInterface(new ArrayList<>()));
}
|
/**
 * Dispatches a CLI invocation: resolves the top-level command (following aliases,
 * warning on unstable ones), descends into sub-commands, parses/validates arguments
 * and runs the command.
 *
 * @param argv full command line, first element being the command name
 * @return the command's exit code, or -1 on usage/parse/runtime errors
 */
public int run(String... argv) {
    if (argv.length == 0) {
        printUsage();
        return -1;
    }
    // Sanity check on the number of arguments
    String cmd = argv[0];
    Command command = mCommands.get(cmd);
    if (command == null) {
        String[] replacementCmd = getReplacementCmd(cmd);
        if (replacementCmd == null) {
            // Unknown command (we didn't find the cmd in our dict)
            System.err.printf("%s is an unknown command.%n", cmd);
            printUsage();
            return -1;
        } else {
            // Handle command alias
            if (mUnstableAlias != null && mUnstableAlias.contains(cmd)) {
                String deprecatedMsg =
                    String.format("WARNING: %s is not a stable CLI command. It may be removed in the "
                        + "future. Use with caution in scripts. You may use '%s' instead.",
                        cmd, StringUtils.join(replacementCmd, " "));
                System.out.println(deprecatedMsg);
            }
            // Recurse with the alias expanded in front of the remaining args.
            String[] replacementArgv =
                ArrayUtils.addAll(replacementCmd, ArrayUtils.subarray(argv, 1, argv.length));
            return run(replacementArgv);
        }
    }
    // Find the inner-most command and its argument line.
    CommandLine cmdline;
    try {
        String[] currArgs = Arrays.copyOf(argv, argv.length);
        while (command.hasSubCommand()) {
            if (currArgs.length < 2) {
                throw new InvalidArgumentException("No sub-command is specified");
            }
            if (!command.getSubCommands().containsKey(currArgs[1])) {
                throw new InvalidArgumentException("Unknown sub-command: " + currArgs[1]);
            }
            command = command.getSubCommands().get(currArgs[1]);
            // Shift off the consumed command token before looking at the next level.
            currArgs = Arrays.copyOfRange(currArgs, 1, currArgs.length);
        }
        currArgs = Arrays.copyOfRange(currArgs, 1, currArgs.length);
        cmdline = command.parseAndValidateArgs(currArgs);
    } catch (InvalidArgumentException e) {
        // It outputs a prompt message when passing wrong args to CLI
        System.out.println(e.getMessage());
        System.out.println("Usage: " + command.getUsage());
        System.out.println(command.getDescription());
        LOG.error("Invalid arguments for command {}:", command.getCommandName(), e);
        return -1;
    }
    // Handle the command
    try {
        return command.run(cmdline);
    } catch (Exception e) {
        System.out.println(e.getMessage());
        LOG.error("Error running " + StringUtils.join(argv, " "), e);
        return -1;
    }
}
|
@Test
public void commandAliasExists() throws Exception {
    // An unstable alias must still run (exit 0) and print the deprecation warning
    // pointing at the stable replacement.
    TestShell shell = new TestShell();
    assertEquals(0, shell.run("cmdAlias"));
    String warningMsg = "WARNING: cmdAlias is not a stable CLI command. It may be removed in the"
        + " future. Use with caution in scripts. You may use 'cmd -O' instead.";
    String output = mOutput.toString();
    assertTrue(String.format("Output should contain proper warning.\nActual: %s\nExpected: %s",
        output, warningMsg), output.contains(warningMsg));
}
|
/**
 * Compares two IP addresses (both 4-byte IPv4 or both 16-byte IPv6) under the given
 * prefix length, i.e. only the first {@code prefixLength} bits must agree.
 *
 * @throws IllegalArgumentException if the byte length is neither 4 nor 16
 */
public static boolean isMatchWithPrefix(final byte[] candidate, final byte[] expected, final int prefixLength)
{
    if (candidate.length != expected.length)
    {
        return false;
    }
    switch (candidate.length)
    {
        case 4:
        {
            final int mask = prefixLengthToIpV4Mask(prefixLength);
            return (toInt(candidate) & mask) == (toInt(expected) & mask);
        }
        case 16:
        {
            // Split the 128-bit prefix across two 64-bit masks (upper/lower halves).
            final long upperMask = prefixLengthToIpV6Mask(Math.min(prefixLength, 64));
            final long lowerMask = prefixLengthToIpV6Mask(Math.max(prefixLength - 64, 0));
            return
                (upperMask & toLong(candidate, 0)) == (upperMask & toLong(expected, 0)) &&
                (lowerMask & toLong(candidate, 8)) == (lowerMask & toLong(expected, 8));
        }
        default:
            throw new IllegalArgumentException("how many bytes does an IP address have again?");
    }
}
|
@Test
void shouldNotMatchIfNotAllBytesWithUnalignedPrefixMatch()
{
    // The addresses differ exactly at bit 17 (the last bit of a /17 prefix),
    // so a 17-bit comparison must fail even though the first 16 bits agree.
    assertFalse(isMatchWithPrefix(
        asBytes(0b10101010_11111111_00000000_00000000),
        asBytes(0b10101010_11111111_10000000_00000000),
        17));
}
|
/**
 * Not supported by this storage backend.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public boolean renameTo(File dest) {
    // Implementing this based on the current storage structure is complex and very expensive.
    // A redesign is necessary; in particular, storing paths as keys must be avoided.
    // A better layout: file name + reference to the parent in metadata, keyed by a UUID, so
    // moves or renames only affect the current file. Metadata should also contain the list
    // of UUIDs of the corresponding data chunks.
    throw new UnsupportedOperationException("Not implemented");
}
|
// renameTo is unsupported on this file system and must always throw.
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testRenameTo(){
    fs.getFile("file.txt").renameTo(null);
}
|
/**
 * REST endpoint: returns the telemetry config with the given name as JSON.
 * Responds 200 with the encoded config, or the configured not-found error when
 * no config exists under that name.
 */
@GET
@Path("{name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getConfig(@PathParam("name") String configName) {
    log.trace(String.format(MESSAGE_CONFIG, QUERY));
    // nullIsNotFound converts a missing config into a 404-style ItemNotFoundException.
    final TelemetryConfig config =
        nullIsNotFound(configService.getConfig(configName), CONFIG_NOT_FOUND);
    final ObjectNode root = codec(TelemetryConfig.class).encode(config, this);
    return ok(root).build();
}
|
@Test
public void testUpdateConfigAddressWithModifyOperation() {
    // PUT on the address sub-resource must look up the existing config once,
    // push the update through the admin service and return 200.
    expect(mockConfigAdminService.getConfig(anyString()))
        .andReturn(telemetryConfig).once();
    mockConfigAdminService.updateTelemetryConfig(telemetryConfig);
    replay(mockConfigAdminService);
    final WebTarget wt = target();
    Response response = wt.path(PATH + "/address/test1/address1")
        .request(MediaType.APPLICATION_JSON_TYPE)
        .put(Entity.json(""));
    final int status = response.getStatus();
    assertEquals(200, status);
    // Verifies the expected admin-service interactions actually happened.
    verify(mockConfigAdminService);
}
|
/**
 * Type discriminator for this failure kind; always the literal "TIMEOUT".
 */
@Override
public String getType() {
    return "TIMEOUT";
}
|
@Test
public void verify_message_and_type() {
    // The message is echoed as given and the type is the fixed TIMEOUT discriminator.
    assertThat(underTest.getMessage()).isEqualTo(message);
    assertThat(underTest.getType()).isEqualTo("TIMEOUT");
}
|
/**
 * Jackson factory for the deletion retention strategy config.
 *
 * @param maxNumberOfIndices maximum number of indices to keep; must be at least 1
 */
@JsonCreator
public static DeletionRetentionStrategyConfig create(@JsonProperty(TYPE_FIELD) String type,
                                                     @JsonProperty("max_number_of_indices") @Min(1) int maxNumberOfIndices) {
    return new AutoValue_DeletionRetentionStrategyConfig(type, maxNumberOfIndices);
}
|
@Test
public void testSerialization() throws JsonProcessingException {
    // Serialized JSON must carry the fully-qualified type discriminator and the limit.
    final DeletionRetentionStrategyConfig config = DeletionRetentionStrategyConfig.create(25);
    final ObjectMapper objectMapper = new ObjectMapperProvider().get();
    final String json = objectMapper.writeValueAsString(config);
    final Object document = Configuration.defaultConfiguration().jsonProvider().parse(json);
    assertThat((String) JsonPath.read(document, "$.type")).isEqualTo("org.graylog2.indexer.retention.strategies.DeletionRetentionStrategyConfig");
    assertThat((Integer) JsonPath.read(document, "$.max_number_of_indices")).isEqualTo(25);
}
|
/**
 * Decodes a key from its string form "&lt;tag&gt;:&lt;payload&gt;", where the first character
 * selects the key type (e.g. 'S' String, 'I' Integer, 'T' custom Transformer).
 *
 * @throws CacheException if the type tag is unknown
 */
public Object stringToKey(String s) {
    char type = s.charAt(0);
    return switch (type) {
        // 'S' is a String, NOT a Short — Short uses 'X'.
        case 'S' -> s.substring(2);
        case 'I' -> Integer.valueOf(s.substring(2));
        case 'Y' -> Byte.valueOf(s.substring(2));
        case 'L' -> Long.valueOf(s.substring(2));
        case 'X' -> Short.valueOf(s.substring(2));
        case 'D' -> Double.valueOf(s.substring(2));
        case 'F' -> Float.valueOf(s.substring(2));
        // 'B' is a Boolean, NOT a Byte — Byte uses 'Y'.
        case 'B' -> Boolean.valueOf(s.substring(2));
        case 'C' -> Character.valueOf(s.charAt(2));
        case 'U' -> UUID.fromString(s.substring(2));
        // Byte array encoded as Base64.
        case 'A' -> Base64.getDecoder().decode(s.substring(2));
        case 'T' -> {
            // Custom Transformable or registered Transformer: "T:<class>:<value>".
            int indexOfSecondDelimiter = s.indexOf(':', 2);
            String keyClassName = s.substring(2, indexOfSecondDelimiter);
            String keyAsString = s.substring(indexOfSecondDelimiter + 1);
            Transformer t = getTransformer(keyClassName);
            if (t == null) {
                throw CONTAINER.noTransformerForKey(keyClassName);
            }
            yield t.fromString(keyAsString);
        }
        default -> throw new CacheException("Unknown key type metadata: " + type);
    };
}
|
// 'Z' is not a registered type tag, so decoding must fail with a CacheException.
@Test(expectedExceptions = CacheException.class)
public void testStringToUnknownKey() {
    keyTransformationHandler.stringToKey("Z:someKey");
}
|
/**
 * Returns a combine function that sums doubles (standard IEEE-754 semantics,
 * so mixing +/- infinity yields NaN).
 */
public static Combine.BinaryCombineDoubleFn ofDoubles() {
    return new SumDoubleFn();
}
|
@Test
public void testSumDoubleFnInfinity() {
    // -Inf + Inf is NaN under IEEE-754; the combiner must not special-case it.
    testCombineFn(
        Sum.ofDoubles(),
        Lists.newArrayList(Double.NEGATIVE_INFINITY, 2.0, 3.0, Double.POSITIVE_INFINITY),
        Double.NaN);
}
|
/**
 * Validates the interplay of instancePartitionsMap and instanceAssignmentConfigMap:
 * with the mirror-server-set partition selector both must be present for a type;
 * with any other selector they are mutually exclusive. A table with either map
 * missing entirely is trivially valid.
 *
 * @throws IllegalStateException when the combination for some type is invalid
 */
@VisibleForTesting
static void validateInstancePartitionsTypeMapConfig(TableConfig tableConfig) {
    if (MapUtils.isEmpty(tableConfig.getInstancePartitionsMap()) || MapUtils.isEmpty(
        tableConfig.getInstanceAssignmentConfigMap())) {
        return;
    }
    for (InstancePartitionsType instancePartitionsType : InstancePartitionsType.values()) {
        if (tableConfig.getInstanceAssignmentConfigMap().containsKey(instancePartitionsType.toString())) {
            InstanceAssignmentConfig instanceAssignmentConfig =
                tableConfig.getInstanceAssignmentConfigMap().get(instancePartitionsType.toString());
            if (instanceAssignmentConfig.getPartitionSelector()
                == InstanceAssignmentConfig.PartitionSelector.MIRROR_SERVER_SET_PARTITION_SELECTOR) {
                // Mirror selector needs pre-computed instance partitions to mirror against.
                Preconditions.checkState(tableConfig.getInstancePartitionsMap().containsKey(instancePartitionsType),
                    String.format("Both InstanceAssignmentConfigMap and InstancePartitionsMap needed for %s, as "
                        + "MIRROR_SERVER_SET_PARTITION_SELECTOR is used", instancePartitionsType));
            } else {
                // Otherwise the two maps would conflict for the same type.
                Preconditions.checkState(!tableConfig.getInstancePartitionsMap().containsKey(instancePartitionsType),
                    String.format("Both InstanceAssignmentConfigMap and InstancePartitionsMap set for %s",
                        instancePartitionsType));
            }
        }
    }
}
|
@Test
public void testValidateInstancePartitionsMap() {
    InstanceAssignmentConfig instanceAssignmentConfig = Mockito.mock(InstanceAssignmentConfig.class);
    TableConfig tableConfigWithoutInstancePartitionsMap =
        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
    // Call validate with a table-config without any instance partitions or instance assignment config
    TableConfigUtils.validateInstancePartitionsTypeMapConfig(tableConfigWithoutInstancePartitionsMap);
    TableConfig tableConfigWithInstancePartitionsMap =
        new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
            .setInstancePartitionsMap(ImmutableMap.of(InstancePartitionsType.OFFLINE, "test_OFFLINE")).build();
    // Call validate with a table-config with instance partitions set but not instance assignment config
    TableConfigUtils.validateInstancePartitionsTypeMapConfig(tableConfigWithInstancePartitionsMap);
    // Mocked assignment config uses a non-mirror selector, so both maps set is invalid.
    TableConfig invalidTableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME)
        .setInstancePartitionsMap(ImmutableMap.of(InstancePartitionsType.OFFLINE, "test_OFFLINE"))
        .setInstanceAssignmentConfigMap(
            ImmutableMap.of(InstancePartitionsType.OFFLINE.toString(), instanceAssignmentConfig)).build();
    try {
        // Call validate with instance partitions and config set for the same type
        TableConfigUtils.validateInstancePartitionsTypeMapConfig(invalidTableConfig);
        Assert.fail("Validation should have failed since both instancePartitionsMap and config are set");
    } catch (IllegalStateException ignored) {
    }
}
|
/**
 * Opens the JRE binary with the given filename from the local {@code jres/} directory.
 *
 * @param jreFilename name of the JRE file to open
 * @return an input stream over the JRE file; the caller is responsible for closing it
 * @throws NotFoundException if no such file exists
 */
@Override
public InputStream getJreBinary(String jreFilename) {
  try {
    return new FileInputStream("jres/" + jreFilename);
  } catch (FileNotFoundException fileNotFoundException) {
    // NOTE(review): the FileNotFoundException cause is dropped here; if
    // NotFoundException has a (message, cause) constructor, chaining it would
    // aid debugging — confirm its signature.
    throw new NotFoundException(String.format("Unable to find JRE '%s'", jreFilename));
  }
}
|
@Test
void getJreBinary_shouldFail_whenFileNotFound() {
  // "jre1" does not exist under jres/, so the handler must translate the
  // FileNotFoundException into a NotFoundException with a descriptive message.
  assertThatThrownBy(() -> jresHandler.getJreBinary("jre1"))
      .isInstanceOf(NotFoundException.class)
      .hasMessage("Unable to find JRE 'jre1'");
}
|
/**
 * Parses a single Janino conditional expression, validates the variable names it references
 * via {@code validator}, and — when valid — produces a rewritten copy of the expression in
 * {@code result.converted} using the replacements collected by the visitor.
 *
 * <p>Any parse error (or trailing input after the expression) leaves {@code result.ok} false;
 * this method never throws.
 */
static ParseResult parse(String expression, NameValidator validator, ClassHelper helper) {
    ParseResult result = new ParseResult();
    try {
        Parser parser = new Parser(new Scanner("ignore", new StringReader(expression)));
        Java.Atom atom = parser.parseConditionalExpression();
        // after parsing the expression the input should end (otherwise it is not "simple")
        if (parser.peek().type == TokenType.END_OF_INPUT) {
            result.guessedVariables = new LinkedHashSet<>();
            ConditionalExpressionVisitor visitor = new ConditionalExpressionVisitor(result, validator, helper);
            result.ok = atom.accept(visitor);
            result.invalidMessage = visitor.invalidMessage;
            if (result.ok) {
                // Splice the visitor's replacements into the original text, left to right;
                // replacements are keyed/ordered by their start offset.
                result.converted = new StringBuilder(expression.length());
                int start = 0;
                for (Replacement replace : visitor.replacements.values()) {
                    result.converted.append(expression, start, replace.start).append(replace.newString);
                    start = replace.start + replace.oldLength;
                }
                result.converted.append(expression.substring(start));
            }
        }
    } catch (Exception ex) {
        // Deliberate best-effort: any parser exception simply leaves result.ok == false.
    }
    return result;
}
|
@Test
public void testConvertExpression() {
    // Valid names are: all-uppercase tokens (enum constants), "road_class" and "toll".
    NameValidator validVariable = s -> Helper.toUpperCase(s).equals(s) || s.equals("road_class") || s.equals("toll");
    ParseResult result = parse("toll == NO", validVariable, k -> "");
    assertTrue(result.ok);
    assertEquals("[toll]", result.guessedVariables.toString());
    // The ClassHelper return value is prefixed onto bare enum constants:
    assertEquals("road_class == Hello.PRIMARY",
            parse("road_class == PRIMARY", validVariable, k -> "Hello").converted.toString());
    assertEquals("toll == Toll.NO", parse("toll == NO", validVariable, k -> "Toll").converted.toString());
    assertEquals("toll == Toll.NO || road_class == RoadClass.NO", parse("toll == NO || road_class == NO", validVariable, k -> k.equals("toll") ? "Toll" : "RoadClass").converted.toString());
    // convert in_area variable to function call:
    assertEquals(CustomWeightingHelper.class.getSimpleName() + ".in(this.in_custom_1, edge)",
            parse("in_custom_1", validVariable, k -> "").converted.toString());
    // no need to inject:
    assertNull(parse("toll == Toll.NO", validVariable, k -> "").converted);
}
|
/**
 * Returns the configured replica count, or {@code DEFAULT_REPLICAS} when none was supplied.
 */
public short getReplicas() {
  if (replicas != null) {
    return replicas;
  }
  return DEFAULT_REPLICAS;
}
|
@Test
public void shouldDefaultIfNoReplicasSupplied() {
  // Given: a WITH clause that sets partitions and retention but leaves replicas absent
  // When:
  final TopicProperties properties = new Builder()
      .withName("name")
      .withWithClause(Optional.empty(), Optional.of(1), Optional.empty(), Optional.of((long) 100))
      .build();
  // Then: the replica count falls back to the documented default
  assertThat(properties.getReplicas(), is(TopicProperties.DEFAULT_REPLICAS));
}
|
/**
 * Determines the proxy for the given target, bypassing the proxy (returning {@code null})
 * when the target's host name matches any configured non-proxy-host pattern.
 */
@Override
@Nullable
protected HttpHost determineProxy(HttpHost target, HttpContext context) throws HttpException {
    final String hostName = target.getHostName();
    for (Pattern pattern : nonProxyHostPatterns) {
        if (pattern.matcher(hostName).matches()) {
            // Direct connection for excluded hosts.
            return null;
        }
    }
    // No exclusion matched — defer to the default proxy resolution.
    return super.determineProxy(target, context);
}
|
@Test
void testHostWithStartWildcardIsMatched() throws Exception {
  // A non-proxy-host entry with a leading wildcard (e.g. "*.example.com") must match
  // "test.example.com", so the planner returns null (direct connection, no proxy).
  assertThat(routePlanner.determineProxy(new HttpHost("test.example.com"), httpContext)).isNull();
}
|
/**
 * Reads a single byte from the stream.
 *
 * @return the next byte as an int in 0..255, or -1 at end of file
 * @throws IOException if the stream has been closed
 */
@Override
public int read() throws IOException {
  if (this.closed) {
    throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
  }
  // Refill the part buffer when it is exhausted and more of the file remains.
  if (this.partRemaining <= 0 && this.position < this.fileSize) {
    this.reopen(this.position);
  }
  int byteRead = -1;
  if (this.partRemaining != 0) {
    // Mask to 0..255 so byte values >= 0x80 are not mistaken for EOF (-1).
    byteRead = this.buffer[
        (int) (this.buffer.length - this.partRemaining)] & 0xff;
  }
  if (byteRead >= 0) {
    this.position++;
    this.partRemaining--;
    if (null != this.statistics) {
      // Bug fix: record that ONE byte was read. The previous code passed the byte's
      // VALUE (0..255), inflating the bytes-read statistic by up to 255x;
      // Statistics.incrementBytesRead expects a byte count.
      this.statistics.incrementBytesRead(1);
    }
  }
  return byteRead;
}
|
@Test
public void testRead() throws Exception {
  final int bufLen = 256;
  Path readTestFilePath = new Path(this.testRootDir + "/"
      + "testReadSmallFile.txt");
  long fileSize = 5 * Unit.MB;
  // Generate a 5 MB test file filled with a repeating 256-byte pattern.
  ContractTestUtils.generateTestFile(
      this.fs, readTestFilePath, fileSize, 256, 255);
  LOG.info("read test file: " + readTestFilePath + " has created.");
  FSDataInputStream inputStream = this.fs.open(readTestFilePath);
  byte[] buf = new byte[bufLen];
  long bytesRead = 0;
  // NOTE(review): if read() ever returned -1 here this loop would never terminate;
  // the test relies on the file being fully readable.
  while (bytesRead < fileSize) {
    int bytes = 0;
    if (fileSize - bytesRead < bufLen) {
      int remaining = (int) (fileSize - bytesRead);
      bytes = inputStream.read(buf, 0, remaining);
    } else {
      bytes = inputStream.read(buf, 0, bufLen);
    }
    bytesRead += bytes;
    // Every megabyte, verify available() reflects exactly the bytes left to read.
    if (bytesRead % (1 * Unit.MB) == 0) {
      int available = inputStream.available();
      assertTrue("expected remaining: " + (fileSize - bytesRead) +
          " but got: " + available, (fileSize - bytesRead) == available);
      LOG.info("Bytes read: " +
          Math.round((double) bytesRead / Unit.MB) + "MB");
    }
  }
  assertTrue(inputStream.available() == 0);
  IOUtils.closeStream(inputStream);
}
|
/**
 * Returns true when the given role name appears at most once in the configured roles
 * (i.e. it is not duplicated; unknown names are trivially unique).
 */
public boolean isUniqueRoleName(final CaseInsensitiveString roleName) {
    final int occurrences = Collections.frequency(roleNames(), roleName);
    return occurrences <= 1;
}
|
@Test
public void isUniqueRoleName_shouldBeTrueIfRolesAreUnique() throws Exception {
    RolesConfig rolesConfig = new RolesConfig(new RoleConfig(new CaseInsensitiveString("admin")),
            new RoleConfig(new CaseInsensitiveString("view")));
    // "admin" occurs exactly once -> unique.
    assertTrue(rolesConfig.isUniqueRoleName(new CaseInsensitiveString("admin")));
    // "operate" occurs zero times -> trivially unique as well.
    assertTrue(rolesConfig.isUniqueRoleName(new CaseInsensitiveString("operate")));
}
|
/**
 * Sets the linger timeout in nanoseconds; {@code null} clears the setting.
 *
 * @param lingerNs linger timeout in nanoseconds, or null to unset
 * @return this builder for fluent chaining
 * @throws IllegalArgumentException if a non-null negative value is supplied
 */
public ChannelUriStringBuilder linger(final Long lingerNs)
{
    final boolean isNegative = lingerNs != null && lingerNs < 0;
    if (isNegative)
    {
        throw new IllegalArgumentException("linger value cannot be negative: " + lingerNs);
    }

    linger = lingerNs;
    return this;
}
|
@Test
void shouldCopyLingerTimeoutFromChannelUriNegativeValue()
{
    // A negative linger value in the URI must be rejected when copied into the builder.
    final ChannelUri channelUri = ChannelUri.parse("aeron:udp?linger=-1000");
    assertThrows(IllegalArgumentException.class, () -> new ChannelUriStringBuilder().linger(channelUri));
}
|
/**
 * Returns an INFO-status glyph badge with the given glyph identifier and no message.
 *
 * @param gid glyph identifier; must be non-null
 * @return a new glyph badge
 */
public static NodeBadge glyph(String gid) {
    return new NodeBadge(Status.INFO, true, nonNull(gid), null);
}
|
@Test
public void glyphWarnMsg() {
    // The (status, gid, message) overload must preserve all three fields and mark
    // the badge as a glyph badge.
    badge = NodeBadge.glyph(Status.WARN, GID, MSG);
    checkFields(badge, Status.WARN, true, GID, MSG);
}
|
/**
 * Invokes a no-argument static method on the given class via reflection.
 *
 * @param clazz  the class declaring the static method
 * @param method the static method name
 * @return the method's return value, or {@code null} if the method is missing,
 *         inaccessible, or throws (the failure is logged, not propagated)
 */
public static Object invokeStaticMethod(final Class<?> clazz, final String method) {
    try {
        return MethodUtils.invokeStaticMethod(clazz, method);
    } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) {
        // Log with context (previously an empty message) so failures are diagnosable.
        LOG.error("failed to invoke static method [{}] on [{}]", method, clazz, e);
    }
    return null;
}
|
@Test
public void testInvokeStaticMethod() {
    final Reflect reflect = new Reflect();
    // methodStaticA is static and returns "1".
    assertEquals("1", ReflectUtils.invokeStaticMethod(reflect.getClass(), "methodStaticA"));
    // methodB is not static, so the reflective static invocation fails and null is returned.
    assertNull(ReflectUtils.invokeStaticMethod(reflect.getClass(), "methodB"));
}
|
/**
 * Produces a descriptive placeholder for a date literal relative to "today".
 *
 * <p>Both arguments are day ordinals (days since epoch). Dates within 90 days are
 * described by their day distance and direction; anything further is just "(date)".
 */
private static String sanitizeDate(int days, int today) {
  int distance = Math.abs(today - days);
  if (distance == 0) {
    return "(date-today)";
  }
  if (distance < 90) {
    String direction = today > days ? "ago" : "from-now";
    return "(date-" + distance + "-days-" + direction + ")";
  }
  return "(date)";
}
|
@Test
public void testSanitizeDate() {
  // Date literals far from "today" collapse to the generic "(date)" placeholder,
  // both in the untyped and the schema-bound (STRUCT) sanitization paths.
  assertEquals(
      Expressions.equal("test", "(date)"),
      ExpressionUtil.sanitize(Expressions.equal("test", "2022-04-29")));
  assertEquals(
      Expressions.equal("date", "(date)"),
      ExpressionUtil.sanitize(STRUCT, Expressions.equal("date", "2022-04-29"), true));
  // The string form must match the original expression except for the literal.
  assertThat(ExpressionUtil.toSanitizedString(Expressions.equal("test", "2022-04-29")))
      .as("Sanitized string should be identical except for descriptive literal")
      .isEqualTo("test = (date)");
  assertThat(
          ExpressionUtil.toSanitizedString(STRUCT, Expressions.equal("date", "2022-04-29"), true))
      .as("Sanitized string should be identical except for descriptive literal")
      .isEqualTo("date = (date)");
}
|
/**
 * Returns true when the given type uses a generic class without supplying type arguments
 * (a raw type). Wildcards are unwrapped to their bound (lower bound preferred) and
 * checked recursively.
 */
public static boolean isMissingTypeArguments(Type type) {
  if (!(type instanceof WildcardType wildcard)) {
    return GenericTypeReflector.isMissingTypeParameters(type);
  }
  // Prefer the lower bound ("? super X"); otherwise fall back to the upper bound.
  Type[] lowerBounds = wildcard.getLowerBounds();
  Type bound = lowerBounds.length > 0 ? lowerBounds[0] : wildcard.getUpperBounds()[0];
  return isMissingTypeArguments(bound);
}
|
@Test
public void isMissingTypeArguments() {
  // Fully parameterized type -> not raw.
  assertThat(
          Reflection.isMissingTypeArguments(
              Types.parameterizedType(Container.class, Person.class)))
      .isFalse();
  // Generic class used without type arguments -> raw.
  assertThat(Reflection.isMissingTypeArguments(Container.class)).isTrue();
  // Non-generic class -> nothing can be missing.
  assertThat(Reflection.isMissingTypeArguments(Person.class)).isFalse();
}
|
/**
 * Decodes an ABI-encoded function result into typed values.
 *
 * @param rawInput         hex-encoded function return data
 * @param outputParameters expected output types, in declaration order
 * @return the decoded values, one per output parameter
 */
public static List<Type> decode(String rawInput, List<TypeReference<Type>> outputParameters) {
    return decoder.decodeFunctionResult(rawInput, outputParameters);
}
|
@Test
public void testDecodeDynamicStruct3() {
    // Build a nested dynamic struct: Nazz { List<Nazzy { List<Foo> }>, BigInteger }.
    AbiV2TestFixture.Nazz nazz =
            new AbiV2TestFixture.Nazz(
                    Collections.singletonList(
                            new AbiV2TestFixture.Nazzy(
                                    Arrays.asList(
                                            new AbiV2TestFixture.Foo("a", "b"),
                                            new AbiV2TestFixture.Foo("c", "d")))),
                    new BigInteger("100"));
    // Round-trip: encode the struct, then decode it back via the function's output types.
    String rawInput = FunctionEncoder.encodeConstructor(Collections.singletonList(nazz));
    List<Type> decoded =
            FunctionReturnDecoder.decode(
                    rawInput, AbiV2TestFixture.getNazzFunction.getOutputParameters());
    assertEquals(Collections.singletonList(nazz).get(0).toString(), decoded.get(0).toString());
}
|
/**
 * Acquires the lock, blocking indefinitely.
 *
 * @throws IllegalStateException if the waiting thread is interrupted; the thread's
 *         interrupt status is restored and the interruption is chained as the cause
 */
@Override
public void lock() {
    try {
        lock(-1, null, false);
    } catch (InterruptedException e) {
        // Bug fix: do not swallow the interruption — restore the interrupt flag so
        // callers can observe it, and chain the cause for diagnosability.
        Thread.currentThread().interrupt();
        throw new IllegalStateException(e);
    }
}
|
@Test
public void testForceUnlock() {
    RLock lock = redisson.getLock("lock");
    lock.lock();
    // forceUnlock releases the lock regardless of which thread/client holds it.
    lock.forceUnlock();
    Assertions.assertFalse(lock.isLocked());
    // A fresh handle to the same lock name must also observe the unlocked state.
    lock = redisson.getLock("lock");
    Assertions.assertFalse(lock.isLocked());
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.