focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static RedirectionAction buildFormPostContentAction(final WebContext context, final String content) { // is this an automatic form post generated by OpenSAML? if (content != null && content.contains("onload=\"document.forms[0].submit()\"")) { val url = StringEscapeUtils.unescapeHtml4(substringBetween(content, "<form action=\"", "\" method=\"post\">")); if (isNotBlank(url)) { val data = new HashMap<String, String>(); val samlRequest = StringEscapeUtils.unescapeHtml4(substringBetween(content, "name=\"SAMLRequest\" value=\"", "\"/>")); if (isNotBlank(samlRequest)) { data.put("SAMLRequest", samlRequest); } val relayState = StringEscapeUtils.unescapeHtml4(substringBetween(content, "name=\"RelayState\" value=\"", "\"/>")); if (isNotBlank(relayState)) { data.put("RelayState", relayState); } val samlResponse = StringEscapeUtils.unescapeHtml4(substringBetween(content, "name=\"SAMLResponse\" value=\"", "\"/>")); if (isNotBlank(samlResponse)) { data.put("SAMLResponse", samlResponse); } return new AutomaticFormPostAction(url, data, content); } } return new OkAction(content); }
/** Content without the OpenSAML onload marker must yield a plain OkAction echoing the content. */
@Test
public void testFormPostContentAction() {
    val action = HttpActionHelper.buildFormPostContentAction(MockWebContext.create(), VALUE);
    assertTrue(action instanceof OkAction);
    // must NOT be mistaken for an automatic form post
    assertFalse(action instanceof AutomaticFormPostAction);
    assertEquals(VALUE, ((OkAction) action).getContent());
}
/**
 * Merges sharded query results into one {@link MergedResult}. A single result
 * without aggregation rewrite is streamed through untouched; otherwise the merged
 * result is built from the select context and then decorated (e.g. LIMIT handling).
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    final boolean singlePlainResult = 1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext);
    if (singlePlainResult) {
        return new IteratorStreamMergedResult(queryResults);
    }
    final Map<String, Integer> labelIndexes = getColumnLabelIndexMap(queryResults.get(0));
    final SelectStatementContext selectContext = (SelectStatementContext) sqlStatementContext;
    selectContext.setIndexes(labelIndexes);
    return decorate(queryResults, selectContext, build(queryResults, selectContext, labelIndexes, database));
}
/** GROUP BY + LIMIT on MySQL must produce a LimitDecoratorMergedResult wrapping a group-by stream merge. */
@Test
void assertBuildGroupByMemoryMergedResultWithMySQLLimit() throws SQLException {
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
    MySQLSelectStatement selectStatement = (MySQLSelectStatement) buildSelectStatement(new MySQLSelectStatement());
    selectStatement.setGroupBy(new GroupBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralLimitValueSegment(0, 0, 1L), null));
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    // NOTE(review): the `database` mock built above is used only for metadata; merge()
    // receives createDatabase() instead — presumably intentional, but worth confirming.
    MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class));
    assertThat(actual, instanceOf(LimitDecoratorMergedResult.class));
    assertThat(((LimitDecoratorMergedResult) actual).getMergedResult(), instanceOf(GroupByStreamMergedResult.class));
}
/**
 * Translates a jib CLI build file into a {@link JibContainerBuilder}.
 *
 * @param projectRoot root used to resolve relative layer paths
 * @param buildFilePath path of the YAML build file
 * @param buildCommandOptions CLI build options (supplies template parameters)
 * @param commonCliOptions shared CLI options (credentials etc.)
 * @param logger console logger
 * @return a container builder populated from the build file
 * @throws InvalidImageReferenceException if the base image reference is malformed
 * @throws IOException if the build file cannot be read
 */
public static JibContainerBuilder toJibContainerBuilder(
    Path projectRoot,
    Path buildFilePath,
    Build buildCommandOptions,
    CommonCliOptions commonCliOptions,
    ConsoleLogger logger)
    throws InvalidImageReferenceException, IOException {
  BuildFileSpec spec = toBuildFileSpec(buildFilePath, buildCommandOptions.getTemplateParameters());

  // start from the declared base image, or from scratch when none is declared
  Optional<BaseImageSpec> from = spec.getFrom();
  JibContainerBuilder builder =
      from.isPresent() ? createJibContainerBuilder(from.get(), commonCliOptions, logger) : Jib.fromScratch();

  spec.getCreationTime().ifPresent(builder::setCreationTime);
  spec.getFormat().ifPresent(builder::setFormat);
  builder.setEnvironment(spec.getEnvironment());
  builder.setLabels(spec.getLabels());
  builder.setVolumes(spec.getVolumes());
  builder.setExposedPorts(spec.getExposedPorts());
  spec.getUser().ifPresent(builder::setUser);
  spec.getWorkingDirectory().ifPresent(builder::setWorkingDirectory);
  spec.getEntrypoint().ifPresent(builder::setEntrypoint);
  spec.getCmd().ifPresent(builder::setProgramArguments);

  // layers section is optional; resolved relative to the project root
  Optional<LayersSpec> layers = spec.getLayers();
  if (layers.isPresent()) {
    builder.setFileEntriesLayers(Layers.toLayers(projectRoot, layers.get()));
  }
  return builder;
}
/** Template parameters must be substituted into labels; escaped/unmatched markers stay literal. */
@Test
public void testToBuildFileSpec_withTemplating()
    throws URISyntaxException, InvalidImageReferenceException, IOException {
  Path buildfile = Paths.get(Resources.getResource("buildfiles/projects/templating/valid.yaml").toURI());
  Mockito.when(buildCli.getTemplateParameters())
      .thenReturn(
          ImmutableMap.of(
              "unused", "ignored", // keys that are defined but not used do not throw an error
              "key", "templateKey",
              "value", "templateValue",
              "repeated", "repeatedValue"));
  JibContainerBuilder jibContainerBuilder =
      BuildFiles.toJibContainerBuilder(
          buildfile.getParent(), buildfile, buildCli, commonCliOptions, consoleLogger);
  ContainerBuildPlan resolved = jibContainerBuilder.toContainerBuildPlan();
  Map<String, String> expectedLabels =
      ImmutableMap.<String, String>builder()
          .put("templateKey", "templateValue")   // both key and value templated
          .put("label1", "repeatedValue")        // same parameter reused
          .put("label2", "repeatedValue")
          .put("label3", "${escaped}")           // escaped marker kept verbatim
          .put("label4", "free$")                // bare '$' untouched
          .put("unmatched", "${")                // unterminated marker untouched
          .build();
  Assert.assertEquals(expectedLabels, resolved.getLabels());
}
static BayeuxClient createClient(final SalesforceComponent component, final SalesforceSession session) throws SalesforceException { // use default Jetty client from SalesforceComponent, it's shared by all consumers final SalesforceHttpClient httpClient = component.getConfig().getHttpClient(); Map<String, Object> options = new HashMap<>(); /* The timeout should be greater than 110 sec as per https://github.com/cometd/cometd/issues/1142#issuecomment-1048256297 and https://developer.salesforce.com/docs/atlas.en-us.api_streaming.meta/api_streaming/using_streaming_api_timeouts.htm */ options.put(ClientTransport.MAX_NETWORK_DELAY_OPTION, 120000); if (component.getLongPollingTransportProperties() != null) { options.putAll(component.getLongPollingTransportProperties()); } // check login access token if (session.getAccessToken() == null && !component.getLoginConfig().isLazyLogin()) { session.login(null); } CookieStore cookieStore = new CookieManager().getCookieStore(); HttpCookieStore httpCookieStore = new HttpCookieStore.Default(); ClientTransport transport = new JettyHttpClientTransport(options, httpClient) { @Override protected void customize(Request request) { super.customize(request); //accessToken might be null due to lazy login String accessToken = session.getAccessToken(); if (accessToken == null) { try { accessToken = session.login(null); } catch (SalesforceException e) { throw new RuntimeException(e); } } String finalAccessToken = new String(accessToken); request.headers(h -> h.add(HttpHeader.AUTHORIZATION, "OAuth " + finalAccessToken)); } @Override protected void storeCookies(URI uri, Map<String, List<String>> cookies) { try { CookieManager cookieManager = new CookieManager(cookieStore, CookiePolicy.ACCEPT_ALL); cookieManager.put(uri, cookies); for (java.net.HttpCookie httpCookie : cookieManager.getCookieStore().getCookies()) { httpCookieStore.add(uri, HttpCookie.from(httpCookie)); } } catch (IOException x) { if (LOG.isDebugEnabled()) { LOG.debug("Could not parse 
cookies", x); } } } @Override protected HttpCookieStore getHttpCookieStore() { return httpCookieStore; } }; BayeuxClient client = new BayeuxClient(getEndpointUrl(component), transport); // added eagerly to check for support during handshake client.addExtension(REPLAY_EXTENSION); return client; }
/** With lazy login enabled, client creation must not trigger an eager session.login(). */
@Test
public void shouldNotLoginWhenAccessTokenIsNullAndLazyLoginIsTrue() throws SalesforceException {
    final SalesforceHttpClient httpClient = mock(SalesforceHttpClient.class);
    httpClient.setTimeout(0L);
    final SalesforceEndpointConfig endpointConfig = new SalesforceEndpointConfig();
    endpointConfig.setHttpClient(httpClient);
    final SalesforceLoginConfig loginConfig = new SalesforceLoginConfig();
    loginConfig.setLazyLogin(true);
    final SalesforceSession session = mock(SalesforceSession.class);
    final SalesforceComponent component = mock(SalesforceComponent.class);
    when(component.getLoginConfig()).thenReturn(loginConfig);
    when(component.getConfig()).thenReturn(endpointConfig);
    when(component.getSession()).thenReturn(session);
    BayeuxClient bayeuxClient = SubscriptionHelper.createClient(component, session);
    assertNotNull(bayeuxClient);
    // the whole point: no login attempt during creation
    verify(session, never()).login(null);
}
/**
 * Cleans up grants that are still held by user grantees which no longer exist.
 * Computes the set difference between user-type grantees holding grants and the
 * currently existing users, and deletes grants for the orphaned grantees.
 */
@Subscribe
public void handleUserDeletedEvent(UserDeletedEvent event) {
    // all user-type grantees that currently hold at least one grant
    final Set<GRN> granteesWithGrants;
    try (final Stream<GrantDTO> grants = grantService.streamAll()) {
        granteesWithGrants = grants
                .map(GrantDTO::grantee)
                .filter(g -> g.grnType().equals(GRNTypes.USER))
                .collect(Collectors.toSet());
    }
    // all users that still exist, as GRNs
    final Set<GRN> existingUsers;
    try (final Stream<UserOverviewDTO> allUsers = userService.streamAll()) {
        existingUsers = allUsers
                .map(u -> grnRegistry.newGRN(GRNTypes.USER.type(), u.id()))
                .collect(Collectors.toSet());
    }
    final Sets.SetView<GRN> orphanedGrantees = Sets.difference(granteesWithGrants, existingUsers);
    if (orphanedGrantees.isEmpty()) {
        return;
    }
    log.debug("Clearing grants for {} grantees ({}).", orphanedGrantees.size(), orphanedGrantees);
    orphanedGrantees.forEach(grantService::deleteForGrantee);
}
/** Only the grant of the vanished user "b" is cleaned up; existing users and teams are untouched. */
@Test
void userRemoved() {
    when(userService.streamAll()).thenReturn(Stream.of(userA));
    when(grantService.streamAll()).thenReturn(Stream.of(grantUserA, grantUserB, grantTeam));
    cleanupListener.handleUserDeletedEvent(mock(UserDeletedEvent.class));
    verify(grantService).deleteForGrantee(grnRegistry.newGRN(GRNTypes.USER, "b"));
    // no other grants may be deleted
    verifyNoMoreInteractions(grantService);
}
/**
 * Verifies an email address from a user-entered verification code. Validates the
 * code and the app session before delegating to the account service, and converts
 * the service result into the deprecated response DTO.
 */
@PostMapping("/verify")
@Operation(summary = "Verify email address by user entering verificationcode")
public DEmailVerifyResult verifyEmail(@RequestBody DEmailVerifyRequest deprecatedRequest) {
    validateVerificationCode(deprecatedRequest);
    final AppSession appSession = validate(deprecatedRequest);
    final var verifyRequest = deprecatedRequest.getRequest();
    final var serviceResult = accountService.verifyEmail(appSession.getAccountId(), verifyRequest);
    return DEmailVerifyResult.copyFrom(serviceResult);
}
/** A valid verify request maps the service result (status, error, attempts) into the DTO. */
@Test
public void validEmailVerify() {
    DEmailVerifyRequest request = new DEmailVerifyRequest();
    request.setAppSessionId("id");
    request.setVerificationCode("code");
    EmailVerifyResult result = new EmailVerifyResult();
    result.setStatus(Status.OK);
    result.setError("error");
    result.setRemainingAttempts(6);
    when(accountService.verifyEmail(eq(1L), any())).thenReturn(result);
    DEmailVerifyResult verifyResult = emailController.verifyEmail(request);
    assertEquals(Status.OK, verifyResult.getStatus());
    assertEquals("error", verifyResult.getError());
    assertEquals(6, verifyResult.getRemainingAttempts());
}
public static Set<Set<LogicalVertex>> computePipelinedRegions( final Iterable<? extends LogicalVertex> topologicallySortedVertices) { final Map<LogicalVertex, Set<LogicalVertex>> vertexToRegion = PipelinedRegionComputeUtil.buildRawRegions( topologicallySortedVertices, LogicalPipelinedRegionComputeUtil::getMustBePipelinedConsumedResults); // Since LogicalTopology is a DAG, there is no need to do cycle detection nor to merge // regions on cycles. return uniqueVertexGroups(vertexToRegion); }
/** Three unconnected vertices must each form their own single-vertex region. */
@Test
void testIsolatedVertices() {
    JobVertex v1 = new JobVertex("v1");
    JobVertex v2 = new JobVertex("v2");
    JobVertex v3 = new JobVertex("v3");
    Set<Set<LogicalVertex>> regions = computePipelinedRegions(v1, v2, v3);
    // 3 regions of size 1 each
    checkRegionSize(regions, 3, 1, 1, 1);
}
/**
 * Decodes the given Cookie header value into a sorted set of cookies.
 *
 * @param header the raw Cookie header
 * @return the decoded cookies (empty when nothing valid was found)
 */
public Set<Cookie> decode(String header) {
    final Set<Cookie> decoded = new TreeSet<Cookie>();
    decode(decoded, header);
    return decoded;
}
/** STRICT decoding rejects a quoted cookie value containing a semicolon — result is empty. */
@Test
public void testRejectCookieValueWithSemicolon() {
    Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode("name=\"foo;bar\";");
    assertTrue(cookies.isEmpty());
}
/**
 * Loads all content pack installations from the database.
 *
 * @return an immutable snapshot of every stored installation
 */
public Set<ContentPackInstallation> loadAll() {
    // cursor is AutoCloseable; the ImmutableSet copy materializes results before close
    try (final DBCursor<ContentPackInstallation> cursor = dbCollection.find()) {
        return ImmutableSet.copyOf((Iterator<ContentPackInstallation>) cursor);
    }
}
/** loadAll() returns every installation in the fixture (4 documents). */
@Test
@MongoDBFixtures("ContentPackInstallationPersistenceServiceTest.json")
public void loadAll() {
    final Set<ContentPackInstallation> contentPacks = persistenceService.loadAll();
    assertThat(contentPacks).hasSize(4);
}
/** Returns a bad-request error signalling that the namespace does not exist. */
public static BadRequestException namespaceNotExists() {
    final String message = "namespace not exist.";
    return new BadRequestException(message);
}
/** Covers both the no-arg factory and the (appId, cluster, namespace) overload's message format. */
@Test
public void testNamespaceNotExists() {
    BadRequestException namespaceNotExists = BadRequestException.namespaceNotExists();
    assertEquals("namespace not exist.", namespaceNotExists.getMessage());
    BadRequestException namespaceNotExists2 = BadRequestException.namespaceNotExists(appId, clusterName, namespaceName);
    assertEquals("namespace not exist for appId:app-1001 clusterName:test namespaceName:application", namespaceNotExists2.getMessage());
}
DateRange getRange(String dateRangeString) throws ParseException { if (dateRangeString == null || dateRangeString.isEmpty()) return null; String[] dateArr = dateRangeString.split("-"); if (dateArr.length > 2 || dateArr.length < 1) return null; // throw new IllegalArgumentException("Only Strings containing two Date separated by a '-' or a single Date are allowed"); ParsedCalendar from = parseDateString(dateArr[0]); ParsedCalendar to; if (dateArr.length == 2) to = parseDateString(dateArr[1]); else // faster and safe? // to = new ParsedCalendar(from.parseType, (Calendar) from.parsedCalendar.clone()); to = parseDateString(dateArr[0]); try { return new DateRange(from, to); } catch (IllegalArgumentException ex) { return null; } }
/**
 * A reversed year-less range ("Aug 14-Aug 10") wraps around the year boundary.
 * NOTE(review): per these assertions the in-range window appears to be
 * Aug 15 .. Aug 9 (both interval endpoints excluded) — confirm against DateRange semantics.
 */
@Test
public void testParseReverseDateRangeWithoutYear() throws ParseException {
    DateRange dateRange = dateRangeParser.getRange("Aug 14-Aug 10");
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.JANUARY, 9)));
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 9)));
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 10)));
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 12)));
    assertFalse(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 14)));
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.AUGUST, 15)));
    assertTrue(dateRange.isInRange(getCalendar(2014, Calendar.SEPTEMBER, 15)));
}
/**
 * Looks up a cluster config by key and extracts its payload as the given type.
 * Returns null when the config does not exist or the payload cannot be extracted
 * (the latter is logged as an error).
 */
@Override
public <T> T get(String key, Class<T> type) {
    final ClusterConfig config = findClusterConfig(key);
    if (config == null) {
        LOG.debug("Couldn't find cluster config of type {}", key);
        return null;
    }
    final T payload = extractPayload(config.payload(), type);
    if (payload == null) {
        LOG.error("Couldn't extract payload from cluster config (type: {})", key);
    }
    return payload;
}
/** A config document stored directly in Mongo is retrievable and its payload deserialized. */
@Test
public void getReturnsExistingConfig() throws Exception {
    // seed the collection with a raw document matching the service's schema
    DBObject dbObject = new BasicDBObjectBuilder()
            .add("type", CustomConfig.class.getCanonicalName())
            .add("payload", Collections.singletonMap("text", "TEST"))
            .add("last_updated", TIME.toString())
            .add("last_updated_by", "ID")
            .get();
    @SuppressWarnings("deprecation")
    final DBCollection collection = mongoConnection.getDatabase().getCollection(COLLECTION_NAME);
    collection.save(dbObject);
    assertThat(collection.count()).isEqualTo(1L);
    CustomConfig customConfig = clusterConfigService.get(CustomConfig.class);
    assertThat(customConfig.text).isEqualTo("TEST");
}
/**
 * Classifies the processing state of all active nodes relative to a time range.
 * Returns NONE_ACTIVE when no node updated its status recently, SOME_OVERLOADED as
 * soon as one busy node lags behind the range end, ALL_IDLE when every active node
 * is behind the range start, and SOME_UP_TO_DATE otherwise.
 */
public ProcessingNodesState calculateProcessingState(TimeRange timeRange) {
    // nodes count as "active" only if they updated their status within the threshold
    final DateTime updateThresholdTimestamp = clock.nowUTC().minus(updateThreshold.toMilliseconds());
    try (DBCursor<ProcessingStatusDto> statusCursor = db.find(activeNodes(updateThresholdTimestamp))) {
        if (!statusCursor.hasNext()) {
            return ProcessingNodesState.NONE_ACTIVE;
        }
        int activeNodes = 0;
        int idleNodes = 0;
        while (statusCursor.hasNext()) {
            activeNodes++;
            ProcessingStatusDto nodeProcessingStatus = statusCursor.next();
            DateTime lastIndexedMessage = nodeProcessingStatus.receiveTimes().postIndexing();
            // If node is behind and is busy, it is overloaded.
            if (lastIndexedMessage.isBefore(timeRange.getTo()) && isBusy(nodeProcessingStatus)) {
                // early return: one overloaded node decides the overall state
                return ProcessingNodesState.SOME_OVERLOADED;
            }
            // If a node did not index a message that is at least at the start of the time range,
            // we consider it idle.
            if (lastIndexedMessage.isBefore(timeRange.getFrom())) {
                idleNodes++;
            }
        }
        // Only if all nodes are idle, we stop the processing.
        if (activeNodes == idleNodes) {
            return ProcessingNodesState.ALL_IDLE;
        }
    }
    // If none of the above checks return, we can assume that some nodes have already indexed the given timerange.
    return ProcessingNodesState.SOME_UP_TO_DATE;
}
/** Fixture where every node has indexed past the range end → SOME_UP_TO_DATE. */
@Test
@MongoDBFixtures("processing-status-all-nodes-up-to-date.json")
public void processingStateAllNodesUpToDate() {
    // pin "now" and the activity threshold so the fixture nodes count as active
    when(clock.nowUTC()).thenReturn(DateTime.parse("2019-01-01T04:00:00.000Z"));
    when(updateThreshold.toMilliseconds()).thenReturn(Duration.hours(1).toMilliseconds());
    TimeRange timeRange = AbsoluteRange.create("2019-01-01T02:00:00.000Z", "2019-01-01T03:00:00.000Z");
    assertThat(dbService.calculateProcessingState(timeRange)).isEqualTo(ProcessingNodesState.SOME_UP_TO_DATE);
}
@Override protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> properties) throws Exception { AS400ConnectionPool connectionPool; if (properties.containsKey(CONNECTION_POOL)) { LOG.trace("AS400ConnectionPool instance specified in the URI - will look it up."); // We have chosen to handle the connectionPool option ourselves, so // we must remove it from the given parameter list (see // http://camel.apache.org/writing-components.html) String poolId = properties.remove(CONNECTION_POOL).toString(); connectionPool = EndpointHelper.resolveReferenceParameter(getCamelContext(), poolId, AS400ConnectionPool.class, true); } else { LOG.trace("No AS400ConnectionPool instance specified in the URI - one will be provided."); connectionPool = getConnectionPool(); } String type = remaining.substring(remaining.lastIndexOf('.') + 1).toUpperCase(); Jt400Endpoint endpoint = new Jt400Endpoint(uri, this, connectionPool); setProperties(endpoint, properties); endpoint.setType(Jt400Type.valueOf(type)); return endpoint; }
/** A dtaq URI with secured=true yields a secured Jt400Endpoint using the referenced pool. */
@Test
public void testCreateDatqSecuredEndpoint() throws Exception {
    Endpoint endpoint = component
            .createEndpoint(
                    "jt400://user:password@host/qsys.lib/library.lib/queue.dtaq?connectionPool=#mockPool&secured=true");
    assertNotNull(endpoint);
    assertTrue(endpoint instanceof Jt400Endpoint);
    assertTrue(((Jt400Endpoint) endpoint).isSecured());
}
/**
 * Atomically adds delta to the counter for the given key, blocking on the async
 * backend and returning the value held *before* the addition.
 */
@Override
public long getAndAdd(K key, long delta) {
    return complete(asyncCounterMap.getAndAdd(key, delta));
}
/** getAndAdd returns the pre-increment value and leaves the sum readable afterwards. */
@Test
public void testGetAndAdd() {
    atomicCounterMap.put(KEY1, VALUE1);
    Long beforeIncrement = atomicCounterMap.getAndAdd(KEY1, DELTA1);
    assertThat(beforeIncrement, is(VALUE1));
    Long afterIncrement = atomicCounterMap.get(KEY1);
    assertThat(afterIncrement, is(VALUE1 + DELTA1));
}
/**
 * Null-object implementation: always returns null regardless of the keys.
 * NOTE(review): this intentionally violates the "return empty, not null" guideline —
 * the companion test pins the null return, so callers must be relying on it.
 */
@Override
public Map getAll(Set keys) {
    return null;
}
/** The null query cache's getAll always returns null, even for a null key set. */
@Test
public void testGetAll() throws Exception {
    assertNull(NULL_QUERY_CACHE.getAll(null));
}
/**
 * Renders the core report into the PDF document: opens the document, writes the
 * report, and closes the document. DocumentExceptions are rethrown as IOExceptions.
 */
public void toPdf() throws IOException {
    try {
        document.open();
        // it would be possible to open Adobe Reader's Print dialog here:
        // if (writer instanceof PdfWriter) {
        //     ((PdfWriter) writer).addJavaScript("this.print(true);", false);
        // }
        pdfCoreReport.toPdf();
    } catch (final DocumentException e) {
        throw createIOException(e);
    }
    // NOTE(review): close() is skipped when an exception is thrown above — the document
    // stays open on failure; confirm whether a finally-close is safe here.
    document.close();
}
/**
 * End-to-end smoke test of PDF generation: exercises counters with and without
 * graphs, error counters, caches, jobs, the no-database mode, and a non-default
 * locale. Only verifies that generation completes without throwing.
 */
@Test
public void testToPdf() throws Exception {
    final Counter sqlCounter = new Counter("sql", "db.png");
    // counterName must be http, sql or ejb so that the graph labels are found in the translations
    final Counter counter = new Counter("http", "db.png", sqlCounter);
    final Counter errorCounter = new Counter(Counter.ERROR_COUNTER_NAME, null);
    final Counter jobCounter = TestHtmlReport.getJobCounter();
    final List<Counter> counters = List.of(counter, sqlCounter, errorCounter, jobCounter);
    counter.addRequest("test1", 0, 0, 0, false, 1000);
    counter.addRequest("test2", 1000, 500, 500, false, 1000);
    counter.addRequest("test3", 10000, 500, 500, true, 10000);
    final Collector collector = new Collector("test", counters);
    final JavaInformations javaInformations = new JavaInformations(null, true);
    final List<JavaInformations> javaInformationsList = Collections
            .singletonList(javaInformations);
    counter.addRequest("test1", 0, 0, 0, false, 1000);
    collector.collectWithoutErrors(javaInformationsList);
    counter.clear();
    collector.collectWithoutErrors(javaInformationsList);
    toPdf(collector, true, javaInformationsList, null);

    // render once with real JRobin graphs attached
    final JRobin jrobin = collector.getCounterJRobins().iterator().next();
    final byte[] graph = jrobin.graph(Period.JOUR.getRange(), 50, 50);
    final Map<String, byte[]> graphs = new HashMap<>();
    graphs.put("1", graph);
    graphs.put("2", graph);
    graphs.put("3", graph);
    graphs.put("4", graph);
    toPdf(collector, true, javaInformationsList, graphs);

    // for the following PDFs there is no point regenerating all the images,
    // which would take a lot of time, so we use pre-initialized (empty) graphs
    final Map<String, byte[]> emptyGraphs = Collections.emptyMap();
    counter.bindContext("test 1", "complete test 1", null, -1, -1);
    sqlCounter.bindContext("sql1", "sql 1", null, -1, -1);
    sqlCounter.addRequest("sql1", 100, 100, 100, false, -1);
    counter.addRequest("test 1", 0, 0, 0, false, 1000);
    counter.addRequest("test2", 1000, 500, 500, false, 1000);
    counter.addRequest(buildLongRequestName(), 10000, 5000, 5000, true, 10000);
    collector.collectWithoutErrors(javaInformationsList);
    toPdf(collector, true, javaInformationsList, emptyGraphs);
    toPdf(collector, false, javaInformationsList, emptyGraphs);

    // errorCounter
    errorCounter.addRequestForSystemError("error", -1, -1, -1, null);
    errorCounter.addRequestForSystemError("error2", -1, -1, -1, "ma stack-trace");
    toPdf(collector, false, javaInformationsList, emptyGraphs);

    rootContexts(counter, collector, javaInformations);
    cache(collector);
    job(collector);

    // no-database mode
    Utils.setProperty(Parameter.NO_DATABASE, Boolean.TRUE.toString());
    toPdf(collector, false, javaInformationsList, emptyGraphs);
    Utils.setProperty(Parameter.NO_DATABASE, Boolean.FALSE.toString());

    // non-default locale must also render
    I18N.bindLocale(Locale.CHINA);
    try {
        toPdf(collector, false, javaInformationsList, emptyGraphs);
    } finally {
        I18N.unbindLocale();
    }
}
@Override public void checkClientTrusted( X509Certificate[] chain, String authType ) throws CertificateException { // Find and use the end entity as the selector for verification. final X509Certificate endEntityCert = CertificateUtils.identifyEndEntityCertificate( Arrays.asList( chain ) ); final X509CertSelector selector = new X509CertSelector(); selector.setCertificate( endEntityCert ); try { checkChainTrusted( selector, chain ); } catch ( InvalidAlgorithmParameterException | NoSuchAlgorithmException | CertPathBuilderException ex ) { throw new CertificateException( ex ); } }
/** A chain anchored in an expired root must be rejected with a CertificateException. */
@Test
public void testInvalidChainExpiredTrustAnchor() throws Exception {
    // Setup fixture.
    // Execute system under test.
    assertThrows(CertificateException.class, () -> systemUnderTest.checkClientTrusted(expiredRootChain, "RSA"));
}
/**
 * Merges resource-type responses from multiple sub-clusters into one response,
 * de-duplicating resource types. Null responses and null payloads are tolerated.
 *
 * @param responses partial responses (entries may be null)
 * @return a single response containing the distinct resource types
 */
public static GetAllResourceTypeInfoResponse mergeResourceTypes(
    Collection<GetAllResourceTypeInfoResponse> responses) {
  final Set<ResourceTypeInfo> merged = new HashSet<>();
  for (GetAllResourceTypeInfoResponse partial : responses) {
    // skip failed / empty sub-cluster answers
    if (partial == null || partial.getResourceTypeInfo() == null) {
      continue;
    }
    merged.addAll(partial.getResourceTypeInfo());
  }
  final GetAllResourceTypeInfoResponse result =
      Records.newRecord(GetAllResourceTypeInfoResponse.class);
  result.setResourceTypeInfo(new ArrayList<>(merged));
  return result;
}
/** Merging overlapping, empty, and null responses yields the distinct union of resource types. */
@Test
public void testMergeResourceTypes() {
    ResourceTypeInfo resourceTypeInfo1 = ResourceTypeInfo.newInstance("vcores");
    ResourceTypeInfo resourceTypeInfo2 = ResourceTypeInfo.newInstance("gpu");
    ResourceTypeInfo resourceTypeInfo3 = ResourceTypeInfo.newInstance("memory-mb");
    List<ResourceTypeInfo> resourceTypeInfoList1 = new ArrayList<>();
    resourceTypeInfoList1.add(resourceTypeInfo1);
    resourceTypeInfoList1.add(resourceTypeInfo3);
    List<ResourceTypeInfo> resourceTypeInfoList2 = new ArrayList<>();
    resourceTypeInfoList2.add(resourceTypeInfo3);   // overlaps with list1 → de-duplicated
    resourceTypeInfoList2.add(resourceTypeInfo2);
    // normal response
    GetAllResourceTypeInfoResponse response1 = Records.newRecord(GetAllResourceTypeInfoResponse.class);
    response1.setResourceTypeInfo(resourceTypeInfoList1);
    GetAllResourceTypeInfoResponse response2 = Records.newRecord(GetAllResourceTypeInfoResponse.class);
    response2.setResourceTypeInfo(resourceTypeInfoList2);
    // empty response
    GetAllResourceTypeInfoResponse response3 = Records.newRecord(GetAllResourceTypeInfoResponse.class);
    // null response
    GetAllResourceTypeInfoResponse response4 = null;
    List<GetAllResourceTypeInfoResponse> responses = new ArrayList<>();
    responses.add(response1);
    responses.add(response2);
    responses.add(response3);
    responses.add(response4);
    // expected response
    List<ResourceTypeInfo> expectedResponse = new ArrayList<>();
    expectedResponse.add(resourceTypeInfo1);
    expectedResponse.add(resourceTypeInfo2);
    expectedResponse.add(resourceTypeInfo3);
    GetAllResourceTypeInfoResponse response = RouterYarnClientUtils.mergeResourceTypes(responses);
    // order-insensitive comparison — the merge goes through a HashSet
    Assert.assertTrue(CollectionUtils.isEqualCollection(expectedResponse, response.getResourceTypeInfo()));
}
/**
 * Returns whether the Authorization header carries a bearer token, i.e. it has
 * text and starts with the configured token prefix.
 */
public static boolean isBearerToken(final String authorizationHeader) {
    if (!StringUtils.hasText(authorizationHeader)) {
        return false;
    }
    return authorizationHeader.startsWith(TOKEN_PREFIX);
}
/** A header starting with "Bearer " is recognized as a bearer token. */
@Test
void testIsBearerToken_WithValidBearerToken() {
    // Given
    String authorizationHeader = "Bearer sampleAccessToken";
    // When
    boolean result = Token.isBearerToken(authorizationHeader);
    // Then
    assertTrue(result);
}
/**
 * Parses a JSON string into a MetadataUpdate by delegating to the JsonNode
 * overload of {@code fromJson} via JsonUtil's parser plumbing.
 */
public static MetadataUpdate fromJson(String json) {
    return JsonUtil.parse(json, MetadataUpdateParser::fromJson);
}
/** A set-location action JSON round-trips into an equivalent MetadataUpdate.SetLocation. */
@Test
public void testSetLocationFromJson() {
    String action = MetadataUpdateParser.SET_LOCATION;
    String location = "s3://bucket/warehouse/tbl_location";
    String json = String.format("{\"action\":\"%s\",\"location\":\"%s\"}", action, location);
    MetadataUpdate expected = new MetadataUpdate.SetLocation(location);
    assertEquals(action, expected, MetadataUpdateParser.fromJson(json));
}
/**
 * Records a job that failed before execution: builds a sparse FAILED execution
 * graph, stores it, and archives it to the history server.
 */
@Override
public CompletableFuture<Acknowledge> submitFailedJob(
        JobID jobId, String jobName, Throwable exception) {
    final ExecutionGraphInfo graphInfo =
            new ExecutionGraphInfo(
                    ArchivedExecutionGraph.createSparseArchivedExecutionGraph(
                            jobId,
                            jobName,
                            JobStatus.FAILED,
                            null,
                            exception,
                            null,
                            System.currentTimeMillis()));
    writeToExecutionGraphInfoStore(graphInfo);
    return archiveExecutionGraphToHistoryServer(graphInfo);
}
/** After submitFailedJob, the job is queryable as FAILED with the original failure cause. */
@Test
public void testRetrieveJobResultAfterSubmissionOfFailedJob() throws Exception {
    dispatcher =
            createAndStartDispatcher(
                    heartbeatServices, haServices, new ExpectedJobIdJobManagerRunnerFactory(jobId));
    final DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class);
    final JobID failedJobId = new JobID();
    final String failedJobName = "test";
    final CompletableFuture<Acknowledge> submitFuture =
            dispatcherGateway.submitFailedJob(
                    failedJobId, failedJobName, new RuntimeException("Test exception."));
    submitFuture.get();
    final ArchivedExecutionGraph archivedExecutionGraph =
            dispatcherGateway.requestJob(failedJobId, TIMEOUT).get();
    Assertions.assertThat(archivedExecutionGraph.getJobID()).isEqualTo(failedJobId);
    Assertions.assertThat(archivedExecutionGraph.getJobName()).isEqualTo(failedJobName);
    Assertions.assertThat(archivedExecutionGraph.getState()).isEqualTo(JobStatus.FAILED);
    // failure info must carry the original exception, deserializable in this classloader
    Assertions.assertThat(archivedExecutionGraph.getFailureInfo())
            .isNotNull()
            .extracting(ErrorInfo::getException)
            .extracting(e -> e.deserializeError(Thread.currentThread().getContextClassLoader()))
            .satisfies(
                    exception ->
                            Assertions.assertThat(exception)
                                    .isInstanceOf(RuntimeException.class)
                                    .hasMessage("Test exception."));
}
/**
 * Reads the OS free swap space in bytes via the JMX attribute, returning -1 when
 * the attribute is unavailable.
 */
public static long freeSwapSpace() {
    final long bytes = readLongAttribute("FreeSwapSpaceSize", -1L);
    return bytes;
}
/** freeSwapSpace is either a real byte count or the -1 sentinel — never below -1. */
@Test
public void testFreeSwapSpace() {
    assertTrue(freeSwapSpace() >= -1);
}
/**
 * Waits until any supplied condition holds, the job reaches a done/finishing
 * state, or the configured timeout expires; delegates to finishOrTimeout.
 */
public Result waitForCondition(Config config, Supplier<Boolean>... conditionCheck) {
    return finishOrTimeout(
        config,
        conditionCheck,
        // stop waiting once the job itself is terminal or finishing
        () -> jobIsDoneOrFinishing(config.project(), config.region(), config.jobId()));
}
/** The wait ends with CONDITION_MET once the checker flips, surviving a transient status IOException. */
@Test
public void testWaitForCondition() throws IOException {
    AtomicInteger callCount = new AtomicInteger();
    int totalCalls = 3;
    // condition becomes true on the third poll
    Supplier<Boolean> checker = () -> callCount.incrementAndGet() >= totalCalls;
    when(client.getJobStatus(any(), any(), any()))
        .thenReturn(JobState.RUNNING)
        .thenThrow(new IOException())   // transient failure must not abort the wait
        .thenReturn(JobState.RUNNING);
    Result result = new PipelineOperator(client).waitForCondition(DEFAULT_CONFIG, checker);
    verify(client, atMost(totalCalls))
        .getJobStatus(projectCaptor.capture(), regionCaptor.capture(), jobIdCaptor.capture());
    assertThat(projectCaptor.getValue()).isEqualTo(PROJECT);
    assertThat(regionCaptor.getValue()).isEqualTo(REGION);
    assertThat(jobIdCaptor.getValue()).isEqualTo(JOB_ID);
    assertThat(result).isEqualTo(Result.CONDITION_MET);
}
/**
 * Returns a page of mail logs matching the request's filters; pure delegation to
 * the mapper's paged select.
 */
@Override
public PageResult<MailLogDO> getMailLogPage(MailLogPageReqVO pageVO) {
    return mailLogMapper.selectPage(pageVO);
}
/** Each filter (userId, userType, toMail, accountId, templateId, sendStatus, sendTime) must narrow the page. */
@Test
public void testGetMailLogPage() {
    // mock data
    MailLogDO dbMailLog = randomPojo(MailLogDO.class, o -> { // the row the query should find
        o.setUserId(1L);
        o.setUserType(UserTypeEnum.ADMIN.getValue());
        o.setToMail("768@qq.com");
        o.setAccountId(10L);
        o.setTemplateId(100L);
        o.setSendStatus(MailSendStatusEnum.INIT.getStatus());
        o.setSendTime(buildTime(2023, 2, 10));
        o.setTemplateParams(randomTemplateParams());
    });
    mailLogMapper.insert(dbMailLog);
    // row with mismatching userId
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setUserId(2L)));
    // row with mismatching userType
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setUserType(UserTypeEnum.MEMBER.getValue())));
    // row with mismatching toMail
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setToMail("788@.qq.com")));
    // row with mismatching accountId
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setAccountId(11L)));
    // row with mismatching templateId
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setTemplateId(101L)));
    // row with mismatching sendStatus
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setSendStatus(MailSendStatusEnum.SUCCESS.getStatus())));
    // row with mismatching sendTime
    mailLogMapper.insert(cloneIgnoreId(dbMailLog, o -> o.setSendTime(buildTime(2023, 3, 10))));
    // prepare request parameters
    MailLogPageReqVO reqVO = new MailLogPageReqVO();
    reqVO.setUserId(1L);
    reqVO.setUserType(UserTypeEnum.ADMIN.getValue());
    reqVO.setToMail("768");
    reqVO.setAccountId(10L);
    reqVO.setTemplateId(100L);
    reqVO.setSendStatus(MailSendStatusEnum.INIT.getStatus());
    reqVO.setSendTime((buildBetweenTime(2023, 2, 1, 2023, 2, 15)));
    // invoke
    PageResult<MailLogDO> pageResult = mailLogService.getMailLogPage(reqVO);
    // assert: only the fully matching row survives all filters
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbMailLog, pageResult.getList().get(0));
}
@Override public String updateUserAvatar(Long id, InputStream avatarFile) { validateUserExists(id); // 存储文件 String avatar = fileApi.createFile(IoUtil.readBytes(avatarFile)); // 更新路径 AdminUserDO sysUserDO = new AdminUserDO(); sysUserDO.setId(id); sysUserDO.setAvatar(avatar); userMapper.updateById(sysUserDO); return avatar; }
/** The avatar bytes are sent to the file API and the returned path is saved on the user. */
@Test
public void testUpdateUserAvatar_success() throws Exception {
    // mock data
    AdminUserDO dbUser = randomAdminUserDO();
    userMapper.insert(dbUser);
    // prepare parameters
    Long userId = dbUser.getId();
    byte[] avatarFileBytes = randomBytes(10);
    ByteArrayInputStream avatarFile = new ByteArrayInputStream(avatarFileBytes);
    // mock the file API to return a known path for exactly these bytes
    String avatar = randomString();
    when(fileApi.createFile(eq(avatarFileBytes))).thenReturn(avatar);
    // invoke
    userService.updateUserAvatar(userId, avatarFile);
    // assert: the user row now carries the stored path
    AdminUserDO user = userMapper.selectById(userId);
    assertEquals(avatar, user.getAvatar());
}
/**
 * Parses an ADLS URI of the form {@code scheme://[container@]account.host/path}
 * into its constituent parts.
 *
 * <p>When the authority contains no {@code container@} prefix, the container
 * component is {@code null} and the whole authority is used as the account host.
 *
 * @param location the ADLS location URI string
 * @return the parsed parts: scheme, container (may be null), account host,
 *         account name (first host label), and path
 */
public static ADLSLocationParts parseLocation(String location) {
    URI locationUri = URI.create(location);
    String[] authorityParts = locationUri.getAuthority().split("@");
    // Authority is either "container@account.host" or just "account.host";
    // resolve both cases once instead of duplicating the constructor call.
    boolean hasContainer = authorityParts.length > 1;
    String container = hasContainer ? authorityParts[0] : null;
    String account = hasContainer ? authorityParts[1] : authorityParts[0];
    return new ADLSLocationParts(
        locationUri.getScheme(),
        container,
        account,
        // account name is the first dot-separated label of the account host
        account.split("\\.")[0],
        locationUri.getPath());
}
// Verifies that an authority of the form "container@account.host" is split into
// scheme, container, account name, and full account host.
@Test
public void testContainerInAuthority() {
    String location = format(
        "abfs://%s@%s.dfs.core.windows.net/path/to/files", TEST_CONTAINER, TEST_STORAGE_ACCOUNT);

    ADLSLocationUtils.ADLSLocationParts result = ADLSLocationUtils.parseLocation(location);

    assertThat(result.scheme()).isEqualTo("abfs");
    assertThat(result.container()).isEqualTo(TEST_CONTAINER);
    assertThat(result.accountName()).isEqualTo(TEST_STORAGE_ACCOUNT);
    assertThat(result.account()).isEqualTo(format("%s.dfs.core.windows.net", TEST_STORAGE_ACCOUNT));
}
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
@Test public void shouldFindTwoSameArgs() { // Given: givenFunctions( function(EXPECTED, -1, STRING, STRING) ); // When: final KsqlScalarFunction fun = udfIndex .getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING))); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
/**
 * Parses the URI in "fast" mode, delegating to {@code doParseUri} with the
 * fast flag enabled.
 *
 * @param uri the endpoint URI to parse
 * @return the parsed URI parts as produced by {@code doParseUri}
 */
public static String[] fastParseUri(String uri) {
    return doParseUri(uri, true);
}
// Exercises fastParseUri over the four combinations of single/double slash
// and presence/absence of a query string.
@Test
public void testFastParse() {
    // scheme:path with no query
    String[] plain = CamelURIParser.fastParseUri("file:relative");
    assertEquals("file", plain[0]);
    assertEquals("relative", plain[1]);
    assertNull(plain[2]);

    // double-slash form without query is reported via the sentinel instance
    String[] sentinel = CamelURIParser.fastParseUri("file://relative");
    assertEquals(CamelURIParser.URI_ALREADY_NORMALIZED, sentinel);

    // scheme:path with a query
    String[] withQuery = CamelURIParser.fastParseUri("file:relative?delete=true");
    assertEquals("file", withQuery[0]);
    assertEquals("relative", withQuery[1]);
    assertEquals("delete=true", withQuery[2]);

    // double-slash form with a query is parsed normally
    String[] slashQuery = CamelURIParser.fastParseUri("file://relative?delete=true");
    assertEquals("file", slashQuery[0]);
    assertEquals("relative", slashQuery[1]);
    assertEquals("delete=true", slashQuery[2]);
}
/**
 * Formats the expression to its text form using default format options
 * (the escaping predicate always returns false — presumably meaning no
 * identifier is escaped; confirm against {@code FormatOptions}).
 *
 * @param expression the expression to format
 * @return the formatted expression text
 */
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// COUNT with an empty argument list must render as "COUNT(*)".
@Test
public void shouldFormatFunctionCountStar() {
    // Given:
    final FunctionCall countStar =
        new FunctionCall(FunctionName.of("COUNT"), Collections.emptyList());

    // When:
    final String formatted = ExpressionFormatter.formatExpression(countStar);

    // Then:
    assertThat(formatted, equalTo("COUNT(*)"));
}
@Override
public Statement getStatement() {
    // checkState() presumably rejects use of a closed result set — verify its contract.
    checkState();
    return statement;
}
// The statement supplied to the result set is returned unchanged by the getter.
@Test
void assertGetStatement() {
    assertThat(actualResultSet.getStatement(), is(statement));
}
/**
 * Returns the {@code NamedClusterEmbedManager} held by this object.
 * May be {@code null} if none has been assigned.
 */
public NamedClusterEmbedManager getNamedClusterEmbedManager( ) {
    return namedClusterEmbedManager;
}
// The getter starts out null and, once the field is assigned, returns the
// exact instance that was set.
@Test
public void testGetNamedClusterEmbedManager() {
    assertNull( meta.getNamedClusterEmbedManager() );

    NamedClusterEmbedManager embedManager = mock( NamedClusterEmbedManager.class );
    meta.namedClusterEmbedManager = embedManager;
    assertEquals( embedManager, meta.getNamedClusterEmbedManager() );
}
/**
 * Parses a single media type (e.g. {@code "application/json"}) from its
 * string form.
 *
 * @param tree the media type string; must be non-null and non-empty
 * @return the parsed {@code MediaType}
 */
@ProtoFactory
public static MediaType fromString(String tree) {
    if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType();
    Matcher matcher = TREE_PATTERN.matcher(tree);
    // Non-wildcard parse of exactly one media type.
    return parseSingleMediaType(tree, matcher, false);
}
// A second '/' after the subtype is malformed and must be rejected.
@Test(expected = EncodingException.class)
public void testParsingMultipleSubType() {
    MediaType.fromString("application/json/on");
}
/**
 * Decodes a Maintenance Domain from its JSON representation.
 *
 * <p>The {@code mdName} attribute is mandatory; {@code mdNameType} defaults to
 * CHARACTERSTRING, and {@code mdLevel}/{@code mdNumericId} are optional.
 *
 * @param json    the JSON object to decode; may be null
 * @param context the codec context
 * @return the decoded Maintenance Domain, or null when {@code json} is absent
 *         or not an object
 * @throws IllegalArgumentException when the MD name cannot be parsed
 */
@Override
public MaintenanceDomain decode(ObjectNode json, CodecContext context) {
    if (json == null || !json.isObject()) {
        return null;
    }

    JsonNode mdNode = json.get(MD);
    String mdName = nullIsIllegal(mdNode.get(MD_NAME), "mdName is required").asText();
    // Name type defaults to CHARACTERSTRING unless explicitly provided.
    String mdNameType = mdNode.get(MD_NAME_TYPE) == null
            ? MdId.MdNameType.CHARACTERSTRING.name()
            : mdNode.get(MD_NAME_TYPE).asText();

    try {
        MaintenanceDomain.MdBuilder builder =
                DefaultMaintenanceDomain.builder(MdMaNameUtil.parseMdName(mdNameType, mdName));

        JsonNode levelNode = mdNode.get(MD_LEVEL);
        if (levelNode != null) {
            builder = builder.mdLevel(MdLevel.valueOf(levelNode.asText()));
        }

        JsonNode numericIdNode = mdNode.get(MD_NUMERIC_ID);
        if (numericIdNode != null) {
            builder = builder.mdNumericId((short) numericIdNode.asInt());
        }

        return builder.build();
    } catch (CfmConfigException e) {
        throw new IllegalArgumentException(e);
    }
}
// An empty mdName with name type NONE decodes to the predefined MDID4_NONE id.
@Test
public void testDecodeMd4() throws IOException {
    String mdJson = "{\"md\": { \"mdName\": \"\"," +
            "\"mdNameType\": \"NONE\"}}";
    InputStream input = new ByteArrayInputStream(
            mdJson.getBytes(StandardCharsets.UTF_8));
    JsonNode cfg = mapper.readTree(input);

    MaintenanceDomain decoded = context
            .codec(MaintenanceDomain.class).decode((ObjectNode) cfg, context);
    assertEquals(MDID4_NONE, decoded.mdId());
}
/**
 * Splits a URL-encoded memcache key into its target map name and key.
 *
 * <p>A key of the form {@code "mapName:key"} addresses the map
 * {@code MAP_NAME_PREFIX + mapName}; a key without a colon addresses the
 * default map.
 *
 * @param key the raw (URL-encoded) memcache key
 * @return the resolved map name and decoded key
 */
public static MapNameAndKeyPair parseMemcacheKey(String key) {
    String decoded = URLDecoder.decode(key, StandardCharsets.UTF_8);
    int separator = decoded.indexOf(':');
    if (separator == -1) {
        // No explicit map: route to the default map.
        return new MapNameAndKeyPair(DEFAULT_MAP_NAME, decoded);
    }
    String mapName = MAP_NAME_PREFIX + decoded.substring(0, separator);
    return new MapNameAndKeyPair(mapName, decoded.substring(separator + 1));
}
// A key without a ':' prefix is routed to the default memcache map.
@Test
public void withDefaultMap() {
    MapNameAndKeyPair mapNameKeyPair = MemcacheUtils.parseMemcacheKey("key");
    assertEquals("hz_memcache_default", mapNameKeyPair.getMapName());
    assertEquals("key", mapNameKeyPair.getKey());
}
@Override
public PollResult poll(long currentTimeMs) {
    // Build fetch requests for the fetchable partitions and attach the
    // success/failure handlers for their responses.
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
// In READ_UNCOMMITTED mode, records belonging to an aborted transaction must
// still be returned to the application.
@Test
public void testReturnAbortedTransactionsInUncommittedMode() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);

    // Append two transactional records followed by an abort marker.
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();

    // The fetch response marks producer 1's transaction (starting at offset 0) as aborted.
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    // The aborted records are visible because the isolation level is READ_UNCOMMITTED.
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
}
/**
 * Registers a state store with this state manager, attaching changelog
 * metadata when logging is enabled for the store.
 *
 * @param store                the state store to register
 * @param stateRestoreCallback callback used to restore records into the store
 * @param commitCallback       callback invoked when the store is committed
 * @throws IllegalArgumentException if the store name collides with the
 *                                  checkpoint file name or is already registered
 */
@Override
public void registerStore(final StateStore store,
                          final StateRestoreCallback stateRestoreCallback,
                          final CommitCallback commitCallback) {
    final String storeName = store.name();

    // TODO (KAFKA-12887): we should not trigger user's exception handler for illegal-argument but always
    // fail-crash; in this case we would not need to immediately close the state store before throwing
    if (CHECKPOINT_FILE_NAME.equals(storeName)) {
        store.close();
        throw new IllegalArgumentException(format("%sIllegal store name: %s, which collides with the pre-defined " +
            "checkpoint file name", logPrefix, storeName));
    }

    if (stores.containsKey(storeName)) {
        store.close();
        throw new IllegalArgumentException(format("%sStore %s has already been registered.", logPrefix, storeName));
    }

    if (stateRestoreCallback instanceof StateRestoreListener) {
        log.warn("The registered state restore callback is also implementing the state restore listener interface, " +
            "which is not expected and would be ignored");
    }

    // Logging-enabled stores carry full changelog metadata (partition, restore and
    // commit callbacks, record converter); others only keep the commit callback.
    final StateStoreMetadata storeMetadata = isLoggingEnabled(storeName) ?
        new StateStoreMetadata(
            store,
            getStorePartition(storeName),
            stateRestoreCallback,
            commitCallback,
            converterForStore(store)) :
        new StateStoreMetadata(store, commitCallback);

    // register the store first, so that if later an exception is thrown then eventually while we call `close`
    // on the state manager this state store would be closed as well
    stores.put(storeName, storeMetadata);

    if (!stateUpdaterEnabled) {
        maybeRegisterStoreWithChangelogReader(storeName);
    }

    log.debug("Registered state store {} to its state manager", storeName);
}
// An IOException thrown by a store's commit callback must surface from
// checkpoint() as a ProcessorStateException with the IOException as its cause.
@Test
public void shouldThrowOnFailureToWritePositionCheckpointFile() throws IOException {
    final ProcessorStateManager stateMgr = getStateManager(Task.TaskType.ACTIVE);
    final CommitCallback persistentCheckpoint = mock(CommitCallback.class);
    final IOException ioException = new IOException("asdf");
    doThrow(ioException).when(persistentCheckpoint).onCommit();
    stateMgr.registerStore(
        persistentStore,
        persistentStore.stateRestoreCallback,
        persistentCheckpoint
    );

    final ProcessorStateException processorStateException = assertThrows(
        ProcessorStateException.class,
        stateMgr::checkpoint
    );

    // The message identifies both the state manager and the changelog partition.
    assertThat(
        processorStateException.getMessage(),
        containsString(
            "process-state-manager-test Exception caught while trying to checkpoint store,"
                + " changelog partition test-application-My-Topology-persistentStore-changelog-1"
        )
    );
    assertThat(processorStateException.getCause(), is(ioException));
}
/**
 * Returns the configured server list, falling back to the addresses obtained
 * from the endpoint when no servers were configured explicitly.
 */
@Override
public List<String> getServerList() {
    if (serverList.isEmpty()) {
        return serversFromEndpoint;
    }
    return serverList;
}
// With a custom endpoint context path and cluster name, the address-server URL
// becomes http://<endpoint>:8080/<context-path>/<cluster-name>.
@Test
void testConstructWithEndpointWithEndpointPathAndName() throws Exception {
    clientProperties.setProperty(PropertyKeyConst.ENDPOINT_CONTEXT_PATH, "aaa");
    clientProperties.setProperty(PropertyKeyConst.ENDPOINT_CLUSTER_NAME, "bbb");
    clientProperties.setProperty(PropertyKeyConst.ENDPOINT, "127.0.0.1");
    // Reset before re-stubbing so only the expected URL is answered.
    Mockito.reset(nacosRestTemplate);
    Mockito.when(nacosRestTemplate.get(eq("http://127.0.0.1:8080/aaa/bbb"), any(), any(), any()))
            .thenReturn(httpRestResult);
    serverListManager = new ServerListManager(clientProperties, "test");
    List<String> serverList = serverListManager.getServerList();
    assertEquals(1, serverList.size());
    assertEquals("127.0.0.1:8848", serverList.get(0));
}
/**
 * Writes the custom properties from the transfer status to the remote file
 * and records the resulting attributes back on the status.
 *
 * @param file   the remote file to update
 * @param status carries the metadata to set; receives the updated attributes
 * @throws BackgroundException when the Drive API call fails
 */
@Override
public void setMetadata(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        final File body = new File();
        body.setProperties(status.getMetadata());
        final String id = this.fileid.getFileId(file);
        final File updated = session.getClient().files().update(id, body)
            .setFields("properties")
            .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"))
            .execute();
        status.setResponse(new DriveAttributesFinderFeature(session, this.fileid).toAttributes(updated));
    }
    catch(IOException e) {
        throw new DriveExceptionMappingService(fileid).map("Failure to write attributes of {0}", e, file);
    }
}
// Integration test against a live Google Drive session: sets custom file
// properties and verifies they round-trip through getMetadata.
@Test
public void setMetadata() throws Exception {
    final Path home = DriveHomeFinderService.MYDRIVE_FOLDER;
    final Path test = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    new DriveTouchFeature(session, fileid).touch(test, new TransferStatus());
    // A freshly created file has no custom metadata.
    assertEquals(Collections.emptyMap(), new DriveMetadataFeature(session, fileid).getMetadata(test));
    new DriveMetadataFeature(session, fileid).setMetadata(test, Collections.singletonMap("test", "t"));
    assertEquals(Collections.singletonMap("test", "t"), new DriveMetadataFeature(session, fileid).getMetadata(test));
    // Clean up the remote test file.
    new DriveDeleteFeature(session, fileid).delete(Collections.<Path>singletonList(test),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
@Override
public boolean next() throws SQLException {
    // Everything was consumed by offset handling; nothing left to return.
    if (skipAll) {
        return false;
    }
    // No concrete row-count limit known: delegate directly to the merged result.
    if (!paginationContext.getActualRowCount().isPresent()) {
        return getMergedResult().next();
    }
    // rowNumber++ is evaluated first so the counter advances on every call;
    // once past the limit, && short-circuits and the underlying result is
    // not advanced any further.
    return rowNumber++ <= paginationContext.getActualRowCount().get() && getMergedResult().next();
}
// SQL Server SELECT with a row count of 5 and no offset: merging four query
// results must yield exactly five rows.
@Test
void assertNextWithoutOffsetWithRowCount() throws SQLException {
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "SQLServer"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(schema);
    SQLServerSelectStatement sqlStatement = new SQLServerSelectStatement();
    sqlStatement.setProjections(new ProjectionsSegment(0, 0));
    // row count 5, no offset
    sqlStatement.setLimit(new LimitSegment(0, 0, null, new NumberLiteralLimitValueSegment(0, 0, 5L)));
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(),
        sqlStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    MergedResult actual = resultMerger.merge(Arrays.asList(mockQueryResult(), mockQueryResult(), mockQueryResult(), mockQueryResult()),
        selectStatementContext, mockShardingSphereDatabase(), mock(ConnectionContext.class));
    for (int i = 0; i < 5; i++) {
        assertTrue(actual.next());
    }
    assertFalse(actual.next());
}
@Override
public Config build() {
    // Delegate to the overload, populating a fresh Config instance.
    return build(new Config());
}
@Override @Test public void testQueryCacheFullConfig() { String yaml = """ hazelcast: map: test: query-caches: cache-name: entry-listeners: - class-name: com.hazelcast.examples.EntryListener include-value: true local: false include-value: true batch-size: 1 buffer-size: 16 delay-seconds: 0 in-memory-format: BINARY coalesce: false populate: true serialize-keys: true indexes: - type: HASH attributes: - "name" predicate: class-name: com.hazelcast.examples.SimplePredicate eviction: eviction-policy: LRU max-size-policy: ENTRY_COUNT size: 133 """; Config config = buildConfig(yaml); QueryCacheConfig queryCacheConfig = config.getMapConfig("test").getQueryCacheConfigs().get(0); EntryListenerConfig entryListenerConfig = queryCacheConfig.getEntryListenerConfigs().get(0); assertEquals("cache-name", queryCacheConfig.getName()); assertTrue(entryListenerConfig.isIncludeValue()); assertFalse(entryListenerConfig.isLocal()); assertEquals("com.hazelcast.examples.EntryListener", entryListenerConfig.getClassName()); assertTrue(queryCacheConfig.isIncludeValue()); assertEquals(1, queryCacheConfig.getBatchSize()); assertEquals(16, queryCacheConfig.getBufferSize()); assertEquals(0, queryCacheConfig.getDelaySeconds()); assertEquals(InMemoryFormat.BINARY, queryCacheConfig.getInMemoryFormat()); assertFalse(queryCacheConfig.isCoalesce()); assertTrue(queryCacheConfig.isPopulate()); assertTrue(queryCacheConfig.isSerializeKeys()); assertIndexesEqual(queryCacheConfig); assertEquals("com.hazelcast.examples.SimplePredicate", queryCacheConfig.getPredicateConfig().getClassName()); assertEquals(LRU, queryCacheConfig.getEvictionConfig().getEvictionPolicy()); assertEquals(ENTRY_COUNT, queryCacheConfig.getEvictionConfig().getMaxSizePolicy()); assertEquals(133, queryCacheConfig.getEvictionConfig().getSize()); } private void assertIndexesEqual(QueryCacheConfig queryCacheConfig) { for (IndexConfig indexConfig : queryCacheConfig.getIndexConfigs()) { assertEquals("name", indexConfig.getAttributes().get(0)); 
assertFalse(indexConfig.getType() == IndexType.SORTED); } } @Override @Test public void testMapQueryCachePredicate() { String yaml = """ hazelcast: map: test: query-caches: cache-class-name: predicate: class-name: com.hazelcast.examples.SimplePredicate cache-sql: predicate: sql: "%age=40" """; Config config = buildConfig(yaml); QueryCacheConfig queryCacheClassNameConfig = config.getMapConfig("test").getQueryCacheConfigs().get(0); assertEquals("com.hazelcast.examples.SimplePredicate", queryCacheClassNameConfig.getPredicateConfig().getClassName()); QueryCacheConfig queryCacheSqlConfig = config.getMapConfig("test").getQueryCacheConfigs().get(1); assertEquals("%age=40", queryCacheSqlConfig.getPredicateConfig().getSql()); } @Override @Test public void testLiteMemberConfig() { String yaml = """ hazelcast: lite-member: enabled: true """; Config config = buildConfig(yaml); assertTrue(config.isLiteMember()); } @Override @Test public void testNonLiteMemberConfig() { String yaml = """ hazelcast: lite-member: enabled: false """; Config config = buildConfig(yaml); assertFalse(config.isLiteMember()); } @Override @Test(expected = SchemaViolationConfigurationException.class) public void testNonLiteMemberConfigWithoutEnabledField() { String yaml = """ hazelcast: lite-member: {} """; buildConfig(yaml); } @Override @Test(expected = SchemaViolationConfigurationException.class) public void testInvalidLiteMemberConfig() { String yaml = """ hazelcast: lite-member: enabled: dummytext """; buildConfig(yaml); } @Override @Test(expected = InvalidConfigurationException.class) public void testDuplicateLiteMemberConfig() { String yaml = """ hazelcast: lite-member: enabled: true lite-member: enabled: true """; buildConfig(yaml); fail(); } private static void assertIndexEqual(String expectedAttribute, boolean expectedOrdered, IndexConfig indexConfig) { assertEquals(expectedAttribute, indexConfig.getAttributes().get(0)); assertEquals(expectedOrdered, indexConfig.getType() == IndexType.SORTED); } 
// Every NATIVE-map max-size policy (except the unsupported ENTRY_COUNT) must round-trip.
// NOTE(review): YAML text-block and string-concat indentation below was
// reconstructed from a whitespace-mangled source — verify against the original file.
@Override
@Test
public void testMapNativeMaxSizePolicy() {
    String yamlFormat = """
            hazelcast:
              map:
                mymap:
                  in-memory-format: NATIVE
                  eviction:
                    max-size-policy: "{0}"
                    size: 9991
            """;
    MessageFormat messageFormat = new MessageFormat(yamlFormat);

    MaxSizePolicy[] maxSizePolicies = MaxSizePolicy.values();
    for (MaxSizePolicy maxSizePolicy : maxSizePolicies) {
        if (maxSizePolicy == ENTRY_COUNT) {
            // imap does not support ENTRY_COUNT
            continue;
        }
        Object[] objects = {maxSizePolicy.toString()};
        String yaml = messageFormat.format(objects);
        Config config = buildConfig(yaml);
        MapConfig mapConfig = config.getMapConfig("mymap");
        EvictionConfig evictionConfig = mapConfig.getEvictionConfig();

        assertEquals(9991, evictionConfig.getSize());
        assertEquals(maxSizePolicy, evictionConfig.getMaxSizePolicy());
    }
}

// instance-name is read straight into Config.getInstanceName().
@Override
@Test
public void testInstanceName() {
    String name = randomName();
    String yaml = ""
            + "hazelcast:\n"
            + "  instance-name: " + name + "\n";

    Config config = buildConfig(yaml);
    assertEquals(name, config.getInstanceName());
}

// All user-code-deployment attributes are parsed.
@Override
@Test
public void testUserCodeDeployment() {
    String yaml = """
            hazelcast:
              user-code-deployment:
                enabled: true
                class-cache-mode: OFF
                provider-mode: LOCAL_CLASSES_ONLY
                blacklist-prefixes: com.blacklisted,com.other.blacklisted
                whitelist-prefixes: com.whitelisted,com.other.whitelisted
                provider-filter: HAS_ATTRIBUTE:foo
            """;
    Config config = new InMemoryYamlConfig(yaml);
    UserCodeDeploymentConfig dcConfig = config.getUserCodeDeploymentConfig();
    assertTrue(dcConfig.isEnabled());
    assertEquals(UserCodeDeploymentConfig.ClassCacheMode.OFF, dcConfig.getClassCacheMode());
    assertEquals(UserCodeDeploymentConfig.ProviderMode.LOCAL_CLASSES_ONLY, dcConfig.getProviderMode());
    assertEquals("com.blacklisted,com.other.blacklisted", dcConfig.getBlacklistedPrefixes());
    assertEquals("com.whitelisted,com.other.whitelisted", dcConfig.getWhitelistedPrefixes());
    assertEquals("HAS_ATTRIBUTE:foo", dcConfig.getProviderFilter());
}

// Enabling user-code-deployment without further attributes keeps all defaults.
@Override
public void testEmptyUserCodeDeployment() {
    String yaml = """
            hazelcast:
              user-code-deployment:
                enabled: true
            """;
    Config config = buildConfig(yaml);
    UserCodeDeploymentConfig userCodeDeploymentConfig = config.getUserCodeDeploymentConfig();
    assertTrue(userCodeDeploymentConfig.isEnabled());
    assertEquals(UserCodeDeploymentConfig.ClassCacheMode.ETERNAL, userCodeDeploymentConfig.getClassCacheMode());
    assertEquals(UserCodeDeploymentConfig.ProviderMode.LOCAL_AND_CACHED_CLASSES, userCodeDeploymentConfig.getProviderMode());
    assertNull(userCodeDeploymentConfig.getBlacklistedPrefixes());
    assertNull(userCodeDeploymentConfig.getWhitelistedPrefixes());
    assertNull(userCodeDeploymentConfig.getProviderFilter());
}

// CRDT replication target count and period are parsed.
@Override
@Test
public void testCRDTReplicationConfig() {
    final String yaml = """
            hazelcast:
              crdt-replication:
                max-concurrent-replication-targets: 10
                replication-period-millis: 2000
            """;
    final Config config = new InMemoryYamlConfig(yaml);
    final CRDTReplicationConfig replicationConfig = config.getCRDTReplicationConfig();
    assertEquals(10, replicationConfig.getMaxConcurrentReplicationTargets());
    assertEquals(2000, replicationConfig.getReplicationPeriodMillis());
}

// Global serializer class name and the override-java-serialization flag are parsed.
@Override
@Test
public void testGlobalSerializer() {
    String name = randomName();
    String val = "true";
    String yaml = ""
            + "hazelcast:\n"
            + "  serialization:\n"
            + "    global-serializer:\n"
            + "      class-name: " + name + "\n"
            + "      override-java-serialization: " + val + "\n";

    Config config = new InMemoryYamlConfig(yaml);
    GlobalSerializerConfig globalSerializerConfig = config.getSerializationConfig().getGlobalSerializerConfig();

    assertEquals(name, globalSerializerConfig.getClassName());
    assertTrue(globalSerializerConfig.isOverrideJavaSerialization());
}

// Java serialization filter white/black lists (classes, packages, prefixes) are parsed.
@Override
@Test
public void testJavaSerializationFilter() {
    String yaml = """
            hazelcast:
              serialization:
                java-serialization-filter:
                  defaults-disabled: true
                  whitelist:
                    class:
                      - java.lang.String
                      - example.Foo
                    package:
                      - com.acme.app
                      - com.acme.app.subpkg
                    prefix:
                      - java
                      - com.hazelcast.
                      - "["
                  blacklist:
                    class:
                      - com.acme.app.BeanComparator
            """;

    Config config = new InMemoryYamlConfig(yaml);
    JavaSerializationFilterConfig javaSerializationFilterConfig
            = config.getSerializationConfig().getJavaSerializationFilterConfig();
    assertNotNull(javaSerializationFilterConfig);
    ClassFilter blackList = javaSerializationFilterConfig.getBlacklist();
    assertNotNull(blackList);
    ClassFilter whiteList = javaSerializationFilterConfig.getWhitelist();
    assertNotNull(whiteList);
    assertTrue(whiteList.getClasses().contains("java.lang.String"));
    assertTrue(whiteList.getClasses().contains("example.Foo"));
    assertTrue(whiteList.getPackages().contains("com.acme.app"));
    assertTrue(whiteList.getPackages().contains("com.acme.app.subpkg"));
    assertTrue(whiteList.getPrefixes().contains("java"));
    assertTrue(whiteList.getPrefixes().contains("["));
    assertTrue(blackList.getClasses().contains("com.acme.app.BeanComparator"));
}

// SQL java reflection filter white/black lists are parsed the same way.
@Override
@Test
public void testJavaReflectionFilter() {
    String yaml = """
            hazelcast:
              sql:
                java-reflection-filter:
                  defaults-disabled: true
                  whitelist:
                    class:
                      - java.lang.String
                      - example.Foo
                    package:
                      - com.acme.app
                      - com.acme.app.subpkg
                    prefix:
                      - java
                      - com.hazelcast.
                      - "["
                  blacklist:
                    class:
                      - com.acme.app.BeanComparator
            """;

    Config config = new InMemoryYamlConfig(yaml);
    JavaSerializationFilterConfig javaReflectionFilterConfig
            = config.getSqlConfig().getJavaReflectionFilterConfig();
    assertNotNull(javaReflectionFilterConfig);
    ClassFilter blackList = javaReflectionFilterConfig.getBlacklist();
    assertNotNull(blackList);
    ClassFilter whiteList = javaReflectionFilterConfig.getWhitelist();
    assertNotNull(whiteList);
    assertTrue(whiteList.getClasses().contains("java.lang.String"));
    assertTrue(whiteList.getClasses().contains("example.Foo"));
    assertTrue(whiteList.getPackages().contains("com.acme.app"));
    assertTrue(whiteList.getPackages().contains("com.acme.app.subpkg"));
    assertTrue(whiteList.getPrefixes().contains("java"));
    assertTrue(whiteList.getPrefixes().contains("["));
    assertTrue(blackList.getClasses().contains("com.acme.app.BeanComparator"));
}

// An explicit compact serializer registration is honored.
@Override
public void testCompactSerialization_serializerRegistration() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  serializers:
                    - serializer: example.serialization.SerializableEmployeeDTOSerializer
            """;

    Config config = buildConfig(yaml);
    CompactTestUtil.verifyExplicitSerializerIsUsed(config.getSerializationConfig());
}

// Registering only the class falls back to the reflective compact serializer.
@Override
public void testCompactSerialization_classRegistration() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  classes:
                    - class: example.serialization.ExternalizableEmployeeDTO
            """;

    Config config = buildConfig(yaml);
    CompactTestUtil.verifyReflectiveSerializerIsUsed(config.getSerializationConfig());
}

// Serializer and class registrations can coexist for different types.
@Override
public void testCompactSerialization_serializerAndClassRegistration() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  serializers:
                    - serializer: example.serialization.SerializableEmployeeDTOSerializer
                  classes:
                    - class: example.serialization.ExternalizableEmployeeDTO
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    CompactTestUtil.verifyExplicitSerializerIsUsed(config);
    CompactTestUtil.verifyReflectiveSerializerIsUsed(config);
}

// Registering the same serializer twice is rejected.
@Override
public void testCompactSerialization_duplicateSerializerRegistration() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  serializers:
                    - serializer: example.serialization.EmployeeDTOSerializer
                    - serializer: example.serialization.EmployeeDTOSerializer
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    assertThatThrownBy(() -> CompactTestUtil.verifySerializationServiceBuilds(config))
            .isInstanceOf(InvalidConfigurationException.class)
            .hasMessageContaining("Duplicate");
}

// Registering the same class twice is rejected.
@Override
public void testCompactSerialization_duplicateClassRegistration() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  classes:
                    - class: example.serialization.ExternalizableEmployeeDTO
                    - class: example.serialization.ExternalizableEmployeeDTO
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    assertThatThrownBy(() -> CompactTestUtil.verifySerializationServiceBuilds(config))
            .isInstanceOf(InvalidConfigurationException.class)
            .hasMessageContaining("Duplicate");
}

// Two serializers for the same class are rejected.
@Override
public void testCompactSerialization_registrationsWithDuplicateClasses() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  serializers:
                    - serializer: example.serialization.EmployeeDTOSerializer
                    - serializer: example.serialization.SameClassEmployeeDTOSerializer
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    assertThatThrownBy(() -> CompactTestUtil.verifySerializationServiceBuilds(config))
            .isInstanceOf(InvalidConfigurationException.class)
            .hasMessageContaining("Duplicate")
            .hasMessageContaining("class");
}

// Two serializers using the same compact type name are rejected.
@Override
public void testCompactSerialization_registrationsWithDuplicateTypeNames() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  serializers:
                    - serializer: example.serialization.EmployeeDTOSerializer
                    - serializer: example.serialization.SameTypeNameEmployeeDTOSerializer
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    assertThatThrownBy(() -> CompactTestUtil.verifySerializationServiceBuilds(config))
            .isInstanceOf(InvalidConfigurationException.class)
            .hasMessageContaining("Duplicate")
            .hasMessageContaining("type name");
}

// A serializer class that cannot be loaded fails service construction.
@Override
public void testCompactSerialization_withInvalidSerializer() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  serializers:
                    - serializer: does.not.exist.FooSerializer
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    assertThatThrownBy(() -> CompactTestUtil.verifySerializationServiceBuilds(config))
            .isInstanceOf(InvalidConfigurationException.class)
            .hasMessageContaining("Cannot create an instance");
}

// A compact-serializable class that cannot be loaded fails service construction.
@Override
public void testCompactSerialization_withInvalidCompactSerializableClass() {
    String yaml = """
            hazelcast:
              serialization:
                compact-serialization:
                  classes:
                    - class: does.not.exist.Foo
            """;

    SerializationConfig config = buildConfig(yaml).getSerializationConfig();
    assertThatThrownBy(() -> CompactTestUtil.verifySerializationServiceBuilds(config))
            .isInstanceOf(InvalidConfigurationException.class)
            .hasMessageContaining("Cannot load");
}

// allow-override-default-serializers flag is parsed.
@Override
@Test
public void testAllowOverrideDefaultSerializers() {
    final String yaml = """
            hazelcast:
              serialization:
                allow-override-default-serializers: true
            """;

    final Config config = new InMemoryYamlConfig(yaml);
    final boolean isAllowOverrideDefaultSerializers
            = config.getSerializationConfig().isAllowOverrideDefaultSerializers();
    assertTrue(isAllowOverrideDefaultSerializers);
}

// All hot-restart-persistence attributes are parsed; dirs are resolved to absolute paths.
@Override
@Test
public void testHotRestart() {
    String dir = "/mnt/hot-restart-root/";
    String backupDir = "/mnt/hot-restart-backup/";
    int parallelism = 3;
    int validationTimeout = 13131;
    int dataLoadTimeout = 45454;
    HotRestartClusterDataRecoveryPolicy policy = HotRestartClusterDataRecoveryPolicy.PARTIAL_RECOVERY_MOST_RECENT;
    String yaml = ""
            + "hazelcast:\n"
            + "  hot-restart-persistence:\n"
            + "    auto-remove-stale-data: true\n"
            + "    enabled: true\n"
            + "    base-dir: " + dir + "\n"
            + "    backup-dir: " + backupDir + "\n"
            + "    parallelism: " + parallelism + "\n"
            + "    validation-timeout-seconds: " + validationTimeout + "\n"
            + "    data-load-timeout-seconds: " + dataLoadTimeout + "\n"
            + "    cluster-data-recovery-policy: " + policy + "\n";

    Config config = new InMemoryYamlConfig(yaml);
    HotRestartPersistenceConfig hotRestartPersistenceConfig = config.getHotRestartPersistenceConfig();

    assertTrue(hotRestartPersistenceConfig.isEnabled());
    assertTrue(hotRestartPersistenceConfig.isAutoRemoveStaleData());
    assertEquals(new File(dir).getAbsolutePath(), hotRestartPersistenceConfig.getBaseDir().getAbsolutePath());
    assertEquals(new File(backupDir).getAbsolutePath(), hotRestartPersistenceConfig.getBackupDir().getAbsolutePath());
    assertEquals(parallelism, hotRestartPersistenceConfig.getParallelism());
    assertEquals(validationTimeout, hotRestartPersistenceConfig.getValidationTimeoutSeconds());
    assertEquals(dataLoadTimeout, hotRestartPersistenceConfig.getDataLoadTimeoutSeconds());
    assertEquals(policy, hotRestartPersistenceConfig.getClusterDataRecoveryPolicy());
}

// All persistence attributes (successor of hot-restart) are parsed, including rebalance delay.
@Override
@Test
public void testPersistence() {
    String dir = "/mnt/persistence-root/";
    String backupDir = "/mnt/persistence-backup/";
    int parallelism = 3;
    int validationTimeout = 13131;
    int dataLoadTimeout = 45454;
    int rebalanceDelaySeconds = 240;
    PersistenceClusterDataRecoveryPolicy policy = PersistenceClusterDataRecoveryPolicy.PARTIAL_RECOVERY_MOST_RECENT;
    String yaml = ""
            + "hazelcast:\n"
            + "  persistence:\n"
            + "    enabled: true\n"
            + "    auto-remove-stale-data: true\n"
            + "    base-dir: " + dir + "\n"
            + "    backup-dir: " + backupDir + "\n"
            + "    parallelism: " + parallelism + "\n"
            + "    validation-timeout-seconds: " + validationTimeout + "\n"
            + "    data-load-timeout-seconds: " + dataLoadTimeout + "\n"
            + "    cluster-data-recovery-policy: " + policy + "\n"
            + "    rebalance-delay-seconds: " + rebalanceDelaySeconds + "\n";

    Config config = new InMemoryYamlConfig(yaml);
    PersistenceConfig persistenceConfig = config.getPersistenceConfig();

    assertTrue(persistenceConfig.isEnabled());
    assertTrue(persistenceConfig.isAutoRemoveStaleData());
    assertEquals(new File(dir).getAbsolutePath(), persistenceConfig.getBaseDir().getAbsolutePath());
    assertEquals(new File(backupDir).getAbsolutePath(), persistenceConfig.getBackupDir().getAbsolutePath());
    assertEquals(parallelism, persistenceConfig.getParallelism());
    assertEquals(validationTimeout, persistenceConfig.getValidationTimeoutSeconds());
    assertEquals(dataLoadTimeout, persistenceConfig.getDataLoadTimeoutSeconds());
    assertEquals(policy, persistenceConfig.getClusterDataRecoveryPolicy());
    assertEquals(rebalanceDelaySeconds, persistenceConfig.getRebalanceDelaySeconds());
}

// dynamic-configuration attributes are parsed; omitted backup settings fall back to defaults.
@Override
@Test
public void testDynamicConfig() {
    boolean persistenceEnabled = true;
    String backupDir = "/mnt/dynamic-configuration/backup-dir";
    int backupCount = 7;

    String yaml = ""
            + "hazelcast:\n"
            + "  dynamic-configuration:\n"
            + "    persistence-enabled: " + persistenceEnabled + "\n"
            + "    backup-dir: " + backupDir + "\n"
            + "    backup-count: " + backupCount + "\n";

    Config config = new InMemoryYamlConfig(yaml);
    DynamicConfigurationConfig dynamicConfigurationConfig = config.getDynamicConfigurationConfig();

    assertEquals(persistenceEnabled, dynamicConfigurationConfig.isPersistenceEnabled());
    assertEquals(new File(backupDir).getAbsolutePath(), dynamicConfigurationConfig.getBackupDir().getAbsolutePath());
    assertEquals(backupCount, dynamicConfigurationConfig.getBackupCount());

    yaml = ""
            + "hazelcast:\n"
            + "  dynamic-configuration:\n"
            + "    persistence-enabled: " + persistenceEnabled + "\n";

    config = new InMemoryYamlConfig(yaml);
    dynamicConfigurationConfig = config.getDynamicConfigurationConfig();

    assertEquals(persistenceEnabled, dynamicConfigurationConfig.isPersistenceEnabled());
    assertEquals(new File(DEFAULT_BACKUP_DIR).getAbsolutePath(), dynamicConfigurationConfig.getBackupDir().getAbsolutePath());
    assertEquals(DEFAULT_BACKUP_COUNT, dynamicConfigurationConfig.getBackupCount());
}
@Override @Test public void testLocalDevice() { String baseDir = "base-directory"; int blockSize = 2048; int readIOThreadCount = 16; int writeIOThreadCount = 1; String yaml = "" + "hazelcast:\n"; Config config = new InMemoryYamlConfig(yaml); // default device LocalDeviceConfig localDeviceConfig = config.getDeviceConfig(DEFAULT_DEVICE_NAME); assertEquals(DEFAULT_DEVICE_NAME, localDeviceConfig.getName()); assertEquals(new File(DEFAULT_DEVICE_BASE_DIR).getAbsoluteFile(), localDeviceConfig.getBaseDir()); assertEquals(DEFAULT_BLOCK_SIZE_IN_BYTES, localDeviceConfig.getBlockSize()); assertEquals(DEFAULT_READ_IO_THREAD_COUNT, localDeviceConfig.getReadIOThreadCount()); assertEquals(DEFAULT_WRITE_IO_THREAD_COUNT, localDeviceConfig.getWriteIOThreadCount()); assertEquals(LocalDeviceConfig.DEFAULT_CAPACITY, localDeviceConfig.getCapacity()); yaml = "" + "hazelcast:\n" + " local-device:\n" + " my-device:\n" + " base-dir: " + baseDir + "\n" + " capacity:\n" + " unit: GIGABYTES\n" + " value: 100\n" + " block-size: " + blockSize + "\n" + " read-io-thread-count: " + readIOThreadCount + "\n" + " write-io-thread-count: " + writeIOThreadCount + "\n"; config = new InMemoryYamlConfig(yaml); localDeviceConfig = config.getDeviceConfig("my-device"); assertEquals("my-device", localDeviceConfig.getName()); assertEquals(new File(baseDir).getAbsolutePath(), localDeviceConfig.getBaseDir().getAbsolutePath()); assertEquals(blockSize, localDeviceConfig.getBlockSize()); assertEquals(new Capacity(100, MemoryUnit.GIGABYTES), localDeviceConfig.getCapacity()); assertEquals(readIOThreadCount, localDeviceConfig.getReadIOThreadCount()); assertEquals(writeIOThreadCount, localDeviceConfig.getWriteIOThreadCount()); int device0Multiplier = 2; int device1Multiplier = 4; yaml = "" + "hazelcast:\n" + " local-device:\n" + " device0:\n" + " capacity:\n" + " unit: MEGABYTES\n" + " value: 1234567890\n" + " block-size: " + (blockSize * device0Multiplier) + "\n" + " read-io-thread-count: " + (readIOThreadCount * 
device0Multiplier) + "\n" + " write-io-thread-count: " + (writeIOThreadCount * device0Multiplier) + "\n" + " device1:\n" + " block-size: " + (blockSize * device1Multiplier) + "\n" + " read-io-thread-count: " + (readIOThreadCount * device1Multiplier) + "\n" + " write-io-thread-count: " + (writeIOThreadCount * device1Multiplier) + "\n"; config = new InMemoryYamlConfig(yaml); // default device is removed. assertEquals(2, config.getDeviceConfigs().size()); assertNull(config.getDeviceConfig(DEFAULT_DEVICE_NAME)); localDeviceConfig = config.getDeviceConfig("device0"); assertEquals(blockSize * device0Multiplier, localDeviceConfig.getBlockSize()); assertEquals(readIOThreadCount * device0Multiplier, localDeviceConfig.getReadIOThreadCount()); assertEquals(writeIOThreadCount * device0Multiplier, localDeviceConfig.getWriteIOThreadCount()); assertEquals(new Capacity(1234567890, MemoryUnit.MEGABYTES), localDeviceConfig.getCapacity()); localDeviceConfig = config.getDeviceConfig("device1"); assertEquals(blockSize * device1Multiplier, localDeviceConfig.getBlockSize()); assertEquals(readIOThreadCount * device1Multiplier, localDeviceConfig.getReadIOThreadCount()); assertEquals(writeIOThreadCount * device1Multiplier, localDeviceConfig.getWriteIOThreadCount()); // override the default device config String newBaseDir = "/some/random/base/dir/for/tiered/store"; yaml = "" + "hazelcast:\n" + " local-device:\n" + " " + DEFAULT_DEVICE_NAME + ":\n" + " base-dir: " + newBaseDir + "\n" + " block-size: " + (DEFAULT_BLOCK_SIZE_IN_BYTES * 2) + "\n" + " read-io-thread-count: " + (DEFAULT_READ_IO_THREAD_COUNT * 2) + "\n"; config = new InMemoryYamlConfig(yaml); assertEquals(1, config.getDeviceConfigs().size()); localDeviceConfig = config.getDeviceConfig(DEFAULT_DEVICE_NAME); assertEquals(DEFAULT_DEVICE_NAME, localDeviceConfig.getName()); assertEquals(new File(newBaseDir).getAbsoluteFile(), localDeviceConfig.getBaseDir()); assertEquals(2 * DEFAULT_BLOCK_SIZE_IN_BYTES, 
localDeviceConfig.getBlockSize()); assertEquals(2 * DEFAULT_READ_IO_THREAD_COUNT, localDeviceConfig.getReadIOThreadCount()); assertEquals(DEFAULT_WRITE_IO_THREAD_COUNT, localDeviceConfig.getWriteIOThreadCount()); } @Override @Test public void testTieredStore() { String yaml = """ hazelcast: map: map0: tiered-store: enabled: true memory-tier: capacity: unit: MEGABYTES value: 1024 disk-tier: enabled: true device-name: local-device map1: tiered-store: enabled: true disk-tier: enabled: true map2: tiered-store: enabled: true memory-tier: capacity: unit: GIGABYTES value: 1 map3: tiered-store: enabled: true """; Config config = new InMemoryYamlConfig(yaml); TieredStoreConfig tieredStoreConfig = config.getMapConfig("map0").getTieredStoreConfig(); assertTrue(tieredStoreConfig.isEnabled()); MemoryTierConfig memoryTierConfig = tieredStoreConfig.getMemoryTierConfig(); assertEquals(MemoryUnit.MEGABYTES, memoryTierConfig.getCapacity().getUnit()); assertEquals(1024, memoryTierConfig.getCapacity().getValue()); DiskTierConfig diskTierConfig = tieredStoreConfig.getDiskTierConfig(); assertTrue(tieredStoreConfig.getDiskTierConfig().isEnabled()); assertEquals("local-device", diskTierConfig.getDeviceName()); assertEquals(DEFAULT_DEVICE_NAME, config.getMapConfig("map1").getTieredStoreConfig().getDiskTierConfig().getDeviceName()); assertNotNull(config.getDeviceConfig(DEFAULT_DEVICE_NAME)); tieredStoreConfig = config.getMapConfig("map2").getTieredStoreConfig(); assertTrue(tieredStoreConfig.isEnabled()); memoryTierConfig = tieredStoreConfig.getMemoryTierConfig(); assertEquals(MemoryUnit.GIGABYTES, memoryTierConfig.getCapacity().getUnit()); assertEquals(1L, memoryTierConfig.getCapacity().getValue()); assertFalse(tieredStoreConfig.getDiskTierConfig().isEnabled()); tieredStoreConfig = config.getMapConfig("map3").getTieredStoreConfig(); memoryTierConfig = tieredStoreConfig.getMemoryTierConfig(); assertEquals(DEFAULT_CAPACITY, memoryTierConfig.getCapacity()); diskTierConfig = 
tieredStoreConfig.getDiskTierConfig(); assertFalse(diskTierConfig.isEnabled()); assertEquals(DEFAULT_DEVICE_NAME, diskTierConfig.getDeviceName()); yaml = """ hazelcast: map: some-map: tiered-store: enabled: true """; config = new InMemoryYamlConfig(yaml); assertEquals(1, config.getDeviceConfigs().size()); assertEquals(1, config.getDeviceConfigs().size()); assertEquals(new LocalDeviceConfig(), config.getDeviceConfig(DEFAULT_DEVICE_NAME)); assertEquals(DEFAULT_DEVICE_NAME, config.getMapConfig("some-map").getTieredStoreConfig().getDiskTierConfig().getDeviceName()); } @Override @Test public void testHotRestartEncryptionAtRest_whenJavaKeyStore() { int keySize = 16; String keyStorePath = "/tmp/keystore.p12"; String keyStoreType = "PKCS12"; String keyStorePassword = "password"; int pollingInterval = 60; String currentKeyAlias = "current"; String yaml = "" + "hazelcast:\n" + " hot-restart-persistence:\n" + " enabled: true\n" + " encryption-at-rest:\n" + " enabled: true\n" + " algorithm: AES\n" + " salt: some-salt\n" + " key-size: " + keySize + "\n" + " secure-store:\n" + " keystore:\n" + " path: " + keyStorePath + "\n" + " type: " + keyStoreType + "\n" + " password: " + keyStorePassword + "\n" + " polling-interval: " + pollingInterval + "\n" + " current-key-alias: " + currentKeyAlias + "\n"; Config config = new InMemoryYamlConfig(yaml); HotRestartPersistenceConfig hotRestartPersistenceConfig = config.getHotRestartPersistenceConfig(); assertTrue(hotRestartPersistenceConfig.isEnabled()); EncryptionAtRestConfig encryptionAtRestConfig = hotRestartPersistenceConfig.getEncryptionAtRestConfig(); assertTrue(encryptionAtRestConfig.isEnabled()); assertEquals("AES", encryptionAtRestConfig.getAlgorithm()); assertEquals("some-salt", encryptionAtRestConfig.getSalt()); assertEquals(keySize, encryptionAtRestConfig.getKeySize()); SecureStoreConfig secureStoreConfig = encryptionAtRestConfig.getSecureStoreConfig(); assertTrue(secureStoreConfig instanceof JavaKeyStoreSecureStoreConfig); 
JavaKeyStoreSecureStoreConfig keyStoreConfig = (JavaKeyStoreSecureStoreConfig) secureStoreConfig; assertEquals(new File(keyStorePath).getAbsolutePath(), keyStoreConfig.getPath().getAbsolutePath()); assertEquals(keyStoreType, keyStoreConfig.getType()); assertEquals(keyStorePassword, keyStoreConfig.getPassword()); assertEquals(pollingInterval, keyStoreConfig.getPollingInterval()); assertEquals(currentKeyAlias, keyStoreConfig.getCurrentKeyAlias()); } @Override @Test public void testPersistenceEncryptionAtRest_whenJavaKeyStore() { int keySize = 16; String keyStorePath = "/tmp/keystore.p12"; String keyStoreType = "PKCS12"; String keyStorePassword = "password"; int pollingInterval = 60; String currentKeyAlias = "current"; String yaml = "" + "hazelcast:\n" + " persistence:\n" + " enabled: true\n" + " encryption-at-rest:\n" + " enabled: true\n" + " algorithm: AES\n" + " salt: some-salt\n" + " key-size: " + keySize + "\n" + " secure-store:\n" + " keystore:\n" + " path: " + keyStorePath + "\n" + " type: " + keyStoreType + "\n" + " password: " + keyStorePassword + "\n" + " polling-interval: " + pollingInterval + "\n" + " current-key-alias: " + currentKeyAlias + "\n"; Config config = new InMemoryYamlConfig(yaml); PersistenceConfig persistenceConfig = config.getPersistenceConfig(); assertTrue(persistenceConfig.isEnabled()); EncryptionAtRestConfig encryptionAtRestConfig = persistenceConfig.getEncryptionAtRestConfig(); assertTrue(encryptionAtRestConfig.isEnabled()); assertEquals("AES", encryptionAtRestConfig.getAlgorithm()); assertEquals("some-salt", encryptionAtRestConfig.getSalt()); assertEquals(keySize, encryptionAtRestConfig.getKeySize()); SecureStoreConfig secureStoreConfig = encryptionAtRestConfig.getSecureStoreConfig(); assertTrue(secureStoreConfig instanceof JavaKeyStoreSecureStoreConfig); JavaKeyStoreSecureStoreConfig keyStoreConfig = (JavaKeyStoreSecureStoreConfig) secureStoreConfig; assertEquals(new File(keyStorePath).getAbsolutePath(), 
keyStoreConfig.getPath().getAbsolutePath()); assertEquals(keyStoreType, keyStoreConfig.getType()); assertEquals(keyStorePassword, keyStoreConfig.getPassword()); assertEquals(pollingInterval, keyStoreConfig.getPollingInterval()); assertEquals(currentKeyAlias, keyStoreConfig.getCurrentKeyAlias()); } @Override @Test public void testHotRestartEncryptionAtRest_whenVault() { int keySize = 16; String address = "https://localhost:1234"; String secretPath = "secret/path"; String token = "token"; int pollingInterval = 60; String yaml = "" + "hazelcast:\n" + " hot-restart-persistence:\n" + " enabled: true\n" + " encryption-at-rest:\n" + " enabled: true\n" + " algorithm: AES\n" + " salt: some-salt\n" + " key-size: " + keySize + "\n" + " secure-store:\n" + " vault:\n" + " address: " + address + "\n" + " secret-path: " + secretPath + "\n" + " token: " + token + "\n" + " polling-interval: " + pollingInterval + "\n" + " ssl:\n" + " enabled: true\n" + " factory-class-name: com.hazelcast.nio.ssl.BasicSSLContextFactory\n" + " properties:\n" + " protocol: TLS\n"; Config config = new InMemoryYamlConfig(yaml); HotRestartPersistenceConfig hotRestartPersistenceConfig = config.getHotRestartPersistenceConfig(); assertTrue(hotRestartPersistenceConfig.isEnabled()); EncryptionAtRestConfig encryptionAtRestConfig = hotRestartPersistenceConfig.getEncryptionAtRestConfig(); assertTrue(encryptionAtRestConfig.isEnabled()); assertEquals("AES", encryptionAtRestConfig.getAlgorithm()); assertEquals("some-salt", encryptionAtRestConfig.getSalt()); assertEquals(keySize, encryptionAtRestConfig.getKeySize()); SecureStoreConfig secureStoreConfig = encryptionAtRestConfig.getSecureStoreConfig(); assertTrue(secureStoreConfig instanceof VaultSecureStoreConfig); VaultSecureStoreConfig vaultConfig = (VaultSecureStoreConfig) secureStoreConfig; assertEquals(address, vaultConfig.getAddress()); assertEquals(secretPath, vaultConfig.getSecretPath()); assertEquals(token, vaultConfig.getToken()); 
assertEquals(pollingInterval, vaultConfig.getPollingInterval()); SSLConfig sslConfig = vaultConfig.getSSLConfig(); assertTrue(sslConfig.isEnabled()); assertEquals("com.hazelcast.nio.ssl.BasicSSLContextFactory", sslConfig.getFactoryClassName()); assertEquals(1, sslConfig.getProperties().size()); assertEquals("TLS", sslConfig.getProperties().get("protocol")); } @Override @Test public void testPersistenceEncryptionAtRest_whenVault() { int keySize = 16; String address = "https://localhost:1234"; String secretPath = "secret/path"; String token = "token"; int pollingInterval = 60; String yaml = "" + "hazelcast:\n" + " persistence:\n" + " enabled: true\n" + " encryption-at-rest:\n" + " enabled: true\n" + " algorithm: AES\n" + " salt: some-salt\n" + " key-size: " + keySize + "\n" + " secure-store:\n" + " vault:\n" + " address: " + address + "\n" + " secret-path: " + secretPath + "\n" + " token: " + token + "\n" + " polling-interval: " + pollingInterval + "\n" + " ssl:\n" + " enabled: true\n" + " factory-class-name: com.hazelcast.nio.ssl.BasicSSLContextFactory\n" + " properties:\n" + " protocol: TLS\n"; Config config = new InMemoryYamlConfig(yaml); PersistenceConfig persistenceConfig = config.getPersistenceConfig(); assertTrue(persistenceConfig.isEnabled()); EncryptionAtRestConfig encryptionAtRestConfig = persistenceConfig.getEncryptionAtRestConfig(); assertTrue(encryptionAtRestConfig.isEnabled()); assertEquals("AES", encryptionAtRestConfig.getAlgorithm()); assertEquals("some-salt", encryptionAtRestConfig.getSalt()); assertEquals(keySize, encryptionAtRestConfig.getKeySize()); SecureStoreConfig secureStoreConfig = encryptionAtRestConfig.getSecureStoreConfig(); assertTrue(secureStoreConfig instanceof VaultSecureStoreConfig); VaultSecureStoreConfig vaultConfig = (VaultSecureStoreConfig) secureStoreConfig; assertEquals(address, vaultConfig.getAddress()); assertEquals(secretPath, vaultConfig.getSecretPath()); assertEquals(token, vaultConfig.getToken()); 
assertEquals(pollingInterval, vaultConfig.getPollingInterval()); SSLConfig sslConfig = vaultConfig.getSSLConfig(); assertTrue(sslConfig.isEnabled()); assertEquals("com.hazelcast.nio.ssl.BasicSSLContextFactory", sslConfig.getFactoryClassName()); assertEquals(1, sslConfig.getProperties().size()); assertEquals("TLS", sslConfig.getProperties().get("protocol")); } @Override @Test public void testCachePermission() { String yaml = """ hazelcast: security: enabled: true client-permissions: cache: - name: /hz/cachemanager1/cache1 principal: dev actions: - create - destroy - add - remove """; Config config = buildConfig(yaml); PermissionConfig expected = new PermissionConfig(CACHE, "/hz/cachemanager1/cache1", "dev"); expected.addAction("create").addAction("destroy").addAction("add").addAction("remove"); assertPermissionConfig(expected, config); } @Override @Test public void testOnJoinPermissionOperation() { for (OnJoinPermissionOperationName onJoinOp : OnJoinPermissionOperationName.values()) { String yaml = "" + "hazelcast:\n" + " security:\n" + " client-permissions:\n" + " on-join-operation: " + onJoinOp.name(); Config config = buildConfig(yaml); assertSame(onJoinOp, config.getSecurityConfig().getOnJoinPermissionOperation()); } } @Override @Test public void testConfigPermission() { String yaml = """ hazelcast: security: enabled: true client-permissions: priority-grant: true config: deny: true principal: dev endpoints: - 127.0.0.1"""; Config config = buildConfig(yaml); PermissionConfig expected = new PermissionConfig(CONFIG, null, "dev").setDeny(true); expected.getEndpoints().add("127.0.0.1"); assertPermissionConfig(expected, config); assertTrue(config.getSecurityConfig().isPermissionPriorityGrant()); } @Override @Test(expected = InvalidConfigurationException.class) public void testCacheConfig_withNativeInMemoryFormat_failsFastInOSS() { String yaml = """ hazelcast: cache: cache: eviction: size: 10000000 max-size-policy: ENTRY_COUNT eviction-policy: LFU in-memory-format: 
NATIVE """; buildConfig(yaml); } @Override @Test public void testAllPermissionsCovered() throws IOException { URL yamlResource = YamlConfigBuilderTest.class.getClassLoader().getResource("hazelcast-fullconfig.yaml"); Config config = new YamlConfigBuilder(yamlResource).build(); Set<PermissionType> allPermissionTypes = Set.of(PermissionType.values()); Set<PermissionType> foundPermissionTypes = config.getSecurityConfig().getClientPermissionConfigs().stream() .map(PermissionConfig::getType).collect(Collectors.toSet()); Collection<PermissionType> difference = Sets.difference(allPermissionTypes, foundPermissionTypes); assertTrue(String.format("All permission types should be listed in %s. Not found ones: %s", yamlResource, difference), difference.isEmpty()); } @Override @Test(expected = InvalidConfigurationException.class) @Ignore("Schema validation is supposed to fail with missing mandatory field: class-name") public void testMemberAddressProvider_classNameIsMandatory() { String yaml = """ hazelcast: network: member-address-provider: enabled: true """; buildConfig(yaml); } @Override @Test public void testMemberAddressProviderEnabled() { String yaml = """ hazelcast: network: member-address-provider: enabled: true class-name: foo.bar.Clazz """; Config config = buildConfig(yaml); MemberAddressProviderConfig memberAddressProviderConfig = config.getNetworkConfig().getMemberAddressProviderConfig(); assertTrue(memberAddressProviderConfig.isEnabled()); assertEquals("foo.bar.Clazz", memberAddressProviderConfig.getClassName()); } @Override @Test public void testMemberAddressProviderEnabled_withProperties() { String yaml = """ hazelcast: network: member-address-provider: enabled: true class-name: foo.bar.Clazz properties: propName1: propValue1 """; Config config = buildConfig(yaml); MemberAddressProviderConfig memberAddressProviderConfig = config.getNetworkConfig().getMemberAddressProviderConfig(); Properties properties = memberAddressProviderConfig.getProperties(); assertEquals(1, 
properties.size()); assertEquals("propValue1", properties.get("propName1")); } @Override @Test public void testFailureDetector_withProperties() { String yaml = """ hazelcast: network: failure-detector: icmp: enabled: true timeout-milliseconds: 42 fail-fast-on-startup: true interval-milliseconds: 4200 max-attempts: 42 parallel-mode: true ttl: 255 """; Config config = buildConfig(yaml); NetworkConfig networkConfig = config.getNetworkConfig(); IcmpFailureDetectorConfig icmpFailureDetectorConfig = networkConfig.getIcmpFailureDetectorConfig(); assertNotNull(icmpFailureDetectorConfig); assertTrue(icmpFailureDetectorConfig.isEnabled()); assertTrue(icmpFailureDetectorConfig.isParallelMode()); assertTrue(icmpFailureDetectorConfig.isFailFastOnStartup()); assertEquals(42, icmpFailureDetectorConfig.getTimeoutMilliseconds()); assertEquals(42, icmpFailureDetectorConfig.getMaxAttempts()); assertEquals(4200, icmpFailureDetectorConfig.getIntervalMilliseconds()); } @Override @Test public void testHandleMemberAttributes() { String yaml = """ hazelcast: member-attributes: IDENTIFIER: type: string value: ID """; Config config = buildConfig(yaml); MemberAttributeConfig memberAttributeConfig = config.getMemberAttributeConfig(); assertNotNull(memberAttributeConfig); assertEquals("ID", memberAttributeConfig.getAttribute("IDENTIFIER")); } @Override @Test public void testMemcacheProtocolEnabled() { String yaml = """ hazelcast: network: memcache-protocol: enabled: true """; Config config = buildConfig(yaml); MemcacheProtocolConfig memcacheProtocolConfig = config.getNetworkConfig().getMemcacheProtocolConfig(); assertNotNull(memcacheProtocolConfig); assertTrue(memcacheProtocolConfig.isEnabled()); } @Override @Test public void testRestApiDefaults() { String yaml = """ hazelcast: network: rest-api: enabled: false"""; Config config = buildConfig(yaml); RestApiConfig restApiConfig = config.getNetworkConfig().getRestApiConfig(); assertNotNull(restApiConfig); assertFalse(restApiConfig.isEnabled()); 
for (RestEndpointGroup group : RestEndpointGroup.values()) { assertEquals("Unexpected status of group " + group, group.isEnabledByDefault(), restApiConfig.isGroupEnabled(group)); } } @Override @Test public void testRestApiEndpointGroups() { String yaml = """ hazelcast: network: rest-api: enabled: true endpoint-groups: HEALTH_CHECK: enabled: true DATA: enabled: true CLUSTER_READ: enabled: false"""; Config config = buildConfig(yaml); RestApiConfig restApiConfig = config.getNetworkConfig().getRestApiConfig(); assertTrue(restApiConfig.isEnabled()); assertTrue(restApiConfig.isGroupEnabled(RestEndpointGroup.HEALTH_CHECK)); assertFalse(restApiConfig.isGroupEnabled(RestEndpointGroup.CLUSTER_READ)); assertEquals(RestEndpointGroup.CLUSTER_WRITE.isEnabledByDefault(), restApiConfig.isGroupEnabled(RestEndpointGroup.CLUSTER_WRITE)); } @Override @Test(expected = InvalidConfigurationException.class) public void testUnknownRestApiEndpointGroup() { String yaml = """ hazelcast: network: rest-api: enabled: true endpoint-groups: TEST: enabled: true"""; buildConfig(yaml); } @Override @Test public void testDefaultAdvancedNetworkConfig() { String yaml = """ hazelcast: advanced-network: {} """; Config config = buildConfig(yaml); AdvancedNetworkConfig advancedNetworkConfig = config.getAdvancedNetworkConfig(); JoinConfig joinConfig = advancedNetworkConfig.getJoin(); IcmpFailureDetectorConfig fdConfig = advancedNetworkConfig.getIcmpFailureDetectorConfig(); MemberAddressProviderConfig providerConfig = advancedNetworkConfig.getMemberAddressProviderConfig(); assertFalse(advancedNetworkConfig.isEnabled()); assertTrue(joinConfig.getAutoDetectionConfig().isEnabled()); assertNull(fdConfig); assertFalse(providerConfig.isEnabled()); assertTrue(advancedNetworkConfig.getEndpointConfigs().containsKey(EndpointQualifier.MEMBER)); assertEquals(1, advancedNetworkConfig.getEndpointConfigs().size()); } @Override @Test public void testAdvancedNetworkConfig_whenInvalidSocketKeepIdleSeconds() { String invalid1 = 
getAdvancedNetworkConfigWithSocketOption("keep-idle-seconds", -1); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid1)); String invalid2 = getAdvancedNetworkConfigWithSocketOption("keep-idle-seconds", 0); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid2)); String invalid3 = getAdvancedNetworkConfigWithSocketOption("keep-idle-seconds", 32768); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid3)); } @Override @Test public void testAdvancedNetworkConfig_whenInvalidSocketKeepIntervalSeconds() { String invalid1 = getAdvancedNetworkConfigWithSocketOption("keep-interval-seconds", -1); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid1)); String invalid2 = getAdvancedNetworkConfigWithSocketOption("keep-interval-seconds", 0); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid2)); String invalid3 = getAdvancedNetworkConfigWithSocketOption("keep-interval-seconds", 32768); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid3)); } @Override @Test public void testAdvancedNetworkConfig_whenInvalidSocketKeepCount() { String invalid1 = getAdvancedNetworkConfigWithSocketOption("keep-count", -1); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid1)); String invalid2 = getAdvancedNetworkConfigWithSocketOption("keep-count", 0); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid2)); String invalid3 = getAdvancedNetworkConfigWithSocketOption("keep-count", 128); Assert.assertThrows(IllegalArgumentException.class, () -> buildConfig(invalid3)); } @Override @Test public void testAmbiguousNetworkConfig_throwsException() { String yaml = """ hazelcast: advanced-network: enabled: true network: port: 9999"""; expected.expect(InvalidConfigurationException.class); buildConfig(yaml); } @Override @Test public void testNetworkConfigUnambiguous_whenAdvancedNetworkDisabled() { String 
yaml = """ hazelcast: advanced-network: {} network: port: port: 9999 """; Config config = buildConfig(yaml); assertFalse(config.getAdvancedNetworkConfig().isEnabled()); assertEquals(9999, config.getNetworkConfig().getPort()); } @Override @Test public void testMultipleMemberEndpointConfigs_throwsException() { String yaml = """ hazelcast: advanced-network: member-server-socket-endpoint-config: {} member-server-socket-endpoint-config: {}"""; expected.expect(InvalidConfigurationException.class); buildConfig(yaml); } @Test public void outboundPorts_asObject_ParsingTest() { String yaml = """ hazelcast: network: outbound-ports: ports: 2500-3000 more-ports: 2600-3500 """; Config actual = buildConfig(yaml); assertEquals(new HashSet<>(asList("2500-3000", "2600-3500")), actual.getNetworkConfig().getOutboundPortDefinitions()); } @Test public void outboundPorts_asSequence_ParsingTest() { String yaml = """ hazelcast: network: outbound-ports: - 1234-1999 - 2500 """; Config actual = buildConfig(yaml); assertEquals(new HashSet<>(asList("2500", "1234-1999")), actual.getNetworkConfig().getOutboundPortDefinitions()); } @Override protected Config buildCompleteAdvancedNetworkConfig() { String yaml = """ hazelcast: advanced-network: enabled: true join: multicast: enabled: false tcp-ip: enabled: true required-member: 10.10.1.10 member-list: - 10.10.1.11 - 10.10.1.12 failure-detector: icmp: enabled: true timeout-milliseconds: 42 fail-fast-on-startup: true interval-milliseconds: 4200 max-attempts: 42 parallel-mode: true ttl: 255 member-address-provider: class-name: com.hazelcast.test.Provider member-server-socket-endpoint-config: name: member-server-socket outbound-ports: ports: 33000-33100 interfaces: enabled: true interfaces: - 10.10.0.1 ssl: enabled: true factory-class-name: com.hazelcast.examples.MySSLContextFactory properties: foo: bar socket-interceptor: enabled: true class-name: com.hazelcast.examples.MySocketInterceptor properties: foo: baz socket-options: buffer-direct: true 
tcp-no-delay: true keep-alive: true connect-timeout-seconds: 33 send-buffer-size-kb: 34 receive-buffer-size-kb: 67 linger-seconds: 11 keep-count: 12 keep-interval-seconds: 13 keep-idle-seconds: 14 symmetric-encryption: enabled: true algorithm: Algorithm salt: thesalt password: thepassword iteration-count: 1000 port: port-count: 93 auto-increment: false port: 9191 public-address: 10.20.10.10 reuse-address: true rest-server-socket-endpoint-config: name: REST port: port: 8080 endpoint-groups: WAN: enabled: true CLUSTER_READ: enabled: true CLUSTER_WRITE: enabled: false HEALTH_CHECK: enabled: true memcache-server-socket-endpoint-config: name: MEMCACHE outbound-ports: ports: 42000-42100 wan-server-socket-endpoint-config: WAN_SERVER1: outbound-ports: ports: 52000-52100 WAN_SERVER2: outbound-ports: ports: 53000-53100 wan-endpoint-config: WAN_ENDPOINT1: outbound-ports: ports: 62000-62100 WAN_ENDPOINT2: outbound-ports: ports: 63000-63100 client-server-socket-endpoint-config: name: CLIENT outbound-ports: ports: 72000-72100 """; return buildConfig(yaml); } @Override @Test public void testCPSubsystemConfig() { String yaml = """ hazelcast: cp-subsystem: cp-member-count: 10 group-size: 5 session-time-to-live-seconds: 15 session-heartbeat-interval-seconds: 3 missing-cp-member-auto-removal-seconds: 120 fail-on-indeterminate-operation-state: true persistence-enabled: true base-dir: /mnt/cp-data data-load-timeout-seconds: 30 cp-member-priority: -1 map-limit: 25 raft-algorithm: leader-election-timeout-in-millis: 500 leader-heartbeat-period-in-millis: 100 max-missed-leader-heartbeat-count: 3 append-request-max-entry-count: 25 commit-index-advance-count-to-snapshot: 250 uncommitted-entry-count-to-reject-new-appends: 75 append-request-backoff-timeout-in-millis: 50 semaphores: sem1: jdk-compatible: true initial-permits: 1 sem2: jdk-compatible: false initial-permits: 2 locks: lock1: lock-acquire-limit: 1 lock2: lock-acquire-limit: 2 maps: map1: max-size-mb: 1 map2: max-size-mb: 2 """; 
Config config = buildConfig(yaml);
        CPSubsystemConfig cpSubsystemConfig = config.getCPSubsystemConfig();
        assertEquals(10, cpSubsystemConfig.getCPMemberCount());
        assertEquals(5, cpSubsystemConfig.getGroupSize());
        assertEquals(15, cpSubsystemConfig.getSessionTimeToLiveSeconds());
        assertEquals(3, cpSubsystemConfig.getSessionHeartbeatIntervalSeconds());
        assertEquals(120, cpSubsystemConfig.getMissingCPMemberAutoRemovalSeconds());
        assertTrue(cpSubsystemConfig.isFailOnIndeterminateOperationState());
        assertTrue(cpSubsystemConfig.isPersistenceEnabled());
        assertEquals(new File("/mnt/cp-data").getAbsoluteFile(), cpSubsystemConfig.getBaseDir().getAbsoluteFile());
        assertEquals(30, cpSubsystemConfig.getDataLoadTimeoutSeconds());
        assertEquals(-1, cpSubsystemConfig.getCPMemberPriority());

        RaftAlgorithmConfig raftAlgorithmConfig = cpSubsystemConfig.getRaftAlgorithmConfig();
        assertEquals(500, raftAlgorithmConfig.getLeaderElectionTimeoutInMillis());
        assertEquals(100, raftAlgorithmConfig.getLeaderHeartbeatPeriodInMillis());
        assertEquals(3, raftAlgorithmConfig.getMaxMissedLeaderHeartbeatCount());
        assertEquals(25, raftAlgorithmConfig.getAppendRequestMaxEntryCount());
        assertEquals(250, raftAlgorithmConfig.getCommitIndexAdvanceCountToSnapshot());
        assertEquals(75, raftAlgorithmConfig.getUncommittedEntryCountToRejectNewAppends());
        assertEquals(50, raftAlgorithmConfig.getAppendRequestBackoffTimeoutInMillis());

        SemaphoreConfig semaphoreConfig1 = cpSubsystemConfig.findSemaphoreConfig("sem1");
        SemaphoreConfig semaphoreConfig2 = cpSubsystemConfig.findSemaphoreConfig("sem2");
        assertNotNull(semaphoreConfig1);
        assertNotNull(semaphoreConfig2);
        assertTrue(semaphoreConfig1.isJDKCompatible());
        assertFalse(semaphoreConfig2.isJDKCompatible());
        assertEquals(1, semaphoreConfig1.getInitialPermits());
        assertEquals(2, semaphoreConfig2.getInitialPermits());

        FencedLockConfig lockConfig1 = cpSubsystemConfig.findLockConfig("lock1");
        FencedLockConfig lockConfig2 = cpSubsystemConfig.findLockConfig("lock2");
        assertNotNull(lockConfig1);
        assertNotNull(lockConfig2);
        assertEquals(1, lockConfig1.getLockAcquireLimit());
        assertEquals(2, lockConfig2.getLockAcquireLimit());

        CPMapConfig mapConfig1 = cpSubsystemConfig.findCPMapConfig("map1");
        CPMapConfig mapConfig2 = cpSubsystemConfig.findCPMapConfig("map2");
        assertNotNull(mapConfig1);
        assertNotNull(mapConfig2);
        assertEquals("map1", mapConfig1.getName());
        assertEquals(1, mapConfig1.getMaxSizeMb());
        assertEquals("map2", mapConfig2.getName());
        assertEquals(2, mapConfig2.getMaxSizeMb());
        assertEquals(25, cpSubsystemConfig.getCPMapLimit());
    }

    @Override
    @Test
    public void testSqlConfig() {
        String yaml = """
                hazelcast:
                  sql:
                    statement-timeout-millis: 30
                    catalog-persistence-enabled: true
                """;
        Config config = buildConfig(yaml);
        SqlConfig sqlConfig = config.getSqlConfig();
        assertEquals(30L, sqlConfig.getStatementTimeoutMillis());
        assertTrue(sqlConfig.isCatalogPersistenceEnabled());
    }

    /** Trailing whitespace after a scalar must not break parsing. */
    @Override
    @Test
    public void testWhitespaceInNonSpaceStrings() {
        String yaml = """
                hazelcast:
                  split-brain-protection:
                    name-of-split-brain-protection:
                      enabled: true
                      protect-on: WRITE   \s
                """;
        buildConfig(yaml);
    }

    /** Advanced persistent-memory directory list with NUMA nodes; pmem stays disabled. */
    @Override
    @Test
    public void testPersistentMemoryDirectoryConfiguration() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      directories:
                        - directory: /mnt/pmem0
                          numa-node: 0
                        - directory: /mnt/pmem1
                          numa-node: 1
                """;

        Config yamlConfig = new InMemoryYamlConfig(yaml);
        PersistentMemoryConfig pmemConfig = yamlConfig.getNativeMemoryConfig()
                .getPersistentMemoryConfig();
        List<PersistentMemoryDirectoryConfig> directoryConfigs = pmemConfig
                .getDirectoryConfigs();
        assertFalse(pmemConfig.isEnabled());
        assertEquals(MOUNTED, pmemConfig.getMode());
        assertEquals(2, directoryConfigs.size());
        PersistentMemoryDirectoryConfig dir0Config = directoryConfigs.get(0);
        PersistentMemoryDirectoryConfig dir1Config = directoryConfigs.get(1);
        assertEquals("/mnt/pmem0", dir0Config.getDirectory());
        assertEquals(0, dir0Config.getNumaNode());
        assertEquals("/mnt/pmem1", dir1Config.getDirectory());
        assertEquals(1, dir1Config.getNumaNode());
    }

    @Test
    public void cacheEntryListenerConfigParsing() {
        String yaml = """
                hazelcast:
                  cache:
                    my-cache:
                      cache-entry-listeners:
                        - old-value-required: true
                          synchronous: true
                          cache-entry-listener-factory:
                            class-name: com.example.cache.MyEntryListenerFactory
                          cache-entry-event-filter-factory:
                            class-name: com.example.cache.MyEntryEventFilterFactory""";
        Config actual = buildConfig(yaml);
        CacheSimpleEntryListenerConfig expected = new CacheSimpleEntryListenerConfig()
                .setOldValueRequired(true)
                .setSynchronous(true)
                .setCacheEntryListenerFactory("com.example.cache.MyEntryListenerFactory")
                .setCacheEntryEventFilterFactory("com.example.cache.MyEntryEventFilterFactory");
        List<CacheSimpleEntryListenerConfig> actualListeners = actual.findCacheConfig("my-cache").getCacheEntryListeners();
        assertEquals(singletonList(expected), actualListeners);
    }

    /** Simple single-directory form enables persistent memory implicitly. */
    @Override
    @Test
    public void testPersistentMemoryDirectoryConfigurationSimple() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory-directory: /mnt/pmem0""";
        Config config = buildConfig(yaml);
        PersistentMemoryConfig pmemConfig = config.getNativeMemoryConfig().getPersistentMemoryConfig();
        assertTrue(pmemConfig.isEnabled());
        List<PersistentMemoryDirectoryConfig> directoryConfigs = pmemConfig.getDirectoryConfigs();
        assertEquals(1, directoryConfigs.size());
        PersistentMemoryDirectoryConfig dir0Config = directoryConfigs.get(0);
        assertEquals("/mnt/pmem0", dir0Config.getDirectory());
        assertFalse(dir0Config.isNumaNodeSet());
    }

    @Override
    @Test(expected = InvalidConfigurationException.class)
    public void testPersistentMemoryDirectoryConfiguration_uniqueDirViolationThrows() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      directories:
                        - directory: /mnt/pmem0
                          numa-node: 0
                        - directory: /mnt/pmem0
                          numa-node: 1
                """;
        buildConfig(yaml);
    }

    @Override
    @Test(expected = InvalidConfigurationException.class)
    public void testPersistentMemoryDirectoryConfiguration_uniqueNumaNodeViolationThrows() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      directories:
                        - directory: /mnt/pmem0
                          numa-node: 0
                        - directory: /mnt/pmem1
                          numa-node: 0
                """;
        buildConfig(yaml);
    }

    @Override
    @Test(expected = InvalidConfigurationException.class)
    public void testPersistentMemoryDirectoryConfiguration_numaNodeConsistencyViolationThrows() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      directories:
                        - directory: /mnt/pmem0
                          numa-node: 0
                        - directory: /mnt/pmem1
                """;
        buildConfig(yaml);
    }

    /** Simple and advanced directory forms may be combined; simple comes first. */
    @Override
    @Test
    public void testPersistentMemoryDirectoryConfiguration_simpleAndAdvancedPasses() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory-directory: /mnt/optane
                    persistent-memory:
                      directories:
                        - directory: /mnt/pmem0
                        - directory: /mnt/pmem1
                """;
        Config config = buildConfig(yaml);
        PersistentMemoryConfig pmemConfig = config.getNativeMemoryConfig().getPersistentMemoryConfig();
        assertTrue(pmemConfig.isEnabled());
        assertEquals(MOUNTED, pmemConfig.getMode());
        List<PersistentMemoryDirectoryConfig> directoryConfigs = pmemConfig.getDirectoryConfigs();
        assertEquals(3, directoryConfigs.size());
        PersistentMemoryDirectoryConfig dir0Config = directoryConfigs.get(0);
        PersistentMemoryDirectoryConfig dir1Config = directoryConfigs.get(1);
        PersistentMemoryDirectoryConfig dir2Config = directoryConfigs.get(2);
        assertEquals("/mnt/optane", dir0Config.getDirectory());
        assertFalse(dir0Config.isNumaNodeSet());
        assertEquals("/mnt/pmem0", dir1Config.getDirectory());
        assertFalse(dir1Config.isNumaNodeSet());
        assertEquals("/mnt/pmem1", dir2Config.getDirectory());
        assertFalse(dir2Config.isNumaNodeSet());
    }

    @Override
    @Test
    public void testPersistentMemoryConfiguration_SystemMemoryMode() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      enabled: true
                      mode: SYSTEM_MEMORY
                """;
        Config config = buildConfig(yaml);
        PersistentMemoryConfig pmemConfig = config.getNativeMemoryConfig().getPersistentMemoryConfig();
        assertTrue(pmemConfig.isEnabled());
        assertEquals(SYSTEM_MEMORY, pmemConfig.getMode());
    }

    @Override
    @Test(expected = InvalidConfigurationException.class)
    public void testPersistentMemoryConfiguration_NotExistingModeThrows() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      mode: NOT_EXISTING_MODE
                """;
        buildConfig(yaml);
    }

    /** Directories are only valid in MOUNTED mode. */
    @Override
    @Test(expected = InvalidConfigurationException.class)
    public void testPersistentMemoryDirectoryConfiguration_SystemMemoryModeThrows() {
        String yaml = """
                hazelcast:
                  native-memory:
                    persistent-memory:
                      mode: SYSTEM_MEMORY
                      directories:
                        - directory: /mnt/pmem0
                """;
        buildConfig(yaml);
    }

    @Override
    @Test
    public void testMetricsConfig() {
        String yaml = """
                hazelcast:
                  metrics:
                    enabled: false
                    management-center:
                      enabled: false
                      retention-seconds: 11
                    jmx:
                      enabled: false
                    collection-frequency-seconds: 10""";
        Config config = new InMemoryYamlConfig(yaml);
        MetricsConfig metricsConfig = config.getMetricsConfig();
        assertFalse(metricsConfig.isEnabled());
        MetricsManagementCenterConfig metricsMcConfig = metricsConfig.getManagementCenterConfig();
        assertFalse(metricsMcConfig.isEnabled());
        assertFalse(metricsConfig.getJmxConfig().isEnabled());
        assertEquals(10, metricsConfig.getCollectionFrequencySeconds());
        assertEquals(11, metricsMcConfig.getRetentionSeconds());
    }

    @Override
    @Test
    public void testInstanceTrackingConfig() {
        String yaml = """
                hazelcast:
                  instance-tracking:
                    enabled: true
                    file-name: /dummy/file
                    format-pattern: dummy-pattern with $HZ_INSTANCE_TRACKING{placeholder} and $RND{placeholder}""";
        Config config = new InMemoryYamlConfig(yaml);
        InstanceTrackingConfig trackingConfig = config.getInstanceTrackingConfig();
        assertTrue(trackingConfig.isEnabled());
        assertEquals("/dummy/file", trackingConfig.getFileName());
        assertEquals("dummy-pattern with $HZ_INSTANCE_TRACKING{placeholder} and $RND{placeholder}",
                trackingConfig.getFormatPattern());
    }

    @Override
    @Test
    public void 
testMetricsConfigMasterSwitchDisabled() { String yaml = """ hazelcast: metrics: enabled: false"""; Config config = new InMemoryYamlConfig(yaml); MetricsConfig metricsConfig = config.getMetricsConfig(); assertFalse(metricsConfig.isEnabled()); assertTrue(metricsConfig.getManagementCenterConfig().isEnabled()); assertTrue(metricsConfig.getJmxConfig().isEnabled()); } @Override @Test public void testMetricsConfigMcDisabled() { String yaml = """ hazelcast: metrics: management-center: enabled: false"""; Config config = new InMemoryYamlConfig(yaml); MetricsConfig metricsConfig = config.getMetricsConfig(); assertTrue(metricsConfig.isEnabled()); assertFalse(metricsConfig.getManagementCenterConfig().isEnabled()); assertTrue(metricsConfig.getJmxConfig().isEnabled()); } @Override @Test public void testMetricsConfigJmxDisabled() { String yaml = """ hazelcast: metrics: jmx: enabled: false"""; Config config = new InMemoryYamlConfig(yaml); MetricsConfig metricsConfig = config.getMetricsConfig(); assertTrue(metricsConfig.isEnabled()); assertTrue(metricsConfig.getManagementCenterConfig().isEnabled()); assertFalse(metricsConfig.getJmxConfig().isEnabled()); } @Override protected Config buildAuditlogConfig() { String yaml = """ hazelcast: auditlog: enabled: true factory-class-name: com.acme.auditlog.AuditlogToSyslogFactory properties: host: syslogserver.acme.com port: 514 type: tcp """; return new InMemoryYamlConfig(yaml); } @Override protected Config buildMapWildcardConfig() { String yaml = """ hazelcast: map: map*: attributes: name: extractor-class-name: usercodedeployment.CapitalizingFirstNameExtractor mapBackup2*: backup-count: 2 attributes: name: extractor-class-name: usercodedeployment.CapitalizingFirstNameExtractor """; return new InMemoryYamlConfig(yaml); } @Override @Test public void testIntegrityCheckerConfig() { String yaml = """ hazelcast: integrity-checker: enabled: false """; Config config = buildConfig(yaml); assertFalse(config.getIntegrityCheckerConfig().isEnabled()); } 
@Override public void testDataConnectionConfigs() { String yaml = """ hazelcast: data-connection: mysql-database: type: jdbc properties: jdbcUrl: jdbc:mysql://dummy:3306 some.property: dummy-value shared: true other-database: type: other """; Config config = new InMemoryYamlConfig(yaml); Map<String, DataConnectionConfig> dataConnectionConfigs = config.getDataConnectionConfigs(); assertThat(dataConnectionConfigs).hasSize(2); assertThat(dataConnectionConfigs).containsKey("mysql-database"); DataConnectionConfig mysqlDataConnectionConfig = dataConnectionConfigs.get("mysql-database"); assertThat(mysqlDataConnectionConfig.getType()).isEqualTo("jdbc"); assertThat(mysqlDataConnectionConfig.getName()).isEqualTo("mysql-database"); assertThat(mysqlDataConnectionConfig.isShared()).isTrue(); assertThat(mysqlDataConnectionConfig.getProperty("jdbcUrl")).isEqualTo("jdbc:mysql://dummy:3306"); assertThat(mysqlDataConnectionConfig.getProperty("some.property")).isEqualTo("dummy-value"); assertThat(dataConnectionConfigs).containsKey("other-database"); DataConnectionConfig otherDataConnectionConfig = dataConnectionConfigs.get("other-database"); assertThat(otherDataConnectionConfig.getType()).isEqualTo("other"); } @Override @Test public void testPartitioningAttributeConfigs() { String yaml = """ hazelcast: map: test: partition-attributes: - name: attr1 - name: attr2 """; final MapConfig mapConfig = buildConfig(yaml).getMapConfig("test"); assertThat(mapConfig.getPartitioningAttributeConfigs()).containsExactly( new PartitioningAttributeConfig("attr1"), new PartitioningAttributeConfig("attr2") ); } @Override public void testNamespaceConfigs() throws IOException { File tempJar = tempFolder.newFile("tempJar.jar"); try (FileOutputStream out = new FileOutputStream(tempJar)) { out.write(new byte[]{0x50, 0x4B, 0x03, 0x04}); } File tempJarZip = tempFolder.newFile("tempZip.zip"); File tempClass = tempFolder.newFile("TempClass.class"); String yamlTestString = "hazelcast:\n" + " 
user-code-namespaces:\n" + " enabled: true\n" + " class-filter:\n" + " defaults-disabled: false\n" + " blacklist:\n" + " class:\n" + " - com.acme.app.BeanComparator\n" + " whitelist:\n" + " package:\n" + " - com.acme.app\n" + " prefix:\n" + " - com.hazelcast.\n" + " ns1:\n" + " - id: \"jarId\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"zipId\"\n" + " resource-type: \"jars_in_zip\"\n" + " url: " + tempJarZip.toURI().toURL() + "\n" + " - id: \"classId\"\n" + " resource-type: \"class\"\n" + " url: " + tempClass.toURI().toURL() + "\n" + " ns2:\n" + " - id: \"jarId2\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; final UserCodeNamespacesConfig userCodeNamespacesConfig = buildConfig(yamlTestString).getNamespacesConfig(); assertThat(userCodeNamespacesConfig.isEnabled()).isTrue(); assertThat(userCodeNamespacesConfig.getNamespaceConfigs()).hasSize(2); final UserCodeNamespaceConfig userCodeNamespaceConfig = userCodeNamespacesConfig.getNamespaceConfigs().get("ns1"); assertNotNull(userCodeNamespaceConfig); assertThat(userCodeNamespaceConfig.getName()).isEqualTo("ns1"); assertThat(userCodeNamespaceConfig.getResourceConfigs()).hasSize(3); // validate NS1 ResourceDefinition contents. 
Collection<ResourceDefinition> ns1Resources = userCodeNamespaceConfig.getResourceConfigs(); Optional<ResourceDefinition> jarIdResource = ns1Resources.stream().filter(r -> r.id().equals("jarId")).findFirst(); assertThat(jarIdResource).isPresent(); assertThat(jarIdResource.get().url()).isEqualTo(tempJar.toURI().toURL().toString()); assertEquals(ResourceType.JAR, jarIdResource.get().type()); // check the bytes[] are equal assertArrayEquals(getTestFileBytes(tempJar), jarIdResource.get().payload()); Optional<ResourceDefinition> zipId = ns1Resources.stream().filter(r -> r.id().equals("zipId")).findFirst(); Optional<ResourceDefinition> classId = ns1Resources.stream().filter(r -> r.id().equals("classId")).findFirst(); assertThat(zipId).isPresent(); assertThat(zipId.get().url()).isEqualTo(tempJarZip.toURI().toURL().toString()); assertEquals(ResourceType.JARS_IN_ZIP, zipId.get().type()); assertThat(classId).isPresent(); assertThat(classId.get().url()).isEqualTo(tempClass.toURI().toURL().toString()); assertEquals(ResourceType.CLASS, classId.get().type()); // check the bytes[] are equal assertArrayEquals(getTestFileBytes(tempJarZip), zipId.get().payload()); // validate NS2 ResourceDefinition contents. 
final UserCodeNamespaceConfig userCodeNamespaceConfig2 = userCodeNamespacesConfig.getNamespaceConfigs().get("ns2"); assertNotNull(userCodeNamespaceConfig2); assertThat(userCodeNamespaceConfig2.getName()).isEqualTo("ns2"); assertThat(userCodeNamespaceConfig2.getResourceConfigs()).hasSize(1); Collection<ResourceDefinition> ns2Resources = userCodeNamespaceConfig2.getResourceConfigs(); assertThat(ns2Resources).hasSize(1); Optional<ResourceDefinition> jarId2Resource = ns2Resources.stream().filter(r -> r.id().equals("jarId2")).findFirst(); assertThat(jarId2Resource).isPresent(); assertThat(jarId2Resource.get().url()).isEqualTo(tempJar.toURI().toURL().toString()); assertEquals(ResourceType.JAR, jarId2Resource.get().type()); // check the bytes[] are equal assertArrayEquals(getTestFileBytes(tempJar), jarId2Resource.get().payload()); // Validate filtering config assertNotNull(userCodeNamespacesConfig.getClassFilterConfig()); JavaSerializationFilterConfig filterConfig = userCodeNamespacesConfig.getClassFilterConfig(); assertFalse(filterConfig.isDefaultsDisabled()); assertTrue(filterConfig.getWhitelist().isListed("com.acme.app.FakeClass")); assertTrue(filterConfig.getWhitelist().isListed("com.hazelcast.fake.place.MagicClass")); assertFalse(filterConfig.getWhitelist().isListed("not.in.the.whitelist.ClassName")); assertTrue(filterConfig.getBlacklist().isListed("com.acme.app.BeanComparator")); assertFalse(filterConfig.getBlacklist().isListed("not.in.the.blacklist.ClassName")); } @Test public void testNamespaceConfigs_newStyle() throws IOException { File tempJar = tempFolder.newFile("tempJar.jar"); try (FileOutputStream out = new FileOutputStream(tempJar)) { out.write(new byte[]{0x50, 0x4B, 0x03, 0x04}); } File tempJarZip = tempFolder.newFile("tempZip.zip"); File tempClass = tempFolder.newFile("TempClass.class"); String yamlTestString = "hazelcast:\n" + " user-code-namespaces:\n" + " enabled: true\n" + " class-filter:\n" + " defaults-disabled: false\n" + " blacklist:\n" + " 
class:\n" + " - com.acme.app.BeanComparator\n" + " whitelist:\n" + " package:\n" + " - com.acme.app\n" + " prefix:\n" + " - com.hazelcast.\n" + " name-spaces:\n" + " ns1:\n" + " - id: \"jarId\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"zipId\"\n" + " resource-type: \"jars_in_zip\"\n" + " url: " + tempJarZip.toURI().toURL() + "\n" + " - id: \"classId\"\n" + " resource-type: \"class\"\n" + " url: " + tempClass.toURI().toURL() + "\n" + " ns2:\n" + " - id: \"jarId2\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; final UserCodeNamespacesConfig userCodeNamespacesConfig = buildConfig(yamlTestString).getNamespacesConfig(); assertThat(userCodeNamespacesConfig.isEnabled()).isTrue(); assertThat(userCodeNamespacesConfig.getNamespaceConfigs()).hasSize(2); final UserCodeNamespaceConfig userCodeNamespaceConfig = userCodeNamespacesConfig.getNamespaceConfigs().get("ns1"); assertNotNull(userCodeNamespaceConfig); assertThat(userCodeNamespaceConfig.getName()).isEqualTo("ns1"); assertThat(userCodeNamespaceConfig.getResourceConfigs()).hasSize(3); // validate NS1 ResourceDefinition contents. 
Collection<ResourceDefinition> ns1Resources = userCodeNamespaceConfig.getResourceConfigs(); Optional<ResourceDefinition> jarIdResource = ns1Resources.stream().filter(r -> r.id().equals("jarId")).findFirst(); assertThat(jarIdResource).isPresent(); assertThat(jarIdResource.get().url()).isEqualTo(tempJar.toURI().toURL().toString()); assertEquals(ResourceType.JAR, jarIdResource.get().type()); // check the bytes[] are equal assertArrayEquals(getTestFileBytes(tempJar), jarIdResource.get().payload()); Optional<ResourceDefinition> zipId = ns1Resources.stream().filter(r -> r.id().equals("zipId")).findFirst(); Optional<ResourceDefinition> classId = ns1Resources.stream().filter(r -> r.id().equals("classId")).findFirst(); assertThat(zipId).isPresent(); assertThat(zipId.get().url()).isEqualTo(tempJarZip.toURI().toURL().toString()); assertEquals(ResourceType.JARS_IN_ZIP, zipId.get().type()); assertThat(classId).isPresent(); assertThat(classId.get().url()).isEqualTo(tempClass.toURI().toURL().toString()); assertEquals(ResourceType.CLASS, classId.get().type()); // check the bytes[] are equal assertArrayEquals(getTestFileBytes(tempJarZip), zipId.get().payload()); // validate NS2 ResourceDefinition contents. 
final UserCodeNamespaceConfig userCodeNamespaceConfig2 = userCodeNamespacesConfig.getNamespaceConfigs().get("ns2"); assertNotNull(userCodeNamespaceConfig2); assertThat(userCodeNamespaceConfig2.getName()).isEqualTo("ns2"); assertThat(userCodeNamespaceConfig2.getResourceConfigs()).hasSize(1); Collection<ResourceDefinition> ns2Resources = userCodeNamespaceConfig2.getResourceConfigs(); assertThat(ns2Resources).hasSize(1); Optional<ResourceDefinition> jarId2Resource = ns2Resources.stream().filter(r -> r.id().equals("jarId2")).findFirst(); assertThat(jarId2Resource).isPresent(); assertThat(jarId2Resource.get().url()).isEqualTo(tempJar.toURI().toURL().toString()); assertEquals(ResourceType.JAR, jarId2Resource.get().type()); // check the bytes[] are equal assertArrayEquals(getTestFileBytes(tempJar), jarId2Resource.get().payload()); // Validate filtering config assertNotNull(userCodeNamespacesConfig.getClassFilterConfig()); JavaSerializationFilterConfig filterConfig = userCodeNamespacesConfig.getClassFilterConfig(); assertFalse(filterConfig.isDefaultsDisabled()); assertTrue(filterConfig.getWhitelist().isListed("com.acme.app.FakeClass")); assertTrue(filterConfig.getWhitelist().isListed("com.hazelcast.fake.place.MagicClass")); assertFalse(filterConfig.getWhitelist().isListed("not.in.the.whitelist.ClassName")); assertTrue(filterConfig.getBlacklist().isListed("com.acme.app.BeanComparator")); assertFalse(filterConfig.getBlacklist().isListed("not.in.the.blacklist.ClassName")); } /** * Unit test for {@link YamlMemberDomConfigProcessor}. It is placed under {@link com.hazelcast.config} package, * because we need access to the {@link UserCodeNamespacesConfig#getNamespaceConfigs()} package-private method. 
*/ @Test public void unitTestNamespaceConfigs_oldStyle() throws IOException { File tempJar = tempFolder.newFile("tempJar.jar"); String yamlTestString = "hazelcast:\n" + " user-code-namespaces:\n" + " enabled: true\n" + " ns1:\n" + " - id: \"jarId1\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId2\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " ns2:\n" + " - id: \"jarId3\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId4\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; assertNamespaceConfig(yamlTestString); } /** * Unit test for {@link YamlMemberDomConfigProcessor}. It is placed under {@link com.hazelcast.config} package, * because we need access to the {@link UserCodeNamespacesConfig#getNamespaceConfigs()} package-private method. */ @Test public void unitTestNamespaceConfigs_newStyle() throws IOException { File tempJar = tempFolder.newFile("tempJar.jar"); String yamlTestString = "hazelcast:\n" + " user-code-namespaces:\n" + " enabled: true\n" + " name-spaces:\n" + " ns1:\n" + " - id: \"jarId1\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId2\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " ns2:\n" + " - id: \"jarId3\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId4\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; assertNamespaceConfig(yamlTestString); } /** * Unit test for {@link YamlMemberDomConfigProcessor}. It is placed under {@link com.hazelcast.config} package, * because we need access to the {@link UserCodeNamespacesConfig#getNamespaceConfigs()} package-private method. 
*/ @Test public void unitTestDuplicatedNamespaceConfigs_mixedStyle_throws() throws IOException { File tempJar = tempFolder.newFile("tempJar.jar"); /* name-spaces: ns1 ns1 */ final String yamlTestString = "hazelcast:\n" + " user-code-namespaces:\n" + " enabled: true\n" + " name-spaces:\n" + " ns1:\n" + " - id: \"jarId1\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId2\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " ns1:\n" + " - id: \"jarId3\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId4\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; assertThatThrownBy(() -> buildConfig(yamlTestString).getNamespacesConfig()) .isInstanceOf(InvalidConfigurationException.class); /* ns1 name-spaces: ns1 */ final String _yamlTestString = "hazelcast:\n" + " user-code-namespaces:\n" + " enabled: true\n" + " ns1:\n" + " - id: \"jarId3\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId4\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " name-spaces:\n" + " ns1:\n" + " - id: \"jarId1\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n" + " - id: \"jarId2\"\n" + " resource-type: \"jar\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; assertThatThrownBy(() -> buildConfig(_yamlTestString).getNamespacesConfig()) .isInstanceOf(InvalidConfigurationException.class); } @Test public void unitTestNamespaceConfigs_throws() throws IOException { File tempJar = tempFolder.newFile("tempJar.jar"); String yamlTestString = "hazelcast:\n" + " user-code-namespaces:\n" + " enabled: true\n" + " name-spaces:\n" + " ns1:\n" + " - id: \"jarId1\"\n" + " resource-type: \"jars\"\n" + " url: " + tempJar.toURI().toURL() + "\n"; assertThatThrownBy(() -> buildConfig(yamlTestString)) .isInstanceOf(InvalidConfigurationException.class) 
.hasMessageContaining("was configured with invalid resource type"); } private void assertNamespaceConfig(String yamlTestString) { final UserCodeNamespacesConfig ucnConfig = buildConfig(yamlTestString).getNamespacesConfig(); assertNotNull(ucnConfig); assertTrue(ucnConfig.isEnabled()); Map<String, UserCodeNamespaceConfig> namespaceConfigs = ucnConfig.getNamespaceConfigs(); assertEquals(2, namespaceConfigs.size()); assertTrue(namespaceConfigs.keySet().containsAll(asList("ns1", "ns2"))); namespaceConfigs.values().forEach(namespaceConfig -> assertEquals(2, namespaceConfig.getResourceConfigs().size())); } @Override public void testRestConfig() throws IOException { String yaml = """ hazelcast: rest: enabled: true port: 8080 security-realm: realmName token-validity-seconds: 500 ssl: enabled: true client-auth: NEED ciphers: TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_CBC_SHA256 enabled-protocols: TLSv1.2, TLSv1.3 key-alias: myKeyAlias key-password: myKeyPassword key-store: /path/to/keystore key-store-password: myKeyStorePassword key-store-type: JKS key-store-provider: SUN trust-store: /path/to/truststore trust-store-password: myTrustStorePassword trust-store-type: JKS trust-store-provider: SUN protocol: TLS certificate: /path/to/certificate certificate-key: /path/to/certificate-key trust-certificate: /path/to/trust-certificate trust-certificate-key: /path/to/trust-certificate-key """; Config config = buildConfig(yaml); validateRestConfig(config); } @Override public void testMapExpiryConfig() { String yaml = """ hazelcast: map: expiry: time-to-live-seconds: 2147483647 max-idle-seconds: 2147483647 """; Config config = buildConfig(yaml); MapConfig mapConfig = config.getMapConfig("expiry"); assertEquals(Integer.MAX_VALUE, mapConfig.getTimeToLiveSeconds()); assertEquals(Integer.MAX_VALUE, mapConfig.getMaxIdleSeconds()); } @Override @Test public void testTpcConfig() { String yaml = """ hazelcast: tpc: enabled: true eventloop-count: 12 """; TpcConfig tpcConfig = 
buildConfig(yaml).getTpcConfig(); assertThat(tpcConfig.isEnabled()).isTrue(); assertThat(tpcConfig.getEventloopCount()).isEqualTo(12); } @Override @Test public void testTpcSocketConfig() { String yaml = """ hazelcast: network: tpc-socket: port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 """; TpcSocketConfig tpcSocketConfig = buildConfig(yaml).getNetworkConfig().getTpcSocketConfig(); assertThat(tpcSocketConfig.getPortRange()).isEqualTo("14000-16000"); assertThat(tpcSocketConfig.getReceiveBufferSizeKB()).isEqualTo(256); assertThat(tpcSocketConfig.getSendBufferSizeKB()).isEqualTo(256); } @Override @Test public void testTpcSocketConfigAdvanced() { String yaml = """ hazelcast: advanced-network: enabled: true member-server-socket-endpoint-config:\s tpc-socket:\s port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 client-server-socket-endpoint-config: tpc-socket: port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 memcache-server-socket-endpoint-config: tpc-socket: port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 rest-server-socket-endpoint-config: tpc-socket: port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 wan-endpoint-config:\s tokyo: tpc-socket: port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 wan-server-socket-endpoint-config:\s london: tpc-socket: port-range: 14000-16000 receive-buffer-size-kb: 256 send-buffer-size-kb: 256 """; Map<EndpointQualifier, EndpointConfig> endpointConfigs = buildConfig(yaml) .getAdvancedNetworkConfig() .getEndpointConfigs(); assertThat(endpointConfigs).hasSize(6); endpointConfigs.forEach((endpointQualifier, endpointConfig) -> { TpcSocketConfig tpcSocketConfig = endpointConfig.getTpcSocketConfig(); assertThat(tpcSocketConfig.getPortRange()).isEqualTo("14000-16000"); assertThat(tpcSocketConfig.getReceiveBufferSizeKB()).isEqualTo(256); 
assertThat(tpcSocketConfig.getSendBufferSizeKB()).isEqualTo(256); }); } @Override public void testVectorCollectionConfig() { String yaml = """ hazelcast: vector-collection: vector-1: indexes: - name: index-1-1 dimension: 2 metric: DOT max-degree: 10 ef-construction: 10 use-deduplication: true - name: index-1-2 dimension: 3 metric: EUCLIDEAN vector-2: indexes: - dimension: 4 metric: COSINE use-deduplication: false """; Config config = buildConfig(yaml); validateVectorCollectionConfig(config); } public String getAdvancedNetworkConfigWithSocketOption(String socketOption, int value) { return "hazelcast:\n" + " advanced-network:\n" + " enabled: true\n" + " member-server-socket-endpoint-config: \n" + " socket-options: \n" + " " + socketOption + ": " + value + "\n"; } }
/**
 * Parses a {@link CommitReport} from its JSON string representation.
 *
 * @param json the JSON document to parse
 * @return the parsed commit report
 */
public static CommitReport fromJson(String json) {
    // JsonUtil owns the parser lifecycle; the JsonNode-based overload does the real work.
    final CommitReport parsed = JsonUtil.parse(json, CommitReportParser::fromJson);
    return parsed;
}
@Test
public void missingFields() {
    // Fields are validated in declaration order; supplying one more field at a
    // time surfaces the next missing-field error.
    assertParseFailsWith("{}",
        "Cannot parse missing string: table-name");
    assertParseFailsWith("{\"table-name\":\"roundTripTableName\"}",
        "Cannot parse missing long: snapshot-id");
    assertParseFailsWith("{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23}",
        "Cannot parse missing long: sequence-number");
    assertParseFailsWith("{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23,\"sequence-number\":24}",
        "Cannot parse missing string: operation");
    assertParseFailsWith("{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23,\"sequence-number\":24, \"operation\": \"DELETE\"}",
        "Cannot parse missing field: metrics");
}

// Asserts that parsing the given JSON fails with the expected message.
private static void assertParseFailsWith(String json, String expectedMessage) {
    assertThatThrownBy(() -> CommitReportParser.fromJson(json))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage(expectedMessage);
}
/**
 * Program entry point: obtains two coins from the factory and logs their
 * descriptions.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    LOGGER.info("The alchemist begins his work.");
    // The factory decides how coin instances are produced for each type.
    var copper = CoinFactory.getCoin(CoinType.COPPER);
    var gold = CoinFactory.getCoin(CoinType.GOLD);
    LOGGER.info(copper.getDescription());
    LOGGER.info(gold.getDescription());
}
@Test
void shouldExecuteWithoutExceptions() {
    // Smoke test: the application's main() must complete without throwing.
    final String[] noArgs = new String[0];
    assertDoesNotThrow(() -> App.main(noArgs));
}
/**
 * Matches {@code str} against a template {@code expression} containing
 * {@code {variable}} placeholders and returns a map of variable name to the
 * substring of {@code str} it matched.
 * <p>
 * Any malformed expression (nested or unclosed braces, or literal text that
 * does not line up with {@code str}) yields an empty map.
 * NOTE(review): {@code State}/{@code TEXT}/{@code VAR}/{@code END_VAR} are
 * declared elsewhere in this file — presumably a three-state scanner enum.
 *
 * @param expression the template, e.g. {@code "{counter}-{id}"}
 * @param str        the concrete string to match against the template
 * @return resolved variable bindings, or an empty map on any mismatch
 */
static Map<String, String> resolveVariables(String expression, String str) {
    if (expression == null || str == null)
        return Collections.emptyMap();
    Map<String, String> resolvedVariables = new HashMap<>();
    // Accumulates the name of the variable currently being scanned.
    StringBuilder variableBuilder = new StringBuilder();
    State state = State.TEXT;
    // j is the read cursor into str; i is the cursor into expression.
    int j = 0;
    int expressionLength = expression.length();
    for (int i = 0; i < expressionLength; i++) {
        char e = expression.charAt(i);
        switch (e) {
            case '{':
                // A '{' immediately after a '}' (no delimiter text between
                // variables) is ambiguous — reject.
                if (state == END_VAR)
                    return Collections.emptyMap();
                state = VAR;
                break;
            case '}':
                // A '}' is only legal while inside a variable name.
                if (state != VAR)
                    return Collections.emptyMap();
                state = END_VAR;
                if (i != expressionLength - 1)
                    break;
                // Intentional fall-through when '}' is the LAST character of
                // the expression: the END_VAR branch below must still run to
                // bind the final variable to the remainder of str.
            default:
                switch (state) {
                    case VAR:
                        // Inside braces: the character is part of the variable name.
                        variableBuilder.append(e);
                        break;
                    case END_VAR:
                        String replacement;
                        // ec: current char is the last one in the expression.
                        boolean ec = i == expressionLength - 1;
                        if (ec) {
                            // Final variable consumes the rest of str.
                            replacement = str.substring(j);
                        } else {
                            // Otherwise the variable's value ends at the next
                            // occurrence of the delimiter character e.
                            int k = str.indexOf(e, j);
                            if (k == -1)
                                return Collections.emptyMap();
                            replacement = str.substring(j, str.indexOf(e, j));
                        }
                        resolvedVariables.put(variableBuilder.toString(), replacement);
                        j += replacement.length();
                        if (j == str.length() && ec)
                            return resolvedVariables;
                        variableBuilder.setLength(0);
                        state = TEXT;
                        // Intentional fall-through: after binding the variable,
                        // the delimiter character itself must be matched by the
                        // TEXT branch below (which also advances j).
                    case TEXT:
                        // Literal template text must match str exactly.
                        if (str.charAt(j) != e)
                            return Collections.emptyMap();
                        j++;
                }
        }
    }
    return resolvedVariables;
}
@Test
public void testMalformedExpression2() {
    // The second '}' has no matching '{', so resolution must yield no bindings.
    final String malformed = "{counter }id}-";
    Map<String, String> resolved = resolveVariables(malformed, "whatever");
    assertEquals(0, resolved.size());
}
@Override public void doGet( final HttpServletRequest req, final HttpServletResponse resp) throws IOException { // By default requests are persistent. We don't want long-lived connections // on server side. resp.addHeader("Connection", "close"); if (!isActive()) { // Report not SC_OK resp.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED, RESPONSE_NOT_ACTIVE); return; } resp.setStatus(HttpServletResponse.SC_OK); resp.getWriter().write(RESPONSE_ACTIVE); resp.getWriter().flush(); }
@Test
public void testFailsOnInactive() throws IOException {
    // Force the servlet to report "not active".
    servlet = new IsActiveServlet() {
        @Override
        protected boolean isActive() {
            return false;
        }
    };
    doGet();
    // An inactive servlet must answer 405 with the standard "not active" body.
    verify(resp, atLeastOnce()).sendError(
        eq(HttpServletResponse.SC_METHOD_NOT_ALLOWED),
        eq(IsActiveServlet.RESPONSE_NOT_ACTIVE));
}
public static String getUnresolvedSchemaName(final Schema schema) { if (!isUnresolvedSchema(schema)) { throw new IllegalArgumentException("Not a unresolved schema: " + schema); } return schema.getProp(UR_SCHEMA_ATTR); }
@Test(expected = IllegalArgumentException.class) public void testIsUnresolvedSchemaError3() { // Namespace not "org.apache.avro.compiler". Schema s = SchemaBuilder.record("UnresolvedSchema").prop("org.apache.avro.idl.unresolved.name", "x").fields() .endRecord(); SchemaResolver.getUnresolvedSchemaName(s); }
/**
 * Returns a {@link DoFnInvoker} for the given {@link DoFn}, built by the
 * ByteBuddy-based invoker factory.
 *
 * @param fn the user {@code DoFn} to wrap
 * @return a generated invoker for {@code fn}
 */
public static <InputT, OutputT> DoFnInvoker<InputT, OutputT> invokerFor(
    DoFn<InputT, OutputT> fn) {
    final ByteBuddyDoFnInvokerFactory factory = ByteBuddyDoFnInvokerFactory.only();
    return factory.newByteBuddyInvoker(fn);
}
@Test
public void testFinishBundleException() throws Exception {
    // Stub the argument provider so the invoker can request a FinishBundleContext.
    DoFnInvoker.ArgumentProvider<Integer, Integer> mockArguments =
        mock(DoFnInvoker.ArgumentProvider.class);
    when(mockArguments.finishBundleContext(any(DoFn.class))).thenReturn(null);
    // A DoFn whose @FinishBundle method always throws.
    DoFnInvoker<Integer, Integer> invoker =
        DoFnInvokers.invokerFor(
            new DoFn<Integer, Integer>() {
              @FinishBundle
              public void finishBundle(@SuppressWarnings("unused") FinishBundleContext c) {
                throw new IllegalArgumentException("bogus");
              }

              @ProcessElement
              public void processElement(@SuppressWarnings("unused") ProcessContext c) {}
            });
    // User exceptions must surface wrapped in UserCodeException with the
    // original message preserved.
    thrown.expect(UserCodeException.class);
    thrown.expectMessage("bogus");
    invoker.invokeFinishBundle(mockArguments);
}
/**
 * Applies the given handler to the set of menu ids visible to the current tenant.
 * Does nothing when multi-tenancy is disabled.
 */
@Override
public void handleTenantMenu(TenantMenuHandler handler) {
    // If multi-tenancy is disabled, skip the logic entirely.
    if (isTenantDisable()) {
        return;
    }
    // Load the current tenant, then determine the menus it may see.
    TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
    Set<Long> menuIds;
    if (isSystemTenant(tenant)) { // system tenant: sees the full menu list
        menuIds = CollectionUtils.convertSet(menuService.getMenuList(), MenuDO::getId);
    } else { // ordinary tenant: restricted to the menus of its tenant package
        menuIds = tenantPackageService.getTenantPackage(tenant.getPackageId()).getMenuIds();
    }
    // Run the handler
    handler.handle(menuIds);
}
@Test // 系统租户的情况 public void testHandleTenantMenu_system() { // 准备参数 TenantMenuHandler handler = mock(TenantMenuHandler.class); // mock 未禁用 when(tenantProperties.getEnable()).thenReturn(true); // mock 租户 TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM)); tenantMapper.insert(dbTenant);// @Sql: 先插入出一条存在的数据 TenantContextHolder.setTenantId(dbTenant.getId()); // mock 菜单 when(menuService.getMenuList()).thenReturn(Arrays.asList(randomPojo(MenuDO.class, o -> o.setId(100L)), randomPojo(MenuDO.class, o -> o.setId(101L)))); // 调用 tenantService.handleTenantMenu(handler); // 断言 verify(handler).handle(asSet(100L, 101L)); }
/**
 * Converts a list into a newly allocated array with the given component type.
 *
 * @param c    component type of the resulting array
 * @param list elements to copy, in order
 * @param <T>  element type
 * @return a new {@code T[]} containing the list's elements in list order
 */
public static <T> T[] toArray(Class<T> c, List<T> list) {
    @SuppressWarnings("unchecked")
    T[] ta = (T[]) Array.newInstance(c, list.size());
    // List.toArray fills the supplied array when it is exactly the right size,
    // replacing the manual element-by-element copy (which was also O(n^2) for
    // linked lists because of the repeated get(i) calls).
    return list.toArray(ta);
}
@Test public void testWithEmptyList() { try { List<String> list = new ArrayList<String>(); String[] arr = GenericsUtil.toArray(list); fail("Empty array should throw exception"); System.out.println(arr); //use arr so that compiler will not complain }catch (IndexOutOfBoundsException ex) { //test case is successful } }
/**
 * Returns the number of files attached to the request.
 *
 * @return the size of the HTTP file-argument collection
 */
public int getHTTPFileArgCount() {
    return getHTTPFileArgsCollection().size();
}
@Test
public void testConstructors() throws Exception {
    // A freshly constructed container must start out with no file entries.
    HTTPFileArgs files = new HTTPFileArgs();
    assertEquals(0, files.getHTTPFileArgCount());
}
/**
 * Returns whether the text is a floating-point literal: digits with at most
 * one decimal point after the start position.
 *
 * NOTE(review): findStartPosition presumably skips a leading sign and
 * returns a negative value for null/blank input — confirm against its
 * implementation. As written, a trailing dot ("1.") is accepted and
 * exponent notation ("1e5") is rejected.
 */
public static boolean isFloatingNumber(String text) {
    final int startPos = findStartPosition(text);
    if (startPos < 0) {
        return false;
    }
    boolean dots = false; // whether a '.' has been seen already
    for (int i = startPos; i < text.length(); i++) {
        char ch = text.charAt(i);
        if (!Character.isDigit(ch)) {
            if (ch == '.') {
                if (dots) {
                    return false; // a second dot -> not a number
                }
                dots = true;
            } else {
                return false; // any other non-digit character
            }
        }
    }
    return true;
}
// isFloatingNumber must reject empty, blank and null input.
// NOTE(review): the @DisplayName wording ("returns true") looks inverted
// relative to the assertions — confirm and fix the display text upstream.
@Test
@DisplayName("Tests that isFloatingNumber returns true for empty, space or null")
void isFloatingNumberEmpty() {
    assertFalse(ObjectHelper.isFloatingNumber(""));
    assertFalse(ObjectHelper.isFloatingNumber(" "));
    assertFalse(ObjectHelper.isFloatingNumber(null));
}
// Skip this plugin for every request except gRPC ones.
@Override
public boolean skip(final ServerWebExchange exchange) {
    return skipExcept(exchange, RpcTypeEnum.GRPC);
}
// The gRPC plugin must not be skipped for the test exchange.
@Test
public void testSkip() {
    final boolean result = grpcPlugin.skip(getServerWebExchange());
    assertFalse(result);
}
/**
 * Opens a writable channel for a new file at the given Hadoop resource,
 * creating the file via the path's file system.
 *
 * @param resourceId    location of the file to create
 * @param createOptions creation options (unused by this implementation)
 * @return a channel writing to the newly created file
 * @throws IOException if the file system cannot be resolved or the file cannot be created
 */
@Override
protected WritableByteChannel create(HadoopResourceId resourceId, CreateOptions createOptions)
        throws IOException {
    // Resolve the path once instead of converting the resource id twice.
    Path path = resourceId.toPath();
    return Channels.newChannel(path.getFileSystem(configuration).create(path));
}
// Reading a created file from a non-zero offset returns the remaining bytes.
@Test
public void testCreateAndReadFileWithShift() throws Exception {
    byte[] bytes = "testData".getBytes(StandardCharsets.UTF_8);
    create("testFile", bytes);
    int bytesToSkip = 3;
    byte[] expected = Arrays.copyOfRange(bytes, bytesToSkip, bytes.length);
    byte[] actual = read("testFile", bytesToSkip);
    assertArrayEquals(expected, actual);
}
// Switches the output format set between plain (monochrome) and ANSI-colored.
@Override
public void setMonochrome(boolean monochrome) {
    formats = monochrome ? monochrome() : ansi();
}
// Feature-, scenario- and examples-level tags must be printed (merged)
// above each scenario in the monochrome pretty output.
@Test
void should_print_tags() {
    Feature feature = TestFeatureParser.parse("path/test.feature", ""
        + "@feature_tag\n"
        + "Feature: feature name\n"
        + " @scenario_tag\n"
        + " Scenario: scenario name\n"
        + " Then first step\n"
        + " @scenario_outline_tag\n"
        + " Scenario Outline: scenario outline name\n"
        + " Then <arg> step\n"
        + " @examples_tag\n"
        + " Examples: examples name\n"
        + " | arg |\n"
        + " | second |\n");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Runtime.builder()
        .withFeatureSupplier(new StubFeatureSupplier(feature))
        .withAdditionalPlugins(new PrettyFormatter(out))
        .withRuntimeOptions(new RuntimeOptionsBuilder().setMonochrome().build())
        .withBackendSupplier(new StubBackendSupplier(
            new StubStepDefinition("first step", "path/step_definitions.java:7"),
            new StubStepDefinition("second step", "path/step_definitions.java:11")))
        .build()
        .run();
    // Whitespace is compared loosely; step locations come from the stub definitions.
    assertThat(out, bytes(equalToCompressingWhiteSpace(""
        + "\n"
        + "@feature_tag @scenario_tag\n"
        + "Scenario: scenario name # path/test.feature:4\n"
        + " Then first step # path/step_definitions.java:7\n"
        + "\n"
        + "@feature_tag @scenario_outline_tag @examples_tag\n"
        + "Scenario Outline: scenario outline name # path/test.feature:12\n"
        + " Then second step # path/step_definitions.java:11\n")));
}
/**
 * Looks up a simple XMP property value by qualified key ("prefix:name").
 * For array properties the first array item's value is returned; for
 * structs and other complex types (or unknown namespaces) null is returned.
 */
@Override
public String get(String name) {
    checkKey(name);
    String value = null;
    String[] keyParts = splitKey(name);
    // Resolve the registered namespace URI for the key's prefix.
    String ns = registry.getNamespaceURI(keyParts[0]);
    if (ns != null) {
        try {
            XMPProperty prop = xmpData.getProperty(ns, keyParts[1]);
            if (prop != null && prop.getOptions().isSimple()) {
                value = prop.getValue();
            } else if (prop != null && prop.getOptions().isArray()) {
                // XMP array indices are 1-based; take the first item.
                prop = xmpData.getArrayItem(ns, keyParts[1], 1);
                value = prop.getValue();
            }
            // in all other cases, null is returned
        } catch (XMPException e) {
            // Ignore
        }
    }
    return value;
}
// Looking up a property that is absent from the XMP data yields null.
@Test
public void get_notExistingProp_null() throws TikaException {
    assertNull(xmpMeta.get(TikaCoreProperties.FORMAT));
}
// Shuts down immediately (zero await time).
public void shutdown() {
    shutdown(0);
}
// shutdown() on a RUNNING consumer must transition it to SHUTDOWN_ALREADY.
@Test
public void testShutdown() {
    defaultMQPushConsumerImpl.setServiceState(ServiceState.RUNNING);
    defaultMQPushConsumerImpl.shutdown();
    assertEquals(ServiceState.SHUTDOWN_ALREADY, defaultMQPushConsumerImpl.getServiceState());
}
/**
 * Registers a newly exported service URL: builds its service-level metadata
 * (after applying the active MetadataParamsFilter extensions), records the
 * URL, extracts instance-level params, and marks this metadata as updated.
 */
public synchronized void addService(URL url) {
    // fixme, pass in application mode context during initialization of MetadataInfo.
    if (this.loader == null) {
        // Lazily resolve the filter extension loader from the URL's application model.
        this.loader = url.getOrDefaultApplicationModel().getExtensionLoader(MetadataParamsFilter.class);
    }
    List<MetadataParamsFilter> filters = loader.getActivateExtension(url, "params-filter");

    // generate service level metadata
    ServiceInfo serviceInfo = new ServiceInfo(url, filters);
    this.services.put(serviceInfo.getMatchKey(), serviceInfo);

    // extract common instance level params
    extractInstanceParams(url, filters);

    if (exportedServiceURLs == null) {
        exportedServiceURLs = new ConcurrentSkipListMap<>();
    }
    addURL(exportedServiceURLs, url);
    updated = true;
}
// MetadataInfo must survive a JDK serialization round trip: equal content,
// with the private "initiated" flag restored as AtomicBoolean(false).
@Test
void testJdkSerialize() throws IOException, ClassNotFoundException, NoSuchFieldException, IllegalAccessException {
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    ObjectOutputStream objectOutputStream = new ObjectOutputStream(byteArrayOutputStream);
    MetadataInfo metadataInfo = new MetadataInfo("demo");
    metadataInfo.addService(url);
    objectOutputStream.writeObject(metadataInfo);
    objectOutputStream.close();
    byteArrayOutputStream.close();
    byte[] bytes = byteArrayOutputStream.toByteArray();
    ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes);
    ObjectInputStream objectInputStream = new ObjectInputStream(byteArrayInputStream);
    MetadataInfo metadataInfo2 = (MetadataInfo) objectInputStream.readObject();
    objectInputStream.close();
    Assertions.assertEquals(metadataInfo, metadataInfo2);
    // Inspect the private "initiated" flag reflectively.
    Field initiatedField = MetadataInfo.class.getDeclaredField("initiated");
    initiatedField.setAccessible(true);
    Assertions.assertInstanceOf(AtomicBoolean.class, initiatedField.get(metadataInfo2));
    Assertions.assertFalse(((AtomicBoolean) initiatedField.get(metadataInfo2)).get());
}
/**
 * Renders a logging event as one HTML table row (with alternating odd/even
 * row classes keyed off a running counter), followed by the rendered
 * throwable when one is attached to the event.
 */
public String doLayout(ILoggingEvent event) {
    StringBuilder output = new StringBuilder();
    startNewTableIfLimitReached(output);

    // Alternate row shading: odd counter values get the "odd" class.
    final boolean odd = ((counter++) & 1) != 0;
    final String rowClass =
            event.getLevel().toString().toLowerCase() + (odd ? " odd" : " even");

    output.append(LINE_SEPARATOR)
            .append("<tr class=\"")
            .append(rowClass)
            .append("\">")
            .append(LINE_SEPARATOR);

    // Walk the converter chain, appending one cell per converter.
    for (Converter<ILoggingEvent> converter = head; converter != null; converter = converter.getNext()) {
        appendEventToBuffer(output, converter, event);
    }

    output.append("</tr>").append(LINE_SEPARATOR);

    if (event.getThrowableProxy() != null) {
        throwableRenderer.render(output, event);
    }
    return output.toString();
}
// Renders one event through HTMLLayout and spot-checks cells of the
// generated (already XHTML-validated) output.
@Test
public void testDoLayout() throws Exception {
    ILoggingEvent le = createLoggingEvent();
    String result = layout.getFileHeader();
    result += layout.getPresentationHeader();
    result += layout.doLayout(le);
    result += layout.getPresentationFooter();
    result += layout.getFileFooter();
    Document doc = parseOutput(result);
    Element rootElement = doc.getRootElement();
    rootElement.toString();
    // the rest of this test is very dependent of the output generated
    // by HTMLLayout. Given that the XML parser already verifies
    // that the result conforms to xhtml-strict, we may want to
    // skip the assertions below. However, the assertions below are another
    // *independent* way to check the output format.
    // head, body
    assertEquals(2, rootElement.elements().size());
    Element bodyElement = (Element) rootElement.elements().get(1);
    Element tableElement = (Element) bodyElement.elements().get(3);
    assertEquals("table", tableElement.getName());
    Element trElement = (Element) tableElement.elements().get(1);
    {
        Element tdElement = (Element) trElement.elements().get(0);
        assertEquals("DEBUG", tdElement.getText());
    }
    {
        Element tdElement = (Element) trElement.elements().get(1);
        String regex = ClassicTestConstants.NAKED_MAIN_REGEX;
        System.out.println(tdElement.getText());
        assertTrue(tdElement.getText().matches(regex));
    }
    {
        Element tdElement = (Element) trElement.elements().get(2);
        assertEquals("test message", tdElement.getText());
    }
}
// Fetches the column as a Timestamp, converting the merged value as needed.
@Override
public Timestamp getTimestamp(final int columnIndex) throws SQLException {
    return (Timestamp) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, Timestamp.class), Timestamp.class);
}
// getTimestamp(int) must pass the value through from the merged result set.
@Test
void assertGetTimestampWithColumnIndex() throws SQLException {
    when(mergeResultSet.getValue(1, Timestamp.class)).thenReturn(new Timestamp(0L));
    assertThat(shardingSphereResultSet.getTimestamp(1), is(new Timestamp(0L)));
}
// Returns the cached mapping of runner names to PipelineRunner classes.
static Map<String, Class<? extends PipelineRunner<?>>> getRegisteredRunners() {
    return CACHE.get().supportedPipelineRunners;
}
// Runners must be discoverable by their lower-cased simple class name.
@Test
public void testAutomaticRegistrationOfRunners() {
    assertEquals(
        REGISTERED_RUNNER,
        PipelineOptionsFactory.getRegisteredRunners()
            .get(REGISTERED_RUNNER.getSimpleName().toLowerCase()));
}
// Intentionally a no-op: this listener reacts only via onRequest.
@Override
public void onEvent(ApplicationEvent event) {
    // only onRequest is used
}
// A bogus (non-SpanCustomizer) request property must not break event handling.
@Test
void onEvent_toleratesBadCustomizer() {
    setEventType(RequestEvent.Type.FINISHED);
    setBaseUri("/");
    when(request.getProperty(SpanCustomizer.class.getName())).thenReturn("eyeballs");
    listener.onEvent(requestEvent);
    verifyNoMoreInteractions(parser);
}
// Exposes the context-path plugin data handler as a Spring bean.
@Bean
public PluginDataHandler contextPathPluginDataHandler() {
    return new ContextPathPluginDataHandler();
}
// The auto-configuration must register a handler bean named after the method,
// reporting the CONTEXT_PATH plugin name.
@Test
public void testContextPathPluginDataHandler() {
    new ApplicationContextRunner()
        .withConfiguration(AutoConfigurations.of(ContextPathPluginConfiguration.class))
        .withBean(ContextPathPluginConfigurationTest.class)
        .withPropertyValues("debug=true")
        .run(context -> {
            PluginDataHandler handler = context.getBean("contextPathPluginDataHandler", PluginDataHandler.class);
            assertNotNull(handler);
            assertThat(handler.pluginNamed()).isEqualTo(PluginEnum.CONTEXT_PATH.getName());
        });
}
// Iterates every archived active action (null filter = load all instants).
public ClosableIterator<ActiveAction> getActiveActionsIterator() {
    return loadInstants(null);
}
// Reads a legacy (pre-LSM) archived timeline and checks that all ten
// instants are loaded in order.
@Test
void testReadLegacyArchivedTimeline() throws Exception {
    String tableName = "testTable";
    String tablePath = tempFile.getAbsolutePath() + StoragePath.SEPARATOR + tableName;
    HoodieTableMetaClient metaClient = HoodieTestUtils.init(
        HoodieTestUtils.getDefaultStorageConf(), tablePath, HoodieTableType.COPY_ON_WRITE, tableName);
    prepareLegacyArchivedTimeline(metaClient);
    LegacyArchivedMetaEntryReader reader = new LegacyArchivedMetaEntryReader(metaClient);
    ClosableIterator<ActiveAction> iterator = reader.getActiveActionsIterator();
    List<ActiveAction> activeActions = new ArrayList<>();
    while (iterator.hasNext()) {
        activeActions.add(iterator.next());
    }
    assertThat(activeActions.stream().map(ActiveAction::getInstantTime).sorted().collect(Collectors.joining(",")),
        is("00000001,00000002,00000003,00000004,00000005,00000006,00000007,00000008,00000009,00000010"));
}
/**
 * Releases every currently-free slot belonging to the given task manager.
 * Reserved slots are untouched; releasing a free slot must not hand back
 * any fulfilled resource requirement (checked via Preconditions).
 */
@Override
public void releaseFreeSlotsOnTaskManager(ResourceID taskManagerId, Exception cause) {
    assertHasBeenStarted();
    if (isTaskManagerRegistered(taskManagerId)) {
        // Snapshot the allocation ids of all free slots on this task manager.
        Collection<AllocationID> freeSlots =
                declarativeSlotPool.getFreeSlotTracker().getFreeSlotsInformation().stream()
                        .filter(
                                slotInfo ->
                                        slotInfo.getTaskManagerLocation()
                                                .getResourceID()
                                                .equals(taskManagerId))
                        .map(SlotInfo::getAllocationId)
                        .collect(Collectors.toSet());

        for (AllocationID allocationId : freeSlots) {
            final ResourceCounter previouslyFulfilledRequirement =
                    declarativeSlotPool.releaseSlot(allocationId, cause);
            // release free slots, previously fulfilled requirement should be empty.
            Preconditions.checkState(
                    previouslyFulfilledRequirement.equals(ResourceCounter.empty()));
        }
    }
}
// Releasing free slots on a task manager must free only the unreserved slot
// and leave the reserved one in the pool.
@Test
void testReleaseFreeSlotsOnTaskManager() throws Exception {
    try (DeclarativeSlotPoolService slotPoolService = createDeclarativeSlotPoolService()) {
        final LocalTaskManagerLocation taskManagerLocation = new LocalTaskManagerLocation();
        slotPoolService.registerTaskManager(taskManagerLocation.getResourceID());

        final ResourceProfile resourceProfile = ResourceProfile.newBuilder().setCpuCores(1).build();
        SlotOffer slotOffer1 = new SlotOffer(new AllocationID(), 0, resourceProfile);
        SlotOffer slotOffer2 = new SlotOffer(new AllocationID(), 1, resourceProfile);

        final DeclarativeSlotPool slotPool = slotPoolService.getDeclarativeSlotPool();
        slotPool.setResourceRequirements(ResourceCounter.withResource(resourceProfile, 2));

        final DefaultDeclarativeSlotPoolTest.FreeSlotConsumer freeSlotConsumer =
                new DefaultDeclarativeSlotPoolTest.FreeSlotConsumer();
        final Collection<SlotOffer> slotOffers = Arrays.asList(slotOffer1, slotOffer2);
        slotPoolService.offerSlots(
                taskManagerLocation,
                new RpcTaskManagerGateway(
                        new TestingTaskExecutorGatewayBuilder()
                                .setFreeSlotFunction(freeSlotConsumer)
                                .createTestingTaskExecutorGateway(),
                        jobMasterId),
                slotOffers);

        // slot1 is reserved, slot2 is free.
        slotPool.reserveFreeSlot(slotOffer1.getAllocationId(), resourceProfile);

        slotPoolService.releaseFreeSlotsOnTaskManager(
                taskManagerLocation.getResourceID(), new FlinkException("Test cause"));

        assertThat(slotPool.getFreeSlotTracker().getAvailableSlots()).isEmpty();
        assertThat(
                Iterables.getOnlyElement(slotPool.getAllSlotsInformation())
                        .getAllocationId())
                .isEqualTo(slotOffer1.getAllocationId());
        assertThat(Iterables.getOnlyElement(freeSlotConsumer.drainFreedSlots()))
                .isEqualTo(slotOffer2.getAllocationId());
    }
}
public int deleteUnusedWorker() { int cnt = 0; try { WarehouseManager warehouseManager = GlobalStateMgr.getCurrentState().getWarehouseMgr(); Warehouse warehouse = warehouseManager.getBackgroundWarehouse(); long workerGroupId = warehouseManager.selectWorkerGroupByWarehouseId(warehouse.getId()) .orElse(StarOSAgent.DEFAULT_WORKER_GROUP_ID); List<String> workerAddresses = GlobalStateMgr.getCurrentState().getStarOSAgent().listWorkerGroupIpPort(workerGroupId); // filter backend List<Backend> backends = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackends(); for (Backend backend : backends) { if (backend.getStarletPort() != 0) { String workerAddr = NetUtils.getHostPortInAccessibleFormat(backend.getHost(), backend.getStarletPort()); workerAddresses.remove(workerAddr); } } // filter compute node List<ComputeNode> computeNodes = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getComputeNodes(); for (ComputeNode computeNode : computeNodes) { if (computeNode.getStarletPort() != 0) { String workerAddr = NetUtils.getHostPortInAccessibleFormat(computeNode.getHost(), computeNode.getStarletPort()); workerAddresses.remove(workerAddr); } } for (String unusedWorkerAddress : workerAddresses) { GlobalStateMgr.getCurrentState().getStarOSAgent().removeWorker(unusedWorkerAddress, workerGroupId); LOG.info("unused worker {} removed from star mgr", unusedWorkerAddress); cnt++; } } catch (Exception e) { LOG.warn("fail to delete unused worker, {}", e); } return cnt; }
// Two addresses are owned by live nodes (host1:888 backend, host3:999
// compute node); the other two (host0:777, host5:1000) must be removed.
@Test
public void testDeleteUnusedWorker() throws Exception {
    new MockUp<SystemInfoService>() {
        @Mock
        public List<Backend> getBackends() {
            List<Backend> backends = new ArrayList<>();
            Backend be1 = new Backend(10001, "host1", 1001);
            be1.setStarletPort(888);
            backends.add(be1);
            Backend be2 = new Backend(10002, "host2", 1002);
            backends.add(be2);
            return backends;
        }

        @Mock
        public List<ComputeNode> getComputeNodes() {
            List<ComputeNode> computeNodes = new ArrayList<>();
            ComputeNode cn1 = new ComputeNode(10003, "host3", 1003);
            cn1.setStarletPort(999);
            computeNodes.add(cn1);
            ComputeNode cn2 = new ComputeNode(10004, "host4", 1004);
            computeNodes.add(cn2);
            return computeNodes;
        }
    };
    new MockUp<StarOSAgent>() {
        @Mock
        public List<String> listWorkerGroupIpPort(long workerGroupId) {
            List<String> addresses = new ArrayList<>();
            addresses.add("host0:777");
            addresses.add("host1:888");
            addresses.add("host3:999");
            addresses.add("host5:1000");
            return addresses;
        }
    };
    Assert.assertEquals(2, starMgrMetaSyncer.deleteUnusedWorker());
}
/**
 * Adds {@code interval} units of {@code unit} to a TIME value.
 * Arithmetic is performed in nanoseconds on a LocalTime, so the result
 * wraps within a single day; any null argument yields null.
 */
@Udf(description = "Adds a duration to a time")
public Time timeAdd(
        @UdfParameter(description = "A unit of time, for example SECOND or HOUR") final TimeUnit unit,
        @UdfParameter(description = "An integer number of intervals to add") final Integer interval,
        @UdfParameter(description = "A TIME value.") final Time time
) {
    if (unit == null || interval == null || time == null) {
        return null;
    }
    // TIME millis -> nanos of day, add the interval, then convert back to millis.
    final long nanoResult = LocalTime.ofNanoOfDay(time.getTime() * 1000_000)
            .plus(unit.toNanos(interval), ChronoUnit.NANOS)
            .toNanoOfDay();
    return new Time(TimeUnit.NANOSECONDS.toMillis(nanoResult));
}
// A null TIME argument must propagate to a null result.
@Test
public void handleNullTime() {
    assertNull(udf.timeAdd(TimeUnit.MILLISECONDS, -300, null));
}
/**
 * Copies a local file into the blob store at the location derived from the
 * job id and blob key, creating the base path when missing and overwriting
 * any existing blob.
 */
@Override
public boolean put(File localFile, JobID jobId, BlobKey blobKey) throws IOException {
    createBasePathIfNeeded();
    String toBlobPath = BlobUtils.getStorageLocationPath(basePath, jobId, blobKey);
    try (FSDataOutputStream os = fileSystem.create(new Path(toBlobPath), FileSystem.WriteMode.OVERWRITE)) {
        LOG.debug("Copying from {} to {}.", localFile, toBlobPath);
        Files.copy(localFile, os);

        // Flush to persistent storage before reporting success.
        os.sync();
    }
    return true;
}
// put() must create the base directory on demand and copy the file content.
@Test
void testSuccessfulPut() throws IOException {
    final Path temporaryFile = createTemporaryFileWithContent("put");
    final JobID jobId = new JobID();
    final BlobKey blobKey = createPermanentBlobKeyFromFile(temporaryFile);

    // Blob store operations are creating the base directory on-the-fly
    assertThat(getBlobDirectoryPath()).doesNotExist();

    final boolean successfullyWritten = testInstance.put(temporaryFile.toFile(), jobId, blobKey);
    assertThat(successfullyWritten).isTrue();

    assertThat(getPath(jobId)).isDirectory().exists();
    assertThat(getPath(jobId, blobKey)).isNotEmptyFile().hasSameTextualContentAs(temporaryFile);
}
/**
 * Returns a transform computing the multiset difference of its input with
 * {@code rightCollection}: each right occurrence removes one matching left
 * occurrence, so remaining duplicates are preserved.
 */
public static <T> PTransform<PCollection<T>, PCollection<T>> exceptAll(
        PCollection<T> rightCollection) {
    checkNotNull(rightCollection, "rightCollection argument is null");
    return new SetImpl<>(rightCollection, exceptAll());
}
// exceptAll keeps duplicates that remain after per-occurrence removal,
// for both plain elements and Row collections (schema preserved).
@Test
@Category(NeedsRunner.class)
public void testExceptAll() {
    PAssert.that(first.apply("strings", Sets.exceptAll(second)))
            .containsInAnyOrder("a", "g", "g", "h", "h");

    PCollection<Row> results = firstRows.apply("rows", Sets.exceptAll(secondRows));
    PAssert.that(results).containsInAnyOrder(toRows("a", "g", "g", "h", "h"));
    assertEquals(schema, results.getSchema());
    p.run();
}
/**
 * Validates that a CSVFormat is usable for parsing: a non-empty explicit
 * header with unique, case-sensitive, non-blank column names, and no
 * header-record skipping (the header is already accounted for).
 */
static void validateCsvFormat(CSVFormat format) {
    String[] header =
            checkArgumentNotNull(format.getHeader(), "Illegal %s: header is required", CSVFormat.class);

    checkArgument(header.length > 0, "Illegal %s: header cannot be empty", CSVFormat.class);

    checkArgument(
            !format.getAllowMissingColumnNames(),
            "Illegal %s: cannot allow missing column names",
            CSVFormat.class);

    checkArgument(
            !format.getIgnoreHeaderCase(), "Illegal %s: cannot ignore header case", CSVFormat.class);

    checkArgument(
            !format.getAllowDuplicateHeaderNames(),
            "Illegal %s: cannot allow duplicate header names",
            CSVFormat.class);

    // Every column name in the header must be present and non-empty.
    for (String columnName : header) {
        checkArgument(
                !Strings.isNullOrEmpty(columnName), "Illegal %s: column name is required", CSVFormat.class);
    }

    checkArgument(
            !format.getSkipHeaderRecord(),
            "Illegal %s: cannot skip header record because the header is already accounted for",
            CSVFormat.class);
}
// A format with skipHeaderRecord=true must be rejected with a clear message.
@Test
public void givenCSVFormatThatSkipsHeaderRecord_throwsException() {
    CSVFormat format = csvFormatWithHeader().withSkipHeaderRecord(true);
    String gotMessage =
            assertThrows(
                    IllegalArgumentException.class, () -> CsvIOParseHelpers.validateCsvFormat(format))
                    .getMessage();
    assertEquals(
            "Illegal class org.apache.commons.csv.CSVFormat: cannot skip header record because the header is already accounted for",
            gotMessage);
}
/**
 * Case-insensitively searches for a database name in the given array.
 *
 * @param databaseNames the names to search; may be null
 * @param name          the name to look for; may be null
 * @return the index of the first case-insensitive match, or -1 when absent
 *         (including when either argument is null)
 */
public static int indexOfName( String[] databaseNames, String name ) {
    if ( databaseNames == null || name == null ) {
        return -1;
    }
    for ( int index = 0; index < databaseNames.length; index++ ) {
        if ( name.equalsIgnoreCase( databaseNames[ index ] ) ) {
            return index;
        }
    }
    return -1;
}
// Matching is case-insensitive: "B" must be found at index 1.
@Test
public void indexOfName_NonExactMatch() {
    assertEquals( 1, DatabaseMeta.indexOfName( new String[] { "a", "b", "c" }, "B" ) );
}
// Discards every cached entry.
public void clear() {
    cacheMap.clear();
}
// clear() must drop all entries and reset the size to zero.
@Test
public void testClear() {
    LruCache<String, SimpleValue> cache = new LruCache<String, SimpleValue>(2);
    String key1 = DEFAULT_KEY + 1;
    String key2 = DEFAULT_KEY + 2;
    SimpleValue value = new SimpleValue(true, true);
    cache.put(key1, value);
    cache.put(key2, value);
    assertEquals(value, cache.getCurrentValue(key1));
    assertEquals(value, cache.getCurrentValue(key2));
    assertEquals(2, cache.size());
    cache.clear();
    assertNull(cache.getCurrentValue(key1));
    assertNull(cache.getCurrentValue(key2));
    assertEquals(0, cache.size());
}
/**
 * Decodes the value at the given offset as a 16-bit SFLOAT or 32-bit FLOAT,
 * mapping the reserved special bit patterns to positive/negative infinity
 * and NaN before delegating to bytesToFloat. Returns null when the
 * requested type does not fit in the data or the format is unknown.
 *
 * NOTE(review): the special-value patterns match the IEEE-11073 medical
 * float encoding used by Bluetooth characteristics — confirm.
 */
@Nullable
public Float getFloatValue(@FloatFormat final int formatType, @IntRange(from = 0) final int offset) {
    if ((offset + getTypeLen(formatType)) > size())
        return null;

    switch (formatType) {
        case FORMAT_SFLOAT -> {
            // Reserved 12-bit mantissa values (little-endian bytes):
            // 0x07FE = +INFINITY; 0x07FF, 0x0800, 0x0801 = NaN/NRes/reserved;
            // 0x0802 = -INFINITY.
            if (mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFE)
                return Float.POSITIVE_INFINITY;
            if ((mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFF)
                    || (mValue[offset + 1] == 0x08 && mValue[offset] == 0x00)
                    || (mValue[offset + 1] == 0x08 && mValue[offset] == 0x01))
                return Float.NaN;
            if (mValue[offset + 1] == 0x08 && mValue[offset] == 0x02)
                return Float.NEGATIVE_INFINITY;
            return bytesToFloat(mValue[offset], mValue[offset + 1]);
        }
        case FORMAT_FLOAT -> {
            // Reserved 24-bit mantissa values; the exponent byte must be 0x00.
            if (mValue[offset + 3] == 0x00) {
                if (mValue[offset + 2] == 0x7F && mValue[offset + 1] == (byte) 0xFF) {
                    if (mValue[offset] == (byte) 0xFE)
                        return Float.POSITIVE_INFINITY;
                    if (mValue[offset] == (byte) 0xFF)
                        return Float.NaN;
                } else if (mValue[offset + 2] == (byte) 0x80 && mValue[offset + 1] == 0x00) {
                    if (mValue[offset] == 0x00 || mValue[offset] == 0x01)
                        return Float.NaN;
                    if (mValue[offset] == 0x02)
                        return Float.NEGATIVE_INFINITY;
                }
            }
            return bytesToFloat(mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]);
        }
    }
    return null;
}
// Round-trip: writing FLOAT negative infinity must read back as such.
@Test
public void setValue_FLOAT_negativeInfinity() {
    final MutableData data = new MutableData(new byte[4]);
    data.setValue(Float.NEGATIVE_INFINITY, Data.FORMAT_FLOAT, 0);
    final float value = data.getFloatValue(Data.FORMAT_FLOAT, 0);
    assertEquals(Float.NEGATIVE_INFINITY, value, 0.00);
}
/**
 * Asynchronously persists cursor info at PREFIX/ledgerName/cursorName.
 * A null {@code stat} means create (expected version -1); otherwise the
 * write is conditional on the stat's version. Callbacks are dispatched on
 * the ledger's ordered executor thread.
 */
@Override
public void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedCursorInfo info, Stat stat,
                                  MetaStoreCallback<Void> callback) {
    if (log.isDebugEnabled()) {
        log.debug("[{}] [{}] Updating cursor info ledgerId={} mark-delete={}:{} lastActive={}", ledgerName,
                cursorName, info.getCursorsLedgerId(), info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId(),
                info.getLastActive());
    }

    String path = PREFIX + ledgerName + "/" + cursorName;
    byte[] content = compressCursorInfo(info);

    long expectedVersion;

    if (stat != null) {
        // Existing node: conditional update on the known version.
        // (Log messages were previously swapped between the two branches.)
        expectedVersion = stat.getVersion();
        if (log.isDebugEnabled()) {
            log.debug("[{}] Updating consumer {} on meta-data store with {}", ledgerName, cursorName, info);
        }
    } else {
        // No stat: create the node (expected version -1).
        expectedVersion = -1;
        if (log.isDebugEnabled()) {
            log.debug("[{}] Creating consumer {} on meta-data store with {}", ledgerName, cursorName, info);
        }
    }
    store.put(path, content, Optional.of(expectedVersion))
            .thenAcceptAsync(optStat -> callback.operationComplete(null, optStat), executor
                    .chooseThread(ledgerName))
            .exceptionally(ex -> {
                // Failures are reported on the same ordered executor lane.
                executor.executeOrdered(ledgerName, () -> callback.operationFailed(getException(ex)));
                return null;
            });
}
// Updating a cursor node: the first write succeeds; a second,
// version-conditioned write is forced to fail via fault injection and
// must report the failure through operationFailed.
@Test(timeOut = 20000)
void updatingCursorNode() throws Exception {
    MetaStore store = new MetaStoreImpl(metadataStore, executor);

    metadataStore.put("/managed-ledgers/my_test", "".getBytes(), Optional.empty()).join();

    final CompletableFuture<Void> promise = new CompletableFuture<>();
    ManagedCursorInfo info = ManagedCursorInfo.newBuilder().setCursorsLedgerId(1).build();
    store.asyncUpdateCursorInfo("my_test", "c1", info, null, new MetaStoreCallback<Void>() {
        public void operationFailed(MetaStoreException e) {
            promise.completeExceptionally(e);
        }

        public void operationComplete(Void result, Stat version) {
            // Update again using the version
            metadataStore.failConditional(new MetadataStoreException("error"), (op, path) ->
                    op == FaultInjectionMetadataStore.OperationType.PUT
                            && path.contains("my_test")
                            && path.contains("c1")
            );
            ManagedCursorInfo info = ManagedCursorInfo.newBuilder().setCursorsLedgerId(2).build();
            store.asyncUpdateCursorInfo("my_test", "c1", info, version, new MetaStoreCallback<Void>() {
                public void operationFailed(MetaStoreException e) {
                    // ok
                    promise.complete(null);
                }

                @Override
                public void operationComplete(Void result, Stat version) {
                    promise.completeExceptionally(new Exception("should have failed"));
                }
            });
        }
    });
    promise.get();
}
// Serializes a metadata update to JSON (non-pretty by default).
public static String toJson(MetadataUpdate metadataUpdate) {
    return toJson(metadataUpdate, false);
}
// A tag ref with all optional retention fields null serializes without them.
@Test
public void testSetSnapshotRefTagToJsonDefault() {
    long snapshotId = 1L;
    SnapshotRefType type = SnapshotRefType.TAG;
    String refName = "hank";
    Integer minSnapshotsToKeep = null;
    Long maxSnapshotAgeMs = null;
    Long maxRefAgeMs = null;
    String expected =
            "{\"action\":\"set-snapshot-ref\",\"ref-name\":\"hank\",\"snapshot-id\":1,\"type\":\"tag\"}";
    MetadataUpdate update = new MetadataUpdate.SetSnapshotRef(
            refName, snapshotId, type, minSnapshotsToKeep, maxSnapshotAgeMs, maxRefAgeMs);
    String actual = MetadataUpdateParser.toJson(update);
    assertThat(actual)
            .as(
                "Set snapshot ref should serialize to the correct JSON value for tag with default fields")
            .isEqualTo(expected);
}
// Runs the set inside its own begin/commit transaction scope.
@Override
public void set(K key, V value) {
    begin();
    transactionalMap.set(key, value);
    commit();
}
// set() must write through so a plain map get sees the value.
@Test
public void testSet() {
    adapter.set(23, "test");
    assertEquals("test", map.get(23));
}
// Returns the configured capacity of this transmitter's buffer.
public int capacity() {
    return capacity;
}
// capacity() must report the buffer capacity it was constructed with.
@Test
void shouldCalculateCapacityForBuffer() {
    assertThat(broadcastTransmitter.capacity(), is(CAPACITY));
}
public static String[] split(String splittee, String splitChar, boolean truncate) { //NOSONAR if (splittee == null || splitChar == null) { return new String[0]; } final String EMPTY_ELEMENT = ""; int spot; final int splitLength = splitChar.length(); final String adjacentSplit = splitChar + splitChar; final int adjacentSplitLength = adjacentSplit.length(); if (truncate) { while ((spot = splittee.indexOf(adjacentSplit)) != -1) { splittee = splittee.substring(0, spot + splitLength) + splittee.substring(spot + adjacentSplitLength, splittee.length()); } if (splittee.startsWith(splitChar)) { splittee = splittee.substring(splitLength); } if (splittee.endsWith(splitChar)) { // Remove trailing splitter splittee = splittee.substring(0, splittee.length() - splitLength); } } List<String> returns = new ArrayList<>(); final int length = splittee.length(); // This is the new length int start = 0; spot = 0; while (start < length && (spot = splittee.indexOf(splitChar, start)) > -1) { if (spot > 0) { returns.add(splittee.substring(start, spot)); } else { returns.add(EMPTY_ELEMENT); } start = spot + splitLength; } if (start < length) { returns.add(splittee.substring(start)); } else if (spot == length - splitLength) {// Found splitChar at end of line returns.add(EMPTY_ELEMENT); } return returns.toArray(new String[returns.size()]); }
// Exercises the (String, String, String) overload: a null default value
// means empty tokens are dropped entirely.
@Test
public void testSplitStringStringNullWithSingleDelimiter() {
    assertThat(JOrphanUtils.split("a,bc,,", ",", null), CoreMatchers.equalTo(new String[]{"a", "bc"}));
}
/**
 * Removes expired archive files, optionally enforces the total-size cap,
 * and finally deletes any directories left empty.
 */
public void clean(final Date now) {
    List<String> files = this.findFiles();
    List<String> expiredFiles = this.filterFiles(files, this.createExpiredFileFilter(now));
    for (String f : expiredFiles) {
        this.delete(new File(f));
    }

    // Only enforce the cap when it is bounded and positive.
    if (this.totalSizeCap != CoreConstants.UNBOUNDED_TOTAL_SIZE_CAP && this.totalSizeCap > 0) {
        this.capTotalSize(files);
    }

    List<String> emptyDirs = this.findEmptyDirs();
    for (String dir : emptyDirs) {
        this.delete(new File(dir));
    }
}
// After cleaning, date-derived parent directories left empty must be deleted.
@Test
public void removesParentDirWhenEmpty() throws IOException {
    File[] emptyDirs = new File[] {
        tmpDir.newFolder("empty_2018", "08"),
        tmpDir.newFolder("empty_2018", "12"),
        tmpDir.newFolder("empty_2019", "01"),
    };
    for (File d : emptyDirs) {
        d.deleteOnExit();
    }
    remover = mockArchiveRemover(tmpDir.getRoot().getAbsolutePath() + File.separator
            + "empty_%d{yyyy/MM}" + File.separator + "%d.log", fileProvider);
    remover.clean(EXPIRY);
    for (File d : emptyDirs) {
        verify(fileProvider).deleteFile(d);
    }
}
/**
 * Computes the step ids belonging to the runtime DAG of the given workflow
 * instance. For fresh runs, instances without a run config, or policies
 * other than RESTART_FROM_SPECIFIC this is every step in the runtime DAG;
 * otherwise the DAG is traversed for the restart and the traversal's child
 * map keys are returned.
 */
public static Set<String> computeStepIdsInRuntimeDag(
        @NotNull WorkflowInstance instance, @NotNull Set<String> knownStepIds) {
    if (instance.isFreshRun()
            || instance.getRunConfig() == null
            || instance.getRunConfig().getPolicy() != RunPolicy.RESTART_FROM_SPECIFIC) {
        return instance.getRuntimeDag().keySet();
    }
    // childMap is populated by the traversal; its key set is the answer.
    Map<String, Set<String>> childMap = new HashMap<>();
    prepareDagForTraversal(
            instance.getRuntimeDag(),
            knownStepIds,
            instance.getRunConfig().getRestartConfig(),
            new HashMap<>(),
            childMap);
    return childMap.keySet();
}
// Restarting from job_9 (with job_3 known) must resolve the expected set of
// runtime-DAG step ids.
@Test
public void testComputeStepIdsInRuntimeDagForStepRestartAnotherPath() {
    WorkflowInstance instance = new WorkflowInstance();
    instance.setRunConfig(new RunConfig());
    instance.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_SPECIFIC);
    instance
        .getRunConfig()
        .setRestartConfig(
            RestartConfig.builder().addRestartNode("sample-dag-test-1", 1, "job_9").build());
    instance.setRuntimeDag(runtimeDag1);
    Set<String> actual =
        DagHelper.computeStepIdsInRuntimeDag(instance, Collections.singleton("job_3"));
    Assert.assertEquals(
        "[job_1, job_3, job_11, job_2, job_10, job_5, job_4, job_7, job_6, job_9, job_8]",
        actual.toString());
}
/**
 * Builds the thread metrics: one count gauge per Thread.State plus
 * live/daemon/peak/total-started counts and deadlock information from the
 * deadlock detector. The returned map is unmodifiable.
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> gauges = new HashMap<>();

    // One "<state>.count" gauge per thread state.
    for (final Thread.State state : Thread.State.values()) {
        gauges.put(name(state.toString().toLowerCase(), "count"),
                (Gauge<Object>) () -> getThreadCount(state));
    }

    gauges.put("count", (Gauge<Integer>) threads::getThreadCount);
    gauges.put("daemon.count", (Gauge<Integer>) threads::getDaemonThreadCount);
    gauges.put("peak.count", (Gauge<Integer>) threads::getPeakThreadCount);
    gauges.put("total_started.count", (Gauge<Long>) threads::getTotalStartedThreadCount);
    gauges.put("deadlock.count", (Gauge<Integer>) () -> deadlockDetector.getDeadlockedThreads().size());
    gauges.put("deadlocks", (Gauge<Set<String>>) deadlockDetector::getDeadlockedThreads);

    return Collections.unmodifiableMap(gauges);
}
// One gauge per Thread.State, each reporting the count for that state.
@Test
public void hasAGaugeForEachThreadState() {
    assertThat(((Gauge<?>) gauges.getMetrics().get("new.count")).getValue())
            .isEqualTo(1);
    assertThat(((Gauge<?>) gauges.getMetrics().get("runnable.count")).getValue())
            .isEqualTo(1);
    assertThat(((Gauge<?>) gauges.getMetrics().get("blocked.count")).getValue())
            .isEqualTo(1);
    assertThat(((Gauge<?>) gauges.getMetrics().get("waiting.count")).getValue())
            .isEqualTo(1);
    assertThat(((Gauge<?>) gauges.getMetrics().get("timed_waiting.count")).getValue())
            .isEqualTo(1);
    assertThat(((Gauge<?>) gauges.getMetrics().get("terminated.count")).getValue())
            .isEqualTo(1);
}
/**
 * Returns the column value with the configured mask algorithm applied when
 * the column belongs to a masked table/column; otherwise the raw merged
 * value is returned unchanged.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
    Optional<ColumnProjection> columnProjection = selectStatementContext.getProjectionsContext().findColumnProjection(columnIndex);
    if (!columnProjection.isPresent()) {
        return mergedResult.getValue(columnIndex, type);
    }
    Optional<MaskTable> maskTable = maskRule.findMaskTable(columnProjection.get().getOriginalTable().getValue());
    if (!maskTable.isPresent()) {
        return mergedResult.getValue(columnIndex, type);
    }
    Optional<MaskAlgorithm> maskAlgorithm = maskTable.get().findAlgorithm(columnProjection.get().getName().getValue());
    if (!maskAlgorithm.isPresent()) {
        return mergedResult.getValue(columnIndex, type);
    }
    // Masking operates on the raw Object value; null stays null.
    Object originalValue = mergedResult.getValue(columnIndex, Object.class);
    return null == originalValue ? null : maskAlgorithm.get().mask(originalValue);
}
// Without a column projection the raw merged value passes through unmasked.
@Test
void assertGetValueWithoutColumnProjection() throws SQLException {
    when(mergedResult.getValue(1, String.class)).thenReturn("VALUE");
    MaskRule maskRule = mock(MaskRule.class);
    assertThat(new MaskMergedResult(maskRule, mockSelectStatementContextWithoutColumnProjection(), mergedResult).getValue(1, String.class),
        is("VALUE"));
}
// Persists the given routes into the route store.
@Override
public void update(Collection<Route> routes) {
    log.debug("Received update {}", routes);
    routeStore.updateRoutes(routes);
}
// Routes added before their next hops resolve must emit no events; once the
// host appears, ROUTE_ADDED events fire for the now-resolved routes.
@Test
public void testAsyncRouteAdd() {
    Route route = new Route(Route.Source.STATIC, V4_PREFIX1, V4_NEXT_HOP1);
    // 2nd route for the same nexthop
    Route route2 = new Route(Route.Source.STATIC, V4_PREFIX2, V4_NEXT_HOP2);
    // 3rd route with no valid nexthop
    Route route3 = new Route(Route.Source.STATIC, V6_PREFIX1, V6_NEXT_HOP1);

    // Host service will reply with no hosts when asked
    reset(hostService);
    expect(hostService.getHostsByIp(anyObject(IpAddress.class))).andReturn(
        Collections.emptySet()).anyTimes();
    hostService.startMonitoringIp(V4_NEXT_HOP1);
    hostService.startMonitoringIp(V4_NEXT_HOP2);
    hostService.startMonitoringIp(V6_NEXT_HOP1);
    expectLastCall().anyTimes();
    replay(hostService);

    // Initially when we add the route, no route event will be sent because
    // the host is not known
    replay(routeListener);
    routeManager.update(Lists.newArrayList(route, route2, route3));
    verify(routeListener);

    // Now when we send the event, we expect the FIB update to be sent
    reset(routeListener);
    ResolvedRoute resolvedRoute = new ResolvedRoute(route, MAC1);
    routeListener.event(event(RouteEvent.Type.ROUTE_ADDED, resolvedRoute,
        null, Sets.newHashSet(resolvedRoute), null));
    ResolvedRoute resolvedRoute2 = new ResolvedRoute(route2, MAC1);
    routeListener.event(event(RouteEvent.Type.ROUTE_ADDED, resolvedRoute2,
        null, Sets.newHashSet(resolvedRoute2), null));
    replay(routeListener);

    Host host = createHost(MAC1, Lists.newArrayList(V4_NEXT_HOP1, V4_NEXT_HOP2));

    // Set up the host service with a host
    reset(hostService);
    expect(hostService.getHostsByIp(V4_NEXT_HOP1)).andReturn(
        Collections.singleton(host)).anyTimes();
    hostService.startMonitoringIp(V4_NEXT_HOP1);
    expect(hostService.getHostsByIp(V4_NEXT_HOP2)).andReturn(
        Collections.singleton(host)).anyTimes();
    hostService.startMonitoringIp(V4_NEXT_HOP2);
    expectLastCall().anyTimes();
    replay(hostService);

    // Send in the host event
    hostListener.event(new HostEvent(HostEvent.Type.HOST_ADDED, host));

    verify(routeListener);
}
/**
 * Renders a throwable and its stack trace as a multi-line string, mirroring
 * the layout of {@link Throwable#printStackTrace()}: the throwable's own
 * {@code toString()} followed by one tab-indented "at ..." line per frame.
 *
 * @param e the throwable to render; must not be null
 * @return the formatted stack trace text
 */
public static String toString(Throwable e) {
    StringBuilder buf = new StringBuilder(1024);
    buf.append(e.toString()).append("\n");
    StackTraceElement[] frames = e.getStackTrace();
    // Defensive: some custom throwables may return a null trace array.
    if (frames != null) {
        for (StackTraceElement frame : frames) {
            buf.append("\tat ").append(frame).append("\n");
        }
    }
    return buf.toString();
}
/**
 * The rendered text must contain the exception plus stack frames: every
 * frame line carries "at ", so "at" must occur more than once.
 */
@Test
public void testToString() throws Exception {
    SofaRpcException exception = new SofaRpcException(RpcErrorType.SERVER_BUSY, "111");
    String string = ExceptionUtils.toString(exception);
    Assert.assertNotNull(string);
    // Count occurrences of "at" across the rendered trace.
    Pattern pattern = Pattern.compile("at");
    Matcher matcher = pattern.matcher(string);
    int count = 0;
    while (matcher.find()) {
        count++;
    }
    Assert.assertTrue(count > 1);
}
/**
 * Appends the percent-escaped form of {@code c} to {@code sb}: a '%'
 * followed by the character's code point in upper-case hex, left-padded
 * with '0' to at least two digits.
 *
 * @param c  the character to escape
 * @param sb the builder to append to
 * @return the same builder, for chaining
 */
static StringBuilder escapeChar(char c, StringBuilder sb) {
    String hex = Integer.toHexString(c).toUpperCase();
    sb.append('%');
    // A single hex digit means c < 0x10; pad to the two-digit form.
    if (hex.length() < 2) {
        sb.append('0');
    }
    return sb.append(hex);
}
/**
 * Every code point 0..128 must escape to '%' followed by exactly two
 * upper-case, zero-padded hex digits.
 */
@Test
void testEscapeChar() {
    for (int code = 0; code <= 128; code++) {
        String escaped =
                PartitionPathUtils.escapeChar((char) code, new StringBuilder()).toString();
        assertThat(escaped).isEqualTo(String.format("%%%02X", code));
    }
}
/**
 * Returns whether the raw file system backing {@code path} is Presto's
 * S3 file system implementation.
 *
 * @throws PrestoException with HIVE_FILESYSTEM_ERROR if the file system
 *         for the path cannot be obtained
 */
public static boolean isS3FileSystem(HdfsContext context, HdfsEnvironment hdfsEnvironment, Path path) {
    try {
        // Unwrap any delegating wrappers before checking the concrete type.
        return PrestoS3FileSystem.class.isInstance(
                getRawFileSystem(hdfsEnvironment.getFileSystem(context, path)));
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_FILESYSTEM_ERROR, "Failed checking path: " + path, e);
    }
}
/**
 * Paths with the s3 scheme resolve to the Presto S3 file system; plain
 * local paths do not.
 */
@Test
public void testIsS3FileSystem() {
    HdfsEnvironment hdfsEnvironment = createTestHdfsEnvironment(new HiveClientConfig(), new MetastoreClientConfig());
    assertTrue(isS3FileSystem(CONTEXT, hdfsEnvironment, new Path("s3://test-bucket/test-folder")));
    assertFalse(isS3FileSystem(CONTEXT, hdfsEnvironment, new Path("/test-dir/test-folder")));
}
/**
 * Unnamed overload: delegates to the internal transform with an empty name.
 */
@Override
public <VR> KTable<K, VR> transformValues(final ValueTransformerWithKeySupplier<? super K, ? super V, ? extends VR> transformerSupplier,
                                          final String... stateStoreNames) {
    // null second argument — presumably "no materialization"; confirm against doTransformValues.
    return doTransformValues(transformerSupplier, null, NamedInternal.empty(), stateStoreNames);
}
// A null transformer supplier must be rejected eagerly with a NullPointerException.
@Test
public void shouldThrowNullPointerOnTransformValuesWithKeyWhenTransformerSupplierIsNull() {
    assertThrows(NullPointerException.class, () -> table.transformValues(null));
}
/**
 * Looks up the value stored for {@code key} in the flat key/value state
 * array (keys at even slots, values at the following odd slots).
 *
 * @param key the key to look up; null yields null
 * @return the stored value, or null when the key is null or absent
 */
@Nullable
protected V get(K key) {
    if (key == null) {
        return null;
    }
    Object[] entries = state();
    int slot = indexOfExistingKey(entries, key);
    if (slot == -1) {
        return null;
    }
    return (V) entries[slot + 1];
}
// Reading a key that was never stored returns null rather than throwing.
@Test
void get_null_if_not_set() {
    assertThat(extra.get("1")).isNull();
}
/**
 * Projects {@code record}, written with {@code source}, onto {@code target}.
 *
 * <p>An optional source may only project onto a required target when the
 * target declares a default value (substituted for a null record); without
 * one, projection fails even for non-null records. In all other schema
 * combinations a null record simply projects to null.
 *
 * @throws SchemaProjectorException if the schemas are incompatible, or the
 *         optional-to-required case lacks a target default value
 */
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
    checkMaybeCompatible(source, target);
    boolean optionalToRequired = source.isOptional() && !target.isOptional();
    if (!optionalToRequired) {
        return record == null ? null : projectRequiredSchema(source, record, target);
    }
    if (target.defaultValue() == null) {
        throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
    }
    return record == null ? target.defaultValue() : projectRequiredSchema(source, record, target);
}
/**
 * Projecting onto a target schema with an extra field succeeds when that
 * field declares a default value (the default is filled in), and fails
 * with DataException when it does not.
 */
@Test
public void testStructAddField() {
    Schema source = SchemaBuilder.struct()
        .field("field", Schema.INT32_SCHEMA)
        .build();
    Struct sourceStruct = new Struct(source);
    sourceStruct.put("field", 1);

    Schema target = SchemaBuilder.struct()
        .field("field", Schema.INT32_SCHEMA)
        .field("field2", SchemaBuilder.int32().defaultValue(123).build())
        .build();

    Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target);

    // Existing field is carried over; the new field takes its default.
    assertEquals(1, (int) targetStruct.getInt32("field"));
    assertEquals(123, (int) targetStruct.getInt32("field2"));

    Schema incompatibleTargetSchema = SchemaBuilder.struct()
        .field("field", Schema.INT32_SCHEMA)
        .field("field2", Schema.INT32_SCHEMA)
        .build();

    assertThrows(DataException.class,
        () -> SchemaProjector.project(source, sourceStruct, incompatibleTargetSchema),
        "Incompatible schema.");
}
/**
 * Convenience overload of {@code index(keyFunction, valueFunction)} that
 * stores each element itself as the multimap value.
 *
 * @param keyFunction derives the multimap key from an element
 */
public static <K, E> Collector<E, ImmutableListMultimap.Builder<K, E>, ImmutableListMultimap<K, E>> index(Function<? super E, K> keyFunction) {
    return index(keyFunction, e -> e);
}
// index(keyFunction) groups elements by key, keeping each element itself as the value.
@Test
public void index_returns_ListMultimap() {
    ListMultimap<Integer, MyObj> multimap = LIST.stream().collect(index(MyObj::getId));

    assertThat(multimap.size()).isEqualTo(3);
    Map<Integer, Collection<MyObj>> map = multimap.asMap();
    assertThat(map.get(1)).containsOnly(MY_OBJ_1_A);
    assertThat(map.get(2)).containsOnly(MY_OBJ_2_B);
    assertThat(map.get(3)).containsOnly(MY_OBJ_3_C);
}
@Udf(description = "Returns the inverse (arc) tangent of an INT value") public Double atan( @UdfParameter( value = "value", description = "The value to get the inverse tangent of." ) final Integer value ) { return atan(value == null ? null : value.doubleValue()); }
// atan(0) must be 0 for all three numeric overloads (double, int, long).
@Test
public void shouldHandleZero() {
    assertThat(udf.atan(0.0), closeTo(0.0, 0.000000000000001));
    assertThat(udf.atan(0), closeTo(0.0, 0.000000000000001));
    assertThat(udf.atan(0L), closeTo(0.0, 0.000000000000001));
}
/**
 * Computes the set of partition groups to consume next from the Kinesis
 * stream, reconciling the current consumption state with the live shard
 * list: still-consumable existing shards are carried forward, fully
 * consumed or expired shards are dropped, and new shards are added once
 * their parent is gone or exhausted.
 *
 * @param clientId caller identifier (unused here)
 * @param streamConfig stream configuration (unused here)
 * @param partitionGroupConsumptionStatuses current per-group consumption state
 * @param timeoutMillis timeout budget (unused here)
 * @return partition group metadata with the start offset for each group
 */
@Override
public List<PartitionGroupMetadata> computePartitionGroupMetadata(String clientId, StreamConfig streamConfig,
    List<PartitionGroupConsumptionStatus> partitionGroupConsumptionStatuses, int timeoutMillis)
    throws IOException, TimeoutException {
  List<PartitionGroupMetadata> newPartitionGroupMetadataList = new ArrayList<>();
  // Snapshot of live shards keyed by shard id; duplicates keep the first entry.
  Map<String, Shard> shardIdToShardMap = _kinesisConnectionHandler.getShards().stream()
      .collect(Collectors.toMap(Shard::shardId, s -> s, (s1, s2) -> s1));
  Set<String> shardsInCurrent = new HashSet<>();
  Set<String> shardsEnded = new HashSet<>();
  // TODO: Once we start supporting multiple shards in a PartitionGroup,
  //  we need to iterate over all shards to check if any of them have reached end

  // Process existing shards. Add them to new list if still consuming from them
  for (PartitionGroupConsumptionStatus currentPartitionGroupConsumptionStatus : partitionGroupConsumptionStatuses) {
    KinesisPartitionGroupOffset kinesisStartCheckpoint =
        (KinesisPartitionGroupOffset) currentPartitionGroupConsumptionStatus.getStartOffset();
    String shardId = kinesisStartCheckpoint.getShardId();
    shardsInCurrent.add(shardId);
    Shard shard = shardIdToShardMap.get(shardId);
    if (shard == null) { // Shard has expired
      shardsEnded.add(shardId);
      String lastConsumedSequenceID = kinesisStartCheckpoint.getSequenceNumber();
      LOGGER.warn("Kinesis shard with id: {} has expired. Data has been consumed from the shard till sequence number: {}. "
          + "There can be potential data loss.", shardId, lastConsumedSequenceID);
      continue;
    }

    StreamPartitionMsgOffset newStartOffset;
    StreamPartitionMsgOffset currentEndOffset = currentPartitionGroupConsumptionStatus.getEndOffset();
    if (currentEndOffset != null) { // Segment DONE (committing/committed)
      String endingSequenceNumber = shard.sequenceNumberRange().endingSequenceNumber();
      if (endingSequenceNumber != null) { // Shard has ended, check if we're also done consuming it
        if (consumedEndOfShard(currentEndOffset, currentPartitionGroupConsumptionStatus)) {
          shardsEnded.add(shardId);
          continue; // Shard ended and we're done consuming it. Skip
        }
      }
      newStartOffset = currentEndOffset;
    } else { // Segment IN_PROGRESS
      newStartOffset = currentPartitionGroupConsumptionStatus.getStartOffset();
    }
    newPartitionGroupMetadataList.add(
        new PartitionGroupMetadata(currentPartitionGroupConsumptionStatus.getPartitionGroupId(), newStartOffset));
  }

  // Add brand new shards
  for (Map.Entry<String, Shard> entry : shardIdToShardMap.entrySet()) {
    // If shard was already in current list, skip
    String newShardId = entry.getKey();
    if (shardsInCurrent.contains(newShardId)) {
      continue;
    }
    Shard newShard = entry.getValue();
    String parentShardId = newShard.parentShardId();

    // Add the new shard in the following 3 cases:
    // 1. Root shards - Parent shardId will be null. Will find this case when creating new table.
    // 2. Parent expired - Parent shardId will not be part of shardIdToShard map
    // 3. Parent reached EOL and completely consumed.
    if (parentShardId == null || !shardIdToShardMap.containsKey(parentShardId) || shardsEnded.contains(
        parentShardId)) {
      // TODO: Revisit this. Kinesis starts consuming AFTER the start sequence number, and we might miss the first
      //  message.
      StreamPartitionMsgOffset newStartOffset =
          new KinesisPartitionGroupOffset(newShardId, newShard.sequenceNumberRange().startingSequenceNumber());
      int partitionGroupId = getPartitionGroupIdFromShardId(newShardId);
      newPartitionGroupMetadataList.add(new PartitionGroupMetadata(partitionGroupId, newStartOffset));
    }
  }
  return newPartitionGroupMetadataList;
}
/**
 * Parent/child shard transition: shard 1 (the parent) has an ending
 * sequence number and the current group has consumed up to it, so the
 * result should contain a single partition group — presumably the child
 * shard 0's group (id 0) replacing the exhausted parent; confirm against
 * getPartitionGroupIdFromShardId.
 */
@Test
public void getPartitionsGroupInfoChildShardsest() throws Exception {
    List<PartitionGroupConsumptionStatus> currentPartitionGroupMeta = new ArrayList<>();

    Map<String, String> shardToSequenceMap = new HashMap<>();
    shardToSequenceMap.put("1", "1");
    KinesisPartitionGroupOffset kinesisPartitionGroupOffset = new KinesisPartitionGroupOffset("1", "1");

    currentPartitionGroupMeta.add(
        new PartitionGroupConsumptionStatus(0, 1, kinesisPartitionGroupOffset, kinesisPartitionGroupOffset,
            "CONSUMING"));

    ArgumentCaptor<StreamPartitionMsgOffset> checkpointArgs = ArgumentCaptor.forClass(StreamPartitionMsgOffset.class);
    ArgumentCaptor<PartitionGroupConsumptionStatus> partitionGroupMetadataCapture =
        ArgumentCaptor.forClass(PartitionGroupConsumptionStatus.class);
    ArgumentCaptor<Integer> intArguments = ArgumentCaptor.forClass(Integer.class);
    ArgumentCaptor<String> stringCapture = ArgumentCaptor.forClass(String.class);

    // Shard 0 is the child of shard 1; shard 1 has an ending sequence number (closed).
    Shard shard0 = Shard.builder().shardId(SHARD_ID_0).parentShardId(SHARD_ID_1)
        .sequenceNumberRange(SequenceNumberRange.builder().startingSequenceNumber("1").build()).build();
    Shard shard1 = Shard.builder().shardId(SHARD_ID_1).sequenceNumberRange(
        SequenceNumberRange.builder().startingSequenceNumber("1").endingSequenceNumber("1").build()).build();

    when(_kinesisConnectionHandler.getShards()).thenReturn(ImmutableList.of(shard0, shard1));
    when(_streamConsumerFactory.createPartitionGroupConsumer(stringCapture.capture(),
        partitionGroupMetadataCapture.capture())).thenReturn(_partitionGroupConsumer);
    when(_partitionGroupConsumer.fetchMessages(checkpointArgs.capture(), intArguments.capture())).thenReturn(
        new KinesisMessageBatch(new ArrayList<>(), kinesisPartitionGroupOffset, true));

    List<PartitionGroupMetadata> result =
        _kinesisStreamMetadataProvider.computePartitionGroupMetadata(CLIENT_ID, getStreamConfig(),
            currentPartitionGroupMeta, TIMEOUT);

    Assert.assertEquals(result.size(), 1);
    Assert.assertEquals(result.get(0).getPartitionGroupId(), 0);
    Assert.assertEquals(partitionGroupMetadataCapture.getValue().getSequenceNumber(), 1);
}
/**
 * Appends a Boolean to the array, storing JSON null when the value is null.
 *
 * @param bool the value to append; may be null
 */
public void add(Boolean bool) {
    JsonElement element = (bool == null) ? JsonNull.INSTANCE : new JsonPrimitive(bool);
    elements.add(element);
}
/**
 * Primitive ints autobox into JSON numbers in insertion order, and a null
 * boxed Integer serializes as JSON null.
 */
@Test
public void testIntegerPrimitiveAddition() {
    JsonArray jsonArray = new JsonArray();

    int x = 1;
    jsonArray.add(x);

    x = 2;
    jsonArray.add(x);

    x = -3;
    jsonArray.add(x);

    jsonArray.add((Integer) null);

    x = 4;
    jsonArray.add(x);

    x = 0;
    jsonArray.add(x);

    assertThat(jsonArray.toString()).isEqualTo("[1,2,-3,null,4,0]");
}
/**
 * Writes one dated banner followed by a section per non-empty metric
 * family, in the fixed order gauges, counters, histograms, meters, timers.
 * Each section prints the metric name then its values, and empty families
 * are skipped entirely. Output is flushed once at the end.
 */
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) {
    // Timestamp comes from the injected clock so output is testable.
    final String dateTime = dateFormat.format(new Date(clock.getTime()));
    printWithBanner(dateTime, '=');
    output.println();
    if (!gauges.isEmpty()) {
        printWithBanner("-- Gauges", '-');
        for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
            output.println(entry.getKey());
            printGauge(entry.getValue());
        }
        output.println();
    }
    if (!counters.isEmpty()) {
        printWithBanner("-- Counters", '-');
        for (Map.Entry<String, Counter> entry : counters.entrySet()) {
            output.println(entry.getKey());
            // Note: counters pass the whole entry, unlike the other families.
            printCounter(entry);
        }
        output.println();
    }
    if (!histograms.isEmpty()) {
        printWithBanner("-- Histograms", '-');
        for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
            output.println(entry.getKey());
            printHistogram(entry.getValue());
        }
        output.println();
    }
    if (!meters.isEmpty()) {
        printWithBanner("-- Meters", '-');
        for (Map.Entry<String, Meter> entry : meters.entrySet()) {
            output.println(entry.getKey());
            printMeter(entry.getValue());
        }
        output.println();
    }
    if (!timers.isEmpty()) {
        printWithBanner("-- Timers", '-');
        for (Map.Entry<String, Timer> entry : timers.entrySet()) {
            output.println(entry.getKey());
            printTimer(entry.getValue());
        }
        output.println();
    }
    output.println();
    output.flush();
}
// A single counter renders under the "-- Counters" banner with its count line.
@Test
public void reportsCounterValues() throws Exception {
    final Counter counter = mock(Counter.class);
    when(counter.getCount()).thenReturn(100L);

    reporter.report(map(), map("test.counter", counter), map(), map(), map());

    assertThat(consoleOutput())
        .isEqualTo(lines(
            dateHeader,
            "",
            "-- Counters --------------------------------------------------------------------",
            "test.counter",
            "             count = 100",
            "",
            ""
        ));
}
/**
 * Maps an AWS SDK client exception to a BackgroundException. Service errors
 * are translated by HTTP status and AWS error code; anything unmatched is
 * delegated to the generic HTTP response mapper, and non-service client
 * failures are wrapped with their message.
 *
 * NOTE(review): the outer switch has no break after SC_BAD_REQUEST, so a
 * 400 whose error code is not matched falls through into the SC_FORBIDDEN
 * error-code checks — confirm this fall-through is intentional.
 */
@Override
public BackgroundException map(final AmazonClientException e) {
    final StringBuilder buffer = new StringBuilder();
    if(e instanceof AmazonServiceException) {
        final AmazonServiceException failure = (AmazonServiceException) e;
        this.append(buffer, failure.getErrorMessage());
        if(null != failure.getErrorCode()) {
            switch(failure.getStatusCode()) {
                case HttpStatus.SC_BAD_REQUEST:
                    switch(failure.getErrorCode()) {
                        case "Throttling":
                            return new RetriableAccessDeniedException(buffer.toString(), e);
                        case "AccessDeniedException":
                            return new AccessDeniedException(buffer.toString(), e);
                        case "UnrecognizedClientException":
                            return new LoginFailureException(buffer.toString(), e);
                    }
                case HttpStatus.SC_FORBIDDEN:
                    switch(failure.getErrorCode()) {
                        case "SignatureDoesNotMatch":
                        case "InvalidAccessKeyId":
                        case "InvalidClientTokenId":
                        case "InvalidSecurity":
                        case "MissingClientTokenId":
                        case "MissingAuthenticationToken":
                            return new LoginFailureException(buffer.toString(), e);
                    }
            }
        }
        return new DefaultHttpResponseExceptionMappingService().map(new HttpResponseException(failure.getStatusCode(), buffer.toString()));
    }
    this.append(buffer, e.getMessage());
    return this.wrap(e, buffer);
}
/**
 * For a 403: an error code outside the credential list maps to
 * AccessDeniedException (via the generic HTTP mapper — confirm), while
 * "SignatureDoesNotMatch" maps directly to LoginFailureException.
 */
@Test
public void testAccessFailure() {
    final AmazonServiceException f = new AmazonServiceException("message", null);
    f.setStatusCode(403);
    f.setErrorCode("AccessDenied");
    assertTrue(new AmazonServiceExceptionMappingService().map(f) instanceof AccessDeniedException);
    f.setErrorCode("SignatureDoesNotMatch");
    assertTrue(new AmazonServiceExceptionMappingService().map(f) instanceof LoginFailureException);
}
/**
 * Sets the transaction role of this pool key.
 *
 * @param transactionRole the role to assign
 * @return this key, for fluent chaining
 */
public NettyPoolKey setTransactionRole(TransactionRole transactionRole) {
    this.transactionRole = transactionRole;
    return this;
}
// The fluent setter stores the role so the getter returns the same value.
@Test
public void setTransactionRole() {
    nettyPoolKey.setTransactionRole(TM_ROLE);
    Assertions.assertEquals(nettyPoolKey.getTransactionRole(), TM_ROLE);
}
/**
 * Authenticates the user carried in the request's Basic authentication
 * header, if any.
 *
 * @param request the incoming HTTP request
 * @return the authenticated user, or empty when no credentials header is
 *         present or authentication yields no user
 */
public Optional<UserDto> authenticate(HttpRequest request) {
    // Optional.map already wraps a null mapper result as empty, so the
    // original flatMap(c -> Optional.ofNullable(...)) was redundant.
    return extractCredentialsFromHeader(request)
        .map(credentials -> authenticate(credentials, request));
}
/**
 * A well-formed Basic Authorization header is decoded into Credentials and
 * delegated to the credentials authenticator with the BASIC method; no
 * extra authentication events are emitted.
 */
@Test
public void authenticate_from_basic_http_header() {
    when(request.getHeader(AUTHORIZATION_HEADER)).thenReturn("Basic " + CREDENTIALS_IN_BASE64);
    Credentials credentials = new Credentials(A_LOGIN, A_PASSWORD);
    when(credentialsAuthentication.authenticate(credentials, request, BASIC)).thenReturn(USER);

    underTest.authenticate(request);

    verify(credentialsAuthentication).authenticate(credentials, request, BASIC);
    verifyNoMoreInteractions(authenticationEvent);
}
/**
 * Resolves parameter references inside {@code resolvable}. When the object
 * is itself a {@link ParamScope}, its scope is layered over this resolver
 * before any leaves or child nodes are processed.
 *
 * @param resolvable the object whose fields should be resolved; must not be null
 */
public <T> void resolve(T resolvable) {
    ParamResolver effective = this;
    // isAssignableFrom (rather than instanceof) intentionally NPEs on null input.
    if (ParamScope.class.isAssignableFrom(resolvable.getClass())) {
        effective = ((ParamScope) resolvable).applyOver(effective);
    }
    resolveStringLeaves(resolvable, effective);
    resolveNonStringLeaves(resolvable, effective);
    resolveNodes(resolvable, effective);
}
// An unescaped '#' in a P4 view must surface as a resolution error on the view field.
@Test
public void shouldAddResolutionErrorOnViewIfP4MaterialViewHasAnError() {
    P4MaterialViewConfig p4MaterialViewConfig = new P4MaterialViewConfig("#");
    new ParamResolver(new ParamSubstitutionHandlerFactory(params(param("foo", "pavan"), param("bar", "jj"))), fieldCache).resolve(p4MaterialViewConfig);
    assertThat(p4MaterialViewConfig.errors().on(P4MaterialConfig.VIEW), is("Error when processing params for '#' used in field 'view', # must be followed by a parameter pattern or escaped by another #"));
}
public static Object transform(Object root, DataIterator it, Transform<Object,Object> transform) { DataElement element; // don't transform in place because iterator behavior with replacements (which behave like a remove and an add) while iterating is undefined ArrayList<ToTransform> transformList = new ArrayList<>(); while ((element = it.next()) != null) { transformList.add(new ToTransform(element)); } for (ToTransform toTransform : transformList) { if (toTransform.isRoot()) { root = transform.apply(toTransform._value); } else { toTransform.transform(transform); } } return root; }
// Only elements whose path matches foo/*/id are transformed (+10); ids 1,2,3 become 11,12,13.
@Test
public void testTransformByPredicateAtPath() throws Exception {
    SimpleTestData data = IteratorTestData.createSimpleTestData();

    Builder.create(data.getDataElement(), IterationOrder.PRE_ORDER)
        .filterBy(Predicates.pathMatchesPattern("foo", Wildcard.ANY_ONE, "id"))
        .transform(plusTenTransform);

    assertEquals(data.getValue().getDataList("foo").getDataMap(0).getInteger("id").intValue(), 11);
    assertEquals(data.getValue().getDataList("foo").getDataMap(1).getInteger("id").intValue(), 12);
    assertEquals(data.getValue().getDataList("foo").getDataMap(2).getInteger("id").intValue(), 13);
}
@Override
public Processor<K, Change<V>, KO, SubscriptionWrapper<K>> get() {
    // A fresh processor per call — suppliers must not hand out shared instances.
    return new UnbindChangeProcessor();
}
/**
 * A left-join change from a null FK to a null FK value must still forward
 * exactly one subscription record with a null key and the
 * PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE instruction.
 */
@Test
public void leftJoinShouldPropagateChangeFromNullFKToNullFKValue() {
    final MockInternalNewProcessorContext<String, SubscriptionWrapper<String>> context = new MockInternalNewProcessorContext<>();
    leftJoinProcessor.init(context);
    context.setRecordMetadata("topic", 0, 0);

    final LeftValue leftRecordValue = new LeftValue(null);

    leftJoinProcessor.process(new Record<>(pk, new Change<>(leftRecordValue, leftRecordValue), 0));

    assertThat(context.forwarded().size(), is(1));
    assertThat(
        context.forwarded().get(0).record(),
        is(new Record<>(null, new SubscriptionWrapper<>(hash(leftRecordValue), PROPAGATE_NULL_IF_NO_FK_VAL_AVAILABLE, pk, 0), 0))
    );
}
/**
 * Removes the named view from the backing storage.
 *
 * @param name name of the view to remove
 * @return the removed view, or null when nothing was stored under that name
 */
@Override
View removeView(String name) {
    Object removed = storage().remove(name);
    return (View) removed;
}
// Removing a view that does not exist returns null instead of throwing.
@Test
public void when_removeAbsentValue_then_returnsNull() {
    assertThat(storage.removeView("non-existing")).isNull();
}