focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static void setCallerAddress(Operation op, Address caller) { op.setCallerAddress(caller); }
@Test public void testSetCallerAddress() { Operation operation = new DummyOperation(); setCallerAddress(operation, address); assertEquals(address, operation.getCallerAddress()); }
@Nonnull @Override public Collection<String> resourceTypes() { return Collections.singleton("Topic"); }
@Test public void should_list_resource_types() { // given kafkaDataConnection = createKafkaDataConnection(kafkaTestSupport); // when Collection<String> resourcedTypes = kafkaDataConnection.resourceTypes(); //then assertThat(resourcedTypes) .map(r -> r.toLowerCase(Locale.ROOT)) .containsExactlyInAnyOrder("topic"); }
public void initAndAddExternalIssue(ExternalIssue issue) { DefaultInputComponent inputComponent = (DefaultInputComponent) issue.primaryLocation().inputComponent(); ScannerReport.ExternalIssue rawExternalIssue = createReportExternalIssue(issue, inputComponent.scannerId()); write(inputComponent.scannerId(), rawExternalIssue); }
@Test public void initAndAddExternalIssue_whenImpactAndCleanCodeAttributeProvided_shouldPopulateReportFields() { initModuleIssues(); DefaultExternalIssue issue = new DefaultExternalIssue(project) .at(new DefaultIssueLocation().on(file).at(file.selectLine(3)).message("Foo")) .cleanCodeAttribute(CleanCodeAttribute.CLEAR) .forRule(JAVA_RULE_KEY) .addImpact(MAINTAINABILITY, org.sonar.api.issue.impact.Severity.LOW); moduleIssues.initAndAddExternalIssue(issue); ArgumentCaptor<ScannerReport.ExternalIssue> argument = ArgumentCaptor.forClass(ScannerReport.ExternalIssue.class); verify(reportPublisher.getWriter()).appendComponentExternalIssue(eq(file.scannerId()), argument.capture()); assertThat(argument.getValue().getImpactsList()).extracting(i -> i.getSoftwareQuality(), i -> i.getSeverity()) .containsExactly(tuple(MAINTAINABILITY.name(), org.sonar.api.issue.impact.Severity.LOW.name())); assertThat(argument.getValue().getCleanCodeAttribute()).isEqualTo(CleanCodeAttribute.CLEAR.name()); }
public static boolean isInstantiationStrategy(Object extension, String strategy) { InstantiationStrategy annotation = AnnotationUtils.getAnnotation(extension, InstantiationStrategy.class); if (annotation != null) { return strategy.equals(annotation.value()); } return InstantiationStrategy.PER_PROJECT.equals(strategy); }
@Test public void shouldBeBatchInstantiationStrategy() { assertThat(ExtensionUtils.isInstantiationStrategy(ProjectService.class, InstantiationStrategy.PER_BATCH)).isFalse(); assertThat(ExtensionUtils.isInstantiationStrategy(new ProjectService(), InstantiationStrategy.PER_BATCH)).isFalse(); assertThat(ExtensionUtils.isInstantiationStrategy(DefaultService.class, InstantiationStrategy.PER_BATCH)).isFalse(); assertThat(ExtensionUtils.isInstantiationStrategy(new DefaultService(), InstantiationStrategy.PER_BATCH)).isFalse(); assertThat(ExtensionUtils.isInstantiationStrategy(DefaultScannerService.class, InstantiationStrategy.PER_BATCH)).isFalse(); assertThat(ExtensionUtils.isInstantiationStrategy(new DefaultScannerService(), InstantiationStrategy.PER_BATCH)).isFalse(); }
@Override public CompletableFuture<List<Long>> getSplitBoundary(BundleSplitOption bundleSplitOptionTmp) { FlowOrQpsEquallyDivideBundleSplitOption bundleSplitOption = (FlowOrQpsEquallyDivideBundleSplitOption) bundleSplitOptionTmp; NamespaceService service = bundleSplitOption.getService(); NamespaceBundle bundle = bundleSplitOption.getBundle(); Map<String, TopicStatsImpl> topicStatsMap = bundleSplitOption.getTopicStatsMap(); int loadBalancerNamespaceBundleMaxMsgRate = bundleSplitOption.getLoadBalancerNamespaceBundleMaxMsgRate(); double diffThreshold = bundleSplitOption.getFlowOrQpsDifferenceThresholdPercentage() / 100.0; long loadBalancerNamespaceBundleMaxBandwidthBytes = bundleSplitOption .getLoadBalancerNamespaceBundleMaxBandwidthMbytes() * MBytes; return service.getOwnedTopicListForNamespaceBundle(bundle).thenCompose(topics -> { if (topics == null || topics.size() <= 1) { return CompletableFuture.completedFuture(null); } double bundleThroughput = 0; double bundleMsgRate = 0; Map<Long, TopicInfo> topicInfoMap = new HashMap<>(); List<Long> topicHashList = new ArrayList<>(topics.size()); for (String topic : topics) { TopicStatsImpl topicStats = topicStatsMap.get(topic); if (topicStats == null) { continue; } double msgRateIn = topicStats.getMsgRateIn(); double msgRateOut = topicStats.getMsgRateOut(); double msgThroughputIn = topicStats.getMsgThroughputIn(); double msgThroughputOut = topicStats.getMsgThroughputOut(); double msgRate = msgRateIn + msgRateOut; double throughput = msgThroughputIn + msgThroughputOut; if (msgRate <= 0 && throughput <= 0) { // Skip empty topic continue; } Long hashCode = bundle.getNamespaceBundleFactory().getLongHashCode(topic); topicHashList.add(hashCode); topicInfoMap.put(hashCode, new TopicInfo(topic, msgRate, throughput)); bundleThroughput += throughput; bundleMsgRate += msgRate; } if (topicInfoMap.size() < 2 || (bundleMsgRate < (loadBalancerNamespaceBundleMaxMsgRate * (1 + diffThreshold)) && bundleThroughput < 
(loadBalancerNamespaceBundleMaxBandwidthBytes * (1 + diffThreshold)))) { return CompletableFuture.completedFuture(null); } Collections.sort(topicHashList); List<Long> splitResults = new ArrayList<>(); double bundleMsgRateTmp = topicInfoMap.get(topicHashList.get(0)).msgRate; double bundleThroughputTmp = topicInfoMap.get(topicHashList.get(0)).throughput; for (int i = 1; i < topicHashList.size(); i++) { long topicHashCode = topicHashList.get(i); double msgRate = topicInfoMap.get(topicHashCode).msgRate; double throughput = topicInfoMap.get(topicHashCode).throughput; if ((bundleMsgRateTmp + msgRate) > loadBalancerNamespaceBundleMaxMsgRate || (bundleThroughputTmp + throughput) > loadBalancerNamespaceBundleMaxBandwidthBytes) { long splitStart = topicHashList.get(i - 1); long splitEnd = topicHashList.get(i); long splitMiddle = splitStart + (splitEnd - splitStart) / 2 + 1; splitResults.add(splitMiddle); bundleMsgRateTmp = msgRate; bundleThroughputTmp = throughput; } else { bundleMsgRateTmp += msgRate; bundleThroughputTmp += throughput; } } return CompletableFuture.completedFuture(splitResults); }); }
@Test public void testSplitBundleByFlowOrQps() { FlowOrQpsEquallyDivideBundleSplitAlgorithm algorithm = new FlowOrQpsEquallyDivideBundleSplitAlgorithm(); int loadBalancerNamespaceBundleMaxMsgRate = 1010; int loadBalancerNamespaceBundleMaxBandwidthMbytes = 100; int flowOrQpsDifferenceThresholdPercentage = 10; Map<Long, Double> hashAndMsgMap = new HashMap<>(); Map<Long, Double> hashAndThroughput = new HashMap<>(); Map<String, TopicStatsImpl> topicStatsMap = new HashMap<>(); List<String> mockTopics = new ArrayList<>(); List<Long> hashList = new ArrayList<>(); for (int i = 1; i < 6; i++) { String topicName = "persistent://test-tenant1/test-namespace1/test" + i; for (int j = 0; j < 20; j++) { String tp = topicName + "-partition-" + j; mockTopics.add(tp); TopicStatsImpl topicStats = new TopicStatsImpl(); topicStats.msgRateIn = 24.5; topicStats.msgThroughputIn = 1000; topicStats.msgRateOut = 25; topicStats.msgThroughputOut = 1000; topicStatsMap.put(tp, topicStats); } } for (int i = 6; i < 13; i++) { String topicName = "persistent://test-tenant1/test-namespace1/test" + i; for (int j = 0; j < 20; j++) { String tp = topicName + "-partition-" + j; mockTopics.add(tp); TopicStatsImpl topicStats = new TopicStatsImpl(); topicStats.msgRateIn = 25.5; topicStats.msgThroughputIn = 1000; topicStats.msgRateOut = 25; topicStats.msgThroughputOut = 1000; topicStatsMap.put(tp, topicStats); } } String tp = "persistent://test-tenant1/test-namespace1/test695-partition-0"; mockTopics.add(tp); TopicStatsImpl topicStats = new TopicStatsImpl(); topicStats.msgRateIn = 25; topicStats.msgThroughputIn = 1000; topicStats.msgRateOut = 35; topicStats.msgThroughputOut = 1000; topicStatsMap.put(tp, topicStats); // -- do test NamespaceService mockNamespaceService = mock(NamespaceService.class); NamespaceBundle mockNamespaceBundle = mock(NamespaceBundle.class); doReturn(CompletableFuture.completedFuture(mockTopics)) .when(mockNamespaceService).getOwnedTopicListForNamespaceBundle(mockNamespaceBundle); 
NamespaceBundleFactory mockNamespaceBundleFactory = mock(NamespaceBundleFactory.class); doReturn(mockNamespaceBundleFactory) .when(mockNamespaceBundle).getNamespaceBundleFactory(); mockTopics.forEach((topic) -> { long hashValue = Hashing.crc32().hashString(topic, UTF_8).padToLong(); doReturn(hashValue) .when(mockNamespaceBundleFactory).getLongHashCode(topic); hashList.add(hashValue); hashAndMsgMap.put(hashValue, topicStatsMap.get(topic).msgRateIn + topicStatsMap.get(topic).msgRateOut); hashAndThroughput.put(hashValue, topicStatsMap.get(topic).msgThroughputIn + topicStatsMap.get(topic).msgThroughputOut); }); List<Long> splitPositions = algorithm.getSplitBoundary(new FlowOrQpsEquallyDivideBundleSplitOption(mockNamespaceService, mockNamespaceBundle, null, topicStatsMap, loadBalancerNamespaceBundleMaxMsgRate, loadBalancerNamespaceBundleMaxBandwidthMbytes, flowOrQpsDifferenceThresholdPercentage)).join(); Collections.sort(hashList); int i = 0; for (Long position : splitPositions) { Long endPosition = position; double bundleMsgRateTmp = 0; double bundleThroughputTmp = 0; while (hashList.get(i) < endPosition) { bundleMsgRateTmp += hashAndMsgMap.get(hashList.get(i)); bundleThroughputTmp += hashAndThroughput.get(hashList.get(i)); i++; } assertTrue(bundleMsgRateTmp < loadBalancerNamespaceBundleMaxMsgRate); assertTrue(bundleThroughputTmp < loadBalancerNamespaceBundleMaxBandwidthMbytes * 1024 * 1024); } }
public static boolean isEmpty( CharSequence val ) { return val == null || val.length() == 0; }
@Test public void testIsEmptyStringBuffer() { assertTrue( Utils.isEmpty( (StringBuffer) null ) ); assertTrue( Utils.isEmpty( new StringBuffer( "" ) ) ); assertFalse( Utils.isEmpty( new StringBuffer( "test" ) ) ); }
@Override public int getColumnLength(final Object value) { throw new UnsupportedSQLOperationException("getColumnLength"); }
@Test void assertGetColumnLength() { assertThrows(UnsupportedSQLOperationException.class, () -> new PostgreSQLUnspecifiedBinaryProtocolValue().getColumnLength("val")); }
@Override public void accept(Props props) { File homeDir = props.nonNullValueAsFile(PATH_HOME.getKey()); Provider provider = resolveProviderAndEnforceNonnullJdbcUrl(props); String driverPath = driverPath(homeDir, provider); props.set(JDBC_DRIVER_PATH.getKey(), driverPath); }
@Test public void checkAndComplete_sets_driver_path_for_postgresql() throws Exception { File driverFile = new File(homeDir, "lib/jdbc/postgresql/pg.jar"); FileUtils.touch(driverFile); Props props = newProps(JDBC_URL.getKey(), "jdbc:postgresql://localhost/sonar"); underTest.accept(props); assertThat(props.nonNullValueAsFile(JDBC_DRIVER_PATH.getKey())).isEqualTo(driverFile); }
@Deprecated public void addAll(Promise... promises) { addAll((Future[]) promises); }
@Test public void testAddAllNullPromise() { assertThrows(NullPointerException.class, new Executable() { @Override public void execute() { combiner.addAll(null); } }); }
@Override public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return PathAttributes.EMPTY; } if(containerService.isContainer(file)) { final PathAttributes attributes = new PathAttributes(); if(log.isDebugEnabled()) { log.debug(String.format("Read location for bucket %s", file)); } attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier()); return attributes; } if(file.getType().contains(Path.Type.upload)) { final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus()); if(append.append) { return new PathAttributes().withSize(append.offset); } throw new NotfoundException(file.getAbsolute()); } try { PathAttributes attr; final Path bucket = containerService.getContainer(file); try { attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails( file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))); } catch(ServiceException e) { switch(e.getResponseCode()) { case 405: if(log.isDebugEnabled()) { log.debug(String.format("Mark file %s as delete marker", file)); } // Only DELETE method is allowed for delete markers attr = new PathAttributes(); attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString())); attr.setDuplicate(true); return attr; } throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file); } if(StringUtils.isNotBlank(attr.getVersionId())) { if(log.isDebugEnabled()) { log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file)); } // Determine if latest version try { final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails( bucket.isRoot() ? 
StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId(); if(null != latest) { if(log.isDebugEnabled()) { log.debug(String.format("Found later version %s for %s", latest, file)); } // Duplicate if not latest version attr.setDuplicate(!latest.equals(attr.getVersionId())); } } catch(ServiceException e) { final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file); if(failure instanceof NotfoundException) { attr.setDuplicate(true); } else { throw failure; } } } return attr; } catch(NotfoundException e) { if(file.isDirectory()) { if(log.isDebugEnabled()) { log.debug(String.format("Search for common prefix %s", file)); } // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned. try { new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1); } catch(ListCanceledException l) { // Found common prefix return PathAttributes.EMPTY; } catch(NotfoundException n) { throw e; } // Found common prefix return PathAttributes.EMPTY; } throw e; } }
@Test public void testVersioningReadAttributesDeleteMarker() throws Exception { final Path bucket = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory)); final Path f = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final Path testWithVersionId = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(f, new TransferStatus()); final PathAttributes attr = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(f); final String versionId = attr.getVersionId(); assertNotNull(versionId); assertEquals(testWithVersionId.attributes().getVersionId(), versionId); assertEquals(testWithVersionId.attributes(), attr); assertFalse(attr.isDuplicate()); new S3DefaultDeleteFeature(session).delete(Collections.singletonList(new Path(testWithVersionId).withAttributes(PathAttributes.EMPTY)), new DisabledPasswordCallback(), new Delete.DisabledCallback()); { final PathAttributes marker = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(testWithVersionId); assertTrue(marker.isDuplicate()); assertFalse(marker.getCustom().containsKey(KEY_DELETE_MARKER)); assertNotNull(marker.getVersionId()); assertEquals(versionId, marker.getVersionId()); } { final PathAttributes marker = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(testWithVersionId); assertTrue(marker.isDuplicate()); assertFalse(marker.getCustom().containsKey(KEY_DELETE_MARKER)); assertNotNull(marker.getVersionId()); assertEquals(versionId, marker.getVersionId()); } { try { new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(new Path(testWithVersionId).withAttributes(PathAttributes.EMPTY)); fail(); } catch(NotfoundException e) { // Delete marker } } { try { new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(new 
Path(testWithVersionId).withAttributes(PathAttributes.EMPTY)); fail(); } catch(NotfoundException e) { // Delete marker } } }
@ExecuteOn(TaskExecutors.IO) @Get(value = "{id}/flow", produces = "application/yaml") @Operation(tags = {"Blueprints"}, summary = "Get a blueprint flow") public String blueprintFlow( @Parameter(description = "The blueprint id") String id, HttpRequest<?> httpRequest ) throws URISyntaxException { return fastForwardToKestraApi(httpRequest, "/v1/blueprints/" + id + "/flow", Argument.of(String.class)); }
@Test void blueprintFlow(WireMockRuntimeInfo wmRuntimeInfo) { stubFor(get(urlMatching("/v1/blueprints/id_1/flow.*")) .willReturn(aResponse() .withHeader("Content-Type", "application/json") .withBodyFile("blueprint-flow.yaml")) ); String blueprintFlow = client.toBlocking().retrieve( HttpRequest.GET("/api/v1/blueprints/community/id_1/flow"), String.class ); assertThat(blueprintFlow, not(emptyOrNullString())); WireMock wireMock = wmRuntimeInfo.getWireMock(); wireMock.verifyThat(getRequestedFor(urlEqualTo("/v1/blueprints/id_1/flow"))); }
@Override public void trash(final Local file) throws LocalAccessDeniedException { synchronized(NSWorkspace.class) { if(log.isDebugEnabled()) { log.debug(String.format("Move %s to Trash", file)); } // Asynchronous operation. 0 if the operation is performed synchronously and succeeds, and a positive // integer if the operation is performed asynchronously and succeeds if(!workspace.performFileOperation( NSWorkspace.RecycleOperation, new NFDNormalizer().normalize(file.getParent().getAbsolute()).toString(), StringUtils.EMPTY, NSArray.arrayWithObject(new NFDNormalizer().normalize(file.getName()).toString()))) { throw new LocalAccessDeniedException(String.format("Failed to move %s to Trash", file.getName())); } } }
@Test(expected = LocalAccessDeniedException.class) public void testTrashNotfound() throws Exception { Local l = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString()); assertFalse(l.exists()); new WorkspaceTrashFeature().trash(l); }
public static <T extends PipelineOptions> T as(Class<T> klass) { return new Builder().as(klass); }
@Test public void testMissingSetterThrows() throws Exception { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( "Expected setter for property [object] of type [java.lang.Object] on " + "[org.apache.beam.sdk.options.PipelineOptionsFactoryTest$MissingSetter]."); PipelineOptionsFactory.as(MissingSetter.class); }
@Deprecated public long searchOffset(final String addr, final String topic, final int queueId, final long timestamp, final long timeoutMillis) throws RemotingException, MQBrokerException, InterruptedException { SearchOffsetRequestHeader requestHeader = new SearchOffsetRequestHeader(); requestHeader.setTopic(topic); requestHeader.setQueueId(queueId); requestHeader.setTimestamp(timestamp); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.SEARCH_OFFSET_BY_TIMESTAMP, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { SearchOffsetResponseHeader responseHeader = (SearchOffsetResponseHeader) response.decodeCommandCustomHeader(SearchOffsetResponseHeader.class); return responseHeader.getOffset(); } default: break; } throw new MQBrokerException(response.getCode(), response.getRemark(), addr); }
@Test public void assertSearchOffset() throws MQBrokerException, RemotingException, InterruptedException { mockInvokeSync(); SearchOffsetResponseHeader responseHeader = mock(SearchOffsetResponseHeader.class); when(responseHeader.getOffset()).thenReturn(1L); setResponseHeader(responseHeader); assertEquals(1L, mqClientAPI.searchOffset(defaultBrokerAddr, new MessageQueue(), System.currentTimeMillis(), defaultTimeout)); }
@Override public <T> Set<Class<T>> getSubTypesOf(String pkg, Class<T> requestClass) { Set<Class<T>> set = new HashSet<>(16); String packageSearchPath = ResourcePatternResolver.CLASSPATH_ALL_URL_PREFIX + ClassUtils.convertClassNameToResourcePath(pkg) + '/' + "**/*.class"; try { Resource[] resources = resourcePatternResolver.getResources(packageSearchPath); for (Resource resource : resources) { Class<?> scanClass = getClassByResource(resource); if (requestClass.isAssignableFrom(scanClass)) { set.add((Class<T>) scanClass); } } } catch (IOException | ClassNotFoundException e) { LOGGER.error("scan path: {} failed", packageSearchPath, e); } return set; }
@Test void testClassVersionNotMatch() throws NoSuchFieldException, IllegalAccessException, IOException { setResolver(); Resource resource = mock(Resource.class); byte[] testCase = new byte[8]; testCase[7] = (byte) 64; InputStream inputStream = new ByteArrayInputStream(testCase); when(resource.getInputStream()).thenReturn(inputStream); String path = AnnotationClass.class.getPackage().getName(); when(pathMatchingResourcePatternResolver.getResources(anyString())).thenReturn(new Resource[] {resource}); Set<Class<MockClass>> subTypesOf = packageScan.getSubTypesOf(path, MockClass.class); assertTrue(subTypesOf.isEmpty()); }
@Override public String toString() { return "AfterPane.elementCountAtLeast(" + countElems + ")"; }
@Test public void testToString() { TriggerStateMachine trigger = AfterPaneStateMachine.elementCountAtLeast(5); assertEquals("AfterPane.elementCountAtLeast(5)", trigger.toString()); }
@Override public boolean contains(String clientId) { return connectionBasedClientManager.contains(clientId) || ephemeralIpPortClientManager.contains(clientId) || persistentIpPortClientManager.contains(clientId); }
@Test void testContainsConnectionId() { assertTrue(delegate.contains(connectionId)); }
public @Nullable String formatDiff(A actual, E expected) { return null; }
@Test public void testTransforming_both_formatDiff() { assertThat(HYPHENS_MATCH_COLONS.formatDiff("chat-room", "abcdefg:hij")).isNull(); }
public String convert(ILoggingEvent le) { long timestamp = le.getTimeStamp(); return cachingDateFormatter.format(timestamp); }
@Test public void convertsDateWithCurrentTimeZoneByDefault() { assertEquals(formatDate(TimeZone.getDefault().getID()), convert(_timestamp, DATETIME_PATTERN)); }
@Override public void finished(boolean allStepsExecuted) { if (postProjectAnalysisTasks.length == 0) { return; } ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED); for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) { executeTask(projectAnalysis, postProjectAnalysisTask); } }
@Test public void qualityGate_is_null_when_finished_method_argument_is_false() { underTest.finished(false); verify(postProjectAnalysisTask).finished(taskContextCaptor.capture()); assertThat(taskContextCaptor.getValue().getProjectAnalysis().getQualityGate()).isNull(); }
@Override public boolean shouldClientThrottle(short version) { return version >= 2; }
@Test public void testShouldThrottle() { LeaveGroupResponse response = new LeaveGroupResponse(new LeaveGroupResponseData()); for (short version : ApiKeys.LEAVE_GROUP.allVersions()) { if (version >= 2) { assertTrue(response.shouldClientThrottle(version)); } else { assertFalse(response.shouldClientThrottle(version)); } } }
public void evaluate(List<AuthorizationContext> contexts) { if (CollectionUtils.isEmpty(contexts)) { return; } contexts.forEach(this.authorizationStrategy::evaluate); }
@Test public void evaluate7() { if (MixAll.isMac()) { return; } this.authConfig.setAuthorizationEnabled(false); this.evaluator = new AuthorizationEvaluator(this.authConfig); Subject subject = Subject.of("User:test"); Resource resource = Resource.ofTopic("test"); Action action = Action.PUB; String sourceIp = "192.168.0.1"; DefaultAuthorizationContext context = DefaultAuthorizationContext.of(subject, resource, action, sourceIp); context.setRpcCode("10"); this.evaluator.evaluate(Collections.singletonList(context)); }
private void announceBackgroundJobServer() { final BackgroundJobServerStatus serverStatus = backgroundJobServer.getServerStatus(); storageProvider.announceBackgroundJobServer(serverStatus); determineIfCurrentBackgroundJobServerIsMaster(); lastSignalAlive = serverStatus.getLastHeartbeat(); }
@Test void otherServersDoZookeepingAndBecomeMasterIfMasterStops() { final BackgroundJobServerStatus master = anotherServer(); storageProvider.announceBackgroundJobServer(master); backgroundJobServer.start(); await().atMost(TWO_SECONDS) .untilAsserted(() -> assertThat(backgroundJobServer.isMaster()).isFalse()); storageProvider.signalBackgroundJobServerStopped(master); await() .atMost(1, TimeUnit.SECONDS) .untilAsserted(() -> assertThat(storageProvider.getBackgroundJobServers()).hasSize(1)); await().atMost(FIVE_SECONDS) .untilAsserted(() -> assertThat(backgroundJobServer.isMaster()).isTrue()); }
public static RocksDbIndexedTimeOrderedWindowBytesStoreSupplier create(final String name, final Duration retentionPeriod, final Duration windowSize, final boolean retainDuplicates, final boolean hasIndex) { Objects.requireNonNull(name, "name cannot be null"); final String rpMsgPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod"); final long retentionMs = validateMillisecondDuration(retentionPeriod, rpMsgPrefix); final String wsMsgPrefix = prepareMillisCheckFailMsgPrefix(windowSize, "windowSize"); final long windowSizeMs = validateMillisecondDuration(windowSize, wsMsgPrefix); final long defaultSegmentInterval = Math.max(retentionMs / 2, 60_000L); if (retentionMs < 0L) { throw new IllegalArgumentException("retentionPeriod cannot be negative"); } if (windowSizeMs < 0L) { throw new IllegalArgumentException("windowSize cannot be negative"); } if (defaultSegmentInterval < 1L) { throw new IllegalArgumentException("segmentInterval cannot be zero or negative"); } if (windowSizeMs > retentionMs) { throw new IllegalArgumentException("The retention period of the window store " + name + " must be no smaller than its window size. Got size=[" + windowSizeMs + "], retention=[" + retentionMs + "]"); } return new RocksDbIndexedTimeOrderedWindowBytesStoreSupplier(name, retentionMs, defaultSegmentInterval, windowSizeMs, retainDuplicates, hasIndex); }
@Test public void shouldThrowIfRetentionPeriodIsNegative() { final Exception e = assertThrows(IllegalArgumentException.class, () -> RocksDbIndexedTimeOrderedWindowBytesStoreSupplier.create("anyName", ofMillis(-1L), ZERO, false, false)); assertEquals("retentionPeriod cannot be negative", e.getMessage()); }
@Override protected String getScheme() { return config.getScheme(); }
@Test public void testGetScheme() { S3FileSystem s3FileSystem = new S3FileSystem(s3Config("s3")); assertEquals("s3", s3FileSystem.getScheme()); s3FileSystem = new S3FileSystem(s3Config("other")); assertEquals("other", s3FileSystem.getScheme()); }
public static String describe(List<org.apache.iceberg.expressions.Expression> exprs) { return exprs.stream().map(Spark3Util::describe).collect(Collectors.joining(", ")); }
@Test public void testDescribeSchema() { Schema schema = new Schema( required(1, "data", Types.ListType.ofRequired(2, Types.StringType.get())), optional( 3, "pairs", Types.MapType.ofOptional(4, 5, Types.StringType.get(), Types.LongType.get())), required(6, "time", Types.TimestampType.withoutZone())); assertThat(Spark3Util.describe(schema)) .as("Schema description isn't correct.") .isEqualTo( "struct<data: list<string> not null,pairs: map<string, bigint>,time: timestamp not null>"); }
@Override public List<URI> get() { return resultsCachingSupplier.get(); }
@Test void testPreconfiguredIndexers() { final IndexerDiscoveryProvider provider = new IndexerDiscoveryProvider( List.of(URI.create("http://my-host:9200")), 1, Duration.seconds(1), preflightConfig(null), nodes(), NOOP_CERT_PROVISIONER ); Assertions.assertThat(provider.get()) .hasSize(1) .extracting(URI::toString) .contains("http://my-host:9200"); }
@VisibleForTesting public static boolean checkReadyToBeDropped(long tabletId, long backendId) { Long time = TABLET_TO_DROP_TIME.get(tabletId, backendId); long currentTimeMs = System.currentTimeMillis(); if (time == null) { TABLET_TO_DROP_TIME.put(tabletId, backendId, currentTimeMs + Config.tablet_report_drop_tablet_delay_sec * 1000); } else { boolean ready = currentTimeMs > time; if (ready) { // clean the map TABLET_TO_DROP_TIME.remove(tabletId, backendId); return true; } } return false; }
@Test public void testTabletDropDelay() throws InterruptedException { long tabletId = 100001; long backendId = 100002; Config.tablet_report_drop_tablet_delay_sec = 3; boolean ready = ReportHandler.checkReadyToBeDropped(tabletId, backendId); Assert.assertFalse(ready); Thread.sleep(1000); ready = ReportHandler.checkReadyToBeDropped(tabletId, backendId); Assert.assertFalse(ready); Thread.sleep(3000); ready = ReportHandler.checkReadyToBeDropped(tabletId, backendId); Assert.assertTrue(ready); // check map is cleaned ready = ReportHandler.checkReadyToBeDropped(tabletId, backendId); Assert.assertFalse(ready); Thread.sleep(4000); ready = ReportHandler.checkReadyToBeDropped(tabletId, backendId); Assert.assertTrue(ready); }
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) { Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType), "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType); return BINARY_PROTOCOL_VALUES.get(binaryColumnType); }
@Test void assertGetBinaryProtocolValueWithMySQLTypeLongLong() { assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.LONGLONG), instanceOf(MySQLInt8BinaryProtocolValue.class)); }
public static List<String> splitPlainTextLines(String text, int maxTokensPerLine) { return internalSplitLines(text, maxTokensPerLine, true, s_plaintextSplitOptions); }
@Test public void canSplitPlainTextLines() { String input = "This is a test of the emergency broadcast system. This is only a test."; List<String> expected = Arrays.asList( "This is a test of the emergency broadcast system.", "This is only a test."); List<String> result = TextChunker.splitPlainTextLines(input, 15); Assertions.assertEquals(expected, result); }
public static String getModelLocalUriIdString(ModelLocalUriId localUriId) throws JsonProcessingException { return objectMapper.writeValueAsString(localUriId); }
@Test void getModelLocalUriIdString() throws JsonProcessingException { String model = "foo"; String basePath = "this/is/modelLocalUriId"; LocalUri modelLocalUriId = new ReflectiveAppRoot("test") .get(ComponentFoo.class) .get("this", "is", "modelLocalUriId") .asLocalUri(); ModelLocalUriId localUriId = new ModelLocalUriId(modelLocalUriId); String retrieved = JSONUtils.getModelLocalUriIdString(localUriId); String expected = String.format("{\"model\":\"%1$s\",\"basePath\":\"/%2$s\",\"fullPath\":\"/%1$s/%2$s\"}", model, basePath); assertThat(retrieved).isEqualTo(expected); }
@Deprecated public static void validateIds(List<? extends UUIDBased> ids, String errorMessage) { if (ids == null || ids.isEmpty()) { throw new IncorrectParameterException(errorMessage); } else { for (UUIDBased id : ids) { validateId(id, errorMessage); } } }
@Test void validateIdsTest() { List<? extends UUIDBased> list = List.of(goodDeviceId); Validator.validateIds(list, ids -> "Incorrect Id " + ids); assertThatThrownBy(() -> Validator.validateIds(null, id -> "Incorrect Ids " + id)) .as("Ids are null") .isInstanceOf(IncorrectParameterException.class) .hasMessageContaining("Incorrect Ids null"); assertThatThrownBy(() -> Validator.validateIds(Collections.emptyList(), ids -> "Incorrect Ids " + ids)) .as("List is empty") .isInstanceOf(IncorrectParameterException.class) .hasMessageContaining("Incorrect Ids []"); List<UUIDBased> badList = new ArrayList<>(2); badList.add(goodDeviceId); badList.add(null); // Incorrect Ids [18594c15-9f05-4cda-b58e-70172467c3e5, null] assertThatThrownBy(() -> Validator.validateIds(badList, ids -> "Incorrect Ids " + ids)) .as("List contains null") .isInstanceOf(IncorrectParameterException.class) .hasMessageContaining("Incorrect Ids ") .hasMessageContaining(goodDeviceId.getId().toString()) .hasMessageContaining("null"); }
/** Resets the Merkle tree by zeroing the hash of every node in the backing array. */
@Override
public void clear() {
    Arrays.fill(tree, 0);
}

// Populates all leaves of a depth-4 tree, confirms the root hash is non-zero,
// then verifies clear() leaves every node hash equal to zero.
@Test
public void testClear() {
    MerkleTree merkleTree = new ArrayMerkleTree(4);
    merkleTree.updateAdd(0x80000000, 1); // leaf 7
    merkleTree.updateAdd(0xA0000000, 2); // leaf 8
    merkleTree.updateAdd(0xC0000000, 3); // leaf 9
    merkleTree.updateAdd(0xE0000000, 4); // leaf 10
    merkleTree.updateAdd(0x00000000, 5); // leaf 11
    merkleTree.updateAdd(0x20000000, 6); // leaf 12
    merkleTree.updateAdd(0x40000000, 7); // leaf 13
    merkleTree.updateAdd(0x60000000, 8); // leaf 14
    assertNotEquals(0, merkleTree.getNodeHash(0));
    merkleTree.clear();
    for (int nodeOrder = 0; nodeOrder < MerkleTreeUtil.getNumberOfNodes(merkleTree.depth()); nodeOrder++) {
        assertEquals(0, merkleTree.getNodeHash(nodeOrder));
    }
}
/**
 * Fires the timer trigger: schedules an immediate build of the associated job.
 * A null job (e.g. trigger not yet attached) is a silent no-op — this is the
 * guard exercised by the JENKINS-29790 regression test below.
 */
@Override
public void run() {
    if (job == null) {
        return;
    }
    job.scheduleBuild(0, new TimerTriggerCause());
}

// Regression test for JENKINS-29790: running a trigger with no job must not NPE.
@Issue("JENKINS-29790")
@Test
public void testNoNPE() {
    new TimerTrigger("").run();
}
/**
 * Resolves the JSON path and returns the value as a primitive float.
 *
 * @param path the JSON path to evaluate
 * @return the value at {@code path}, narrowed to float
 */
public float getFloat(String path) {
    final Object value = get(path);
    //Groovy will always return a Double for floating point values.
    if (value instanceof Double) {
        return ((Double) value).floatValue();
    } else {
        // Non-Double values (e.g. Integer, String) go through the generic converter.
        return ObjectConverter.convertObjectTo(value, Float.class);
    }
}

// Confirms an attribute literally named "size" is read as a value, not as the
// collection-size pseudo-property.
@Test
public void parses_json_document_with_attribute_name_equal_to_size() {
    // When
    final float anInt = new JsonPath(JSON_PATH_WITH_SIZE).getFloat("map.size");
    // Then
    assertThat(anInt, is(12.3f));
}
/**
 * Returns the URI this node publishes for its HTTP API.
 * Falls back to the default HTTP URI when "http_publish_uri" is unset, and
 * substitutes the default (preserving the configured path) when the configured
 * host is a wildcard address, which is not routable by peers.
 *
 * @return a concrete, normalized publish URI
 */
public URI getHttpPublishUri() {
    if (httpPublishUri == null) {
        final URI defaultHttpUri = getDefaultHttpUri();
        LOG.debug("No \"http_publish_uri\" set. Using default <{}>.", defaultHttpUri);
        return defaultHttpUri;
    } else {
        final InetAddress inetAddress = toInetAddress(httpPublishUri.getHost());
        if (Tools.isWildcardInetAddress(inetAddress)) {
            // Wildcard (e.g. 0.0.0.0) cannot be published; keep only the path component.
            final URI defaultHttpUri = getDefaultHttpUri(httpPublishUri.getPath());
            LOG.warn("\"{}\" is not a valid setting for \"http_publish_uri\". Using default <{}>.",
                    httpPublishUri, defaultHttpUri);
            return defaultHttpUri;
        } else {
            // Normalize: fills in the scheme/default port and keeps the configured path.
            return Tools.normalizeURI(httpPublishUri, httpPublishUri.getScheme(),
                    GRAYLOG_DEFAULT_PORT, httpPublishUri.getPath());
        }
    }
}

// A fully absolute, non-wildcard configured URI must be returned unchanged.
@Test
public void testHttpPublishUriIsAbsoluteURI() throws RepositoryException, ValidationException {
    jadConfig.setRepository(new InMemoryRepository(ImmutableMap.of("http_publish_uri", "http://www.example.com:12900/foo/"))).addConfigurationBean(configuration).process();
    assertThat(configuration.getHttpPublishUri()).isEqualTo(URI.create("http://www.example.com:12900/foo/"));
}
/**
 * Handles the ETH_MINING operation: queries the node's mining status via web3j
 * and, when the response carries no error, sets the boolean result as the
 * message body.
 *
 * @param message the Camel message to read the request id from and write the result to
 * @throws IOException if the underlying web3j request fails
 */
@InvokeOnHeader(Web3jConstants.ETH_MINING)
void ethMining(Message message) throws IOException {
    Request<?, EthMining> request = web3j.ethMining();
    setRequestId(message, request);
    EthMining response = request.send();
    // checkForError populates the message with error details when present;
    // only a clean response yields a boolean body.
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.isMining());
    }
}

// Mocks web3j to report mining=true and asserts the exchange body becomes TRUE.
@Test
public void ethMiningTest() throws Exception {
    EthMining response = Mockito.mock(EthMining.class);
    Mockito.when(mockWeb3j.ethMining()).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.isMining()).thenReturn(Boolean.TRUE);
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_MINING);
    template.send(exchange);
    Boolean body = exchange.getIn().getBody(Boolean.class);
    assertTrue(body);
}
@Override // The set of valid retention strategies must // - contain only names of supported strategies // - at least one must stay enabled public void validate(String parameter, Set<String> values) throws ValidationException { if (!values.stream() .filter(s -> !VALID_STRATEGIES.contains(s) && !ARCHIVE_RETENTION_STRATEGY.equals(s)) .collect(Collectors.toSet()).isEmpty()) { throw new ValidationException("Parameter " + parameter + " contains invalid values: " + values); } if (values.containsAll(VALID_STRATEGIES)) { throw new ValidationException(parameter + ":" + values + " At least one retention of the following [none, close, delete], should stay enabled!"); } }
// Every supported strategy alone, plus pairs that leave at least one built-in
// strategy enabled, must validate without throwing.
@Test
void validStrategy() throws ValidationException {
    assertDoesNotThrow(
            () -> {
                classUnderTest.validate(PARAM, Set.of("none"));
                classUnderTest.validate(PARAM, Set.of("archive"));
                classUnderTest.validate(PARAM, Set.of("delete"));
                classUnderTest.validate(PARAM, Set.of("close"));
                classUnderTest.validate(PARAM, Set.of("none", "close"));
                classUnderTest.validate(PARAM, Set.of("none", "delete"));
                classUnderTest.validate(PARAM, Set.of("delete", "close"));
            });
}
/**
 * Restarts a workflow instance (recursively, to cover nested restart targets).
 * A NON_TERMINAL_ERROR outcome means the instance is still running, which makes
 * restart an invalid action — this is surfaced to the caller as a bad request.
 *
 * @param runRequest the restart request, including the restart policy/config
 * @return the successful restart response
 * @throws MaestroBadRequestException if the instance is in a non-terminal status
 */
public RunResponse restart(RunRequest runRequest) {
    RunResponse runResponse = restartRecursively(runRequest);
    if (runResponse.getStatus() == RunResponse.Status.NON_TERMINAL_ERROR) {
        LOG.error(
                "workflow instance {} does not support restart action as it is in a non-terminal status [{}]",
                runRequest.getWorkflowIdentity(),
                runResponse.getTimelineEvent().getMessage());
        throw new MaestroBadRequestException(
                Collections.emptyList(),
                "workflow instance %s does not support restart action as it is in a non-terminal status [%s]",
                runRequest.getWorkflowIdentity(),
                runResponse.getTimelineEvent().getMessage());
    }
    return runResponse;
}

// An IN_PROGRESS instance must reject restart with a MaestroBadRequestException
// whose message names the instance and its non-terminal status.
@Test
public void testRestartNonTerminalError() {
    WorkflowInstance wfInstance = new WorkflowInstance();
    wfInstance.setInitiator(new ManualInitiator());
    wfInstance.setStatus(WorkflowInstance.Status.IN_PROGRESS);
    wfInstance.setWorkflowInstanceId(10L);
    wfInstance.setWorkflowRunId(1L);
    wfInstance.setWorkflowId("test-workflow");
    wfInstance.setRuntimeWorkflow(Workflow.builder().build());
    RunRequest request = RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.RESTART_FROM_BEGINNING)
            .restartConfig(
                    RestartConfig.builder().addRestartNode("test-workflow", 1L, null).build())
            .build();
    when(instanceDao.getWorkflowInstance("test-workflow", 1L, Constants.LATEST_INSTANCE_RUN, true))
            .thenReturn(wfInstance);
    AssertHelper.assertThrows(
            "Cannot restart a non-terminal state instance",
            MaestroBadRequestException.class,
            "workflow instance [test-workflow][1] does not support restart action as it is in a non-terminal status [IN_PROGRESS]",
            () -> actionHandler.restart(request));
}
/**
 * Three-argument convenience overload of FEEL replace(): delegates to the full
 * overload with no regex flags.
 *
 * @param input       the string to operate on
 * @param pattern     the regex pattern to match
 * @param replacement the replacement text
 * @return the replacement result, or an error result for invalid parameters
 */
public FEELFnResult<Object> invoke(@ParameterName("input") String input, @ParameterName("pattern") String pattern, @ParameterName( "replacement" ) String replacement ) {
    return invoke(input, pattern, replacement, null);
}

// Any null among input/pattern/replacement must yield an InvalidParametersEvent
// error result, in every combination.
@Test
void invokeNull() {
    FunctionTestUtil.assertResultError(replaceFunction.invoke(null, null, null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(replaceFunction.invoke("testString", null, null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(replaceFunction.invoke("testString", "test", null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(replaceFunction.invoke(null, "test", null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(replaceFunction.invoke(null, "test", "ttt"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(replaceFunction.invoke(null, null, "ttt"), InvalidParametersEvent.class);
}
/**
 * Decrypts an AES cipher text of the form "AES:&lt;base64 IV&gt;:&lt;base64 cipher text&gt;"
 * using AES/CBC/PKCS5Padding and returns the UTF-8 plain text.
 * Any failure (bad format, bad base64, wrong IV length, bad padding) is wrapped
 * in a CryptoException with the underlying cause preserved.
 *
 * @param cipherText the colon-delimited cipher text to decrypt
 * @return the decrypted plain text
 * @throws CryptoException wrapping the underlying failure
 */
@Override
public String decrypt(String cipherText) throws CryptoException {
    try {
        // Format sanity check first; failures become IllegalArgumentException("bad cipher text").
        Assert.isTrue(canDecrypt(cipherText), "bad cipher text");
        String[] splits = cipherText.split(":");
        String encodedIV = splits[1];
        String encodedCipherText = splits[2];
        byte[] initializationVector = DECODER.decode(encodedIV);
        Cipher decryptCipher = Cipher.getInstance("AES/CBC/PKCS5Padding");
        decryptCipher.init(Cipher.DECRYPT_MODE, createSecretKeySpec(), new IvParameterSpec(initializationVector));
        byte[] decryptedBytes = decryptCipher.doFinal(DECODER.decode(encodedCipherText));
        return new String(decryptedBytes, StandardCharsets.UTF_8);
    } catch (Exception e) {
        throw new CryptoException(e);
    }
}

// Each kind of tampering (no format, bad IV length, bad base64, bad padding)
// must surface as a CryptoException wrapping the precise underlying cause.
@Test
public void shouldErrorOutWhenCipherTextIsTamperedWith() {
    assertThatCode(() -> aesEncrypter.decrypt("some junk that is not base 64 encoded"))
            .isInstanceOf(CryptoException.class)
            .hasCauseInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("bad cipher text");
    assertThatCode(() -> aesEncrypter.decrypt("AES:foo:bar"))
            .isInstanceOf(CryptoException.class)
            .hasCauseInstanceOf(InvalidAlgorithmParameterException.class)
            .hasMessageContaining("Wrong IV length: must be 16 bytes long");
    assertThatCode(() -> aesEncrypter.decrypt("AES:lzcCuNSe4vUx+CsWgN11Uw==z:junk"))
            .isInstanceOf(CryptoException.class)
            .hasCauseInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("Input byte array has incorrect ending byte at 24");
    assertThatCode(() -> aesEncrypter.decrypt("AES:lzcCuNSe4vUx+CsWgN11Uw==z:@"))
            .isInstanceOf(CryptoException.class)
            .hasCauseInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("Input byte array has incorrect ending byte at 24");
    assertThatCode(() -> aesEncrypter.decrypt("AES:lzcCuNSe4vUx+CsWgN11Uw==:DelRFu6mCN7kA/2oYmeLRA=="))
            .isInstanceOf(CryptoException.class)
            .hasCauseInstanceOf(BadPaddingException.class)
            .hasMessageContaining("Given final block not properly padded");
}
/**
 * Parses {@code value} as an exact boolean string.
 *
 * @param value the candidate boolean text
 * @return {@code Optional.of(true)} or {@code Optional.of(false)} when the text
 *         matches the corresponding boolean form, otherwise {@code Optional.empty()}
 */
public static Optional<Boolean> parseBooleanExact(final String value) {
    for (final boolean candidate : new boolean[] {true, false}) {
        if (booleanStringMatches(value, candidate)) {
            return Optional.of(candidate);
        }
    }
    return Optional.empty();
}
// Case-insensitive "no"/"n" forms must parse as an exact false.
@Test
public void shouldParseExactNoAsFalse() {
    assertThat(SqlBooleans.parseBooleanExact("nO"), is(Optional.of(false)));
    assertThat(SqlBooleans.parseBooleanExact("N"), is(Optional.of(false)));
}
/**
 * REST endpoint: fetches a single timeline entity by its encoded UID.
 * Decodes the UID into a reader context, applies the data-to-retrieve filters,
 * checks the caller's access, and records request latency metrics. A missing
 * entity yields 404; a malformed UID yields 400.
 *
 * @param uid the generic-entity UID identifying the entity
 * @return the matching timeline entity
 */
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
        @Context HttpServletRequest req,
        @Context HttpServletResponse res,
        @PathParam("uid") String uId,
        @QueryParam("confstoretrieve") String confsToRetrieve,
        @QueryParam("metricstoretrieve") String metricsToRetrieve,
        @QueryParam("fields") String fields,
        @QueryParam("metricslimit") String metricsLimit,
        @QueryParam("metricstimestart") String metricsTimeStart,
        @QueryParam("metricstimeend") String metricsTimeEnd) {
    String url = req.getRequestURI() + (req.getQueryString() == null ? "" : QUERY_STRING_SEP + req.getQueryString());
    UserGroupInformation callerUGI = TimelineReaderWebServicesUtils.getUser(req);
    LOG.info("Received URL {} from user {}", url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
    long startTime = Time.monotonicNow();
    boolean succeeded = false;
    init(res);
    TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
    TimelineEntity entity = null;
    try {
        TimelineReaderContext context = TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
        if (context == null) {
            throw new BadRequestException("Incorrect UID " + uId);
        }
        entity = timelineReaderManager.getEntity(context,
                TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
                        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
                        metricsTimeStart, metricsTimeEnd));
        // Access check happens after retrieval; failure propagates via handleException.
        checkAccessForGenericEntity(entity, callerUGI);
        succeeded = true;
    } catch (Exception e) {
        // Translates parse/access errors into the appropriate web-service response.
        handleException(e, url, startTime, "Either metricslimit or metricstime" + " start/end");
    } finally {
        long latency = Time.monotonicNow() - startTime;
        METRICS.addGetEntitiesLatency(latency, succeeded);
        LOG.info("Processed URL {} (Took {} ms.)", url, latency);
    }
    if (entity == null) {
        LOG.info("Processed URL {} but entity not found" + " (Took {} ms.)", url, (Time.monotonicNow() - startTime));
        throw new NotFoundException("Timeline entity with uid: " + uId + "is not found");
    }
    return entity;
}

// With fields=ALL the response must carry configs, metrics, events, and an
// info map that includes the synthetic UID key.
@Test
void testGetEntityAllFields() throws Exception {
    Client client = createClient();
    try {
        URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
                "timeline/clusters/cluster1/apps/app1/entities/app/id_1?" +
                "fields=ALL");
        ClientResponse resp = getResponse(client, uri);
        TimelineEntity entity = resp.getEntity(TimelineEntity.class);
        assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
                resp.getType().toString());
        assertNotNull(entity);
        assertEquals("id_1", entity.getId());
        assertEquals("app", entity.getType());
        assertEquals(3, entity.getConfigs().size());
        assertEquals(3, entity.getMetrics().size());
        assertTrue(entity.getInfo().containsKey(TimelineReaderUtils.UID_KEY),
                "UID should be present");
        // Includes UID.
        assertEquals(3, entity.getInfo().size());
        assertEquals(2, entity.getEvents().size());
    } finally {
        client.destroy();
    }
}
/**
 * Registers a step/glue class with the Guice factory. If the class provides an
 * InjectorSource (and none was configured via property), the injector is built
 * eagerly so that static bindings exist before before-all hooks run.
 *
 * @param stepClass the glue class to register
 * @return always {@code true} (registration cannot fail here)
 */
@Override
public boolean addClass(final Class<?> stepClass) {
    if (stepClasses.contains(stepClass)) {
        return true;
    }
    if (injectorSourceFromProperty == null) {
        if (hasInjectorSource(stepClass)) {
            // Only one class in the glue may declare an InjectorSource.
            checkOnlyOneClassHasInjectorSource(stepClass);
            withInjectorSource = stepClass;
            // Eager init to allow for static binding prior to before all
            // hooks
            injector = instantiateUserSpecifiedInjectorSource(withInjectorSource).getInjector();
        }
    }
    stepClasses.add(stepClass);
    return true;
}

// Adding the injector class alone must already run static injection,
// populating the static field before any scenario starts.
@Test
void shouldInjectStaticBeforeStart() {
    factory = new GuiceFactory();
    WithStaticFieldClass.property = null;
    factory.addClass(CucumberInjector.class);
    assertThat(WithStaticFieldClass.property, equalTo("Hello world"));
}
/**
 * Creates a Cassandra {@link Write} transform configured for DELETE mutations.
 *
 * @param <T> the entity type the transform operates on
 * @return a Write builder pre-set to MutationType.DELETE
 */
public static <T> Write<T> delete() {
    return Write.<T>builder(MutationType.DELETE).build();
}

// A custom mapper factory must be invoked exactly once for a delete pipeline
// (verified via the shared counter incremented by the NOOP mapper).
@Test
public void testCustomMapperImplDelete() {
    counter.set(0);
    SerializableFunction<Session, Mapper> factory = new NOOPMapperFactory();
    pipeline
            .apply(Create.of(""))
            .apply(
                    CassandraIO.<String>delete()
                            .withHosts(Collections.singletonList(CASSANDRA_HOST))
                            .withPort(cassandraPort)
                            .withKeyspace(CASSANDRA_KEYSPACE)
                            .withMapperFactoryFn(factory)
                            .withEntity(String.class));
    pipeline.run();
    assertEquals(1, counter.intValue());
}
/**
 * Checks that the current user holds the permission required by each requested
 * repository operation, mapping operation groups to their governing actions.
 *
 * @param operations the repository operations being attempted
 * @throws KettleException if any required permission is missing
 */
@Override
public void validateAction( RepositoryOperation... operations ) throws KettleException {
    for ( RepositoryOperation operation : operations ) {
        switch ( operation ) {
            // Execute-type operations require the execute-content permission.
            case EXECUTE_TRANSFORMATION:
            case EXECUTE_JOB:
                checkOperationAllowed( EXECUTE_CONTENT_ACTION );
                break;
            // Modify-type operations require the create-content permission.
            case MODIFY_TRANSFORMATION:
            case MODIFY_JOB:
                checkOperationAllowed( CREATE_CONTENT_ACTION );
                break;
            case SCHEDULE_TRANSFORMATION:
            case SCHEDULE_JOB:
                checkOperationAllowed( SCHEDULE_CONTENT_ACTION );
                break;
            case MODIFY_DATABASE:
                checkOperationAllowed( MODIFY_DATABASE_ACTION );
                break;
            case SCHEDULER_EXECUTE:
                checkOperationAllowed( SCHEDULER_EXECUTE_ACTION );
                break;
        }
    }
}

// Without the scheduler-execute permission, validating SCHEDULER_EXECUTE must throw.
@Test( expected = KettleException.class )
public void exceptionThrown_WhenOperationNotAllowed_ExecuteSchedulesOperation() throws Exception {
    setOperationPermissions( IAbsSecurityProvider.SCHEDULER_EXECUTE_ACTION, false );
    provider.validateAction( RepositoryOperation.SCHEDULER_EXECUTE );
}
/**
 * Pipeline function: splits a string into key/value pairs.
 * Pair and key/value delimiters, empty-value handling, trim characters, and
 * duplicate-key policy are all configurable via optional parameters; defaults
 * are whitespace-separated pairs, '='-separated entries, and take-first on dupes.
 *
 * @return the parsed key/value map (empty for a null/empty input)
 */
@Override
public Map<String, String> evaluate(FunctionArgs args, EvaluationContext context) {
    final String value = valueParam.required(args, context);
    if (Strings.isNullOrEmpty(value)) {
        return Collections.emptyMap();
    }
    final CharMatcher kvPairsMatcher = splitParam.optional(args, context).orElse(CharMatcher.whitespace());
    final CharMatcher kvDelimMatcher = valueSplitParam.optional(args, context).orElse(CharMatcher.anyOf("="));
    // Outer splitter separates pairs, honoring quoting so quoted delimiters don't split.
    Splitter outerSplitter = Splitter.on(DelimiterCharMatcher.withQuoteHandling(kvPairsMatcher))
            .omitEmptyStrings()
            .trimResults();
    // Inner splitter separates key from value; limit(2) keeps delimiters inside values intact.
    final Splitter entrySplitter = Splitter.on(kvDelimMatcher)
            .omitEmptyStrings()
            .limit(2)
            .trimResults();
    return new MapSplitter(outerSplitter,
            entrySplitter,
            ignoreEmptyValuesParam.optional(args, context).orElse(true),
            trimCharactersParam.optional(args, context).orElse(CharMatcher.none()),
            trimValueCharactersParam.optional(args, context).orElse(CharMatcher.none()),
            allowDupeKeysParam.optional(args, context).orElse(true),
            duplicateHandlingParam.optional(args, context).orElse(TAKE_FIRST))
            .split(value);
}

// With handle_dup_keys=TAKE_LAST, the last occurrence of a duplicated key wins.
@Test
void testTakeLast() {
    final Map<String, Expression> arguments = Map.of("value", valueExpression,
            "handle_dup_keys", new StringExpression(new CommonToken(0), "TAKE_LAST"));
    Map<String, String> result = classUnderTest.evaluate(new FunctionArgs(classUnderTest, arguments), evaluationContext);
    Map<String, String> expectedResult = new HashMap<>();
    expectedResult.put("test", "remi");
    expectedResult.put("number", "12345");
    assertThat(result).containsExactlyInAnyOrderEntriesOf(expectedResult);
}
/**
 * Verbose-level log entry point. Formats the message and forwards it to the
 * backing logger only when that logger supports the V level; the formatted
 * message is also appended to the in-memory log buffer.
 *
 * @param tag  log tag
 * @param text printf-style message template
 * @param args template arguments
 */
// synchronized: serializes access to the shared msLogger/in-memory buffer.
public static synchronized void v(final String tag, String text, Object... args) {
    if (msLogger.supportsV()) {
        String msg = getFormattedString(text, args);
        msLogger.v(tag, msg);
        addLog(LVL_V, tag, msg);
    }
}

// When the backing logger reports supportsV()=false, no v() call may reach it —
// with or without format arguments.
@Test
public void testVNotSupported() throws Exception {
    Mockito.when(mMockLog.supportsV()).thenReturn(false);
    Logger.v("mTag", "Text with %d digits", 0);
    Mockito.verify(mMockLog, Mockito.never()).v("mTag", "Text with 0 digits");
    Logger.v("mTag", "Text with no digits");
    Mockito.verify(mMockLog, Mockito.never()).v("mTag", "Text with no digits");
}
/**
 * Validates pipeline options and constructs a DataflowRunner.
 * Validation covers, in order: required options (appName/region), worker
 * settings, GCS temp/staging/profile locations, files to stage, job name
 * (normalized to lowercase), project id, debug options, JFR/Java-version
 * compatibility, streaming upload buffer default, and the user-agent string.
 *
 * @param options the pipeline options to validate
 * @return a runner bound to the validated options
 * @throws IllegalArgumentException on any validation failure
 */
public static DataflowRunner fromOptions(PipelineOptions options) {
    DataflowPipelineOptions dataflowOptions =
        PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
    ArrayList<String> missing = new ArrayList<>();
    if (dataflowOptions.getAppName() == null) {
        missing.add("appName");
    }
    // region is only mandatory when targeting the real Dataflow service endpoint.
    if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
        && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
        missing.add("region");
    }
    if (missing.size() > 0) {
        throw new IllegalArgumentException(
            "Missing required pipeline options: " + Joiner.on(',').join(missing));
    }
    validateWorkerSettings(
        PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
    PathValidator validator = dataflowOptions.getPathValidator();
    String gcpTempLocation;
    try {
        gcpTempLocation = dataflowOptions.getGcpTempLocation();
    } catch (Exception e) {
        throw new IllegalArgumentException(
            "DataflowRunner requires gcpTempLocation, "
                + "but failed to retrieve a value from PipelineOptions",
            e);
    }
    validator.validateOutputFilePrefixSupported(gcpTempLocation);
    String stagingLocation;
    try {
        stagingLocation = dataflowOptions.getStagingLocation();
    } catch (Exception e) {
        throw new IllegalArgumentException(
            "DataflowRunner requires stagingLocation, "
                + "but failed to retrieve a value from PipelineOptions",
            e);
    }
    validator.validateOutputFilePrefixSupported(stagingLocation);
    if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
        validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
    }
    if (dataflowOptions.getFilesToStage() != null) {
        // The user specifically requested these files, so fail now if they do not exist.
        // (automatically detected classpath elements are permitted to not exist, so later
        // staging will not fail on nonexistent files)
        dataflowOptions.getFilesToStage().stream()
            .forEach(
                stagedFileSpec -> {
                    File localFile;
                    // "name=path" specs stage a file under an explicit name.
                    if (stagedFileSpec.contains("=")) {
                        String[] components = stagedFileSpec.split("=", 2);
                        localFile = new File(components[1]);
                    } else {
                        localFile = new File(stagedFileSpec);
                    }
                    if (!localFile.exists()) {
                        // should be FileNotFoundException, but for build-time backwards compatibility
                        // cannot add checked exception
                        throw new RuntimeException(
                            String.format("Non-existent files specified in filesToStage: %s", localFile));
                    }
                });
    } else {
        dataflowOptions.setFilesToStage(
            detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
        if (dataflowOptions.getFilesToStage().isEmpty()) {
            throw new IllegalArgumentException("No files to stage has been found.");
        } else {
            LOG.info(
                "PipelineOptions.filesToStage was not specified. "
                    + "Defaulting to files from the classpath: will stage {} files. "
                    + "Enable logging at DEBUG level to see which files will be staged.",
                dataflowOptions.getFilesToStage().size());
            LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
        }
    }
    // Verify jobName according to service requirements, truncating converting to lowercase if
    // necessary.
    String jobName = dataflowOptions.getJobName().toLowerCase();
    checkArgument(
        jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
        "JobName invalid; the name must consist of only the characters "
            + "[-a-z0-9], starting with a letter and ending with a letter "
            + "or number");
    if (!jobName.equals(dataflowOptions.getJobName())) {
        LOG.info(
            "PipelineOptions.jobName did not match the service requirements. "
                + "Using {} instead of {}.",
            jobName,
            dataflowOptions.getJobName());
    }
    dataflowOptions.setJobName(jobName);
    // Verify project
    String project = dataflowOptions.getProject();
    // All-digit values are project numbers, not project IDs.
    if (project.matches("[0-9]*")) {
        throw new IllegalArgumentException(
            "Project ID '"
                + project
                + "' invalid. Please make sure you specified the Project ID, not project number.");
    } else if (!project.matches(PROJECT_ID_REGEXP)) {
        throw new IllegalArgumentException(
            "Project ID '"
                + project
                + "' invalid. Please make sure you specified the Project ID, not project"
                + " description.");
    }
    DataflowPipelineDebugOptions debugOptions =
        dataflowOptions.as(DataflowPipelineDebugOptions.class);
    // Verify the number of worker threads is a valid value
    if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
        throw new IllegalArgumentException(
            "Number of worker harness threads '"
                + debugOptions.getNumberOfWorkerHarnessThreads()
                + "' invalid. Please make sure the value is non-negative.");
    }
    // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
    if (dataflowOptions.getRecordJfrOnGcThrashing()
        && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
        throw new IllegalArgumentException(
            "recordJfrOnGcThrashing is only supported on java 9 and up.");
    }
    if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
        dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
    }
    // Adding the Java version to the SDK name for user's and support convenience.
    String agentJavaVer = "(JRE 8 environment)";
    if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
        agentJavaVer =
            String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
    }
    DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
    String userAgentName = dataflowRunnerInfo.getName();
    Preconditions.checkArgument(
        !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
    String userAgentVersion = dataflowRunnerInfo.getVersion();
    Preconditions.checkArgument(
        !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
    String userAgent =
        String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
    dataflowOptions.setUserAgent(userAgent);
    return new DataflowRunner(dataflowOptions);
}
// A staging location pointing at a non-existent bucket must fail fromOptions()
// with a RuntimeException caused by FileNotFoundException; no job is created.
@Test
public void testNonExistentStagingLocation() throws IOException {
    DataflowPipelineOptions options = buildPipelineOptions();
    options.setStagingLocation(NON_EXISTENT_BUCKET);
    thrown.expect(RuntimeException.class);
    thrown.expectCause(instanceOf(FileNotFoundException.class));
    thrown.expectMessage(
            containsString("Output path does not exist or is not writeable: " + NON_EXISTENT_BUCKET));
    DataflowRunner.fromOptions(options);
    ArgumentCaptor<Job> jobCaptor = ArgumentCaptor.forClass(Job.class);
    Mockito.verify(mockJobs).create(eq(PROJECT_ID), eq(REGION_ID), jobCaptor.capture());
    assertValidJob(jobCaptor.getValue());
}
/**
 * Reads a classpath resource fully and returns its content decoded as UTF-8.
 *
 * @param path classpath-relative location of the resource
 * @return the complete resource content as a UTF-8 string
 */
@SneakyThrows
public static String readUtf8String(String path) {
    ClassPathResource classPathResource = new ClassPathResource(path);
    try (
            InputStream inputStream = classPathResource.getInputStream();
            ByteArrayOutputStream buf = new ByteArrayOutputStream()) {
        // Bulk copy (Java 9+) instead of the previous one-byte-at-a-time read loop:
        // identical result, far fewer per-byte method calls, and no sentinel constant.
        inputStream.transferTo(buf);
        return buf.toString("UTF-8");
    }
}
// Reads a fixture containing ASCII, CJK, emoji, symbols and platform line
// separators, and checks the decoded content matches exactly.
@Test
public void assertReadUtf8String2() {
    String linebreaks = System.getProperty("line.separator");
    String testText = "abcd简体繁体\uD83D\uDE04\uD83D\uDD25& *" + linebreaks + "second line" + linebreaks + "empty line next" + linebreaks;
    String testFilePath = "test/test_utf8.txt";
    String contentByFileUtil = FileUtil.readUtf8String(testFilePath);
    Assert.assertTrue(testText.equals(contentByFileUtil));
}
/**
 * Records an original response header and mirrors its first value into the
 * flat parameter map.
 *
 * @param key    header name; ignored when null/empty
 * @param values header values; ignored when null/empty (previously an empty
 *               list caused {@code values.get(0)} to throw IndexOutOfBoundsException,
 *               and a null list caused a NullPointerException)
 */
public void addOriginalResponseHeader(String key, List<String> values) {
    if (StringUtils.isNotEmpty(key) && values != null && !values.isEmpty()) {
        this.originalResponseHeader.put(key, values);
        // Only the first value participates in the flat param view.
        addParam(key, values.get(0));
    }
}
// The full value list must be stored under the key while only the first value
// is exposed through getValue().
@Test
void testAddOriginalResponseHeader() {
    List<String> list = new ArrayList<>(4);
    list.add("test1");
    list.add("test2");
    list.add("test3");
    list.add("test4");
    Header header = Header.newInstance();
    header.addOriginalResponseHeader("test", list);
    assertEquals("test1", header.getValue("test"));
    assertEquals(1, header.getOriginalResponseHeader().size());
    assertEquals(list, header.getOriginalResponseHeader().get("test"));
}
/**
 * Replaces the global set of exception classes that tracing should ignore.
 *
 * @param ignoreClasses the exception classes to ignore; must not be null
 */
// NOTE(review): the companion test expects IllegalArgumentException when an
// ELEMENT is null, so checkNotNull (defined elsewhere) presumably validates
// each element as well as the array — confirm against its implementation.
@SafeVarargs
public static void setExceptionsToIgnore(Class<? extends Throwable>... ignoreClasses) {
    checkNotNull(ignoreClasses);
    Tracer.ignoreClasses = ignoreClasses;
}

// Passing a null element in the varargs must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testNull2() {
    Tracer.setExceptionsToIgnore(IgnoreException.class, null);
}
/**
 * Begins a tolerant comparison: the returned object's {@code of(expected)}
 * asserts that the actual double is within {@code tolerance} of the expected
 * value, failing with expected/actual/tolerance facts otherwise.
 *
 * @param tolerance the maximum allowed absolute difference (validated in of())
 * @return the second half of the fluent isWithin(...).of(...) assertion
 */
public TolerantDoubleComparison isWithin(double tolerance) {
    return new TolerantDoubleComparison() {
        @Override
        public void of(double expected) {
            Double actual = DoubleSubject.this.actual;
            // Subject must hold a non-null value before a tolerant comparison makes sense.
            checkNotNull(
                    actual, "actual value cannot be null. tolerance=%s expected=%s", tolerance, expected);
            checkTolerance(tolerance);
            if (!equalWithinTolerance(actual, expected, tolerance)) {
                failWithoutActual(
                        fact("expected", doubleToString(expected)),
                        butWas(),
                        fact("outside tolerance", doubleToString(tolerance)));
            }
        }
    };
}

// Zero tolerance degenerates to exact equality: each extreme value matches only
// itself, never its nearest representable neighbour.
@Test
public void isWithinZeroTolerance() {
    double max = Double.MAX_VALUE;
    assertThat(max).isWithin(0.0).of(max);
    assertThat(NEARLY_MAX).isWithin(0.0).of(NEARLY_MAX);
    assertThatIsWithinFails(max, 0.0, NEARLY_MAX);
    assertThatIsWithinFails(NEARLY_MAX, 0.0, max);
    double negativeMax = -1.0 * Double.MAX_VALUE;
    assertThat(negativeMax).isWithin(0.0).of(negativeMax);
    assertThat(NEGATIVE_NEARLY_MAX).isWithin(0.0).of(NEGATIVE_NEARLY_MAX);
    assertThatIsWithinFails(negativeMax, 0.0, NEGATIVE_NEARLY_MAX);
    assertThatIsWithinFails(NEGATIVE_NEARLY_MAX, 0.0, negativeMax);
    double min = Double.MIN_VALUE;
    assertThat(min).isWithin(0.0).of(min);
    assertThat(OVER_MIN).isWithin(0.0).of(OVER_MIN);
    assertThatIsWithinFails(min, 0.0, OVER_MIN);
    assertThatIsWithinFails(OVER_MIN, 0.0, min);
    double negativeMin = -1.0 * Double.MIN_VALUE;
    assertThat(negativeMin).isWithin(0.0).of(negativeMin);
    assertThat(UNDER_NEGATIVE_MIN).isWithin(0.0).of(UNDER_NEGATIVE_MIN);
    assertThatIsWithinFails(negativeMin, 0.0, UNDER_NEGATIVE_MIN);
    assertThatIsWithinFails(UNDER_NEGATIVE_MIN, 0.0, negativeMin);
}
/**
 * Removes and returns the highest-scored element, blocking on the async
 * variant's result; returns null when the set is empty.
 */
@Override
public V pollLast() {
    return get(pollLastAsync());
}

// pollLast on an empty set yields null; after adding a/b/c (ascending scores)
// it removes and returns "c", leaving a and b.
@Test
public void testPollLast() {
    RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple");
    Assertions.assertNull(set.pollLast());
    set.add(0.1, "a");
    set.add(0.2, "b");
    set.add(0.3, "c");
    Assertions.assertEquals("c", set.pollLast());
    assertThat(set).containsExactly("a", "b");
}
/**
 * Tears the detector down: cancels any in-flight corner generation, pushes a
 * final NOT_LOADED state to subscribers, and completes the state stream so no
 * further state changes are emitted.
 */
public void destroy() {
    mGeneratingDisposable.dispose();
    mGenerateStateSubject.onNext(LoadingState.NOT_LOADED);
    mGenerateStateSubject.onComplete();
}

// After destroy(), draining background work must never move the state away
// from NOT_LOADED — even across subscription disposal.
@Test
public void testCalculatesCornersInBackgroundWithTwoDictionariesButDestroyed() {
    TestRxSchedulers.drainAllTasks();
    mDetectorUnderTest.destroy();
    Assert.assertEquals(GestureTypingDetector.LoadingState.NOT_LOADED, mCurrentState.get());
    TestRxSchedulers.drainAllTasks();
    Assert.assertEquals(GestureTypingDetector.LoadingState.NOT_LOADED, mCurrentState.get());
    TestRxSchedulers.drainAllTasks();
    mSubscribeState.dispose();
    TestRxSchedulers.drainAllTasks();
    Assert.assertEquals(GestureTypingDetector.LoadingState.NOT_LOADED, mCurrentState.get());
}
/**
 * Persists a detached live object (and, via PERSIST cascade, objects it
 * references). The already-persisted map breaks cycles so each object is
 * stored at most once.
 *
 * @param detachedObject the object graph root to persist
 * @return the attached live-object proxy
 */
@Override
public <T> T persist(T detachedObject) {
    Map<Object, Object> alreadyPersisted = new HashMap<Object, Object>();
    return persist(detachedObject, alreadyPersisted, RCascadeType.PERSIST);
}

// Persisting a class that exposes no id setter/getter must still succeed
// (no exception expected — the call itself is the assertion).
@Test
public void testWithoutIdSetterGetter() {
    ClassWithoutIdSetterGetter sg = new ClassWithoutIdSetterGetter();
    sg = redisson.getLiveObjectService().persist(sg);
}
/**
 * Fetches windowed rows for a key from the materialized windowed state store,
 * keeping only windows whose start AND end instants fall inside the supplied
 * bounds. The fetch range is derived from both bound sets (end bounds shifted
 * back by the window size), then each candidate window is re-checked exactly.
 *
 * @param key               the key to look up
 * @param partition         the store partition to query
 * @param windowStartBounds permitted range of window start times
 * @param windowEndBounds   permitted range of window end times
 * @param position          optional Kafka Streams position bound (unused here)
 * @return an iterator over the matching windowed rows
 * @throws MaterializationException if the store lookup fails
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
        final GenericKey key,
        final int partition,
        final Range<Instant> windowStartBounds,
        final Range<Instant> windowEndBounds,
        final Optional<Position> position
) {
    try {
        final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store = stateStore
                .store(QueryableStoreTypes.timestampedWindowStore(), partition);
        // Coarse fetch range combining start bounds and (end bounds - window size).
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it =
                cacheBypassFetcher.fetch(store, key, lower, upper)) {
            final Builder<WindowedRow> builder = ImmutableList.builder();
            while (it.hasNext()) {
                final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
                final Instant windowStart = Instant.ofEpochMilli(next.key);
                // Exact bound checks: the coarse fetch may over-select at the edges.
                if (!windowStartBounds.contains(windowStart)) {
                    continue;
                }
                final Instant windowEnd = windowStart.plus(windowSize);
                if (!windowEndBounds.contains(windowEnd)) {
                    continue;
                }
                final TimeWindow window =
                        new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
                final WindowedRow row = WindowedRow.of(
                        stateStore.schema(),
                        new Windowed<>(key, window),
                        next.value.value(),
                        next.value.timestamp()
                );
                builder.add(row);
            }
            return KsMaterializedQueryResult.rowIterator(builder.build().iterator());
        }
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}

// Open end bounds must exclude windows whose end lands exactly on either
// endpoint; only the strictly-interior window survives.
@Test
public void shouldReturnValuesForOpenEndBounds() {
    // Given:
    final Range<Instant> end = Range.open(
            NOW,
            NOW.plusSeconds(10)
    );
    final Range<Instant> startEquiv = Range.open(
            end.lowerEndpoint().minus(WINDOW_SIZE),
            end.upperEndpoint().minus(WINDOW_SIZE)
    );
    when(fetchIterator.hasNext())
            .thenReturn(true)
            .thenReturn(true)
            .thenReturn(true)
            .thenReturn(false);
    when(fetchIterator.next())
            .thenReturn(new KeyValue<>(startEquiv.lowerEndpoint().toEpochMilli(), VALUE_1))
            .thenReturn(
                    new KeyValue<>(startEquiv.lowerEndpoint().plusMillis(1).toEpochMilli(), VALUE_2))
            .thenReturn(new KeyValue<>(startEquiv.upperEndpoint().toEpochMilli(), VALUE_3))
            .thenThrow(new AssertionError());
    when(cacheBypassFetcher.fetch(eq(tableStore), any(), any(), any())).thenReturn(fetchIterator);
    // When:
    final Iterator<WindowedRow> rowIterator =
            table.get(A_KEY, PARTITION, Range.all(), end).rowIterator;
    // Then:
    assertThat(rowIterator.hasNext(), is(true));
    final List<WindowedRow> resultList = Lists.newArrayList(rowIterator);
    assertThat(resultList, contains(
            WindowedRow.of(
                    SCHEMA,
                    windowedKey(startEquiv.lowerEndpoint().plusMillis(1)),
                    VALUE_2.value(),
                    VALUE_2.timestamp()
            )
    ));
}
/**
 * Compiles the given class-name-to-source map in memory with default settings,
 * returning the loaded classes keyed by name.
 *
 * @param classNameSourceMap fully-qualified class name -> Java source text
 * @param classLoader        parent class loader for the compiled classes
 * @return the compiled, loaded classes
 */
public static Map<String, Class<?>> compile(Map<String, String> classNameSourceMap, ClassLoader classLoader) {
    return compile(classNameSourceMap, classLoader, null);
}

// Syntactically invalid source must surface as a KieMemoryCompilerException.
@Test(expected = KieMemoryCompilerException.class)
public void invalidClass() {
    Map<String, String> source = singletonMap("org.kie.memorycompiler.InvalidJavaClass", "Invalid Java Code");
    KieMemoryCompiler.compile(source, this.getClass().getClassLoader());
}
/**
 * Dispatches an egress fragment to the appropriate listener callback based on the
 * SBE template id. Fragments for other cluster sessions are silently ignored.
 * Fragments with a foreign schema id are forwarded to the extension listener if one
 * is configured, otherwise a {@link ClusterException} is thrown.
 *
 * @param buffer buffer containing the message
 * @param offset offset at which the message header begins
 * @param length total length of the fragment
 * @param header Aeron header for the fragment
 */
@SuppressWarnings("MethodLength")
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header)
{
    messageHeaderDecoder.wrap(buffer, offset);

    final int templateId = messageHeaderDecoder.templateId();
    final int schemaId = messageHeaderDecoder.schemaId();
    if (schemaId != MessageHeaderDecoder.SCHEMA_ID)
    {
        // Unknown schema: hand off to the extension hook (payload starts after the header),
        // or fail fast if no extension is registered.
        if (listenerExtension != null)
        {
            listenerExtension.onExtensionMessage(
                messageHeaderDecoder.blockLength(),
                templateId,
                schemaId,
                messageHeaderDecoder.version(),
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                length - MessageHeaderDecoder.ENCODED_LENGTH);
            return;
        }
        throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId);
    }

    switch (templateId)
    {
        case SessionMessageHeaderDecoder.TEMPLATE_ID:
        {
            sessionMessageHeaderDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            // Only deliver messages addressed to this client's cluster session.
            final long sessionId = sessionMessageHeaderDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                listener.onMessage(
                    sessionId,
                    sessionMessageHeaderDecoder.timestamp(),
                    buffer,
                    offset + SESSION_HEADER_LENGTH,
                    length - SESSION_HEADER_LENGTH,
                    header);
            }
            break;
        }

        case SessionEventDecoder.TEMPLATE_ID:
        {
            sessionEventDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = sessionEventDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                listener.onSessionEvent(
                    sessionEventDecoder.correlationId(),
                    sessionId,
                    sessionEventDecoder.leadershipTermId(),
                    sessionEventDecoder.leaderMemberId(),
                    sessionEventDecoder.code(),
                    sessionEventDecoder.detail());
            }
            break;
        }

        case NewLeaderEventDecoder.TEMPLATE_ID:
        {
            newLeaderEventDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = newLeaderEventDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                listener.onNewLeader(
                    sessionId,
                    newLeaderEventDecoder.leadershipTermId(),
                    newLeaderEventDecoder.leaderMemberId(),
                    newLeaderEventDecoder.ingressEndpoints());
            }
            break;
        }

        case AdminResponseDecoder.TEMPLATE_ID:
        {
            adminResponseDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = adminResponseDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                final long correlationId = adminResponseDecoder.correlationId();
                final AdminRequestType requestType = adminResponseDecoder.requestType();
                final AdminResponseCode responseCode = adminResponseDecoder.responseCode();
                final String message = adminResponseDecoder.message();
                // The payload follows the fixed block, the variable-length message
                // (header + bytes) and the payload length header; compute its offset manually.
                final int payloadOffset = adminResponseDecoder.offset() +
                    AdminResponseDecoder.BLOCK_LENGTH +
                    AdminResponseDecoder.messageHeaderLength() +
                    message.length() +
                    AdminResponseDecoder.payloadHeaderLength();
                final int payloadLength = adminResponseDecoder.payloadLength();
                listener.onAdminResponse(
                    sessionId,
                    correlationId,
                    requestType,
                    responseCode,
                    message,
                    buffer,
                    payloadOffset,
                    payloadLength);
            }
            break;
        }

        default:
            // Unknown template within our schema: ignore rather than fail.
            break;
    }
}
/** Without an extension listener, a fragment carrying a foreign schema id must be rejected. */
@Test
void defaultEgressListenerBehaviourShouldThrowClusterExceptionOnUnknownSchemaId()
{
    final EgressListener noOpListener =
        (clusterSessionId, timestamp, buffer, offset, length, header) -> {};
    final EgressAdapter adapter = new EgressAdapter(noOpListener, 42, mock(Subscription.class), 5);

    final ClusterException ex = assertThrows(
        ClusterException.class,
        () -> adapter.onFragment(buffer, 0, 64, new Header(0, 0)));

    final String expected =
        "ERROR - expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=0";
    assertEquals(expected, ex.getMessage());
}
/**
 * Creates the app namespace locally, appending the namespace prefix by default.
 *
 * @param appNamespace the namespace to create
 * @return the created namespace
 */
@Transactional
public AppNamespace createAppNamespaceInLocal(AppNamespace appNamespace) {
  // true: append the org/app namespace prefix when building the final name.
  final boolean appendNamespacePrefix = true;
  return createAppNamespaceInLocal(appNamespace, appendNamespacePrefix);
}
/**
 * Creating a public namespace whose name already exists (with prefix appending disabled)
 * must be rejected with a BadRequestException.
 */
@Test(expected = BadRequestException.class)
@Sql(scripts = "/sql/appnamespaceservice/init-appnamespace.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testCreatePublicAppNamespaceExistedWithNoAppendnamespacePrefix() {
  AppNamespace duplicate = assembleBaseAppNamespace();
  // "datasource" is pre-seeded by init-appnamespace.sql, so this creation collides.
  duplicate.setName("datasource");
  duplicate.setPublic(true);
  duplicate.setFormat(ConfigFileFormat.Properties.getValue());

  appNamespaceService.createAppNamespaceInLocal(duplicate, false);
}
/**
 * Creates a formatter for numeric values driven by the given format template.
 *
 * @param format the number format template (must not be null)
 * @return a number formatter for the template
 */
public static Formatter forNumbers(@Nonnull String format) {
    final Formatter numberFormatter = new NumberFormat(format);
    return numberFormatter;
}
@Test
public void testOverflow() {
    // Values that do not fit the template are rendered with '#' overflow markers.
    // Single digit position cannot hold two digits or NaN.
    Formatter f = forNumbers("9");
    check(-10, f, "-#");
    check(NaN, f, " #");

    // Fractional digit position overflows the same way.
    f = forNumbers(".9");
    check(-1, f, "-.#");
    check(NaN, f, " .#");

    // Sign-only template: only the sign survives; NaN renders as '+'.
    f = forNumbers("SG");
    check(-1, f, "-");
    check(NaN, f, "+");

    // Ordinal suffix template; NaN blanks the suffix.
    f = forNumbers("9th");
    check(-10, f, "-#th");
    check(NaN, f, " # ");

    // Scientific notation template: infinities and NaN overflow every position.
    f = forNumbers("9.9EEEE");
    check(NEGATIVE_INFINITY, f, "-#.#E+##");
    check(NaN, f, " #.#E+##");

    // Roman numerals (fill-mode): anything outside 1..3999 overflows to 15 '#'s.
    f = forNumbers("FMRN");
    check(-1, f, "###############");
    check(0, f, "###############");
    check(4000, f, "###############");
    check(NaN, f, "###############");
}
/**
 * Creates a deep-enough copy of these node options. Primitive and immutable fields
 * are copied directly; {@code raftOptions} is deep-copied (or defaulted when null).
 * RPC-level settings inherited from the superclass are copied via the super getters.
 *
 * NOTE(review): any new field added to NodeOptions must also be added here,
 * otherwise copies will silently lose it.
 *
 * @return a new NodeOptions instance with the same settings
 */
@Override
public NodeOptions copy() {
    final NodeOptions nodeOptions = new NodeOptions();
    nodeOptions.setElectionTimeoutMs(this.electionTimeoutMs);
    nodeOptions.setElectionPriority(this.electionPriority);
    nodeOptions.setDecayPriorityGap(this.decayPriorityGap);
    nodeOptions.setSnapshotIntervalSecs(this.snapshotIntervalSecs);
    nodeOptions.setSnapshotLogIndexMargin(this.snapshotLogIndexMargin);
    nodeOptions.setCatchupMargin(this.catchupMargin);
    nodeOptions.setFilterBeforeCopyRemote(this.filterBeforeCopyRemote);
    nodeOptions.setDisableCli(this.disableCli);
    nodeOptions.setSharedTimerPool(this.sharedTimerPool);
    nodeOptions.setTimerPoolSize(this.timerPoolSize);
    nodeOptions.setCliRpcThreadPoolSize(this.cliRpcThreadPoolSize);
    nodeOptions.setRaftRpcThreadPoolSize(this.raftRpcThreadPoolSize);
    nodeOptions.setEnableMetrics(this.enableMetrics);
    // Deep-copy mutable raft options; fall back to defaults when unset.
    nodeOptions.setRaftOptions(this.raftOptions == null ? new RaftOptions() : this.raftOptions.copy());
    nodeOptions.setSharedElectionTimer(this.sharedElectionTimer);
    nodeOptions.setSharedVoteTimer(this.sharedVoteTimer);
    nodeOptions.setSharedStepDownTimer(this.sharedStepDownTimer);
    nodeOptions.setSharedSnapshotTimer(this.sharedSnapshotTimer);
    // RPC options live in the superclass; copy them through the super accessors.
    nodeOptions.setRpcConnectTimeoutMs(super.getRpcConnectTimeoutMs());
    nodeOptions.setRpcDefaultTimeout(super.getRpcDefaultTimeout());
    nodeOptions.setRpcInstallSnapshotTimeout(super.getRpcInstallSnapshotTimeout());
    nodeOptions.setRpcProcessorThreadPoolSize(super.getRpcProcessorThreadPoolSize());
    nodeOptions.setEnableRpcChecksum(super.isEnableRpcChecksum());
    nodeOptions.setMetricRegistry(super.getMetricRegistry());
    return nodeOptions;
}
@Test
public void testCopyRpcOptionsRight() {
    final NodeOptions nodeOptions = new NodeOptions();
    // Sanity-check the documented defaults first.
    assertEquals(1000, nodeOptions.getRpcConnectTimeoutMs());
    assertEquals(5000, nodeOptions.getRpcDefaultTimeout());
    assertEquals(5 * 60 * 1000, nodeOptions.getRpcInstallSnapshotTimeout());
    assertEquals(80, nodeOptions.getRpcProcessorThreadPoolSize());
    assertFalse(nodeOptions.isEnableRpcChecksum());
    assertNull(nodeOptions.getMetricRegistry());

    // change options
    nodeOptions.setRpcConnectTimeoutMs(2000);
    nodeOptions.setRpcDefaultTimeout(6000);
    nodeOptions.setRpcInstallSnapshotTimeout(6 * 60 * 1000);
    nodeOptions.setRpcProcessorThreadPoolSize(90);
    nodeOptions.setEnableRpcChecksum(true);
    nodeOptions.setMetricRegistry(new MetricRegistry());

    // copy options — every superclass (RPC-level) field must survive the copy.
    final NodeOptions copy = nodeOptions.copy();
    assertEquals(2000, copy.getRpcConnectTimeoutMs());
    assertEquals(6000, copy.getRpcDefaultTimeout());
    assertEquals(6 * 60 * 1000, copy.getRpcInstallSnapshotTimeout());
    assertEquals(90, copy.getRpcProcessorThreadPoolSize());
    assertTrue(copy.isEnableRpcChecksum());
    assertNotNull(copy.getMetricRegistry());
}
/**
 * Collector that merges a stream of maps into one map whose values are lists of
 * all values seen for each key, backed by a HashMap.
 *
 * @param <K> key type
 * @param <V> value type
 * @return the merging collector
 */
public static <K, V> Collector<Map<K, V>, ?, Map<K, List<V>>> reduceListMap() {
	// Default the result container to a plain HashMap.
	return reduceListMap(HashMap::new);
}
@Test
public void reduceListMapTest() {
	final Set<Map<String, Integer>> nameScoreMapList = StreamUtil.of(
			// first map in the collection, containing two key/value pairs
			MapUtil.builder("苏格拉底", 1).put("特拉叙马霍斯", 3).build(),
			MapUtil.of("苏格拉底", 2),
			MapUtil.of("特拉叙马霍斯", 1),
			MapUtil.of("特拉叙马霍斯", 2)
	).collect(Collectors.toSet());
	// perform the aggregation: values for the same key are collected into a list
	final Map<String, List<Integer>> nameScoresMap =
			nameScoreMapList.stream().collect(CollectorUtil.reduceListMap());

	assertEquals(MapUtil.builder("苏格拉底", Arrays.asList(1, 2))
			.put("特拉叙马霍斯", Arrays.asList(3, 1, 2)).build(), nameScoresMap);

	List<Map<String, String>> data = ListUtil.toList(
			MapUtil.builder("name", "sam").put("count", "80").map(),
			MapUtil.builder("name", "sam").put("count", "81").map(),
			MapUtil.builder("name", "sam").put("count", "82").map(),
			MapUtil.builder("name", "jack").put("count", "80").map(),
			MapUtil.builder("name", "jack").put("count", "90").map()
	);
	// Combine with groupingBy: outer grouping by "name", inner merge of each row's columns.
	Map<String, Map<String, List<String>>> nameMap = data.stream()
			.collect(Collectors.groupingBy(e -> e.get("name"), CollectorUtil.reduceListMap()));
	assertEquals(MapUtil.builder("jack", MapUtil.builder("name", Arrays.asList("jack", "jack"))
					.put("count", Arrays.asList("80", "90")).build())
			.put("sam", MapUtil.builder("name", Arrays.asList("sam", "sam", "sam"))
					.put("count", Arrays.asList("80", "81", "82")).build())
			.build(), nameMap);
}
/**
 * Creates a new task from the request's id and spec.
 *
 * @param request the task creation request
 * @throws Throwable if the task manager rejects the creation
 */
public void createTask(CreateTaskRequest request) throws Throwable {
    final String taskId = request.id();
    taskManager.createTask(taskId, request.spec());
}
@Test
public void testTaskDistribution() throws Exception {
    // Use a mock clock so the task lifecycle can be driven deterministically.
    MockTime time = new MockTime(0, 0, 0);
    Scheduler scheduler = new MockScheduler(time);
    try (MiniTrogdorCluster cluster = new MiniTrogdorCluster.Builder().
            addCoordinator("node01").
            addAgent("node01").
            addAgent("node02").
            scheduler(scheduler).
            build()) {
        CoordinatorClient coordinatorClient = cluster.coordinatorClient();
        AgentClient agentClient1 = cluster.agentClient("node01");
        AgentClient agentClient2 = cluster.agentClient("node02");
        // Initially: no tasks anywhere.
        new ExpectedTasks().
            waitFor(coordinatorClient).
            waitFor(agentClient1).
            waitFor(agentClient2);

        // Create a no-op task starting at t=5 with duration 7 (ends at t=12... see below).
        NoOpTaskSpec fooSpec = new NoOpTaskSpec(5, 7);
        coordinatorClient.createTask(new CreateTaskRequest("foo", fooSpec));
        // Before the start time elapses the task is pending everywhere.
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").taskState(
                new TaskPending(fooSpec)).build()).
            waitFor(coordinatorClient).
            waitFor(agentClient1).
            waitFor(agentClient2);

        // Advance past the start time: the task runs on both agents.
        time.sleep(11);
        ObjectNode status1 = new ObjectNode(JsonNodeFactory.instance);
        status1.set("node01", new TextNode("active"));
        status1.set("node02", new TextNode("active"));
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").
                taskState(new TaskRunning(fooSpec, 11, status1)).
                workerState(new WorkerRunning("foo", fooSpec, 11, new TextNode("active"))).
                build()).
            waitFor(coordinatorClient).
            waitFor(agentClient1).
            waitFor(agentClient2);

        // Advance past the duration: the task completes on both agents.
        time.sleep(7);
        ObjectNode status2 = new ObjectNode(JsonNodeFactory.instance);
        status2.set("node01", new TextNode("done"));
        status2.set("node02", new TextNode("done"));
        new ExpectedTasks().
            addTask(new ExpectedTaskBuilder("foo").
                taskState(new TaskDone(fooSpec, 11, 18, "", false, status2)).
                workerState(new WorkerDone("foo", fooSpec, 11, 18, new TextNode("done"), "")).
                build()).
            waitFor(coordinatorClient).
            waitFor(agentClient1).
            waitFor(agentClient2);
    }
}
public synchronized boolean tryLock() throws IOException { LOGGER.trace("Acquiring lock on {}", file.getAbsolutePath()); try { // weirdly this method will return null if the lock is held by another // process, but will throw an exception if the lock is held by this process // so we have to handle both cases flock = channel.tryLock(); return flock != null; } catch (OverlappingFileLockException e) { return false; } }
@Test
void testTryLock() throws IOException {
    File tempFile = TestUtils.tempFile();
    FileLock lock1 = new FileLock(tempFile);
    try {
        // First acquisition succeeds; re-acquiring through the same instance fails.
        assertTrue(lock1.tryLock());
        assertFalse(lock1.tryLock());
        // A second instance in the same JVM also cannot acquire the held lock;
        // tryLock() reports false, while blocking lock() surfaces the JDK exception.
        FileLock lock2 = new FileLock(tempFile);
        assertFalse(lock2.tryLock());
        assertThrows(OverlappingFileLockException.class, lock2::lock);
        lock1.unlock();
    } finally {
        lock1.destroy();
    }
}
/**
 * Pages through query results after applying an in-memory filter and a sort,
 * without a separate grand-total query (an empty query is used for the grand total).
 *
 * @param query   MongoDB query selecting the candidate documents
 * @param filter  in-memory predicate applied after the query
 * @param sort    sort specification
 * @param page    1-based page number
 * @param perPage page size
 * @return the requested page of matching DTOs
 */
protected PaginatedList<DTO> findPaginatedWithQueryFilterAndSort(Bson query,
                                                                 Predicate<DTO> filter,
                                                                 Bson sort,
                                                                 int page,
                                                                 int perPage) {
    // Delegate to the grand-total variant with an empty grand-total query.
    return findPaginatedWithQueryFilterAndSortWithGrandTotal(
            query, filter, sort, DBQuery.empty(), page, perPage);
}
@Test
public void findPaginatedWithQueryFilterAndSort() {
    // Seed seven documents; the filter keeps only hello2..hello6 (total = 5).
    dbService.save(newDto("hello1"));
    dbService.save(newDto("hello2"));
    dbService.save(newDto("hello3"));
    dbService.save(newDto("hello4"));
    dbService.save(newDto("hello5"));
    dbService.save(newDto("hello6"));
    dbService.save(newDto("hello7"));

    final Predicate<TestDTO> filter = view -> view.title.matches("hello[23456]");

    // Page 1 of size 2, ascending: first two filtered titles.
    final PaginatedList<TestDTO> page1 = dbService.findPaginatedWithQueryFilterAndSort(
            DBQuery.empty(), filter, DBSort.asc("title"), 1, 2);

    assertThat(page1.pagination().count()).isEqualTo(2);
    assertThat(page1.pagination().total()).isEqualTo(5);
    assertThat(page1.delegate())
            .extracting("title")
            .containsExactly("hello2", "hello3");

    final PaginatedList<TestDTO> page2 = dbService.findPaginatedWithQueryFilterAndSort(
            DBQuery.empty(), filter, DBSort.asc("title"), 2, 2);

    assertThat(page2.pagination().count()).isEqualTo(2);
    assertThat(page2.pagination().total()).isEqualTo(5);
    assertThat(page2.delegate())
            .extracting("title")
            .containsExactly("hello4", "hello5");

    // Last page holds only the single remaining match.
    final PaginatedList<TestDTO> page3 = dbService.findPaginatedWithQueryFilterAndSort(
            DBQuery.empty(), filter, DBSort.asc("title"), 3, 2);

    assertThat(page3.pagination().count()).isEqualTo(1);
    assertThat(page3.pagination().total()).isEqualTo(5);
    assertThat(page3.delegate())
            .extracting("title")
            .containsExactly("hello6");

    // Different page size: page 2 of size 4 also ends on the single leftover match.
    final PaginatedList<TestDTO> page4 = dbService.findPaginatedWithQueryFilterAndSort(
            DBQuery.empty(), filter, DBSort.asc("title"), 2, 4);

    assertThat(page4.pagination().count()).isEqualTo(1);
    assertThat(page4.pagination().total()).isEqualTo(5);
    assertThat(page4.delegate())
            .extracting("title")
            .containsExactly("hello6");

    // Descending sort reverses the order within the page.
    final PaginatedList<TestDTO> page1reverse = dbService.findPaginatedWithQueryFilterAndSort(
            DBQuery.empty(), filter, DBSort.desc("title"), 1, 2);

    assertThat(page1reverse.pagination().count()).isEqualTo(2);
    assertThat(page1reverse.pagination().total()).isEqualTo(5);
    assertThat(page1reverse.delegate())
            .extracting("title")
            .containsExactly("hello6", "hello5");
}
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? 
"Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); 
setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
/** A Gauntlet completion message must persist kc 123 under the "gauntlet" key. */
@Test
public void testGauntlet()
{
	final String completionText = "Your Gauntlet completion count is: <col=ff0000>123</col>.";
	final ChatMessage event = new ChatMessage(null, GAMEMESSAGE, "", completionText, null, 0);

	chatCommandsPlugin.onChatMessage(event);

	verify(configManager).setRSProfileConfiguration("killcount", "gauntlet", 123);
}
/**
 * Looks up the binary protocol value handler registered for a PostgreSQL column type.
 *
 * @param binaryColumnType the binary column type to resolve
 * @return the registered protocol value handler
 * @throws IllegalArgumentException if no handler is registered for the type
 */
public static PostgreSQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
    // Single map lookup; a null result means the type was never registered.
    PostgreSQLBinaryProtocolValue result = BINARY_PROTOCOL_VALUES.get(binaryColumnType);
    Preconditions.checkArgument(null != result,
            "Cannot find PostgreSQL type '%s' in column type when process binary protocol value", binaryColumnType);
    return result;
}
/** NUMERIC must resolve to the numeric-specific binary protocol value handler. */
@Test
void assertGetNumericBinaryProtocolValue() {
    assertThat(
            PostgreSQLBinaryProtocolValueFactory.getBinaryProtocolValue(PostgreSQLColumnType.NUMERIC),
            instanceOf(PostgreSQLNumericBinaryProtocolValue.class));
}
/**
 * {@inheritDoc}
 *
 * @return 0, meaning there is no fixed limit on concurrently open statements
 */
@Override
public int getMaxStatements() {
    return 0;
}
/** getMaxStatements must report 0 (no limit). */
@Test
void assertGetMaxStatements() {
    assertThat(metaData.getMaxStatements(), is(0));
}
/**
 * Mean of the exponential distribution, which equals 1 / rate.
 *
 * @return the distribution mean
 */
@Override
public double mean() {
    return 1.0 / lambda;
}
@Test
public void testMean() {
    System.out.println("mean");
    // For Exp(lambda) the mean is 1/lambda; rand() is called first to exercise sampling.
    ExponentialDistribution instance = new ExponentialDistribution(1.0);
    instance.rand();
    assertEquals(1.0, instance.mean(), 1E-7);

    instance = new ExponentialDistribution(2.0);
    instance.rand();
    assertEquals(0.5, instance.mean(), 1E-7);

    instance = new ExponentialDistribution(3.0);
    instance.rand();
    assertEquals(0.3333333, instance.mean(), 1E-7);

    instance = new ExponentialDistribution(4.0);
    instance.rand();
    assertEquals(0.25, instance.mean(), 1E-7);
}
/**
 * Byte distance between this metadata's segment position and {@code that}'s.
 * Both must carry full segment position info and live on the same log segment.
 *
 * @param that the metadata to compare against
 * @return this position minus that position, in bytes
 * @throws KafkaException if either side lacks position info or segments differ
 */
public int positionDiff(LogOffsetMetadata that) {
    if (messageOffsetOnly() || that.messageOffsetOnly()) {
        throw new KafkaException(this + " cannot compare its segment position with " + that +
                " since it only has message offset info");
    }
    if (!onSameSegment(that)) {
        throw new KafkaException(this + " cannot compare its segment position with " + that +
                " since they are not on the same segment");
    }
    return relativePositionInSegment - that.relativePositionInSegment;
}
@Test
void testPositionDiff() {
    // metadata1 carries only a message offset — comparisons with it must fail either way.
    LogOffsetMetadata metadata1 = new LogOffsetMetadata(1L);
    LogOffsetMetadata metadata2 = new LogOffsetMetadata(5L, 0L, 5);
    KafkaException exception = assertThrows(KafkaException.class, () -> metadata1.positionDiff(metadata2));
    assertTrue(exception.getMessage().endsWith("since it only has message offset info"));

    exception = assertThrows(KafkaException.class, () -> metadata2.positionDiff(metadata1));
    assertTrue(exception.getMessage().endsWith("since it only has message offset info"));

    // Different segment base offsets (10 vs 0) are not comparable.
    LogOffsetMetadata metadata3 = new LogOffsetMetadata(15L, 10L, 5);
    exception = assertThrows(KafkaException.class, () -> metadata3.positionDiff(metadata2));
    assertTrue(exception.getMessage().endsWith("since they are not on the same segment"));

    // Same segment: the diff is just the byte-position difference (100 - 5).
    LogOffsetMetadata metadata4 = new LogOffsetMetadata(40L, 10L, 100);
    assertEquals(95, metadata4.positionDiff(metadata3));
}
/**
 * Renders a config value as a string, either in standard YAML form or in the
 * legacy Flink flat format.
 *
 * Legacy format rules: Durations use the highest whole unit; list elements are
 * joined with ';' (elements containing ';' are single-quote escaped); map entries
 * are rendered as key:value joined with ',', with ':' and ',' escaped per part.
 *
 * @param o the value to render
 * @param standardYaml true for standard YAML output, false for the legacy format
 * @return the string representation
 */
static String convertToString(Object o, boolean standardYaml) {
    if (standardYaml) {
        if (o.getClass() == String.class) {
            // Plain strings pass through unquoted in YAML mode.
            return (String) o;
        } else {
            return YamlParserUtils.toYAMLString(o);
        }
    }

    // Legacy (pre-YAML) rendering below.
    if (o.getClass() == String.class) {
        return (String) o;
    } else if (o.getClass() == Duration.class) {
        Duration duration = (Duration) o;
        // e.g. 1234000 ms -> "1234 s": largest unit that divides evenly.
        return TimeUtils.formatWithHighestUnit(duration);
    } else if (o instanceof List) {
        // Recursively render elements, escaping the ';' separator where needed.
        return ((List<?>) o)
                .stream()
                .map(e -> escapeWithSingleQuote(convertToString(e, false), ";"))
                .collect(Collectors.joining(";"));
    } else if (o instanceof Map) {
        // Each entry escapes ':' in key and value, then the joined pair escapes ','.
        return ((Map<?, ?>) o)
                .entrySet().stream()
                .map(
                        e -> {
                            String escapedKey =
                                    escapeWithSingleQuote(e.getKey().toString(), ":");
                            String escapedValue =
                                    escapeWithSingleQuote(e.getValue().toString(), ":");

                            return escapeWithSingleQuote(
                                    escapedKey + ":" + escapedValue, ",");
                        })
                .collect(Collectors.joining(","));
    }

    // Fallback for scalars (numbers, booleans, enums, ...).
    return o.toString();
}
@TestTemplate
void testConvertToString() {
    // String: passes through unchanged in both modes.
    assertThat(ConfigurationUtils.convertToString("Simple String", standardYaml))
            .isEqualTo("Simple String");

    // Duration: rendered with the largest unit that divides evenly.
    assertThat(ConfigurationUtils.convertToString(Duration.ZERO, standardYaml))
            .isEqualTo("0 ms");
    assertThat(ConfigurationUtils.convertToString(Duration.ofMillis(123L), standardYaml))
            .isEqualTo("123 ms");
    assertThat(ConfigurationUtils.convertToString(Duration.ofMillis(1_234_000L), standardYaml))
            .isEqualTo("1234 s");
    assertThat(ConfigurationUtils.convertToString(Duration.ofHours(25L), standardYaml))
            .isEqualTo("25 h");

    // List: YAML mode uses flow-sequence syntax; legacy mode joins with ';'
    // and single-quote-escapes elements containing the separator.
    List<Object> listElements = new ArrayList<>();
    listElements.add("Test;String");
    listElements.add(Duration.ZERO);
    listElements.add(42);
    if (standardYaml) {
        assertThat("[Test;String, 0 ms, 42]")
                .isEqualTo(ConfigurationUtils.convertToString(listElements, true));
    } else {
        assertThat("'Test;String';0 ms;42")
                .isEqualTo(ConfigurationUtils.convertToString(listElements, false));
    }

    // Map: YAML mode uses flow-mapping syntax; legacy mode escapes ':' within
    // key/value and ',' across the joined entry.
    Map<Object, Object> mapElements = new HashMap<>();
    mapElements.put("A:,B", "C:,D");
    mapElements.put(10, 20);
    if (standardYaml) {
        assertThat("{'A:,B': 'C:,D', 10: 20}")
                .isEqualTo(ConfigurationUtils.convertToString(mapElements, true));
    } else {
        assertThat("'''A:,B'':''C:,D''',10:20")
                .isEqualTo(ConfigurationUtils.convertToString(mapElements, false));
    }
}
/**
 * Creates the command executor matching a MySQL command packet type.
 * Unrecognized command types get an "unsupported command" executor rather than
 * failing, so the protocol error is reported back to the client.
 *
 * @param commandPacketType the decoded command type
 * @param commandPacket the decoded command packet (cast per type below)
 * @param connectionSession the current connection session
 * @return the executor for the command
 * @throws SQLException if executor construction touches the backend and fails
 */
@SuppressWarnings("DataFlowIssue")
public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket,
                                          final ConnectionSession connectionSession) throws SQLException {
    // Include the SQL text in trace output only when the packet actually carries SQL.
    if (commandPacket instanceof SQLReceivedPacket) {
        log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
    } else {
        log.debug("Execute packet type: {}", commandPacketType);
    }
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitExecutor();
        case COM_INIT_DB:
            return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession);
        case COM_QUERY:
            return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession);
        case COM_PING:
            return new MySQLComPingExecutor(connectionSession);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession);
        case COM_STMT_EXECUTE:
            return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession);
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession);
        case COM_STMT_RESET:
            return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession);
        case COM_STMT_CLOSE:
            return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession);
        case COM_SET_OPTION:
            return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession);
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionExecutor(connectionSession);
        default:
            // Report the unsupported command back to the client instead of throwing here.
            return new MySQLUnsupportedCommandExecutor(commandPacketType);
    }
}
/** COM_SET_OPTION must map to the dedicated set-option executor. */
@Test
void assertNewInstanceWithComSetOption() throws SQLException {
    final CommandExecutor actual = MySQLCommandExecutorFactory.newInstance(
            MySQLCommandPacketType.COM_SET_OPTION, mock(MySQLComSetOptionPacket.class), connectionSession);
    assertThat(actual, instanceOf(MySQLComSetOptionExecutor.class));
}
/**
 * Converts dash-separated text to camelCase, e.g. "enable-cors" -> "enableCors".
 *
 * @param text the dash-separated input
 * @return the camelCase form
 */
public static String dashToCamelCase(final String text) {
    // Delegate without skipping quoted segments (second argument false).
    return dashToCamelCase(text, false);
}
/** Verifies basic dash-to-camelCase conversion. */
@Test
public void testDashToCamelCase() {
    final String converted = dashToCamelCase("enable-cors");
    assertEquals("enableCors", converted);
}
/**
 * Returns the Kafka Streams config for a query, tagging the metrics resource
 * label with the given application id.
 *
 * @param applicationId the streams application id for this query
 * @return unmodifiable map of streams configs
 */
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) {
  final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps());
  map.put(
      MetricCollectors.RESOURCE_LABEL_PREFIX
          + StreamsConfig.APPLICATION_ID_CONFIG,
      applicationId
  );
  // Streams client metrics aren't used in Confluent deployment
  possiblyConfigureConfluentTelemetry(map);
  return Collections.unmodifiableMap(map);
}
@Test
public void shouldSetStreamsConfigConsumerUnprefixedProperties() {
  // An unprefixed consumer config must be passed through into the streams config map.
  final KsqlConfig ksqlConfig = new KsqlConfig(Collections.singletonMap(
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"));
  final Object result = ksqlConfig.getKsqlStreamConfigProps().get(
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG);
  assertThat(result, equalTo("earliest"));
}
/**
 * Thread-safe delegation: fetches the next record from the wrapped partition group
 * while holding this object's monitor.
 *
 * @param info receives partition/queue info for the returned record
 * @param wallClockTime current wall-clock time in millis
 * @return the next stamped record, as returned by the wrapped group
 */
@Override
synchronized StampedRecord nextRecord(final RecordInfo info, final long wallClockTime) {
    final StampedRecord record = wrapped.nextRecord(info, wallClockTime);
    return record;
}
@Test
public void testNextRecord() {
    final RecordInfo info = mock(RecordInfo.class);
    final long wallClockTime = 12345678L;
    final StampedRecord stampedRecord = mock(StampedRecord.class);
    when(wrapped.nextRecord(info, wallClockTime)).thenReturn(stampedRecord);

    final StampedRecord result = synchronizedPartitionGroup.nextRecord(info, wallClockTime);

    // The wrapper must return exactly what the delegate produced, delegating once.
    assertEquals(stampedRecord, result);
    verify(wrapped, times(1)).nextRecord(info, wallClockTime);
}
/**
 * Sets the argument's type name.
 *
 * @param type the type name, e.g. "int"
 * @return this builder, for chaining
 */
public ArgumentBuilder type(String type) {
    this.type = type;
    return this;
}
/** The configured type name must round-trip through build(). */
@Test
void type() {
    final ArgumentBuilder builder = ArgumentBuilder.newBuilder().type("int");
    Assertions.assertEquals("int", builder.build().getType());
}
/**
 * Opens the FTP(S) control connection using a custom-trust TLS socket factory
 * and protocol command logging.
 *
 * @param proxy proxy finder (unused in this implementation)
 * @param callback host key callback (unused for FTP)
 * @param prompt login callback (login happens later)
 * @param cancel cancel callback
 * @return the connected FTP client
 * @throws BackgroundException when the connection attempt fails (IO errors mapped)
 */
@Override
protected FTPClient connect(final ProxyFinder proxy, final HostKeyCallback callback, final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    try {
        // TLS socket factory honoring the configured trust/key managers and
        // the protocol list from preferences (comma-separated).
        final CustomTrustSSLProtocolSocketFactory f = new CustomTrustSSLProtocolSocketFactory(trust, key,
                preferences.getProperty("connection.ssl.protocols.ftp").split(","));
        final LoggingProtocolCommandListener listener = new LoggingProtocolCommandListener(this);
        final FTPClient client = new FTPClient(host.getProtocol(), f, f.getSSLContext()) {
            @Override
            public void disconnect() throws IOException {
                try {
                    super.disconnect();
                }
                finally {
                    // Detach the log listener on disconnect to avoid leaking it
                    // if this client instance is reused.
                    this.removeProtocolCommandListener(listener);
                }
            }
        };
        client.addProtocolCommandListener(listener);
        this.configure(client);
        // IDN hostnames are converted to punycode before the connect.
        client.connect(new PunycodeConverter().convert(host.getHostname()), host.getPort());
        client.setTcpNoDelay(false);
        return client;
    }
    catch(IOException e) {
        // Translate low-level IO errors into the session's exception hierarchy.
        throw new FTPExceptionMappingService().map(e);
    }
}
// Ignored: requires live credentials and network access to test.cyberduck.ch.
@Test
@Ignore
public void testConnectMutualTls() throws Exception {
    final Host host = new Host(new FTPTLSProtocol(), "test.cyberduck.ch", new Credentials(
            System.getProperties().getProperty("ftp.user"), System.getProperties().getProperty("ftp.password")
    ));
    final AtomicBoolean callback = new AtomicBoolean();
    // Certificate store that records being asked for a client certificate (mutual TLS)
    // and then cancels the connection.
    final FTPSession session = new FTPSession(host, new DefaultX509TrustManager(),
            new KeychainX509KeyManager(new DisabledCertificateIdentityCallback(), host, new DisabledCertificateStore() {
                @Override
                public X509Certificate choose(final CertificateIdentityCallback prompt, final String[] keyTypes,
                                              final Principal[] issuers, final Host bookmark)
                        throws ConnectionCanceledException {
                    assertEquals("test.cyberduck.ch", bookmark.getHostname());
                    callback.set(true);
                    throw new ConnectionCanceledException();
                }
            }));
    final LoginConnectionService c = new LoginConnectionService(
            new DisabledLoginCallback(),
            new DisabledHostKeyCallback(),
            new DisabledPasswordStore(),
            new DisabledProgressListener());
    c.connect(session, new DisabledCancelCallback());
    // The server must have requested a client certificate during the handshake.
    assertTrue(callback.get());
}
@Override public void cancel(Throwable cause) { // pending requests must be canceled first otherwise they might be fulfilled by // allocated slots released from this bulk for (ExecutionSlotSharingGroup group : pendingRequests.keySet()) { for (ExecutionVertexID id : executions.get(group)) { logicalSlotRequestCanceller.accept(id, cause); } } for (ExecutionSlotSharingGroup group : fulfilledRequests.keySet()) { for (ExecutionVertexID id : executions.get(group)) { logicalSlotRequestCanceller.accept(id, cause); } } }
@Test
void testCancel() {
    LogicalSlotRequestCanceller canceller = new LogicalSlotRequestCanceller();
    SharingPhysicalSlotRequestBulk bulk = createBulk(canceller);
    // Mark SG1 fulfilled so that both the pending and the fulfilled paths are exercised.
    bulk.markFulfilled(SG1, new AllocationID());

    Throwable cause = new Throwable();
    bulk.cancel(cause);

    // Executions of both pending and fulfilled groups must all be canceled with the cause.
    assertThat(canceller.cancellations)
            .contains(Tuple2.of(EV1, cause), Tuple2.of(EV2, cause), Tuple2.of(EV4, cause));
}
@Override public void handlerAdded(ChannelHandlerContext ctx) throws Exception { if (acceptForeignIp) { return; } // the anonymous access is enabled by default, permission level is PUBLIC // if allow anonymous access, return if (qosConfiguration.isAllowAnonymousAccess()) { return; } final InetAddress inetAddress = ((InetSocketAddress) ctx.channel().remoteAddress()).getAddress(); // loopback address, return if (inetAddress.isLoopbackAddress()) { return; } // the ip is in the whitelist, return if (checkForeignIpInWhiteList(inetAddress)) { return; } ByteBuf cb = Unpooled.wrappedBuffer((QosConstants.BR_STR + "Foreign Ip Not Permitted, Consider Config It In Whitelist." + QosConstants.BR_STR) .getBytes()); ctx.writeAndFlush(cb).addListener(ChannelFutureListener.CLOSE); }
// Even with acceptForeignIp=false, a remote address present in the whitelist must be
// let through: no refusal message may be written to the channel.
@Test
void shouldNotShowIpNotPermittedMsg_GivenAcceptForeignIpFalseAndMatchWhiteList() throws Exception {
    ChannelHandlerContext context = mock(ChannelHandlerContext.class);
    Channel channel = mock(Channel.class);
    when(context.channel()).thenReturn(channel);
    InetAddress addr = mock(InetAddress.class);
    when(addr.isLoopbackAddress()).thenReturn(false);
    when(addr.getHostAddress()).thenReturn("175.23.44.1");
    InetSocketAddress address = new InetSocketAddress(addr, 12345);
    when(channel.remoteAddress()).thenReturn(address);
    // Whitelist holds the exact IP plus an unrelated CIDR range; stray whitespace must be tolerated
    ForeignHostPermitHandler handler = new ForeignHostPermitHandler(QosConfiguration.builder()
            .acceptForeignIp(false)
            .acceptForeignIpWhitelist("175.23.44.1, 192.168.1.192/26 ")
            .build());
    handler.handlerAdded(context);
    verify(context, never()).writeAndFlush(any());
}
/**
 * FEEL years-and-months duration: the {@link Period} between two temporal values,
 * reduced to whole years and months (the day component is zeroed).
 *
 * @param from range start; must be convertible to a LocalDate
 * @param to   range end; must be convertible to a LocalDate
 * @return the year-month period, or an invalid-parameters error for null/unsuitable input
 */
public FEELFnResult<TemporalAmount> invoke(@ParameterName("from") Temporal from, @ParameterName("to") Temporal to) {
    if ( from == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }
    if ( to == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "cannot be null"));
    }
    final LocalDate fromDate = getLocalDateFromTemporal(from);
    if (fromDate == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "is of type not suitable for years and months function"));
    }
    final LocalDate toDate = getLocalDateFromTemporal(to);
    if (toDate == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "is of type not suitable for years and months function"));
    }
    // withDays(0): a years-and-months duration must not carry a day component
    return FEELFnResult.ofResult(new ComparablePeriod(Period.between(fromDate, toDate).withDays(0)));
}
// A LocalDateTime argument must be truncated to its date part:
// 2017-06-12 -> 2020-07-13 is 3 years, 1 month, 0 days.
@Test
void yearsAndMonthsFunctionInvokeLocalDateTime() {
    FunctionTestUtil.assertResult(
            yamFunction.invoke(
                    LocalDateTime.of(2017, 6, 12, 12, 43),
                    LocalDate.of(2020, 7, 13)),
            ComparablePeriod.of(3, 1, 0));
}
/**
 * Sets the load target table, broker descriptor and source file groups on this node.
 *
 * @deprecated retained for backward compatibility; newer callers appear to use the
 *             richer {@code setLoadInfo} overload taking job/txn ids — TODO confirm replacement.
 */
@Deprecated
public void setLoadInfo(Table targetTable, BrokerDesc brokerDesc, List<BrokerFileGroup> fileGroups) {
    this.targetTable = targetTable;
    this.brokerDesc = brokerDesc;
    this.fileGroups = fileGroups;
}
// A load path pattern that matches no files must surface a UserException naming
// the offending pattern when file statuses and instances are computed.
@Test
public void testNoFilesFoundOnePath() {
    Analyzer analyzer = new Analyzer(GlobalStateMgr.getCurrentState(), new ConnectContext());
    DescriptorTable descTable = analyzer.getDescTbl();
    TupleDescriptor tupleDesc = descTable.createTupleDescriptor("DestTableTuple");
    // One path entry whose file-status list is empty simulates "no matches found"
    List<List<TBrokerFileStatus>> fileStatusesList = Lists.newArrayList();
    fileStatusesList.add(Lists.newArrayList());
    FileScanNode scanNode = new FileScanNode(new PlanNodeId(0), tupleDesc, "FileScanNode", fileStatusesList,
            0, WarehouseManager.DEFAULT_WAREHOUSE_ID);
    List<String> files = Lists.newArrayList("hdfs://127.0.0.1:9001/file*");
    DataDescription desc =
            new DataDescription("testTable", null, files, null, null, null, "csv", false, null);
    BrokerFileGroup brokerFileGroup = new BrokerFileGroup(desc);
    Deencapsulation.setField(brokerFileGroup, "filePaths", files);
    List<BrokerFileGroup> fileGroups = Lists.newArrayList(brokerFileGroup);
    scanNode.setLoadInfo(jobId, txnId, null, brokerDesc, fileGroups, true, loadParallelInstanceNum);
    // The error message must quote the unmatched pattern verbatim
    ExceptionChecker.expectThrowsWithMsg(UserException.class,
            "No files were found matching the pattern(s) or path(s): 'hdfs://127.0.0.1:9001/file*'",
            () -> Deencapsulation.invoke(scanNode, "getFileStatusAndCalcInstance"));
}
/**
 * Builds the KafkaCluster model from the Kafka CR and node pools and validates that
 * planned broker scale-downs / broker-role removals would not strand partition replicas.
 * On a failed check it either reverts the offending changes and retries once
 * ({@code tryToFixProblems=true}; the retry runs with the flag cleared so it cannot loop),
 * or fails the returned future with an InvalidResourceException listing the problems.
 *
 * @param kafkaCr          Kafka custom resource
 * @param nodePools        node pools belonging to this cluster
 * @param oldStorage       previous storage configuration, keyed per pool
 * @param currentPods      currently existing pods, keyed per pool
 * @param versionChange    Kafka version change descriptor
 * @param kafkaStatus      status object to which warning conditions are added on success
 * @param tryToFixProblems whether failed checks should be auto-reverted before failing
 * @return future completing with the validated KafkaCluster, or failing with the check errors
 */
public Future<KafkaCluster> prepareKafkaCluster(
        Kafka kafkaCr,
        List<KafkaNodePool> nodePools,
        Map<String, Storage> oldStorage,
        Map<String, List<String>> currentPods,
        KafkaVersionChange versionChange,
        KafkaStatus kafkaStatus,
        boolean tryToFixProblems) {
    return createKafkaCluster(kafkaCr, nodePools, oldStorage, currentPods, versionChange)
            .compose(kafka -> brokerRemovalCheck(kafkaCr, kafka))
            .compose(kafka -> {
                if (checkFailed() && tryToFixProblems) {
                    // We have a failure, and should try to fix issues
                    // Once we fix it, we call this method again, but this time with tryToFixProblems set to false
                    return revertScaleDown(kafka, kafkaCr, nodePools)
                            .compose(kafkaAndNodePools -> revertRoleChange(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs()))
                            .compose(kafkaAndNodePools -> prepareKafkaCluster(kafkaAndNodePools.kafkaCr(), kafkaAndNodePools.nodePoolCrs(), oldStorage, currentPods, versionChange, kafkaStatus, false));
                } else if (checkFailed()) {
                    // We have a failure, but we should not try to fix it
                    List<String> errors = new ArrayList<>();
                    if (scaleDownCheckFailed) {
                        errors.add("Cannot scale-down Kafka brokers " + kafka.removedNodes() + " because they have assigned partition-replicas.");
                    }
                    if (usedToBeBrokersCheckFailed) {
                        errors.add("Cannot remove the broker role from nodes " + kafka.usedToBeBrokerNodes() + " because they have assigned partition-replicas.");
                    }
                    return Future.failedFuture(new InvalidResourceException("Following errors were found when processing the Kafka custom resource: " + errors));
                } else {
                    // If everything succeeded, we return the KafkaCluster object
                    // If any warning conditions exist from the reverted changes, we add them to the status
                    if (!warningConditions.isEmpty()) {
                        kafkaStatus.addConditions(warningConditions);
                    }
                    return Future.succeededFuture(kafka);
                }
            });
}
// Creating a brand-new KRaft cluster from a single mixed-role pool must succeed with
// three nodes, no removed nodes, no status warnings, and no broker-in-use check
// (nothing is being scaled down).
@Test
public void testNewClusterWithMixedNodesKRaft(VertxTestContext context) {
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);
    KafkaStatus kafkaStatus = new KafkaStatus();
    KafkaClusterCreator creator = new KafkaClusterCreator(vertx, RECONCILIATION, CO_CONFIG, KafkaMetadataConfigurationState.KRAFT, supplier);

    Checkpoint async = context.checkpoint();
    creator.prepareKafkaCluster(KAFKA, List.of(POOL_MIXED), Map.of(), null, KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE, kafkaStatus, true)
            .onComplete(context.succeeding(kc -> context.verify(() -> {
                // Kafka cluster is created
                assertThat(kc, is(notNullValue()));
                assertThat(kc.nodes().size(), is(3));
                assertThat(kc.nodes().stream().map(NodeRef::nodeId).collect(Collectors.toSet()), is(Set.of(0, 1, 2)));
                assertThat(kc.removedNodes(), is(Set.of()));

                // Check the status conditions
                assertThat(kafkaStatus.getConditions(), is(nullValue()));

                // No scale-down => scale-down check is not done
                verify(supplier.brokersInUseCheck, never()).brokersInUse(any(), any(), any(), any());

                async.flag();
            })));
}
/**
 * Reads lines from the terminal until one is not a CLI-specific command and returns it.
 * CLI-specific commands encountered along the way are handled (executed) as a side
 * effect of the check.
 */
public String nextNonCliCommand() {
    String line = terminal.readLine();
    while (maybeHandleCliSpecificCommands(line)) {
        line = terminal.readLine();
    }
    return line;
}
// Arguments after a CLI command name must be split on whitespace with trailing
// whitespace trimmed before reaching the command's execute().
@Test
public void shouldExecuteCliCommandWithArgsTrimmingWhiteSpace() {
    // Given:
    when(lineSupplier.get())
            .thenReturn(CLI_CMD_NAME + WHITE_SPACE + "Arg0" + WHITE_SPACE + "Arg1" + WHITE_SPACE)
            .thenReturn("not a CLI command;");

    // When:
    console.nextNonCliCommand();

    // Then:
    verify(cliCommand).execute(eq(ImmutableList.of("Arg0", "Arg1")), any());
}
/**
 * Converts the given serialized data to its heap-backed representation.
 * Null and already-heap-backed instances are returned unchanged; anything else
 * is copied into a new {@code HeapData} via its byte-array form.
 */
public static Data toHeapData(Data data) {
    if (data == null) {
        return null;
    }
    if (data instanceof HeapData) {
        return data;
    }
    return new HeapData(data.toByteArray());
}
// A non-heap Data implementation must be converted into a HeapData copy.
@Test
public void toHeapData() throws Exception {
    Data data = ToHeapDataConverter.toHeapData(new AnotherDataImpl());
    assertInstanceOf(HeapData.class, data);
}
@VisibleForTesting void validateMobileUnique(Long id, String mobile) { if (StrUtil.isBlank(mobile)) { return; } AdminUserDO user = userMapper.selectByMobile(mobile); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_MOBILE_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_MOBILE_EXISTS); } }
// Creating a user (id == null) with a mobile already owned by someone must fail
// with USER_MOBILE_EXISTS.
@Test
public void testValidateMobileUnique_mobileExistsForCreate() {
    // prepare parameters
    String mobile = randomString();
    // mock data: persist an existing user owning the mobile
    userMapper.insert(randomAdminUserDO(o -> o.setMobile(mobile)));
    // invoke and expect the duplicate-mobile service exception
    assertServiceException(() -> userService.validateMobileUnique(null, mobile),
            USER_MOBILE_EXISTS);
}
/**
 * Computes the next fire time of a time trigger strictly after {@code startDate}.
 * Only cron triggers are supported; a per-uniqueId delay (presumably jitter to spread
 * simultaneous firings — confirm against getDelayInSeconds) is added to the raw cron time.
 *
 * @param trigger   the time trigger; must resolve to a cron trigger
 * @param startDate exclusive lower bound for the next execution
 * @param uniqueId  identity used to derive the additional delay
 * @return the next execution date, or empty when the cron has no future firing
 * @throws UnsupportedOperationException for non-cron trigger types
 */
@SneakyThrows
public static Optional<Date> nextExecutionDate(
    TimeTrigger trigger, Date startDate, String uniqueId) {
  CronTimeTrigger cronTimeTrigger = getCronTimeTrigger(trigger);
  if (cronTimeTrigger != null) {
    CronExpression cronExpression =
        TriggerHelper.buildCron(cronTimeTrigger.getCron(), cronTimeTrigger.getTimezone());
    Date nextTime = cronExpression.getNextValidTimeAfter(startDate);
    if (nextTime != null) {
      // java.util.Date is mutable: shift the cron time in place by the extra delay
      nextTime.setTime(
          nextTime.getTime() + getDelayInSeconds(cronTimeTrigger, uniqueId) * TimeTrigger.MS_IN_SECONDS);
    }
    return Optional.ofNullable(nextTime);
  }
  throw new UnsupportedOperationException(
      "TimeTrigger nextExecutionDate is not implemented for type: " + trigger.getType());
}
// The sample cron fixture evaluated from the epoch must resolve to epoch + 72000s
// (cron next-fire time plus the delay derived from "test-id").
@Test
public void testNextExecutionDateForCron() throws Exception {
    TimeTrigger trigger =
        loadObject("fixtures/time_triggers/sample-cron-time-trigger.json", TimeTrigger.class);
    Optional<Date> actual =
        TriggerHelper.nextExecutionDate(trigger, Date.from(Instant.EPOCH), "test-id");
    assertEquals(Optional.of(Date.from(Instant.ofEpochSecond(72000))), actual);
}
/**
 * Delegates deleteAll to the wrapped store and records the call latency in the
 * deleteAll probe. The finally block ensures failed calls are timed as well.
 */
@Override
public void deleteAll(Collection<K> keys) {
    long startNanos = Timer.nanos();
    try {
        delegate.deleteAll(keys);
    } finally {
        deleteAllProbe.recordValue(Timer.nanosElapsed(startNanos));
    }
}
// deleteAll must pass the keys through to the wrapped store and record exactly one
// latency sample in the "deleteAll" probe.
@Test
public void deleteAll() {
    List<String> keys = Arrays.asList("1", "2");
    cacheStore.deleteAll(keys);
    verify(delegate).deleteAll(keys);
    assertProbeCalledOnce("deleteAll");
}
/**
 * Resolves a DMN {@link Import} against the given candidate models.
 * Resolution: when exactly one candidate shares the imported namespace, it is accepted
 * (subject to a modelName match when the optional {@code drools:modelName} attribute is
 * present); otherwise candidates are narrowed by modelName, failing on zero matches and
 * on namespace collisions.
 *
 * @param importElement the import to resolve; its parent must be a {@link Definitions}
 * @param dmns          candidate models to resolve against
 * @param idExtractor   maps a candidate to its QName (namespace URI + local model name)
 * @return the resolved model on the right, or a human-readable error message on the left
 */
public static <T> Either<String, T> resolveImportDMN(Import importElement, Collection<T> dmns, Function<T, QName> idExtractor) {
    final String importerDMNNamespace = ((Definitions) importElement.getParent()).getNamespace();
    final String importerDMNName = ((Definitions) importElement.getParent()).getName();
    final String importNamespace = importElement.getNamespace();
    final String importName = importElement.getName();
    final String importLocationURI = importElement.getLocationURI(); // This is optional
    final String importModelName = importElement.getAdditionalAttributes().get(TImport.MODELNAME_QNAME);
    LOGGER.debug("Resolving an Import in DMN Model with name={} and namespace={}. " +
                         "Importing a DMN model with namespace={} name={} locationURI={}, modelName={}",
                 importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
    // Candidates sharing the imported namespace
    List<T> matchingDMNList = dmns.stream()
            .filter(m -> idExtractor.apply(m).getNamespaceURI().equals(importNamespace))
            .toList();
    if (matchingDMNList.size() == 1) {
        T located = matchingDMNList.get(0);
        // Check if the located DMN Model in the NS, correspond for the import `drools:modelName`.
        if (importModelName == null || idExtractor.apply(located).getLocalPart().equals(importModelName)) {
            LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
                                 "with namespace={} name={} locationURI={}, modelName={}",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
            return Either.ofRight(located);
        } else {
            LOGGER.error("DMN Model with name={} and namespace={} can't import a DMN with namespace={}, name={}, modelName={}, " +
                                 "located within namespace only {} but does not match for the actual modelName",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located));
            return Either.ofLeft(String.format(
                    "DMN Model with name=%s and namespace=%s can't import a DMN with namespace=%s, name=%s, modelName=%s, " +
                            "located within namespace only %s but does not match for the actual modelName",
                    importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, idExtractor.apply(located)));
        }
    } else {
        // Zero or several namespace matches: narrow by the model's local name
        List<T> usingNSandName = matchingDMNList.stream()
                .filter(dmn -> idExtractor.apply(dmn).getLocalPart().equals(importModelName))
                .toList();
        if (usingNSandName.size() == 1) {
            LOGGER.debug("DMN Model with name={} and namespace={} successfully imported a DMN " +
                                 "with namespace={} name={} locationURI={}, modelName={}",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
            return Either.ofRight(usingNSandName.get(0));
        } else if (usingNSandName.isEmpty()) {
            LOGGER.error("DMN Model with name={} and namespace={} failed to import a DMN with namespace={} name={} locationURI={}, modelName={}.",
                         importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName);
            return Either.ofLeft(String.format(
                    "DMN Model with name=%s and namespace=%s failed to import a DMN with namespace=%s name=%s locationURI=%s, modelName=%s. ",
                    importerDMNName, importerDMNNamespace, importNamespace, importName, importLocationURI, importModelName));
        } else {
            LOGGER.error("DMN Model with name={} and namespace={} detected a collision ({} elements) trying to import a DMN with namespace={} name={} locationURI={}, modelName={}",
                         importerDMNName, importerDMNNamespace, usingNSandName.size(), importNamespace, importName, importLocationURI, importModelName);
            return Either.ofLeft(String.format(
                    "DMN Model with name=%s and namespace=%s detected a collision trying to import a DMN with %s namespace, " +
                            "%s name and modelName %s. There are %s DMN files with the same namespace in your project. " +
                            "Please change the DMN namespaces and make them unique to fix this issue.",
                    importerDMNName, importerDMNNamespace, importNamespace, importName, importModelName, usingNSandName.size()));
        }
    }
}
// Import without an explicit modelName: when exactly one candidate matches the
// namespace, it resolves to that candidate regardless of the import's local name ("boh").
@Test
void nSnoModelNameDefaultWithAlias2() {
    final Import i = makeImport("ns1", "boh", null);
    final List<QName> available = Arrays.asList(new QName("ns1", "m1"),
                                                new QName("ns2", "m2"),
                                                new QName("ns3", "m3"));
    final Either<String, QName> result = ImportDMNResolverUtil.resolveImportDMN(i, available, Function.identity());
    assertThat(result.isRight()).isTrue();
    assertThat(result.getOrElse(null)).isEqualTo(new QName("ns1", "m1"));
}
/**
 * Shortcut for {@code builder().enumeration(name)}: starts building an enum schema
 * with the given full name.
 */
public static EnumBuilder<Schema> enumeration(String name) {
    return builder().enumeration(name);
}
// The enumeration() builder shortcut must produce a schema equal to Schema.createEnum,
// including custom properties and symbol order.
@Test
void testEnum() {
    List<String> symbols = Arrays.asList("a", "b");
    Schema expected = Schema.createEnum("myenum", null, null, symbols);
    expected.addProp("p", "v");
    Schema schema = SchemaBuilder.enumeration("myenum").prop("p", "v").symbols("a", "b");
    assertEquals(expected, schema);
}
@SuppressWarnings("DataFlowIssue") public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket, final ConnectionSession connectionSession) throws SQLException { if (commandPacket instanceof SQLReceivedPacket) { log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL()); } else { log.debug("Execute packet type: {}", commandPacketType); } switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitExecutor(); case COM_INIT_DB: return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession); case COM_FIELD_LIST: return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession); case COM_QUERY: return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession); case COM_PING: return new MySQLComPingExecutor(connectionSession); case COM_STMT_PREPARE: return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession); case COM_STMT_EXECUTE: return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession); case COM_STMT_RESET: return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession); case COM_STMT_CLOSE: return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession); case COM_SET_OPTION: return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession); case COM_RESET_CONNECTION: return new MySQLComResetConnectionExecutor(connectionSession); default: return new MySQLUnsupportedCommandExecutor(commandPacketType); } }
// COM_QUERY packets must be dispatched to the query packet executor.
@Test
void assertNewInstanceWithComQuery() throws SQLException {
    MySQLComQueryPacket packet = mock(MySQLComQueryPacket.class);
    when(packet.getSQL()).thenReturn("");
    assertThat(MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_QUERY, packet, connectionSession),
            instanceOf(MySQLComQueryPacketExecutor.class));
}
/**
 * Removes the listener from the bucket keyed by its rule suffix; when the bucket
 * becomes (or already was) empty, the bucket itself is dropped from the map.
 * Null listeners are ignored.
 */
public synchronized void unregister(MeshRuleListener listener) {
    if (listener == null) {
        return;
    }
    String suffix = listener.ruleSuffix();
    Set<MeshRuleListener> registered = listenerMap.get(suffix);
    if (registered != null) {
        // Removing from an empty/absent bucket is a harmless no-op
        registered.remove(listener);
    }
    if (registered == null || registered.isEmpty()) {
        listenerMap.remove(suffix);
    }
}
// unregister(): removing a listener only affects its own rule-suffix bucket, and a
// bucket whose last listener is removed disappears from the listener map entirely.
@Test
void unregister() {
    MeshRuleDispatcher meshRuleDispatcher = new MeshRuleDispatcher("TestApp");
    // Two listeners sharing suffix "Type1" and one listener with suffix "Type2"
    MeshRuleListener listener1 = new MeshRuleListener() {
        @Override
        public void onRuleChange(String appName, List<Map<String, Object>> rules) {}

        @Override
        public void clearRule(String appName) {}

        @Override
        public String ruleSuffix() {
            return "Type1";
        }
    };
    MeshRuleListener listener2 = new MeshRuleListener() {
        @Override
        public void onRuleChange(String appName, List<Map<String, Object>> rules) {}

        @Override
        public void clearRule(String appName) {}

        @Override
        public String ruleSuffix() {
            return "Type1";
        }
    };
    MeshRuleListener listener3 = new MeshRuleListener() {
        @Override
        public void onRuleChange(String appName, List<Map<String, Object>> rules) {}

        @Override
        public void clearRule(String appName) {}

        @Override
        public String ruleSuffix() {
            return "Type2";
        }
    };
    meshRuleDispatcher.register(listener1);
    meshRuleDispatcher.register(listener2);
    meshRuleDispatcher.register(listener3);
    // Initial layout: Type1 -> {listener1, listener2}, Type2 -> {listener3}
    Assertions.assertEquals(2, meshRuleDispatcher.getListenerMap().get("Type1").size());
    Assertions.assertTrue(meshRuleDispatcher.getListenerMap().get("Type1").contains(listener1));
    Assertions.assertTrue(meshRuleDispatcher.getListenerMap().get("Type1").contains(listener2));
    Assertions.assertEquals(1, meshRuleDispatcher.getListenerMap().get("Type2").size());
    Assertions.assertTrue(meshRuleDispatcher.getListenerMap().get("Type2").contains(listener3));

    // Removing one of two Type1 listeners keeps the bucket with the remaining listener
    meshRuleDispatcher.unregister(listener1);
    Assertions.assertEquals(1, meshRuleDispatcher.getListenerMap().get("Type1").size());
    Assertions.assertTrue(meshRuleDispatcher.getListenerMap().get("Type1").contains(listener2));
    Assertions.assertEquals(1, meshRuleDispatcher.getListenerMap().get("Type2").size());
    Assertions.assertTrue(meshRuleDispatcher.getListenerMap().get("Type2").contains(listener3));

    // Removing the last Type1 listener drops the bucket; Type2 is untouched
    meshRuleDispatcher.unregister(listener2);
    Assertions.assertNull(meshRuleDispatcher.getListenerMap().get("Type1"));
    Assertions.assertEquals(1, meshRuleDispatcher.getListenerMap().get("Type2").size());
    Assertions.assertTrue(meshRuleDispatcher.getListenerMap().get("Type2").contains(listener3));

    // Removing the last Type2 listener empties the map completely
    meshRuleDispatcher.unregister(listener3);
    Assertions.assertNull(meshRuleDispatcher.getListenerMap().get("Type1"));
    Assertions.assertNull(meshRuleDispatcher.getListenerMap().get("Type2"));
}
/**
 * Validates the value through the superclass first, then additionally enforces that a
 * passing String value's length lies within [minLength, maxLength] (inclusive).
 */
@Override
public ValidationResult validate(Object value) {
    ValidationResult result = super.validate(value);
    if (!(result instanceof ValidationResult.ValidationPassed)) {
        // Superclass already rejected the value; report its failure unchanged
        return result;
    }
    final int length = ((String) value).length();
    if (length < minLength || length > maxLength) {
        return new ValidationResult.ValidationFailed(
                "Value is not between " + minLength + " and " + maxLength + " in length!");
    }
    return result;
}
// A 2-character value must fail validation when both min and max length are 1.
@Test
public void testValidateLongString() {
    assertThat(new LimitedStringValidator(1, 1).validate("12"))
            .isInstanceOf(ValidationResult.ValidationFailed.class);
}
/**
 * Decides whether the statement can be served as a scalable push query (push v2).
 * Requires: push v2 enabled, a plain single-source query (no join), reading from
 * "latest", exactly one upstream writer query, EMIT CHANGES, and none of the
 * unsupported features (pull semantics, GROUP BY, WINDOW, HAVING, PARTITION BY,
 * ROWPARTITION/ROWOFFSET pseudo-columns).
 *
 * @param statement the parsed statement to classify
 * @param ksqlEngine engine used to find upstream writer queries of the source
 * @param ksqlConfig server configuration
 * @param overrides  per-request property overrides
 * @return true when the statement qualifies as a scalable push query
 */
@SuppressWarnings({"BooleanExpressionComplexity", "CyclomaticComplexity"})
public static boolean isScalablePushQuery(
    final Statement statement,
    final KsqlExecutionContext ksqlEngine,
    final KsqlConfig ksqlConfig,
    final Map<String, Object> overrides
) {
  if (!isPushV2Enabled(ksqlConfig, overrides)) {
    return false;
  }
  if (! (statement instanceof Query)) {
    return false;
  }
  final Query query = (Query) statement;
  final SourceFinder sourceFinder = new SourceFinder();
  sourceFinder.process(query.getFrom(), null);
  // It will be present if it's not a join, which we don't handle
  if (!sourceFinder.getSourceName().isPresent()) {
    return false;
  }
  // Find all of the writers to this particular source.
  final SourceName sourceName = sourceFinder.getSourceName().get();
  final Set<QueryId> upstreamQueries = ksqlEngine.getQueriesWithSink(sourceName);
  // See if the config or override have set the stream to be "latest"
  final boolean isLatest = isLatest(ksqlConfig, overrides);
  // Cannot be a pull query, i.e. must be a push
  return !query.isPullQuery()
      // Group by is not supported
      && !query.getGroupBy().isPresent()
      // Windowing is not supported
      && !query.getWindow().isPresent()
      // Having clause is not supported
      && !query.getHaving().isPresent()
      // Partition by is not supported
      && !query.getPartitionBy().isPresent()
      // There must be an EMIT CHANGES clause
      && (query.getRefinement().isPresent()
          && query.getRefinement().get().getOutputRefinement() == OutputRefinement.CHANGES)
      // Must be reading from "latest"
      && isLatest
      // We only handle a single sink source at the moment from a CTAS/CSAS
      && upstreamQueries.size() == 1
      // ROWPARTITION and ROWOFFSET are not currently supported in SPQs
      && !containsDisallowedColumns(query);
}
// A source with no upstream writer queries must disqualify the statement from being
// served as a scalable push query.
@Test
public void isScalablePushQuery_false_wrongUpstreamQueries_None() {
    try (MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
        // When:
        expectIsSPQ(ColumnName.of("foo"), columnExtractor);
        // Zero upstream queries for the sink source
        when(ksqlEngine.getQueriesWithSink(SourceName.of("Foo"))).thenReturn(
            ImmutableSet.of());

        // Then:
        assertThat(ScalablePushUtil.isScalablePushQuery(query, ksqlEngine, ksqlConfig,
            ImmutableMap.of(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest")),
            equalTo(false));
    }
}
/**
 * Returns the cached getter context for the given JSON query path, creating and caching
 * it on first use. Races between concurrent creators are resolved via putIfAbsent
 * (internalCache is presumably a ConcurrentMap — confirm), so all callers observe the
 * same context instance per path.
 */
public JsonGetterContext getContext(String queryPath) {
    JsonGetterContext context = internalCache.get(queryPath);
    if (context != null) {
        return context;
    }
    context = new JsonGetterContext(queryPath);
    JsonGetterContext previousContextValue = internalCache.putIfAbsent(queryPath, context);
    if (previousContextValue == null) {
        // We won the insertion race; enforce the cache size bound for the new entry
        cleanupIfNecessary(context);
        return context;
    } else {
        // Another thread inserted first; discard our instance and reuse the winner's
        return previousContextValue;
    }
}
// Adding a fourth entry to a cache constructed with (3, 2) — presumably max size and
// cleanup count, confirm against the constructor — must not evict the entry that was
// just inserted: the same instance is returned on the next lookup.
@Test
public void testMostRecentlyAddedElementIsNotImmediatelyEvicted() {
    JsonGetterContextCache cache = new JsonGetterContextCache(3, 2);
    cache.getContext("a");
    cache.getContext("b");
    cache.getContext("c");
    JsonGetterContext contextD = cache.getContext("d");
    assertSame(contextD, cache.getContext("d"));
}
/**
 * Merges the query results via the registered mergers, then applies decorators.
 * A merger-produced result is decorated directly; otherwise decorators see the raw
 * first query result. When neither path yields a result, the first query result is
 * passed through transparently.
 */
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext) throws SQLException {
    Optional<MergedResult> mergedResult = executeMerge(queryResults, sqlStatementContext);
    Optional<MergedResult> result = mergedResult.isPresent()
            ? Optional.of(decorate(mergedResult.get(), sqlStatementContext))
            : decorate(queryResults.get(0), sqlStatementContext);
    return result.orElseGet(() -> new TransparentMergedResult(queryResults.get(0)));
}
// With only a merger rule configured, merge() must surface the merger's value rather
// than a transparent pass-through of the raw query result.
@Test
void assertMergeWithMergerRuleOnly() throws SQLException {
    when(database.getRuleMetaData().getRules()).thenReturn(Collections.singleton(new MergerRuleFixture()));
    MergedResult actual = new MergeEngine(mock(RuleMetaData.class), database,
            new ConfigurationProperties(new Properties()), mock(ConnectionContext.class))
            .merge(Collections.singletonList(queryResult), mock(SQLStatementContext.class));
    assertThat(actual.getValue(1, String.class), is("merged_value"));
}
/**
 * Resolves AWS credentials in priority order: static access/secret keys from the
 * config, then the configured EC2 IAM role, then the ECS task role when running on
 * ECS. Throws NoCredentialsException when no source applies.
 */
AwsCredentials credentials() {
    final String accessKey = awsConfig.getAccessKey();
    if (!StringUtil.isNullOrEmptyAfterTrim(accessKey)) {
        // Static credentials configured explicitly take precedence
        return AwsCredentials.builder()
                .setAccessKey(accessKey)
                .setSecretKey(awsConfig.getSecretKey())
                .build();
    }
    if (!StringUtil.isNullOrEmptyAfterTrim(ec2IamRole)) {
        return fetchCredentialsFromEc2();
    }
    if (environment.isRunningOnEcs()) {
        return fetchCredentialsFromEcs();
    }
    // No usable credentials source
    throw new NoCredentialsException();
}
// With an IAM role configured and not running on ECS, credentials must come from
// the EC2 instance metadata for that role.
@Test
public void credentialsEc2IamRole() {
    // given
    String iamRole = "sample-iam-role";
    AwsConfig awsConfig = AwsConfig.builder()
            .setIamRole(iamRole)
            .build();
    given(awsMetadataApi.credentialsEc2(iamRole)).willReturn(CREDENTIALS);
    given(environment.isRunningOnEcs()).willReturn(false);
    AwsCredentialsProvider credentialsProvider = new AwsCredentialsProvider(awsConfig, awsMetadataApi, environment);

    // when
    AwsCredentials credentials = credentialsProvider.credentials();

    // then
    assertEquals(CREDENTIALS, credentials);
}
/**
 * Creates an access-log entry carrying the format string and its arguments.
 * Private — instances are presumably obtained via static factories elsewhere in the
 * class (not visible here; confirm).
 *
 * @param logFormat log message format; must not be null
 * @param args      values substituted into the format
 */
private AccessLog(String logFormat, Object... args) {
    Objects.requireNonNull(logFormat, "logFormat");
    this.logFormat = logFormat;
    this.args = args;
}
// With a filter-plus-format access log enabled, requests matching the filter must see
// an access-log handler in the pipeline and be logged with the custom format, while
// "/filtered/" URIs are excluded from logging.
@Test
void accessLogFilteringAndFormatting() {
    disposableServer =
            createServer()
                    .handle((req, resp) -> {
                        // Report via a response header whether the access-log handler was installed
                        resp.withConnection(conn -> {
                            ChannelHandler handler = conn.channel().pipeline().get(NettyPipeline.AccessLogHandler);
                            resp.header(ACCESS_LOG_HANDLER, handler != null ? FOUND : NOT_FOUND);
                        });
                        return resp.send();
                    })
                    .accessLog(true, AccessLogFactory.createFilter(
                            p -> !String.valueOf(p.uri()).startsWith("/filtered/"),
                            CUSTOM_ACCESS_LOG))
                    .bindNow();
    Tuple2<String, String> response = getHttpClientResponse(URI_1);
    // URI_2 is expected to fall under the filtered prefix — confirm against the URI constants
    getHttpClientResponse(URI_2);
    assertAccessLogging(response, true, true, CUSTOM_FORMAT);
}
/**
 * Scans the given file-system path and returns the resources found there, mapped
 * through this scanner's URI resource factory. Only resources accepted by the
 * {@code canLoad} predicate are collected.
 *
 * @param resourcePath path to scan; must not be null
 * @return resources discovered under the path
 */
public List<R> scanForResourcesPath(Path resourcePath) {
    requireNonNull(resourcePath, "resourcePath must not be null");
    List<R> resources = new ArrayList<>();
    pathScanner.findResourcesForPath(
        resourcePath,
        canLoad,
        processResource(DEFAULT_PACKAGE_NAME, NULL_FILTER, createUriResource(), resources::add));
    return resources;
}
// Scanning a concrete file path must yield exactly that file's URI.
@Test
void scanForResourcesPath() {
    File file = new File("src/test/resources/io/cucumber/core/resource/test/resource.txt");
    List<URI> resources = resourceScanner.scanForResourcesPath(file.toPath());
    assertThat(resources, contains(file.toURI()));
}
/**
 * Returns a copy of this task run prepared for a resend while it was considered
 * running: the latest attempt is forced into a terminal KILLED state (or a synthetic
 * KILLED attempt is appended) so a fresh attempt can start.
 */
public TaskRun onRunningResend() {
    TaskRunBuilder taskRunBuilder = this.toBuilder();
    if (taskRunBuilder.attempts == null || taskRunBuilder.attempts.isEmpty()) {
        // No attempt recorded yet: synthesize one, KILLED, seeded from the run's own state
        taskRunBuilder.attempts = new ArrayList<>();
        taskRunBuilder.attempts.add(TaskRunAttempt.builder()
            .state(new State(this.state, State.Type.KILLED))
            .build()
        );
    } else {
        ArrayList<TaskRunAttempt> taskRunAttempts = new ArrayList<>(taskRunBuilder.attempts);
        TaskRunAttempt lastAttempt = taskRunAttempts.get(taskRunBuilder.attempts.size() - 1);
        if (!lastAttempt.getState().isTerminated()) {
            // Last attempt still running: terminate it in place as KILLED
            taskRunAttempts.set(taskRunBuilder.attempts.size() - 1, lastAttempt.withState(State.Type.KILLED));
        } else {
            // Last attempt already finished: append a fresh KILLED attempt
            // NOTE(review): this branch builds its state via new State().withState(...) whereas the
            // empty-attempts branch uses new State(this.state, ...) — confirm the asymmetry is intended.
            taskRunAttempts.add(TaskRunAttempt.builder()
                .state(new State().withState(State.Type.KILLED))
                .build()
            );
        }
        taskRunBuilder.attempts(taskRunAttempts);
    }
    return taskRunBuilder.build();
}
// Resending a run with no attempts must synthesize a single KILLED attempt whose
// state history starts from the run's own initial state entry.
@Test
void onRunningResendNoAttempts() {
    TaskRun taskRun = TaskRun.builder()
        .state(new State())
        .build()
        .onRunningResend();

    assertThat(taskRun.getAttempts().size(), is(1));
    // The synthetic attempt inherits the run's first history entry
    assertThat(taskRun.getAttempts().getFirst().getState().getHistories().getFirst(),
        is(taskRun.getState().getHistories().getFirst()));
    assertThat(taskRun.getAttempts().getFirst().getState().getCurrent(), is(State.Type.KILLED));
}