public boolean appliesTo(Component project, @Nullable MetricEvaluationResult metricEvaluationResult) { return metricEvaluationResult != null && metricEvaluationResult.evaluationResult.level() != Measure.Level.OK && METRICS_TO_IGNORE_ON_SMALL_CHANGESETS.contains(metricEvaluationResult.condition.getMetric().getKey()) && config.getConfiguration().getBoolean(CoreProperties.QUALITY_GATE_IGNORE_SMALL_CHANGES).orElse(true) && isSmallChangeset(project); }
@Test public void should_not_change_quality_gate_if_new_lines_is_not_defined() { QualityGateMeasuresStep.MetricEvaluationResult metricEvaluationResult = generateEvaluationResult(NEW_COVERAGE_KEY, ERROR); Component project = generateNewRootProject(); boolean result = underTest.appliesTo(project, metricEvaluationResult); assertThat(result).isFalse(); }
@Override public int hashCode() { int result = type.hashCode(); result = 31 * result + newState.hashCode(); return result; }
@Test public void testHashCode() { assertEquals(clusterStateChange.hashCode(), clusterStateChange.hashCode()); assertEquals(clusterStateChange.hashCode(), clusterStateChangeSameAttributes.hashCode()); assumeDifferentHashCodes(); assertNotEquals(clusterStateChange.hashCode(), clusterStateChangeOtherType.hashCode()); assertNotEquals(clusterStateChange.hashCode(), clusterStateChangeOtherNewState.hashCode()); }
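The pair above uses the standard 31-multiplier hash combination. A minimal sketch of the same equals/hashCode contract on a hypothetical two-field value class (names are illustrative, not the actual ClusterStateChange):

final class StateChange {
    private final String type;
    private final String newState;

    StateChange(String type, String newState) {
        this.type = type;
        this.newState = newState;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof StateChange)) return false;
        StateChange other = (StateChange) o;
        // equals compares exactly the fields that hashCode folds in
        return type.equals(other.type) && newState.equals(other.newState);
    }

    @Override
    public int hashCode() {
        // same combination as the focal method: seed with one field, fold in the next
        int result = type.hashCode();
        result = 31 * result + newState.hashCode();
        return result;
    }
}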
public TrueTypeFont parse(RandomAccessRead randomAccessRead) throws IOException { RandomAccessReadDataStream dataStream = new RandomAccessReadDataStream(randomAccessRead); try (randomAccessRead) { return parse(dataStream); } catch (IOException ex) { // close the buffered stream only on error; on success the returned font still reads from it dataStream.close(); throw ex; } }
@Test void testPostTable() throws IOException { InputStream input = TestTTFParser.class.getResourceAsStream( "/ttf/LiberationSans-Regular.ttf"); assertNotNull(input); TTFParser parser = new TTFParser(); TrueTypeFont font = parser.parse(new RandomAccessReadBuffer(input)); CmapTable cmapTable = font.getCmap(); assertNotNull(cmapTable); CmapSubtable[] cmaps = cmapTable.getCmaps(); assertNotNull(cmaps); CmapSubtable cmap = null; for (CmapSubtable e : cmaps) { if (e.getPlatformId() == NameRecord.PLATFORM_WINDOWS && e.getPlatformEncodingId() == NameRecord.ENCODING_WINDOWS_UNICODE_BMP) { cmap = e; break; } } assertNotNull(cmap); PostScriptTable post = font.getPostScript(); assertNotNull(post); String[] glyphNames = font.getPostScript().getGlyphNames(); assertNotNull(glyphNames); // test a WGL4 (Macintosh standard) name int gid = cmap.getGlyphId(0x2122); // TRADE MARK SIGN assertEquals("trademark", glyphNames[gid]); // test an additional name gid = cmap.getGlyphId(0x20AC); // EURO SIGN assertEquals("Euro", glyphNames[gid]); }
@Restricted(NoExternalUse.class) public boolean hasSymlink(OpenOption... openOptions) throws IOException { return false; }
@Test public void hasSymlink_AbstractBase() throws IOException { // This test checks the default implementation in the abstract base class, which always reports that there is no symlink. VirtualFile virtualRoot = new VirtualFileMinimalImplementation(tmp.getRoot()); assertFalse(virtualRoot.hasSymlink(LinkOption.NOFOLLOW_LINKS)); }
public long higherFrameTs(long timestamp) { long tsPlusFrame = timestamp + frameSize; return sumHadOverflow(timestamp, frameSize, tsPlusFrame) ? addClamped(floorFrameTs(timestamp), frameSize) : floorFrameTs(tsPlusFrame); }
@Test public void when_higherOutOfRange_then_maxValue() { definition = new SlidingWindowPolicy(4, 2, 10); assertEquals(Long.MAX_VALUE, definition.higherFrameTs(Long.MAX_VALUE - 1)); assertEquals(Long.MIN_VALUE + 2, definition.higherFrameTs(Long.MIN_VALUE)); }
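The focal method depends on two overflow helpers that are not shown here. A self-contained sketch of plausible implementations, assuming sumHadOverflow detects signed-addition wraparound and addClamped saturates at Long.MIN_VALUE/Long.MAX_VALUE instead of wrapping (the actual Hazelcast utilities may differ):

final class OverflowMath {
    // a + b overflowed iff the sum's sign differs from the sign of both a and b:
    // the expression below is negative exactly in that case.
    static boolean sumHadOverflow(long a, long b, long sum) {
        return ((a ^ sum) & (b ^ sum)) < 0;
    }

    // Saturating addition: clamp to the nearest long extreme instead of wrapping around.
    static long addClamped(long a, long b) {
        long sum = a + b;
        return sumHadOverflow(a, b, sum)
                ? (a >= 0 ? Long.MAX_VALUE : Long.MIN_VALUE)
                : sum;
    }
}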
@Override public Graph toGraph(ConfigVariableExpander cve) throws InvalidIRException { Graph trueGraph = getTrueStatement().toGraph(cve); Graph falseGraph = getFalseStatement().toGraph(cve); // If there is nothing in the true or false sections of this if statement, // we can omit the if statement altogether! if (trueGraph.isEmpty() && falseGraph.isEmpty()) { return new Graph(); } Graph.GraphCombinationResult combination = Graph.combine(trueGraph, falseGraph); Graph newGraph = combination.graph; Collection<Vertex> trueRoots = trueGraph.roots().map(combination.oldToNewVertices::get).collect(Collectors.toList()); Collection<Vertex> falseRoots = falseGraph.roots().map(combination.oldToNewVertices::get).collect(Collectors.toList()); IfVertex ifVertex = new IfVertex(this.getSourceWithMetadata(), (BooleanExpression) ExpressionSubstitution.substituteBoolExpression(cve, this.booleanExpression)); newGraph.addVertex(ifVertex); for (Vertex v : trueRoots) { newGraph.chainVerticesUnsafe(BooleanEdge.trueFactory, ifVertex, v); } for (Vertex v : falseRoots) { newGraph.chainVerticesUnsafe(BooleanEdge.falseFactory, ifVertex, v); } return newGraph; }
@Test public void testIfWithOneTrueOneFalseStatement() throws InvalidIRException { ConfigVariableExpander cve = ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider()); PluginDefinition pluginDef = testPluginDefinition(); Statement trueStatement = new PluginStatement(randMeta(), pluginDef); Statement falseStatement = new PluginStatement(randMeta(), pluginDef); BooleanExpression ifExpression = createTestExpression(); IfStatement ifStatement = new IfStatement( randMeta(), createTestExpression(), trueStatement, falseStatement ); Graph ifStatementGraph = ifStatement.toGraph(cve); assertFalse(ifStatementGraph.isEmpty()); Graph expected = new Graph(); IfVertex expectedIf = DSL.gIf(randMeta(), ifExpression); expected.addVertex(expectedIf); PluginVertex expectedT = DSL.gPlugin(randMeta(), pluginDef); expected.chainVertices(true, expectedIf, expectedT); PluginVertex expectedF = DSL.gPlugin(randMeta(), pluginDef); expected.chainVertices(false, expectedIf, expectedF); assertSyntaxEquals(expected, ifStatementGraph); }
@Override public void lock() { try { lockInterruptibly(-1, null); } catch (InterruptedException e) { throw new IllegalStateException(); } }
@Test public void testIsLockedOtherThread() throws InterruptedException { RLock lock = redisson.getSpinLock("lock"); lock.lock(); Thread t = new Thread() { public void run() { RLock lock = redisson.getSpinLock("lock"); Assertions.assertTrue(lock.isLocked()); } }; t.start(); t.join(); lock.unlock(); Thread t2 = new Thread() { public void run() { RLock lock = redisson.getSpinLock("lock"); Assertions.assertFalse(lock.isLocked()); } }; t2.start(); t2.join(); }
protected double convertDuration(double duration) { return duration / durationFactor; }
@Test public void shouldConvertDurationToMillisecondsPrecisely() { assertEquals(2.0E-5, reporter.convertDuration(20), 0.0); }
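The expected value 2.0E-5 is only consistent with a nanoseconds-to-milliseconds durationFactor. A worked check under that assumption (the reporter's actual construction is not shown above):

import java.util.concurrent.TimeUnit;

public class DurationFactorCheck {
    public static void main(String[] args) {
        // Assumption: the reporter was created with a millisecond duration unit,
        // so durationFactor = nanoseconds per millisecond.
        double durationFactor = TimeUnit.MILLISECONDS.toNanos(1); // 1_000_000
        System.out.println(20 / durationFactor); // 2.0E-5, matching the assertion
    }
}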
@Bean("GlobalTempFolder") public TempFolder provide(ScannerProperties scannerProps, SonarUserHome userHome) { var workingPathName = StringUtils.defaultIfBlank(scannerProps.property(CoreProperties.GLOBAL_WORKING_DIRECTORY), CoreProperties.GLOBAL_WORKING_DIRECTORY_DEFAULT_VALUE); var workingPath = Paths.get(workingPathName); if (!workingPath.isAbsolute()) { var home = userHome.getPath(); workingPath = home.resolve(workingPath).normalize(); } try { cleanTempFolders(workingPath); } catch (IOException e) { LOG.error(String.format("failed to clean global working directory: %s", workingPath), e); } var tempDir = createTempFolder(workingPath); return new DefaultTempFolder(tempDir.toFile(), true); }
@Test void createTempFolderFromSonarHome(@TempDir Path sonarUserHomePath) { // with sonar home, it will be in {sonar.home}/.sonartmp when(sonarUserHome.getPath()).thenReturn(sonarUserHomePath); var expectedWorkingDir = sonarUserHomePath.resolve(CoreProperties.GLOBAL_WORKING_DIRECTORY_DEFAULT_VALUE); TempFolder tempFolder = underTest.provide(new ScannerProperties(Map.of()), sonarUserHome); tempFolder.newDir(); tempFolder.newFile(); assertThat(expectedWorkingDir).isDirectory(); assertThat(expectedWorkingDir.toFile().list()).hasSize(1); var rootTmpDir = expectedWorkingDir.toFile().listFiles()[0]; assertThat(rootTmpDir.list()).hasSize(2); }
public void sendRequests(Callback<None> callback) { LOG.info("Event Bus Requests throttler started for {} keys at a {} load rate", _keysToFetch.size(), _maxConcurrentRequests); if (_keysToFetch.size() == 0) { callback.onSuccess(None.none()); return; } _callback = callback; makeRequests(_maxConcurrentRequests); }
@Test(timeOut = 10000) public void testAllowZeroRequests() throws InterruptedException, ExecutionException, TimeoutException { TestSubscriber testSubscriber = new TestSubscriber(); TestEventBus testEventBus = new TestEventBus(testSubscriber); PropertyEventBusRequestsThrottler<String> propertyEventBusRequestsThrottler = new PropertyEventBusRequestsThrottler<>(testEventBus, testSubscriber, new ArrayList<>(), 5, false); FutureCallback<None> callback = new FutureCallback<>(); propertyEventBusRequestsThrottler.sendRequests(callback); callback.get(1000, TimeUnit.MILLISECONDS); }
public FloatArrayAsIterable usingExactEquality() { return new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject()); }
@Test public void usingExactEquality_contains_nullExpected() { float[] actual = array(1.0f, 2.0f, 3.0f); expectFailureWhenTestingThat(actual).usingExactEquality().contains(null); assertFailureKeys( "value of", "expected to contain", "testing whether", "but was", "additionally, one or more exceptions were thrown while comparing elements", "first exception"); assertFailureValue("expected to contain", "null"); assertThatFailure() .factValue("first exception") .startsWith("compare(" + actual[0] + ", null) threw java.lang.NullPointerException"); }
public Optional<UserDto> authenticate(Credentials credentials, HttpRequest request, AuthenticationEvent.Method method) { if (isLdapAuthActivated) { return Optional.of(doAuthenticate(fixCase(credentials), request, method)); } return Optional.empty(); }
@Test public void return_empty_user_when_ldap_not_activated() { reset(ldapRealm); when(ldapRealm.isLdapAuthActivated()).thenReturn(false); underTest = new LdapCredentialsAuthentication(settings.asConfig(), userRegistrar, authenticationEvent, ldapRealm); assertThat(underTest.authenticate(new Credentials(LOGIN, PASSWORD), request, BASIC)).isEmpty(); verifyNoInteractions(authenticationEvent); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
@Test public void shouldFindOneArgWithCast() { // Given: final KsqlScalarFunction[] functions = new KsqlScalarFunction[]{ function(EXPECTED, -1, LONG)}; Arrays.stream(functions).forEach(udfIndex::addFunction); // When: final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.INTEGER))); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
public boolean isValid(String value) { if (value == null) { return false; } URI uri; // ensure value is a valid URI try { uri = new URI(value); } catch (URISyntaxException e) { return false; } // OK, perform additional validation String scheme = uri.getScheme(); if (!isValidScheme(scheme)) { return false; } String authority = uri.getRawAuthority(); if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority return true; // this is a local file - nothing more to do here } else if ("file".equals(scheme) && authority != null && authority.contains(":")) { return false; } else { // Validate the authority if (!isValidAuthority(authority)) { return false; } } if (!isValidPath(uri.getRawPath())) { return false; } if (!isValidQuery(uri.getRawQuery())) { return false; } if (!isValidFragment(uri.getRawFragment())) { return false; } return true; }
@Test public void testValidator382() { UrlValidator validator = new UrlValidator(); assertTrue(validator.isValid("ftp://username:password@example.com:8042/over/there/index.dtb?type=animal&name=narwhal#nose")); }
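The file-scheme branches of the focal method can be exercised directly. A small usage sketch against commons-validator's UrlValidator; the default scheme set is http/https/ftp, so file must be allowed explicitly (the example URLs are illustrative):

import org.apache.commons.validator.routines.UrlValidator;

public class FileUrlCheck {
    public static void main(String[] args) {
        UrlValidator validator = new UrlValidator(new String[] {"http", "https", "file"});
        // Empty authority is the special case: a local file, accepted immediately.
        System.out.println(validator.isValid("file:///etc/hosts"));          // true
        // An authority containing ':' (e.g. a Windows drive letter) is rejected for file:.
        System.out.println(validator.isValid("file://C:/windows/system32")); // false
    }
}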
@Override public Message postProcessMessage(Message message) { MessageProducerRequest request = new MessageProducerRequest(message); TraceContext maybeParent = currentTraceContext.get(); // Unlike message consumers, we try current span before trying extraction. This is the proper // order because the span in scope should take precedence over a potentially stale header entry. // // NOTE: Brave instrumentation used properly does not result in stale header entries, as we // always clear message headers after reading. Span span; if (maybeParent == null) { TraceContextOrSamplingFlags extracted = springRabbitTracing.extractAndClearTraceIdHeaders(extractor, request, message); span = springRabbitTracing.nextMessagingSpan(sampler, request, extracted); } else { // If we have a span in scope assume headers were cleared before span = tracer.newChild(maybeParent); } if (!span.isNoop()) { span.kind(PRODUCER).name("publish"); if (remoteServiceName != null) span.remoteServiceName(remoteServiceName); // incur timestamp overhead only once long timestamp = tracing.clock(span.context()).currentTimeMicroseconds(); span.start(timestamp).finish(timestamp); } injector.inject(span.context(), request); return message; }
@Test void should_set_remote_service() { Message message = MessageBuilder.withBody(new byte[0]).build(); tracingMessagePostProcessor.postProcessMessage(message); assertThat(spans.get(0).remoteServiceName()) .isEqualTo("my-exchange"); }
public List<JobMessage> getJobMessages(String jobId, long startTimestampMs) throws IOException { // TODO: Allow filtering messages by importance Instant startTimestamp = new Instant(startTimestampMs); ArrayList<JobMessage> allMessages = new ArrayList<>(); String pageToken = null; while (true) { ListJobMessagesResponse response = dataflowClient.listJobMessages(jobId, pageToken); if (response == null || response.getJobMessages() == null) { return allMessages; } for (JobMessage m : response.getJobMessages()) { @Nullable Instant timestamp = fromCloudTime(m.getTime()); if (timestamp == null) { continue; } if (timestamp.isAfter(startTimestamp)) { allMessages.add(m); } } if (response.getNextPageToken() == null) { break; } else { pageToken = response.getNextPageToken(); } } allMessages.sort(new TimeStampComparator()); return allMessages; }
@Test public void testGetJobMessages() throws IOException { DataflowClient dataflowClient = mock(DataflowClient.class); ListJobMessagesResponse firstResponse = new ListJobMessagesResponse(); firstResponse.setJobMessages(new ArrayList<>()); for (long i = 0; i < 100; ++i) { JobMessage message = new JobMessage(); message.setId("message_" + i); message.setTime(TimeUtil.toCloudTime(new Instant(i))); firstResponse.getJobMessages().add(message); } String pageToken = "page_token"; firstResponse.setNextPageToken(pageToken); ListJobMessagesResponse secondResponse = new ListJobMessagesResponse(); secondResponse.setJobMessages(new ArrayList<>()); for (long i = 100; i < 150; ++i) { JobMessage message = new JobMessage(); message.setId("message_" + i); message.setTime(TimeUtil.toCloudTime(new Instant(i))); secondResponse.getJobMessages().add(message); } when(dataflowClient.listJobMessages(JOB_ID, null)).thenReturn(firstResponse); when(dataflowClient.listJobMessages(JOB_ID, pageToken)).thenReturn(secondResponse); MonitoringUtil util = new MonitoringUtil(dataflowClient); List<JobMessage> messages = util.getJobMessages(JOB_ID, -1); assertEquals(150, messages.size()); }
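getJobMessages is an instance of the usual token-based pagination loop. A stripped-down sketch of the pattern with hypothetical Client and Page types, mirroring the focal method's two exits (a defensive null check and an exhausted token):

import java.util.ArrayList;
import java.util.List;

interface Page<T> {
    List<T> items();
    String nextPageToken(); // null on the last page
}

interface PagedClient<T> {
    Page<T> list(String pageToken); // pageToken == null requests the first page
}

final class Pager {
    static <T> List<T> fetchAll(PagedClient<T> client) {
        List<T> all = new ArrayList<>();
        String token = null;
        while (true) {
            Page<T> page = client.list(token);
            if (page == null || page.items() == null) {
                return all; // defensive exit, as in the focal method
            }
            all.addAll(page.items());
            token = page.nextPageToken();
            if (token == null) {
                return all; // no more pages
            }
        }
    }
}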
public Searcher searcher() { return new Searcher(); }
@Test void requireThatPredicateIndexCanSearchWithEmptyDocuments() { PredicateIndexBuilder builder = new PredicateIndexBuilder(10); builder.indexDocument(1, Predicate.fromString("true")); builder.indexDocument(2, Predicate.fromString("false")); PredicateIndex index = builder.build(); PredicateIndex.Searcher searcher = index.searcher(); PredicateQuery query = new PredicateQuery(); assertEquals("[1]", searcher.search(query).toList().toString()); }
public static <T> PaginatedResponse<T> create(String listKey, PaginatedList<T> paginatedList) { return new PaginatedResponse<>(listKey, paginatedList, null, null); }
@Test public void serializeWithQuery() throws Exception { final ImmutableList<String> values = ImmutableList.of("hello", "world"); final PaginatedList<String> paginatedList = new PaginatedList<>(values, values.size(), 1, 10); final PaginatedResponse<String> response = PaginatedResponse.create("foo", paginatedList, "query1"); final DocumentContext ctx = JsonPath.parse(objectMapper.writeValueAsString(response)); final JsonPathAssert jsonPathAssert = JsonPathAssert.assertThat(ctx); jsonPathAssert.jsonPathAsString("$.query").isEqualTo("query1"); jsonPathAssert.jsonPathAsInteger("$.total").isEqualTo(2); jsonPathAssert.jsonPathAsInteger("$.count").isEqualTo(2); jsonPathAssert.jsonPathAsInteger("$.page").isEqualTo(1); jsonPathAssert.jsonPathAsInteger("$.per_page").isEqualTo(10); jsonPathAssert.jsonPathAsString("$.foo[0]").isEqualTo("hello"); jsonPathAssert.jsonPathAsString("$.foo[1]").isEqualTo("world"); assertThatThrownBy(() -> jsonPathAssert.jsonPathAsString("$.context")).isInstanceOf(PathNotFoundException.class); }
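For reference, the JSON shape those assertions pin down is roughly as follows (field order illustrative; note there is deliberately no "context" key):

// {"query":"query1","total":2,"count":2,"page":1,"per_page":10,"foo":["hello","world"]}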
@Override public CompletableFuture<Acknowledge> submitJob(JobGraph jobGraph, Time timeout) { final JobID jobID = jobGraph.getJobID(); try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobID))) { log.info("Received JobGraph submission '{}' ({}).", jobGraph.getName(), jobID); } return isInGloballyTerminalState(jobID) .thenComposeAsync( isTerminated -> { if (isTerminated) { log.warn( "Ignoring JobGraph submission '{}' ({}) because the job already " + "reached a globally-terminal state (i.e. {}) in a " + "previous execution.", jobGraph.getName(), jobID, Arrays.stream(JobStatus.values()) .filter(JobStatus::isGloballyTerminalState) .map(JobStatus::name) .collect(Collectors.joining(", "))); return FutureUtils.completedExceptionally( DuplicateJobSubmissionException.ofGloballyTerminated( jobID)); } else if (jobManagerRunnerRegistry.isRegistered(jobID) || submittedAndWaitingTerminationJobIDs.contains(jobID)) { // job with the given jobID is not terminated yet return FutureUtils.completedExceptionally( DuplicateJobSubmissionException.of(jobID)); } else if (isPartialResourceConfigured(jobGraph)) { return FutureUtils.completedExceptionally( new JobSubmissionException( jobID, "Currently, jobs are not supported if parts of the vertices " + "have resources configured. The limitation will be " + "removed in future versions.")); } else { return internalSubmitJob(jobGraph); } }, getMainThreadExecutor(jobID)); }
@Test public void testJobSubmissionWithPartialResourceConfigured() throws Exception { ResourceSpec resourceSpec = ResourceSpec.newBuilder(2.0, 10).build(); final JobVertex firstVertex = new JobVertex("firstVertex"); firstVertex.setInvokableClass(NoOpInvokable.class); firstVertex.setResources(resourceSpec, resourceSpec); final JobVertex secondVertex = new JobVertex("secondVertex"); secondVertex.setInvokableClass(NoOpInvokable.class); JobGraph jobGraphWithTwoVertices = JobGraphTestUtils.streamingJobGraph(firstVertex, secondVertex); dispatcher = createAndStartDispatcher( heartbeatServices, haServices, new ExpectedJobIdJobManagerRunnerFactory(jobId)); DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class); CompletableFuture<Acknowledge> acknowledgeFuture = dispatcherGateway.submitJob(jobGraphWithTwoVertices, TIMEOUT); try { acknowledgeFuture.get(); fail("job submission should have failed"); } catch (ExecutionException e) { assertTrue(ExceptionUtils.findThrowable(e, JobSubmissionException.class).isPresent()); } }
private Map<String, StorageUnit> getStorageUnits(final Map<String, StorageNode> storageUnitNodeMap, final Map<StorageNode, DataSource> storageNodeDataSources, final Map<String, DataSourcePoolProperties> dataSourcePoolPropsMap) { Map<String, StorageUnit> result = new LinkedHashMap<>(dataSourcePoolPropsMap.size(), 1F); for (Entry<String, DataSourcePoolProperties> entry : dataSourcePoolPropsMap.entrySet()) { String storageUnitName = entry.getKey(); StorageNode storageNode = storageUnitNodeMap.get(storageUnitName); DataSource dataSource = storageNodeDataSources.containsKey(storageNode) ? storageNodeDataSources.get(storageNode) : storageNodeDataSources.get(new StorageNode(storageUnitName)); StorageUnit storageUnit = new StorageUnit(storageNode, entry.getValue(), dataSource); result.put(storageUnitName, storageUnit); } return result; }
@Test void assertGetDataSourcePoolProperties() { DataSourceProvidedDatabaseConfiguration databaseConfig = createDataSourceProvidedDatabaseConfiguration(); DataSourcePoolProperties props = databaseConfig.getStorageUnits().get("foo_ds").getDataSourcePoolProperties(); Map<String, Object> poolStandardProps = props.getPoolPropertySynonyms().getStandardProperties(); assertTrue(poolStandardProps.isEmpty()); Map<String, Object> connStandardProps = props.getConnectionPropertySynonyms().getStandardProperties(); assertThat(connStandardProps.size(), is(3)); assertThat(connStandardProps.get("url"), is("jdbc:mock://127.0.0.1/foo_ds")); assertThat(connStandardProps.get("username"), is("root")); assertThat(connStandardProps.get("password"), is("root")); }
@Override public String generateSqlType(Dialect dialect) { switch (dialect.getId()) { case MsSql.ID: return "NVARCHAR (MAX)"; case Oracle.ID, H2.ID: return "CLOB"; case PostgreSql.ID: return "TEXT"; default: throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId()); } }
@Test public void generate_sql_type_on_h2() { assertThat(underTest.generateSqlType(new H2())).isEqualTo("CLOB"); }
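The test covers only the H2 branch; the remaining arms of the focal switch could be pinned down the same way, assuming the other dialect classes have no-arg constructors like H2 (unverified here):

assertThat(underTest.generateSqlType(new MsSql())).isEqualTo("NVARCHAR (MAX)");
assertThat(underTest.generateSqlType(new PostgreSql())).isEqualTo("TEXT");
assertThat(underTest.generateSqlType(new Oracle())).isEqualTo("CLOB");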
public static <T> T readStaticFieldOrNull(String className, String fieldName) { try { Class<?> clazz = Class.forName(className); return readStaticField(clazz, fieldName); } catch (ClassNotFoundException | NoSuchFieldException | IllegalAccessException | SecurityException e) { return null; } }
@Test public void readStaticFieldOrNull_whenClassDoesNotExist_thenReturnNull() { Object field = ReflectionUtils.readStaticFieldOrNull("foo.bar.nonExistingClass", "field"); assertNull(field); }
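A usage sketch; it assumes readStaticField resolves public static fields by name, since only the null-on-missing-class behavior is visible in the focal method:

// Succeeds when the class and field both exist and are accessible.
Integer max = ReflectionUtils.readStaticFieldOrNull("java.lang.Integer", "MAX_VALUE"); // 2147483647
// Returns null instead of throwing: the ClassNotFoundException is swallowed.
Object missing = ReflectionUtils.readStaticFieldOrNull("no.such.Clazz", "field");      // null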
@Override public DescribeClusterResult describeCluster(DescribeClusterOptions options) { final KafkaFutureImpl<Collection<Node>> describeClusterFuture = new KafkaFutureImpl<>(); final KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>(); final KafkaFutureImpl<String> clusterIdFuture = new KafkaFutureImpl<>(); final KafkaFutureImpl<Set<AclOperation>> authorizedOperationsFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("listNodes", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedBrokerOrActiveKController()) { private boolean useMetadataRequest = false; @Override AbstractRequest.Builder createRequest(int timeoutMs) { if (!useMetadataRequest) { return new DescribeClusterRequest.Builder(new DescribeClusterRequestData() .setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations()) .setEndpointType(metadataManager.usingBootstrapControllers() ? EndpointType.CONTROLLER.id() : EndpointType.BROKER.id())); } else { // Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it // simplifies communication with older brokers) return new MetadataRequest.Builder(new MetadataRequestData() .setTopics(Collections.emptyList()) .setAllowAutoTopicCreation(true) .setIncludeClusterAuthorizedOperations( options.includeAuthorizedOperations())); } } @Override void handleResponse(AbstractResponse abstractResponse) { if (!useMetadataRequest) { DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); if (error != Errors.NONE) { ApiError apiError = new ApiError(error, response.data().errorMessage()); handleFailure(apiError.exception()); return; } Map<Integer, Node> nodes = response.nodes(); describeClusterFuture.complete(nodes.values()); // Controller is null if controller id is equal to NO_CONTROLLER_ID controllerFuture.complete(nodes.get(response.data().controllerId())); clusterIdFuture.complete(response.data().clusterId()); authorizedOperationsFuture.complete( validAclOperations(response.data().clusterAuthorizedOperations())); } else { MetadataResponse response = (MetadataResponse) abstractResponse; describeClusterFuture.complete(response.brokers()); controllerFuture.complete(controller(response)); clusterIdFuture.complete(response.clusterId()); authorizedOperationsFuture.complete( validAclOperations(response.clusterAuthorizedOperations())); } } private Node controller(MetadataResponse response) { if (response.controller() == null || response.controller().id() == MetadataResponse.NO_CONTROLLER_ID) return null; return response.controller(); } @Override void handleFailure(Throwable throwable) { describeClusterFuture.completeExceptionally(throwable); controllerFuture.completeExceptionally(throwable); clusterIdFuture.completeExceptionally(throwable); authorizedOperationsFuture.completeExceptionally(throwable); } @Override boolean handleUnsupportedVersionException(final UnsupportedVersionException exception) { if (metadataManager.usingBootstrapControllers()) { return false; } if (useMetadataRequest) { return false; } useMetadataRequest = true; return true; } }, now); return new DescribeClusterResult(describeClusterFuture, controllerFuture, clusterIdFuture, authorizedOperationsFuture); }
@Test public void testDescribeClusterHandleError() { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0), AdminClientConfig.RETRIES_CONFIG, "2")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); // Prepare the describe cluster response used for the first describe cluster String errorMessage = "my error"; env.kafkaClient().prepareResponse( new DescribeClusterResponse(new DescribeClusterResponseData() .setErrorCode(Errors.INVALID_REQUEST.code()) .setErrorMessage(errorMessage))); final DescribeClusterResult result = env.adminClient().describeCluster(); TestUtils.assertFutureThrows(result.clusterId(), InvalidRequestException.class, errorMessage); TestUtils.assertFutureThrows(result.controller(), InvalidRequestException.class, errorMessage); TestUtils.assertFutureThrows(result.nodes(), InvalidRequestException.class, errorMessage); TestUtils.assertFutureThrows(result.authorizedOperations(), InvalidRequestException.class, errorMessage); } }
@Bean public ShenyuPlugin springCloudPlugin(final ObjectProvider<ShenyuSpringCloudServiceChooser> serviceChooser) { return new SpringCloudPlugin(serviceChooser.getIfAvailable()); }
@Test public void testSpringCloudPlugin() { applicationContextRunner.run(context -> { ShenyuPlugin plugin = context.getBean("springCloudPlugin", ShenyuPlugin.class); assertNotNull(plugin); assertThat(plugin.named()).isEqualTo(PluginEnum.SPRING_CLOUD.getName()); } ); }
@Udf public Integer least(@UdfParameter final Integer val, @UdfParameter final Integer... vals) { return (vals == null) ? null : Stream.concat(Stream.of(val), Arrays.stream(vals)) .filter(Objects::nonNull) .min(Integer::compareTo) .orElse(null); }
@Test public void shouldHandleAllNullColumns() { assertThat(leastUDF.least((Integer) null, null, null), is(nullValue())); assertThat(leastUDF.least((Double) null, null, null), is(nullValue())); assertThat(leastUDF.least((Long) null, null, null), is(nullValue())); assertThat(leastUDF.least((BigDecimal) null, null, null), is(nullValue())); assertThat(leastUDF.least((String) null, null, null), is(nullValue())); assertThat(leastUDF.least((ByteBuffer) null, null, null), is(nullValue())); assertThat(leastUDF.least((Date) null, null, null), is(nullValue())); assertThat(leastUDF.least((Time) null, null, null), is(nullValue())); assertThat(leastUDF.least((Timestamp) null, null, null), is(nullValue())); }
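Beyond the all-null case, the focal method's null handling has two edges worth spelling out (illustrative values):

leastUDF.least(1, 3, null, 2);          // 1    - nulls are filtered out before taking the min
leastUDF.least(5, (Integer[]) null);    // null - a null varargs array short-circuits, even though val is non-null
leastUDF.least((Integer) null, 4, 2);   // 2    - the leading argument may itself be null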
public static HivePartitionStats merge(HivePartitionStats current, HivePartitionStats update) { if (current.getCommonStats().getRowNums() == -1 || update.getCommonStats().getRowNums() <= 0) { return current; } else if (current.getCommonStats().getRowNums() == 0 && update.getCommonStats().getRowNums() > 0) { return update; } return new HivePartitionStats( reduce(current.getCommonStats(), update.getCommonStats(), ReduceOperator.ADD), // TODO(stephen): collect and merge column statistics current.getColumnStats()); }
@Test public void testMerge() { HivePartitionStats current = HivePartitionStats.empty(); HivePartitionStats update = HivePartitionStats.empty(); Assert.assertEquals(current, HivePartitionStats.merge(current, update)); current = HivePartitionStats.fromCommonStats(5, 100); update = HivePartitionStats.empty(); Assert.assertEquals(current, HivePartitionStats.merge(current, update)); current = HivePartitionStats.fromCommonStats(0, 0); update = HivePartitionStats.fromCommonStats(5, 100); Assert.assertEquals(update, HivePartitionStats.merge(current, update)); current = HivePartitionStats.fromCommonStats(5, 100); Assert.assertEquals(10, HivePartitionStats.merge(current, update).getCommonStats().getRowNums()); Assert.assertEquals(200, HivePartitionStats.merge(current, update).getCommonStats().getTotalFileBytes()); }
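A worked trace of the final two assertions, assuming fromCommonStats(rowNums, totalFileBytes) takes its arguments in that order (as the test implies):

HivePartitionStats current = HivePartitionStats.fromCommonStats(5, 100);
HivePartitionStats update = HivePartitionStats.fromCommonStats(5, 100);
HivePartitionStats merged = HivePartitionStats.merge(current, update);
merged.getCommonStats().getRowNums();        // 10: 5 + 5 via ReduceOperator.ADD
merged.getCommonStats().getTotalFileBytes(); // 200: 100 + 100
// Column statistics are carried over from `current` unmerged (still a TODO in the source).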
@Override @SuppressWarnings("unchecked") public void processElement(Object untypedElem) throws Exception { if (fnRunner == null) { // If we need to run reallyStartBundle in here, we need to make sure to switch the state // sampler into the start state. try (Closeable start = operationContext.enterStart()) { reallyStartBundle(); } } WindowedValue<InputT> elem = (WindowedValue<InputT>) untypedElem; if (fnSignature != null && fnSignature.stateDeclarations().size() > 0) { registerStateCleanup( (WindowingStrategy<?, BoundedWindow>) getDoFnInfo().getWindowingStrategy(), (Collection<BoundedWindow>) elem.getWindows()); } outputsPerElementTracker.onProcessElement(); fnRunner.processElement(elem); outputsPerElementTracker.onProcessElementSuccess(); }
@Test public void testStateTracking() throws Exception { ExecutionStateTracker tracker = ExecutionStateTracker.newForTest(); TestOperationContext operationContext = TestOperationContext.create( new CounterSet(), NameContextsForTests.nameContextForTest(), new MetricsContainerImpl(NameContextsForTests.ORIGINAL_NAME), tracker); class StateTestingDoFn extends DoFn<Integer, String> { private boolean startCalled = false; @StartBundle public void startBundle() throws Exception { startCalled = true; assertThat(tracker.getCurrentState(), equalTo(operationContext.getStartState())); } @ProcessElement public void processElement(ProcessContext c) throws Exception { assertThat(startCalled, equalTo(true)); assertThat(tracker.getCurrentState(), equalTo(operationContext.getProcessState())); } } StateTestingDoFn fn = new StateTestingDoFn(); DoFnInfo<?, ?> fnInfo = DoFnInfo.forFn( fn, WindowingStrategy.globalDefault(), null /* side input views */, null /* input coder */, MAIN_OUTPUT, DoFnSchemaInformation.create(), Collections.emptyMap()); ParDoFn userParDoFn = new SimpleParDoFn<>( options, DoFnInstanceManagers.singleInstance(fnInfo), NullSideInputReader.empty(), MAIN_OUTPUT, ImmutableMap.of(MAIN_OUTPUT, 0, new TupleTag<>("declared"), 1), BatchModeExecutionContext.forTesting( options, operationContext.counterFactory(), "testStage") .getStepContext(operationContext), operationContext, DoFnSchemaInformation.create(), Collections.emptyMap(), SimpleDoFnRunnerFactory.INSTANCE); // This test ensures proper behavior of the state sampling even with lazy initialization. try (Closeable trackerCloser = tracker.activate()) { try (Closeable processCloser = operationContext.enterProcess()) { userParDoFn.processElement(WindowedValue.valueInGlobalWindow(5)); } } }
@Override public ServerConfiguration getServerConfiguration(String issuer) { ServerConfiguration server = staticServerService.getServerConfiguration(issuer); if (server != null) { return server; } else { return dynamicServerService.getServerConfiguration(issuer); } }
@Test public void getServerConfiguration_noIssuer() { Mockito.when(mockStaticService.getServerConfiguration(issuer)).thenReturn(mockServerConfig); Mockito.when(mockDynamicService.getServerConfiguration(issuer)).thenReturn(mockServerConfig); String badIssuer = "www.badexample.com"; ServerConfiguration result = hybridService.getServerConfiguration(badIssuer); Mockito.verify(mockStaticService).getServerConfiguration(badIssuer); Mockito.verify(mockDynamicService).getServerConfiguration(badIssuer); assertThat(result, is(nullValue())); }
public static Optional<MaximumLagFilter> create( final Optional<LagReportingAgent> lagReportingAgent, final RoutingOptions routingOptions, final List<KsqlHostInfo> hosts, final String queryId, final String storeName, final int partition ) { if (!lagReportingAgent.isPresent()) { return Optional.empty(); } final QueryStateStoreId queryStateStoreId = QueryStateStoreId.of(queryId, storeName); final ImmutableMap<KsqlHostInfo, Optional<LagInfoEntity>> lagByHost = hosts.stream() .collect(ImmutableMap.toImmutableMap( Function.identity(), host -> lagReportingAgent.get().getLagInfoForHost( host, queryStateStoreId, partition))); final OptionalLong maxEndOffset = lagByHost.values().stream() .filter(Optional::isPresent) .map(Optional::get) .mapToLong(LagInfoEntity::getEndOffsetPosition) .max(); return Optional.of(new MaximumLagFilter(routingOptions, lagByHost, maxEndOffset)); }
@Test public void filter_lagReportingDisabled() { // When: Optional<MaximumLagFilter> filterOptional = MaximumLagFilter.create( Optional.empty(), routingOptions, HOSTS, APPLICATION_ID, STATE_STORE, PARTITION); // Then: assertFalse(filterOptional.isPresent()); }
public DescribeTopicPartitionsResponseData handleDescribeTopicPartitionsRequest(RequestChannel.Request abstractRequest) { DescribeTopicPartitionsRequestData request = ((DescribeTopicPartitionsRequest) abstractRequest.loggableRequest()).data(); Set<String> topics = new HashSet<>(); boolean fetchAllTopics = request.topics().isEmpty(); DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); String cursorTopicName = cursor != null ? cursor.topicName() : ""; if (fetchAllTopics) { JavaConverters.asJavaCollection(metadataCache.getAllTopics()).forEach(topicName -> { if (topicName.compareTo(cursorTopicName) >= 0) { topics.add(topicName); } }); } else { request.topics().forEach(topic -> { String topicName = topic.name(); if (topicName.compareTo(cursorTopicName) >= 0) { topics.add(topicName); } }); if (cursor != null && !topics.contains(cursor.topicName())) { // The topic in cursor must be included in the topic list if provided. throw new InvalidRequestException("DescribeTopicPartitionsRequest topic list should contain the cursor topic: " + cursor.topicName()); } } if (cursor != null && cursor.partitionIndex() < 0) { // The partition id in cursor must be valid. throw new InvalidRequestException("DescribeTopicPartitionsRequest cursor partition must be valid: " + cursor); } // Do not disclose the existence of topics unauthorized for Describe, so we've not even checked if they exist or not Set<DescribeTopicPartitionsResponseTopic> unauthorizedForDescribeTopicMetadata = new HashSet<>(); Stream<String> authorizedTopicsStream = topics.stream().sorted().filter(topicName -> { boolean isAuthorized = authHelper.authorize( abstractRequest.context(), DESCRIBE, TOPIC, topicName, true, true, 1); if (!fetchAllTopics && !isAuthorized) { // We should not return topicId when on unauthorized error, so we return zero uuid. unauthorizedForDescribeTopicMetadata.add(describeTopicPartitionsResponseTopic( Errors.TOPIC_AUTHORIZATION_FAILED, topicName, Uuid.ZERO_UUID, false, Collections.emptyList()) ); } return isAuthorized; }); DescribeTopicPartitionsResponseData response = metadataCache.getTopicMetadataForDescribeTopicResponse( JavaConverters.asScalaIterator(authorizedTopicsStream.iterator()), abstractRequest.context().listenerName, (String topicName) -> topicName.equals(cursorTopicName) ? cursor.partitionIndex() : 0, Math.max(Math.min(config.maxRequestPartitionSizeLimit(), request.responsePartitionLimit()), 1), fetchAllTopics ); // get topic authorized operations response.topics().forEach(topicData -> topicData.setTopicAuthorizedOperations(authHelper.authorizedOperations(abstractRequest, new Resource(TOPIC, topicData.name())))); response.topics().addAll(unauthorizedForDescribeTopicMetadata); return response; }
@Test void testDescribeTopicPartitionsRequest() { // 1. Set up authorizer Authorizer authorizer = mock(Authorizer.class); String unauthorizedTopic = "unauthorized-topic"; String authorizedTopic = "authorized-topic"; String authorizedNonExistTopic = "authorized-non-exist"; Action expectedActions1 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, unauthorizedTopic, PatternType.LITERAL), 1, true, true); Action expectedActions2 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true); Action expectedActions3 = new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedNonExistTopic, PatternType.LITERAL), 1, true, true); when(authorizer.authorize(any(RequestContext.class), argThat(t -> t.contains(expectedActions1) || t.contains(expectedActions2) || t.contains(expectedActions3)))) .thenAnswer(invocation -> { List<Action> actions = invocation.getArgument(1); return actions.stream().map(action -> { if (action.resourcePattern().name().startsWith("authorized")) return AuthorizationResult.ALLOWED; else return AuthorizationResult.DENIED; }).collect(Collectors.toList()); }); // 2. Set up MetadataCache Uuid authorizedTopicId = Uuid.randomUuid(); Uuid unauthorizedTopicId = Uuid.randomUuid(); Map<String, Uuid> topicIds = new HashMap<>(); topicIds.put(authorizedTopic, authorizedTopicId); topicIds.put(unauthorizedTopic, unauthorizedTopicId); BrokerEndpointCollection collection = new BrokerEndpointCollection(); collection.add(new BrokerEndpoint() .setName(broker.endpoints().get(0).listener()) .setHost(broker.endpoints().get(0).host()) .setPort(broker.endpoints().get(0).port()) .setSecurityProtocol(broker.endpoints().get(0).securityProtocol()) ); List<ApiMessage> records = Arrays.asList( new RegisterBrokerRecord() .setBrokerId(broker.id()) .setBrokerEpoch(0) .setIncarnationId(Uuid.randomUuid()) .setEndPoints(collection) .setRack(broker.rack()) .setFenced(false), new TopicRecord().setName(authorizedTopic).setTopicId(topicIds.get(authorizedTopic)), new TopicRecord().setName(unauthorizedTopic).setTopicId(topicIds.get(unauthorizedTopic)), new PartitionRecord() .setTopicId(authorizedTopicId) .setPartitionId(1) .setReplicas(Arrays.asList(0, 1, 2)) .setLeader(0) .setIsr(Arrays.asList(0)) .setEligibleLeaderReplicas(Arrays.asList(1)) .setLastKnownElr(Arrays.asList(2)) .setLeaderEpoch(0) .setPartitionEpoch(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord() .setTopicId(authorizedTopicId) .setPartitionId(0) .setReplicas(Arrays.asList(0, 1, 2)) .setLeader(0) .setIsr(Arrays.asList(0)) .setEligibleLeaderReplicas(Arrays.asList(1)) .setLastKnownElr(Arrays.asList(2)) .setLeaderEpoch(0) .setPartitionEpoch(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord() .setTopicId(unauthorizedTopicId) .setPartitionId(0) .setReplicas(Arrays.asList(0, 1, 3)) .setLeader(0) .setIsr(Arrays.asList(0)) .setEligibleLeaderReplicas(Arrays.asList(1)) .setLastKnownElr(Arrays.asList(3)) .setLeaderEpoch(0) .setPartitionEpoch(2) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()) ); KRaftMetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1); updateKraftMetadataCache(metadataCache, records); DescribeTopicPartitionsRequestHandler handler = new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizer)), createKafkaDefaultConfig()); // 3.1 Basic test 
DescribeTopicPartitionsRequest describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest( new DescribeTopicPartitionsRequestData() .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(unauthorizedTopic) )) ); RequestChannel.Request request; try { request = buildRequest(describeTopicPartitionsRequest, plaintextListener); } catch (Exception e) { fail(e.getMessage()); return; } DescribeTopicPartitionsResponseData response = handler.handleDescribeTopicPartitionsRequest(request); List<DescribeTopicPartitionsResponseTopic> topics = response.topics().valuesList(); assertEquals(2, topics.size()); DescribeTopicPartitionsResponseTopic topicToCheck = topics.get(0); assertEquals(authorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.NONE.code(), topicToCheck.errorCode()); assertEquals(authorizedTopic, topicToCheck.name()); assertEquals(2, topicToCheck.partitions().size()); topicToCheck = topics.get(1); assertNotEquals(unauthorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), topicToCheck.errorCode()); assertEquals(unauthorizedTopic, topicToCheck.name()); // 3.2 With cursor describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData() .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(unauthorizedTopic) )) .setCursor(new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic).setPartitionIndex(1)) ); try { request = buildRequest(describeTopicPartitionsRequest, plaintextListener); } catch (Exception e) { fail(e.getMessage()); return; } response = handler.handleDescribeTopicPartitionsRequest(request); topics = response.topics().valuesList(); assertEquals(2, topics.size()); topicToCheck = topics.get(0); assertEquals(authorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.NONE.code(), topicToCheck.errorCode()); assertEquals(authorizedTopic, topicToCheck.name()); assertEquals(1, topicToCheck.partitions().size()); topicToCheck = topics.get(1); assertNotEquals(unauthorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), topicToCheck.errorCode()); assertEquals(unauthorizedTopic, topicToCheck.name()); // 3.3 Fetch all topics describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData()); try { request = buildRequest(describeTopicPartitionsRequest, plaintextListener); } catch (Exception e) { fail(e.getMessage()); return; } response = handler.handleDescribeTopicPartitionsRequest(request); topics = response.topics().valuesList(); assertEquals(1, topics.size()); topicToCheck = topics.get(0); assertEquals(authorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.NONE.code(), topicToCheck.errorCode()); assertEquals(authorizedTopic, topicToCheck.name()); assertEquals(2, topicToCheck.partitions().size()); // 3.4 Fetch all topics with cursor describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest( new DescribeTopicPartitionsRequestData().setCursor( new DescribeTopicPartitionsRequestData.Cursor().setTopicName(authorizedTopic).setPartitionIndex(1))); try { request = buildRequest(describeTopicPartitionsRequest, plaintextListener); } catch (Exception e) { fail(e.getMessage()); return; } response = handler.handleDescribeTopicPartitionsRequest(request); topics = 
response.topics().valuesList(); assertEquals(1, topics.size()); topicToCheck = topics.get(0); assertEquals(authorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.NONE.code(), topicToCheck.errorCode()); assertEquals(authorizedTopic, topicToCheck.name()); assertEquals(1, topicToCheck.partitions().size()); // 3.5 Fetch all topics with limit describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest( new DescribeTopicPartitionsRequestData().setResponsePartitionLimit(1) ); try { request = buildRequest(describeTopicPartitionsRequest, plaintextListener); } catch (Exception e) { fail(e.getMessage()); return; } response = handler.handleDescribeTopicPartitionsRequest(request); topics = response.topics().valuesList(); assertEquals(1, topics.size()); topicToCheck = topics.get(0); assertEquals(authorizedTopicId, topicToCheck.topicId()); assertEquals(Errors.NONE.code(), topicToCheck.errorCode()); assertEquals(authorizedTopic, topicToCheck.name()); assertEquals(1, topicToCheck.partitions().size()); assertEquals(authorizedTopic, response.nextCursor().topicName()); assertEquals(1, response.nextCursor().partitionIndex()); }
public void cleanControllerBrokerData(String controllerAddr, String clusterName, String brokerName, String brokerControllerIdsToClean, boolean isCleanLivingBroker) throws RemotingException, InterruptedException, MQBrokerException { //get controller leader address final GetMetaDataResponseHeader controllerMetaData = this.getControllerMetaData(controllerAddr); assert controllerMetaData != null; assert controllerMetaData.getControllerLeaderAddress() != null; final String leaderAddress = controllerMetaData.getControllerLeaderAddress(); CleanControllerBrokerDataRequestHeader cleanHeader = new CleanControllerBrokerDataRequestHeader(clusterName, brokerName, brokerControllerIdsToClean, isCleanLivingBroker); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CLEAN_BROKER_DATA, cleanHeader); final RemotingCommand response = this.remotingClient.invokeSync(leaderAddress, request, 3000); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { return; } default: break; } throw new MQBrokerException(response.getCode(), response.getRemark()); }
@Test public void testCleanControllerBrokerData() throws RemotingException, InterruptedException, MQBrokerException { mockInvokeSync(); GetMetaDataResponseHeader responseHeader = new GetMetaDataResponseHeader(); responseHeader.setControllerLeaderAddress(defaultBrokerAddr); setResponseHeader(responseHeader); mqClientAPI.cleanControllerBrokerData(defaultBrokerAddr, clusterName, brokerName, "", false); }
@Restricted(NoExternalUse.class) @VisibleForTesting public static Set<String> getAutoCompletionCandidates(List<String> loggerNamesList) { Set<String> loggerNames = new HashSet<>(loggerNamesList); // now look for package prefixes that make sense to offer for autocompletion: // Only prefixes that match multiple loggers will be shown. // Example: 'org' will show 'org', because there's org.apache, org.jenkinsci, etc. // 'io' might only show 'io.jenkins.plugins' rather than 'io' if all loggers starting with 'io' start with 'io.jenkins.plugins'. HashMap<String, Integer> seenPrefixes = new HashMap<>(); SortedSet<String> relevantPrefixes = new TreeSet<>(); for (String loggerName : loggerNames) { String[] loggerNameParts = loggerName.split("[.]"); String longerPrefix = null; for (int i = loggerNameParts.length; i > 0; i--) { String loggerNamePrefix = String.join(".", Arrays.copyOf(loggerNameParts, i)); seenPrefixes.put(loggerNamePrefix, seenPrefixes.getOrDefault(loggerNamePrefix, 0) + 1); if (longerPrefix == null) { relevantPrefixes.add(loggerNamePrefix); // actual logger name longerPrefix = loggerNamePrefix; continue; } if (seenPrefixes.get(loggerNamePrefix) > seenPrefixes.get(longerPrefix)) { relevantPrefixes.add(loggerNamePrefix); } longerPrefix = loggerNamePrefix; } } return relevantPrefixes; }
@Test public void autocompletionTest() { List<String> loggers = Arrays.asList( "com.company.whatever.Foo", "com.foo.Bar", "com.foo.Baz", "org.example.app.Main", "org.example.app.impl.xml.Parser", "org.example.app.impl.xml.Validator"); Set<String> candidates = LogRecorder.getAutoCompletionCandidates(loggers); isCandidate(candidates, "com"); isCandidate(candidates, "com.company.whatever.Foo"); isCandidate(candidates, "com.foo"); isCandidate(candidates, "com.foo.Bar"); isCandidate(candidates, "com.foo.Baz"); isCandidate(candidates, "org.example.app"); isCandidate(candidates, "org.example.app.Main"); isCandidate(candidates, "org.example.app.impl.xml"); isCandidate(candidates, "org.example.app.impl.xml.Parser"); isCandidate(candidates, "org.example.app.impl.xml.Validator"); isNotCandidate(candidates, "org"); isNotCandidate(candidates, "org.example"); assertEquals("expected number of items", 10, candidates.size()); }
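Why "org" and "org.example" are excluded while "com" survives: in effect, a prefix is offered only if at some point during the scan it has matched strictly more loggers than the one-segment-longer prefix beneath it. Tracing the final match counts for the loggers above:

// match counts: com=3, com.foo=2, org=3, org.example=3, org.example.app=3, org.example.app.impl.xml=2
// com                  vs com.company (1) and com.foo (2)      -> 3 > 1 and 3 > 2, kept
// org                  vs org.example (3)                      -> never exceeds it, dropped
// org.example          vs org.example.app (3)                  -> never exceeds it, dropped
// org.example.app      vs org.example.app.impl (2) / .Main (1) -> 3 > 2, kept
// org.example.app.impl vs org.example.app.impl.xml (2)         -> 2 > 2, dropped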
@Override public void execute(Exchange exchange) throws SmppException { SubmitSm[] submitSms = createSubmitSm(exchange); List<String> messageIDs = new ArrayList<>(submitSms.length); String messageID = null; for (int i = 0; i < submitSms.length; i++) { SubmitSm submitSm = submitSms[i]; messageID = null; if (log.isDebugEnabled()) { log.debug("Sending short message {} for exchange id '{}'...", i, exchange.getExchangeId()); } try { SubmitSmResult result = session.submitShortMessage( submitSm.getServiceType(), TypeOfNumber.valueOf(submitSm.getSourceAddrTon()), NumberingPlanIndicator.valueOf(submitSm.getSourceAddrNpi()), submitSm.getSourceAddr(), TypeOfNumber.valueOf(submitSm.getDestAddrTon()), NumberingPlanIndicator.valueOf(submitSm.getDestAddrNpi()), submitSm.getDestAddress(), new ESMClass(submitSm.getEsmClass()), submitSm.getProtocolId(), submitSm.getPriorityFlag(), submitSm.getScheduleDeliveryTime(), submitSm.getValidityPeriod(), new RegisteredDelivery(submitSm.getRegisteredDelivery()), submitSm.getReplaceIfPresent(), DataCodings.newInstance(submitSm.getDataCoding()), (byte) 0, submitSm.getShortMessage(), submitSm.getOptionalParameters()); if (result != null) { messageID = result.getMessageId(); } } catch (Exception e) { throw new SmppException(e); } if (messageID != null) { messageIDs.add(messageID); } } if (log.isDebugEnabled()) { log.debug("Sent short message for exchange id '{}' and received message ids '{}'", exchange.getExchangeId(), messageIDs); } Message message = ExchangeHelper.getResultMessage(exchange); message.setHeader(SmppConstants.ID, messageIDs); message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size()); }
@Test public void executeWithOptionalParameter() throws Exception { Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut); exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitSm"); exchange.getIn().setHeader(SmppConstants.ID, "1"); exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value()); exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value()); exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818"); exchange.getIn().setHeader(SmppConstants.DEST_ADDR_TON, TypeOfNumber.INTERNATIONAL.value()); exchange.getIn().setHeader(SmppConstants.DEST_ADDR_NPI, NumberingPlanIndicator.INTERNET.value()); exchange.getIn().setHeader(SmppConstants.DEST_ADDR, "1919"); exchange.getIn().setHeader(SmppConstants.SCHEDULE_DELIVERY_TIME, new Date(1111111)); exchange.getIn().setHeader(SmppConstants.VALIDITY_PERIOD, new Date(2222222)); exchange.getIn().setHeader(SmppConstants.PROTOCOL_ID, (byte) 1); exchange.getIn().setHeader(SmppConstants.PRIORITY_FLAG, (byte) 2); exchange.getIn().setHeader(SmppConstants.REGISTERED_DELIVERY, new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE).value()); exchange.getIn().setHeader(SmppConstants.REPLACE_IF_PRESENT_FLAG, ReplaceIfPresentFlag.REPLACE.value()); exchange.getIn().setBody("short message body"); Map<String, String> optionalParameters = new LinkedHashMap<>(); optionalParameters.put("SOURCE_SUBADDRESS", "1292"); optionalParameters.put("ADDITIONAL_STATUS_INFO_TEXT", "urgent"); optionalParameters.put("DEST_ADDR_SUBUNIT", "4"); optionalParameters.put("DEST_TELEMATICS_ID", "2"); optionalParameters.put("QOS_TIME_TO_LIVE", "3600000"); optionalParameters.put("ALERT_ON_MESSAGE_DELIVERY", null); exchange.getIn().setHeader(SmppConstants.OPTIONAL_PARAMETERS, optionalParameters); when(session.submitShortMessage(eq("CMT"), eq(TypeOfNumber.NATIONAL), eq(NumberingPlanIndicator.NATIONAL), eq("1818"), eq(TypeOfNumber.INTERNATIONAL), eq(NumberingPlanIndicator.INTERNET), eq("1919"), eq(new ESMClass()), eq((byte) 1), eq((byte) 2), eq("-300101001831100+"), eq("-300101003702200+"), eq(new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE)), eq(ReplaceIfPresentFlag.REPLACE.value()), eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), eq("short message body".getBytes()), eq(new OptionalParameter.Source_subaddress("1292".getBytes())), eq(new OptionalParameter.Additional_status_info_text("urgent".getBytes())), eq(new OptionalParameter.Dest_addr_subunit((byte) 4)), eq(new OptionalParameter.Dest_telematics_id((short) 2)), eq(new OptionalParameter.Qos_time_to_live(3600000)), eq(new OptionalParameter.Alert_on_message_delivery((byte) 0)))) .thenReturn(new SubmitSmResult(new MessageId("1"), null)); command.execute(exchange); assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID)); assertEquals(1, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT)); }
@Override public ObjectNode encode(DelayMeasurementEntry dm, CodecContext context) { checkNotNull(dm, "DM cannot be null"); ObjectNode result = context.mapper().createObjectNode() .put(DM_ID, dm.dmId().toString()); if (dm.sessionStatus() != null) { result.put(SESSION_STATUS, dm.sessionStatus().name()); } if (dm.frameDelayTwoWay() != null) { result.put(FRAME_DELAY_TWO_WAY, dm.frameDelayTwoWay().toString()); } if (dm.frameDelayForward() != null) { result.put(FRAME_DELAY_FORWARD, dm.frameDelayForward().toString()); } if (dm.frameDelayBackward() != null) { result.put(FRAME_DELAY_BACKWARD, dm.frameDelayBackward().toString()); } if (dm.interFrameDelayVariationTwoWay() != null) { result.put(INTER_FRAME_DELAY_VARIATION_TWO_WAY, dm.interFrameDelayVariationTwoWay().toString()); } if (dm.interFrameDelayVariationForward() != null) { result.put(INTER_FRAME_DELAY_VARIATION_FORWARD, dm.interFrameDelayVariationForward().toString()); } if (dm.interFrameDelayVariationBackward() != null) { result.put(INTER_FRAME_DELAY_VARIATION_BACKWARD, dm.interFrameDelayVariationBackward().toString()); } ObjectNode dmAttrs = new DmCreateCodec().encode(dm, context); Iterator<Entry<String, JsonNode>> elements = dmAttrs.fields(); while (elements.hasNext()) { Entry<String, JsonNode> element = elements.next(); result.set(element.getKey(), element.getValue()); } if (dm.currentResult() != null) { result.set(CURRENT, new DelayMeasurementStatCurrentCodec() .encode(dm.currentResult(), context)); } if (dm.historicalResults() != null) { result.set(HISTORIC, new DelayMeasurementStatHistoryCodec() .encode(dm.historicalResults(), context)); } return result; }
@Test public void testEncodeDelayMeasurementEntryCodecContext() throws JsonProcessingException, IOException { ObjectNode node = mapper.createObjectNode(); node.set("dm", context.codec(DelayMeasurementEntry.class) .encode(dmEntry1, context)); assertEquals(12, node.get("dm").get("dmId").asInt()); assertEquals(DmType.DM1DMTX.name(), node.get("dm").get("dmCfgType").asText()); assertEquals(Version.Y17312008.name(), node.get("dm").get("version").asText()); assertEquals(10, node.get("dm").get("remoteMepId").asInt()); assertEquals(3, ((ArrayNode) node.get("dm").get("measurementsEnabled")).size()); assertEquals(SessionStatus.NOT_ACTIVE.name(), node.get("dm").get("sessionStatus").asText()); assertEquals("PT0.000101S", node.get("dm").get("frameDelayTwoWay").asText()); assertEquals("PT0.000102S", node.get("dm").get("frameDelayForward").asText()); assertEquals("PT0.000103S", node.get("dm").get("frameDelayBackward").asText()); assertEquals("PT0.000104S", node.get("dm").get("interFrameDelayVariationTwoWay").asText()); assertEquals("PT0.000105S", node.get("dm").get("interFrameDelayVariationForward").asText()); assertEquals("PT0.000106S", node.get("dm").get("interFrameDelayVariationBackward").asText()); }
@Override public Optional<DevOpsProjectCreator> getDevOpsProjectCreator(DbSession dbSession, Map<String, String> characteristics) { return delegates.stream() .flatMap(delegate -> delegate.getDevOpsProjectCreator(dbSession, characteristics).stream()) .findFirst(); }
@Test public void getDevOpsProjectDescriptor_whenOneDelegatesReturningACreator_shouldDelegate() { DevOpsProjectCreatorFactory successfulDelegate = mock(); DevOpsProjectCreator devOpsProjectCreator = mock(); when(successfulDelegate.getDevOpsProjectCreator(DB_SESSION, CHARACTERISTICS)).thenReturn(Optional.of(devOpsProjectCreator)); DelegatingDevOpsProjectCreatorFactory delegates = new DelegatingDevOpsProjectCreatorFactory(Set.of(mock(), successfulDelegate)); assertThat(delegates.getDevOpsProjectCreator(DB_SESSION, CHARACTERISTICS)).contains(devOpsProjectCreator); }
@Deprecated public static SegwitAddress fromBech32(@Nullable NetworkParameters params, String bech32) throws AddressFormatException { return (SegwitAddress) AddressParser.getLegacy(params).parseAddress(bech32); }
@Test(expected = AddressFormatException.InvalidDataLength.class) public void fromBech32m_taprootTooLong() { // Taproot, valid bech32m encoding, checksum ok, padding ok, but no valid Segwit v1 program // (this program is 40 bytes long, but only a 32-byte program is valid for Segwit v1/Taproot) String taprootAddressWith40BytesWitnessProgram = "bc1p6t0pcqrq3mvedn884lgj9s2cm52xp9vtnlc89cv5x77f5l725rrdjhqrld6m6rza67j62a"; SegwitAddress.fromBech32(taprootAddressWith40BytesWitnessProgram, MAINNET); }
public static Duration minutes(long count) { return new Duration(count, TimeUnit.MINUTES); }
@Test void testSerialization() throws IOException, ClassNotFoundException { final Duration duration = Duration.minutes(42L); final byte[] bytes; try (final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); final ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream)) { objectOutputStream.writeObject(duration); bytes = outputStream.toByteArray(); } try (final ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes); final ObjectInputStream objectInputStream = new ObjectInputStream(inputStream)) { @SuppressWarnings("BanSerializableRead") final Object o = objectInputStream.readObject(); assertThat(o) .isInstanceOf(Duration.class) .isEqualTo(duration); } }
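The test's byte-array round trip generalizes to a small JDK-only helper; a sketch (the helper name is illustrative):

import java.io.*;

public class SerializationRoundTrip {
    // Generic round trip through Java serialization, the same pattern the test uses.
    @SuppressWarnings("unchecked")
    static <T extends Serializable> T roundTrip(T value) throws IOException, ClassNotFoundException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(out)) {
            oos.writeObject(value);
        }
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(out.toByteArray()))) {
            return (T) ois.readObject();
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(roundTrip(Integer.valueOf(42))); // 42
    }
}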
public CreateTableCommand createTableCommand( final KsqlStructuredDataOutputNode outputNode, final Optional<RefinementInfo> emitStrategy ) { Optional<WindowInfo> windowInfo = outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(); if (windowInfo.isPresent() && emitStrategy.isPresent()) { final WindowInfo info = windowInfo.get(); windowInfo = Optional.of(WindowInfo.of( info.getType(), info.getSize(), Optional.of(emitStrategy.get().getOutputRefinement()) )); } return new CreateTableCommand( outputNode.getSinkName().get(), outputNode.getSchema(), outputNode.getTimestampColumn(), outputNode.getKsqlTopic().getKafkaTopicName(), Formats.from(outputNode.getKsqlTopic()), windowInfo, Optional.of(outputNode.getOrReplace()), Optional.of(false) ); }
@Test public void shouldCreateTableCommandWithSingleValueWrappingFromPropertiesNotConfig() { // Given: ksqlConfig = new KsqlConfig(ImmutableMap.of( KsqlConfig.KSQL_WRAP_SINGLE_VALUES, true )); final ImmutableMap<String, Object> overrides = ImmutableMap.of( KsqlConfig.KSQL_WRAP_SINGLE_VALUES, true ); givenProperty(CommonCreateConfigs.WRAP_SINGLE_VALUE, new BooleanLiteral("false")); final CreateTable statement = new CreateTable(SOME_NAME, TABLE_ELEMENTS_1_VALUE, false, true, withProperties, false); // When: final CreateTableCommand cmd = createSourceFactory .createTableCommand( statement, ksqlConfig.cloneWithPropertyOverwrite(overrides)); // Then: assertThat(cmd.getFormats().getValueFeatures(), is(SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES))); }
@Override public Void execute(Context context) { KieSession ksession = ((RegistryContext) context).lookup(KieSession.class); Collection<?> objects = ksession.getObjects(new ConditionFilter(factToCheck)); if (!objects.isEmpty()) { factToCheck.forEach(fact -> fact.getScenarioResult().setResult(true)); } else { factToCheck.forEach(fact -> fact.getScenarioResult().getFactMappingValue().setExceptionMessage("There is no instance which satisfies the expected conditions")); } return null; }
@Test public void execute_setResultIsNotCalled() { when(kieSession.getObjects(any(ObjectFilter.class))).thenReturn(List.of()); when(scenarioResult.getFactMappingValue()).thenReturn(factMappingValue); validateFactCommand.execute(registryContext); verify(scenarioResult, times(0)).setResult(anyBoolean()); }
public void build(@Nullable SegmentVersion segmentVersion, ServerMetrics serverMetrics) throws Exception { SegmentGeneratorConfig genConfig = new SegmentGeneratorConfig(_tableConfig, _dataSchema); // The segment generation code in SegmentColumnarIndexCreator will throw // exception if start and end time in time column are not in acceptable // range. We don't want the realtime consumption to stop (if an exception // is thrown) and thus the time validity check is explicitly disabled for // realtime segment generation genConfig.setSegmentTimeValueCheck(false); if (_columnIndicesForRealtimeTable.getInvertedIndexColumns() != null) { genConfig.setIndexOn(StandardIndexes.inverted(), IndexConfig.ENABLED, _columnIndicesForRealtimeTable.getInvertedIndexColumns()); } if (_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns() != null) { genConfig.setVarLengthDictionaryColumns(_columnIndicesForRealtimeTable.getVarLengthDictionaryColumns()); } if (segmentVersion != null) { genConfig.setSegmentVersion(segmentVersion); } genConfig.setTableName(_tableName); genConfig.setOutDir(_outputPath); genConfig.setSegmentName(_segmentName); addIndexOrDefault(genConfig, StandardIndexes.text(), _columnIndicesForRealtimeTable.getTextIndexColumns(), new TextIndexConfigBuilder(genConfig.getFSTIndexType()).build()); addIndexOrDefault(genConfig, StandardIndexes.fst(), _columnIndicesForRealtimeTable.getFstIndexColumns(), new FstIndexConfig(genConfig.getFSTIndexType())); SegmentPartitionConfig segmentPartitionConfig = _realtimeSegmentImpl.getSegmentPartitionConfig(); genConfig.setSegmentPartitionConfig(segmentPartitionConfig); genConfig.setNullHandlingEnabled(_nullHandlingEnabled); genConfig.setSegmentZKPropsConfig(_segmentZKPropsConfig); // flush any artifacts to disk to improve mutable to immutable segment conversion _realtimeSegmentImpl.commit(); SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl(); try (PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader()) { int[] sortedDocIds = _columnIndicesForRealtimeTable.getSortedColumn() != null ? _realtimeSegmentImpl.getSortedDocIdIterationOrderWithSortedColumn( _columnIndicesForRealtimeTable.getSortedColumn()) : null; recordReader.init(_realtimeSegmentImpl, sortedDocIds); RealtimeSegmentSegmentCreationDataSource dataSource = new RealtimeSegmentSegmentCreationDataSource(_realtimeSegmentImpl, recordReader); driver.init(genConfig, dataSource, RecordEnricherPipeline.getPassThroughPipeline(), TransformPipeline.getPassThroughPipeline()); if (!_enableColumnMajor) { driver.build(); } else { driver.buildByColumn(_realtimeSegmentImpl); } } if (segmentPartitionConfig != null) { Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap(); for (String columnName : columnPartitionMap.keySet()) { int numPartitions = driver.getSegmentStats().getColumnProfileFor(columnName).getPartitions().size(); serverMetrics.addValueToTableGauge(_tableName, ServerGauge.REALTIME_SEGMENT_NUM_PARTITIONS, numPartitions); } } }
@Test public void testNoRecordsIndexedColumnMajorSegmentBuilder() throws Exception { File tmpDir = new File(TMP_DIR, "tmp_" + System.currentTimeMillis()); TableConfig tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName("testTable").setTimeColumnName(DATE_TIME_COLUMN) .setInvertedIndexColumns(Lists.newArrayList(STRING_COLUMN1)).setSortedColumn(LONG_COLUMN1) .setRangeIndexColumns(Lists.newArrayList(STRING_COLUMN2)) .setNoDictionaryColumns(Lists.newArrayList(LONG_COLUMN2)) .setVarLengthDictionaryColumns(Lists.newArrayList(STRING_COLUMN3)) .setOnHeapDictionaryColumns(Lists.newArrayList(LONG_COLUMN3)).setColumnMajorSegmentBuilderEnabled(true) .build(); Schema schema = new Schema.SchemaBuilder().addSingleValueDimension(STRING_COLUMN1, FieldSpec.DataType.STRING) .addSingleValueDimension(STRING_COLUMN2, FieldSpec.DataType.STRING) .addSingleValueDimension(STRING_COLUMN3, FieldSpec.DataType.STRING) .addSingleValueDimension(STRING_COLUMN4, FieldSpec.DataType.STRING) .addSingleValueDimension(LONG_COLUMN1, FieldSpec.DataType.LONG) .addSingleValueDimension(LONG_COLUMN2, FieldSpec.DataType.LONG) .addSingleValueDimension(LONG_COLUMN3, FieldSpec.DataType.LONG) .addMultiValueDimension(MV_INT_COLUMN, FieldSpec.DataType.INT) .addMetric(LONG_COLUMN4, FieldSpec.DataType.LONG) .addDateTime(DATE_TIME_COLUMN, FieldSpec.DataType.LONG, "1:MILLISECONDS:EPOCH", "1:MILLISECONDS").build(); String tableNameWithType = tableConfig.getTableName(); String segmentName = "testTable__0__0__123456"; IndexingConfig indexingConfig = tableConfig.getIndexingConfig(); DictionaryIndexConfig varLengthDictConf = new DictionaryIndexConfig(false, true); RealtimeSegmentConfig.Builder realtimeSegmentConfigBuilder = new RealtimeSegmentConfig.Builder().setTableNameWithType(tableNameWithType).setSegmentName(segmentName) .setStreamName(tableNameWithType).setSchema(schema).setTimeColumnName(DATE_TIME_COLUMN).setCapacity(1000) .setAvgNumMultiValues(3) .setIndex(Sets.newHashSet(LONG_COLUMN2), StandardIndexes.dictionary(), DictionaryIndexConfig.DISABLED) .setIndex(Sets.newHashSet(Sets.newHashSet(STRING_COLUMN3)), StandardIndexes.dictionary(), varLengthDictConf) .setIndex(Sets.newHashSet(STRING_COLUMN1), StandardIndexes.inverted(), IndexConfig.ENABLED) .setSegmentZKMetadata(getSegmentZKMetadata(segmentName)).setOffHeap(true) .setMemoryManager(new DirectMemoryManager(segmentName)) .setStatsHistory(RealtimeSegmentStatsHistory.deserialzeFrom(new File(tmpDir, "stats"))) .setConsumerDir(new File(tmpDir, "consumerDir").getAbsolutePath()); // create mutable segment impl MutableSegmentImpl mutableSegmentImpl = new MutableSegmentImpl(realtimeSegmentConfigBuilder.build(), null); File outputDir = new File(tmpDir, "outputDir"); SegmentZKPropsConfig segmentZKPropsConfig = new SegmentZKPropsConfig(); segmentZKPropsConfig.setStartOffset("1"); segmentZKPropsConfig.setEndOffset("100"); ColumnIndicesForRealtimeTable cdc = new ColumnIndicesForRealtimeTable(indexingConfig.getSortedColumn().get(0), indexingConfig.getInvertedIndexColumns(), null, null, indexingConfig.getNoDictionaryColumns(), indexingConfig.getVarLengthDictionaryColumns()); RealtimeSegmentConverter converter = new RealtimeSegmentConverter(mutableSegmentImpl, segmentZKPropsConfig, outputDir.getAbsolutePath(), schema, tableNameWithType, tableConfig, segmentName, cdc, false); converter.build(SegmentVersion.v3, null); File indexDir = new File(outputDir, segmentName); SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(indexDir); assertEquals(segmentMetadata.getTotalDocs(), 0); assertEquals(segmentMetadata.getTimeColumn(), DATE_TIME_COLUMN); assertEquals(segmentMetadata.getTimeUnit(), TimeUnit.MILLISECONDS); assertEquals(segmentMetadata.getStartTime(), segmentMetadata.getEndTime()); assertTrue(segmentMetadata.getAllColumns().containsAll(schema.getColumnNames())); assertEquals(segmentMetadata.getStartOffset(), "1"); assertEquals(segmentMetadata.getEndOffset(), "100"); }
public static <T> T createInstance(String userClassName, Class<T> xface, ClassLoader classLoader) { Class<?> theCls; try { theCls = Class.forName(userClassName, true, classLoader); } catch (ClassNotFoundException | NoClassDefFoundError cnfe) { throw new RuntimeException("User class must be in class path", cnfe); } if (!xface.isAssignableFrom(theCls)) { throw new RuntimeException(userClassName + " does not implement " + xface.getName()); } Class<T> tCls = (Class<T>) theCls.asSubclass(xface); T result; try { Constructor<T> meth = (Constructor<T>) constructorCache.get(theCls); if (null == meth) { meth = tCls.getDeclaredConstructor(); meth.setAccessible(true); constructorCache.put(theCls, meth); } result = meth.newInstance(); } catch (InstantiationException ie) { throw new RuntimeException("User class must be concrete", ie); } catch (NoSuchMethodException e) { throw new RuntimeException("User class must have a no-arg constructor", e); } catch (IllegalAccessException e) { throw new RuntimeException("User class must have a public constructor", e); } catch (InvocationTargetException e) { throw new RuntimeException("User class constructor throws exception", e); } return result; }
@Test public void testCreateTypedInstanceNoNoArgConstructor() { try { createInstance(OneArgClass.class.getName(), aInterface.class, classLoader); fail("Should fail to load class doesn't have no-arg constructor"); } catch (RuntimeException re) { assertTrue(re.getCause() instanceof NoSuchMethodException); } }
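A condensed, JDK-only sketch of the same reflective-factory technique, without the constructor cache; class and method names are illustrative:

import java.lang.reflect.Constructor;

public class ReflectiveFactorySketch {
    // Load a class by name, check it implements the interface, and call its no-arg constructor.
    static <T> T newInstance(String className, Class<T> iface, ClassLoader loader) {
        try {
            Class<?> cls = Class.forName(className, true, loader);
            if (!iface.isAssignableFrom(cls)) {
                throw new RuntimeException(className + " does not implement " + iface.getName());
            }
            Constructor<? extends T> ctor = cls.asSubclass(iface).getDeclaredConstructor();
            ctor.setAccessible(true);
            return ctor.newInstance();
        } catch (ReflectiveOperationException e) {
            // Covers ClassNotFound, NoSuchMethod, Instantiation, IllegalAccess, InvocationTarget.
            throw new RuntimeException("Cannot instantiate " + className, e);
        }
    }

    public static void main(String[] args) {
        Runnable r = newInstance("java.lang.Thread", Runnable.class,
                ReflectiveFactorySketch.class.getClassLoader());
        r.run(); // Thread implements Runnable and has a public no-arg constructor
    }
}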
@Override protected void parse(final ProtocolFactory protocols, final Local file) throws AccessDeniedException { try (final BufferedReader in = new BufferedReader(new InputStreamReader(file.getInputStream(), StandardCharsets.UTF_8))) { Host current = null; String line; while((line = in.readLine()) != null) { if(line.startsWith("[Sessions\\")) { current = new Host(protocols.forScheme(Scheme.sftp)); current.getCredentials().setUsername( PreferencesFactory.get().getProperty("connection.login.anon.name")); Pattern pattern = Pattern.compile("\\[Session\\\\(.*)\\]"); Matcher matcher = pattern.matcher(line); if(matcher.matches()) { current.setNickname(matcher.group(1)); } } else if(StringUtils.isBlank(line)) { this.add(current); current = null; } else { if(null == current) { log.warn("Failed to detect start of bookmark"); continue; } Scanner scanner = new Scanner(line); scanner.useDelimiter("="); if(!scanner.hasNext()) { log.warn("Missing key in line:" + line); continue; } String name = scanner.next().toLowerCase(Locale.ROOT); if(!scanner.hasNext()) { log.warn("Missing value in line:" + line); continue; } String value = scanner.next(); switch(name) { case "hostname": current.setHostname(value); break; case "username": current.getCredentials().setUsername(value); break; case "portnumber": try { current.setPort(Integer.parseInt(value)); } catch(NumberFormatException e) { log.warn("Invalid Port:" + e.getMessage()); } break; case "fsprotocol": try { switch(Integer.parseInt(value)) { case 0: case 1: case 2: current.setProtocol(protocols.forScheme(Scheme.sftp)); break; case 5: current.setProtocol(protocols.forScheme(Scheme.ftp)); break; } // Reset port to default current.setPort(-1); } catch(NumberFormatException e) { log.warn("Unknown Protocol:" + e.getMessage()); } break; } } } } catch(IOException e) { throw new AccessDeniedException(e.getMessage(), e); } }
@Test(expected = AccessDeniedException.class) public void testParseNotFound() throws Exception { new WinScpBookmarkCollection().parse(new ProtocolFactory(Collections.emptySet()), new Local(System.getProperty("java.io.tmpdir"), "f")); }
public static ConfigurableResource parseResourceConfigValue(String value) throws AllocationConfigurationException { return parseResourceConfigValue(value, Long.MAX_VALUE); }
@Test public void testAbsoluteMemoryNegativeWithSpaces() throws Exception { expectNegativeValueOfResource("memory"); parseResourceConfigValue("2 vcores, -5120 mb"); }
@Override public RLock readLock() { return new RedissonReadLock(commandExecutor, getName()); }
@Test public void testReentrancy() throws InterruptedException { RReadWriteLock rwlock = redisson.getReadWriteLock("lock"); RLock lock = rwlock.readLock(); Assertions.assertTrue(lock.tryLock()); Assertions.assertTrue(lock.tryLock()); lock.unlock(); // next row for test renew expiration tisk. //Thread.currentThread().sleep(TimeUnit.SECONDS.toMillis(RedissonLock.LOCK_EXPIRATION_INTERVAL_SECONDS*2)); Thread thread1 = new Thread() { @Override public void run() { RReadWriteLock rwlock = redisson.getReadWriteLock("lock1"); RLock lock = rwlock.readLock(); Assertions.assertTrue(lock.tryLock()); } }; thread1.start(); thread1.join(); lock.unlock(); }
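The reentrancy semantics under test mirror the JDK's own read-write lock; a runnable sketch using java.util.concurrent rather than Redisson:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadLockReentrancySketch {
    public static void main(String[] args) {
        ReentrantReadWriteLock rwlock = new ReentrantReadWriteLock();
        // A read lock may be acquired multiple times by the same thread...
        System.out.println(rwlock.readLock().tryLock()); // true
        System.out.println(rwlock.readLock().tryLock()); // true
        // ...and must be released the same number of times.
        rwlock.readLock().unlock();
        rwlock.readLock().unlock();
        System.out.println(rwlock.getReadLockCount()); // 0
    }
}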
void snapshotSession(final ClientSession session) { final String responseChannel = session.responseChannel(); final byte[] encodedPrincipal = session.encodedPrincipal(); final int length = MessageHeaderEncoder.ENCODED_LENGTH + ClientSessionEncoder.BLOCK_LENGTH + ClientSessionEncoder.responseChannelHeaderLength() + responseChannel.length() + ClientSessionEncoder.encodedPrincipalHeaderLength() + encodedPrincipal.length; if (length <= publication.maxPayloadLength()) { idleStrategy.reset(); while (true) { final long result = publication.tryClaim(length, bufferClaim); if (result > 0) { final MutableDirectBuffer buffer = bufferClaim.buffer(); final int offset = bufferClaim.offset(); encodeSession(session, responseChannel, encodedPrincipal, buffer, offset); bufferClaim.commit(); break; } checkResultAndIdle(result); } } else { final int offset = 0; encodeSession(session, responseChannel, encodedPrincipal, offerBuffer, offset); offer(offerBuffer, offset, length); } }
@Test void snapshotSessionUsesTryClaimIfDataFitIntoMaxPayloadSize() { final int offset = 40; final String responseChannel = "aeron:udp?endpoint=localhost:8080"; final byte[] encodedPrincipal = new byte[100]; ThreadLocalRandom.current().nextBytes(encodedPrincipal); final ContainerClientSession session = new ContainerClientSession(42, 8, responseChannel, encodedPrincipal, null); final int length = MessageHeaderEncoder.ENCODED_LENGTH + ClientSessionEncoder.BLOCK_LENGTH + ClientSessionEncoder.responseChannelHeaderLength() + responseChannel.length() + ClientSessionEncoder.encodedPrincipalHeaderLength() + encodedPrincipal.length; when(publication.maxPayloadLength()).thenReturn(length); when(publication.tryClaim(eq(length), any())) .thenReturn(BACK_PRESSURED) .thenAnswer(mockTryClaim(offset)); serviceSnapshotTaker.snapshotSession(session); final InOrder inOrder = inOrder(idleStrategy, publication); inOrder.verify(publication).maxPayloadLength(); inOrder.verify(idleStrategy).reset(); inOrder.verify(publication).tryClaim(anyInt(), any()); inOrder.verify(idleStrategy).idle(); inOrder.verify(publication).tryClaim(anyInt(), any()); inOrder.verifyNoMoreInteractions(); clientSessionDecoder.wrapAndApplyHeader(buffer, offset + HEADER_LENGTH, messageHeaderDecoder); assertEquals(session.id(), clientSessionDecoder.clusterSessionId()); assertEquals(session.responseStreamId(), clientSessionDecoder.responseStreamId()); assertEquals(responseChannel, clientSessionDecoder.responseChannel()); assertEquals(encodedPrincipal.length, clientSessionDecoder.encodedPrincipalLength()); final byte[] snapshotPrincipal = new byte[encodedPrincipal.length]; clientSessionDecoder.getEncodedPrincipal(snapshotPrincipal, 0, snapshotPrincipal.length); assertArrayEquals(encodedPrincipal, snapshotPrincipal); }
public void observeComputeEngineTaskDuration(long durationInSeconds, String taskType, String projectKey) { ceTasksRunningDuration.labels(taskType, projectKey).observe(durationInSeconds); }
@Test public void observeComputeEngineTaskDurationTest() { ServerMonitoringMetrics metrics = new ServerMonitoringMetrics(); String[] labelNames = {"task_type", "project_key"}; String[] labelValues = {"REPORT", "projectKey"}; metrics.observeComputeEngineTaskDuration(10, labelValues[0], labelValues[1]); assertThat(CollectorRegistry.defaultRegistry.getSampleValue("sonarqube_compute_engine_tasks_running_duration_seconds_sum", labelNames, labelValues)).isEqualTo(10); }
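A sketch of the same labelled-histogram pattern, assuming the Prometheus Java simpleclient (io.prometheus.client) is on the classpath; the metric and label names are illustrative, not SonarQube's:

import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Histogram;

public class TaskDurationMetricSketch {
    public static void main(String[] args) {
        // Register a histogram with two labels, then observe one duration sample.
        Histogram duration = Histogram.build()
                .name("tasks_running_duration_seconds")
                .help("Duration of tasks")
                .labelNames("task_type", "project_key")
                .register();
        duration.labels("REPORT", "projectKey").observe(10);
        // Histograms expose a <name>_sum sample holding the total of all observations.
        Double sum = CollectorRegistry.defaultRegistry.getSampleValue(
                "tasks_running_duration_seconds_sum",
                new String[]{"task_type", "project_key"},
                new String[]{"REPORT", "projectKey"});
        System.out.println(sum); // 10.0
    }
}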
public boolean eval(ContentFile<?> file) { // TODO: detect the case where a column is missing from the file using file's max field id. return new MetricsEvalVisitor().eval(file); }
@Test public void testRequiredColumn() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("required")).eval(FILE); assertThat(shouldRead).as("Should read: required columns are always non-null").isTrue(); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("required")).eval(FILE); assertThat(shouldRead).as("Should skip: required columns are always non-null").isFalse(); }
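A hypothetical, plain-Java sketch of null-count-based pruning in the spirit of the evaluator; this is not Iceberg's actual implementation, and the stats shape is invented for illustration:

import java.util.Map;

public class NullCountPruningSketch {
    // Hypothetical stats: per-column null counts and total row count for one data file.
    static boolean mayContainNonNull(Map<String, Long> nullCounts, long rowCount, String col) {
        Long nulls = nullCounts.get(col);
        return nulls == null || nulls < rowCount; // unknown stats => cannot skip the file
    }

    static boolean mayContainNull(Map<String, Long> nullCounts, String col, boolean required) {
        if (required) {
            return false; // required columns are always non-null, so isNull can never match
        }
        Long nulls = nullCounts.get(col);
        return nulls == null || nulls > 0;
    }

    public static void main(String[] args) {
        Map<String, Long> nullCounts = Map.of("required", 0L);
        System.out.println(mayContainNonNull(nullCounts, 100, "required")); // true
        System.out.println(mayContainNull(nullCounts, "required", true));   // false
    }
}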
@Override public void addStaticImport(String staticImport) { staticImports.add(staticImport); }
@Test void testAddStaticImport() { Interface interfaze = new Interface("com.foo.UserInterface"); interfaze.addStaticImport("com.foo.StaticUtil"); assertNotNull(interfaze.getStaticImports()); assertEquals(1, interfaze.getStaticImports().size()); assertTrue(interfaze.getStaticImports().contains("com.foo.StaticUtil")); }
@Override public URI getLocation(FileResource resource) { var blobName = getBlobName(resource); if (StringUtils.isEmpty(serviceEndpoint)) { throw new IllegalStateException("Cannot determine location of file " + blobName + ": missing Azure blob service endpoint"); } if (!serviceEndpoint.endsWith("/")) { throw new IllegalStateException("The Azure blob service endpoint URL must end with a slash."); } return URI.create(serviceEndpoint + blobContainer + "/" + blobName); }
@Test public void testGetLocation() { service.serviceEndpoint = "http://azure.blob.storage/"; service.blobContainer = "blob-container"; var namespace = new Namespace(); namespace.setName("abelfubu"); var extension = new Extension(); extension.setName("abelfubu-dark"); extension.setNamespace(namespace); var extVersion = new ExtensionVersion(); extVersion.setVersion("1.3.4"); extVersion.setTargetPlatform("universal"); extVersion.setExtension(extension); var resource = new FileResource(); resource.setName("extension/themes/abelFubu Dark+-color-theme.json"); resource.setExtension(extVersion); var uri = service.getLocation(resource); var expected = URI.create("http://azure.blob.storage/blob-container/abelfubu/abelfubu-dark/1.3.4/extension/themes/abelFubu%20Dark+-color-theme.json"); assertEquals(expected, uri); }
public static <T> Match<T> ifNotNull() { return NOT_NULL; }
@Test public void testIfNotNull() { Match<String> m = Match.ifNotNull(); assertFalse(m.matches(null)); assertTrue(m.matches("foo")); }
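A minimal sketch of the ifNotNull() idea expressed with java.util.function.Predicate; the Match type itself is not reproduced here:

import java.util.function.Predicate;

public class MatchSketch {
    // A match over possibly-null values: accepts anything that is not null.
    static <T> Predicate<T> ifNotNull() {
        return value -> value != null;
    }

    public static void main(String[] args) {
        Predicate<String> m = ifNotNull();
        System.out.println(m.test(null));  // false
        System.out.println(m.test("foo")); // true
    }
}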
@Override public void createOrUpdate(final String path, final Object data) { zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT); }
@Test public void testOnMetaDataChangedUpdate() throws UnsupportedEncodingException { MetaData metaData = MetaData.builder().id(MOCK_ID).path(MOCK_PATH).appName(MOCK_APP_NAME).build(); String metaDataPath = DefaultPathConstants.buildMetaDataPath(URLEncoder.encode(metaData.getPath(), StandardCharsets.UTF_8)); zookeeperDataChangedListener.onMetaDataChanged(ImmutableList.of(metaData), DataEventTypeEnum.UPDATE); verify(zkClient, times(1)).createOrUpdate(metaDataPath, metaData, CreateMode.PERSISTENT); }
public Collection<String> getSharedWaitersAndHolders() { return Collections.unmodifiableSet(mSharedLockHolders.keySet()); }
@Test // TODO(jiacheng): run this test before committing public void testGetStateLockSharedWaitersAndHolders() throws Throwable { final StateLockManager stateLockManager = new StateLockManager(); assertEquals(0, stateLockManager.getSharedWaitersAndHolders().size()); for (int i = 1; i < 10; i++) { StateLockingThread sharedHolderThread = new StateLockingThread(stateLockManager, false); sharedHolderThread.start(); sharedHolderThread.waitUntilStateLockAcquired(); final Collection<String> sharedWaitersAndHolders = stateLockManager.getSharedWaitersAndHolders(); assertEquals(i, sharedWaitersAndHolders.size()); assertTrue(sharedWaitersAndHolders.contains( sharedHolderThread.getName())); } }
public static void main(String[] args) { createAndSaveGraph(); useContractionHierarchiesToMakeQueriesFaster(); }
@Test public void main() { LowLevelAPIExample.main(null); }
@Override public void onMsg(TbContext ctx, TbMsg msg) { JsonObject json = JsonParser.parseString(msg.getData()).getAsJsonObject(); String tmp; if (msg.getOriginator().getEntityType() != EntityType.DEVICE) { ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!")); } else if (!json.has("method")) { ctx.tellFailure(msg, new RuntimeException("Method is not present in the message!")); } else if (!json.has("params")) { ctx.tellFailure(msg, new RuntimeException("Params are not present in the message!")); } else { int requestId = json.has("requestId") ? json.get("requestId").getAsInt() : random.nextInt(); boolean restApiCall = msg.isTypeOf(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE); tmp = msg.getMetaData().getValue("oneway"); boolean oneway = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp); tmp = msg.getMetaData().getValue(DataConstants.PERSISTENT); boolean persisted = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp); tmp = msg.getMetaData().getValue("requestUUID"); UUID requestUUID = !StringUtils.isEmpty(tmp) ? UUID.fromString(tmp) : Uuids.timeBased(); tmp = msg.getMetaData().getValue("originServiceId"); String originServiceId = !StringUtils.isEmpty(tmp) ? tmp : null; tmp = msg.getMetaData().getValue(DataConstants.EXPIRATION_TIME); long expirationTime = !StringUtils.isEmpty(tmp) ? Long.parseLong(tmp) : (System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(config.getTimeoutInSeconds())); tmp = msg.getMetaData().getValue(DataConstants.RETRIES); Integer retries = !StringUtils.isEmpty(tmp) ? Integer.parseInt(tmp) : null; String params = parseJsonData(json.get("params")); String additionalInfo = parseJsonData(json.get(DataConstants.ADDITIONAL_INFO)); RuleEngineDeviceRpcRequest request = RuleEngineDeviceRpcRequest.builder() .oneway(oneway) .method(json.get("method").getAsString()) .body(params) .tenantId(ctx.getTenantId()) .deviceId(new DeviceId(msg.getOriginator().getId())) .requestId(requestId) .requestUUID(requestUUID) .originServiceId(originServiceId) .expirationTime(expirationTime) .retries(retries) .restApiCall(restApiCall) .persisted(persisted) .additionalInfo(additionalInfo) .build(); ctx.getRpcService().sendRpcRequestToDevice(request, ruleEngineDeviceRpcResponse -> { if (ruleEngineDeviceRpcResponse.getError().isEmpty()) { TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), ruleEngineDeviceRpcResponse.getResponse().orElse(TbMsg.EMPTY_JSON_OBJECT)); ctx.enqueueForTellNext(next, TbNodeConnectionType.SUCCESS); } else { TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), wrap("error", ruleEngineDeviceRpcResponse.getError().get().name())); ctx.enqueueForTellFailure(next, ruleEngineDeviceRpcResponse.getError().get().name()); } }); ctx.ack(msg); } }
@Test public void givenOriginServiceId_whenOnMsg_thenVerifyRequest() { given(ctxMock.getRpcService()).willReturn(rpcServiceMock); given(ctxMock.getTenantId()).willReturn(TENANT_ID); String originServiceId = "service-id-123"; TbMsgMetaData metadata = new TbMsgMetaData(); metadata.putValue("originServiceId", originServiceId); TbMsg msg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, metadata, MSG_DATA); node.onMsg(ctxMock, msg); ArgumentCaptor<RuleEngineDeviceRpcRequest> requestCaptor = captureRequest(); assertThat(requestCaptor.getValue().getOriginServiceId()).isEqualTo(originServiceId); }
@Operation(summary = "get", description = "Get a component") @GetMapping("/{id}") public ResponseEntity<ComponentVO> get(@PathVariable Long id) { return ResponseEntity.success(componentService.get(id)); }
@Test void getReturnsNotFoundForInvalidId() { Long id = 999L; when(componentService.get(id)).thenReturn(null); ResponseEntity<ComponentVO> response = componentController.get(id); assertTrue(response.isSuccess()); assertNull(response.getData()); }
@Override public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException { final AttributedList<Path> children = new AttributedList<>(); if(replies.isEmpty()) { return children; } // At least one entry successfully parsed boolean success = false; for(String line : replies) { final Map<String, Map<String, String>> file = this.parseFacts(line); if(null == file) { log.error(String.format("Error parsing line %s", line)); continue; } for(Map.Entry<String, Map<String, String>> f : file.entrySet()) { final String name = f.getKey(); // size -- Size in octets // modify -- Last modification time // create -- Creation time // type -- Entry type // unique -- Unique id of file/directory // perm -- File permissions, whether read, write, execute is allowed for the login id. // lang -- Language of the file name per IANA [11] registry. // media-type -- MIME media-type of file contents per IANA registry. // charset -- Character set per IANA registry (if not UTF-8) final Map<String, String> facts = f.getValue(); if(!facts.containsKey("type")) { log.error(String.format("No type fact in line %s", line)); continue; } final Path parsed; if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory)); } else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file)); } else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink)); // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar final String[] type = facts.get("type").split(":"); if(type.length == 2) { final String target = type[1]; if(target.startsWith(String.valueOf(Path.DELIMITER))) { parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file))); } else { parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file))); } } else { log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line)); continue; } } else { log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line)); continue; } if(!success) { if(parsed.isDirectory() && directory.getName().equals(name)) { log.warn(String.format("Possibly bogus response line %s", line)); } else { success = true; } } if(name.equals(".") || name.equals("..")) { if(log.isDebugEnabled()) { log.debug(String.format("Skip %s", name)); } continue; } if(facts.containsKey("size")) { parsed.attributes().setSize(Long.parseLong(facts.get("size"))); } if(facts.containsKey("unix.uid")) { parsed.attributes().setOwner(facts.get("unix.uid")); } if(facts.containsKey("unix.owner")) { parsed.attributes().setOwner(facts.get("unix.owner")); } if(facts.containsKey("unix.gid")) { parsed.attributes().setGroup(facts.get("unix.gid")); } if(facts.containsKey("unix.group")) { parsed.attributes().setGroup(facts.get("unix.group")); } if(facts.containsKey("unix.mode")) { parsed.attributes().setPermission(new Permission(facts.get("unix.mode"))); } else if(facts.containsKey("perm")) { if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) { Permission.Action user = Permission.Action.none; final String flags = facts.get("perm"); if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) { // RETR command may be applied to that object // Listing commands, LIST, NLST, and MLSD may be applied user = user.or(Permission.Action.read); } if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) { user = user.or(Permission.Action.write); } if(StringUtils.contains(flags, 'e')) { // CWD command naming the object should succeed user = user.or(Permission.Action.execute); if(parsed.isDirectory()) { user = user.or(Permission.Action.read); } } final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none); parsed.attributes().setPermission(permission); } } if(facts.containsKey("modify")) { // Time values are always represented in UTC parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify"))); } if(facts.containsKey("create")) { // Time values are always represented in UTC parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create"))); } children.add(parsed); } } if(!success) { throw new FTPInvalidListException(children); } return children; }
@Test(expected = FTPInvalidListException.class) public void testBrokenMlsd() throws Exception { Path path = new Path( "/Dummies_Infoblaetter", EnumSet.of(Path.Type.directory)); String[] replies = new String[]{ "Type=dir;Modify=20101209140859;Win32.ea=0x00000010; Dummies_Infoblaetter", }; new FTPMlsdListResponseReader().read(path, Arrays.asList(replies)); }
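A simplified, JDK-only sketch of parsing one MLSD fact line into a key/value map; the real parseFacts handles more edge cases than this:

import java.util.LinkedHashMap;
import java.util.Locale;
import java.util.Map;

public class MlsdFactParserSketch {
    // Parse one MLSD reply line: semicolon-separated "key=value" facts, then a space and the name.
    static Map<String, String> parseFacts(String line) {
        int space = line.indexOf(' ');
        Map<String, String> facts = new LinkedHashMap<>();
        for (String fact : line.substring(0, space).split(";")) {
            int eq = fact.indexOf('=');
            if (eq > 0) {
                facts.put(fact.substring(0, eq).toLowerCase(Locale.ROOT), fact.substring(eq + 1));
            }
        }
        facts.put("name", line.substring(space + 1));
        return facts;
    }

    public static void main(String[] args) {
        System.out.println(parseFacts("Type=dir;Modify=20101209140859; Dummies_Infoblaetter"));
        // {type=dir, modify=20101209140859, name=Dummies_Infoblaetter}
    }
}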
public void patchLike(final Long boardId, final boolean isIncreaseLike) { Board board = findByIdUsingPessimisticLock(boardId); board.patchLike(isIncreaseLike); }
@Test void 게시글_좋아요_처리를_한다() { // given Board savedBoard = boardRepository.save(게시글_생성_사진없음()); // when boardService.patchLike(savedBoard.getId(), true); // then assertThat(savedBoard.getLikeCount().getLikeCount()).isEqualTo(1); }
@Override public CompletableFuture<Void> setRole(NodeId nodeId, DeviceId deviceId, MastershipRole role) { checkNotNull(nodeId, NODE_ID_NULL); checkNotNull(deviceId, DEVICE_ID_NULL); checkNotNull(role, ROLE_NULL); CompletableFuture<MastershipEvent> eventFuture = null; switch (role) { case MASTER: eventFuture = store.setMaster(networkId, nodeId, deviceId); break; case STANDBY: eventFuture = store.setStandby(networkId, nodeId, deviceId); break; case NONE: eventFuture = store.relinquishRole(networkId, nodeId, deviceId); break; default: log.info("Unknown role; ignoring"); return CompletableFuture.completedFuture(null); } return eventFuture.thenAccept(this::post).thenApply(v -> null); }
@Test public void setRole() { mastershipMgr1.setRole(NID_OTHER, VDID1, MASTER); assertEquals("wrong local role:", NONE, mastershipMgr1.getLocalRole(VDID1)); assertEquals("wrong obtained role:", STANDBY, Futures.getUnchecked(mastershipMgr1.requestRoleFor(VDID1))); //set to master mastershipMgr1.setRole(NID_LOCAL, VDID1, MASTER); assertEquals("wrong local role:", MASTER, mastershipMgr1.getLocalRole(VDID1)); }
Collection<AzureAddress> getAddresses() { LOGGER.finest("Fetching OAuth Access Token"); final String accessToken = fetchAccessToken(); LOGGER.finest("Fetching instances for subscription '%s' and resourceGroup '%s'", subscriptionId, resourceGroup); Collection<AzureAddress> addresses = azureComputeApi.instances(subscriptionId, resourceGroup, scaleSet, tag, accessToken); LOGGER.finest("Found the following instances for project '%s' and zone '%s': %s", subscriptionId, resourceGroup, addresses); return addresses; }
@Test public void getAddressesCurrentSubscriptionCurrentResourceGroupCurrentScaleSetWithTag() { // given given(azureComputeApi.instances(SUBSCRIPTION_ID, RESOURCE_GROUP, SCALE_SET, TAG, ACCESS_TOKEN)).willReturn(ADDRESSES); AzureConfig azureConfig = AzureConfig.builder().setInstanceMetadataAvailable(true).setTag(TAG).build(); AzureClient azureClient = new AzureClient(azureMetadataApi, azureComputeApi, azureAuthenticator, azureConfig); // when Collection<AzureAddress> result = azureClient.getAddresses(); // then assertEquals(ADDRESSES, result); }
public TimelineEvent kill(String workflowId, User caller) { return terminate(workflowId, Actions.WorkflowInstanceAction.KILL, caller); }
@Test public void testKill() { when(instanceDao.terminateQueuedInstances( eq("sample-minimal-wf"), eq(Constants.TERMINATE_BATCH_LIMIT), eq(WorkflowInstance.Status.FAILED), anyString())) .thenReturn(Constants.TERMINATE_BATCH_LIMIT) .thenReturn(Constants.TERMINATE_BATCH_LIMIT) .thenReturn(Constants.TERMINATE_BATCH_LIMIT - 1); when(instanceDao.terminateRunningInstances( eq("sample-minimal-wf"), eq(Constants.TERMINATE_BATCH_LIMIT), eq(Actions.WorkflowInstanceAction.KILL), any(), anyString())) .thenReturn(Constants.TERMINATE_BATCH_LIMIT - 1); String res = actionHandler.kill("sample-minimal-wf", tester).getMessage(); assertEquals("Terminated [29] queued instances and terminating [9] running instances", res); }
@InvokeOnHeader(CONTROL_ACTION_LIST) public void performList(final Exchange exchange, AsyncCallback callback) { Message message = exchange.getMessage(); Map<String, Object> headers = message.getHeaders(); String subscribeChannel = (String) headers.getOrDefault(CONTROL_SUBSCRIBE_CHANNEL, configuration.getSubscribeChannel()); try { String filters = dynamicRouterControlService.getSubscriptionsForChannel(subscribeChannel); message.setBody(filters, String.class); } catch (Exception e) { exchange.setException(e); } finally { callback.done(false); } }
@Test void testPerformListActionWithException() { String subscribeChannel = "testChannel"; Map<String, Object> headers = Map.of( CONTROL_ACTION_HEADER, CONTROL_ACTION_LIST, CONTROL_SUBSCRIBE_CHANNEL, subscribeChannel); when(exchange.getMessage()).thenReturn(message); when(message.getHeaders()).thenReturn(headers); Mockito.doNothing().when(callback).done(false); Exception ex = new IllegalArgumentException("test exception"); Mockito.doThrow(ex).when(controlService).getSubscriptionsForChannel(subscribeChannel); producer.performList(exchange, callback); Mockito.verify(exchange, Mockito.times(1)).setException(ex); }
@PostMapping(params = "import=true") @Secured(action = ActionTypes.WRITE, signType = SignType.CONFIG) public RestResult<Map<String, Object>> importAndPublishConfig(HttpServletRequest request, @RequestParam(value = "src_user", required = false) String srcUser, @RequestParam(value = "namespace", required = false) String namespace, @RequestParam(value = "policy", defaultValue = "ABORT") SameConfigPolicy policy, MultipartFile file) throws NacosException { Map<String, Object> failedData = new HashMap<>(4); if (Objects.isNull(file)) { return RestResultUtils.buildResult(ResultCodeEnum.DATA_EMPTY, failedData); } namespace = NamespaceUtil.processNamespaceParameter(namespace); if (StringUtils.isNotBlank(namespace) && namespacePersistService.tenantInfoCountByTenantId(namespace) <= 0) { failedData.put("succCount", 0); return RestResultUtils.buildResult(ResultCodeEnum.NAMESPACE_NOT_EXIST, failedData); } if (StringUtils.isBlank(srcUser)) { srcUser = RequestUtil.getSrcUserName(request); } List<ConfigAllInfo> configInfoList = new ArrayList<>(); List<Map<String, String>> unrecognizedList = new ArrayList<>(); try { ZipUtils.UnZipResult unziped = ZipUtils.unzip(file.getBytes()); ZipUtils.ZipItem metaDataZipItem = unziped.getMetaDataItem(); RestResult<Map<String, Object>> errorResult; if (metaDataZipItem != null && Constants.CONFIG_EXPORT_METADATA_NEW.equals(metaDataZipItem.getItemName())) { // new export errorResult = parseImportDataV2(srcUser, unziped, configInfoList, unrecognizedList, namespace); } else { errorResult = parseImportData(srcUser, unziped, configInfoList, unrecognizedList, namespace); } if (errorResult != null) { return errorResult; } } catch (IOException e) { failedData.put("succCount", 0); LOGGER.error("parsing data failed", e); return RestResultUtils.buildResult(ResultCodeEnum.PARSING_DATA_FAILED, failedData); } if (CollectionUtils.isEmpty(configInfoList)) { failedData.put("succCount", 0); return RestResultUtils.buildResult(ResultCodeEnum.DATA_EMPTY, failedData); } final String srcIp = RequestUtil.getRemoteIp(request); String requestIpApp = RequestUtil.getAppName(request); final Timestamp time = TimeUtils.getCurrentTime(); Map<String, Object> saveResult = configInfoPersistService.batchInsertOrUpdate(configInfoList, srcUser, srcIp, null, policy); for (ConfigInfo configInfo : configInfoList) { ConfigChangePublisher.notifyConfigChange( new ConfigDataChangeEvent(false, configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), time.getTime())); ConfigTraceService.logPersistenceEvent(configInfo.getDataId(), configInfo.getGroup(), configInfo.getTenant(), requestIpApp, time.getTime(), InetUtils.getSelfIP(), ConfigTraceService.PERSISTENCE_EVENT, ConfigTraceService.PERSISTENCE_TYPE_PUB, configInfo.getContent()); } // unrecognizedCount if (!unrecognizedList.isEmpty()) { saveResult.put("unrecognizedCount", unrecognizedList.size()); saveResult.put("unrecognizedData", unrecognizedList); } return RestResultUtils.success("导入成功", saveResult); }
@Test void testImportAndPublishConfig() throws Exception { MockedStatic<ZipUtils> zipUtilsMockedStatic = Mockito.mockStatic(ZipUtils.class); List<ZipUtils.ZipItem> zipItems = new ArrayList<>(); ZipUtils.ZipItem zipItem = new ZipUtils.ZipItem("test/test", "test"); zipItems.add(zipItem); ZipUtils.UnZipResult unziped = new ZipUtils.UnZipResult(zipItems, null); MockMultipartFile file = new MockMultipartFile("file", "test.zip", "application/zip", "test".getBytes()); zipUtilsMockedStatic.when(() -> ZipUtils.unzip(file.getBytes())).thenReturn(unziped); when(namespacePersistService.tenantInfoCountByTenantId("public")).thenReturn(1); Map<String, Object> map = new HashMap<>(); map.put("test", "test"); when(configInfoPersistService.batchInsertOrUpdate(anyList(), anyString(), anyString(), any(), any())).thenReturn(map); MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.multipart(Constants.CONFIG_CONTROLLER_PATH).file(file) .param("import", "true").param("src_user", "test").param("namespace", "public").param("policy", "ABORT"); String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString(); String code = JacksonUtils.toObj(actualValue).get("code").toString(); assertEquals("200", code); Map<String, Object> resultMap = JacksonUtils.toObj(JacksonUtils.toObj(actualValue).get("data").toString(), Map.class); assertEquals(map.get("test"), resultMap.get("test").toString()); zipUtilsMockedStatic.close(); }
@SuppressWarnings("unchecked") private void publishContainerPausedEvent( ContainerEvent event) { if (publishNMContainerEvents) { ContainerPauseEvent pauseEvent = (ContainerPauseEvent) event; ContainerId containerId = pauseEvent.getContainerID(); ContainerEntity entity = createContainerEntity(containerId); Map<String, Object> entityInfo = new HashMap<String, Object>(); entityInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO, pauseEvent.getDiagnostic()); entity.setInfo(entityInfo); Container container = context.getContainers().get(containerId); if (container != null) { TimelineEvent tEvent = new TimelineEvent(); tEvent.setId(ContainerMetricsConstants.PAUSED_EVENT_TYPE); tEvent.setTimestamp(event.getTimestamp()); entity.addEvent(tEvent); dispatcher.getEventHandler().handle(new TimelinePublishEvent(entity, containerId.getApplicationAttemptId().getApplicationId())); } } }
@Test public void testPublishContainerPausedEvent() { ApplicationId appId = ApplicationId.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId cId = ContainerId.newContainerId(appAttemptId, 1); ContainerEvent containerEvent = new ContainerPauseEvent(cId, "test pause"); publisher.createTimelineClient(appId); publisher.publishContainerEvent(containerEvent); publisher.stopTimelineClient(appId); dispatcher.await(); ContainerEntity cEntity = new ContainerEntity(); cEntity.setId(cId.toString()); TimelineEntity[] lastPublishedEntities = timelineClient.getLastPublishedEntities(); Assert.assertNotNull(lastPublishedEntities); Assert.assertEquals(1, lastPublishedEntities.length); TimelineEntity entity = lastPublishedEntities[0]; Assert.assertEquals(cEntity, entity); NavigableSet<TimelineEvent> events = entity.getEvents(); Assert.assertEquals(1, events.size()); Assert.assertEquals(ContainerMetricsConstants.PAUSED_EVENT_TYPE, events.iterator().next().getId()); Map<String, Object> info = entity.getInfo(); Assert.assertTrue( info.containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO)); Assert.assertEquals("test pause", info.get(ContainerMetricsConstants.DIAGNOSTICS_INFO)); }
public static Date parseDateLenient(String text) { if (text == null) { return null; } String normalized = normalize(text); for (DateTimeFormatter dateTimeFormatter : DATE_TIME_FORMATTERS) { try { ZonedDateTime zonedDateTime = ZonedDateTime.parse(normalized, dateTimeFormatter); return Date.from(Instant.from(zonedDateTime)); } catch (SecurityException e) { throw e; } catch (DateTimeParseException e) { //There's a bug in java 8 that if we include .withZone in the DateTimeFormatter, //that will override the offset/timezone id even if it included // in the original string. This is fixed in later versions of Java. // Once we move to Java 11, we can get rid of this. Can't make this up... try { LocalDateTime localDateTime = LocalDateTime.parse(normalized, dateTimeFormatter); return Date.from(Instant.from(localDateTime.atOffset(UTC))); } catch (SecurityException e2) { throw e2; } catch (Exception e2) { //swallow } } catch (Exception e) { //can get StringIndexOutOfBoundsException because of a bug in java 8 //ignore } } for (DateTimeFormatter dateFormatter : DATE_FORMATTERS) { try { TemporalAccessor temporalAccessor = dateFormatter.parse(normalized); ZonedDateTime localDate = LocalDate.from(temporalAccessor) .atStartOfDay() .atZone(MIDDAY.toZoneId()); return Date.from(Instant.from(localDate)); } catch (SecurityException e) { throw e; } catch (Exception e) { //ignore } } return null; }
@Test public void testTrickyDates() throws Exception { DateFormat df = new SimpleDateFormat("yyyy-MM-dd", new DateFormatSymbols(Locale.US)); //make sure there are no mis-parses of e.g. 90 = year 90 A.D., not 1990 Date date1980 = df.parse("1980-01-01"); Date date2010 = df.parse("2010-01-01"); for (String dateString : new String[]{ "11/14/08", "1/14/08", "1/2/08", "12/1/2008", "12/02/1996", "96/1/02", "96/12/02", "96/12/2", "1996/12/02", "Mon, 29 Jan 96 14:02 GMT", "7/20/95 1:12PM", "08/14/2000 12:48 AM", "8/4/2000 1:48 AM", "06/24/2008, Tuesday, 11 AM", }) { Date parsedDate = MailDateParser.parseDateLenient(dateString); assertNotNull(parsedDate); if (parsedDate != null) { assertTrue(parsedDate.getTime() > date1980.getTime(), "date must be after 1980: " + dateString + " >> " + parsedDate); assertTrue(parsedDate.getTime() < date2010.getTime(), "date must be before 2010: " + dateString + " >> " + parsedDate); } } //TODO: mime4j misparses these to pre 1980 dates //"Wed, 27 Dec 95 11:20:40 EST", //"26 Aug 00 11:14:52 EDT" // //We are still misparsing: 8/1/03 to a pre 1980 date }
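A stripped-down sketch of the try-each-formatter strategy using java.time; the formatter list is illustrative, not MailDateParser's actual set:

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.List;

public class LenientDateSketch {
    // Try each candidate format in order and return the first successful parse.
    static LocalDate parseLenient(String text, List<DateTimeFormatter> formatters) {
        for (DateTimeFormatter f : formatters) {
            try {
                return LocalDate.parse(text, f);
            } catch (DateTimeParseException e) {
                // fall through to the next candidate format
            }
        }
        return null;
    }

    public static void main(String[] args) {
        List<DateTimeFormatter> formatters = List.of(
                DateTimeFormatter.ofPattern("MM/dd/yy"),
                DateTimeFormatter.ofPattern("yyyy/MM/dd"));
        System.out.println(parseLenient("11/14/08", formatters));   // 2008-11-14
        System.out.println(parseLenient("1996/12/02", formatters)); // 1996-12-02
    }
}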
public static int[] generateRandomNumber(int begin, int end, int size) { // 种子你可以随意生成,但不能重复 final int[] seed = ArrayUtil.range(begin, end); return generateRandomNumber(begin, end, size, seed); }
@Test public void generateRandomNumberTest(){ final int[] ints = NumberUtil.generateRandomNumber(10, 20, 5); assertEquals(5, ints.length); final Set<?> set = Convert.convert(Set.class, ints); assertEquals(5, set.size()); }
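A plain-JDK sketch of one way to guarantee distinct results, shuffling the candidate range as the seed array suggests; this is not Hutool's exact implementation:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class DistinctRandomSketch {
    // Shuffle the candidate range [begin, end) and take the first `size` values,
    // guaranteeing distinct results as long as size <= end - begin.
    static int[] generateRandomNumber(int begin, int end, int size) {
        List<Integer> seed = IntStream.range(begin, end).boxed().collect(Collectors.toList());
        Collections.shuffle(seed);
        return seed.subList(0, size).stream().mapToInt(Integer::intValue).toArray();
    }

    public static void main(String[] args) {
        int[] ints = generateRandomNumber(10, 20, 5);
        System.out.println(Arrays.toString(ints));
        System.out.println(Arrays.stream(ints).distinct().count()); // 5
    }
}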
public void heartbeat(final String appName, final String id, final InstanceInfo info, final InstanceStatus overriddenStatus, boolean primeConnection) throws Throwable { if (primeConnection) { // We do not care about the result for priming request. replicationClient.sendHeartBeat(appName, id, info, overriddenStatus); return; } ReplicationTask replicationTask = new InstanceReplicationTask(targetHost, Action.Heartbeat, info, overriddenStatus, false) { @Override public EurekaHttpResponse<InstanceInfo> execute() throws Throwable { return replicationClient.sendHeartBeat(appName, id, info, overriddenStatus); } @Override public void handleFailure(int statusCode, Object responseEntity) throws Throwable { super.handleFailure(statusCode, responseEntity); if (statusCode == 404) { logger.warn("{}: missing entry.", getTaskName()); if (info != null) { logger.warn("{}: cannot find instance id {} and hence replicating the instance with status {}", getTaskName(), info.getId(), info.getStatus()); register(info); } } else if (config.shouldSyncWhenTimestampDiffers()) { InstanceInfo peerInstanceInfo = (InstanceInfo) responseEntity; if (peerInstanceInfo != null) { syncInstancesIfTimestampDiffers(appName, id, info, peerInstanceInfo); } } } }; long expiryTime = System.currentTimeMillis() + getLeaseRenewalOf(info); batchingDispatcher.process(taskId("heartbeat", info), replicationTask, expiryTime); }
@Test public void testHeartbeatReplicationFailure() throws Throwable { httpReplicationClient.withNetworkStatusCode(200, 200); httpReplicationClient.withBatchReply(404); // Not found, to trigger registration createPeerEurekaNode().heartbeat(instanceInfo.getAppName(), instanceInfo.getId(), instanceInfo, null, false); // Heartbeat replied with an error ReplicationInstance replicationInstance = expectSingleBatchRequest(); assertThat(replicationInstance.getAction(), is(equalTo(Action.Heartbeat))); // Second, registration task is scheduled replicationInstance = expectSingleBatchRequest(); assertThat(replicationInstance.getAction(), is(equalTo(Action.Register))); }
@ShellMethod(key = "compactions show all", value = "Shows all compactions that are in active timeline") public String compactionsAll( @ShellOption(value = {"--includeExtraMetadata"}, help = "Include extra metadata", defaultValue = "false") final boolean includeExtraMetadata, @ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit, @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField, @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending, @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly) { HoodieTableMetaClient client = checkAndGetMetaClient(); HoodieActiveTimeline activeTimeline = client.getActiveTimeline(); return printAllCompactions(activeTimeline, compactionPlanReader(this::readCompactionPlanForActiveTimeline, activeTimeline), includeExtraMetadata, sortByField, descending, limit, headerOnly); }
@Test public void testCompactionsAll() throws IOException { // create MOR table. new TableCommand().createTable( tablePath, tableName, HoodieTableType.MERGE_ON_READ.name(), "", TimelineLayoutVersion.VERSION_1, HoodieAvroPayload.class.getName()); CompactionTestUtils.setupAndValidateCompactionOperations(HoodieCLI.getTableMetaClient(), false, 3, 4, 3, 3); HoodieCLI.getTableMetaClient().reloadActiveTimeline(); Object result = shell.evaluate(() -> "compactions show all"); System.out.println(result.toString()); TableHeader header = new TableHeader().addTableHeaderField("Compaction Instant Time").addTableHeaderField("State") .addTableHeaderField("Total FileIds to be Compacted"); Map<String, Integer> fileIds = new HashMap(); fileIds.put("001", 3); fileIds.put("003", 4); fileIds.put("005", 3); fileIds.put("007", 3); List<Comparable[]> rows = new ArrayList<>(); Arrays.asList("001", "003", "005", "007").stream().sorted(Comparator.reverseOrder()).forEach(instant -> { rows.add(new Comparable[] {instant, "REQUESTED", fileIds.get(instant)}); }); String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows); assertEquals(expected, result.toString()); }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
@Test public void shouldFindVarargWithSomeNullValues() { // Given: givenFunctions( function(EXPECTED, 0, STRING_VARARGS) ); // When: final KsqlScalarFunction fun = udfIndex.getFunction(Arrays.asList(null, SqlArgument.of(SqlTypes.STRING), null)); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
@Override public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) { LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats); LongColumnStatsDataInspector aggregateData = longInspectorFromStats(aggregateColStats); LongColumnStatsDataInspector newData = longInspectorFromStats(newColStats); Long lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData)); if (lowValue != null) { aggregateData.setLowValue(lowValue); } Long highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData)); if (highValue != null) { aggregateData.setHighValue(highValue); } aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls())); NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator(); NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator(); List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst); aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(), ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs())); aggregateData.setNdvEstimator(ndvEstimatorsList.get(0)); KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator(); KllHistogramEstimator newKllEst = newData.getHistogramEstimator(); aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst)); aggregateColStats.getStatsData().setLongStats(aggregateData); }
@Test public void testMergeNullValues() { ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(long.class) .low(null) .high(null) .numNulls(1) .numDVs(0) .build()); merger.merge(aggrObj, aggrObj); ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(long.class) .low(null) .high(null) .numNulls(2) .numDVs(0) .build(); assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData()); }
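A self-contained sketch of the nullable low/high and additive null-count merge logic, with a hypothetical Stats holder standing in for the Thrift-generated column-statistics types:

public class StatsMergeSketch {
    // Mutable column stats with nullable low/high, merged field by field like the merger above.
    static final class Stats {
        Long low, high;
        long numNulls;
        Stats(Long low, Long high, long numNulls) {
            this.low = low; this.high = high; this.numNulls = numNulls;
        }
    }

    static void merge(Stats aggregate, Stats update) {
        if (update.low != null) {
            aggregate.low = aggregate.low == null ? update.low : Math.min(aggregate.low, update.low);
        }
        if (update.high != null) {
            aggregate.high = aggregate.high == null ? update.high : Math.max(aggregate.high, update.high);
        }
        aggregate.numNulls += update.numNulls; // null counts are additive
    }

    public static void main(String[] args) {
        Stats aggregate = new Stats(null, null, 1);
        merge(aggregate, aggregate); // merging a stats object with itself, as in the test
        System.out.println(aggregate.low + " " + aggregate.high + " " + aggregate.numNulls); // null null 2
    }
}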
@Bean public OpenAPI apiInfo() { return new OpenAPI() .info(new Info() .title(TITLE).description(DESCRIPTION) .version(VersionUtils.getVersion(getClass(), DEFAULT_SWAGGER_API_VERSION)) .contact(new Contact().name(CONTACT_NAME).url(CONTACT_URL).email(CONTACT_EMAIL)) ) .components(new Components() .addSecuritySchemes(org.apache.shenyu.common.constant.Constants.X_ACCESS_TOKEN, new SecurityScheme() .name(org.apache.shenyu.common.constant.Constants.X_ACCESS_TOKEN) .type(Type.APIKEY) .in(In.HEADER) .description(TOKEN_DESCRIPTION) ) ).addSecurityItem(new SecurityRequirement().addList(org.apache.shenyu.common.constant.Constants.X_ACCESS_TOKEN)); }
@Test public void testApiInfo() { OpenAPI actual = swaggerConfiguration.apiInfo(); assertNotNull(actual); Assertions.assertEquals(1, actual.getSecurity().size()); Assertions.assertEquals(1, actual.getSecurity().get(0).size()); Assertions.assertTrue(actual.getSecurity().get(0).containsKey(org.apache.shenyu.common.constant.Constants.X_ACCESS_TOKEN)); }
static void closeStateManager(final Logger log, final String logPrefix, final boolean closeClean, final boolean eosEnabled, final ProcessorStateManager stateMgr, final StateDirectory stateDirectory, final TaskType taskType) { // if EOS is enabled, wipe out the whole state store for unclean close since it is now invalid final boolean wipeStateStore = !closeClean && eosEnabled; final TaskId id = stateMgr.taskId(); log.trace("Closing state manager for {} task {}", taskType, id); final AtomicReference<ProcessorStateException> firstException = new AtomicReference<>(null); try { if (stateDirectory.lock(id)) { try { stateMgr.close(); } catch (final ProcessorStateException e) { firstException.compareAndSet(null, e); } finally { try { if (wipeStateStore) { log.debug("Wiping state stores for {} task {}", taskType, id); // we can just delete the whole dir of the task, including the state store images and the checkpoint files, // and then we write an empty checkpoint file indicating that the previous close is graceful and we just // need to re-bootstrap the restoration from the beginning Utils.delete(stateMgr.baseDir()); } } finally { stateDirectory.unlock(id); } } } else { log.error("Failed to acquire lock while closing the state store for {} task {}", taskType, id); } } catch (final IOException e) { final ProcessorStateException exception = new ProcessorStateException( String.format("%sFatal error while trying to close the state manager for task %s", logPrefix, id), e ); firstException.compareAndSet(null, exception); } final ProcessorStateException exception = firstException.get(); if (exception != null) { throw exception; } }
@Test public void testCloseStateManagerThrowsExceptionWhenDirty() { when(stateManager.taskId()).thenReturn(taskId); when(stateDirectory.lock(taskId)).thenReturn(true); doThrow(new ProcessorStateException("state manager failed to close")).when(stateManager).close(); assertThrows( ProcessorStateException.class, () -> StateManagerUtil.closeStateManager( logger, "logPrefix:", false, false, stateManager, stateDirectory, TaskType.ACTIVE)); verify(stateDirectory).unlock(taskId); }
static int parseInt(String key, @Nullable String value) { requireArgument((value != null) && !value.isEmpty(), "value of key %s was omitted", key); try { return Integer.parseInt(value); } catch (NumberFormatException e) { throw new IllegalArgumentException(String.format(US, "key %s value was set to %s, must be an integer", key, value), e); } }
@Test public void parseInt_exception() { assertThrows(IllegalArgumentException.class, () -> CaffeineSpec.parseInt("key", "value")); }
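A JDK-only sketch reproducing the method's behavior for quick experimentation; the error messages approximate the originals:

public class ParseIntSketch {
    // Parse a configuration value, reporting the offending key in the error message.
    static int parseInt(String key, String value) {
        if (value == null || value.isEmpty()) {
            throw new IllegalArgumentException("value of key " + key + " was omitted");
        }
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException(
                    "key " + key + " value was set to " + value + ", must be an integer", e);
        }
    }

    public static void main(String[] args) {
        System.out.println(parseInt("maximumSize", "100")); // 100
        try {
            parseInt("key", "value");
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}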
public static <T> Values<T> of(Iterable<T> elems) {
    return new Values<>(elems, Optional.absent(), Optional.absent(), false);
}

@Test
public void testSourceSplitAtFraction() throws Exception {
    List<Integer> elements = new ArrayList<>();
    Random random = new Random();
    for (int i = 0; i < 25; i++) {
        elements.add(random.nextInt());
    }
    CreateSource<Integer> source = CreateSource.fromIterable(elements, VarIntCoder.of());
    SourceTestUtils.assertSplitAtFractionExhaustive(source, PipelineOptionsFactory.create());
}
@SuppressWarnings("nullness") @VisibleForTesting public ProcessContinuation run( PartitionMetadata partition, RestrictionTracker<TimestampRange, Timestamp> tracker, OutputReceiver<DataChangeRecord> receiver, ManualWatermarkEstimator<Instant> watermarkEstimator, BundleFinalizer bundleFinalizer) { final String token = partition.getPartitionToken(); final Timestamp startTimestamp = tracker.currentRestriction().getFrom(); final Timestamp endTimestamp = partition.getEndTimestamp(); // TODO: Potentially we can avoid this fetch, by enriching the runningAt timestamp when the // ReadChangeStreamPartitionDoFn#processElement is called final PartitionMetadata updatedPartition = Optional.ofNullable(partitionMetadataDao.getPartition(token)) .map(partitionMetadataMapper::from) .orElseThrow( () -> new IllegalStateException( "Partition " + token + " not found in metadata table")); try (ChangeStreamResultSet resultSet = changeStreamDao.changeStreamQuery( token, startTimestamp, endTimestamp, partition.getHeartbeatMillis())) { metrics.incQueryCounter(); while (resultSet.next()) { final List<ChangeStreamRecord> records = changeStreamRecordMapper.toChangeStreamRecords( updatedPartition, resultSet, resultSet.getMetadata()); Optional<ProcessContinuation> maybeContinuation; for (final ChangeStreamRecord record : records) { if (record instanceof DataChangeRecord) { maybeContinuation = dataChangeRecordAction.run( updatedPartition, (DataChangeRecord) record, tracker, receiver, watermarkEstimator); } else if (record instanceof HeartbeatRecord) { maybeContinuation = heartbeatRecordAction.run( updatedPartition, (HeartbeatRecord) record, tracker, watermarkEstimator); } else if (record instanceof ChildPartitionsRecord) { maybeContinuation = childPartitionsRecordAction.run( updatedPartition, (ChildPartitionsRecord) record, tracker, watermarkEstimator); } else { LOG.error("[{}] Unknown record type {}", token, record.getClass()); throw new IllegalArgumentException("Unknown record type " + record.getClass()); } if (maybeContinuation.isPresent()) { LOG.debug("[{}] Continuation present, returning {}", token, maybeContinuation); bundleFinalizer.afterBundleCommit( Instant.now().plus(BUNDLE_FINALIZER_TIMEOUT), updateWatermarkCallback(token, watermarkEstimator)); return maybeContinuation.get(); } } } bundleFinalizer.afterBundleCommit( Instant.now().plus(BUNDLE_FINALIZER_TIMEOUT), updateWatermarkCallback(token, watermarkEstimator)); } catch (SpannerException e) { /* If there is a split when a partition is supposed to be finished, the residual will try to perform a change stream query for an out of range interval. We ignore this error here, and the residual should be able to claim the end of the timestamp range, finishing the partition. */ if (isTimestampOutOfRange(e)) { LOG.info( "[{}] query change stream is out of range for {} to {}, finishing stream.", token, startTimestamp, endTimestamp, e); } else { throw e; } } catch (Exception e) { LOG.error( "[{}] query change stream had exception processing range {} to {}.", token, startTimestamp, endTimestamp, e); throw e; } LOG.debug("[{}] change stream completed successfully", token); if (tracker.tryClaim(endTimestamp)) { LOG.debug("[{}] Finishing partition", token); partitionMetadataDao.updateToFinished(token); metrics.decActivePartitionReadCounter(); LOG.info("[{}] After attempting to finish the partition", token); } return ProcessContinuation.stop(); }
@Test public void testQueryChangeStreamWithDataChangeRecord() { final Struct rowAsStruct = mock(Struct.class); final ChangeStreamResultSetMetadata resultSetMetadata = mock(ChangeStreamResultSetMetadata.class); final ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class); final DataChangeRecord record1 = mock(DataChangeRecord.class); final DataChangeRecord record2 = mock(DataChangeRecord.class); when(record1.getRecordTimestamp()).thenReturn(PARTITION_START_TIMESTAMP); when(record2.getRecordTimestamp()).thenReturn(PARTITION_START_TIMESTAMP); when(changeStreamDao.changeStreamQuery( PARTITION_TOKEN, PARTITION_START_TIMESTAMP, PARTITION_END_TIMESTAMP, PARTITION_HEARTBEAT_MILLIS)) .thenReturn(resultSet); when(resultSet.next()).thenReturn(true); when(resultSet.getCurrentRowAsStruct()).thenReturn(rowAsStruct); when(resultSet.getMetadata()).thenReturn(resultSetMetadata); when(changeStreamRecordMapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata)) .thenReturn(Arrays.asList(record1, record2)); when(dataChangeRecordAction.run( partition, record1, restrictionTracker, outputReceiver, watermarkEstimator)) .thenReturn(Optional.empty()); when(dataChangeRecordAction.run( partition, record2, restrictionTracker, outputReceiver, watermarkEstimator)) .thenReturn(Optional.of(ProcessContinuation.stop())); when(watermarkEstimator.currentWatermark()).thenReturn(WATERMARK); final ProcessContinuation result = action.run( partition, restrictionTracker, outputReceiver, watermarkEstimator, bundleFinalizer); assertEquals(ProcessContinuation.stop(), result); verify(dataChangeRecordAction) .run(partition, record1, restrictionTracker, outputReceiver, watermarkEstimator); verify(dataChangeRecordAction) .run(partition, record2, restrictionTracker, outputReceiver, watermarkEstimator); verify(partitionMetadataDao).updateWatermark(PARTITION_TOKEN, WATERMARK_TIMESTAMP); verify(heartbeatRecordAction, never()).run(any(), any(), any(), any()); verify(childPartitionsRecordAction, never()).run(any(), any(), any(), any()); verify(restrictionTracker, never()).tryClaim(any()); }
@Override
public Properties getConfig(RedisClusterNode node, String pattern) {
    RedisClient entry = getEntry(node);
    RFuture<List<String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_GET, pattern);
    List<String> r = syncFuture(f);
    if (r != null) {
        return Converters.toProperties(r);
    }
    return null;
}

@Test
public void testGetConfig() {
    RedisClusterNode master = getFirstMaster();
    Properties config = connection.getConfig(master, "*");
    assertThat(config.size()).isGreaterThan(20);
}
@Override
public String toString() {
    // we can't use uri.toString(), which escapes everything, because we want
    // illegal characters unescaped in the string, for glob processing, etc.
    StringBuilder buffer = new StringBuilder();
    if (uri.getScheme() != null) {
        buffer.append(uri.getScheme())
              .append(":");
    }
    if (uri.getAuthority() != null) {
        buffer.append("//")
              .append(uri.getAuthority());
    }
    if (uri.getPath() != null) {
        String path = uri.getPath();
        if (path.indexOf('/') == 0 &&
            hasWindowsDrive(path) &&       // has windows drive
            uri.getScheme() == null &&     // but no scheme
            uri.getAuthority() == null) {  // and no authority
            path = path.substring(1);      // remove slash before drive
        }
        buffer.append(path);
    }
    if (uri.getFragment() != null) {
        buffer.append("#")
              .append(uri.getFragment());
    }
    return buffer.toString();
}

@Test(timeout = 30000)
public void testNormalize() throws URISyntaxException {
    assertEquals("", new Path(".").toString());
    assertEquals("..", new Path("..").toString());
    assertEquals("/", new Path("/").toString());
    assertEquals("/", new Path("//").toString());
    assertEquals("/", new Path("///").toString());
    assertEquals("//foo/", new Path("//foo/").toString());
    assertEquals("//foo/", new Path("//foo//").toString());
    assertEquals("//foo/bar", new Path("//foo//bar").toString());
    assertEquals("/foo", new Path("/foo/").toString());
    assertEquals("/foo", new Path("/foo/").toString());
    assertEquals("foo", new Path("foo/").toString());
    assertEquals("foo", new Path("foo//").toString());
    assertEquals("foo", new Path("foo///").toString());
    assertEquals("foo/bar", new Path("foo//bar").toString());
    assertEquals("foo/bar", new Path("foo///bar").toString());
    assertEquals("hdfs://foo/foo2/bar/baz/", new Path(new URI("hdfs://foo//foo2///bar/baz///")).toString());
    if (Path.WINDOWS) {
        assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
    }
}
public static PTransformMatcher parDoWithFnType(final Class<? extends DoFn> fnType) {
    return new PTransformMatcher() {
        @Override
        public boolean matches(AppliedPTransform<?, ?, ?> application) {
            DoFn<?, ?> fn;
            if (application.getTransform() instanceof ParDo.SingleOutput) {
                fn = ((ParDo.SingleOutput) application.getTransform()).getFn();
            } else if (application.getTransform() instanceof ParDo.MultiOutput) {
                fn = ((ParDo.MultiOutput) application.getTransform()).getFn();
            } else {
                return false;
            }
            return fnType.equals(fn.getClass());
        }

        @Override
        public String toString() {
            return MoreObjects.toStringHelper("ParDoWithFnTypeMatcher")
                .add("fnType", fnType)
                .toString();
        }
    };
}

@Test
public void parDoWithFnTypeWithMatchingType() {
    DoFn<Object, Object> fn =
        new DoFn<Object, Object>() {
            @ProcessElement
            public void process(ProcessContext ctxt) {}
        };
    AppliedPTransform<?, ?, ?> parDoSingle = getAppliedTransform(ParDo.of(fn));
    AppliedPTransform<?, ?, ?> parDoMulti =
        getAppliedTransform(ParDo.of(fn).withOutputTags(new TupleTag<>(), TupleTagList.empty()));

    PTransformMatcher matcher = PTransformMatchers.parDoWithFnType(fn.getClass());
    assertThat(matcher.matches(parDoSingle), is(true));
    assertThat(matcher.matches(parDoMulti), is(true));
}
public static DynamicVoter parse(String input) {
    input = input.trim();
    int atIndex = input.indexOf("@");
    if (atIndex < 0) {
        throw new IllegalArgumentException("No @ found in dynamic voter string.");
    }
    if (atIndex == 0) {
        throw new IllegalArgumentException("Invalid @ at beginning of dynamic voter string.");
    }
    String idString = input.substring(0, atIndex);
    int nodeId;
    try {
        nodeId = Integer.parseInt(idString);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Failed to parse node id in dynamic voter string.", e);
    }
    if (nodeId < 0) {
        throw new IllegalArgumentException("Invalid negative node id " + nodeId + " in dynamic voter string.");
    }
    input = input.substring(atIndex + 1);
    if (input.isEmpty()) {
        throw new IllegalArgumentException("No hostname found after node id.");
    }
    String host;
    if (input.startsWith("[")) {
        int endBracketIndex = input.indexOf("]");
        if (endBracketIndex < 0) {
            throw new IllegalArgumentException("Hostname began with left bracket, but no right " +
                "bracket was found.");
        }
        host = input.substring(1, endBracketIndex);
        input = input.substring(endBracketIndex + 1);
    } else {
        int endColonIndex = input.indexOf(":");
        if (endColonIndex < 0) {
            throw new IllegalArgumentException("No colon following hostname could be found.");
        }
        host = input.substring(0, endColonIndex);
        input = input.substring(endColonIndex);
    }
    if (!input.startsWith(":")) {
        throw new IllegalArgumentException("Port section must start with a colon.");
    }
    input = input.substring(1);
    int endColonIndex = input.indexOf(":");
    if (endColonIndex < 0) {
        throw new IllegalArgumentException("No colon following port could be found.");
    }
    String portString = input.substring(0, endColonIndex);
    int port;
    try {
        port = Integer.parseInt(portString);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Failed to parse port in dynamic voter string.", e);
    }
    if (port < 0 || port > 65535) {
        throw new IllegalArgumentException("Invalid port " + port + " in dynamic voter string.");
    }
    String directoryIdString = input.substring(endColonIndex + 1);
    Uuid directoryId;
    try {
        directoryId = Uuid.fromString(directoryIdString);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Failed to parse directory ID in dynamic voter string.", e);
    }
    return new DynamicVoter(directoryId, nodeId, host, port);
}

@Test
public void testPortSectionMustStartWithAColon() {
    assertEquals("Port section must start with a colon.",
        assertThrows(IllegalArgumentException.class,
            () -> DynamicVoter.parse("5@[2001:4860:4860::8888]8020:__0IZ-0DRNazJ49kCZ1EMQ"))
                .getMessage());
}
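From the parsing logic above, the accepted shape is <nodeId>@<host>:<port>:<directoryId>, with IPv6 hosts wrapped in brackets. A hedged sketch — the plain hostname is made up, and the directory ID is simply the one used in the test:

// Plain hostname form; "kafka-0" is a hypothetical host.
DynamicVoter plainHost = DynamicVoter.parse("5@kafka-0:9093:__0IZ-0DRNazJ49kCZ1EMQ");
// Bracketed IPv6 form; note the colon before the port, which the failing
// input in the test above omits.
DynamicVoter ipv6Host = DynamicVoter.parse("5@[2001:4860:4860::8888]:8020:__0IZ-0DRNazJ49kCZ1EMQ");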
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
    Objects.requireNonNull(metric);
    if (batchMeasure == null) {
        return Optional.empty();
    }

    Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(builder, batchMeasure);
        case LONG:
            return toLongMeasure(builder, batchMeasure);
        case DOUBLE:
            return toDoubleMeasure(builder, batchMeasure);
        case BOOLEAN:
            return toBooleanMeasure(builder, batchMeasure);
        case STRING:
            return toStringMeasure(builder, batchMeasure);
        case LEVEL:
            return toLevelMeasure(builder, batchMeasure);
        case NO_VALUE:
            return toNoValueMeasure(builder);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}

@Test
public void toMeasure_for_LEVEL_Metric_maps_QualityGateStatus() {
    ScannerReport.Measure batchMeasure = ScannerReport.Measure.newBuilder()
        .setStringValue(StringValue.newBuilder().setValue(Measure.Level.OK.name()))
        .build();

    Optional<Measure> measure = underTest.toMeasure(batchMeasure, SOME_LEVEL_METRIC);

    assertThat(measure).isPresent();
    assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.LEVEL);
    assertThat(measure.get().getLevelValue()).isEqualTo(Measure.Level.OK);
}
@Override
public List<PartitionInfo> getRemotePartitions(Table table, List<String> partitionNames) {
    ImmutableList.Builder<Partition> partitionBuilder = ImmutableList.builder();
    Map<String, Partition> existingPartitions = hmsOps.getPartitionByNames(table, partitionNames);
    partitionBuilder.addAll(existingPartitions.values());
    return fileOps.getRemotePartitions(partitionBuilder.build());
}

@Test
public void testGetRemotePartitions(@Mocked HiveTable table, @Mocked HiveMetastoreOperations hmsOps) {
    List<String> partitionNames = Lists.newArrayList("dt=20200101", "dt=20200102", "dt=20200103");
    Map<String, Partition> partitionMap = Maps.newHashMap();
    for (String name : partitionNames) {
        Map<String, String> parameters = Maps.newHashMap();
        TextFileFormatDesc formatDesc = new TextFileFormatDesc("a", "b", "c", "d");
        String fullPath = "hdfs://path_to_table/" + name;
        Partition partition = new Partition(parameters, RemoteFileInputFormat.PARQUET, formatDesc, fullPath, true);
        partitionMap.put(name, partition);
    }
    new Expectations() {
        {
            hmsOps.getPartitionByNames((Table) any, (List<String>) any);
            result = partitionMap;
            minTimes = 1;
        }
    };
    List<PartitionInfo> partitionInfoList = hiveMetadata.getRemotePartitions(table, partitionNames);
    Assert.assertEquals(3, partitionInfoList.size());
}
public static String hashpw(String password, String salt) throws IllegalArgumentException {
    BCrypt B;
    String real_salt;
    byte[] passwordb, saltb, hashed;
    char minor = (char) 0;
    int rounds, off = 0;
    StringBuilder rs = new StringBuilder();

    if (salt == null) {
        throw new IllegalArgumentException("salt cannot be null");
    }
    int saltLength = salt.length();
    if (saltLength < 28) {
        throw new IllegalArgumentException("Invalid salt");
    }
    if (salt.charAt(0) != '$' || salt.charAt(1) != '2') {
        throw new IllegalArgumentException("Invalid salt version");
    }
    if (salt.charAt(2) == '$') {
        off = 3;
    } else {
        minor = salt.charAt(2);
        if (minor != 'a' || salt.charAt(3) != '$') {
            throw new IllegalArgumentException("Invalid salt revision");
        }
        off = 4;
    }
    if (saltLength - off < 25) {
        throw new IllegalArgumentException("Invalid salt");
    }
    // Extract number of rounds
    if (salt.charAt(off + 2) > '$') {
        throw new IllegalArgumentException("Missing salt rounds");
    }
    rounds = Integer.parseInt(salt.substring(off, off + 2));
    real_salt = salt.substring(off + 3, off + 25);

    try {
        passwordb = (password + (minor >= 'a' ? "\000" : "")).getBytes("UTF-8");
    } catch (UnsupportedEncodingException uee) {
        throw new AssertionError("UTF-8 is not supported");
    }
    saltb = decode_base64(real_salt, BCRYPT_SALT_LEN);

    B = new BCrypt();
    hashed = B.crypt_raw(passwordb, saltb, rounds);

    rs.append("$2");
    if (minor >= 'a') {
        rs.append(minor);
    }
    rs.append("$");
    if (rounds < 10) {
        rs.append("0");
    }
    rs.append(rounds);
    rs.append("$");
    encode_base64(saltb, saltb.length, rs);
    encode_base64(hashed, bf_crypt_ciphertext.length * 4 - 1, rs);
    return rs.toString();
}

@Test
public void testHashpwTooLittleRounds() throws IllegalArgumentException {
    thrown.expect(IllegalArgumentException.class);
    BCrypt.hashpw("foo", "$2a$03$......................");
}
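Pieced together from the checks above, a salt is "$2$" or "$2a$", a two-digit round count, "$", then 22 characters of bcrypt-base64 salt. A hedged sketch with a made-up salt body (whether a given round count is accepted is decided later by crypt_raw, which is not shown here — the test above shows that 03 is rejected):

// 10 rounds and a hypothetical 22-character salt body; hashpw echoes the
// salt parameters back as the prefix of the returned hash string.
String hash = BCrypt.hashpw("secret", "$2a$10$abcdefghijklmnopqrstuv");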
public static <T> T convert(Class<T> type, Object value) throws ConvertException {
    return convert((Type) type, value);
}

@Test
public void toAtomicIntegerArrayTest() {
    final String str = "1,2";
    final AtomicIntegerArray atomicIntegerArray = Convert.convert(AtomicIntegerArray.class, str);
    assertEquals("[1, 2]", atomicIntegerArray.toString());
}
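The same entry point handles other target types through registered converters; a hedged one-liner assuming the standard String-to-Integer converter is available:

Integer answer = Convert.convert(Integer.class, "42"); // 42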
public CMap parse(RandomAccessRead randomAccessRead) throws IOException {
    CMap result = new CMap();
    Object previousToken = null;
    Object token = parseNextToken(randomAccessRead);
    while (token != null) {
        if (token instanceof Operator) {
            Operator op = (Operator) token;
            if (op.op.equals("endcmap")) {
                // end of CMap reached, stop reading as there isn't any interesting info anymore
                break;
            }
            if (op.op.equals("usecmap") && previousToken instanceof LiteralName) {
                parseUsecmap((LiteralName) previousToken, result);
            } else if (previousToken instanceof Number) {
                if (op.op.equals("begincodespacerange")) {
                    parseBegincodespacerange((Number) previousToken, randomAccessRead, result);
                } else if (op.op.equals("beginbfchar")) {
                    parseBeginbfchar((Number) previousToken, randomAccessRead, result);
                } else if (op.op.equals("beginbfrange")) {
                    parseBeginbfrange((Number) previousToken, randomAccessRead, result);
                } else if (op.op.equals("begincidchar")) {
                    parseBegincidchar((Number) previousToken, randomAccessRead, result);
                } else if (op.op.equals("begincidrange") && previousToken instanceof Integer) {
                    parseBegincidrange((Integer) previousToken, randomAccessRead, result);
                }
            }
        } else if (token instanceof LiteralName) {
            parseLiteralName((LiteralName) token, randomAccessRead, result);
        }
        previousToken = token;
        token = parseNextToken(randomAccessRead);
    }
    return result;
}

@Test
void testIdentitybfrange() throws IOException {
    // use strict mode
    CMap cMap = new CMapParser(true)
            .parse(new RandomAccessReadBufferedFile(new File("src/test/resources/cmap", "Identitybfrange")));
    assertEquals("Adobe-Identity-UCS", cMap.getName(), "wrong CMap name");
    byte[] bytes = { 0, 65 };
    assertEquals(new String(bytes, StandardCharsets.UTF_16BE), cMap.toUnicode(bytes), "Identity 0x0041");
    bytes = new byte[] { 0x30, 0x39 };
    assertEquals(new String(bytes, StandardCharsets.UTF_16BE), cMap.toUnicode(bytes), "Identity 0x3039");
    // check border values for strict mode
    bytes = new byte[] { 0x30, (byte) 0xFF };
    assertEquals(new String(bytes, StandardCharsets.UTF_16BE), cMap.toUnicode(bytes), "Identity 0x30FF");
    // check border values for strict mode
    bytes = new byte[] { 0x31, 0x00 };
    assertEquals(new String(bytes, StandardCharsets.UTF_16BE), cMap.toUnicode(bytes), "Identity 0x3100");
    bytes = new byte[] { (byte) 0xFF, (byte) 0xFF };
    assertEquals(new String(bytes, StandardCharsets.UTF_16BE), cMap.toUnicode(bytes), "Identity 0xFFFF");
}
@Override
public String pathPattern() {
    return buildExtensionPathPattern(scheme);
}

@Test
void shouldBuildPathPatternCorrectly() {
    var scheme = Scheme.buildFromType(FakeExtension.class);
    var listHandler = new ExtensionListHandler(scheme, client);
    var pathPattern = listHandler.pathPattern();
    assertEquals("/apis/fake.halo.run/v1alpha1/fakes", pathPattern);
}
@Override
public JWKSet getJWKSet(JWKSetCacheRefreshEvaluator refreshEvaluator, long currentTime, T context)
        throws KeySourceException {
    var jwksUrl = discoverJwksUrl();
    try (var jwkSetSource = new URLBasedJWKSetSource<>(jwksUrl, new HttpRetriever(httpClient))) {
        return jwkSetSource.getJWKSet(null, 0, context);
    } catch (IOException e) {
        throw new RemoteKeySourceException(
            "failed to fetch jwks from discovery document '%s'".formatted(discoveryUrl), e);
    }
}

@Test
void getJWKSet_noJwksUri(WireMockRuntimeInfo wm) {
    var discoveryUrl = URI.create(wm.getHttpBaseUrl()).resolve(DISCOVERY_PATH);
    stubFor(get(DISCOVERY_PATH).willReturn(okJson("{\"jwks_uri\": null}")));

    var sut = new DiscoveryJwkSetSource<>(HttpClient.newHttpClient(), discoveryUrl);

    assertThrows(RemoteKeySourceException.class, () -> sut.getJWKSet(null, 0, null));
}
public static double estimateNumberOfHashCollisions(int numberOfValues, int hashSize) {
    checkState(0 <= numberOfValues && numberOfValues <= hashSize);
    if (hashSize == 0) {
        return 0d;
    }

    double estimateRescaleFactor = (double) numberOfValues / NUMBER_OF_VALUES;
    double estimateIndex = (double) NUMBER_OF_ESTIMATES * numberOfValues / hashSize;
    int lowerEstimateIndex = (int) floor(estimateIndex);
    int upperEstimateIndex = (int) ceil(estimateIndex);
    if (lowerEstimateIndex == upperEstimateIndex) {
        return COLLISION_ESTIMATES[lowerEstimateIndex] * estimateRescaleFactor;
    }

    double lowerEstimation = COLLISION_ESTIMATES[lowerEstimateIndex];
    double upperEstimation = COLLISION_ESTIMATES[upperEstimateIndex];
    double estimationIndexDistanceFromLower = estimateIndex - lowerEstimateIndex;
    return (lowerEstimation * (1 - estimationIndexDistanceFromLower)
            + upperEstimation * estimationIndexDistanceFromLower) * estimateRescaleFactor;
}

@Test
public void hashEstimatesShouldIncrease() {
    assertEquals(estimateNumberOfHashCollisions(0, 100), 0d);
    for (int i = 1; i <= HASH_TABLE_SIZE; ++i) {
        assertTrue(estimateNumberOfHashCollisions(i - 1, HASH_TABLE_SIZE)
            < estimateNumberOfHashCollisions(i, HASH_TABLE_SIZE));
    }
}
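The core of the method is plain linear interpolation between two precomputed estimates, rescaled by the value count. A standalone sketch of just the interpolation step, with made-up numbers standing in for the COLLISION_ESTIMATES entries:

double lower = 10.0;  // hypothetical COLLISION_ESTIMATES[floor(index)]
double upper = 14.0;  // hypothetical COLLISION_ESTIMATES[ceil(index)]
double index = 2.25;  // fractional estimate index
double frac = index - Math.floor(index);              // 0.25
double blended = lower * (1 - frac) + upper * frac;   // 10*0.75 + 14*0.25 = 11.0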
public static String getExactlyValue(final String value) {
    return null == value ? null : tryGetRealContentInBackticks(CharMatcher.anyOf(EXCLUDED_CHARACTERS).removeFrom(value));
}

@Test
void assertGetExactlyValueWithReservedCharacters() {
    assertThat(SQLUtils.getExactlyValue("`xxx`", "`"), is("`xxx`"));
    assertThat(SQLUtils.getExactlyValue("[xxx]", "[]"), is("[xxx]"));
    assertThat(SQLUtils.getExactlyValue("\"xxx\"", "\""), is("\"xxx\""));
    assertThat(SQLUtils.getExactlyValue("'xxx'", "'"), is("'xxx'"));
}
List<String> liveKeysAsOrderedList() {
    return new ArrayList<String>(liveMap.keySet());
}

@Test
public void empty1() {
    long now = 3000;
    assertNotNull(tracker.getOrCreate(key, now++));
    now += ComponentTracker.DEFAULT_TIMEOUT + 1000;
    tracker.removeStaleComponents(now);
    assertEquals(0, tracker.liveKeysAsOrderedList().size());
    assertEquals(0, tracker.getComponentCount());
    assertNotNull(tracker.getOrCreate(key, now++));
}
public boolean isValidatedPath(final String path) {
    return pathPattern.matcher(path).find();
}

@Test
void assertIsNotValidPathWithNullParentNode() {
    UniqueRuleItemNodePath uniqueRuleItemNodePath = new UniqueRuleItemNodePath(new RuleRootNodePath("foo"), "test_path");
    assertFalse(uniqueRuleItemNodePath.isValidatedPath("/word1/word2/rules/test_foo/test_path/versions/1234"));
    assertFalse(uniqueRuleItemNodePath.isValidatedPath("/rules/test_foo/test/versions/1234"));
    assertFalse(uniqueRuleItemNodePath.isValidatedPath("/word1/word2/rules/foo/test_path/versions/"));
}
@Override
public boolean equals(@Nullable Object obj) {
    if (!(obj instanceof LocalResourceId)) {
        return false;
    }
    LocalResourceId other = (LocalResourceId) obj;
    return this.pathString.equals(other.pathString);
}

@Test
public void testEquals() {
    // TODO: Java core test failing on windows, https://github.com/apache/beam/issues/20475
    assumeFalse(SystemUtils.IS_OS_WINDOWS);
    assertEquals(toResourceIdentifier("/root/tmp/"), toResourceIdentifier("/root/tmp/"));
    assertNotEquals(toResourceIdentifier("/root/tmp"), toResourceIdentifier("/root/tmp/"));
}
static void checkValidIndexName(String indexName) {
    if (indexName.length() > MAX_INDEX_NAME_LENGTH) {
        throw new IllegalArgumentException(
            "Index name " + indexName + " cannot be longer than " + MAX_INDEX_NAME_LENGTH + " characters.");
    }
    Matcher matcher = ILLEGAL_INDEX_NAME_CHARS.matcher(indexName);
    if (matcher.find()) {
        throw new IllegalArgumentException(
            "Index name " + indexName + " is not a valid name. Character \"" + matcher.group() + "\" is not allowed.");
    }
    if (indexName.charAt(0) == '-' || indexName.charAt(0) == '_' || indexName.charAt(0) == '+') {
        throw new IllegalArgumentException(
            "Index name " + indexName + " can not start with -, _ or +.");
    }
}

@Test
public void testCheckValidIndexNameThrowsErrorWhenNameIsTooLong() {
    assertThrows(
        IllegalArgumentException.class,
        () -> checkValidIndexName(StringUtils.repeat("a", 300)));
}
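Hedged illustrations of the three rejection paths above; the names are illustrative, and what counts as an illegal character depends on ILLEGAL_INDEX_NAME_CHARS, which is not shown here:

checkValidIndexName(StringUtils.repeat("a", 300)); // throws: exceeds MAX_INDEX_NAME_LENGTH
checkValidIndexName("_hidden");                    // throws: may not start with -, _ or +
// A name containing any character matched by ILLEGAL_INDEX_NAME_CHARS fails with
// "... is not a valid name. Character \"...\" is not allowed."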
public static <T> T readVersionAndDeSerialize(
        SimpleVersionedSerializer<T> serializer, DataInputView in) throws IOException {
    checkNotNull(serializer, "serializer");
    checkNotNull(in, "in");

    final int version = in.readInt();
    final int length = in.readInt();
    final byte[] data = new byte[length];
    in.readFully(data);

    return serializer.deserialize(version, data);
}

@Test
void testUnderflow() throws Exception {
    assertThatExceptionOfType(IllegalArgumentException.class)
        .isThrownBy(
            () -> SimpleVersionedSerialization.readVersionAndDeSerialize(
                new TestStringSerializer(), new byte[7]));
}
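The read path above implies the wire layout: a 4-byte version, a 4-byte length, then the serialized payload. A hedged writer-side mirror under that assumption, where `out` (a DataOutputView) and `value` are hypothetical stand-ins:

// Mirror of the read path: version first, then a length-prefixed payload.
out.writeInt(serializer.getVersion());
byte[] data = serializer.serialize(value);
out.writeInt(data.length);
out.write(data);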
T getFunction(final List<SqlArgument> arguments) {
    // first try to get the candidates without any implicit casting
    Optional<T> candidate = findMatchingCandidate(arguments, false);
    if (candidate.isPresent()) {
        return candidate.get();
    } else if (!supportsImplicitCasts) {
        throw createNoMatchingFunctionException(arguments);
    }

    // if none were found (candidate isn't present) try again with implicit casting
    candidate = findMatchingCandidate(arguments, true);
    if (candidate.isPresent()) {
        return candidate.get();
    }
    throw createNoMatchingFunctionException(arguments);
}

@Test
public void shouldChooseSpecificOverMultipleVarArgs() {
    // Given:
    givenFunctions(
        function(EXPECTED, -1, STRING),
        function(OTHER, 0, STRING_VARARGS),
        function("two", 1, STRING, STRING_VARARGS)
    );

    // When:
    final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(SqlTypes.STRING)));

    // Then:
    assertThat(fun.name(), equalTo(EXPECTED));
}
public void hasAllRequiredFields() {
    if (!actual.isInitialized()) {
        // MessageLite doesn't support reflection so this is the best we can do.
        failWithoutActual(
            simpleFact("expected to have all required fields set"),
            fact("but was", actualCustomStringRepresentationForProtoPackageMembersToCall()),
            simpleFact("(Lite runtime could not determine which fields were missing.)"));
    }
}

@Test
public void testHasAllRequiredFields_failures() {
    if (!config.messageWithoutRequiredFields().isPresent()) {
        return;
    }

    AssertionError e =
        expectFailure(
            whenTesting ->
                whenTesting
                    .that(config.messageWithoutRequiredFields().get())
                    .hasAllRequiredFields());
    expectRegex(
        e,
        "expected to have all required fields set\\s*but was: .*\\(Lite runtime could not"
            + " determine which fields were missing.\\)");
}
@Override
public void define(Context context) {
    NewController controller = context.createController("api/sources")
        .setSince("4.2")
        .setDescription("Get details on source files. See also api/tests.");
    for (SourcesWsAction action : actions) {
        action.define(controller);
    }
    controller.done();
}

@Test
public void define_ws() {
    SourcesWsAction[] actions = IntStream.range(0, 1 + new Random().nextInt(10))
        .mapToObj(i -> {
            SourcesWsAction wsAction = mock(SourcesWsAction.class);
            doAnswer(invocation -> {
                WebService.NewController controller = invocation.getArgument(0);
                controller.createAction("action_" + i)
                    .setHandler(wsAction);
                return null;
            }).when(wsAction).define(any(WebService.NewController.class));
            return wsAction;
        })
        .toArray(SourcesWsAction[]::new);
    SourcesWs underTest = new SourcesWs(actions);
    WebService.Context context = new WebService.Context();

    underTest.define(context);

    WebService.Controller controller = context.controller("api/sources");
    assertThat(controller).isNotNull();
    assertThat(controller.since()).isEqualTo("4.2");
    assertThat(controller.description()).isNotEmpty();
    assertThat(controller.actions()).hasSize(actions.length);
}
@Override
public void handlerRule(final RuleData ruleData) {
    super.getWasmExtern(HANDLER_RULE_METHOD_NAME)
        .ifPresent(handlerPlugin -> callWASI(ruleData, handlerPlugin));
}

@Test
public void handlerRuleTest() {
    pluginDataHandler.handlerRule(ruleData);
    testWasmPluginDataHandler.handlerRule(ruleData);
}
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}

@Test
public void testValueIgnoreNonNumber() throws Exception {
    JmxCollector jc =
        new JmxCollector(
                "\n---\nrules:\n- pattern: `.*`\n name: foo\n value: a".replace('`', '"'))
            .register(prometheusRegistry);
    assertNull(getSampleValue("foo", new String[] {}, new String[] {}));
}
public CoercedExpressionResult coerce() {
    final Class<?> leftClass = left.getRawClass();
    final Class<?> nonPrimitiveLeftClass = toNonPrimitiveType(leftClass);
    final Class<?> rightClass = right.getRawClass();
    final Class<?> nonPrimitiveRightClass = toNonPrimitiveType(rightClass);

    boolean sameClass = leftClass == rightClass;
    boolean isUnificationExpression = left instanceof UnificationTypedExpression || right instanceof UnificationTypedExpression;
    if (sameClass || isUnificationExpression) {
        return new CoercedExpressionResult(left, right);
    }

    if (!canCoerce()) {
        throw new CoercedExpressionException(new InvalidExpressionErrorResult(
            "Comparison operation requires compatible types. Found " + leftClass + " and " + rightClass));
    }

    if ((nonPrimitiveLeftClass == Integer.class || nonPrimitiveLeftClass == Long.class) && nonPrimitiveRightClass == Double.class) {
        CastExpr castExpression = new CastExpr(PrimitiveType.doubleType(), this.left.getExpression());
        return new CoercedExpressionResult(
            new TypedExpression(castExpression, double.class, left.getType()),
            right,
            false);
    }

    final boolean leftIsPrimitive = leftClass.isPrimitive() || Number.class.isAssignableFrom(leftClass);
    final boolean canCoerceLiteralNumberExpr = canCoerceLiteralNumberExpr(leftClass);

    boolean rightAsStaticField = false;
    final Expression rightExpression = right.getExpression();
    final TypedExpression coercedRight;
    if (leftIsPrimitive && canCoerceLiteralNumberExpr && rightExpression instanceof LiteralStringValueExpr) {
        final Expression coercedLiteralNumberExprToType =
            coerceLiteralNumberExprToType((LiteralStringValueExpr) right.getExpression(), leftClass);
        coercedRight = right.cloneWithNewExpression(coercedLiteralNumberExprToType);
        coercedRight.setType(leftClass);
    } else if (shouldCoerceBToString(left, right)) {
        coercedRight = coerceToString(right);
    } else if (isNotBinaryExpression(right) && canBeNarrowed(leftClass, rightClass) && right.isNumberLiteral()) {
        coercedRight = castToClass(leftClass);
    } else if (leftClass == long.class && rightClass == int.class) {
        coercedRight = right.cloneWithNewExpression(new CastExpr(PrimitiveType.longType(), right.getExpression()));
    } else if (leftClass == Date.class && rightClass == String.class) {
        coercedRight = coerceToDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDate.class && rightClass == String.class) {
        coercedRight = coerceToLocalDate(right);
        rightAsStaticField = true;
    } else if (leftClass == LocalDateTime.class && rightClass == String.class) {
        coercedRight = coerceToLocalDateTime(right);
        rightAsStaticField = true;
    } else if (shouldCoerceBToMap()) {
        coercedRight = castToClass(toNonPrimitiveType(leftClass));
    } else if (isBoolean(leftClass) && !isBoolean(rightClass)) {
        coercedRight = coerceBoolean(right);
    } else {
        coercedRight = right;
    }

    final TypedExpression coercedLeft;
    if (nonPrimitiveLeftClass == Character.class && shouldCoerceBToString(right, left)) {
        coercedLeft = coerceToString(left);
    } else {
        coercedLeft = left;
    }

    return new CoercedExpressionResult(coercedLeft, coercedRight, rightAsStaticField);
}

@Test
public void testStringToBooleanTrue() {
    final TypedExpression left = expr(THIS_PLACEHOLDER + ".getBooleanValue", Boolean.class);
    final TypedExpression right = expr("\"true\"", String.class);
    final CoercedExpression.CoercedExpressionResult coerce = new CoercedExpression(left, right, false).coerce();
    assertThat(coerce.getCoercedRight()).isEqualTo(expr("true", Boolean.class));
}
@VisibleForTesting
static int checkJar(Path file) throws Exception {
    final URI uri = file.toUri();

    int numSevereIssues = 0;
    try (final FileSystem fileSystem =
            FileSystems.newFileSystem(
                new URI("jar:file", uri.getHost(), uri.getPath(), uri.getFragment()),
                Collections.emptyMap())) {
        if (isTestJarAndEmpty(file, fileSystem.getPath("/"))) {
            return 0;
        }
        if (!noticeFileExistsAndIsValid(fileSystem.getPath("META-INF", "NOTICE"), file)) {
            numSevereIssues++;
        }
        if (!licenseFileExistsAndIsValid(fileSystem.getPath("META-INF", "LICENSE"), file)) {
            numSevereIssues++;
        }

        numSevereIssues += getNumLicenseFilesOutsideMetaInfDirectory(file, fileSystem.getPath("/"));
        numSevereIssues += getFilesWithIncompatibleLicenses(file, fileSystem.getPath("/"));
    }
    return numSevereIssues;
}

@Test
void testRejectedOnInvalidLicenseFile(@TempDir Path tempDir) throws Exception {
    assertThat(
            JarFileChecker.checkJar(
                createJar(
                    tempDir,
                    Entry.fileEntry(VALID_NOTICE_CONTENTS, VALID_NOTICE_PATH),
                    Entry.fileEntry(INVALID_LICENSE_CONTENTS, VALID_LICENSE_PATH))))
        .isEqualTo(1);
}
public SecureRandom createSecureRandom() throws NoSuchProviderException, NoSuchAlgorithmException {
    try {
        return getProvider() != null
            ? SecureRandom.getInstance(getAlgorithm(), getProvider())
            : SecureRandom.getInstance(getAlgorithm());
    } catch (NoSuchProviderException ex) {
        throw new NoSuchProviderException("no such secure random provider: " + getProvider());
    } catch (NoSuchAlgorithmException ex) {
        throw new NoSuchAlgorithmException("no such secure random algorithm: " + getAlgorithm());
    }
}

@Test
public void testDefaults() throws Exception {
    Assertions.assertNotNull(factoryBean.createSecureRandom());
}