focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Maps {@code throwable} to a {@code TriRpcStatus} with no extra description.
 * Convenience overload of {@code getStatus(Throwable, String)}.
 */
public static TriRpcStatus getStatus(Throwable throwable) {
    return getStatus(throwable, null);
}
// Verifies exception-to-status mapping: a StatusRpcException keeps its embedded
// code, while both RpcException(TIMEOUT_EXCEPTION) and a TimeoutException map
// to DEADLINE_EXCEEDED.
@Test void testGetStatus() { StatusRpcException rpcException = new StatusRpcException(TriRpcStatus.INTERNAL); Assertions.assertEquals(TriRpcStatus.INTERNAL.code, TriRpcStatus.getStatus(rpcException, null).code); Assertions.assertEquals( TriRpcStatus.DEADLINE_EXCEEDED.code, TriRpcStatus.getStatus(new RpcException(RpcException.TIMEOUT_EXCEPTION), null).code); Assertions.assertEquals( TriRpcStatus.DEADLINE_EXCEEDED.code, TriRpcStatus.getStatus(new TimeoutException(true, null, null), null).code); }
/** Returns the shared timer tracking raft apply-read latency. */
public static Timer getRaftApplyReadTimer() {
    return RAFT_APPLY_READ_TIMER;
}
// Records two durations on the raft-apply-read timer and checks the
// accumulated total in both minutes (0.5) and seconds (30).
// NOTE(review): the timer appears to be static/shared — presumably no other
// test records into it first; confirm test isolation.
@Test void testRaftApplyReadTimer() { Timer raftApplyReadTimer = MetricsMonitor.getRaftApplyReadTimer(); raftApplyReadTimer.record(10, TimeUnit.SECONDS); raftApplyReadTimer.record(20, TimeUnit.SECONDS); assertEquals(0.5D, raftApplyReadTimer.totalTime(TimeUnit.MINUTES), 0.01); assertEquals(30D, raftApplyReadTimer.totalTime(TimeUnit.SECONDS), 0.01); }
/**
 * Reads one dataset from the JSON stream into {@code attrs}, allocating a
 * fresh {@code Attributes} when the argument is null. Tolerates the dataset
 * being wrapped in a single-element JSON array. Any previously parsed
 * file-meta information is discarded before parsing.
 */
public Attributes readDataset(Attributes attrs) {
    final boolean arrayWrapped = next() == Event.START_ARRAY;
    if (arrayWrapped) {
        next();
    }
    expect(Event.START_OBJECT);
    Attributes target = (attrs == null) ? new Attributes() : attrs;
    fmi = null;
    next();
    doReadDataset(target);
    if (arrayWrapped) {
        next();
    }
    return target;
}
// Parses a JSON-encoded DICOM dataset and checks that string, floating-point
// (including Infinity/NaN), signed/unsigned int and 64-bit selector values
// decode to the expected arrays.
@Test
public void test() {
    StringReader reader = new StringReader(JSON);
    JsonParser parser = Json.createParser(reader);
    Attributes dataset = new JSONReader(parser).readDataset(null);
    assertArrayEquals(IS, dataset.getStrings(Tag.SelectorISValue));
    assertArrayEquals(DS, dataset.getStrings(Tag.SelectorDSValue));
    assertInfinityAndNaN(dataset.getDoubles(Tag.SelectorFDValue));
    assertInfinityAndNaN(dataset.getFloats(Tag.SelectorFLValue));
    assertArrayEquals(INTS, dataset.getInts(Tag.SelectorULValue));
    assertArrayEquals(UINTS, dataset.getInts(Tag.SelectorUSValue));
    // NOTE(review): this assertion was duplicated verbatim in the original;
    // the copy was removed. Possibly one occurrence was meant to cover a
    // different tag (e.g. a signed-short selector) — confirm intent.
    assertArrayEquals(INTS, dataset.getInts(Tag.SelectorSLValue));
    assertArrayEquals(LONGS, dataset.getLongs(Tag.SelectorSVValue));
    assertArrayEquals(LONGS, dataset.getLongs(Tag.SelectorUVValue));
}
/**
 * Refreshes the stored public ids of one extension and its namespace from
 * upstream. Built-in extensions are skipped entirely. Database writes are
 * issued only when the refresh produced actual changes.
 */
public void update(String namespaceName, String extensionName) throws InterruptedException {
    if (BuiltInExtensionUtil.isBuiltIn(namespaceName)) {
        LOGGER.debug("SKIP BUILT-IN EXTENSION {}", NamingUtil.toExtensionId(namespaceName, extensionName));
        return;
    }

    var extension = repositories.findPublicId(namespaceName, extensionName);

    var extensionChanges = new HashMap<Long, String>();
    updateExtensionPublicId(extension, extensionChanges, false);
    if (!extensionChanges.isEmpty()) {
        repositories.updateExtensionPublicIds(extensionChanges);
    }

    var namespaceChanges = new HashMap<Long, String>();
    updateNamespacePublicId(extension, namespaceChanges, false);
    if (!namespaceChanges.isEmpty()) {
        repositories.updateNamespacePublicIds(namespaceChanges);
    }
}
// Stubs the repository and upstream-id service, then verifies update() writes
// exactly one extension public id and one namespace public id, each keyed by
// the corresponding entity id and carrying the upstream value.
@Test public void testUpdateUpstream() throws InterruptedException { var namespaceName = "foo"; var namespacePublicId = "123-456-789"; var extensionName = "bar"; var extensionPublicId = "abc-def-ghi"; var namespace = new Namespace(); namespace.setId(1L); namespace.setName(namespaceName); namespace.setPublicId("zzz-zzz-zzz"); var extension = new Extension(); extension.setId(2L); extension.setName(extensionName); extension.setPublicId("000-000-000"); extension.setNamespace(namespace); Mockito.when(repositories.findPublicId(namespaceName, extensionName)).thenReturn(extension); Mockito.when(idService.getUpstreamPublicIds(extension)).thenReturn(new PublicIds(namespacePublicId, extensionPublicId)); updateService.update(namespaceName, extensionName); Mockito.verify(repositories).updateExtensionPublicIds(Mockito.argThat((Map<Long, String> map) -> { return map.size() == 1 && map.get(extension.getId()).equals(extensionPublicId); })); Mockito.verify(repositories).updateNamespacePublicIds(Mockito.argThat((Map<Long, String> map) -> { return map.size() == 1 && map.get(namespace.getId()).equals(namespacePublicId); })); }
/**
 * Joins {@code subPackage} onto the configured parent package with a dot.
 * When no parent is configured the sub-package is returned unchanged.
 */
@NotNull
public String joinPackage(String subPackage) {
    String parentPackage = getParent();
    if (StringUtils.isBlank(parentPackage)) {
        return subPackage;
    }
    return parentPackage + StringPool.DOT + subPackage;
}
// Covers the four parent/module combinations: default parent, default parent
// with module name, explicit parent, and explicit parent with module name.
@Test void joinPackageTest() { Assertions.assertEquals("com.baomidou.demo", GeneratorBuilder.packageConfigBuilder().joinPackage("demo")); Assertions.assertEquals("com.baomidou.mp.demo", GeneratorBuilder.packageConfigBuilder().moduleName("mp").joinPackage("demo")); Assertions.assertEquals("com.baomihua.demo", GeneratorBuilder.packageConfigBuilder().parent("com.baomihua").joinPackage("demo")); Assertions.assertEquals("com.baomihua.mp.demo", GeneratorBuilder.packageConfigBuilder().parent("com.baomihua").moduleName("mp").joinPackage("demo")); }
/**
 * Resolves a split-brain merge policy by class name, constructing and caching
 * the instance on first use.
 *
 * @throws InvalidConfigurationException when {@code className} is null
 */
public SplitBrainMergePolicy getMergePolicy(String className) {
    if (className == null) {
        throw new InvalidConfigurationException("Class name is mandatory!");
    }
    return getOrPutIfAbsent(mergePolicyMap, className, policyConstructorFunction);
}
// An unknown policy class name must surface as InvalidConfigurationException
// whose cause is the underlying ClassNotFoundException.
@Test public void getMergePolicy_withNotExistingMergePolicy() { assertThatThrownBy(() -> mergePolicyProvider.getMergePolicy("No such policy!")) .isInstanceOf(InvalidConfigurationException.class) .hasCauseInstanceOf(ClassNotFoundException.class); }
/** Forwards key-release events untouched to the wrapped parent listener. */
@Override
public void onRelease(int primaryCode) {
    mParentListener.listener().onRelease(primaryCode);
}
// Verifies the release event is forwarded verbatim to the parent listener,
// nothing else reaches it, and the dismiss action is never touched.
@Test
public void testOnRelease() {
    mUnderTest.onRelease(66);
    Mockito.verify(mMockParentListener).onRelease(66);
    Mockito.verifyNoMoreInteractions(mMockParentListener);
    // verifyZeroInteractions was deprecated in Mockito 3.0.1 and removed in
    // Mockito 5; verifyNoInteractions is the direct replacement.
    Mockito.verifyNoInteractions(mMockKeyboardDismissAction);
}
/**
 * Dispatches bit-packed decoding to the widest SIMD path the current CPU
 * supports (AVX-512 when available), otherwise the scalar batch reader.
 */
public static void read(int bitWidth, ByteBufferInputStream in, int currentCount, int[] currentBuffer)
        throws IOException {
    if (vectorSupport == VectorSupport.VECTOR_512) {
        readBatchUsing512Vector(bitWidth, in, currentCount, currentBuffer);
    } else {
        readBatch(bitWidth, in, currentCount, currentBuffer);
    }
}
// For every supported bit width, decodes the same packed input with the
// router and the scalar batch reader (and, when the CPU supports AVX-512,
// the vectorized reader) and expects identical output arrays.
// NOTE(review): all readers share one ByteBufferInputStream with no rewind
// between calls — presumably a count of 0 means each call consumes nothing
// or the readers re-wrap the buffer; confirm the stream position semantics.
// NOTE(review): Assume.assumeTrue appears after the first assertions, so the
// scalar portion always runs even on non-AVX-512 machines.
@Test public void testRead() throws IOException { for (int bitWidth = minBitWidth; bitWidth <= maxBitWidth; bitWidth++) { byte[] input = new byte[outputValues * bitWidth / 8]; for (int i = 0; i < input.length; i++) { input[i] = (byte) i; } ByteBufferInputStream inputStream = ByteBufferInputStream.wrap(ByteBuffer.wrap(input)); ParquetReadRouter.read(bitWidth, inputStream, 0, output); ParquetReadRouter.readBatch(bitWidth, inputStream, 0, outputBatch); assertArrayEquals(output, outputBatch); Assume.assumeTrue(ParquetReadRouter.getSupportVectorFromCPUFlags() == VectorSupport.VECTOR_512); ParquetReadRouter.readBatchUsing512Vector(bitWidth, inputStream, 0, outputBatchVector); assertArrayEquals(output, outputBatchVector); } }
/**
 * Validates a RESET SESSION statement and records the property to reset on
 * the query's state machine. A one-part name must be a known system session
 * property; a two-part name must name an existing catalog and one of its
 * connector session properties. Completes synchronously.
 *
 * (Reformatted from a single flattened line; the embedded // comment made the
 * one-line form invalid. Code tokens are unchanged.)
 */
@Override
public ListenableFuture<?> execute(ResetSession statement, TransactionManager transactionManager, Metadata metadata, AccessControl accessControl, QueryStateMachine stateMachine, List<Expression> parameters) {
    List<String> parts = statement.getName().getParts();
    if (parts.size() > 2) {
        throw new SemanticException(INVALID_SESSION_PROPERTY, statement, "Invalid session property '%s'", statement.getName());
    }

    // validate the property name
    if (parts.size() == 1) {
        metadata.getSessionPropertyManager().getSystemSessionPropertyMetadata(parts.get(0))
                .orElseThrow(() -> new SemanticException(INVALID_SESSION_PROPERTY, statement, "Session property %s does not exist", statement.getName()));
    } else {
        ConnectorId connectorId = metadata.getCatalogHandle(stateMachine.getSession(), parts.get(0))
                .orElseThrow(() -> new SemanticException(MISSING_CATALOG, statement, "Catalog %s does not exist", parts.get(0)));
        metadata.getSessionPropertyManager().getConnectorSessionPropertyMetadata(connectorId, parts.get(1))
                .orElseThrow(() -> new SemanticException(INVALID_SESSION_PROPERTY, statement, "Session property %s does not exist", statement.getName()));
    }

    stateMachine.addResetSessionProperties(statement.getName().toString());
    return immediateFuture(null);
}
// Executes RESET SESSION on a catalog-scoped property and verifies the state
// machine records it under its qualified "catalog.property" name.
@Test public void test() { Session session = testSessionBuilder(metadata.getSessionPropertyManager()) .setSystemProperty("foo", "bar") .setCatalogSessionProperty(CATALOG_NAME, "baz", "blah") .build(); QueryStateMachine stateMachine = createQueryStateMachine("reset foo", session, false, transactionManager, executor, metadata); ResetSessionTask resetSessionTask = new ResetSessionTask(); getFutureValue(resetSessionTask.execute( new ResetSession(QualifiedName.of(CATALOG_NAME, "baz")), transactionManager, metadata, accessControl, stateMachine, emptyList())); Set<String> sessionProperties = stateMachine.getResetSessionProperties(); assertEquals(sessionProperties, ImmutableSet.of("catalog.baz")); }
/**
 * Looks up a database by name, returning a lightweight handle when the name
 * appears in the catalog listing. Absent databases and connector failures
 * both yield null.
 */
@Override
public Database getDb(String name) {
    try {
        return listDbNames().contains(name) ? new Database(0, name) : null;
    } catch (StarRocksConnectorException e) {
        // Treat connector failures as "database not found".
        return null;
    }
}
// getDb must return a handle carrying the requested name when it is present
// in the JDBC catalog listing.
// Exceptions now propagate instead of being caught and turned into a bare
// Assert.fail(), so a failure report keeps the original cause and stack.
@Test
public void testGetDb() throws Exception {
    JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
    dbResult.beforeFirst();
    Database db = jdbcMetadata.getDb("test");
    Assert.assertEquals("test", db.getOriginName());
}
/** Stores the template contents after expanding optional placeholders. */
public void setContents(String contents) {
    this.contents = replaceOptionals(contents);
}
// Sets plain contents and expects them back with a trailing newline —
// presumably appended by replaceOptionals(); confirm that contract.
@Test public void testSetContents() { RuleTemplate rt = new RuleTemplate("rt1", getTemplateContainer()); rt.setContents("Test template"); assertThat(rt.getContents()).isEqualTo("Test template\n"); }
/**
 * Returns the SKIP LOCKED locking clause for this dialect, or an empty
 * string when the connected database version does not support it.
 */
@Override
public String selectForUpdateSkipLocked() {
    if (supportsSelectForUpdateSkipLocked) {
        return " FOR UPDATE SKIP LOCKED";
    }
    return "";
}
// MySQL 8.0.1 introduced SKIP LOCKED, so the dialect must emit the clause.
@Test void mySQL801DoesSupportSelectForUpdateSkipLocked() { assertThat(new MySqlDialect("MySQL", "8.0.1").selectForUpdateSkipLocked()) .isEqualTo(" FOR UPDATE SKIP LOCKED"); }
/**
 * Creates a latency probe for the given data structure method, lazily
 * registering the per-service probe container on first use.
 */
public LatencyProbe newProbe(String serviceName, String dataStructureName, String methodName) {
    final ServiceProbes probes =
            getOrPutIfAbsent(metricsPerServiceMap, serviceName, metricsPerServiceConstructorFunction);
    return probes.newProbe(dataStructureName, methodName);
}
// Smoke test: creating a probe for an arbitrary service/structure/method
// must yield a non-null probe.
@Test public void getProbe() { LatencyProbe probe = plugin.newProbe("foo", "queue", "somemethod"); assertNotNull(probe); }
/**
 * Routing is intentionally unsupported by this router.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public List<ProviderInfo> route(SofaRequest request, List<ProviderInfo> providerInfos) {
    throw new UnsupportedOperationException();
}
// route() must be unsupported on ExcludeRouter. Assert.assertThrows
// (JUnit 4.13+) pins the exact exception type and reports any unexpected
// exception with its stack trace, unlike the old try/catch boolean-flag
// pattern which collapsed all failures into a bare assertTrue.
@Test
public void invoke() throws Exception {
    Assert.assertThrows(UnsupportedOperationException.class,
            () -> new ExcludeRouter("*").route(null, null));
}
/**
 * Pre-formats a log record into four HTML fragments: three metadata parts
 * plus the message. Metadata parts identical to the prior record get the
 * "logrecord-metadata-old" span class, changed parts "logrecord-metadata-new".
 * The message (index 3) is XML-escaped rather than wrapped in a span — hence
 * the deliberate loop bound of 3, not 4.
 */
@Restricted(NoExternalUse.class) public static String[] printLogRecordHtml(LogRecord r, LogRecord prior) { String[] oldParts = prior == null ? new String[4] : logRecordPreformat(prior); String[] newParts = logRecordPreformat(r); for (int i = 0; i < /* not 4 */3; i++) { newParts[i] = "<span class='" + (newParts[i].equals(oldParts[i]) ? "logrecord-metadata-old" : "logrecord-metadata-new") + "'>" + newParts[i] + "</span>"; } newParts[3] = Util.xmlEscape(newParts[3]); return newParts; }
// JENKINS-20800: the message fragment (index 3) must be XML-escaped; the
// formatter also yields a trailing newline on the message part.
@Issue("JENKINS-20800") @Test public void printLogRecordHtml() { LogRecord lr = new LogRecord(Level.INFO, "Bad input <xml/>"); lr.setLoggerName("test"); assertEquals("Bad input &lt;xml/&gt;\n", Functions.printLogRecordHtml(lr, null)[3]); }
/**
 * Serializes a view version as a JSON object with fields in this order:
 * version-id, timestamp-ms, schema-id, summary, default-catalog (only when
 * set), default-namespace, representations.
 *
 * @throws IllegalArgumentException when {@code version} is null
 */
public static void toJson(ViewVersion version, JsonGenerator generator) throws IOException {
    Preconditions.checkArgument(version != null, "Cannot serialize null view version");
    generator.writeStartObject();

    generator.writeNumberField(VERSION_ID, version.versionId());
    generator.writeNumberField(TIMESTAMP_MS, version.timestampMillis());
    generator.writeNumberField(SCHEMA_ID, version.schemaId());
    JsonUtil.writeStringMap(SUMMARY, version.summary(), generator);

    String catalog = version.defaultCatalog();
    if (catalog != null) {
        generator.writeStringField(DEFAULT_CATALOG, catalog);
    }
    JsonUtil.writeStringArray(DEFAULT_NAMESPACE, Arrays.asList(version.defaultNamespace().levels()), generator);

    generator.writeArrayFieldStart(REPRESENTATIONS);
    for (ViewRepresentation rep : version.representations()) {
        ViewRepresentationParser.toJson(rep, generator);
    }
    generator.writeEndArray();

    generator.writeEndObject();
}
// Serializes a fully-populated view version (two SQL representations,
// summary, default catalog/namespace) and compares against the exact
// expected JSON, pinning the field order.
@Test public void testSerializeViewVersion() { SQLViewRepresentation firstRepresentation = ImmutableSQLViewRepresentation.builder() .sql("select * from foo") .dialect("spark-sql") .build(); SQLViewRepresentation secondRepresentation = ImmutableSQLViewRepresentation.builder() .sql("select a, b, c from foo") .dialect("some-sql") .build(); ViewVersion viewVersion = ImmutableViewVersion.builder() .versionId(1) .timestampMillis(12345) .addRepresentations(firstRepresentation, secondRepresentation) .summary(ImmutableMap.of("user", "some-user")) .defaultNamespace(Namespace.of("one", "two")) .defaultCatalog("catalog") .schemaId(1) .build(); String expectedRepresentations = "[{\"type\":\"sql\",\"sql\":\"select * from foo\",\"dialect\":\"spark-sql\"}," + "{\"type\":\"sql\",\"sql\":\"select a, b, c from foo\",\"dialect\":\"some-sql\"}]"; String expectedViewVersion = String.format( "{\"version-id\":1,\"timestamp-ms\":12345,\"schema-id\":1,\"summary\":{\"user\":\"some-user\"}," + "\"default-catalog\":\"catalog\",\"default-namespace\":[\"one\",\"two\"],\"representations\":%s}", expectedRepresentations); assertThat(ViewVersionParser.toJson(viewVersion)) .as("Should be able to serialize valid view version") .isEqualTo(expectedViewVersion); }
/**
 * Shortens a string to its first {@code headLength} and last
 * {@code tailLength} characters joined by "...". Strings that are null,
 * empty, or already no longer than {@code headLength + tailLength} are
 * returned unchanged.
 *
 * @throws IllegalArgumentException if either length is negative
 */
public static String truncate(String str, int headLength, int tailLength) {
    if (headLength < 0 || tailLength < 0) {
        throw new IllegalArgumentException(
                "headLength and tailLength must be non-negative: " + headLength + ", " + tailLength);
    }
    // Inlined null/empty check keeps the method self-contained; the long cast
    // avoids int overflow when both lengths are huge.
    if (str == null || str.isEmpty() || str.length() <= (long) headLength + tailLength) {
        return str;
    }
    String head = str.substring(0, headLength);
    String tail = str.substring(str.length() - tailLength);
    return head + "..." + tail;
}
// Covers null pass-through, truncation of a long URL to head...tail, and
// short strings returned unchanged.
@Test public void testTruncate() { assertNull(StringUtils.truncate(null, 10, 10)); assertEquals("http://use...ons/latest", StringUtils.truncate("http://username:password@myregistry.com:5000/versions/latest", 10, 10)); assertEquals("http://abc.com", StringUtils.truncate("http://abc.com", 10, 10)); }
/**
 * Parses a whitespace/comma separated list of {@code key=value} records into
 * an unmodifiable map. Keys and values are trimmed; a null input yields an
 * empty map.
 *
 * @throws IllegalArgumentException if a record contains no '=' separator
 *         (IllegalArgumentException is a RuntimeException, so existing
 *         callers catching RuntimeException are unaffected; the message now
 *         names the offending record)
 */
public static Map<String, String> parseMap(String str) {
    if (str == null) {
        return Collections.emptyMap();
    }
    StringTokenizer tok = new StringTokenizer(str, ", \t\n\r");
    Map<String, String> map = new HashMap<>();
    while (tok.hasMoreTokens()) {
        String record = tok.nextToken();
        int sep = record.indexOf('=');
        if (sep == -1) {
            throw new IllegalArgumentException(
                    "Failed to parse Map from String: no '=' in record \"" + record + "\"");
        }
        map.put(record.substring(0, sep).trim(), record.substring(sep + 1).trim());
    }
    return Collections.unmodifiableMap(map);
}
// Two newline-separated records must parse into exactly two entries.
@Test public void testParseMap() { String stringMap = "key1=value1\n" + "key2=value2"; Map<String, String> m = parseMap(stringMap); assertThat(m, aMapWithSize(2)); assertThat(m, hasEntry("key1", "value1")); assertThat(m, hasEntry("key2", "value2")); }
/** Formats the date in ISO-8601 using UTC, without a milliseconds field. */
public static String format(Date date) {
    return format(date, false, TIMEZONE_UTC);
}
// A UTC midnight date must format as "yyyy-MM-dd'T'HH:mm:ss'Z'" with no
// millisecond component.
@Test public void testDateFormatString() { GregorianCalendar calendar = new GregorianCalendar(utcTimeZone(), Locale.US); // Calendar was created with current time, must clear it calendar.clear(); calendar.set(2018, Calendar.JUNE, 25); Date date = calendar.getTime(); String dateStr = ISO8601Utils.format(date); String expectedDate = "2018-06-25T00:00:00Z"; assertThat(dateStr).isEqualTo(expectedDate); }
/** Strips any trailing comment from {@code line} via a one-shot parser. */
static String strip(final String line) {
    return new Parser(line).parse();
}
// A line with no comment must be returned as the SAME instance (identity,
// not just equality) — i.e. no copying on the fast path.
@Test public void shouldReturnLineWithoutCommentAsIs() { // Given: final String line = "no comment here"; // Then: assertThat(CommentStripper.strip(line), is(sameInstance(line)));
}
/**
 * Validates the given internal topics and creates any that are missing,
 * looping with back-off until every topic exists with the expected partition
 * count or the retry deadline passes. Returns the names of topics this call
 * actually created. TopicExists failures are retried (the broker may still
 * be deleting an old instance); an unsupported default-replication-factor
 * error or any other creation failure surfaces as StreamsException; running
 * past the deadline raises TimeoutException.
 *
 * (Reformatted from flattened lines; the embedded // comments made the
 * one-line form invalid. Code tokens are unchanged.)
 */
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
    // we will do the validation / topic-creation in a loop, until we have confirmed all topics
    // have existed with the expected number of partitions, or some create topic returns fatal errors.
    log.debug("Starting to validate internal topics {} in partition assignor.", topics);

    long currentWallClockMs = time.milliseconds();
    final long deadlineMs = currentWallClockMs + retryTimeoutMs;

    Set<String> topicsNotReady = new HashSet<>(topics.keySet());
    final Set<String> newlyCreatedTopics = new HashSet<>();

    while (!topicsNotReady.isEmpty()) {
        final Set<String> tempUnknownTopics = new HashSet<>();
        topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
        newlyCreatedTopics.addAll(topicsNotReady);

        if (!topicsNotReady.isEmpty()) {
            final Set<NewTopic> newTopics = new HashSet<>();
            for (final String topicName : topicsNotReady) {
                if (tempUnknownTopics.contains(topicName)) {
                    // for the tempUnknownTopics, don't create topic for them
                    // we'll check again later if remaining retries > 0
                    continue;
                }
                final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
                final Map<String, String> topicConfig = internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
                log.debug("Going to create topic {} with {} partitions and config {}.", internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), topicConfig);
                newTopics.add(
                    new NewTopic(
                        internalTopicConfig.name(),
                        internalTopicConfig.numberOfPartitions(),
                        Optional.of(replicationFactor))
                        .configs(topicConfig));
            }

            // it's possible that although some topics are not ready yet because they
            // are temporarily not available, not that they do not exist; in this case
            // the new topics to create may be empty and hence we can skip here
            if (!newTopics.isEmpty()) {
                final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);

                for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
                    final String topicName = createTopicResult.getKey();
                    try {
                        createTopicResult.getValue().get();
                        topicsNotReady.remove(topicName);
                    } catch (final InterruptedException fatalException) {
                        // this should not happen; if it ever happens it indicate a bug
                        Thread.currentThread().interrupt();
                        log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
                        throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
                    } catch (final ExecutionException executionException) {
                        final Throwable cause = executionException.getCause();
                        if (cause instanceof TopicExistsException) {
                            // This topic didn't exist earlier or its leader not known before; just retain it for next round of validation.
                            log.info(
                                "Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n" +
                                "Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n" +
                                "Error message was: {}",
                                topicName, retryBackOffMs, cause.toString());
                        } else {
                            log.error("Unexpected error during topic creation for {}.\n" +
                                "Error message was: {}", topicName, cause.toString());

                            if (cause instanceof UnsupportedVersionException) {
                                final String errorMessage = cause.getMessage();
                                if (errorMessage != null &&
                                    errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
                                    throw new StreamsException(String.format(
                                        "Could not create topic %s, because brokers don't support configuration replication.factor=-1."
                                            + " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
                                        topicName)
                                    );
                                }
                            } else if (cause instanceof TimeoutException) {
                                log.error("Creating topic {} timed out.\n" +
                                    "Error message was: {}", topicName, cause.toString());
                            } else {
                                throw new StreamsException(
                                    String.format("Could not create topic %s.", topicName),
                                    cause
                                );
                            }
                        }
                    }
                }
            }
        }

        if (!topicsNotReady.isEmpty()) {
            currentWallClockMs = time.milliseconds();

            if (currentWallClockMs >= deadlineMs) {
                final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
                log.error(timeoutError);
                throw new TimeoutException(timeoutError);
            }

            log.info(
                "Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
                topicsNotReady,
                retryBackOffMs,
                deadlineMs - currentWallClockMs
            );

            Utils.sleep(retryBackOffMs);
        }
    }
    log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);

    return newlyCreatedTopics;
}
// Creates one topic of each internal-config flavor (repartition plus
// unwindowed/windowed/versioned changelog) via makeReady, then checks all
// four exist with a single partition and carry the expected cleanup.policy
// (delete / compact / compact,delete / compact respectively).
@Test public void shouldCreateRequiredTopics() throws Exception { final InternalTopicConfig topicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap()); topicConfig.setNumberOfPartitions(1); final InternalTopicConfig topicConfig2 = new UnwindowedUnversionedChangelogTopicConfig(topic2, Collections.emptyMap()); topicConfig2.setNumberOfPartitions(1); final InternalTopicConfig topicConfig3 = new WindowedChangelogTopicConfig(topic3, Collections.emptyMap(), 10); topicConfig3.setNumberOfPartitions(1); final InternalTopicConfig topicConfig4 = new VersionedChangelogTopicConfig(topic4, Collections.emptyMap(), 12); topicConfig4.setNumberOfPartitions(1); internalTopicManager.makeReady(Collections.singletonMap(topic1, topicConfig)); internalTopicManager.makeReady(Collections.singletonMap(topic2, topicConfig2)); internalTopicManager.makeReady(Collections.singletonMap(topic3, topicConfig3)); internalTopicManager.makeReady(Collections.singletonMap(topic4, topicConfig4)); assertEquals(mkSet(topic1, topic2, topic3, topic4), mockAdminClient.listTopics().names().get()); assertEquals(new TopicDescription(topic1, false, new ArrayList<TopicPartitionInfo>() { { add(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); } }), mockAdminClient.describeTopics(Collections.singleton(topic1)).topicNameValues().get(topic1).get()); assertEquals(new TopicDescription(topic2, false, new ArrayList<TopicPartitionInfo>() { { add(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); } }), mockAdminClient.describeTopics(Collections.singleton(topic2)).topicNameValues().get(topic2).get()); assertEquals(new TopicDescription(topic3, false, new ArrayList<TopicPartitionInfo>() { { add(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); } }),
mockAdminClient.describeTopics(Collections.singleton(topic3)).topicNameValues().get(topic3).get()); assertEquals(new TopicDescription(topic4, false, new ArrayList<TopicPartitionInfo>() { { add(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); } }), mockAdminClient.describeTopics(Collections.singleton(topic4)).topicNameValues().get(topic4).get()); final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic1); final ConfigResource resource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2); final ConfigResource resource3 = new ConfigResource(ConfigResource.Type.TOPIC, topic3); final ConfigResource resource4 = new ConfigResource(ConfigResource.Type.TOPIC, topic4); assertEquals( new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE), mockAdminClient.describeConfigs(Collections.singleton(resource)).values().get(resource).get().get(TopicConfig.CLEANUP_POLICY_CONFIG) ); assertEquals( new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), mockAdminClient.describeConfigs(Collections.singleton(resource2)).values().get(resource2).get().get(TopicConfig.CLEANUP_POLICY_CONFIG) ); assertEquals( new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE), mockAdminClient.describeConfigs(Collections.singleton(resource3)).values().get(resource3).get().get(TopicConfig.CLEANUP_POLICY_CONFIG) ); assertEquals( new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), mockAdminClient.describeConfigs(Collections.singleton(resource4)).values().get(resource4).get().get(TopicConfig.CLEANUP_POLICY_CONFIG) ); }
/** Rewrites the given AST node by delegating to the configured rewriter. */
public AstNode rewrite(final AstNode node, final C context) {
    return rewriter.process(node, context);
}
// An EXPLAIN wrapping a query must come back as a new Explain whose inner
// query is the rewritten one, with location preserved.
@Test public void shouldRewriteExplainWithQuery() { // Given: final Explain explain = new Explain(location, Optional.empty(), Optional.of(query)); when(mockRewriter.apply(query, context)).thenReturn(rewrittenQuery); // When: final AstNode rewritten = rewriter.rewrite(explain, context); // Then: assertThat(rewritten, is(new Explain( location, Optional.empty(), Optional.of(rewrittenQuery) )));
}
/**
 * Returns the response type represented by this envelope.
 * NOTE(review): implemented by each concrete envelope subclass; the test
 * suite cross-checks it against ResponseTypeUtil's resource-method mapping.
 */
public abstract ResponseType getResponseType();
// Data-driven check: for every resource method whose response type is
// statically determined, the envelope's declared type must match the
// type ResponseTypeUtil derives from the method.
@Test(dataProvider = "envelopeResourceMethodDataProvider") public void testEnvelopeResponseType(RestLiResponseEnvelope responseEnvelope, ResourceMethod resourceMethod) { if (!ResponseTypeUtil.isDynamicallyDetermined(resourceMethod)) { ResponseType responseType = ResponseTypeUtil.fromMethodType(resourceMethod); Assert.assertEquals(responseEnvelope.getResponseType(), responseType); } }
/**
 * Returns every currently running message output plus the always-present
 * default output, as an immutable set.
 */
public Set<MessageOutput> getMessageOutputs() {
    final ImmutableSet.Builder<MessageOutput> outputs = ImmutableSet.builder();
    outputs.addAll(runningMessageOutputs.asMap().values());
    outputs.add(defaultMessageOutput);
    return outputs.build();
}
// With no outputs registered, the set must contain exactly one element:
// the default MessageOutput instance itself (same reference).
@Test public void testMessageOutputsIncludesDefault() { Set<MessageOutput> outputs = registry.getMessageOutputs(); assertSame("we should only have the default MessageOutput", Iterables.getOnlyElement(outputs, null), messageOutput); }
/**
 * Converts a Joda-Time instant to a {@code java.time.Instant}, preserving
 * the epoch milliseconds. A null input yields null.
 */
public static java.time.Instant toJava(Instant timestamp) {
    if (timestamp == null) {
        return null;
    }
    return java.time.Instant.ofEpochMilli(timestamp.getMillis());
}
// null maps to null; a Joda epoch-millis instant maps to the equivalent
// java.time instant.
@Test public void shouldConvertToJavaInstant() { assertThat(TimeUtil.toJava(null)).isNull(); assertThat(TimeUtil.toJava(org.joda.time.Instant.ofEpochMilli(0L))) .isEqualTo(java.time.Instant.ofEpochMilli(0L)); }
/**
 * Returns the cached NacosRestTemplate built from a default HTTP client
 * factory that logs through {@code logger}.
 */
public static NacosRestTemplate getNacosRestTemplate(Logger logger) {
    return getNacosRestTemplate(new DefaultHttpClientFactory(logger));
}
// Passing a null factory must raise NullPointerException.
// NOTE(review): the cast targets the factory-based overload directly, not
// the Logger overload — presumably the null check lives in the factory
// overload; confirm coverage of the Logger path separately.
@Test void testGetNacosRestTemplateForNullFactory() { assertThrows(NullPointerException.class, () -> { HttpClientBeanHolder.getNacosRestTemplate((HttpClientFactory) null); }); }
/**
 * Builds a stream-table join step: constructs the key and value serdes for
 * the left (stream) side from its declared formats, derives the join
 * projection from the key column name and both schemas, then applies a LEFT
 * or INNER Kafka Streams stream-table join. Any other join type (e.g. OUTER)
 * falls through to IllegalStateException. Returns the left holder rebound to
 * the joined stream and the combined schema.
 */
public static <K> KStreamHolder<K> build( final KStreamHolder<K> left, final KTableHolder<K> right, final StreamTableJoin<K> join, final RuntimeBuildContext buildContext, final JoinedFactory joinedFactory ) { final Formats leftFormats = join.getInternalFormats(); final QueryContext queryContext = join.getProperties().getQueryContext(); final QueryContext.Stacker stacker = QueryContext.Stacker.of(queryContext); final LogicalSchema leftSchema = left.getSchema(); final PhysicalSchema leftPhysicalSchema = PhysicalSchema.from( leftSchema, leftFormats.getKeyFeatures(), leftFormats.getValueFeatures() ); final Serde<GenericRow> leftSerde = buildContext.buildValueSerde( leftFormats.getValueFormat(), leftPhysicalSchema, stacker.push(SERDE_CTX).getQueryContext() ); final Serde<K> keySerde = left.getExecutionKeyFactory().buildKeySerde( leftFormats.getKeyFormat(), leftPhysicalSchema, queryContext ); final Joined<K, GenericRow, GenericRow> joined = joinedFactory.create( keySerde, leftSerde, null, StreamsUtil.buildOpName(queryContext) ); final LogicalSchema rightSchema = right.getSchema(); final JoinParams joinParams = JoinParamsFactory .create(join.getKeyColName(), leftSchema, rightSchema); final KStream<K, GenericRow> result; switch (join.getJoinType()) { case LEFT: result = left.getStream().leftJoin(right.getTable(), joinParams.getJoiner(), joined); break; case INNER: result = left.getStream().join(right.getTable(), joinParams.getJoiner(), joined); break; default: throw new IllegalStateException("invalid join type"); } return left.withStream(result, joinParams.getSchema()); }
// OUTER stream-table joins are unsupported and must raise
// IllegalStateException from the join builder.
@Test public void shouldFailOnOuterJoin() { // Given: givenOuterJoin(); // When: assertThrows( IllegalStateException.class, () -> join.build(planBuilder, planInfo) );
}
/**
 * Asserts the map under test contains the given key/value entry, producing a
 * targeted failure message for each near-miss in turn: key present with a
 * different value (with a null-clarifying note), key matching another key
 * only by toString, value present under different keys, value matching only
 * by toString, or plain absence.
 */
public final void containsEntry(@Nullable Object key, @Nullable Object value) { Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value); checkNotNull(actual); if (!actual.entrySet().contains(entry)) { List<@Nullable Object> keyList = singletonList(key); List<@Nullable Object> valueList = singletonList(value); if (actual.containsKey(key)) { Object actualValue = actual.get(key); /* * In the case of a null expected or actual value, clarify that the key *is* present and * *is* expected to be present. That is, get() isn't returning null to indicate that the key * is missing, and the user isn't making an assertion that the key is missing. */ StandardSubjectBuilder check = check("get(%s)", key); if (value == null || actualValue == null) { check = check.withMessage("key is present but with a different value"); } // See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription. check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value); } else if (hasMatchingToStringPair(actual.keySet(), keyList)) { failWithoutActual( fact("expected to contain entry", entry), fact("an instance of", objectToTypeName(entry)), simpleFact("but did not"), fact( "though it did contain keys", countDuplicatesAndAddTypeInfo( retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))), fact("full contents", actualCustomStringRepresentationForPackageMembersToCall())); } else if (actual.containsValue(value)) { Set<@Nullable Object> keys = new LinkedHashSet<>(); for (Map.Entry<?, ?> actualEntry : actual.entrySet()) { if (Objects.equal(actualEntry.getValue(), value)) { keys.add(actualEntry.getKey()); } } failWithoutActual( fact("expected to contain entry", entry), simpleFact("but did not"), fact("though it did contain keys with that value", keys), fact("full contents", actualCustomStringRepresentationForPackageMembersToCall())); } else if (hasMatchingToStringPair(actual.values(), valueList)) { failWithoutActual(
fact("expected to contain entry", entry), fact("an instance of", objectToTypeName(entry)), simpleFact("but did not"), fact( "though it did contain values", countDuplicatesAndAddTypeInfo( retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))), fact("full contents", actualCustomStringRepresentationForPackageMembersToCall())); } else { failWithActual("expected to contain entry", entry); } } }
// A map may legitimately map a null key to a null value; the subject must
// report such an entry as present.
@Test
public void containsNullEntry() {
    Map<String, String> map = Maps.newHashMap();
    map.put(null, null);
    assertThat(map).containsEntry(null, null);
}
/**
 * Builds the index template for the given index set by resolving its
 * configured mappings (including any profile) and rendering them through the
 * index-mapping factory.
 */
public Template getIndexTemplate(IndexSet indexSet) {
    final IndexSetMappingTemplate mappingTemplate =
            getTemplateIndexSetConfig(indexSet, indexSet.getConfig(), profileService);
    return indexMappingFactory.createIndexMapping(indexSet.getConfig()).toTemplate(mappingTemplate);
}
// When the referenced profile contributes no mappings of its own, the
// template must be rendered from the index set's individual custom field
// mappings alone.
@Test void testUsesCustomMappingsWhileGettingTemplateWhenProfileHasNoOwnMappings() { final CustomFieldMappings individualCustomFieldMappings = new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "string"), new CustomFieldMapping("f2", "long") )); final TestIndexSet testIndexSet = indexSetConfig("test", "test-template-profiles", "custom", "000000000000000000000013", individualCustomFieldMappings); IndexMappingTemplate indexMappingTemplateMock = mock(IndexMappingTemplate.class); doReturn(indexMappingTemplateMock).when(indexMappingFactory).createIndexMapping(testIndexSet.getConfig()); underTest.getIndexTemplate(testIndexSet); verify(indexMappingTemplateMock).toTemplate( new IndexSetMappingTemplate("standard", "test_*", individualCustomFieldMappings) ); }
/**
 * Mock-aware invocation. With no mock rule configured the call passes
 * straight through. A "force:" rule short-circuits to the mock without
 * calling the real invoker. Otherwise (fail-mock) the real invoker is tried
 * first and the mock is applied only on non-business RpcException failures —
 * whether thrown or carried in the result (#4585). Business exceptions
 * always propagate to the caller.
 *
 * (Reformatted from a single flattened line; the embedded // comments made
 * the one-line form invalid. Code tokens are unchanged.)
 */
@Override
public Result invoke(Invocation invocation) throws RpcException {
    Result result;

    String value = getUrl().getMethodParameter(
                    RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString())
            .trim();
    if (ConfigUtils.isEmpty(value)) {
        // no mock
        result = this.invoker.invoke(invocation);
    } else if (value.startsWith(FORCE_KEY)) {
        if (logger.isWarnEnabled()) {
            logger.warn(
                    CLUSTER_FAILED_MOCK_REQUEST,
                    "force mock",
                    "",
                    "force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : " + getUrl());
        }
        // force:direct mock
        result = doMockInvoke(invocation, null);
    } else {
        // fail-mock
        try {
            result = this.invoker.invoke(invocation);

            // fix:#4585
            if (result.getException() != null && result.getException() instanceof RpcException) {
                RpcException rpcException = (RpcException) result.getException();
                if (rpcException.isBiz()) {
                    throw rpcException;
                } else {
                    result = doMockInvoke(invocation, rpcException);
                }
            }

        } catch (RpcException e) {
            if (e.isBiz()) {
                throw e;
            }

            if (logger.isWarnEnabled()) {
                logger.warn(
                        CLUSTER_FAILED_MOCK_REQUEST,
                        "failed to mock invoke",
                        "",
                        "fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : " + getUrl(),
                        e);
            }
            result = doMockInvoke(invocation, e);
        }
    }
    return result;
}
// "force:return empty" mock on getListString must yield an empty list even
// though the underlying invoker is configured to fail ("invoke_return_error").
@SuppressWarnings("unchecked")
@Test
void testMockInvokerFromOverride_Invoke_check_ListString_empty() {
    URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName())
            .addParameter(
                    REFER_KEY,
                    URL.encode(PATH_KEY + "=" + IHelloService.class.getName() + "&"
                            + "getListString.mock=force:return empty"))
            .addParameter("invoke_return_error", "true");
    Invoker<IHelloService> cluster = getClusterInvoker(url);
    // Configured with mock
    RpcInvocation invocation = new RpcInvocation();
    invocation.setMethodName("getListString");
    Result ret = cluster.invoke(invocation);
    Assertions.assertEquals(0, ((List<String>) ret.getValue()).size());
}
@Override public int compareTo(@Nonnull ByteKey other) { checkNotNull(other, "other"); ByteIterator thisIt = value.iterator(); ByteIterator otherIt = other.value.iterator(); while (thisIt.hasNext() && otherIt.hasNext()) { // (byte & 0xff) converts [-128,127] bytes to [0,255] ints. int cmp = (thisIt.nextByte() & 0xff) - (otherIt.nextByte() & 0xff); if (cmp != 0) { return cmp; } } // If we get here, the prefix of both arrays is equal up to the shorter array. The array with // more bytes is larger. return value.size() - other.value.size(); }
// TEST_KEYS is held in ascending order, so for every pair (i, j) the sign of
// compareTo must match the sign of (i - j); checks all pairs both ways.
@Test
public void testCompareToExhaustive() {
    // Verify that the comparison gives the correct result for all values in both directions.
    for (int i = 0; i < TEST_KEYS.length; ++i) {
        for (int j = 0; j < TEST_KEYS.length; ++j) {
            ByteKey left = TEST_KEYS[i];
            ByteKey right = TEST_KEYS[j];
            int cmp = left.compareTo(right);
            if (i < j && !(cmp < 0)) {
                fail(
                    String.format(
                        "Expected that cmp(%s, %s) < 0, got %d [i=%d, j=%d]", left, right, cmp, i, j));
            } else if (i == j && !(cmp == 0)) {
                fail(
                    String.format(
                        "Expected that cmp(%s, %s) == 0, got %d [i=%d, j=%d]", left, right, cmp, i, j));
            } else if (i > j && !(cmp > 0)) {
                fail(
                    String.format(
                        "Expected that cmp(%s, %s) > 0, got %d [i=%d, j=%d]", left, right, cmp, i, j));
            }
        }
    }
}
// REST endpoint assigning an existing device to an edge instance. Requires
// TENANT_ADMIN authority; READ permission on both entities is what the code
// checks before delegating to the device service.
@ApiOperation(value = "Assign device to edge (assignDeviceToEdge)",
        notes = "Creates assignment of an existing device to an instance of The Edge. "
                + EDGE_ASSIGN_ASYNC_FIRST_STEP_DESCRIPTION
                + "Second, remote edge service will receive a copy of assignment device "
                + EDGE_ASSIGN_RECEIVE_STEP_DESCRIPTION
                + "Third, once device will be delivered to edge service, it's going to be available for usage on remote edge instance."
                + TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/edge/{edgeId}/device/{deviceId}", method = RequestMethod.POST)
@ResponseBody
public Device assignDeviceToEdge(@Parameter(description = EDGE_ID_PARAM_DESCRIPTION) @PathVariable(EDGE_ID) String strEdgeId,
                                 @Parameter(description = DEVICE_ID_PARAM_DESCRIPTION) @PathVariable(DEVICE_ID) String strDeviceId) throws ThingsboardException {
    // Validate the raw path parameters before parsing them as UUIDs.
    checkParameter(EDGE_ID, strEdgeId);
    checkParameter(DEVICE_ID, strDeviceId);
    EdgeId edgeId = new EdgeId(toUUID(strEdgeId));
    Edge edge = checkEdgeId(edgeId, Operation.READ);

    DeviceId deviceId = new DeviceId(toUUID(strDeviceId));
    checkDeviceId(deviceId, Operation.READ);

    return tbDeviceService.assignDeviceToEdge(getTenantId(), deviceId, edge, getCurrentUser());
}
// Assign then unassign a device to/from an edge: each step must fire its
// notification exactly once, never notify the gateway, and keep the edge's
// device listing in sync (1 device after assign, 0 after unassign).
@Test
public void testAssignDeviceToEdge() throws Exception {
    Edge edge = constructEdge("My edge", "default");
    Edge savedEdge = doPost("/api/edge", edge, Edge.class);

    Device device = new Device();
    device.setName("My device");
    device.setType("default");
    Device savedDevice = doPost("/api/device", device, Device.class);

    Mockito.reset(tbClusterService, auditLogService, gatewayNotificationsService);

    doPost("/api/edge/" + savedEdge.getId().getId() + "/device/" + savedDevice.getId().getId(), Device.class);

    testNotifyEntityAllOneTime(savedDevice, savedDevice.getId(), savedDevice.getId(), savedTenant.getId(),
            tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.ASSIGNED_TO_EDGE, savedDevice.getId().getId().toString(),
            savedEdge.getId().getId().toString(), savedEdge.getName());
    testNotificationUpdateGatewayNever();

    PageData<DeviceInfo> pageData = doGetTypedWithPageLink("/api/edge/" + savedEdge.getId().getId() + "/devices?",
            new TypeReference<>() {}, new PageLink(100));
    Assert.assertEquals(1, pageData.getData().size());

    Mockito.reset(tbClusterService, auditLogService, gatewayNotificationsService);

    doDelete("/api/edge/" + savedEdge.getId().getId() + "/device/" + savedDevice.getId().getId(), Device.class);

    testNotifyEntityAllOneTime(savedDevice, savedDevice.getId(), savedDevice.getId(), savedTenant.getId(),
            tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.UNASSIGNED_FROM_EDGE, savedDevice.getId().getId().toString(),
            savedEdge.getId().getId().toString(), savedEdge.getName());
    testNotificationUpdateGatewayNever();

    pageData = doGetTypedWithPageLink("/api/edge/" + savedEdge.getId().getId() + "/devices?",
            new TypeReference<>() {}, new PageLink(100));
    Assert.assertEquals(0, pageData.getData().size());
}
/**
 * Stores and/or notifies for a message addressed to a single device.
 *
 * <p>Online sends are persisted (flagged ephemeral) only when the destination
 * client is currently present; offline sends are always persisted and trigger a
 * new-message push notification when no client is present. Every outcome is
 * counted with channel/presence/urgency tags.
 */
public void sendMessage(final Account account, final Device device, final Envelope message, final boolean online) {

    // Classify the delivery channel (used only for the metrics tag below).
    final String channel;

    if (device.getGcmId() != null) {
        channel = "gcm";
    } else if (device.getApnId() != null) {
        channel = "apn";
    } else if (device.getFetchesMessages()) {
        channel = "websocket";
    } else {
        channel = "none";
    }

    final boolean clientPresent;

    if (online) {
        clientPresent = clientPresenceManager.isPresent(account.getUuid(), device.getId());

        if (clientPresent) {
            // Online/ephemeral delivery: only store when the client is connected.
            messagesManager.insert(account.getUuid(), device.getId(), message.toBuilder().setEphemeral(true).build());
        }
    } else {
        messagesManager.insert(account.getUuid(), device.getId(), message);

        // We check for client presence after inserting the message to take a conservative view of notifications. If the
        // client wasn't present at the time of insertion but is now, they'll retrieve the message. If they were present
        // but disconnected before the message was delivered, we should send a notification.
        clientPresent = clientPresenceManager.isPresent(account.getUuid(), device.getId());

        if (!clientPresent) {
            try {
                pushNotificationManager.sendNewMessageNotification(account, device.getId(), message.getUrgent());
            } catch (final NotPushRegisteredException ignored) {
                // Device has no push registration; nothing further to do here.
            }
        }
    }

    Metrics.counter(SEND_COUNTER_NAME,
            CHANNEL_TAG_NAME, channel,
            EPHEMERAL_TAG_NAME, String.valueOf(online),
            CLIENT_ONLINE_TAG_NAME, String.valueOf(clientPresent),
            URGENT_TAG_NAME, String.valueOf(message.getUrgent()),
            STORY_TAG_NAME, String.valueOf(message.getStory()),
            SEALED_SENDER_TAG_NAME, String.valueOf(!message.hasSourceUuid()))
        .increment();
}
// Offline send to a GCM device with no connected client: the message must be
// stored and a new-message push notification must be dispatched.
@Test
void testSendMessageGcmClientNotPresent() throws Exception {
    when(clientPresenceManager.isPresent(ACCOUNT_UUID, DEVICE_ID)).thenReturn(false);
    when(device.getGcmId()).thenReturn("gcm-id");

    messageSender.sendMessage(account, device, message, false);

    verify(messagesManager).insert(ACCOUNT_UUID, DEVICE_ID, message);
    verify(pushNotificationManager).sendNewMessageNotification(account, device.getId(), message.getUrgent());
}
// One iteration of the worker loop: delegates to pollOnce with an effectively
// unbounded timeout (Long.MAX_VALUE).
@Override
public void doWork() {
    pollOnce(Long.MAX_VALUE);
}
// With the destination node ready, a single doWork() pass must build the client
// request, send it, poll once, and interact with the network client exactly as
// verified — with no disconnect response delivered to the completion handler.
@Test
public void testShouldCreateClientRequestAndSendWhenNodeIsReady() {
    final AbstractRequest.Builder<?> request = new StubRequestBuilder<>();
    final Node node = new Node(1, "", 8080);
    final RequestAndCompletionHandler handler =
        new RequestAndCompletionHandler(time.milliseconds(), node, request, completionHandler);
    final TestInterBrokerSendThread sendThread = new TestInterBrokerSendThread();

    final ClientRequest clientRequest =
        new ClientRequest("dest", request, 0, "1", 0, true, requestTimeoutMs, handler.handler);

    when(networkClient.newClientRequest(
        ArgumentMatchers.eq("1"),
        same(handler.request),
        anyLong(),
        ArgumentMatchers.eq(true),
        ArgumentMatchers.eq(requestTimeoutMs),
        same(handler.handler)
    )).thenReturn(clientRequest);

    when(networkClient.ready(node, time.milliseconds())).thenReturn(true);

    when(networkClient.poll(anyLong(), anyLong())).thenReturn(Collections.emptyList());

    sendThread.enqueue(handler);
    sendThread.doWork();

    verify(networkClient)
        .newClientRequest(
            ArgumentMatchers.eq("1"),
            same(handler.request),
            anyLong(),
            ArgumentMatchers.eq(true),
            ArgumentMatchers.eq(requestTimeoutMs),
            same(handler.handler));
    verify(networkClient).ready(any(), anyLong());
    verify(networkClient).send(same(clientRequest), anyLong());
    verify(networkClient).poll(anyLong(), anyLong());
    verifyNoMoreInteractions(networkClient);

    assertFalse(completionHandler.executedWithDisconnectedResponse);
}
/**
 * Rounds {@code value} to {@code precision} decimal places using HALF_UP
 * ("round half away from zero") semantics, e.g. {@code toFixed(2.345, 2) == 2.35}.
 *
 * <p>Uses {@link BigDecimal#valueOf(double)} so rounding operates on the
 * canonical decimal rendering of the double rather than its raw binary value.
 */
public static double toFixed(double value, int precision) {
    final BigDecimal rounded =
            BigDecimal.valueOf(value).setScale(precision, RoundingMode.HALF_UP);
    return rounded.doubleValue();
}
// Rounding doubleVal half-up to 3 decimals must yield exactly 1729.173, and the
// rounded value must be strictly greater than the original (compare == -1).
@Test
public void toFixedDouble() {
    double actualD = TbUtils.toFixed(doubleVal, 3);
    Assertions.assertEquals(-1, Double.compare(doubleVal, actualD));
    Assertions.assertEquals(0, Double.compare(1729.173, actualD));
}
/**
 * Returns a serializable function converting a Beam {@code Row} into a Spanner
 * {@code Mutation} of the given operation against {@code table}.
 *
 * <p>DELETE builds only a key from the row; all other operations copy the row's
 * columns into the matching mutation builder. Unknown operations throw
 * {@link IllegalArgumentException}.
 */
public static SerializableFunction<Row, Mutation> beamRowToMutationFn(
        Mutation.Op operation, String table) {
    return (row -> {
        switch (operation) {
            case INSERT:
                return MutationUtils.createMutationFromBeamRows(Mutation.newInsertBuilder(table), row);
            case DELETE:
                return Mutation.delete(table, MutationUtils.createKeyFromBeamRow(row));
            case UPDATE:
                return MutationUtils.createMutationFromBeamRows(Mutation.newUpdateBuilder(table), row);
            case REPLACE:
                return MutationUtils.createMutationFromBeamRows(Mutation.newReplaceBuilder(table), row);
            case INSERT_OR_UPDATE:
                return MutationUtils.createMutationFromBeamRows(
                        Mutation.newInsertOrUpdateBuilder(table), row);
            default:
                throw new IllegalArgumentException(
                        String.format("Unknown mutation operation type: %s", operation));
        }
    });
}
// An UPDATE mutation built from a row containing null column values must equal
// the reference mutation constructed directly with the same nulls.
@Test
public void testCreateUpdateMutationFromRowWithNulls() {
    Mutation expectedMutation = createMutationNulls(Mutation.Op.UPDATE);
    Mutation mutation = beamRowToMutationFn(Mutation.Op.UPDATE, TABLE).apply(WRITE_ROW_NULLS);
    assertEquals(expectedMutation, mutation);
}
// Returns a new assertion configured to treat an unset field as equal to one
// explicitly set to its default value. Built via usingConfig, so this instance
// is left untouched.
public ProtoFluentAssertion ignoringFieldAbsence() {
    return usingConfig(config.ignoringFieldAbsence());
}
// Unset field vs. field explicitly set to its default: equal only under
// ignoringFieldAbsence() in proto2; proto3 cannot distinguish the two at all.
// Also checks custom defaults (o_long_defaults_to_42) and failure messages.
@Test
public void testIgnoringFieldAbsence() {
    Message message = parse("o_int: 3");
    Message diffMessage = parse("o_int: 3 o_enum: DEFAULT");

    // Make sure the implementation is reflexive.
    if (isProto3()) {
        expectThat(diffMessage).isEqualTo(message);
        expectThat(message).isEqualTo(diffMessage);
    } else {
        expectThat(diffMessage).isNotEqualTo(message);
        expectThat(message).isNotEqualTo(diffMessage);
    }
    expectThat(diffMessage).ignoringFieldAbsence().isEqualTo(message);
    expectThat(message).ignoringFieldAbsence().isEqualTo(diffMessage);

    if (!isProto3()) {
        // A custom default value behaves like a built-in default under this setting.
        Message customDefaultMessage = parse("o_int: 3");
        Message diffCustomDefaultMessage = parse("o_int: 3 o_long_defaults_to_42: 42");

        expectThat(diffCustomDefaultMessage).isNotEqualTo(customDefaultMessage);
        expectThat(diffCustomDefaultMessage).ignoringFieldAbsence().isEqualTo(customDefaultMessage);
        expectThat(customDefaultMessage).isNotEqualTo(diffCustomDefaultMessage);
        expectThat(customDefaultMessage).ignoringFieldAbsence().isEqualTo(diffCustomDefaultMessage);
    }

    if (!isProto3()) {
        expectFailureWhenTesting().that(diffMessage).isEqualTo(message);
        expectIsEqualToFailed();
        expectThatFailure().hasMessageThat().contains("added: o_enum: DEFAULT");
    }

    expectFailureWhenTesting().that(diffMessage).ignoringFieldAbsence().isNotEqualTo(message);
    expectIsNotEqualToFailed();
    expectThatFailure().hasMessageThat().contains("matched: o_int: 3");
    if (!isProto3()) {
        // Proto 3 doesn't cover the field at all when it's not set.
        expectThatFailure().hasMessageThat().contains("matched: o_enum: DEFAULT");
    }
}
/**
 * Derives a 32-byte encryption key as SHA-256 over seed || nonce || 0x00000001.
 * The trailing 4-byte big-endian counter value 1 selects the ENC key in this
 * counter-style KDF; a null nonce is simply omitted from the digest input.
 */
public static byte[] deriveEnc(byte[] seed, byte[] nonce) {
    final MessageDigest digest = DigestUtils.digest("SHA-256");
    digest.update(seed);
    if (nonce != null) {
        digest.update(nonce);
    }
    // 32-bit big-endian counter with value 1.
    digest.update(new byte[] {0, 0, 0, 1});
    return Arrays.copyOfRange(digest.digest(), 0, 32);
}
// KDF vector check: seed 0xCA with nonce 0xFE must derive the expected 32-byte
// key, compared via its prettyHex rendering.
// NOTE(review): the expected value looks like a masked fixture ("SSS..."); confirm
// against the real test vector.
@Test
public void shouldDeriveEncryptionKeyWithNonce() {
    assertEquals(
        "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS",
        ByteArrayUtils.prettyHex(AESSecureMessaging.deriveEnc(Hex.decode("CA"), Hex.decode("FE")))
    );
}
/**
 * Key-auth plugin step: requires the cached rule handle to configure both a key
 * name and a key value, then matches the request credential via checkKey.
 * Missing configuration responds KEY_NAME_AND_KEY_MUST_BE_CONFIGURED; a failed
 * match responds ERROR_KEY; success continues the plugin chain.
 */
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain,
                               final SelectorData selector, final RuleData rule) {
    KeyAuthRuleHandle keyAuthRuleHandle = KeyAuthPluginDataHandler.CACHED_HANDLE.get()
            .obtainHandle(CacheKeyUtils.INST.getKey(rule));
    // Both the key name and the key itself must be configured on the rule.
    if (Objects.isNull(keyAuthRuleHandle)
            || StringUtils.isBlank(keyAuthRuleHandle.getKeyName())
            || StringUtils.isBlank(keyAuthRuleHandle.getKey())) {
        Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.KEY_NAME_AND_KEY_MUST_BE_CONFIGURED);
        return WebFluxResultUtils.result(exchange, error);
    }
    if (checkKey(exchange, keyAuthRuleHandle.getKeyName(), keyAuthRuleHandle.getKey())) {
        return chain.execute(exchange);
    }
    Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.ERROR_KEY);
    return WebFluxResultUtils.result(exchange, error);
}
// A request carrying the configured key in the "apiKey" header must pass the
// key-auth check; the plugin Mono completes without error.
@Test
public void testKeyAuthWithHeaderCredentials() {
    ruleData.setHandle("{\"keyName\":\"apiKey\",\"key\":\"key\","
            + "\"hideCredentials\":\"false\"}");
    keyAuthPluginDataHandler.handlerRule(ruleData);
    exchange = MockServerWebExchange.from(MockServerHttpRequest
            .get("localhost")
            .header("apiKey", "key")
            .build());
    Mono<Void> mono = keyAuthPlugin.doExecute(exchange, chain, selectorData, ruleData);
    StepVerifier.create(mono).expectSubscription().verifyComplete();
}
/**
 * Creates an exit-status tracker backed by the given runtime options.
 *
 * @param options runtime options consulted later when computing the exit code;
 *                stored as-is (not copied, not null-checked)
 */
public ExitStatus(Options options) {
    this.options = options;
}
// In WIP (work-in-progress) mode a PASSED scenario is a failure: the exit
// status must be 0x1.
@Test
void wip_with_passed_scenarios() {
    createWipRuntime();
    bus.send(testCaseFinishedWithStatus(Status.PASSED));

    assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1)));
}
/**
 * Extracts a trimmed, non-empty string from a BSON value.
 *
 * @return the trimmed string, or null when the value is null, not a BSON
 *     string, or blank after trimming
 */
@Nullable
static String getNonEmptyBsonString(BsonValue bsonValue) {
    // Only BSON string values qualify; anything else (including null) maps to null.
    if (bsonValue == null) {
        return null;
    }
    if (!bsonValue.isString()) {
        return null;
    }
    final String trimmed = bsonValue.asString().getValue().trim();
    if (trimmed.isEmpty()) {
        return null;
    }
    return trimmed;
}
// A whitespace-only BSON string is treated as absent and normalized to null.
@Test
void getNonEmptyBsonString_empty() {
    assertThat(getNonEmptyBsonString(new BsonString(" "))).isNull();
}
/**
 * Offers a 64-bit hash to the dense HyperLogLog sketch.
 *
 * <p>The low bits of the hash select a register (register.length is assumed to
 * be a power of two so the mask keeps the index in range — TODO confirm), and
 * the rank is one plus the trailing-zero count of the remaining bits, with
 * pFenseMask providing a guaranteed 1-bit that bounds the rank at 64 - p.
 *
 * @return true if a register value grew (i.e. the estimate may have changed)
 */
@Override
public boolean add(long hash) {
    // Register index: hash modulo register count via power-of-two mask.
    final int index = (int) hash & (register.length - 1);
    // Rank of the hash beyond the index bits; the fence mask caps the zero run.
    final int value = Long.numberOfTrailingZeros((hash >>> p) | pFenseMask) + 1;

    assert index < register.length;
    assert value <= (1 << 8) - 1;
    assert value <= 64 - p;

    // Registers only ever grow: keep the maximum rank observed per register.
    if (value > register[index]) {
        register[index] = (byte) value;
        return true;
    }
    return false;
}
// With -ea enabled, an encoder built over an empty register array must trip the
// internal index assertion on add (index masking with length-1 == -1 yields an
// out-of-range index).
@RequireAssertEnabled
@Test(expected = AssertionError.class)
public void testAdd_assertRegisterLength() {
    DenseHyperLogLogEncoder encoder = new DenseHyperLogLogEncoder(precision(), new byte[0]);
    encoder.add(5);
}
// Returns an iterable-style asserter over this double array whose element
// correspondence is exact equality (no tolerance).
public DoubleArrayAsIterable usingExactEquality() {
    return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
}
// Exact-equality containment: 2.2 is present in the array bit-for-bit.
@Test
public void usingExactEquality_contains_success() {
    assertThat(array(1.1, 2.2, 3.3)).usingExactEquality().contains(2.2);
}
/**
 * Basic-auth plugin step: reads the credential from the Authorization header,
 * falling back to the URI user-info, then authenticates with the rule's
 * configured strategy. On success the chain continues; a missing handle/strategy
 * or failed authentication responds with ERROR_TOKEN.
 */
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain,
                               final SelectorData selector, final RuleData rule) {
    // Prefer the Authorization header; fall back to user-info embedded in the URI.
    String authorization = StringUtils.defaultString(
            exchange.getRequest().getHeaders().getFirst(HttpHeaders.AUTHORIZATION),
            exchange.getRequest().getURI().getUserInfo());
    BasicAuthRuleHandle basicAuthRuleHandle =
            BasicAuthPluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(rule));
    BasicAuthAuthenticationStrategy authenticationStrategy = Optional.ofNullable(basicAuthRuleHandle)
            .map(BasicAuthRuleHandle::getBasicAuthAuthenticationStrategy)
            .orElse(null);
    if (authenticationStrategy != null && authenticationStrategy.authenticate(basicAuthRuleHandle, authorization)) {
        return chain.execute(exchange);
    }
    // No handle/strategy configured, or the credential failed to authenticate.
    return WebFluxResultUtils.result(exchange, ShenyuResultWrap.error(exchange, ShenyuResultEnum.ERROR_TOKEN));
}
// With no rule handle configured the strategy is null, so the plugin writes an
// ERROR_TOKEN response; this test only asserts the resulting Mono completes.
@Test
public void testDoExecuteWithoutHandle() {
    when(this.chain.execute(any())).thenReturn(Mono.empty());
    Mono<Void> mono = basicAuthPlugin.doExecute(exchange, chain, selectorData, ruleData);
    StepVerifier.create(mono).expectSubscription().verifyComplete();
}
/**
 * Resolves the constant named {@code "<fully-qualified-class>#<secondNameComponent>"}.
 * Both components must be non-null.
 */
public T valueOf(Class<?> firstNameComponent, String secondNameComponent) {
    final String className =
            checkNotNull(firstNameComponent, "firstNameComponent").getName();
    final String suffix =
            checkNotNull(secondNameComponent, "secondNameComponent");
    // Composed name follows the "<class>#<member>" convention.
    return valueOf(className + '#' + suffix);
}
// valueOf(Class, String) composes the constant name as "<fqcn>#<suffix>".
@Test
public void testComposedName() {
    TestConstant a = pool.valueOf(Object.class, "A");
    assertThat(a.name(), is("java.lang.Object#A"));
}
/**
 * Runs every pull-query validation rule against the analysis. A failing rule's
 * KsqlException is re-thrown with pull-query syntax help appended (original
 * exception preserved as the cause). Finally rejects user columns that shadow
 * pseudo-column names.
 */
@Override
public void validate(final Analysis analysis) {
    try {
        RULES.forEach(rule -> rule.check(analysis));
    } catch (final KsqlException e) {
        throw new KsqlException(e.getMessage() + PULL_QUERY_SYNTAX_HELP, e);
    }

    QueryValidatorUtil.validateNoUserColumnsWithSameNameAsPseudoColumns(analysis);
}
// Pull queries must reject WHERE clauses referencing disallowed pseudo-columns
// (ROWPARTITION / ROWOFFSET) with a descriptive KsqlException.
@Test
public void shouldThrowWhenWhereClauseContainsDisallowedColumns() {
    try (MockedStatic<ColumnExtractor> columnExtractor = mockStatic(ColumnExtractor.class)) {
        //Given:
        givenWhereClauseWithDisallowedColumnNames(columnExtractor);

        // When:
        final Exception e = assertThrows(
            KsqlException.class,
            () -> validator.validate(analysis)
        );

        // Then:
        assertThat(e.getMessage(), containsString(
            "Pull queries don't support the following columns in WHERE clauses: `ROWPARTITION`, `ROWOFFSET`"));
    }
}
// Whether the copy should append to existing target files (-append; per the
// builder's validation, only valid with -update and without -skipcrccheck).
public boolean shouldAppend() {
    return append;
}
// -append requires -update (sync folder) and is incompatible with -skipcrccheck;
// the builder must accept the valid combination and reject the two invalid ones.
@Test
public void testAppendOption() {
    final DistCpOptions.Builder builder = new DistCpOptions.Builder(
        Collections.singletonList(new Path("hdfs://localhost:8020/source")),
        new Path("hdfs://localhost:8020/target/"))
        .withSyncFolder(true)
        .withAppend(true);
    Assert.assertTrue(builder.build().shouldAppend());

    try {
        // make sure -append is only valid when -update is specified
        new DistCpOptions.Builder(
            Collections.singletonList(new Path("hdfs://localhost:8020/source")),
            new Path("hdfs://localhost:8020/target/"))
            .withAppend(true)
            .build();
        fail("Append should fail if update option is not specified");
    } catch (IllegalArgumentException e) {
        assertExceptionContains(
            "Append is valid only with update options", e);
    }

    try {
        // make sure -append is invalid when skipCrc is specified
        new DistCpOptions.Builder(
            Collections.singletonList(new Path("hdfs://localhost:8020/source")),
            new Path("hdfs://localhost:8020/target/"))
            .withSyncFolder(true)
            .withAppend(true)
            .withSkipCRC(true)
            .build();
        fail("Append should fail if skipCrc option is specified");
    } catch (IllegalArgumentException e) {
        assertExceptionContains(
            "Append is disallowed when skipping CRC", e);
    }
}
// Returns the PATH constant identifying this Bot API method.
@Override
public String getMethod() {
    return PATH;
}
// Builder with forChannels=true must still report the correct API method name
// and pass validation without throwing.
@Test
public void testGetMyDefaultAdministratorRightsWithAllSetForChannel() {
    GetMyDefaultAdministratorRights getMyDefaultAdministratorRights = GetMyDefaultAdministratorRights
            .builder()
            .forChannels(true)
            .build();
    assertEquals("getMyDefaultAdministratorRights", getMyDefaultAdministratorRights.getMethod());
    assertDoesNotThrow(getMyDefaultAdministratorRights::validate);
}
/**
 * SQL LCASE: lower-cases the input string; null propagates as null.
 * Uses the JVM default locale, matching the original implementation.
 */
@Udf
public String lcase(
    @UdfParameter(description = "The string to lower-case") final String input) {
  return input == null ? null : input.toLowerCase();
}
// Mixed-case input must be fully lower-cased.
@Test
public void shouldConvertToLowerCase() {
    final String result = udf.lcase("FoO bAr");
    assertThat(result, is("foo bar"));
}
// This result set always contains exactly one row.
@Override
public int getRowCount() {
    return 1;
}
// The aggregation result set reports its fixed row count of 1.
@Test
public void testGetRowCount() {
    // Run the test
    final int result = _aggregationResultSetUnderTest.getRowCount();

    // Verify the results
    assertEquals(1, result);
}
// Hash over all metadata fields; must stay consistent with equals().
@Override
public int hashCode() {
    return Objects.hash(
            threadName,
            threadState,
            activeTasks,
            standbyTasks,
            mainConsumerClientId,
            restoreConsumerClientId,
            producerClientIds,
            adminClientId);
}
// Metadata differing only in the producer client-id set must be unequal, and
// its hash code must differ accordingly.
@Test
public void shouldNotBeEqualIfDifferInProducerClientIds() {
    final ThreadMetadata differProducerClientIds = new ThreadMetadataImpl(
        THREAD_NAME,
        THREAD_STATE,
        MAIN_CONSUMER_CLIENT_ID,
        RESTORE_CONSUMER_CLIENT_ID,
        mkSet(CLIENT_ID_1),
        ADMIN_CLIENT_ID,
        ACTIVE_TASKS,
        STANDBY_TASKS
    );
    assertThat(threadMetadata, not(equalTo(differProducerClientIds)));
    assertThat(threadMetadata.hashCode(), not(equalTo(differProducerClientIds.hashCode())));
}
/**
 * Dissects an archive control-request log event into human-readable text.
 *
 * <p>Writes the common log header, decodes the SBE message header, then
 * dispatches on the event code to the matching request decoder and appends the
 * decoded fields to {@code builder}. Unrecognized codes append
 * ": unknown command". Each case follows the same wrap-then-append pattern.
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    // Common log header first; the SBE header follows it in the buffer.
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);

    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;

    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            builder.append(": unknown command");
    }
}
// Round-trip: encode a ReplicateRequest2 into the buffer and verify the
// dissector renders the log header plus every request field in order.
@Test
void controlRequestReplicate2() {
    internalEncodeLogHeader(buffer, 0, 1000, 1000, () -> 500_000_000L);
    final ReplicateRequest2Encoder requestEncoder = new ReplicateRequest2Encoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(2)
        .correlationId(5)
        .srcRecordingId(17)
        .dstRecordingId(2048)
        .stopPosition(4096)
        .channelTagId(123)
        .subscriptionTagId(321)
        .srcControlStreamId(10)
        .srcControlChannel("CTRL ch")
        .liveDestination("live destination")
        .replicationChannel("replication channel");

    dissectControlRequest(CMD_IN_REPLICATE2, buffer, 0, builder);

    assertEquals("[0.500000000] " + CONTEXT + ": " + CMD_IN_REPLICATE2.name() + " [1000/1000]:"
        + " controlSessionId=2"
        + " correlationId=5"
        + " srcRecordingId=17"
        + " dstRecordingId=2048"
        + " stopPosition=4096"
        + " channelTagId=123"
        + " subscriptionTagId=321"
        + " srcControlStreamId=10"
        + " srcControlChannel=CTRL ch"
        + " liveDestination=live destination"
        + " replicationChannel=replication channel",
        builder.toString());
}
/**
 * Generates at most one RealtimeToOfflineSegmentsTask per REALTIME table.
 *
 * <p>Per table: skip if an earlier task of this type is still incomplete or any
 * partition lacks a completed segment; otherwise advance a time window
 * [watermark, watermark + bucket) until completed segments overlap it, then emit
 * one task config carrying segment names, download URLs and merge settings.
 * Windows newer than the buffer period, or windows spilling into CONSUMING
 * segments, abort generation for that table.
 */
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
    String taskType = RealtimeToOfflineSegmentsTask.TASK_TYPE;
    List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();

    for (TableConfig tableConfig : tableConfigs) {
        String realtimeTableName = tableConfig.getTableName();

        if (tableConfig.getTableType() != TableType.REALTIME) {
            LOGGER.warn("Skip generating task: {} for non-REALTIME table: {}", taskType, realtimeTableName);
            continue;
        }
        LOGGER.info("Start generating task configs for table: {} for task: {}", realtimeTableName, taskType);

        // Only schedule 1 task of this type, per table
        Map<String, TaskState> incompleteTasks =
            TaskGeneratorUtils.getIncompleteTasks(taskType, realtimeTableName, _clusterInfoAccessor);
        if (!incompleteTasks.isEmpty()) {
            LOGGER.warn("Found incomplete tasks: {} for same table: {} and task type: {}. Skipping task generation.",
                incompleteTasks.keySet(), realtimeTableName, taskType);
            continue;
        }

        // Get all segment metadata for completed segments (DONE/UPLOADED status).
        List<SegmentZKMetadata> completedSegmentsZKMetadata = new ArrayList<>();
        Map<Integer, String> partitionToLatestLLCSegmentName = new HashMap<>();
        Set<Integer> allPartitions = new HashSet<>();
        getCompletedSegmentsInfo(realtimeTableName, completedSegmentsZKMetadata, partitionToLatestLLCSegmentName,
            allPartitions);
        if (completedSegmentsZKMetadata.isEmpty()) {
            LOGGER.info("No realtime-completed segments found for table: {}, skipping task generation: {}",
                realtimeTableName, taskType);
            continue;
        }

        // Every partition must have at least one completed segment before conversion.
        allPartitions.removeAll(partitionToLatestLLCSegmentName.keySet());
        if (!allPartitions.isEmpty()) {
            LOGGER.info("Partitions: {} have no completed segments. Table: {} is not ready for {}. Skipping task generation.",
                allPartitions, realtimeTableName, taskType);
            continue;
        }

        TableTaskConfig tableTaskConfig = tableConfig.getTaskConfig();
        Preconditions.checkState(tableTaskConfig != null);
        Map<String, String> taskConfigs = tableTaskConfig.getConfigsForTaskType(taskType);
        Preconditions.checkState(taskConfigs != null, "Task config shouldn't be null for table: %s", realtimeTableName);

        // Get the bucket size and buffer
        String bucketTimePeriod =
            taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUCKET_TIME_PERIOD_KEY, DEFAULT_BUCKET_PERIOD);
        String bufferTimePeriod =
            taskConfigs.getOrDefault(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, DEFAULT_BUFFER_PERIOD);
        long bucketMs = TimeUtils.convertPeriodToMillis(bucketTimePeriod);
        long bufferMs = TimeUtils.convertPeriodToMillis(bufferTimePeriod);

        // Get watermark from RealtimeToOfflineSegmentsTaskMetadata ZNode. WindowStart = watermark. WindowEnd =
        // windowStart + bucket.
        long windowStartMs = getWatermarkMs(realtimeTableName, completedSegmentsZKMetadata, bucketMs);
        long windowEndMs = windowStartMs + bucketMs;

        // Find all COMPLETED segments with data overlapping execution window: windowStart (inclusive) to windowEnd
        // (exclusive)
        List<String> segmentNames = new ArrayList<>();
        List<String> downloadURLs = new ArrayList<>();
        Set<String> lastLLCSegmentPerPartition = new HashSet<>(partitionToLatestLLCSegmentName.values());
        boolean skipGenerate = false;
        while (true) {
            // Check that execution window is older than bufferTime
            if (windowEndMs > System.currentTimeMillis() - bufferMs) {
                LOGGER.info("Window with start: {} and end: {} is not older than buffer time: {} configured as {} ago. Skipping task "
                        + "generation: {}",
                    windowStartMs, windowEndMs, bufferMs, bufferTimePeriod, taskType);
                skipGenerate = true;
                break;
            }

            for (SegmentZKMetadata segmentZKMetadata : completedSegmentsZKMetadata) {
                String segmentName = segmentZKMetadata.getSegmentName();
                long segmentStartTimeMs = segmentZKMetadata.getStartTimeMs();
                long segmentEndTimeMs = segmentZKMetadata.getEndTimeMs();

                // Check overlap with window
                if (windowStartMs <= segmentEndTimeMs && segmentStartTimeMs < windowEndMs) {
                    // If last completed segment is being used, make sure that segment crosses over end of window.
                    // In the absence of this check, CONSUMING segments could contain some portion of the window. That data
                    // would be skipped forever.
                    if (lastLLCSegmentPerPartition.contains(segmentName) && segmentEndTimeMs < windowEndMs) {
                        LOGGER.info("Window data overflows into CONSUMING segments for partition of segment: {}. Skipping task "
                            + "generation: {}", segmentName, taskType);
                        skipGenerate = true;
                        break;
                    }
                    segmentNames.add(segmentName);
                    downloadURLs.add(segmentZKMetadata.getDownloadUrl());
                }
            }
            if (skipGenerate || !segmentNames.isEmpty()) {
                break;
            }

            // Nothing in this bucket; slide the window forward and retry.
            LOGGER.info("Found no eligible segments for task: {} with window [{} - {}), moving to the next time bucket",
                taskType, windowStartMs, windowEndMs);
            windowStartMs = windowEndMs;
            windowEndMs += bucketMs;
        }

        if (skipGenerate) {
            continue;
        }

        Map<String, String> configs = MinionTaskUtils.getPushTaskConfig(realtimeTableName, taskConfigs,
            _clusterInfoAccessor);
        configs.putAll(getBaseTaskConfigs(tableConfig, segmentNames));
        configs.put(MinionConstants.DOWNLOAD_URL_KEY, StringUtils.join(downloadURLs, MinionConstants.URL_SEPARATOR));
        configs.put(MinionConstants.UPLOAD_URL_KEY, _clusterInfoAccessor.getVipUrl() + "/segments");

        // Segment processor configs
        configs.put(RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, String.valueOf(windowStartMs));
        configs.put(RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, String.valueOf(windowEndMs));
        String roundBucketTimePeriod = taskConfigs.get(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY);
        if (roundBucketTimePeriod != null) {
            configs.put(RealtimeToOfflineSegmentsTask.ROUND_BUCKET_TIME_PERIOD_KEY, roundBucketTimePeriod);
        }
        // NOTE: Check and put both keys for backward-compatibility
        String mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY);
        if (mergeType == null) {
            mergeType = taskConfigs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
        }
        if (mergeType != null) {
            configs.put(RealtimeToOfflineSegmentsTask.MERGE_TYPE_KEY, mergeType);
            configs.put(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY, mergeType);
        }
        for (Map.Entry<String, String> entry : taskConfigs.entrySet()) {
            if (entry.getKey().endsWith(RealtimeToOfflineSegmentsTask.AGGREGATION_TYPE_KEY_SUFFIX)) {
                configs.put(entry.getKey(), entry.getValue());
            }
        }
        String maxNumRecordsPerSegment = taskConfigs.get(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY);
        if (maxNumRecordsPerSegment != null) {
            configs.put(RealtimeToOfflineSegmentsTask.MAX_NUM_RECORDS_PER_SEGMENT_KEY, maxNumRecordsPerSegment);
        }

        pinotTaskConfigs.add(new PinotTaskConfig(taskType, configs));
        LOGGER.info("Finished generating task configs for table: {} for task: {}", realtimeTableName, taskType);
    }
    return pinotTaskConfigs;
}
@Test
public void testBuffer() {
  // Verifies that no RealtimeToOffline task is generated while the candidate
  // time window still falls inside the buffer period (segments too fresh),
  // for both the default 2d buffer and a custom 15d buffer.
  Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
  taskConfigsMap.put(RealtimeToOfflineSegmentsTask.TASK_TYPE, new HashMap<>());
  TableConfig realtimeTableConfig = getRealtimeTableConfig(taskConfigsMap);

  // default buffer - 2d
  long now = System.currentTimeMillis();
  long watermarkMs = now - TimeUnit.DAYS.toMillis(1); // watermark inside the default buffer
  ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
  when(mockClusterInfoProvide.getTaskStates(RealtimeToOfflineSegmentsTask.TASK_TYPE)).thenReturn(new HashMap<>());
  when(mockClusterInfoProvide
      .getMinionTaskMetadataZNRecord(RealtimeToOfflineSegmentsTask.TASK_TYPE, REALTIME_TABLE_NAME))
      .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, watermarkMs).toZNRecord());
  SegmentZKMetadata segmentZKMetadata =
      getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, watermarkMs - 100, watermarkMs + 100,
          TimeUnit.MILLISECONDS, null);
  when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
      .thenReturn(Lists.newArrayList(segmentZKMetadata));
  when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME)).thenReturn(getIdealState(REALTIME_TABLE_NAME,
      Lists.newArrayList(segmentZKMetadata.getSegmentName())));

  RealtimeToOfflineSegmentsTaskGenerator generator = new RealtimeToOfflineSegmentsTaskGenerator();
  generator.init(mockClusterInfoProvide);
  List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
  // Window is still inside the 2d buffer -> no task expected.
  assertTrue(pinotTaskConfigs.isEmpty());

  // custom buffer
  Map<String, String> taskConfigs = new HashMap<>();
  taskConfigs.put(RealtimeToOfflineSegmentsTask.BUFFER_TIME_PERIOD_KEY, "15d");
  taskConfigsMap.put(RealtimeToOfflineSegmentsTask.TASK_TYPE, taskConfigs);
  realtimeTableConfig = getRealtimeTableConfig(taskConfigsMap);

  watermarkMs = now - TimeUnit.DAYS.toMillis(10); // still inside the custom 15d buffer
  when(mockClusterInfoProvide
      .getMinionTaskMetadataZNRecord(RealtimeToOfflineSegmentsTask.TASK_TYPE, REALTIME_TABLE_NAME))
      .thenReturn(new RealtimeToOfflineSegmentsTaskMetadata(REALTIME_TABLE_NAME, watermarkMs).toZNRecord());
  segmentZKMetadata =
      getSegmentZKMetadata("testTable__0__0__12345", Status.DONE, watermarkMs - 100, watermarkMs + 100,
          TimeUnit.MILLISECONDS, null);
  when(mockClusterInfoProvide.getSegmentsZKMetadata(REALTIME_TABLE_NAME))
      .thenReturn(Lists.newArrayList(segmentZKMetadata));
  when(mockClusterInfoProvide.getIdealState(REALTIME_TABLE_NAME)).thenReturn(getIdealState(REALTIME_TABLE_NAME,
      Lists.newArrayList(segmentZKMetadata.getSegmentName())));

  pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(realtimeTableConfig));
  // Window is still inside the 15d buffer -> no task expected.
  assertTrue(pinotTaskConfigs.isEmpty());
}
public void startAsync() { try { udfLoader.load(); ProcessingLogServerUtils.maybeCreateProcessingLogTopic( serviceContext.getTopicClient(), processingLogConfig, ksqlConfig); if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) { log.warn("processing log auto-create is enabled, but this is not supported " + "for headless mode."); } rocksDBConfigSetterHandler.accept(ksqlConfig); processesQueryFile(readQueriesFile(queriesFile)); showWelcomeMessage(); final Properties properties = new Properties(); ksqlConfig.originals().forEach((key, value) -> { if (nonNull(value)) { properties.put(key, value.toString()); } }); versionChecker.start(KsqlModuleType.SERVER, properties); } catch (final Exception e) { log.error("Failed to start KSQL Server with query file: " + queriesFile, e); throw e; } }
@Test public void shouldStartQueries() { // Given: when(ksqlEngine.getPersistentQueries()).thenReturn(ImmutableList.of(persistentQuery)); // When: standaloneExecutor.startAsync(); // Then: verify(persistentQuery).start(); }
public static String getNativeDataTypeSimpleName( ValueMetaInterface v ) { try { return v.getType() != ValueMetaInterface.TYPE_BINARY ? v.getNativeDataTypeClass().getSimpleName() : "Binary"; } catch ( KettleValueException e ) { LogChannelInterface log = new LogChannel( v ); log.logDebug( BaseMessages.getString( PKG, "FieldHelper.Log.UnknownNativeDataTypeSimpleName" ) ); return "Object"; } }
@Test public void getNativeDataTypeSimpleName_String() { ValueMetaString v = new ValueMetaString(); assertEquals( "String", FieldHelper.getNativeDataTypeSimpleName( v ) ); }
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
@Test public void testEmptyControlBatch() { buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); ByteBuffer buffer = ByteBuffer.allocate(1024); int currentOffset = 1; // Empty control batch should not cause an exception DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.MAGIC_VALUE_V2, 1L, (short) 0, -1, 0, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH, TimestampType.CREATE_TIME, time.milliseconds(), true, true); currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes())); commitTransaction(buffer, 1L, currentOffset); buffer.flip(); MemoryRecords records = MemoryRecords.readableRecords(buffer); assignFromUser(singleton(tp0)); subscriptions.seek(tp0, 0); // normal fetch assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); client.prepareResponse(body -> { FetchRequest request = (FetchRequest) body; assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel()); return true; }, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0)); consumerClient.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); assertEquals(fetchedRecords.get(tp0).size(), 2); }
public static String createImage(final @NotNull Path path) throws IOException { return createImage(ImageIO.read(Files.newInputStream(path))); }
@Test void testCreateImageOutputLength() throws IOException { final BufferedImage realImage = new BufferedImage(64, 64, BufferedImage.TYPE_INT_RGB); final String result = IOUtil.createImage(realImage); assertTrue(result.length() <= Short.MAX_VALUE); }
@Override public AdminUserDO getUser(Long id) { return userMapper.selectById(id); }
@Test public void testGetUser() { // mock 数据 AdminUserDO dbUser = randomAdminUserDO(); userMapper.insert(dbUser); // 准备参数 Long userId = dbUser.getId(); // 调用 AdminUserDO user = userService.getUser(userId); // 断言 assertPojoEquals(dbUser, user); }
public static String sha1Digest(File file) { try (InputStream is = new BufferedInputStream(new FileInputStream(file))) { return Base64.getEncoder().encodeToString(DigestUtils.sha1(is)); } catch (IOException e) { throw ExceptionUtils.bomb(e); } }
@Test void shouldCalculateSha1Digest() throws IOException { File tempFile = tempDir.toPath().resolve("testFile.txt").toFile(); FileUtils.writeStringToFile(tempFile, "12345", UTF_8); assertThat(FileHandler.sha1Digest(tempFile)).isEqualTo("jLIjfQZ5yojbZGTqxg2pY0VROWQ="); }
@Override public int read(ByteBuffer dst) throws IOException { if (mClosed) { throw new ClosedChannelException(); } int maxReadable = dst.remaining(); int position = dst.position(); synchronized (this) { ByteBuf buf = Unpooled.wrappedBuffer(dst); buf.writerIndex(0); int bytesRead = mReader.transferTo(buf); Preconditions.checkState(bytesRead <= maxReadable, "buffer overflow"); if (bytesRead > 0) { dst.position(position + bytesRead); } return bytesRead; } }
@Test public void dependentInternalPosition() throws Exception { int dataSize = 100; ByteBuffer buffer = ByteBuffer.allocate(dataSize); buffer.clear(); assertEquals(dataSize, IOUtils.read(mChannel, buffer)); buffer.flip(); assertTrue(BufferUtils.equalIncreasingByteBuffer(0, dataSize, buffer)); buffer.clear(); ByteBuf buf = Unpooled.wrappedBuffer(buffer); buf.writerIndex(0); assertEquals(dataSize, mReader.transferTo(buf)); assertTrue(BufferUtils.equalIncreasingByteBuffer(dataSize, dataSize, buffer)); }
public static boolean isTopic(String destinationName) { if (destinationName == null) { throw new IllegalArgumentException("destinationName is null"); } return destinationName.startsWith("topic:"); }
@Test public void testIsTopicNullDestinationName() { assertThrows(IllegalArgumentException.class, () -> DestinationNameParser.isTopic(null)); }
@Override public void deleteTenantPackage(Long id) { // 校验存在 validateTenantPackageExists(id); // 校验正在使用 validateTenantUsed(id); // 删除 tenantPackageMapper.deleteById(id); }
@Test public void testDeleteTenantPackage_notExists() { // 准备参数 Long id = randomLongId(); // 调用, 并断言异常 assertServiceException(() -> tenantPackageService.deleteTenantPackage(id), TENANT_PACKAGE_NOT_EXISTS); }
public Set<String> getEntityTypes() { return ENTITY_TYPES; }
@Test public void testGetEntityTypes() throws Exception { String text = "America is a big country."; System.setProperty(NamedEntityParser.SYS_PROP_NER_IMPL, NLTKNERecogniser.class.getName()); Tika tika = new Tika( new TikaConfig(NamedEntityParser.class.getResourceAsStream("tika-config.xml"))); Metadata md = new Metadata(); tika.parse(new ByteArrayInputStream(text.getBytes(StandardCharsets.UTF_8)), md); Set<String> names = new HashSet<>(Arrays.asList(md.getValues("NER_NAMES"))); if (names.size() != 0) { assertTrue(names.contains("America")); assertTrue(names.size() == 1); } }
@Override public int hashCode() { return operand.hashCode(); }
@Test void requireThatHashCodeIsImplemented() { Predicate predicate = SimplePredicates.newPredicate(); assertEquals(new Negation(predicate).hashCode(), new Negation(predicate).hashCode()); }
public Response get(URL url, Request request) throws IOException { return call(HttpMethods.GET, url, request); }
@Test public void testCall_secureClientOnUnverifiableServer() throws IOException { FailoverHttpClient httpClient = newHttpClient(false, false); Mockito.when(mockHttpRequest.execute()).thenThrow(new SSLPeerUnverifiedException("unverified")); try (Response response = httpClient.get(new URL("https://insecure"), fakeRequest(null))) { Assert.fail("Secure caller should fail if cannot verify server"); } catch (SSLException ex) { Assert.assertEquals("unverified", ex.getMessage()); Mockito.verifyNoInteractions(logger); } }
public Object get(final Object bean) { return get(this.patternParts, bean, false); }
@Test public void beanPathTest2() { final BeanPath pattern = new BeanPath("[userInfo][examInfoDict][0][id]"); assertEquals("userInfo", pattern.patternParts.get(0)); assertEquals("examInfoDict", pattern.patternParts.get(1)); assertEquals("0", pattern.patternParts.get(2)); assertEquals("id", pattern.patternParts.get(3)); }
protected Timestamp convertIntegerToTimestamp( Long longValue ) { if ( longValue == null ) { return null; } Long nanos = longValue; if ( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_MILLISECONDS.equalsIgnoreCase( conversionMode ) ) { // Convert milliseconds to nanoseconds! nanos *= 1000000L; } long ss = TimeUnit.SECONDS.convert( nanos, TimeUnit.NANOSECONDS ); long ms = TimeUnit.MILLISECONDS.convert( nanos, TimeUnit.NANOSECONDS ); long ns = TimeUnit.NANOSECONDS.convert( ss, TimeUnit.SECONDS ); int leftNs = (int) ( nanos - ns ); Timestamp timestamp = new Timestamp( ms ); timestamp.setNanos( leftNs ); return timestamp; }
@Test public void testConvertIntegerToTimestamp_DefaultMode() throws KettleValueException { System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE_LEGACY ); ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp(); Timestamp result = valueMetaTimestamp.convertIntegerToTimestamp( TIMESTAMP_AS_NANOSECONDS ); assertEquals( TIMESTAMP_WITH_NANOSECONDS, result ); System.setProperty( Const.KETTLE_TIMESTAMP_NUMBER_CONVERSION_MODE, "Something invalid!" ); valueMetaTimestamp = new ValueMetaTimestamp(); result = valueMetaTimestamp.convertIntegerToTimestamp( TIMESTAMP_AS_NANOSECONDS ); assertEquals( TIMESTAMP_WITH_NANOSECONDS, result ); }
public static boolean validateCSConfiguration( final Configuration oldConfParam, final Configuration newConf, final RMContext rmContext) throws IOException { // ensure that the oldConf is deep copied Configuration oldConf = new Configuration(oldConfParam); QueueMetrics.setConfigurationValidation(oldConf, true); QueueMetrics.setConfigurationValidation(newConf, true); CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler(); CapacityScheduler newCs = new CapacityScheduler(); try { //TODO: extract all the validation steps and replace reinitialize with //the specific validation steps newCs.setConf(oldConf); newCs.setRMContext(rmContext); newCs.init(oldConf); newCs.addNodes(liveScheduler.getAllNodes()); newCs.reinitialize(newConf, rmContext, true); return true; } finally { newCs.stop(); } }
@Test public void testValidateCSConfigDominantRCAbsoluteModeParentMaxVcoreExceeded() throws Exception { setUpMockRM(true); RMContext rmContext = mockRM.getRMContext(); CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration(); CapacitySchedulerConfiguration newConfiguration = new CapacitySchedulerConfiguration(cs.getConfiguration()); newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, VCORE_EXCEEDED_MAXRES); try { CapacitySchedulerConfigValidator .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext); fail("Parent maximum capacity exceeded"); } catch (IOException e) { Assert.assertTrue(e.getCause().getMessage() .startsWith("Max resource configuration")); } finally { mockRM.stop(); } }
public String view(TableIdentifier ident) { return SLASH.join( "v1", prefix, "namespaces", RESTUtil.encodeNamespace(ident.namespace()), "views", RESTUtil.encodeString(ident.name())); }
@Test public void viewWithSlash() { TableIdentifier ident = TableIdentifier.of("n/s", "vi/ew-name"); assertThat(withPrefix.view(ident)) .isEqualTo("v1/ws/catalog/namespaces/n%2Fs/views/vi%2Few-name"); assertThat(withoutPrefix.view(ident)).isEqualTo("v1/namespaces/n%2Fs/views/vi%2Few-name"); }
@PUT @Path("/registry/{registrationId}") @Produces(MediaType.TEXT_PLAIN) public Uni<Boolean> update(@PathParam("registrationId") String registrationId, final Record record) { record.setRegistration(registrationId); return provider.update(record); }
@Test public void testUpdate() { final ServiceRegistration service = createTestService(); final Response registerResponse = given() .body(service) .contentType(MediaType.APPLICATION_JSON) .post("/service-discovery/registry"); final Response getResponse = when() .get("service-discovery/service/{name}", service.getName()); // verify expected response and extract the returned list List<Map<String, Object>> results = (List<Map<String, Object>>) getResponse.then() .statusCode(200) .extract() .as(List.class); Map<String, Object> record = results.get(0); record.put("status", Status.DOWN.toString()); final Response updateResponse = given() .body(record) .contentType(MediaType.APPLICATION_JSON) .put("/service-discovery/registry/{registrationId}", record.get("registration")); updateResponse.then() .statusCode(200) .body(is(Boolean.TRUE.toString())); final Response getResult = given() .queryParam("includeOutOfService", true) .when() .get("service-discovery/service/{name}", service.getName()); // verify expected response and extract the returned list List<Map<String, Object>> updatedResults = (List<Map<String, Object>>) getResult.then() .statusCode(200) .extract() .as(List.class); Map<String, Object> updatedRecord = updatedResults.get(0); assertEquals(Status.DOWN.toString(), updatedRecord.get("status"), "Updated record does not have a down status"); }
@SuppressWarnings("unchecked") @Override public <S extends StateStore> S getStateStore(final String name) { final StateStore store = stateManager.getGlobalStore(name); return (S) getReadWriteStore(store); }
@Test public void shouldNotAllowInitForTimestampedWindowStore() { when(stateManager.getGlobalStore(GLOBAL_TIMESTAMPED_WINDOW_STORE_NAME)).thenReturn(mock(TimestampedWindowStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_TIMESTAMPED_WINDOW_STORE_NAME); try { store.init((StateStoreContext) null, null); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } }
public static void disablePushConsumption(DefaultMqPushConsumerWrapper wrapper, Set<String> topics) { Set<String> subscribedTopic = wrapper.getSubscribedTopics(); if (subscribedTopic.stream().anyMatch(topics::contains)) { suspendPushConsumer(wrapper); return; } resumePushConsumer(wrapper); }
@Test public void testDisablePullConsumptionWithNoSubTractTopics() { subscribedTopics = new HashSet<>(); subscribedTopics.add("test-topic-2"); subscribedTopics.add("test-topic-3"); pushConsumerWrapper.setSubscribedTopics(subscribedTopics); pushConsumerWrapper.setProhibition(true); RocketMqPushConsumerController.disablePushConsumption(pushConsumerWrapper, prohibitionTopics); Assert.assertFalse(pushConsumerWrapper.isProhibition()); // 恢复消费后,再次下发禁消费配置 MQClientInstance clientFactory = pushConsumerWrapper.getClientFactory(); Mockito.reset(clientFactory); RocketMqPushConsumerController.disablePushConsumption(pushConsumerWrapper, prohibitionTopics); Mockito.verify(clientFactory, Mockito.times(0)) .registerConsumer(Mockito.any(), Mockito.any()); }
public static long readVLong(ByteData arr, long position) { byte b = arr.get(position++); if(b == (byte) 0x80) throw new RuntimeException("Attempting to read null value as long"); long value = b & 0x7F; while ((b & 0x80) != 0) { b = arr.get(position++); value <<= 7; value |= (b & 0x7F); } return value; }
@Test public void testReadVLongHollowBlobInput() throws IOException { HollowBlobInput hbi = HollowBlobInput.serial(BYTES_VALUE_129); Assert.assertEquals(129l, VarInt.readVLong(hbi)); }
@Override public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException { if (metric == null) { throw new NullPointerException("metric == null"); } return metric; }
@Test public void registeringAGaugeTriggersNoNotification() { assertThat(registry.register("thing", gauge)).isEqualTo(gauge); verify(listener, never()).onGaugeAdded("thing", gauge); }
public void processProperties(Properties properties) { if (properties == null) { return; } this.description = InterpolationUtil.interpolate(this.description, properties); for (License l : this.getLicenses()) { l.setName(InterpolationUtil.interpolate(l.getName(), properties)); l.setUrl(InterpolationUtil.interpolate(l.getUrl(), properties)); } this.name = InterpolationUtil.interpolate(this.name, properties); this.projectURL = InterpolationUtil.interpolate(this.projectURL, properties); this.organization = InterpolationUtil.interpolate(this.organization, properties); this.parentGroupId = InterpolationUtil.interpolate(this.parentGroupId, properties); this.parentArtifactId = InterpolationUtil.interpolate(this.parentArtifactId, properties); this.parentVersion = InterpolationUtil.interpolate(this.parentVersion, properties); }
@Test public void testProcessProperties() { String text = "This is a test of '${key}' '${nested}'"; Model instance = new Model(); instance.setName(text); instance.processProperties(null); String expResults = "This is a test of '${key}' '${nested}'"; assertEquals(expResults, instance.getName()); Properties prop = new Properties(); prop.setProperty("key", "value"); prop.setProperty("nested", "nested ${key}"); instance.setName(text); instance.processProperties(prop); expResults = "This is a test of 'value' 'nested value'"; assertEquals(expResults, instance.getName()); }
public synchronized void maybeAddPartition(TopicPartition topicPartition) { maybeFailWithError(); throwIfPendingState("send"); if (isTransactional()) { if (!hasProducerId()) { throw new IllegalStateException("Cannot add partition " + topicPartition + " to transaction before completing a call to initTransactions"); } else if (currentState != State.IN_TRANSACTION) { throw new IllegalStateException("Cannot add partition " + topicPartition + " to transaction while in state " + currentState); } else if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) { return; } else { log.debug("Begin adding new partition {} to transaction", topicPartition); txnPartitionMap.getOrCreate(topicPartition); newPartitionsInTransaction.add(topicPartition); } } }
@Test public void testFailIfNotReadyForSendNoOngoingTransaction() { doInitTransactions(); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); }
@Override public List<PrivilegedOperation> bootstrap(Configuration configuration) throws ResourceHandlerException { List<GpuDevice> usableGpus; try { usableGpus = gpuDiscoverer.getGpusUsableByYarn(); if (usableGpus == null || usableGpus.isEmpty()) { String message = "GPU is enabled on the NodeManager, but couldn't find " + "any usable GPU devices, please double check configuration!"; LOG.error(message); throwIfNecessary(new ResourceHandlerException(message), configuration); } } catch (YarnException e) { LOG.error("Exception when trying to get usable GPU device", e); throw new ResourceHandlerException(e); } for (GpuDevice gpu : usableGpus) { gpuAllocator.addGpu(gpu); } // And initialize cgroups this.cGroupsHandler.initializeCGroupController( CGroupsHandler.CGroupController.DEVICES); return null; }
@Test public void testBootstrapWithMockGpuDiscoverer() throws Exception { GpuDiscoverer mockDiscoverer = mock(GpuDiscoverer.class); Configuration conf = new YarnConfiguration(); mockDiscoverer.initialize(conf, nvidiaBinaryHelper); expected.expect(ResourceHandlerException.class); gpuResourceHandler.bootstrap(conf); }
public @NonNull String head(int numChars) throws IOException { char[] buf = new char[numChars]; int read = 0; try (Reader r = new FileReader(file)) { while (read < numChars) { int d = r.read(buf, read, buf.length - read); if (d < 0) break; read += d; } return new String(buf, 0, read); } }
@Test public void shortHead() throws Exception { File f = tmp.newFile(); Files.writeString(f.toPath(), "hello", Charset.defaultCharset()); TextFile t = new TextFile(f); assertEquals("hello", t.head(35)); }
@Override public void validatePostList(Collection<Long> ids) { if (CollUtil.isEmpty(ids)) { return; } // 获得岗位信息 List<PostDO> posts = postMapper.selectBatchIds(ids); Map<Long, PostDO> postMap = convertMap(posts, PostDO::getId); // 校验 ids.forEach(id -> { PostDO post = postMap.get(id); if (post == null) { throw exception(POST_NOT_FOUND); } if (!CommonStatusEnum.ENABLE.getStatus().equals(post.getStatus())) { throw exception(POST_NOT_ENABLE, post.getName()); } }); }
@Test public void testValidatePostList_notEnable() { // mock 数据 PostDO postDO = randomPostDO().setStatus(CommonStatusEnum.DISABLE.getStatus()); postMapper.insert(postDO); // 准备参数 List<Long> ids = singletonList(postDO.getId()); // 调用, 并断言异常 assertServiceException(() -> postService.validatePostList(ids), POST_NOT_ENABLE, postDO.getName()); }
public List<Stream> match(Message message) {
  // Evaluates every stream rule against the message and returns the matching
  // streams. Also removes the default stream from the message when a matching
  // stream requests it.
  final Set<Stream> result = Sets.newHashSet();
  // Streams that can no longer change outcome: AND-streams that already failed
  // a rule, and OR-streams that already matched.
  final Set<String> blackList = Sets.newHashSet();
  for (final Rule rule : rulesList) {
    if (blackList.contains(rule.getStreamId())) {
      continue;
    }
    final StreamRule streamRule = rule.getStreamRule();
    final StreamRuleType streamRuleType = streamRule.getType();
    final Stream.MatchingType matchingType = rule.getMatchingType();
    // Rules that require a field cannot match a message lacking it; for an
    // AND stream that is a definitive miss.
    if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType)
        && !message.hasField(streamRule.getField())) {
      if (matchingType == Stream.MatchingType.AND) {
        result.remove(rule.getStream());
        // blacklist stream because it can't match anymore
        blackList.add(rule.getStreamId());
      }
      continue;
    }
    final Stream stream;
    // Regex rules are evaluated with a timeout to bound pathological patterns.
    if (streamRuleType != StreamRuleType.REGEX) {
      stream = rule.match(message);
    } else {
      stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
    }
    if (stream == null) {
      if (matchingType == Stream.MatchingType.AND) {
        result.remove(rule.getStream());
        // blacklist stream because it can't match anymore
        blackList.add(rule.getStreamId());
      }
    } else {
      result.add(stream);
      if (matchingType == Stream.MatchingType.OR) {
        // blacklist stream because it is already matched
        blackList.add(rule.getStreamId());
      }
    }
  }

  final Stream defaultStream = defaultStreamProvider.get();
  boolean alreadyRemovedDefaultStream = false;
  for (Stream stream : result) {
    if (stream.getRemoveMatchesFromDefaultStream()) {
      // Only the first match actually removes the stream; later matches reuse
      // the flag instead of re-attempting removal.
      if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
        alreadyRemovedDefaultStream = true;
        if (LOG.isTraceEnabled()) {
          LOG.trace("Successfully removed default stream <{}> from message <{}>",
              defaultStream.getId(), message.getId());
        }
      } else {
        // A previously executed message processor (or Illuminate) has likely already removed the
        // default stream from the message. Now, the message has matched a stream in the Graylog
        // MessageFilterChain, and the matching stream is also set to remove the default stream.
        // This is usually from user-defined stream rules, and is generally not a problem.
        cannotRemoveDefaultMeter.inc();
        if (LOG.isTraceEnabled()) {
          LOG.trace("Couldn't remove default stream <{}> from message <{}>",
              defaultStream.getId(), message.getId());
        }
      }
    }
  }

  return ImmutableList.copyOf(result);
}
@Test public void testMultipleStreamWithDifferentMatching() { final String dummyField = "dummyField"; final String dummyValue = "dummyValue"; final StreamRule streamRule1 = getStreamRuleMock("StreamRule1Id", StreamRuleType.EXACT, dummyField, dummyValue); final StreamRule streamRule2 = getStreamRuleMock("StreamRule2Id", StreamRuleType.EXACT, dummyField, "not" + dummyValue); final Stream stream1 = mock(Stream.class); when(stream1.getId()).thenReturn("Stream1Id"); when(stream1.getMatchingType()).thenReturn(Stream.MatchingType.OR); when(stream1.getStreamRules()).thenReturn(Lists.newArrayList(streamRule1, streamRule2)); final Stream stream2 = mock(Stream.class); when(stream2.getId()).thenReturn("Stream2Id"); when(stream2.getMatchingType()).thenReturn(Stream.MatchingType.AND); when(stream2.getStreamRules()).thenReturn(Lists.newArrayList(streamRule1, streamRule2)); final Message message = mock(Message.class); when(message.getField(eq(dummyField))).thenReturn(dummyValue); final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream1, stream2)); final List<Stream> result = engine.match(message); assertThat(result).hasSize(1); assertThat(result).contains(stream1); assertThat(result).doesNotContain(stream2); }
@Override public boolean equals(final Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } NacosConfig that = (NacosConfig) o; return Objects.equals(url, that.url) && Objects.equals(namespace, that.namespace) && Objects.equals(username, that.username) && Objects.equals(password, that.password) && Objects.equals(acm, that.acm); }
@Test public void testEquals() { assertEquals(nacosConfig, nacosConfig); assertEquals(nacosConfig, that); assertNotEquals(null, nacosConfig); assertNotEquals(nacosConfig, new Object()); }
static SonarEdition parseEdition(String edition) { String str = trimToEmpty(edition.toUpperCase(Locale.ENGLISH)); try { return SonarEdition.valueOf(str); } catch (IllegalArgumentException e) { throw new IllegalStateException(format("Invalid edition found in '%s': '%s'", EDITION_FILE_PATH, str)); } }
@Test void throw_ISE_if_edition_is_invalid() { assertThatThrownBy(() -> MetadataLoader.parseEdition("trash")) .isInstanceOf(IllegalStateException.class) .hasMessage("Invalid edition found in '/sonar-edition.txt': 'TRASH'"); }
public static SelectExpression parseSelectExpression(final String expressionText) { final SqlBaseParser.SelectItemContext parseCtx = GrammarParseUtil.getParseTree( expressionText, SqlBaseParser::selectItem ); if (!(parseCtx instanceof SqlBaseParser.SelectSingleContext)) { throw new IllegalArgumentException("Illegal select item type in: " + expressionText); } final SqlBaseParser.SelectSingleContext selectSingleContext = (SqlBaseParser.SelectSingleContext) parseCtx; if (selectSingleContext.identifier() == null) { throw new IllegalArgumentException("Select item must have identifier in: " + expressionText); } return SelectExpression.of( ColumnName.of(ParserUtil.getIdentifierText(selectSingleContext.identifier())), new AstBuilder(TypeRegistry.EMPTY).buildExpression(selectSingleContext.expression()) ); }
@Test public void shouldThrowOnSelectExpressionWithoutAlias() { // When: final Exception e = assertThrows( IllegalArgumentException.class, () -> parseSelectExpression("1 + 2") ); // Then: assertThat(e.getMessage(), containsString("Select item must have identifier in: 1 + 2")); }
public static void tryEnrichClusterEntryPointError(@Nullable Throwable root) { tryEnrichOutOfMemoryError( root, JM_METASPACE_OOM_ERROR_MESSAGE, JM_DIRECT_OOM_ERROR_MESSAGE, JM_HEAP_SPACE_OOM_ERROR_MESSAGE); }
@Test public void testAnyOtherOOMHandling() { String message = "Any other message won't be changed."; OutOfMemoryError error = new OutOfMemoryError(message); ClusterEntryPointExceptionUtils.tryEnrichClusterEntryPointError(error); assertThat(error.getMessage(), is(message)); }
@NonNull public URI selectedIdentityProvider(@NonNull SelectedIdpRequest request) { var selectedIdp = request.selectedIdentityProvider(); if (selectedIdp == null || selectedIdp.isBlank()) { throw new ValidationException(new Message("error.noProvider")); } var session = mustFindSession(request.sessionId()); var step2 = session.selectSectoralIdpStep().redirectToSectoralIdp(selectedIdp); var federatedLogin = step2.idpRedirectUri(); var newSession = session.toBuilder().trustedSectoralIdpStep(step2).build(); sessionRepo.save(newSession); return federatedLogin; }
@Test void selectIdp_noSession() { var sessionRepo = mock(SessionRepo.class); var sessionId = IdGenerator.generateID(); var selectedIdpIssuer = "https://aok-testfalen.example.com"; when(sessionRepo.load(sessionId)).thenReturn(null); var sut = new AuthService(BASE_URI, null, null, sessionRepo, null, null); var req = new SelectedIdpRequest(sessionId, selectedIdpIssuer); // when assertThrows(ValidationException.class, () -> sut.selectedIdentityProvider(req)); // then verify(sessionRepo).load(sessionId); }
@Override public boolean hasLogBlockLength() { switch (super.getVersion()) { case DEFAULT_VERSION: return false; case 1: return true; default: return false; } }
@Test public void testHasLogBlockLength() { assertFalse(verDefault.hasLogBlockLength()); assertTrue(verCurrent.hasLogBlockLength()); HoodieLogFormatVersion verNew = new HoodieLogFormatVersion(HoodieLogFormat.CURRENT_VERSION + 1); assertFalse(verNew.hasLogBlockLength()); }
@Override public MaterialPollResult responseMessageForLatestRevision(String responseBody) { Map responseBodyMap = getResponseMap(responseBody); return new MaterialPollResult(toMaterialDataMap(responseBodyMap), toSCMRevision(responseBodyMap)); }
@Test public void shouldBuildSCMDataFromLatestRevisionResponse() throws Exception { String responseBodyWithSCMData = "{\"revision\":{\"revision\":\"r1\",\"timestamp\":\"2011-07-14T19:43:37.100Z\"},\"scm-data\":{\"key-one\":\"value-one\"}}"; MaterialPollResult pollResult = messageHandler.responseMessageForLatestRevision(responseBodyWithSCMData); Map<String, String> scmData = new HashMap<>(); scmData.put("key-one", "value-one"); assertThat(pollResult.getMaterialData(), is(scmData)); assertThat(pollResult.getRevisions().get(0).getRevision(), is("r1")); }
public CompletableFuture<Void> handlePullQuery( final ServiceContext serviceContext, final PullPhysicalPlan pullPhysicalPlan, final ConfiguredStatement<Query> statement, final RoutingOptions routingOptions, final PullQueryWriteStream pullQueryQueue, final CompletableFuture<Void> shouldCancelRequests ) { final List<KsqlPartitionLocation> allLocations = pullPhysicalPlan.getMaterialization().locator() .locate( pullPhysicalPlan.getKeys(), routingOptions, routingFilterFactory, pullPhysicalPlan.getPlanType() == PullPhysicalPlanType.RANGE_SCAN ); final Map<Integer, List<Host>> emptyPartitions = allLocations.stream() .filter(loc -> loc.getNodes().stream().noneMatch(node -> node.getHost().isSelected())) .collect(Collectors.toMap( KsqlPartitionLocation::getPartition, loc -> loc.getNodes().stream().map(KsqlNode::getHost).collect(Collectors.toList()))); if (!emptyPartitions.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Unable to execute pull query. " + emptyPartitions.entrySet() .stream() .map(kv -> String.format( "Partition %s failed to find valid host. Hosts scanned: %s", kv.getKey(), kv.getValue())) .collect(Collectors.joining(", ", "[", "]"))); LOG.debug(materializationException.getMessage()); throw materializationException; } // at this point we should filter out the hosts that we should not route to final List<KsqlPartitionLocation> locations = allLocations .stream() .map(KsqlPartitionLocation::removeFilteredHosts) .collect(Collectors.toList()); final CompletableFuture<Void> completableFuture = new CompletableFuture<>(); coordinatorExecutorService.submit(() -> { try { executeRounds(serviceContext, pullPhysicalPlan, statement, routingOptions, locations, pullQueryQueue, shouldCancelRequests); completableFuture.complete(null); } catch (Throwable t) { completableFuture.completeExceptionally(t); } }); return completableFuture; }
/**
 * Two-round routing: the local execution of partitions 1 and 3 fails with a
 * standby-fallback error, so those partitions are forwarded to node2 in a
 * second remote round (after node2 already served partitions 2 and 4 in the
 * first round).
 */
@Test
public void shouldCallRouteQuery_twoRound() throws InterruptedException, ExecutionException {
    // Given:
    locate(location1, location2, location3, location4);
    // Local execution for partitions 1 and 3 fails, triggering the fallback round.
    doAnswer(i -> {
        throw new StandbyFallbackException("Error!");
    }).when(pullPhysicalPlan).execute(eq(ImmutableList.of(location1, location3)), any(), any());
    when(ksqlClient.makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any()))
        .thenAnswer(new Answer() {
            // Invocation counter: first call must carry partitions "2,4",
            // the fallback call must carry partitions "1,3".
            private int count = 0;

            public Object answer(InvocationOnMock i) {
                Map<String, ?> requestProperties = i.getArgument(3);
                PullQueryWriteStream rowConsumer = i.getArgument(4);
                if (++count == 1) {
                    assertThat(requestProperties.get(
                        KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS), is("2,4"));
                    rowConsumer.write(
                        ImmutableList.of(
                            StreamedRow.header(queryId, logicalSchema),
                            StreamedRow.pullRow(GenericRow.fromList(ROW2), Optional.empty())));
                } else {
                    assertThat(requestProperties.get(
                        KsqlRequestConfig.KSQL_REQUEST_QUERY_PULL_PARTITIONS), is("1,3"));
                    rowConsumer.write(
                        ImmutableList.of(
                            StreamedRow.header(queryId, logicalSchema),
                            StreamedRow.pullRow(GenericRow.fromList(ROW1), Optional.empty())));
                }
                return RestResponse.successful(200, 2);
            }
        }
    );
    // When:
    CompletableFuture<Void> future = haRouting.handlePullQuery(serviceContext, pullPhysicalPlan,
        statement, routingOptions, pullQueryQueue, disconnect);
    future.get();
    // Then:
    verify(pullPhysicalPlan).execute(eq(ImmutableList.of(location1, location3)), any(), any());
    // node2 was hit once per round.
    verify(ksqlClient, times(2)).makeQueryRequest(eq(node2.location()), any(), any(), any(), any(), any(), any());
    assertThat(pullQueryQueue.size(), is(2));
    // First-round row (ROW2) arrives before the fallback-round row (ROW1).
    assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW2));
    assertThat(pullQueryQueue.pollRow(1, TimeUnit.SECONDS).getRow(), is(ROW1));
}
/**
 * Validates the subject ({@code sub}) claim value by delegating to the common
 * string-claim validation.
 *
 * @param claimName  the claim's name, used in any validation error message
 * @param claimValue the raw claim value to validate
 * @return the validated claim value
 * @throws ValidateException if the value fails string-claim validation
 */
public static String validateSubject(String claimName, String claimValue) throws ValidateException {
    final String validated = validateString(claimName, claimValue);
    return validated;
}
@Test
public void testValidateClaimNameOverrideDisallowsEmptyNullAndWhitespace() {
    // Empty, null and whitespace-only subject values must all be rejected.
    final String[] invalidSubjects = {"", null, " "};
    for (final String invalidSubject : invalidSubjects) {
        assertThrows(ValidateException.class,
                () -> ClaimValidationUtils.validateSubject("sub", invalidSubject));
    }
}
/**
 * Verifies a signature-with-message-recovery over the given data.
 * <p>
 * The signature block is transformed with the public-key operation, its
 * padding/trailer is checked, and the digest is recomputed over the recovered
 * message portion plus {@code data} before being compared against the digest
 * embedded in the recovered block.
 *
 * @param data      the externally supplied message bytes to include in the digest
 * @param signature the signature block to verify
 * @param digest    the digest algorithm the signature was produced with
 * @throws VerificationException if the recomputed digest does not match
 */
@Override
public void verify(byte[] data, byte[] signature, MessageDigest digest) {
    // Apply the public-key operation to recover the padded/encoded block.
    final byte[] decrypted = engine.processBlock(signature, 0, signature.length);
    // checkSignature presumably validates padding/trailer and returns the
    // trailer length (delta) — confirm against its definition.
    final int delta = checkSignature(decrypted, digest);
    // Offset at which the embedded digest starts within the recovered block.
    final int offset = decrypted.length - digest.getDigestLength() - delta;
    // Hash the recovered message portion (skipping the leading header byte at
    // index 0), then the caller-supplied data.
    digest.update(decrypted, 1, offset - 1);
    digest.update(data);
    // Compare the recomputed digest with the digest embedded at 'offset'.
    if (!CryptoUtils.compare(digest.digest(), decrypted, offset)) {
        throw new VerificationException("Invalid signature");
    }
}
@Test
public void shouldThrowVerificationExceptionIfTrailerIsDifferentFromDigestAlgorithm() {
    // Sign with a SHA-1 trailer, then verify with SHA-512: the trailer must
    // be reported as mismatching the digest algorithm.
    final byte[] message = CryptoUtils.random(40);
    final byte[] sha1Signature = sign(0x54, message, ISOTrailers.TRAILER_SHA1, "SHA1");

    thrown.expect(VerificationException.class);
    thrown.expectMessage("Trailer does not match digest algorithm");

    new DssRsaSignatureVerifier(PUBLIC).verify(message, sha1Signature, "SHA-512");
}
/**
 * Collects the name → data type mapping for every mining field of the model
 * whose usage type is TARGET or PREDICTED, preserving mining-schema order.
 *
 * @param fields the fields used to resolve each mining field's data type
 * @param model  the model whose mining schema is inspected (may lack a schema)
 * @return an insertion-ordered map of target/predicted field names to types;
 *         empty when the model has no mining schema or no mining fields
 */
public static Map<String, DATA_TYPE> getTargetFieldsTypeMap(final List<Field<?>> fields, final Model model) {
    final Map<String, DATA_TYPE> targetTypes = new LinkedHashMap<>();
    // Guard clause: nothing to collect without a mining schema and its fields.
    if (model.getMiningSchema() == null || model.getMiningSchema().getMiningFields() == null) {
        return targetTypes;
    }
    for (final MiningField miningField : model.getMiningSchema().getMiningFields()) {
        final MiningField.UsageType usage = miningField.getUsageType();
        if (MiningField.UsageType.TARGET.equals(usage) || MiningField.UsageType.PREDICTED.equals(usage)) {
            targetTypes.put(miningField.getName(), getDATA_TYPE(fields, miningField.getName()));
        }
    }
    return targetTypes;
}
@Test
void getTargetFieldsWithoutTargetFieldsWithTargets() {
    // All mining fields are ACTIVE: even though Targets are present, the
    // target-type map must come back empty.
    final Model model = new RegressionModel();
    final DataDictionary dataDictionary = new DataDictionary();
    final MiningSchema miningSchema = new MiningSchema();
    final Targets targets = new Targets();
    for (int i = 0; i < 3; i++) {
        final DataField dataField = getRandomDataField();
        dataDictionary.addDataFields(dataField);
        miningSchema.addMiningFields(getMiningField(dataField.getName(), MiningField.UsageType.ACTIVE));
        targets.addTargets(getTarget(dataField.getName(), null));
    }
    model.setMiningSchema(miningSchema);
    model.setTargets(targets);

    final Map<String, DATA_TYPE> retrieved = org.kie.pmml.compiler.api.utils.ModelUtils
            .getTargetFieldsTypeMap(getFieldsFromDataDictionary(dataDictionary), model);

    assertThat(retrieved).isNotNull();
    assertThat(retrieved).isEmpty();
}
/**
 * Returns the {@code DoFnSignature} for the given {@code DoFn} class, parsing it
 * on first request and serving it from {@code signatureCache} afterwards.
 *
 * @param fn the {@code DoFn} class to analyze
 * @return the cached or freshly parsed signature
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, fnClass -> parseSignature(fnClass));
}
/**
 * An {@code @TimerId} declared in a subclass must not satisfy a timer callback
 * declared in a different class: signature analysis must reject it with a
 * message about the callback being "declared in a different class", mentioning
 * timers but not state.
 */
@Test
public void testOnTimerDeclaredInSubclass() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Callback");
    thrown.expectMessage("declared in a different class");
    thrown.expectMessage(DoFnWithOnlyCallback.TIMER_ID);
    // The error must be about timers, not state.
    thrown.expectMessage(not(mentionsState()));
    thrown.expectMessage(mentionsTimers());
    DoFnSignatures.getSignature(
        new DoFnWithOnlyCallback() {
            // The subclass declares the timer the superclass callback refers to.
            @TimerId(DoFnWithOnlyCallback.TIMER_ID)
            private final TimerSpec myfield1 = TimerSpecs.timer(TimeDomain.EVENT_TIME);

            @ProcessElement
            @Override
            public void foo(ProcessContext context) {}
        }.getClass());
}
/**
 * Orders timestamps first by mastership term number, then by sequence number
 * within the same term.
 *
 * @param o the timestamp to compare against; must be a MastershipBasedTimestamp
 * @return a negative, zero, or positive value per the {@code Comparable} contract
 */
@Override
public int compareTo(Timestamp o) {
    checkArgument(o instanceof MastershipBasedTimestamp,
                  "Must be MastershipBasedTimestamp", o);
    final MastershipBasedTimestamp other = (MastershipBasedTimestamp) o;
    // Term number dominates; sequence number only breaks ties within a term.
    final int byTerm = Long.compare(this.termNumber, other.termNumber);
    if (byTerm != 0) {
        return byTerm;
    }
    return Long.compare(this.sequenceNumber, other.sequenceNumber);
}
/**
 * compareTo must order by term number first, then by sequence number, and be
 * consistent with equality for equal-valued timestamps.
 */
@SuppressWarnings("SelfComparison")
@Test
public final void testCompareTo() {
    // Reflexive and equal-value comparisons yield 0.
    assertTrue(TS_1_1.compareTo(TS_1_1) == 0);
    assertTrue(TS_1_1.compareTo(new MastershipBasedTimestamp(1, 1)) == 0);
    // Same term: ordered by sequence number.
    assertTrue(TS_1_1.compareTo(TS_1_2) < 0);
    assertTrue(TS_1_2.compareTo(TS_1_1) > 0);
    // Higher term wins regardless of sequence number.
    assertTrue(TS_1_2.compareTo(TS_2_1) < 0);
    assertTrue(TS_1_2.compareTo(TS_2_2) < 0);
    assertTrue(TS_2_1.compareTo(TS_1_1) > 0);
    assertTrue(TS_2_2.compareTo(TS_1_1) > 0);
}
/**
 * Decodes a single DNS resource record from the buffer.
 * <p>
 * Returns {@code null} and resets the reader index to its starting position
 * whenever the buffer does not yet contain the complete record, so the caller
 * can retry once more data arrives.
 *
 * @param in the buffer positioned at the start of the record
 * @return the decoded record, or {@code null} if the buffer is truncated
 */
@Override
public final <T extends DnsRecord> T decodeRecord(ByteBuf in) throws Exception {
    final int startOffset = in.readerIndex();
    final String name = decodeName(in);
    final int endOffset = in.writerIndex();
    // After the name, a record needs at least 10 more bytes:
    // type (2) + class (2) + TTL (4) + RDLENGTH (2).
    if (endOffset - in.readerIndex() < 10) {
        // Not enough data
        in.readerIndex(startOffset);
        return null;
    }
    final DnsRecordType type = DnsRecordType.valueOf(in.readUnsignedShort());
    final int aClass = in.readUnsignedShort();
    final long ttl = in.readUnsignedInt();
    final int length = in.readUnsignedShort();
    final int offset = in.readerIndex();
    // The RDATA itself must also be fully present.
    if (endOffset - offset < length) {
        // Not enough data
        in.readerIndex(startOffset);
        return null;
    }
    @SuppressWarnings("unchecked")
    T record = (T) decodeRecord(name, type, aClass, ttl, in, offset, length);
    // Skip the full RDATA regardless of how much the sub-decoder consumed.
    in.readerIndex(offset + length);
    return record;
}
/**
 * A record truncated inside its fixed header must make the decoder return
 * {@code null} without consuming any bytes.
 */
@Test
public void testTruncatedPacket() throws Exception {
    ByteBuf buffer = Unpooled.buffer();
    buffer.writeByte(0);                           // root name (empty label)
    buffer.writeShort(DnsRecordType.A.intValue()); // type A
    buffer.writeShort(1);                          // class 1 (IN)
    buffer.writeInt(32);                           // TTL
    // Write a truncated last value.
    buffer.writeByte(0);                           // only 1 of the 2 RDLENGTH bytes
    DefaultDnsRecordDecoder decoder = new DefaultDnsRecordDecoder();
    try {
        int readerIndex = buffer.readerIndex();
        // Decoder must report "not enough data" and leave the reader index untouched.
        assertNull(decoder.decodeRecord(buffer));
        assertEquals(readerIndex, buffer.readerIndex());
    } finally {
        buffer.release();
    }
}
/**
 * Checks whether the polyline described by {@code pointList} intersects the
 * rectangle held by the given intersector, testing each consecutive segment.
 * A single point is tested as a degenerate (zero-length) segment.
 *
 * @param intersector the rectangle/line intersection tester
 * @param pointList   the polyline; must contain at least one point
 * @return true if any segment intersects the rectangle
 * @throws IllegalArgumentException if the point list is empty
 */
public static boolean intersects(RectangleLineIntersector intersector, PointList pointList) {
    final int size = pointList.size();
    if (size == 0) {
        throw new IllegalArgumentException("PointList must not be empty");
    }
    Coordinate previous = new Coordinate(pointList.getLon(0), pointList.getLat(0));
    // A one-point list degenerates to a point-in-rectangle test.
    if (size == 1) {
        return intersector.intersects(previous, previous);
    }
    for (int i = 1; i < size; i++) {
        final Coordinate current = new Coordinate(pointList.getLon(i), pointList.getLat(i));
        if (intersector.intersects(previous, current)) {
            return true;
        }
        previous = current;
    }
    return false;
}
/**
 * BBox.intersects expects ISO 19115 corner order: (minLon, maxLon, minLat, maxLat),
 * i.e. bottom-left followed by top-right coordinates.
 */
@Test
public void testIntersect() {
    // ---
    // | |
    // ---------
    // | | | |
    // --------
    // |_|
    //
    // use ISO 19115 standard (minLon, maxLon followed by minLat(south!),maxLat)
    assertTrue(new BBox(12, 15, 12, 15).intersects(new BBox(13, 14, 11, 16)));
    // assertFalse(new BBox(15, 12, 12, 15).intersects(new BBox(16, 15, 11, 14)));
    // DOES NOT WORK: use bottom to top coord for lat
    // assertFalse(new BBox(6, 2, 11, 6).intersects(new BBox(5, 3, 12, 5)));
    // so, use bottom-left and top-right corner!
    assertTrue(new BBox(2, 6, 6, 11).intersects(new BBox(3, 5, 5, 12)));
    // DOES NOT WORK: use bottom to top coord for lat and right to left for lon
    // assertFalse(new BBox(6, 11, 11, 6).intersects(new BBox(5, 10, 12, 7)));
    // so, use bottom-right and top-left corner
    assertTrue(new BBox(6, 11, 6, 11).intersects(new BBox(7, 10, 5, 12)));
}
public static void disableConsumption(KafkaConsumerWrapper kafkaConsumerWrapper, Set<String> prohibitionTopics) { Set<String> originalTopics = kafkaConsumerWrapper.getOriginalTopics(); // Not subscribed to any Topic, so no action is required if (originalTopics.size() == 0) { return; } Collection<TopicPartition> originalPartitions = kafkaConsumerWrapper.getOriginalPartitions(); KafkaConsumer<?, ?> kafkaConsumer = kafkaConsumerWrapper.getKafkaConsumer(); Collection<String> subtractTopics = CollectionUtils.subtract(originalTopics, prohibitionTopics); if (kafkaConsumerWrapper.isAssign()) { kafkaConsumer.assign(originalPartitions.stream().filter(obj -> subtractTopics.contains(obj.topic())) .collect(Collectors.toSet())); return; } kafkaConsumer.subscribe(subtractTopics); }
@Test
public void testDisableConsumptionWithNoProhibitionTopics() {
    // Given: a subscribe-mode wrapper subscribed to a single topic.
    KafkaConsumer<?, ?> consumerMock = Mockito.mock(KafkaConsumer.class);
    KafkaConsumerWrapper wrapper = new KafkaConsumerWrapper(consumerMock);
    HashSet<String> subscribedTopics = new HashSet<>();
    subscribedTopics.add("testTopic-1");
    wrapper.setOriginalTopics(subscribedTopics);
    wrapper.setAssign(false);

    // When: no topics are prohibited.
    KafkaConsumerController.disableConsumption(wrapper, new HashSet<>());

    // Then: the original subscription is re-applied unchanged.
    Mockito.verify(consumerMock, Mockito.times(1))
            .subscribe(Collections.singletonList("testTopic-1"));
}
/**
 * Performs (or returns the cached result of) the reverse DNS lookup for the
 * wrapped socket address and records that a lookup was attempted.
 *
 * @return the host name reported by {@code InetSocketAddress#getHostName()}
 */
@SuppressForbidden("Deliberate invocation")
public String reverseLookup() {
    // getHostName() may trigger a reverse DNS lookup; flag is set afterwards,
    // preserving the original ordering.
    final String resolvedHostName = inetSocketAddress.getHostName();
    reverseLookedUp = true;
    return resolvedHostName;
}
/**
 * Calling reverseLookup() must resolve the host name and flip the
 * reverse-looked-up flag from false to true.
 */
@Test
@SuppressForbidden("Intentional invocation of InetSocketAddress#getHostName()")
public void testReverseLookup() throws Exception {
    final InetSocketAddress inetSocketAddress = new InetSocketAddress(Inet4Address.getLoopbackAddress(), 12345);
    final ResolvableInetSocketAddress address = new ResolvableInetSocketAddress(inetSocketAddress);
    // Not looked up until reverseLookup() is invoked.
    assertThat(address.isReverseLookedUp()).isFalse();
    assertThat(address.reverseLookup()).isEqualTo(inetSocketAddress.getHostName());
    assertThat(address.isReverseLookedUp()).isTrue();
}
/**
 * Stores a key-value pair via the server's KV HTTP API.
 * <p>
 * If a file argument was given, the value is read from that file; literal
 * values (and explicit STRING type) are wrapped as JSON string literals. An
 * optional expiration is parsed as an ISO-8601 duration and sent as a "ttl"
 * header on the PUT request.
 *
 * @return 0 on success
 * @throws Exception on I/O failure, invalid duration, or HTTP error
 */
@Override
public Integer call() throws Exception {
    super.call();
    // A file value takes precedence: read the content of the file at the
    // (trimmed) path into 'value'.
    if (fileValue != null) {
        value = Files.readString(Path.of(fileValue.toString().trim()));
    }
    // Literal values and explicit STRING type are stored as JSON string literals.
    if (isLiteral(value) || type == Type.STRING) {
        value = wrapAsJsonLiteral(value);
    }
    // Optional expiration parsed as an ISO-8601 duration (e.g. "PT5M").
    Duration ttl = expiration == null ? null : Duration.parse(expiration);
    MutableHttpRequest<String> request = HttpRequest
        .PUT(apiUri("/namespaces/") + namespace + "/kv/" + key, value)
        .contentType(MediaType.APPLICATION_JSON_TYPE);
    if (ttl != null) {
        request.header("ttl", ttl.toString());
    }
    // Blocking exchange; the client is closed via try-with-resources.
    try (DefaultHttpClient client = client()) {
        client.toBlocking().exchange(this.requestOptions(request));
    }
    return 0;
}
/**
 * End-to-end CLI check: updating a string KV entry must store the value wrapped
 * as a JSON string literal ("\"stringValue\"") while the typed read returns the
 * unwrapped value.
 */
@Test
void string() throws IOException, ResourceExpiredException {
    try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
        // Boot an embedded server for the CLI command to talk to.
        EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
        embeddedServer.start();
        String[] args = {
            "--server", embeddedServer.getURL().toString(),
            "--user", "myuser:pass:word",
            "io.kestra.cli",
            "string",
            "stringValue"
        };
        PicocliRunner.call(KvUpdateCommand.class, ctx, args);
        KVStoreService kvStoreService = ctx.getBean(KVStoreService.class);
        KVStore kvStore = kvStoreService.get(null, "io.kestra.cli", null);
        // Typed read returns the plain value; raw storage keeps the JSON quotes.
        assertThat(kvStore.getValue("string").get(), is(new KVValue("stringValue")));
        assertThat(((InternalKVStore) kvStore).getRawValue("string").get(), is("\"stringValue\""));
    }
}
/**
 * Returns the configured running mode, defaulting to a repository-less
 * Standalone mode when no mode configuration was supplied.
 *
 * @return the explicit mode configuration, or a fresh Standalone default
 */
public ModeConfiguration getModeConfiguration() {
    if (null == modeConfig) {
        // No explicit mode: fall back to Standalone without a repository.
        return new ModeConfiguration("Standalone", null);
    }
    return modeConfig;
}
/**
 * When no mode configuration is supplied, getModeConfiguration() must fall back
 * to Standalone with no repository.
 */
@Test
void assertGetDefaultModeConfiguration() {
    ContextManagerBuilderParameter param = new ContextManagerBuilderParameter(null, Collections.emptyMap(),
            Collections.emptyMap(), Collections.emptyList(), new Properties(), null, null, false);
    assertThat(param.getModeConfiguration().getType(), is("Standalone"));
    assertNull(param.getModeConfiguration().getRepository());
}
/**
 * Blocks until a {@code Connection} is established, using a default timeout of
 * 45 seconds.
 *
 * @return the established connection
 */
protected Connection connectNow() {
    return connectNow(Duration.ofSeconds(45));
}
@Test
void testConnectTimeout() {
    // A transport whose connection Mono never completes must hit the timeout
    // and surface it as an IllegalStateException.
    final TestClientTransport neverConnecting = new TestClientTransport(Mono.never());
    assertThatExceptionOfType(IllegalStateException.class)
            .isThrownBy(() -> neverConnecting.connectNow(Duration.ofMillis(1)));
}