Dataset columns:
  focal_method — string, lengths 13 to 60.9k characters
  test_case   — string, lengths 25 to 109k characters
// Returns the host name cached in the static localHostName field.
// NOTE(review): the field is initialized elsewhere in this class — presumably once
// at class load; confirm there.
public static String getLocalhostName() { return localHostName; }
// The resolved local host name must be a real machine name, not a loopback alias.
@Test
public void testGetLocalhostName() {
    Assert.assertNotEquals("127.0.0.1", IpUtils.getLocalhostName());
    Assert.assertNotEquals("localhost", IpUtils.getLocalhostName());
}
// Captures the current thread's stack trace, trimming `trimHeadLevels` frames from
// the head and keeping at most `reserveLevels` frames — exact trim/reserve semantics
// are delegated to getStackTraceToList; confirm there.
public static List<String> getCurrentStackTraceToList(int trimHeadLevels, int reserveLevels) {
    return getStackTraceToList(Thread.currentThread(), trimHeadLevels, reserveLevels);
}
// Calls the no-argument variant (presumably an overload of the two-arg focal method —
// confirm) and expects the untrimmed trace whose top frame is Thread.getStackTrace.
@Test
public void testGetCurrentStackTraceToList() {
    List<String> trace = LogUtil.getCurrentStackTraceToList();
    System.out.println(trace); // debugging aid kept from the original
    Assert.assertTrue(trace.get(0).contains("java.lang.Thread.getStackTrace"));
}
@Override public String exportResources( VariableSpace space, Map<String, ResourceDefinition> definitions, ResourceNamingInterface resourceNamingInterface, Repository repository, IMetaStore metaStore ) throws KettleException { try { // Try to load the transformation from repository or file. // Modify this recursively too... // // NOTE: there is no need to clone this step because the caller is // responsible for this. // // First load the executor transformation metadata... // TransMeta executorTransMeta = loadTransformationMeta( repository, space ); // Also go down into the mapping transformation and export the files // there. (mapping recursively down) // String proposedNewFilename = executorTransMeta.exportResources( executorTransMeta, definitions, resourceNamingInterface, repository, metaStore ); // To get a relative path to it, we inject // ${Internal.Entry.Current.Directory} // String newFilename = "${" + Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY + "}/" + proposedNewFilename; // Set the correct filename inside the XML. // executorTransMeta.setFilename( newFilename ); // exports always reside in the root directory, in case we want to turn // this into a file repository... // executorTransMeta.setRepositoryDirectory( new RepositoryDirectory() ); // change it in the entry // fileName = newFilename; setSpecificationMethod( ObjectLocationSpecificationMethod.FILENAME ); return proposedNewFilename; } catch ( Exception e ) { throw new KettleException( BaseMessages.getString( PKG, "MetaInjectMeta.Exception.UnableToLoadTrans", fileName ) ); } }
// Verifies that exportResources() delegates to the loaded executor TransMeta,
// returns its proposed file name unchanged, and rewrites this step's fileName to
// the exported path (EXPORTED_FILE_NAME).
@Test
public void exportResources() throws KettleException {
  VariableSpace variableSpace = mock( VariableSpace.class );
  ResourceNamingInterface resourceNamingInterface = mock( ResourceNamingInterface.class );
  Repository repository = mock( Repository.class );
  IMetaStore metaStore = mock( IMetaStore.class );
  MetaInjectMeta injectMetaSpy = spy( metaInjectMeta );
  TransMeta transMeta = mock( TransMeta.class );
  Map<String, ResourceDefinition> definitions = Collections.<String, ResourceDefinition>emptyMap();
  // Stub the nested export and the transformation load performed by the spy.
  doReturn( TEST_FILE_NAME ).when( transMeta ).exportResources( transMeta, definitions,
    resourceNamingInterface, repository, metaStore );
  doReturn( transMeta ).when( injectMetaSpy ).loadTransformationMeta( repository, variableSpace );
  String actualExportedFileName = injectMetaSpy.exportResources( variableSpace, definitions,
    resourceNamingInterface, repository, metaStore );
  assertEquals( TEST_FILE_NAME, actualExportedFileName );
  assertEquals( EXPORTED_FILE_NAME, injectMetaSpy.getFileName() );
  verify( transMeta ).exportResources( transMeta, definitions, resourceNamingInterface, repository, metaStore );
}
/**
 * A constraint matches when it is null/empty, the wildcard "*", or when the parsed
 * version satisfies the constraint expression.
 */
@Override
public boolean checkVersionConstraint(String version, String constraint) {
    // Unconstrained: anything matches (and the version is never parsed).
    if (StringUtils.isNullOrEmpty(constraint) || "*".equals(constraint)) {
        return true;
    }
    return Version.parse(version).satisfies(constraint);
}
// Covers a simple exclusion, a bounded range, the wildcard against a non-semver
// string, and documents (issue #440) that a pre-release constraint currently fails
// to lex rather than match.
@Test
void checkVersionConstraint() {
    assertFalse(versionManager.checkVersionConstraint("1.4.3", ">2.0.0")); // simple
    assertTrue(versionManager.checkVersionConstraint("1.4.3", ">=1.4.0 & <1.6.0")); // range
    assertTrue(versionManager.checkVersionConstraint("undefined", "*"));
    // assertTrue(versionManager.checkVersionConstraint("1.0.0", ">=1.0.0-SNAPSHOT")); // issue #440
    assertThrows(LexerException.class, () -> versionManager.checkVersionConstraint("1.0.0", ">=1.0.0-SNAPSHOT"));
}
// Derives the full TaskExecutor process memory spec from configuration,
// re-wrapping configuration errors with a TaskManager-specific prefix
// (the original exception is preserved as the cause).
public static TaskExecutorProcessSpec processSpecFromConfig(final Configuration config) {
    try {
        return createMemoryProcessSpec(
                config, PROCESS_MEMORY_UTILS.memoryProcessSpecFromConfig(config));
    } catch (IllegalConfigurationException e) {
        // Prefix so failures are attributable to TaskManager memory setup.
        throw new IllegalConfigurationException(
                "TaskManager memory configuration failed: " + e.getMessage(), e);
    }
}
// External resources declared in config must surface in the derived process spec
// with the configured amount.
@Test
void testProcessSpecFromConfigWithExternalResource() {
    final Configuration config = new Configuration();
    config.setString(
            ExternalResourceOptions.EXTERNAL_RESOURCE_LIST.key(), EXTERNAL_RESOURCE_NAME_1);
    config.setLong(
            ExternalResourceOptions.getAmountConfigOptionForResource(EXTERNAL_RESOURCE_NAME_1), 1);
    config.set(TaskManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.ofMebiBytes(4096));
    final TaskExecutorProcessSpec taskExecutorProcessSpec =
            TaskExecutorProcessUtils.processSpecFromConfig(config);
    assertThat(taskExecutorProcessSpec.getExtendedResources()).hasSize(1);
    assertThat(
            taskExecutorProcessSpec
                    .getExtendedResources()
                    .get(EXTERNAL_RESOURCE_NAME_1)
                    .getValue()
                    .longValue())
            .isOne();
}
/**
 * Delegates to the wrapped UDF and records the evaluation latency (in nanoseconds)
 * on the metrics sensor — including when the UDF throws.
 */
@Override
public Object evaluate(final Object... args) {
    final long startNs = time.nanoseconds();
    try {
        return kudf.evaluate(args);
    } finally {
        // finally guarantees the latency is recorded even on exception.
        sensor.record(time.nanoseconds() - startNs);
    }
}
// The finally-block must record latency even when the UDF throws: a throwing UDF
// that "takes" 10 ms must leave a 10 ms (in nanos) reading on the sensor metric.
@Test
public void shouldRecordEvenIfExceptionThrown(){
    final UdfMetricProducer metricProducer = new UdfMetricProducer(sensor, args -> {
        time.sleep(10);
        throw new RuntimeException("boom");
    }, time);
    try {
        metricProducer.evaluate("foo");
    } catch (final Exception e) {
        // ignored — the exception itself is not under test here
    }
    final KafkaMetric metric = metrics.metric(metricName);
    final Double actual = (Double) metric.metricValue();
    assertThat(actual.longValue(), equalTo(TimeUnit.MILLISECONDS.toNanos(10)));
}
// Builds an AST Statement from the parse tree, first extracting the data sources
// it references so the builder can resolve them.
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
// "EMIT FINAL" on a CTAS must parse as a push query with FINAL output refinement.
@Test
public void shouldSupportExplicitEmitFinalForCtas() {
    // Given:
    final SingleStatementContext stmt =
        givenQuery("CREATE TABLE X AS SELECT COUNT(1) FROM TEST1 GROUP BY ROWKEY EMIT FINAL;");

    // When:
    final Query result = ((QueryContainer) builder.buildStatement(stmt)).getQuery();

    // Then:
    assertThat("Should be push", result.isPullQuery(), is(false));
    assertThat(result.getRefinement().get().getOutputRefinement(), is(OutputRefinement.FINAL));
}
/**
 * Drains up to fetchConfig.maxPollRecords records from the fetch buffer into a
 * ShareFetch. Queued completed fetches are initialized lazily and promoted to the
 * next-in-line slot; an exhausted next-in-line fetch is drained. A KafkaException
 * is swallowed only when some records were already collected in this call — those
 * are returned and the error is expected to resurface on a later poll.
 */
public ShareFetch<K, V> collect(final ShareFetchBuffer fetchBuffer) {
    ShareFetch<K, V> fetch = ShareFetch.empty();
    int recordsRemaining = fetchConfig.maxPollRecords;
    try {
        while (recordsRemaining > 0) {
            final ShareCompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                final ShareCompletedFetch completedFetch = fetchBuffer.peek();
                if (completedFetch == null) {
                    // Nothing buffered: return whatever was collected so far.
                    break;
                }
                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // On an initialization failure with no collected data, remove the
                        // faulty fetch so the same error is not rethrown indefinitely.
                        if (fetch.isEmpty()) {
                            fetchBuffer.poll();
                        }
                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }
                // Remove the promoted fetch from the queue.
                fetchBuffer.poll();
            } else {
                final TopicIdPartition tp = nextInLineFetch.partition;
                ShareInFlightBatch<K, V> batch = nextInLineFetch.fetchRecords(
                    deserializers,
                    recordsRemaining,
                    fetchConfig.checkCrcs);
                if (batch.isEmpty()) {
                    // Read past the end of this fetch: mark it fully consumed.
                    nextInLineFetch.drain();
                }
                recordsRemaining -= batch.numRecords();
                fetch.add(tp, batch);
                if (batch.getException() != null) {
                    throw batch.getException();
                } else if (batch.hasCachedException()) {
                    break;
                }
            }
        }
    } catch (KafkaException e) {
        // Only propagate when no records were collected in this call.
        if (fetch.isEmpty()) {
            throw e;
        }
    }
    return fetch;
}
// End-to-end walk of the ShareCompletedFetch lifecycle through the collector:
// add -> lazy initialize on collect -> drain/consume once read past the end.
@Test
public void testFetchNormal() {
    int recordCount = DEFAULT_MAX_POLL_RECORDS;
    buildDependencies();
    subscribeAndAssign(topicAPartition0);
    ShareCompletedFetch completedFetch = completedFetchBuilder
            .recordCount(recordCount)
            .build();
    // Validate that the buffer is empty until after we add the fetch data.
    assertTrue(fetchBuffer.isEmpty());
    fetchBuffer.add(completedFetch);
    assertFalse(fetchBuffer.isEmpty());
    // Validate that the completed fetch isn't initialized just because we add it to the buffer.
    assertFalse(completedFetch.isInitialized());
    // Fetch the data and validate that we get all the records we want back.
    ShareFetch<String, String> fetch = fetchCollector.collect(fetchBuffer);
    assertFalse(fetch.isEmpty());
    assertEquals(recordCount, fetch.numRecords());
    // When we collected the data from the buffer, this will cause the completed fetch to get initialized.
    assertTrue(completedFetch.isInitialized());
    // However, even though we've collected the data, it isn't (completely) consumed yet.
    assertFalse(completedFetch.isConsumed());
    // The buffer is now considered "empty" because our queue is empty.
    assertTrue(fetchBuffer.isEmpty());
    assertNull(fetchBuffer.peek());
    assertNull(fetchBuffer.poll());
    // However, while the queue is "empty", the next-in-line fetch is actually still in the buffer.
    assertNotNull(fetchBuffer.nextInLineFetch());
    // Now attempt to collect more records from the fetch buffer.
    fetch = fetchCollector.collect(fetchBuffer);
    assertEquals(0, fetch.numRecords());
    assertTrue(fetch.isEmpty());
    // However, once we read *past* the end of the records in the ShareCompletedFetch, then we will call
    // drain on it, and it will be considered all consumed.
    assertTrue(completedFetch.isConsumed());
}
// Decodes every stored value into a fresh collection.
// NOTE(review): the backing collection is a HashSet, so duplicate values mapped
// under different keys collapse to one element — Map.values() normally preserves
// duplicates. The accompanying test casts the result to Set, so callers appear to
// depend on this de-duplicating behavior; changing it would be breaking.
@Override
public Collection<V> values() {
    Collection<V> values = Sets.newHashSet();
    items.values().forEach(v -> values.add(serializer.decode(v)));
    return values;
}
@Test public void testValues() throws Exception { //Tests value set generation fillMap(10); Set<Integer> values = (Set<Integer>) map.values(); for (int i = 0; i < 10; i++) { assertTrue("The key set doesn't contain all keys 0-9", values.contains(i)); } assertEquals("The key set has an incorrect number of entries", 10, values.size()); }
/**
 * Handles a ConsumerGroupHeartbeat for one member: creates/updates the member,
 * recomputes subscription metadata and bumps the group epoch when subscriptions
 * changed (or the refresh deadline passed), refreshes the target assignment when
 * the epoch moved ahead, and reconciles the member toward that target.
 * Returns the records to persist together with the heartbeat response.
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group. Epoch 0 means the member is (re)joining.
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member; generate an id for a brand-new dynamic member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            ownedTopicPartitions,
            createIfNotExists,
            false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            instanceId,
            ownedTopicPartitions,
            createIfNotExists,
            false,
            records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count the joining member when it is not yet known to the group.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.", groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );

    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
    //    as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
// A new member joining (epoch 0) with the instance id of an existing, active static
// member must be rejected with UnreleasedInstanceIdException.
@Test
public void testShouldThrownUnreleasedInstanceIdExceptionWhenNewMemberJoinsWithInUseInstanceId() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();
    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Consumer group with one static member.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(new ConsumerGroupMember.Builder(memberId1)
                .setState(MemberState.STABLE)
                .setInstanceId(memberId1)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2)))
                .build())
            .withAssignment(memberId1, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2)))
            .withAssignmentEpoch(10))
        .build();

    // Member 2 joins the consumer group with an in-use instance id.
    assertThrows(UnreleasedInstanceIdException.class, () -> context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId2)
            .setInstanceId(memberId1)
            .setMemberEpoch(0)
            .setRebalanceTimeoutMs(5000)
            .setServerAssignor("range")
            .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
            .setTopicPartitions(Collections.emptyList())));
}
// Acquires a lease, blocking until one is available (timeout -1 — presumably
// "wait forever"; confirm in the lock internals), then wraps the acquired lock
// node path in a Lease handle.
public Lease acquire() throws Exception {
    String path = internals.attemptLock(-1, null, null);
    return makeLease(path);
}
// With a capacity of one lease: the first (timed) acquire succeeds, the second
// returns null once the wait times out.
@Test
public void testSimple() throws Exception {
    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(
        server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test", 1);
        assertNotNull(semaphore.acquire(timing.forWaiting().seconds(), TimeUnit.SECONDS));
        assertNull(semaphore.acquire(timing.forWaiting().seconds(), TimeUnit.SECONDS));
    } finally {
        TestCleanState.closeAndTestClean(client);
    }
}
// Server-side copy via the Storegate files API; no data passes through the client.
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    try {
        final CopyFileRequest copy = new CopyFileRequest()
            .name(target.getName())
            .parentID(fileid.getFileId(target.getParent()))
            .mode(1); // Overwrite
        final File file = new FilesApi(session.getClient()).filesCopy(
            fileid.getFileId(source), copy);
        // Report the whole length at once — the copy happened remotely.
        listener.sent(status.getLength());
        // Cache the new file id for the target path before returning its attributes.
        fileid.cache(target, file.getId());
        return target.withAttributes(new StoregateAttributesFinderFeature(session, fileid).toAttributes(file));
    }
    catch(ApiException e) {
        throw new StoregateExceptionMappingService(fileid).map("Cannot copy {0}", e, source);
    }
}
// Server-side copy of a directory: the copy must receive a new file id, and the
// original child file, the target, and the copy must all remain findable.
@Test
public void testCopyDirectoryServerSide() throws Exception {
    final StoregateIdProvider fileid = new StoregateIdProvider(session);
    final Path top = new StoregateDirectoryFeature(session, fileid).mkdir(new Path(
        String.format("/My files/%s", new AlphanumericRandomStringService().random()),
        EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path directory = new StoregateDirectoryFeature(session, fileid).mkdir(new Path(top,
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final String name = new AlphanumericRandomStringService().random();
    final TransferStatus status = new TransferStatus();
    final Path file = new StoregateTouchFeature(session, fileid).touch(
        new Path(directory, name, EnumSet.of(Path.Type.file)), status);
    final Path target_parent = new StoregateDirectoryFeature(session, fileid).mkdir(new Path(top,
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path target = new Path(target_parent, directory.getName(), EnumSet.of(Path.Type.directory));
    final StoregateCopyFeature feature = new StoregateCopyFeature(session, fileid);
    assertTrue(feature.isSupported(directory, target));
    final Path copy = new StoregateCopyFeature(session, fileid).copy(directory, target,
        new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener());
    assertNotEquals(file.attributes().getFileId(), copy.attributes().getFileId());
    assertTrue(new DefaultFindFeature(session).find(file));
    assertTrue(new DefaultFindFeature(session).find(target));
    assertTrue(new DefaultFindFeature(session).find(copy));
    // Cleanup: delete the whole test tree.
    new StoregateDeleteFeature(session, fileid).delete(Collections.singletonList(top),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Converts one JSON metrics object into a MetricsPacket.Builder and appends it to
 * {@code builders}. Requires "application" and "timestamp" fields; "metrics"
 * (values read as long) and "dimensions" (values read as text) are optional.
 * Every packet is tagged with the "Vespa" consumer.
 */
protected static void addObjectToBuilders(List<MetricsPacket.Builder> builders, JsonNode object) {
    MetricsPacket.Builder builder = new MetricsPacket.Builder(ServiceId.toServiceId(object.get("application").textValue()));
    builder.timestamp(Instant.ofEpochSecond(object.get("timestamp").longValue()));
    if (object.has("metrics")) {
        JsonNode metrics = object.get("metrics");
        // FIX: JsonNode.fieldNames() already returns Iterator<String>; drop the
        // raw Iterator<?> plus (String) cast.
        Iterator<String> keys = metrics.fieldNames();
        while (keys.hasNext()) {
            String key = keys.next();
            builder.putMetric(MetricId.toMetricId(key), metrics.get(key).asLong());
        }
    }
    if (object.has("dimensions")) {
        JsonNode dimensions = object.get("dimensions");
        Iterator<String> keys = dimensions.fieldNames();
        while (keys.hasNext()) {
            String key = keys.next();
            builder.putDimension(DimensionId.toDimensionId(key), dimensions.get(key).asText());
        }
    }
    builder.addConsumers(Set.of(ConsumerId.toConsumerId("Vespa")));
    builders.add(builder);
}
// A host_life JSON node must convert into a MetricsPacket carrying the service id,
// timestamp, both metrics, and the Vespa consumer tag.
@Test
public void testJSONObjectIsCorrectlyConvertedToMetricsPacket() {
    List<MetricsPacket.Builder> builders = new ArrayList<>();
    JsonNode hostLifePacket = generateHostLifePacket();
    NodeMetricGatherer.addObjectToBuilders(builders, hostLifePacket);
    MetricsPacket packet = builders.remove(0).build();
    assertEquals("host_life", packet.service().id);
    assertEquals(123, packet.timestamp().getEpochSecond());
    assertEquals(12L, packet.metrics().get(MetricId.toMetricId("uptime")));
    assertEquals(1L, packet.metrics().get(MetricId.toMetricId("alive")));
    assertEquals(Set.of(ConsumerId.toConsumerId("Vespa")), packet.consumers());
}
/**
 * Builds the inbound channel adapter for a RocketMQ consumer binding.
 * Anonymous bindings (no group) get a generated group derived from the destination,
 * except for DLQ destinations, which require an explicit group. Retry/error
 * infrastructure is wired according to maxAttempts.
 */
@Override
protected MessageProducer createConsumerEndpoint(ConsumerDestination destination, String group,
        ExtendedConsumerProperties<RocketMQConsumerProperties> extendedConsumerProperties) throws Exception {
    boolean anonymous = !StringUtils.hasLength(group);
    /*
     * When using DLQ, at least the group property must be provided for proper naming of the DLQ destination.
     * According to https://docs.spring.io/spring-cloud-stream/docs/3.2.1/reference/html/spring-cloud-stream.html#spring-cloud-stream-reference
     */
    if (anonymous && NamespaceUtil.isDLQTopic(destination.getName())) {
        // FIX: the original message concatenated the destination name directly onto
        // the text ("...for DLQ<name>"); add a separator for a readable diagnostic.
        throw new RuntimeException(
                "group must be configured for DLQ destination: " + destination.getName());
    }
    group = anonymous ? RocketMQUtils.anonymousGroup(destination.getName()) : group;

    RocketMQUtils.mergeRocketMQProperties(binderConfigurationProperties,
            extendedConsumerProperties.getExtension());
    extendedConsumerProperties.getExtension().setGroup(group);

    RocketMQInboundChannelAdapter inboundChannelAdapter = new RocketMQInboundChannelAdapter(
            destination.getName(), extendedConsumerProperties);
    ErrorInfrastructure errorInfrastructure = registerErrorInfrastructure(destination, group,
            extendedConsumerProperties);
    if (extendedConsumerProperties.getMaxAttempts() > 1) {
        // Retries handled via RetryTemplate; recovery callback fires after exhaustion.
        inboundChannelAdapter
                .setRetryTemplate(buildRetryTemplate(extendedConsumerProperties));
        inboundChannelAdapter.setRecoveryCallback(errorInfrastructure.getRecoverer());
    }
    else {
        // No retry: route failures straight to the error channel.
        inboundChannelAdapter.setErrorChannel(errorInfrastructure.getErrorChannel());
    }
    return inboundChannelAdapter;
}
// An anonymous binding (null group) on a regular destination must get the generated
// group name DEFAULT_GROUP + "_" + destination instead of failing.
// NOTE(review): method name has a typo ("Anymous" -> "Anonymous"); left as-is since
// renaming test methods can affect suite tooling.
@Test
public void createAnymousConsumerEndpoint() throws Exception {
    ExtendedConsumerProperties<RocketMQConsumerProperties> extendedConsumerProperties =
        new ExtendedConsumerProperties<>(new RocketMQConsumerProperties());
    extendedConsumerProperties.populateBindingName("input1");
    TestConsumerDestination destination = new TestConsumerDestination("test");
    MessageProducer consumerEndpoint =
        binder.createConsumerEndpoint(destination, null, extendedConsumerProperties);
    Assertions.assertThat(consumerEndpoint).isNotNull();
    Assertions.assertThat(extendedConsumerProperties.getExtension().getGroup())
        .isEqualTo(RocketMQConst.DEFAULT_GROUP + "_test");
}
/**
 * Exports via the wrapped exporter: the incoming container resource is converted to
 * the wrapped exporter's resource type first, and the exported data is converted
 * back to this adapter's output type on return.
 */
@Override
public ExportResult<To> export(UUID jobId, AD authData, Optional<ExportInformation> exportInfo)
    throws Exception {
  final Optional<ExportInformation> convertedInfo = exportInfo.map(
      info -> info.copyWithResource(exportInformationConverter.apply(info.getContainerResource())));
  final ExportResult<From> delegateResult = exporter.export(jobId, authData, convertedInfo);
  return delegateResult.copyWithExportedData(
      containerResourceConverter.apply(delegateResult.getExportedData()));
}
// The adapter must convert the incoming container resource before delegating (any
// resource -> PhotosContainerResource) and convert the exported data on the way
// out (photos -> media).
@Test
public void shouldHandleConversionFromPhotoExporterToMediaExporterUsingAnyInputContainerResource()
    throws Exception {
    Exporter<AuthData, PhotosContainerResource> photosExporter =
        (jobId, authData, exportInformation) -> new ExportResult<>(
            ResultType.END,
            exportInformation
                .map(ei -> (PhotosContainerResource) ei.getContainerResource())
                .get());
    AnyToAnyExporter<AuthData, PhotosContainerResource, MediaContainerResource> mediaExporter =
        new AnyToAnyExporter<>(
            photosExporter,
            MediaContainerResource::photoToMedia,
            (cr) -> {
                // The converter must see the original (unconverted) resource.
                assertThat(cr).isInstanceOf(DateRangeContainerResource.class);
                return new PhotosContainerResource(null, null);
            });
    ExportInformation ei = new ExportInformation(null, new DateRangeContainerResource(0, 0));
    ExportResult<MediaContainerResource> actual = mediaExporter.export(null, null, Optional.of(ei));
    MediaContainerResource expected = new MediaContainerResource(null, null, null);
    assertThat(actual.getExportedData()).isEqualTo(expected);
}
// Plain accessor for the rpcType field assigned elsewhere on this object.
@Override
public RpcType getRpcType() { return rpcType; }
// The fixture's method descriptor (built elsewhere in this test class) is
// expected to report the UNARY rpc type.
@Test
void getRpcType() {
    Assertions.assertEquals(RpcType.UNARY, method.getRpcType());
}
/**
 * Counts occurrences of each distinct element in {@code collection}.
 * Null-safe: a null collection is forwarded to IterUtil as a null iterator,
 * exactly as the delegate expects.
 */
public static <T> Map<T, Integer> countMap(Iterable<T> collection) {
    if (null == collection) {
        // Fully-qualified cast keeps the argument typed as Iterator<T>.
        return IterUtil.countMap((java.util.Iterator<T>) null);
    }
    return IterUtil.countMap(collection.iterator());
}
// countMap counts occurrences per distinct element: a, b, c appear twice; d once.
@Test
public void countMapTest() {
    final ArrayList<String> list = CollUtil.newArrayList("a", "b", "c", "c", "a", "b", "d");
    final Map<String, Integer> countMap = CollUtil.countMap(list);
    assertEquals(Integer.valueOf(2), countMap.get("a"));
    assertEquals(Integer.valueOf(2), countMap.get("b"));
    assertEquals(Integer.valueOf(2), countMap.get("c"));
    assertEquals(Integer.valueOf(1), countMap.get("d"));
}
/**
 * Sets {@code level} on the namespace logger and every child logger returned by
 * {@code loggers(namespace)}, returning the sorted names of all loggers affected.
 *
 * @throws NullPointerException if namespace or level is null
 */
public synchronized List<String> setLevel(String namespace, Level level) {
    Objects.requireNonNull(namespace, "Logging namespace may not be null");
    Objects.requireNonNull(level, "Level may not be null");

    log.info("Setting level of namespace {} and children to {}", namespace, level);
    List<String> affected = new ArrayList<>();
    for (org.apache.log4j.Logger child : loggers(namespace)) {
        setLevel(child, level);
        affected.add(child.getName());
    }
    Collections.sort(affected);
    return affected;
}
// setLevel on an ancestor namespace must affect the implicit ancestor logger and
// every descendant, record last-modified times from the mock clock, leave parents
// and siblings untouched on leaf updates, and keep the original timestamp when the
// same level is re-applied.
@Test
public void testSetLevel() {
    Logger root = logger("root");
    root.setLevel(Level.ERROR);

    Logger x = logger("a.b.c.p.X");
    Logger y = logger("a.b.c.p.Y");
    Logger z = logger("a.b.c.p.Z");
    Logger w = logger("a.b.c.s.W");
    x.setLevel(Level.INFO);
    y.setLevel(Level.INFO);
    z.setLevel(Level.INFO);
    w.setLevel(Level.INFO);

    // We don't explicitly register a logger for a.b.c.p, so it won't appear in the list of current loggers;
    // one should be created by the Loggers instance when we set the level
    TestLoggers loggers = new TestLoggers(root, x, y, z, w);

    List<String> modified = loggers.setLevel("a.b.c.p", Level.DEBUG);
    assertEquals(Arrays.asList("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified);
    assertEquals(Level.DEBUG.toString(), loggers.level("a.b.c.p").level());
    assertEquals(Level.DEBUG, x.getLevel());
    assertEquals(Level.DEBUG, y.getLevel());
    assertEquals(Level.DEBUG, z.getLevel());

    LoggerLevel expectedLevel = new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME);
    LoggerLevel actualLevel = loggers.level("a.b.c.p");
    assertEquals(expectedLevel, actualLevel);

    // Sleep a little and adjust the level of a leaf logger
    time.sleep(10);
    loggers.setLevel("a.b.c.p.X", Level.ERROR);
    expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10);
    actualLevel = loggers.level("a.b.c.p.X");
    assertEquals(expectedLevel, actualLevel);

    // Make sure that the direct parent logger and a sibling logger remain unaffected
    expectedLevel = new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME);
    actualLevel = loggers.level("a.b.c.p");
    assertEquals(expectedLevel, actualLevel);

    expectedLevel = new LoggerLevel(Level.DEBUG.toString(), INITIAL_TIME);
    actualLevel = loggers.level("a.b.c.p.Y");
    assertEquals(expectedLevel, actualLevel);

    // Set the same level again, and verify that the last modified time hasn't been altered
    time.sleep(10);
    loggers.setLevel("a.b.c.p.X", Level.ERROR);
    expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10);
    actualLevel = loggers.level("a.b.c.p.X");
    assertEquals(expectedLevel, actualLevel);
}
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new 
ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. 
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. 
// This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. 
if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. 
if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { 
jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. 
if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
// Verifies that with the "upload_graph" experiment enabled, job submission
// offloads the job graph to GCS instead of embedding it in the CreateJob
// request: the submitted Job carries no inline steps and points at the
// staged graph file via stepsLocation.
@Test
public void testUploadGraph() throws IOException {
  DataflowPipelineOptions options = buildPipelineOptions();
  options.setExperiments(Arrays.asList("upload_graph"));
  Pipeline p = buildDataflowPipeline(options);
  p.run();

  // Capture the Job object actually sent to the (mocked) Dataflow service.
  ArgumentCaptor<Job> jobCaptor = ArgumentCaptor.forClass(Job.class);
  Mockito.verify(mockJobs).create(eq(PROJECT_ID), eq(REGION_ID), jobCaptor.capture());
  assertValidJob(jobCaptor.getValue());
  // Graph was offloaded: no inline steps, location set to the staged file.
  assertTrue(jobCaptor.getValue().getSteps().isEmpty());
  assertTrue(
      jobCaptor
          .getValue()
          .getStepsLocation()
          .startsWith("gs://valid-bucket/temp/staging/dataflow_graph"));
}
/**
 * Computes the target assignment for the group.
 *
 * <p>An empty group trivially maps to an empty assignment; otherwise the work
 * is dispatched to the homogeneous or heterogeneous strategy depending on
 * whether all members share the same subscription.
 *
 * @throws PartitionAssignorException if the underlying strategy fails.
 */
@Override
public GroupAssignment assign(
    GroupSpec groupSpec,
    SubscribedTopicDescriber subscribedTopicDescriber
) throws PartitionAssignorException {
    // Guard: nothing to assign for an empty group.
    if (groupSpec.memberIds().isEmpty()) {
        return new GroupAssignment(Collections.emptyMap());
    }
    final boolean allMembersSubscribeIdentically =
        groupSpec.subscriptionType() == SubscriptionType.HOMOGENEOUS;
    return allMembersSubscribeIdentically
        ? assignHomogeneousGroup(groupSpec, subscribedTopicDescriber)
        : assignHeterogeneousGroup(groupSpec, subscribedTopicDescriber);
}
// Verifies sticky assignment under static membership: when a static member
// rejoins with a new member id but the same instance id, it must receive the
// same partitions, and the other members' assignments must be untouched.
@Test
public void testMixedStaticMembership() throws PartitionAssignorException {
    // One topic with 5 partitions.
    SubscribedTopicDescriber subscribedTopicMetadata = new SubscribedTopicDescriberImpl(
        Collections.singletonMap(
            topic1Uuid,
            new TopicMetadata(
                topic1Uuid,
                topic1Name,
                5,
                Collections.emptyMap()
            )
        )
    );

    // Initialize members with instance Ids.
    Map<String, MemberSubscriptionAndAssignmentImpl> members = new TreeMap<>();

    members.put(memberA, new MemberSubscriptionAndAssignmentImpl(
        Optional.empty(),
        Optional.of("instanceA"),
        Collections.singleton(topic1Uuid),
        Assignment.EMPTY
    ));

    members.put(memberC, new MemberSubscriptionAndAssignmentImpl(
        Optional.empty(),
        Optional.of("instanceC"),
        Collections.singleton(topic1Uuid),
        Assignment.EMPTY
    ));

    // Initialize member without an instance Id.
    members.put(memberB, new MemberSubscriptionAndAssignmentImpl(
        Optional.empty(),
        Optional.empty(),
        Collections.singleton(topic1Uuid),
        Assignment.EMPTY
    ));

    GroupSpec groupSpec = new GroupSpecImpl(
        members,
        SubscriptionType.HOMOGENEOUS,
        invertedTargetAssignment(members)
    );

    GroupAssignment initialAssignment = assignor.assign(
        groupSpec,
        subscribedTopicMetadata
    );

    // Remove memberA and add it back with a different member Id but same instance Id.
    members.remove(memberA);
    members.put("memberA1", new MemberSubscriptionAndAssignmentImpl(
        Optional.empty(),
        Optional.of("instanceA"),
        Collections.singleton(topic1Uuid),
        Assignment.EMPTY
    ));

    groupSpec = new GroupSpecImpl(
        members,
        SubscriptionType.HOMOGENEOUS,
        invertedTargetAssignment(members)
    );

    GroupAssignment reassignedAssignment = assignor.assign(
        groupSpec,
        subscribedTopicMetadata
    );

    // Assert that the assignments did not change.
    // "memberA1" (same instanceA) inherits memberA's partitions; B and C keep theirs.
    assertEquals(
        initialAssignment.members().get(memberA).partitions(),
        reassignedAssignment.members().get("memberA1").partitions()
    );

    assertEquals(
        initialAssignment.members().get(memberB).partitions(),
        reassignedAssignment.members().get(memberB).partitions()
    );

    assertEquals(
        initialAssignment.members().get(memberC).partitions(),
        reassignedAssignment.members().get(memberC).partitions()
    );
}
/**
 * Persists a new user through the embedded storage, using a parameterized
 * insert. The account is created enabled.
 */
@Override
public void createUser(String username, String password) {
    final String insertSql = "INSERT INTO users (username, password, enabled) VALUES (?, ?, ?)";
    try {
        EmbeddedStorageContextHolder.addSqlContext(insertSql, username, password, true);
        databaseOperate.blockUpdate();
    } finally {
        // Always clear the thread-bound SQL context, even if the update failed.
        EmbeddedStorageContextHolder.cleanAllContext();
    }
}
// createUser must trigger exactly one blocking update on the embedded storage.
@Test
void testCreateUser() {
    embeddedUserPersistService.createUser("username", "password");
    Mockito.verify(databaseOperate).blockUpdate();
}
/**
 * Creates custom high-availability services from the configured HA mode.
 *
 * <p>Thin convenience overload: reads {@code high-availability} (HA_MODE)
 * from the configuration and delegates to the overload taking it explicitly.
 *
 * @throws FlinkException if the services cannot be created.
 */
private static HighAvailabilityServices createCustomHAServices(
        Configuration config, Executor executor) throws FlinkException {
    final String haMode = config.get(HighAvailabilityOptions.HA_MODE);
    return createCustomHAServices(haMode, config, executor);
}
// Verifies that setting HA_MODE to a factory class name makes both service
// creation entry points return the factory-provided HA services instance.
@Test
public void testCreateCustomHAServices() throws Exception {
    Configuration config = new Configuration();

    HighAvailabilityServices haServices = new TestingHighAvailabilityServices();
    // The static field hands the expected instance to the test factory.
    TestHAFactory.haServices = haServices;

    Executor executor = Executors.directExecutor();

    config.set(HighAvailabilityOptions.HA_MODE, TestHAFactory.class.getName());

    // when
    HighAvailabilityServices actualHaServices =
        HighAvailabilityServicesUtils.createAvailableOrEmbeddedServices(
            config, executor, NoOpFatalErrorHandler.INSTANCE);

    // then
    assertSame(haServices, actualHaServices);

    // when
    actualHaServices =
        HighAvailabilityServicesUtils.createHighAvailabilityServices(
            config,
            executor,
            AddressResolution.NO_ADDRESS_RESOLUTION,
            RpcSystem.load(),
            NoOpFatalErrorHandler.INSTANCE);
    // then
    assertSame(haServices, actualHaServices);
}
/**
 * Starts the state updater thread if it is not already running.
 *
 * <p>Fails fast with {@link IllegalStateException} when the output queues
 * (restored tasks / failures) still hold results, since that indicates the
 * updater was not drained or shut down cleanly before being restarted.
 * Idempotent: a second call while the thread exists is a no-op.
 */
public void start() {
    if (stateUpdaterThread == null) {
        if (!restoredActiveTasks.isEmpty() || !exceptionsAndFailedTasks.isEmpty()) {
            throw new IllegalStateException("State updater started with non-empty output queues. "
                + BUG_ERROR_MESSAGE);
        }
        stateUpdaterThread = new StateUpdaterThread(name, metrics, changelogReader);
        stateUpdaterThread.start();
        // Gate used by shutdown to wait for the thread to terminate.
        shutdownGate = new CountDownLatch(1);

        // initialize the last commit as of now to prevent first commit happens immediately
        this.lastCommitMs = time.milliseconds();
    }
}
// Starting the updater with no tasks registered must leave every task queue
// empty — nothing should be paused, restored, updating, or failed.
@Test
public void shouldNotResumeNonExistingTasks() throws Exception {
    stateUpdater.start();

    verifyPausedTasks();
    verifyRestoredActiveTasks();
    verifyUpdatingTasks();
    verifyExceptionsAndFailedTasks();
}
/**
 * Bean-validation check: null is accepted (absence is a separate concern,
 * presumably enforced by @NotNull where required — confirm), while an empty
 * string is rejected.
 */
@Override
public boolean isValid(@Nullable String value, ConstraintValidatorContext context) {
    if (value == null) {
        return true;
    }
    return !value.isEmpty();
}
// Null must pass validation — the validator only rejects empty strings.
@Test
void isValid_shouldValidateNull() {
    assertTrue(validator.isValid(null, context));
}
/**
 * Computes the Shannon entropy (in bits per symbol) of the characters in
 * {@code input}: sum over distinct characters of p * log2(1/p), where p is
 * the character's relative frequency.
 *
 * <p>An empty string yields 0. A null input throws NullPointerException.
 */
public static double calculateForChars(String input) {
    // Frequency table: character -> occurrence count.
    final Map<Character, Long> charCountMap = input.chars()
            .mapToObj(c -> (char) c)
            .collect(Collectors.groupingBy(p -> p, Collectors.counting()));

    double result = 0;
    // Iterate the counts directly: the previous keySet()+get() form performed
    // a redundant second map lookup per character, and the key was never used.
    for (long count : charCountMap.values()) {
        double probabilityForChar = (double) count / input.length();
        result += probabilityForChar * logBaseTwo(1 / probabilityForChar);
    }
    return result;
}
// Entropy for known strings: uniform single-symbol strings have entropy 0,
// a 50/50 two-symbol string has entropy 1 bit, ten distinct symbols log2(10),
// and 36 distinct symbols log2(36). Removed an exact duplicate of the
// "5555555555" assertion that added no coverage.
@Test
public void testEntropyCalcForChars() {
    assertEquals(0D, ShannonEntropy.calculateForChars("1111"));
    assertEquals(0D, ShannonEntropy.calculateForChars("5555555555"), 0.0D);
    assertEquals(0.46899559358928133D, ShannonEntropy.calculateForChars("1555555555"));
    assertEquals(1.0D, ShannonEntropy.calculateForChars("1111155555"));
    assertEquals(3.3219280948873635D, ShannonEntropy.calculateForChars("1234567890"));
    assertEquals(5.1699250014423095D,
        ShannonEntropy.calculateForChars("1234567890qwertyuiopasdfghjklzxcvbnm"));
}
/**
 * Returns instance metadata for the given server. ServiceComb servers carry
 * their own metadata; any other server type falls back to the superclass.
 */
@Override
public Map<String, String> getMetadata(Server server) {
    return server instanceof ServiceCombServer
        ? ((ServiceCombServer) server).getMetadata()
        : super.getMetadata(server);
}
// A plain Server yields the superclass default (empty map); a ServiceCombServer
// must surface its own metadata map unchanged.
@Test
public void getMetadata() {
    final ServiceCombServerIntrospector introspector = new ServiceCombServerIntrospector();
    final Map<String, String> metadata = introspector.getMetadata(new Server("localhost:9090"));
    Assert.assertEquals(metadata, Collections.emptyMap());
    final ServiceCombServer serviceCombServer = Mockito.mock(ServiceCombServer.class);
    final HashMap<String, String> meta = new HashMap<>();
    Mockito.when(serviceCombServer.getMetadata()).thenReturn(meta);
    assertEquals(introspector.getMetadata(serviceCombServer), meta);
}
/**
 * Deletes the named user on the broker at {@code addr}, waiting up to
 * {@code millis} for the synchronous remoting call.
 *
 * @throws MQBrokerException for any non-SUCCESS response code, carrying the
 *         broker's code and remark.
 */
public void deleteUser(String addr, String username,
    long millis) throws RemotingConnectException, RemotingSendRequestException,
    RemotingTimeoutException, InterruptedException, MQBrokerException {
    DeleteUserRequestHeader requestHeader = new DeleteUserRequestHeader(username);

    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.AUTH_DELETE_USER, requestHeader);
    RemotingCommand response = this.remotingClient.invokeSync(addr, request, millis);
    assert response != null;
    if (response.getCode() == ResponseCode.SUCCESS) {
        return;
    }
    throw new MQBrokerException(response.getCode(), response.getRemark());
}
// Happy path only: with the remoting client stubbed (mockInvokeSync
// presumably returns a SUCCESS response — confirm against the helper),
// deleteUser must complete without throwing.
@Test
public void testDeleteUser() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    mqClientAPI.deleteUser(defaultBrokerAddr, "", defaultTimeout);
}
/**
 * FEEL remove(list, position): returns a new list with the element at the
 * 1-based {@code position} removed. Negative positions count from the end
 * (-1 removes the last element).
 *
 * <p>Returns an InvalidParametersEvent error result when either argument is
 * null, when position is zero (positions are 1-based), or when |position|
 * exceeds the list size.
 */
public FEELFnResult<List<Object>> invoke(@ParameterName( "list" ) List list, @ParameterName( "position" ) BigDecimal position) {
    if ( list == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
    }
    if ( position == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be null"));
    }
    if ( position.intValue() == 0 ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be zero (parameter 'position' is 1-based)"));
    }
    if ( position.abs().intValue() > list.size() ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "inconsistent with 'list' size"));
    }
    // spec requires us to return a new list
    List<Object> result = new ArrayList<>( list );
    if( position.intValue() > 0 ) {
        // positive: 1-based from the front -> 0-based index is position-1
        result.remove( position.intValue()-1 );
    } else {
        // negative: -1 is the last element -> 0-based index is size+position
        result.remove( list.size()+position.intValue() );
    }
    return FEELFnResult.ofResult( result );
}
// Negative positions remove counting from the end: -1 drops the last element,
// -2 the middle, -3 the first.
@Test
void invokePositionNegative() {
    FunctionTestUtil.assertResultList(removeFunction.invoke(Collections.singletonList(1), BigDecimal.valueOf(-1))
            , Collections.emptyList());
    FunctionTestUtil.assertResultList(
            removeFunction.invoke(Arrays.asList(1, "test", BigDecimal.valueOf(14)), BigDecimal.valueOf(-1)),
            Arrays.asList(1, "test"));
    FunctionTestUtil.assertResultList(
            removeFunction.invoke(Arrays.asList(1, "test", BigDecimal.valueOf(14)), BigDecimal.valueOf(-2)),
            Arrays.asList(1, BigDecimal.valueOf(14)));
    FunctionTestUtil.assertResultList(
            removeFunction.invoke(Arrays.asList(1, "test", BigDecimal.valueOf(14)), BigDecimal.valueOf(-3)),
            Arrays.asList("test", BigDecimal.valueOf(14)));
}
/**
 * Returns the duplications recorded for the given file component, or an
 * empty list when none exist. The file argument must not be null.
 */
@Override
public Iterable<Duplication> getDuplications(Component file) {
    checkFileComponentArgument(file);

    Collection<Duplication> duplicationsForFile = this.duplications.asMap().get(file.getKey());
    return duplicationsForFile == null ? Collections.emptyList() : duplicationsForFile;
}
// Null component must be rejected with an NPE carrying the documented message.
@Test
public void getDuplications_throws_NPE_if_Component_argument_is_null() {
    assertThatThrownBy(() -> underTest.getDuplications(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("file can not be null");
}
/**
 * Returns whether {@code text} contains an emoji carrying the given gender.
 * Thin delegate to
 * {@link JavaEmojiUtils#containsGender(CharSequence, JavaEmojiUtils.Gender)}.
 */
public static boolean containsGender(
    @NonNull CharSequence text, @NonNull JavaEmojiUtils.Gender gender) {
  return JavaEmojiUtils.containsGender(text, gender);
}
// Bearded person (U+1F9D4), with or without a skin-tone modifier, carries no
// gender; only the ZWJ + female/male sign sequences do.
@Test
public void testContainsGender() {
    Assert.assertFalse(EmojiUtils.containsGender("\uD83E\uDDD4", JavaEmojiUtils.Gender.Man));
    Assert.assertFalse(EmojiUtils.containsGender("\uD83E\uDDD4", JavaEmojiUtils.Gender.Woman));
    Assert.assertFalse(
        EmojiUtils.containsGender("\uD83E\uDDD4\uD83C\uDFFE", JavaEmojiUtils.Gender.Man));
    Assert.assertFalse(
        EmojiUtils.containsGender("\uD83E\uDDD4\uD83C\uDFFE", JavaEmojiUtils.Gender.Woman));
    Assert.assertFalse(
        EmojiUtils.containsGender("\uD83E\uDDD4\uD83C\uDFFB\u200D♀", JavaEmojiUtils.Gender.Man));
    Assert.assertTrue(
        EmojiUtils.containsGender("\uD83E\uDDD4\uD83C\uDFFB\u200D♀", JavaEmojiUtils.Gender.Woman));
    Assert.assertTrue(
        EmojiUtils.containsGender("\uD83E\uDDD4\uD83C\uDFFB\u200D♂", JavaEmojiUtils.Gender.Man));
    // NOTE(review): the leading "\\uD83E" below is a literal backslash-u text,
    // not the high surrogate, leaving "\uDDD4" an unpaired surrogate — looks
    // like an escaping typo; the assertion passes via the ZWJ+♂ suffix. Confirm
    // whether "\uD83E\uDDD4\u200D♂️" was intended.
    Assert.assertTrue(
        EmojiUtils.containsGender("\\uD83E\uDDD4\u200D♂️", JavaEmojiUtils.Gender.Man));
}
/**
 * Pages through tenant-capacity rows for usage correction: returns up to
 * {@code pageSize} rows with id greater than {@code lastId}.
 *
 * <p>Only the {@code id} and {@code tenant_id} columns are mapped onto the
 * returned objects; all other TenantCapacity fields are left at defaults.
 * A CannotGetJdbcConnectionException is logged as fatal and rethrown.
 */
public List<TenantCapacity> getCapacityList4CorrectUsage(long lastId, int pageSize) {
    // Resolve the datasource-specific SQL via the mapper abstraction.
    TenantCapacityMapper tenantCapacityMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
            TableConstant.TENANT_CAPACITY);
    MapperContext context = new MapperContext();
    context.putWhereParameter(FieldConstant.ID, lastId);
    context.putWhereParameter(FieldConstant.LIMIT_SIZE, pageSize);
    MapperResult mapperResult = tenantCapacityMapper.getCapacityList4CorrectUsage(context);
    try {
        return jdbcTemplate.query(mapperResult.getSql(), mapperResult.getParamList().toArray(), (rs, rowNum) -> {
            TenantCapacity tenantCapacity = new TenantCapacity();
            tenantCapacity.setId(rs.getLong("id"));
            tenantCapacity.setTenant(rs.getString("tenant_id"));
            return tenantCapacity;
        });
    } catch (CannotGetJdbcConnectionException e) {
        FATAL_LOG.error("[db-error]", e);
        throw e;
    }
}
// Happy path: the row-mapped list is returned as-is. Failure path: a
// connection failure from the JdbcTemplate must propagate unchanged.
@Test
void testGetCapacityList4CorrectUsage() {
    List<TenantCapacity> list = new ArrayList<>();
    TenantCapacity tenantCapacity = new TenantCapacity();
    tenantCapacity.setTenant("test");
    list.add(tenantCapacity);

    long lastId = 1;
    int pageSize = 1;

    when(jdbcTemplate.query(anyString(), eq(new Object[] {lastId, pageSize}), any(RowMapper.class))).thenReturn(list);
    List<TenantCapacity> ret = service.getCapacityList4CorrectUsage(lastId, pageSize);
    assertEquals(list.size(), ret.size());
    assertEquals(tenantCapacity.getTenant(), ret.get(0).getTenant());

    //mock get connection fail
    when(jdbcTemplate.query(anyString(), eq(new Object[] {lastId, pageSize}), any(RowMapper.class))).thenThrow(
            new CannotGetJdbcConnectionException("conn fail"));
    try {
        service.getCapacityList4CorrectUsage(lastId, pageSize);
        assertTrue(false);
    } catch (Exception e) {
        assertEquals("conn fail", e.getMessage());
    }
}
/**
 * mqadmin subcommand: cleans expired consume queues either on a single broker
 * (-b addr) or across a cluster (-c name; may be null to mean all). Prints
 * "success" on success, "false" otherwise, and always shuts the admin client
 * down.
 *
 * @throws SubCommandException wrapping any failure during execution.
 */
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    try {
        defaultMQAdminExt.start();
        final boolean result;
        if (commandLine.hasOption('b')) {
            // Target one broker directly by address.
            result = defaultMQAdminExt.cleanExpiredConsumerQueueByAddr(commandLine.getOptionValue('b').trim());
        } else {
            // Target the whole (possibly unspecified) cluster.
            String cluster = commandLine.getOptionValue('c');
            if (null != cluster) {
                cluster = cluster.trim();
            }
            result = defaultMQAdminExt.cleanExpiredConsumerQueue(cluster);
        }
        System.out.printf(result ? "success" : "false");
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
// Smoke test: parsing both -b and -c and executing against the test broker
// must not throw. (-b takes precedence over -c in the command.)
@Test
public void testExecute() throws SubCommandException {
    CleanExpiredCQSubCommand cmd = new CleanExpiredCQSubCommand();
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    // NOTE(review): the option values carry a leading space ("-b 127...");
    // the command trims values, so parsing still works — confirm intended.
    String[] subargs = new String[] {"-b 127.0.0.1:" + listenPort(), "-c default-cluster"};
    final CommandLine commandLine =
        ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options),
            new DefaultParser());
    cmd.execute(commandLine, options, null);
}
/**
 * Renders the inline template against the given variables, using the
 * configured default for whether rendering is applied recursively.
 *
 * @throws IllegalVariableEvaluationException if a variable cannot be evaluated.
 */
public String render(String inline, Map<String, Object> variables) throws IllegalVariableEvaluationException {
    return this.render(inline, variables, this.variableConfiguration.getRecursiveRendering());
}
@Test void shouldKeepKeyOrderWhenRenderingMap() throws IllegalVariableEvaluationException { final Map<String, Object> input = new LinkedHashMap<>(); input.put("foo-1", "A"); input.put("foo-2", "B"); final Map<String, Object> input_value3 = new LinkedHashMap<>(); input_value3.put("bar-1", "C"); input_value3.put("bar-2", "D"); input_value3.put("bar-3", "E"); // input.put("foo-3", input_value3); final Map<String, Object> result = variableRenderer.render(input, Map.of()); assertThat(result.keySet(), contains("foo-1", "foo-2", "foo-3")); final Map<String, Object> result_value3 = (Map<String, Object>) result.get("foo-3"); assertThat(result_value3.keySet(), contains("bar-1", "bar-2", "bar-3")); }
/** Returns the Telegram Bot API method name for this request. */
@Override
public String getMethod() {
    return PATH;
}
// A builder-constructed request with a chat id must report the correct Bot API
// method name and pass validation.
@Test
public void testGetChatMenuButtonForChat() {
    GetChatMenuButton getChatMenuButton = GetChatMenuButton
            .builder()
            .chatId(123456L)
            .build();
    assertEquals("getChatMenuButton", getChatMenuButton.getMethod());
    assertDoesNotThrow(getChatMenuButton::validate);
}
/**
 * Builds a value serde for GenericRow: creates the raw format serde for the
 * schema, adapts it to GenericRow, wraps it with processing-log error capture,
 * and optionally with consumed/produced tracking. The assembled serde is
 * configured (as a value serde) before being returned.
 */
@Override
public Serde<GenericRow> create(
    final FormatInfo format,
    final PersistenceSchema schema,
    final KsqlConfig ksqlConfig,
    final Supplier<SchemaRegistryClient> srClientFactory,
    final String loggerNamePrefix,
    final ProcessingLogContext processingLogContext,
    final Optional<TrackedCallback> tracker
) {
    // Raw serde for the wire format; false = this is the value side, not the key.
    final Serde<List<?>> formatSerde =
        innerFactory.createFormatSerde("Value", format, schema, ksqlConfig, srClientFactory, false);

    final Serde<GenericRow> genericRowSerde = toGenericRowSerde(formatSerde, schema);

    // Errors during (de)serialization are routed to the processing log.
    final Serde<GenericRow> loggingSerde = innerFactory.wrapInLoggingSerde(
        genericRowSerde,
        loggerNamePrefix,
        processingLogContext,
        queryId);

    // Tracking wrapper only when a callback was supplied.
    final Serde<GenericRow> serde = tracker
        .map(callback -> innerFactory.wrapInTrackingSerde(loggingSerde, callback))
        .orElse(loggingSerde);

    // isKey = false: configure as a value serde.
    serde.configure(Collections.emptyMap(), false);

    return serde;
}
// The factory must delegate raw-format serde creation to the inner factory,
// flagged as the value side (isKey = false).
@Test
public void shouldCreateInnerSerde() {
    // When:
    factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt, Optional.empty());

    // Then:
    verify(innerFactory).createFormatSerde("Value", format, schema, config, srClientFactory, false);
}
/**
 * Converts a YAML encrypt-table rule into its configuration object. The YAML
 * map keys are the column names, so each is written back into the column
 * config before delegating the per-column swap.
 */
@Override
public EncryptTableRuleConfiguration swapToObject(final YamlEncryptTableRuleConfiguration yamlConfig) {
    Collection<EncryptColumnRuleConfiguration> columnConfigs = new LinkedList<>();
    yamlConfig.getColumns().forEach((columnName, yamlColumnConfig) -> {
        yamlColumnConfig.setName(columnName);
        columnConfigs.add(columnSwapper.swapToObject(yamlColumnConfig));
    });
    return new EncryptTableRuleConfiguration(yamlConfig.getName(), columnConfigs);
}
// Full round-trip of one table with one column: the map key becomes the
// column name, and cipher / assisted-query / like-query settings survive.
@Test
void assertSwapToObject() {
    Map<String, YamlEncryptColumnRuleConfiguration> columns = Collections.singletonMap("test_column", buildYamlEncryptColumnRuleConfiguration());
    YamlEncryptTableRuleConfiguration yamlEncryptTableRuleConfig = new YamlEncryptTableRuleConfiguration();
    yamlEncryptTableRuleConfig.setName("test_table");
    yamlEncryptTableRuleConfig.setColumns(columns);
    EncryptTableRuleConfiguration actualEncryptTableRuleConfig = swapper.swapToObject(yamlEncryptTableRuleConfig);
    assertThat(actualEncryptTableRuleConfig.getName(), is("test_table"));
    Collection<EncryptColumnRuleConfiguration> actualColumns = actualEncryptTableRuleConfig.getColumns();
    assertThat(actualColumns.size(), is(1));
    EncryptColumnRuleConfiguration actualEncryptColumnRuleConfig = actualColumns.iterator().next();
    // Name comes from the YAML map key, not the column body.
    assertThat(actualEncryptColumnRuleConfig.getName(), is("test_column"));
    assertThat(actualEncryptColumnRuleConfig.getCipher().getName(), is("encrypt_cipher"));
    assertThat(actualEncryptColumnRuleConfig.getCipher().getEncryptorName(), is("test_encryptor"));
    assertTrue(actualEncryptColumnRuleConfig.getAssistedQuery().isPresent());
    assertThat(actualEncryptColumnRuleConfig.getAssistedQuery().get().getName(), is("encrypt_assisted"));
    assertTrue(actualEncryptColumnRuleConfig.getLikeQuery().isPresent());
    assertThat(actualEncryptColumnRuleConfig.getLikeQuery().get().getName(), is("encrypt_like"));
}
/**
 * Reports the Java class backing this value type's native representation.
 *
 * @throws KettleValueException always in this base class — concrete value
 *         meta subclasses are expected to override.
 */
@Override
public Class<?> getNativeDataTypeClass() throws KettleValueException {
  // Not implemented for base class
  throw new KettleValueException( getTypeDesc() + " does not implement this method" );
}
// The base class must refuse to report a native type: the call has to throw
// KettleValueException and never assign the class.
@Test
public void testGetNativeDataTypeClass() {
  ValueMetaInterface base = new ValueMetaBase();
  Class<?> clazz = null;
  try {
    clazz = base.getNativeDataTypeClass();
    fail();
  } catch ( KettleValueException expected ) {
    // ValueMetaBase should throw an exception, as all sub-classes should override
    assertNull( clazz );
  }
}
/**
 * Builds a MetricsSource from an annotated metrics object, using the default
 * annotated-metrics factory.
 */
public static MetricsSource makeSource(Object source) {
  return new MetricsSourceBuilder(source,
      DefaultMetricsFactory.getAnnotatedMetricsFactory()).build();
}
// A source object without any metric annotations must be rejected at build time.
@Test(expected=MetricsException.class)
public void testEmptyMetrics() {
  MetricsAnnotations.makeSource(new EmptyMetrics());
}
public static List<WeightedHostAddress> prioritize(WeightedHostAddress[] records) { final List<WeightedHostAddress> result = new LinkedList<>(); // sort by priority (ascending) SortedMap<Integer, Set<WeightedHostAddress>> byPriority = new TreeMap<>(); for(final WeightedHostAddress record : records) { if (byPriority.containsKey(record.getPriority())) { byPriority.get(record.getPriority()).add(record); } else { final Set<WeightedHostAddress> set = new HashSet<>(); set.add(record); byPriority.put(record.getPriority(), set); } } // now, randomize each priority set by weight. for(Map.Entry<Integer, Set<WeightedHostAddress>> weights : byPriority.entrySet()) { List<WeightedHostAddress> zeroWeights = new LinkedList<>(); int totalWeight = 0; final Iterator<WeightedHostAddress> i = weights.getValue().iterator(); while (i.hasNext()) { final WeightedHostAddress next = i.next(); if (next.weight == 0) { // set aside, as these should be considered last according to the RFC. zeroWeights.add(next); i.remove(); continue; } totalWeight += next.getWeight(); } int iterationWeight = totalWeight; Iterator<WeightedHostAddress> iter = weights.getValue().iterator(); while (iter.hasNext()) { int needle = new Random().nextInt(iterationWeight); while (true) { final WeightedHostAddress record = iter.next(); needle -= record.getWeight(); if (needle <= 0) { result.add(record); iter.remove(); iterationWeight -= record.getWeight(); break; } } iter = weights.getValue().iterator(); } // finally, append the hosts with zero priority (shuffled) Collections.shuffle(zeroWeights); result.addAll(zeroWeights); } return result; }
// Degenerate input: a single record must come back as a one-element list
// containing that record.
@Test
public void testOneHost() throws Exception {
    // setup
    final DNSUtil.WeightedHostAddress host = new DNSUtil.WeightedHostAddress("host", 5222, false, 1, 1);

    // do magic
    final List<DNSUtil.WeightedHostAddress> result = DNSUtil.prioritize(new DNSUtil.WeightedHostAddress[]{host});

    // verify
    assertEquals( 1, result.size() );
    assertEquals(host, result.get(0));
}
/**
 * Builds Kafka consumer properties from the connector options, then layers
 * the serde-specific settings derived from the key and value schemas on top
 * (key side first, then value side).
 */
static Properties resolveConsumerProperties(Map<String, String> options, Object keySchema,
                                            Object valueSchema) {
    Properties consumerProperties = from(options);
    withSerdeConsumerProperties(true, options, keySchema, consumerProperties);
    withSerdeConsumerProperties(false, options, valueSchema, consumerProperties);
    return consumerProperties;
}
// When an explicit (de)serializer is configured alongside the Avro format,
// it must be kept as-is and only the Avro schema property added — the
// resolver must not overwrite the user's deserializer.
@Test
public void when_consumerProperties_avroPropertyIsDefined_then_itsNotOverwritten() {
    // key
    assertThat(PropertiesResolver.resolveConsumerProperties(Map.of(
            OPTION_KEY_FORMAT, AVRO_FORMAT,
            KEY_DESERIALIZER, "deserializer"
    ), DUMMY_SCHEMA, null)).containsExactlyInAnyOrderEntriesOf(Map.of(
            KEY_DESERIALIZER, "deserializer",
            OPTION_KEY_AVRO_SCHEMA, DUMMY_SCHEMA
    ));

    // value
    assertThat(PropertiesResolver.resolveConsumerProperties(Map.of(
            OPTION_KEY_FORMAT, UNKNOWN_FORMAT,
            OPTION_VALUE_FORMAT, AVRO_FORMAT,
            VALUE_DESERIALIZER, "deserializer"
    ), null, DUMMY_SCHEMA)).containsExactlyInAnyOrderEntriesOf(Map.of(
            VALUE_DESERIALIZER, "deserializer",
            OPTION_VALUE_AVRO_SCHEMA, DUMMY_SCHEMA
    ));
}
public String describe() { return "--- EXIT CODE (" + returnValue() + ") ---\n" + "--- STANDARD OUT ---\n" + outputForDisplayAsString() + "\n" + "--- STANDARD ERR ---\n" + errorForDisplayAsString() + "\n" + "---\n"; }
@Test void shouldDescribeResult() { List<CommandArgument> args = List.of(new StringArgument("foo"), new PasswordArgument("bar")); List<SecretString> secrets = List.of(new PasswordArgument("quux")); ConsoleResult result = new ConsoleResult(42, List.of(" foo ", " bar ", " baz ", " abc "), List.of(" quux ", " bang "), args, secrets); assertThat(result.describe()).contains("--- EXIT CODE (42) ---"); assertThat(result.describe()).contains("--- STANDARD OUT ---"); assertThat(result.describe()).contains("--- STANDARD ERR ---"); }
public Object getParameter(String key) { return param.get(key); }
@Test void testGetParameter() { assertNull(identityContext.getParameter(TEST)); identityContext.setParameter(TEST, TEST); assertEquals(TEST, identityContext.getParameter(TEST)); }
protected static boolean isSingleQuoted(String input) { if (input == null || input.isBlank()) { return false; } return input.matches("(^" + QUOTE_CHAR + "{1}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{1})"); }
@Test public void testSingleQuotedNegative3() { assertFalse(isSingleQuoted("\"\"double quoted is not single quoted\"\"")); }
@Override public AppResponse process(Flow flow, ActivationWithCodeRequest body) { var authAppSession = appSessionService.getSession(body.getAuthSessionId()); if (!State.AUTHENTICATED.name().equals(authAppSession.getState())){ return new NokResponse(); } appSession = new AppSession(); appSession.setState(State.INITIALIZED.name()); appSession.setFlow(body.isReRequestLetter() ? ReApplyActivateActivationCode.NAME : ActivateAccountAndAppFlow.NAME); appSession.setActivationMethod(ActivationMethod.LETTER); appSession.setAction(body.isReRequestLetter() ? "re_request_letter" : "activation_by_letter"); AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(body.getUserAppId()); appSession.setAccountId(appAuthenticator.getAccountId()); appSession.setUserAppId(appAuthenticator.getUserAppId()); appSession.setDeviceName(appAuthenticator.getDeviceName()); appSession.setInstanceId(appAuthenticator.getInstanceId()); Map<String, String> result = digidClient.getRegistrationByAccount(appAuthenticator.getAccountId()); if (!result.get(lowerUnderscore(STATUS)).equals("OK")) return new NokResponse(); var registrationId = result.get(lowerUnderscore(REGISTRATION_ID)); if (registrationId != null) { appSession.setRegistrationId(Long.valueOf(registrationId)); } appSession.setWithBsn(Boolean.valueOf(result.get("has_bsn"))); digidClient.remoteLog("1089", Map.of( lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(), lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName())); return new AppSessionResponse(appSession.getId(), Instant.now().getEpochSecond()); }
@Test void processOKTest() { var mockedAppSession = new AppSession(); mockedAppSession.setAccountId(1L); mockedAppSession.setUserAppId(USER_APP_ID); mockedAppSession.setId(APP_SESSION_ID); when(appSessionService.getSession(anyString())).thenReturn(mockedAppSession); when(digidClientMock.getRegistrationByAccount(anyLong())).thenReturn(Map.of( lowerUnderscore(STATUS), "OK", lowerUnderscore(REGISTRATION_ID), "1234" )); mockedAppSession.setRegistrationId(1234L); AppResponse appResponse = startActivationWithCode.process(mockedFlow, activationWithCodeRequest()); assertEquals(1234L, mockedAppSession.getRegistrationId()); }
public int getIndex(String name) { for (int i = 0; i < this.size(); i++) { if (get(i).getName().equals(name)) { return i; } } throw new IllegalArgumentException("param '" + name + "' not found"); }
@Test public void getIndex_shouldThrowExceptionIfNameNotFound() { assertThatThrownBy(() -> new ParamsConfig().getIndex("foo")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("param 'foo' not found"); }
public static DeviceKey createDeviceKeyUsingCommunityName(DeviceKeyId id, String label, String name) { DefaultAnnotations annotations = builder().set(AnnotationKeys.NAME, name).build(); return new DeviceKey(id, label, Type.COMMUNITY_NAME, annotations); }
@Test(expected = NullPointerException.class) public void testCreateDeviceKeyUsingCommunityNameWithNull() { DeviceKey deviceKey = DeviceKey.createDeviceKeyUsingCommunityName(null, null, null); }
@Override @CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE, allEntries = true) // allEntries 清空所有缓存,因为 id 不是直接的缓存 code,不好清理 public void deleteNotifyTemplate(Long id) { // 校验存在 validateNotifyTemplateExists(id); // 删除 notifyTemplateMapper.deleteById(id); }
@Test public void testDeleteNotifyTemplate_notExists() { // 准备参数 Long id = randomLongId(); // 调用, 并断言异常 assertServiceException(() -> notifyTemplateService.deleteNotifyTemplate(id), NOTIFY_TEMPLATE_NOT_EXISTS); }
@ScalarOperator(DIVIDE) @SqlType(StandardTypes.REAL) public static long divide(@SqlType(StandardTypes.REAL) long left, @SqlType(StandardTypes.REAL) long right) { return floatToRawIntBits(intBitsToFloat((int) left) / intBitsToFloat((int) right)); }
@Test public void testDivide() { assertFunction("REAL'12.34' / REAL'56.78'", REAL, 12.34f / 56.78f); assertFunction("REAL'-17.34' / REAL'-22.891'", REAL, -17.34f / -22.891f); assertFunction("REAL'-89.123' / REAL'754.0'", REAL, -89.123f / 754.0f); assertFunction("REAL'-0.0' / REAL'0.0'", REAL, -0.0f / 0.0f); assertFunction("REAL'-17.71' / REAL'-1.0'", REAL, -17.71f / -1.0f); }
@Override public void setCallbackExecutor(final ExecutorService callbackExecutor) { this.callbackExecutor = callbackExecutor; }
@Test public void testSetCallbackExecutor() { ExecutorService customized = Executors.newCachedThreadPool(); remotingClient.setCallbackExecutor(customized); assertThat(remotingClient.getCallbackExecutor()).isEqualTo(customized); }
@Override public SpanCustomizer tag(String key, String value) { return tracer.currentSpanCustomizer().tag(key, value); }
@Test void tag() { span.start(); try (SpanInScope scope = tracing.tracer().withSpanInScope(span)) { spanCustomizer.tag("foo", "bar"); } span.flush(); assertThat(spans).flatExtracting(s -> s.tags().entrySet()) .containsExactly(entry("foo", "bar")); }
@Override public Collection<LocalDataQueryResultRow> getRows(final ShowTransactionRuleStatement sqlStatement, final ContextManager contextManager) { return Collections.singleton(new LocalDataQueryResultRow(rule.getDefaultType().name(), rule.getProviderType(), rule.getProps())); }
@Test void assertExecuteWithXA() throws SQLException { TransactionRule rule = new TransactionRule(createTransactionRuleConfiguration(TransactionType.XA.name(), "Atomikos", PropertiesBuilder.build(new Property("host", "127.0.0.1"), new Property("databaseName", "jbossts"))), Collections.emptyMap()); ContextManager contextManager = mockContextManager(rule); engine = setUp(contextManager); engine.executeQuery(); Collection<LocalDataQueryResultRow> actual = engine.getRows(); assertThat(actual.size(), is(1)); Iterator<LocalDataQueryResultRow> iterator = actual.iterator(); LocalDataQueryResultRow row = iterator.next(); assertThat(row.getCell(1), is(TransactionType.XA.name())); assertThat(row.getCell(2), is("Atomikos")); String props = (String) row.getCell(3); assertTrue(props.contains("\"databaseName\":\"jbossts\"")); assertTrue(props.contains("\"host\":\"127.0.0.1\"")); }
public static Field p(String fieldName) { return SELECT_ALL_FROM_SOURCES_ALL.where(fieldName); }
@Test void contains_uri_with_annotation() { String q = Q.p("f1").containsUri(A.a("key", "value"), "https://test.uri") .build(); assertEquals(q, "yql=select * from sources * where f1 contains ([{\"key\":\"value\"}]uri(\"https://test.uri\"))"); }
public static void executeIgnore(Runnable runnable) { DataPermission dataPermission = getDisableDataPermissionDisable(); DataPermissionContextHolder.add(dataPermission); try { // 执行 runnable runnable.run(); } finally { DataPermissionContextHolder.remove(); } }
@Test public void testExecuteIgnore() { DataPermissionUtils.executeIgnore(() -> assertFalse(DataPermissionContextHolder.get().enable())); }
public static JsonMapper validateJsonMapper(JsonMapper jsonMapper) { try { final String serializedJob = jsonMapper.serialize(getJobForTesting()); testTimeFields(serializedJob); testUseFieldsNotMethods(serializedJob); testUsePolymorphism(serializedJob); testCanConvertBackToJob(jsonMapper, serializedJob); return jsonMapper; } catch (Exception e) { throw new IllegalArgumentException("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.", e); } }
@Test void testInvalidGsonJsonMapper() { assertThatThrownBy(() -> validateJsonMapper(new InvalidGsonJsonMapper(new GsonBuilder().create() )) ) .isInstanceOf(IllegalArgumentException.class) .hasMessage("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way."); }
public static Expression generateFilterExpression(SearchArgument sarg) { return translate(sarg.getExpression(), sarg.getLeaves()); }
@Test public void testStringType() { SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); SearchArgument arg = builder.startAnd().equals("string", PredicateLeaf.Type.STRING, "Joe").end().build(); UnboundPredicate expected = Expressions.equal("string", "Joe"); UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg); assertPredicatesMatch(expected, actual); }
@PostMapping("/authorize") @Operation(summary = "申请授权", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【提交】调用") @Parameters({ @Parameter(name = "response_type", required = true, description = "响应类型", example = "code"), @Parameter(name = "client_id", required = true, description = "客户端编号", example = "tudou"), @Parameter(name = "scope", description = "授权范围", example = "userinfo.read"), // 使用 Map<String, Boolean> 格式,Spring MVC 暂时不支持这么接收参数 @Parameter(name = "redirect_uri", required = true, description = "重定向 URI", example = "https://www.iocoder.cn"), @Parameter(name = "auto_approve", required = true, description = "用户是否接受", example = "true"), @Parameter(name = "state", example = "1") }) public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType, @RequestParam("client_id") String clientId, @RequestParam(value = "scope", required = false) String scope, @RequestParam("redirect_uri") String redirectUri, @RequestParam(value = "auto_approve") Boolean autoApprove, @RequestParam(value = "state", required = false) String state) { @SuppressWarnings("unchecked") Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class); scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap()); // 0. 
校验用户已经登录。通过 Spring Security 实现 // 1.1 校验 responseType 是否满足 code 或者 token 值 OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType); // 1.2 校验 redirectUri 重定向域名是否合法 + 校验 scope 是否在 Client 授权范围内 OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null, grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri); // 2.1 假设 approved 为 null,说明是场景一 if (Boolean.TRUE.equals(autoApprove)) { // 如果无法自动授权通过,则返回空 url,前端不进行跳转 if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) { return success(null); } } else { // 2.2 假设 approved 非 null,说明是场景二 // 如果计算后不通过,则跳转一个错误链接 if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) { return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state, "access_denied", "User denied access")); } } // 3.1 如果是 code 授权码模式,则发放 code 授权码,并重定向 List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue); if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) { return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state)); } // 3.2 如果是 token 则是 implicit 简化模式,则发送 accessToken 访问令牌,并重定向 return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state)); }
@Test public void testApproveOrDeny_grantTypeError() { // 调用,并断言 assertServiceException(() -> oauth2OpenController.approveOrDeny(randomString(), null, null, null, null, null), new ErrorCode(400, "response_type 参数值只允许 code 和 token")); }
public static Caffeine<Object, Object> from(CaffeineSpec spec) { Caffeine<Object, Object> builder = spec.toBuilder(); builder.strictParsing = false; return builder; }
@Test public void fromSpec() { assertThat(Caffeine.from(CaffeineSpec.parse(""))).isNotNull(); }
@CheckReturnValue public static <T> Observable<T> create( @NonNull T data, @NonNull Activity activity, @Nullable CharSequence message, @LayoutRes int progressLayoutId) { Dialog dialog = new Dialog(activity, R.style.ProgressDialog); dialog.setContentView(progressLayoutId); if (!TextUtils.isEmpty(message)) { TextView messageView = dialog.findViewById(R.id.progress_dialog_message_text_view); messageView.setVisibility(View.VISIBLE); messageView.setText(message); } dialog.setTitle(null); dialog.setCancelable(false); dialog.setOwnerActivity(activity); dialog.show(); return Observable.using( () -> dialog, (Function<Dialog, ObservableSource<T>>) d1 -> Observable.just(data), Dialog::dismiss, true); }
@Test public void testLifecycleWithError() throws Exception { ActivityController<FragmentActivity> controller = Robolectric.buildActivity(FragmentActivity.class); controller.setup(); Data data = Mockito.mock(Data.class); Mockito.doThrow(new RuntimeException()).when(data).call(anyInt()); final Data errorData = Mockito.mock(Data.class); Assert.assertNull(ShadowDialog.getLatestDialog()); final Disposable disposable = RxProgressDialog.create(data, controller.get(), R.layout.progress_window_for_test) .map( d -> { Assert.assertNotNull(ShadowDialog.getLatestDialog()); Assert.assertTrue(ShadowDialog.getLatestDialog().isShowing()); d.call(1); return d; }) .subscribe(d -> d.call(2), throwable -> errorData.call(0)); Mockito.verify(errorData).call(0); Mockito.verifyNoMoreInteractions(errorData); Mockito.verify(data).call(1); Mockito.verifyNoMoreInteractions(data); Assert.assertNotNull(ShadowDialog.getLatestDialog()); Assert.assertFalse(ShadowDialog.getLatestDialog().isShowing()); disposable.dispose(); Assert.assertNotNull(ShadowDialog.getLatestDialog()); Assert.assertFalse(ShadowDialog.getLatestDialog().isShowing()); }
public static String processNamespaceParameter(String tenant) { if (StringUtils.isBlank(tenant) || NAMESPACE_PUBLIC_KEY.equalsIgnoreCase(tenant) || NAMESPACE_NULL_KEY.equalsIgnoreCase(tenant)) { return getNamespaceDefaultId(); } return tenant.trim(); }
@Test void testProcessTenantParameter() { String strPublic = "public"; String strEmpty = ""; assertEquals(strEmpty, NamespaceUtil.processNamespaceParameter(strPublic)); String strNull = "null"; assertEquals(strEmpty, NamespaceUtil.processNamespaceParameter(strNull)); assertEquals(strEmpty, NamespaceUtil.processNamespaceParameter(strEmpty)); assertEquals(strEmpty, NamespaceUtil.processNamespaceParameter(null)); String strAbc = "abc"; assertEquals(strAbc, NamespaceUtil.processNamespaceParameter(strAbc)); String strdef123 = "def123"; assertEquals(strdef123, NamespaceUtil.processNamespaceParameter(strdef123)); String strAbcHasSpace = " abc "; assertEquals(strAbc, NamespaceUtil.processNamespaceParameter(strAbcHasSpace)); }
@Udf(description = "Converts an INT value in radians to a value in degrees") public Double degrees( @UdfParameter( value = "value", description = "The value in radians to convert to degrees." ) final Integer value ) { return degrees(value == null ? null : value.doubleValue()); }
@Test public void shouldHandlePositive() { assertThat(udf.degrees(Math.PI), closeTo(180.0, 0.000000000000001)); assertThat(udf.degrees(2 * Math.PI), closeTo(360.0, 0.000000000000001)); assertThat(udf.degrees(1.2345), closeTo(70.73163980890013, 0.000000000000001)); assertThat(udf.degrees(2), closeTo(114.59155902616465, 0.000000000000001)); assertThat(udf.degrees(2L), closeTo(114.59155902616465, 0.000000000000001)); }
@Override public void removeIngress(String uid) { checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_INGRESS_UID); synchronized (this) { if (isIngressInUse(uid)) { final String error = String.format(MSG_INGRESS, uid, ERR_IN_USE); throw new IllegalStateException(error); } Ingress ingress = k8sIngressStore.removeIngress(uid); if (ingress != null) { log.info(String.format(MSG_INGRESS, ingress.getMetadata().getName(), MSG_REMOVED)); } } }
@Test(expected = IllegalArgumentException.class) public void testRemoveIngressWithNull() { target.removeIngress(null); }
public static AggregateOperation1<CharSequence, StringBuilder, String> concatenating() { return AggregateOperation .withCreate(StringBuilder::new) .<CharSequence>andAccumulate(StringBuilder::append) .andCombine(StringBuilder::append) .andExportFinish(StringBuilder::toString); }
@Test public void when_concatenatingEmptyItems_withDelimiterPrefixSuffix() { validateOpWithoutDeduct( concatenating(",", "(", ")"), StringBuilder::toString, "A", "", "(A", "(A", "(A)" ); validateOpWithoutDeduct( concatenating(",", "(", ")"), StringBuilder::toString, "", "B", "(", "(B", "(B)" ); validateOpWithoutDeduct( concatenating(",", "(", ")"), StringBuilder::toString, "", "", "(", "(", "()" ); }
public static SonarEdition loadEdition(System2 system) { URL url = system.getResource(EDITION_FILE_PATH); if (url == null) { return SonarEdition.COMMUNITY; } try (Scanner scanner = new Scanner(url.openStream(), StandardCharsets.UTF_8)) { String editionInFile = scanner.nextLine(); return parseEdition(editionInFile); } catch (IOException e) { throw new IllegalStateException(format(CAN_NOT_LOAD_FROM_CLASSPATH, EDITION_FILE_PATH), e); } }
@Test void load_edition_defaults_to_community_if_file_not_found() throws MalformedURLException { when(system.getResource(anyString())).thenReturn(new File("target/unknown").toURI().toURL()); SonarEdition edition = MetadataLoader.loadEdition(System2.INSTANCE); assertThat(edition).isEqualTo(SonarEdition.COMMUNITY); }
public static <K, V> StateSerdes<K, V> withBuiltinTypes( final String topic, final Class<K> keyClass, final Class<V> valueClass) { return new StateSerdes<>(topic, Serdes.serdeFrom(keyClass), Serdes.serdeFrom(valueClass)); }
@Test public void shouldThrowIfTopicNameIsNullForBuiltinTypes() { assertThrows(NullPointerException.class, () -> StateSerdes.withBuiltinTypes(null, byte[].class, byte[].class)); }
public final T fromJson(Reader in) throws IOException { JsonReader reader = new JsonReader(in); return read(reader); }
@Test public void testFromJson_String_TrailingData() throws IOException { assertThat(adapter.fromJson("\"a\"1")).isEqualTo("a"); }
public void addAppender(Appender<E> newAppender) { if (newAppender == null) { throw new IllegalArgumentException("Null argument disallowed"); } appenderList.addIfAbsent(newAppender); }
@Test public void testAddAppender() throws Exception { TestEvent event = new TestEvent(); NOPAppender<TestEvent> ta = new NOPAppender<TestEvent>(); ta.start(); aai.addAppender(ta); ta = new NOPAppender<TestEvent>(); ta.setName("test"); ta.start(); aai.addAppender(ta); int size = aai.appendLoopOnAppenders(event); assertTrue("Incorrect number of appenders", size == 2); }
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) { meta = (ScriptValuesMetaMod) smi; data = (ScriptValuesModData) sdi; if ( super.init( smi, sdi ) ) { // Add init code here. // Get the actual Scripts from our MetaData jsScripts = meta.getJSScripts(); for ( int j = 0; j < jsScripts.length; j++ ) { switch ( jsScripts[ j ].getScriptType() ) { case ScriptValuesScript.TRANSFORM_SCRIPT: strTransformScript = jsScripts[ j ].getScript(); break; case ScriptValuesScript.START_SCRIPT: strStartScript = jsScripts[ j ].getScript(); break; case ScriptValuesScript.END_SCRIPT: strEndScript = jsScripts[ j ].getScript(); break; default: break; } } return true; } return false; }
@Test public void variableIsSetInScopeOfStep() throws Exception { ScriptValuesMod step = StepMockUtil.getStep( ScriptValuesMod.class, ScriptValuesMetaMod.class, "test" ); RowMeta input = new RowMeta(); input.addValueMeta( new ValueMetaString( "str" ) ); step.setInputRowMeta( input ); step = spy( step ); doReturn( new Object[] { "" } ).when( step ).getRow(); ScriptValuesMetaMod meta = new ScriptValuesMetaMod(); meta.setCompatible( false ); meta.allocate( 1 ); meta.setFieldname( new String[] { "str" } ); meta.setType( new int[] { ValueMetaInterface.TYPE_STRING } ); meta.setReplace( new boolean[] { true } ); meta.setJSScripts( new ScriptValuesScript[] { new ScriptValuesScript( ScriptValuesScript.TRANSFORM_SCRIPT, "script", "setVariable('temp', 'pass', 'r');\nstr = getVariable('temp', 'fail');" ) } ); ScriptValuesModData data = new ScriptValuesModData(); step.init( meta, data ); Object[] expectedRow = { "pass" }; Object[] row = TransTestingUtil.execute( step, meta, data, 1, false ).get( 0 ); TransTestingUtil.assertResult( expectedRow, row ); }
@Override public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) { if (input.isEmpty()) return List.of(); List<Token> tokens = textToTokens(input, analyzerFactory.getAnalyzer(language, stemMode, removeAccents)); log.log(Level.FINEST, () -> "Tokenized '" + language + "' text='" + input + "' into: n=" + tokens.size() + ", tokens=" + tokens); return tokens; }
@Test public void testOptionalPath() { String languageCode = Language.ENGLISH.languageCode(); LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder() .analysis( Map.of(languageCode, new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of( new LuceneAnalysisConfig .Analysis .TokenFilters .Builder() .name("englishMinimalStem")))) ).build(); LuceneLinguistics linguistics = new LuceneLinguistics(enConfig, new ComponentRegistry<>()); Iterable<Token> tokens = linguistics .getTokenizer() .tokenize("Dogs and Cats", Language.ENGLISH, StemMode.ALL, false); assertEquals(List.of("Dog", "and", "Cat"), tokenStrings(tokens)); }
public static void createTopics( Logger log, String bootstrapServers, Map<String, String> commonClientConf, Map<String, String> adminClientConf, Map<String, NewTopic> topics, boolean failOnExisting) throws Throwable { // this method wraps the call to createTopics() that takes admin client, so that we can // unit test the functionality with MockAdminClient. The exception is caught and // re-thrown so that admin client is closed when the method returns. try (Admin adminClient = createAdminClient(bootstrapServers, commonClientConf, adminClientConf)) { createTopics(log, adminClient, topics, failOnExisting); } catch (Exception e) { log.warn("Failed to create or verify topics {}", topics, e); throw e; } }
@Test public void testCreateOneTopic() throws Throwable { Map<String, NewTopic> newTopics = Collections.singletonMap(TEST_TOPIC, NEW_TEST_TOPIC); WorkerUtils.createTopics(log, adminClient, newTopics, true); assertEquals(Collections.singleton(TEST_TOPIC), adminClient.listTopics().names().get()); assertEquals( new TopicDescription( TEST_TOPIC, false, Collections.singletonList( new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList()))), adminClient.describeTopics( Collections.singleton(TEST_TOPIC)).topicNameValues().get(TEST_TOPIC).get() ); }
@Bean public PluginDataHandler tarsPluginDataHandler() { return new TarsPluginDataHandler(); }
@Test public void testTarsPluginDataHandler() { applicationContextRunner.run(context -> { PluginDataHandler handler = context.getBean("tarsPluginDataHandler", PluginDataHandler.class); assertNotNull(handler); } ); }
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception { return fromXmlPartial(toInputStream(partial, UTF_8), o); }
@Test void shouldBeAbleToExplicitlyUnlockAPipeline() throws Exception { String pipelineXmlPartial = (""" <pipeline name="pipeline" lockBehavior="%s"> <materials> <hg url="/hgrepo"/> </materials> <stage name="mingle"> <jobs> <job name="functional"> <artifacts> <log src="artifact1.xml" dest="cruise-output" /> </artifacts> </job> </jobs> </stage> </pipeline> """).formatted(PipelineConfig.LOCK_VALUE_NONE); PipelineConfig pipeline = xmlLoader.fromXmlPartial(pipelineXmlPartial, PipelineConfig.class); assertThat(pipeline.hasExplicitLock()).isTrue(); assertThat(pipeline.explicitLock()).isFalse(); }
@Override public String getConfig(final String dataId) { try { return configService.getConfig(dataId, NacosPathConstants.GROUP, NacosPathConstants.DEFAULT_TIME_OUT); } catch (NacosException e) { LOG.error("Get data from nacos error.", e); throw new ShenyuException(e.getMessage()); } }
@Test public void testOnRuleChanged() throws NacosException { when(configService.getConfig(anyString(), anyString(), anyLong())).thenReturn(null); RuleData ruleData = RuleData.builder() .id(MOCK_ID) .name(MOCK_NAME) .pluginName(MOCK_PLUGIN_NAME) .selectorId(MOCK_SELECTOR_ID) .build(); nacosDataChangedListener.onRuleChanged(ImmutableList.of(ruleData), DataEventTypeEnum.DELETE); nacosDataChangedListener.onRuleChanged(ImmutableList.of(ruleData), DataEventTypeEnum.REFRESH); nacosDataChangedListener.onRuleChanged(ImmutableList.of(ruleData), DataEventTypeEnum.MYSELF); nacosDataChangedListener.onRuleChanged(ImmutableList.of(ruleData), DataEventTypeEnum.CREATE); verify(configService, times(6)).publishConfig(any(String.class), any(String.class), any(String.class), any(String.class)); }
@Udf(description = "Returns the sign of an INT value, denoted by 1, 0 or -1.") public Integer sign( @UdfParameter( value = "value", description = "The value to get the sign of." ) final Integer value ) { return value == null ? null : Integer.signum(value); }
@Test public void shouldHandleZero() { assertThat(udf.sign(0.0), is(0)); assertThat(udf.sign(0), is(0)); }
public ConfigCenterBuilder protocol(String protocol) { this.protocol = protocol; return getThis(); }
@Test void protocol() { ConfigCenterBuilder builder = ConfigCenterBuilder.newBuilder(); builder.protocol("protocol"); Assertions.assertEquals("protocol", builder.build().getProtocol()); }
@VisibleForTesting List<LogFile> applyBundleSizeLogFileLimit(List<LogFile> allLogs) { final ImmutableList.Builder<LogFile> truncatedLogFileList = ImmutableList.builder(); // Always collect the in-memory log and the newest on-disk log file // Keep collecting until we pass LOG_COLLECTION_SIZE_LIMIT final AtomicBoolean oneFileAdded = new AtomicBoolean(false); final AtomicLong collectedSize = new AtomicLong(); allLogs.stream().sorted(Comparator.comparing(LogFile::lastModified).reversed()).forEach(logFile -> { if (logFile.id().equals(IN_MEMORY_LOGFILE_ID)) { truncatedLogFileList.add(logFile); } else if (!oneFileAdded.get() || collectedSize.get() < LOG_COLLECTION_SIZE_LIMIT) { truncatedLogFileList.add(logFile); oneFileAdded.set(true); collectedSize.addAndGet(logFile.size()); } }); return truncatedLogFileList.build(); }
@Test public void testLogSizeLimiterWithEnoughSpaceLeft() { final List<LogFile> fullLoglist = List.of( new LogFile("memory", "server.mem.log", 500, Instant.now()), new LogFile("0", "server.log", 500, Instant.now()), new LogFile("1", "server.log.1.gz", 500, Instant.now()), new LogFile("2", "server.log.2.gz", 500, Instant.now()) ); final List<LogFile> shrinkedList = supportBundleService.applyBundleSizeLogFileLimit(fullLoglist); assertThat(shrinkedList).containsExactlyInAnyOrderElementsOf(fullLoglist); }
public void statusUpdate(final String asgName, final ASGStatus newStatus) { long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs; nonBatchingDispatcher.process( asgName, new AsgReplicationTask(targetHost, Action.StatusUpdate, asgName, newStatus) { public EurekaHttpResponse<?> execute() { return replicationClient.statusUpdate(asgName, newStatus); } }, expiryTime ); }
@Test public void testAsgStatusUpdate() throws Throwable { createPeerEurekaNode().statusUpdate(instanceInfo.getASGName(), ASGStatus.DISABLED); Object newAsgStatus = expectRequestType(RequestType.AsgStatusUpdate); assertThat(newAsgStatus, is(equalTo((Object) ASGStatus.DISABLED))); }
public OpenAPI read(Class<?> cls) { return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>()); }
@Test(description = "overridden generic resource methods") public void testTicket3694() { Reader reader = new Reader(new OpenAPI()); OpenAPI openAPI = reader.read(Ticket3694ResourceExtendedType.class); String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /foo:\n" + " post:\n" + " tags:\n" + " - Foo\n" + " summary: Foo List in Interface\n" + " operationId: foo\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}\n" + " /bar:\n" + " post:\n" + " operationId: bar\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*':\n" + " schema:\n" + " type: string\n" + " /another:\n" + " post:\n" + " operationId: another\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); reader = new Reader(new OpenAPI()); openAPI = reader.read(Ticket3694Resource.class); yaml = "openapi: 3.0.1\n" + "paths:\n" + " /foo:\n" + " post:\n" + " tags:\n" + " - Foo\n" + " summary: Foo List in Interface\n" + " operationId: foo\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}\n" + " /bar:\n" + " post:\n" + " operationId: bar\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*':\n" + " schema:\n" + " type: 
string\n" + " /another:\n" + " post:\n" + " operationId: another\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); reader = new Reader(new OpenAPI()); openAPI = reader.read(Ticket3694ResourceSimple.class); yaml = "openapi: 3.0.1\n" + "paths:\n" + " /bar:\n" + " post:\n" + " operationId: bar\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*':\n" + " schema:\n" + " type: string"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); reader = new Reader(new OpenAPI()); openAPI = reader.read(Ticket3694ResourceSimpleSameReturn.class); yaml = "openapi: 3.0.1\n" + "paths:\n" + " /bar:\n" + " post:\n" + " operationId: bar\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " type: array\n" + " items:\n" + " type: string\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
@SqlNullable @Description("Returns TRUE if the LineString or Multi-LineString's start and end points are coincident") @ScalarFunction("ST_IsClosed") @SqlType(BOOLEAN) public static Boolean stIsClosed(@SqlType(GEOMETRY_TYPE_NAME) Slice input) { Geometry geometry = deserialize(input); validateType("ST_IsClosed", geometry, EnumSet.of(LINE_STRING, MULTI_LINE_STRING)); if (geometry instanceof LineString) { return ((LineString) geometry).isClosed(); } else if (geometry instanceof MultiLineString) { return ((MultiLineString) geometry).isClosed(); } // This would be handled in validateType, but for completeness. throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("Invalid type for isClosed: %s", geometry.getGeometryType())); }
// Covers: closed/open LINESTRING, mixed and fully-closed MULTILINESTRING,
// and the error path for an unsupported geometry type (POLYGON).
@Test
public void testSTIsClosed()
{
    assertFunction("ST_IsClosed(ST_GeometryFromText('LINESTRING (1 1, 2 2, 1 3, 1 1)'))", BOOLEAN, true);
    assertFunction("ST_IsClosed(ST_GeometryFromText('LINESTRING (1 1, 2 2, 1 3)'))", BOOLEAN, false);
    // A MultiLineString is closed only when every component line is closed.
    assertFunction("ST_IsClosed(ST_GeometryFromText('MULTILINESTRING ((1 1, 2 2, 1 3, 1 1), (4 4, 5 5))'))", BOOLEAN, false);
    assertFunction("ST_IsClosed(ST_GeometryFromText('MULTILINESTRING ((1 1, 2 2, 1 3, 1 1), (4 4, 5 4, 5 5, 4 5, 4 4))'))", BOOLEAN, true);
    assertInvalidFunction("ST_IsClosed(ST_GeometryFromText('POLYGON ((1 1, 1 4, 4 4, 4 1, 1 1))'))", "ST_IsClosed only applies to LINE_STRING or MULTI_LINE_STRING. Input type is: POLYGON");
}
// Decides whether the destination string `next` may extend a range that currently
// ends at `prev` (presumably a ToUnicode CMap bfrange — see ToUnicodeWriter).
// Empty strings are never range-able. Note: the returned condition actually requires
// `prev` to consist of exactly ONE code point; the "no UTF-16 surrogates" wording in
// the inline comment below is a looser description of that check — TODO confirm intent.
static boolean allowDestinationRange(String prev, String next)
{
    if (prev.isEmpty() || next.isEmpty())
    {
        return false;
    }
    // Only the first code point of each string participates in the sequential check.
    int prevCode = prev.codePointAt(0);
    int nextCode = next.codePointAt(0);
    // Allow the new destination string if:
    // 1. It is sequential with the previous one and differs only in the low-order byte
    // 2. The previous string does not contain any UTF-16 surrogates
    return allowCodeRange(prevCode, nextCode) && prev.codePointCount(0, prev.length()) == 1;
}
// Exercises allowDestinationRange: empty inputs, non-sequential code points,
// low-byte overflow (0xFF -> 0x100), valid sequential pairs, and multi-code-point
// destinations (ligatures) which can never form a range.
@Test
void testAllowDestinationRange()
{
    // Denied (bogus)
    assertFalse(ToUnicodeWriter.allowDestinationRange("", ""));
    assertFalse(ToUnicodeWriter.allowDestinationRange("0", ""));
    assertFalse(ToUnicodeWriter.allowDestinationRange("", "0"));
    // Denied (non sequential)
    assertFalse(ToUnicodeWriter.allowDestinationRange("0", "A"));
    assertFalse(ToUnicodeWriter.allowDestinationRange("A", "a"));
    // Denied (overflow)
    assertFalse(ToUnicodeWriter.allowDestinationRange("ÿ", "Ā"));
    // Allowed (sequential w/o surrogate)
    assertTrue(ToUnicodeWriter.allowDestinationRange(" ", "!"));
    assertTrue(ToUnicodeWriter.allowDestinationRange("(", ")"));
    assertTrue(ToUnicodeWriter.allowDestinationRange("0", "1"));
    assertTrue(ToUnicodeWriter.allowDestinationRange("a", "b"));
    assertTrue(ToUnicodeWriter.allowDestinationRange("A", "B"));
    assertTrue(ToUnicodeWriter.allowDestinationRange("À", "Á"));
    assertTrue(ToUnicodeWriter.allowDestinationRange("þ", "ÿ"));
    // Denied (ligatures)
    assertFalse(ToUnicodeWriter.allowDestinationRange("ff", "fi"));
}
// Parses the OSM "lanes" tag into an integer lane count stored in lanesEnc.
// Defaults to 1 when the tag is absent or unparsable; the parsed value is
// clamped to [1, 6].
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
    int laneCount = 1;
    if (way.hasTag("lanes")) {
        String noLanes = way.getTag("lanes");
        // OSM values like "2;3" or "2.5" occur in the wild; only the first token is used.
        String[] noLanesTok = noLanes.split(";|\\.");
        if (noLanesTok.length > 0) {
            try {
                int noLanesInt = Integer.parseInt(noLanesTok[0]);
                // there was a proposal with negative lanes but I cannot find it
                if (noLanesInt < 0)
                    laneCount = 1;
                else if (noLanesInt > 6)
                    laneCount = 6;
                else
                    laneCount = noLanesInt;
            } catch (NumberFormatException ex) {
                // ignore if no number
            }
        }
    }
    lanesEnc.setInt(false, edgeId, edgeIntAccess, laneCount);
}
// Happy path: a way tagged lanes=4 is stored and read back as 4.
@Test
void basic() {
    ReaderWay readerWay = new ReaderWay(1);
    EdgeIntAccess edgeIntAccess = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    readerWay.setTag("lanes", "4");
    parser.handleWayTags(edgeId, edgeIntAccess, readerWay, relFlags);
    Assertions.assertEquals(4, lanesEnc.getInt(false, edgeId, edgeIntAccess));
}
// FEEL "upper case" built-in: upper-cases the argument, or returns an
// InvalidParametersEvent error result for a null argument.
// NOTE(review): toUpperCase() is default-locale-sensitive (e.g. Turkish dotless i);
// confirm whether the FEEL spec requires locale-independent casing.
public FEELFnResult<String> invoke(@ParameterName("string") String string) {
    if ( string == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    } else {
        return FEELFnResult.ofResult( string.toUpperCase() );
    }
}
// Null input must produce an error result carrying an InvalidParametersEvent.
@Test
void invokeNull() {
    FunctionTestUtil.assertResultError(stringUpperCaseFunction.invoke(null), InvalidParametersEvent.class);
}
/**
 * Registers a processor node in the topology under {@code name}, connected to the
 * given predecessors. Validation order: nulls first, duplicate name, at least one
 * predecessor, then per-predecessor checks (no self-loop, predecessor already added).
 * Resets the cached {@code nodeGroups} so groups are recomputed lazily.
 */
public final <KIn, VIn, KOut, VOut> void addProcessor(final String name,
                                                      final ProcessorSupplier<KIn, VIn, KOut, VOut> supplier,
                                                      final String... predecessorNames) {
    Objects.requireNonNull(name, "name must not be null");
    Objects.requireNonNull(supplier, "supplier must not be null");
    Objects.requireNonNull(predecessorNames, "predecessor names must not be null");
    // Rejects suppliers that violate the supplier contract (e.g. returning the same instance).
    ApiUtils.checkSupplier(supplier);
    if (nodeFactories.containsKey(name)) {
        throw new TopologyException("Processor " + name + " is already added.");
    }
    if (predecessorNames.length == 0) {
        throw new TopologyException("Processor " + name + " must have at least one parent");
    }
    for (final String predecessor : predecessorNames) {
        Objects.requireNonNull(predecessor, "predecessor name must not be null");
        if (predecessor.equals(name)) {
            throw new TopologyException("Processor " + name + " cannot be a predecessor of itself.");
        }
        if (!nodeFactories.containsKey(predecessor)) {
            throw new TopologyException("Predecessor processor " + predecessor + " is not added yet for " + name);
        }
    }
    nodeFactories.put(name, new ProcessorNodeFactory<>(name, predecessorNames, supplier));
    nodeGrouper.add(name);
    nodeGrouper.unite(name, predecessorNames);
    // Invalidate cached grouping; recomputed on next access.
    nodeGroups = null;
}
// The null-name check fires before any predecessor validation, so no predecessors
// are needed to trigger the NPE.
@Test
public void shouldNotAllowNullNameWhenAddingProcessor() {
    assertThrows(
        NullPointerException.class,
        () -> builder.addProcessor(
            null,
            (ProcessorSupplier<Object, Object, Object, Object>) () -> null
        )
    );
}
/**
 * Builds a pre-signed S3 URL for the given bucket/key using the session's signature
 * version. When no region is given and AWS4-HMAC-SHA256 signing is in effect, falls
 * back to the default region (AWS requires a region for V4 signatures).
 *
 * @param secret secret access key (stripped of surrounding whitespace)
 * @param expiry absolute expiry time in milliseconds (converted to seconds for S3)
 */
public String create(final String secret, final String bucket, String region, final String key, final String method, final long expiry) {
    if(StringUtils.isBlank(region)) {
        // Only for AWS
        switch(session.getSignatureVersion()) {
            case AWS4HMACSHA256:
                // Region is required for AWS4-HMAC-SHA256 signature
                region = S3LocationFeature.DEFAULT_REGION.getIdentifier();
        }
    }
    final Host bookmark = session.getHost();
    // Anonymous subclass avoids proxy initialization and resolves the endpoint from
    // the bookmark (protocol default hostname for AWS, literal hostname otherwise).
    return new RestS3Service(new AWSCredentials(StringUtils.strip(bookmark.getCredentials().getUsername()), StringUtils.strip(secret))) {
        @Override
        public String getEndpoint() {
            if(S3Session.isAwsHostname(bookmark.getHostname())) {
                return bookmark.getProtocol().getDefaultHostname();
            }
            return bookmark.getHostname();
        }

        @Override
        protected void initializeProxy(final HttpClientBuilder httpClientBuilder) {
            //
        }
    }.createSignedUrlUsingSignatureVersion(
        session.getSignatureVersion().toString(),
        // Expiry is expected in seconds by the underlying API.
        region, method, bucket, key, null, null, expiry / 1000, false, true,
        new HostPreferences(bookmark).getBoolean("s3.bucket.virtualhost.disable"));
}
// A non-AWS hostname must be used verbatim as the endpoint, giving a
// virtual-host-style URL of "<bucket>.<hostname>".
@Test
public void testDefaultHostnameWithProfile() {
    final Calendar expiry = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    expiry.add(Calendar.MILLISECOND, (int) TimeUnit.DAYS.toMillis(7));
    final Host host = new Host(new S3Protocol() {
        @Override
        public String getDefaultHostname() {
            return "h";
        }
    }, "h");
    final S3Session session = new S3Session(host);
    final String url = new S3PresignedUrlProvider(session).create(PROPERTIES.get("s3.secret"),
        "test-us-east-1-cyberduck", null, "f", "GET", expiry.getTimeInMillis());
    assertNotNull(url);
    assertEquals("test-us-east-1-cyberduck.h", URI.create(url).getHost());
}
/**
 * Verifies a CMS SignedData message: checks the embedded certificate against the
 * configured verifier at the given date, then verifies every signature, requiring
 * the signer's issuer and serial number to match that certificate.
 *
 * @return the encapsulated content of the signed data
 * @throws VerificationException on any issuer/serial mismatch or CMS failure
 */
public ContentInfo verify(ContentInfo signedMessage, Date date) {
    final SignedData signedData = SignedData.getInstance(signedMessage.getContent());
    final X509Certificate cert = certificate(signedData);
    certificateVerifier.verify(cert, date);
    final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded());
    try {
        final CMSSignedData cms = new CMSSignedData(signedMessage);
        cms.verifySignatures(signerId -> {
            // Signer identity must bind to the verified certificate.
            if (!name.equals(signerId.getIssuer())) {
                throw new VerificationException("Issuer does not match certificate");
            }
            if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) {
                throw new VerificationException("Serial number does not match certificate");
            }
            return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert);
        });
    } catch (CMSException e) {
        // Preserve the cause for diagnostics.
        throw new VerificationException("Could not verify CMS", e);
    }
    return signedData.getEncapContentInfo();
}
// Verifies a known-good PCA/RDW CMS fixture and pins the content type OID and the
// SHA-1 digest of the encapsulated content.
@Test
public void verifyValidPcaRdwCms() throws Exception {
    final ContentInfo signedMessage = ContentInfo.getInstance(fixture("pca-rdw"));
    final ContentInfo message = new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage);
    assertEquals("0.4.0.127.0.7.3.2.1", message.getContentType().getId());
    assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", Hex.toHexString(
        DigestUtils.digest("SHA1").digest(((ASN1OctetString) message.getContent()).getOctets())
    ));
}
// Convenience overload delegating to the 4-arg variant with its boolean flag
// set to false (flag semantics defined by that overload — confirm there).
public QueryObjectBundle rewriteQuery(@Language("SQL") String query, QueryConfiguration queryConfiguration, ClusterType clusterType)
{
    return rewriteQuery(query, queryConfiguration, clusterType, false);
}
// Columns of UNKNOWN type (bare NULL literals) must be rewritten with explicit
// CAST(... AS bigint) so the created table has concrete column types.
@Test
public void testRewriteUnknown() {
    QueryBundle queryBundle = getQueryRewriter().rewriteQuery("SELECT null, null unknown", CONFIGURATION, CONTROL);
    assertCreateTableAs(queryBundle.getQuery(), "SELECT\n" +
            "  CAST(null AS bigint)\n" +
            ", CAST(null AS bigint) unknown");
}
/**
 * Parses an HTTP query string (or POST payload) into a name -> values map.
 * Repeated parameter names accumulate into the value array. Two payload shapes
 * are treated as "not key=value" and returned whole under a single blank-name
 * key: XML/SOAP bodies (starting with "&lt;?") and params that are not of the
 * form name=value (leading '=' with no value, or more than one '=').
 */
public static Map<String, String[]> getQueryMap(String query) {
    Map<String, String[]> map = new HashMap<>();
    String[] params = query.split(PARAM_CONCATENATE);
    for (String param : params) {
        String[] paramSplit = param.split("=");
        if (paramSplit.length == 0) {
            continue; // We found no key-/value-pair, so continue on the next param
        }
        String name = decodeQuery(paramSplit[0]);

        // hack for SOAP request (generally)
        if (name.trim().startsWith("<?")) { // $NON-NLS-1$
            map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
            return map;
        }

        // the post payload is not key=value
        if((param.startsWith("=") && paramSplit.length == 1) || paramSplit.length > 2) {
            map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
            return map;
        }

        String value = "";
        if(paramSplit.length>1) {
            value = decodeQuery(paramSplit[1]);
        }

        String[] known = map.get(name);
        if(known == null) {
            known = new String[] {value};
        }
        else {
            // Append the new value; note it is placed at the END, original order
            // of earlier values preserved by arraycopy.
            String[] tmp = new String[known.length+1];
            tmp[tmp.length-1] = value;
            System.arraycopy(known, 0, tmp, 0, known.length);
            known = tmp;
        }
        map.put(name, known);
    }
    return map;
}
// A percent-encoded ampersand (%26) inside a value must not split the parameter
// and must decode back to '&'. See https://bz.apache.org/bugzilla/show_bug.cgi?id=58413
@Test
void testGetQueryMapValueContainingAmpersand() {
    String query = "login=toto1&pwd=Welcome%261";
    Map<String, String[]> params = RequestViewHTTP.getQueryMap(query);
    Assertions.assertNotNull(params);
    Assertions.assertEquals(2, params.size());
    String[] param1 = params.get("login");
    Assertions.assertNotNull(param1);
    Assertions.assertEquals(1, param1.length);
    Assertions.assertEquals("toto1", param1[0]);
    String[] param2 = params.get("pwd");
    Assertions.assertNotNull(param2);
    Assertions.assertEquals(1, param2.length);
    Assertions.assertEquals("Welcome&1", param2[0]);
}
/**
 * Returns a sequential stream of hits produced by a fresh {@link PredicateSpliterator},
 * or an empty stream when there are no posting lists to search.
 */
public Stream<Hit> stream() {
    return nPostingLists == 0
            ? Stream.empty()
            : StreamSupport.stream(new PredicateSpliterator(), false);
}
// NOT semantics: the second posting list's interval negates the first, so the
// search must produce no hits.
@Test
void requireThatNotIsSupported_NoMatch() {
    PredicateSearch search = createPredicateSearch(
            new byte[]{1},
            postingList(SubqueryBitmap.ALL_SUBQUERIES, entry(0, 0x00010001)),
            postingList(SubqueryBitmap.ALL_SUBQUERIES, entry(0, 0x00010000, 0x00ff0001)));
    assertEquals(List.of().toString(), search.stream().toList().toString());
}
// Returns the set of partitions that currently have buffered (unconsumed) data:
// the next-in-line fetch (if not yet consumed) plus all queued completed fetches.
// Guarded by the buffer lock; returns a snapshot, not a live view.
Set<TopicIdPartition> bufferedPartitions() {
    lock.lock();
    try {
        final Set<TopicIdPartition> partitions = new HashSet<>();
        if (nextInLineFetch != null && !nextInLineFetch.isConsumed()) {
            partitions.add(nextInLineFetch.partition);
        }
        completedFetches.forEach(cf -> partitions.add(cf.partition));
        return partitions;
    } finally {
        lock.unlock();
    }
}
// Drains the buffer step by step and checks the reported partition set shrinks
// accordingly: next-in-line + queued, then queued only, then one, then empty.
@Test
public void testBufferedPartitions() {
    try (ShareFetchBuffer fetchBuffer = new ShareFetchBuffer(logContext)) {
        fetchBuffer.setNextInLineFetch(completedFetch(topicAPartition0));
        fetchBuffer.add(completedFetch(topicAPartition1));
        fetchBuffer.add(completedFetch(topicAPartition2));
        assertEquals(allPartitions, fetchBuffer.bufferedPartitions());

        fetchBuffer.setNextInLineFetch(null);
        assertEquals(partitions(topicAPartition1, topicAPartition2), fetchBuffer.bufferedPartitions());

        fetchBuffer.poll();
        assertEquals(partitions(topicAPartition2), fetchBuffer.bufferedPartitions());

        fetchBuffer.poll();
        assertEquals(partitions(), fetchBuffer.bufferedPartitions());
    }
}
// Deprecated overload retained for compatibility: delegates with
// keepOneOfEachUnion-style flag true and a fresh default Configuration.
@Deprecated
public static MessageType convert(StructType struct, FieldProjectionFilter filter) {
    return convert(struct, filter, true, new Configuration());
}
// A Thrift i64 carrying a TIMESTAMP(MILLIS) logical annotation must convert to a
// required Parquet INT64 with the same logical type and field id preserved.
@Test
public void testConvertLogicalI64Type() {
    LogicalTypeAnnotation timestampLogicalType = LogicalTypeAnnotation.timestampType(true, TimeUnit.MILLIS);
    String fieldName = "logicalI64Type";
    Short fieldId = 0;
    ThriftType timestampI64Type = new ThriftType.I64Type();
    timestampI64Type.setLogicalTypeAnnotation(timestampLogicalType);
    StructType thriftStruct = buildOneFieldThriftStructType(fieldName, fieldId, timestampI64Type);
    MessageType actual = ThriftSchemaConvertVisitor.convert(thriftStruct, FieldProjectionFilter.ALL_COLUMNS);
    Type expectedParquetField = Types.primitive(PrimitiveTypeName.INT64, Repetition.REQUIRED)
            .as(timestampLogicalType)
            .named(fieldName)
            .withId(fieldId);
    MessageType expected = buildOneFieldParquetMessage(expectedParquetField);
    assertEquals(expected, actual);
}
public static Stream<Path> iterPaths(Path path) { Deque<Path> parents = new ArrayDeque<>(path.getNameCount()); // Push parents to the front of the stack, so the "root" is at the front Path next = path; while (next != null) { parents.addFirst(next); next = next.getParent(); } // now just iterate straight over them return ImmutableList.copyOf(parents).stream(); }
// A single-component relative path has no parents: the stream is just itself.
@Test
void testJustFile() {
    assertEquals(
        paths("a"),
        MorePaths.iterPaths(Paths.get("a")).collect(toList())
    );
}
// REST endpoint: deletes the model identified by {modelId}. getModelFromRequest
// is responsible for the 404 when the model does not exist; success is 204 with
// an empty body.
@ApiOperation(value = "Delete a model", tags = { "Models" }, code = 204)
@ApiResponses(value = {
        @ApiResponse(code = 204, message = "Indicates the model was found and has been deleted. Response-body is intentionally empty."),
        @ApiResponse(code = 404, message = "Indicates the requested model was not found.")
})
@DeleteMapping("/repository/models/{modelId}")
@ResponseStatus(HttpStatus.NO_CONTENT)
public void deleteModel(@ApiParam(name = "modelId") @PathVariable String modelId) {
    Model model = getModelFromRequest(modelId);
    repositoryService.deleteModel(model.getId());
}
// PUT with an empty JSON body must leave every model field unchanged; the
// response is compared field-by-field against the values saved beforehand.
@Test
@Deployment(resources = { "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml" })
public void testUpdateModelNoFields() throws Exception {
    Model model = null;
    try {
        // Pin the clock so createTime/lastUpdateTime can be asserted exactly.
        Calendar now = Calendar.getInstance();
        now.set(Calendar.MILLISECOND, 0);
        processEngineConfiguration.getClock().setCurrentTime(now.getTime());

        model = repositoryService.newModel();
        model.setCategory("Model category");
        model.setKey("Model key");
        model.setMetaInfo("Model metainfo");
        model.setName("Model name");
        model.setVersion(2);
        model.setDeploymentId(deploymentId);
        repositoryService.saveModel(model);

        // Use empty request-node, nothing should be changed after update
        ObjectNode requestNode = objectMapper.createObjectNode();

        HttpPut httpPut = new HttpPut(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_MODEL, model.getId()));
        httpPut.setEntity(new StringEntity(requestNode.toString()));
        CloseableHttpResponse response = executeRequest(httpPut, HttpStatus.SC_OK);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThat(responseNode).isNotNull();
        assertThatJson(responseNode)
                .when(Option.IGNORING_EXTRA_FIELDS)
                .isEqualTo("{"
                        + "name: 'Model name',"
                        + "key: 'Model key',"
                        + "category: 'Model category',"
                        + "version: 2,"
                        + "metaInfo: 'Model metainfo',"
                        + "deploymentId: '" + deploymentId + "',"
                        + "id: '" + model.getId() + "',"
                        + "url: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_MODEL, model.getId()) + "',"
                        + "deploymentUrl: '" + SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT, deploymentId) + "',"
                        + "createTime: " + new TextNode(getISODateStringWithTZ(now.getTime())) + ","
                        + "lastUpdateTime: " + new TextNode(getISODateStringWithTZ(now.getTime()))
                        + "}");
    } finally {
        try {
            repositoryService.deleteModel(model.getId());
        } catch (Throwable ignore) {
            // Ignore, model might not be created
        }
    }
}
// Merges CLI arguments on top of the base configuration and forces the
// deployment target to the Kubernetes session executor.
// @throws CliArgsException on unparsable command-line options
Configuration getEffectiveConfiguration(String[] args) throws CliArgsException {
    final CommandLine commandLine = cli.parseCommandLineOptions(args, true);
    final Configuration effectiveConfiguration = new Configuration(baseConfiguration);
    // CLI options override any base-configuration values.
    effectiveConfiguration.addAll(cli.toConfiguration(commandLine));
    effectiveConfiguration.set(DeploymentOptions.TARGET, KubernetesSessionClusterExecutor.NAME);
    return effectiveConfiguration;
}
// With JM/TM total memory pre-set to 1024 MB in the config and no overriding CLI
// options, the derived cluster specification must report 1024 MB for both.
@Test
void testHeapMemoryPropertyWithConfigDefaultValue() throws Exception {
    final String[] args = new String[] {"-e", KubernetesSessionClusterExecutor.NAME};
    final KubernetesSessionCli cli = createFlinkKubernetesCustomCliWithJmAndTmTotalMemory(1024);
    final Configuration executorConfig = cli.getEffectiveConfiguration(args);
    final ClusterClientFactory<String> clientFactory = getClusterClientFactory(executorConfig);
    final ClusterSpecification clusterSpecification = clientFactory.getClusterSpecification(executorConfig);
    assertThat(clusterSpecification.getMasterMemoryMB()).isEqualTo(1024);
    assertThat(clusterSpecification.getTaskManagerMemoryMB()).isEqualTo(1024);
}
// Maps the SSL endpoint-identification-algorithm config to a boolean:
// missing / empty / "none" (case-insensitive) -> false, "https" -> true,
// anything else -> ConfigException (fail fast on unsupported algorithms).
@VisibleForTesting
static boolean shouldVerifySslHostname(final Map<String, Object> config) {
    final Object endpointIdentificationAlgoConfig =
        config.get(KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG);
    if (endpointIdentificationAlgoConfig == null) {
        return false;
    }
    final String endpointIdentificationAlgo = endpointIdentificationAlgoConfig.toString();
    if (endpointIdentificationAlgo.isEmpty()
        || endpointIdentificationAlgo
            .equalsIgnoreCase(KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_NONE)) {
        return false;
    } else if (endpointIdentificationAlgo
        .equalsIgnoreCase(KsqlConfig.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_HTTPS)) {
        return true;
    } else {
        throw new ConfigException("Endpoint identification algorithm not supported: "
            + endpointIdentificationAlgo);
    }
}
// An empty-string algorithm config must disable hostname verification.
@Test
public void shouldDisableHostnameVerificationOnEmptyConfig() {
    // When / Then:
    assertThat(DefaultConnectClientFactory.shouldVerifySslHostname(CONFIGS_WITH_HOSTNAME_VERIFICATION_EMPTY),
        is(false));
}
// Computes the order in which plan nodes should be scheduled by walking the plan
// with the internal Visitor, which appends node ids to the builder as it goes.
public static List<PlanNodeId> scheduleOrder(PlanNode root)
{
    ImmutableList.Builder<PlanNodeId> schedulingOrder = ImmutableList.builder();
    root.accept(new Visitor(), schedulingOrder::add);
    return schedulingOrder.build();
}
// For an inner join the expected schedule lists b before a — presumably the
// build side is scheduled ahead of the probe side; confirm against the Visitor.
@Test
public void testJoinOrder() {
    PlanBuilder planBuilder = new PlanBuilder(TEST_SESSION, new PlanNodeIdAllocator(), METADATA);
    TableScanNode a = planBuilder.tableScan(emptyList(), emptyMap());
    TableScanNode b = planBuilder.tableScan(emptyList(), emptyMap());
    List<PlanNodeId> order = scheduleOrder(planBuilder.join(JoinType.INNER, a, b));
    assertEquals(order, ImmutableList.of(b.getId(), a.getId()));
}
// Fails fast with the SeaTunnel COMMON "file not exist" error when the given
// configuration file is absent. (java.nio Files.exists would be the modern
// idiom, but toFile().exists() behaves equivalently here.)
public static void checkConfigExist(Path configFile) {
    if (!configFile.toFile().exists()) {
        throw CommonError.fileNotExistFailed("SeaTunnel", "read", configFile.toString());
    }
}
// A path guaranteed not to exist must raise the COMMON-22 error with the path
// embedded in the message.
@Test
void testExpectedError() {
    String root = System.getProperty("java.io.tmpdir");
    // Unix Path: /tmp/not/existed
    // Windows Path: %SystemDrive%\Users\<username>\AppData\Local\Temp\not\existed
    Path path = Paths.get(root, "not", "existed");
    SeaTunnelRuntimeException exception =
        Assertions.assertThrows(
            SeaTunnelRuntimeException.class, () -> FileUtils.checkConfigExist(path));
    Assertions.assertEquals(
        "ErrorCode:[COMMON-22], ErrorDescription:[SeaTunnel read file '"
            + path
            + "' failed, because it not existed.]",
        exception.getMessage());
}
// Accessor for the precomputed unique hash of this pipeline (computed elsewhere,
// presumably at construction — see the enclosing class).
@Override
public String uniqueHash() {
    return this.uniqueHash;
}
// The pipeline's unique hash must equal the digest of the original config source.
@Test
public void hashingWithOriginalSource() throws InvalidIRException {
    String source = "input { stdin {} } output { stdout {} }";
    PipelineIR pipelineIR = new PipelineIR(makeInputSection(), makeFilterSection(), makeOutputSection(), source);
    assertEquals(pipelineIR.uniqueHash(), Util.digest(source));
}
// Prepares and sends fetch requests to all eligible nodes, returning how many
// requests were sent. Success/failure handlers re-acquire the Fetcher monitor
// because they run later on the network thread, not under this synchronized call.
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
// Simulates a broker upgrade then downgrade for topic-id support:
// no topic id -> Fetch v12, topic id assigned -> latest Fetch version,
// id removed again -> back to v12. Each transition must evict the stale
// partition from the fetch session (full request expected each time).
@Test
public void testFetchTopicIdUpgradeDowngrade() {
    buildFetcher();

    TopicIdPartition fooWithoutId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0));

    // Assign foo without a topic id.
    subscriptions.assignFromUser(singleton(fooWithoutId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithoutId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithoutId.topicPartition(), 0);

    // Fetch should use version 12.
    assertEquals(1, sendFetches());
    client.prepareResponse(
        fetchRequestMatcher((short) 12,
            singletonMap(fooWithoutId,
                new PartitionData(
                    fooWithoutId.topicId(),
                    0,
                    FetchRequest.INVALID_LOG_START_OFFSET,
                    fetchSize,
                    Optional.of(validLeaderEpoch))
            ),
            emptyList()
        ),
        fullFetchResponse(1, fooWithoutId, records, Errors.NONE, 100L, 0)
    );
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();

    // Upgrade.
    TopicIdPartition fooWithId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
    subscriptions.assignFromUser(singleton(fooWithId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithId.topicPartition(), 0);

    // Fetch should use latest version.
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // foo with old topic id should be removed from the session.
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(fooWithId,
                new PartitionData(
                    fooWithId.topicId(),
                    0,
                    FetchRequest.INVALID_LOG_START_OFFSET,
                    fetchSize,
                    Optional.of(validLeaderEpoch))
            ),
            emptyList()
        ),
        fullFetchResponse(1, fooWithId, records, Errors.NONE, 100L, 0)
    );
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();

    // Downgrade.
    subscriptions.assignFromUser(singleton(fooWithoutId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithoutId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithoutId.topicPartition(), 0);

    // Fetch should use version 12.
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // foo with old topic id should be removed from the session.
    client.prepareResponse(
        fetchRequestMatcher((short) 12,
            singletonMap(fooWithoutId,
                new PartitionData(
                    fooWithoutId.topicId(),
                    0,
                    FetchRequest.INVALID_LOG_START_OFFSET,
                    fetchSize,
                    Optional.of(validLeaderEpoch))
            ),
            emptyList()
        ),
        fullFetchResponse(1, fooWithoutId, records, Errors.NONE, 100L, 0)
    );
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();
}
// Convenience overload: resolves the expression's SQL type with no additional
// (lambda/variable) type mappings.
public SqlType getExpressionSqlType(final Expression expression) {
    return getExpressionSqlType(expression, Collections.emptyMap());
}
// LIKE predicates must always resolve to BOOLEAN.
@Test
public void shouldEvaluateBooleanSchemaForLikeExpression() {
    final Expression expression = new LikePredicate(COL1, new StringLiteral("%foo"), Optional.empty());
    final SqlType exprType0 = expressionTypeManager.getExpressionSqlType(expression);
    assertThat(exprType0, is(SqlTypes.BOOLEAN));
}
// Single-record convenience overload: wraps the mutation in a singleton list
// and delegates to the batch write.
// @throws IllegalStateException propagated from the batch variant (its
//         precondition is defined there — presumably "DDL executed first").
public synchronized void write(Mutation tableRecord) throws IllegalStateException {
    write(ImmutableList.of(tableRecord));
}
// Writing before any DDL statement has been executed must fail with
// IllegalStateException.
@Test
public void testWriteSingleRecordShouldThrowExceptionWhenCalledBeforeExecuteDdlStatement() {
    // arrange
    Mutation testMutation =
        Mutation.newInsertOrUpdateBuilder("SingerId")
            .set("SingerId")
            .to(1)
            .set("FirstName")
            .to("Marc")
            .set("LastName")
            .to("Richards")
            .build();

    // act & assert
    assertThrows(IllegalStateException.class, () -> testManager.write(testMutation));
}
// Cursor-less convenience overload: starts iteration from the beginning
// (null cursor) for the given value and direction.
@Override
public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) {
    return getSqlRecordIteratorBatch(value, descending, null);
}
// Exact-value lookup with descending=true must yield records in reverse order.
// NOTE(review): method name "…Depending" looks like a typo for "…Descending" —
// confirm and rename in a follow-up (renaming alone does not affect behavior).
@Test
public void getRecordsWithCursorUsingExactValueDepending() {
    var expectedOrder = List.of(7, 4, 1);
    performCursorTest(expectedOrder, cursor -> store.getSqlRecordIteratorBatch(1, true, cursor));
}