focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Folds the given rating into this value and marks the value as explicitly set.
 * The stored rating is replaced only when it compares greater than the incoming
 * one, i.e. the smaller of the two (per Rating's ordering) is retained.
 */
public RatingValue increment(Rating rating) {
    this.set = true;
    if (value.compareTo(rating) > 0) {
        value = rating;
    }
    return this;
}
// Incrementing a fresh RatingValue with B must both mark it as set and store B.
@Test
public void increment_sets_value_and_increments_value() {
    verifySetValue(new RatingValue().increment(B), B);
}
/**
 * Merges two variance accumulators using the parallel-variance combination
 * formula; an EMPTY accumulator acts as the neutral element.
 */
VarianceAccumulator combineWith(VarianceAccumulator other) {
    // EMPTY is the identity for this operation in either position.
    if (EMPTY.equals(this)) {
        return other;
    }
    if (EMPTY.equals(other)) {
        return this;
    }
    // Correction term accounting for the difference between the two partitions' means.
    BigDecimal increment = calculateIncrement(this, other);
    BigDecimal mergedVariance = this.variance().add(other.variance()).add(increment);
    BigDecimal mergedCount = this.count().add(other.count());
    BigDecimal mergedSum = this.sum().add(other.sum());
    return newVarianceAccumulator(mergedVariance, mergedCount, mergedSum);
}
// EMPTY combined with EMPTY must stay EMPTY (identity element property).
@Test
public void testCombinesEmptyWithEmpty() {
    VarianceAccumulator result = VarianceAccumulator.EMPTY.combineWith(VarianceAccumulator.EMPTY);
    assertEquals(VarianceAccumulator.EMPTY, result);
}
/**
 * Installs the work executor and its execution context; may be called at most once.
 *
 * @throws IllegalArgumentException if {@code worker} is null
 * @throws IllegalStateException if a worker was already set
 */
public synchronized void setWorker(
        DataflowWorkExecutor worker, BatchModeExecutionContext executionContext) {
    if (worker == null) {
        throw new IllegalArgumentException("worker must be non-null");
    }
    if (this.worker != null) {
        throw new IllegalStateException("Can only call setWorker once");
    }
    this.worker = worker;
    this.executionContext = executionContext;
}
// First setWorker call succeeds; a second call must fail with IllegalStateException.
@Test
public void setWorker() {
    // We should be able to set the worker the first time.
    statusClient.setWorker(worker, executionContext);
    thrown.expect(IllegalStateException.class);
    thrown.expectMessage("setWorker once");
    statusClient.setWorker(worker, executionContext);
}
/**
 * Reads the configured attribute from the MBean server, returning null when
 * the lookup fails for connectivity (IOException) or JMX (JMException) reasons.
 */
@Override
public Object getValue() {
    try {
        return mBeanServerConn.getAttribute(getObjectName(), attributeName);
    } catch (IOException | JMException e) {
        // Best-effort gauge: any failure to read the attribute yields null.
        return null;
    }
}
// A wildcard ObjectName that matches multiple MBeans should make getValue() return null.
@Test
public void returnsNullIfObjectNamePatternAmbiguous() throws Exception {
    ObjectName objectName = new ObjectName("JmxAttributeGaugeTest:type=test,*");
    JmxAttributeGauge gauge = new JmxAttributeGauge(mBeanServer, objectName, "Value");
    assertThat(gauge.getValue()).isNull();
}
// Decodes the readable bytes of 'in' into characters, leaving any trailing
// partial multi-byte sequence unconsumed so a later call can complete it.
// NOTE(review): the field is named 'encoder' but is used as a charset decoder
// here — consider renaming; confirm against the declaring class.
char[] decode(final ByteBuf in) {
    final CharBuffer charBuffer = CharBuffer.allocate(in.capacity());
    encoder.reset();
    final ByteBuffer nioBuffer = in.nioBuffer();
    // 'false' = more input may follow, so an incomplete trailing sequence is not an error.
    encoder.decode(nioBuffer, charBuffer, false);
    final char[] buf = new char[charBuffer.position()];
    charBuffer.flip();
    charBuffer.get(buf);
    // Netty won't update the reader-index of the original buffer when its nio-buffer
    // representation is read from. Adjust the position of the original buffer.
    in.readerIndex(nioBuffer.position());
    return buf;
}
// Feeding a multi-byte character in two chunks must yield no chars on the partial
// chunk and both characters (with the reader index fully advanced) on the second.
@Test
public void testDecodePartialMultibyteStringInSteps() throws Exception {
    final byte[] multibyteCharacter = "\u3053\u308C".getBytes(StandardCharsets.UTF_8); // two 3-byte characters.
    assert multibyteCharacter.length == 6;
    final XMLLightweightParser parser = new XMLLightweightParser();
    final ByteBuf in = ByteBufAllocator.DEFAULT.buffer(10);
    // Execute system under test.
    in.writeBytes(Arrays.copyOfRange(multibyteCharacter, 0, 2));
    parser.decode(in);
    in.writeBytes(Arrays.copyOfRange(multibyteCharacter, 2, 6));
    final char[] result = parser.decode(in);
    // Verify results.
    assertEquals(2, result.length);
    assertEquals('\u3053', result[0]);
    assertEquals('\u308C', result[1]);
    assertEquals(6, in.readerIndex());
}
// Convenience overload: looks up/compiles the pattern with named-captures-only disabled.
public Grok cachedGrokForPattern(String pattern) {
    return cachedGrokForPattern(pattern, false);
}
// A pattern referencing an undefined key must surface as a RuntimeException
// caused by IllegalArgumentException when named-captures-only is requested.
@Test
public void cachedGrokForPatternWithNamedCaptureOnlyThrowsRuntimeException() {
    expectedException.expectMessage("No definition for key 'EMPTY' found, aborting");
    expectedException.expect(RuntimeException.class);
    expectedException.expectCause(Matchers.any(IllegalArgumentException.class));
    final Set<GrokPattern> newPatterns = Collections.singleton(GrokPattern.create("EMPTY", ""));
    when(grokPatternService.loadAll()).thenReturn(newPatterns);
    eventBus.post(GrokPatternsUpdatedEvent.create(Collections.singleton("EMPTY")));
    grokPatternRegistry.cachedGrokForPattern("%{EMPTY}", true);
}
/**
 * Starts a wash cycle unless one is already running. The state check and the
 * transition to WASHING happen atomically; the washing itself runs asynchronously
 * and flips the state back via endOfWashing.
 */
public void wash() {
    synchronized (this) {
        var currentState = getWashingMachineState();
        LOGGER.info("{}: Actual machine state: {}", Thread.currentThread().getName(), currentState);
        if (this.washingMachineState == WashingMachineState.WASHING) {
            LOGGER.error("Cannot wash if the machine has been already washing!");
            return;
        }
        this.washingMachineState = WashingMachineState.WASHING;
    }
    // Outside the lock: simulate the wash and schedule the end-of-cycle callback.
    LOGGER.info("{}: Doing the washing", Thread.currentThread().getName());
    this.delayProvider.executeAfterDelay(50, TimeUnit.MILLISECONDS, this::endOfWashing);
}
// Two wash() calls: the second is rejected; state is WASHING until the delayed
// task runs, then back to ENABLED.
@Test
void wash() {
    var washingMachine = new WashingMachine(fakeDelayProvider);
    washingMachine.wash();
    washingMachine.wash();
    var machineStateGlobal = washingMachine.getWashingMachineState();
    fakeDelayProvider.task.run();
    // washing machine remains in washing state
    assertEquals(WashingMachineState.WASHING, machineStateGlobal);
    // washing machine goes back to enabled state
    assertEquals(WashingMachineState.ENABLED, washingMachine.getWashingMachineState());
}
/**
 * Runs the pluggable task through the task extension. Plugin exceptions are
 * logged to the publisher rather than rethrown here, the console-logger context
 * is always cleared, and a missing or failed ExecutionResult is reported as a
 * build error.
 */
@Override
public void build(final DefaultGoPublisher publisher, final EnvironmentVariableContext environmentVariableContext,
                  TaskExtension taskExtension, ArtifactExtension artifactExtension,
                  PluginRequestProcessorRegistry pluginRequestProcessorRegistry, Charset consoleLogCharset) {
    ExecutionResult executionResult = null;
    try {
        executionResult = taskExtension.execute(pluginId,
                (task, pluginDescriptor) -> executeTask(task, publisher, environmentVariableContext, consoleLogCharset));
    } catch (Exception e) {
        // Plugin failures are surfaced in the build log, not propagated.
        logException(publisher, e);
    } finally {
        // Always detach the per-job console logger context, even on crash.
        JobConsoleLoggerInternal.unsetContext();
    }
    if (executionResult == null) {
        logError(publisher, "ExecutionResult cannot be null. Please return a success or a failure response.");
    } else if (!executionResult.isSuccessful()) {
        logError(publisher, executionResult.getMessagesForDisplay());
    }
}
// Even when task execution throws, the static JobConsoleLogger context must be cleared.
@Test
public void shouldUnsetTaskExecutionContextFromJobConsoleLoggerWhenTaskExecutionThrowsException() {
    final PluggableTaskBuilder builder = spy(new PluggableTaskBuilder(runIfConfigs, cancelBuilder, pluggableTask, "", ""));
    taskExtension = mock(TaskExtension.class);
    when(taskExtension.execute(eq(TEST_PLUGIN_ID), any())).thenThrow(new RuntimeException("something"));
    assertThatThrownBy(() -> builder.build(goPublisher, variableContext, taskExtension, null, null, UTF_8))
            .hasMessage("something");
    assertThat((TaskExecutionContext) ReflectionUtil.getStaticField(JobConsoleLogger.class, "context")).isNull();
}
// Reports the phase this analyzer runs in; constant, taken from ANALYSIS_PHASE.
@Override
public AnalysisPhase getAnalysisPhase() {
    return ANALYSIS_PHASE;
}
// The version filter analyzer must run in the POST_INFORMATION_COLLECTION3 phase.
@Test
public void testGetAnalysisPhase() {
    VersionFilterAnalyzer instance = new VersionFilterAnalyzer();
    instance.initialize(getSettings());
    AnalysisPhase expResult = AnalysisPhase.POST_INFORMATION_COLLECTION3;
    AnalysisPhase result = instance.getAnalysisPhase();
    assertEquals(expResult, result);
}
/**
 * Authenticates against Backblaze by probing each region with the given key
 * until listBuckets succeeds, then resolves (or creates) the export bucket.
 *
 * @throws BackblazeCredentialsException if no region accepts the credentials
 * @throws IOException on bucket setup failures
 */
public void init(String keyId, String applicationKey, String exportService)
        throws BackblazeCredentialsException, IOException {
    // Fetch all the available buckets and use that to find which region the user is in
    ListBucketsResponse listBucketsResponse = null;
    String userRegion = null;
    // The Key ID starts with the region identifier number, so reorder the regions such that
    // the first region is most likely the user's region
    String regionId = keyId.substring(0, 3);
    // NOTE(review): this comparator is not symmetric (compare(a,b) can be -1 while
    // compare(b,a) is 0), which violates the Comparator contract; it works as a
    // "move likely region first" nudge for a small fixed list — TODO confirm.
    BACKBLAZE_REGIONS.sort(
            (String region1, String region2) -> {
                if (region1.endsWith(regionId)) {
                    return -1;
                }
                return 0;
            });
    Throwable s3Exception = null;
    for (String region : BACKBLAZE_REGIONS) {
        try {
            s3Client = backblazeS3ClientFactory.createS3Client(keyId, applicationKey, region);
            listBucketsResponse = s3Client.listBuckets();
            userRegion = region;
            break;
        } catch (S3Exception e) {
            // Keep the last failure so it can be reported if every region is rejected.
            s3Exception = e;
            if (s3Client != null) {
                s3Client.close();
            }
            if (e.statusCode() == 403) {
                monitor.debug(() -> String.format("User is not in region %s", region));
            }
        }
    }
    if (listBucketsResponse == null || userRegion == null) {
        throw new BackblazeCredentialsException(
                "User's credentials or permissions are not valid for any regions available", s3Exception);
    }
    bucketName = getOrCreateBucket(s3Client, listBucketsResponse, userRegion, exportService);
}
// When no existing bucket matches, init must create one via createBucket.
@Test
public void testInitBucketCreated() throws BackblazeCredentialsException, IOException {
    Bucket bucket = Bucket.builder().name("invalid-name").build();
    when(s3Client.listBuckets()).thenReturn(ListBucketsResponse.builder().buckets(bucket).build());
    BackblazeDataTransferClient client = createDefaultClient();
    client.init(KEY_ID, APP_KEY, EXPORT_SERVICE);
    verify(s3Client, times(1)).createBucket(any(CreateBucketRequest.class));
}
public void unregisterPartition(ResultPartitionID partitionId) { checkNotNull(partitionId); synchronized (registeredHandlers) { LOG.debug("unregistering {}", partitionId); // NOTE: tolerate un-registration of non-registered task (unregister is always called // in the cleanup phase of a task even if it never came to the registration - see // Task.java) registeredHandlers.remove(partitionId); } }
// After unregistering partition 2, publishing to partition 1 still notifies its
// listeners, while publishing to partition 2 reaches nobody.
@Test
void unregisterPartition() {
    ResultPartitionID partitionId1 = new ResultPartitionID();
    ResultPartitionID partitionId2 = new ResultPartitionID();
    TaskEventDispatcher ted = new TaskEventDispatcher();
    AllWorkersDoneEvent event = new AllWorkersDoneEvent();
    assertThat(ted.publish(partitionId1, event)).isFalse();
    ted.registerPartition(partitionId1);
    ted.registerPartition(partitionId2);
    OneShotEventListener eventListener1a = new OneShotEventListener(event);
    ZeroShotEventListener eventListener1b = new ZeroShotEventListener();
    OneShotEventListener eventListener2 = new OneShotEventListener(event);
    ted.subscribeToEvent(partitionId1, eventListener1a, AllWorkersDoneEvent.class);
    ted.subscribeToEvent(partitionId2, eventListener1b, AllWorkersDoneEvent.class);
    ted.subscribeToEvent(partitionId1, eventListener2, AllWorkersDoneEvent.class);
    ted.unregisterPartition(partitionId2);
    // publish something for partitionId1 triggering all according listeners
    assertThat(ted.publish(partitionId1, event)).isTrue();
    assertThat(eventListener1a.fired)
            .withFailMessage("listener should have fired for AllWorkersDoneEvent")
            .isTrue();
    assertThat(eventListener2.fired)
            .withFailMessage("listener should have fired for AllWorkersDoneEvent")
            .isTrue();
    // now publish something for partitionId2 which should not trigger any listeners
    assertThat(ted.publish(partitionId2, event)).isFalse();
}
/**
 * Reads the broker external view from ZooKeeper and returns host:port strings
 * for every broker currently marked ONLINE. Any failure is logged and an empty
 * list returned (best-effort).
 */
public List<String> getLiveBrokers() {
    List<String> brokerUrls = new ArrayList<>();
    try {
        byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true);
        // Znode payload may be compressed; normalize before parsing.
        brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData);
        JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData));
        JsonNode brokerResourceNode = jsonObject.get("mapFields");
        Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields();
        while (resourceEntries.hasNext()) {
            JsonNode resource = resourceEntries.next().getValue();
            Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields();
            while (brokerEntries.hasNext()) {
                Entry<String, JsonNode> brokerEntry = brokerEntries.next();
                String brokerName = brokerEntry.getKey();
                // Only instances named "Broker_..." whose state is ONLINE are live.
                if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) {
                    brokerUrls.add(getHostPort(brokerName));
                }
            }
        }
    } catch (Exception e) {
        LOGGER.warn("Exception while reading External view from zookeeper", e);
        // ignore
    }
    return brokerUrls;
}
// With a TLS instance config (no netty-tls), the live-broker list uses the default port.
@Test
public void testGetBrokerListByInstanceConfigTlsDefault() {
    configureData(_instanceConfigTls, false);
    final List<String> brokers = _externalViewReaderUnderTest.getLiveBrokers();
    assertEquals(brokers, Arrays.asList("first.pug-pinot-broker-headless:8099"));
}
// Deserializes a JSON array string into elementClass (callers pass e.g. Foo[].class);
// parse failures are wrapped in an unchecked RuntimeException.
public static <T> T getFromJSONArray(String text, Class<T> elementClass) {
    try {
        return OBJECT_MAPPER.readValue(text, elementClass);
    } catch (JsonProcessingException e) {
        throw new RuntimeException("getFromJSONArray exception.", e);
    }
}
// Smoke test: a JSON array string round-trips into EsIndex[] without throwing.
// NOTE(review): no assertions — the printed output is not verified.
@Test
public void testGetJsonArray() {
    EsRestClient.EsIndex[] esIndices = getFromJSONArray(jsonArray, EsRestClient.EsIndex[].class);
    System.out.println(JSONObject.valueToString(esIndices));
}
/**
 * Drains the traverser into the given ordinals, buffering the item that could
 * not be emitted so the next invocation resumes with it.
 *
 * @return {@code true} if the traverser was exhausted, {@code false} if an item is pending
 */
@SuppressWarnings("unchecked")
protected final <E> boolean emitFromTraverser(@Nonnull int[] ordinals, @Nonnull Traverser<E> traverser) {
    // Resume with the item rejected by the previous attempt, if any.
    E item = pendingItem != null ? (E) pendingItem : traverser.next();
    pendingItem = null;
    while (item != null) {
        if (!tryEmit(ordinals, item)) {
            // Outbox full: park the item and report incomplete.
            pendingItem = item;
            return false;
        }
        item = traverser.next();
    }
    return true;
}
// Repeatedly drains the traverser until done; each pass must deliver the item
// to both ordinals 1 and 2.
@Test
public void when_emitFromTraverserTo1And2_then_emittedTo1And2() {
    // Given
    Traverser<Object> trav = Traversers.traverseItems(MOCK_ITEM, MOCK_ITEM);
    boolean done;
    do {
        // When
        done = p.emitFromTraverser(ORDINALS_1_2, trav);
        // Then
        validateReceptionAtOrdinals(MOCK_ITEM, ORDINALS_1_2);
    } while (!done);
}
// Crawls the component tree from its root, pre-order, stopping at PROJECT depth,
// and runs executeForProject on each project component visited.
@Override
public void execute(ComputationStep.Context context) {
    new DepthTraversalTypeAwareCrawler(
            new TypeAwareVisitorAdapter(CrawlerDepthLimit.PROJECT, PRE_ORDER) {
                @Override
                public void visitProject(Component project) {
                    executeForProject(project);
                }
            }).visit(treeRootHolder.getRoot());
}
// Two conditions on the same metric at the same QG level: the resulting measure
// carries the ERROR level and the leak-period condition's text.
@Test
void new_measure_has_condition_on_leak_period_when_all_conditions_on_specific_metric_has_same_QG_level() {
    int rawValue = 0;
    Condition fixedCondition = createLessThanCondition(INT_METRIC_1, "1");
    Condition periodCondition = createLessThanCondition(INT_METRIC_1, "1");
    qualityGateHolder.setQualityGate(new QualityGate(SOME_QG_UUID, SOME_QG_NAME, of(fixedCondition, periodCondition)));
    Measure measure = newMeasureBuilder().create(rawValue);
    measureRepository.addRawMeasure(PROJECT_REF, INT_METRIC_1_KEY, measure);
    underTest.execute(new TestComputationStepContext());
    Optional<Measure> rawMeasure1 = measureRepository.getAddedRawMeasure(PROJECT_REF, INT_METRIC_1_KEY);
    assertThat(rawMeasure1.get())
            .hasQualityGateLevel(ERROR)
            .hasQualityGateText(dumbResultTextAnswer(periodCondition, ERROR, rawValue));
}
/**
 * Returns a copy of this backoff configuration with the given initial backoff.
 *
 * @throws IllegalArgumentException if {@code initialBackoff} is not strictly positive
 */
public FluentBackoff withInitialBackoff(Duration initialBackoff) {
    if (!initialBackoff.isLongerThan(Duration.ZERO)) {
        throw new IllegalArgumentException(
                "initialBackoff " + initialBackoff + " must be at least 1 millisecond");
    }
    return new FluentBackoff(
            exponent, initialBackoff, maxBackoff, maxCumulativeBackoff, maxRetries, throttledTimeCounter);
}
// A zero initial backoff must be rejected with the exact validation message.
@Test
public void testInvalidInitialBackoff() {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("initialBackoff PT0S must be at least 1 millisecond");
    defaultBackoff.withInitialBackoff(Duration.ZERO);
}
// Unnamed variant: delegates to the overload taking an explicit Named, using the empty name.
@Override
public <KR> KStream<KR, V> selectKey(final KeyValueMapper<? super K, ? super V, ? extends KR> mapper) {
    return selectKey(mapper, NamedInternal.empty());
}
// A null mapper must fail fast with a descriptive NullPointerException.
@Test
public void shouldNotAllowNullMapperOnSelectKey() {
    final NullPointerException exception = assertThrows(
            NullPointerException.class,
            () -> testStream.selectKey(null));
    assertThat(exception.getMessage(), equalTo("mapper can't be null"));
}
/**
 * Strictly converts an Avro GenericRecord into a Beam Row: each field value is
 * converted according to both its Avro field schema and the Beam field type.
 * When {@code schema} is null, it is derived from the record's own Avro schema.
 */
public static Row toBeamRowStrict(GenericRecord record, @Nullable Schema schema) {
    Schema beamSchema = schema != null ? schema : toBeamSchema(record.getSchema());
    org.apache.avro.Schema avroSchema = record.getSchema();
    Row.Builder builder = Row.withSchema(beamSchema);
    for (Field field : beamSchema.getFields()) {
        String fieldName = field.getName();
        org.apache.avro.Schema fieldAvroSchema = avroSchema.getField(fieldName).schema();
        builder.addValue(convertAvroFieldStrict(record.get(fieldName), fieldAvroSchema, field.getType()));
    }
    return builder.build();
}
// Conversion must work both with the native timestamp datum and with a joda DateTime datum.
@Test
public void testGenericRecordToBeamRow() {
    GenericRecord genericRecord = getGenericRecord();
    Row row = AvroUtils.toBeamRowStrict(getGenericRecord(), null);
    assertEquals(getBeamRow(), row);
    // Alternatively, a timestamp-millis logical type can have a joda datum.
    genericRecord.put("timestampMillis", new DateTime(genericRecord.get("timestampMillis")));
    row = AvroUtils.toBeamRowStrict(getGenericRecord(), null);
    assertEquals(getBeamRow(), row);
}
// Whether the copy should block until completion; mirrors the 'blocking' flag.
public boolean shouldBlock() {
    return blocking;
}
// Blocking defaults to true and can be turned off via the builder.
// NOTE(review): test name has a typo ("Blokcing").
@Test
public void testSetBlokcing() {
    final DistCpOptions.Builder builder = new DistCpOptions.Builder(
            Collections.singletonList(new Path("hdfs://localhost:8020/source")),
            new Path("hdfs://localhost:8020/target/"));
    Assert.assertTrue(builder.build().shouldBlock());
    builder.withBlocking(false);
    Assert.assertFalse(builder.build().shouldBlock());
}
/**
 * Gathers request, QPS and RT samples into one list; returns an empty list
 * when collection is disabled.
 */
@Override
public List<MetricSample> collect() {
    List<MetricSample> samples = new ArrayList<>();
    if (isCollectEnabled()) {
        collectRequests(samples);
        collectQPS(samples);
        collectRT(samples);
    }
    return samples;
}
// Posts three request events with RTs 10/20/30 and checks the aggregated
// max/min/avg RT samples.
@Test
public void testRtAggregation() {
    metricsDispatcher.addListener(collector);
    ConfigManager configManager = applicationModel.getApplicationConfigManager();
    MetricsConfig config = configManager.getMetrics().orElse(null);
    AggregationConfig aggregationConfig = new AggregationConfig();
    aggregationConfig.setEnabled(true);
    config.setAggregation(aggregationConfig);
    List<Long> rtList = new ArrayList<>();
    rtList.add(10L);
    rtList.add(20L);
    rtList.add(30L);
    for (Long requestTime : rtList) {
        RequestEvent requestEvent = RequestEvent.toRequestEvent(
                applicationModel, null, null, null, invocation,
                MetricsSupport.getSide(invocation), MethodMetric.isServiceLevel(applicationModel));
        TestRequestEvent testRequestEvent =
                new TestRequestEvent(requestEvent.getSource(), requestEvent.getTypeWrapper());
        testRequestEvent.putAttachment(MetricsConstants.INVOCATION, invocation);
        testRequestEvent.putAttachment(ATTACHMENT_KEY_SERVICE, MetricsSupport.getInterfaceName(invocation));
        testRequestEvent.putAttachment(MetricsConstants.INVOCATION_SIDE, MetricsSupport.getSide(invocation));
        testRequestEvent.setRt(requestTime);
        MetricsEventBus.post(testRequestEvent, () -> null);
    }
    List<MetricSample> samples = collector.collect();
    for (MetricSample sample : samples) {
        GaugeMetricSample gaugeMetricSample = (GaugeMetricSample<?>) sample;
        if (gaugeMetricSample.getName().endsWith("max.milliseconds.aggregate")) {
            Assertions.assertEquals(30, gaugeMetricSample.applyAsDouble());
        }
        if (gaugeMetricSample.getName().endsWith("min.milliseconds.aggregate")) {
            Assertions.assertEquals(10L, gaugeMetricSample.applyAsDouble());
        }
        if (gaugeMetricSample.getName().endsWith("avg.milliseconds.aggregate")) {
            Assertions.assertEquals(20L, gaugeMetricSample.applyAsDouble());
        }
    }
}
/**
 * Looks up the public app namespace with the given name; returns the first
 * match, or null when none exists.
 */
public AppNamespace findPublicAppNamespace(String namespaceName) {
    List<AppNamespace> matches = appNamespaceRepository.findByNameAndIsPublic(namespaceName, true);
    return CollectionUtils.isEmpty(matches) ? null : matches.get(0);
}
// Against seeded data: a public namespace is found, a private one yields null.
@Test
@Sql(scripts = "/sql/appnamespaceservice/init-appnamespace.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testFindPublicAppNamespaceByName() {
    Assert.assertNotNull(appNamespaceService.findPublicAppNamespace("datasourcexml"));
    Assert.assertNull(appNamespaceService.findPublicAppNamespace("TFF.song0711-02"));
}
/**
 * Drives the scenario: runs beforeRun once, executes steps in nextStepIndex()
 * order, records results, and guarantees afterRun/latch handling even on crash.
 */
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() controls ordering so debug step-back can rewind.
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Record whatever partial result exists, then synthesize a failure step.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
// Reading a binary file twice must produce byte-identical content (match passes).
@Test
void testReadAndMatchBytes() {
    run(
        "bytes data = read('karate-logo.png')",
        "match data == read('karate-logo.png')"
    );
}
@Override public GenericProtobufNativeRecord read(byte[] bytes, int offset, int length) { try { if (!(bytes.length == length && offset == 0)) { //skip unnecessary bytes copy bytes = Arrays.copyOfRange(bytes, offset, offset + length); } return new GenericProtobufNativeRecord(schemaVersion, descriptor, fields, DynamicMessage.parseFrom(descriptor, bytes)); } catch (InvalidProtocolBufferException e) { throw new SchemaSerializationException(e); } }
// Round-trips a protobuf message and checks both the generic fields and the
// underlying DynamicMessage exposed as the native object.
@Test
public void testGetNativeRecord() {
    message = TestMessage.newBuilder().setStringField(STRING_FIELD_VLUE).setDoubleField(DOUBLE_FIELD_VLUE).build();
    GenericProtobufNativeReader genericProtobufNativeReader =
            new GenericProtobufNativeReader(genericProtobufNativeSchema.getProtobufNativeSchema());
    GenericRecord record = genericProtobufNativeReader.read(message.toByteArray());
    assertEquals(record.getField("stringField"), STRING_FIELD_VLUE);
    assertEquals(record.getField("doubleField"), DOUBLE_FIELD_VLUE);
    assertEquals(SchemaType.PROTOBUF_NATIVE, record.getSchemaType());
    DynamicMessage nativeRecord = (DynamicMessage) record.getNativeObject();
    assertEquals(nativeRecord.getField(nativeRecord.getDescriptorForType().findFieldByName("stringField")), STRING_FIELD_VLUE);
    assertEquals(nativeRecord.getField(nativeRecord.getDescriptorForType().findFieldByName("doubleField")), DOUBLE_FIELD_VLUE);
}
// Stores the square root of the given weight (flattens the configured ratio).
// NOTE(review): Math.pow(weight, 0.5) differs from Math.sqrt only for -0.0;
// kept as-is to preserve exact behavior.
public DynamicThrottlePolicy setWeight(double weight) {
    this.weight = Math.pow(weight, 0.5);
    return this;
}
// Simulates two policies with weights 1 and 0.5 and checks the window shares
// split roughly 60/40 while staying efficient and waste-free.
@Test
void twoWeightedPoliciesWithUnboundedTaskQueue() {
    for (int repeat = 0; repeat < 3; repeat++) {
        long operations = 1_000_000;
        int workPerSuccess = 6 + (int) (30 * Math.random());
        int numberOfWorkers = 1 + (int) (10 * Math.random());
        int maximumTasksPerWorker = 100_000;
        int workerParallelism = 32;
        ManualTimer timer = new ManualTimer();
        DynamicThrottlePolicy policy1 = new DynamicThrottlePolicy(timer);
        DynamicThrottlePolicy policy2 = new DynamicThrottlePolicy(timer).setWeight(0.5);
        Summary summary = run(operations, workPerSuccess, numberOfWorkers, maximumTasksPerWorker,
                workerParallelism, timer, policy1, policy2);
        double minMaxPending = numberOfWorkers * workerParallelism;
        double maxMaxPending = numberOfWorkers * maximumTasksPerWorker;
        assertInRange(minMaxPending, summary.averagePending, maxMaxPending);
        // Actual shares are not distributed perfectly proportionally to weights, but close enough.
        assertInRange(minMaxPending * 0.6, summary.averageWindows[0], maxMaxPending * 0.6);
        assertInRange(minMaxPending * 0.4, summary.averageWindows[1], maxMaxPending * 0.4);
        assertInRange(1, summary.inefficiency, 1.02);
        assertInRange(0, summary.waste, 0);
    }
}
// Produces a description of the topology; synchronized because the internal
// builder is mutated by other synchronized methods on this class.
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
// The zero-arg count() over a session window must not alter the described
// topology structure, and the built topology must use a persistent store.
@Test
public void sessionWindowZeroArgCountShouldPreserveTopologyStructure() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
            .groupByKey()
            .windowedBy(SessionWindows.with(ofMillis(1)))
            .count();
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
            "Topologies:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> KSTREAM-AGGREGATE-0000000002\n" +
            "    Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
            "      --> none\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n\n",
            describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true));
}
// Intentionally a no-op: this implementation does not support creating services,
// but must still satisfy the interface without throwing.
@Override
public void createService(Service service, AbstractSelector selector) throws NacosException {
}
// The no-op createService must complete without throwing.
@Test
void testCreateService() {
    Service service = new Service();
    Assertions.assertDoesNotThrow(() -> {
        delegate.createService(service, new NoneSelector());
    });
}
/**
 * FEEL max(): returns the largest element of the list, or an error event when
 * the list is null/empty or its items are not mutually comparable.
 */
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
    if (list == null || list.isEmpty()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
    }
    try {
        return FEELFnResult.ofResult(Collections.max(list, new InterceptNotComparableComparator()));
    } catch (ClassCastException cce) {
        // The comparator surfaces non-comparable items as a ClassCastException.
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
    }
}
// max() over an empty list must report an InvalidParametersEvent error.
@Test
void invokeEmptyList() {
    FunctionTestUtil.assertResultError(maxFunction.invoke(Collections.emptyList()), InvalidParametersEvent.class);
}
// Grows the internal buffer so at least 'len' more bytes can be written.
final void ensureAvailable(int len) {
    if (available() < len) {
        if (buffer != null) {
            // Grow to at least double, never less than what's needed or the first-growth floor.
            // NOTE(review): buffer.length << 1 and buffer.length + len can overflow to a
            // negative capacity for very large buffers — TODO confirm callers bound 'len'.
            int newCap = Math.max(Math.max(buffer.length << 1, buffer.length + len), firstGrowthSize);
            buffer = Arrays.copyOf(buffer, newCap);
        } else {
            // First allocation: double 'len' when it dominates, else the configured initial size.
            buffer = new byte[len > initialSize / 2 ? len * 2 : initialSize];
        }
    }
}
// With no buffer and len=5 (> initialSize/2 here), allocation is len*2 = 10.
@Test
public void testEnsureAvailable() {
    out.buffer = null;
    out.ensureAvailable(5);
    assertEquals(10, out.buffer.length);
}
/**
 * Serializes a Connect value as a boolean payload.
 *
 * @throws DataException if the schema is non-null but not BOOLEAN, or the value is not a Boolean
 */
@Override
public byte[] fromConnectData(String topic, Schema schema, Object value) {
    if (schema != null && schema.type() != Type.BOOLEAN) {
        throw new DataException("Invalid schema type for BooleanConverter: " + schema.type().toString());
    }
    try {
        return serializer.serialize(topic, (Boolean) value);
    } catch (ClassCastException cast) {
        throw new DataException("BooleanConverter is not compatible with objects of type " + value.getClass());
    }
}
// A non-Boolean value under a BOOLEAN schema must raise DataException.
@Test
public void testFromConnectInvalidValue() {
    assertThrows(DataException.class,
            () -> converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, "true"));
}
// Current activity flag, read from the atomic 'active' holder.
public boolean isActive() {
    return active.get();
}
// Active while running; inactive after destroy, and destroy is idempotent.
@Test
public void testIsActive() {
    DynamicThreadPoolExecutor executor = new DynamicThreadPoolExecutor(
            1, 1, 1000L, TimeUnit.MILLISECONDS, 1000L, true, 1000L,
            new ArrayBlockingQueue<>(1), "test", Thread::new,
            new ThreadPoolExecutor.DiscardOldestPolicy());
    Assert.assertTrue(executor.isActive());
    // waiting for terminated
    executor.destroy();
    while (!executor.isTerminated()) {
    }
    Assert.assertFalse(executor.isActive());
    executor.destroy();
    Assert.assertFalse(executor.isActive());
}
/**
 * Tells whether the object can be passed where the given class is expected:
 * null matches any non-primitive; primitives are checked against their boxed type.
 */
public static boolean isCompatible(Class<?> c, Object o) {
    if (o == null) {
        // null is assignable to every reference type, never to a primitive.
        return !c.isPrimitive();
    }
    Class<?> target = c.isPrimitive() ? getBoxedClass(c) : c;
    return target == o.getClass() || target.isInstance(o);
}
// Array overload: arity mismatch and type mismatch fail; a matching boxed double passes.
@Test
void testIsCompatibleWithArray() {
    assertFalse(ReflectUtils.isCompatible(new Class[] {short.class, int.class}, new Object[] {(short) 1}));
    assertFalse(ReflectUtils.isCompatible(new Class[] {double.class}, new Object[] {"hello"}));
    assertTrue(ReflectUtils.isCompatible(new Class[] {double.class}, new Object[] {1.2}));
}
/**
 * Maps a Solidity wrapper type name to the Java type used in generated code.
 * Order matters: exact-name matches (Byte, Int, Long, ...) are tested before
 * the broader startsWith("Int") fallback, and "Uint"/"Bytes" prefixes cover
 * every width variant.
 *
 * @throws UnsupportedOperationException when no mapping exists
 */
static TypeName getNativeType(TypeName typeName) {
    if (typeName instanceof ParameterizedTypeName) {
        return getNativeType((ParameterizedTypeName) typeName);
    }
    String simpleName = ((ClassName) typeName).simpleName();
    if (simpleName.equals(Address.class.getSimpleName())) {
        return TypeName.get(String.class);
    } else if (simpleName.startsWith("Uint")) {
        return TypeName.get(BigInteger.class);
    } else if (simpleName.equals(Utf8String.class.getSimpleName())) {
        return TypeName.get(String.class);
    } else if (simpleName.startsWith("Bytes") || simpleName.equals("DynamicBytes")) {
        return TypeName.get(byte[].class);
    } else if (simpleName.startsWith("Bool")) {
        return TypeName.get(java.lang.Boolean.class); // boolean cannot be a parameterized type
    } else if (simpleName.equals(Byte.class.getSimpleName())) {
        return TypeName.get(java.lang.Byte.class);
    } else if (simpleName.equals(Char.class.getSimpleName())) {
        return TypeName.get(Character.class);
    } else if (simpleName.equals(Double.class.getSimpleName())) {
        return TypeName.get(java.lang.Double.class);
    } else if (simpleName.equals(Float.class.getSimpleName())) {
        return TypeName.get(java.lang.Float.class);
    } else if (simpleName.equals(Int.class.getSimpleName())) {
        return TypeName.get(Integer.class);
    } else if (simpleName.equals(Long.class.getSimpleName())) {
        return TypeName.get(java.lang.Long.class);
    } else if (simpleName.equals(Short.class.getSimpleName())) {
        return TypeName.get(java.lang.Short.class);
    } else if (simpleName.startsWith("Int")) {
        // Fallback for sized Int types (Int8..Int256) after the exact "Int" match above.
        return TypeName.get(BigInteger.class);
    } else {
        throw new UnsupportedOperationException(
                "Unsupported type: " + typeName + ", no native type mapping exists.");
    }
}
// DynamicArray<Address> maps to List<String> in generated code.
@Test
public void testGetNativeTypeParameterized() {
    assertEquals(
            getNativeType(
                    ParameterizedTypeName.get(
                            ClassName.get(DynamicArray.class), TypeName.get(Address.class))),
            (ParameterizedTypeName.get(ClassName.get(List.class), TypeName.get(String.class))));
}
// No custom executor: returning null lets the caller use its default executor.
@Override
public Executor getExecutor() {
    return null;
}
// The default shard-listener executor must be null.
@Test
void getExecutor() {
    // Default listener executor is null.
    assertNull(new MockShardListener().getExecutor());
}
// Convenience overload: parses the address with no default port (-1 = unspecified).
public static AddressHolder getAddressHolder(String address) {
    return getAddressHolder(address, -1);
}
// Parsing must handle scoped IPv6, IPv4-mapped IPv6, plain IPv4 and hostnames with ports.
@Test
public void testParsingHostAndPort() {
    AddressHolder addressHolder = AddressUtil.getAddressHolder("[fe80::62c5:*:fe05:480a%en0]:8080");
    assertEquals("fe80::62c5:*:fe05:480a", addressHolder.getAddress());
    assertEquals(8080, addressHolder.getPort());
    assertEquals("en0", addressHolder.getScopeId());
    addressHolder = AddressUtil.getAddressHolder("[::ffff:192.0.2.128]:5700");
    assertEquals("::ffff:192.0.2.128", addressHolder.getAddress());
    assertEquals(5700, addressHolder.getPort());
    addressHolder = AddressUtil.getAddressHolder("192.168.1.1:5700");
    assertEquals("192.168.1.1", addressHolder.getAddress());
    assertEquals(5700, addressHolder.getPort());
    addressHolder = AddressUtil.getAddressHolder("hazelcast.com:80");
    assertEquals("hazelcast.com", addressHolder.getAddress());
    assertEquals(80, addressHolder.getPort());
}
// Hash derived from the same fields equals() compares: taskId and topicPartitions.
@Override
public int hashCode() {
    return Objects.hash(taskId, topicPartitions);
}
// Metadata differing only in topic partitions must differ in equals() and hashCode().
@Test
public void shouldNotBeEqualsIfDifferInTopicPartitions() {
    final TaskMetadataImpl differTopicPartitions = new TaskMetadataImpl(
            TASK_ID, mkSet(TP_0), COMMITTED_OFFSETS, END_OFFSETS, TIME_CURRENT_IDLING_STARTED);
    assertThat(taskMetadata, not(equalTo(differTopicPartitions)));
    assertThat(taskMetadata.hashCode(), not(equalTo(differTopicPartitions.hashCode())));
}
/**
 * Boxes an arbitrary array (including primitive arrays) into an Object[].
 *
 * @return null for null input
 * @throws ClassCastException when the argument is not an array
 */
public static Object[] toArray(Object arrayObj) {
    if (arrayObj == null) {
        return null;
    }
    if (!arrayObj.getClass().isArray()) {
        throw new ClassCastException("'arrayObj' is not an array, can't cast to Object[]");
    }
    int size = Array.getLength(arrayObj);
    Object[] boxed = new Object[size];
    for (int i = 0; i < size; i++) {
        // Array.get boxes primitive elements automatically.
        boxed[i] = Array.get(arrayObj, i);
    }
    return boxed;
}
// Passing a non-array object must raise ClassCastException.
@Test
public void testToArrayException() {
    Assertions.assertThrows(ClassCastException.class, () -> {
        Object[] array = ArrayUtils.toArray(new Object());
    });
}
/**
 * Returns the first non-null argument, scanning {@code first} then the varargs
 * in order; returns null when every argument (or the varargs array itself) is null.
 */
@SuppressWarnings("varargs")
@SafeVarargs
@Udf
public final <T> T coalesce(final T first, final T... others) {
    if (first != null) {
        return first;
    }
    if (others == null) {
        return null;
    }
    for (final T candidate : others) {
        if (candidate != null) {
            return candidate;
        }
    }
    return null;
}
// coalesce must return the first non-null argument across several value types.
@Test
public void shouldReturnFirstNonNullEntity() {
    assertThat(udf.coalesce(1, 2, 3), is(1));
    assertThat(udf.coalesce(null, "a", "b", "c", "d"), is("a"));
    assertThat(udf.coalesce(null, ImmutableList.of(), null), is(ImmutableList.of()));
    assertThat(udf.coalesce(null, null, 1.0), is(1.0));
}
// Returns the subset of urls matching the predicate, preserving order, in a new list.
public static List<URL> classifyUrls(List<URL> urls, Predicate<URL> predicate) {
    return urls.stream().filter(predicate).collect(Collectors.toList());
}
// Splitting four URLs by the consumer predicate yields two complementary pairs.
@Test
public void testClassifyUrls() {
    String address1 = "remote://root:alibaba@127.0.0.1:9090";
    URL url1 = UrlUtils.parseURL(address1, null);
    String address2 = "consumer://root:alibaba@127.0.0.1:9090";
    URL url2 = UrlUtils.parseURL(address2, null);
    String address3 = "remote://root:alibaba@127.0.0.1";
    URL url3 = UrlUtils.parseURL(address3, null);
    String address4 = "consumer://root:alibaba@127.0.0.1";
    URL url4 = UrlUtils.parseURL(address4, null);
    List<URL> urls = new ArrayList<>();
    urls.add(url1);
    urls.add(url2);
    urls.add(url3);
    urls.add(url4);
    List<URL> consumerUrls = UrlUtils.classifyUrls(urls, UrlUtils::isConsumer);
    assertEquals(2, consumerUrls.size());
    assertTrue(consumerUrls.contains(url2));
    assertTrue(consumerUrls.contains(url4));
    List<URL> nonConsumerUrls = UrlUtils.classifyUrls(urls, url -> !UrlUtils.isConsumer(url));
    assertEquals(2, nonConsumerUrls.size());
    assertTrue(nonConsumerUrls.contains(url1));
    assertTrue(nonConsumerUrls.contains(url3));
}
/**
 * Asserts that {@code object} is {@code null}.
 *
 * @param object  the value that must be {@code null}
 * @param message the exception message used when the assertion fails
 * @throws IllegalArgumentException if {@code object} is non-null
 */
public static void isNull(Object object, String message) {
    if (null != object) {
        throw new IllegalArgumentException(message);
    }
}
// Passing a non-null value ("") must trigger IllegalArgumentException.
// NOTE(review): the message "object is null" describes the expected state, not
// the failure; consider "object is not null" for clarity — confirm intent.
@Test(expected = IllegalArgumentException.class)
public void assertIsNull() {
    Assert.isNull("", "object is null");
}
/**
 * Delegates logging configuration to the installed adapter, if any.
 * Failures are logged and swallowed so broken logging setup never breaks
 * the caller (best-effort by design).
 */
public void loadConfiguration() {
    try {
        if (null != loggingAdapter) {
            loggingAdapter.loadConfiguration(loggingProperties);
        }
    } catch (Throwable t) {
        // Log the adapter's class name, not the logger's: the original
        // LOGGER.getClass().getName() always printed the logger implementation
        // class, which says nothing about which adapter failed. The adapter is
        // guaranteed non-null here because the try body is guarded.
        LOGGER.warn("Load {} Configuration of Nacos fail, message: {}",
                loggingAdapter.getClass().getName(), t.getMessage());
    }
}
// Injects a mock adapter via reflection and verifies it is invoked exactly
// once with the shared logging properties.
@Test
void testLoadConfiguration() throws NoSuchFieldException, IllegalAccessException {
    instance = NacosLogging.getInstance();
    Field nacosLogging = NacosLogging.class.getDeclaredField("loggingAdapter");
    nacosLogging.setAccessible(true);
    nacosLogging.set(instance, loggingAdapter);
    instance.loadConfiguration();
    Mockito.verify(loggingAdapter, Mockito.times(1)).loadConfiguration(loggingProperties);
}
/**
 * Stores or updates a connector's configuration. A null target state leaves
 * the existing state untouched on update (and yields the default on create).
 * Notifies the update listener, when registered, after every call.
 */
@Override
public synchronized void putConnectorConfig(String connector, Map<String, String> properties, TargetState targetState) {
    ConnectorState state = connectors.get(connector);
    if (state != null) {
        // Existing connector: replace config, keep state unless one was given.
        state.connConfig = properties;
        if (targetState != null) {
            state.targetState = targetState;
        }
    } else {
        connectors.put(connector, new ConnectorState(properties, targetState));
    }
    if (updateListener != null) {
        updateListener.onConnectorConfigUpdate(connector);
    }
}
// Storing a config with a null target state should default to STARTED and
// notify the listener once.
@Test
public void testPutConnectorConfig() {
    configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null);
    ClusterConfigState configState = configStore.snapshot();
    assertTrue(configState.contains(CONNECTOR_IDS.get(0)));
    // Default initial target state of STARTED should be used if no explicit target state is specified
    assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
    assertEquals(SAMPLE_CONFIGS.get(0), configState.connectorConfig(CONNECTOR_IDS.get(0)));
    assertEquals(1, configState.connectors().size());
    verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0)));
}
// Sends the Redis PING command to the given cluster node and returns its reply.
@Override
public String ping(RedisClusterNode node) {
    return execute(node, RedisCommands.PING);
}
// PING against the first master node must yield "PONG".
@Test
public void testClusterPing() {
    RedisClusterNode master = getFirstMaster();
    String res = connection.ping(master);
    assertThat(res).isEqualTo("PONG");
}
/**
 * Inserts {@code value} keeping the backing Redis list sorted. A local binary
 * search chooses the insertion index; a Lua script then inserts before the
 * pivot element at that index, or appends when the index is past the end.
 * The whole operation runs under the queue lock to keep search + insert atomic
 * with respect to other local writers.
 */
@Override
public boolean add(V value) {
    lock.lock();
    try {
        checkComparator();
        BinarySearchResult<V> res = binarySearch(value);
        int index = 0;
        if (res.getIndex() < 0) {
            // Negative result presumably encodes -(insertionPoint) - 1, as in
            // Collections.binarySearch — TODO confirm against binarySearch().
            index = -(res.getIndex() + 1);
        } else {
            // Exact match found: insert after it, so duplicates are kept.
            index = res.getIndex() + 1;
        }
        get(commandExecutor.evalWriteNoRetryAsync(getRawName(), codec, RedisCommands.EVAL_VOID,
                "local len = redis.call('llen', KEYS[1]);"
              + "if tonumber(ARGV[1]) < len then "
                  + "local pivot = redis.call('lindex', KEYS[1], ARGV[1]);"
                  + "redis.call('linsert', KEYS[1], 'before', pivot, ARGV[2]);"
                  + "return;"
              + "end;"
              + "redis.call('rpush', KEYS[1], ARGV[2]);",
                Arrays.asList(getRawName()), index, encode(value)));
        return true;
    } finally {
        lock.unlock();
    }
}
// Duplicate elements must be kept and the queue must stay fully sorted.
@Test
public void testDuplicates() {
    RPriorityQueue<TestObject> set = redisson.getPriorityQueue("set");
    set.add(new TestObject("1", "2"));
    set.add(new TestObject("2", "3"));
    set.add(new TestObject("5", "6"));
    set.add(new TestObject("1", "2"));
    set.add(new TestObject("3", "4"));
    Assertions.assertEquals(5, set.size());
    assertThat(set).containsExactly(new TestObject("1", "2"), new TestObject("1", "2"),
            new TestObject("2", "3"), new TestObject("3", "4"), new TestObject("5", "6"));
}
// Returns a deep copy of the event data so callers cannot mutate internal state.
@Override
public Map<String, Object> toMap() {
    return Cloner.deep(this.data);
}
// toMap must return an equal but distinct map instance (a copy, not the original).
@Test
public void testToMap() throws Exception {
    Event e = new Event();
    Map<String, Object> original = e.getData();
    Map<String, Object> clone = e.toMap();
    assertFalse(original == clone);
    assertEquals(original, clone);
}
/**
 * Null-safe equality: two Numbers are compared numerically via
 * areNumericEqual; everything else falls back to Objects.equals.
 */
public static boolean areEqual(Object o1, Object o2) {
    if (o1 instanceof Number && o2 instanceof Number) {
        return areNumericEqual((Number) o1, (Number) o2);
    }
    return Objects.equals(o1, o2);
}
// Covers null/null, equal strings, distinct Objects, and every null/non-null
// and mixed-type combination of the non-numeric path.
@Test
public void areEqualWithObjects() {
    assertThat(OperatorUtils.areEqual(null, null)).isTrue();
    assertThat(OperatorUtils.areEqual("test", "test")).isTrue();
    assertThat(OperatorUtils.areEqual(new Object(), new Object())).isFalse();
    assertThat(OperatorUtils.areEqual(null, "test")).isFalse();
    assertThat(OperatorUtils.areEqual("test", null)).isFalse();
    assertThat(OperatorUtils.areEqual(new Object(), null)).isFalse();
    assertThat(OperatorUtils.areEqual(null, new Object())).isFalse();
    assertThat(OperatorUtils.areEqual(new Object(), "test")).isFalse();
    assertThat(OperatorUtils.areEqual("test", new Object())).isFalse();
}
// Snapshots the steward's in-progress jobs (defensive copy) and applies the
// heartbeat update to each one.
@Override
protected void runTask() {
    LOGGER.debug("Updating currently processed jobs... ");
    convertAndProcessJobs(new ArrayList<>(backgroundJobServer.getJobSteward().getJobsInProgress()),
            this::updateCurrentlyProcessingJob);
}
// Running the task must persist in-progress jobs and bump the processing
// state's updatedAt timestamp past createdAt (the heartbeat).
@Test
void jobsThatAreProcessedAreBeingUpdatedWithAHeartbeat() {
    // GIVEN
    final Job job = anEnqueuedJob().withId().build();
    startProcessingJob(job);
    // WHEN
    runTask(task);
    // THEN
    verify(storageProvider).save(singletonList(job));
    ProcessingState processingState = job.getJobState();
    Assertions.assertThat(processingState.getUpdatedAt()).isAfter(processingState.getCreatedAt());
}
/**
 * Qualifies the URL's thread-name parameter with the URL's address
 * ("name-host:port"), falling back to {@code defaultName} when the parameter
 * is absent, and returns the updated URL.
 */
public static URL setThreadName(URL url, String defaultName) {
    final String qualifiedName =
            url.getParameter(THREAD_NAME_KEY, defaultName) + "-" + url.getAddress();
    return url.addParameter(THREAD_NAME_KEY, qualifiedName);
}
// An explicitly configured thread name wins over the default and is
// suffixed with the URL address.
@Test
void testSetThreadName() throws Exception {
    URL url = new ServiceConfigURL("dubbo", "localhost", 1234).addParameter(THREAD_NAME_KEY, "custom-thread");
    url = ExecutorUtil.setThreadName(url, "default-name");
    assertThat(url.getParameter(THREAD_NAME_KEY), equalTo("custom-thread-localhost:1234"));
}
/**
 * Returns a lazily-evaluated Iterable over every address in this CIDR block,
 * in ascending numeric order from the block's first address to its last
 * (inclusive). Each call to iterator() yields a fresh, independent iterator.
 *
 * @throws NoSuchElementException from next() once the range is exhausted
 */
public Iterable<InetAddress> iterableIps() {
    return () -> new Iterator<>() {
        private final BigInteger maxValue = lastAddressInteger();
        private BigInteger current = addressInteger;

        @Override
        public boolean hasNext() {
            return current.compareTo(maxValue) <= 0;
        }

        @Override
        public InetAddress next() {
            if (!hasNext()) throw new NoSuchElementException();
            InetAddress inetAddress = bitsToInetAddress(current, addressLength);
            current = current.add(BigInteger.ONE);
            return inetAddress;
        }
    };
}
// A /29 yields its 8 addresses in order; a /32 yields exactly one.
@Test
public void iterableIps() {
    assertEquals(List.of("10.12.14.24", "10.12.14.25", "10.12.14.26", "10.12.14.27",
            "10.12.14.28", "10.12.14.29", "10.12.14.30", "10.12.14.31"),
        StreamSupport.stream(CidrBlock.fromString("10.12.14.24/29").iterableIps().spliterator(), false)
            .map(InetAddressUtil::toString)
            .collect(Collectors.toList()));
    assertEquals(List.of("10.12.14.24"),
        StreamSupport.stream(CidrBlock.fromString("10.12.14.24/32").iterableIps().spliterator(), false)
            .map(InetAddressUtil::toString)
            .collect(Collectors.toList()));
}
/**
 * Converts a Parquet MessageType into a SchemaMapping holding the equivalent
 * Arrow Schema plus the per-field type mappings.
 */
public SchemaMapping fromParquet(MessageType parquetSchema) {
    final List<Type> parquetFields = parquetSchema.getFields();
    final List<TypeMapping> typeMappings = fromParquet(parquetFields);
    // Arrow fields are derived directly from the mappings.
    return new SchemaMapping(new Schema(fields(typeMappings)), parquetSchema, typeMappings);
}
// An INT32 column annotated as timestamp-millis is invalid and must fail
// conversion with IllegalStateException.
@Test(expected = IllegalStateException.class)
public void testParquetInt32TimestampMillisToArrow() {
    converter.fromParquet(Types.buildMessage()
        .addField(Types.optional(INT32)
            .as(LogicalTypeAnnotation.timestampType(false, MILLIS))
            .named("a"))
        .named("root"));
}
// Delegates entirely to the configured event ordering.
@Override
public int compare(Event a, Event b) {
    return eventOrder.compare(a, b);
}
// The run-started event must equal itself and sort before every other event.
@Test
void verifyTestRunStartedSortedCorrectly() {
    assertAll(
        () -> assertThat(comparator.compare(runStarted, runStarted), equalTo(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, testRead), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, testParsed), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, suggested), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, feature1Case1Started), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, feature1Case2Started), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, feature1Case3Started), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, feature2Case1Started), lessThan(EQUAL_TO)),
        () -> assertThat(comparator.compare(runStarted, runFinished), lessThan(EQUAL_TO)));
}
/**
 * Removes a pending task manager from all tracking structures and returns its
 * outstanding slot-allocation records (per job) so callers can re-allocate.
 * Throws NullPointerException when the id is null or unknown.
 */
@Override
public Map<JobID, ResourceCounter> removePendingTaskManager(
        PendingTaskManagerId pendingTaskManagerId) {
    Preconditions.checkNotNull(pendingTaskManagerId);
    // checkNotNull doubles as the "must be tracked" check: remove() returns
    // null for an unknown id.
    final PendingTaskManager pendingTaskManager =
            Preconditions.checkNotNull(pendingTaskManagers.remove(pendingTaskManagerId));
    totalPendingResource =
            totalPendingResource.subtract(pendingTaskManager.getTotalResourceProfile());
    LOG.debug("Remove pending task manager {}.", pendingTaskManagerId);
    // Drop the TM from the (total, default-slot) profile index; remove the
    // index entry entirely when the set becomes empty.
    totalAndDefaultSlotProfilesToPendingTaskManagers.compute(
            Tuple2.of(
                    pendingTaskManager.getTotalResourceProfile(),
                    pendingTaskManager.getDefaultSlotResourceProfile()),
            (ignored, pendingTMSet) -> {
                Preconditions.checkNotNull(pendingTMSet).remove(pendingTaskManager);
                return pendingTMSet.isEmpty() ? null : pendingTMSet;
            });
    return pendingTaskManager.getPendingSlotAllocationRecords();
}
// Removing a never-registered pending task manager must fail fast with NPE.
@Test
void testRemoveUnknownPendingTaskManager() {
    assertThatThrownBy(
            () -> {
                final FineGrainedTaskManagerTracker taskManagerTracker =
                        new FineGrainedTaskManagerTracker();
                taskManagerTracker.removePendingTaskManager(
                        PendingTaskManagerId.generate());
            })
        .isInstanceOf(NullPointerException.class);
}
/**
 * Builds the concrete MySQL command packet for the given command type,
 * consuming the payload as needed. Unknown/unsupported commands fall through
 * to MySQLUnsupportedCommandPacket.
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType,
        final MySQLPacketPayload payload, final ConnectionSession connectionSession) {
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitPacket();
        case COM_INIT_DB:
            return new MySQLComInitDbPacket(payload);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacket(payload);
        case COM_QUERY:
            return new MySQLComQueryPacket(payload);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPreparePacket(payload);
        case COM_STMT_EXECUTE:
            // Peek the statement id (little-endian int at the current reader
            // index, without advancing it) to look up the prepared statement
            // and recover its parameter count.
            MySQLServerPreparedStatement serverPreparedStatement =
                    connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(
                            payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
            return new MySQLComStmtExecutePacket(payload,
                    serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataPacket(payload);
        case COM_STMT_RESET:
            return new MySQLComStmtResetPacket(payload);
        case COM_STMT_CLOSE:
            return new MySQLComStmtClosePacket(payload);
        case COM_SET_OPTION:
            return new MySQLComSetOptionPacket(payload);
        case COM_PING:
            return new MySQLComPingPacket();
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionPacket();
        default:
            return new MySQLUnsupportedCommandPacket(commandPacketType);
    }
}
// COM_DELAYED_INSERT has no dedicated packet class and must map to the
// unsupported-command packet.
@Test
void assertNewInstanceWithComDelayedInsertPacket() {
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_DELAYED_INSERT, payload, connectionSession),
            instanceOf(MySQLUnsupportedCommandPacket.class));
}
// This session type never resolves components to entities; always empty.
@Override
protected Optional<String> componentUuidToEntityUuid(String componentUuid) {
    return Optional.empty();
}
// Any component uuid must resolve to empty for a GitHub webhook session.
// NOTE(review): test name says "ProjectUuid" but exercises
// componentUuidToEntityUuid — consider renaming for clarity.
@Test
public void componentUuidToProjectUuid() {
    assertThat(githubWebhookUserSession.componentUuidToEntityUuid("test")).isEmpty();
}
// Resets the model: clears the descriptor first, then the data rows.
public void clear() {
    scesimModelDescriptor.clear();
    clearDatas();
}
// clear() must delegate to clearDatas() exactly once.
@Test
public void clear() {
    model.clear();
    verify(model, times(1)).clearDatas();
}
/**
 * Runs the Kafka spec sanity checks and collects warning conditions.
 * Common checks always run; the remainder depend on whether the cluster is
 * KRaft-based or ZooKeeper-based.
 *
 * @param useKRaft true for KRaft clusters, false for ZooKeeper-based clusters
 * @return the accumulated warnings (possibly empty)
 */
List<Condition> run(boolean useKRaft) {
    List<Condition> warnings = new ArrayList<>();
    checkKafkaReplicationConfig(warnings);
    checkKafkaBrokersStorage(warnings);
    if (useKRaft) {
        // Additional checks done for KRaft clusters
        checkKRaftControllerStorage(warnings);
        checkKRaftControllerCount(warnings);
        checkKafkaMetadataVersion(warnings);
        checkInterBrokerProtocolVersionInKRaft(warnings);
        checkLogMessageFormatVersionInKRaft(warnings);
    } else {
        // Additional checks done for ZooKeeper-based clusters
        checkKafkaLogMessageFormatVersion(warnings);
        checkKafkaInterBrokerProtocolVersion(warnings);
        checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
    }
    return warnings;
}
// A KRaft cluster with only two mixed (controller+broker) nodes must produce
// exactly one warning about the controller quorum size.
@Test
public void testKRaftWithTwoMixedNodes() {
    Kafka kafka = new KafkaBuilder(KAFKA)
        .editSpec()
            .editKafka()
                .withConfig(Map.of(
                        // We want to avoid unrelated warnings
                        KafkaConfiguration.DEFAULT_REPLICATION_FACTOR, 1,
                        KafkaConfiguration.MIN_INSYNC_REPLICAS, 1
                ))
            .endKafka()
        .endSpec()
        .build();
    KafkaNodePool mixed = new KafkaNodePoolBuilder(MIXED)
        .editSpec()
            .withReplicas(2)
        .endSpec()
        .build();
    KafkaSpecChecker checker = generateChecker(kafka, List.of(mixed), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
    List<Condition> warnings = checker.run(true);
    assertThat(warnings, hasSize(1));
    assertThat(warnings.get(0).getReason(), is("KafkaKRaftControllerNodeCount"));
    assertThat(warnings.get(0).getMessage(), is("Running KRaft controller quorum with two nodes is not advisable as both nodes will be needed to avoid downtime. It is recommended that a minimum of three nodes are used."));
}
/**
 * Splits this source into one sub-source per Iceberg CombinedScanTask.
 * A positive desired bundle size is passed to the table scan as the split
 * size hint. Only TABLE scans are supported; BATCH and unknown types throw.
 */
@Override
public List<? extends BoundedSource<Row>> split(
        long desiredBundleSizeBytes, PipelineOptions options) throws Exception {
    ArrayList<ScanTaskSource> splits = new ArrayList<>();
    switch (scanConfig.getScanType()) {
        case TABLE:
            TableScan tableScan = getTableScan();
            if (desiredBundleSizeBytes > 0) {
                tableScan = tableScan.option(TableProperties.SPLIT_SIZE, Long.toString(desiredBundleSizeBytes));
            }
            // planTasks() must be closed; try-with-resources guarantees it.
            try (CloseableIterable<CombinedScanTask> tasks = tableScan.planTasks()) {
                for (CombinedScanTask combinedScanTask : tasks) {
                    splits.add(new ScanTaskSource(scanConfig, combinedScanTask));
                }
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            break;
        case BATCH:
            throw new UnsupportedOperationException("BATCH scan not supported");
        default:
            throw new UnsupportedOperationException("Unknown scan type: " + scanConfig.getScanType());
    }
    return splits;
}
// Splitting the source at various (tiny) desired bundle sizes must always
// read the same records as the unsplit reference source.
@Test
public void testInitialSplitting() throws Exception {
    TableIdentifier tableId =
        TableIdentifier.of("default", "table" + Long.toString(UUID.randomUUID().hashCode(), 16));
    Table simpleTable = warehouse.createTable(tableId, TestFixtures.SCHEMA);
    simpleTable
        .newFastAppend()
        .appendFile(
            warehouse.writeRecords(
                "file1s1.parquet", simpleTable.schema(), TestFixtures.FILE1SNAPSHOT1))
        .appendFile(
            warehouse.writeRecords(
                "file2s1.parquet", simpleTable.schema(), TestFixtures.FILE2SNAPSHOT1))
        .appendFile(
            warehouse.writeRecords(
                "file3s1.parquet", simpleTable.schema(), TestFixtures.FILE3SNAPSHOT1))
        .commit();
    PipelineOptions options = PipelineOptionsFactory.create();
    Map<String, String> catalogProps =
        ImmutableMap.<String, String>builder()
            .put("type", CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP)
            .put("warehouse", warehouse.location)
            .build();
    BoundedSource<Row> source =
        new ScanSource(
            IcebergScanConfig.builder()
                .setCatalogConfig(
                    IcebergCatalogConfig.builder()
                        .setCatalogName("name")
                        .setCatalogProperties(catalogProps)
                        .build())
                .setScanType(IcebergScanConfig.ScanType.TABLE)
                .setTableIdentifier(simpleTable.name().replace("hadoop.", "").split("\\."))
                .setSchema(IcebergUtils.icebergSchemaToBeamSchema(TestFixtures.SCHEMA))
                .build());
    // Input data for this test is tiny so try a number of very small split sizes
    SourceTestUtils.assertSourcesEqualReferenceSource(source, source.split(1, options), options);
    SourceTestUtils.assertSourcesEqualReferenceSource(source, source.split(2, options), options);
    SourceTestUtils.assertSourcesEqualReferenceSource(source, source.split(5, options), options);
    SourceTestUtils.assertSourcesEqualReferenceSource(source, source.split(10, options), options);
    SourceTestUtils.assertSourcesEqualReferenceSource(source, source.split(100, options), options);
    SourceTestUtils.assertSourcesEqualReferenceSource(source, source.split(1000, options), options);
}
/**
 * Computes the (possibly truncated, via chop) HMAC of the message.
 * The Mac instance is cached and rebuilt whenever the ConfidentialStore
 * changes; synchronized because Mac is stateful and not thread-safe.
 */
public synchronized byte[] mac(byte[] message) {
    ConfidentialStore cs = ConfidentialStore.get();
    if (mac == null || cs != lastCS) {
        lastCS = cs;
        mac = createMac();
    }
    return chop(mac.doFinal(message));
}
// A 16-byte key must produce a 16-byte (32 hex chars) truncated MAC.
@Test
public void testTruncatedMacOnNonFips() {
    HMACConfidentialKey key1 = new HMACConfidentialKey("test", 16);
    String str = key1.mac("Hello World");
    String pattern = "[0-9A-Fa-f]{32}";
    assertThat(str, matchesPattern(pattern));
}
/**
 * Looks up {@code key} and returns the mapped value, or {@code defaultValue}
 * when the lookup yields the map's missing-value sentinel.
 */
public int getOrDefault(final int key, final int defaultValue) {
    final int value = get(key);
    return value == missingValue ? defaultValue : value;
}
// With no mapping present, the supplied default must be returned.
@Test
void getOrDefaultShouldReturnADefaultValueWhenNoMappingExists() {
    final int key = 42;
    final int defaultValue = 8;
    assertEquals(defaultValue, map.getOrDefault(key, defaultValue));
}
/**
 * Returns the column name the legacy MySQL behavior would use: the column
 * label for driver major version > 3, the raw column name otherwise.
 * Both metadata arguments are required; any lookup failure is wrapped in a
 * KettleDatabaseException with the original cause preserved.
 */
public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException {
    if ( dbMetaData == null ) {
        throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) );
    }
    if ( rsMetaData == null ) {
        throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) );
    }
    try {
        // Drivers after v3 report aliased names via getColumnLabel.
        return dbMetaData.getDriverMajorVersion() > 3 ? rsMetaData.getColumnLabel( index ) : rsMetaData.getColumnName( index );
    } catch ( Exception e ) {
        throw new KettleDatabaseException( String.format( "%s: %s",
            BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e );
    }
}
// Driver major version 5 (> 3) must use the column label path ("DB").
@Test
public void testGetLegacyColumnNameDriverGreaterThanThreeFieldDB() throws Exception {
    DatabaseMetaData databaseMetaData = mock( DatabaseMetaData.class );
    doReturn( 5 ).when( databaseMetaData ).getDriverMajorVersion();
    assertEquals( "DB", new MySQLDatabaseMeta().getLegacyColumnName( databaseMetaData, getResultSetMetaData(), 5 ) );
}
/**
 * Returns the next range element under the iteration lock.
 *
 * @throws NoSuchElementException when the range is exhausted
 */
@Override
public T next() {
    lock.lock();
    try {
        // Guard clause replaces the original "false == hasNext()" comparison.
        if (!this.hasNext()) {
            throw new NoSuchElementException("Has no next range!");
        }
        return nextUncheck();
    } finally {
        lock.unlock();
    }
}
// An exclusive-exclusive day range over [01-01, 01-05] must yield exactly
// 01-02 through 01-04 and then be exhausted.
@Test
public void rangeDayOfYearTest() {
    DateTime start = DateUtil.parse("2017-01-01");
    DateTime end = DateUtil.parse("2017-01-05");
    // Case where neither the start time nor the end time is included
    DateRange range = new DateRange(start, end, DateField.DAY_OF_YEAR, 1, false, false);
    assertEquals(DateUtil.parse("2017-01-02"), range.next());
    assertEquals(DateUtil.parse("2017-01-03"), range.next());
    assertEquals(DateUtil.parse("2017-01-04"), range.next());
    try {
        range.next();
        fail("不包含结束时间情况下,下一个元素不应该存在!");
    } catch (NoSuchElementException ignored) {
    }
}
/**
 * Boolean stringification is only valid on the boolean stringifier; this base
 * implementation always rejects the call, naming the concrete stringifier.
 */
public String stringify(boolean value) {
    throw new UnsupportedOperationException(
            "stringify(boolean) was called on a non-boolean stringifier: " + this);
}
// Interval stringifier: null -> "null"; wrong-length binaries -> "<INVALID>";
// 12-byte buffers decode as (months, days, millis) little-endian unsigned.
@Test
public void testIntervalStringifier() {
    PrimitiveStringifier stringifier = INTERVAL_STRINGIFIER;
    assertEquals("null", stringifier.stringify(null));
    assertEquals("<INVALID>", stringifier.stringify(Binary.EMPTY));
    assertEquals(
        "<INVALID>",
        stringifier.stringify(Binary.fromConstantByteArray(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11})));
    assertEquals("<INVALID>", stringifier.stringify(Binary.fromReusedByteArray(new byte[] {
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13
    })));
    ByteBuffer buffer = ByteBuffer.allocate(12);
    assertEquals(
        "interval(0 months, 0 days, 0 millis)",
        stringifier.stringify(Binary.fromConstantByteBuffer(buffer)));
    buffer.putInt(0x03000000);
    buffer.putInt(0x06000000);
    buffer.putInt(0x09000000);
    buffer.flip();
    assertEquals(
        "interval(3 months, 6 days, 9 millis)",
        stringifier.stringify(Binary.fromConstantByteBuffer(buffer)));
    buffer.clear();
    buffer.putInt(0xFFFFFFFF);
    buffer.putInt(0xFEFFFFFF);
    buffer.putInt(0xFDFFFFFF);
    buffer.flip();
    // Values are read as unsigned 32-bit integers.
    assertEquals(
        "interval(4294967295 months, 4294967294 days, 4294967293 millis)",
        stringifier.stringify(Binary.fromReusedByteBuffer(buffer)));
    checkThrowingUnsupportedException(stringifier, Binary.class);
}
// Returns the elements of the iterable that do NOT satisfy the predicate,
// delegating to the fork-join implementation (ordered variant: false).
public static <T> Collection<T> reject(
        Iterable<T> iterable,
        Predicate<? super T> predicate) {
    return FJIterate.reject(iterable, predicate, false);
}
// Runs the shared reject assertions against every iterable fixture.
@Test
public void reject() {
    this.iterables.each(this::basicReject);
}
// Fluent setter: records the offset (relative to the buffer start) and
// returns this header for chaining.
public Header offset(final int offset) {
    this.offset = offset;
    return this;
}
// A new header starts at offset 0; a set offset must be read back verbatim.
@Test
void offsetIsRelativeToTheBufferStart() {
    final Header header = new Header(42, 3, "xyz");
    assertEquals(0, header.offset());
    header.offset(142);
    assertEquals(142, header.offset());
}
/**
 * Exposes the Shiro annotation advisor wired to the web security manager,
 * enabling @RequiresPermissions-style authorization annotations.
 */
@Bean
public AuthorizationAttributeSourceAdvisor authorizationAttributeSourceAdvisor(
        @Qualifier("shiroSecurityManager") final DefaultWebSecurityManager securityManager) {
    final AuthorizationAttributeSourceAdvisor advisor = new AuthorizationAttributeSourceAdvisor();
    advisor.setSecurityManager(securityManager);
    return advisor;
}
// The advisor bean must carry the security manager it was built with.
@Test
public void testAuthorizationAttributeSourceAdvisor() {
    AuthorizationAttributeSourceAdvisor advisor = shiroConfiguration.authorizationAttributeSourceAdvisor(securityManager);
    assertEquals(securityManager, advisor.getSecurityManager());
}
// Delegates ring construction to the wrapped factory.
@Override
public Ring<T> createRing(Map<T, Integer> pointsMap) {
    return _ringFactory.createRing(pointsMap);
}
// "distributionBased" config must produce a DistributionNonDiscreteRing.
@Test(groups = { "small", "back-end" })
public void testFactoryWithDistributionBased() {
    RingFactory<String> factory = new DelegatingRingFactory<>(configBuilder("distributionBased", null));
    Ring<String> ring = factory.createRing(buildPointsMap(10));
    assertTrue(ring instanceof DistributionNonDiscreteRing);
}
// Only column 1 is valid for this metadata; schema names are not exposed,
// so the empty string is returned per the JDBC convention.
@Override
public String getSchemaName(final int column) {
    Preconditions.checkArgument(1 == column);
    return "";
}
// Column 1 must report the empty schema name.
@Test
void assertGetSchemaName() throws SQLException {
    assertThat(actualMetaData.getSchemaName(1), is(""));
}
/**
 * Serializes the configured databases as a JSON array of {"name": ...}
 * objects, in repository order.
 */
@SuppressWarnings( "unchecked" )
public JSONArray getDatabases() {
    final JSONArray result = new JSONArray();
    for ( int idx = 0; idx < repositoriesMeta.nrDatabases(); idx++ ) {
        final JSONObject entry = new JSONObject();
        entry.put( "name", repositoriesMeta.getDatabase( idx ).getName() );
        result.add( entry );
    }
    return result;
}
// One configured database must serialize to a single-element JSON array.
@Test
public void testGetDatabases() throws Exception {
    when( repositoriesMeta.nrDatabases() ).thenReturn( 1 );
    when( repositoriesMeta.getDatabase( 0 ) ).thenReturn( databaseMeta );
    when( databaseMeta.getName() ).thenReturn( DATABASE_NAME );
    String databases = controller.getDatabases().toString();
    assertEquals( "[{\"name\":\"DATABASE NAME\"}]", databases );
}
// Cancels the snapshot: closes snapshot I/O, then performs cleanup exactly
// once — the CAS ensures either cancel() or the normal completion path owns
// resource cleanup, never both.
@VisibleForTesting
protected void cancel() {
    closeSnapshotIO();
    if (resourceCleanupOwnershipTaken.compareAndSet(false, true)) {
        cleanup();
    }
}
// Cancelling before the task runs must surface CancellationException, invoke
// cancel then cleanup (in that order), and close the provided resource.
@Test
void testCancelBeforeRun() throws Exception {
    task.cancel(true);
    Thread runner = startTask(task);
    assertThatThrownBy(task::get).isInstanceOf(CancellationException.class);
    runner.join();
    assertThat(testAsyncSnapshotCallable.getInvocationOrder())
        .containsExactly(METHOD_CANCEL, METHOD_CLEANUP);
    assertThat(testProvidedResource.isClosed()).isTrue();
}
static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) { NewTopic topicDescription = TopicAdmin.defineTopic(topicName). compacted(). partitions(partitions). replicationFactor(replicationFactor). build(); CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false); try { admin.createTopics(singleton(topicDescription), args).values().get(topicName).get(); log.info("Created topic '{}'", topicName); } catch (InterruptedException e) { Thread.interrupted(); throw new ConnectException("Interrupted while attempting to create/find topic '" + topicName + "'", e); } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof TopicExistsException) { log.debug("Unable to create topic '{}' since it already exists.", topicName); return; } if (cause instanceof UnsupportedVersionException) { log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." + " Falling back to assume topic exists or will be auto-created by the broker.", topicName); return; } if (cause instanceof TopicAuthorizationException) { log.debug("Not authorized to create topic(s) '{}' upon the brokers." + " Falling back to assume topic(s) exist or will be auto-created by the broker.", topicName); return; } if (cause instanceof ClusterAuthorizationException) { log.debug("Not authorized to create topic '{}'." + " Falling back to assume topic exists or will be auto-created by the broker.", topicName); return; } if (cause instanceof InvalidConfigurationException) { throw new ConnectException("Unable to create topic '" + topicName + "': " + cause.getMessage(), cause); } if (cause instanceof TimeoutException) { // Timed out waiting for the operation to complete throw new ConnectException("Timed out while checking for or creating topic '" + topicName + "'." 
+ " This could indicate a connectivity issue, unavailable topic partitions, or if" + " this is your first use of the topic it may have taken too long to create.", cause); } throw new ConnectException("Error while attempting to create/find topic '" + topicName + "'", e); } }
// A TimeoutException from the admin future must surface as a ConnectException
// whose cause is the TimeoutException.
@Test
public void testCreateCompactedTopicFailsWithTimeoutException() throws Exception {
    Map<String, KafkaFuture<Void>> values = Collections.singletonMap(TOPIC, future);
    when(future.get()).thenThrow(new ExecutionException(new TimeoutException("Timeout")));
    when(ctr.values()).thenReturn(values);
    when(admin.createTopics(any(), any())).thenReturn(ctr);
    Throwable ce = assertThrows(ConnectException.class,
        () -> MirrorUtils.createCompactedTopic(TOPIC, (short) 1, (short) 1, admin),
        "Should have exception thrown");
    assertInstanceOf(TimeoutException.class, ce.getCause());
    verify(future).get();
    verify(ctr).values();
    verify(admin).createTopics(any(), any());
}
// Evaluates the expression tree via the internal visitor (no context needed).
public Object resolve(final Expression expression) {
    return new Visitor().process(expression, null);
}
// A NullLiteral must resolve to a Java null regardless of the target type.
@Test
public void shouldResolveNullLiteral() {
    // Given:
    final SqlType type = SqlTypes.STRING;
    final Expression exp = new NullLiteral();
    // When:
    final Object o = new GenericExpressionResolver(type, FIELD_NAME, registry, config, "insert value", false).resolve(exp);
    // Then:
    assertThat(o, Matchers.nullValue());
}
/**
 * Derives a SCRAM salted password: Hi(password, salt, iters) per RFC 5802.
 * U1 = HMAC-SHA1(password, salt || INT(1)); each subsequent Ui = HMAC(U(i-1));
 * the result is the XOR of all Ui. Crypto-sensitive — do not reorder.
 *
 * @param salt     the random salt bytes
 * @param password the clear-text password (UTF-8 encoded as the HMAC key)
 * @param iters    the iteration count (>= 1)
 * @return the 20-byte salted password
 */
public static byte[] createSaltedPassword(byte[] salt, String password, int iters) throws SaslException {
    Mac mac = createSha1Hmac(password.getBytes(StandardCharsets.UTF_8));
    mac.update(salt);
    mac.update(new byte[]{0, 0, 0, 1});
    byte[] result = mac.doFinal();
    byte[] previous = null;
    for (int i = 1; i < iters; i++) {
        // First pass feeds U1 (held in result); later passes feed U(i-1).
        mac.update(previous != null ? previous : result);
        previous = mac.doFinal();
        for (int x = 0; x < result.length; x++) {
            result[x] ^= previous[x];
        }
    }
    return result;
}
// RFC 5802 test vector: "pencil" with the given salt and 4096 iterations.
@Test
public void testCreateSaltedPassword() throws Exception {
    // Setup test fixture.
    final byte[] salt = StringUtils.decodeHex("4125c247e43ab1e93c6dff76");
    final String password = "pencil";
    final int iterations = 4096;
    // Execute system under test.
    final byte[] result = ScramUtils.createSaltedPassword(salt, password, iterations);
    // Verify results.
    assertArrayEquals(StringUtils.decodeHex("1d96ee3a529b5a5f9e47c01f229a2cb8a6e15f7d"), result);
}
/**
 * Returns a Validator that accepts a LIST of STRING config values and checks
 * that the entries, joined with '|', compile as a single regular expression.
 *
 * @throws IllegalArgumentException when the value is not a list of strings
 *         (thrown at validation time by the returned validator)
 */
public static Validator validRegex() {
    return (name, val) -> {
        if (!(val instanceof List)) {
            throw new IllegalArgumentException("validator should only be used with "
                + "LIST of STRING defs");
        }
        final StringBuilder regexBuilder = new StringBuilder();
        // Wildcard element type instead of a raw List: same runtime behavior,
        // no raw-type warning.
        for (Object item : (List<?>) val) {
            if (!(item instanceof String)) {
                throw new IllegalArgumentException("validator should only be used with "
                    + "LIST of STRING defs");
            }
            if (regexBuilder.length() > 0) {
                regexBuilder.append("|");
            }
            regexBuilder.append((String) item);
        }
        try {
            Pattern.compile(regexBuilder.toString());
        } catch (final Exception e) {
            throw new ConfigException(name, val, "Not valid regular expression: " + e.getMessage());
        }
    };
}
// A syntactically valid regex entry must pass validation without throwing.
@Test
public void shouldNotThrowOnValidRegex() {
    // Given
    final Validator validator = ConfigValidators.validRegex();
    // When:
    validator.ensureValid("propName", Collections.singletonList("prefix_.*"));
    // Then: did not throw.
}
// Synchronous wrapper around the async capacity setter; blocks for the result.
@Override
public boolean trySetCapacity(int capacity) {
    return get(trySetCapacityAsync(capacity));
}
// With capacity 1, add/remove cycles work, but adding to a full queue must
// raise a RedisException caused by IllegalStateException.
// NOTE(review): if queue1.add(2) does NOT throw, this test silently passes —
// consider fail() after the call or assertThrows.
@Test
public void testAddRemoveFullQueueError() {
    RBoundedBlockingQueue<Integer> queue1 = redisson.getBoundedBlockingQueue("bounded-queue:testAddRemoveFullQueueError");
    assertThat(queue1.trySetCapacity(1)).isTrue();
    assertThat(queue1.add(12)).isTrue();
    assertThat(queue1.remove()).isEqualTo(12);
    assertThat(queue1.add(1)).isTrue();
    try {
        queue1.add(2);
    } catch (RedisException e) {
        assertThat(e.getCause()).isInstanceOf(IllegalStateException.class);
    }
}
/**
 * Instantiates the configured ConnectionTimeout implementation via its
 * (PreferencesReader) constructor, caching the constructor after first lookup.
 * NOTE(review): the lazy constructor cache is not synchronized — assumed to be
 * called from a single thread or benign on race; confirm.
 *
 * @throws FactoryException when reflection-based construction fails
 */
public ConnectionTimeout create(final PreferencesReader preferences) throws FactoryException {
    try {
        if(null == constructor) {
            constructor = ConstructorUtils.getMatchingAccessibleConstructor(clazz, PreferencesReader.class);
        }
        return constructor.newInstance(preferences);
    }
    catch(InstantiationException | InvocationTargetException | IllegalAccessException e) {
        throw new FactoryException(e.getMessage(), e);
    }
}
// Both factory entry points (global and per-host) must yield an instance.
@Test
public void testCreate() {
    assertNotNull(ConnectionTimeoutFactory.get());
    assertNotNull(ConnectionTimeoutFactory.get(new Host(new TestProtocol())));
}
/**
 * Handles a <property>/<substitutionProperty> element: loads properties from
 * a file, a classpath resource, or an inline name/value pair (in that priority
 * order), into the given scope. Invalid attribute combinations are reported
 * as errors.
 * NOTE(review): the opened input streams are passed to loadAndSetProperties —
 * presumably closed there; confirm, otherwise this leaks file handles.
 */
public void begin(InterpretationContext ec, String localName, Attributes attributes) {
    if ("substitutionProperty".equals(localName)) {
        addWarn("[substitutionProperty] element has been deprecated. Please use the [property] element instead.");
    }
    String name = attributes.getValue(NAME_ATTRIBUTE);
    String value = attributes.getValue(VALUE_ATTRIBUTE);
    String scopeStr = attributes.getValue(SCOPE_ATTRIBUTE);
    Scope scope = ActionUtil.stringToScope(scopeStr);
    if (checkFileAttributeSanity(attributes)) {
        String file = attributes.getValue(FILE_ATTRIBUTE);
        // Variable substitution is applied to the file path first.
        file = ec.subst(file);
        try {
            FileInputStream istream = new FileInputStream(file);
            loadAndSetProperties(ec, istream, scope);
        } catch (FileNotFoundException e) {
            addError("Could not find properties file [" + file + "].", e);
        } catch (IOException e1) {
            addError("Could not read properties file [" + file + "].", e1);
        }
    } else if (checkResourceAttributeSanity(attributes)) {
        String resource = attributes.getValue(RESOURCE_ATTRIBUTE);
        resource = ec.subst(resource);
        URL resourceURL = Loader.getResourceBySelfClassLoader(resource);
        if (resourceURL == null) {
            addError("Could not find resource [" + resource + "].");
        } else {
            try {
                InputStream istream = resourceURL.openStream();
                loadAndSetProperties(ec, istream, scope);
            } catch (IOException e) {
                addError("Could not read resource file [" + resource + "].", e);
            }
        }
    } else if (checkValueNameAttributesSanity(attributes)) {
        value = RegularEscapeUtil.basicEscape(value);
        // now remove both leading and trailing spaces
        value = value.trim();
        value = ec.subst(value);
        ActionUtil.setProperty(ec, name, value, scope);
    } else {
        addError(INVALID_ATTRIBUTES);
    }
}
// A value containing ${w} must be substituted from an already-set property.
@Test
public void nameValuePairWithPrerequisiteSubsitution() {
    context.putProperty("w", "wor");
    atts.setValue("name", "v1");
    atts.setValue("value", "${w}k");
    propertyAction.begin(ec, null, atts);
    assertEquals("work", ec.getProperty("v1"));
}
/**
 * Unregisters the named storage units. Existence is validated unless the
 * statement carries IF EXISTS; in-use units are rejected; persistence-layer
 * failures are wrapped in StorageUnitsOperateException.
 */
@Override
public void executeUpdate(final UnregisterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
    if (!sqlStatement.isIfExists()) {
        checkExisted(sqlStatement.getStorageUnitNames());
    }
    checkInUsed(sqlStatement);
    try {
        contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().unregisterStorageUnits(database.getName(), sqlStatement.getStorageUnitNames());
    } catch (final SQLException | ShardingSphereServerException ex) {
        throw new StorageUnitsOperateException("unregister", sqlStatement.getStorageUnitNames(), ex);
    }
}
// Unregistering a unit referenced by a rule's data-source mapper must throw
// InUsedStorageUnitException.
@Test
void assertExecuteUpdateWithStorageUnitInUsed() {
    ShardingSphereRule rule = mock(ShardingSphereRule.class, RETURNS_DEEP_STUBS);
    DataSourceMapperRuleAttribute ruleAttribute = mock(DataSourceMapperRuleAttribute.class);
    when(ruleAttribute.getDataSourceMapper()).thenReturn(Collections.singletonMap("", Collections.singleton("foo_ds")));
    when(rule.getAttributes()).thenReturn(new RuleAttributes(ruleAttribute));
    when(database.getRuleMetaData()).thenReturn(new RuleMetaData(Collections.singleton(rule)));
    assertThrows(InUsedStorageUnitException.class,
        () -> executor.executeUpdate(new UnregisterStorageUnitStatement(Collections.singleton("foo_ds"), false, false), mock(ContextManager.class)));
}
/**
 * Builds a simplified OR over the given predicates: an empty input or any
 * TRUE operand collapses to TRUE, FALSE operands are dropped, and nested
 * OrPredicates are flattened.
 * NOTE(review): when every operand is FALSE the result is an OrPredicate with
 * no operands — confirm that evaluates to false rather than FalsePredicate
 * being returned explicitly.
 */
public static FilePredicate create(Collection<FilePredicate> predicates) {
    if (predicates.isEmpty()) {
        return TruePredicate.TRUE;
    }
    OrPredicate result = new OrPredicate();
    for (FilePredicate filePredicate : predicates) {
        if (filePredicate == TruePredicate.TRUE) {
            // TRUE absorbs the whole disjunction.
            return TruePredicate.TRUE;
        } else if (filePredicate == FalsePredicate.FALSE) {
            continue;
        } else if (filePredicate instanceof OrPredicate orPredicate) {
            result.predicates.addAll(orPredicate.predicates);
        } else {
            result.predicates.add(filePredicate);
        }
    }
    return result;
}
// An OR containing TruePredicate.TRUE must collapse to TruePredicate.TRUE.
// Renamed from "simplifyAndExpressionsWhenTrue" (and local "andPredicate"):
// this test exercises OrPredicate, not an AND expression.
@Test
public void simplifyOrExpressionsWhenTrue() {
    PathPatternPredicate pathPatternPredicate1 = new PathPatternPredicate(PathPattern.create("foo1/**"));
    PathPatternPredicate pathPatternPredicate2 = new PathPatternPredicate(PathPattern.create("foo2/**"));
    FilePredicate orPredicate = OrPredicate.create(Arrays.asList(pathPatternPredicate1, TruePredicate.TRUE, pathPatternPredicate2));
    assertThat(orPredicate).isEqualTo(TruePredicate.TRUE);
}
/**
 * Deletes committed offsets for the given partitions of a consumer group.
 *
 * @param groupId    consumer group whose offsets are deleted
 * @param partitions partitions whose committed offsets should be removed
 * @param options    call options (timeout)
 * @return result exposing per-partition futures for the given partitions
 */
@Override
public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(
        String groupId,
        Set<TopicPartition> partitions,
        DeleteConsumerGroupOffsetsOptions options) {
    // The future is keyed by the group's coordinator key; the handler drives
    // coordinator lookup, request sending and retries.
    SimpleAdminApiFuture<CoordinatorKey, Map<TopicPartition, Errors>> future =
            DeleteConsumerGroupOffsetsHandler.newFuture(groupId);
    DeleteConsumerGroupOffsetsHandler handler =
            new DeleteConsumerGroupOffsetsHandler(groupId, partitions, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new DeleteConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), partitions);
}
// COORDINATOR_LOAD_IN_PROGRESS, NOT_COORDINATOR and COORDINATOR_NOT_AVAILABLE
// are retriable: the admin client must retry (re-discovering the coordinator
// where needed) until the final successful OffsetDelete response.
@Test
public void testDeleteConsumerGroupOffsetsRetriableErrors() throws Exception {
    // Retriable errors should be retried
    final TopicPartition tp1 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(
            prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(
            prepareOffsetDeleteResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS));
        /*
         * We need to return two responses here: one for the NOT_COORDINATOR error raised when the delete
         * consumer group API is called on a coordinator that has moved. That retries the whole operation,
         * so we need to respond with a FindCoordinatorResponse again.
         *
         * The same applies to the following COORDINATOR_NOT_AVAILABLE error response.
         */
        env.kafkaClient().prepareResponse(
            prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR));
        env.kafkaClient().prepareResponse(
            prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(
            prepareOffsetDeleteResponse(Errors.COORDINATOR_NOT_AVAILABLE));
        env.kafkaClient().prepareResponse(
            prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller()));
        env.kafkaClient().prepareResponse(
            prepareOffsetDeleteResponse("foo", 0, Errors.NONE));
        final DeleteConsumerGroupOffsetsResult errorResult1 = env.adminClient()
            .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet()));
        // On success both the overall and the per-partition futures resolve to null (Void).
        assertNull(errorResult1.all().get());
        assertNull(errorResult1.partitionResult(tp1).get());
    }
}
/**
 * Maps an ASN.1 algorithm OID to its JCA key-algorithm name.
 *
 * @param oid dotted-decimal object identifier
 * @return "EC", "RSA" or "DSA"
 * @throws NoSuchAlgorithmException if the OID is not a supported algorithm
 */
static String getAlgorithmForOid(String oid) throws NoSuchAlgorithmException {
    // A null OID is unknown by definition (the original Yoda-style equals
    // calls treated null the same way); guard before switching on the string.
    if (oid == null) {
        throw new NoSuchAlgorithmException("Unknown algorithm OID " + oid);
    }
    switch (oid) {
        case "1.2.840.10045.2.1":
            return "EC";
        case "1.2.840.113549.1.1.1":
            return "RSA";
        case "1.2.840.10040.4.1":
            return "DSA";
        default:
            throw new NoSuchAlgorithmException("Unknown algorithm OID " + oid);
    }
}
// An unrecognized OID must be rejected with NoSuchAlgorithmException.
@Test
public void getAlgorithmForOid_UnknownOid_Failure() throws Exception {
    try {
        PubkeyUtils.getAlgorithmForOid("1.3.66666.2000.4000.1");
        fail("Should throw NoSuchAlgorithmException");
    } catch (NoSuchAlgorithmException expected) {
    }
}
/**
 * Returns the string for the given Unicode decimal codepoint, or null when the
 * input is null or not a valid codepoint.
 */
@Udf
public String chr(@UdfParameter(
    description = "Decimal codepoint") final Integer decimalCode) {
    // Null check first (avoids unboxing NPE), then range check; both map to null.
    if (decimalCode == null || !Character.isValidCodePoint(decimalCode)) {
        return null;
    }
    // A supplementary codepoint expands to a surrogate pair, hence the char[].
    return String.valueOf(Character.toChars(decimalCode));
}
// The String overload of chr presumably parses "\\uXXXX" escapes — TODO confirm;
// "\\u065" is one hex digit short of a full UTF-16 escape and must yield null.
@Test
public void shouldReturnNullForTooShortUTF16String() {
    final String result = udf.chr("\\u065");
    assertThat(result, is(nullValue()));
}
/**
 * Builds a Glue partition-filter expression ("conjunct AND conjunct ...") from
 * per-column domains. Columns whose names are reserved SQL keywords are
 * skipped, as are conjuncts that would push the expression past Glue's length
 * limit — skipping only widens the filter (NOTE(review): callers presumably
 * re-filter the over-fetched partitions; confirm).
 *
 * @param partitionPredicates domain per partition column
 * @return the joined filter expression (possibly empty)
 */
public static String buildGlueExpression(Map<Column, Domain> partitionPredicates) {
    List<String> perColumnExpressions = new ArrayList<>();
    int expressionLength = 0;
    for (Map.Entry<Column, Domain> partitionPredicate : partitionPredicates.entrySet()) {
        String columnName = partitionPredicate.getKey().getName();
        if (JSQL_PARSER_RESERVED_KEYWORDS.contains(columnName.toUpperCase(ENGLISH))) {
            // The column name is a reserved keyword in the grammar of the SQL parser used internally by Glue API
            continue;
        }
        Domain domain = partitionPredicate.getValue();
        if (domain != null && !domain.isAll()) {
            Optional<String> columnExpression = buildGlueExpressionForSingleDomain(columnName, domain);
            if (columnExpression.isPresent()) {
                int newExpressionLength = expressionLength + columnExpression.get().length();
                if (expressionLength > 0) {
                    // Account for the " AND " separator joining this conjunct to the previous ones.
                    newExpressionLength += CONJUNCT_SEPARATOR.length();
                }
                if (newExpressionLength > GLUE_EXPRESSION_CHAR_LIMIT) {
                    // Over the Glue limit: drop this conjunct rather than fail.
                    continue;
                }
                perColumnExpressions.add((columnExpression.get()));
                expressionLength = newExpressionLength;
            }
        }
    }
    return Joiner.on(CONJUNCT_SEPARATOR).join(perColumnExpressions);
}
// Long.MAX_VALUE must survive the round-trip into a Glue filter expression
// without truncation or locale-specific formatting.
@Test
public void testBigintConversion() {
    Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR)
        .addBigintValues("col1", Long.MAX_VALUE)
        .build();
    String expression = buildGlueExpression(predicates);
    assertEquals(expression, format("((col1 = %d))", Long.MAX_VALUE));
}
/**
 * Creates a ProcessorSupplier that scans a map through the index described by
 * the given metadata.
 *
 * @param indexScanMetadata index name, filter and projection description
 */
static ProcessorSupplier readMapIndexSupplier(MapIndexScanMetadata indexScanMetadata) {
    return new MapIndexScanProcessorSupplier(indexScanMetadata);
}
// Sorted-index range scan: the filter selects ages in [0, count/2], and the
// processor must emit the rows in index (ascending age) order.
@Test
public void test_whenFilterExistsWithoutSpecificProjection_sorted() {
    List<JetSqlRow> expected = new ArrayList<>();
    // Insert in descending key order; build the expected rows in ascending order.
    for (int i = count; i > 0; i--) {
        map.put(i, new Person("value-" + i, i));
        if (i > count / 2) {
            expected.add(jetRow((count - i + 1), "value-" + (count - i + 1), (count - i + 1)));
        }
    }
    IndexConfig indexConfig = new IndexConfig(IndexType.SORTED, "age").setName(randomName());
    map.addIndex(indexConfig);
    // Inclusive range [0, count/2] on the "age" attribute.
    IndexFilter filter = new IndexRangeFilter(intValue(0), true, intValue(count / 2), true);
    MapIndexScanMetadata metadata = metadata(indexConfig.getName(), filter, 2, false);
    TestSupport
        .verifyProcessor(adaptSupplier(MapIndexScanP.readMapIndexSupplier(metadata)))
        .hazelcastInstance(instance())
        .jobConfig(new JobConfig().setArgument(SQL_ARGUMENTS_KEY_NAME, emptyList()))
        .outputChecker(LENIENT_SAME_ITEMS_IN_ORDER)
        .disableSnapshots()
        .disableProgressAssertion()
        .expectOutput(expected);
}
/**
 * Computes the base configuration for a MirrorMaker connector between the given
 * source and target clusters. Later putAll/putIfAbsent calls establish the
 * precedence: global properties, then cluster-level and client-level
 * (producer|consumer|admin) properties, then defaults, and finally
 * flow-specific "source->target." overrides.
 */
public Map<String, String> connectorBaseConfig(SourceAndTarget sourceAndTarget, Class<?> connectorClass) {
    Map<String, String> props = new HashMap<>();
    props.putAll(rawProperties);
    // Keep only recognized config names, then re-add pass-through prefixes.
    props.keySet().retainAll(allConfigNames());
    props.putAll(stringsWithPrefix(CONFIG_PROVIDERS_CONFIG));
    props.putAll(stringsWithPrefix("replication.policy"));
    Map<String, String> sourceClusterProps = clusterProps(sourceAndTarget.source());
    // attrs non prefixed with producer|consumer|admin
    props.putAll(clusterConfigsWithPrefix(SOURCE_CLUSTER_PREFIX, sourceClusterProps));
    // attrs prefixed with producer|consumer|admin
    props.putAll(clientConfigsWithPrefix(SOURCE_PREFIX, sourceClusterProps));
    Map<String, String> targetClusterProps = clusterProps(sourceAndTarget.target());
    props.putAll(clusterConfigsWithPrefix(TARGET_CLUSTER_PREFIX, targetClusterProps));
    props.putAll(clientConfigsWithPrefix(TARGET_PREFIX, targetClusterProps));
    // Defaults that apply only when not explicitly configured.
    props.putIfAbsent(NAME, connectorClass.getSimpleName());
    props.putIfAbsent(CONNECTOR_CLASS, connectorClass.getName());
    props.putIfAbsent(SOURCE_CLUSTER_ALIAS, sourceAndTarget.source());
    props.putIfAbsent(TARGET_CLUSTER_ALIAS, sourceAndTarget.target());
    // override with connector-level properties
    props.putAll(stringsWithPrefixStripped(sourceAndTarget.source() + "->" + sourceAndTarget.target() + "."));
    // disabled by default
    props.putIfAbsent(MirrorConnectorConfig.ENABLED, "false");
    // don't transform -- the worker will handle transformation of Connector and Task configs
    return props;
}
// topics / topics.exclude configured on the "source->target" flow must be
// forwarded, stripped of the flow prefix, into the connector config that
// TopicFilters consume.
@Test
public void testIncludesTopicFilterProperties() {
    MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps(
        "clusters", "a, b",
        "source->target.topics", "topic1, topic2",
        "source->target.topics.exclude", "topic3"));
    SourceAndTarget sourceAndTarget = new SourceAndTarget("source", "target");
    Map<String, String> connectorProps = mirrorConfig.connectorBaseConfig(sourceAndTarget, MirrorSourceConnector.class);
    DefaultTopicFilter.TopicFilterConfig filterConfig =
        new DefaultTopicFilter.TopicFilterConfig(connectorProps);
    assertEquals(Arrays.asList("topic1", "topic2"), filterConfig.getList("topics"),
        "source->target.topics should be passed through to TopicFilters.");
    assertEquals(Collections.singletonList("topic3"), filterConfig.getList("topics.exclude"),
        "source->target.topics.exclude should be passed through to TopicFilters.");
}
/**
 * Adapts a {@link FunctionRaisingIOE} into a standard {@code Function} by
 * delegating to {@code FunctionRaisingIOE#unchecked}, which (per the
 * accompanying test) rethrows IOExceptions as UncheckedIOException.
 *
 * @param <T> input type
 * @param <R> result type
 * @param fun IOE-raising function to adapt
 * @return a Function delegating to {@code fun.unchecked}
 */
public static <T, R> Function<T, R> toUncheckedFunction(FunctionRaisingIOE<T, R> fun) {
    return fun::unchecked;
}
// The adapted function must surface an IOException from the wrapped lambda as
// an UncheckedIOException carrying the original message.
@Test
public void testUncheckedFunction() throws Throwable {
    // java function which should raise a FileNotFoundException
    // wrapped into an unchecked exception
    final Function<String, Object> fn = toUncheckedFunction((String a) -> {
        throw new FileNotFoundException(a);
    });
    intercept(UncheckedIOException.class, "missing", () -> fn.apply("missing"));
}
/**
 * Stops tracking the given cluster data sets and releases their partitions in
 * the local shuffle environment.
 *
 * Data set ids that are not (or no longer) tracked are skipped; previously
 * {@code clusterPartitions.remove} returned null for them and the subsequent
 * {@code getPartitionIds()} call threw a NullPointerException.
 *
 * @param dataSetsToRelease data sets whose locally occupied partitions should be released
 */
@Override
public void stopTrackingAndReleaseClusterPartitions(
        Collection<IntermediateDataSetID> dataSetsToRelease) {
    for (IntermediateDataSetID dataSetID : dataSetsToRelease) {
        final DataSetEntry dataSetEntry = clusterPartitions.remove(dataSetID);
        if (dataSetEntry == null) {
            // Unknown or already released data set; nothing to release locally.
            continue;
        }
        shuffleEnvironment.releasePartitionsLocally(dataSetEntry.getPartitionIds());
    }
}
// Releasing cluster partitions by data set id must release exactly the
// partitions of that data set, not those of other tracked data sets.
@Test
void stopTrackingAndReleaseClusterPartitions() throws Exception {
    final TestingShuffleEnvironment testingShuffleEnvironment = new TestingShuffleEnvironment();
    final CompletableFuture<Collection<ResultPartitionID>> shuffleReleaseFuture =
        new CompletableFuture<>();
    testingShuffleEnvironment.releasePartitionsLocallyFuture = shuffleReleaseFuture;
    final ResultPartitionID resultPartitionId1 = new ResultPartitionID();
    final ResultPartitionID resultPartitionId2 = new ResultPartitionID();
    final IntermediateDataSetID dataSetId1 = new IntermediateDataSetID();
    final IntermediateDataSetID dataSetId2 = new IntermediateDataSetID();
    final TaskExecutorPartitionTracker partitionTracker =
        new TaskExecutorPartitionTrackerImpl(testingShuffleEnvironment);
    // Track one partition per data set.
    partitionTracker.startTrackingPartition(
        new JobID(),
        new TaskExecutorPartitionInfo(
            new TestingShuffleDescriptor(resultPartitionId1), dataSetId1, 1));
    partitionTracker.startTrackingPartition(
        new JobID(),
        new TaskExecutorPartitionInfo(
            new TestingShuffleDescriptor(resultPartitionId2), dataSetId2, 1));
    // Promote partition 1 to a cluster partition, then release its data set.
    partitionTracker.promoteJobPartitions(Collections.singleton(resultPartitionId1));
    partitionTracker.stopTrackingAndReleaseClusterPartitions(Collections.singleton(dataSetId1));
    assertThatFuture(shuffleReleaseFuture)
        .eventuallySucceeds()
        .satisfies(actual -> assertThat(actual).contains(resultPartitionId1));
}
/**
 * Depth-first pre-order traversal: applies the consumer to this node first,
 * then recursively to every child.
 *
 * @param consumer callback invoked once per visited node
 */
public void walk(Consumer<Tree<T>> consumer) {
    consumer.accept(this);
    final List<Tree<T>> children = getChildren();
    if (CollUtil.isNotEmpty(children)) {
        for (final Tree<T> child : children) {
            child.walk(consumer);
        }
    }
}
// walk() must visit every node of the built tree exactly once (7 nodes per the fixture).
@Test
public void walkTest() {
    List<String> ids = new ArrayList<>();
    final Tree<String> tree = TreeUtil.buildSingle(nodeList, "0");
    tree.walk((tr) -> ids.add(tr.getId()));
    assertEquals(7, ids.size());
}
/**
 * Coerces an arbitrary object to a Number.
 *
 * Numbers are returned as-is; anything else is stringified and parsed as a
 * double. If parsing fails (including a null input, which stringifies to
 * "null"), the supplied default — which may itself be null — is returned.
 *
 * @param o            value to coerce
 * @param defaultValue fallback when {@code o} is not numeric
 * @return the coerced number or {@code defaultValue}
 */
public static Number getNumber(Object o, Number defaultValue) {
    if (o instanceof Number) {
        return (Number) o;
    }
    try {
        // parseDouble auto-boxes to Double, matching Double.valueOf semantics.
        return Double.parseDouble(String.valueOf(o));
    } catch (NumberFormatException ignored) {
        return defaultValue;
    }
}
// getNumber must pass Numbers through unchanged, parse numeric strings as
// doubles, and fall back to the supplied default (possibly null) otherwise.
@Test
public void testGetNumberForDifferentFormats() {
    assertEquals(1, Tools.getNumber(1, null).intValue(), 1);
    assertEquals(1.0, Tools.getNumber(1, null).doubleValue(), 0.0);
    assertEquals(42, Tools.getNumber(42.23, null).intValue());
    assertEquals(42.23, Tools.getNumber(42.23, null).doubleValue(), 0.0);
    assertEquals(17, Tools.getNumber("17", null).intValue());
    assertEquals(17.0, Tools.getNumber("17", null).doubleValue(), 0.0);
    assertEquals(23, Tools.getNumber("23.42", null).intValue());
    assertEquals(23.42, Tools.getNumber("23.42", null).doubleValue(), 0.0);
    // Null input with null default yields null.
    assertNull(Tools.getNumber(null, null));
    assertNull(Tools.getNumber(null, null));
    assertEquals(1, Tools.getNumber(null, 1).intValue());
    assertEquals(1.0, Tools.getNumber(null, 1).doubleValue(), 0.0);
}
/**
 * No-arg constructor. Configuration is presumably supplied after construction
 * (e.g. via an initialize/configure call when instantiated reflectively) —
 * TODO confirm against the catalog loading code.
 */
public HadoopCatalog() {}
// namespaceExists must report true only for namespaces materialized by the
// created tables' identifier paths.
@Test
public void testNamespaceExists() throws IOException {
    HadoopCatalog catalog = hadoopCatalog();
    TableIdentifier tbl1 = TableIdentifier.of("db", "ns1", "ns2", "metadata");
    TableIdentifier tbl2 = TableIdentifier.of("db", "ns2", "ns3", "tbl2");
    TableIdentifier tbl3 = TableIdentifier.of("db", "ns3", "tbl4");
    TableIdentifier tbl4 = TableIdentifier.of("db", "metadata");
    Lists.newArrayList(tbl1, tbl2, tbl3, tbl4)
        .forEach(t -> catalog.createTable(t, SCHEMA, PartitionSpec.unpartitioned()));
    assertThat(catalog.namespaceExists(Namespace.of("db", "ns1", "ns2")))
        .as("Should be true as namespace exists")
        .isTrue();
    assertThat(catalog.namespaceExists(Namespace.of("db", "db2", "ns2")))
        .as("Should be false as namespace doesn't exist")
        .isFalse();
}
public PDField getField(String fullyQualifiedName) { // get the field from the cache if there is one. if (fieldCache != null) { return fieldCache.get(fullyQualifiedName); } // get the field from the field tree for (PDField field : getFieldTree()) { if (field.getFullyQualifiedName().equals(fullyQualifiedName)) { return field; } } return null; }
// Regression test for PDFBOX-3347: every widget of the "Krematorier" radio
// field must expose an /Off state plus one named "on" state in its normal
// appearance dictionary.
@Test
void testPDFBox3347() throws IOException, URISyntaxException {
    String sourceUrl = "https://issues.apache.org/jira/secure/attachment/12968302/KYF%20211%20Best%C3%A4llning%202014.pdf";
    try (PDDocument doc = Loader.loadPDF(
            RandomAccessReadBuffer.createBufferFromStream(new URI(sourceUrl).toURL().openStream()))) {
        PDField field = doc.getDocumentCatalog().getAcroForm().getField("Krematorier");
        List<PDAnnotationWidget> widgets = field.getWidgets();
        // TreeSet gives the collected state names a deterministic order for the final assert.
        Set<String> set = new TreeSet<>();
        for (PDAnnotationWidget annot : widgets) {
            PDAppearanceDictionary ap = annot.getAppearance();
            PDAppearanceEntry normalAppearance = ap.getNormalAppearance();
            Set<COSName> nameSet = normalAppearance.getSubDictionary().keySet();
            assertTrue(nameSet.contains(COSName.Off));
            for (COSName name : nameSet) {
                if (!name.equals(COSName.Off)) {
                    set.add(name.getName());
                }
            }
        }
        assertEquals("[Nynäshamn, Råcksta, Silverdal, Skogskrem, St Botvid, Storkällan]",
            set.toString());
    }
}
/**
 * Decides whether this read-many operation must park and wait for more items.
 *
 * Returns false when the minimum number of items has been (or can never need
 * to be) read, true when the operation should be retried later.
 */
@Override
public boolean shouldWait() {
    RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
    // Lazily create the result set on the first shouldWait() invocation.
    if (resultSet == null) {
        resultSet = new ReadResultSetImpl<>(minSize, maxSize, getNodeEngine().getSerializationService(), filter);
        sequence = startSequence;
    }
    // No ringbuffer container yet: wait only if the caller demands at least one item.
    if (ringbuffer == null) {
        return minSize > 0;
    }
    // Clamp the requested sequence into the currently retained range.
    sequence = ringbuffer.clampReadSequenceToBounds(sequence);
    if (minSize == 0) {
        // Best-effort read: grab whatever is readable now and never wait.
        if (sequence < ringbuffer.tailSequence() + 1) {
            readMany(ringbuffer);
        }
        return false;
    }
    if (resultSet.isMinSizeReached()) {
        // enough items have been read, we are done.
        return false;
    }
    if (sequence == ringbuffer.tailSequence() + 1) {
        // the sequence is not readable
        return true;
    }
    readMany(ringbuffer);
    // Wait again unless this read satisfied the minimum size.
    return !resultSet.isMinSizeReached();
}
// Requesting a start sequence beyond the tail (tail + 2) is not yet readable,
// so the operation must wait and the result set stays empty.
@Test
public void whenTooFarAfterTail() {
    ringbuffer.add("tail");
    ReadManyOperation op = getReadManyOperation(ringbuffer.tailSequence() + 2, 1, 1, null);
    // the requested sequence is past the tail, so the operation has to wait
    boolean shouldWait = op.shouldWait();
    assertTrue(shouldWait);
    ReadResultSetImpl response = getReadResultSet(op);
    assertEquals(0, response.readCount());
    assertEquals(0, response.getNextSequenceToReadFrom());
}
/**
 * Validates an AdminSettings entity: the key must pass string validation and a
 * JSON value must be present.
 *
 * @throws DataValidationException if the JSON value is missing
 */
@Override
protected void validateDataImpl(TenantId tenantId, AdminSettings adminSettings) {
    validateString("Key", adminSettings.getKey());
    if (adminSettings.getJsonValue() == null) {
        throw new DataValidationException("Json value should be specified!");
    }
}
// validateDataImpl must delegate key validation to validateString("Key", ...).
@Test
void testValidateNameInvocation() {
    AdminSettings adminSettings = new AdminSettings();
    adminSettings.setKey("jwt");
    adminSettings.setJsonValue(JacksonUtil.toJsonNode("{}"));
    validator.validateDataImpl(tenantId, adminSettings);
    verify(validator).validateString("Key", adminSettings.getKey());
}
/**
 * Gets or creates the segment for the given id if it is still live at
 * {@code streamTime}, then removes segments that have expired.
 *
 * The cleanup runs after the super call; per the accompanying test, a segment
 * that is already expired at streamTime is not created (the super call yields
 * null for it) and leaves nothing on disk.
 */
@Override
public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId,
                                                final ProcessorContext context,
                                                final long streamTime) {
    final KeyValueSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    cleanupExpiredSegments(streamTime);
    return segment;
}
// A segment whose window is already expired at the current stream time must
// not be created: the call returns null and no segment directory appears.
@Test
public void shouldNotCreateSegmentThatIsAlreadyExpired() {
    final long streamTime = updateStreamTimeAndCreateSegment(7);
    assertNull(segments.getOrCreateSegmentIfLive(0, context, streamTime));
    assertFalse(new File(context.stateDir(), "test/test.0").exists());
}
/**
 * Verifies the given value against this attribute.
 *
 * @param value the value to verify
 * @throws UnsupportedOperationException if the concrete attribute does not
 *         support verification (the accompanying test expects this for the
 *         attribute under test — behavior of other subclasses not visible here)
 */
public abstract void verify(String value);
// The attribute under test does not support verification and must say so.
@Test(expected = UnsupportedOperationException.class)
public void testVerify_ShouldThrowUnsupportedOperationException() {
    attribute.verify("testValue");
}
/**
 * Validates an update of an existing sink config against a new one and returns
 * the merged result.
 *
 * Identity fields (tenant/namespace/name) and immutable fields (subscription
 * name, input topics, processing guarantees, ordering flags, auto-ack) must
 * not change; updatable fields are copied from {@code newConfig} onto a clone
 * of {@code existingConfig} when present.
 *
 * NOTE: this method mutates {@code newConfig} (normalizes its inputSpecs).
 * NOTE(review): the inputSpecs validation dereferences
 * {@code existingConfig.getInputSpecs()} without a null default (only the
 * clone and newConfig are defaulted) — looks like a possible NPE when the
 * existing config had no input specs; confirm clone semantics.
 *
 * @param existingConfig the currently deployed sink config
 * @param newConfig      the requested update
 * @return the merged sink config
 * @throws IllegalArgumentException if an immutable field would change
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    // Identity fields must match exactly.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
            .equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    // Normalize input specs to non-null maps before merging topic information.
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Fold the various topic declarations (inputs, topicsPattern, serde map,
    // schema map) into newConfig's inputSpecs so they can be validated uniformly.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Input topics may only be re-configured, never added; the regex flag of an
    // existing topic is immutable.
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
// Updating only "secrets" must merge the new secrets and leave every other
// field of the sink config unchanged.
@Test
public void testMergeDifferentSecrets() {
    SinkConfig sinkConfig = createSinkConfig();
    Map<String, String> mySecrets = new HashMap<>();
    mySecrets.put("MyKey", "MyValue");
    SinkConfig newSinkConfig = createUpdatedSinkConfig("secrets", mySecrets);
    SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newSinkConfig);
    assertEquals(
        mergedConfig.getSecrets(),
        mySecrets
    );
    // Restore the original secrets so the remaining fields can be compared wholesale via JSON.
    mergedConfig.setSecrets(sinkConfig.getSecrets());
    assertEquals(
        new Gson().toJson(sinkConfig),
        new Gson().toJson(mergedConfig)
    );
}
/**
 * Asserts that the actual multimap contains exactly the entries of the
 * expected multimap. On mismatch, reports the missing and/or unexpected
 * entries; on success, returns an {@link Ordered} that can additionally check
 * per-key ordering.
 */
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
    checkNotNull(expectedMultimap, "expectedMultimap");
    checkNotNull(actual);
    // Entries expected but absent, and entries present but not expected.
    ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
    ListMultimap<?, ?> extra = difference(actual, expectedMultimap);
    // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
    // the subject but not enough times. Similarly for unexpected extra items.
    if (!missing.isEmpty()) {
        if (!extra.isEmpty()) {
            // Add type info only when missing/extra entries would otherwise
            // print identically (same toString), to disambiguate the message.
            boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
            // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
            // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
            // show the actual and expected multimaps in the standard format).
            String missingDisplay = addTypeInfo
                ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
                : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
            String extraDisplay = addTypeInfo
                ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
                : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
            failWithActual(
                fact("missing", missingDisplay),
                fact("unexpected", extraDisplay),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        } else {
            failWithActual(
                fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        }
    } else if (!extra.isEmpty()) {
        failWithActual(
            fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
            simpleFact("---"),
            fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
        return ALREADY_FAILED;
    }
    // Entries match as a set; the returned Ordered can still verify order.
    return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
}
// containsExactlyEntriesIn is null-hostile: a null expected multimap is a
// caller error and yields NullPointerException, not an assertion failure.
@Test
public void containsExactlyRejectsNull() {
    ImmutableMultimap<Integer, String> multimap =
        ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    try {
        assertThat(multimap).containsExactlyEntriesIn(null);
        fail("Should have thrown.");
    } catch (NullPointerException expected) {
    }
}
/**
 * Collapses a SQL statement onto a single line: every whitespace token becomes
 * a single space, all other tokens are emitted verbatim, and the result is
 * trimmed.
 *
 * @param sql statement to squeeze
 * @return single-line form of the statement
 */
public static String squeezeStatement(String sql) {
    TokenSource tokens = getLexer(sql, ImmutableSet.of());
    StringBuilder squeezed = new StringBuilder();
    for (Token token = tokens.nextToken(); token.getType() != Token.EOF; token = tokens.nextToken()) {
        if (token.getType() == SqlBaseLexer.WS) {
            // Any run of whitespace (incl. newlines) collapses to one space.
            squeezed.append(' ');
        } else {
            squeezed.append(token.getText());
        }
    }
    return squeezed.toString().trim();
}
// Whitespace (including newlines) collapses to single spaces; the trailing
// alternate delimiter "//" survives while trailing whitespace is trimmed.
@Test
public void testSqueezeStatementAlternateDelimiter() {
    String sql = "select * from\n foo\n order by x // ";
    assertEquals(squeezeStatement(sql), "select * from foo order by x //");
}
/**
 * Computes the lines changed relative to the given target branch for the given
 * files. Delegates to the file-movement-aware implementation after wrapping
 * the paths into changed-file descriptors.
 *
 * @return changed line numbers per path, or null when they cannot be computed
 */
@CheckForNull
@Override
public Map<Path, Set<Integer>> branchChangedLines(String targetBranchName, Path projectBaseDir, Set<Path> changedFiles) {
    return branchChangedLinesWithFileMovementDetection(targetBranchName, projectBaseDir, toChangedFileByPathsMap(changedFiles));
}
// Two levels of git submodules: a change committed on a branch of the
// innermost submodule must be detected when asking for changed lines from
// inside that submodule.
@Test
public void branchChangedLines_given2NestedSubmodulesWithChangesInTheBottomSubmodule_detectChanges() throws IOException, GitAPIException {
    Git gitForRepo2, gitForRepo3;
    Path worktreeForRepo2, worktreeForRepo3;
    worktreeForRepo2 = temp.newFolder().toPath();
    gitForRepo2 = createRepository(worktreeForRepo2);
    worktreeForRepo3 = temp.newFolder().toPath();
    gitForRepo3 = createRepository(worktreeForRepo3);
    // repo3 nests in repo2 as "sub2"; repo2 nests in the main repo as "sub1".
    createAndCommitFile("sub2.js", gitForRepo3, worktreeForRepo3);
    addSubmodule(gitForRepo2, "sub2", worktreeForRepo3.toUri().toString());
    addSubmodule(git, "sub1", worktreeForRepo2.toUri().toString());
    File mainFolderWithAllSubmodules = temp.newFolder().toPath().toRealPath(LinkOption.NOFOLLOW_LINKS).toFile();
    Git.cloneRepository()
        .setURI(worktree.toUri().toString())
        .setDirectory(mainFolderWithAllSubmodules)
        .setCloneSubmodules(true)
        .call();
    Path submodule2Path = mainFolderWithAllSubmodules.toPath().resolve("sub1/sub2");
    Repository submodule2 = new RepositoryBuilder().findGitDir(submodule2Path.toFile()).build();
    Git gitForSubmodule2 = new Git(submodule2);
    gitForSubmodule2.branchCreate().setName("develop").call();
    gitForSubmodule2.checkout().setName("develop").call();
    // Append three lines to sub2.js on the develop branch of the innermost submodule.
    Path submodule2File = mainFolderWithAllSubmodules.toPath().resolve("sub1/sub2/sub2.js");
    Files.write(submodule2File, randomizedContent("sub2.js", 3).getBytes(), StandardOpenOption.APPEND);
    gitForSubmodule2.add().addFilepattern("sub2.js").call();
    gitForSubmodule2.commit().setAuthor("joe", "joe@example.com").setMessage("important change").call();
    Map<Path, Set<Integer>> changedLines = newScmProvider().branchChangedLines("master", submodule2Path, Set.of(submodule2File));
    // The appended lines are 4..6 (the committed file had 3 lines, per the assertion).
    assertThat(changedLines).hasSize(1);
    assertThat(changedLines.entrySet().iterator().next().getValue()).containsOnly(4, 5, 6);
}
/**
 * Returns the JobExecutionRecord for the given job id, or null if none is
 * stored. {@code jobExecutionRecords.get()} presumably yields the backing map
 * (lazily/remotely supplied) — confirm against the field's declaration.
 */
public JobExecutionRecord getJobExecutionRecord(long jobId) {
    return jobExecutionRecords.get().get(jobId);
}
// A client-side JobRepository (as used by Management Center) must be able to
// read the JobExecutionRecord of a snapshotting job once it is persisted.
@Test
public void test_getJobExecutionRecordFromClient() {
    HazelcastInstance client = createHazelcastClient();
    Pipeline p = Pipeline.create();
    p.readFrom(Sources.streamFromProcessor("source", ProcessorMetaSupplier.of(() -> new NoOutputSourceP())))
        .withoutTimestamps()
        .writeTo(Sinks.logger());
    // Exactly-once with a short snapshot interval so a record appears quickly.
    Job job = instance.getJet().newJob(p, new JobConfig()
        .setProcessingGuarantee(ProcessingGuarantee.EXACTLY_ONCE)
        .setSnapshotIntervalMillis(100));
    JobRepository jobRepository = new JobRepository(client);
    // This simulates operation executed by Management Center (see JobManager.getSnapshotDetails)
    assertTrueEventually(() -> assertNotNull(jobRepository.getJobExecutionRecord(job.getId())));
    client.shutdown();
}
/**
 * Rewrites the response's Cache-Control max-age directive based on the cached
 * response's timestamp, the request, and the configured time-to-live.
 */
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
    ServerHttpResponse response = exchange.getResponse();
    long calculatedMaxAgeInSeconds = calculateMaxAgeInSeconds(exchange.getRequest(), cachedResponse, configuredTimeToLive);
    rewriteCacheControlMaxAge(response.getHeaders(), calculatedMaxAgeInSeconds);
}
// Rewriting max-age must not discard the other Cache-Control directives
// already present on the response.
@Test
void otherCacheControlValuesAreNotRemoved_whenMaxAgeIsModified() {
    inputExchange.getResponse().getHeaders().setCacheControl("max-stale=12, min-stale=1, max-age=1234");
    Duration timeToLive = Duration.ofSeconds(30);
    CachedResponse inputCachedResponse = CachedResponse.create(HttpStatus.OK).timestamp(clock.instant()).build();
    SetMaxAgeHeaderAfterCacheExchangeMutator toTest =
        new SetMaxAgeHeaderAfterCacheExchangeMutator(timeToLive, clock, false);
    toTest.accept(inputExchange, inputCachedResponse);
    String[] cacheControlValues = StringUtils
        .tokenizeToStringArray(inputExchange.getResponse().getHeaders().getCacheControl(), ",");
    assertThat(cacheControlValues).contains("max-stale=12", "min-stale=1");
}
/**
 * Lists the host-components belonging to the given service of a cluster.
 */
@Operation(summary = "list", description = "List host-components")
@GetMapping("/services/{serviceId}")
public ResponseEntity<List<HostComponentVO>> listByService(
        @PathVariable Long clusterId, @PathVariable Long serviceId) {
    return ResponseEntity.success(hostComponentService.listByService(clusterId, serviceId));
}
// Listing host-components for a service with no components must yield a
// successful response wrapping an empty list.
@Test
void listByServiceReturnsEmptyForInvalidServiceId() {
    Long clusterId = 1L;
    Long serviceId = 999L;
    when(hostComponentService.listByService(clusterId, serviceId)).thenReturn(List.of());
    ResponseEntity<List<HostComponentVO>> response = hostComponentController.listByService(clusterId, serviceId);
    assertTrue(response.isSuccess());
    assertTrue(response.getData().isEmpty());
}