focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Moves this standby task from CREATED to RUNNING, registering its state stores
 * on the way. Calling again once RUNNING is a no-op (neither branch fires);
 * finding the task in RESTORING is a programming error for a standby task.
 */
@Override
public void initializeIfNeeded() {
    if (state() == State.CREATED) {
        StateManagerUtil.registerStateStores(log, logPrefix, topology, stateMgr, stateDirectory, processorContext);

        // with and without EOS we would check for checkpointing at each commit during running,
        // and the file may be deleted in which case we should checkpoint immediately,
        // therefore we initialize the snapshot as empty
        offsetSnapshotSinceLastFlush = Collections.emptyMap();

        // no topology needs initialized, we can transit to RUNNING
        // right after registered the stores
        transitionTo(State.RESTORING);
        transitionTo(State.RUNNING);

        processorContext.initialize();

        log.info("Initialized");
    } else if (state() == State.RESTORING) {
        // Standby tasks never restore; reaching RESTORING here indicates a bug elsewhere.
        throw new IllegalStateException("Illegal state " + state() + " while initializing standby task " + id);
    }
}
/** Verifies initializeIfNeeded() takes a CREATED standby task to RUNNING and is idempotent. */
@Test
public void shouldTransitToRunningAfterInitialization() {
    doNothing().when(stateManager).registerStateStores(any(), any());

    task = createStandbyTask();
    assertEquals(CREATED, task.state());

    task.initializeIfNeeded();
    assertEquals(RUNNING, task.state());

    // initialize should be idempotent
    task.initializeIfNeeded();
    assertEquals(RUNNING, task.state());
}
/**
 * Sets the time-to-live in seconds and returns this config for fluent chaining.
 * The value is stored as given; no range validation happens here.
 *
 * @param timeToLiveSeconds entry time-to-live in seconds
 * @return this MapConfig instance
 */
public MapConfig setTimeToLiveSeconds(int timeToLiveSeconds) {
    this.timeToLiveSeconds = timeToLiveSeconds;
    return this;
}
/** Round-trip: the TTL passed to the fluent setter is exactly what the getter returns. */
@Test
public void testSetTimeToLiveSeconds() {
    assertEquals(1234, new MapConfig().setTimeToLiveSeconds(1234).getTimeToLiveSeconds());
}
/**
 * Encodes an Intent into JSON: type, id, key, URL-escaped application id, the
 * optional resource group, the intent's resources (Links via the Link codec,
 * everything else via toString()), and the live intent state when available.
 */
@Override
public ObjectNode encode(Intent intent, CodecContext context) {
    checkNotNull(intent, "Intent cannot be null");
    final ObjectNode result = context.mapper().createObjectNode()
            .put(TYPE, intent.getClass().getSimpleName())
            .put(ID, intent.id().toString())
            .put(KEY, intent.key().toString())
            .put(APP_ID, UrlEscapers.urlPathSegmentEscaper()
                    .escape(intent.appId().name()));
    if (intent.resourceGroup() != null) {
        result.put(RESOURCE_GROUP, intent.resourceGroup().toString());
    }

    // Links get a structured encoding; any other resource falls back to its string form.
    final ArrayNode jsonResources = result.putArray(RESOURCES);
    intent.resources()
            .forEach(resource -> {
                if (resource instanceof Link) {
                    jsonResources.add(context.codec(Link.class).encode((Link) resource, context));
                } else {
                    jsonResources.add(resource.toString());
                }
            });

    // State is looked up live from the IntentService and omitted when unknown.
    IntentService service = context.getService(IntentService.class);
    IntentState state = service.getIntentState(intent.key());
    if (state != null) {
        result.put(STATE, state.toString());
    }
    return result;
}
/** Encodes a host-to-host intent and checks the JSON matches it via the custom matcher. */
@Test
public void hostToHostIntent() {
    final HostToHostIntent intent = HostToHostIntent.builder()
            .appId(appId)
            .one(id1)
            .two(id2)
            .build();

    final JsonCodec<HostToHostIntent> intentCodec = context.codec(HostToHostIntent.class);
    assertThat(intentCodec, notNullValue());

    final ObjectNode intentJson = intentCodec.encode(intent, context);
    assertThat(intentJson, matchesIntent(intent));
}
/**
 * Builds, for every categorical field, a serializable function that evaluates an
 * input category against the coefficients of that field's predictors.
 */
static Map<String, SerializableFunction<String, Double>> getCategoricalPredictorsMap(final List<CategoricalPredictor> categoricalPredictors) {
    final Map<String, List<CategoricalPredictor>> predictorsByField =
            categoricalPredictors.stream()
                    .collect(groupingBy(CategoricalPredictor::getField));
    // Collect straight into the result map: one evaluator per field.
    return predictorsByField.entrySet().stream()
            .collect(Collectors.toMap(
                    Map.Entry::getKey,
                    entry -> {
                        final Map<String, Double> coefficientsByCategory =
                                getGroupedCategoricalPredictorMap(entry.getValue());
                        final SerializableFunction<String, Double> evaluator =
                                input -> KiePMMLRegressionTable.evaluateCategoricalPredictor(input, coefficientsByCategory);
                        return evaluator;
                    }));
}
/**
 * Builds 3 predictor names with 3 predictors each, then checks the factory
 * returns exactly one evaluation function per distinct field name.
 */
@Test
void getCategoricalPredictorsMap() {
    final List<CategoricalPredictor> categoricalPredictors = IntStream.range(0, 3).mapToObj(index -> IntStream.range(0, 3).mapToObj(i -> {
        String predictorName = "predictorName-" + index;
        double coefficient = 1.23 * i;
        return PMMLModelTestUtils.getCategoricalPredictor(predictorName, i, coefficient);
    })
            .collect(Collectors.toList())).reduce((categoricalPredictors1, categoricalPredictors2) -> {
        List<CategoricalPredictor> toReturn = new ArrayList<>();
        toReturn.addAll(categoricalPredictors1);
        toReturn.addAll(categoricalPredictors2);
        return toReturn;
    }).get();
    Map<String, SerializableFunction<String, Double>> retrieved = KiePMMLRegressionTableFactory.getCategoricalPredictorsMap(categoricalPredictors);
    // Recompute the expected grouping and compare key sets and sizes.
    final Map<String, List<CategoricalPredictor>> groupedCollectors = categoricalPredictors.stream()
            .collect(groupingBy(categoricalPredictor ->categoricalPredictor.getField()));
    assertThat(retrieved).hasSameSizeAs(groupedCollectors);
    groupedCollectors.keySet().forEach(predictName -> assertThat(retrieved).containsKey(predictName));
}
/**
 * Returns true only if the predicate can be proven to drop every row of a row
 * group based solely on the dictionaries of the given column chunks.
 *
 * @param pred the filter predicate to evaluate (must not be null)
 * @param columns metadata for the row group's column chunks (must not be null)
 * @param dictionaries source of dictionary pages for those columns
 * @return true when the dictionaries show no stored value can satisfy pred
 */
public static boolean canDrop(
    FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
  // Fixed typo in both failure messages: "cannnot" -> "cannot".
  Objects.requireNonNull(pred, "pred cannot be null");
  Objects.requireNonNull(columns, "columns cannot be null");
  return pred.accept(new DictionaryFilter(columns, dictionaries));
}
/**
 * gtEq should drop when the threshold exceeds the column's maximum, and not
 * drop at or below it.
 */
@Test
public void testGtEqDouble() throws Exception {
    DoubleColumn d = doubleColumn("double_field");

    // Seed with negative infinity so the maximum is computed correctly even when
    // every value is negative: Double.MIN_VALUE is the smallest POSITIVE double,
    // not the most negative, so it was a wrong seed for a max fold.
    // (Assumes intValues is non-empty, otherwise highest stays -Infinity.)
    double highest = Double.NEGATIVE_INFINITY;
    for (int value : intValues) {
        highest = Math.max(highest, toDouble(value));
    }

    assertTrue("Should drop: >= highest + 0.00000001",
        canDrop(gtEq(d, highest + 0.00000001), ccmd, dictionaries));
    assertFalse("Should not drop: >= highest", canDrop(gtEq(d, highest), ccmd, dictionaries));
    assertFalse(
        "Should not drop: contains matching values",
        canDrop(gtEq(d, Double.MIN_VALUE), ccmd, dictionaries));
}
/**
 * Initializes this segment file: brand-new files take the new-file load path,
 * pre-existing files take the reload path.
 */
@Override
public boolean init(final SegmentFileOptions opts) {
    return opts.isNewFile ? loadNewFile(opts) : loadExistsFile(opts);
}
/** Smoke test: a fresh segment file initializes and loads successfully. */
@Test
public void testInitAndLoad() {
    assertTrue(init());
}
/**
 * Factory for an IPv6 Neighbor Discovery source-link-layer-address criterion
 * (match type IPV6_ND_SLL).
 *
 * @param mac source link-layer address to match
 * @return the criterion
 */
public static Criterion matchIPv6NDSourceLinkLayerAddress(MacAddress mac) {
    return new IPv6NDLinkLayerAddressCriterion(mac, Type.IPV6_ND_SLL);
}
/** The ND-SLL factory must produce an IPV6_ND_SLL criterion carrying the MAC. */
@Test
public void testMatchIPv6NDSourceLinkLayerAddressMethod() {
    Criterion matchSrcLlAddr = Criteria.matchIPv6NDSourceLinkLayerAddress(llMac1);
    IPv6NDLinkLayerAddressCriterion srcLlCriterion =
            checkAndConvert(matchSrcLlAddr,
                    Criterion.Type.IPV6_ND_SLL,
                    IPv6NDLinkLayerAddressCriterion.class);
    assertThat(srcLlCriterion.mac(), is(equalTo(llMac1)));
}
/**
 * Walks the throwable chain and swaps the detail message of any metaspace,
 * direct-memory, or heap-space OutOfMemoryError for the matching replacement
 * (a null replacement leaves the original message untouched).
 */
public static void tryEnrichOutOfMemoryError(
        @Nullable Throwable root,
        @Nullable String jvmMetaspaceOomNewErrorMessage,
        @Nullable String jvmDirectOomNewErrorMessage,
        @Nullable String jvmHeapSpaceOomNewErrorMessage) {
    updateDetailMessage(
            root,
            throwable -> {
                if (isMetaspaceOutOfMemoryError(throwable)) {
                    return jvmMetaspaceOomNewErrorMessage;
                }
                if (isDirectOutOfMemoryError(throwable)) {
                    return jvmDirectOomNewErrorMessage;
                }
                if (isHeapSpaceOutOfMemoryError(throwable)) {
                    return jvmHeapSpaceOomNewErrorMessage;
                }
                // Not an OOM flavor we enrich: keep the original message.
                return null;
            });
}
/** A null root throwable must be a silent no-op, not an exception. */
@Test
void testTryEnrichTaskExecutorErrorCanHandleNullValueWithoutCausingException() {
    ExceptionUtils.tryEnrichOutOfMemoryError(null, "", "", "");
}
/**
 * Scans OpenAPI annotations on the given class using the resolved application
 * path and default (empty/null) values for all other scan parameters.
 *
 * @param cls the annotated resource class to scan
 * @return the resulting OpenAPI model
 */
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null,
            new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
// Registers a named header under Components, scans RefHeaderResource, and asserts
// the serialized YAML keeps both the inline header attributes and the
// $ref to '#/components/headers/Header'. The YAML literal below is the expected
// serialization verbatim -- do not reformat or re-indent it.
@Test(description = "Header with Ref") public void testHeaderWithRef() { Components components = new Components(); components.addHeaders("Header", new Header().description("Header Description")); OpenAPI oas = new OpenAPI() .info(new Info().description("info")) .components(components); Reader reader = new Reader(oas); OpenAPI openAPI = reader.read(RefHeaderResource.class); String yaml = "openapi: 3.0.1\n" + "info:\n" + " description: info\n" + "paths:\n" + " /path:\n" + " get:\n" + " summary: Simple get operation\n" + " description: Defines a simple get operation with no inputs and a complex output\n" + " operationId: getWithPayloadResponse\n" + " responses:\n" + " \"200\":\n" + " description: voila!\n" + " headers:\n" + " Rate-Limit-Limit:\n" + " description: The number of allowed requests in the current period\n" + " $ref: '#/components/headers/Header'\n" + " style: simple\n" + " schema:\n" + " type: integer\n" + " deprecated: true\n" + "components:\n" + " headers:\n" + " Header:\n" + " description: Header Description\n"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
/**
 * Returns whether any registered partition-field tree contains a node whose
 * name matches the given regex.
 */
@VisibleForTesting
boolean find(String searchField) {
    final Pattern pattern = Pattern.compile(searchField);
    boolean matched = false;
    for (PartitionFieldNode root : roots) {
        if (find(root, pattern)) {
            matched = true;
            break;
        }
    }
    return matched;
}
// Exercises PartitionProjectionEvaluator.find() against many projection-field
// combinations: SD_PATTERN must match whenever any sd.* field (or sd itself,
// which expands to all children) is projected; SERDE_PATTERN and CD_PATTERN
// behave analogously for serdeInfo and cols. Kept byte-identical -- the value
// of this test is the specific matrix of field lists and expected booleans.
@Test public void testFind() throws MetaException { PersistenceManager mockPm = Mockito.mock(PersistenceManager.class); List<String> projectionFields = Arrays.asList("sd", "createTime", "sd.location", "parameters"); PartitionProjectionEvaluator projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); projectionFields = Arrays.asList("sd", "createTime", "parameters"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.serdeInfo.serializationLib"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.location"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(SD_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.location"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertFalse(projectionEvaluator.find(SERDE_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.serdeInfo.serializationLib"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(SERDE_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.serdeInfo"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(SERDE_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertFalse(projectionEvaluator.find(SD_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.cols"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(CD_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd.cols.name"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); Assert.assertTrue(projectionEvaluator.find(CD_PATTERN)); projectionFields = Arrays.asList("createTime", "parameters", "sd", "sd.location"); projectionEvaluator = new PartitionProjectionEvaluator(mockPm, fieldNameToColumnName, projectionFields, false, false, null, null); // CD_PATTERN should exist since sd gets expanded to all the child nodes Assert.assertTrue(projectionEvaluator.find(CD_PATTERN)); }
/**
 * Fetches a required property value, failing with IllegalArgumentException when
 * the properties object is null or the value is missing/blank.
 *
 * @param properties source properties, must not be null
 * @param key the property name to look up
 * @return the non-blank property value
 */
public static String checkRequiredProperty(Properties properties, String key) {
    if (properties == null) {
        throw new IllegalArgumentException("Properties are required");
    }
    return checkHasText(properties.getProperty(key), "Property '" + key + "' is required");
}
/** A present-but-different key must still fail with the "required" message for the missing key. */
@Test
public void test_checkRequiredProperty_whenNoProperty() {
    Properties properties = new Properties();
    properties.setProperty("other-key", "other-value");

    Assertions.assertThatThrownBy(() -> checkRequiredProperty(properties, "some-key"))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Property 'some-key' is required");
}
/**
 * Sets the reference URL and returns this builder for chaining.
 *
 * @param url the url to store on the built reference
 * @return this builder
 */
public ReferenceBuilder<T> url(String url) {
    this.url = url;
    return getThis();
}
/** The url set on the builder must appear on the built reference. */
@Test
void url() {
    ReferenceBuilder builder = new ReferenceBuilder();
    builder.url("url");
    Assertions.assertEquals("url", builder.build().getUrl());
}
/**
 * Concatenates two arrays, preserving element order (left first) and any null
 * elements within them. Returns null only when BOTH inputs are null; a single
 * null input is treated as an empty array.
 */
@Udf
public <T> List<T> concat(
    @UdfParameter(description = "First array of values") final List<T> left,
    @UdfParameter(description = "Second array of values") final List<T> right) {
  if (left == null) {
    return right == null ? null : new ArrayList<>(right);
  }
  if (right == null) {
    return new ArrayList<>(left);
  }
  final List<T> combined = new ArrayList<>(left.size() + right.size());
  combined.addAll(left);
  combined.addAll(right);
  return combined;
}
/** Null elements inside the inputs must be preserved in the concatenated result. */
@Test
public void shouldConcatArraysContainingNulls() {
    // Given:
    final List<String> input1 = Arrays.asList(null, "bar");
    final List<String> input2 = Arrays.asList("foo");

    // When:
    final List<String> result = udf.concat(input1, input2);

    // Then: left elements first, null kept in place.
    assertThat(result, is(Arrays.asList(null, "bar", "foo")));
}
/**
 * Synchronous wrapper around rankAsync(): blocks for the element's rank.
 * Returns null when the element is not in the set (see testRank).
 */
@Override
public Integer rank(V o) {
    return get(rankAsync(o));
}
/** revRank counts from the highest score down; missing members yield null ranks. */
@Test
public void testRank() {
    RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple");
    set.add(0.1, "a");
    set.add(0.2, "b");
    set.add(0.3, "c");
    set.add(0.4, "d");
    set.add(0.5, "e");
    set.add(0.6, "f");
    set.add(0.7, "g");

    assertThat(set.revRank("d")).isEqualTo(3);
    assertThat(set.revRank(Arrays.asList("d", "a", "g", "abc", "f"))).isEqualTo(Arrays.asList(3, 6, 0, null, 1));
    assertThat(set.rank("abc")).isNull();
}
/**
 * Clones the element at sourceIndex and inserts the clone at targetIndex.
 *
 * @param sourceIndex index of the element to clone; must be in [0, size)
 * @param targetIndex insertion index for the clone; must be in [0, size]
 *                    (inserting at size() appends)
 * @return the inserted clone
 * @throws IndexOutOfBoundsException when either index is out of its range
 */
public T cloneData(int sourceIndex, int targetIndex) {
    if (sourceIndex < 0 || sourceIndex >= scesimData.size()) {
        // Plain concatenation replaces the needless explicit StringBuilder chain;
        // the resulting message text is unchanged.
        throw new IndexOutOfBoundsException("SourceIndex out of range " + sourceIndex);
    }
    if (targetIndex < 0 || targetIndex > scesimData.size()) {
        throw new IndexOutOfBoundsException("TargetIndex out of range " + targetIndex);
    }
    T scesimDataByIndex = getDataByIndex(sourceIndex);
    @SuppressWarnings("unchecked")
    T clonedScesimData = (T) scesimDataByIndex.cloneInstance();
    scesimData.add(targetIndex, clonedScesimData);
    return clonedScesimData;
}
/** Cloning index 3 into slot 4 inserts an equal element carrying the same description. */
@Test
public void cloneData() {
    final Scenario cloned = model.getDataByIndex(3);
    final Scenario clone = model.cloneData(3, 4);
    assertThat(clone).isNotNull();
    assertThat(model.scesimData.get(4)).isEqualTo(clone);
    assertThat(clone.getDescription()).isEqualTo(cloned.getDescription());
}
/**
 * Selectors are equal when they hold equal criteria; identity short-circuits,
 * any non-DefaultTrafficSelector is unequal.
 */
@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (!(obj instanceof DefaultTrafficSelector)) {
        return false;
    }
    DefaultTrafficSelector other = (DefaultTrafficSelector) obj;
    return Objects.equals(criteria, other.criteria);
}
/** Equality is driven by the lambda criterion: same spacing groups together, different separates. */
@Test
public void testEquals() {
    final TrafficSelector selector1 = DefaultTrafficSelector.builder()
            .add(Criteria.matchLambda(new OchSignal(GridType.FLEX, ChannelSpacing.CHL_100GHZ, 1, 1)))
            .build();
    final TrafficSelector sameAsSelector1 = DefaultTrafficSelector.builder()
            .add(Criteria.matchLambda(new OchSignal(GridType.FLEX, ChannelSpacing.CHL_100GHZ, 1, 1)))
            .build();
    final TrafficSelector selector2 = DefaultTrafficSelector.builder()
            .add(Criteria.matchLambda(new OchSignal(GridType.FLEX, ChannelSpacing.CHL_50GHZ, 1, 1)))
            .build();

    new EqualsTester()
            .addEqualityGroup(selector1, sameAsSelector1)
            .addEqualityGroup(selector2)
            .testEquals();
}
/**
 * Resolves the RMContainer through the container's current application attempt;
 * returns null when no such attempt exists.
 */
@Override
public RMContainer getRMContainer(ContainerId containerId) {
    SchedulerApplicationAttempt attempt = getCurrentAttemptForContainer(containerId);
    if (attempt == null) {
        return null;
    }
    return attempt.getRMContainer(containerId);
}
@Test(timeout=60000) public void testContainerRecoveredByNode() throws Exception { System.out.println("Starting testContainerRecoveredByNode"); final int maxMemory = 10 * 1024; YarnConfiguration conf = getConf(); conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true); conf.setBoolean( YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true); conf.set( YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName()); MockRM rm1 = new MockRM(conf); try { rm1.start(); MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder.createWithMemory(200, rm1) .withAppName("name") .withUser("user") .withAcls(new HashMap<ApplicationAccessType, String>()) .withUnmanagedAM(false) .withQueue("default") .withMaxAppAttempts(-1) .withCredentials(null) .withAppType("Test") .withWaitForAppAcceptedState(false) .withKeepContainers(true) .build(); RMApp app1 = MockRMAppSubmitter.submit(rm1, data); MockNM nm1 = new MockNM("127.0.0.1:1234", 10240, rm1.getResourceTrackerService()); nm1.registerNode(); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1); am1.allocate("127.0.0.1", 8192, 1, new ArrayList<ContainerId>()); YarnScheduler scheduler = rm1.getResourceScheduler(); RMNode node1 = MockNodes.newNodeInfo( 0, Resources.createResource(maxMemory), 1, "127.0.0.2"); ContainerId containerId = ContainerId.newContainerId( app1.getCurrentAppAttempt().getAppAttemptId(), 2); NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId, 0, ContainerState.RUNNING, Resource.newInstance(1024, 1), "recover container", 0, Priority.newInstance(0), 0); List<NMContainerStatus> containerReports = new ArrayList<>(); containerReports.add(containerReport); scheduler.handle(new NodeAddedSchedulerEvent(node1, containerReports)); RMContainer rmContainer = scheduler.getRMContainer(containerId); //verify queue name when rmContainer is recovered if (scheduler instanceof CapacityScheduler) { Assert.assertEquals( app1.getQueue(), rmContainer.getQueueName()); } else { 
Assert.assertEquals(app1.getQueue(), rmContainer.getQueueName()); } } finally { rm1.stop(); System.out.println("Stopping testContainerRecoveredByNode"); } }
/**
 * Cancels every registered timeout, then forgets them all.
 */
protected void unregisterAllTimeouts() {
    timeouts.values().forEach(Timeout::cancel);
    timeouts.clear();
}
/** After unregisterAllTimeouts the registry is empty and every scheduled task is cancelled. */
@Test
void testUnregisterAllTimeouts() {
    final ManuallyTriggeredScheduledExecutorService scheduledExecutorService =
            new ManuallyTriggeredScheduledExecutorService();
    DefaultTimerService<AllocationID> timerService =
            new DefaultTimerService<>(scheduledExecutorService, 100L);
    timerService.start((ignoredA, ignoredB) -> {});

    // Register two timeouts, then wipe them all.
    timerService.registerTimeout(new AllocationID(), 10, TimeUnit.SECONDS);
    timerService.registerTimeout(new AllocationID(), 10, TimeUnit.SECONDS);

    timerService.unregisterAllTimeouts();

    Map<?, ?> timeouts = timerService.getTimeouts();
    assertThat(timeouts).isEmpty();
    for (ScheduledFuture<?> scheduledTask : scheduledExecutorService.getAllScheduledTasks()) {
        assertThat(scheduledTask.isCancelled()).isTrue();
    }
}
/**
 * Session and table handle are unused; the column metadata lives on the JDBC
 * column handle itself.
 */
@Override
public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle) {
    return ((JdbcColumnHandle) columnHandle).getColumnMetadata();
}
/** A varchar JDBC column handle must surface as plain VARCHAR column metadata. */
@Test
public void getColumnMetadata() {
    assertEquals(
            metadata.getColumnMetadata(SESSION, tableHandle,
                    new JdbcColumnHandle(CONNECTOR_ID, "text", JDBC_VARCHAR, VARCHAR, true, Optional.empty())),
            new ColumnMetadata("text", VARCHAR));
}
/**
 * The local protocol advertises implicit directory-timestamp handling.
 */
@Override
public DirectoryTimestamp getDirectoryTimestamp() {
    return DirectoryTimestamp.implicit;
}
/** The local protocol must report implicit directory timestamps. */
@Test
public void testFeatures() {
    assertEquals(Protocol.DirectoryTimestamp.implicit, new LocalProtocol().getDirectoryTimestamp());
}
// Converts staging-file paths (optionally "stagedName=path") into artifact
// descriptors. Behavior visible in the code: duplicates are dropped while
// preserving first-seen order (LinkedHashSet); missing files are logged (WARN
// when explicitly named, INFO otherwise) and skipped; directories are zipped
// before hashing; each artifact records its sha256 and a staged name (derived
// from file + hash when none was given). Kept byte-identical: the flow is long
// but strictly linear, and restructuring risks perturbing the logging contract
// that tests assert on.
public static List<ArtifactInformation> getArtifacts(List<String> stagingFiles) { ImmutableList.Builder<ArtifactInformation> artifactsBuilder = ImmutableList.builder(); Set<String> deduplicatedStagingFiles = new LinkedHashSet<>(stagingFiles); for (String path : deduplicatedStagingFiles) { File file; String stagedName = null; if (path.contains("=")) { String[] components = path.split("=", 2); file = new File(components[1]); stagedName = components[0]; } else { file = new File(path); } // Spurious items get added to the classpath, but ignoring silently can cause confusion. // Therefore, issue logs if a file does not exist before ignoring. The level will be warning // if they have a staged name, as those are likely to cause problems or unintended behavior // (e.g., dataflow-worker.jar, windmill_main). if (!file.exists()) { if (stagedName != null) { LOG.warn( "Stage Artifact '{}' with the name '{}' was not found, staging will be ignored.", file, stagedName); } else { LOG.info("Stage Artifact '{}' was not found, staging will be ignored.", file); } continue; } ArtifactInformation.Builder artifactBuilder = ArtifactInformation.newBuilder(); artifactBuilder.setTypeUrn(BeamUrns.getUrn(StandardArtifacts.Types.FILE)); artifactBuilder.setRoleUrn(BeamUrns.getUrn(StandardArtifacts.Roles.STAGING_TO)); HashCode hashCode; if (file.isDirectory()) { File zippedFile; try { zippedFile = zipDirectory(file); hashCode = Files.asByteSource(zippedFile).hash(Hashing.sha256()); } catch (IOException e) { throw new RuntimeException(e); } artifactBuilder.setTypePayload( RunnerApi.ArtifactFilePayload.newBuilder() .setPath(zippedFile.getPath()) .setSha256(hashCode.toString()) .build() .toByteString()); } else { try { hashCode = Files.asByteSource(file).hash(Hashing.sha256()); } catch (IOException e) { throw new RuntimeException(e); } artifactBuilder.setTypePayload( RunnerApi.ArtifactFilePayload.newBuilder() .setPath(file.getPath()) .setSha256(hashCode.toString()) .build() .toByteString()); } if (stagedName == null) { stagedName = createStagingFileName(file, hashCode); } artifactBuilder.setRolePayload( RunnerApi.ArtifactStagingToRolePayload.newBuilder() .setStagedName(stagedName) .build() .toByteString()); artifactsBuilder.add(artifactBuilder.build()); } return artifactsBuilder.build(); }
/** Existing files (plain and name=path forms) stage without any "not found" logging. */
@Test
public void testGetArtifactsExistingNoLogs() throws Exception {
    File file1 = File.createTempFile("file1-", ".txt");
    file1.deleteOnExit();
    File file2 = File.createTempFile("file2-", ".txt");
    file2.deleteOnExit();
    List<ArtifactInformation> artifacts =
            Environments.getArtifacts(ImmutableList.of(file1.getAbsolutePath(), "file2=" + file2));
    assertThat(artifacts, hasSize(2));
    expectedLogs.verifyNotLogged("was not found");
}
/**
 * Returns the natural-order maximum of the collection, or null when the
 * collection is null or empty.
 *
 * @param coll the collection to inspect, may be null
 * @return the largest element, or null for a null/empty collection
 */
public static <T extends Comparable<? super T>> T max(Collection<T> coll) {
    if (isEmpty(coll)) {
        return null;
    }
    return Collections.max(coll);
}
/** The maximum of 1..6 is 6. */
@Test
public void maxTest() {
    List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6);
    assertEquals((Integer) 6, CollUtil.max(list));
}
/**
 * Factory for an SCTP source-port criterion (match type SCTP_SRC).
 *
 * @param sctpPort source SCTP port to match
 * @return the criterion
 */
public static Criterion matchSctpSrc(TpPort sctpPort) {
    return new SctpPortCriterion(sctpPort, Type.SCTP_SRC);
}
/** The SCTP source factory must yield an SCTP_SRC criterion carrying the port. */
@Test
public void testMatchSctpSrcMethod() {
    Criterion matchSctpSrc = Criteria.matchSctpSrc(tpPort1);
    SctpPortCriterion sctpPortCriterion =
            checkAndConvert(matchSctpSrc,
                    Criterion.Type.SCTP_SRC,
                    SctpPortCriterion.class);
    assertThat(sctpPortCriterion.sctpPort(), is(equalTo(tpPort1)));
}
/**
 * One duty cycle of the sender agent: drains queued conductor commands, sends
 * pending data, polls the control transports when warranted, and periodically
 * triggers endpoint re-resolution checks.
 *
 * @return total work done (drained commands + bytes sent + bytes received)
 */
public int doWork() {
    final long nowNs = nanoClock.nanoTime();
    cachedNanoClock.update(nowNs);
    dutyCycleTracker.measureAndUpdate(nowNs);

    final int workCount = commandQueue.drain(CommandProxy.RUN_TASK, Configuration.COMMAND_DRAIN_LIMIT);
    // Snapshot short-send count before sending so a new short send can be detected below.
    final long shortSendsBefore = shortSends.get();
    final int bytesSent = doSend(nowNs);

    int bytesReceived = 0;
    // Poll control messages when: nothing was sent, the duty cycle ratio was
    // reached, the poll deadline passed (deadline - now < 0 is wrap-safe), or
    // doSend registered a new short send.
    if (0 == bytesSent ||
        ++dutyCycleCounter >= dutyCycleRatio ||
        (controlPollDeadlineNs - nowNs < 0) ||
        shortSendsBefore < shortSends.get()) {
        bytesReceived = controlTransportPoller.pollTransports();

        dutyCycleCounter = 0;
        controlPollDeadlineNs = nowNs + statusMessageReadTimeoutNs;
    }

    // Periodic re-resolution check, enabled only when the interval is positive.
    if (reResolutionCheckIntervalNs > 0 && (reResolutionDeadlineNs - nowNs) < 0) {
        reResolutionDeadlineNs = nowNs + reResolutionCheckIntervalNs;
        controlTransportPoller.checkForReResolutions(nowNs, conductorProxy);
    }

    return workCount + bytesSent + bytesReceived;
}
// Flow-control gate: after appending a message, the first doWork() emits only a
// SETUP frame (no data); once a status message grants a receiver window, the
// next doWork() emits the DATA frame whose header fields are then fully checked.
// Kept byte-identical -- the assertions depend on the exact frame sequencing.
@Test void shouldNotSendUntilStatusMessageReceived() { final UnsafeBuffer buffer = new UnsafeBuffer(ByteBuffer.allocateDirect(PAYLOAD.length)); buffer.putBytes(0, PAYLOAD); appendUnfragmentedMessage(rawLog, 0, INITIAL_TERM_ID, 0, headerWriter, buffer, 0, PAYLOAD.length); sender.doWork(); assertThat(receivedFrames.size(), is(1)); setupHeader.wrap(receivedFrames.remove()); assertThat(setupHeader.headerType(), is(HeaderFlyweight.HDR_TYPE_SETUP)); final StatusMessageFlyweight msg = mock(StatusMessageFlyweight.class); when(msg.consumptionTermId()).thenReturn(INITIAL_TERM_ID); when(msg.consumptionTermOffset()).thenReturn(0); when(msg.receiverWindowLength()).thenReturn(ALIGNED_FRAME_LENGTH); publication.onStatusMessage(msg, rcvAddress, mockDriverConductorProxy); sender.doWork(); assertThat(receivedFrames.size(), is(1)); dataHeader.wrap(new UnsafeBuffer(receivedFrames.remove())); assertThat(dataHeader.frameLength(), is(FRAME_LENGTH)); assertThat(dataHeader.termId(), is(INITIAL_TERM_ID)); assertThat(dataHeader.streamId(), is(STREAM_ID)); assertThat(dataHeader.sessionId(), is(SESSION_ID)); assertThat(dataHeader.termOffset(), is(offsetOfMessage(1))); assertThat(dataHeader.headerType(), is(HeaderFlyweight.HDR_TYPE_DATA)); assertThat(dataHeader.flags(), is(DataHeaderFlyweight.BEGIN_AND_END_FLAGS)); assertThat(dataHeader.version(), is((short)HeaderFlyweight.CURRENT_VERSION)); }
/**
 * Builds the resource name for an invocation in the form
 * {@code InterfaceName:methodName(paramType1,paramType2,...)}.
 */
protected String getMethodResourceName(Invoker<?> invoker, Invocation invocation) {
    StringBuilder resource = new StringBuilder(64);
    resource.append(invoker.getInterface().getName())
            .append(":")
            .append(invocation.getMethodName())
            .append("(");
    // Separator starts empty so no leading comma is emitted.
    String separator = "";
    for (Class<?> parameterType : invocation.getParameterTypes()) {
        resource.append(separator).append(parameterType.getName());
        separator = ",";
    }
    return resource.append(")").toString();
}
// Verifies the resource name is "<prefix><Interface>:<method>(<paramTypes>)" for
// both the default provider/consumer prefixes and custom prefixes installed via
// SentinelConfig. Kept byte-identical -- expected strings encode the exact
// format produced by getMethodResourceName.
@Test public void testGetResourceNameWithPrefix() { Invoker invoker = mock(Invoker.class); when(invoker.getInterface()).thenReturn(DemoService.class); Invocation invocation = mock(Invocation.class); Method method = DemoService.class.getMethods()[0]; when(invocation.getMethodName()).thenReturn(method.getName()); when(invocation.getParameterTypes()).thenReturn(method.getParameterTypes()); //test with default prefix String resourceName = filter.getMethodResourceName(invoker, invocation, DubboAdapterGlobalConfig.getDubboProviderPrefix()); System.out.println("resourceName = " + resourceName); assertEquals("dubbo:provider:com.alibaba.csp.sentinel.adapter.dubbo.provider.DemoService:sayHello(java.lang.String,int)", resourceName); resourceName = filter.getMethodResourceName(invoker, invocation, DubboAdapterGlobalConfig.getDubboConsumerPrefix()); assertEquals("dubbo:consumer:com.alibaba.csp.sentinel.adapter.dubbo.provider.DemoService:sayHello(java.lang.String,int)", resourceName); //test with custom prefix SentinelConfig.setConfig(DubboAdapterGlobalConfig.DUBBO_PROVIDER_RES_NAME_PREFIX_KEY, "my:dubbo:provider:"); SentinelConfig.setConfig(DubboAdapterGlobalConfig.DUBBO_CONSUMER_RES_NAME_PREFIX_KEY, "my:dubbo:consumer:"); resourceName = filter.getMethodResourceName(invoker, invocation, DubboAdapterGlobalConfig.getDubboProviderPrefix()); assertEquals("my:dubbo:provider:com.alibaba.csp.sentinel.adapter.dubbo.provider.DemoService:sayHello(java.lang.String,int)", resourceName); resourceName = filter.getMethodResourceName(invoker, invocation, DubboAdapterGlobalConfig.getDubboConsumerPrefix()); assertEquals("my:dubbo:consumer:com.alibaba.csp.sentinel.adapter.dubbo.provider.DemoService:sayHello(java.lang.String,int)", resourceName); }
/**
 * Writes the server's version string to the response as UTF-8 plain text.
 */
@Override
public void handle(Request request, Response response) throws Exception {
    response.stream().setMediaType(MediaTypes.TXT);
    IOUtils.write(server.getVersion(), response.stream().output(), UTF_8);
}
/** The handler must emit exactly the server's version string as the response body. */
@Test
public void returns_version_as_plain_text() throws Exception {
    when(server.getVersion()).thenReturn("6.4-SNAPSHOT");

    DumbResponse response = new DumbResponse();
    underTest.handle(mock(Request.class), response);

    assertThat(new TestResponse(response).getInput()).isEqualTo("6.4-SNAPSHOT");
}
/**
 * Covariant override: narrows the return type to MapSettings so fluent calls
 * keep chaining on the concrete type.
 */
@Override
public MapSettings setProperty(String key, String value) {
    return (MapSettings) super.setProperty(key, value);
}
/** After setting a null Float, getFloat must return null. */
@Test
public void testSetNullFloat() {
    Settings settings = new MapSettings();
    settings.setProperty("foo", (Float) null);
    assertThat(settings.getFloat("foo")).isNull();
}
/**
 * CLI entry point: parses the options and dispatches to list-node-labels,
 * list-node-attributes, or help; anything else prints an error plus usage.
 *
 * @return 0 on every parse-success path (including invalid command usage);
 *         -1 only when an option argument is missing
 */
@Override
public int run(String[] args) throws Exception {
    Options opts = new Options();

    opts.addOption("lnl", LIST_LABELS_CMD, false, "List cluster node-label collection");
    opts.addOption("lna", LIST_CLUSTER_ATTRIBUTES, false, "List cluster node-attribute collection");
    opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
    opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
        "This is DEPRECATED, will be removed in future releases. Directly access node label store, "
            + "with this option, all node label related operations"
            + " will NOT connect RM. Instead, they will"
            + " access/modify stored node labels directly."
            + " By default, it is false (access via RM)."
            + " AND PLEASE NOTE: if you configured "
            + YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR
            + " to a local directory"
            + " (instead of NFS or HDFS), this option will only work"
            + " when the command run on the machine where RM is running."
            + " Also, this option is UNSTABLE, could be removed in future"
            + " releases.");

    int exitCode = -1;
    CommandLine parsedCli = null;
    try {
        // NOTE(review): GnuParser is deprecated in commons-cli (DefaultParser is
        // the replacement) -- left as-is to avoid a behavior change here.
        parsedCli = new GnuParser().parse(opts, args);
    } catch (MissingArgumentException ex) {
        sysout.println("Missing argument for options");
        printUsage(opts);
        return exitCode;
    }

    createAndStartYarnClient();

    if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
        accessLocal = true;
    }

    if (parsedCli.hasOption(LIST_LABELS_CMD)) {
        printClusterNodeLabels();
    } else if(parsedCli.hasOption(LIST_CLUSTER_ATTRIBUTES)){
        printClusterNodeAttributes();
    } else if (parsedCli.hasOption(HELP_CMD)) {
        printUsage(opts);
        return 0;
    } else {
        // NOTE(review): invalid usage still falls through to "return 0" below --
        // confirm no caller relies on a non-zero exit before changing it.
        syserr.println("Invalid Command Usage : ");
        printUsage(opts);
    }
    return 0;
}
/** Listing cluster attributes prints each attribute key with its type, one per line. */
@Test
public void testGetClusterNodeAttributes() throws Exception {
    when(client.getClusterAttributes()).thenReturn(ImmutableSet
        .of(NodeAttributeInfo.newInstance(NodeAttributeKey.newInstance("GPU"),
            NodeAttributeType.STRING), NodeAttributeInfo
            .newInstance(NodeAttributeKey.newInstance("CPU"),
                NodeAttributeType.STRING)));
    ClusterCLI cli = createAndGetClusterCLI();
    int rc = cli.run(new String[] {ClusterCLI.CMD, "-" + ClusterCLI.LIST_CLUSTER_ATTRIBUTES});
    assertEquals(0, rc);
    // Build the expected console output and compare the whole buffer.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("rm.yarn.io/GPU(STRING)");
    pw.println("rm.yarn.io/CPU(STRING)");
    pw.close();
    verify(sysOut).println(baos.toString("UTF-8"));
}
/**
 * Evaluates the compiled term against a single row; exposed for tests only.
 */
@VisibleForTesting
Object evaluate(final GenericRow row) {
    return term.getValue(new TermEvaluationContext(row));
}
/**
 * Casting a timestamp, a date string, and a date literal to DATE must all yield
 * the expected java.sql.Date values (the timestamp 864000500 truncates to the
 * day boundary 864000000).
 */
@Test
public void shouldEvaluateCastToDate() {
    // Given:
    final Expression cast1 = new Cast(
        new TimestampLiteral(Timestamp.from(Instant.ofEpochMilli(864000500))),
        new Type(SqlPrimitiveType.of("DATE"))
    );
    final Expression cast2 = new Cast(
        new StringLiteral("2017-11-13"),
        new Type(SqlPrimitiveType.of("DATE"))
    );
    final Expression cast3 = new Cast(
        new DateLiteral(new Date(864000000)),
        new Type(SqlPrimitiveType.of("DATE"))
    );

    // When:
    InterpretedExpression interpreter1 = interpreter(cast1);
    InterpretedExpression interpreter2 = interpreter(cast2);
    InterpretedExpression interpreter3 = interpreter(cast3);

    // Then:
    assertThat(interpreter1.evaluate(ROW), is(new Date(864000000)));
    assertThat(interpreter2.evaluate(ROW), is(new Date(1510531200000L)));
    assertThat(interpreter3.evaluate(ROW), is(new Date(864000000)));
}
/**
 * Starts the activation-with-letter flow: requires an AUTHENTICATED auth app
 * session, then builds a fresh INITIALIZED app session bound to the user's
 * authenticator and registration, writes a remote audit log entry, and returns
 * the new session id.
 */
@Override
public AppResponse process(Flow flow, ActivationWithCodeRequest body) {
    var authAppSession = appSessionService.getSession(body.getAuthSessionId());
    if (!State.AUTHENTICATED.name().equals(authAppSession.getState())){
        return new NokResponse();
    }

    // Fresh session for the letter-based (re-)activation flow.
    appSession = new AppSession();
    appSession.setState(State.INITIALIZED.name());
    appSession.setFlow(body.isReRequestLetter() ? ReApplyActivateActivationCode.NAME : ActivateAccountAndAppFlow.NAME);
    appSession.setActivationMethod(ActivationMethod.LETTER);
    appSession.setAction(body.isReRequestLetter() ? "re_request_letter" : "activation_by_letter");

    AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(body.getUserAppId());
    appSession.setAccountId(appAuthenticator.getAccountId());
    appSession.setUserAppId(appAuthenticator.getUserAppId());
    appSession.setDeviceName(appAuthenticator.getDeviceName());
    appSession.setInstanceId(appAuthenticator.getInstanceId());

    Map<String, String> result = digidClient.getRegistrationByAccount(appAuthenticator.getAccountId());
    // NOTE(review): assumes the status key is always present in the client's
    // response map; a missing key would NPE on equals -- confirm the contract.
    if (!result.get(lowerUnderscore(STATUS)).equals("OK")) return new NokResponse();

    var registrationId = result.get(lowerUnderscore(REGISTRATION_ID));
    if (registrationId != null) {
        appSession.setRegistrationId(Long.valueOf(registrationId));
    }
    appSession.setWithBsn(Boolean.valueOf(result.get("has_bsn")));

    // Remote audit log entry "1089" for this activation attempt.
    digidClient.remoteLog("1089", Map.of(
            lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
            lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
            lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));

    return new AppSessionResponse(appSession.getId(), Instant.now().getEpochSecond());
}
// Verifies that process() returns a NokResponse when the registration lookup for the
// account does not report status OK (requestNOK stub).
@Test void processNOKTest() { var mockedAppSession = new AppSession(); mockedAppSession.setAccountId(1L); mockedAppSession.setUserAppId(USER_APP_ID); mockedAppSession.setId(APP_SESSION_ID); var mockedAppAuthenticator = new AppAuthenticator(); mockedAppAuthenticator.setUserAppId(USER_APP_ID); mockedAppAuthenticator.setDeviceName(DEVICE_NAME); mockedAppAuthenticator.setInstanceId("test"); mockedAppAuthenticator.setAccountId(2L); when(appAuthenticatorService.findByUserAppId(USER_APP_ID)).thenReturn(mockedAppAuthenticator); when(appSessionService.getSession(APP_SESSION_ID)).thenReturn(mockedAppSession); when(digidClientMock.getRegistrationByAccount(mockedAppAuthenticator.getAccountId())).thenReturn(requestNOK); AppResponse appResponse = startActivationWithCode.process(mockedFlow, activationWithCodeRequest()); assertTrue(appResponse instanceof NokResponse); }
// Writes metadata for every file in the worker's list using the session's Metadata
// feature; checks for cancellation before each file so a long batch can be aborted.
// Returns Boolean.TRUE on completion (worker framework contract).
@Override public Boolean run(final Session<?> session) throws BackgroundException { final Metadata feature = session.getFeature(Metadata.class); if(log.isDebugEnabled()) { log.debug(String.format("Run with feature %s", feature)); } for(Path file : files) { if(this.isCanceled()) { throw new ConnectionCanceledException(); } this.write(session, feature, file); } return true; }
// Verifies that the worker merges new metadata keys into a file's existing metadata:
// the stub Metadata feature asserts both the old (k1) and added (k2) entries are set.
@Test public void testRunAdd() throws Exception { final List<Path> files = new ArrayList<>(); final Path p = new Path("a", EnumSet.of(Path.Type.file)); files.add(p); final Map<String, String> previous = new HashMap<>(); previous.put("k1", "v1"); p.attributes().setMetadata(previous); final Map<String, String> updated = new HashMap<>(); updated.put("k1", "v1"); updated.put("k2", "v2"); WriteMetadataWorker worker = new WriteMetadataWorker(files, updated, false, new DisabledProgressListener()) { @Override public void cleanup(final Boolean map) { fail(); } }; final AtomicBoolean call = new AtomicBoolean(); worker.run(new NullSession(new Host(new TestProtocol())) { @Override @SuppressWarnings("unchecked") public <T> T _getFeature(final Class<T> type) { if(type == Metadata.class) { return (T) new Metadata() { @Override public Map<String, String> getDefault(final Local local) { return Collections.emptyMap(); } @Override public Map<String, String> getMetadata(final Path file) { throw new UnsupportedOperationException(); } @Override public void setMetadata(final Path file, final TransferStatus status) { assertTrue(status.getMetadata().containsKey("k1")); assertTrue(status.getMetadata().containsKey("k2")); assertEquals("v1", status.getMetadata().get("k1")); assertEquals("v2", status.getMetadata().get("k2")); call.set(true); } }; } return super._getFeature(type); } }); assertTrue(call.get()); }
// Expands this snippet for the given cell value, dispatching on the snippet type:
// FORALL repeats the template per token, INDEXED substitutes positional values,
// anything else performs a single substitution.
public String build( final String cellValue ) { switch ( type ) { case FORALL: return buildForAll( cellValue ); case INDEXED: return buildMulti( cellValue ); default: return buildSingle( cellValue ); } }
// Verifies that a forall(&&) snippet expands a comma-separated cell value into
// one clause per token joined with "&&".
@Test public void testForAllAndCSV() { final String snippet = "forall(&&){something == $}"; final SnippetBuilder snip = new SnippetBuilder(snippet); final String result = snip.build("x, y"); assertThat(result).isEqualTo("something == x && something == y"); }
// Closes all cached backend connections, optionally rolling back first when the
// session is inside a transaction. Session-variable resets and per-connection
// failures are collected rather than thrown, so every connection gets a close
// attempt. Post-processors are kept on force-rollback so a retried transaction
// can re-apply them.
public Collection<SQLException> closeConnections(final boolean forceRollback) { Collection<SQLException> result = new LinkedList<>(); synchronized (cachedConnections) { resetSessionVariablesIfNecessary(cachedConnections.values(), result); for (Connection each : cachedConnections.values()) { try { if (forceRollback && connectionSession.getTransactionStatus().isInTransaction()) { each.rollback(); } each.close(); } catch (final SQLException ex) { result.add(ex); } } cachedConnections.clear(); } if (!forceRollback) { connectionPostProcessors.clear(); } return result; }
// Verifies that closing connections without force-rollback resets recorded session
// variables (RESET ALL for PostgreSQL) and empties the variable recorder.
@Test void assertCloseConnectionsAndResetVariables() throws SQLException { connectionSession.getRequiredSessionVariableRecorder().setVariable("key", "default"); Connection connection = mock(Connection.class, RETURNS_DEEP_STUBS); when(connection.getMetaData().getDatabaseProductName()).thenReturn("PostgreSQL"); databaseConnectionManager.getCachedConnections().put("", connection); databaseConnectionManager.closeConnections(false); verify(connection.createStatement()).execute("RESET ALL"); assertTrue(connectionSession.getRequiredSessionVariableRecorder().isEmpty()); }
// Allocates slots for the given execution attempts by delegating to the per-vertex
// allocator and re-keying the results by attempt id. Rejects multiple concurrent
// attempts of the same execution vertex, which this allocator does not support
// (detected via the vertex-id map collapsing duplicates).
@Override public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor( List<ExecutionAttemptID> executionAttemptIds) { final Map<ExecutionVertexID, ExecutionAttemptID> vertexIdToExecutionId = new HashMap<>(); executionAttemptIds.forEach( executionId -> vertexIdToExecutionId.put(executionId.getExecutionVertexId(), executionId)); checkState( vertexIdToExecutionId.size() == executionAttemptIds.size(), "SlotSharingExecutionSlotAllocator does not support one execution vertex to have multiple concurrent executions"); final List<ExecutionVertexID> vertexIds = executionAttemptIds.stream() .map(ExecutionAttemptID::getExecutionVertexId) .collect(Collectors.toList()); return allocateSlotsForVertices(vertexIds).stream() .collect( Collectors.toMap( vertexAssignment -> vertexIdToExecutionId.get( vertexAssignment.getExecutionVertexId()), vertexAssignment -> new ExecutionSlotAssignment( vertexIdToExecutionId.get( vertexAssignment.getExecutionVertexId()), vertexAssignment.getLogicalSlotFuture()))); }
// Verifies that when a physical slot request fails, the pending requests of the
// slot-request bulk are cleared so the bulk checker does not time out dead requests.
@Test void testBulkClearIfPhysicalSlotRequestFails() { TestingPhysicalSlotRequestBulkChecker bulkChecker = new TestingPhysicalSlotRequestBulkChecker(); AllocationContext context = createBulkCheckerContextWithEv12GroupAndEv3Group(bulkChecker); context.allocateSlotsFor(EV1, EV3); SlotRequestId slotRequestId = context.getSlotProvider().getFirstRequestOrFail().getSlotRequestId(); context.getSlotProvider() .getResultForRequestId(slotRequestId) .completeExceptionally(new Throwable()); PhysicalSlotRequestBulk bulk = bulkChecker.getBulk(); assertThat(bulk.getPendingRequests()).isEmpty(); }
// Stores a PROXY-protocol TLV value on the channel as a string attribute keyed by
// the TLV type byte (hex-suffixed prefix). Non-ASCII payloads are ignored, since
// they cannot be represented safely as attribute strings.
protected void handleHAProxyTLV(HAProxyTLV tlv, Channel channel) { byte[] valueBytes = ByteBufUtil.getBytes(tlv.content()); if (!BinaryUtil.isAscii(valueBytes)) { return; } AttributeKey<String> key = AttributeKeys.valueOf( HAProxyConstants.PROXY_PROTOCOL_TLV_PREFIX + String.format("%02x", tlv.typeByteValue())); channel.attr(key).set(new String(valueBytes, CharsetUtil.UTF_8)); }
// Smoke test: an ASCII TLV (type 0xE1) is accepted and stored as a channel
// attribute without throwing.
@Test public void handleHAProxyTLV() { when(channel.attr(any(AttributeKey.class))).thenReturn(attribute); doNothing().when(attribute).set(any()); ByteBuf content = Unpooled.buffer(); content.writeBytes("xxxx".getBytes(StandardCharsets.UTF_8)); HAProxyTLV haProxyTLV = new HAProxyTLV((byte) 0xE1, content); nettyRemotingServer.handleHAProxyTLV(haProxyTLV, channel); }
// Builds the Elasticsearch component text-search query: the GENERATE_RESULTS
// features form the mandatory "must" clause (at least one feature is required),
// while CHANGE_ORDER_OF_RESULTS features are added as an optional "should" clause
// that only influences scoring.
public static QueryBuilder createQuery(ComponentTextSearchQuery query, ComponentTextSearchFeature... features) { checkArgument(features.length > 0, "features cannot be empty"); BoolQueryBuilder esQuery = boolQuery().must( createQuery(query, features, UseCase.GENERATE_RESULTS) .orElseThrow(() -> new IllegalStateException("No text search features found to generate search results. Features: " + Arrays.toString(features)))); createQuery(query, features, UseCase.CHANGE_ORDER_OF_RESULTS) .ifPresent(esQuery::should); return esQuery; }
// Verifies the JSON shape of a KEY-feature query: a boosted match on the
// key.sortable_analyzer field wrapped in bool/must/should.
@Test public void create_query() { QueryBuilder result = createQuery(ComponentTextSearchQuery.builder() .setQueryText("SonarQube").setFieldKey("key").setFieldName("name").build(), ComponentTextSearchFeatureRepertoire.KEY); assertJson(result.toString()).isSimilarTo("{" + "  \"bool\" : {" + "    \"must\" : [{" + "      \"bool\" : {" + "        \"should\" : [{" + "          \"match\" : {" + "            \"key.sortable_analyzer\" : {" + "              \"query\" : \"SonarQube\"," + "              \"boost\" : 50.0\n" + "            }" + "          }" + "        }]" + "      }" + "    }]" + "  }" + "}"); }
/**
 * Writes a call site reference in smali syntax:
 * {@code name("methodName", proto[, extraArg...])@Ldefining/class;->linkerMethod(...)}.
 *
 * @param callSiteReference the call site to render
 * @throws IOException if the underlying writer fails
 * @throws IllegalArgumentException if the call site's linker method handle is not
 *         of type invoke-static (the only type the DEX format permits)
 */
public void writeCallSite(CallSiteReference callSiteReference) throws IOException {
    // Validate the linker handle before emitting anything, so an invalid call site
    // does not leave a partially written entry on the output.
    MethodHandleReference methodHandle = callSiteReference.getMethodHandle();
    if (methodHandle.getMethodHandleType() != MethodHandleType.INVOKE_STATIC) {
        throw new IllegalArgumentException("The linker method handle for a call site must be of type invoke-static");
    }

    writeSimpleName(callSiteReference.getName());
    writer.write('(');
    writeQuotedString(callSiteReference.getMethodName());
    writer.write(", ");
    writeMethodProtoDescriptor(callSiteReference.getMethodProto());

    // Optional extra bootstrap arguments, comma-separated.
    for (EncodedValue encodedValue : callSiteReference.getExtraArguments()) {
        writer.write(", ");
        writeEncodedValue(encodedValue);
    }
    writer.write(")@");

    // Reuse the already-fetched handle instead of calling getMethodHandle() again.
    writeMethodDescriptor((MethodReference) methodHandle.getMemberReference());
}
// Verifies the full textual form of a rendered call site, including the quoted
// method name, proto descriptor, extra arguments and the trailing linker method.
@Test public void testWriteCallsite() throws IOException { DexFormattedWriter writer = new DexFormattedWriter(output); writer.writeCallSite(getCallSiteReference()); Assert.assertEquals( "callsiteName(\"callSiteMethodName\", " + "(Lparam1;Lparam2;)Lreturn/type;, Ldefining/class;->fieldName:Lfield/type;, " + "Ldefining/class;->methodName(Lparam1;Lparam2;)Lreturn/type;)@" + "Ldefining/class;->methodName(Lparam1;Lparam2;)Lreturn/type;", output.toString()); }
// Translates a portable AfterProcessingTime trigger proto into a runner state
// machine, applying its timestamp transforms in order: ALIGN_TO rounds the firing
// time to a period/offset grid, DELAY adds a fixed delay. An unset or unknown
// transform case is a malformed proto and is rejected.
private static TriggerStateMachine stateMachineForAfterProcessingTime( RunnerApi.Trigger.AfterProcessingTime trigger) { AfterDelayFromFirstElementStateMachine stateMachine = AfterProcessingTimeStateMachine.pastFirstElementInPane(); for (RunnerApi.TimestampTransform transform : trigger.getTimestampTransformsList()) { switch (transform.getTimestampTransformCase()) { case ALIGN_TO: stateMachine = stateMachine.alignedTo( Duration.millis(transform.getAlignTo().getPeriod()), new Instant(transform.getAlignTo().getOffset())); break; case DELAY: stateMachine = stateMachine.plusDelayOf(Duration.millis(transform.getDelay().getDelayMillis())); break; case TIMESTAMPTRANSFORM_NOT_SET: throw new IllegalArgumentException( String.format("Required field 'timestamp_transform' not set in %s", transform)); default: throw new IllegalArgumentException( String.format( "Unknown timestamp transform case: %s", transform.getTimestampTransformCase())); } } return stateMachine; }
// Verifies that an AfterProcessingTime trigger proto with delay and align
// transforms converts to a processing-time delay state machine.
@Test public void testStateMachineForAfterProcessingTime() { Duration minutes = Duration.standardMinutes(94); Duration hours = Duration.standardHours(13); RunnerApi.Trigger trigger = RunnerApi.Trigger.newBuilder() .setAfterProcessingTime( RunnerApi.Trigger.AfterProcessingTime.newBuilder() .addTimestampTransforms( RunnerApi.TimestampTransform.newBuilder() .setDelay( RunnerApi.TimestampTransform.Delay.newBuilder() .setDelayMillis(minutes.getMillis()))) .addTimestampTransforms( RunnerApi.TimestampTransform.newBuilder() .setAlignTo( RunnerApi.TimestampTransform.AlignTo.newBuilder() .setPeriod(hours.getMillis())))) .build(); AfterDelayFromFirstElementStateMachine machine = (AfterDelayFromFirstElementStateMachine) TriggerStateMachines.stateMachineForTrigger(trigger); assertThat(machine.getTimeDomain(), equalTo(TimeDomain.PROCESSING_TIME)); }
// Deserializes JSON bytes into the target type: parses with the configured MAPPER
// (not the JsonSchemaConverter, because BigDecimal-for-floats is required), coerces
// the tree to the declared schema, and casts to the target type. Null payloads map
// to null (tombstones). On failure the JSON parse location is cleared first so the
// raw payload is never leaked into logs, then a SerializationException is thrown.
@Override public T deserialize(final String topic, final byte[] bytes) { try { if (bytes == null) { return null; } // don't use the JsonSchemaConverter to read this data because // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS, // which is not currently available in the standard converters final JsonNode value = isJsonSchema ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class) : MAPPER.readTree(bytes); final Object coerced = enforceFieldType( "$", new JsonValueContext(value, schema) ); if (LOG.isTraceEnabled()) { LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced); } return SerdeUtils.castToTargetType(coerced, targetType); } catch (final Exception e) { // Clear location in order to avoid logging data, for security reasons if (e instanceof JsonParseException) { ((JsonParseException) e).clearLocation(); } throw new SerializationException( "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e); } }
// Verifies that a JSON key differing only in case from the schema field is
// ignored: the result is an empty Struct, not a populated or failed one.
@Test public void shouldIgnoreDeserializeJsonObjectCaseMismatch() { // Given: final Map<String, Object> anOrder = ImmutableMap.<String, Object>builder() .put("CASEFIELD", 1L) .build(); final byte[] bytes = serializeJson(anOrder); // When: final Struct result = deserializer.deserialize(SOME_TOPIC, bytes); // Then: assertThat(result, is(new Struct(ORDER_SCHEMA))); }
// Accessor: whether this context/cache was configured in asynchronous mode.
@Override public boolean isAsync() { return isAsync; }
@Test(dataProvider = "caches") @CacheSpec(population = Population.EMPTY) public void cacheFactory_byMethodHandle( BoundedLocalCache<Int, Int> cache, CacheContext context) throws Throwable { var methodHandleFactory = new MethodHandleBasedFactory(cache.getClass()); var staticFactory = LocalCacheFactory.loadFactory(cache.getClass().getSimpleName()); assertThat(methodHandleFactory).isNotSameInstanceAs(staticFactory); var c1 = methodHandleFactory.newInstance( context.caffeine(), /* cacheLoader */ null, context.isAsync()); var c2 = staticFactory.newInstance( context.caffeine(), /* cacheLoader */ null, context.isAsync()); assertThat(c1.getClass()).isEqualTo(c2.getClass()); }
// Decodes a raw GELF payload into a Message: decompresses/parses the JSON, validates
// mandatory GELF fields, resolves the timestamp (falling back to the raw message's
// receive time when the client timestamp is absent or non-positive), maps the
// well-known GELF fields, then copies all remaining fields with these rules:
// "version" is dropped, the leading underscore of additional fields is stripped,
// already-set and reserved (non-settable) fields are skipped, JSON containers are
// stringified, numbers keep an integral/floating distinction, and nulls are skipped.
@Nullable @Override public Message decode(@Nonnull final RawMessage rawMessage) { final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress()); final String json = gelfMessage.getJSON(decompressSizeLimit, charset); final JsonNode node; try { node = objectMapper.readTree(json); if (node == null) { throw new IOException("null result"); } } catch (final Exception e) { log.error("Could not parse JSON, first 400 characters: " + StringUtils.abbreviate(json, 403), e); throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e); } try { validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress()); } catch (IllegalArgumentException e) { log.trace("Invalid GELF message <{}>", node); throw e; } // Timestamp. final double messageTimestamp = timestampValue(node); final DateTime timestamp; if (messageTimestamp <= 0) { timestamp = rawMessage.getTimestamp(); } else { // we treat this as a unix timestamp timestamp = Tools.dateTimeFromDouble(messageTimestamp); } final Message message = messageFactory.createMessage( stringValue(node, "short_message"), stringValue(node, "host"), timestamp ); message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message")); final String file = stringValue(node, "file"); if (file != null && !file.isEmpty()) { message.addField("file", file); } final long line = longValue(node, "line"); if (line > -1) { message.addField("line", line); } // Level is set by server if not specified by client. final int level = intValue(node, "level"); if (level > -1) { message.addField("level", level); } // Facility is set by server if not specified by client. final String facility = stringValue(node, "facility"); if (facility != null && !facility.isEmpty()) { message.addField("facility", facility); } // Add additional data if there is some. 
final Iterator<Map.Entry<String, JsonNode>> fields = node.fields(); while (fields.hasNext()) { final Map.Entry<String, JsonNode> entry = fields.next(); String key = entry.getKey(); // Do not index useless GELF "version" field. if ("version".equals(key)) { continue; } // Don't include GELF syntax underscore in message field key. if (key.startsWith("_") && key.length() > 1) { key = key.substring(1); } // We already set short_message and host as message and source. Do not add as fields again. if ("short_message".equals(key) || "host".equals(key)) { continue; } // Skip standard or already set fields. if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) { continue; } // Convert JSON containers to Strings, and pick a suitable number representation. final JsonNode value = entry.getValue(); final Object fieldValue; if (value.isContainerNode()) { fieldValue = value.toString(); } else if (value.isFloatingPointNumber()) { fieldValue = value.asDouble(); } else if (value.isIntegralNumber()) { fieldValue = value.asLong(); } else if (value.isNull()) { log.debug("Field [{}] is NULL. Skipping.", key); continue; } else if (value.isTextual()) { fieldValue = value.asText(); } else { log.debug("Field [{}] has unknown value type. Skipping.", key); continue; } message.addField(key, fieldValue); } return message; }
// Verifies that GELF validation rejects a message whose mandatory "host" field is
// blank (whitespace only), raising IllegalArgumentException with no cause.
@Test public void decodeFailsWithBlankHost() throws Exception { final String json = "{" + "\"version\": \"1.1\"," + "\"host\": \" \"," + "\"short_message\": \"A short message that helps you identify what is going on\"" + "}"; final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8)); assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage)) .withNoCause() .withMessageMatching("GELF message <[0-9a-f-]+> has empty mandatory \"host\" field."); }
// Populates the components config: the apply-on-restart flag (set once is enough),
// all registered components, and the DI registries-hack component required by the
// container's HandlersConfigurer.
@Override public void getConfig(ComponentsConfig.Builder builder) { builder.setApplyOnRestart(getDeferChangesUntilRestart()); // Sufficient to set on one config builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); }
// Verifies that a hosted container cluster propagates the deploy-state zone
// (environment, region, system) into the config server config.
@Test void requireThatWeCanGetTheZoneConfig() { DeployState state = new DeployState.Builder().properties(new TestProperties().setHostedVespa(true)) .zone(new Zone(SystemName.cd, Environment.test, RegionName.from("some-region"))) .build(); MockRoot root = new MockRoot("foo", state); ContainerCluster<?> cluster = new ApplicationContainerCluster(root, "container0", "container1", state); ConfigserverConfig.Builder builder = new ConfigserverConfig.Builder(); cluster.getConfig(builder); ConfigserverConfig config = new ConfigserverConfig(builder); assertEquals(Environment.test.value(), config.environment()); assertEquals("some-region", config.region()); assertEquals("cd", config.system()); }
/**
 * Feeds each input line to the child process's stdin, flushing after every line,
 * then closes the stream to signal end-of-input to the process.
 *
 * @param inputs the lines to type, in order
 */
public void typeInputToConsole(List<String> inputs) {
    inputs.forEach(line -> {
        processInputStream.println(line);
        // Flush per line so the process sees each input immediately.
        processInputStream.flush();
    });
    processInputStream.close();
}
// Verifies that typed inputs arrive on the process's stdin as separate
// newline-terminated lines, in order.
@Test void shouldTypeInputToConsole() { OutputStream processInputStream = new ByteArrayOutputStream(); Process process = getMockedProcess(processInputStream); ProcessWrapper processWrapper = new ProcessWrapper(process, null, "", inMemoryConsumer(), UTF_8, null); ArrayList<String> inputs = new ArrayList<>(); inputs.add("input1"); inputs.add("input2"); processWrapper.typeInputToConsole(inputs); String input = processInputStream.toString(); String[] parts = input.split("\\r?\\n"); assertThat(parts[0]).isEqualTo("input1"); assertThat(parts[1]).isEqualTo("input2"); }
// Opens the function: after the superclass setup, instantiates the generated
// record equaliser with the user-code classloader so user types resolve correctly.
@Override public void open(ExecutionContext ctx) throws Exception { super.open(ctx); equaliser = genRecordEqualiser.newInstance(ctx.getRuntimeContext().getUserCodeClassLoader()); }
// Verifies keep-last-row deduplication without UPDATE_BEFORE messages: output is
// emitted only when a mini-batch bundle triggers, later values for a key are
// emitted as UPDATE_AFTER only, and new keys as INSERT.
@Test public void testWithoutGenerateUpdateBefore() throws Exception { ProcTimeMiniBatchDeduplicateKeepLastRowFunction func = createFunction(false, true, minTime.toMilliseconds()); OneInputStreamOperatorTestHarness<RowData, RowData> testHarness = createTestHarness(func); testHarness.open(); testHarness.processElement(insertRecord("book", 1L, 10)); testHarness.processElement(insertRecord("book", 2L, 11)); // output is empty because bundle not trigger yet. assertThat(testHarness.getOutput()).isEmpty(); testHarness.processElement(insertRecord("book", 1L, 13)); List<Object> expectedOutput = new ArrayList<>(); expectedOutput.add(insertRecord("book", 2L, 11)); expectedOutput.add(insertRecord("book", 1L, 13)); assertor.assertOutputEqualsSorted("output wrong.", expectedOutput, testHarness.getOutput()); testHarness.processElement(insertRecord("book", 1L, 12)); testHarness.processElement(insertRecord("book", 2L, 11)); testHarness.processElement(insertRecord("book", 3L, 11)); expectedOutput.add(updateAfterRecord("book", 1L, 12)); expectedOutput.add(updateAfterRecord("book", 2L, 11)); expectedOutput.add(insertRecord("book", 3L, 11)); testHarness.close(); assertor.assertOutputEqualsSorted("output wrong.", expectedOutput, testHarness.getOutput()); }
/**
 * Renders an AST node as formatted SQL text, with any trailing newline
 * characters removed.
 *
 * @param root the AST node to format
 * @return the formatted SQL, without trailing newlines
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    new Formatter(sql).process(root, 0);
    // Strip trailing '\n' characters (equivalent to StringUtils.stripEnd(s, "\n")).
    int end = sql.length();
    while (end > 0 && sql.charAt(end - 1) == '\n') {
        end--;
    }
    return sql.substring(0, end);
}
// Verifies that a ListVariables statement formats back to "SHOW VARIABLES".
@Test public void shouldFormatShowVariables() { // Given: final ListVariables listVariables = new ListVariables(Optional.empty()); // When: final String formatted = SqlFormatter.formatSql(listVariables); // Then: assertThat(formatted, is("SHOW VARIABLES")); }
/**
 * Returns a serializer for the given class, or {@code null} when no configured
 * serialization accepts it.
 *
 * @param c the class to serialize
 * @return a serializer for {@code c}, or {@code null} if none is available
 */
public <T> Serializer<T> getSerializer(Class<T> c) {
    final Serialization<T> serialization = getSerialization(c);
    return serialization == null ? null : serialization.getSerializer(c);
}
// Verifies that whitespace around a configured serialization class name is
// trimmed, so the factory still resolves a serializer for it.
@Test public void testSerializationKeyIsTrimmed() { Configuration conf = new Configuration(); conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, " org.apache.hadoop.io.serializer.WritableSerialization "); SerializationFactory factory = new SerializationFactory(conf); assertNotNull("Valid class must be returned", factory.getSerializer(LongWritable.class)); }
/**
 * Maps a low-level group-metadata log-append error onto the coordinator-level
 * error returned to clients: transient replication/timeout failures become
 * COORDINATOR_NOT_AVAILABLE, leadership/storage failures become NOT_COORDINATOR,
 * oversized-record failures become UNKNOWN_SERVER_ERROR, and anything else is
 * passed through unchanged.
 */
static Errors appendGroupMetadataErrorToResponseError(Errors appendError) {
    if (appendError == Errors.UNKNOWN_TOPIC_OR_PARTITION
            || appendError == Errors.NOT_ENOUGH_REPLICAS
            || appendError == Errors.REQUEST_TIMED_OUT) {
        return Errors.COORDINATOR_NOT_AVAILABLE;
    }
    if (appendError == Errors.NOT_LEADER_OR_FOLLOWER
            || appendError == Errors.KAFKA_STORAGE_ERROR) {
        return Errors.NOT_COORDINATOR;
    }
    if (appendError == Errors.MESSAGE_TOO_LARGE
            || appendError == Errors.RECORD_LIST_TOO_LARGE
            || appendError == Errors.INVALID_FETCH_SIZE) {
        return Errors.UNKNOWN_SERVER_ERROR;
    }
    return appendError;
}
// Verifies the mapping from group-metadata append errors to the coordinator-level
// errors returned to JoinGroup callers, including the default pass-through branch.
@Test
public void testJoinGroupAppendErrorConversion() {
    assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, appendGroupMetadataErrorToResponseError(Errors.UNKNOWN_TOPIC_OR_PARTITION));
    assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, appendGroupMetadataErrorToResponseError(Errors.NOT_ENOUGH_REPLICAS));
    assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, appendGroupMetadataErrorToResponseError(Errors.REQUEST_TIMED_OUT));
    assertEquals(Errors.NOT_COORDINATOR, appendGroupMetadataErrorToResponseError(Errors.NOT_LEADER_OR_FOLLOWER));
    assertEquals(Errors.NOT_COORDINATOR, appendGroupMetadataErrorToResponseError(Errors.KAFKA_STORAGE_ERROR));
    assertEquals(Errors.UNKNOWN_SERVER_ERROR, appendGroupMetadataErrorToResponseError(Errors.MESSAGE_TOO_LARGE));
    assertEquals(Errors.UNKNOWN_SERVER_ERROR, appendGroupMetadataErrorToResponseError(Errors.RECORD_LIST_TOO_LARGE));
    assertEquals(Errors.UNKNOWN_SERVER_ERROR, appendGroupMetadataErrorToResponseError(Errors.INVALID_FETCH_SIZE));
    // Errors without an explicit mapping must pass through unchanged (default branch).
    // The original assertion compared the constant against itself, which tested nothing.
    assertEquals(Errors.LEADER_NOT_AVAILABLE, appendGroupMetadataErrorToResponseError(Errors.LEADER_NOT_AVAILABLE));
}
// Loads the Web Services step configuration from its XML node: URL, operation
// identifiers, proxy and HTTP-auth settings, behavioral flags (compatibility
// defaults to true when the tag is absent — legacy behavior), then the input and
// output field mappings. The databases and metaStore parameters are part of the
// step-meta interface but are not used here.
public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException { // Load the URL // setUrl( XMLHandler.getTagValue( stepnode, "wsURL" ) ); // Load the operation // setOperationName( XMLHandler.getTagValue( stepnode, "wsOperation" ) ); setOperationRequestName( XMLHandler.getTagValue( stepnode, "wsOperationRequest" ) ); setOperationNamespace( XMLHandler.getTagValue( stepnode, "wsOperationNamespace" ) ); setInFieldContainerName( XMLHandler.getTagValue( stepnode, "wsInFieldContainer" ) ); setInFieldArgumentName( XMLHandler.getTagValue( stepnode, "wsInFieldArgument" ) ); setOutFieldContainerName( XMLHandler.getTagValue( stepnode, "wsOutFieldContainer" ) ); setOutFieldArgumentName( XMLHandler.getTagValue( stepnode, "wsOutFieldArgument" ) ); setProxyHost( XMLHandler.getTagValue( stepnode, "proxyHost" ) ); setProxyPort( XMLHandler.getTagValue( stepnode, "proxyPort" ) ); setHttpLogin( XMLHandler.getTagValue( stepnode, "httpLogin" ) ); setHttpPassword( XMLHandler.getTagValue( stepnode, "httpPassword" ) ); setCallStep( Const.toInt( XMLHandler.getTagValue( stepnode, "callStep" ), DEFAULT_STEP ) ); setPassingInputData( "Y".equalsIgnoreCase( XMLHandler.getTagValue( stepnode, "passingInputData" ) ) ); String compat = XMLHandler.getTagValue( stepnode, "compatible" ); setCompatible( Utils.isEmpty( compat ) || "Y".equalsIgnoreCase( compat ) ); setRepeatingElementName( XMLHandler.getTagValue( stepnode, "repeating_element" ) ); setReturningReplyAsString( "Y".equalsIgnoreCase( XMLHandler.getTagValue( stepnode, "reply_as_string" ) ) ); // Load the input fields mapping // getFieldsIn().clear(); Node fields = XMLHandler.getSubNode( stepnode, "fieldsIn" ); int nrfields = XMLHandler.countNodes( fields, "field" ); for ( int i = 0; i < nrfields; ++i ) { Node fnode = XMLHandler.getSubNodeByNr( fields, "field", i ); WebServiceField field = new WebServiceField(); field.setName( XMLHandler.getTagValue( fnode, "name" ) ); field.setWsName( 
XMLHandler.getTagValue( fnode, "wsName" ) ); field.setXsdType( XMLHandler.getTagValue( fnode, "xsdType" ) ); getFieldsIn().add( field ); } // Load the output fields mapping // getFieldsOut().clear(); fields = XMLHandler.getSubNode( stepnode, "fieldsOut" ); nrfields = XMLHandler.countNodes( fields, "field" ); for ( int i = 0; i < nrfields; ++i ) { Node fnode = XMLHandler.getSubNodeByNr( fields, "field", i ); WebServiceField field = new WebServiceField(); field.setName( XMLHandler.getTagValue( fnode, "name" ) ); field.setWsName( XMLHandler.getTagValue( fnode, "wsName" ) ); field.setXsdType( XMLHandler.getTagValue( fnode, "xsdType" ) ); getFieldsOut().add( field ); } }
// Verifies end-to-end XML loading of WebServiceMeta (connection, operation, proxy,
// flags, in/out field mappings) and that clone() yields an equal, distinct copy.
@Test public void testLoadXml() throws Exception { Node node = getTestNode(); DatabaseMeta dbMeta = mock( DatabaseMeta.class ); IMetaStore metastore = mock( IMetaStore.class ); WebServiceMeta webServiceMeta = new WebServiceMeta( node, Collections.singletonList( dbMeta ), metastore ); assertEquals( "httpUser", webServiceMeta.getHttpLogin() ); assertEquals( "tryandguess", webServiceMeta.getHttpPassword() ); assertEquals( "http://webservices.gama-system.com/exchangerates.asmx?WSDL", webServiceMeta.getUrl() ); assertEquals( "GetCurrentExchangeRate", webServiceMeta.getOperationName() ); assertEquals( "opRequestName", webServiceMeta.getOperationRequestName() ); assertEquals( "GetCurrentExchangeRateResult", webServiceMeta.getOutFieldArgumentName() ); assertEquals( "aProxy", webServiceMeta.getProxyHost() ); assertEquals( "4444", webServiceMeta.getProxyPort() ); assertEquals( 1, webServiceMeta.getCallStep() ); assertFalse( webServiceMeta.isPassingInputData() ); assertTrue( webServiceMeta.isCompatible() ); assertFalse( webServiceMeta.isReturningReplyAsString() ); List<WebServiceField> fieldsIn = webServiceMeta.getFieldsIn(); assertEquals( 3, fieldsIn.size() ); assertWebServiceField( fieldsIn.get( 0 ), "Bank", "strBank", "string", 2 ); assertWebServiceField( fieldsIn.get( 1 ), "ToCurrency", "strCurrency", "string", 2 ); assertWebServiceField( fieldsIn.get( 2 ), "Rank", "intRank", "int", 5 ); List<WebServiceField> fieldsOut = webServiceMeta.getFieldsOut(); assertEquals( 1, fieldsOut.size() ); assertWebServiceField( fieldsOut.get( 0 ), "GetCurrentExchangeRateResult", "GetCurrentExchangeRateResult", "decimal", 6 ); WebServiceMeta clone = webServiceMeta.clone(); assertNotSame( clone, webServiceMeta ); assertEquals( clone.getXML(), webServiceMeta.getXML() ); }
// Truncates the target table when configured, but only from the first step copy
// (copy 0 on slave 0) so a multi-copy step truncates once, and never when
// partitioning or table-name-from-field is enabled.
// NOTE(review): the non-empty-partition-ID alternative inside the condition looks
// unreachable given the outer !isPartitioningEnabled() guard — confirm intent.
void truncateTable() throws KettleDatabaseException { if ( !meta.isPartitioningEnabled() && !meta.isTableNameInField() ) { // Only the first one truncates in a non-partitioned step copy // if ( meta.truncateTable() && ( ( getCopy() == 0 && getUniqueStepNrAcrossSlaves() == 0 ) || !Utils.isEmpty( getPartitionID() ) ) ) { data.db.truncateTable( environmentSubstitute( meta.getSchemaName() ), environmentSubstitute( meta .getTableName() ) ); } } }
// Verifies that copy 0 on slave 0 with truncation enabled actually truncates
// the configured table.
@Test public void testTruncateTable_on() throws Exception { when( tableOutputMeta.truncateTable() ).thenReturn( true ); when( tableOutputSpy.getCopy() ).thenReturn( 0 ); when( tableOutputSpy.getUniqueStepNrAcrossSlaves() ).thenReturn( 0 ); when( tableOutputMeta.getTableName() ).thenReturn( "fooTable" ); when( tableOutputMeta.getSchemaName() ).thenReturn( "barSchema" ); tableOutputSpy.truncateTable(); verify( db ).truncateTable( any(), any() ); }
// Extracts the calling application's name from the invocation's Dubbo attachment,
// falling back to the given default. A null invocation or null attachment map is
// treated as a programming error.
public static String getApplication(Invocation invocation, String defaultValue) { if (invocation == null || invocation.getAttachments() == null) { throw new IllegalArgumentException("Bad invocation instance"); } return invocation.getAttachment(DUBBO_APPLICATION_KEY, defaultValue); }
// Verifies that getApplication() delegates to the invocation's attachment lookup
// with the application key and returns its value.
@Test public void testGetApplication() { Invocation invocation = mock(Invocation.class); when(invocation.getAttachments()).thenReturn(new HashMap<String, String>()); when(invocation.getAttachment(DubboUtils.DUBBO_APPLICATION_KEY, "")).thenReturn("consumerA"); String application = DubboUtils.getApplication(invocation, ""); verify(invocation).getAttachment(DubboUtils.DUBBO_APPLICATION_KEY, ""); assertEquals("consumerA", application); }
// Delegates UUID generation to the shared UuidFactoryImpl singleton.
@Override public String create() { return UuidFactoryImpl.INSTANCE.create(); }
// Verifies generated UUIDs have a plausible length and use only URL-safe
// characters. NOTE(review): in the regex, \w already includes '_', so the
// explicit '_' in the class is redundant (harmless).
@Test public void test_format_of_uuid() { String uuid = underTest.create(); assertThat(uuid.length()).isGreaterThan(10).isLessThan(40); // URL-safe: only letters, digits, dash and underscore. assertThat(uuid).matches("^[\\w\\-_]+$"); }
// Total length of the relative path: steps up from the begin node plus steps
// down to the end node.
public int getPathLength() { return beginPath.length + endPath.length; }
// Verifies that the relative path from a node to itself has length 0.
// NOTE(review): method name has a typo ("Pah" for "Path"); left unchanged to
// keep the test identifier stable.
@Test public void zeroLevelAncestorPahLength(){ final NodeModel parent = root(); final NodeRelativePath nodeRelativePath = new NodeRelativePath(parent, parent); assertThat(nodeRelativePath.getPathLength(), equalTo(0)); }
/**
 * Matches a message field against the rule's regex value.
 * A missing field matches only when the rule is inverted; the inverted flag is
 * XOR-ed with the regex result otherwise. A regex-cache failure is logged and
 * treated as no match.
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    // Fetch the field value once: the original evaluated msg.getField(...) twice.
    final Object fieldValue = msg.getField(rule.getField());
    if (fieldValue == null)
        return rule.getInverted();

    try {
        final Pattern pattern = patternCache.get(rule.getValue());
        // InterruptibleCharSequence allows runaway regex matching to be interrupted.
        final CharSequence charSequence = new InterruptibleCharSequence(fieldValue.toString());
        return rule.getInverted() ^ pattern.matcher(charSequence).find();
    } catch (ExecutionException e) {
        LOG.error("Unable to get pattern from regex cache: ", e);
    }

    return false;
}
// Verifies that a non-inverted regex rule does not match when the message lacks
// the rule's field.
@Test public void testMissingFieldShouldNotMatch() throws Exception { final StreamRule rule = getSampleRule(); rule.setField("nonexistingfield"); rule.setValue("^foo"); final Message msg = getSampleMessage(); final StreamRuleMatcher matcher = getMatcher(rule); assertFalse(matcher.match(msg, rule)); }
public static List<String> parse(@Nullable String input, boolean escapeComma, boolean trim) { if (null == input || input.isEmpty()) { return Collections.emptyList(); } Stream<String> tokenStream; if (escapeComma) { // Use regular expression to split on "," unless it is "\," // Use a non-positive limit to apply the replacement as many times as possible and to ensure trailing empty // strings shall not be discarded tokenStream = Arrays.stream(input.split("(?<!\\\\),", -1)) .map(s -> s.replace("\\,", ",")); } else { tokenStream = Arrays.stream(input.split(",")); } if (trim) { tokenStream = tokenStream.map(String::trim); } return tokenStream.collect(Collectors.toList()); }
// Verifies that with escaping disabled the backslash is kept literally ("abc\\"
// token) and every comma splits, while trimming removes surrounding whitespace
// and the trailing newline.
@Test public void testEscapeFalseTrimTrue() { String input = "abc\\,def.ghi, abc.def.ghi\n"; List<String> expectedOutput = Arrays.asList("abc\\", "def.ghi", "abc.def.ghi"); Assert.assertEquals(CsvParser.parse(input, false, true), expectedOutput); }
// Returns the Telegram Bot API method name for this request (the PATH constant).
@Override public String getMethod() { return PATH; }
// Verifies the method name of SetChatMenuButton and that validation rejects a
// web-app menu button whose text is empty.
@Test public void testGetChatMenuButtonAsWebAppWithEmptyText() { SetChatMenuButton setChatMenuButton = SetChatMenuButton .builder() .chatId("123456") .menuButton(MenuButtonWebApp .builder() .text("") .webAppInfo(WebAppInfo.builder().url("My url").build()) .build()) .build(); assertEquals("setChatMenuButton", setChatMenuButton.getMethod()); Throwable thrown = assertThrows(TelegramApiValidationException.class, setChatMenuButton::validate); assertEquals("Text can't be empty", thrown.getMessage()); }
/**
 * Builds the Sentinel resource name for a SOFA RPC invocation, in the form
 * {@code interfaceName#methodName(argType1,argType2,...)}.
 *
 * @param request the SOFA RPC request
 * @return the resource name identifying the invoked method
 */
public static String getMethodResourceName(SofaRequest request) {
    return new StringBuilder(64)
        .append(request.getInterfaceName())
        .append('#')
        .append(request.getMethodName())
        .append('(')
        // Argument signatures joined with commas, same as the manual loop.
        .append(String.join(",", request.getMethodArgSigs()))
        .append(')')
        .toString();
}
// Verifies the exact resource-name format: interface#method(sig1,sig2).
@Test public void testGetMethodResourceName() { SofaRequest request = new SofaRequest(); request.setInterfaceName("com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService"); request.setMethodName("sayHello"); request.setMethodArgSigs(new String[]{"java.lang.String", "int"}); String methodResourceName = SofaRpcUtils.getMethodResourceName(request); assertEquals("com.alibaba.csp.sentinel.adapter.sofa.rpc.service.DemoService#sayHello(java.lang.String,int)", methodResourceName); }
public Map<String, Map<MessageQueue, Long>> invokeBrokerToGetConsumerStatus(final String addr, final String topic, final String group, final String clientAddr, final long timeoutMillis) throws RemotingException, MQClientException, InterruptedException { GetConsumerStatusRequestHeader requestHeader = new GetConsumerStatusRequestHeader(); requestHeader.setTopic(topic); requestHeader.setGroup(group); requestHeader.setClientAddr(clientAddr); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.INVOKE_BROKER_TO_GET_CONSUMER_STATUS, requestHeader); RemotingCommand response = this.remotingClient.invokeSync(MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { if (response.getBody() != null) { GetConsumerStatusBody body = GetConsumerStatusBody.decode(response.getBody(), GetConsumerStatusBody.class); return body.getConsumerTable(); } } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }
// Mocks a successful broker reply carrying a one-entry consumer table and checks it is
// decoded and returned as-is.
@Test public void assertInvokeBrokerToGetConsumerStatus() throws RemotingException, InterruptedException, MQClientException { mockInvokeSync(); GetConsumerStatusBody responseBody = new GetConsumerStatusBody(); responseBody.getConsumerTable().put("key", new HashMap<>()); responseBody.getMessageQueueTable().put(new MessageQueue(), 1L); setResponseBody(responseBody); Map<String, Map<MessageQueue, Long>> actual = mqClientAPI.invokeBrokerToGetConsumerStatus(defaultBrokerAddr, defaultTopic, "", "", defaultTimeout); assertNotNull(actual); assertEquals(1, actual.size()); }
// Appends the HTTP-date (RFC 1123) representation of the date to sb; both arguments are
// null-checked. formatter() is a file-level helper — presumably a per-thread formatter.
public static StringBuilder append(Date date, StringBuilder sb) { return formatter().append0(checkNotNull(date, "date"), checkNotNull(sb, "sb")); }
// DATE is a fixture constant; expects the canonical RFC 1123 / HTTP-date rendering.
@Test public void testAppend() { StringBuilder sb = new StringBuilder(); append(DATE, sb); assertEquals("Sun, 06 Nov 1994 08:49:37 GMT", sb.toString()); }
// Parses the rule's JSON handle into a ContextMappingRuleHandle and caches it under a
// key derived from the rule; rules with a null handle are silently ignored.
@Override public void handlerRule(final RuleData ruleData) { Optional.ofNullable(ruleData.getHandle()).ifPresent(s -> { ContextMappingRuleHandle contextMappingRuleHandle = GsonUtils.getInstance().fromJson(s, ContextMappingRuleHandle.class); CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(ruleData), contextMappingRuleHandle); }); }
// Smoke test: an empty-JSON handle must be parsed and cached without throwing.
@Test public void handlerRuleTest() { contextPathPluginDataHandler.handlerRule(RuleData.builder().handle("{}").build()); }
// Closes the snapshot I/O and, if this caller wins the CAS race for resource ownership,
// runs cleanup exactly once (the other owner would be the normal completion path).
@VisibleForTesting protected void cancel() { closeSnapshotIO(); if (resourceCleanupOwnershipTaken.compareAndSet(false, true)) { cleanup(); } }
// Cancels a blocked snapshot task and checks the call order (call -> cancel -> cleanup)
// plus that both the provided resource and the blocker were closed.
@Test void testCancelRun() throws Exception { Thread runner = startTask(task); while (testBlocker.getWaitersCount() < 1) { Thread.sleep(1L); } task.cancel(true); testBlocker.unblockExceptionally(); assertThatThrownBy(task::get).isInstanceOf(CancellationException.class); runner.join(); assertThat(testAsyncSnapshotCallable.getInvocationOrder()) .containsExactly(METHOD_CALL, METHOD_CANCEL, METHOD_CLEANUP); assertThat(testProvidedResource.isClosed()).isTrue(); assertThat(testBlocker.isClosed()).isTrue(); }
// Creates a DRACOON download share for the file and returns its public URL. For
// Triple-Crypt encrypted files the user's file key is decrypted with the account key
// pair and re-encrypted for a freshly generated share key pair (passphrase either from
// the options or prompted via the callback). The URL layout depends on the server
// version: pre-4.26 uses the "/#/public/shares-downloads/" fragment path.
@Override public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, CreateDownloadShareRequest options, final PasswordCallback callback) throws BackgroundException { try { if(log.isDebugEnabled()) { log.debug(String.format("Create download share for %s", file)); } if(null == options) { options = new CreateDownloadShareRequest(); log.warn(String.format("Use default share options %s", options)); } final Long fileid = Long.parseLong(nodeid.getVersionId(file)); final Host bookmark = session.getHost(); if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(file)) { // get existing file key associated with the sharing user final FileKey key = new NodesApi(session.getClient()).requestUserFileKey(fileid, null, null); final EncryptedFileKey encFileKey = TripleCryptConverter.toCryptoEncryptedFileKey(key); final UserKeyPairContainer keyPairContainer = session.getKeyPairForFileKey(encFileKey.getVersion()); final UserKeyPair userKeyPair = TripleCryptConverter.toCryptoUserKeyPair(keyPairContainer); final Credentials passphrase = new TripleCryptKeyPair().unlock(callback, bookmark, userKeyPair); final PlainFileKey plainFileKey = Crypto.decryptFileKey(encFileKey, userKeyPair.getUserPrivateKey(), passphrase.getPassword().toCharArray()); // encrypt file key with a new key pair final UserKeyPair pair; if(null == options.getPassword()) { pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), callback.prompt( bookmark, LocaleFactory.localizedString("Passphrase", "Cryptomator"), LocaleFactory.localizedString("Provide additional login credentials", "Credentials"), new LoginOptions().icon(session.getHost().getProtocol().disk()) ).getPassword().toCharArray()); } else { pair = Crypto.generateUserKeyPair(session.requiredKeyPairVersion(), options.getPassword().toCharArray()); } final EncryptedFileKey encryptedFileKey = Crypto.encryptFileKey(plainFileKey, pair.getUserPublicKey()); options.setPassword(null); 
// NOTE(review): expiry help text multiplies share.getExpireAt().getMillis() by 1000 —
// confirm whether getMillis() here returns seconds; otherwise this looks off by 1000x.
options.setKeyPair(TripleCryptConverter.toSwaggerUserKeyPairContainer(pair)); options.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptedFileKey)); } final DownloadShare share = new SharesApi(session.getClient()).createDownloadShare( options.nodeId(fileid), StringUtils.EMPTY, null); final String help; if(null == share.getExpireAt()) { help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")); } else { final long expiry = share.getExpireAt().getMillis(); help = MessageFormat.format(LocaleFactory.localizedString("{0} URL"), LocaleFactory.localizedString("Pre-Signed", "S3")) + " (" + MessageFormat.format(LocaleFactory.localizedString("Expires {0}", "S3") + ")", UserDateFormatterFactory.get().getShortFormat(expiry * 1000) ); } final Matcher matcher = Pattern.compile(SDSSession.VERSION_REGEX).matcher(session.softwareVersion().getRestApiVersion()); if(matcher.matches()) { if(new Version(matcher.group(1)).compareTo(new Version("4.26")) < 0) { return new DescriptiveUrl(URI.create(String.format("%s://%s/#/public/shares-downloads/%s", bookmark.getProtocol().getScheme(), bookmark.getHostname(), share.getAccessKey())), DescriptiveUrl.Type.signed, help); } } return new DescriptiveUrl(URI.create(String.format("%s://%s/public/download-shares/%s", bookmark.getProtocol().getScheme(), bookmark.getHostname(), share.getAccessKey())), DescriptiveUrl.Type.signed, help); } catch(ApiException e) { throw new SDSExceptionMappingService(nodeid).map(e); } catch(CryptoException e) { throw new TripleCryptExceptionMappingService().map(e); } }
// Integration test: a share with an expiry date still yields a signed download URL on
// the modern "/public/download-shares/" path; the room is deleted afterwards.
@Test public void testToUrlExpiry() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final Path test = new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); final DescriptiveUrl url = new SDSShareFeature(session, nodeid).toDownloadUrl(test, Share.Sharee.world, new CreateDownloadShareRequest() .expiration(new ObjectExpiration().enableExpiration(true).expireAt(new DateTime(1744300800000L))) .notifyCreator(false) .sendMail(false) .sendSms(false) .password(null) .mailRecipients(null) .mailSubject(null) .mailBody(null) .maxDownloads(null), new DisabledPasswordCallback()); assertNotEquals(DescriptiveUrl.EMPTY, url); assertEquals(DescriptiveUrl.Type.signed, url.getType()); assertTrue(url.getUrl().startsWith("https://duck.dracoon.com/public/download-shares/")); new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// HPACK header-block decode: dynamic table size updates must be consumed first (RFC 7541
// section 4.2); header validation is deferred to sink.finish() so a validation failure
// cannot leave the dynamic table partially updated (corrupted).
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception { Http2HeadersSink sink = new Http2HeadersSink( streamId, headers, maxHeaderListSize, validateHeaders); // Check for dynamic table size updates, which must occur at the beginning: // https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2 decodeDynamicTableSizeUpdates(in); decode(in, sink); // Now that we've read all of our headers we can perform the validation steps. We must // delay throwing until this point to prevent dynamic table corruption. sink.finish(); }
// A response header block containing the request pseudo-header ":method" must fail
// validation with a stream error when validateHeaders is enabled.
@Test public void requestPseudoHeaderInResponse() throws Exception { final ByteBuf in = Unpooled.buffer(200); try { HpackEncoder hpackEncoder = new HpackEncoder(true); Http2Headers toEncode = new DefaultHttp2Headers(); toEncode.add(":status", "200"); toEncode.add(":method", "GET"); hpackEncoder.encodeHeaders(1, in, toEncode, NEVER_SENSITIVE); final Http2Headers decoded = new DefaultHttp2Headers(); assertThrows(Http2Exception.StreamException.class, new Executable() { @Override public void execute() throws Throwable { hpackDecoder.decode(1, in, decoded, true); } }); } finally { in.release(); } }
/**
 * Order-insensitive equality check for two string lists.
 *
 * <p>Two {@code null} lists (or the same instance) are considered equal; a {@code null}
 * and a non-{@code null} list are not. Otherwise the lists must have the same size and
 * mutually contain each other's elements. Note that because containment ignores element
 * multiplicity, lists with the same size but different duplicate counts (e.g.
 * {@code [a,a,b]} vs {@code [a,b,b]}) still compare equal.
 *
 * @param firstList  first list, may be {@code null}
 * @param secondList second list, may be {@code null}
 * @return {@code true} if the lists are equal as described above
 */
public static boolean isListEqual(List<String> firstList, List<String> secondList) {
    if (firstList == secondList) {
        // Same instance or both null.
        return true;
    }
    if (firstList == null || secondList == null) {
        return false;
    }
    if (firstList.size() != secondList.size()) {
        return false;
    }
    return firstList.containsAll(secondList) && secondList.containsAll(firstList);
}
// Equal content must compare equal reflexively and symmetrically.
@Test void testIsListEqualForEquals() { List<String> list1 = Arrays.asList("1", "2", "3"); List<String> list2 = Arrays.asList("1", "2", "3"); assertTrue(CollectionUtils.isListEqual(list1, list1)); assertTrue(CollectionUtils.isListEqual(list1, list2)); assertTrue(CollectionUtils.isListEqual(list2, list1)); }
// Builds a map from each enum constant's name() to the (reflectively read) value of the
// given field on that constant. Returns null — not an empty map — when the class or
// field name is missing, or when the class has no enum constants.
public static Map<String, Object> getNameFieldMap(Class<? extends Enum<?>> clazz, String fieldName) { if(null == clazz || StrUtil.isBlank(fieldName)){ return null; } final Enum<?>[] enums = clazz.getEnumConstants(); if (null == enums) { return null; } final Map<String, Object> map = MapUtil.newHashMap(enums.length, true); for (Enum<?> e : enums) { map.put(e.name(), ReflectUtil.getFieldValue(e, fieldName)); } return map; }
// TestEnum.TEST1's "type" field value must be exposed under the constant's name.
@Test public void getNameFieldMapTest() { Map<String, Object> enumMap = EnumUtil.getNameFieldMap(TestEnum.class, "type"); assert enumMap != null; assertEquals("type1", enumMap.get("TEST1")); }
// Adapts the key-less ValueMapper to a ValueMapperWithKey via withKey() and delegates to
// the richer overload, which also performs the null check on the mapper.
@Override public <VR> KStream<K, VR> flatMapValues(final ValueMapper<? super V, ? extends Iterable<? extends VR>> mapper) { return flatMapValues(withKey(mapper)); }
// A null mapper must be rejected with an NPE carrying the documented message.
@Test public void shouldNotAllowNullMapperOnFlatMapValues() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.flatMapValues((ValueMapper<Object, Iterable<Object>>) null)); assertThat(exception.getMessage(), equalTo("valueMapper can't be null")); }
// Rewrites a configuration for local (mini-cluster) execution: warns about and removes
// options that have no effect locally, maxes out per-task resources unless the user set
// them, aligns the network memory range, and pins the remaining memory components to
// their defaults so the total-memory derivation is self-consistent.
// NOTE(review): JVM_OVERHEAD_MIN is set to JVM_OVERHEAD_MAX.defaultValue(), unlike the
// other options which use their own defaults — presumably intentional to collapse the
// min/max range to a single value, but worth confirming.
public static Configuration adjustForLocalExecution(Configuration config) { UNUSED_CONFIG_OPTIONS.forEach( option -> warnAndRemoveOptionHasNoEffectIfSet(config, option)); setConfigOptionToPassedMaxIfNotSet( config, TaskManagerOptions.CPU_CORES, LOCAL_EXECUTION_CPU_CORES); setConfigOptionToPassedMaxIfNotSet( config, TaskManagerOptions.TASK_HEAP_MEMORY, LOCAL_EXECUTION_TASK_MEMORY); setConfigOptionToPassedMaxIfNotSet( config, TaskManagerOptions.TASK_OFF_HEAP_MEMORY, LOCAL_EXECUTION_TASK_MEMORY); adjustNetworkMemoryForLocalExecution(config); setConfigOptionToDefaultIfNotSet( config, TaskManagerOptions.MANAGED_MEMORY_SIZE, DEFAULT_MANAGED_MEMORY_SIZE); // Set valid default values for unused config options which should have been removed. config.set( TaskManagerOptions.FRAMEWORK_HEAP_MEMORY, TaskManagerOptions.FRAMEWORK_HEAP_MEMORY.defaultValue()); config.set( TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY, TaskManagerOptions.FRAMEWORK_OFF_HEAP_MEMORY.defaultValue()); config.set( TaskManagerOptions.JVM_METASPACE, TaskManagerOptions.JVM_METASPACE.defaultValue()); config.set( TaskManagerOptions.JVM_OVERHEAD_MAX, TaskManagerOptions.JVM_OVERHEAD_MAX.defaultValue()); config.set( TaskManagerOptions.JVM_OVERHEAD_MIN, TaskManagerOptions.JVM_OVERHEAD_MAX.defaultValue()); return config; }
// When only the network-memory max is set, local adjustment must pull min up to match it.
@Test public void testNetworkMinAdjustForLocalExecutionIfMaxSet() { MemorySize networkMemorySize = MemorySize.ofMebiBytes(1); Configuration configuration = new Configuration(); configuration.set(TaskManagerOptions.NETWORK_MEMORY_MAX, networkMemorySize); TaskExecutorResourceUtils.adjustForLocalExecution(configuration); assertThat(configuration.get(TaskManagerOptions.NETWORK_MEMORY_MIN)) .isEqualTo(networkMemorySize); assertThat(configuration.get(TaskManagerOptions.NETWORK_MEMORY_MAX)) .isEqualTo(networkMemorySize); }
// Leader-side group assignment: deserializes every member's metadata, takes the maximum
// config offset seen across members (stragglers get the assignment but won't apply it
// until caught up), and either returns CONFIG_MISMATCH assignments when the leader's own
// config is behind, or delegates to performTaskAssignment.
@Override public Map<String, ByteBuffer> performAssignment(String leaderId, String protocol, List<JoinGroupResponseMember> allMemberMetadata, WorkerCoordinator coordinator) { log.debug("Performing task assignment"); Map<String, ExtendedWorkerState> memberConfigs = new HashMap<>(); for (JoinGroupResponseMember member : allMemberMetadata) { memberConfigs.put( member.memberId(), IncrementalCooperativeConnectProtocol.deserializeMetadata(ByteBuffer.wrap(member.metadata()))); } log.debug("Member configs: {}", memberConfigs); // The new config offset is the maximum seen by any member. We always perform assignment using this offset, // even if some members have fallen behind. The config offset used to generate the assignment is included in // the response so members that have fallen behind will not use the assignment until they have caught up. long maxOffset = memberConfigs.values().stream().map(ExtendedWorkerState::offset).max(Long::compare).get(); log.debug("Max config offset root: {}, local snapshot config offsets root: {}", maxOffset, coordinator.configSnapshot().offset()); short protocolVersion = ConnectProtocolCompatibility.fromProtocol(protocol).protocolVersion(); Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator); if (leaderOffset == null) { Map<String, ExtendedAssignment> assignments = fillAssignments( memberConfigs.keySet(), Assignment.CONFIG_MISMATCH, leaderId, memberConfigs.get(leaderId).url(), maxOffset, ClusterAssignment.EMPTY, 0, protocolVersion); return serializeAssignments(assignments, protocolVersion); } return performTaskAssignment(leaderId, leaderOffset, memberConfigs, coordinator, protocolVersion); }
// Sanity check that assignment under the COMPATIBLE protocol serializes V1 assignments
// for every worker.
@Test public void testProtocolV1() { // Sanity test to make sure that the right protocol is chosen during the assignment connectors.clear(); String leader = "followMe"; List<JoinGroupResponseData.JoinGroupResponseMember> memberMetadata = new ArrayList<>(); ExtendedAssignment leaderAssignment = new ExtendedAssignment( IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, leader, "followMe:618", CONFIG_OFFSET, Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), 0 ); ExtendedWorkerState leaderState = new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, leaderAssignment); JoinGroupResponseData.JoinGroupResponseMember leaderMetadata = new JoinGroupResponseData.JoinGroupResponseMember() .setMemberId(leader) .setMetadata(IncrementalCooperativeConnectProtocol.serializeMetadata(leaderState, false).array()); memberMetadata.add(leaderMetadata); WorkerCoordinator coordinator = mock(WorkerCoordinator.class); when(coordinator.configSnapshot()).thenReturn(configState()); Map<String, ByteBuffer> serializedAssignments = assignor.performAssignment( leader, ConnectProtocolCompatibility.COMPATIBLE.protocol(), memberMetadata, coordinator ); serializedAssignments.forEach((worker, serializedAssignment) -> { ExtendedAssignment assignment = IncrementalCooperativeConnectProtocol.deserializeAssignment(serializedAssignment); assertEquals( IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1, assignment.version(), "Incorrect protocol version in assignment for worker " + worker ); }); }
// Flags whether replication queues have been initialized; presumably toggled by tests
// and startup code to enable/disable the redundancy monitor — confirm callers.
public void setInitializedReplQueues(boolean v) { this.initializedReplQueues = v; }
// End-to-end check that a restarted datanode's full block report corrects the replica
// count after its on-disk replica was deleted while the redundancy monitor was disabled.
@Test public void testBlockReportAfterDataNodeRestart() throws Exception { Configuration conf = new HdfsConfiguration(); try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(3).storagesPerDatanode(1).build()) { cluster.waitActive(); BlockManager blockManager = cluster.getNamesystem().getBlockManager(); DistributedFileSystem fs = cluster.getFileSystem(); final Path filePath = new Path("/tmp.txt"); final long fileLen = 1L; DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L); DFSTestUtil.waitForReplication(fs, filePath, (short) 3, 60000); ArrayList<DataNode> datanodes = cluster.getDataNodes(); assertEquals(datanodes.size(), 3); // Stop RedundancyMonitor. blockManager.setInitializedReplQueues(false); // Delete the replica on the first datanode. DataNode dn = datanodes.get(0); int dnIpcPort = dn.getIpcPort(); File dnDir = dn.getFSDataset().getVolumeList().get(0).getCurrentDir(); String[] children = FileUtil.list(dnDir); for (String s : children) { if (!s.equals("VERSION")) { FileUtil.fullyDeleteContents(new File(dnDir, s)); } } // The number of replicas is still 3 because the datanode has not sent // a new block report. FileStatus stat = fs.getFileStatus(filePath); BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, stat.getLen()); assertEquals(3, locs[0].getHosts().length); // Restart the first datanode. cluster.restartDataNode(0, true); // Wait for the block report to be processed. cluster.waitDatanodeFullyStarted(cluster.getDataNode(dnIpcPort), 10000); cluster.waitFirstBRCompleted(0, 10000); // The replica num should be 2. locs = fs.getFileBlockLocations(stat, 0, stat.getLen()); assertEquals(2, locs[0].getHosts().length); } }
// Chroot adaptation: prefixes the chroot base onto the path before delegating, so the
// snapshot is created on the underlying (full) path.
@Override public Path createSnapshot(Path path, String name) throws IOException { return super.createSnapshot(fullPath(path), name); }
// Verifies that the chrooted path "/snapPath" is translated to "/a/b/snapPath" before
// being handed to the underlying mock filesystem.
@Test(timeout = 30000) public void testCreateSnapshot() throws Exception { Path snapRootPath = new Path("/snapPath"); Path chRootedSnapRootPath = new Path("/a/b/snapPath"); Configuration conf = new Configuration(); conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class); URI chrootUri = URI.create("mockfs://foo/a/b"); ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf); FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem()) .getRawFileSystem(); chrootFs.createSnapshot(snapRootPath, "snap1"); verify(mockFs).createSnapshot(chRootedSnapRootPath, "snap1"); }
// Accessor for the HMM's symbol emission probability matrix (field b); returns the
// internal instance, not a copy.
public Matrix getSymbolEmissionProbabilities() { return b; }
// The returned matrix must match the raw emission array element-wise within tolerance.
@Test public void testGetSymbolEmissionProbabilities() { System.out.println("getSymbolEmissionProbabilities"); HMM hmm = new HMM(pi, Matrix.of(a), Matrix.of(b)); Matrix result = hmm.getSymbolEmissionProbabilities(); for (int i = 0; i < b.length; i++) { for (int j = 0; j < b[i].length; j++) { assertEquals(b[i][j], result.get(i, j), 1E-7); } } }
/**
 * Returns all classpath {@link Location}s that contain the given package.
 *
 * @param pkg a package name in dot notation, e.g. {@code "com.tngtech"}
 * @return an immutable set of matching locations
 */
@PublicAPI(usage = ACCESS)
public static Set<Location> ofPackage(String pkg) {
    // Resolve the package to its resource name and snapshot every matching
    // location; copyOf preserves iteration order and de-duplicates, exactly
    // like the builder-based accumulation did.
    return ImmutableSet.copyOf(getLocationsOf(asResourceName(pkg)));
}
// The package must be located in every classpath root that contains it (here: the test's
// own root and the root providing DataProvider).
@Test public void locations_of_packages_from_mixed_URIs() { Set<Location> locations = Locations.ofPackage("com.tngtech"); assertThat(urisOf(locations)).contains( resolvedUri(getClass(), "/com/tngtech"), resolvedUri(DataProvider.class, "/com/tngtech") ); }
// Convenience overload: reads the YAML/JSON pipeline definition file into a tree and
// delegates to the JsonNode-based parser.
@Override public PipelineDef parse(Path pipelineDefPath, Configuration globalPipelineConfig) throws Exception { return parse(mapper.readTree(pipelineDefPath.toFile()), globalPipelineConfig); }
// Parses the UDF fixture definition and compares against the expected PipelineDef.
@Test void testUdfDefinition() throws Exception { URL resource = Resources.getResource("definitions/pipeline-definition-with-udf.yaml"); YamlPipelineDefinitionParser parser = new YamlPipelineDefinitionParser(); PipelineDef pipelineDef = parser.parse(Paths.get(resource.toURI()), new Configuration()); assertThat(pipelineDef).isEqualTo(pipelineDefWithUdf); }
// Checks whether actualVersion satisfies acceptableVersionRange. A bare version like
// "1.4" is treated as an inclusive lower bound "[1.4,)"; otherwise the range must be an
// interval "[a,b]" / "(a,b)" etc., where [ / ] are inclusive and ( / ) exclusive, and at
// least one bound must be present.
public boolean compatibleVersion(String acceptableVersionRange, String actualVersion) { V pluginVersion = parseVersion(actualVersion); // Treat a single version "1.4" as a left bound, equivalent to "[1.4,)" if (acceptableVersionRange.matches(VERSION_REGEX)) { return ge(pluginVersion, parseVersion(acceptableVersionRange)); } // Otherwise ensure it is a version range with bounds Matcher matcher = INTERVAL_PATTERN.matcher(acceptableVersionRange); Preconditions.checkArgument(matcher.matches(), "invalid version range"); String leftBound = matcher.group("left"); String rightBound = matcher.group("right"); Preconditions.checkArgument( leftBound != null || rightBound != null, "left and right bounds cannot both be empty"); BiPredicate<V, V> leftComparator = acceptableVersionRange.startsWith("[") ? VersionChecker::ge : VersionChecker::gt; BiPredicate<V, V> rightComparator = acceptableVersionRange.endsWith("]") ? VersionChecker::le : VersionChecker::lt; if (leftBound != null && !leftComparator.test(pluginVersion, parseVersion(leftBound))) { return false; } if (rightBound != null && !rightComparator.test(pluginVersion, parseVersion(rightBound))) { return false; } return true; }
// Versions strictly inside the interval must match regardless of bound inclusivity.
@Test public void testRange_between() { Assert.assertTrue(checker.compatibleVersion("[2.3,4.3]", "2.4")); Assert.assertTrue(checker.compatibleVersion("(2.3,4.3]", "4.2")); Assert.assertTrue(checker.compatibleVersion("[2.3,4.3)", "2.4")); Assert.assertTrue(checker.compatibleVersion("(2.3,4.3)", "4.2")); }
// Range endpoints must be of the exact same runtime class; otherwise an ERROR event is
// raised on the context and an EndpointOfRangeOfDifferentTypeException is thrown. Each
// endpoint is then individually validated.
static void validateValues(EvaluationContext ctx, Object start, Object end) { if (start.getClass() != end.getClass()) { ctx.notifyEvt(() -> new ASTEventBase(FEELEvent.Severity.ERROR, Msg.createMessage(Msg.X_TYPE_INCOMPATIBLE_WITH_Y_TYPE, start, end), null)); throw new EndpointOfRangeOfDifferentTypeException(); } valueMustBeValid(ctx, start); valueMustBeValid(ctx, end); }
// Same-typed endpoints (numbers, dates) must validate without emitting any FEEL event.
@Test void validateValuesTrueTest() { validateValues(ctx, BigDecimal.valueOf(1), BigDecimal.valueOf(3)); verify(listener, never()).onEvent(any(FEELEvent.class)); validateValues(ctx, LocalDate.of(2021, 1, 1), LocalDate.of(2021, 1, 3)); verify(listener, never()).onEvent(any(FEELEvent.class)); }
// Starts a processor span for a record: extracts (and clears) trace headers so the
// context is not re-consumed downstream, continues the trace if present, and tags topic
// (and key, where applicable) only on newly rooted, sampled spans.
public Span nextSpan(ConsumerRecord<?, ?> record) { // Even though the type is ConsumerRecord, this is not a (remote) consumer span. Only "poll" // events create consumer spans. Since this is a processor span, we use the normal sampler. TraceContextOrSamplingFlags extracted = extractAndClearTraceIdHeaders(processorExtractor, record.headers(), record.headers()); Span result = tracer.nextSpan(extracted); if (extracted.context() == null && !result.isNoop()) { addTags(record, result); } return result; }
// A non-String (binary) record key must not produce a kafka.key tag; only the topic is tagged.
@Test void nextSpan_shouldnt_tag_binary_key() { ConsumerRecord<byte[], String> record = new ConsumerRecord<>(TEST_TOPIC, 0, 1, new byte[1], TEST_VALUE); kafkaTracing.nextSpan(record).start().finish(); assertThat(spans.get(0).tags()) .containsOnly(entry("kafka.topic", TEST_TOPIC)); }
// Best-effort MIME type lookup for a file name: returns null for names without an
// extension, first tries Files.probeContentType (may be unavailable on Lambda without
// the mailcap package), then falls back to URLConnection.guessContentTypeFromName.
// Failures are logged and degrade to the next strategy rather than throwing.
@Override @SuppressFBWarnings("PATH_TRAVERSAL_IN") // suppressing because we are using the getValidFilePath public String getMimeType(String file) { if (file == null || !file.contains(".")) { return null; } String mimeType = null; // may not work on Lambda until mailcap package is present https://github.com/aws/serverless-java-container/pull/504 try { mimeType = Files.probeContentType(Paths.get(file)); } catch (IOException | InvalidPathException e) { log("unable to probe for content type, will use fallback", e); } if (mimeType == null) { try { String mimeTypeGuess = URLConnection.guessContentTypeFromName(new File(file).getName()); if (mimeTypeGuess !=null) { mimeType = mimeTypeGuess; } } catch (Exception e) { log("couldn't find a better contentType than " + mimeType + " for file " + file, e); } } return mimeType; }
// A ".txt" path must resolve to text/plain, both as a bare path and with a file:// prefix.
@Test void getMimeType_mimeTypeOfCorrectFile_expectMime() { String tmpFilePath = TMP_DIR + "test_text.txt"; AwsServletContext ctx = new AwsServletContext(null); String mimeType = ctx.getMimeType(tmpFilePath); assertEquals("text/plain", mimeType); mimeType = ctx.getMimeType("file://" + tmpFilePath); assertEquals("text/plain", mimeType); }
/**
 * Returns the fully qualified class name of a compile unit, i.e. its package name and
 * main class name joined by a dot.
 *
 * @param unit the compile unit to name
 * @return {@code "<pkg>.<mainClassName>"}
 */
public static String fullClassName(CompileUnit unit) {
    return String.format("%s.%s", unit.pkg, unit.mainClassName);
}
// The generated name for Foo's package + simple name must equal Foo.class.getName().
@Test public void fullClassName() { CompileUnit unit = new CompileUnit(Foo.class.getPackage().getName(), Foo.class.getSimpleName(), ""); Assert.assertEquals(CodeGenerator.fullClassName(unit), Foo.class.getName()); }
// Fluent setter enabling/disabling the advanced network configuration; returns this for chaining.
public AdvancedNetworkConfig setEnabled(boolean enabled) { this.enabled = enabled; return this; }
// With advanced networking enabled, a WAN publisher referencing a non-existent endpoint
// must fail fast at instance creation with InvalidConfigurationException.
@Test public void testFailFast_whenWanPublisherRequiresUndefinedEndpointConfig() { Config config = new Config(); config.getAdvancedNetworkConfig().setEnabled(true); config.addWanReplicationConfig( new WanReplicationConfig() .setName("seattle-tokyo") .addBatchReplicationPublisherConfig( new WanBatchPublisherConfig() .setClusterName("target-cluster") .setEndpoint("does-not-exist"))); Assert.assertThrows(InvalidConfigurationException.class, () -> createHazelcastInstance(config)); }
// Marks the executor as shut down by flipping the atomic flag; presumably in-flight
// tasks are allowed to finish and the flag is consulted elsewhere — confirm callers.
@Override public void shutdown() { shutdown.set(true); }
// shutdown() must mark the service as shut down while letting the submitted task complete.
@Test public void shutdown() throws Exception { ManagedExecutorService executorService = newManagedExecutorService(); Future<Object> future = executorService.submit(() -> { LockSupport.parkNanos(TimeUnit.SECONDS.toNanos(1)); return null; }); executorService.shutdown(); assertTrue(executorService.isShutdown()); future.get(); }
// Depth-first topological sort of the pipeline dependency graph rooted at `root`; tsort
// populates `state` (visited/visiting) and uses `visiting` to detect cycles. Afterwards
// asserts every reachable node was visited. Note: `state` is created fresh here, so the
// initial containsKey(root) check can never be true on entry.
public final void topoSort(final CaseInsensitiveString root, final PipelineDependencyState pipelineDependencyState) throws Exception { Hashtable<CaseInsensitiveString, CycleState> state = new Hashtable<>(); Stack<CaseInsensitiveString> visiting = new Stack<>(); if (!state.containsKey(root)) { tsort(root, pipelineDependencyState, state, visiting); } else if (state.get(root) == CycleState.VISITING) { throw ExceptionUtils.bomb("Unexpected node in visiting state: " + root); } assertHasVisitedAllNodesInTree(state); }
// A dependency chain a -> b -> z where z is unknown must surface an error naming both
// the missing pipeline and its referrer.
@Test public void shouldThrowExceptionWhenDependencyExistsWithUnknownPipeline() { when(state.getDependencyMaterials(new CaseInsensitiveString("a"))).thenReturn(new Node(new Node.DependencyNode(new CaseInsensitiveString("b"), new CaseInsensitiveString("stage")))); when(state.getDependencyMaterials(new CaseInsensitiveString("b"))).thenReturn(new Node(new Node.DependencyNode(new CaseInsensitiveString("z"), new CaseInsensitiveString("stage")))); when(state.hasPipeline(new CaseInsensitiveString("a"))).thenReturn(true); when(state.hasPipeline(new CaseInsensitiveString("b"))).thenReturn(true); when(state.hasPipeline(new CaseInsensitiveString("z"))).thenReturn(false); try { project.topoSort(new CaseInsensitiveString("a"), state); } catch (Exception e) { assertThat(e.getMessage(), is("Pipeline 'z' does not exist. It is used from pipeline 'b'.")); } }
// Convenience overload delegating to the full execute() with restoreInProgress=false.
public DdlCommandResult execute( final String sql, final DdlCommand ddlCommand, final boolean withQuery, final Set<SourceName> withQuerySources ) { return execute(sql, ddlCommand, withQuery, withQuerySources, false); }
// Altering an existing stream must succeed, grow its schema to the new column count,
// and append the new SQL text to the source's SQL expression.
@Test public void shouldAlterStream() { // Given: alterSource = new AlterSourceCommand(EXISTING_STREAM, DataSourceType.KSTREAM.getKsqlType(), NEW_COLUMNS); // When: final DdlCommandResult result = cmdExec.execute(SQL_TEXT, alterSource, false, NO_QUERY_SOURCES); // Then: assertThat(result.isSuccess(), is(true)); assertThat(metaStore.getSource(EXISTING_STREAM).getSchema().columns().size(), is(10)); assertThat(metaStore.getSource(EXISTING_STREAM).getSqlExpression(), is("sqlexpression\nsome ksql")); }
// Tests the value against the GENERAL pattern (presumably word characters/underscore —
// see the pattern definition); delegates to the shared regex matcher.
public static boolean isGeneral(CharSequence value) { return isMatchRegex(GENERAL, value); }
// Exercises the length-bounded overload: empty and ASCII word strings pass, strings
// containing Chinese characters are rejected.
@Test public void isGeneralTest() { String str = ""; boolean general = Validator.isGeneral(str, -1, 5); assertTrue(general); str = "123_abc_ccc"; general = Validator.isGeneral(str, -1, 100); assertTrue(general); // 不允许中文 general = Validator.isGeneral("123_abc_ccc中文", -1, 100); assertFalse(general); }
// Lists a Manta directory. The root is synthesized as the account home directory; all
// other paths stream MantaObjects from the client, converting each to a Path with
// attributes and notifying the listener per chunk. Manta/HTTP/IO failures are mapped to
// the corresponding BackgroundException types.
@Override public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException { if(directory.isRoot()) { return new AttributedList<Path>(Collections.singletonList( new MantaAccountHomeInfo(session.getHost().getCredentials().getUsername(), session.getHost().getDefaultPath()).getNormalizedHomePath())); } final AttributedList<Path> children = new AttributedList<>(); final Iterator<MantaObject> objectsIter; try { objectsIter = session.getClient().listObjects(directory.getAbsolute()).iterator(); } catch(MantaObjectException e) { throw new MantaExceptionMappingService().map("Listing directory {0} failed", e, directory); } catch(MantaClientHttpResponseException e) { throw new MantaHttpExceptionMappingService().map("Listing directory {0} failed", e, directory); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Listing directory {0} failed", e); } final MantaObjectAttributeAdapter adapter = new MantaObjectAttributeAdapter(session); while(objectsIter.hasNext()) { MantaObject o = objectsIter.next(); final Path file = new Path(directory, PathNormalizer.name(o.getPath()), EnumSet.of(o.isDirectory() ? Path.Type.directory : Path.Type.file), adapter.toAttributes(o) ); children.add(file); listener.chunk(directory, children); } return children; }
// Listing the account root must be non-empty and every child must reference the listed
// directory as its parent (same instance).
@Test public void testAccountRoot() throws Exception { final MantaAccountHomeInfo root = new MantaAccountHomeInfo(session.getHost().getCredentials().getUsername(), session.getHost().getDefaultPath()); final Path directory = root.getAccountRoot(); final AttributedList<Path> list = new MantaListService(session) .list(directory, new DisabledListProgressListener()); assertFalse(list.isEmpty()); for(Path f : list) { assertSame(directory, f.getParent()); assertEquals(directory.getName(), f.getParent().getName()); } }
// Static factory for the Builder; a fresh instance per call.
public static Builder builder() { return new Builder(); }
// Appending the same data file three times (including a copy) in one fast append must
// deduplicate down to a single added file in the snapshot summary.
@TestTemplate public void fastAppendWithDuplicates() { assertThat(listManifestFiles()).isEmpty(); table .newFastAppend() .appendFile(FILE_A) .appendFile(DataFiles.builder(SPEC).copy(FILE_A).build()) .appendFile(FILE_A) .commit(); assertThat(table.currentSnapshot().summary()) .hasSize(11) .containsEntry(SnapshotSummary.ADDED_FILES_PROP, "1") .containsEntry(SnapshotSummary.ADDED_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.ADDED_RECORDS_PROP, "1") .containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "1") .containsEntry(SnapshotSummary.TOTAL_DATA_FILES_PROP, "1") .containsEntry(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_POS_DELETES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.TOTAL_RECORDS_PROP, "1"); }
// Pages brokerage users. When filtering by a binding user, first resolves the child user
// ids down to the requested level; if that filter is active but matches no children, the
// result is short-circuited to an empty page instead of querying the mapper.
@Override public PageResult<BrokerageUserDO> getBrokerageUserPage(BrokerageUserPageReqVO pageReqVO) { List<Long> childIds = getChildUserIdsByLevel(pageReqVO.getBindUserId(), pageReqVO.getLevel()); // 有”绑定用户编号“查询条件时,没有查到下级会员,直接返回空 if (pageReqVO.getBindUserId() != null && CollUtil.isEmpty(childIds)) { return PageResult.empty(); } return brokerageUserMapper.selectPage(pageReqVO, childIds); }
// Scaffolded page-query test (still @Disabled until the null placeholders are filled in):
// inserts one matching row plus three mismatching rows and expects exactly one hit.
@Test @Disabled // TODO 请修改 null 为需要的值,然后删除 @Disabled 注解 public void testGetBrokerageUserPage() { // mock 数据 BrokerageUserDO dbBrokerageUser = randomPojo(BrokerageUserDO.class, o -> { // 等会查询到 o.setBindUserId(null); o.setBrokerageEnabled(null); o.setCreateTime(null); }); brokerageUserMapper.insert(dbBrokerageUser); // 测试 brokerageUserId 不匹配 brokerageUserMapper.insert(cloneIgnoreId(dbBrokerageUser, o -> o.setBindUserId(null))); // 测试 brokerageEnabled 不匹配 brokerageUserMapper.insert(cloneIgnoreId(dbBrokerageUser, o -> o.setBrokerageEnabled(null))); // 测试 createTime 不匹配 brokerageUserMapper.insert(cloneIgnoreId(dbBrokerageUser, o -> o.setCreateTime(null))); // 准备参数 BrokerageUserPageReqVO reqVO = new BrokerageUserPageReqVO(); reqVO.setBindUserId(null); reqVO.setBrokerageEnabled(null); reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28)); // 调用 PageResult<BrokerageUserDO> pageResult = brokerageUserService.getBrokerageUserPage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbBrokerageUser, pageResult.getList().get(0)); }
@VisibleForTesting List<LogFile> applyBundleSizeLogFileLimit(List<LogFile> allLogs) { final ImmutableList.Builder<LogFile> truncatedLogFileList = ImmutableList.builder(); // Always collect the in-memory log and the newest on-disk log file // Keep collecting until we pass LOG_COLLECTION_SIZE_LIMIT final AtomicBoolean oneFileAdded = new AtomicBoolean(false); final AtomicLong collectedSize = new AtomicLong(); allLogs.stream().sorted(Comparator.comparing(LogFile::lastModified).reversed()).forEach(logFile -> { if (logFile.id().equals(IN_MEMORY_LOGFILE_ID)) { truncatedLogFileList.add(logFile); } else if (!oneFileAdded.get() || collectedSize.get() < LOG_COLLECTION_SIZE_LIMIT) { truncatedLogFileList.add(logFile); oneFileAdded.set(true); collectedSize.addAndGet(logFile.size()); } }); return truncatedLogFileList.build(); }
// With one oversized on-disk log (65 MiB) the collection budget is exhausted
// right after the newest file, so only the in-memory log and the newest
// on-disk log survive the truncation.
@Test
public void testLogSizeLimiterWithLimitedSpace() {
    final List<LogFile> fullLoglist = List.of(
            new LogFile("memory", "server.mem.log", 500, Instant.now()),
            new LogFile("0", "server.log", 65 * 1024 * 1024, Instant.now()),
            new LogFile("1", "server.log.1.gz", 500, Instant.now().minus(1, ChronoUnit.DAYS)),
            new LogFile("2", "server.log.2.gz", 500, Instant.now().minus(2, ChronoUnit.DAYS))
    );
    final List<LogFile> shrinkedList = supportBundleService.applyBundleSizeLogFileLimit(fullLoglist);
    // only the in-memory log and the newest on-disk file remain
    assertThat(shrinkedList).hasSize(2);
    assertThat(shrinkedList).extracting(LogFile::id).contains("memory", "0");
}
// No-op variant: this headers implementation holds no values, so a
// get-and-remove can never find anything and always answers null.
// NOTE(review): presumably an empty/immutable Headers stub — confirm against
// the enclosing class.
@Override
public Long getLongAndRemove(K name) {
    return null;
}
// Removing a value from headers that hold nothing must yield null, not throw.
@Test
public void testGetLongAndRemove() {
    assertNull(HEADERS.getLongAndRemove("name1"));
}
/**
 * Binds this socket to the given local address, delegating to
 * {@code bind(SocketAddress, int)} with an accept-backlog of
 * {@code Integer.MAX_VALUE} (i.e. effectively "as large as the
 * implementation allows").
 */
public void bind(SocketAddress localAddress) {
    bind(localAddress, Integer.MAX_VALUE);
}
// Binding the same socket twice must fail: the second bind of an already-bound
// socket is expected to surface as an UncheckedIOException.
@Test
public void test_bind_whenAlreadyBound() {
    Reactor reactor = newReactor();
    try (AsyncServerSocket socket = newAsyncServerSocket(reactor)) {
        InetSocketAddress local = createLoopBackAddressWithEphemeralPort();
        socket.bind(local);
        assertThrows(UncheckedIOException.class, () -> socket.bind(local));
    }
}
/**
 * Loads the dictionary entries matching the given status and type, ordered by
 * the shared (dictType, sort) comparator.
 *
 * @param status   status filter
 * @param dictType dictionary type filter
 * @return the matching entries, sorted in place
 */
@Override
public List<DictDataDO> getDictDataList(Integer status, String dictType) {
    List<DictDataDO> dictDataList = dictDataMapper.selectListByStatusAndDictType(status, dictType);
    dictDataList.sort(COMPARATOR_TYPE_AND_SORT);
    return dictDataList;
}
// Two enabled rows of type "yunai" must come back, ordered by sort ascending;
// the disabled row and the row of another type must be filtered out.
@Test
public void testGetDictDataList() {
    // mock data
    DictDataDO dictDataDO01 = randomDictDataDO().setDictType("yunai").setSort(2)
            .setStatus(CommonStatusEnum.ENABLE.getStatus());
    dictDataMapper.insert(dictDataDO01);
    DictDataDO dictDataDO02 = randomDictDataDO().setDictType("yunai").setSort(1)
            .setStatus(CommonStatusEnum.ENABLE.getStatus());
    dictDataMapper.insert(dictDataDO02);
    // disabled row: must not be returned
    DictDataDO dictDataDO03 = randomDictDataDO().setDictType("yunai").setSort(3)
            .setStatus(CommonStatusEnum.DISABLE.getStatus());
    dictDataMapper.insert(dictDataDO03);
    // different dictType: must not be returned
    DictDataDO dictDataDO04 = randomDictDataDO().setDictType("yunai2").setSort(3)
            .setStatus(CommonStatusEnum.DISABLE.getStatus());
    dictDataMapper.insert(dictDataDO04);
    // prepare parameters
    Integer status = CommonStatusEnum.ENABLE.getStatus();
    String dictType = "yunai";
    // invoke
    List<DictDataDO> dictDataDOList = dictDataService.getDictDataList(status, dictType);
    // assert: sorted by sort, so 02 (sort=1) precedes 01 (sort=2)
    assertEquals(2, dictDataDOList.size());
    assertPojoEquals(dictDataDO02, dictDataDOList.get(0));
    assertPojoEquals(dictDataDO01, dictDataDOList.get(1));
}
/**
 * Static factory building the documentation model for {@code cls} viewed as a
 * {@code baseCls} plugin type. Delegates to the constructor with a
 * {@code null} final argument — its meaning is not visible from here
 * (presumably an optional alias/override); confirm against the constructor.
 */
public static <T> ClassPluginDocumentation<T> of(JsonSchemaGenerator jsonSchemaGenerator, RegisteredPlugin plugin, Class<? extends T> cls, Class<T> baseCls) {
    return new ClassPluginDocumentation<>(jsonSchemaGenerator, plugin, cls, baseCls, null);
}
// End-to-end check of the documentation model generated for the example task
// plugin: scans the test "plugins" directory, builds the doc for its single
// task, and asserts the generated inputs, definitions, enum values, array/map
// schemas, outputs and required fields.
@SuppressWarnings("unchecked")
@Test
void tasks() throws URISyntaxException {
    Helpers.runApplicationContext(throwConsumer((applicationContext) -> {
        JsonSchemaGenerator jsonSchemaGenerator = applicationContext.getBean(JsonSchemaGenerator.class);

        // scan the bundled test plugin directory
        Path plugins = Paths.get(Objects.requireNonNull(ClassPluginDocumentationTest.class.getClassLoader().getResource("plugins")).toURI());
        PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader());
        List<RegisteredPlugin> scan = pluginScanner.scan(plugins);
        assertThat(scan.size(), is(1));
        assertThat(scan.getFirst().getTasks().size(), is(1));

        ClassPluginDocumentation<? extends Task> doc = ClassPluginDocumentation.of(jsonSchemaGenerator, scan.getFirst(), scan.getFirst().getTasks().getFirst(), Task.class);

        // top-level metadata
        assertThat(doc.getDocExamples().size(), is(2));
        assertThat(doc.getIcon(), is(notNullValue()));
        assertThat(doc.getInputs().size(), is(5));
        assertThat(doc.getDocLicense(), is("EE"));

        // simple
        assertThat(((Map<String, String>) doc.getInputs().get("format")).get("type"), is("string"));
        assertThat(((Map<String, String>) doc.getInputs().get("format")).get("default"), is("{}"));
        assertThat(((Map<String, String>) doc.getInputs().get("format")).get("pattern"), is(".*"));
        assertThat(((Map<String, String>) doc.getInputs().get("format")).get("description"), containsString("of this input"));

        // definitions
        assertThat(doc.getDefs().size(), is(5));

        // enum
        Map<String, Object> enumProperties = (Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>) doc.getDefs().get("io.kestra.plugin.templates.ExampleTask-PropertyChildInput")).get("properties")).get("childEnum");
        assertThat(((List<String>) enumProperties.get("enum")).size(), is(2));
        assertThat(((List<String>) enumProperties.get("enum")), containsInAnyOrder("VALUE_1", "VALUE_2"));

        Map<String, Object> childInput = (Map<String, Object>) ((Map<String, Object>) doc.getDefs().get("io.kestra.plugin.templates.ExampleTask-PropertyChildInput")).get("properties");

        // array
        Map<String, Object> childInputList = (Map<String, Object>) childInput.get("list");
        assertThat((String) (childInputList).get("type"), is("array"));
        assertThat((String) (childInputList).get("title"), is("List of string"));
        assertThat((Integer) (childInputList).get("minItems"), is(1));
        assertThat(((Map<String, String>) (childInputList).get("items")).get("type"), is("string"));

        // map
        Map<String, Object> childInputMap = (Map<String, Object>) childInput.get("map");
        assertThat((String) (childInputMap).get("type"), is("object"));
        assertThat((Boolean) (childInputMap).get("$dynamic"), is(true));
        assertThat(((Map<String, String>) (childInputMap).get("additionalProperties")).get("type"), is("number"));

        // output
        Map<String, Object> childOutput = (Map<String, Object>) ((Map<String, Object>) doc.getDefs().get("io.kestra.plugin.templates.AbstractTask-OutputChild")).get("properties");
        assertThat(((Map<String, String>) childOutput.get("value")).get("type"), is("string"));
        assertThat(((Map<String, Object>) childOutput.get("outputChildMap")).get("type"), is("object"));
        assertThat(((Map<String, String>)((Map<String, Object>) childOutput.get("outputChildMap")).get("additionalProperties")).get("$ref"), containsString("OutputMap"));

        // required
        Map<String, Object> propertiesChild = (Map<String, Object>) doc.getDefs().get("io.kestra.plugin.templates.ExampleTask-PropertyChildInput");
        assertThat(((List<String>) propertiesChild.get("required")).size(), is(3));

        // output ref
        Map<String, Object> outputMap = ((Map<String, Object>) ((Map<String, Object>) doc.getDefs().get("io.kestra.plugin.templates.AbstractTask-OutputMap")).get("properties"));
        assertThat(outputMap.size(), is(2));
        assertThat(((Map<String, Object>) outputMap.get("code")).get("type"), is("integer"));
    }));
}
// Convenience overload: start from a fresh S3Client.builder() and apply the
// configuration from s3Options via the two-argument createBuilder.
@Override
public S3ClientBuilder createBuilder(S3Options s3Options) {
    return createBuilder(S3Client.builder(), s3Options);
}
// When the options carry an endpoint override, the factory must forward it to
// the builder via endpointOverride(...) and touch nothing else on the builder.
@Test
public void testSetEndpoint() {
    URI endpointOverride = URI.create("https://localhost");
    when(s3Options.getEndpoint()).thenReturn(endpointOverride);
    DefaultS3ClientBuilderFactory.createBuilder(builder, s3Options);
    verify(builder).endpointOverride(endpointOverride);
    verifyNoMoreInteractions(builder);
}
@Override protected void verifyConditions(ScesimModelDescriptor scesimModelDescriptor, ScenarioRunnerData scenarioRunnerData, ExpressionEvaluatorFactory expressionEvaluatorFactory, Map<String, Object> requestContext) { for (InstanceGiven input : scenarioRunnerData.getGivens()) { FactIdentifier factIdentifier = input.getFactIdentifier(); List<ScenarioExpect> assertionOnFact = scenarioRunnerData.getExpects().stream() .filter(elem -> !elem.isNewFact()) .filter(elem -> Objects.equals(elem.getFactIdentifier(), factIdentifier)).collect(toList()); // check if this fact has something to check if (assertionOnFact.isEmpty()) { continue; } getScenarioResultsFromGivenFacts(scesimModelDescriptor, assertionOnFact, input, expressionEvaluatorFactory).forEach(scenarioRunnerData::addResult); } }
// Scenario 2 carries expectations on two pre-existing GIVEN facts, so verifying
// its conditions must produce exactly two results.
@Test
public void verifyConditions_scenario2() {
    List<InstanceGiven> scenario2Inputs = extractGivenValuesForScenario2();
    List<ScenarioExpect> scenario2Outputs = runnerHelper.extractExpectedValues(scenario2.getUnmodifiableFactMappingValues());

    ScenarioRunnerData scenarioRunnerData2 = new ScenarioRunnerData();
    scenario2Inputs.forEach(scenarioRunnerData2::addGiven);
    scenario2Outputs.forEach(scenarioRunnerData2::addExpect);

    runnerHelper.verifyConditions(simulation.getScesimModelDescriptor(), scenarioRunnerData2, expressionEvaluatorFactory, null);
    assertThat(scenarioRunnerData2.getResults()).hasSize(2);
}
/**
 * Buffers a log entry when it meets the minimum level, triggering a flush once
 * the running count reaches {@code BUFFER_THRESHOLD}.
 */
public void collectLog(LogEntry logEntry) {
    // Defensive: drop entries when either level is unknown instead of failing.
    if (logEntry.getLevel() == null || minLogLevel == null) {
        LOGGER.warn("Log level or threshold level is null. Skipping.");
        return;
    }
    // Below-threshold entries are filtered out. Relies on LogLevel's ordering —
    // presumably ascending severity; confirm against the LogLevel declaration.
    if (logEntry.getLevel().compareTo(minLogLevel) < 0) {
        LOGGER.debug("Log level below threshold. Skipping.");
        return;
    }
    buffer.offer(logEntry);
    // NOTE(review): logCount only grows here — assumes flushBuffer() resets it,
    // otherwise every entry past the threshold would trigger a flush. Confirm.
    if (logCount.incrementAndGet() >= BUFFER_THRESHOLD) {
        flushBuffer();
    }
}
// A DEBUG entry below the aggregator's minimum level must be filtered out and
// never reach the central log store.
@Test
void whenDebugLogIsCollected_thenNoLogsShouldBeStored() {
    logAggregator.collectLog(createLogEntry(LogLevel.DEBUG, "Sample debug log message"));
    verifyNoInteractionsWithCentralLogStore();
}
/**
 * Assembles the Elasticsearch settings map section by section (file system,
 * network, cluster, security, misc), logs the resulting HTTP/TCP endpoints,
 * and returns the map.
 */
public Map<String, String> build() {
    Map<String, String> settings = new HashMap<>();
    configureFileSystem(settings);
    configureNetwork(settings);
    configureCluster(settings);
    configureSecurity(settings);
    configureOthers(settings);
    // surface the bound endpoints for operators
    LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
        settings.get(ES_HTTP_HOST_KEY), settings.get(ES_HTTP_PORT_KEY),
        settings.get(ES_TRANSPORT_HOST_KEY), settings.get(ES_TRANSPORT_PORT_KEY));
    return settings;
}
// Standalone (single-node) mode: generated settings must carry the configured
// host/ports, single-node discovery, and no cluster-formation keys.
@Test
public void test_default_settings_for_standalone_mode() throws Exception {
    File homeDir = temp.newFolder();
    Props props = new Props(new Properties());
    props.set(SEARCH_PORT.getKey(), "1234");
    props.set(ES_PORT.getKey(), "5678");
    props.set(SEARCH_HOST.getKey(), "127.0.0.1");
    props.set(PATH_HOME.getKey(), homeDir.getAbsolutePath());
    props.set(PATH_DATA.getKey(), temp.newFolder().getAbsolutePath());
    props.set(PATH_TEMP.getKey(), temp.newFolder().getAbsolutePath());
    props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath());
    props.set(CLUSTER_NAME.getKey(), "sonarqube");

    EsSettings esSettings = new EsSettings(props, new EsInstallation(props), system);

    Map<String, String> generated = esSettings.build();
    assertThat(generated).containsEntry("transport.port", "5678")
        .containsEntry("transport.host", "127.0.0.1")
        .containsEntry("http.port", "1234")
        .containsEntry("http.host", "127.0.0.1")
        // no cluster, but cluster and node names are set though
        .containsEntry("cluster.name", "sonarqube")
        .containsEntry("node.name", "sonarqube")
        .containsEntry("discovery.type", "single-node")
        .doesNotContainKey("discovery.seed_hosts")
        .doesNotContainKey("cluster.initial_master_nodes");

    // paths: data/logs are generated, home/conf are not exposed
    assertThat(generated.get("path.data")).isNotNull();
    assertThat(generated.get("path.logs")).isNotNull();
    assertThat(generated.get("path.home")).isNull();
    assertThat(generated.get("path.conf")).isNull();

    assertThat(generated)
        .containsEntry("discovery.initial_state_timeout", "30s")
        .containsEntry("action.auto_create_index", "false");
}
/**
 * Checks that {@code other} is compatible with this windowing fn.
 *
 * @throws IncompatibleWindowException when {@code other} is not a compatible
 *         {@code Sessions} fn
 */
@Override
public void verifyCompatibility(WindowFn<?, ?> other) throws IncompatibleWindowException {
    // Guard clause: compatible fns need no further work.
    if (this.isCompatible(other)) {
        return;
    }
    throw new IncompatibleWindowException(
        other,
        String.format(
            "%s is only compatible with %s.",
            Sessions.class.getSimpleName(), Sessions.class.getSimpleName()));
}
// A Sessions fn is compatible with another Sessions fn (first call must not
// throw); checking against a FixedWindows fn must raise
// IncompatibleWindowException — armed via the JUnit4 ExpectedException rule
// just before the incompatible call.
@Test
public void testVerifyCompatibility() throws IncompatibleWindowException {
    Sessions.withGapDuration(Duration.millis(10))
        .verifyCompatibility(Sessions.withGapDuration(Duration.millis(10)));
    thrown.expect(IncompatibleWindowException.class);
    Sessions.withGapDuration(Duration.millis(10))
        .verifyCompatibility(FixedWindows.of(Duration.millis(10)));
}
/**
 * @return {@code true} when at least one CPE identifier is associated with
 *         this object (i.e. the {@code cpe} collection is non-empty)
 */
public boolean hasCpe() {
    return !cpe.isEmpty();
}
// Intentional placeholder: hasCpe() is covered by other tests; this empty test
// is kept so the IDE does not regenerate a stub. The squid:S2699 suppression
// silences the "tests should include assertions" rule for it.
@Test
@SuppressWarnings("squid:S2699")
public void testHasCpe() {
    // already tested, this is just left so the IDE doesn't recreate it.
}
public List<V> topologicalSort() { Map<V, Integer> degree = inDegree(); // determine all vertices with zero in-degree Stack<V> zeroVertices = new Stack<>(); // stack as good as any here for (V v : degree.keySet()) { if (degree.get(v) == 0) { zeroVertices.push(v); } } // determine the topological order List<V> result = new ArrayList<>(); while (!zeroVertices.isEmpty()) { V vertex = zeroVertices.pop(); // choose a vertex with zero in-degree result.add(vertex); // vertex 'v' is next in topological order // "remove" vertex 'v' by updating its neighbors for (V neighbor : neighbors.get(vertex)) { degree.put(neighbor, degree.get(neighbor) - 1); // remember any vertices that now have zero in-degree if (degree.get(neighbor) == 0) { zeroVertices.push(neighbor); } } } // check that we have used the entire graph (if not, there was a cycle) if (result.size() != neighbors.size()) { return null; } return result; }
// The graph fixture must sort into this exact order.
// NOTE(review): a DAG can admit several valid topological orders; this
// expectation pins the specific order produced by the implementation's
// LIFO (stack-based) zero-in-degree handling.
@Test
void topologicalSort() {
    List<Character> result = graph.topologicalSort();
    List<Character> expected = Arrays.asList('D', 'E', 'A', 'B', 'F', 'G', 'C');
    assertEquals(expected, result);
}
/**
 * Deletes the given files over WebDAV, in map iteration order.
 *
 * A path is skipped when an ancestor was already deleted earlier in this batch
 * (the server removed the children together with the parent). For every
 * non-skipped path the progress callback is invoked BEFORE the DELETE request
 * is sent.
 *
 * @throws BackgroundException a SardineException (WebDAV status error) mapped
 *         via DAVExceptionMappingService, or an IOException (transport error)
 *         mapped via HttpExceptionMappingService
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    final List<Path> deleted = new ArrayList<Path>();
    for(Map.Entry<Path, TransferStatus> entry : files.entrySet()) {
        boolean skip = false;
        final Path file = entry.getKey();
        // skip children of paths deleted earlier in this batch
        for(Path d : deleted) {
            if(file.isChild(d)) {
                skip = true;
                break;
            }
        }
        if(skip) {
            continue;
        }
        deleted.add(file);
        callback.delete(file);
        try {
            // the per-file TransferStatus is folded into the request by
            // toRequest — presumably carrying a lock token; confirm there
            final TransferStatus status = entry.getValue();
            session.getClient().execute(this.toRequest(file, status), new VoidResponseHandler());
        }
        catch(SardineException e) {
            throw new DAVExceptionMappingService().map("Cannot delete {0}", e, file);
        }
        catch(IOException e) {
            throw new HttpExceptionMappingService().map(e, file);
        }
    }
}
// A locked file must still be deletable when the delete request carries the
// lock id. If the server does not support locking (InteroperabilityException),
// the test degrades to a plain delete with a null lock token.
@Test
public void testDeleteFileWithLock() throws Exception {
    final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new DAVTouchFeature(session).touch(test, new TransferStatus());
    String lock = null;
    try {
        lock = new DAVLockFeature(session).lock(test);
    }
    catch(InteroperabilityException e) {
        // Not supported
    }
    assertTrue(new DAVFindFeature(session).find(test));
    new DAVDeleteFeature(session).delete(Collections.singletonMap(test, new TransferStatus().withLockId(lock)), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DAVFindFeature(session).find(test));
}
/**
 * Intersects this interval set with another: every pairwise overlap between
 * the two sets' intervals is collected, then normalized into a new set.
 *
 * @param other the set to intersect with
 * @return the normalized intersection, or {@code NEVER} when the sets do not overlap
 */
public IntervalSet intersect(IntervalSet other) {
    List<Interval> overlaps = new ArrayList<>(2);
    for (Interval mine : getIntervals()) {
        for (Interval theirs : other.getIntervals()) {
            Interval overlap = mine.intersect(theirs);
            if (overlap.isValid()) {
                overlaps.add(overlap);
            }
        }
    }
    return overlaps.isEmpty() ? NEVER : new IntervalSet(IntervalUtils.normalize(overlaps));
}
// Randomized check over 100 rounds: for sorted times t0<t1<t2<t3,
// [t0,t2] ∩ [t1,t3] must be exactly [t1,t2]; and any set intersected with its
// own negation must be empty (invalid).
@Test
public void intersect() {
    for (int i = 0; i < 100; i++) {
        List<Long> sortedTimes = ThreadLocalRandom.current().longs(0, Long.MAX_VALUE).distinct().limit(4).sorted()
                .boxed().collect(Collectors.toList());
        Interval int1 = Interval.between(sortedTimes.get(0), sortedTimes.get(2));
        Interval int2 = Interval.between(sortedTimes.get(1), sortedTimes.get(3));
        IntervalSet is = new IntervalSet(int1).intersect(new IntervalSet(int2));
        Set<Interval> s = Sets.newHashSet(is.getIntervals());
        // overlapping region is the middle interval
        Assert.assertEquals(1, s.size());
        Assert.assertTrue(s.contains(Interval.between(sortedTimes.get(1), sortedTimes.get(2))));
        // a set intersected with its negation is empty
        is = new IntervalSet(Interval.after(sortedTimes.get(2)));
        Assert.assertFalse(is.intersect(is.negate()).isValid());
        is = new IntervalSet(Interval.between(sortedTimes.get(1), sortedTimes.get(2)));
        Assert.assertFalse(is.intersect(is.negate()).isValid());
    }
}
/**
 * Returns the explicit port of the request URI, or the scheme default when the
 * URI carries no port: {@code DEFAULT_HTTPS_PORT} for secure requests,
 * {@code DEFAULT_HTTP_PORT} otherwise.
 */
public int getServerPort() {
    int port = getUri().getPort();
    if (port != -1) {
        return port;
    }
    // URI.getPort() answers -1 when no port is present — fall back to the scheme default
    return isSecure() ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT;
}
// Without an explicit port in the URI, the server port must fall back to the
// scheme default: 80 for http, 443 for https.
@Test
void testGetServerPort() {
    {
        URI uri = URI.create("http://example.yahoo.com/test");
        HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
        DiscFilterRequest request = new DiscFilterRequest(httpReq);
        assertEquals(request.getServerPort(), 80);
    }
    {
        URI uri = URI.create("https://example.yahoo.com/test");
        HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1);
        DiscFilterRequest request = new DiscFilterRequest(httpReq);
        assertEquals(request.getServerPort(), 443);
    }
}